/*
 * Copyright (C) 2005 - 2014 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"

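/* MCC cmds that may be issued only by functions holding certain privileges;
 * be_cmd_allowed() consults this table before a cmd is issued
 */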
static struct be_cmd_priv_map cmd_priv_map[] = {
	{
		OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
		CMD_SUBSYSTEM_ETH,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_GET_FLOW_CONTROL,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKQUERY | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_SET_FLOW_CONTROL,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_ETH_GET_PPORT_STATS,
		CMD_SUBSYSTEM_ETH,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_GET_PHY_DETAILS,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	}
};

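/* Returns false if the cmd (identified by opcode/subsystem) appears in
 * cmd_priv_map[] and this function holds none of the required privileges
 */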
static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem)
{
	int i;
	int num_entries = ARRAY_SIZE(cmd_priv_map);
	u32 cmd_privileges = adapter->cmd_privileges;

	for (i = 0; i < num_entries; i++)
		if (opcode == cmd_priv_map[i].opcode &&
		    subsystem == cmd_priv_map[i].subsystem)
			if (!(cmd_privileges & cmd_priv_map[i].priv_mask))
				return false;

	return true;
}

static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
	return wrb->payload.embedded_payload;
}

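/* Ring the MCC-queue doorbell to tell the FW that one new WRB is posted */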
static void be_mcc_notify(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	u32 val = 0;

	if (be_error(adapter))
		return;

	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;

	/* order the WRB writes before ringing the doorbell */
	wmb();
	iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
}

/* To check if valid bit is set, check the entire word as we don't know
 * the endianness of the data (old entry is host endian while a new entry is
 * little endian)
 */
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
	u32 flags;

	if (compl->flags != 0) {
		flags = le32_to_cpu(compl->flags);
		if (flags & CQE_FLAGS_VALID_MASK) {
			compl->flags = flags;
			return true;
		}
	}
	return false;
}

/* Need to reset the entire word that houses the valid bit */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
	compl->flags = 0;
}

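/* The wrb tags carry the virtual address of the cmd's response buffer
 * (see fill_wrb_tags()); recover that pointer from the two 32-bit tags
 */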
static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
{
	unsigned long addr;

	addr = tag1;
	/* shift in two steps; a single shift by 32 is undefined when
	 * unsigned long is 32 bits wide
	 */
	addr = ((addr << 16) << 16) | tag0;
	return (void *)addr;
}

static bool be_skip_err_log(u8 opcode, u16 base_status, u16 addl_status)
{
	if (base_status == MCC_STATUS_NOT_SUPPORTED ||
	    base_status == MCC_STATUS_ILLEGAL_REQUEST ||
	    addl_status == MCC_ADDL_STATUS_TOO_MANY_INTERFACES ||
	    (opcode == OPCODE_COMMON_WRITE_FLASHROM &&
	    (base_status == MCC_STATUS_ILLEGAL_FIELD ||
	     addl_status == MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH)))
		return true;
	else
		return false;
}

/* Handles the completion of async MCC cmds for which the caller is not
 * busy-waiting (i.e., has not issued be_mcc_notify_wait())
 */
static void be_async_cmd_process(struct be_adapter *adapter,
				 struct be_mcc_compl *compl,
				 struct be_cmd_resp_hdr *resp_hdr)
{
	enum mcc_base_status base_status = base_status(compl->status);
	u8 opcode = 0, subsystem = 0;

	if (resp_hdr) {
		opcode = resp_hdr->opcode;
		subsystem = resp_hdr->subsystem;
	}

	if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST &&
	    subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
		complete(&adapter->et_cmd_compl);
		return;
	}

	if ((opcode == OPCODE_COMMON_WRITE_FLASHROM ||
	     opcode == OPCODE_COMMON_WRITE_OBJECT) &&
	    subsystem == CMD_SUBSYSTEM_COMMON) {
		adapter->flash_status = compl->status;
		complete(&adapter->et_cmd_compl);
		return;
	}

	if ((opcode == OPCODE_ETH_GET_STATISTICS ||
	     opcode == OPCODE_ETH_GET_PPORT_STATS) &&
	    subsystem == CMD_SUBSYSTEM_ETH &&
	    base_status == MCC_STATUS_SUCCESS) {
		be_parse_stats(adapter);
		adapter->stats_cmd_sent = false;
		return;
	}

	if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
	    subsystem == CMD_SUBSYSTEM_COMMON) {
		if (base_status == MCC_STATUS_SUCCESS) {
			struct be_cmd_resp_get_cntl_addnl_attribs *resp =
							(void *)resp_hdr;

			adapter->drv_stats.be_on_die_temperature =
						resp->on_die_temperature;
		} else {
			adapter->be_get_temp_freq = 0;
		}
		return;
	}
}

static int be_mcc_compl_process(struct be_adapter *adapter,
				struct be_mcc_compl *compl)
{
	enum mcc_base_status base_status;
	enum mcc_addl_status addl_status;
	struct be_cmd_resp_hdr *resp_hdr;
	u8 opcode = 0, subsystem = 0;

	/* Just swap the status to host endian; mcc tag is opaquely copied
	 * from mcc_wrb
	 */
	be_dws_le_to_cpu(compl, 4);

	base_status = base_status(compl->status);
	addl_status = addl_status(compl->status);

	resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);
	if (resp_hdr) {
		opcode = resp_hdr->opcode;
		subsystem = resp_hdr->subsystem;
	}

	be_async_cmd_process(adapter, compl, resp_hdr);

	if (base_status != MCC_STATUS_SUCCESS &&
	    !be_skip_err_log(opcode, base_status, addl_status)) {
		if (base_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
			dev_warn(&adapter->pdev->dev,
				 "VF is not privileged to issue opcode %d-%d\n",
				 opcode, subsystem);
		} else {
			dev_err(&adapter->pdev->dev,
				"opcode %d-%d failed:status %d-%d\n",
				opcode, subsystem, base_status, addl_status);
		}
	}
	return compl->status;
}

/* Link state evt is a string of bytes; no need for endian swapping */
static void be_async_link_state_process(struct be_adapter *adapter,
					struct be_mcc_compl *compl)
{
	struct be_async_event_link_state *evt =
			(struct be_async_event_link_state *)compl;

	/* When link status changes, link speed must be re-queried from FW */
	adapter->phy.link_speed = -1;

	/* On BEx the FW does not send a separate link status
	 * notification for physical and logical link.
	 * On other chips just process the logical link
	 * status notification
	 */
	if (!BEx_chip(adapter) &&
	    !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))
		return;

	/* For the initial link status do not rely on the ASYNC event as
	 * it may not be received in some cases.
	 */
	if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
		be_link_status_update(adapter,
				      evt->port_link_status & LINK_STATUS_MASK);
}

/* Grp5 CoS Priority evt */
static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
					       struct be_mcc_compl *compl)
{
	struct be_async_event_grp5_cos_priority *evt =
			(struct be_async_event_grp5_cos_priority *)compl;

	if (evt->valid) {
		adapter->vlan_prio_bmap = evt->available_priority_bmap;
		adapter->recommended_prio =
			evt->reco_default_priority << VLAN_PRIO_SHIFT;
	}
}

/* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
					    struct be_mcc_compl *compl)
{
	struct be_async_event_grp5_qos_link_speed *evt =
			(struct be_async_event_grp5_qos_link_speed *)compl;

	if (adapter->phy.link_speed >= 0 &&
	    evt->physical_port == adapter->port_num)
		adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10;
}

/* Grp5 PVID evt */
static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
					     struct be_mcc_compl *compl)
{
	struct be_async_event_grp5_pvid_state *evt =
			(struct be_async_event_grp5_pvid_state *)compl;

	if (evt->enabled) {
		adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
		dev_info(&adapter->pdev->dev, "LPVID: %d\n", adapter->pvid);
	} else {
		adapter->pvid = 0;
	}
}

static void be_async_grp5_evt_process(struct be_adapter *adapter,
				      struct be_mcc_compl *compl)
{
	u8 event_type = (compl->flags >> ASYNC_EVENT_TYPE_SHIFT) &
				ASYNC_EVENT_TYPE_MASK;

	switch (event_type) {
	case ASYNC_EVENT_COS_PRIORITY:
		be_async_grp5_cos_priority_process(adapter, compl);
		break;
	case ASYNC_EVENT_QOS_SPEED:
		be_async_grp5_qos_speed_process(adapter, compl);
		break;
	case ASYNC_EVENT_PVID_STATE:
		be_async_grp5_pvid_state_process(adapter, compl);
		break;
	default:
		break;
	}
}

static void be_async_dbg_evt_process(struct be_adapter *adapter,
				     struct be_mcc_compl *cmp)
{
	u8 event_type = 0;
	struct be_async_event_qnq *evt = (struct be_async_event_qnq *)cmp;

	event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) &
			ASYNC_EVENT_TYPE_MASK;

	switch (event_type) {
	case ASYNC_DEBUG_EVENT_TYPE_QNQ:
		if (evt->valid)
			adapter->qnq_vid = le16_to_cpu(evt->vlan_tag);
		adapter->flags |= BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
		break;
	default:
		dev_warn(&adapter->pdev->dev, "Unknown debug event 0x%x!\n",
			 event_type);
		break;
	}
}

static inline bool is_link_state_evt(u32 flags)
{
	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
			ASYNC_EVENT_CODE_LINK_STATE;
}

static inline bool is_grp5_evt(u32 flags)
{
	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
			ASYNC_EVENT_CODE_GRP_5;
}

static inline bool is_dbg_evt(u32 flags)
{
	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
			ASYNC_EVENT_CODE_QNQ;
}

static void be_mcc_event_process(struct be_adapter *adapter,
				 struct be_mcc_compl *compl)
{
	if (is_link_state_evt(compl->flags))
		be_async_link_state_process(adapter, compl);
	else if (is_grp5_evt(compl->flags))
		be_async_grp5_evt_process(adapter, compl);
	else if (is_dbg_evt(compl->flags))
		be_async_dbg_evt_process(adapter, compl);
}

static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
{
	struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
	struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

	if (be_mcc_compl_is_new(compl)) {
		queue_tail_inc(mcc_cq);
		return compl;
	}
	return NULL;
}

void be_async_mcc_enable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
	adapter->mcc_obj.rearm_cq = true;

	spin_unlock_bh(&adapter->mcc_cq_lock);
}

void be_async_mcc_disable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	adapter->mcc_obj.rearm_cq = false;
	be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);

	spin_unlock_bh(&adapter->mcc_cq_lock);
}

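/* Drain the MCC completion queue: dispatch async events and cmd completions,
 * then notify the CQ; returns the status of the last cmd completion processed
 */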
int be_process_mcc(struct be_adapter *adapter)
{
	struct be_mcc_compl *compl;
	int num = 0, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	spin_lock(&adapter->mcc_cq_lock);

	while ((compl = be_mcc_compl_get(adapter))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			be_mcc_event_process(adapter, compl);
		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
			status = be_mcc_compl_process(adapter, compl);
			atomic_dec(&mcc_obj->q.used);
		}
		be_mcc_compl_use(compl);
		num++;
	}

	if (num)
		be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);

	spin_unlock(&adapter->mcc_cq_lock);
	return status;
}

/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout		120000 /* 12s timeout */
	int i, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	for (i = 0; i < mcc_timeout; i++) {
		if (be_error(adapter))
			return -EIO;

		local_bh_disable();
		status = be_process_mcc(adapter);
		local_bh_enable();

		if (atomic_read(&mcc_obj->q.used) == 0)
			break;
		udelay(100);
	}
	if (i == mcc_timeout) {
		dev_err(&adapter->pdev->dev, "FW not responding\n");
		adapter->fw_timeout = true;
		return -EIO;
	}
	return status;
}

/* Notify MCC requests and wait for completion */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
	int status;
	struct be_mcc_wrb *wrb;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
	u16 index = mcc_obj->q.head;
	struct be_cmd_resp_hdr *resp;

	index_dec(&index, mcc_obj->q.len);
	wrb = queue_index_node(&mcc_obj->q, index);

	resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);

	be_mcc_notify(adapter);

	status = be_mcc_wait_compl(adapter);
	if (status == -EIO)
		goto out;

	status = (resp->base_status |
		  ((resp->addl_status & CQE_ADDL_STATUS_MASK) <<
		   CQE_ADDL_STATUS_SHIFT));
out:
	return status;
}

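/* Poll the mailbox doorbell's ready bit; gives up after roughly 4 seconds */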
static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
	int msecs = 0;
	u32 ready;

	do {
		if (be_error(adapter))
			return -EIO;

		ready = ioread32(db);
		if (ready == 0xffffffff)
			return -1;

		ready &= MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			break;

		if (msecs > 4000) {
			dev_err(&adapter->pdev->dev, "FW not responding\n");
			adapter->fw_timeout = true;
			be_detect_error(adapter);
			return -1;
		}

		msleep(1);
		msecs++;
	} while (true);

	return 0;
}

/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
	int status;
	u32 val = 0;
	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(adapter, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status)
			return status;
	} else {
		dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
		return -1;
	}
	return 0;
}

static u16 be_POST_stage_get(struct be_adapter *adapter)
{
	u32 sem;

	if (BEx_chip(adapter))
		sem = ioread32(adapter->csr + SLIPORT_SEMAPHORE_OFFSET_BEx);
	else
		pci_read_config_dword(adapter->pdev,
				      SLIPORT_SEMAPHORE_OFFSET_SH, &sem);

	return sem & POST_STAGE_MASK;
}

static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 30
	u32 sliport_status;
	int status = 0, i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			break;

		msleep(1000);
	}

	if (i == SLIPORT_READY_TIMEOUT)
		status = -1;

	return status;
}

static bool lancer_provisioning_error(struct be_adapter *adapter)
{
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;

	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		sliport_err1 = ioread32(adapter->db + SLIPORT_ERROR1_OFFSET);
		sliport_err2 = ioread32(adapter->db + SLIPORT_ERROR2_OFFSET);

		if (sliport_err1 == SLIPORT_ERROR_NO_RESOURCE1 &&
		    sliport_err2 == SLIPORT_ERROR_NO_RESOURCE2)
			return true;
	}
	return false;
}

int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;
	bool resource_error;

	resource_error = lancer_provisioning_error(adapter);
	if (resource_error)
		return -EAGAIN;

	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check if the adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	/* Stop error recovery if the error is not recoverable.
	 * A no-resource error is temporary and goes away once the PF
	 * provisions resources.
	 */
	resource_error = lancer_provisioning_error(adapter);
	if (resource_error)
		status = -EAGAIN;

	return status;
}

int be_fw_wait_ready(struct be_adapter *adapter)
{
	u16 stage;
	int status, timeout = 0;
	struct device *dev = &adapter->pdev->dev;

	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		return status;
	}

	do {
		stage = be_POST_stage_get(adapter);
		if (stage == POST_STAGE_ARMFW_RDY)
			return 0;

		dev_info(dev, "Waiting for POST, %ds elapsed\n", timeout);
		if (msleep_interruptible(2000)) {
			dev_err(dev, "Waiting for POST aborted\n");
			return -EINTR;
		}
		timeout += 2;
	} while (timeout < 60);

	dev_err(dev, "POST timeout; stage=0x%x\n", stage);
	return -1;
}

static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
	return &wrb->payload.sgl[0];
}

static inline void fill_wrb_tags(struct be_mcc_wrb *wrb, unsigned long addr)
{
	wrb->tag0 = addr & 0xFFFFFFFF;
	wrb->tag1 = upper_32_bits(addr);
}

/* Don't touch the hdr after it's prepared.
 * mem will be NULL for embedded commands
 */
static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
				   u8 subsystem, u8 opcode, int cmd_len,
				   struct be_mcc_wrb *wrb,
				   struct be_dma_mem *mem)
{
	struct be_sge *sge;

	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
	req_hdr->version = 0;
	fill_wrb_tags(wrb, (ulong) req_hdr);
	wrb->payload_length = cmd_len;
	if (mem) {
		wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
			MCC_WRB_SGE_CNT_SHIFT;
		sge = nonembedded_sgl(wrb);
		sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
		sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
		sge->len = cpu_to_le32(mem->size);
	} else {
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	}
	be_dws_cpu_to_le(wrb, 8);
}

static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
				      struct be_dma_mem *mem)
{
	int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	u64 dma = (u64)mem->dma;

	for (i = 0; i < buf_pages; i++) {
		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
		dma += PAGE_SIZE_4K;
	}
}

static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_wrb *wrb
		= &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;

	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

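/* Returns the next free WRB on the MCCQ, or NULL if the queue is full
 * or has not been created yet
 */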
static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	struct be_mcc_wrb *wrb;

	if (!mccq->created)
		return NULL;

	if (atomic_read(&mccq->used) >= mccq->len)
		return NULL;

	wrb = queue_head_node(mccq);
	queue_head_inc(mccq);
	atomic_inc(&mccq->used);
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

static bool use_mcc(struct be_adapter *adapter)
{
	return adapter->mcc_obj.q.created;
}

/* Must be used only in process context */
static int be_cmd_lock(struct be_adapter *adapter)
{
	if (use_mcc(adapter)) {
		spin_lock_bh(&adapter->mcc_lock);
		return 0;
	} else {
		return mutex_lock_interruptible(&adapter->mbox_lock);
	}
}

/* Must be used only in process context */
static void be_cmd_unlock(struct be_adapter *adapter)
{
	if (use_mcc(adapter))
		spin_unlock_bh(&adapter->mcc_lock);
	else
		mutex_unlock(&adapter->mbox_lock);
}

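/* Copy the caller's WRB into a WRB on the mbox or MCCQ; for embedded cmds
 * the tags of the destination WRB are pointed back at the caller's payload
 * so that completion processing can locate it
 */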
static struct be_mcc_wrb *be_cmd_copy(struct be_adapter *adapter,
				      struct be_mcc_wrb *wrb)
{
	struct be_mcc_wrb *dest_wrb;

	if (use_mcc(adapter)) {
		dest_wrb = wrb_from_mccq(adapter);
		if (!dest_wrb)
			return NULL;
	} else {
		dest_wrb = wrb_from_mbox(adapter);
	}

	memcpy(dest_wrb, wrb, sizeof(*wrb));
	if (wrb->embedded & cpu_to_le32(MCC_WRB_EMBEDDED_MASK))
		fill_wrb_tags(dest_wrb, (ulong) embedded_payload(wrb));

	return dest_wrb;
}

/* Must be used only in process context */
static int be_cmd_notify_wait(struct be_adapter *adapter,
			      struct be_mcc_wrb *wrb)
{
	struct be_mcc_wrb *dest_wrb;
	int status;

	status = be_cmd_lock(adapter);
	if (status)
		return status;

	dest_wrb = be_cmd_copy(adapter, wrb);
	if (!dest_wrb) {
		status = -EBUSY;
		goto unlock;
	}

	if (use_mcc(adapter))
		status = be_mcc_notify_wait(adapter);
	else
		status = be_mbox_notify_wait(adapter);

	if (!status)
		memcpy(wrb, dest_wrb, sizeof(*wrb));

unlock:
	be_cmd_unlock(adapter);
	return status;
}

/* Tell fw we're about to start firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_init(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (lancer_chip(adapter))
		return 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0x12;
	*wrb++ = 0x34;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0x56;
	*wrb++ = 0x78;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Tell fw we're done with firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_clean(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (lancer_chip(adapter))
		return 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0xAA;
	*wrb++ = 0xBB;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0xCC;
	*wrb++ = 0xDD;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eq_create *req;
	struct be_dma_mem *q_mem = &eqo->q.dma_mem;
	int status, ver = 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb,
			       NULL);

	/* Support for EQ_CREATE v2 is available only from SH-R onwards */
	if (!(BEx_chip(adapter) || lancer_chip(adapter)))
		ver = 2;

	req->hdr.version = ver;
	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	/* 4-byte eqe */
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
		      __ilog2_u32(eqo->q.len / 256));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);

		eqo->q.id = le16_to_cpu(resp->eq_id);
		eqo->msix_idx =
			(ver == 2) ? le16_to_cpu(resp->msix_idx) : eqo->idx;
		eqo->q.created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses MCC */
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
			  bool permanent, u32 if_handle, u32 pmac_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mac_query *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb,
			       NULL);
	req->type = MAC_ADDRESS_TYPE_NETWORK;
	if (permanent) {
		req->permanent = 1;
	} else {
		req->if_id = cpu_to_le16((u16)if_handle);
		req->pmac_id = cpu_to_le32(pmac_id);
		req->permanent = 0;
	}

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);

		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
		    u32 if_id, u32 *pmac_id, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_add *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb,
			       NULL);

	req->hdr.domain = domain;
	req->if_id = cpu_to_le32(if_id);
	memcpy(req->mac_address, mac_addr, ETH_ALEN);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);

		*pmac_id = le32_to_cpu(resp->pmac_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);

	if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
		status = -EPERM;

	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_del *req;
	int status;

	if (pmac_id == -1)
		return 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req),
			       wrb, NULL);

	req->hdr.domain = dom;
	req->if_id = cpu_to_le32(if_id);
	req->pmac_id = cpu_to_le32(pmac_id);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses Mbox */
int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
		     struct be_queue_info *eq, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cq_create *req;
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb,
			       NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	if (BEx_chip(adapter)) {
		AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
			      coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
			      ctxt, no_delay);
		AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
			      __ilog2_u32(cq->len / 256));
		AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
	} else {
		req->hdr.version = 2;
		req->page_size = 1; /* 1 for 4K */

		/* coalesce-wm field in this cmd is not relevant to Lancer.
		 * Lancer uses COMMON_MODIFY_CQ to set this field
		 */
		if (!lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
				      ctxt, coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,
			      no_delay);
		AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
			      __ilog2_u32(cq->len / 256));
		AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
	}

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);

		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);

	return status;
}

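/* Encode a queue length for the FW: the encoding is log2(len) + 1, with
 * the maximum length of 64K entries (fls == 16) encoded as 0
 */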
static u32 be_encoded_q_len(int q_len)
{
	u32 len_encoded = fls(q_len); /* log2(len) + 1 */

	if (len_encoded == 16)
		len_encoded = 0;
	return len_encoded;
}

static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
				  struct be_queue_info *mccq,
				  struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_ext_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb,
			       NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
	if (BEx_chip(adapter)) {
		AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
			      be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
	} else {
		req->hdr.version = 1;
		req->cq_id = cpu_to_le16(cq->id);

		AMAP_SET_BITS(struct amap_mcc_context_v1, ring_size, ctxt,
			      be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_v1, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_id,
			      ctxt, cq->id);
		AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_valid,
			      ctxt, 1);
	}

	/* Subscribe to Link State and Group 5 events (bits 1 and 5 set) */
	req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
	req->async_event_bitmap[0] |= cpu_to_le32(1 << ASYNC_EVENT_CODE_QNQ);
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);

		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	mutex_unlock(&adapter->mbox_lock);

	return status;
}

static int be_cmd_mccq_org_create(struct be_adapter *adapter,
				  struct be_queue_info *mccq,
				  struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb,
			       NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
		      be_encoded_q_len(mccq->len));
	AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);

		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

int be_cmd_mccq_create(struct be_adapter *adapter,
		       struct be_queue_info *mccq, struct be_queue_info *cq)
{
	int status;

	status = be_cmd_mccq_ext_create(adapter, mccq, cq);
	if (status && BEx_chip(adapter)) {
		dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
			 "or newer to avoid conflicting priorities between NIC "
			 "and FCoE traffic\n");
		status = be_cmd_mccq_org_create(adapter, mccq, cq);
	}
	return status;
}

int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_mcc_wrb wrb = {0};
	struct be_cmd_req_eth_tx_create *req;
	struct be_queue_info *txq = &txo->q;
	struct be_queue_info *cq = &txo->cq;
	struct be_dma_mem *q_mem = &txq->dma_mem;
	int status, ver = 0;

	req = embedded_payload(&wrb);
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL);

	if (lancer_chip(adapter)) {
		req->hdr.version = 1;
	} else if (BEx_chip(adapter)) {
		if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC)
			req->hdr.version = 2;
	} else { /* For SH */
		req->hdr.version = 2;
	}

	if (req->hdr.version > 0)
		req->if_id = cpu_to_le16(adapter->if_handle);
	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->ulp_num = BE_ULP1_NUM;
	req->type = BE_ETH_TX_RING_TYPE_STANDARD;
	req->cq_id = cpu_to_le16(cq->id);
	req->queue_size = be_encoded_q_len(txq->len);
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	ver = req->hdr.version;

	status = be_cmd_notify_wait(adapter, &wrb);
	if (!status) {
		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(&wrb);

		txq->id = le16_to_cpu(resp->cid);
		if (ver == 2)
			txo->db_offset = le32_to_cpu(resp->db_offset);
		else
			txo->db_offset = DB_TXULP1_OFFSET;
		txq->created = true;
	}

	return status;
}

/* Uses MCC */
int be_cmd_rxq_create(struct be_adapter *adapter,
		      struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
		      u32 if_id, u32 rss, u8 *rss_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_rx_create *req;
	struct be_dma_mem *q_mem = &rxq->dma_mem;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);

	req->cq_id = cpu_to_le16(cq_id);
	req->frag_size = fls(frag_size) - 1;	/* log2 of frag_size */
	req->num_pages = 2;
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	req->interface_id = cpu_to_le32(if_id);
	req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE);
	req->rss_queue = cpu_to_le32(rss);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);

		rxq->id = le16_to_cpu(resp->id);
		rxq->created = true;
		*rss_id = resp->rss_id;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Generic destroyer function for all types of queues
 * Uses Mbox
 */
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
		     int queue_type)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_q_destroy *req;
	u8 subsys = 0, opcode = 0;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_TXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_TX_DESTROY;
		break;
	case QTYPE_RXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_RX_DESTROY;
		break;
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
	default:
		BUG();
	}

	be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
			       NULL);
	req->id = cpu_to_le16(q->id);

	status = be_mbox_notify_wait(adapter);
	q->created = false;

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses MCC */
int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_q_destroy *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
	req->id = cpu_to_le16(q->id);

	status = be_mcc_notify_wait(adapter);
	q->created = false;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Create an rx filtering policy configuration on an i/f
 * Will use MBOX only if MCCQ has not been created
 */
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
		     u32 *if_handle, u32 domain)
{
	struct be_mcc_wrb wrb = {0};
	struct be_cmd_req_if_create *req;
	int status;

	req = embedded_payload(&wrb);
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_INTERFACE_CREATE,
			       sizeof(*req), &wrb, NULL);
	req->hdr.domain = domain;
	req->capability_flags = cpu_to_le32(cap_flags);
	req->enable_flags = cpu_to_le32(en_flags);
	req->pmac_invalid = true;

	status = be_cmd_notify_wait(adapter, &wrb);
	if (!status) {
		struct be_cmd_resp_if_create *resp = embedded_payload(&wrb);

		*if_handle = le32_to_cpu(resp->interface_id);

		/* Hack to retrieve VF's pmac-id on BE3 */
		if (BE3_chip(adapter) && !be_physfn(adapter))
			adapter->pmac_id[0] = le32_to_cpu(resp->pmac_id);
	}
	return status;
}

/* Uses MCCQ */
int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_destroy *req;
	int status;

	if (interface_id == -1)
		return 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_INTERFACE_DESTROY,
			       sizeof(*req), wrb, NULL);
	req->hdr.domain = domain;
	req->interface_id = cpu_to_le32(interface_id);

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Get stats is a non-embedded command: the request is not embedded inside
 * the WRB but is a separate dma memory block
 * Uses asynchronous MCC
 */
int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *hdr;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	hdr = nonemb_cmd->va;

	be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb,
			       nonemb_cmd);

	/* BE2 supports only v0 of the cmd; BE3 and Lancer use v1;
	 * newer chips use v2
	 */
	if (BE2_chip(adapter))
		hdr->version = 0;
	else if (BE3_chip(adapter) || lancer_chip(adapter))
		hdr->version = 1;
	else
		hdr->version = 2;

	be_mcc_notify(adapter);
	adapter->stats_cmd_sent = true;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Lancer Stats */
int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
			       struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct lancer_cmd_req_pport_stats *req;
	int status = 0;

	if (!be_cmd_allowed(adapter, OPCODE_ETH_GET_PPORT_STATS,
			    CMD_SUBSYSTEM_ETH))
		return -EPERM;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size,
			       wrb, nonemb_cmd);

	req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
	req->cmd_params.params.reset_stats = 0;

	be_mcc_notify(adapter);
	adapter->stats_cmd_sent = true;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

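/* Convert the FW's mac_speed enum to a link speed in Mbps */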
static int be_mac_to_link_speed(int mac_speed)
{
	switch (mac_speed) {
	case PHY_LINK_SPEED_ZERO:
		return 0;
	case PHY_LINK_SPEED_10MBPS:
		return 10;
	case PHY_LINK_SPEED_100MBPS:
		return 100;
	case PHY_LINK_SPEED_1GBPS:
		return 1000;
	case PHY_LINK_SPEED_10GBPS:
		return 10000;
	case PHY_LINK_SPEED_20GBPS:
		return 20000;
	case PHY_LINK_SPEED_25GBPS:
		return 25000;
	case PHY_LINK_SPEED_40GBPS:
		return 40000;
	}
	return 0;
}

/* Uses synchronous mcc
 * Returns link_speed in Mbps
 */
int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
			     u8 *link_status, u32 dom)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_link_status *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	if (link_status)
		*link_status = LINK_DOWN;

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_LINK_STATUS_QUERY,
			       sizeof(*req), wrb, NULL);

	/* version 1 of the cmd is supported by all chips except BE2 */
	if (!BE2_chip(adapter))
		req->hdr.version = 1;

	req->hdr.domain = dom;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_link_status *resp = embedded_payload(wrb);

		if (link_speed) {
			*link_speed = resp->link_speed ?
				      le16_to_cpu(resp->link_speed) * 10 :
				      be_mac_to_link_speed(resp->mac_speed);

			if (!resp->logical_link_status)
				*link_speed = 0;
		}
		if (link_status)
			*link_status = resp->logical_link_status;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses asynchronous mcc; the completion is handled in be_async_cmd_process() */
int be_cmd_get_die_temperature(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_cntl_addnl_attribs *req;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES,
			       sizeof(*req), wrb, NULL);

	be_mcc_notify(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous mcc */
int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fat *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb,
			       NULL);
	req->fat_operation = cpu_to_le32(QUERY_FAT);
	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);

		if (log_size && resp->log_size)
			*log_size = le32_to_cpu(resp->log_size) -
					sizeof(u32);
	}
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
{
	struct be_dma_mem get_fat_cmd;
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fat *req;
	u32 offset = 0, total_size, buf_size,
				log_offset = sizeof(u32), payload_len;
	int status = 0;

	if (buf_len == 0)
		return -EIO;

	total_size = buf_len;

	get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60 * 1024;
	get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
					      get_fat_cmd.size,
					      &get_fat_cmd.dma);
	if (!get_fat_cmd.va) {
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while reading FAT data\n");
		return -ENOMEM;
	}

	spin_lock_bh(&adapter->mcc_lock);

	while (total_size) {
		buf_size = min(total_size, (u32)60 * 1024);
		total_size -= buf_size;

		wrb = wrb_from_mccq(adapter);
		if (!wrb) {
			status = -EBUSY;
			goto err;
		}
		req = get_fat_cmd.va;

		payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
		be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
				       OPCODE_COMMON_MANAGE_FAT, payload_len,
				       wrb, &get_fat_cmd);

		req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
		req->read_log_offset = cpu_to_le32(log_offset);
		req->read_log_length = cpu_to_le32(buf_size);
		req->data_buffer_size = cpu_to_le32(buf_size);

		status = be_mcc_notify_wait(adapter);
		if (!status) {
			struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;

			memcpy(buf + offset,
			       resp->data_buffer,
			       le32_to_cpu(resp->read_log_length));
		} else {
			dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
			goto err;
		}
		offset += buf_size;
		log_offset += buf_size;
	}
err:
	pci_free_consistent(adapter->pdev, get_fat_cmd.size,
			    get_fat_cmd.va, get_fat_cmd.dma);
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous mcc */
int be_cmd_get_fw_ver(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fw_version *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb,
			       NULL);
	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);

		strlcpy(adapter->fw_ver, resp->firmware_version_string,
			sizeof(adapter->fw_ver));
		strlcpy(adapter->fw_on_flash, resp->fw_on_flash_version_string,
			sizeof(adapter->fw_on_flash));
	}
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Set the EQ delay interval of an EQ to the specified value
 * Uses async mcc
 */
static int __be_cmd_modify_eqd(struct be_adapter *adapter,
			       struct be_set_eqd *set_eqd, int num)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_modify_eq_delay *req;
	int status = 0, i;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb,
			       NULL);

	req->num_eq = cpu_to_le32(num);
	for (i = 0; i < num; i++) {
		req->set_eqd[i].eq_id = cpu_to_le32(set_eqd[i].eq_id);
		req->set_eqd[i].phase = 0;
		req->set_eqd[i].delay_multiplier =
				cpu_to_le32(set_eqd[i].delay_multiplier);
	}

	be_mcc_notify(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

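/* On Lancer, issue the MODIFY_EQ_DELAY cmd in batches of at most 8 EQs */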
int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
		      int num)
{
	int num_eqs, i = 0;

	if (lancer_chip(adapter) && num > 8) {
		while (num) {
			num_eqs = min(num, 8);
			__be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs);
			i += num_eqs;
			num -= num_eqs;
		}
	} else {
		__be_cmd_modify_eqd(adapter, set_eqd, num);
	}

	return 0;
}

/* Uses synchronous mcc */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
		       u32 num)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_vlan_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req),
			       wrb, NULL);

	req->interface_id = if_id;
	req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0;
	req->num_vlan = num;
	memcpy(req->normal_vlan, vtag_array,
	       req->num_vlan * sizeof(vtag_array[0]));

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
{
	struct be_mcc_wrb *wrb;
	struct be_dma_mem *mem = &adapter->rx_filter;
	struct be_cmd_req_rx_filter *req = mem->va;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	memset(req, 0, sizeof(*req));
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
			       wrb, mem);

	req->if_id = cpu_to_le32(adapter->if_handle);
	if (flags & IFF_PROMISC) {
		req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
						 BE_IF_FLAGS_VLAN_PROMISCUOUS |
						 BE_IF_FLAGS_MCAST_PROMISCUOUS);
		if (value == ON)
			req->if_flags =
				cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
					    BE_IF_FLAGS_VLAN_PROMISCUOUS |
					    BE_IF_FLAGS_MCAST_PROMISCUOUS);
	} else if (flags & IFF_ALLMULTI) {
		req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
		req->if_flags = cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
	} else if (flags & BE_FLAGS_VLAN_PROMISC) {
		req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS);

		if (value == ON)
			req->if_flags =
				cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS);
	} else {
		struct netdev_hw_addr *ha;
		int i = 0;

		req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_MULTICAST);
		req->if_flags = cpu_to_le32(BE_IF_FLAGS_MULTICAST);

		/* Reset mcast promisc mode if already set by setting mask
		 * and not setting flags field
		 */
		req->if_flags_mask |=
			cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS &
				    be_if_cap_flags(adapter));
		req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
		netdev_for_each_mc_addr(ha, adapter->netdev)
			memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
	}

	if ((req->if_flags_mask & cpu_to_le32(be_if_cap_flags(adapter))) !=
	    req->if_flags_mask) {
		dev_warn(&adapter->pdev->dev,
			 "Cannot set rx filter flags 0x%x\n",
			 req->if_flags_mask);
		dev_warn(&adapter->pdev->dev,
			 "Interface is capable of 0x%x flags only\n",
			 be_if_cap_flags(adapter));
	}
	req->if_flags_mask &= cpu_to_le32(be_if_cap_flags(adapter));

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous mcc */
int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_flow_control *req;
	int status;

	if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_FLOW_CONTROL,
			    CMD_SUBSYSTEM_COMMON))
		return -EPERM;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req),
			       wrb, NULL);

	req->hdr.version = 1;
	req->tx_flow_control = cpu_to_le16((u16)tx_fc);
	req->rx_flow_control = cpu_to_le16((u16)rx_fc);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);

	if (base_status(status) == MCC_STATUS_FEATURE_NOT_SUPPORTED)
		return -EOPNOTSUPP;

	return status;
}

/* Uses sync mcc */
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_flow_control *req;
	int status;

	if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_FLOW_CONTROL,
			    CMD_SUBSYSTEM_COMMON))
		return -EPERM;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req),
			       wrb, NULL);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_flow_control *resp =
						embedded_payload(wrb);

		*tx_fc = le16_to_cpu(resp->tx_flow_control);
		*rx_fc = le16_to_cpu(resp->rx_flow_control);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

2035/* Uses mbox */
2036int be_cmd_query_fw_cfg(struct be_adapter *adapter)
2037{
2038	struct be_mcc_wrb *wrb;
2039	struct be_cmd_req_query_fw_cfg *req;
2040	int status;
2041
2042	if (mutex_lock_interruptible(&adapter->mbox_lock))
2043		return -1;
2044
2045	wrb = wrb_from_mbox(adapter);
2046	req = embedded_payload(wrb);
2047
2048	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2049			       OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
2050			       sizeof(*req), wrb, NULL);
2051
2052	status = be_mbox_notify_wait(adapter);
2053	if (!status) {
2054		struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
2055
2056		adapter->port_num = le32_to_cpu(resp->phys_port);
2057		adapter->function_mode = le32_to_cpu(resp->function_mode);
2058		adapter->function_caps = le32_to_cpu(resp->function_caps);
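		/* Only the low byte of asic_revision is used */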
2059		adapter->asic_rev = le32_to_cpu(resp->asic_revision) & 0xFF;
2060		dev_info(&adapter->pdev->dev,
2061			 "FW config: function_mode=0x%x, function_caps=0x%x\n",
2062			 adapter->function_mode, adapter->function_caps);
2063	}
2064
2065	mutex_unlock(&adapter->mbox_lock);
2066	return status;
2067}
2068
2069/* Uses mbox */
2070int be_cmd_reset_function(struct be_adapter *adapter)
2071{
2072	struct be_mcc_wrb *wrb;
2073	struct be_cmd_req_hdr *req;
2074	int status;
2075
2076	if (lancer_chip(adapter)) {
2077		status = lancer_wait_ready(adapter);
2078		if (!status) {
2079			iowrite32(SLI_PORT_CONTROL_IP_MASK,
2080				  adapter->db + SLIPORT_CONTROL_OFFSET);
2081			status = lancer_test_and_set_rdy_state(adapter);
2082		}
2083		if (status) {
2084			dev_err(&adapter->pdev->dev,
2085				"Adapter in non-recoverable error\n");
2086		}
2087		return status;
2088	}
2089
2090	if (mutex_lock_interruptible(&adapter->mbox_lock))
2091		return -1;
2092
2093	wrb = wrb_from_mbox(adapter);
2094	req = embedded_payload(wrb);
2095
2096	be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
2097			       OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb,
2098			       NULL);
2099
2100	status = be_mbox_notify_wait(adapter);
2101
2102	mutex_unlock(&adapter->mbox_lock);
2103	return status;
2104}
2105
2106int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
2107		      u32 rss_hash_opts, u16 table_size, const u8 *rss_hkey)
2108{
2109	struct be_mcc_wrb *wrb;
2110	struct be_cmd_req_rss_config *req;
2111	int status;
2112
2113	if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
2114		return 0;
2115
2116	spin_lock_bh(&adapter->mcc_lock);
2117
2118	wrb = wrb_from_mccq(adapter);
2119	if (!wrb) {
2120		status = -EBUSY;
2121		goto err;
2122	}
2123	req = embedded_payload(wrb);
2124
2125	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
2126			       OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
2127
2128	req->if_id = cpu_to_le32(adapter->if_handle);
2129	req->enable_rss = cpu_to_le16(rss_hash_opts);
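	/* table_size is a power of 2, so fls() - 1 yields log2(table_size),
	 * which is the form the firmware expects here
	 */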
2130	req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
2131
2132	if (!BEx_chip(adapter))
2133		req->hdr.version = 1;
2134
2135	memcpy(req->cpu_table, rsstable, table_size);
2136	memcpy(req->hash, rss_hkey, RSS_HASH_KEY_LEN);
2137	be_dws_cpu_to_le(req->hash, sizeof(req->hash));
2138
2139	status = be_mcc_notify_wait(adapter);
2140err:
2141	spin_unlock_bh(&adapter->mcc_lock);
2142	return status;
2143}
2144
2145/* Uses sync mcc */
2146int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
2147			    u8 bcn, u8 sts, u8 state)
2148{
2149	struct be_mcc_wrb *wrb;
2150	struct be_cmd_req_enable_disable_beacon *req;
2151	int status;
2152
2153	spin_lock_bh(&adapter->mcc_lock);
2154
2155	wrb = wrb_from_mccq(adapter);
2156	if (!wrb) {
2157		status = -EBUSY;
2158		goto err;
2159	}
2160	req = embedded_payload(wrb);
2161
2162	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2163			       OPCODE_COMMON_ENABLE_DISABLE_BEACON,
2164			       sizeof(*req), wrb, NULL);
2165
2166	req->port_num = port_num;
2167	req->beacon_state = state;
2168	req->beacon_duration = bcn;
2169	req->status_duration = sts;
2170
2171	status = be_mcc_notify_wait(adapter);
2172
2173err:
2174	spin_unlock_bh(&adapter->mcc_lock);
2175	return status;
2176}
2177
2178/* Uses sync mcc */
2179int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
2180{
2181	struct be_mcc_wrb *wrb;
2182	struct be_cmd_req_get_beacon_state *req;
2183	int status;
2184
2185	spin_lock_bh(&adapter->mcc_lock);
2186
2187	wrb = wrb_from_mccq(adapter);
2188	if (!wrb) {
2189		status = -EBUSY;
2190		goto err;
2191	}
2192	req = embedded_payload(wrb);
2193
2194	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2195			       OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req),
2196			       wrb, NULL);
2197
2198	req->port_num = port_num;
2199
2200	status = be_mcc_notify_wait(adapter);
2201	if (!status) {
2202		struct be_cmd_resp_get_beacon_state *resp =
2203						embedded_payload(wrb);
2204
2205		*state = resp->beacon_state;
2206	}
2207
2208err:
2209	spin_unlock_bh(&adapter->mcc_lock);
2210	return status;
2211}
2212
2213/* Uses sync mcc */
2214int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
2215				      u8 page_num, u8 *data)
2216{
2217	struct be_dma_mem cmd;
2218	struct be_mcc_wrb *wrb;
2219	struct be_cmd_req_port_type *req;
2220	int status;
2221
2222	if (page_num > TR_PAGE_A2)
2223		return -EINVAL;
2224
2225	cmd.size = sizeof(struct be_cmd_resp_port_type);
2226	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
2227	if (!cmd.va) {
2228		dev_err(&adapter->pdev->dev, "Memory allocation failed\n");
2229		return -ENOMEM;
2230	}
2231	memset(cmd.va, 0, cmd.size);
2232
2233	spin_lock_bh(&adapter->mcc_lock);
2234
2235	wrb = wrb_from_mccq(adapter);
2236	if (!wrb) {
2237		status = -EBUSY;
2238		goto err;
2239	}
2240	req = cmd.va;
2241
2242	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2243			       OPCODE_COMMON_READ_TRANSRECV_DATA,
2244			       cmd.size, wrb, &cmd);
2245
2246	req->port = cpu_to_le32(adapter->hba_port_num);
2247	req->page_num = cpu_to_le32(page_num);
2248	status = be_mcc_notify_wait(adapter);
2249	if (!status) {
2250		struct be_cmd_resp_port_type *resp = cmd.va;
2251
2252		memcpy(data, resp->page_data, PAGE_DATA_LEN);
2253	}
2254err:
2255	spin_unlock_bh(&adapter->mcc_lock);
2256	pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
2257	return status;
2258}
2259
2260int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
2261			    u32 data_size, u32 data_offset,
2262			    const char *obj_name, u32 *data_written,
2263			    u8 *change_status, u8 *addn_status)
2264{
2265	struct be_mcc_wrb *wrb;
2266	struct lancer_cmd_req_write_object *req;
2267	struct lancer_cmd_resp_write_object *resp;
2268	void *ctxt = NULL;
2269	int status;
2270
2271	spin_lock_bh(&adapter->mcc_lock);
2272	adapter->flash_status = 0;
2273
2274	wrb = wrb_from_mccq(adapter);
2275	if (!wrb) {
2276		status = -EBUSY;
2277		goto err_unlock;
2278	}
2279
2280	req = embedded_payload(wrb);
2281
2282	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2283			       OPCODE_COMMON_WRITE_OBJECT,
2284			       sizeof(struct lancer_cmd_req_write_object), wrb,
2285			       NULL);
2286
2287	ctxt = &req->context;
2288	AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2289		      write_length, ctxt, data_size);
2290
2291	/* A zero-length chunk marks the end of the transfer (eof) */
2292	AMAP_SET_BITS(struct amap_lancer_write_obj_context, eof, ctxt,
2293		      (data_size == 0));
2297
2298	be_dws_cpu_to_le(ctxt, sizeof(req->context));
2299	req->write_offset = cpu_to_le32(data_offset);
2300	strlcpy(req->object_name, obj_name, sizeof(req->object_name));
2301	req->descriptor_count = cpu_to_le32(1);
2302	req->buf_len = cpu_to_le32(data_size);
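	/* The data to write follows the request header in the same
	 * DMA buffer, hence the sizeof() offset in the address below
	 */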
2303	req->addr_low = cpu_to_le32((cmd->dma +
2304				     sizeof(struct lancer_cmd_req_write_object))
2305				    & 0xFFFFFFFF);
2306	req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
2307				sizeof(struct lancer_cmd_req_write_object)));
2308
2309	be_mcc_notify(adapter);
2310	spin_unlock_bh(&adapter->mcc_lock);
2311
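	/* The MCC completion handler records the command status in
	 * flash_status and signals et_cmd_compl
	 */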
2312	if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
2313					 msecs_to_jiffies(60000)))
2314		status = -ETIMEDOUT;
2315	else
2316		status = adapter->flash_status;
2317
2318	resp = embedded_payload(wrb);
2319	if (!status) {
2320		*data_written = le32_to_cpu(resp->actual_write_len);
2321		*change_status = resp->change_status;
2322	} else {
2323		*addn_status = resp->additional_status;
2324	}
2325
2326	return status;
2327
2328err_unlock:
2329	spin_unlock_bh(&adapter->mcc_lock);
2330	return status;
2331}
2332
2333int be_cmd_query_cable_type(struct be_adapter *adapter)
2334{
2335	u8 page_data[PAGE_DATA_LEN];
2336	int status;
2337
2338	status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
2339						   page_data);
2340	if (!status) {
2341		switch (adapter->phy.interface_type) {
2342		case PHY_TYPE_QSFP:
2343			adapter->phy.cable_type =
2344				page_data[QSFP_PLUS_CABLE_TYPE_OFFSET];
2345			break;
2346		case PHY_TYPE_SFP_PLUS_10GB:
2347			adapter->phy.cable_type =
2348				page_data[SFP_PLUS_CABLE_TYPE_OFFSET];
2349			break;
2350		default:
2351			adapter->phy.cable_type = 0;
2352			break;
2353		}
2354	}
2355	return status;
2356}
2357
2358int lancer_cmd_delete_object(struct be_adapter *adapter, const char *obj_name)
2359{
2360	struct lancer_cmd_req_delete_object *req;
2361	struct be_mcc_wrb *wrb;
2362	int status;
2363
2364	spin_lock_bh(&adapter->mcc_lock);
2365
2366	wrb = wrb_from_mccq(adapter);
2367	if (!wrb) {
2368		status = -EBUSY;
2369		goto err;
2370	}
2371
2372	req = embedded_payload(wrb);
2373
2374	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2375			       OPCODE_COMMON_DELETE_OBJECT,
2376			       sizeof(*req), wrb, NULL);
2377
2378	strlcpy(req->object_name, obj_name, sizeof(req->object_name));
2379
2380	status = be_mcc_notify_wait(adapter);
2381err:
2382	spin_unlock_bh(&adapter->mcc_lock);
2383	return status;
2384}
2385
2386int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
2387			   u32 data_size, u32 data_offset, const char *obj_name,
2388			   u32 *data_read, u32 *eof, u8 *addn_status)
2389{
2390	struct be_mcc_wrb *wrb;
2391	struct lancer_cmd_req_read_object *req;
2392	struct lancer_cmd_resp_read_object *resp;
2393	int status;
2394
2395	spin_lock_bh(&adapter->mcc_lock);
2396
2397	wrb = wrb_from_mccq(adapter);
2398	if (!wrb) {
2399		status = -EBUSY;
2400		goto err_unlock;
2401	}
2402
2403	req = embedded_payload(wrb);
2404
2405	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2406			       OPCODE_COMMON_READ_OBJECT,
2407			       sizeof(struct lancer_cmd_req_read_object), wrb,
2408			       NULL);
2409
2410	req->desired_read_len = cpu_to_le32(data_size);
2411	req->read_offset = cpu_to_le32(data_offset);
2412	strlcpy(req->object_name, obj_name, sizeof(req->object_name));
2413	req->descriptor_count = cpu_to_le32(1);
2414	req->buf_len = cpu_to_le32(data_size);
2415	req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF));
2416	req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma));
2417
2418	status = be_mcc_notify_wait(adapter);
2419
2420	resp = embedded_payload(wrb);
2421	if (!status) {
2422		*data_read = le32_to_cpu(resp->actual_read_len);
2423		*eof = le32_to_cpu(resp->eof);
2424	} else {
2425		*addn_status = resp->additional_status;
2426	}
2427
2428err_unlock:
2429	spin_unlock_bh(&adapter->mcc_lock);
2430	return status;
2431}
2432
2433int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
2434			  u32 flash_type, u32 flash_opcode, u32 buf_size)
2435{
2436	struct be_mcc_wrb *wrb;
2437	struct be_cmd_write_flashrom *req;
2438	int status;
2439
2440	spin_lock_bh(&adapter->mcc_lock);
2441	adapter->flash_status = 0;
2442
2443	wrb = wrb_from_mccq(adapter);
2444	if (!wrb) {
2445		status = -EBUSY;
2446		goto err_unlock;
2447	}
2448	req = cmd->va;
2449
2450	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2451			       OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb,
2452			       cmd);
2453
2454	req->params.op_type = cpu_to_le32(flash_type);
2455	req->params.op_code = cpu_to_le32(flash_opcode);
2456	req->params.data_buf_size = cpu_to_le32(buf_size);
2457
2458	be_mcc_notify(adapter);
2459	spin_unlock_bh(&adapter->mcc_lock);
2460
2461	if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
2462					 msecs_to_jiffies(40000)))
2463		status = -ETIMEDOUT;
2464	else
2465		status = adapter->flash_status;
2466
2467	return status;
2468
2469err_unlock:
2470	spin_unlock_bh(&adapter->mcc_lock);
2471	return status;
2472}
2473
2474int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
2475			 u16 optype, int offset)
2476{
2477	struct be_mcc_wrb *wrb;
2478	struct be_cmd_read_flash_crc *req;
2479	int status;
2480
2481	spin_lock_bh(&adapter->mcc_lock);
2482
2483	wrb = wrb_from_mccq(adapter);
2484	if (!wrb) {
2485		status = -EBUSY;
2486		goto err;
2487	}
2488	req = embedded_payload(wrb);
2489
2490	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2491			       OPCODE_COMMON_READ_FLASHROM, sizeof(*req),
2492			       wrb, NULL);
2493
2494	req->params.op_type = cpu_to_le32(optype);
2495	req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
2496	req->params.offset = cpu_to_le32(offset);
2497	req->params.data_buf_size = cpu_to_le32(0x4);
2498
2499	status = be_mcc_notify_wait(adapter);
2500	if (!status)
2501		memcpy(flashed_crc, req->crc, 4);
2502
2503err:
2504	spin_unlock_bh(&adapter->mcc_lock);
2505	return status;
2506}
2507
2508int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
2509			    struct be_dma_mem *nonemb_cmd)
2510{
2511	struct be_mcc_wrb *wrb;
2512	struct be_cmd_req_acpi_wol_magic_config *req;
2513	int status;
2514
2515	spin_lock_bh(&adapter->mcc_lock);
2516
2517	wrb = wrb_from_mccq(adapter);
2518	if (!wrb) {
2519		status = -EBUSY;
2520		goto err;
2521	}
2522	req = nonemb_cmd->va;
2523
2524	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
2525			       OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req),
2526			       wrb, nonemb_cmd);
2527	memcpy(req->magic_mac, mac, ETH_ALEN);
2528
2529	status = be_mcc_notify_wait(adapter);
2530
2531err:
2532	spin_unlock_bh(&adapter->mcc_lock);
2533	return status;
2534}
2535
2536int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
2537			u8 loopback_type, u8 enable)
2538{
2539	struct be_mcc_wrb *wrb;
2540	struct be_cmd_req_set_lmode *req;
2541	int status;
2542
2543	spin_lock_bh(&adapter->mcc_lock);
2544
2545	wrb = wrb_from_mccq(adapter);
2546	if (!wrb) {
2547		status = -EBUSY;
2548		goto err;
2549	}
2550
2551	req = embedded_payload(wrb);
2552
2553	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2554			       OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req),
2555			       wrb, NULL);
2556
2557	req->src_port = port_num;
2558	req->dest_port = port_num;
2559	req->loopback_type = loopback_type;
2560	req->loopback_state = enable;
2561
2562	status = be_mcc_notify_wait(adapter);
2563err:
2564	spin_unlock_bh(&adapter->mcc_lock);
2565	return status;
2566}
2567
2568int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
2569			 u32 loopback_type, u32 pkt_size, u32 num_pkts,
2570			 u64 pattern)
2571{
2572	struct be_mcc_wrb *wrb;
2573	struct be_cmd_req_loopback_test *req;
2574	struct be_cmd_resp_loopback_test *resp;
2575	int status;
2576
2577	spin_lock_bh(&adapter->mcc_lock);
2578
2579	wrb = wrb_from_mccq(adapter);
2580	if (!wrb) {
2581		status = -EBUSY;
2582		goto err;
2583	}
2584
2585	req = embedded_payload(wrb);
2586
2587	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2588			       OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb,
2589			       NULL);
2590
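	/* Allow the firmware up to 15 seconds to complete the test */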
2591	req->hdr.timeout = cpu_to_le32(15);
2592	req->pattern = cpu_to_le64(pattern);
2593	req->src_port = cpu_to_le32(port_num);
2594	req->dest_port = cpu_to_le32(port_num);
2595	req->pkt_size = cpu_to_le32(pkt_size);
2596	req->num_pkts = cpu_to_le32(num_pkts);
2597	req->loopback_type = cpu_to_le32(loopback_type);
2598
2599	be_mcc_notify(adapter);
2600
2601	spin_unlock_bh(&adapter->mcc_lock);
2602
2603	wait_for_completion(&adapter->et_cmd_compl);
2604	resp = embedded_payload(wrb);
2605	status = le32_to_cpu(resp->status);
2606
2607	return status;
2608err:
2609	spin_unlock_bh(&adapter->mcc_lock);
2610	return status;
2611}
2612
2613int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
2614			u32 byte_cnt, struct be_dma_mem *cmd)
2615{
2616	struct be_mcc_wrb *wrb;
2617	struct be_cmd_req_ddrdma_test *req;
2618	int status;
2619	int i, j = 0;
2620
2621	spin_lock_bh(&adapter->mcc_lock);
2622
2623	wrb = wrb_from_mccq(adapter);
2624	if (!wrb) {
2625		status = -EBUSY;
2626		goto err;
2627	}
2628	req = cmd->va;
2629	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2630			       OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb,
2631			       cmd);
2632
2633	req->pattern = cpu_to_le64(pattern);
2634	req->byte_count = cpu_to_le32(byte_cnt);
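	/* Replicate the 8-byte test pattern across the entire send buffer */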
2635	for (i = 0; i < byte_cnt; i++) {
2636		req->snd_buff[i] = (u8)(pattern >> (j*8));
2637		j++;
2638		if (j > 7)
2639			j = 0;
2640	}
2641
2642	status = be_mcc_notify_wait(adapter);
2643
2644	if (!status) {
2645		struct be_cmd_resp_ddrdma_test *resp;
2646
2647		resp = cmd->va;
2648		if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
2649		    resp->snd_err) {
2650			status = -1;
2651		}
2652	}
2653
2654err:
2655	spin_unlock_bh(&adapter->mcc_lock);
2656	return status;
2657}
2658
2659int be_cmd_get_seeprom_data(struct be_adapter *adapter,
2660			    struct be_dma_mem *nonemb_cmd)
2661{
2662	struct be_mcc_wrb *wrb;
2663	struct be_cmd_req_seeprom_read *req;
2664	int status;
2665
2666	spin_lock_bh(&adapter->mcc_lock);
2667
2668	wrb = wrb_from_mccq(adapter);
2669	if (!wrb) {
2670		status = -EBUSY;
2671		goto err;
2672	}
2673	req = nonemb_cmd->va;
2674
2675	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2676			       OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
2677			       nonemb_cmd);
2678
2679	status = be_mcc_notify_wait(adapter);
2680
2681err:
2682	spin_unlock_bh(&adapter->mcc_lock);
2683	return status;
2684}
2685
2686int be_cmd_get_phy_info(struct be_adapter *adapter)
2687{
2688	struct be_mcc_wrb *wrb;
2689	struct be_cmd_req_get_phy_info *req;
2690	struct be_dma_mem cmd;
2691	int status;
2692
2693	if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_PHY_DETAILS,
2694			    CMD_SUBSYSTEM_COMMON))
2695		return -EPERM;
2696
2697	spin_lock_bh(&adapter->mcc_lock);
2698
2699	wrb = wrb_from_mccq(adapter);
2700	if (!wrb) {
2701		status = -EBUSY;
2702		goto err;
2703	}
2704	cmd.size = sizeof(struct be_cmd_req_get_phy_info);
2705	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
2706	if (!cmd.va) {
2707		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2708		status = -ENOMEM;
2709		goto err;
2710	}
2711
2712	req = cmd.va;
2713
2714	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2715			       OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
2716			       wrb, &cmd);
2717
2718	status = be_mcc_notify_wait(adapter);
2719	if (!status) {
2720		struct be_phy_info *resp_phy_info =
2721				cmd.va + sizeof(struct be_cmd_req_hdr);
2722
2723		adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type);
2724		adapter->phy.interface_type =
2725			le16_to_cpu(resp_phy_info->interface_type);
2726		adapter->phy.auto_speeds_supported =
2727			le16_to_cpu(resp_phy_info->auto_speeds_supported);
2728		adapter->phy.fixed_speeds_supported =
2729			le16_to_cpu(resp_phy_info->fixed_speeds_supported);
2730		adapter->phy.misc_params =
2731			le32_to_cpu(resp_phy_info->misc_params);
2732
2733		if (BE2_chip(adapter)) {
2734			adapter->phy.fixed_speeds_supported =
2735				BE_SUPPORTED_SPEED_10GBPS |
2736				BE_SUPPORTED_SPEED_1GBPS;
2737		}
2738	}
2739	pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
2740err:
2741	spin_unlock_bh(&adapter->mcc_lock);
2742	return status;
2743}
2744
2745int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
2746{
2747	struct be_mcc_wrb *wrb;
2748	struct be_cmd_req_set_qos *req;
2749	int status;
2750
2751	spin_lock_bh(&adapter->mcc_lock);
2752
2753	wrb = wrb_from_mccq(adapter);
2754	if (!wrb) {
2755		status = -EBUSY;
2756		goto err;
2757	}
2758
2759	req = embedded_payload(wrb);
2760
2761	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2762			       OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
2763
2764	req->hdr.domain = domain;
2765	req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
2766	req->max_bps_nic = cpu_to_le32(bps);
2767
2768	status = be_mcc_notify_wait(adapter);
2769
2770err:
2771	spin_unlock_bh(&adapter->mcc_lock);
2772	return status;
2773}
2774
2775int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
2776{
2777	struct be_mcc_wrb *wrb;
2778	struct be_cmd_req_cntl_attribs *req;
2779	struct be_cmd_resp_cntl_attribs *resp;
2780	int status;
2781	int payload_len = max(sizeof(*req), sizeof(*resp));
2782	struct mgmt_controller_attrib *attribs;
2783	struct be_dma_mem attribs_cmd;
2784
2785	if (mutex_lock_interruptible(&adapter->mbox_lock))
2786		return -1;
2787
2788	memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
2789	attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
2790	attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
2791					      &attribs_cmd.dma);
2792	if (!attribs_cmd.va) {
2793		dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
2794		status = -ENOMEM;
2795		goto err;
2796	}
2797
2798	wrb = wrb_from_mbox(adapter);
2799	if (!wrb) {
2800		status = -EBUSY;
2801		goto err;
2802	}
2803	req = attribs_cmd.va;
2804
2805	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2806			       OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len,
2807			       wrb, &attribs_cmd);
2808
2809	status = be_mbox_notify_wait(adapter);
2810	if (!status) {
2811		attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
2812		adapter->hba_port_num = attribs->hba_attribs.phy_port;
2813	}
2814
2815err:
2816	mutex_unlock(&adapter->mbox_lock);
2817	if (attribs_cmd.va)
2818		pci_free_consistent(adapter->pdev, attribs_cmd.size,
2819				    attribs_cmd.va, attribs_cmd.dma);
2820	return status;
2821}
2822
2823/* Uses mbox */
2824int be_cmd_req_native_mode(struct be_adapter *adapter)
2825{
2826	struct be_mcc_wrb *wrb;
2827	struct be_cmd_req_set_func_cap *req;
2828	int status;
2829
2830	if (mutex_lock_interruptible(&adapter->mbox_lock))
2831		return -1;
2832
2833	wrb = wrb_from_mbox(adapter);
2834	if (!wrb) {
2835		status = -EBUSY;
2836		goto err;
2837	}
2838
2839	req = embedded_payload(wrb);
2840
2841	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2842			       OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP,
2843			       sizeof(*req), wrb, NULL);
2844
2845	req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
2846				CAPABILITY_BE3_NATIVE_ERX_API);
2847	req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);
2848
2849	status = be_mbox_notify_wait(adapter);
2850	if (!status) {
2851		struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
2852
2853		adapter->be3_native = le32_to_cpu(resp->cap_flags) &
2854					CAPABILITY_BE3_NATIVE_ERX_API;
2855		if (!adapter->be3_native)
2856			dev_warn(&adapter->pdev->dev,
2857				 "adapter not in advanced mode\n");
2858	}
2859err:
2860	mutex_unlock(&adapter->mbox_lock);
2861	return status;
2862}
2863
2864/* Get privilege(s) for a function */
2865int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
2866			     u32 domain)
2867{
2868	struct be_mcc_wrb *wrb;
2869	struct be_cmd_req_get_fn_privileges *req;
2870	int status;
2871
2872	spin_lock_bh(&adapter->mcc_lock);
2873
2874	wrb = wrb_from_mccq(adapter);
2875	if (!wrb) {
2876		status = -EBUSY;
2877		goto err;
2878	}
2879
2880	req = embedded_payload(wrb);
2881
2882	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2883			       OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req),
2884			       wrb, NULL);
2885
2886	req->hdr.domain = domain;
2887
2888	status = be_mcc_notify_wait(adapter);
2889	if (!status) {
2890		struct be_cmd_resp_get_fn_privileges *resp =
2891						embedded_payload(wrb);
2892
2893		*privilege = le32_to_cpu(resp->privilege_mask);
2894
2895		/* In UMC mode the FW does not return the right privileges.
2896		 * Override them with the full set of PF privileges.
2897		 */
2898		if (BEx_chip(adapter) && be_is_mc(adapter) &&
2899		    be_physfn(adapter))
2900			*privilege = MAX_PRIVILEGES;
2901	}
2902
2903err:
2904	spin_unlock_bh(&adapter->mcc_lock);
2905	return status;
2906}
2907
2908/* Set privilege(s) for a function */
2909int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
2910			     u32 domain)
2911{
2912	struct be_mcc_wrb *wrb;
2913	struct be_cmd_req_set_fn_privileges *req;
2914	int status;
2915
2916	spin_lock_bh(&adapter->mcc_lock);
2917
2918	wrb = wrb_from_mccq(adapter);
2919	if (!wrb) {
2920		status = -EBUSY;
2921		goto err;
2922	}
2923
2924	req = embedded_payload(wrb);
2925	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2926			       OPCODE_COMMON_SET_FN_PRIVILEGES, sizeof(*req),
2927			       wrb, NULL);
2928	req->hdr.domain = domain;
2929	if (lancer_chip(adapter))
2930		req->privileges_lancer = cpu_to_le32(privileges);
2931	else
2932		req->privileges = cpu_to_le32(privileges);
2933
2934	status = be_mcc_notify_wait(adapter);
2935err:
2936	spin_unlock_bh(&adapter->mcc_lock);
2937	return status;
2938}
2939
2940/* pmac_id_valid: true => pmac_id is supplied and its MAC address is requested.
2941 * pmac_id_valid: false => an active pmac_id or a permanent MAC address is
2942 *		  requested; if a pmac_id is returned, *pmac_id_valid is set to true
2943 */
2944int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
2945			     bool *pmac_id_valid, u32 *pmac_id, u32 if_handle,
2946			     u8 domain)
2947{
2948	struct be_mcc_wrb *wrb;
2949	struct be_cmd_req_get_mac_list *req;
2950	int status;
2951	int mac_count;
2952	struct be_dma_mem get_mac_list_cmd;
2953	int i;
2954
2955	memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
2956	get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
2957	get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
2958						   get_mac_list_cmd.size,
2959						   &get_mac_list_cmd.dma);
2960
2961	if (!get_mac_list_cmd.va) {
2962		dev_err(&adapter->pdev->dev,
2963			"Memory allocation failure during GET_MAC_LIST\n");
2964		return -ENOMEM;
2965	}
2966
2967	spin_lock_bh(&adapter->mcc_lock);
2968
2969	wrb = wrb_from_mccq(adapter);
2970	if (!wrb) {
2971		status = -EBUSY;
2972		goto out;
2973	}
2974
2975	req = get_mac_list_cmd.va;
2976
2977	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2978			       OPCODE_COMMON_GET_MAC_LIST,
2979			       get_mac_list_cmd.size, wrb, &get_mac_list_cmd);
2980	req->hdr.domain = domain;
2981	req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
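	/* perm_override requests the permanent (factory) MAC addresses
	 * rather than the currently active ones
	 */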
2982	if (*pmac_id_valid) {
2983		req->mac_id = cpu_to_le32(*pmac_id);
2984		req->iface_id = cpu_to_le16(if_handle);
2985		req->perm_override = 0;
2986	} else {
2987		req->perm_override = 1;
2988	}
2989
2990	status = be_mcc_notify_wait(adapter);
2991	if (!status) {
2992		struct be_cmd_resp_get_mac_list *resp =
2993						get_mac_list_cmd.va;
2994
2995		if (*pmac_id_valid) {
2996			memcpy(mac, resp->macid_macaddr.mac_addr_id.macaddr,
2997			       ETH_ALEN);
2998			goto out;
2999		}
3000
3001		mac_count = resp->true_mac_count + resp->pseudo_mac_count;
3002		/* The MAC list returned may contain one or more active mac_ids
3003		 * and/or one or more true or pseudo permanent MAC addresses.
3004		 * If an active mac_id is present, return the first active
3005		 * mac_id found.
3006		 */
3007		for (i = 0; i < mac_count; i++) {
3008			struct get_list_macaddr *mac_entry;
3009			u16 mac_addr_size;
3010			u32 mac_id;
3011
3012			mac_entry = &resp->macaddr_list[i];
3013			mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size);
3014			/* mac_id is a 32 bit value and mac_addr size
3015			 * is 6 bytes
3016			 */
3017			if (mac_addr_size == sizeof(u32)) {
3018				*pmac_id_valid = true;
3019				mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id;
3020				*pmac_id = le32_to_cpu(mac_id);
3021				goto out;
3022			}
3023		}
3024		/* If no active mac_id found, return first mac addr */
3025		*pmac_id_valid = false;
3026		memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
3027		       ETH_ALEN);
3028	}
3029
3030out:
3031	spin_unlock_bh(&adapter->mcc_lock);
3032	pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
3033			    get_mac_list_cmd.va, get_mac_list_cmd.dma);
3034	return status;
3035}
3036
3037int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id,
3038			  u8 *mac, u32 if_handle, bool active, u32 domain)
3039{
3040	if (!active)
3041		be_cmd_get_mac_from_list(adapter, mac, &active, &curr_pmac_id,
3042					 if_handle, domain);
3043	if (BEx_chip(adapter))
3044		return be_cmd_mac_addr_query(adapter, mac, false,
3045					     if_handle, curr_pmac_id);
3046	else
3047		/* Fetch the MAC address using pmac_id */
3048		return be_cmd_get_mac_from_list(adapter, mac, &active,
3049						&curr_pmac_id,
3050						if_handle, domain);
3051}
3052
3053int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac)
3054{
3055	int status;
3056	bool pmac_valid = false;
3057
3058	memset(mac, 0, ETH_ALEN);
3059
3060	if (BEx_chip(adapter)) {
3061		if (be_physfn(adapter))
3062			status = be_cmd_mac_addr_query(adapter, mac, true, 0,
3063						       0);
3064		else
3065			status = be_cmd_mac_addr_query(adapter, mac, false,
3066						       adapter->if_handle, 0);
3067	} else {
3068		status = be_cmd_get_mac_from_list(adapter, mac, &pmac_valid,
3069						  NULL, adapter->if_handle, 0);
3070	}
3071
3072	return status;
3073}
3074
3075/* Uses synchronous MCCQ */
3076int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
3077			u8 mac_count, u32 domain)
3078{
3079	struct be_mcc_wrb *wrb;
3080	struct be_cmd_req_set_mac_list *req;
3081	int status;
3082	struct be_dma_mem cmd;
3083
3084	memset(&cmd, 0, sizeof(struct be_dma_mem));
3085	cmd.size = sizeof(struct be_cmd_req_set_mac_list);
3086	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
3087				    &cmd.dma, GFP_KERNEL);
3088	if (!cmd.va)
3089		return -ENOMEM;
3090
3091	spin_lock_bh(&adapter->mcc_lock);
3092
3093	wrb = wrb_from_mccq(adapter);
3094	if (!wrb) {
3095		status = -EBUSY;
3096		goto err;
3097	}
3098
3099	req = cmd.va;
3100	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3101			       OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
3102			       wrb, &cmd);
3103
3104	req->hdr.domain = domain;
3105	req->mac_count = mac_count;
3106	if (mac_count)
3107		memcpy(req->mac, mac_array, ETH_ALEN*mac_count);
3108
3109	status = be_mcc_notify_wait(adapter);
3110
3111err:
3112	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
3113	spin_unlock_bh(&adapter->mcc_lock);
3114	return status;
3115}
3116
3117/* Wrapper to delete any active MACs and provision the new MAC.
3118 * Changes to MAC_LIST are allowed iff none of the MAC addresses in the
3119 * current list are active.
3120 */
3121int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom)
3122{
3123	bool active_mac = false;
3124	u8 old_mac[ETH_ALEN];
3125	u32 pmac_id;
3126	int status;
3127
3128	status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
3129					  &pmac_id, if_id, dom);
3130
3131	if (!status && active_mac)
3132		be_cmd_pmac_del(adapter, if_id, pmac_id, dom);
3133
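	/* A NULL mac translates to a mac_count of 0, which clears the list */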
3134	return be_cmd_set_mac_list(adapter, mac, mac ? 1 : 0, dom);
3135}
3136
3137int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
3138			  u32 domain, u16 intf_id, u16 hsw_mode)
3139{
3140	struct be_mcc_wrb *wrb;
3141	struct be_cmd_req_set_hsw_config *req;
3142	void *ctxt;
3143	int status;
3144
3145	spin_lock_bh(&adapter->mcc_lock);
3146
3147	wrb = wrb_from_mccq(adapter);
3148	if (!wrb) {
3149		status = -EBUSY;
3150		goto err;
3151	}
3152
3153	req = embedded_payload(wrb);
3154	ctxt = &req->context;
3155
3156	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3157			       OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb,
3158			       NULL);
3159
3160	req->hdr.domain = domain;
3161	AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
3162	if (pvid) {
3163		AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
3164		AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
3165	}
3166	if (!BEx_chip(adapter) && hsw_mode) {
3167		AMAP_SET_BITS(struct amap_set_hsw_context, interface_id,
3168			      ctxt, adapter->hba_port_num);
3169		AMAP_SET_BITS(struct amap_set_hsw_context, pport, ctxt, 1);
3170		AMAP_SET_BITS(struct amap_set_hsw_context, port_fwd_type,
3171			      ctxt, hsw_mode);
3172	}
3173
3174	be_dws_cpu_to_le(req->context, sizeof(req->context));
3175	status = be_mcc_notify_wait(adapter);
3176
3177err:
3178	spin_unlock_bh(&adapter->mcc_lock);
3179	return status;
3180}
3181
3182/* Get Hyper switch config */
3183int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
3184			  u32 domain, u16 intf_id, u8 *mode)
3185{
3186	struct be_mcc_wrb *wrb;
3187	struct be_cmd_req_get_hsw_config *req;
3188	void *ctxt;
3189	int status;
3190	u16 vid;
3191
3192	spin_lock_bh(&adapter->mcc_lock);
3193
3194	wrb = wrb_from_mccq(adapter);
3195	if (!wrb) {
3196		status = -EBUSY;
3197		goto err;
3198	}
3199
3200	req = embedded_payload(wrb);
3201	ctxt = &req->context;
3202
3203	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3204			       OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb,
3205			       NULL);
3206
3207	req->hdr.domain = domain;
3208	AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
3209		      ctxt, intf_id);
3210	AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);
3211
3212	if (!BEx_chip(adapter) && mode) {
3213		AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
3214			      ctxt, adapter->hba_port_num);
3215		AMAP_SET_BITS(struct amap_get_hsw_req_context, pport, ctxt, 1);
3216	}
3217	be_dws_cpu_to_le(req->context, sizeof(req->context));
3218
3219	status = be_mcc_notify_wait(adapter);
3220	if (!status) {
3221		struct be_cmd_resp_get_hsw_config *resp =
3222						embedded_payload(wrb);
3223
3224		be_dws_le_to_cpu(&resp->context, sizeof(resp->context));
3225		vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
3226				    pvid, &resp->context);
3227		if (pvid)
3228			*pvid = le16_to_cpu(vid);
3229		if (mode)
3230			*mode = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
3231					      port_fwd_type, &resp->context);
3232	}
3233
3234err:
3235	spin_unlock_bh(&adapter->mcc_lock);
3236	return status;
3237}
3238
3239int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
3240{
3241	struct be_mcc_wrb *wrb;
3242	struct be_cmd_req_acpi_wol_magic_config_v1 *req;
3243	int status = 0;
3244	struct be_dma_mem cmd;
3245
3246	if (!be_cmd_allowed(adapter, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
3247			    CMD_SUBSYSTEM_ETH))
3248		return -EPERM;
3249
3250	if (be_is_wol_excluded(adapter))
3251		return status;
3252
3253	if (mutex_lock_interruptible(&adapter->mbox_lock))
3254		return -1;
3255
3256	memset(&cmd, 0, sizeof(struct be_dma_mem));
3257	cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
3258	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
3259	if (!cmd.va) {
3260		dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
3261		status = -ENOMEM;
3262		goto err;
3263	}
3264
3265	wrb = wrb_from_mbox(adapter);
3266	if (!wrb) {
3267		status = -EBUSY;
3268		goto err;
3269	}
3270
3271	req = cmd.va;
3272
3273	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
3274			       OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
3275			       sizeof(*req), wrb, &cmd);
3276
3277	req->hdr.version = 1;
3278	req->query_options = BE_GET_WOL_CAP;
3279
3280	status = be_mbox_notify_wait(adapter);
3281	if (!status) {
3282		struct be_cmd_resp_acpi_wol_magic_config_v1 *resp;
3283
3284		resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *)cmd.va;
3285
3286		adapter->wol_cap = resp->wol_settings;
3287		if (adapter->wol_cap & BE_WOL_CAP)
3288			adapter->wol_en = true;
3289	}
3290err:
3291	mutex_unlock(&adapter->mbox_lock);
3292	if (cmd.va)
3293		pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
3294	return status;
3295
3296}
3297
3298int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
3299{
3300	struct be_dma_mem extfat_cmd;
3301	struct be_fat_conf_params *cfgs;
3302	int status;
3303	int i, j;
3304
3305	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3306	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3307	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3308					     &extfat_cmd.dma);
3309	if (!extfat_cmd.va)
3310		return -ENOMEM;
3311
3312	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3313	if (status)
3314		goto err;
3315
3316	cfgs = (struct be_fat_conf_params *)
3317			(extfat_cmd.va + sizeof(struct be_cmd_resp_hdr));
3318	for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) {
3319		u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes);
3320
3321		for (j = 0; j < num_modes; j++) {
3322			if (cfgs->module[i].trace_lvl[j].mode == MODE_UART)
3323				cfgs->module[i].trace_lvl[j].dbg_lvl =
3324							cpu_to_le32(level);
3325		}
3326	}
3327
3328	status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs);
3329err:
3330	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3331			    extfat_cmd.dma);
3332	return status;
3333}
3334
3335int be_cmd_get_fw_log_level(struct be_adapter *adapter)
3336{
3337	struct be_dma_mem extfat_cmd;
3338	struct be_fat_conf_params *cfgs;
3339	int status, j;
3340	int level = 0;
3341
3342	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3343	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3344	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3345					     &extfat_cmd.dma);
3346
3347	if (!extfat_cmd.va) {
3348		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3349			__func__);
3350		goto err;
3351	}
3352
3353	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3354	if (!status) {
3355		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3356						sizeof(struct be_cmd_resp_hdr));
3357
3358		for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
3359			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3360				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3361		}
3362	}
3363	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3364			    extfat_cmd.dma);
3365err:
3366	return level;
3367}
3368
3369int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
3370				   struct be_dma_mem *cmd)
3371{
3372	struct be_mcc_wrb *wrb;
3373	struct be_cmd_req_get_ext_fat_caps *req;
3374	int status;
3375
3376	if (mutex_lock_interruptible(&adapter->mbox_lock))
3377		return -1;
3378
3379	wrb = wrb_from_mbox(adapter);
3380	if (!wrb) {
3381		status = -EBUSY;
3382		goto err;
3383	}
3384
3385	req = cmd->va;
3386	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3387			       OPCODE_COMMON_GET_EXT_FAT_CAPABILITES,
3388			       cmd->size, wrb, cmd);
3389	req->parameter_type = cpu_to_le32(1);
3390
3391	status = be_mbox_notify_wait(adapter);
3392err:
3393	mutex_unlock(&adapter->mbox_lock);
3394	return status;
3395}
3396
3397int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
3398				   struct be_dma_mem *cmd,
3399				   struct be_fat_conf_params *configs)
3400{
3401	struct be_mcc_wrb *wrb;
3402	struct be_cmd_req_set_ext_fat_caps *req;
3403	int status;
3404
3405	spin_lock_bh(&adapter->mcc_lock);
3406
3407	wrb = wrb_from_mccq(adapter);
3408	if (!wrb) {
3409		status = -EBUSY;
3410		goto err;
3411	}
3412
3413	req = cmd->va;
3414	memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params));
3415	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3416			       OPCODE_COMMON_SET_EXT_FAT_CAPABILITES,
3417			       cmd->size, wrb, cmd);
3418
3419	status = be_mcc_notify_wait(adapter);
3420err:
3421	spin_unlock_bh(&adapter->mcc_lock);
3422	return status;
3423}
3424
3425int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name)
3426{
3427	struct be_mcc_wrb *wrb;
3428	struct be_cmd_req_get_port_name *req;
3429	int status;
3430
3431	if (!lancer_chip(adapter)) {
3432		*port_name = adapter->hba_port_num + '0';
3433		return 0;
3434	}
3435
3436	spin_lock_bh(&adapter->mcc_lock);
3437
3438	wrb = wrb_from_mccq(adapter);
3439	if (!wrb) {
3440		status = -EBUSY;
3441		goto err;
3442	}
3443
3444	req = embedded_payload(wrb);
3445
3446	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3447			       OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb,
3448			       NULL);
3449	req->hdr.version = 1;
3450
3451	status = be_mcc_notify_wait(adapter);
3452	if (!status) {
3453		struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);
3454
3455		*port_name = resp->port_name[adapter->hba_port_num];
3456	} else {
3457		*port_name = adapter->hba_port_num + '0';
3458	}
3459err:
3460	spin_unlock_bh(&adapter->mcc_lock);
3461	return status;
3462}
3463
3464/* Descriptor type */
3465enum {
3466	FUNC_DESC = 1,
3467	VFT_DESC = 2
3468};
3469
3470static struct be_nic_res_desc *be_get_nic_desc(u8 *buf, u32 desc_count,
3471					       int desc_type)
3472{
3473	struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
3474	struct be_nic_res_desc *nic;
3475	int i;
3476
3477	for (i = 0; i < desc_count; i++) {
3478		if (hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V0 ||
3479		    hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V1) {
3480			nic = (struct be_nic_res_desc *)hdr;
3481			if (desc_type == FUNC_DESC ||
3482			    (desc_type == VFT_DESC &&
3483			     nic->flags & (1 << VFT_SHIFT)))
3484				return nic;
3485		}
3486
3487		hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
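		/* Treat a desc_len of 0 as the default V0 descriptor size */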
3488		hdr = (void *)hdr + hdr->desc_len;
3489	}
3490	return NULL;
3491}
3492
3493static struct be_nic_res_desc *be_get_vft_desc(u8 *buf, u32 desc_count)
3494{
3495	return be_get_nic_desc(buf, desc_count, VFT_DESC);
3496}
3497
3498static struct be_nic_res_desc *be_get_func_nic_desc(u8 *buf, u32 desc_count)
3499{
3500	return be_get_nic_desc(buf, desc_count, FUNC_DESC);
3501}
3502
3503static struct be_pcie_res_desc *be_get_pcie_desc(u8 devfn, u8 *buf,
3504						 u32 desc_count)
3505{
3506	struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
3507	struct be_pcie_res_desc *pcie;
3508	int i;
3509
3510	for (i = 0; i < desc_count; i++) {
3511		if ((hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 ||
3512		     hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1)) {
3513			pcie = (struct be_pcie_res_desc	*)hdr;
3514			if (pcie->pf_num == devfn)
3515				return pcie;
3516		}
3517
3518		hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
3519		hdr = (void *)hdr + hdr->desc_len;
3520	}
3521	return NULL;
3522}
3523
3524static struct be_port_res_desc *be_get_port_desc(u8 *buf, u32 desc_count)
3525{
3526	struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
3527	int i;
3528
3529	for (i = 0; i < desc_count; i++) {
3530		if (hdr->desc_type == PORT_RESOURCE_DESC_TYPE_V1)
3531			return (struct be_port_res_desc *)hdr;
3532
3533		hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
3534		hdr = (void *)hdr + hdr->desc_len;
3535	}
3536	return NULL;
3537}
3538
3539static void be_copy_nic_desc(struct be_resources *res,
3540			     struct be_nic_res_desc *desc)
3541{
3542	res->max_uc_mac = le16_to_cpu(desc->unicast_mac_count);
3543	res->max_vlans = le16_to_cpu(desc->vlan_count);
3544	res->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count);
3545	res->max_tx_qs = le16_to_cpu(desc->txq_count);
3546	res->max_rss_qs = le16_to_cpu(desc->rssq_count);
3547	res->max_rx_qs = le16_to_cpu(desc->rq_count);
3548	res->max_evt_qs = le16_to_cpu(desc->eq_count);
3549	/* Clear flags that driver is not interested in */
3550	res->if_cap_flags = le32_to_cpu(desc->cap_flags) &
3551				BE_IF_CAP_FLAGS_WANT;
3552	/* Need 1 RXQ as the default RXQ */
3553	if (res->max_rss_qs && res->max_rss_qs == res->max_rx_qs)
3554		res->max_rss_qs -= 1;
3555}
3556
3557/* Uses Mbox */
3558int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
3559{
3560	struct be_mcc_wrb *wrb;
3561	struct be_cmd_req_get_func_config *req;
3562	int status;
3563	struct be_dma_mem cmd;
3564
3565	if (mutex_lock_interruptible(&adapter->mbox_lock))
3566		return -1;
3567
3568	memset(&cmd, 0, sizeof(struct be_dma_mem));
3569	cmd.size = sizeof(struct be_cmd_resp_get_func_config);
3570	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
3571	if (!cmd.va) {
3572		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
3573		status = -ENOMEM;
3574		goto err;
3575	}
3576
3577	wrb = wrb_from_mbox(adapter);
3578	if (!wrb) {
3579		status = -EBUSY;
3580		goto err;
3581	}
3582
3583	req = cmd.va;
3584
3585	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3586			       OPCODE_COMMON_GET_FUNC_CONFIG,
3587			       cmd.size, wrb, &cmd);
3588
3589	if (skyhawk_chip(adapter))
3590		req->hdr.version = 1;
3591
3592	status = be_mbox_notify_wait(adapter);
3593	if (!status) {
3594		struct be_cmd_resp_get_func_config *resp = cmd.va;
3595		u32 desc_count = le32_to_cpu(resp->desc_count);
3596		struct be_nic_res_desc *desc;
3597
3598		desc = be_get_func_nic_desc(resp->func_param, desc_count);
3599		if (!desc) {
3600			status = -EINVAL;
3601			goto err;
3602		}
3603
3604		adapter->pf_number = desc->pf_num;
3605		be_copy_nic_desc(res, desc);
3606	}
3607err:
3608	mutex_unlock(&adapter->mbox_lock);
3609	if (cmd.va)
3610		pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
3611	return status;
3612}
3613
3614/* Will use MBOX only if MCCQ has not been created */
3615int be_cmd_get_profile_config(struct be_adapter *adapter,
3616			      struct be_resources *res, u8 domain)
3617{
3618	struct be_cmd_resp_get_profile_config *resp;
3619	struct be_cmd_req_get_profile_config *req;
3620	struct be_nic_res_desc *vf_res;
3621	struct be_pcie_res_desc *pcie;
3622	struct be_port_res_desc *port;
3623	struct be_nic_res_desc *nic;
3624	struct be_mcc_wrb wrb = {0};
3625	struct be_dma_mem cmd;
3626	u32 desc_count;
3627	int status;
3628
3629	memset(&cmd, 0, sizeof(struct be_dma_mem));
3630	cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
3631	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
3632	if (!cmd.va)
3633		return -ENOMEM;
3634
3635	req = cmd.va;
3636	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3637			       OPCODE_COMMON_GET_PROFILE_CONFIG,
3638			       cmd.size, &wrb, &cmd);
3639
3640	req->hdr.domain = domain;
3641	if (!lancer_chip(adapter))
3642		req->hdr.version = 1;
3643	req->type = ACTIVE_PROFILE_TYPE;
3644
3645	status = be_cmd_notify_wait(adapter, &wrb);
3646	if (status)
3647		goto err;
3648
3649	resp = cmd.va;
3650	desc_count = le32_to_cpu(resp->desc_count);
3651
3652	pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param,
3653				desc_count);
3654	if (pcie)
3655		res->max_vfs = le16_to_cpu(pcie->num_vfs);
3656
3657	port = be_get_port_desc(resp->func_param, desc_count);
3658	if (port)
3659		adapter->mc_type = port->mc_type;
3660
3661	nic = be_get_func_nic_desc(resp->func_param, desc_count);
3662	if (nic)
3663		be_copy_nic_desc(res, nic);
3664
3665	vf_res = be_get_vft_desc(resp->func_param, desc_count);
3666	if (vf_res)
3667		res->vf_if_cap_flags = vf_res->cap_flags;
3668err:
3669	if (cmd.va)
3670		pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
3671	return status;
3672}
3673
3674/* Will use MBOX only if MCCQ has not been created */
3675static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
3676				     int size, int count, u8 version, u8 domain)
3677{
3678	struct be_cmd_req_set_profile_config *req;
3679	struct be_mcc_wrb wrb = {0};
3680	struct be_dma_mem cmd;
3681	int status;
3682
3683	memset(&cmd, 0, sizeof(struct be_dma_mem));
3684	cmd.size = sizeof(struct be_cmd_req_set_profile_config);
3685	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
3686	if (!cmd.va)
3687		return -ENOMEM;
3688
3689	req = cmd.va;
3690	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3691			       OPCODE_COMMON_SET_PROFILE_CONFIG, cmd.size,
3692			       &wrb, &cmd);
3693	req->hdr.version = version;
3694	req->hdr.domain = domain;
3695	req->desc_count = cpu_to_le32(count);
3696	memcpy(req->desc, desc, size);
3697
3698	status = be_cmd_notify_wait(adapter, &wrb);
3699
3700	if (cmd.va)
3701		pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
3702	return status;
3703}
3704
3705/* Mark all fields invalid */
3706static void be_reset_nic_desc(struct be_nic_res_desc *nic)
3707{
3708	memset(nic, 0, sizeof(*nic));
3709	nic->unicast_mac_count = 0xFFFF;
3710	nic->mcc_count = 0xFFFF;
3711	nic->vlan_count = 0xFFFF;
3712	nic->mcast_mac_count = 0xFFFF;
3713	nic->txq_count = 0xFFFF;
3714	nic->rq_count = 0xFFFF;
3715	nic->rssq_count = 0xFFFF;
3716	nic->lro_count = 0xFFFF;
3717	nic->cq_count = 0xFFFF;
3718	nic->toe_conn_count = 0xFFFF;
3719	nic->eq_count = 0xFFFF;
3720	nic->iface_count = 0xFFFF;
3721	nic->link_param = 0xFF;
3722	nic->channel_id_param = cpu_to_le16(0xF000);
3723	nic->acpi_params = 0xFF;
3724	nic->wol_param = 0x0F;
3725	nic->tunnel_iface_count = 0xFFFF;
3726	nic->direct_tenant_iface_count = 0xFFFF;
3727	nic->bw_min = 0xFFFFFFFF;
3728	nic->bw_max = 0xFFFFFFFF;
3729}
3730
3731/* Mark all fields invalid */
3732static void be_reset_pcie_desc(struct be_pcie_res_desc *pcie)
3733{
3734	memset(pcie, 0, sizeof(*pcie));
3735	pcie->sriov_state = 0xFF;
3736	pcie->pf_state = 0xFF;
3737	pcie->pf_type = 0xFF;
3738	pcie->num_vfs = 0xFFFF;
3739}
3740
3741int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed,
3742		      u8 domain)
3743{
3744	struct be_nic_res_desc nic_desc;
3745	u32 bw_percent;
3746	u16 version = 0;
3747
3748	if (BE3_chip(adapter))
3749		return be_cmd_set_qos(adapter, max_rate / 10, domain);
3750
3751	be_reset_nic_desc(&nic_desc);
3752	nic_desc.pf_num = adapter->pf_number;
3753	nic_desc.vf_num = domain;
3754	if (lancer_chip(adapter)) {
3755		nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
3756		nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0;
3757		nic_desc.flags = (1 << QUN_SHIFT) | (1 << IMM_SHIFT) |
3758					(1 << NOSV_SHIFT);
3759		nic_desc.bw_max = cpu_to_le32(max_rate / 10);
3760	} else {
3761		version = 1;
3762		nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
3763		nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
3764		nic_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
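		/* bw_max is a percentage of link speed; a max_rate of 0
		 * maps to 100%
		 */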
3765		bw_percent = max_rate ? (max_rate * 100) / link_speed : 100;
3766		nic_desc.bw_max = cpu_to_le32(bw_percent);
3767	}
3768
3769	return be_cmd_set_profile_config(adapter, &nic_desc,
3770					 nic_desc.hdr.desc_len,
3771					 1, version, domain);
3772}
3773
3774int be_cmd_set_sriov_config(struct be_adapter *adapter,
3775			    struct be_resources res, u16 num_vfs)
3776{
3777	struct {
3778		struct be_pcie_res_desc pcie;
3779		struct be_nic_res_desc nic_vft;
3780	} __packed desc;
3781	u16 vf_q_count;
3782
3783	if (BEx_chip(adapter) || lancer_chip(adapter))
3784		return 0;
3785
3786	/* PF PCIE descriptor */
3787	be_reset_pcie_desc(&desc.pcie);
3788	desc.pcie.hdr.desc_type = PCIE_RESOURCE_DESC_TYPE_V1;
3789	desc.pcie.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
3790	desc.pcie.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
3791	desc.pcie.pf_num = adapter->pdev->devfn;
3792	desc.pcie.sriov_state = num_vfs ? 1 : 0;
3793	desc.pcie.num_vfs = cpu_to_le16(num_vfs);
3794
3795	/* VF NIC Template descriptor */
3796	be_reset_nic_desc(&desc.nic_vft);
3797	desc.nic_vft.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
3798	desc.nic_vft.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
3799	desc.nic_vft.flags = (1 << VFT_SHIFT) | (1 << IMM_SHIFT) |
3800				(1 << NOSV_SHIFT);
3801	desc.nic_vft.pf_num = adapter->pdev->devfn;
3802	desc.nic_vft.vf_num = 0;
3803
3804	if (num_vfs && res.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
3805		/* If the number of VFs requested is at least 8 less than the
3806		 * max supported, reserve 8 queue pairs for the PF and divide
3807		 * the remaining queues evenly among the VFs
3808		 */
3809		if (num_vfs < (be_max_vfs(adapter) - 8))
3810			vf_q_count = (res.max_rss_qs - 8) / num_vfs;
3811		else
3812			vf_q_count = res.max_rss_qs / num_vfs;
3813
3814		desc.nic_vft.rq_count = cpu_to_le16(vf_q_count);
3815		desc.nic_vft.txq_count = cpu_to_le16(vf_q_count);
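		/* One of the queues is reserved as the default (non-RSS) RXQ */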
3816		desc.nic_vft.rssq_count = cpu_to_le16(vf_q_count - 1);
3817		desc.nic_vft.cq_count = cpu_to_le16(3 * vf_q_count);
3818	} else {
3819		desc.nic_vft.txq_count = cpu_to_le16(1);
3820		desc.nic_vft.rq_count = cpu_to_le16(1);
3821		desc.nic_vft.rssq_count = cpu_to_le16(0);
3822		/* One CQ for each TX, RX and MCCQ */
3823		desc.nic_vft.cq_count = cpu_to_le16(3);
3824	}
3825
3826	return be_cmd_set_profile_config(adapter, &desc,
3827					 2 * RESOURCE_DESC_SIZE_V1, 2, 1, 0);
3828}
3829
3830int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
3831{
3832	struct be_mcc_wrb *wrb;
3833	struct be_cmd_req_manage_iface_filters *req;
3834	int status;
3835
3836	if (iface == 0xFFFFFFFF)
3837		return -1;
3838
3839	spin_lock_bh(&adapter->mcc_lock);
3840
3841	wrb = wrb_from_mccq(adapter);
3842	if (!wrb) {
3843		status = -EBUSY;
3844		goto err;
3845	}
3846	req = embedded_payload(wrb);
3847
3848	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3849			       OPCODE_COMMON_MANAGE_IFACE_FILTERS, sizeof(*req),
3850			       wrb, NULL);
3851	req->op = op;
3852	req->target_iface_id = cpu_to_le32(iface);
3853
3854	status = be_mcc_notify_wait(adapter);
3855err:
3856	spin_unlock_bh(&adapter->mcc_lock);
3857	return status;
3858}
3859
3860int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port)
3861{
3862	struct be_port_res_desc port_desc;
3863
3864	memset(&port_desc, 0, sizeof(port_desc));
3865	port_desc.hdr.desc_type = PORT_RESOURCE_DESC_TYPE_V1;
3866	port_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
3867	port_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
3868	port_desc.link_num = adapter->hba_port_num;
3869	if (port) {
3870		port_desc.nv_flags = NV_TYPE_VXLAN | (1 << SOCVID_SHIFT) |
3871					(1 << RCVID_SHIFT);
3872		port_desc.nv_port = swab16(port);
3873	} else {
3874		port_desc.nv_flags = NV_TYPE_DISABLED;
3875		port_desc.nv_port = 0;
3876	}
3877
3878	return be_cmd_set_profile_config(adapter, &port_desc,
3879					 RESOURCE_DESC_SIZE_V1, 1, 1, 0);
3880}
3881
3882int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
3883		     int vf_num)
3884{
3885	struct be_mcc_wrb *wrb;
3886	struct be_cmd_req_get_iface_list *req;
3887	struct be_cmd_resp_get_iface_list *resp;
3888	int status;
3889
3890	spin_lock_bh(&adapter->mcc_lock);
3891
3892	wrb = wrb_from_mccq(adapter);
3893	if (!wrb) {
3894		status = -EBUSY;
3895		goto err;
3896	}
3897	req = embedded_payload(wrb);
3898
3899	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3900			       OPCODE_COMMON_GET_IFACE_LIST, sizeof(*resp),
3901			       wrb, NULL);
3902	req->hdr.domain = vf_num + 1;
3903
3904	status = be_mcc_notify_wait(adapter);
3905	if (!status) {
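		/* The response overwrites the request in the embedded payload */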
3906		resp = (struct be_cmd_resp_get_iface_list *)req;
3907		vf_cfg->if_handle = le32_to_cpu(resp->if_desc.if_id);
3908	}
3909
3910err:
3911	spin_unlock_bh(&adapter->mcc_lock);
3912	return status;
3913}
3914
3915static int lancer_wait_idle(struct be_adapter *adapter)
3916{
3917#define SLIPORT_IDLE_TIMEOUT 30	/* seconds */
3918	u32 reg_val;
3919	int status = 0, i;
3920
3921	for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3922		reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3923		if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3924			break;
3925
3926		ssleep(1);
3927	}
3928
3929	if (i == SLIPORT_IDLE_TIMEOUT)
3930		status = -1;
3931
3932	return status;
3933}
3934
3935int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask)
3936{
3937	int status = 0;
3938
3939	status = lancer_wait_idle(adapter);
3940	if (status)
3941		return status;
3942
3943	iowrite32(mask, adapter->db + PHYSDEV_CONTROL_OFFSET);
3944
3945	return status;
3946}
3947
3948/* Check whether a dump image is present */
3949bool dump_present(struct be_adapter *adapter)
3950{
3951	u32 sliport_status = 0;
3952
3953	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3954	return !!(sliport_status & SLIPORT_STATUS_DIP_MASK);
3955}
3956
int lancer_initiate_dump(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (dump_present(adapter)) {
		dev_info(dev, "Previous dump not cleared, not forcing dump\n");
		return -EEXIST;
	}

	/* trigger a firmware reset and diagnostic dump */
	status = lancer_physdev_ctrl(adapter, PHYSDEV_CONTROL_FW_RESET_MASK |
				     PHYSDEV_CONTROL_DD_MASK);
	if (status < 0) {
		dev_err(dev, "FW reset failed\n");
		return status;
	}

	status = lancer_wait_idle(adapter);
	if (status)
		return status;

	if (!dump_present(adapter)) {
		dev_err(dev, "FW dump not generated\n");
		return -EIO;
	}

	return 0;
}

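/* Clears an existing FW dump by deleting the dump file object */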
int lancer_delete_dump(struct be_adapter *adapter)
{
	int status;

	status = lancer_cmd_delete_object(adapter, LANCER_FW_DUMP_FILE);
	return be_cmd_status(status);
}

/* Uses sync mcc */
int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_enable_disable_vf *req;
	int status;

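	/* this cmd is not applicable on BE2/BE3 (BEx) chips */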
	if (BEx_chip(adapter))
		return 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_ENABLE_DISABLE_VF, sizeof(*req),
			       wrb, NULL);

	req->hdr.domain = domain;
	req->enable = 1;
	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

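/* Uses MBOX */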
int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_intr_set *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -EINTR;

	wrb = wrb_from_mbox(adapter);

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_INTERRUPT_ENABLE, sizeof(*req),
			       wrb, NULL);

	req->intr_enabled = intr_enable;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses MBOX */
int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile_id)
{
	struct be_cmd_req_get_active_profile *req;
	struct be_mcc_wrb *wrb;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -EINTR;

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_ACTIVE_PROFILE, sizeof(*req),
			       wrb, NULL);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_active_profile *resp =
							embedded_payload(wrb);

		*profile_id = le16_to_cpu(resp->active_profile_id);
	}

err:
	mutex_unlock(&adapter->mbox_lock);
	return status;
}

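/* Uses sync mcc. Sets a VF's logical link state: bit 0 of link_config
 * requests the ENABLE state, while the PLINK_TRACK bit requests AUTO
 * (track the physical link). A no-op on BEx and Lancer chips.
 */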
int be_cmd_set_logical_link_config(struct be_adapter *adapter,
				   int link_state, u8 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_ll_link *req;
	int status;

	if (BEx_chip(adapter) || lancer_chip(adapter))
		return 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_LOGICAL_LINK_CONFIG,
			       sizeof(*req), wrb, NULL);

	req->hdr.version = 1;
	req->hdr.domain = domain;

	if (link_state == IFLA_VF_LINK_STATE_ENABLE)
		req->link_config |= 1;

	if (link_state == IFLA_VF_LINK_STATE_AUTO)
		req->link_config |= 1 << PLINK_TRACK_SHIFT;

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

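/* Uses sync mcc. Passthrough that lets the RoCE driver issue FW commands:
 * the caller's payload is copied into an MCC WRB and the FW response is
 * copied back out to the caller's buffer.
 */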
int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
		    int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
{
	struct be_adapter *adapter = netdev_priv(netdev_handle);
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *)wrb_payload;
	struct be_cmd_req_hdr *req;
	struct be_cmd_resp_hdr *resp;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
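	/* req and resp alias the same embedded payload: the FW response
	 * overwrites the request in place
	 */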
	req = embedded_payload(wrb);
	resp = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(req, hdr->subsystem,
			       hdr->opcode, wrb_payload_size, wrb, NULL);
	memcpy(req, wrb_payload, wrb_payload_size);
	be_dws_cpu_to_le(req, wrb_payload_size);

	status = be_mcc_notify_wait(adapter);
	if (cmd_status)
		*cmd_status = (status & 0xffff);
	if (ext_status)
		*ext_status = 0;
	memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
	be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
EXPORT_SYMBOL(be_roce_mcc_cmd);
