xref: /linux/drivers/net/ethernet/emulex/benet/be_cmds.c (revision 9e8ba5f3ec35cba4fd8a8bebda548c4db2651e40)
1 /*
2  * Copyright (C) 2005 - 2011 Emulex
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Contact Information:
11  * linux-drivers@emulex.com
12  *
13  * Emulex
14  * 3333 Susan Street
15  * Costa Mesa, CA 92626
16  */
17 
18 #include "be.h"
19 #include "be_cmds.h"
20 
21 /* Must be a power of 2 or else MODULO will BUG_ON */
22 static int be_get_temp_freq = 64;
23 
24 static inline void *embedded_payload(struct be_mcc_wrb *wrb)
25 {
26 	return wrb->payload.embedded_payload;
27 }
28 
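/* Ring the MCCQ doorbell: the low bits select the MCC queue and the
 * num_posted field tells the hw that one new WRB has been posted. The wmb()
 * ensures the WRB contents are in memory before the doorbell write goes out.
 */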
29 static void be_mcc_notify(struct be_adapter *adapter)
30 {
31 	struct be_queue_info *mccq = &adapter->mcc_obj.q;
32 	u32 val = 0;
33 
34 	if (be_error(adapter))
35 		return;
36 
37 	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
38 	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
39 
40 	wmb();
41 	iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
42 }
43 
44 /* To check if the valid bit is set, check the entire word as we don't know
45  * the endianness of the data (old entry is host endian while a new entry is
46  * little endian) */
47 static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
48 {
49 	if (compl->flags != 0) {
50 		compl->flags = le32_to_cpu(compl->flags);
51 		BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
52 		return true;
53 	} else {
54 		return false;
55 	}
56 }
57 
58 /* Need to reset the entire word that houses the valid bit */
59 static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
60 {
61 	compl->flags = 0;
62 }
63 
64 static int be_mcc_compl_process(struct be_adapter *adapter,
65 	struct be_mcc_compl *compl)
66 {
67 	u16 compl_status, extd_status;
68 
69 	/* Just swap the status to host endian; mcc tag is opaquely copied
70 	 * from mcc_wrb */
71 	be_dws_le_to_cpu(compl, 4);
72 
73 	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
74 				CQE_STATUS_COMPL_MASK;
75 
76 	if (((compl->tag0 == OPCODE_COMMON_WRITE_FLASHROM) ||
77 		(compl->tag0 == OPCODE_COMMON_WRITE_OBJECT)) &&
78 		(compl->tag1 == CMD_SUBSYSTEM_COMMON)) {
79 		adapter->flash_status = compl_status;
80 		complete(&adapter->flash_compl);
81 	}
82 
83 	if (compl_status == MCC_STATUS_SUCCESS) {
84 		if (((compl->tag0 == OPCODE_ETH_GET_STATISTICS) ||
85 			 (compl->tag0 == OPCODE_ETH_GET_PPORT_STATS)) &&
86 			(compl->tag1 == CMD_SUBSYSTEM_ETH)) {
87 			be_parse_stats(adapter);
88 			adapter->stats_cmd_sent = false;
89 		}
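		/* For this cmd, tag1 holds the MCCQ index of the issuing WRB
		 * (set in be_cmd_get_die_temperature()), letting us locate the
		 * embedded response and read the die temperature from it.
		 */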
90 		if (compl->tag0 ==
91 				OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES) {
92 			struct be_mcc_wrb *mcc_wrb =
93 				queue_index_node(&adapter->mcc_obj.q,
94 						compl->tag1);
95 			struct be_cmd_resp_get_cntl_addnl_attribs *resp =
96 				embedded_payload(mcc_wrb);
97 			adapter->drv_stats.be_on_die_temperature =
98 				resp->on_die_temperature;
99 		}
100 	} else {
101 		if (compl->tag0 == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES)
102 			be_get_temp_freq = 0;
103 
104 		if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
105 			compl_status == MCC_STATUS_ILLEGAL_REQUEST)
106 			goto done;
107 
108 		if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
109 			dev_warn(&adapter->pdev->dev, "This domain (VM) is not "
110 				"permitted to execute this cmd (opcode %d)\n",
111 				compl->tag0);
112 		} else {
113 			extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
114 					CQE_STATUS_EXTD_MASK;
115 			dev_err(&adapter->pdev->dev, "Cmd (opcode %d) failed: "
116 				"status %d, extd-status %d\n",
117 				compl->tag0, compl_status, extd_status);
118 		}
119 	}
120 done:
121 	return compl_status;
122 }
123 
124 /* Link state evt is a string of bytes; no need for endian swapping */
125 static void be_async_link_state_process(struct be_adapter *adapter,
126 		struct be_async_event_link_state *evt)
127 {
128 	be_link_status_update(adapter, evt->port_link_status);
129 }
130 
131 /* Grp5 CoS Priority evt */
132 static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
133 		struct be_async_event_grp5_cos_priority *evt)
134 {
135 	if (evt->valid) {
136 		adapter->vlan_prio_bmap = evt->available_priority_bmap;
137 		adapter->recommended_prio &= ~VLAN_PRIO_MASK;
138 		adapter->recommended_prio =
139 			evt->reco_default_priority << VLAN_PRIO_SHIFT;
140 	}
141 }
142 
143 /* Grp5 QOS Speed evt */
144 static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
145 		struct be_async_event_grp5_qos_link_speed *evt)
146 {
147 	if (evt->physical_port == adapter->port_num) {
148 		/* qos_link_speed is in units of 10 Mbps */
149 		adapter->link_speed = evt->qos_link_speed * 10;
150 	}
151 }
152 
153 /* Grp5 PVID evt */
154 static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
155 		struct be_async_event_grp5_pvid_state *evt)
156 {
157 	if (evt->enabled)
158 		adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
159 	else
160 		adapter->pvid = 0;
161 }
162 
163 static void be_async_grp5_evt_process(struct be_adapter *adapter,
164 		u32 trailer, struct be_mcc_compl *evt)
165 {
166 	u8 event_type = 0;
167 
168 	event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
169 		ASYNC_TRAILER_EVENT_TYPE_MASK;
170 
171 	switch (event_type) {
172 	case ASYNC_EVENT_COS_PRIORITY:
173 		be_async_grp5_cos_priority_process(adapter,
174 		(struct be_async_event_grp5_cos_priority *)evt);
175 	break;
176 	case ASYNC_EVENT_QOS_SPEED:
177 		be_async_grp5_qos_speed_process(adapter,
178 		(struct be_async_event_grp5_qos_link_speed *)evt);
179 	break;
180 	case ASYNC_EVENT_PVID_STATE:
181 		be_async_grp5_pvid_state_process(adapter,
182 		(struct be_async_event_grp5_pvid_state *)evt);
183 	break;
184 	default:
185 		dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
186 		break;
187 	}
188 }
189 
190 static inline bool is_link_state_evt(u32 trailer)
191 {
192 	return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
193 		ASYNC_TRAILER_EVENT_CODE_MASK) ==
194 				ASYNC_EVENT_CODE_LINK_STATE;
195 }
196 
197 static inline bool is_grp5_evt(u32 trailer)
198 {
199 	return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
200 		ASYNC_TRAILER_EVENT_CODE_MASK) ==
201 				ASYNC_EVENT_CODE_GRP_5);
202 }
203 
204 static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
205 {
206 	struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
207 	struct be_mcc_compl *compl = queue_tail_node(mcc_cq);
208 
209 	if (be_mcc_compl_is_new(compl)) {
210 		queue_tail_inc(mcc_cq);
211 		return compl;
212 	}
213 	return NULL;
214 }
215 
216 void be_async_mcc_enable(struct be_adapter *adapter)
217 {
218 	spin_lock_bh(&adapter->mcc_cq_lock);
219 
220 	be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
221 	adapter->mcc_obj.rearm_cq = true;
222 
223 	spin_unlock_bh(&adapter->mcc_cq_lock);
224 }
225 
226 void be_async_mcc_disable(struct be_adapter *adapter)
227 {
228 	adapter->mcc_obj.rearm_cq = false;
229 }
230 
231 int be_process_mcc(struct be_adapter *adapter, int *status)
232 {
233 	struct be_mcc_compl *compl;
234 	int num = 0;
235 	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
236 
237 	spin_lock_bh(&adapter->mcc_cq_lock);
238 	while ((compl = be_mcc_compl_get(adapter))) {
239 		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
240 			/* Interpret flags as an async trailer */
241 			if (is_link_state_evt(compl->flags))
242 				be_async_link_state_process(adapter,
243 				(struct be_async_event_link_state *) compl);
244 			else if (is_grp5_evt(compl->flags))
245 				be_async_grp5_evt_process(adapter,
246 				compl->flags, compl);
247 		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
248 				*status = be_mcc_compl_process(adapter, compl);
249 				atomic_dec(&mcc_obj->q.used);
250 		}
251 		be_mcc_compl_use(compl);
252 		num++;
253 	}
254 
255 	spin_unlock_bh(&adapter->mcc_cq_lock);
256 	return num;
257 }
258 
259 /* Wait till no more pending mcc requests are present */
260 static int be_mcc_wait_compl(struct be_adapter *adapter)
261 {
262 #define mcc_timeout		120000 /* 12s timeout */
263 	int i, num, status = 0;
264 	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
265 
266 	for (i = 0; i < mcc_timeout; i++) {
267 		if (be_error(adapter))
268 			return -EIO;
269 
270 		num = be_process_mcc(adapter, &status);
271 		if (num)
272 			be_cq_notify(adapter, mcc_obj->cq.id,
273 				mcc_obj->rearm_cq, num);
274 
275 		if (atomic_read(&mcc_obj->q.used) == 0)
276 			break;
277 		udelay(100);
278 	}
279 	if (i == mcc_timeout) {
280 		dev_err(&adapter->pdev->dev, "FW not responding\n");
281 		adapter->fw_timeout = true;
282 		return -1;
283 	}
284 	return status;
285 }
286 
287 /* Notify MCC requests and wait for completion */
288 static int be_mcc_notify_wait(struct be_adapter *adapter)
289 {
290 	be_mcc_notify(adapter);
291 	return be_mcc_wait_compl(adapter);
292 }
293 
294 static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
295 {
296 	int msecs = 0;
297 	u32 ready;
298 
299 	do {
300 		if (be_error(adapter))
301 			return -EIO;
302 
303 		ready = ioread32(db);
304 		if (ready == 0xffffffff)
305 			return -1;
306 
307 		ready &= MPU_MAILBOX_DB_RDY_MASK;
308 		if (ready)
309 			break;
310 
311 		if (msecs > 4000) {
312 			dev_err(&adapter->pdev->dev, "FW not responding\n");
313 			adapter->fw_timeout = true;
314 			be_detect_dump_ue(adapter);
315 			return -1;
316 		}
317 
318 		msleep(1);
319 		msecs++;
320 	} while (true);
321 
322 	return 0;
323 }
324 
325 /*
326  * Insert the mailbox address into the doorbell in two steps
327  * Polls on the mbox doorbell till a command completion (or a timeout) occurs
328  */
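/* The mbox dma address is conveyed in two doorbell writes: the first (with
 * MPU_MAILBOX_DB_HI_MASK set) carries address bits 63:34, the second carries
 * bits 33:4; bits 3:0 are assumed to be zero (16-byte-aligned mailbox).
 */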
329 static int be_mbox_notify_wait(struct be_adapter *adapter)
330 {
331 	int status;
332 	u32 val = 0;
333 	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
334 	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
335 	struct be_mcc_mailbox *mbox = mbox_mem->va;
336 	struct be_mcc_compl *compl = &mbox->compl;
337 
338 	/* wait for ready to be set */
339 	status = be_mbox_db_ready_wait(adapter, db);
340 	if (status != 0)
341 		return status;
342 
343 	val |= MPU_MAILBOX_DB_HI_MASK;
344 	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
345 	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
346 	iowrite32(val, db);
347 
348 	/* wait for ready to be set */
349 	status = be_mbox_db_ready_wait(adapter, db);
350 	if (status != 0)
351 		return status;
352 
353 	val = 0;
354 	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
355 	val |= (u32)(mbox_mem->dma >> 4) << 2;
356 	iowrite32(val, db);
357 
358 	status = be_mbox_db_ready_wait(adapter, db);
359 	if (status != 0)
360 		return status;
361 
362 	/* A cq entry has been made now */
363 	if (be_mcc_compl_is_new(compl)) {
364 		status = be_mcc_compl_process(adapter, &mbox->compl);
365 		be_mcc_compl_use(compl);
366 		if (status)
367 			return status;
368 	} else {
369 		dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
370 		return -1;
371 	}
372 	return 0;
373 }
374 
375 static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
376 {
377 	u32 sem;
378 
379 	if (lancer_chip(adapter))
380 		sem  = ioread32(adapter->db + MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET);
381 	else
382 		sem  = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
383 
384 	*stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
385 	if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
386 		return -1;
387 	else
388 		return 0;
389 }
390 
391 int be_cmd_POST(struct be_adapter *adapter)
392 {
393 	u16 stage;
394 	int status, timeout = 0;
395 	struct device *dev = &adapter->pdev->dev;
396 
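	/* Poll the POST stage every 2s; give up after roughly 60s */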
397 	do {
398 		status = be_POST_stage_get(adapter, &stage);
399 		if (status) {
400 			dev_err(dev, "POST error; stage=0x%x\n", stage);
401 			return -1;
402 		} else if (stage != POST_STAGE_ARMFW_RDY) {
403 			if (msleep_interruptible(2000)) {
404 				dev_err(dev, "Waiting for POST aborted\n");
405 				return -EINTR;
406 			}
407 			timeout += 2;
408 		} else {
409 			return 0;
410 		}
411 	} while (timeout < 60);
412 
413 	dev_err(dev, "POST timeout; stage=0x%x\n", stage);
414 	return -1;
415 }
416 
417 
418 static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
419 {
420 	return &wrb->payload.sgl[0];
421 }
422 
423 
424 /* Don't touch the hdr after it's prepared */
425 /* mem will be NULL for embedded commands */
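/* tag0/tag1 mirror the opcode and subsystem so be_mcc_compl_process()
 * can match a completion back to the cmd that produced it.
 */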
426 static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
427 				u8 subsystem, u8 opcode, int cmd_len,
428 				struct be_mcc_wrb *wrb, struct be_dma_mem *mem)
429 {
430 	struct be_sge *sge;
431 
432 	req_hdr->opcode = opcode;
433 	req_hdr->subsystem = subsystem;
434 	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
435 	req_hdr->version = 0;
436 
437 	wrb->tag0 = opcode;
438 	wrb->tag1 = subsystem;
439 	wrb->payload_length = cmd_len;
440 	if (mem) {
441 		wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
442 			MCC_WRB_SGE_CNT_SHIFT;
443 		sge = nonembedded_sgl(wrb);
444 		sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
445 		sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
446 		sge->len = cpu_to_le32(mem->size);
447 	} else
448 		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
449 	be_dws_cpu_to_le(wrb, 8);
450 }
451 
452 static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
453 			struct be_dma_mem *mem)
454 {
455 	int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
456 	u64 dma = (u64)mem->dma;
457 
458 	for (i = 0; i < buf_pages; i++) {
459 		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
460 		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
461 		dma += PAGE_SIZE_4K;
462 	}
463 }
464 
465 /* Converts interrupt delay in microseconds to multiplier value */
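/* Worked example: usec_delay = 1000 => interrupt_rate = 1000/sec =>
 * multiplier = ((651042 - 1000) * 10 / 1000 + 5) / 10 = 650
 */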
466 static u32 eq_delay_to_mult(u32 usec_delay)
467 {
468 #define MAX_INTR_RATE			651042
469 	const u32 round = 10;
470 	u32 multiplier;
471 
472 	if (usec_delay == 0)
473 		multiplier = 0;
474 	else {
475 		u32 interrupt_rate = 1000000 / usec_delay;
476 		/* Max delay, corresponding to the lowest interrupt rate */
477 		if (interrupt_rate == 0)
478 			multiplier = 1023;
479 		else {
480 			multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
481 			multiplier /= interrupt_rate;
482 			/* Round the multiplier to the closest value.*/
483 			multiplier = (multiplier + round/2) / round;
484 			multiplier = min(multiplier, (u32)1023);
485 		}
486 	}
487 	return multiplier;
488 }
489 
490 static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
491 {
492 	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
493 	struct be_mcc_wrb *wrb
494 		= &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
495 	memset(wrb, 0, sizeof(*wrb));
496 	return wrb;
497 }
498 
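/* MCCQ WRBs back the normal runtime cmds (issued under mcc_lock); the mbox
 * WRB above is reserved for cmds that cannot use the MCCQ, typically the
 * early init cmds issued before the MCCQ exists (under mbox_lock).
 */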
499 static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
500 {
501 	struct be_queue_info *mccq = &adapter->mcc_obj.q;
502 	struct be_mcc_wrb *wrb;
503 
504 	if (atomic_read(&mccq->used) >= mccq->len) {
505 		dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
506 		return NULL;
507 	}
508 
509 	wrb = queue_head_node(mccq);
510 	queue_head_inc(mccq);
511 	atomic_inc(&mccq->used);
512 	memset(wrb, 0, sizeof(*wrb));
513 	return wrb;
514 }
515 
516 /* Tell fw we're about to start firing cmds by writing a
517  * special pattern across the wrb hdr; uses mbox
518  */
519 int be_cmd_fw_init(struct be_adapter *adapter)
520 {
521 	u8 *wrb;
522 	int status;
523 
524 	if (mutex_lock_interruptible(&adapter->mbox_lock))
525 		return -1;
526 
527 	wrb = (u8 *)wrb_from_mbox(adapter);
528 	*wrb++ = 0xFF;
529 	*wrb++ = 0x12;
530 	*wrb++ = 0x34;
531 	*wrb++ = 0xFF;
532 	*wrb++ = 0xFF;
533 	*wrb++ = 0x56;
534 	*wrb++ = 0x78;
535 	*wrb = 0xFF;
536 
537 	status = be_mbox_notify_wait(adapter);
538 
539 	mutex_unlock(&adapter->mbox_lock);
540 	return status;
541 }
542 
543 /* Tell fw we're done with firing cmds by writing a
544  * special pattern across the wrb hdr; uses mbox
545  */
546 int be_cmd_fw_clean(struct be_adapter *adapter)
547 {
548 	u8 *wrb;
549 	int status;
550 
551 	if (mutex_lock_interruptible(&adapter->mbox_lock))
552 		return -1;
553 
554 	wrb = (u8 *)wrb_from_mbox(adapter);
555 	*wrb++ = 0xFF;
556 	*wrb++ = 0xAA;
557 	*wrb++ = 0xBB;
558 	*wrb++ = 0xFF;
559 	*wrb++ = 0xFF;
560 	*wrb++ = 0xCC;
561 	*wrb++ = 0xDD;
562 	*wrb = 0xFF;
563 
564 	status = be_mbox_notify_wait(adapter);
565 
566 	mutex_unlock(&adapter->mbox_lock);
567 	return status;
568 }
569 int be_cmd_eq_create(struct be_adapter *adapter,
570 		struct be_queue_info *eq, int eq_delay)
571 {
572 	struct be_mcc_wrb *wrb;
573 	struct be_cmd_req_eq_create *req;
574 	struct be_dma_mem *q_mem = &eq->dma_mem;
575 	int status;
576 
577 	if (mutex_lock_interruptible(&adapter->mbox_lock))
578 		return -1;
579 
580 	wrb = wrb_from_mbox(adapter);
581 	req = embedded_payload(wrb);
582 
583 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
584 		OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, NULL);
585 
586 	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
587 
588 	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
589 	/* 4byte eqe*/
590 	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
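	/* count encodes the EQ length in units of 256 entries (as log2) */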
591 	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
592 			__ilog2_u32(eq->len/256));
593 	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
594 			eq_delay_to_mult(eq_delay));
595 	be_dws_cpu_to_le(req->context, sizeof(req->context));
596 
597 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
598 
599 	status = be_mbox_notify_wait(adapter);
600 	if (!status) {
601 		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
602 		eq->id = le16_to_cpu(resp->eq_id);
603 		eq->created = true;
604 	}
605 
606 	mutex_unlock(&adapter->mbox_lock);
607 	return status;
608 }
609 
610 /* Uses MCC */
611 int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
612 			u8 type, bool permanent, u32 if_handle, u32 pmac_id)
613 {
614 	struct be_mcc_wrb *wrb;
615 	struct be_cmd_req_mac_query *req;
616 	int status;
617 
618 	spin_lock_bh(&adapter->mcc_lock);
619 
620 	wrb = wrb_from_mccq(adapter);
621 	if (!wrb) {
622 		status = -EBUSY;
623 		goto err;
624 	}
625 	req = embedded_payload(wrb);
626 
627 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
628 		OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, NULL);
629 	req->type = type;
630 	if (permanent) {
631 		req->permanent = 1;
632 	} else {
633 		req->if_id = cpu_to_le16((u16) if_handle);
634 		req->pmac_id = cpu_to_le32(pmac_id);
635 		req->permanent = 0;
636 	}
637 
638 	status = be_mcc_notify_wait(adapter);
639 	if (!status) {
640 		struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
641 		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
642 	}
643 
644 err:
645 	spin_unlock_bh(&adapter->mcc_lock);
646 	return status;
647 }
648 
649 /* Uses synchronous MCCQ */
650 int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
651 		u32 if_id, u32 *pmac_id, u32 domain)
652 {
653 	struct be_mcc_wrb *wrb;
654 	struct be_cmd_req_pmac_add *req;
655 	int status;
656 
657 	spin_lock_bh(&adapter->mcc_lock);
658 
659 	wrb = wrb_from_mccq(adapter);
660 	if (!wrb) {
661 		status = -EBUSY;
662 		goto err;
663 	}
664 	req = embedded_payload(wrb);
665 
666 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
667 		OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb, NULL);
668 
669 	req->hdr.domain = domain;
670 	req->if_id = cpu_to_le32(if_id);
671 	memcpy(req->mac_address, mac_addr, ETH_ALEN);
672 
673 	status = be_mcc_notify_wait(adapter);
674 	if (!status) {
675 		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
676 		*pmac_id = le32_to_cpu(resp->pmac_id);
677 	}
678 
679 err:
680 	spin_unlock_bh(&adapter->mcc_lock);
681 
682 	if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
683 		status = -EPERM;
684 
685 	return status;
686 }
687 
688 /* Uses synchronous MCCQ */
689 int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
690 {
691 	struct be_mcc_wrb *wrb;
692 	struct be_cmd_req_pmac_del *req;
693 	int status;
694 
695 	if (pmac_id == -1)
696 		return 0;
697 
698 	spin_lock_bh(&adapter->mcc_lock);
699 
700 	wrb = wrb_from_mccq(adapter);
701 	if (!wrb) {
702 		status = -EBUSY;
703 		goto err;
704 	}
705 	req = embedded_payload(wrb);
706 
707 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
708 		OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req), wrb, NULL);
709 
710 	req->hdr.domain = dom;
711 	req->if_id = cpu_to_le32(if_id);
712 	req->pmac_id = cpu_to_le32(pmac_id);
713 
714 	status = be_mcc_notify_wait(adapter);
715 
716 err:
717 	spin_unlock_bh(&adapter->mcc_lock);
718 	return status;
719 }
720 
721 /* Uses Mbox */
722 int be_cmd_cq_create(struct be_adapter *adapter,
723 		struct be_queue_info *cq, struct be_queue_info *eq,
724 		bool sol_evts, bool no_delay, int coalesce_wm)
725 {
726 	struct be_mcc_wrb *wrb;
727 	struct be_cmd_req_cq_create *req;
728 	struct be_dma_mem *q_mem = &cq->dma_mem;
729 	void *ctxt;
730 	int status;
731 
732 	if (mutex_lock_interruptible(&adapter->mbox_lock))
733 		return -1;
734 
735 	wrb = wrb_from_mbox(adapter);
736 	req = embedded_payload(wrb);
737 	ctxt = &req->context;
738 
739 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
740 		OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb, NULL);
741 
742 	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
743 	if (lancer_chip(adapter)) {
744 		req->hdr.version = 2;
745 		req->page_size = 1; /* 1 for 4K */
746 		AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
747 								no_delay);
748 		AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
749 						__ilog2_u32(cq->len/256));
750 		AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
751 		AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
752 								ctxt, 1);
753 		AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
754 								ctxt, eq->id);
755 		AMAP_SET_BITS(struct amap_cq_context_lancer, armed, ctxt, 1);
756 	} else {
757 		AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
758 								coalesce_wm);
759 		AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
760 								ctxt, no_delay);
761 		AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
762 						__ilog2_u32(cq->len/256));
763 		AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
764 		AMAP_SET_BITS(struct amap_cq_context_be, solevent,
765 								ctxt, sol_evts);
766 		AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
767 		AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
768 		AMAP_SET_BITS(struct amap_cq_context_be, armed, ctxt, 1);
769 	}
770 
771 	be_dws_cpu_to_le(ctxt, sizeof(req->context));
772 
773 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
774 
775 	status = be_mbox_notify_wait(adapter);
776 	if (!status) {
777 		struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
778 		cq->id = le16_to_cpu(resp->cq_id);
779 		cq->created = true;
780 	}
781 
782 	mutex_unlock(&adapter->mbox_lock);
783 
784 	return status;
785 }
786 
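/* e.g. a 1024-entry queue encodes as fls(1024) = 11; the maximum encoding
 * (fls() = 16) wraps to 0.
 */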
787 static u32 be_encoded_q_len(int q_len)
788 {
789 	u32 len_encoded = fls(q_len); /* log2(len) + 1 */
790 	if (len_encoded == 16)
791 		len_encoded = 0;
792 	return len_encoded;
793 }
794 
795 int be_cmd_mccq_ext_create(struct be_adapter *adapter,
796 			struct be_queue_info *mccq,
797 			struct be_queue_info *cq)
798 {
799 	struct be_mcc_wrb *wrb;
800 	struct be_cmd_req_mcc_ext_create *req;
801 	struct be_dma_mem *q_mem = &mccq->dma_mem;
802 	void *ctxt;
803 	int status;
804 
805 	if (mutex_lock_interruptible(&adapter->mbox_lock))
806 		return -1;
807 
808 	wrb = wrb_from_mbox(adapter);
809 	req = embedded_payload(wrb);
810 	ctxt = &req->context;
811 
812 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
813 			OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, NULL);
814 
815 	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
816 	if (lancer_chip(adapter)) {
817 		req->hdr.version = 1;
818 		req->cq_id = cpu_to_le16(cq->id);
819 
820 		AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
821 						be_encoded_q_len(mccq->len));
822 		AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
823 		AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
824 								ctxt, cq->id);
825 		AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
826 								 ctxt, 1);
827 
828 	} else {
829 		AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
830 		AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
831 						be_encoded_q_len(mccq->len));
832 		AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
833 	}
834 
835 	/* Subscribe to Link State and Group 5 Events (bits 1 and 5 set) */
836 	req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
837 	be_dws_cpu_to_le(ctxt, sizeof(req->context));
838 
839 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
840 
841 	status = be_mbox_notify_wait(adapter);
842 	if (!status) {
843 		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
844 		mccq->id = le16_to_cpu(resp->id);
845 		mccq->created = true;
846 	}
847 	mutex_unlock(&adapter->mbox_lock);
848 
849 	return status;
850 }
851 
852 int be_cmd_mccq_org_create(struct be_adapter *adapter,
853 			struct be_queue_info *mccq,
854 			struct be_queue_info *cq)
855 {
856 	struct be_mcc_wrb *wrb;
857 	struct be_cmd_req_mcc_create *req;
858 	struct be_dma_mem *q_mem = &mccq->dma_mem;
859 	void *ctxt;
860 	int status;
861 
862 	if (mutex_lock_interruptible(&adapter->mbox_lock))
863 		return -1;
864 
865 	wrb = wrb_from_mbox(adapter);
866 	req = embedded_payload(wrb);
867 	ctxt = &req->context;
868 
869 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
870 			OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb, NULL);
871 
872 	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
873 
874 	AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
875 	AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
876 			be_encoded_q_len(mccq->len));
877 	AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
878 
879 	be_dws_cpu_to_le(ctxt, sizeof(req->context));
880 
881 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
882 
883 	status = be_mbox_notify_wait(adapter);
884 	if (!status) {
885 		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
886 		mccq->id = le16_to_cpu(resp->id);
887 		mccq->created = true;
888 	}
889 
890 	mutex_unlock(&adapter->mbox_lock);
891 	return status;
892 }
893 
894 int be_cmd_mccq_create(struct be_adapter *adapter,
895 			struct be_queue_info *mccq,
896 			struct be_queue_info *cq)
897 {
898 	int status;
899 
900 	status = be_cmd_mccq_ext_create(adapter, mccq, cq);
901 	if (status && !lancer_chip(adapter)) {
902 		dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
903 			"or newer to avoid conflicting priorities between NIC "
904 			"and FCoE traffic\n");
905 		status = be_cmd_mccq_org_create(adapter, mccq, cq);
906 	}
907 	return status;
908 }
909 
910 int be_cmd_txq_create(struct be_adapter *adapter,
911 			struct be_queue_info *txq,
912 			struct be_queue_info *cq)
913 {
914 	struct be_mcc_wrb *wrb;
915 	struct be_cmd_req_eth_tx_create *req;
916 	struct be_dma_mem *q_mem = &txq->dma_mem;
917 	void *ctxt;
918 	int status;
919 
920 	spin_lock_bh(&adapter->mcc_lock);
921 
922 	wrb = wrb_from_mccq(adapter);
923 	if (!wrb) {
924 		status = -EBUSY;
925 		goto err;
926 	}
927 
928 	req = embedded_payload(wrb);
929 	ctxt = &req->context;
930 
931 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
932 		OPCODE_ETH_TX_CREATE, sizeof(*req), wrb, NULL);
933 
934 	if (lancer_chip(adapter)) {
935 		req->hdr.version = 1;
936 		AMAP_SET_BITS(struct amap_tx_context, if_id, ctxt,
937 					adapter->if_handle);
938 	}
939 
940 	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
941 	req->ulp_num = BE_ULP1_NUM;
942 	req->type = BE_ETH_TX_RING_TYPE_STANDARD;
943 
944 	AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
945 		be_encoded_q_len(txq->len));
946 	AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
947 	AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);
948 
949 	be_dws_cpu_to_le(ctxt, sizeof(req->context));
950 
951 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
952 
953 	status = be_mcc_notify_wait(adapter);
954 	if (!status) {
955 		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
956 		txq->id = le16_to_cpu(resp->cid);
957 		txq->created = true;
958 	}
959 
960 err:
961 	spin_unlock_bh(&adapter->mcc_lock);
962 
963 	return status;
964 }
965 
966 /* Uses MCC */
967 int be_cmd_rxq_create(struct be_adapter *adapter,
968 		struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
969 		u16 max_frame_size, u32 if_id, u32 rss, u8 *rss_id)
970 {
971 	struct be_mcc_wrb *wrb;
972 	struct be_cmd_req_eth_rx_create *req;
973 	struct be_dma_mem *q_mem = &rxq->dma_mem;
974 	int status;
975 
976 	spin_lock_bh(&adapter->mcc_lock);
977 
978 	wrb = wrb_from_mccq(adapter);
979 	if (!wrb) {
980 		status = -EBUSY;
981 		goto err;
982 	}
983 	req = embedded_payload(wrb);
984 
985 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
986 				OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);
987 
988 	req->cq_id = cpu_to_le16(cq_id);
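	/* frag_size is passed to fw as log2 of the size, e.g. 2048 -> 11 */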
989 	req->frag_size = fls(frag_size) - 1;
990 	req->num_pages = 2;
991 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
992 	req->interface_id = cpu_to_le32(if_id);
993 	req->max_frame_size = cpu_to_le16(max_frame_size);
994 	req->rss_queue = cpu_to_le32(rss);
995 
996 	status = be_mcc_notify_wait(adapter);
997 	if (!status) {
998 		struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
999 		rxq->id = le16_to_cpu(resp->id);
1000 		rxq->created = true;
1001 		*rss_id = resp->rss_id;
1002 	}
1003 
1004 err:
1005 	spin_unlock_bh(&adapter->mcc_lock);
1006 	return status;
1007 }
1008 
1009 /* Generic destroyer function for all types of queues
1010  * Uses Mbox
1011  */
1012 int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
1013 		int queue_type)
1014 {
1015 	struct be_mcc_wrb *wrb;
1016 	struct be_cmd_req_q_destroy *req;
1017 	u8 subsys = 0, opcode = 0;
1018 	int status;
1019 
1020 	if (mutex_lock_interruptible(&adapter->mbox_lock))
1021 		return -1;
1022 
1023 	wrb = wrb_from_mbox(adapter);
1024 	req = embedded_payload(wrb);
1025 
1026 	switch (queue_type) {
1027 	case QTYPE_EQ:
1028 		subsys = CMD_SUBSYSTEM_COMMON;
1029 		opcode = OPCODE_COMMON_EQ_DESTROY;
1030 		break;
1031 	case QTYPE_CQ:
1032 		subsys = CMD_SUBSYSTEM_COMMON;
1033 		opcode = OPCODE_COMMON_CQ_DESTROY;
1034 		break;
1035 	case QTYPE_TXQ:
1036 		subsys = CMD_SUBSYSTEM_ETH;
1037 		opcode = OPCODE_ETH_TX_DESTROY;
1038 		break;
1039 	case QTYPE_RXQ:
1040 		subsys = CMD_SUBSYSTEM_ETH;
1041 		opcode = OPCODE_ETH_RX_DESTROY;
1042 		break;
1043 	case QTYPE_MCCQ:
1044 		subsys = CMD_SUBSYSTEM_COMMON;
1045 		opcode = OPCODE_COMMON_MCC_DESTROY;
1046 		break;
1047 	default:
1048 		BUG();
1049 	}
1050 
1051 	be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
1052 				NULL);
1053 	req->id = cpu_to_le16(q->id);
1054 
1055 	status = be_mbox_notify_wait(adapter);
1056 	if (!status)
1057 		q->created = false;
1058 
1059 	mutex_unlock(&adapter->mbox_lock);
1060 	return status;
1061 }
1062 
1063 /* Uses MCC */
1064 int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
1065 {
1066 	struct be_mcc_wrb *wrb;
1067 	struct be_cmd_req_q_destroy *req;
1068 	int status;
1069 
1070 	spin_lock_bh(&adapter->mcc_lock);
1071 
1072 	wrb = wrb_from_mccq(adapter);
1073 	if (!wrb) {
1074 		status = -EBUSY;
1075 		goto err;
1076 	}
1077 	req = embedded_payload(wrb);
1078 
1079 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1080 			OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
1081 	req->id = cpu_to_le16(q->id);
1082 
1083 	status = be_mcc_notify_wait(adapter);
1084 	if (!status)
1085 		q->created = false;
1086 
1087 err:
1088 	spin_unlock_bh(&adapter->mcc_lock);
1089 	return status;
1090 }
1091 
1092 /* Create an rx filtering policy configuration on an i/f
1093  * Uses MCCQ
1094  */
1095 int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
1096 		u8 *mac, u32 *if_handle, u32 *pmac_id, u32 domain)
1097 {
1098 	struct be_mcc_wrb *wrb;
1099 	struct be_cmd_req_if_create *req;
1100 	int status;
1101 
1102 	spin_lock_bh(&adapter->mcc_lock);
1103 
1104 	wrb = wrb_from_mccq(adapter);
1105 	if (!wrb) {
1106 		status = -EBUSY;
1107 		goto err;
1108 	}
1109 	req = embedded_payload(wrb);
1110 
1111 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1112 		OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req), wrb, NULL);
1113 	req->hdr.domain = domain;
1114 	req->capability_flags = cpu_to_le32(cap_flags);
1115 	req->enable_flags = cpu_to_le32(en_flags);
1116 	if (mac)
1117 		memcpy(req->mac_addr, mac, ETH_ALEN);
1118 	else
1119 		req->pmac_invalid = true;
1120 
1121 	status = be_mcc_notify_wait(adapter);
1122 	if (!status) {
1123 		struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
1124 		*if_handle = le32_to_cpu(resp->interface_id);
1125 		if (mac)
1126 			*pmac_id = le32_to_cpu(resp->pmac_id);
1127 	}
1128 
1129 err:
1130 	spin_unlock_bh(&adapter->mcc_lock);
1131 	return status;
1132 }
1133 
1134 /* Uses MCCQ */
1135 int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
1136 {
1137 	struct be_mcc_wrb *wrb;
1138 	struct be_cmd_req_if_destroy *req;
1139 	int status;
1140 
1141 	if (interface_id == -1)
1142 		return 0;
1143 
1144 	spin_lock_bh(&adapter->mcc_lock);
1145 
1146 	wrb = wrb_from_mccq(adapter);
1147 	if (!wrb) {
1148 		status = -EBUSY;
1149 		goto err;
1150 	}
1151 	req = embedded_payload(wrb);
1152 
1153 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1154 		OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req), wrb, NULL);
1155 	req->hdr.domain = domain;
1156 	req->interface_id = cpu_to_le32(interface_id);
1157 
1158 	status = be_mcc_notify_wait(adapter);
1159 err:
1160 	spin_unlock_bh(&adapter->mcc_lock);
1161 	return status;
1162 }
1163 
1164 /* Get stats is a non-embedded command: the request is not embedded inside
1165  * the WRB but is a separate DMA memory block.
1166  * Uses asynchronous MCC
1167  */
1168 int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
1169 {
1170 	struct be_mcc_wrb *wrb;
1171 	struct be_cmd_req_hdr *hdr;
1172 	int status = 0;
1173 
1174 	if (MODULO(adapter->work_counter, be_get_temp_freq) == 0)
1175 		be_cmd_get_die_temperature(adapter);
1176 
1177 	spin_lock_bh(&adapter->mcc_lock);
1178 
1179 	wrb = wrb_from_mccq(adapter);
1180 	if (!wrb) {
1181 		status = -EBUSY;
1182 		goto err;
1183 	}
1184 	hdr = nonemb_cmd->va;
1185 
1186 	be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
1187 		OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd);
1188 
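	/* BE3 (GEN3) uses v1 of the GET_STATISTICS cmd; the response is
	 * parsed later in be_parse_stats() via be_mcc_compl_process()
	 */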
1189 	if (adapter->generation == BE_GEN3)
1190 		hdr->version = 1;
1191 
1192 	be_mcc_notify(adapter);
1193 	adapter->stats_cmd_sent = true;
1194 
1195 err:
1196 	spin_unlock_bh(&adapter->mcc_lock);
1197 	return status;
1198 }
1199 
1200 /* Lancer Stats */
1201 int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
1202 				struct be_dma_mem *nonemb_cmd)
1203 {
1205 	struct be_mcc_wrb *wrb;
1206 	struct lancer_cmd_req_pport_stats *req;
1207 	int status = 0;
1208 
1209 	spin_lock_bh(&adapter->mcc_lock);
1210 
1211 	wrb = wrb_from_mccq(adapter);
1212 	if (!wrb) {
1213 		status = -EBUSY;
1214 		goto err;
1215 	}
1216 	req = nonemb_cmd->va;
1217 
1218 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1219 			OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size, wrb,
1220 			nonemb_cmd);
1221 
1222 	req->cmd_params.params.pport_num = cpu_to_le16(adapter->port_num);
1223 	req->cmd_params.params.reset_stats = 0;
1224 
1225 	be_mcc_notify(adapter);
1226 	adapter->stats_cmd_sent = true;
1227 
1228 err:
1229 	spin_unlock_bh(&adapter->mcc_lock);
1230 	return status;
1231 }
1232 
1233 /* Uses synchronous mcc */
1234 int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
1235 			u16 *link_speed, u32 dom)
1236 {
1237 	struct be_mcc_wrb *wrb;
1238 	struct be_cmd_req_link_status *req;
1239 	int status;
1240 
1241 	spin_lock_bh(&adapter->mcc_lock);
1242 
1243 	wrb = wrb_from_mccq(adapter);
1244 	if (!wrb) {
1245 		status = -EBUSY;
1246 		goto err;
1247 	}
1248 	req = embedded_payload(wrb);
1249 
1250 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1251 		OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL);
1252 
1253 	if (lancer_chip(adapter))
1254 		req->hdr.version = 1;
1255 
1256 	status = be_mcc_notify_wait(adapter);
1257 	if (!status) {
1258 		struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
1259 		if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
1260 			*link_speed = le16_to_cpu(resp->link_speed);
1261 			if (mac_speed)
1262 				*mac_speed = resp->mac_speed;
1263 		}
1264 	}
1265 
1266 err:
1267 	spin_unlock_bh(&adapter->mcc_lock);
1268 	return status;
1269 }
1270 
1271 /* Uses asynchronous mcc */
1272 int be_cmd_get_die_temperature(struct be_adapter *adapter)
1273 {
1274 	struct be_mcc_wrb *wrb;
1275 	struct be_cmd_req_get_cntl_addnl_attribs *req;
1276 	u16 mccq_index;
1277 	int status = 0;
1278 
1279 	spin_lock_bh(&adapter->mcc_lock);
1280 
1281 	mccq_index = adapter->mcc_obj.q.head;
1282 
1283 	wrb = wrb_from_mccq(adapter);
1284 	if (!wrb) {
1285 		status = -EBUSY;
1286 		goto err;
1287 	}
1288 	req = embedded_payload(wrb);
1289 
1290 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1291 		OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req),
1292 		wrb, NULL);
1293 
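	/* Stash the WRB's queue index in tag1; be_mcc_compl_process() uses it
	 * to find this WRB and pick up the temperature from its response.
	 */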
1294 	wrb->tag1 = mccq_index;
1295 
1296 	be_mcc_notify(adapter);
1297 
1298 err:
1299 	spin_unlock_bh(&adapter->mcc_lock);
1300 	return status;
1301 }
1302 
1303 /* Uses synchronous mcc */
1304 int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
1305 {
1306 	struct be_mcc_wrb *wrb;
1307 	struct be_cmd_req_get_fat *req;
1308 	int status;
1309 
1310 	spin_lock_bh(&adapter->mcc_lock);
1311 
1312 	wrb = wrb_from_mccq(adapter);
1313 	if (!wrb) {
1314 		status = -EBUSY;
1315 		goto err;
1316 	}
1317 	req = embedded_payload(wrb);
1318 
1319 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1320 		OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb, NULL);
1321 	req->fat_operation = cpu_to_le32(QUERY_FAT);
1322 	status = be_mcc_notify_wait(adapter);
1323 	if (!status) {
1324 		struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
1325 		if (log_size && resp->log_size)
1326 			*log_size = le32_to_cpu(resp->log_size) -
1327 					sizeof(u32);
1328 	}
1329 err:
1330 	spin_unlock_bh(&adapter->mcc_lock);
1331 	return status;
1332 }
1333 
1334 void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
1335 {
1336 	struct be_dma_mem get_fat_cmd;
1337 	struct be_mcc_wrb *wrb;
1338 	struct be_cmd_req_get_fat *req;
1339 	u32 offset = 0, total_size, buf_size,
1340 				log_offset = sizeof(u32), payload_len;
1341 	int status;
1342 
1343 	if (buf_len == 0)
1344 		return;
1345 
1346 	total_size = buf_len;
1347 
1348 	get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
1349 	get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
1350 			get_fat_cmd.size,
1351 			&get_fat_cmd.dma);
1352 	if (!get_fat_cmd.va) {
1353 		status = -ENOMEM;
1354 		dev_err(&adapter->pdev->dev,
1355 		"Memory allocation failure while retrieving FAT data\n");
1356 		return;
1357 	}
1358 
1359 	spin_lock_bh(&adapter->mcc_lock);
1360 
1361 	while (total_size) {
1362 		buf_size = min(total_size, (u32)60*1024);
1363 		total_size -= buf_size;
1364 
1365 		wrb = wrb_from_mccq(adapter);
1366 		if (!wrb) {
1367 			status = -EBUSY;
1368 			goto err;
1369 		}
1370 		req = get_fat_cmd.va;
1371 
1372 		payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
1373 		be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1374 				OPCODE_COMMON_MANAGE_FAT, payload_len, wrb,
1375 				&get_fat_cmd);
1376 
1377 		req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
1378 		req->read_log_offset = cpu_to_le32(log_offset);
1379 		req->read_log_length = cpu_to_le32(buf_size);
1380 		req->data_buffer_size = cpu_to_le32(buf_size);
1381 
1382 		status = be_mcc_notify_wait(adapter);
1383 		if (!status) {
1384 			struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
1385 			memcpy(buf + offset,
1386 				resp->data_buffer,
1387 				le32_to_cpu(resp->read_log_length));
1388 		} else {
1389 			dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
1390 			goto err;
1391 		}
1392 		offset += buf_size;
1393 		log_offset += buf_size;
1394 	}
1395 err:
1396 	pci_free_consistent(adapter->pdev, get_fat_cmd.size,
1397 			get_fat_cmd.va,
1398 			get_fat_cmd.dma);
1399 	spin_unlock_bh(&adapter->mcc_lock);
1400 }
1401 
1402 /* Uses synchronous mcc */
1403 int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
1404 			char *fw_on_flash)
1405 {
1406 	struct be_mcc_wrb *wrb;
1407 	struct be_cmd_req_get_fw_version *req;
1408 	int status;
1409 
1410 	spin_lock_bh(&adapter->mcc_lock);
1411 
1412 	wrb = wrb_from_mccq(adapter);
1413 	if (!wrb) {
1414 		status = -EBUSY;
1415 		goto err;
1416 	}
1417 
1418 	req = embedded_payload(wrb);
1419 
1420 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1421 		OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb, NULL);
1422 	status = be_mcc_notify_wait(adapter);
1423 	if (!status) {
1424 		struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
1425 		strcpy(fw_ver, resp->firmware_version_string);
1426 		if (fw_on_flash)
1427 			strcpy(fw_on_flash, resp->fw_on_flash_version_string);
1428 	}
1429 err:
1430 	spin_unlock_bh(&adapter->mcc_lock);
1431 	return status;
1432 }
1433 
1434 /* Set the EQ delay interval of an EQ to the specified value
1435  * Uses async mcc
1436  */
1437 int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
1438 {
1439 	struct be_mcc_wrb *wrb;
1440 	struct be_cmd_req_modify_eq_delay *req;
1441 	int status = 0;
1442 
1443 	spin_lock_bh(&adapter->mcc_lock);
1444 
1445 	wrb = wrb_from_mccq(adapter);
1446 	if (!wrb) {
1447 		status = -EBUSY;
1448 		goto err;
1449 	}
1450 	req = embedded_payload(wrb);
1451 
1452 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1453 		OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, NULL);
1454 
1455 	req->num_eq = cpu_to_le32(1);
1456 	req->delay[0].eq_id = cpu_to_le32(eq_id);
1457 	req->delay[0].phase = 0;
1458 	req->delay[0].delay_multiplier = cpu_to_le32(eqd);
1459 
1460 	be_mcc_notify(adapter);
1461 
1462 err:
1463 	spin_unlock_bh(&adapter->mcc_lock);
1464 	return status;
1465 }
1466 
1467 /* Uses synchronous mcc */
1468 int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
1469 			u32 num, bool untagged, bool promiscuous)
1470 {
1471 	struct be_mcc_wrb *wrb;
1472 	struct be_cmd_req_vlan_config *req;
1473 	int status;
1474 
1475 	spin_lock_bh(&adapter->mcc_lock);
1476 
1477 	wrb = wrb_from_mccq(adapter);
1478 	if (!wrb) {
1479 		status = -EBUSY;
1480 		goto err;
1481 	}
1482 	req = embedded_payload(wrb);
1483 
1484 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1485 		OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), wrb, NULL);
1486 
1487 	req->interface_id = if_id;
1488 	req->promiscuous = promiscuous;
1489 	req->untagged = untagged;
1490 	req->num_vlan = num;
1491 	if (!promiscuous) {
1492 		memcpy(req->normal_vlan, vtag_array,
1493 			req->num_vlan * sizeof(vtag_array[0]));
1494 	}
1495 
1496 	status = be_mcc_notify_wait(adapter);
1497 
1498 err:
1499 	spin_unlock_bh(&adapter->mcc_lock);
1500 	return status;
1501 }
1502 
1503 int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
1504 {
1505 	struct be_mcc_wrb *wrb;
1506 	struct be_dma_mem *mem = &adapter->rx_filter;
1507 	struct be_cmd_req_rx_filter *req = mem->va;
1508 	int status;
1509 
1510 	spin_lock_bh(&adapter->mcc_lock);
1511 
1512 	wrb = wrb_from_mccq(adapter);
1513 	if (!wrb) {
1514 		status = -EBUSY;
1515 		goto err;
1516 	}
1517 	memset(req, 0, sizeof(*req));
1518 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1519 				OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
1520 				wrb, mem);
1521 
1522 	req->if_id = cpu_to_le32(adapter->if_handle);
1523 	if (flags & IFF_PROMISC) {
1524 		req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
1525 					BE_IF_FLAGS_VLAN_PROMISCUOUS);
1526 		if (value == ON)
1527 			req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
1528 						BE_IF_FLAGS_VLAN_PROMISCUOUS);
1529 	} else if (flags & IFF_ALLMULTI) {
1530 		req->if_flags_mask = req->if_flags =
1531 				cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
1532 	} else {
1533 		struct netdev_hw_addr *ha;
1534 		int i = 0;
1535 
1536 		req->if_flags_mask = req->if_flags =
1537 				cpu_to_le32(BE_IF_FLAGS_MULTICAST);
1538 
1539 		/* Reset mcast promisc mode if already set by setting mask
1540 		 * and not setting flags field
1541 		 */
1542 		req->if_flags_mask |=
1543 				cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
1544 
1545 		req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
1546 		netdev_for_each_mc_addr(ha, adapter->netdev)
1547 			memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
1548 	}
1549 
1550 	status = be_mcc_notify_wait(adapter);
1551 err:
1552 	spin_unlock_bh(&adapter->mcc_lock);
1553 	return status;
1554 }
1555 
1556 /* Uses synchronous mcc */
1557 int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
1558 {
1559 	struct be_mcc_wrb *wrb;
1560 	struct be_cmd_req_set_flow_control *req;
1561 	int status;
1562 
1563 	spin_lock_bh(&adapter->mcc_lock);
1564 
1565 	wrb = wrb_from_mccq(adapter);
1566 	if (!wrb) {
1567 		status = -EBUSY;
1568 		goto err;
1569 	}
1570 	req = embedded_payload(wrb);
1571 
1572 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1573 		OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req), wrb, NULL);
1574 
1575 	req->tx_flow_control = cpu_to_le16((u16)tx_fc);
1576 	req->rx_flow_control = cpu_to_le16((u16)rx_fc);
1577 
1578 	status = be_mcc_notify_wait(adapter);
1579 
1580 err:
1581 	spin_unlock_bh(&adapter->mcc_lock);
1582 	return status;
1583 }
1584 
1585 /* Uses sync mcc */
1586 int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
1587 {
1588 	struct be_mcc_wrb *wrb;
1589 	struct be_cmd_req_get_flow_control *req;
1590 	int status;
1591 
1592 	spin_lock_bh(&adapter->mcc_lock);
1593 
1594 	wrb = wrb_from_mccq(adapter);
1595 	if (!wrb) {
1596 		status = -EBUSY;
1597 		goto err;
1598 	}
1599 	req = embedded_payload(wrb);
1600 
1601 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1602 		OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req), wrb, NULL);
1603 
1604 	status = be_mcc_notify_wait(adapter);
1605 	if (!status) {
1606 		struct be_cmd_resp_get_flow_control *resp =
1607 						embedded_payload(wrb);
1608 		*tx_fc = le16_to_cpu(resp->tx_flow_control);
1609 		*rx_fc = le16_to_cpu(resp->rx_flow_control);
1610 	}
1611 
1612 err:
1613 	spin_unlock_bh(&adapter->mcc_lock);
1614 	return status;
1615 }
1616 
1617 /* Uses mbox */
1618 int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
1619 		u32 *mode, u32 *caps)
1620 {
1621 	struct be_mcc_wrb *wrb;
1622 	struct be_cmd_req_query_fw_cfg *req;
1623 	int status;
1624 
1625 	if (mutex_lock_interruptible(&adapter->mbox_lock))
1626 		return -1;
1627 
1628 	wrb = wrb_from_mbox(adapter);
1629 	req = embedded_payload(wrb);
1630 
1631 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1632 		OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req), wrb, NULL);
1633 
1634 	status = be_mbox_notify_wait(adapter);
1635 	if (!status) {
1636 		struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
1637 		*port_num = le32_to_cpu(resp->phys_port);
1638 		*mode = le32_to_cpu(resp->function_mode);
1639 		*caps = le32_to_cpu(resp->function_caps);
1640 	}
1641 
1642 	mutex_unlock(&adapter->mbox_lock);
1643 	return status;
1644 }
1645 
1646 /* Uses mbox */
1647 int be_cmd_reset_function(struct be_adapter *adapter)
1648 {
1649 	struct be_mcc_wrb *wrb;
1650 	struct be_cmd_req_hdr *req;
1651 	int status;
1652 
1653 	if (mutex_lock_interruptible(&adapter->mbox_lock))
1654 		return -1;
1655 
1656 	wrb = wrb_from_mbox(adapter);
1657 	req = embedded_payload(wrb);
1658 
1659 	be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
1660 		OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb, NULL);
1661 
1662 	status = be_mbox_notify_wait(adapter);
1663 
1664 	mutex_unlock(&adapter->mbox_lock);
1665 	return status;
1666 }
1667 
1668 int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
1669 {
1670 	struct be_mcc_wrb *wrb;
1671 	struct be_cmd_req_rss_config *req;
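	/* Fixed, arbitrarily chosen 40-byte key for the RSS hash */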
1672 	u32 myhash[10] = {0x15d43fa5, 0x2534685a, 0x5f87693a, 0x5668494e,
1673 			0x33cf6a53, 0x383334c6, 0x76ac4257, 0x59b242b2,
1674 			0x3ea83c02, 0x4a110304};
1675 	int status;
1676 
1677 	if (mutex_lock_interruptible(&adapter->mbox_lock))
1678 		return -1;
1679 
1680 	wrb = wrb_from_mbox(adapter);
1681 	req = embedded_payload(wrb);
1682 
1683 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1684 		OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
1685 
1686 	req->if_id = cpu_to_le32(adapter->if_handle);
1687 	req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4);
1688 	req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
1689 	memcpy(req->cpu_table, rsstable, table_size);
1690 	memcpy(req->hash, myhash, sizeof(myhash));
1691 	be_dws_cpu_to_le(req->hash, sizeof(req->hash));
1692 
1693 	status = be_mbox_notify_wait(adapter);
1694 
1695 	mutex_unlock(&adapter->mbox_lock);
1696 	return status;
1697 }
1698 
1699 /* Uses sync mcc */
1700 int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
1701 			u8 bcn, u8 sts, u8 state)
1702 {
1703 	struct be_mcc_wrb *wrb;
1704 	struct be_cmd_req_enable_disable_beacon *req;
1705 	int status;
1706 
1707 	spin_lock_bh(&adapter->mcc_lock);
1708 
1709 	wrb = wrb_from_mccq(adapter);
1710 	if (!wrb) {
1711 		status = -EBUSY;
1712 		goto err;
1713 	}
1714 	req = embedded_payload(wrb);
1715 
1716 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1717 		OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req), wrb, NULL);
1718 
1719 	req->port_num = port_num;
1720 	req->beacon_state = state;
1721 	req->beacon_duration = bcn;
1722 	req->status_duration = sts;
1723 
1724 	status = be_mcc_notify_wait(adapter);
1725 
1726 err:
1727 	spin_unlock_bh(&adapter->mcc_lock);
1728 	return status;
1729 }
1730 
1731 /* Uses sync mcc */
1732 int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
1733 {
1734 	struct be_mcc_wrb *wrb;
1735 	struct be_cmd_req_get_beacon_state *req;
1736 	int status;
1737 
1738 	spin_lock_bh(&adapter->mcc_lock);
1739 
1740 	wrb = wrb_from_mccq(adapter);
1741 	if (!wrb) {
1742 		status = -EBUSY;
1743 		goto err;
1744 	}
1745 	req = embedded_payload(wrb);
1746 
1747 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1748 		OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req), wrb, NULL);
1749 
1750 	req->port_num = port_num;
1751 
1752 	status = be_mcc_notify_wait(adapter);
1753 	if (!status) {
1754 		struct be_cmd_resp_get_beacon_state *resp =
1755 						embedded_payload(wrb);
1756 		*state = resp->beacon_state;
1757 	}
1758 
1759 err:
1760 	spin_unlock_bh(&adapter->mcc_lock);
1761 	return status;
1762 }
1763 
1764 int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
1765 			u32 data_size, u32 data_offset, const char *obj_name,
1766 			u32 *data_written, u8 *addn_status)
1767 {
1768 	struct be_mcc_wrb *wrb;
1769 	struct lancer_cmd_req_write_object *req;
1770 	struct lancer_cmd_resp_write_object *resp;
1771 	void *ctxt = NULL;
1772 	int status;
1773 
1774 	spin_lock_bh(&adapter->mcc_lock);
1775 	adapter->flash_status = 0;
1776 
1777 	wrb = wrb_from_mccq(adapter);
1778 	if (!wrb) {
1779 		status = -EBUSY;
1780 		goto err_unlock;
1781 	}
1782 
1783 	req = embedded_payload(wrb);
1784 
1785 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1786 				OPCODE_COMMON_WRITE_OBJECT,
1787 				sizeof(struct lancer_cmd_req_write_object), wrb,
1788 				NULL);
1789 
1790 	ctxt = &req->context;
1791 	AMAP_SET_BITS(struct amap_lancer_write_obj_context,
1792 			write_length, ctxt, data_size);
1793 
1794 	if (data_size == 0)
1795 		AMAP_SET_BITS(struct amap_lancer_write_obj_context,
1796 				eof, ctxt, 1);
1797 	else
1798 		AMAP_SET_BITS(struct amap_lancer_write_obj_context,
1799 				eof, ctxt, 0);
1800 
1801 	be_dws_cpu_to_le(ctxt, sizeof(req->context));
1802 	req->write_offset = cpu_to_le32(data_offset);
1803 	strcpy(req->object_name, obj_name);
1804 	req->descriptor_count = cpu_to_le32(1);
1805 	req->buf_len = cpu_to_le32(data_size);
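	/* The object data is laid out right after the request header in the
	 * same DMA buffer, hence the sizeof() offset added to cmd->dma below.
	 */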
1806 	req->addr_low = cpu_to_le32((cmd->dma +
1807 				sizeof(struct lancer_cmd_req_write_object))
1808 				& 0xFFFFFFFF);
1809 	req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
1810 				sizeof(struct lancer_cmd_req_write_object)));
1811 
1812 	be_mcc_notify(adapter);
1813 	spin_unlock_bh(&adapter->mcc_lock);
1814 
1815 	if (!wait_for_completion_timeout(&adapter->flash_compl,
1816 			msecs_to_jiffies(12000)))
1817 		status = -1;
1818 	else
1819 		status = adapter->flash_status;
1820 
1821 	resp = embedded_payload(wrb);
1822 	if (!status) {
1823 		*data_written = le32_to_cpu(resp->actual_write_len);
1824 	} else {
1825 		*addn_status = resp->additional_status;
1826 		status = resp->status;
1827 	}
1828 
1829 	return status;
1830 
1831 err_unlock:
1832 	spin_unlock_bh(&adapter->mcc_lock);
1833 	return status;
1834 }
1835 
1836 int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
1837 		u32 data_size, u32 data_offset, const char *obj_name,
1838 		u32 *data_read, u32 *eof, u8 *addn_status)
1839 {
1840 	struct be_mcc_wrb *wrb;
1841 	struct lancer_cmd_req_read_object *req;
1842 	struct lancer_cmd_resp_read_object *resp;
1843 	int status;
1844 
1845 	spin_lock_bh(&adapter->mcc_lock);
1846 
1847 	wrb = wrb_from_mccq(adapter);
1848 	if (!wrb) {
1849 		status = -EBUSY;
1850 		goto err_unlock;
1851 	}
1852 
1853 	req = embedded_payload(wrb);
1854 
1855 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1856 			OPCODE_COMMON_READ_OBJECT,
1857 			sizeof(struct lancer_cmd_req_read_object), wrb,
1858 			NULL);
1859 
1860 	req->desired_read_len = cpu_to_le32(data_size);
1861 	req->read_offset = cpu_to_le32(data_offset);
1862 	strcpy(req->object_name, obj_name);
1863 	req->descriptor_count = cpu_to_le32(1);
1864 	req->buf_len = cpu_to_le32(data_size);
1865 	req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF));
1866 	req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma));
1867 
1868 	status = be_mcc_notify_wait(adapter);
1869 
1870 	resp = embedded_payload(wrb);
1871 	if (!status) {
1872 		*data_read = le32_to_cpu(resp->actual_read_len);
1873 		*eof = le32_to_cpu(resp->eof);
1874 	} else {
1875 		*addn_status = resp->additional_status;
1876 	}
1877 
1878 err_unlock:
1879 	spin_unlock_bh(&adapter->mcc_lock);
1880 	return status;
1881 }
1882 
1883 int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
1884 			u32 flash_type, u32 flash_opcode, u32 buf_size)
1885 {
1886 	struct be_mcc_wrb *wrb;
1887 	struct be_cmd_write_flashrom *req;
1888 	int status;
1889 
1890 	spin_lock_bh(&adapter->mcc_lock);
1891 	adapter->flash_status = 0;
1892 
1893 	wrb = wrb_from_mccq(adapter);
1894 	if (!wrb) {
1895 		status = -EBUSY;
1896 		goto err_unlock;
1897 	}
1898 	req = cmd->va;
1899 
1900 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1901 		OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb, cmd);
1902 
1903 	req->params.op_type = cpu_to_le32(flash_type);
1904 	req->params.op_code = cpu_to_le32(flash_opcode);
1905 	req->params.data_buf_size = cpu_to_le32(buf_size);
1906 
1907 	be_mcc_notify(adapter);
1908 	spin_unlock_bh(&adapter->mcc_lock);
1909 
1910 	if (!wait_for_completion_timeout(&adapter->flash_compl,
1911 			msecs_to_jiffies(40000)))
1912 		status = -1;
1913 	else
1914 		status = adapter->flash_status;
1915 
1916 	return status;
1917 
1918 err_unlock:
1919 	spin_unlock_bh(&adapter->mcc_lock);
1920 	return status;
1921 }
1922 
1923 int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
1924 			 int offset)
1925 {
1926 	struct be_mcc_wrb *wrb;
1927 	struct be_cmd_write_flashrom *req;
1928 	int status;
1929 
1930 	spin_lock_bh(&adapter->mcc_lock);
1931 
1932 	wrb = wrb_from_mccq(adapter);
1933 	if (!wrb) {
1934 		status = -EBUSY;
1935 		goto err;
1936 	}
1937 	req = embedded_payload(wrb);
1938 
1939 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1940 		OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4, wrb, NULL);
1941 
1942 	req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT);
1943 	req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
1944 	req->params.offset = cpu_to_le32(offset);
1945 	req->params.data_buf_size = cpu_to_le32(0x4);
1946 
1947 	status = be_mcc_notify_wait(adapter);
1948 	if (!status)
1949 		memcpy(flashed_crc, req->params.data_buf, 4);
1950 
1951 err:
1952 	spin_unlock_bh(&adapter->mcc_lock);
1953 	return status;
1954 }
1955 
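/* Uses synchronous MCCQ; programs the MAC address used for magic-packet WoL */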
1956 int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
1957 				struct be_dma_mem *nonemb_cmd)
1958 {
1959 	struct be_mcc_wrb *wrb;
1960 	struct be_cmd_req_acpi_wol_magic_config *req;
1961 	int status;
1962 
1963 	spin_lock_bh(&adapter->mcc_lock);
1964 
1965 	wrb = wrb_from_mccq(adapter);
1966 	if (!wrb) {
1967 		status = -EBUSY;
1968 		goto err;
1969 	}
1970 	req = nonemb_cmd->va;
1971 
1972 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1973 		OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req), wrb,
1974 		nonemb_cmd);
1975 	memcpy(req->magic_mac, mac, ETH_ALEN);
1976 
1977 	status = be_mcc_notify_wait(adapter);
1978 
1979 err:
1980 	spin_unlock_bh(&adapter->mcc_lock);
1981 	return status;
1982 }
1983 
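/* Uses synchronous MCCQ; sets the loopback mode of the given port */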
1984 int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
1985 			u8 loopback_type, u8 enable)
1986 {
1987 	struct be_mcc_wrb *wrb;
1988 	struct be_cmd_req_set_lmode *req;
1989 	int status;
1990 
1991 	spin_lock_bh(&adapter->mcc_lock);
1992 
1993 	wrb = wrb_from_mccq(adapter);
1994 	if (!wrb) {
1995 		status = -EBUSY;
1996 		goto err;
1997 	}
1998 
1999 	req = embedded_payload(wrb);
2000 
2001 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2002 			OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req), wrb,
2003 			NULL);
2004 
2005 	req->src_port = port_num;
2006 	req->dest_port = port_num;
2007 	req->loopback_type = loopback_type;
2008 	req->loopback_state = enable;
2009 
2010 	status = be_mcc_notify_wait(adapter);
2011 err:
2012 	spin_unlock_bh(&adapter->mcc_lock);
2013 	return status;
2014 }
2015 
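/* Uses synchronous MCCQ; runs the low-level loopback test and returns the
 * test status reported by the adapter */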
2016 int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
2017 		u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
2018 {
2019 	struct be_mcc_wrb *wrb;
2020 	struct be_cmd_req_loopback_test *req;
2021 	int status;
2022 
2023 	spin_lock_bh(&adapter->mcc_lock);
2024 
2025 	wrb = wrb_from_mccq(adapter);
2026 	if (!wrb) {
2027 		status = -EBUSY;
2028 		goto err;
2029 	}
2030 
2031 	req = embedded_payload(wrb);
2032 
2033 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2034 			OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL);
2035 	req->hdr.timeout = cpu_to_le32(4);
2036 
2037 	req->pattern = cpu_to_le64(pattern);
2038 	req->src_port = cpu_to_le32(port_num);
2039 	req->dest_port = cpu_to_le32(port_num);
2040 	req->pkt_size = cpu_to_le32(pkt_size);
2041 	req->num_pkts = cpu_to_le32(num_pkts);
2042 	req->loopback_type = cpu_to_le32(loopback_type);
2043 
2044 	status = be_mcc_notify_wait(adapter);
2045 	if (!status) {
2046 		struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
2047 		status = le32_to_cpu(resp->status);
2048 	}
2049 
2050 err:
2051 	spin_unlock_bh(&adapter->mcc_lock);
2052 	return status;
2053 }
2054 
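/* Uses synchronous MCCQ; DMAs the test pattern to the adapter and verifies
 * the data echoed back in the non-embedded DMA buffer 'cmd' */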
2055 int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
2056 				u32 byte_cnt, struct be_dma_mem *cmd)
2057 {
2058 	struct be_mcc_wrb *wrb;
2059 	struct be_cmd_req_ddrdma_test *req;
2060 	int status;
2061 	int i, j = 0;
2062 
2063 	spin_lock_bh(&adapter->mcc_lock);
2064 
2065 	wrb = wrb_from_mccq(adapter);
2066 	if (!wrb) {
2067 		status = -EBUSY;
2068 		goto err;
2069 	}
2070 	req = cmd->va;
2071 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2072 			OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb, cmd);
2073 
2074 	req->pattern = cpu_to_le64(pattern);
2075 	req->byte_count = cpu_to_le32(byte_cnt);
2076 	for (i = 0; i < byte_cnt; i++) {
2077 		req->snd_buff[i] = (u8)(pattern >> (j*8));
2078 		j++;
2079 		if (j > 7)
2080 			j = 0;
2081 	}
2082 
2083 	status = be_mcc_notify_wait(adapter);
2084 
2085 	if (!status) {
2086 		struct be_cmd_resp_ddrdma_test *resp;
2087 		resp = cmd->va;
2088 		if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
2089 				resp->snd_err) {
2090 			status = -EIO;
2091 		}
2092 	}
2093 
2094 err:
2095 	spin_unlock_bh(&adapter->mcc_lock);
2096 	return status;
2097 }
2098 
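/* Uses synchronous MCCQ; reads the SEEPROM contents into the non-embedded
 * DMA buffer 'nonemb_cmd' */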
2099 int be_cmd_get_seeprom_data(struct be_adapter *adapter,
2100 				struct be_dma_mem *nonemb_cmd)
2101 {
2102 	struct be_mcc_wrb *wrb;
2103 	struct be_cmd_req_seeprom_read *req;
2105 	int status;
2106 
2107 	spin_lock_bh(&adapter->mcc_lock);
2108 
2109 	wrb = wrb_from_mccq(adapter);
2110 	if (!wrb) {
2111 		status = -EBUSY;
2112 		goto err;
2113 	}
2114 	req = nonemb_cmd->va;
2116 
2117 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2118 			OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
2119 			nonemb_cmd);
2120 
2121 	status = be_mcc_notify_wait(adapter);
2122 
2123 err:
2124 	spin_unlock_bh(&adapter->mcc_lock);
2125 	return status;
2126 }
2127 
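/* Uses synchronous MCCQ; retrieves the PHY type and interface type */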
2128 int be_cmd_get_phy_info(struct be_adapter *adapter,
2129 				struct be_phy_info *phy_info)
2130 {
2131 	struct be_mcc_wrb *wrb;
2132 	struct be_cmd_req_get_phy_info *req;
2133 	struct be_dma_mem cmd;
2134 	int status;
2135 
2136 	cmd.size = sizeof(struct be_cmd_req_get_phy_info);
2137 	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
2138 					&cmd.dma);
2139 	if (!cmd.va) {
2140 		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2141 		return -ENOMEM;
2142 	}
2143 
2144 	spin_lock_bh(&adapter->mcc_lock);
2145 
2146 	wrb = wrb_from_mccq(adapter);
2147 	if (!wrb) {
2148 		status = -EBUSY;
2149 		goto err;
2150 	}
2151 
2152 	req = cmd.va;
2153 
2154 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2155 			OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
2156 			wrb, &cmd);
2157 
2158 	status = be_mcc_notify_wait(adapter);
2159 	if (!status) {
2160 		struct be_phy_info *resp_phy_info =
2161 				cmd.va + sizeof(struct be_cmd_req_hdr);
2162 		phy_info->phy_type = le16_to_cpu(resp_phy_info->phy_type);
2163 		phy_info->interface_type =
2164 			le16_to_cpu(resp_phy_info->interface_type);
2165 	}
2166 err:
2167 	spin_unlock_bh(&adapter->mcc_lock);
2168 	pci_free_consistent(adapter->pdev, cmd.size,
2169 				cmd.va, cmd.dma);
2170 	return status;
2171 }
2172 
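/* Uses synchronous MCCQ; sets the NIC QoS max_bps for the given domain */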
2173 int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
2174 {
2175 	struct be_mcc_wrb *wrb;
2176 	struct be_cmd_req_set_qos *req;
2177 	int status;
2178 
2179 	spin_lock_bh(&adapter->mcc_lock);
2180 
2181 	wrb = wrb_from_mccq(adapter);
2182 	if (!wrb) {
2183 		status = -EBUSY;
2184 		goto err;
2185 	}
2186 
2187 	req = embedded_payload(wrb);
2188 
2189 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2190 			OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
2191 
2192 	req->hdr.domain = domain;
2193 	req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
2194 	req->max_bps_nic = cpu_to_le32(bps);
2195 
2196 	status = be_mcc_notify_wait(adapter);
2197 
2198 err:
2199 	spin_unlock_bh(&adapter->mcc_lock);
2200 	return status;
2201 }
2202 
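/* Uses mbox; retrieves controller attributes (the hba physical port number) */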
2203 int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
2204 {
2205 	struct be_mcc_wrb *wrb;
2206 	struct be_cmd_req_cntl_attribs *req;
2207 	struct be_cmd_resp_cntl_attribs *resp;
2208 	int status;
2209 	int payload_len = max(sizeof(*req), sizeof(*resp));
2210 	struct mgmt_controller_attrib *attribs;
2211 	struct be_dma_mem attribs_cmd;
2212 
2213 	memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
2214 	attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
2215 	attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
2216 						&attribs_cmd.dma);
2217 	if (!attribs_cmd.va) {
2218 		dev_err(&adapter->pdev->dev,
2219 				"Memory allocation failure\n");
2220 		return -ENOMEM;
2221 	}
2222 
2223 	if (mutex_lock_interruptible(&adapter->mbox_lock)) {
2224 		pci_free_consistent(adapter->pdev, attribs_cmd.size,
				attribs_cmd.va, attribs_cmd.dma);
		return -1;
	}
2225 
2226 	wrb = wrb_from_mbox(adapter);
2227 	if (!wrb) {
2228 		status = -EBUSY;
2229 		goto err;
2230 	}
2231 	req = attribs_cmd.va;
2232 
2233 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2234 			 OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len, wrb,
2235 			&attribs_cmd);
2236 
2237 	status = be_mbox_notify_wait(adapter);
2238 	if (!status) {
2239 		attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
2240 		adapter->hba_port_num = attribs->hba_attribs.phy_port;
2241 	}
2242 
2243 err:
2244 	mutex_unlock(&adapter->mbox_lock);
2245 	pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va,
2246 					attribs_cmd.dma);
2247 	return status;
2248 }
2249 
2250 /* Uses mbox */
2251 int be_cmd_req_native_mode(struct be_adapter *adapter)
2252 {
2253 	struct be_mcc_wrb *wrb;
2254 	struct be_cmd_req_set_func_cap *req;
2255 	int status;
2256 
2257 	if (mutex_lock_interruptible(&adapter->mbox_lock))
2258 		return -1;
2259 
2260 	wrb = wrb_from_mbox(adapter);
2261 	if (!wrb) {
2262 		status = -EBUSY;
2263 		goto err;
2264 	}
2265 
2266 	req = embedded_payload(wrb);
2267 
2268 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2269 		OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req), wrb, NULL);
2270 
2271 	req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
2272 				CAPABILITY_BE3_NATIVE_ERX_API);
2273 	req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);
2274 
2275 	status = be_mbox_notify_wait(adapter);
2276 	if (!status) {
2277 		struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
2278 		adapter->be3_native = le32_to_cpu(resp->cap_flags) &
2279 					CAPABILITY_BE3_NATIVE_ERX_API;
2280 	}
2281 err:
2282 	mutex_unlock(&adapter->mbox_lock);
2283 	return status;
2284 }
2285 
2286 /* Uses synchronous MCCQ */
2287 int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain,
2288 							u32 *pmac_id)
2289 {
2290 	struct be_mcc_wrb *wrb;
2291 	struct be_cmd_req_get_mac_list *req;
2292 	int status;
2293 	int mac_count;
2294 
2295 	spin_lock_bh(&adapter->mcc_lock);
2296 
2297 	wrb = wrb_from_mccq(adapter);
2298 	if (!wrb) {
2299 		status = -EBUSY;
2300 		goto err;
2301 	}
2302 	req = embedded_payload(wrb);
2303 
2304 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2305 				OPCODE_COMMON_GET_MAC_LIST, sizeof(*req),
2306 				wrb, NULL);
2307 
2308 	req->hdr.domain = domain;
2309 
2310 	status = be_mcc_notify_wait(adapter);
2311 	if (!status) {
2312 		struct be_cmd_resp_get_mac_list *resp =
2313 						embedded_payload(wrb);
2314 		int i;
2315 		u8 *ctxt = &resp->context[0][0];
2316 		status = -EIO;
2317 		mac_count = resp->mac_count;
2318 		be_dws_le_to_cpu(&resp->context, sizeof(resp->context));
2319 		for (i = 0; i < mac_count; i++) {
2320 			if (!AMAP_GET_BITS(struct amap_get_mac_list_context,
2321 					   act, ctxt)) {
2322 				*pmac_id = AMAP_GET_BITS
2323 					(struct amap_get_mac_list_context,
2324 					 macid, ctxt);
2325 				status = 0;
2326 				break;
2327 			}
2328 			ctxt += sizeof(struct amap_get_mac_list_context) / 8;
2329 		}
2330 	}
2331 
2332 err:
2333 	spin_unlock_bh(&adapter->mcc_lock);
2334 	return status;
2335 }
2336 
2337 /* Uses synchronous MCCQ */
2338 int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
2339 			u8 mac_count, u32 domain)
2340 {
2341 	struct be_mcc_wrb *wrb;
2342 	struct be_cmd_req_set_mac_list *req;
2343 	int status;
2344 	struct be_dma_mem cmd;
2345 
2346 	memset(&cmd, 0, sizeof(struct be_dma_mem));
2347 	cmd.size = sizeof(struct be_cmd_req_set_mac_list);
2348 	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
2349 			&cmd.dma, GFP_KERNEL);
2350 	if (!cmd.va) {
2351 		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2352 		return -ENOMEM;
2353 	}
2354 
2355 	spin_lock_bh(&adapter->mcc_lock);
2356 
2357 	wrb = wrb_from_mccq(adapter);
2358 	if (!wrb) {
2359 		status = -EBUSY;
2360 		goto err;
2361 	}
2362 
2363 	req = cmd.va;
2364 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2365 				OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
2366 				wrb, &cmd);
2367 
2368 	req->hdr.domain = domain;
2369 	req->mac_count = mac_count;
2370 	if (mac_count)
2371 		memcpy(req->mac, mac_array, ETH_ALEN*mac_count);
2372 
2373 	status = be_mcc_notify_wait(adapter);
2374 
2375 err:
2376 	spin_unlock_bh(&adapter->mcc_lock);
2377 	dma_free_coherent(&adapter->pdev->dev, cmd.size,
2378 				cmd.va, cmd.dma);
2379 	return status;
2380 }
2381