xref: /linux/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c (revision a508da6cc0093171833efb8376b00473f24221b9)
1 /* bnx2x_sp.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2011-2012 Broadcom Corporation
4  *
5  * Unless you and Broadcom execute a separate written software license
6  * agreement governing use of this software, this software is licensed to you
7  * under the terms of the GNU General Public License version 2, available
8  * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
9  *
10  * Notwithstanding the above, under no circumstances may you combine this
11  * software in any way with any other Broadcom software provided under a
12  * license other than the GPL, without Broadcom's express prior written
13  * consent.
14  *
15  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
16  * Written by: Vladislav Zolotarov
17  *
18  */
19 
20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21 
22 #include <linux/module.h>
23 #include <linux/crc32.h>
24 #include <linux/netdevice.h>
25 #include <linux/etherdevice.h>
26 #include <linux/crc32c.h>
27 #include "bnx2x.h"
28 #include "bnx2x_cmn.h"
29 #include "bnx2x_sp.h"
30 
31 #define BNX2X_MAX_EMUL_MULTI		16
32 
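/* Number of leading zero bytes used to pad a MAC address up to the next
 * u32-aligned length: ALIGN(ETH_ALEN, 4) - ETH_ALEN = 8 - 6 = 2 bytes.
 */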
33 #define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
34 
35 /**** Exe Queue interfaces ****/
36 
37 /**
38  * bnx2x_exe_queue_init - init the Exe Queue object
39  *
40 	 * @o:		pointer to the object
41 	 * @exe_len:	length of a single execution chunk
42 	 * @owner:	pointer to the owner
43  * @validate:	validate function pointer
44  * @optimize:	optimize function pointer
45  * @exec:	execute function pointer
46  * @get:	get function pointer
47  */
48 static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
49 					struct bnx2x_exe_queue_obj *o,
50 					int exe_len,
51 					union bnx2x_qable_obj *owner,
52 					exe_q_validate validate,
53 					exe_q_remove remove,
54 					exe_q_optimize optimize,
55 					exe_q_execute exec,
56 					exe_q_get get)
57 {
58 	memset(o, 0, sizeof(*o));
59 
60 	INIT_LIST_HEAD(&o->exe_queue);
61 	INIT_LIST_HEAD(&o->pending_comp);
62 
63 	spin_lock_init(&o->lock);
64 
65 	o->exe_chunk_len = exe_len;
66 	o->owner         = owner;
67 
68 	/* Owner specific callbacks */
69 	o->validate      = validate;
70 	o->remove        = remove;
71 	o->optimize      = optimize;
72 	o->execute       = exec;
73 	o->get           = get;
74 
75 	DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk length of %d\n",
76 	   exe_len);
77 }
78 
79 static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp,
80 					     struct bnx2x_exeq_elem *elem)
81 {
82 	DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n");
83 	kfree(elem);
84 }
85 
86 static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o)
87 {
88 	struct bnx2x_exeq_elem *elem;
89 	int cnt = 0;
90 
91 	spin_lock_bh(&o->lock);
92 
93 	list_for_each_entry(elem, &o->exe_queue, link)
94 		cnt++;
95 
96 	spin_unlock_bh(&o->lock);
97 
98 	return cnt;
99 }
100 
101 /**
102  * bnx2x_exe_queue_add - add a new element to the execution queue
103  *
104  * @bp:		driver handle
105  * @o:		queue
106  * @elem:	new command to add
107  * @restore:	true - do not optimize the command
108  *
109  * If the element is optimized or is illegal, frees it.
110  */
111 static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
112 				      struct bnx2x_exe_queue_obj *o,
113 				      struct bnx2x_exeq_elem *elem,
114 				      bool restore)
115 {
116 	int rc;
117 
118 	spin_lock_bh(&o->lock);
119 
120 	if (!restore) {
121 		/* Try to optimize this element away (cancel an opposite pending command) */
122 		rc = o->optimize(bp, o->owner, elem);
123 		if (rc)
124 			goto free_and_exit;
125 
126 		/* Check if this request is ok */
127 		rc = o->validate(bp, o->owner, elem);
128 		if (rc) {
129 			BNX2X_ERR("Preamble failed: %d\n", rc);
130 			goto free_and_exit;
131 		}
132 	}
133 
134 	/* If so, add it to the execution queue */
135 	list_add_tail(&elem->link, &o->exe_queue);
136 
137 	spin_unlock_bh(&o->lock);
138 
139 	return 0;
140 
141 free_and_exit:
142 	bnx2x_exe_queue_free_elem(bp, elem);
143 
144 	spin_unlock_bh(&o->lock);
145 
146 	return rc;
147 
148 }
149 
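/* Drop every element still waiting for a completion. Callers must hold
 * o->lock (the locked wrapper below takes it itself).
 */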
150 static inline void __bnx2x_exe_queue_reset_pending(
151 	struct bnx2x *bp,
152 	struct bnx2x_exe_queue_obj *o)
153 {
154 	struct bnx2x_exeq_elem *elem;
155 
156 	while (!list_empty(&o->pending_comp)) {
157 		elem = list_first_entry(&o->pending_comp,
158 					struct bnx2x_exeq_elem, link);
159 
160 		list_del(&elem->link);
161 		bnx2x_exe_queue_free_elem(bp, elem);
162 	}
163 }
164 
165 static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
166 						 struct bnx2x_exe_queue_obj *o)
167 {
168 
169 	spin_lock_bh(&o->lock);
170 
171 	__bnx2x_exe_queue_reset_pending(bp, o);
172 
173 	spin_unlock_bh(&o->lock);
174 
175 }
176 
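/* The execution queue works on two lists: exe_queue holds commands that have
 * been queued but not yet executed, while pending_comp holds the chunk that
 * has been sent and is waiting for its completion. A chunk is moved between
 * the two lists under o->lock in bnx2x_exe_queue_step().
 */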
177 /**
178  * bnx2x_exe_queue_step - execute one execution chunk atomically
179  *
180  * @bp:			driver handle
181  * @o:			queue
182  * @ramrod_flags:	flags
183  *
184 	 * (Atomicity is ensured using the exe_queue->lock).
185  */
186 static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
187 				       struct bnx2x_exe_queue_obj *o,
188 				       unsigned long *ramrod_flags)
189 {
190 	struct bnx2x_exeq_elem *elem, spacer;
191 	int cur_len = 0, rc;
192 
193 	memset(&spacer, 0, sizeof(spacer));
194 
195 	spin_lock_bh(&o->lock);
196 
197 	/*
198 	 * Next step should not be performed until the current is finished,
199 	 * unless the RAMROD_DRV_CLR_ONLY bit is set. In that case we just want
200 	 * to properly clear object internals without sending any command to the
201 	 * FW, which also implies there won't be any completion to clear the
202 	 * 'pending' list.
203 	 */
204 	if (!list_empty(&o->pending_comp)) {
205 		if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
206 			DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
207 			__bnx2x_exe_queue_reset_pending(bp, o);
208 		} else {
209 			spin_unlock_bh(&o->lock);
210 			return 1;
211 		}
212 	}
213 
214 	/*
215 	 * Run through the pending commands list and create a next
216 	 * execution chunk.
217 	 */
218 	while (!list_empty(&o->exe_queue)) {
219 		elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem,
220 					link);
221 		WARN_ON(!elem->cmd_len);
222 
223 		if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
224 			cur_len += elem->cmd_len;
225 			/*
226 			 * Prevent both lists from being empty when moving an
227 			 * element. This allows calling
228 			 * bnx2x_exe_queue_empty() without taking the lock.
229 			 */
230 			list_add_tail(&spacer.link, &o->pending_comp);
231 			mb();
232 			list_del(&elem->link);
233 			list_add_tail(&elem->link, &o->pending_comp);
234 			list_del(&spacer.link);
235 		} else
236 			break;
237 	}
238 
239 	/* Sanity check */
240 	if (!cur_len) {
241 		spin_unlock_bh(&o->lock);
242 		return 0;
243 	}
244 
245 	rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
246 	if (rc < 0)
247 		/*
248 		 * In case of an error, put the commands back on the exe_queue
249 		 * and reset the pending_comp list.
250 		 */
251 		list_splice_init(&o->pending_comp, &o->exe_queue);
252 	else if (!rc)
253 		/*
254 		 * If zero is returned, it means there are no outstanding pending
255 		 * completions and we may dismiss the pending list.
256 		 */
257 		__bnx2x_exe_queue_reset_pending(bp, o);
258 
259 	spin_unlock_bh(&o->lock);
260 	return rc;
261 }
262 
263 static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o)
264 {
265 	bool empty = list_empty(&o->exe_queue);
266 
267 	/* Don't reorder!!! */
268 	mb();
269 
270 	return empty && list_empty(&o->pending_comp);
271 }
272 
273 static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem(
274 	struct bnx2x *bp)
275 {
276 	DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n");
277 	return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC);
278 }
279 
280 /************************ raw_obj functions ***********************************/
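/* A raw object wraps the bookkeeping shared by all ramrod-driven objects:
 * the func/client/connection ids, the ramrod data buffer and its DMA
 * mapping, and a single 'pending' bit (o->state inside o->pstate) that is
 * set while a ramrod is in flight and cleared by the completion handler.
 */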
281 static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
282 {
283 	return !!test_bit(o->state, o->pstate);
284 }
285 
286 static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
287 {
288 	smp_mb__before_clear_bit();
289 	clear_bit(o->state, o->pstate);
290 	smp_mb__after_clear_bit();
291 }
292 
293 static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
294 {
295 	smp_mb__before_clear_bit();
296 	set_bit(o->state, o->pstate);
297 	smp_mb__after_clear_bit();
298 }
299 
300 /**
301  * bnx2x_state_wait - wait until the given bit(state) is cleared
302  *
303  * @bp:		device handle
304  * @state:	state which is to be cleared
305  * @pstate:	pointer to the state buffer
306  *
307  */
308 static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
309 				   unsigned long *pstate)
310 {
311 	/* can take a while if any port is running */
312 	int cnt = 5000;
313 
314 
315 	if (CHIP_REV_IS_EMUL(bp))
316 		cnt *= 20;
317 
318 	DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state);
319 
320 	might_sleep();
321 	while (cnt--) {
322 		if (!test_bit(state, pstate)) {
323 #ifdef BNX2X_STOP_ON_ERROR
324 			DP(BNX2X_MSG_SP, "exit  (cnt %d)\n", 5000 - cnt);
325 #endif
326 			return 0;
327 		}
328 
329 		usleep_range(1000, 1000);
330 
331 		if (bp->panic)
332 			return -EIO;
333 	}
334 
335 	/* timeout! */
336 	BNX2X_ERR("timeout waiting for state %d\n", state);
337 #ifdef BNX2X_STOP_ON_ERROR
338 	bnx2x_panic();
339 #endif
340 
341 	return -EBUSY;
342 }
343 
344 static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw)
345 {
346 	return bnx2x_state_wait(bp, raw->state, raw->pstate);
347 }
348 
349 /***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
350 /* credit handling callbacks */
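/* The MAC and VLAN credit pools (bnx2x_credit_pool_obj) account for free
 * CAM entries: get()/put() consume and return one unit of credit, while
 * get_entry()/put_entry() also allocate and release a specific CAM offset.
 */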
351 static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset)
352 {
353 	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
354 
355 	WARN_ON(!mp);
356 
357 	return mp->get_entry(mp, offset);
358 }
359 
360 static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o)
361 {
362 	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
363 
364 	WARN_ON(!mp);
365 
366 	return mp->get(mp, 1);
367 }
368 
369 static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset)
370 {
371 	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
372 
373 	WARN_ON(!vp);
374 
375 	return vp->get_entry(vp, offset);
376 }
377 
378 static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
379 {
380 	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
381 
382 	WARN_ON(!vp);
383 
384 	return vp->get(vp, 1);
385 }
386 
387 static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
388 {
389 	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
390 	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
391 
392 	if (!mp->get(mp, 1))
393 		return false;
394 
395 	if (!vp->get(vp, 1)) {
396 		mp->put(mp, 1);
397 		return false;
398 	}
399 
400 	return true;
401 }
402 
403 static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
404 {
405 	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
406 
407 	return mp->put_entry(mp, offset);
408 }
409 
410 static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o)
411 {
412 	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
413 
414 	return mp->put(mp, 1);
415 }
416 
417 static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset)
418 {
419 	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
420 
421 	return vp->put_entry(vp, offset);
422 }
423 
424 static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
425 {
426 	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
427 
428 	return vp->put(vp, 1);
429 }
430 
431 static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
432 {
433 	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
434 	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
435 
436 	if (!mp->put(mp, 1))
437 		return false;
438 
439 	if (!vp->put(vp, 1)) {
440 		mp->get(mp, 1);
441 		return false;
442 	}
443 
444 	return true;
445 }
446 
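/* Copy up to @n MAC addresses from the object's registry into @buf. Each
 * address is stored zero-padded to a u32-aligned slot; the return value is
 * the number of copied addresses multiplied by ETH_ALEN.
 */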
447 static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
448 				int n, u8 *buf)
449 {
450 	struct bnx2x_vlan_mac_registry_elem *pos;
451 	u8 *next = buf;
452 	int counter = 0;
453 
454 	/* traverse list */
455 	list_for_each_entry(pos, &o->head, link) {
456 		if (counter < n) {
457 			/* place leading zeroes in buffer */
458 			memset(next, 0, MAC_LEADING_ZERO_CNT);
459 
460 			/* place mac after leading zeroes*/
461 			memcpy(next + MAC_LEADING_ZERO_CNT, pos->u.mac.mac,
462 			       ETH_ALEN);
463 
464 			/* calculate address of next element and
465 			 * advance counter
466 			 */
467 			counter++;
468 			next = buf + counter * ALIGN(ETH_ALEN, sizeof(u32));
469 
470 			DP(BNX2X_MSG_SP, "copied element number %d to address %p element was %pM\n",
471 			   counter, next, pos->u.mac.mac);
472 		}
473 	}
474 	return counter * ETH_ALEN;
475 }
476 
477 /* check_add() callbacks */
478 static int bnx2x_check_mac_add(struct bnx2x *bp,
479 			       struct bnx2x_vlan_mac_obj *o,
480 			       union bnx2x_classification_ramrod_data *data)
481 {
482 	struct bnx2x_vlan_mac_registry_elem *pos;
483 
484 	DP(BNX2X_MSG_SP, "Checking MAC %pM for ADD command\n", data->mac.mac);
485 
486 	if (!is_valid_ether_addr(data->mac.mac))
487 		return -EINVAL;
488 
489 	/* Check if a requested MAC already exists */
490 	list_for_each_entry(pos, &o->head, link)
491 		if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
492 			return -EEXIST;
493 
494 	return 0;
495 }
496 
497 static int bnx2x_check_vlan_add(struct bnx2x *bp,
498 				struct bnx2x_vlan_mac_obj *o,
499 				union bnx2x_classification_ramrod_data *data)
500 {
501 	struct bnx2x_vlan_mac_registry_elem *pos;
502 
503 	DP(BNX2X_MSG_SP, "Checking VLAN %d for ADD command\n", data->vlan.vlan);
504 
505 	list_for_each_entry(pos, &o->head, link)
506 		if (data->vlan.vlan == pos->u.vlan.vlan)
507 			return -EEXIST;
508 
509 	return 0;
510 }
511 
512 static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
513 				    struct bnx2x_vlan_mac_obj *o,
514 				   union bnx2x_classification_ramrod_data *data)
515 {
516 	struct bnx2x_vlan_mac_registry_elem *pos;
517 
518 	DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for ADD command\n",
519 	   data->vlan_mac.mac, data->vlan_mac.vlan);
520 
521 	list_for_each_entry(pos, &o->head, link)
522 		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
523 		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
524 			     ETH_ALEN)))
525 			return -EEXIST;
526 
527 	return 0;
528 }
529 
530 
531 /* check_del() callbacks */
532 static struct bnx2x_vlan_mac_registry_elem *
533 	bnx2x_check_mac_del(struct bnx2x *bp,
534 			    struct bnx2x_vlan_mac_obj *o,
535 			    union bnx2x_classification_ramrod_data *data)
536 {
537 	struct bnx2x_vlan_mac_registry_elem *pos;
538 
539 	DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac);
540 
541 	list_for_each_entry(pos, &o->head, link)
542 		if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
543 			return pos;
544 
545 	return NULL;
546 }
547 
548 static struct bnx2x_vlan_mac_registry_elem *
549 	bnx2x_check_vlan_del(struct bnx2x *bp,
550 			     struct bnx2x_vlan_mac_obj *o,
551 			     union bnx2x_classification_ramrod_data *data)
552 {
553 	struct bnx2x_vlan_mac_registry_elem *pos;
554 
555 	DP(BNX2X_MSG_SP, "Checking VLAN %d for DEL command\n", data->vlan.vlan);
556 
557 	list_for_each_entry(pos, &o->head, link)
558 		if (data->vlan.vlan == pos->u.vlan.vlan)
559 			return pos;
560 
561 	return NULL;
562 }
563 
564 static struct bnx2x_vlan_mac_registry_elem *
565 	bnx2x_check_vlan_mac_del(struct bnx2x *bp,
566 				 struct bnx2x_vlan_mac_obj *o,
567 				 union bnx2x_classification_ramrod_data *data)
568 {
569 	struct bnx2x_vlan_mac_registry_elem *pos;
570 
571 	DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for DEL command\n",
572 	   data->vlan_mac.mac, data->vlan_mac.vlan);
573 
574 	list_for_each_entry(pos, &o->head, link)
575 		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
576 		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
577 			     ETH_ALEN)))
578 			return pos;
579 
580 	return NULL;
581 }
582 
583 /* check_move() callback */
584 static bool bnx2x_check_move(struct bnx2x *bp,
585 			     struct bnx2x_vlan_mac_obj *src_o,
586 			     struct bnx2x_vlan_mac_obj *dst_o,
587 			     union bnx2x_classification_ramrod_data *data)
588 {
589 	struct bnx2x_vlan_mac_registry_elem *pos;
590 	int rc;
591 
592 	/* Check if we can delete the requested configuration from the first
593 	 * object.
594 	 */
595 	pos = src_o->check_del(bp, src_o, data);
596 
597 	/*  check if configuration can be added */
598 	rc = dst_o->check_add(bp, dst_o, data);
599 
600 	/* If this classification can not be added (is already set)
601 	 * or can't be deleted - return an error.
602 	 */
603 	if (rc || !pos)
604 		return false;
605 
606 	return true;
607 }
608 
609 static bool bnx2x_check_move_always_err(
610 	struct bnx2x *bp,
611 	struct bnx2x_vlan_mac_obj *src_o,
612 	struct bnx2x_vlan_mac_obj *dst_o,
613 	union bnx2x_classification_ramrod_data *data)
614 {
615 	return false;
616 }
617 
618 
619 static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
620 {
621 	struct bnx2x_raw_obj *raw = &o->raw;
622 	u8 rx_tx_flag = 0;
623 
624 	if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
625 	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
626 		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;
627 
628 	if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
629 	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
630 		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;
631 
632 	return rx_tx_flag;
633 }
634 
635 
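/* Program (or clear) one LLH CAM line in the NIG so that frames with
 * @dev_addr are accepted in switch-independent (MF SI / AFEX) modes; in
 * other modes the function is a no-op. @index selects the CAM line.
 */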
636 void bnx2x_set_mac_in_nig(struct bnx2x *bp,
637 			  bool add, unsigned char *dev_addr, int index)
638 {
639 	u32 wb_data[2];
640 	u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
641 			 NIG_REG_LLH0_FUNC_MEM;
642 
643 	if (!IS_MF_SI(bp) && !IS_MF_AFEX(bp))
644 		return;
645 
646 	if (index > BNX2X_LLH_CAM_MAX_PF_LINE)
647 		return;
648 
649 	DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
650 			 (add ? "ADD" : "DELETE"), index);
651 
652 	if (add) {
653 		/* LLH_FUNC_MEM is a u64 WB register */
654 		reg_offset += 8*index;
655 
656 		wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
657 			      (dev_addr[4] <<  8) |  dev_addr[5]);
658 		wb_data[1] = ((dev_addr[0] <<  8) |  dev_addr[1]);
659 
660 		REG_WR_DMAE(bp, reg_offset, wb_data, 2);
661 	}
662 
663 	REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
664 				  NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
665 }
666 
667 /**
668  * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
669  *
670  * @bp:		device handle
671  * @o:		queue for which we want to configure this rule
672  * @add:	if true the command is an ADD command, DEL otherwise
673  * @opcode:	CLASSIFY_RULE_OPCODE_XXX
674  * @hdr:	pointer to a header to setup
675  *
676  */
677 static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
678 	struct bnx2x_vlan_mac_obj *o, bool add, int opcode,
679 	struct eth_classify_cmd_header *hdr)
680 {
681 	struct bnx2x_raw_obj *raw = &o->raw;
682 
683 	hdr->client_id = raw->cl_id;
684 	hdr->func_id = raw->func_id;
685 
686 	/* Rx or/and Tx (internal switching) configuration ? */
687 	hdr->cmd_general_data |=
688 		bnx2x_vlan_mac_get_rx_tx_flag(o);
689 
690 	if (add)
691 		hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;
692 
693 	hdr->cmd_general_data |=
694 		(opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
695 }
696 
697 /**
698  * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
699  *
700  * @cid:	connection id
701  * @type:	BNX2X_FILTER_XXX_PENDING
702  * @hdr:	pointer to a header to setup
703  * @rule_cnt:	number of rules in the ramrod data
704  *
705  * Currently we always configure one rule and set the echo field to contain
706  * the CID and an opcode type.
707  */
708 static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
709 				struct eth_classify_header *hdr, int rule_cnt)
710 {
711 	hdr->echo = (cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT);
712 	hdr->rule_cnt = (u8)rule_cnt;
713 }
714 
715 
716 /* hw_config() callbacks */
717 static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
718 				 struct bnx2x_vlan_mac_obj *o,
719 				 struct bnx2x_exeq_elem *elem, int rule_idx,
720 				 int cam_offset)
721 {
722 	struct bnx2x_raw_obj *raw = &o->raw;
723 	struct eth_classify_rules_ramrod_data *data =
724 		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
725 	int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
726 	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
727 	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
728 	unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
729 	u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;
730 
731 	/*
732 	 * Set LLH CAM entry: currently only iSCSI and ETH macs are
733 	 * relevant. In addition, current implementation is tuned for a
734 	 * single ETH MAC.
735 	 *
736 	 * When configuration of multiple unicast ETH MACs per PF is required
737 	 * in switch-independent mode (NetQ, multiple netdev MACs, etc.),
738 	 * consider better utilisation of the 8 per-function MAC entries in
739 	 * the LLH register. There are also the NIG_REG_P[01]_LLH_FUNC_MEM2
740 	 * registers, which bring the total number of CAM entries per
741 	 * function to 16.
742 	 *
743 	 * Currently we won't configure NIG for MACs other than a primary ETH
744 	 * MAC and iSCSI L2 MAC.
745 	 *
746 	 * If this MAC is moving from one Queue to another, no need to change
747 	 * NIG configuration.
748 	 */
749 	if (cmd != BNX2X_VLAN_MAC_MOVE) {
750 		if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags))
751 			bnx2x_set_mac_in_nig(bp, add, mac,
752 					     BNX2X_LLH_CAM_ISCSI_ETH_LINE);
753 		else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags))
754 			bnx2x_set_mac_in_nig(bp, add, mac,
755 					     BNX2X_LLH_CAM_ETH_LINE);
756 	}
757 
758 	/* Reset the ramrod data buffer for the first rule */
759 	if (rule_idx == 0)
760 		memset(data, 0, sizeof(*data));
761 
762 	/* Setup a command header */
763 	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC,
764 				      &rule_entry->mac.header);
765 
766 	DP(BNX2X_MSG_SP, "About to %s MAC %pM for Queue %d\n",
767 	   (add ? "add" : "delete"), mac, raw->cl_id);
768 
769 	/* Set a MAC itself */
770 	bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
771 			      &rule_entry->mac.mac_mid,
772 			      &rule_entry->mac.mac_lsb, mac);
773 
774 	/* MOVE: Add a rule that will add this MAC to the target Queue */
775 	if (cmd == BNX2X_VLAN_MAC_MOVE) {
776 		rule_entry++;
777 		rule_cnt++;
778 
779 		/* Setup ramrod data */
780 		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
781 					elem->cmd_data.vlan_mac.target_obj,
782 					      true, CLASSIFY_RULE_OPCODE_MAC,
783 					      &rule_entry->mac.header);
784 
785 		/* Set a MAC itself */
786 		bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
787 				      &rule_entry->mac.mac_mid,
788 				      &rule_entry->mac.mac_lsb, mac);
789 	}
790 
791 	/* Set the ramrod data header */
792 	/* TODO: take this to the higher level in order to prevent multiple
793 		 writing */
794 	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
795 					rule_cnt);
796 }
797 
798 /**
799  * bnx2x_vlan_mac_set_rdata_hdr_e1x - set the MAC configuration ramrod data header
800  *
801  * @bp:		device handle
802  * @o:		queue
803  * @type:	BNX2X_FILTER_XXX_PENDING
804  * @cam_offset:	offset in cam memory
805  * @hdr:	pointer to a header to setup
806  *
807  * E1/E1H
808  */
809 static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp,
810 	struct bnx2x_vlan_mac_obj *o, int type, int cam_offset,
811 	struct mac_configuration_hdr *hdr)
812 {
813 	struct bnx2x_raw_obj *r = &o->raw;
814 
815 	hdr->length = 1;
816 	hdr->offset = (u8)cam_offset;
817 	hdr->client_id = 0xff;
818 	hdr->echo = ((r->cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT));
819 }
820 
821 static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp,
822 	struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac,
823 	u16 vlan_id, struct mac_configuration_entry *cfg_entry)
824 {
825 	struct bnx2x_raw_obj *r = &o->raw;
826 	u32 cl_bit_vec = (1 << r->cl_id);
827 
828 	cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec);
829 	cfg_entry->pf_id = r->func_id;
830 	cfg_entry->vlan_id = cpu_to_le16(vlan_id);
831 
832 	if (add) {
833 		SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
834 			 T_ETH_MAC_COMMAND_SET);
835 		SET_FLAG(cfg_entry->flags,
836 			 MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode);
837 
838 		/* Set a MAC in a ramrod data */
839 		bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
840 				      &cfg_entry->middle_mac_addr,
841 				      &cfg_entry->lsb_mac_addr, mac);
842 	} else
843 		SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
844 			 T_ETH_MAC_COMMAND_INVALIDATE);
845 }
846 
847 static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp,
848 	struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add,
849 	u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config)
850 {
851 	struct mac_configuration_entry *cfg_entry = &config->config_table[0];
852 	struct bnx2x_raw_obj *raw = &o->raw;
853 
854 	bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset,
855 					 &config->hdr);
856 	bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id,
857 					 cfg_entry);
858 
859 	DP(BNX2X_MSG_SP, "%s MAC %pM CLID %d CAM offset %d\n",
860 			 (add ? "setting" : "clearing"),
861 			 mac, raw->cl_id, cam_offset);
862 }
863 
864 /**
865  * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data
866  *
867  * @bp:		device handle
868  * @o:		bnx2x_vlan_mac_obj
869  * @elem:	bnx2x_exeq_elem
870  * @rule_idx:	rule_idx
871  * @cam_offset: cam_offset
872  */
873 static void bnx2x_set_one_mac_e1x(struct bnx2x *bp,
874 				  struct bnx2x_vlan_mac_obj *o,
875 				  struct bnx2x_exeq_elem *elem, int rule_idx,
876 				  int cam_offset)
877 {
878 	struct bnx2x_raw_obj *raw = &o->raw;
879 	struct mac_configuration_cmd *config =
880 		(struct mac_configuration_cmd *)(raw->rdata);
881 	/*
882 	 * 57710 and 57711 do not support MOVE command,
883 	 * so it's either ADD or DEL
884 	 */
885 	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
886 		true : false;
887 
888 	/* Reset the ramrod data buffer */
889 	memset(config, 0, sizeof(*config));
890 
891 	bnx2x_vlan_mac_set_rdata_e1x(bp, o, raw->state,
892 				     cam_offset, add,
893 				     elem->cmd_data.vlan_mac.u.mac.mac, 0,
894 				     ETH_VLAN_FILTER_ANY_VLAN, config);
895 }
896 
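/* Fill one VLAN classification rule in the E2 ramrod data; for a MOVE
 * command a second rule that adds the VLAN to the target object is appended.
 */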
897 static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
898 				  struct bnx2x_vlan_mac_obj *o,
899 				  struct bnx2x_exeq_elem *elem, int rule_idx,
900 				  int cam_offset)
901 {
902 	struct bnx2x_raw_obj *raw = &o->raw;
903 	struct eth_classify_rules_ramrod_data *data =
904 		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
905 	int rule_cnt = rule_idx + 1;
906 	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
907 	int cmd = elem->cmd_data.vlan_mac.cmd;
908 	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
909 	u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;
910 
911 	/* Reset the ramrod data buffer for the first rule */
912 	if (rule_idx == 0)
913 		memset(data, 0, sizeof(*data));
914 
915 	/* Set a rule header */
916 	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN,
917 				      &rule_entry->vlan.header);
918 
919 	DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? "add" : "delete"),
920 			 vlan);
921 
922 	/* Set a VLAN itself */
923 	rule_entry->vlan.vlan = cpu_to_le16(vlan);
924 
925 	/* MOVE: Add a rule that will add this VLAN to the target Queue */
926 	if (cmd == BNX2X_VLAN_MAC_MOVE) {
927 		rule_entry++;
928 		rule_cnt++;
929 
930 		/* Setup ramrod data */
931 		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
932 					elem->cmd_data.vlan_mac.target_obj,
933 					      true, CLASSIFY_RULE_OPCODE_VLAN,
934 					      &rule_entry->vlan.header);
935 
936 		/* Set a VLAN itself */
937 		rule_entry->vlan.vlan = cpu_to_le16(vlan);
938 	}
939 
940 	/* Set the ramrod data header */
941 	/* TODO: take this to the higher level in order to prevent multiple
942 		 writing */
943 	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
944 					rule_cnt);
945 }
946 
947 static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
948 				      struct bnx2x_vlan_mac_obj *o,
949 				      struct bnx2x_exeq_elem *elem,
950 				      int rule_idx, int cam_offset)
951 {
952 	struct bnx2x_raw_obj *raw = &o->raw;
953 	struct eth_classify_rules_ramrod_data *data =
954 		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
955 	int rule_cnt = rule_idx + 1;
956 	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
957 	int cmd = elem->cmd_data.vlan_mac.cmd;
958 	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
959 	u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
960 	u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;
961 
962 
963 	/* Reset the ramrod data buffer for the first rule */
964 	if (rule_idx == 0)
965 		memset(data, 0, sizeof(*data));
966 
967 	/* Set a rule header */
968 	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
969 				      &rule_entry->pair.header);
970 
971 	/* Set VLAN and MAC themselves */
972 	rule_entry->pair.vlan = cpu_to_le16(vlan);
973 	bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
974 			      &rule_entry->pair.mac_mid,
975 			      &rule_entry->pair.mac_lsb, mac);
976 
977 	/* MOVE: Add a rule that will add this VLAN-MAC pair to the target Queue */
978 	if (cmd == BNX2X_VLAN_MAC_MOVE) {
979 		rule_entry++;
980 		rule_cnt++;
981 
982 		/* Setup ramrod data */
983 		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
984 					elem->cmd_data.vlan_mac.target_obj,
985 					      true, CLASSIFY_RULE_OPCODE_PAIR,
986 					      &rule_entry->pair.header);
987 
988 		/* Set VLAN and MAC for the target object */
989 		rule_entry->pair.vlan = cpu_to_le16(vlan);
990 		bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
991 				      &rule_entry->pair.mac_mid,
992 				      &rule_entry->pair.mac_lsb, mac);
993 	}
994 
995 	/* Set the ramrod data header */
996 	/* TODO: take this to the higher level in order to prevent multiple
997 		 writing */
998 	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
999 					rule_cnt);
1000 }
1001 
1002 /**
1003  * bnx2x_set_one_vlan_mac_e1h - fill a single VLAN-MAC rule ramrod data (E1H)
1004  *
1005  * @bp:		device handle
1006  * @o:		bnx2x_vlan_mac_obj
1007  * @elem:	bnx2x_exeq_elem
1008  * @rule_idx:	rule_idx
1009  * @cam_offset:	cam_offset
1010  */
1011 static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
1012 				       struct bnx2x_vlan_mac_obj *o,
1013 				       struct bnx2x_exeq_elem *elem,
1014 				       int rule_idx, int cam_offset)
1015 {
1016 	struct bnx2x_raw_obj *raw = &o->raw;
1017 	struct mac_configuration_cmd *config =
1018 		(struct mac_configuration_cmd *)(raw->rdata);
1019 	/*
1020 	 * 57710 and 57711 do not support MOVE command,
1021 	 * so it's either ADD or DEL
1022 	 */
1023 	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
1024 		true : false;
1025 
1026 	/* Reset the ramrod data buffer */
1027 	memset(config, 0, sizeof(*config));
1028 
1029 	bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
1030 				     cam_offset, add,
1031 				     elem->cmd_data.vlan_mac.u.vlan_mac.mac,
1032 				     elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
1033 				     ETH_VLAN_FILTER_CLASSIFY, config);
1034 }
1035 
1036 #define list_next_entry(pos, member) \
1037 	list_entry((pos)->member.next, typeof(*(pos)), member)
1038 
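/* Illustrative restore loop (a sketch only, not code taken from this file;
 * 'p' is a bnx2x_vlan_mac_ramrod_params with vlan_mac_obj and ramrod_flags
 * already set up by the caller):
 *
 *	struct bnx2x_vlan_mac_registry_elem *pos = NULL;
 *	int rc;
 *
 *	do {
 *		rc = bnx2x_vlan_mac_restore(bp, &p, &pos);
 *		if (rc < 0)
 *			return rc;
 *	} while (pos);
 */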
1039 /**
1040  * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
1041  *
1042  * @bp:		device handle
1043  * @p:		command parameters
1044  * @ppos:	pointer to the cookie
1045  *
1046  * reconfigure next MAC/VLAN/VLAN-MAC element from the
1047  * previously configured elements list.
1048  *
1049  * From the command parameters only the RAMROD_COMP_WAIT bit in ramrod_flags
1050  * is taken into account.
1051  *
1052  * The cookie should be given back in the next call so that the function
1053  * handles the next element. If *ppos is set to NULL the iterator is
1054  * restarted; if *ppos == NULL on return, the last element has been
1055  * handled.
1056  *
1057  */
1058 static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
1059 			   struct bnx2x_vlan_mac_ramrod_params *p,
1060 			   struct bnx2x_vlan_mac_registry_elem **ppos)
1061 {
1062 	struct bnx2x_vlan_mac_registry_elem *pos;
1063 	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1064 
1065 	/* If list is empty - there is nothing to do here */
1066 	if (list_empty(&o->head)) {
1067 		*ppos = NULL;
1068 		return 0;
1069 	}
1070 
1071 	/* make a step... */
1072 	if (*ppos == NULL)
1073 		*ppos = list_first_entry(&o->head,
1074 					 struct bnx2x_vlan_mac_registry_elem,
1075 					 link);
1076 	else
1077 		*ppos = list_next_entry(*ppos, link);
1078 
1079 	pos = *ppos;
1080 
1081 	/* If it's the last step - return NULL */
1082 	if (list_is_last(&pos->link, &o->head))
1083 		*ppos = NULL;
1084 
1085 	/* Prepare a 'user_req' */
1086 	memcpy(&p->user_req.u, &pos->u, sizeof(pos->u));
1087 
1088 	/* Set the command */
1089 	p->user_req.cmd = BNX2X_VLAN_MAC_ADD;
1090 
1091 	/* Set vlan_mac_flags */
1092 	p->user_req.vlan_mac_flags = pos->vlan_mac_flags;
1093 
1094 	/* Set a restore bit */
1095 	__set_bit(RAMROD_RESTORE, &p->ramrod_flags);
1096 
1097 	return bnx2x_config_vlan_mac(bp, p);
1098 }
1099 
1100 /*
1101  * bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
1102  * pointer to an element matching the given criteria, or NULL if such an element
1103  * hasn't been found.
1104  */
1105 static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac(
1106 	struct bnx2x_exe_queue_obj *o,
1107 	struct bnx2x_exeq_elem *elem)
1108 {
1109 	struct bnx2x_exeq_elem *pos;
1110 	struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;
1111 
1112 	/* Check pending for execution commands */
1113 	list_for_each_entry(pos, &o->exe_queue, link)
1114 		if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data,
1115 			      sizeof(*data)) &&
1116 		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1117 			return pos;
1118 
1119 	return NULL;
1120 }
1121 
1122 static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
1123 	struct bnx2x_exe_queue_obj *o,
1124 	struct bnx2x_exeq_elem *elem)
1125 {
1126 	struct bnx2x_exeq_elem *pos;
1127 	struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;
1128 
1129 	/* Check pending for execution commands */
1130 	list_for_each_entry(pos, &o->exe_queue, link)
1131 		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data,
1132 			      sizeof(*data)) &&
1133 		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1134 			return pos;
1135 
1136 	return NULL;
1137 }
1138 
1139 static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
1140 	struct bnx2x_exe_queue_obj *o,
1141 	struct bnx2x_exeq_elem *elem)
1142 {
1143 	struct bnx2x_exeq_elem *pos;
1144 	struct bnx2x_vlan_mac_ramrod_data *data =
1145 		&elem->cmd_data.vlan_mac.u.vlan_mac;
1146 
1147 	/* Check pending for execution commands */
1148 	list_for_each_entry(pos, &o->exe_queue, link)
1149 		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
1150 			      sizeof(*data)) &&
1151 		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1152 			return pos;
1153 
1154 	return NULL;
1155 }
1156 
1157 /**
1158  * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
1159  *
1160  * @bp:		device handle
1161  * @qo:		bnx2x_qable_obj
1162  * @elem:	bnx2x_exeq_elem
1163  *
1164  * Checks that the requested configuration can be added. If yes and if
1165  * requested, consume CAM credit.
1166  *
1167  * The 'validate' is run after the 'optimize'.
1168  *
1169  */
1170 static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
1171 					      union bnx2x_qable_obj *qo,
1172 					      struct bnx2x_exeq_elem *elem)
1173 {
1174 	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1175 	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1176 	int rc;
1177 
1178 	/* Check the registry */
1179 	rc = o->check_add(bp, o, &elem->cmd_data.vlan_mac.u);
1180 	if (rc) {
1181 		DP(BNX2X_MSG_SP, "ADD command is not allowed considering current registry state.\n");
1182 		return rc;
1183 	}
1184 
1185 	/*
1186 	 * Check if there is a pending ADD command for this
1187 	 * MAC/VLAN/VLAN-MAC. Return an error if there is.
1188 	 */
1189 	if (exeq->get(exeq, elem)) {
1190 		DP(BNX2X_MSG_SP, "There is a pending ADD command already\n");
1191 		return -EEXIST;
1192 	}
1193 
1194 	/*
1195 	 * TODO: Check the pending MOVE from other objects where this
1196 	 * object is a destination object.
1197 	 */
1198 
1199 	/* Consume the credit if not requested not to */
1200 	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1201 		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1202 	    o->get_credit(o)))
1203 		return -EINVAL;
1204 
1205 	return 0;
1206 }
1207 
1208 /**
1209  * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed
1210  *
1211  * @bp:		device handle
1212  * @qo:		queueable object to check
1213  * @elem:	element that needs to be deleted
1214  *
1215  * Checks that the requested configuration can be deleted. If yes and if
1216  * requested, returns a CAM credit.
1217  *
1218  * The 'validate' is run after the 'optimize'.
1219  */
1220 static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
1221 					      union bnx2x_qable_obj *qo,
1222 					      struct bnx2x_exeq_elem *elem)
1223 {
1224 	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1225 	struct bnx2x_vlan_mac_registry_elem *pos;
1226 	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1227 	struct bnx2x_exeq_elem query_elem;
1228 
1229 	/* If this classification can not be deleted (doesn't exist)
1230 	 * - return -EEXIST.
1231 	 */
1232 	pos = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
1233 	if (!pos) {
1234 		DP(BNX2X_MSG_SP, "DEL command is not allowed considering current registry state\n");
1235 		return -EEXIST;
1236 	}
1237 
1238 	/*
1239 	 * Check if there are pending DEL or MOVE commands for this
1240 	 * MAC/VLAN/VLAN-MAC. Return an error if so.
1241 	 */
1242 	memcpy(&query_elem, elem, sizeof(query_elem));
1243 
1244 	/* Check for MOVE commands */
1245 	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE;
1246 	if (exeq->get(exeq, &query_elem)) {
1247 		BNX2X_ERR("There is a pending MOVE command already\n");
1248 		return -EINVAL;
1249 	}
1250 
1251 	/* Check for DEL commands */
1252 	if (exeq->get(exeq, elem)) {
1253 		DP(BNX2X_MSG_SP, "There is a pending DEL command already\n");
1254 		return -EEXIST;
1255 	}
1256 
1257 	/* Return the credit to the credit pool if not requested not to */
1258 	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1259 		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1260 	    o->put_credit(o))) {
1261 		BNX2X_ERR("Failed to return a credit\n");
1262 		return -EINVAL;
1263 	}
1264 
1265 	return 0;
1266 }
1267 
1268 /**
1269  * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed
1270  *
1271  * @bp:		device handle
1272  * @qo:		queueable object to check (source)
1273  * @elem:	element that needs to be moved
1274  *
1275  * Checks that the requested configuration can be moved. If yes and if
1276  * requested, returns a CAM credit.
1277  *
1278  * The 'validate' is run after the 'optimize'.
1279  */
1280 static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
1281 					       union bnx2x_qable_obj *qo,
1282 					       struct bnx2x_exeq_elem *elem)
1283 {
1284 	struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac;
1285 	struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
1286 	struct bnx2x_exeq_elem query_elem;
1287 	struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue;
1288 	struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;
1289 
1290 	/*
1291 	 * Check if we can perform this operation based on the current registry
1292 	 * state.
1293 	 */
1294 	if (!src_o->check_move(bp, src_o, dest_o,
1295 			       &elem->cmd_data.vlan_mac.u)) {
1296 		DP(BNX2X_MSG_SP, "MOVE command is not allowed considering current registry state\n");
1297 		return -EINVAL;
1298 	}
1299 
1300 	/*
1301 	 * Check if there is an already pending DEL or MOVE command for the
1302 	 * source object or ADD command for a destination object. Return an
1303 	 * error if so.
1304 	 */
1305 	memcpy(&query_elem, elem, sizeof(query_elem));
1306 
1307 	/* Check DEL on source */
1308 	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
1309 	if (src_exeq->get(src_exeq, &query_elem)) {
1310 		BNX2X_ERR("There is a pending DEL command on the source queue already\n");
1311 		return -EINVAL;
1312 	}
1313 
1314 	/* Check MOVE on source */
1315 	if (src_exeq->get(src_exeq, elem)) {
1316 		DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n");
1317 		return -EEXIST;
1318 	}
1319 
1320 	/* Check ADD on destination */
1321 	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
1322 	if (dest_exeq->get(dest_exeq, &query_elem)) {
1323 		BNX2X_ERR("There is a pending ADD command on the destination queue already\n");
1324 		return -EINVAL;
1325 	}
1326 
1327 	/* Consume the credit if not requested not to */
1328 	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
1329 		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1330 	    dest_o->get_credit(dest_o)))
1331 		return -EINVAL;
1332 
1333 	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1334 		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1335 	    src_o->put_credit(src_o))) {
1336 		/* return the credit taken from dest... */
1337 		dest_o->put_credit(dest_o);
1338 		return -EINVAL;
1339 	}
1340 
1341 	return 0;
1342 }
1343 
1344 static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
1345 				   union bnx2x_qable_obj *qo,
1346 				   struct bnx2x_exeq_elem *elem)
1347 {
1348 	switch (elem->cmd_data.vlan_mac.cmd) {
1349 	case BNX2X_VLAN_MAC_ADD:
1350 		return bnx2x_validate_vlan_mac_add(bp, qo, elem);
1351 	case BNX2X_VLAN_MAC_DEL:
1352 		return bnx2x_validate_vlan_mac_del(bp, qo, elem);
1353 	case BNX2X_VLAN_MAC_MOVE:
1354 		return bnx2x_validate_vlan_mac_move(bp, qo, elem);
1355 	default:
1356 		return -EINVAL;
1357 	}
1358 }
1359 
1360 static int bnx2x_remove_vlan_mac(struct bnx2x *bp,
1361 				  union bnx2x_qable_obj *qo,
1362 				  struct bnx2x_exeq_elem *elem)
1363 {
1364 	int rc = 0;
1365 
1366 	/* If consumption wasn't required, nothing to do */
1367 	if (test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1368 		     &elem->cmd_data.vlan_mac.vlan_mac_flags))
1369 		return 0;
1370 
1371 	switch (elem->cmd_data.vlan_mac.cmd) {
1372 	case BNX2X_VLAN_MAC_ADD:
1373 	case BNX2X_VLAN_MAC_MOVE:
1374 		rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
1375 		break;
1376 	case BNX2X_VLAN_MAC_DEL:
1377 		rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
1378 		break;
1379 	default:
1380 		return -EINVAL;
1381 	}
1382 
1383 	if (rc != true)
1384 		return -EINVAL;
1385 
1386 	return 0;
1387 }
1388 
1389 /**
1390  * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes.
1391  *
1392  * @bp:		device handle
1393  * @o:		bnx2x_vlan_mac_obj
1394  *
1395  */
1396 static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
1397 			       struct bnx2x_vlan_mac_obj *o)
1398 {
1399 	int cnt = 5000, rc;
1400 	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1401 	struct bnx2x_raw_obj *raw = &o->raw;
1402 
1403 	while (cnt--) {
1404 		/* Wait for the current command to complete */
1405 		rc = raw->wait_comp(bp, raw);
1406 		if (rc)
1407 			return rc;
1408 
1409 		/* Wait until there are no pending commands */
1410 		if (!bnx2x_exe_queue_empty(exeq))
1411 			usleep_range(1000, 1000);
1412 		else
1413 			return 0;
1414 	}
1415 
1416 	return -EBUSY;
1417 }
1418 
1419 /**
1420  * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
1421  *
1422  * @bp:		device handle
1423  * @o:		bnx2x_vlan_mac_obj
1424  * @cqe:		completion element we are handling
1425  * @ramrod_flags:	if RAMROD_CONT is set, schedule the next execution chunk
1426  *
1427  */
1428 static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
1429 				   struct bnx2x_vlan_mac_obj *o,
1430 				   union event_ring_elem *cqe,
1431 				   unsigned long *ramrod_flags)
1432 {
1433 	struct bnx2x_raw_obj *r = &o->raw;
1434 	int rc;
1435 
1436 	/* Reset pending list */
1437 	bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);
1438 
1439 	/* Clear pending */
1440 	r->clear_pending(r);
1441 
1442 	/* If ramrod failed this is most likely a SW bug */
1443 	if (cqe->message.error)
1444 		return -EINVAL;
1445 
1446 	/* Run the next bulk of pending commands if requested */
1447 	if (test_bit(RAMROD_CONT, ramrod_flags)) {
1448 		rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
1449 		if (rc < 0)
1450 			return rc;
1451 	}
1452 
1453 	/* If there is more work to do return PENDING */
1454 	if (!bnx2x_exe_queue_empty(&o->exe_queue))
1455 		return 1;
1456 
1457 	return 0;
1458 }
1459 
1460 /**
1461  * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands.
1462  *
1463  * @bp:		device handle
1464  * @o:		bnx2x_qable_obj
1465  * @elem:	bnx2x_exeq_elem
1466  */
1467 static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
1468 				   union bnx2x_qable_obj *qo,
1469 				   struct bnx2x_exeq_elem *elem)
1470 {
1471 	struct bnx2x_exeq_elem query, *pos;
1472 	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1473 	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1474 
1475 	memcpy(&query, elem, sizeof(query));
1476 
1477 	switch (elem->cmd_data.vlan_mac.cmd) {
1478 	case BNX2X_VLAN_MAC_ADD:
1479 		query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
1480 		break;
1481 	case BNX2X_VLAN_MAC_DEL:
1482 		query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
1483 		break;
1484 	default:
1485 		/* Don't handle anything other than ADD or DEL */
1486 		return 0;
1487 	}
1488 
1489 	/* If we found the appropriate element - delete it */
1490 	pos = exeq->get(exeq, &query);
1491 	if (pos) {
1492 
1493 		/* Return the credit of the optimized command */
1494 		if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1495 			      &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
1496 			if ((query.cmd_data.vlan_mac.cmd ==
1497 			     BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) {
1498 				BNX2X_ERR("Failed to return the credit for the optimized ADD command\n");
1499 				return -EINVAL;
1500 			} else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
1501 				BNX2X_ERR("Failed to recover the credit from the optimized DEL command\n");
1502 				return -EINVAL;
1503 			}
1504 		}
1505 
1506 		DP(BNX2X_MSG_SP, "Optimizing %s command\n",
1507 			   (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
1508 			   "ADD" : "DEL");
1509 
1510 		list_del(&pos->link);
1511 		bnx2x_exe_queue_free_elem(bp, pos);
1512 		return 1;
1513 	}
1514 
1515 	return 0;
1516 }
1517 
1518 /**
1519  * bnx2x_vlan_mac_get_registry_elem - prepare a registry element
1520  *
1521  * @bp:	  device handle
1522  * @o:		vlan_mac object the element belongs to
1523  * @elem:	execution queue element describing the command
1524  * @restore:	true when restoring a previously configured element
1525  * @re:		where the resulting registry element pointer is returned
1526  *
1527  * prepare a registry element according to the current command request.
1528  */
1529 static inline int bnx2x_vlan_mac_get_registry_elem(
1530 	struct bnx2x *bp,
1531 	struct bnx2x_vlan_mac_obj *o,
1532 	struct bnx2x_exeq_elem *elem,
1533 	bool restore,
1534 	struct bnx2x_vlan_mac_registry_elem **re)
1535 {
1536 	int cmd = elem->cmd_data.vlan_mac.cmd;
1537 	struct bnx2x_vlan_mac_registry_elem *reg_elem;
1538 
1539 	/* Allocate a new registry element if needed. */
1540 	if (!restore &&
1541 	    ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) {
1542 		reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC);
1543 		if (!reg_elem)
1544 			return -ENOMEM;
1545 
1546 		/* Get a new CAM offset */
1547 		if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
1548 			/*
1549 			 * This should never happen, because we have checked the
1550 			 * CAM availability in the 'validate' step.
1551 			 */
1552 			WARN_ON(1);
1553 			kfree(reg_elem);
1554 			return -EINVAL;
1555 		}
1556 
1557 		DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset);
1558 
1559 		/* Set a VLAN-MAC data */
1560 		memcpy(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
1561 			  sizeof(reg_elem->u));
1562 
1563 		/* Copy the flags (needed for DEL and RESTORE flows) */
1564 		reg_elem->vlan_mac_flags =
1565 			elem->cmd_data.vlan_mac.vlan_mac_flags;
1566 	} else /* DEL, RESTORE */
1567 		reg_elem = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
1568 
1569 	*re = reg_elem;
1570 	return 0;
1571 }
1572 
1573 /**
1574  * bnx2x_execute_vlan_mac - execute vlan mac command
1575  *
1576  * @bp:			device handle
1577  * @qo:			queueable object (the vlan_mac object)
1578  * @exe_chunk:		list of commands to execute in this chunk
1579  * @ramrod_flags:	execution flags (RAMROD_XXX)
1580  *
1581  * go and send a ramrod!
1582  */
1583 static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
1584 				  union bnx2x_qable_obj *qo,
1585 				  struct list_head *exe_chunk,
1586 				  unsigned long *ramrod_flags)
1587 {
1588 	struct bnx2x_exeq_elem *elem;
1589 	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
1590 	struct bnx2x_raw_obj *r = &o->raw;
1591 	int rc, idx = 0;
1592 	bool restore = test_bit(RAMROD_RESTORE, ramrod_flags);
1593 	bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags);
1594 	struct bnx2x_vlan_mac_registry_elem *reg_elem;
1595 	int cmd;
1596 
1597 	/*
1598 	 * If DRIVER_ONLY execution is requested, clean up the registry
1599 	 * and exit. Otherwise send a ramrod to FW.
1600 	 */
1601 	if (!drv_only) {
1602 		WARN_ON(r->check_pending(r));
1603 
1604 		/* Set pending */
1605 		r->set_pending(r);
1606 
1607 		/* Fill the ramrod data */
1608 		list_for_each_entry(elem, exe_chunk, link) {
1609 			cmd = elem->cmd_data.vlan_mac.cmd;
1610 			/*
1611 			 * We will add to the target object in MOVE command, so
1612 			 * change the object for a CAM search.
1613 			 */
1614 			if (cmd == BNX2X_VLAN_MAC_MOVE)
1615 				cam_obj = elem->cmd_data.vlan_mac.target_obj;
1616 			else
1617 				cam_obj = o;
1618 
1619 			rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj,
1620 							      elem, restore,
1621 							      &reg_elem);
1622 			if (rc)
1623 				goto error_exit;
1624 
1625 			WARN_ON(!reg_elem);
1626 
1627 			/* Push a new entry into the registry */
1628 			if (!restore &&
1629 			    ((cmd == BNX2X_VLAN_MAC_ADD) ||
1630 			    (cmd == BNX2X_VLAN_MAC_MOVE)))
1631 				list_add(&reg_elem->link, &cam_obj->head);
1632 
1633 			/* Configure a single command in a ramrod data buffer */
1634 			o->set_one_rule(bp, o, elem, idx,
1635 					reg_elem->cam_offset);
1636 
1637 			/* MOVE command consumes 2 entries in the ramrod data */
1638 			if (cmd == BNX2X_VLAN_MAC_MOVE)
1639 				idx += 2;
1640 			else
1641 				idx++;
1642 		}
1643 
1644 		/*
1645 		 * No need for an explicit memory barrier here: what must be
1646 		 * ordered is the write to the SPQ element vs. the update of
1647 		 * the SPQ producer (which involves a memory read), and the
1648 		 * full memory barrier inside bnx2x_sp_post() already takes
1649 		 * care of that.
1650 		 */
1651 
1652 		rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
1653 				   U64_HI(r->rdata_mapping),
1654 				   U64_LO(r->rdata_mapping),
1655 				   ETH_CONNECTION_TYPE);
1656 		if (rc)
1657 			goto error_exit;
1658 	}
1659 
1660 	/* Now, when we are done with the ramrod - clean up the registry */
1661 	list_for_each_entry(elem, exe_chunk, link) {
1662 		cmd = elem->cmd_data.vlan_mac.cmd;
1663 		if ((cmd == BNX2X_VLAN_MAC_DEL) ||
1664 		    (cmd == BNX2X_VLAN_MAC_MOVE)) {
1665 			reg_elem = o->check_del(bp, o,
1666 						&elem->cmd_data.vlan_mac.u);
1667 
1668 			WARN_ON(!reg_elem);
1669 
1670 			o->put_cam_offset(o, reg_elem->cam_offset);
1671 			list_del(&reg_elem->link);
1672 			kfree(reg_elem);
1673 		}
1674 	}
1675 
1676 	if (!drv_only)
1677 		return 1;
1678 	else
1679 		return 0;
1680 
1681 error_exit:
1682 	r->clear_pending(r);
1683 
1684 	/* Cleanup a registry in case of a failure */
1685 	list_for_each_entry(elem, exe_chunk, link) {
1686 		cmd = elem->cmd_data.vlan_mac.cmd;
1687 
1688 		if (cmd == BNX2X_VLAN_MAC_MOVE)
1689 			cam_obj = elem->cmd_data.vlan_mac.target_obj;
1690 		else
1691 			cam_obj = o;
1692 
1693 		/* Delete all newly added above entries */
1694 		if (!restore &&
1695 		    ((cmd == BNX2X_VLAN_MAC_ADD) ||
1696 		    (cmd == BNX2X_VLAN_MAC_MOVE))) {
1697 			reg_elem = o->check_del(bp, cam_obj,
1698 						&elem->cmd_data.vlan_mac.u);
1699 			if (reg_elem) {
1700 				list_del(&reg_elem->link);
1701 				kfree(reg_elem);
1702 			}
1703 		}
1704 	}
1705 
1706 	return rc;
1707 }
1708 
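/* Wrap the user request from @p in an exe_queue element and queue it on the
 * object's execution queue. A MOVE command takes two rules in the ramrod
 * data, hence cmd_len is 2 for it and 1 otherwise.
 */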
1709 static inline int bnx2x_vlan_mac_push_new_cmd(
1710 	struct bnx2x *bp,
1711 	struct bnx2x_vlan_mac_ramrod_params *p)
1712 {
1713 	struct bnx2x_exeq_elem *elem;
1714 	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1715 	bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags);
1716 
1717 	/* Allocate the execution queue element */
1718 	elem = bnx2x_exe_queue_alloc_elem(bp);
1719 	if (!elem)
1720 		return -ENOMEM;
1721 
1722 	/* Set the command 'length' */
1723 	switch (p->user_req.cmd) {
1724 	case BNX2X_VLAN_MAC_MOVE:
1725 		elem->cmd_len = 2;
1726 		break;
1727 	default:
1728 		elem->cmd_len = 1;
1729 	}
1730 
1731 	/* Fill the object specific info */
1732 	memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));
1733 
1734 	/* Try to add a new command to the pending list */
1735 	return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore);
1736 }
1737 
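/* Typical caller flow (an illustrative sketch only; the field names follow
 * struct bnx2x_vlan_mac_ramrod_params, while 'mac_obj' and 'addr' stand for
 * whatever MAC object and address buffer the caller owns):
 *
 *	struct bnx2x_vlan_mac_ramrod_params p;
 *
 *	memset(&p, 0, sizeof(p));
 *	p.vlan_mac_obj = mac_obj;
 *	p.user_req.cmd = BNX2X_VLAN_MAC_ADD;
 *	memcpy(p.user_req.u.mac.mac, addr, ETH_ALEN);
 *	__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
 *	rc = bnx2x_config_vlan_mac(bp, &p);
 */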
1738 /**
1739  * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
1740  *
1741  * @bp:	  device handle
1742  * @p:	  command parameters
1743  *
1744  */
1745 int bnx2x_config_vlan_mac(
1746 	struct bnx2x *bp,
1747 	struct bnx2x_vlan_mac_ramrod_params *p)
1748 {
1749 	int rc = 0;
1750 	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1751 	unsigned long *ramrod_flags = &p->ramrod_flags;
1752 	bool cont = test_bit(RAMROD_CONT, ramrod_flags);
1753 	struct bnx2x_raw_obj *raw = &o->raw;
1754 
1755 	/*
1756 	 * Add new elements to the execution list for commands that require it.
1757 	 */
1758 	if (!cont) {
1759 		rc = bnx2x_vlan_mac_push_new_cmd(bp, p);
1760 		if (rc)
1761 			return rc;
1762 	}
1763 
1764 	/*
1765 	 * If nothing will be executed further in this iteration we want to
1766 	 * return PENDING if there are pending commands
1767 	 */
1768 	if (!bnx2x_exe_queue_empty(&o->exe_queue))
1769 		rc = 1;
1770 
1771 	if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags))  {
1772 		DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.\n");
1773 		raw->clear_pending(raw);
1774 	}
1775 
1776 	/* Execute commands if required */
1777 	if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
1778 	    test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
1779 		rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
1780 		if (rc < 0)
1781 			return rc;
1782 	}
1783 
1784 	/*
1785 	 * RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
1786 	 * then the user wants to wait until the last command is done.
1787 	 */
1788 	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
1789 		/*
1790 		 * Wait for at most the current exe_queue length iterations plus
1791 		 * one (for the currently pending command).
1792 		 */
1793 		int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;
1794 
1795 		while (!bnx2x_exe_queue_empty(&o->exe_queue) &&
1796 		       max_iterations--) {
1797 
1798 			/* Wait for the current command to complete */
1799 			rc = raw->wait_comp(bp, raw);
1800 			if (rc)
1801 				return rc;
1802 
1803 			/* Make a next step */
1804 			rc = bnx2x_exe_queue_step(bp, &o->exe_queue,
1805 						  ramrod_flags);
1806 			if (rc < 0)
1807 				return rc;
1808 		}
1809 
1810 		return 0;
1811 	}
1812 
1813 	return rc;
1814 }
1815 
1816 
1817 
1818 /**
1819  * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
1820  *
1821  * @bp:			device handle
1822  * @o:			vlan_mac object to clean up
1823  * @vlan_mac_flags:	only elements with these vlan_mac_flags are deleted
1824  * @ramrod_flags:	execution flags to be used for this deletion
1825  *
1826  * Returns 0 if the last operation has completed successfully and there are
1827  * no more elements left, a positive value if the last operation has completed
1828  * successfully and there are more previously configured elements, and a
1829  * negative value if the current operation has failed.
1830  */
1831 static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
1832 				  struct bnx2x_vlan_mac_obj *o,
1833 				  unsigned long *vlan_mac_flags,
1834 				  unsigned long *ramrod_flags)
1835 {
1836 	struct bnx2x_vlan_mac_registry_elem *pos = NULL;
1837 	int rc = 0;
1838 	struct bnx2x_vlan_mac_ramrod_params p;
1839 	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1840 	struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
1841 
1842 	/* Clear pending commands first */
1843 
1844 	spin_lock_bh(&exeq->lock);
1845 
1846 	list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
1847 		if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
1848 		    *vlan_mac_flags) {
1849 			rc = exeq->remove(bp, exeq->owner, exeq_pos);
1850 			if (rc) {
1851 				BNX2X_ERR("Failed to remove command\n");
1852 				spin_unlock_bh(&exeq->lock);
1853 				return rc;
1854 			}
1855 			list_del(&exeq_pos->link);
1856 		}
1857 	}
1858 
1859 	spin_unlock_bh(&exeq->lock);
1860 
1861 	/* Prepare a command request */
1862 	memset(&p, 0, sizeof(p));
1863 	p.vlan_mac_obj = o;
1864 	p.ramrod_flags = *ramrod_flags;
1865 	p.user_req.cmd = BNX2X_VLAN_MAC_DEL;
1866 
1867 	/*
1868 	 * Add all the matching VLAN-MAC entries to the execution queue without
1869 	 * actually executing anything.
1870 	 */
1871 	__clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
1872 	__clear_bit(RAMROD_EXEC, &p.ramrod_flags);
1873 	__clear_bit(RAMROD_CONT, &p.ramrod_flags);
1874 
1875 	list_for_each_entry(pos, &o->head, link) {
1876 		if (pos->vlan_mac_flags == *vlan_mac_flags) {
1877 			p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
1878 			memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
1879 			rc = bnx2x_config_vlan_mac(bp, &p);
1880 			if (rc < 0) {
1881 				BNX2X_ERR("Failed to add a new DEL command\n");
1882 				return rc;
1883 			}
1884 		}
1885 	}
1886 
1887 	p.ramrod_flags = *ramrod_flags;
1888 	__set_bit(RAMROD_CONT, &p.ramrod_flags);
1889 
1890 	return bnx2x_config_vlan_mac(bp, &p);
1891 }
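/* A sketch of how the delete_all callback (installed below as
 * bnx2x_vlan_mac_del_all) is typically driven; the flag choice is an
 * illustrative assumption:
 *
 *	unsigned long vlan_mac_flags = 0, ramrod_flags = 0;
 *
 *	__set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
 *	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
 *	rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags);
 *
 * On success all ETH MACs previously configured on the object are gone,
 * both from the HW and from the object's registry.
 */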
1892 
1893 static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id,
1894 	u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state,
1895 	unsigned long *pstate, bnx2x_obj_type type)
1896 {
1897 	raw->func_id = func_id;
1898 	raw->cid = cid;
1899 	raw->cl_id = cl_id;
1900 	raw->rdata = rdata;
1901 	raw->rdata_mapping = rdata_mapping;
1902 	raw->state = state;
1903 	raw->pstate = pstate;
1904 	raw->obj_type = type;
1905 	raw->check_pending = bnx2x_raw_check_pending;
1906 	raw->clear_pending = bnx2x_raw_clear_pending;
1907 	raw->set_pending = bnx2x_raw_set_pending;
1908 	raw->wait_comp = bnx2x_raw_wait;
1909 }
1910 
1911 static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
1912 	u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping,
1913 	int state, unsigned long *pstate, bnx2x_obj_type type,
1914 	struct bnx2x_credit_pool_obj *macs_pool,
1915 	struct bnx2x_credit_pool_obj *vlans_pool)
1916 {
1917 	INIT_LIST_HEAD(&o->head);
1918 
1919 	o->macs_pool = macs_pool;
1920 	o->vlans_pool = vlans_pool;
1921 
1922 	o->delete_all = bnx2x_vlan_mac_del_all;
1923 	o->restore = bnx2x_vlan_mac_restore;
1924 	o->complete = bnx2x_complete_vlan_mac;
1925 	o->wait = bnx2x_wait_vlan_mac;
1926 
1927 	bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
1928 			   state, pstate, type);
1929 }
1930 
1932 void bnx2x_init_mac_obj(struct bnx2x *bp,
1933 			struct bnx2x_vlan_mac_obj *mac_obj,
1934 			u8 cl_id, u32 cid, u8 func_id, void *rdata,
1935 			dma_addr_t rdata_mapping, int state,
1936 			unsigned long *pstate, bnx2x_obj_type type,
1937 			struct bnx2x_credit_pool_obj *macs_pool)
1938 {
1939 	union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj;
1940 
1941 	bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
1942 				   rdata_mapping, state, pstate, type,
1943 				   macs_pool, NULL);
1944 
1945 	/* CAM credit pool handling */
1946 	mac_obj->get_credit = bnx2x_get_credit_mac;
1947 	mac_obj->put_credit = bnx2x_put_credit_mac;
1948 	mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
1949 	mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
1950 
1951 	if (CHIP_IS_E1x(bp)) {
1952 		mac_obj->set_one_rule      = bnx2x_set_one_mac_e1x;
1953 		mac_obj->check_del         = bnx2x_check_mac_del;
1954 		mac_obj->check_add         = bnx2x_check_mac_add;
1955 		mac_obj->check_move        = bnx2x_check_move_always_err;
1956 		mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;
1957 
1958 		/* Exe Queue */
1959 		bnx2x_exe_queue_init(bp,
1960 				     &mac_obj->exe_queue, 1, qable_obj,
1961 				     bnx2x_validate_vlan_mac,
1962 				     bnx2x_remove_vlan_mac,
1963 				     bnx2x_optimize_vlan_mac,
1964 				     bnx2x_execute_vlan_mac,
1965 				     bnx2x_exeq_get_mac);
1966 	} else {
1967 		mac_obj->set_one_rule      = bnx2x_set_one_mac_e2;
1968 		mac_obj->check_del         = bnx2x_check_mac_del;
1969 		mac_obj->check_add         = bnx2x_check_mac_add;
1970 		mac_obj->check_move        = bnx2x_check_move;
1971 		mac_obj->ramrod_cmd        =
1972 			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
1973 		mac_obj->get_n_elements    = bnx2x_get_n_elements;
1974 
1975 		/* Exe Queue */
1976 		bnx2x_exe_queue_init(bp,
1977 				     &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
1978 				     qable_obj, bnx2x_validate_vlan_mac,
1979 				     bnx2x_remove_vlan_mac,
1980 				     bnx2x_optimize_vlan_mac,
1981 				     bnx2x_execute_vlan_mac,
1982 				     bnx2x_exeq_get_mac);
1983 	}
1984 }
1985 
1986 void bnx2x_init_vlan_obj(struct bnx2x *bp,
1987 			 struct bnx2x_vlan_mac_obj *vlan_obj,
1988 			 u8 cl_id, u32 cid, u8 func_id, void *rdata,
1989 			 dma_addr_t rdata_mapping, int state,
1990 			 unsigned long *pstate, bnx2x_obj_type type,
1991 			 struct bnx2x_credit_pool_obj *vlans_pool)
1992 {
1993 	union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj;
1994 
1995 	bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
1996 				   rdata_mapping, state, pstate, type, NULL,
1997 				   vlans_pool);
1998 
1999 	vlan_obj->get_credit = bnx2x_get_credit_vlan;
2000 	vlan_obj->put_credit = bnx2x_put_credit_vlan;
2001 	vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan;
2002 	vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan;
2003 
2004 	if (CHIP_IS_E1x(bp)) {
2005 		BNX2X_ERR("Do not support chips others than E2 and newer\n");
2006 		BUG();
2007 	} else {
2008 		vlan_obj->set_one_rule      = bnx2x_set_one_vlan_e2;
2009 		vlan_obj->check_del         = bnx2x_check_vlan_del;
2010 		vlan_obj->check_add         = bnx2x_check_vlan_add;
2011 		vlan_obj->check_move        = bnx2x_check_move;
2012 		vlan_obj->ramrod_cmd        =
2013 			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2014 
2015 		/* Exe Queue */
2016 		bnx2x_exe_queue_init(bp,
2017 				     &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
2018 				     qable_obj, bnx2x_validate_vlan_mac,
2019 				     bnx2x_remove_vlan_mac,
2020 				     bnx2x_optimize_vlan_mac,
2021 				     bnx2x_execute_vlan_mac,
2022 				     bnx2x_exeq_get_vlan);
2023 	}
2024 }
2025 
2026 void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
2027 			     struct bnx2x_vlan_mac_obj *vlan_mac_obj,
2028 			     u8 cl_id, u32 cid, u8 func_id, void *rdata,
2029 			     dma_addr_t rdata_mapping, int state,
2030 			     unsigned long *pstate, bnx2x_obj_type type,
2031 			     struct bnx2x_credit_pool_obj *macs_pool,
2032 			     struct bnx2x_credit_pool_obj *vlans_pool)
2033 {
2034 	union bnx2x_qable_obj *qable_obj =
2035 		(union bnx2x_qable_obj *)vlan_mac_obj;
2036 
2037 	bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
2038 				   rdata_mapping, state, pstate, type,
2039 				   macs_pool, vlans_pool);
2040 
2041 	/* CAM pool handling */
2042 	vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
2043 	vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
2044 	/*
2045 	 * CAM offset is relevant only for 57710 and 57711 chips, which have a
2046 	 * single CAM for both MACs and VLAN-MAC pairs. So the offset
2047 	 * will be taken from the MACs' pool object only.
2048 	 */
2049 	vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
2050 	vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
2051 
2052 	if (CHIP_IS_E1(bp)) {
2053 		BNX2X_ERR("Do not support chips others than E2\n");
2054 		BUG();
2055 	} else if (CHIP_IS_E1H(bp)) {
2056 		vlan_mac_obj->set_one_rule      = bnx2x_set_one_vlan_mac_e1h;
2057 		vlan_mac_obj->check_del         = bnx2x_check_vlan_mac_del;
2058 		vlan_mac_obj->check_add         = bnx2x_check_vlan_mac_add;
2059 		vlan_mac_obj->check_move        = bnx2x_check_move_always_err;
2060 		vlan_mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;
2061 
2062 		/* Exe Queue */
2063 		bnx2x_exe_queue_init(bp,
2064 				     &vlan_mac_obj->exe_queue, 1, qable_obj,
2065 				     bnx2x_validate_vlan_mac,
2066 				     bnx2x_remove_vlan_mac,
2067 				     bnx2x_optimize_vlan_mac,
2068 				     bnx2x_execute_vlan_mac,
2069 				     bnx2x_exeq_get_vlan_mac);
2070 	} else {
2071 		vlan_mac_obj->set_one_rule      = bnx2x_set_one_vlan_mac_e2;
2072 		vlan_mac_obj->check_del         = bnx2x_check_vlan_mac_del;
2073 		vlan_mac_obj->check_add         = bnx2x_check_vlan_mac_add;
2074 		vlan_mac_obj->check_move        = bnx2x_check_move;
2075 		vlan_mac_obj->ramrod_cmd        =
2076 			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2077 
2078 		/* Exe Queue */
2079 		bnx2x_exe_queue_init(bp,
2080 				     &vlan_mac_obj->exe_queue,
2081 				     CLASSIFY_RULES_COUNT,
2082 				     qable_obj, bnx2x_validate_vlan_mac,
2083 				     bnx2x_remove_vlan_mac,
2084 				     bnx2x_optimize_vlan_mac,
2085 				     bnx2x_execute_vlan_mac,
2086 				     bnx2x_exeq_get_vlan_mac);
2087 	}
2088 
2090 }
2091 /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
2092 static inline void __storm_memset_mac_filters(struct bnx2x *bp,
2093 			struct tstorm_eth_mac_filter_config *mac_filters,
2094 			u16 pf_id)
2095 {
2096 	size_t size = sizeof(struct tstorm_eth_mac_filter_config);
2097 
2098 	u32 addr = BAR_TSTRORM_INTMEM +
2099 			TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
2100 
2101 	__storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
2102 }
2103 
2104 static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
2105 				 struct bnx2x_rx_mode_ramrod_params *p)
2106 {
2107 	/* update the bp MAC filter structure */
2108 	u32 mask = (1 << p->cl_id);
2109 
2110 	struct tstorm_eth_mac_filter_config *mac_filters =
2111 		(struct tstorm_eth_mac_filter_config *)p->rdata;
2112 
2113 	/* initial setting is drop-all */
2114 	u8 drop_all_ucast = 1, drop_all_mcast = 1;
2115 	u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2116 	u8 unmatched_unicast = 0;
2117 
2118 	/* In E1x we only take the Rx accept flags into account since Tx
2119 	 * switching isn't enabled. */
2120 	if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
2121 		/* accept matched ucast */
2122 		drop_all_ucast = 0;
2123 
2124 	if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags))
2125 		/* accept matched mcast */
2126 		drop_all_mcast = 0;
2127 
2128 	if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
2129 		/* accept all ucast */
2130 		drop_all_ucast = 0;
2131 		accp_all_ucast = 1;
2132 	}
2133 	if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
2134 		/* accept all mcast */
2135 		drop_all_mcast = 0;
2136 		accp_all_mcast = 1;
2137 	}
2138 	if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags))
2139 		/* accept (all) bcast */
2140 		accp_all_bcast = 1;
2141 	if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags))
2142 		/* accept unmatched unicasts */
2143 		unmatched_unicast = 1;
2144 
2145 	mac_filters->ucast_drop_all = drop_all_ucast ?
2146 		mac_filters->ucast_drop_all | mask :
2147 		mac_filters->ucast_drop_all & ~mask;
2148 
2149 	mac_filters->mcast_drop_all = drop_all_mcast ?
2150 		mac_filters->mcast_drop_all | mask :
2151 		mac_filters->mcast_drop_all & ~mask;
2152 
2153 	mac_filters->ucast_accept_all = accp_all_ucast ?
2154 		mac_filters->ucast_accept_all | mask :
2155 		mac_filters->ucast_accept_all & ~mask;
2156 
2157 	mac_filters->mcast_accept_all = accp_all_mcast ?
2158 		mac_filters->mcast_accept_all | mask :
2159 		mac_filters->mcast_accept_all & ~mask;
2160 
2161 	mac_filters->bcast_accept_all = accp_all_bcast ?
2162 		mac_filters->bcast_accept_all | mask :
2163 		mac_filters->bcast_accept_all & ~mask;
2164 
2165 	mac_filters->unmatched_unicast = unmatched_unicast ?
2166 		mac_filters->unmatched_unicast | mask :
2167 		mac_filters->unmatched_unicast & ~mask;
2168 
2169 	DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\n accp_ucast 0x%x\n"
2170 					 "accp_mcast 0x%x\naccp_bcast 0x%x\n",
2171 	   mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
2172 	   mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
2173 	   mac_filters->bcast_accept_all);
2174 
2175 	/* write the MAC filter structure */
2176 	__storm_memset_mac_filters(bp, mac_filters, p->func_id);
2177 
2178 	/* The operation is completed */
2179 	clear_bit(p->state, p->pstate);
2180 	smp_mb__after_clear_bit();
2181 
2182 	return 0;
2183 }
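/* Worked example for the masking above: for cl_id = 3 the mask is
 * 1 << 3 = 0x08. Requesting drop-all unicast for this client does
 *
 *	ucast_drop_all |= 0x08;
 *
 * while leaving the bits of the other clients untouched, and clearing the
 * mode does ucast_drop_all &= ~0x08. Each filter word is effectively a
 * per-client bitmap.
 */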
2184 
2185 /* Setup ramrod data */
2186 static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid,
2187 				struct eth_classify_header *hdr,
2188 				u8 rule_cnt)
2189 {
2190 	hdr->echo = cid;
2191 	hdr->rule_cnt = rule_cnt;
2192 }
2193 
2194 static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
2195 				unsigned long accept_flags,
2196 				struct eth_filter_rules_cmd *cmd,
2197 				bool clear_accept_all)
2198 {
2199 	u16 state;
2200 
2201 	/* start with 'drop-all' */
2202 	state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
2203 		ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2204 
2205 	if (accept_flags) {
2206 		if (test_bit(BNX2X_ACCEPT_UNICAST, &accept_flags))
2207 			state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2208 
2209 		if (test_bit(BNX2X_ACCEPT_MULTICAST, &accept_flags))
2210 			state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2211 
2212 		if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept_flags)) {
2213 			state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2214 			state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2215 		}
2216 
2217 		if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags)) {
2218 			state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2219 			state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2220 		}
2221 		if (test_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags))
2222 			state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2223 
2224 		if (test_bit(BNX2X_ACCEPT_UNMATCHED, &accept_flags)) {
2225 			state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2226 			state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2227 		}
2228 		if (test_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags))
2229 			state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
2230 	}
2231 
2232 	/* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
2233 	if (clear_accept_all) {
2234 		state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2235 		state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2236 		state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2237 		state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2238 	}
2239 
2240 	cmd->state = cpu_to_le16(state);
2241 
2242 }
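/* Example of the mapping above: accept_flags with BNX2X_ACCEPT_UNICAST and
 * BNX2X_ACCEPT_BROADCAST set yields
 *
 *	state == ETH_FILTER_RULES_CMD_MCAST_DROP_ALL |
 *		 ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL
 *
 * the first flag clears the default ucast drop-all bit, the second adds
 * the bcast accept-all bit, and multicast stays in its drop-all default.
 */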
2243 
2244 static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
2245 				struct bnx2x_rx_mode_ramrod_params *p)
2246 {
2247 	struct eth_filter_rules_ramrod_data *data = p->rdata;
2248 	int rc;
2249 	u8 rule_idx = 0;
2250 
2251 	/* Reset the ramrod data buffer */
2252 	memset(data, 0, sizeof(*data));
2253 
2254 	/* Setup ramrod data */
2255 
2256 	/* Tx (internal switching) */
2257 	if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2258 		data->rules[rule_idx].client_id = p->cl_id;
2259 		data->rules[rule_idx].func_id = p->func_id;
2260 
2261 		data->rules[rule_idx].cmd_general_data =
2262 			ETH_FILTER_RULES_CMD_TX_CMD;
2263 
2264 		bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
2265 			&(data->rules[rule_idx++]), false);
2266 	}
2267 
2268 	/* Rx */
2269 	if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2270 		data->rules[rule_idx].client_id = p->cl_id;
2271 		data->rules[rule_idx].func_id = p->func_id;
2272 
2273 		data->rules[rule_idx].cmd_general_data =
2274 			ETH_FILTER_RULES_CMD_RX_CMD;
2275 
2276 		bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
2277 			&(data->rules[rule_idx++]), false);
2278 	}
2279 
2280 
2281 	/*
2282 	 * If FCoE Queue configuration has been requested, configure the Rx and
2283 	 * internal switching modes for this queue in separate rules.
2284 	 *
2285 	 * The FCoE queue shall never be set to ACCEPT_ALL packets of any sort:
2286 	 * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
2287 	 */
2288 	if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
2289 		/*  Tx (internal switching) */
2290 		if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2291 			data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2292 			data->rules[rule_idx].func_id = p->func_id;
2293 
2294 			data->rules[rule_idx].cmd_general_data =
2295 						ETH_FILTER_RULES_CMD_TX_CMD;
2296 
2297 			bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
2298 						     &(data->rules[rule_idx++]),
2299 						       true);
2300 		}
2301 
2302 		/* Rx */
2303 		if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2304 			data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2305 			data->rules[rule_idx].func_id = p->func_id;
2306 
2307 			data->rules[rule_idx].cmd_general_data =
2308 						ETH_FILTER_RULES_CMD_RX_CMD;
2309 
2310 			bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
2311 						     &(data->rules[rule_idx++]),
2312 						       true);
2313 		}
2314 	}
2315 
2316 	/*
2317 	 * Set the ramrod header (most importantly - number of rules to
2318 	 * configure).
2319 	 */
2320 	bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
2321 
2322 	DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx\n",
2323 			 data->header.rule_cnt, p->rx_accept_flags,
2324 			 p->tx_accept_flags);
2325 
2326 	/*
2327 	 * No need for an explicit memory barrier here: the only ordering
2328 	 * required is between writing the SPQ element and updating the SPQ
2329 	 * producer, which involves a memory read, and the full memory
2330 	 * barrier for that is already inside bnx2x_sp_post().
2332 	 */
2333 
2334 	/* Send a ramrod */
2335 	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid,
2336 			   U64_HI(p->rdata_mapping),
2337 			   U64_LO(p->rdata_mapping),
2338 			   ETH_CONNECTION_TYPE);
2339 	if (rc)
2340 		return rc;
2341 
2342 	/* Ramrod completion is pending */
2343 	return 1;
2344 }
2345 
2346 static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp,
2347 				      struct bnx2x_rx_mode_ramrod_params *p)
2348 {
2349 	return bnx2x_state_wait(bp, p->state, p->pstate);
2350 }
2351 
2352 static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp,
2353 				    struct bnx2x_rx_mode_ramrod_params *p)
2354 {
2355 	/* Do nothing */
2356 	return 0;
2357 }
2358 
2359 int bnx2x_config_rx_mode(struct bnx2x *bp,
2360 			 struct bnx2x_rx_mode_ramrod_params *p)
2361 {
2362 	int rc;
2363 
2364 	/* Configure the new classification in the chip */
2365 	rc = p->rx_mode_obj->config_rx_mode(bp, p);
2366 	if (rc < 0)
2367 		return rc;
2368 
2369 	/* Wait for a ramrod completion if was requested */
2370 	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2371 		rc = p->rx_mode_obj->wait_comp(bp, p);
2372 		if (rc)
2373 			return rc;
2374 	}
2375 
2376 	return rc;
2377 }
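/* A minimal caller-side sketch (the flag choice is an illustrative
 * assumption): request "normal" filtering on the Rx path and wait for
 * the ramrod to complete.
 *
 *	struct bnx2x_rx_mode_ramrod_params p = {0};
 *	(rx_mode_obj, cl_id, cid, func_id and rdata set up beforehand)
 *
 *	__set_bit(RAMROD_RX, &p.ramrod_flags);
 *	__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
 *	__set_bit(BNX2X_ACCEPT_UNICAST, &p.rx_accept_flags);
 *	__set_bit(BNX2X_ACCEPT_MULTICAST, &p.rx_accept_flags);
 *	__set_bit(BNX2X_ACCEPT_BROADCAST, &p.rx_accept_flags);
 *	rc = bnx2x_config_rx_mode(bp, &p);	(<0 - error, otherwise ok)
 */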
2378 
2379 void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
2380 			    struct bnx2x_rx_mode_obj *o)
2381 {
2382 	if (CHIP_IS_E1x(bp)) {
2383 		o->wait_comp      = bnx2x_empty_rx_mode_wait;
2384 		o->config_rx_mode = bnx2x_set_rx_mode_e1x;
2385 	} else {
2386 		o->wait_comp      = bnx2x_wait_rx_mode_comp_e2;
2387 		o->config_rx_mode = bnx2x_set_rx_mode_e2;
2388 	}
2389 }
2390 
2391 /********************* Multicast verbs: SET, CLEAR ****************************/
2392 static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac)
2393 {
2394 	return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
2395 }
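/* The top byte (bits 31-24) of the CRC32C over the 6 MAC bytes selects one
 * of 256 bins, so every multicast MAC maps to exactly one bin while several
 * different MACs may share a bin - hence the "approximate match" naming
 * used throughout this file.
 */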
2396 
2397 struct bnx2x_mcast_mac_elem {
2398 	struct list_head link;
2399 	u8 mac[ETH_ALEN];
2400 	u8 pad[2]; /* For a natural alignment of the following buffer */
2401 };
2402 
2403 struct bnx2x_pending_mcast_cmd {
2404 	struct list_head link;
2405 	int type; /* BNX2X_MCAST_CMD_X */
2406 	union {
2407 		struct list_head macs_head;
2408 		u32 macs_num; /* Needed for DEL command */
2409 		int next_bin; /* Needed for RESTORE flow with aprox match */
2410 	} data;
2411 
2412 	bool done; /* set to true when the command has been handled,
2413 		    * practically used in 57712 handling only, where one pending
2414 		    * command may be handled in a few operations. Since for
2415 		    * other chips every operation handling is completed in a
2416 		    * single ramrod, there is no need to utilize this field.
2417 		    */
2418 };
2419 
2420 static int bnx2x_mcast_wait(struct bnx2x *bp,
2421 			    struct bnx2x_mcast_obj *o)
2422 {
2423 	if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) ||
2424 			o->raw.wait_comp(bp, &o->raw))
2425 		return -EBUSY;
2426 
2427 	return 0;
2428 }
2429 
2430 static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
2431 				   struct bnx2x_mcast_obj *o,
2432 				   struct bnx2x_mcast_ramrod_params *p,
2433 				   int cmd)
2434 {
2435 	int total_sz;
2436 	struct bnx2x_pending_mcast_cmd *new_cmd;
2437 	struct bnx2x_mcast_mac_elem *cur_mac = NULL;
2438 	struct bnx2x_mcast_list_elem *pos;
2439 	int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ?
2440 			     p->mcast_list_len : 0);
2441 
2442 	/* If the command is empty ("handle pending commands only"), return */
2443 	if (!p->mcast_list_len)
2444 		return 0;
2445 
2446 	total_sz = sizeof(*new_cmd) +
2447 		macs_list_len * sizeof(struct bnx2x_mcast_mac_elem);
2448 
2449 	/* Add mcast is called under spin_lock, thus allocate with GFP_ATOMIC */
2450 	new_cmd = kzalloc(total_sz, GFP_ATOMIC);
2451 
2452 	if (!new_cmd)
2453 		return -ENOMEM;
2454 
2455 	DP(BNX2X_MSG_SP, "About to enqueue a new %d command. macs_list_len=%d\n",
2456 	   cmd, macs_list_len);
2457 
2458 	INIT_LIST_HEAD(&new_cmd->data.macs_head);
2459 
2460 	new_cmd->type = cmd;
2461 	new_cmd->done = false;
2462 
2463 	switch (cmd) {
2464 	case BNX2X_MCAST_CMD_ADD:
2465 		cur_mac = (struct bnx2x_mcast_mac_elem *)
2466 			  ((u8 *)new_cmd + sizeof(*new_cmd));
2467 
2468 		/* Push the MACs of the current command into the pending command
2469 		 * MACs list: FIFO
2470 		 */
2471 		list_for_each_entry(pos, &p->mcast_list, link) {
2472 			memcpy(cur_mac->mac, pos->mac, ETH_ALEN);
2473 			list_add_tail(&cur_mac->link, &new_cmd->data.macs_head);
2474 			cur_mac++;
2475 		}
2476 
2477 		break;
2478 
2479 	case BNX2X_MCAST_CMD_DEL:
2480 		new_cmd->data.macs_num = p->mcast_list_len;
2481 		break;
2482 
2483 	case BNX2X_MCAST_CMD_RESTORE:
2484 		new_cmd->data.next_bin = 0;
2485 		break;
2486 
2487 	default:
2488 		BNX2X_ERR("Unknown command: %d\n", cmd);
2489 		return -EINVAL;
2490 	}
2491 
2492 	/* Push the new pending command to the tail of the pending list: FIFO */
2493 	list_add_tail(&new_cmd->link, &o->pending_cmds_head);
2494 
2495 	o->set_sched(o);
2496 
2497 	return 1;
2498 }
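/* Layout note for the single allocation above: the command header is
 * immediately followed by the array of MAC elements,
 *
 *	+--------------------------+------------+------------+----
 *	| bnx2x_pending_mcast_cmd  | mac elem 0 | mac elem 1 | ...
 *	+--------------------------+------------+------------+----
 *
 * which is why cur_mac starts at new_cmd + sizeof(*new_cmd) and is simply
 * incremented per copied MAC, and why freeing the command later frees the
 * MAC elements together with it.
 */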
2499 
2500 /**
2501  * bnx2x_mcast_get_next_bin - get the next set bin (index)
2502  *
2503  * @o:		multicast object containing the bins vector
2504  * @last:	index to start looking from (inclusive)
2505  *
2506  * returns the next found (set) bin or a negative value if none is found.
2507  */
2508 static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last)
2509 {
2510 	int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
2511 
2512 	for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) {
2513 		if (o->registry.aprox_match.vec[i])
2514 			for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
2515 				int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
2516 				if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
2517 						       vec, cur_bit)) {
2518 					return cur_bit;
2519 				}
2520 			}
2521 		inner_start = 0;
2522 	}
2523 
2524 	/* None found */
2525 	return -1;
2526 }
2527 
2528 /**
2529  * bnx2x_mcast_clear_first_bin - find the first set bin and clear it
2530  *
2531  * @o:		multicast object containing the bins vector
2532  *
2533  * returns the index of the found bin or -1 if none is found
2534  */
2535 static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o)
2536 {
2537 	int cur_bit = bnx2x_mcast_get_next_bin(o, 0);
2538 
2539 	if (cur_bit >= 0)
2540 		BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
2541 
2542 	return cur_bit;
2543 }
2544 
2545 static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o)
2546 {
2547 	struct bnx2x_raw_obj *raw = &o->raw;
2548 	u8 rx_tx_flag = 0;
2549 
2550 	if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
2551 	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2552 		rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
2553 
2554 	if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
2555 	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2556 		rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
2557 
2558 	return rx_tx_flag;
2559 }
2560 
2561 static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
2562 					struct bnx2x_mcast_obj *o, int idx,
2563 					union bnx2x_mcast_config_data *cfg_data,
2564 					int cmd)
2565 {
2566 	struct bnx2x_raw_obj *r = &o->raw;
2567 	struct eth_multicast_rules_ramrod_data *data =
2568 		(struct eth_multicast_rules_ramrod_data *)(r->rdata);
2569 	u8 func_id = r->func_id;
2570 	u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o);
2571 	int bin;
2572 
2573 	if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE))
2574 		rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
2575 
2576 	data->rules[idx].cmd_general_data |= rx_tx_add_flag;
2577 
2578 	/* Get a bin and update a bins' vector */
2579 	switch (cmd) {
2580 	case BNX2X_MCAST_CMD_ADD:
2581 		bin = bnx2x_mcast_bin_from_mac(cfg_data->mac);
2582 		BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
2583 		break;
2584 
2585 	case BNX2X_MCAST_CMD_DEL:
2586 		/* If there were no more bins to clear
2587 		 * (bnx2x_mcast_clear_first_bin() returns -1) then we would
2588 		 * clear any (0xff) bin.
2589 		 * See bnx2x_mcast_validate_e2() for explanation when it may
2590 		 * happen.
2591 		 */
2592 		bin = bnx2x_mcast_clear_first_bin(o);
2593 		break;
2594 
2595 	case BNX2X_MCAST_CMD_RESTORE:
2596 		bin = cfg_data->bin;
2597 		break;
2598 
2599 	default:
2600 		BNX2X_ERR("Unknown command: %d\n", cmd);
2601 		return;
2602 	}
2603 
2604 	DP(BNX2X_MSG_SP, "%s bin %d\n",
2605 			 ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
2606 			 "Setting"  : "Clearing"), bin);
2607 
2608 	data->rules[idx].bin_id    = (u8)bin;
2609 	data->rules[idx].func_id   = func_id;
2610 	data->rules[idx].engine_id = o->engine_id;
2611 }
2612 
2613 /**
2614  * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry
2615  *
2616  * @bp:		device handle
2617  * @o:		multicast object to restore from
2618  * @start_bin:	index in the registry to start from (inclusive)
2619  * @rdata_idx:	index in the ramrod data to start from
2620  *
2621  * returns last handled bin index or -1 if all bins have been handled
2622  */
2623 static inline int bnx2x_mcast_handle_restore_cmd_e2(
2624 	struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_bin,
2625 	int *rdata_idx)
2626 {
2627 	int cur_bin, cnt = *rdata_idx;
2628 	union bnx2x_mcast_config_data cfg_data = {0};
2629 
2630 	/* go through the registry and configure the bins from it */
2631 	for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
2632 	    cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) {
2633 
2634 		cfg_data.bin = (u8)cur_bin;
2635 		o->set_one_rule(bp, o, cnt, &cfg_data,
2636 				BNX2X_MCAST_CMD_RESTORE);
2637 
2638 		cnt++;
2639 
2640 		DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin);
2641 
2642 		/* Break if we reached the maximum number
2643 		 * of rules.
2644 		 */
2645 		if (cnt >= o->max_cmd_len)
2646 			break;
2647 	}
2648 
2649 	*rdata_idx = cnt;
2650 
2651 	return cur_bin;
2652 }
2653 
2654 static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp,
2655 	struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2656 	int *line_idx)
2657 {
2658 	struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
2659 	int cnt = *line_idx;
2660 	union bnx2x_mcast_config_data cfg_data = {0};
2661 
2662 	list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
2663 				 link) {
2664 
2665 		cfg_data.mac = &pmac_pos->mac[0];
2666 		o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
2667 
2668 		cnt++;
2669 
2670 		DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
2671 		   pmac_pos->mac);
2672 
2673 		list_del(&pmac_pos->link);
2674 
2675 		/* Break if we reached the maximum number
2676 		 * of rules.
2677 		 */
2678 		if (cnt >= o->max_cmd_len)
2679 			break;
2680 	}
2681 
2682 	*line_idx = cnt;
2683 
2684 	/* if no more MACs to configure - we are done */
2685 	if (list_empty(&cmd_pos->data.macs_head))
2686 		cmd_pos->done = true;
2687 }
2688 
2689 static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp,
2690 	struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2691 	int *line_idx)
2692 {
2693 	int cnt = *line_idx;
2694 
2695 	while (cmd_pos->data.macs_num) {
2696 		o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type);
2697 
2698 		cnt++;
2699 
2700 		cmd_pos->data.macs_num--;
2701 
2702 		  DP(BNX2X_MSG_SP, "Deleting MAC. %d left,cnt is %d\n",
2703 				   cmd_pos->data.macs_num, cnt);
2704 
2705 		/* Break if we reached the maximum
2706 		 * number of rules.
2707 		 */
2708 		if (cnt >= o->max_cmd_len)
2709 			break;
2710 	}
2711 
2712 	*line_idx = cnt;
2713 
2714 	/* If we cleared all bins - we are done */
2715 	if (!cmd_pos->data.macs_num)
2716 		cmd_pos->done = true;
2717 }
2718 
2719 static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp,
2720 	struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2721 	int *line_idx)
2722 {
2723 	cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin,
2724 						line_idx);
2725 
2726 	if (cmd_pos->data.next_bin < 0)
2727 		/* If o->set_restore returned -1 we are done */
2728 		cmd_pos->done = true;
2729 	else
2730 		/* Start from the next bin next time */
2731 		cmd_pos->data.next_bin++;
2732 }
2733 
2734 static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp,
2735 				struct bnx2x_mcast_ramrod_params *p)
2736 {
2737 	struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
2738 	int cnt = 0;
2739 	struct bnx2x_mcast_obj *o = p->mcast_obj;
2740 
2741 	list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head,
2742 				 link) {
2743 		switch (cmd_pos->type) {
2744 		case BNX2X_MCAST_CMD_ADD:
2745 			bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt);
2746 			break;
2747 
2748 		case BNX2X_MCAST_CMD_DEL:
2749 			bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt);
2750 			break;
2751 
2752 		case BNX2X_MCAST_CMD_RESTORE:
2753 			bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos,
2754 							   &cnt);
2755 			break;
2756 
2757 		default:
2758 			BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
2759 			return -EINVAL;
2760 		}
2761 
2762 		/* If the command has been completed - remove it from the list
2763 		 * and free the memory
2764 		 */
2765 		if (cmd_pos->done) {
2766 			list_del(&cmd_pos->link);
2767 			kfree(cmd_pos);
2768 		}
2769 
2770 		/* Break if we reached the maximum number of rules */
2771 		if (cnt >= o->max_cmd_len)
2772 			break;
2773 	}
2774 
2775 	return cnt;
2776 }
2777 
2778 static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
2779 	struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2780 	int *line_idx)
2781 {
2782 	struct bnx2x_mcast_list_elem *mlist_pos;
2783 	union bnx2x_mcast_config_data cfg_data = {0};
2784 	int cnt = *line_idx;
2785 
2786 	list_for_each_entry(mlist_pos, &p->mcast_list, link) {
2787 		cfg_data.mac = mlist_pos->mac;
2788 		o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD);
2789 
2790 		cnt++;
2791 
2792 		DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
2793 				 mlist_pos->mac);
2794 	}
2795 
2796 	*line_idx = cnt;
2797 }
2798 
2799 static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
2800 	struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2801 	int *line_idx)
2802 {
2803 	int cnt = *line_idx, i;
2804 
2805 	for (i = 0; i < p->mcast_list_len; i++) {
2806 		o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL);
2807 
2808 		cnt++;
2809 
2810 		DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n",
2811 				 p->mcast_list_len - i - 1);
2812 	}
2813 
2814 	*line_idx = cnt;
2815 }
2816 
2817 /**
2818  * bnx2x_mcast_handle_current_cmd - fill ramrod data with the current command
2819  *
2820  * @bp:		device handle
2821  * @p:		ramrod parameters carrying the current command
2822  * @cmd:	command type (BNX2X_MCAST_CMD_X)
2823  * @start_cnt:	first line in the ramrod data that may be used
2824  *
2825  * This function is called iff there is enough room for the current command in
2826  * the ramrod data.
2827  * Returns number of lines filled in the ramrod data in total.
2828  */
2829 static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
2830 			struct bnx2x_mcast_ramrod_params *p, int cmd,
2831 			int start_cnt)
2832 {
2833 	struct bnx2x_mcast_obj *o = p->mcast_obj;
2834 	int cnt = start_cnt;
2835 
2836 	DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
2837 
2838 	switch (cmd) {
2839 	case BNX2X_MCAST_CMD_ADD:
2840 		bnx2x_mcast_hdl_add(bp, o, p, &cnt);
2841 		break;
2842 
2843 	case BNX2X_MCAST_CMD_DEL:
2844 		bnx2x_mcast_hdl_del(bp, o, p, &cnt);
2845 		break;
2846 
2847 	case BNX2X_MCAST_CMD_RESTORE:
2848 		o->hdl_restore(bp, o, 0, &cnt);
2849 		break;
2850 
2851 	default:
2852 		BNX2X_ERR("Unknown command: %d\n", cmd);
2853 		return -EINVAL;
2854 	}
2855 
2856 	/* The current command has been handled */
2857 	p->mcast_list_len = 0;
2858 
2859 	return cnt;
2860 }
2861 
2862 static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
2863 				   struct bnx2x_mcast_ramrod_params *p,
2864 				   int cmd)
2865 {
2866 	struct bnx2x_mcast_obj *o = p->mcast_obj;
2867 	int reg_sz = o->get_registry_size(o);
2868 
2869 	switch (cmd) {
2870 	/* DEL command deletes all currently configured MACs */
2871 	case BNX2X_MCAST_CMD_DEL:
2872 		o->set_registry_size(o, 0);
2873 		/* Don't break */
2874 
2875 	/* RESTORE command will restore the entire multicast configuration */
2876 	case BNX2X_MCAST_CMD_RESTORE:
2877 		/* Here we set the approximate amount of work to do, which in
2878 		 * fact may turn out to be less, as some MACs in postponed ADD
2879 		 * command(s) scheduled before this command may fall into
2880 		 * the same bin and the actual number of bins set in the
2881 		 * registry would be less than we estimated here. See
2882 		 * bnx2x_mcast_set_one_rule_e2() for further details.
2883 		 */
2884 		p->mcast_list_len = reg_sz;
2885 		break;
2886 
2887 	case BNX2X_MCAST_CMD_ADD:
2888 	case BNX2X_MCAST_CMD_CONT:
2889 		/* Here we assume that all new MACs will fall into new bins.
2890 		 * However we will correct the real registry size after we
2891 		 * handle all pending commands.
2892 		 */
2893 		o->set_registry_size(o, reg_sz + p->mcast_list_len);
2894 		break;
2895 
2896 	default:
2897 		BNX2X_ERR("Unknown command: %d\n", cmd);
2898 		return -EINVAL;
2899 
2900 	}
2901 
2902 	/* Increase the total number of MACs pending to be configured */
2903 	o->total_pending_num += p->mcast_list_len;
2904 
2905 	return 0;
2906 }
2907 
2908 static void bnx2x_mcast_revert_e2(struct bnx2x *bp,
2909 				      struct bnx2x_mcast_ramrod_params *p,
2910 				      int old_num_bins)
2911 {
2912 	struct bnx2x_mcast_obj *o = p->mcast_obj;
2913 
2914 	o->set_registry_size(o, old_num_bins);
2915 	o->total_pending_num -= p->mcast_list_len;
2916 }
2917 
2918 /**
2919  * bnx2x_mcast_set_rdata_hdr_e2 - sets the header values
2920  *
2921  * @bp:		device handle
2922  * @p:		ramrod parameters carrying the raw object
2923  * @len:	number of rules to handle
2924  */
2925 static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
2926 					struct bnx2x_mcast_ramrod_params *p,
2927 					u8 len)
2928 {
2929 	struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
2930 	struct eth_multicast_rules_ramrod_data *data =
2931 		(struct eth_multicast_rules_ramrod_data *)(r->rdata);
2932 
2933 	data->header.echo = ((r->cid & BNX2X_SWCID_MASK) |
2934 			  (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT));
2935 	data->header.rule_cnt = len;
2936 }
2937 
2938 /**
2939  * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins
2940  *
2941  * @bp:		device handle
2942  * @o:		multicast object whose registry is refreshed
2943  *
2944  * Recalculate the actual number of set bins in the registry using Brian
2945  * Kernighan's algorithm: its execution time is proportional to the number
2946  * of set bins.
2947  * returns 0 for compliance with bnx2x_mcast_refresh_registry_e1().
2948  */
2949 static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp,
2950 						  struct bnx2x_mcast_obj *o)
2951 {
2952 	int i, cnt = 0;
2953 	u64 elem;
2954 
2955 	for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) {
2956 		elem = o->registry.aprox_match.vec[i];
2957 		for (; elem; cnt++)
2958 			elem &= elem - 1;
2959 	}
2960 
2961 	o->set_registry_size(o, cnt);
2962 
2963 	return 0;
2964 }
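/* Example of the inner loop above: elem = 0b101100 -> 0b101000 ->
 * 0b100000 -> 0, i.e. three iterations for three set bits; each
 * "elem &= elem - 1" clears exactly the lowest set bit.
 */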
2965 
2966 static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
2967 				struct bnx2x_mcast_ramrod_params *p,
2968 				int cmd)
2969 {
2970 	struct bnx2x_raw_obj *raw = &p->mcast_obj->raw;
2971 	struct bnx2x_mcast_obj *o = p->mcast_obj;
2972 	struct eth_multicast_rules_ramrod_data *data =
2973 		(struct eth_multicast_rules_ramrod_data *)(raw->rdata);
2974 	int cnt = 0, rc;
2975 
2976 	/* Reset the ramrod data buffer */
2977 	memset(data, 0, sizeof(*data));
2978 
2979 	cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p);
2980 
2981 	/* If there are no more pending commands - clear SCHEDULED state */
2982 	if (list_empty(&o->pending_cmds_head))
2983 		o->clear_sched(o);
2984 
2985 	/* The below may be true iff there was enough room in ramrod
2986 	 * data for all pending commands and for the current
2987 	 * command. Otherwise the current command would have been added
2988 	 * to the pending commands and p->mcast_list_len would have been
2989 	 * zeroed.
2990 	 */
2991 	if (p->mcast_list_len > 0)
2992 		cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt);
2993 
2994 	/* We've pulled out some MACs - update the total number of
2995 	 * outstanding.
2996 	 */
2997 	o->total_pending_num -= cnt;
2998 
2999 	/* send a ramrod */
3000 	WARN_ON(o->total_pending_num < 0);
3001 	WARN_ON(cnt > o->max_cmd_len);
3002 
3003 	bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt);
3004 
3005 	/* Update a registry size if there are no more pending operations.
3006 	 *
3007 	 * We don't want to change the value of the registry size if there are
3008 	 * pending operations because we want it to always be equal to the
3009 	 * exact or the approximate number (see bnx2x_mcast_validate_e2()) of
3010 	 * set bins after the last requested operation in order to properly
3011 	 * evaluate the size of the next DEL/RESTORE operation.
3012 	 *
3013 	 * Note that we update the registry itself during command(s) handling
3014 	 * - see bnx2x_mcast_set_one_rule_e2(). That's because for 57712 we
3015 	 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
3016 	 * with a limited amount of update commands (per MAC/bin) and we don't
3017 	 * know in this scope what the actual state of bins configuration is
3018 	 * going to be after this ramrod.
3019 	 */
3020 	if (!o->total_pending_num)
3021 		bnx2x_mcast_refresh_registry_e2(bp, o);
3022 
3023 	/*
3024 	 * If CLEAR_ONLY was requested - don't send a ramrod and clear
3025 	 * RAMROD_PENDING status immediately.
3026 	 */
3027 	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3028 		raw->clear_pending(raw);
3029 		return 0;
3030 	} else {
3031 		/*
3032 		 * No need for an explicit memory barrier here: the only
3033 		 * ordering required is between writing the SPQ element and
3034 		 * updating the SPQ producer, which involves a memory read,
3035 		 * and the full memory barrier for that is already inside
3036 		 * bnx2x_sp_post().
3037 		 */
3038 
3039 		/* Send a ramrod */
3040 		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES,
3041 				   raw->cid, U64_HI(raw->rdata_mapping),
3042 				   U64_LO(raw->rdata_mapping),
3043 				   ETH_CONNECTION_TYPE);
3044 		if (rc)
3045 			return rc;
3046 
3047 		/* Ramrod completion is pending */
3048 		return 1;
3049 	}
3050 }
3051 
3052 static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
3053 				    struct bnx2x_mcast_ramrod_params *p,
3054 				    int cmd)
3055 {
3056 	/* Mark that there is work to do */
3057 	if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
3058 		p->mcast_list_len = 1;
3059 
3060 	return 0;
3061 }
3062 
3063 static void bnx2x_mcast_revert_e1h(struct bnx2x *bp,
3064 				       struct bnx2x_mcast_ramrod_params *p,
3065 				       int old_num_bins)
3066 {
3067 	/* Do nothing */
3068 }
3069 
3070 #define BNX2X_57711_SET_MC_FILTER(filter, bit) \
3071 do { \
3072 	(filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
3073 } while (0)
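/* Example: bit = 37 selects 32-bit word 37 >> 5 = 1 and mask
 * 1 << (37 & 0x1f) = 1 << 5, so BNX2X_57711_SET_MC_FILTER(mc_filter, 37)
 * performs mc_filter[1] |= 0x20.
 */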
3074 
3075 static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp,
3076 					   struct bnx2x_mcast_obj *o,
3077 					   struct bnx2x_mcast_ramrod_params *p,
3078 					   u32 *mc_filter)
3079 {
3080 	struct bnx2x_mcast_list_elem *mlist_pos;
3081 	int bit;
3082 
3083 	list_for_each_entry(mlist_pos, &p->mcast_list, link) {
3084 		bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac);
3085 		BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3086 
3087 		DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC, bin %d\n",
3088 				 mlist_pos->mac, bit);
3089 
3090 		/* bookkeeping... */
3091 		BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
3092 				  bit);
3093 	}
3094 }
3095 
3096 static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
3097 	struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
3098 	u32 *mc_filter)
3099 {
3100 	int bit;
3101 
3102 	for (bit = bnx2x_mcast_get_next_bin(o, 0);
3103 	     bit >= 0;
3104 	     bit = bnx2x_mcast_get_next_bin(o, bit + 1)) {
3105 		BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3106 		DP(BNX2X_MSG_SP, "About to set bin %d\n", bit);
3107 	}
3108 }
3109 
3110 /* On 57711 we write the multicast MACs' approximate match
3111  * table directly into the TSTORM's internal RAM, so we don't
3112  * really need any tricks to make it work.
3113  */
3114 static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
3115 				 struct bnx2x_mcast_ramrod_params *p,
3116 				 int cmd)
3117 {
3118 	int i;
3119 	struct bnx2x_mcast_obj *o = p->mcast_obj;
3120 	struct bnx2x_raw_obj *r = &o->raw;
3121 
3122 	/* If CLEAR_ONLY has been requested - clear the registry
3123 	 * and clear a pending bit.
3124 	 */
3125 	if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3126 		u32 mc_filter[MC_HASH_SIZE] = {0};
3127 
3128 		/* Set the multicast filter bits before writing it into
3129 		 * the internal memory.
3130 		 */
3131 		switch (cmd) {
3132 		case BNX2X_MCAST_CMD_ADD:
3133 			bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter);
3134 			break;
3135 
3136 		case BNX2X_MCAST_CMD_DEL:
3137 			DP(BNX2X_MSG_SP,
3138 			   "Invalidating multicast MACs configuration\n");
3139 
3140 			/* clear the registry */
3141 			memset(o->registry.aprox_match.vec, 0,
3142 			       sizeof(o->registry.aprox_match.vec));
3143 			break;
3144 
3145 		case BNX2X_MCAST_CMD_RESTORE:
3146 			bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter);
3147 			break;
3148 
3149 		default:
3150 			BNX2X_ERR("Unknown command: %d\n", cmd);
3151 			return -EINVAL;
3152 		}
3153 
3154 		/* Set the mcast filter in the internal memory */
3155 		for (i = 0; i < MC_HASH_SIZE; i++)
3156 			REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]);
3157 	} else
3158 		/* clear the registry */
3159 		memset(o->registry.aprox_match.vec, 0,
3160 		       sizeof(o->registry.aprox_match.vec));
3161 
3162 	/* We are done */
3163 	r->clear_pending(r);
3164 
3165 	return 0;
3166 }
3167 
3168 static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
3169 				   struct bnx2x_mcast_ramrod_params *p,
3170 				   int cmd)
3171 {
3172 	struct bnx2x_mcast_obj *o = p->mcast_obj;
3173 	int reg_sz = o->get_registry_size(o);
3174 
3175 	switch (cmd) {
3176 	/* DEL command deletes all currently configured MACs */
3177 	case BNX2X_MCAST_CMD_DEL:
3178 		o->set_registry_size(o, 0);
3179 		/* Don't break */
3180 
3181 	/* RESTORE command will restore the entire multicast configuration */
3182 	case BNX2X_MCAST_CMD_RESTORE:
3183 		p->mcast_list_len = reg_sz;
3184 		  DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
3185 				   cmd, p->mcast_list_len);
3186 		break;
3187 
3188 	case BNX2X_MCAST_CMD_ADD:
3189 	case BNX2X_MCAST_CMD_CONT:
3190 		/* Multicast MACs on 57710 are configured as unicast MACs and
3191 		 * there is only a limited number of CAM entries for that
3192 		 * matter.
3193 		 */
3194 		if (p->mcast_list_len > o->max_cmd_len) {
3195 			BNX2X_ERR("Can't configure more than %d multicast MACs on 57710\n",
3196 				  o->max_cmd_len);
3197 			return -EINVAL;
3198 		}
3199 		/* Every configured MAC should be cleared if DEL command is
3200 		 * called. Only the last ADD command is relevant as long as
3201 		 * every ADD command overrides the previous configuration.
3202 		 */
3203 		DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
3204 		if (p->mcast_list_len > 0)
3205 			o->set_registry_size(o, p->mcast_list_len);
3206 
3207 		break;
3208 
3209 	default:
3210 		BNX2X_ERR("Unknown command: %d\n", cmd);
3211 		return -EINVAL;
3212 
3213 	}
3214 
3215 	/* We want to ensure that commands are executed one by one for 57710.
3216 	 * Therefore each non-empty command will consume o->max_cmd_len.
3217 	 */
3218 	if (p->mcast_list_len)
3219 		o->total_pending_num += o->max_cmd_len;
3220 
3221 	return 0;
3222 }
3223 
3224 static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
3225 				      struct bnx2x_mcast_ramrod_params *p,
3226 				      int old_num_macs)
3227 {
3228 	struct bnx2x_mcast_obj *o = p->mcast_obj;
3229 
3230 	o->set_registry_size(o, old_num_macs);
3231 
3232 	/* If the current command hasn't been handled yet, reaching this
3233 	 * point means it's meant to be dropped, and we have to update
3234 	 * the number of outstanding MACs accordingly.
3235 	 */
3236 	if (p->mcast_list_len)
3237 		o->total_pending_num -= o->max_cmd_len;
3238 }
3239 
3240 static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp,
3241 					struct bnx2x_mcast_obj *o, int idx,
3242 					union bnx2x_mcast_config_data *cfg_data,
3243 					int cmd)
3244 {
3245 	struct bnx2x_raw_obj *r = &o->raw;
3246 	struct mac_configuration_cmd *data =
3247 		(struct mac_configuration_cmd *)(r->rdata);
3248 
3249 	/* copy mac */
3250 	if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) {
3251 		bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
3252 				      &data->config_table[idx].middle_mac_addr,
3253 				      &data->config_table[idx].lsb_mac_addr,
3254 				      cfg_data->mac);
3255 
3256 		data->config_table[idx].vlan_id = 0;
3257 		data->config_table[idx].pf_id = r->func_id;
3258 		data->config_table[idx].clients_bit_vector =
3259 			cpu_to_le32(1 << r->cl_id);
3260 
3261 		SET_FLAG(data->config_table[idx].flags,
3262 			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3263 			 T_ETH_MAC_COMMAND_SET);
3264 	}
3265 }
3266 
3267 /**
3268  * bnx2x_mcast_set_rdata_hdr_e1 - set header values in mac_configuration_cmd
3269  *
3270  * @bp:		device handle
3271  * @p:		ramrod parameters carrying the raw object
3272  * @len:	number of rules to handle
3273  */
3274 static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
3275 					struct bnx2x_mcast_ramrod_params *p,
3276 					u8 len)
3277 {
3278 	struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
3279 	struct mac_configuration_cmd *data =
3280 		(struct mac_configuration_cmd *)(r->rdata);
3281 
3282 	u8 offset = (CHIP_REV_IS_SLOW(bp) ?
3283 		     BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) :
3284 		     BNX2X_MAX_MULTICAST*(1 + r->func_id));
3285 
3286 	data->hdr.offset = offset;
3287 	data->hdr.client_id = 0xff;
3288 	data->hdr.echo = ((r->cid & BNX2X_SWCID_MASK) |
3289 			  (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT));
3290 	data->hdr.length = len;
3291 }
3292 
3293 /**
3294  * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710
3295  *
3296  * @bp:		device handle
3297  * @o:		multicast object to restore from
3298  * @start_idx:	index in the registry to start from
3299  * @rdata_idx:	index in the ramrod data to start from
3300  *
3301  * The restore command for 57710 is, like all other commands, always a
3302  * stand-alone command: start_idx and rdata_idx will always be 0. It will always
3303  * succeed.
3304  * returns -1 to comply with 57712 variant.
3305  */
3306 static inline int bnx2x_mcast_handle_restore_cmd_e1(
3307 	struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_idx,
3308 	int *rdata_idx)
3309 {
3310 	struct bnx2x_mcast_mac_elem *elem;
3311 	int i = 0;
3312 	union bnx2x_mcast_config_data cfg_data = {0};
3313 
3314 	/* go through the registry and configure the MACs from it. */
3315 	list_for_each_entry(elem, &o->registry.exact_match.macs, link) {
3316 		cfg_data.mac = &elem->mac[0];
3317 		o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE);
3318 
3319 		i++;
3320 
3321 		  DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
3322 				   cfg_data.mac);
3323 	}
3324 
3325 	*rdata_idx = i;
3326 
3327 	return -1;
3328 }
3329 
3331 static inline int bnx2x_mcast_handle_pending_cmds_e1(
3332 	struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p)
3333 {
3334 	struct bnx2x_pending_mcast_cmd *cmd_pos;
3335 	struct bnx2x_mcast_mac_elem *pmac_pos;
3336 	struct bnx2x_mcast_obj *o = p->mcast_obj;
3337 	union bnx2x_mcast_config_data cfg_data = {0};
3338 	int cnt = 0;
3339 
3340 
3342 	if (list_empty(&o->pending_cmds_head))
3343 		return 0;
3344 
3345 	/* Handle the first command */
3346 	cmd_pos = list_first_entry(&o->pending_cmds_head,
3347 				   struct bnx2x_pending_mcast_cmd, link);
3348 
3349 	switch (cmd_pos->type) {
3350 	case BNX2X_MCAST_CMD_ADD:
3351 		list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) {
3352 			cfg_data.mac = &pmac_pos->mac[0];
3353 			o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
3354 
3355 			cnt++;
3356 
3357 			DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
3358 					 pmac_pos->mac);
3359 		}
3360 		break;
3361 
3362 	case BNX2X_MCAST_CMD_DEL:
3363 		cnt = cmd_pos->data.macs_num;
3364 		DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt);
3365 		break;
3366 
3367 	case BNX2X_MCAST_CMD_RESTORE:
3368 		o->hdl_restore(bp, o, 0, &cnt);
3369 		break;
3370 
3371 	default:
3372 		BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
3373 		return -EINVAL;
3374 	}
3375 
3376 	list_del(&cmd_pos->link);
3377 	kfree(cmd_pos);
3378 
3379 	return cnt;
3380 }
3381 
3382 /**
3383  * bnx2x_get_fw_mac_addr - revert the bnx2x_set_fw_mac_addr().
3384  *
3385  * @fw_hi:	two most significant MAC bytes in FW format
3386  * @fw_mid:	two middle MAC bytes in FW format
3387  * @fw_lo:	two least significant MAC bytes in FW format
3388  * @mac:	output buffer (ETH_ALEN bytes) for the MAC address
3389  */
3390 static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
3391 					 __le16 *fw_lo, u8 *mac)
3392 {
3393 	mac[1] = ((u8 *)fw_hi)[0];
3394 	mac[0] = ((u8 *)fw_hi)[1];
3395 	mac[3] = ((u8 *)fw_mid)[0];
3396 	mac[2] = ((u8 *)fw_mid)[1];
3397 	mac[5] = ((u8 *)fw_lo)[0];
3398 	mac[4] = ((u8 *)fw_lo)[1];
3399 }
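/* Example: FW words holding the byte pairs {0x10, 0x00}, {0x00, 0x5e} and
 * {0x01, 0x00} are reassembled into mac[] = 00:10:5e:00:00:01 - each
 * 16-bit word is byte-swapped back into wire order.
 */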
3400 
3401 /**
3402  * bnx2x_mcast_refresh_registry_e1 - update the exact match mcast registry
3403  *
3404  * @bp:		device handle
3405  * @o:		multicast object whose registry is updated
3406  *
3407  * Check the ramrod data first entry flag to see if it's a DELETE or ADD command
3408  * and update the registry correspondingly: if ADD - allocate memory and add
3409  * the entries to the registry (list), if DELETE - clear the registry and free
3410  * the memory.
3411  */
3412 static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
3413 						  struct bnx2x_mcast_obj *o)
3414 {
3415 	struct bnx2x_raw_obj *raw = &o->raw;
3416 	struct bnx2x_mcast_mac_elem *elem;
3417 	struct mac_configuration_cmd *data =
3418 			(struct mac_configuration_cmd *)(raw->rdata);
3419 
3420 	/* If first entry contains a SET bit - the command was ADD,
3421 	 * otherwise - DEL_ALL
3422 	 */
3423 	if (GET_FLAG(data->config_table[0].flags,
3424 			MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
3425 		int i, len = data->hdr.length;
3426 
3427 		/* Break if it was a RESTORE command */
3428 		if (!list_empty(&o->registry.exact_match.macs))
3429 			return 0;
3430 
3431 		elem = kcalloc(len, sizeof(*elem), GFP_ATOMIC);
3432 		if (!elem) {
3433 			BNX2X_ERR("Failed to allocate registry memory\n");
3434 			return -ENOMEM;
3435 		}
3436 
3437 		for (i = 0; i < len; i++, elem++) {
3438 			bnx2x_get_fw_mac_addr(
3439 				&data->config_table[i].msb_mac_addr,
3440 				&data->config_table[i].middle_mac_addr,
3441 				&data->config_table[i].lsb_mac_addr,
3442 				elem->mac);
3443 			DP(BNX2X_MSG_SP, "Adding registry entry for [%pM]\n",
3444 			   elem->mac);
3445 			list_add_tail(&elem->link,
3446 				      &o->registry.exact_match.macs);
3447 		}
3448 	} else {
3449 		elem = list_first_entry(&o->registry.exact_match.macs,
3450 					struct bnx2x_mcast_mac_elem, link);
3451 		DP(BNX2X_MSG_SP, "Deleting a registry\n");
3452 		kfree(elem);
3453 		INIT_LIST_HEAD(&o->registry.exact_match.macs);
3454 	}
3455 
3456 	return 0;
3457 }
3458 
3459 static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
3460 				struct bnx2x_mcast_ramrod_params *p,
3461 				int cmd)
3462 {
3463 	struct bnx2x_mcast_obj *o = p->mcast_obj;
3464 	struct bnx2x_raw_obj *raw = &o->raw;
3465 	struct mac_configuration_cmd *data =
3466 		(struct mac_configuration_cmd *)(raw->rdata);
3467 	int cnt = 0, i, rc;
3468 
3469 	/* Reset the ramrod data buffer */
3470 	memset(data, 0, sizeof(*data));
3471 
3472 	/* First set all entries as invalid */
3473 	for (i = 0; i < o->max_cmd_len ; i++)
3474 		SET_FLAG(data->config_table[i].flags,
3475 			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3476 			 T_ETH_MAC_COMMAND_INVALIDATE);
3477 
3478 	/* Handle pending commands first */
3479 	cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p);
3480 
3481 	/* If there are no more pending commands - clear SCHEDULED state */
3482 	if (list_empty(&o->pending_cmds_head))
3483 		o->clear_sched(o);
3484 
3485 	/* The below may be true iff there were no pending commands */
3486 	if (!cnt)
3487 		cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0);
3488 
3489 	/* For 57710 every command has o->max_cmd_len length to ensure that
3490 	 * commands are done one at a time.
3491 	 */
3492 	o->total_pending_num -= o->max_cmd_len;
3493 
3494 	/* send a ramrod */
3495 
3496 	WARN_ON(cnt > o->max_cmd_len);
3497 
3498 	/* Set ramrod header (in particular, a number of entries to update) */
3499 	bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt);
3500 
3501 	/* update a registry: we need the registry contents to be always up
3502 	 * to date in order to be able to execute a RESTORE opcode. Here
3503 	 * we use the fact that for 57710 we send one command at a time,
3504 	 * hence we may take the registry update out of the command handling
3505 	 * and do it in a simpler way here.
3506 	 */
3507 	rc = bnx2x_mcast_refresh_registry_e1(bp, o);
3508 	if (rc)
3509 		return rc;
3510 
3511 	/*
3512 	 * If CLEAR_ONLY was requested - don't send a ramrod and clear
3513 	 * RAMROD_PENDING status immediately.
3514 	 */
3515 	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3516 		raw->clear_pending(raw);
3517 		return 0;
3518 	} else {
3519 		/*
3520 		 * No need for an explicit memory barrier here: the only
3521 		 * ordering required is between writing the SPQ element and
3522 		 * updating the SPQ producer, which involves a memory read,
3523 		 * and the full memory barrier for that is already inside
3524 		 * bnx2x_sp_post().
3525 		 */
3526 
3527 		/* Send a ramrod */
3528 		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid,
3529 				   U64_HI(raw->rdata_mapping),
3530 				   U64_LO(raw->rdata_mapping),
3531 				   ETH_CONNECTION_TYPE);
3532 		if (rc)
3533 			return rc;
3534 
3535 		/* Ramrod completion is pending */
3536 		return 1;
3537 	}
3538 
3540 }
3541 static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o)
3542 {
3543 	return o->registry.exact_match.num_macs_set;
3544 }
3545 
3546 static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o)
3547 {
3548 	return o->registry.aprox_match.num_bins_set;
3549 }
3550 
3551 static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o,
3552 						int n)
3553 {
3554 	o->registry.exact_match.num_macs_set = n;
3555 }
3556 
3557 static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o,
3558 						int n)
3559 {
3560 	o->registry.aprox_match.num_bins_set = n;
3561 }
3562 
3563 int bnx2x_config_mcast(struct bnx2x *bp,
3564 		       struct bnx2x_mcast_ramrod_params *p,
3565 		       int cmd)
3566 {
3567 	struct bnx2x_mcast_obj *o = p->mcast_obj;
3568 	struct bnx2x_raw_obj *r = &o->raw;
3569 	int rc = 0, old_reg_size;
3570 
3571 	/* This is needed to recover number of currently configured mcast macs
3572 	 * in case of failure.
3573 	 */
3574 	old_reg_size = o->get_registry_size(o);
3575 
3576 	/* Do some calculations and checks */
3577 	rc = o->validate(bp, p, cmd);
3578 	if (rc)
3579 		return rc;
3580 
3581 	/* Return if there is no work to do */
3582 	if ((!p->mcast_list_len) && (!o->check_sched(o)))
3583 		return 0;
3584 
3585 	DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d\n",
3586 	   o->total_pending_num, p->mcast_list_len, o->max_cmd_len);
3587 
3588 	/* Enqueue the current command to the pending list if we can't complete
3589 	 * it in the current iteration
3590 	 */
3591 	if (r->check_pending(r) ||
3592 	    ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
3593 		rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd);
3594 		if (rc < 0)
3595 			goto error_exit1;
3596 
3597 		/* As long as the current command is in a command list we
3598 		 * don't need to handle it separately.
3599 		 */
3600 		p->mcast_list_len = 0;
3601 	}
3602 
3603 	if (!r->check_pending(r)) {
3604 
3605 		/* Set 'pending' state */
3606 		r->set_pending(r);
3607 
3608 		/* Configure the new classification in the chip */
3609 		rc = o->config_mcast(bp, p, cmd);
3610 		if (rc < 0)
3611 			goto error_exit2;
3612 
3613 		/* Wait for a ramrod completion if was requested */
3614 		if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
3615 			rc = o->wait_comp(bp, o);
3616 	}
3617 
3618 	return rc;
3619 
3620 error_exit2:
3621 	r->clear_pending(r);
3622 
3623 error_exit1:
3624 	o->revert(bp, p, old_reg_size);
3625 
3626 	return rc;
3627 }
3628 
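/* Illustrative usage sketch (not part of the driver): clearing the current
 * multicast configuration via bnx2x_config_mcast(), following the pattern
 * the driver core uses. The function name is hypothetical; it assumes the
 * BNX2X_MCAST_CMD_DEL opcode and ramrod flags from bnx2x_sp.h.
 */
static int __maybe_unused bnx2x_mcast_clear_example(struct bnx2x *bp,
						    struct bnx2x_mcast_obj *obj)
{
	struct bnx2x_mcast_ramrod_params rparam = {NULL};

	rparam.mcast_obj = obj;

	/* Optionally block until the ramrod completes */
	__set_bit(RAMROD_COMP_WAIT, &rparam.ramrod_flags);

	/* An empty mcast_list with the DEL command wipes the configuration */
	return bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
}
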
3629 static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
3630 {
3631 	smp_mb__before_clear_bit();
3632 	clear_bit(o->sched_state, o->raw.pstate);
3633 	smp_mb__after_clear_bit();
3634 }
3635 
3636 static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
3637 {
3638 	smp_mb__before_clear_bit();
3639 	set_bit(o->sched_state, o->raw.pstate);
3640 	smp_mb__after_clear_bit();
3641 }
3642 
3643 static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
3644 {
3645 	return !!test_bit(o->sched_state, o->raw.pstate);
3646 }
3647 
3648 static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o)
3649 {
3650 	return o->raw.check_pending(&o->raw) || o->check_sched(o);
3651 }
3652 
3653 void bnx2x_init_mcast_obj(struct bnx2x *bp,
3654 			  struct bnx2x_mcast_obj *mcast_obj,
3655 			  u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
3656 			  u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
3657 			  int state, unsigned long *pstate, bnx2x_obj_type type)
3658 {
3659 	memset(mcast_obj, 0, sizeof(*mcast_obj));
3660 
3661 	bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
3662 			   rdata, rdata_mapping, state, pstate, type);
3663 
3664 	mcast_obj->engine_id = engine_id;
3665 
3666 	INIT_LIST_HEAD(&mcast_obj->pending_cmds_head);
3667 
3668 	mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED;
3669 	mcast_obj->check_sched = bnx2x_mcast_check_sched;
3670 	mcast_obj->set_sched = bnx2x_mcast_set_sched;
3671 	mcast_obj->clear_sched = bnx2x_mcast_clear_sched;
3672 
3673 	if (CHIP_IS_E1(bp)) {
3674 		mcast_obj->config_mcast      = bnx2x_mcast_setup_e1;
3675 		mcast_obj->enqueue_cmd       = bnx2x_mcast_enqueue_cmd;
3676 		mcast_obj->hdl_restore       =
3677 			bnx2x_mcast_handle_restore_cmd_e1;
3678 		mcast_obj->check_pending     = bnx2x_mcast_check_pending;
3679 
3680 		if (CHIP_REV_IS_SLOW(bp))
3681 			mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI;
3682 		else
3683 			mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST;
3684 
3685 		mcast_obj->wait_comp         = bnx2x_mcast_wait;
3686 		mcast_obj->set_one_rule      = bnx2x_mcast_set_one_rule_e1;
3687 		mcast_obj->validate          = bnx2x_mcast_validate_e1;
3688 		mcast_obj->revert            = bnx2x_mcast_revert_e1;
3689 		mcast_obj->get_registry_size =
3690 			bnx2x_mcast_get_registry_size_exact;
3691 		mcast_obj->set_registry_size =
3692 			bnx2x_mcast_set_registry_size_exact;
3693 
3694 		/* 57710 is the only chip that uses the exact match for mcast
3695 		 * at the moment.
3696 		 */
3697 		INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs);
3698 
3699 	} else if (CHIP_IS_E1H(bp)) {
3700 		mcast_obj->config_mcast  = bnx2x_mcast_setup_e1h;
3701 		mcast_obj->enqueue_cmd   = NULL;
3702 		mcast_obj->hdl_restore   = NULL;
3703 		mcast_obj->check_pending = bnx2x_mcast_check_pending;
3704 
3705 		/* 57711 doesn't send a ramrod, so it has unlimited credit
3706 		 * for one command.
3707 		 */
3708 		mcast_obj->max_cmd_len       = -1;
3709 		mcast_obj->wait_comp         = bnx2x_mcast_wait;
3710 		mcast_obj->set_one_rule      = NULL;
3711 		mcast_obj->validate          = bnx2x_mcast_validate_e1h;
3712 		mcast_obj->revert            = bnx2x_mcast_revert_e1h;
3713 		mcast_obj->get_registry_size =
3714 			bnx2x_mcast_get_registry_size_aprox;
3715 		mcast_obj->set_registry_size =
3716 			bnx2x_mcast_set_registry_size_aprox;
3717 	} else {
3718 		mcast_obj->config_mcast      = bnx2x_mcast_setup_e2;
3719 		mcast_obj->enqueue_cmd       = bnx2x_mcast_enqueue_cmd;
3720 		mcast_obj->hdl_restore       =
3721 			bnx2x_mcast_handle_restore_cmd_e2;
3722 		mcast_obj->check_pending     = bnx2x_mcast_check_pending;
3723 		/* TODO: There should be a proper HSI define for this number!!!
3724 		 */
3725 		mcast_obj->max_cmd_len       = 16;
3726 		mcast_obj->wait_comp         = bnx2x_mcast_wait;
3727 		mcast_obj->set_one_rule      = bnx2x_mcast_set_one_rule_e2;
3728 		mcast_obj->validate          = bnx2x_mcast_validate_e2;
3729 		mcast_obj->revert            = bnx2x_mcast_revert_e2;
3730 		mcast_obj->get_registry_size =
3731 			bnx2x_mcast_get_registry_size_aprox;
3732 		mcast_obj->set_registry_size =
3733 			bnx2x_mcast_set_registry_size_aprox;
3734 	}
3735 }
3736 
3737 /*************************** Credit handling **********************************/
3738 
3739 /**
3740  * __atomic_add_ifless - add if the result is less than a given value.
3741  *
3742  * @v:	pointer of type atomic_t
3743  * @a:	the amount to add to v...
3744  * @u:	...if (v + a) is less than u.
3745  *
3746  * returns true if (v + a) was less than u, and false otherwise.
3747  *
3748  */
3749 static inline bool __atomic_add_ifless(atomic_t *v, int a, int u)
3750 {
3751 	int c, old;
3752 
3753 	c = atomic_read(v);
3754 	for (;;) {
3755 		if (unlikely(c + a >= u))
3756 			return false;
3757 
3758 		old = atomic_cmpxchg((v), c, c + a);
3759 		if (likely(old == c))
3760 			break;
3761 		c = old;
3762 	}
3763 
3764 	return true;
3765 }
3766 
3767 /**
3768  * __atomic_dec_ifmoe - decrement if the result is greater than or equal to a given value.
3769  *
3770  * @v:	pointer of type atomic_t
3771  * @a:	the amount to subtract from v...
3772  * @u:	...if (v - a) is greater than or equal to u.
3773  *
3774  * returns true if (v - a) was greater than or equal to u, and false
3775  * otherwise.
3776  */
3777 static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u)
3778 {
3779 	int c, old;
3780 
3781 	c = atomic_read(v);
3782 	for (;;) {
3783 		if (unlikely(c - a < u))
3784 			return false;
3785 
3786 		old = atomic_cmpxchg((v), c, c - a);
3787 		if (likely(old == c))
3788 			break;
3789 		c = old;
3790 	}
3791 
3792 	return true;
3793 }
3794 
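/* Semantics of the two helpers above, by example (values are illustrative):
 *
 *   credit = 5:  __atomic_dec_ifmoe(&credit, 2, 0) -> true,  credit = 3
 *                __atomic_dec_ifmoe(&credit, 4, 0) -> false, credit = 3
 *
 *   pool_sz = 8, credit = 7:
 *                __atomic_add_ifless(&credit, 1, 9) -> true,  credit = 8
 *                __atomic_add_ifless(&credit, 1, 9) -> false, credit = 8
 *
 * Both helpers retry the cmpxchg() until it is not raced, so the check
 * and the update are atomic with respect to other callers.
 */
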
3795 static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt)
3796 {
3797 	bool rc;
3798 
3799 	smp_mb();
3800 	rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
3801 	smp_mb();
3802 
3803 	return rc;
3804 }
3805 
3806 static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt)
3807 {
3808 	bool rc;
3809 
3810 	smp_mb();
3811 
3812 	/* Don't allow a refill if credit + cnt > pool_sz */
3813 	rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
3814 
3815 	smp_mb();
3816 
3817 	return rc;
3818 }
3819 
3820 static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o)
3821 {
3822 	int cur_credit;
3823 
3824 	smp_mb();
3825 	cur_credit = atomic_read(&o->credit);
3826 
3827 	return cur_credit;
3828 }
3829 
3830 static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o,
3831 					  int cnt)
3832 {
3833 	return true;
3834 }
3835 
3836 
3837 static bool bnx2x_credit_pool_get_entry(
3838 	struct bnx2x_credit_pool_obj *o,
3839 	int *offset)
3840 {
3841 	int idx, vec, i;
3842 
3843 	*offset = -1;
3844 
3845 	/* Find "internal cam-offset" then add to base for this object... */
3846 	for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) {
3847 
3848 		/* Skip the current vector if there are no free entries in it */
3849 		if (!o->pool_mirror[vec])
3850 			continue;
3851 
3852 		/* If we've got here we are going to find a free entry */
3853 		for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
3854 		      i < BIT_VEC64_ELEM_SZ; idx++, i++)
3855 
3856 			if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
3857 				/* Got one!! */
3858 				BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
3859 				*offset = o->base_pool_offset + idx;
3860 				return true;
3861 			}
3862 	}
3863 
3864 	return false;
3865 }
3866 
3867 static bool bnx2x_credit_pool_put_entry(
3868 	struct bnx2x_credit_pool_obj *o,
3869 	int offset)
3870 {
3871 	if (offset < o->base_pool_offset)
3872 		return false;
3873 
3874 	offset -= o->base_pool_offset;
3875 
3876 	if (offset >= o->pool_sz)
3877 		return false;
3878 
3879 	/* Return the entry to the pool */
3880 	BIT_VEC64_SET_BIT(o->pool_mirror, offset);
3881 
3882 	return true;
3883 }
3884 
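/* Illustrative sketch (not part of the driver): reserving a single CAM
 * credit and a CAM offset before programming a MAC, following the
 * get()/get_entry() pattern used elsewhere in the driver. The function
 * name is hypothetical; 'macs_pool' is a pool prepared by
 * bnx2x_init_mac_credit_pool().
 */
static bool __maybe_unused bnx2x_reserve_cam_slot_example(
	struct bnx2x_credit_pool_obj *macs_pool, int *cam_offset)
{
	/* Take one credit; fails when the pool is exhausted */
	if (!macs_pool->get(macs_pool, 1))
		return false;

	/* Take a concrete CAM offset (effectively a no-op on 57712 and
	 * newer, where no CAM entry tracking is done)
	 */
	if (!macs_pool->get_entry(macs_pool, cam_offset)) {
		macs_pool->put(macs_pool, 1);	/* roll the credit back */
		return false;
	}

	/* On removal the caller mirrors this with put_entry() + put() */
	return true;
}
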
3885 static bool bnx2x_credit_pool_put_entry_always_true(
3886 	struct bnx2x_credit_pool_obj *o,
3887 	int offset)
3888 {
3889 	return true;
3890 }
3891 
3892 static bool bnx2x_credit_pool_get_entry_always_true(
3893 	struct bnx2x_credit_pool_obj *o,
3894 	int *offset)
3895 {
3896 	*offset = -1;
3897 	return true;
3898 }
3899 /**
3900  * bnx2x_init_credit_pool - initialize credit pool internals.
3901  *
3902  * @p:		credit pool object
3903  * @base:	Base entry in the CAM to use.
3904  * @credit:	pool size.
3905  *
3906  * If base is negative no CAM entries handling will be performed.
3907  * If credit is negative pool operations will always succeed (unlimited pool).
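 *
 * For example (illustrative): bnx2x_init_credit_pool(p, -1, -1) sets up a
 * pool whose get()/put() always succeed and which does no CAM bookkeeping.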
3908  *
3909  */
3910 static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
3911 					  int base, int credit)
3912 {
3913 	/* Zero the object first */
3914 	memset(p, 0, sizeof(*p));
3915 
3916 	/* Set the table to all 1s */
3917 	memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
3918 
3919 	/* Init a pool as full */
3920 	atomic_set(&p->credit, credit);
3921 
3922 	/* The total pool size */
3923 	p->pool_sz = credit;
3924 
3925 	p->base_pool_offset = base;
3926 
3927 	/* Commit the change */
3928 	smp_mb();
3929 
3930 	p->check = bnx2x_credit_pool_check;
3931 
3932 	/* if pool credit is negative - disable the checks */
3933 	if (credit >= 0) {
3934 		p->put      = bnx2x_credit_pool_put;
3935 		p->get      = bnx2x_credit_pool_get;
3936 		p->put_entry = bnx2x_credit_pool_put_entry;
3937 		p->get_entry = bnx2x_credit_pool_get_entry;
3938 	} else {
3939 		p->put      = bnx2x_credit_pool_always_true;
3940 		p->get      = bnx2x_credit_pool_always_true;
3941 		p->put_entry = bnx2x_credit_pool_put_entry_always_true;
3942 		p->get_entry = bnx2x_credit_pool_get_entry_always_true;
3943 	}
3944 
3945 	/* If base is negative - disable entries handling */
3946 	if (base < 0) {
3947 		p->put_entry = bnx2x_credit_pool_put_entry_always_true;
3948 		p->get_entry = bnx2x_credit_pool_get_entry_always_true;
3949 	}
3950 }
3951 
3952 void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
3953 				struct bnx2x_credit_pool_obj *p, u8 func_id,
3954 				u8 func_num)
3955 {
3956 /* TODO: this will be defined in consts as well... */
3957 #define BNX2X_CAM_SIZE_EMUL 5
3958 
3959 	int cam_sz;
3960 
3961 	if (CHIP_IS_E1(bp)) {
3962 		/* In E1, multicast MACs are saved in the CAM... */
3963 		if (!CHIP_REV_IS_SLOW(bp))
3964 			cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST;
3965 		else
3966 			cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI;
3967 
3968 		bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
3969 
3970 	} else if (CHIP_IS_E1H(bp)) {
3971 		/* CAM credit is equally divided between all active functions
3972 		 * on the PORT.
3973 		 */
3974 		if (func_num > 0) {
3975 			if (!CHIP_REV_IS_SLOW(bp))
3976 				cam_sz = (MAX_MAC_CREDIT_E1H / (2 * func_num));
3977 			else
3978 				cam_sz = BNX2X_CAM_SIZE_EMUL;
3979 			bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
3980 		} else {
3981 			/* this should never happen! Block MAC operations. */
3982 			bnx2x_init_credit_pool(p, 0, 0);
3983 		}
3984 
3985 	} else {
3986 
3987 		/*
3988 		 * CAM credit is equally divided between all active functions
3989 		 * on the PATH.
3990 		 */
3991 		if (func_num > 0) {
3992 			if (!CHIP_REV_IS_SLOW(bp))
3993 				cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
3994 			else
3995 				cam_sz = BNX2X_CAM_SIZE_EMUL;
3996 
3997 			/*
3998 			 * No need for CAM entries handling for 57712 and
3999 			 * newer.
4000 			 */
4001 			bnx2x_init_credit_pool(p, -1, cam_sz);
4002 		} else {
4003 			/* this should never happen! Block MAC operations. */
4004 			bnx2x_init_credit_pool(p, 0, 0);
4005 		}
4006 
4007 	}
4008 }
4009 
4010 void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
4011 				 struct bnx2x_credit_pool_obj *p,
4012 				 u8 func_id,
4013 				 u8 func_num)
4014 {
4015 	if (CHIP_IS_E1x(bp)) {
4016 		/*
4017 		 * There is no VLAN credit in HW on 57710 and 57711; only
4018 		 * MAC / MAC-VLAN can be set.
4019 		 */
4020 		bnx2x_init_credit_pool(p, 0, -1);
4021 	} else {
4022 		/*
4023 		 * CAM credit is equally divided between all active functions
4024 		 * on the PATH.
4025 		 */
4026 		if (func_num > 0) {
4027 			int credit = MAX_VLAN_CREDIT_E2 / func_num;
4028 			bnx2x_init_credit_pool(p, func_id * credit, credit);
4029 		} else
4030 			/* this should never happen! Block VLAN operations. */
4031 			bnx2x_init_credit_pool(p, 0, 0);
4032 	}
4033 }
4034 
4035 /****************** RSS Configuration ******************/
4036 /**
4037  * bnx2x_debug_print_ind_table - prints the indirection table configuration.
4038  *
4039  * @bp:		driver handle
4040  * @p:		pointer to rss configuration
4041  *
4042  * Prints it when NETIF_MSG_IFUP debug level is configured.
4043  */
4044 static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp,
4045 					struct bnx2x_config_rss_params *p)
4046 {
4047 	int i;
4048 
4049 	DP(BNX2X_MSG_SP, "Setting indirection table to:\n");
4050 	DP(BNX2X_MSG_SP, "0x0000: ");
4051 	for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
4052 		DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]);
4053 
4054 		/* Print 4 bytes in a line */
4055 		if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
4056 		    (((i + 1) & 0x3) == 0)) {
4057 			DP_CONT(BNX2X_MSG_SP, "\n");
4058 			DP(BNX2X_MSG_SP, "0x%04x: ", i + 1);
4059 		}
4060 	}
4061 
4062 	DP_CONT(BNX2X_MSG_SP, "\n");
4063 }
4064 
4065 /**
4066  * bnx2x_setup_rss - configure RSS
4067  *
4068  * @bp:		device handle
4069  * @p:		rss configuration
4070  *
4071  * Sends an UPDATE ramrod for that matter.
4072  */
4073 static int bnx2x_setup_rss(struct bnx2x *bp,
4074 			   struct bnx2x_config_rss_params *p)
4075 {
4076 	struct bnx2x_rss_config_obj *o = p->rss_obj;
4077 	struct bnx2x_raw_obj *r = &o->raw;
4078 	struct eth_rss_update_ramrod_data *data =
4079 		(struct eth_rss_update_ramrod_data *)(r->rdata);
4080 	u8 rss_mode = 0;
4081 	int rc;
4082 
4083 	memset(data, 0, sizeof(*data));
4084 
4085 	DP(BNX2X_MSG_SP, "Configuring RSS\n");
4086 
4087 	/* Set an echo field */
4088 	data->echo = (r->cid & BNX2X_SWCID_MASK) |
4089 		     (r->state << BNX2X_SWCID_SHIFT);
4090 
4091 	/* RSS mode */
4092 	if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags))
4093 		rss_mode = ETH_RSS_MODE_DISABLED;
4094 	else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags))
4095 		rss_mode = ETH_RSS_MODE_REGULAR;
4096 
4097 	data->rss_mode = rss_mode;
4098 
4099 	DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode);
4100 
4101 	/* RSS capabilities */
4102 	if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags))
4103 		data->capabilities |=
4104 			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
4105 
4106 	if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags))
4107 		data->capabilities |=
4108 			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
4109 
4110 	if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
4111 		data->capabilities |=
4112 			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
4113 
4114 	if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags))
4115 		data->capabilities |=
4116 			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
4117 
4118 	/* Hashing mask */
4119 	data->rss_result_mask = p->rss_result_mask;
4120 
4121 	/* RSS engine ID */
4122 	data->rss_engine_id = o->engine_id;
4123 
4124 	DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id);
4125 
4126 	/* Indirection table */
4127 	memcpy(data->indirection_table, p->ind_table,
4128 		  T_ETH_INDIRECTION_TABLE_SIZE);
4129 
4130 	/* Remember the last configuration */
4131 	memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
4132 
4133 	/* Print the indirection table */
4134 	if (netif_msg_ifup(bp))
4135 		bnx2x_debug_print_ind_table(bp, p);
4136 
4137 	/* RSS keys */
4138 	if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
4139 		memcpy(&data->rss_key[0], &p->rss_key[0],
4140 		       sizeof(data->rss_key));
4141 		data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
4142 	}
4143 
4144 	/*
4145 	 *  No need for an explicit memory barrier here: all we have to
4146 	 *  ensure is the ordering between the write to the SPQ element
4147 	 *  and the update of the SPQ producer, which involves a memory
4148 	 *  read, and a full memory barrier is already placed there
4149 	 *  (inside bnx2x_sp_post()).
4150 	 */
4151 
4152 	/* Send a ramrod */
4153 	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid,
4154 			   U64_HI(r->rdata_mapping),
4155 			   U64_LO(r->rdata_mapping),
4156 			   ETH_CONNECTION_TYPE);
4157 
4158 	if (rc < 0)
4159 		return rc;
4160 
4161 	return 1;
4162 }
4163 
4164 void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
4165 			     u8 *ind_table)
4166 {
4167 	memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
4168 }
4169 
4170 int bnx2x_config_rss(struct bnx2x *bp,
4171 		     struct bnx2x_config_rss_params *p)
4172 {
4173 	int rc;
4174 	struct bnx2x_rss_config_obj *o = p->rss_obj;
4175 	struct bnx2x_raw_obj *r = &o->raw;
4176 
4177 	/* Do nothing if only driver cleanup was requested */
4178 	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
4179 		return 0;
4180 
4181 	r->set_pending(r);
4182 
4183 	rc = o->config_rss(bp, p);
4184 	if (rc < 0) {
4185 		r->clear_pending(r);
4186 		return rc;
4187 	}
4188 
4189 	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
4190 		rc = r->wait_comp(bp, r);
4191 
4192 	return rc;
4193 }
4194 
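/* Illustrative usage sketch (not part of the driver): configuring regular
 * RSS with a caller-provided indirection table, following the pattern the
 * driver core uses (MULTI_MASK and the rss_flags bits are assumed from
 * bnx2x.h/bnx2x_sp.h). The function name is hypothetical.
 */
static int __maybe_unused bnx2x_config_rss_example(struct bnx2x *bp,
				struct bnx2x_rss_config_obj *rss_obj,
				u8 *ind_table)
{
	struct bnx2x_config_rss_params params = {NULL};

	params.rss_obj = rss_obj;

	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
	__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);

	/* Hash on IPv4/IPv6 and on TCP over both */
	__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);

	params.rss_result_mask = MULTI_MASK;

	memcpy(params.ind_table, ind_table, T_ETH_INDIRECTION_TABLE_SIZE);

	return bnx2x_config_rss(bp, &params);
}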
4195 
4196 void bnx2x_init_rss_config_obj(struct bnx2x *bp,
4197 			       struct bnx2x_rss_config_obj *rss_obj,
4198 			       u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
4199 			       void *rdata, dma_addr_t rdata_mapping,
4200 			       int state, unsigned long *pstate,
4201 			       bnx2x_obj_type type)
4202 {
4203 	bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
4204 			   rdata_mapping, state, pstate, type);
4205 
4206 	rss_obj->engine_id  = engine_id;
4207 	rss_obj->config_rss = bnx2x_setup_rss;
4208 }
4209 
4210 /********************** Queue state object ***********************************/
4211 
4212 /**
4213  * bnx2x_queue_state_change - perform Queue state change transition
4214  *
4215  * @bp:		device handle
4216  * @params:	parameters to perform the transition
4217  *
4218  * returns 0 in case of a successfully completed transition, a negative error
4219  * code in case of failure, or a positive (EBUSY) value if a completion
4220  * is still pending (possible only if RAMROD_COMP_WAIT is not set in
4221  * params->ramrod_flags, i.e. for asynchronous commands).
4222  *
4223  */
4224 int bnx2x_queue_state_change(struct bnx2x *bp,
4225 			     struct bnx2x_queue_state_params *params)
4226 {
4227 	struct bnx2x_queue_sp_obj *o = params->q_obj;
4228 	int rc, pending_bit;
4229 	unsigned long *pending = &o->pending;
4230 
4231 	/* Check that the requested transition is legal */
4232 	if (o->check_transition(bp, o, params))
4233 		return -EINVAL;
4234 
4235 	/* Set "pending" bit */
4236 	pending_bit = o->set_pending(o, params);
4237 
4238 	/* Don't send a command if only driver cleanup was requested */
4239 	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
4240 		o->complete_cmd(bp, o, pending_bit);
4241 	else {
4242 		/* Send a ramrod */
4243 		rc = o->send_cmd(bp, params);
4244 		if (rc) {
4245 			o->next_state = BNX2X_Q_STATE_MAX;
4246 			clear_bit(pending_bit, pending);
4247 			smp_mb__after_clear_bit();
4248 			return rc;
4249 		}
4250 
4251 		if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
4252 			rc = o->wait_comp(bp, o, pending_bit);
4253 			if (rc)
4254 				return rc;
4255 
4256 			return 0;
4257 		}
4258 	}
4259 
4260 	return !!test_bit(pending_bit, pending);
4261 }
4262 
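/* Illustrative usage sketch (not part of the driver): issuing a single
 * SETUP transition and waiting for its completion, as the driver core does
 * when bringing a queue up (after a prior BNX2X_Q_CMD_INIT). The function
 * name is hypothetical; q_params->params.setup is assumed to be filled in
 * by the caller.
 */
static int __maybe_unused bnx2x_queue_setup_example(struct bnx2x *bp,
				struct bnx2x_queue_sp_obj *q_obj,
				struct bnx2x_queue_state_params *q_params)
{
	q_params->q_obj = q_obj;
	q_params->cmd = BNX2X_Q_CMD_SETUP;

	/* Block until the CLIENT_SETUP ramrod completion arrives */
	__set_bit(RAMROD_COMP_WAIT, &q_params->ramrod_flags);

	return bnx2x_queue_state_change(bp, q_params);
}
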
4263 
4264 static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj,
4265 				   struct bnx2x_queue_state_params *params)
4266 {
4267 	enum bnx2x_queue_cmd cmd = params->cmd, bit;
4268 
4269 	/* ACTIVATE and DEACTIVATE commands are implemented on top of
4270 	 * UPDATE command.
4271 	 */
4272 	if ((cmd == BNX2X_Q_CMD_ACTIVATE) ||
4273 	    (cmd == BNX2X_Q_CMD_DEACTIVATE))
4274 		bit = BNX2X_Q_CMD_UPDATE;
4275 	else
4276 		bit = cmd;
4277 
4278 	set_bit(bit, &obj->pending);
4279 	return bit;
4280 }
4281 
4282 static int bnx2x_queue_wait_comp(struct bnx2x *bp,
4283 				 struct bnx2x_queue_sp_obj *o,
4284 				 enum bnx2x_queue_cmd cmd)
4285 {
4286 	return bnx2x_state_wait(bp, cmd, &o->pending);
4287 }
4288 
4289 /**
4290  * bnx2x_queue_comp_cmd - complete the state change command.
4291  *
4292  * @bp:		device handle
4293  * @o:		queue state object
4294  * @cmd:	command that has completed
4295  *
4296  * Checks that the arrived completion is expected.
4297  */
4298 static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
4299 				struct bnx2x_queue_sp_obj *o,
4300 				enum bnx2x_queue_cmd cmd)
4301 {
4302 	unsigned long cur_pending = o->pending;
4303 
4304 	if (!test_and_clear_bit(cmd, &cur_pending)) {
4305 		BNX2X_ERR("Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d\n",
4306 			  cmd, o->cids[BNX2X_PRIMARY_CID_INDEX],
4307 			  o->state, cur_pending, o->next_state);
4308 		return -EINVAL;
4309 	}
4310 
4311 	if (o->next_tx_only >= o->max_cos)
4312 		/* >= because the tx-only count must always be smaller than
4313 		 * max_cos, since the primary connection supports COS 0
4314 		 */
4315 		BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d",
4316 			   o->next_tx_only, o->max_cos);
4317 
4318 	DP(BNX2X_MSG_SP,
4319 	   "Completing command %d for queue %d, setting state to %d\n",
4320 	   cmd, o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state);
4321 
4322 	if (o->next_tx_only)  /* print num tx-only if any exist */
4323 		DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d\n",
4324 		   o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only);
4325 
4326 	o->state = o->next_state;
4327 	o->num_tx_only = o->next_tx_only;
4328 	o->next_state = BNX2X_Q_STATE_MAX;
4329 
4330 	/* It's important that o->state and o->next_state are
4331 	 * updated before o->pending.
4332 	 */
4333 	wmb();
4334 
4335 	clear_bit(cmd, &o->pending);
4336 	smp_mb__after_clear_bit();
4337 
4338 	return 0;
4339 }
4340 
4341 static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp,
4342 				struct bnx2x_queue_state_params *cmd_params,
4343 				struct client_init_ramrod_data *data)
4344 {
4345 	struct bnx2x_queue_setup_params *params = &cmd_params->params.setup;
4346 
4347 	/* Rx data */
4348 
4349 	/* IPv6 TPA supported for E2 and above only */
4350 	data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA_IPV6, &params->flags) *
4351 				CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
4352 }
4353 
4354 static void bnx2x_q_fill_init_general_data(struct bnx2x *bp,
4355 				struct bnx2x_queue_sp_obj *o,
4356 				struct bnx2x_general_setup_params *params,
4357 				struct client_init_general_data *gen_data,
4358 				unsigned long *flags)
4359 {
4360 	gen_data->client_id = o->cl_id;
4361 
4362 	if (test_bit(BNX2X_Q_FLG_STATS, flags)) {
4363 		gen_data->statistics_counter_id =
4364 					params->stat_id;
4365 		gen_data->statistics_en_flg = 1;
4366 		gen_data->statistics_zero_flg =
4367 			test_bit(BNX2X_Q_FLG_ZERO_STATS, flags);
4368 	} else
4369 		gen_data->statistics_counter_id =
4370 					DISABLE_STATISTIC_COUNTER_ID_VALUE;
4371 
4372 	gen_data->is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, flags);
4373 	gen_data->activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE, flags);
4374 	gen_data->sp_client_id = params->spcl_id;
4375 	gen_data->mtu = cpu_to_le16(params->mtu);
4376 	gen_data->func_id = o->func_id;
4377 
4378 
4379 	gen_data->cos = params->cos;
4380 
4381 	gen_data->traffic_type =
4382 		test_bit(BNX2X_Q_FLG_FCOE, flags) ?
4383 		LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
4384 
4385 	DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n",
4386 	   gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
4387 }
4388 
4389 static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
4390 				struct bnx2x_txq_setup_params *params,
4391 				struct client_init_tx_data *tx_data,
4392 				unsigned long *flags)
4393 {
4394 	tx_data->enforce_security_flg =
4395 		test_bit(BNX2X_Q_FLG_TX_SEC, flags);
4396 	tx_data->default_vlan =
4397 		cpu_to_le16(params->default_vlan);
4398 	tx_data->default_vlan_flg =
4399 		test_bit(BNX2X_Q_FLG_DEF_VLAN, flags);
4400 	tx_data->tx_switching_flg =
4401 		test_bit(BNX2X_Q_FLG_TX_SWITCH, flags);
4402 	tx_data->anti_spoofing_flg =
4403 		test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
4404 	tx_data->force_default_pri_flg =
4405 		test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags);
4406 
4407 	tx_data->tx_status_block_id = params->fw_sb_id;
4408 	tx_data->tx_sb_index_number = params->sb_cq_index;
4409 	tx_data->tss_leading_client_id = params->tss_leading_cl_id;
4410 
4411 	tx_data->tx_bd_page_base.lo =
4412 		cpu_to_le32(U64_LO(params->dscr_map));
4413 	tx_data->tx_bd_page_base.hi =
4414 		cpu_to_le32(U64_HI(params->dscr_map));
4415 
4416 	/* Don't configure any Tx switching mode during queue SETUP */
4417 	tx_data->state = 0;
4418 }
4419 
4420 static void bnx2x_q_fill_init_pause_data(struct bnx2x_queue_sp_obj *o,
4421 				struct rxq_pause_params *params,
4422 				struct client_init_rx_data *rx_data)
4423 {
4424 	/* flow control data */
4425 	rx_data->cqe_pause_thr_low = cpu_to_le16(params->rcq_th_lo);
4426 	rx_data->cqe_pause_thr_high = cpu_to_le16(params->rcq_th_hi);
4427 	rx_data->bd_pause_thr_low = cpu_to_le16(params->bd_th_lo);
4428 	rx_data->bd_pause_thr_high = cpu_to_le16(params->bd_th_hi);
4429 	rx_data->sge_pause_thr_low = cpu_to_le16(params->sge_th_lo);
4430 	rx_data->sge_pause_thr_high = cpu_to_le16(params->sge_th_hi);
4431 	rx_data->rx_cos_mask = cpu_to_le16(params->pri_map);
4432 }
4433 
4434 static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o,
4435 				struct bnx2x_rxq_setup_params *params,
4436 				struct client_init_rx_data *rx_data,
4437 				unsigned long *flags)
4438 {
4439 	rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) *
4440 				CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
4441 	rx_data->tpa_en |= test_bit(BNX2X_Q_FLG_TPA_GRO, flags) *
4442 				CLIENT_INIT_RX_DATA_TPA_MODE;
4443 	rx_data->vmqueue_mode_en_flg = 0;
4444 
4445 	rx_data->cache_line_alignment_log_size =
4446 		params->cache_line_log;
4447 	rx_data->enable_dynamic_hc =
4448 		test_bit(BNX2X_Q_FLG_DHC, flags);
4449 	rx_data->max_sges_for_packet = params->max_sges_pkt;
4450 	rx_data->client_qzone_id = params->cl_qzone_id;
4451 	rx_data->max_agg_size = cpu_to_le16(params->tpa_agg_sz);
4452 
4453 	/* Always start in DROP_ALL mode */
4454 	rx_data->state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
4455 				     CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
4456 
4457 	/* We don't set drop flags */
4458 	rx_data->drop_ip_cs_err_flg = 0;
4459 	rx_data->drop_tcp_cs_err_flg = 0;
4460 	rx_data->drop_ttl0_flg = 0;
4461 	rx_data->drop_udp_cs_err_flg = 0;
4462 	rx_data->inner_vlan_removal_enable_flg =
4463 		test_bit(BNX2X_Q_FLG_VLAN, flags);
4464 	rx_data->outer_vlan_removal_enable_flg =
4465 		test_bit(BNX2X_Q_FLG_OV, flags);
4466 	rx_data->status_block_id = params->fw_sb_id;
4467 	rx_data->rx_sb_index_number = params->sb_cq_index;
4468 	rx_data->max_tpa_queues = params->max_tpa_queues;
4469 	rx_data->max_bytes_on_bd = cpu_to_le16(params->buf_sz);
4470 	rx_data->sge_buff_size = cpu_to_le16(params->sge_buf_sz);
4471 	rx_data->bd_page_base.lo =
4472 		cpu_to_le32(U64_LO(params->dscr_map));
4473 	rx_data->bd_page_base.hi =
4474 		cpu_to_le32(U64_HI(params->dscr_map));
4475 	rx_data->sge_page_base.lo =
4476 		cpu_to_le32(U64_LO(params->sge_map));
4477 	rx_data->sge_page_base.hi =
4478 		cpu_to_le32(U64_HI(params->sge_map));
4479 	rx_data->cqe_page_base.lo =
4480 		cpu_to_le32(U64_LO(params->rcq_map));
4481 	rx_data->cqe_page_base.hi =
4482 		cpu_to_le32(U64_HI(params->rcq_map));
4483 	rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags);
4484 
4485 	if (test_bit(BNX2X_Q_FLG_MCAST, flags)) {
4486 		rx_data->approx_mcast_engine_id = params->mcast_engine_id;
4487 		rx_data->is_approx_mcast = 1;
4488 	}
4489 
4490 	rx_data->rss_engine_id = params->rss_engine_id;
4491 
4492 	/* silent vlan removal */
4493 	rx_data->silent_vlan_removal_flg =
4494 		test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, flags);
4495 	rx_data->silent_vlan_value =
4496 		cpu_to_le16(params->silent_removal_value);
4497 	rx_data->silent_vlan_mask =
4498 		cpu_to_le16(params->silent_removal_mask);
4499 
4500 }
4501 
4502 /* initialize the general, tx and rx parts of a queue object */
4503 static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp,
4504 				struct bnx2x_queue_state_params *cmd_params,
4505 				struct client_init_ramrod_data *data)
4506 {
4507 	bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4508 				       &cmd_params->params.setup.gen_params,
4509 				       &data->general,
4510 				       &cmd_params->params.setup.flags);
4511 
4512 	bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4513 				  &cmd_params->params.setup.txq_params,
4514 				  &data->tx,
4515 				  &cmd_params->params.setup.flags);
4516 
4517 	bnx2x_q_fill_init_rx_data(cmd_params->q_obj,
4518 				  &cmd_params->params.setup.rxq_params,
4519 				  &data->rx,
4520 				  &cmd_params->params.setup.flags);
4521 
4522 	bnx2x_q_fill_init_pause_data(cmd_params->q_obj,
4523 				     &cmd_params->params.setup.pause_params,
4524 				     &data->rx);
4525 }
4526 
4527 /* initialize the general and tx parts of a tx-only queue object */
4528 static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp,
4529 				struct bnx2x_queue_state_params *cmd_params,
4530 				struct tx_queue_init_ramrod_data *data)
4531 {
4532 	bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4533 				       &cmd_params->params.tx_only.gen_params,
4534 				       &data->general,
4535 				       &cmd_params->params.tx_only.flags);
4536 
4537 	bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4538 				  &cmd_params->params.tx_only.txq_params,
4539 				  &data->tx,
4540 				  &cmd_params->params.tx_only.flags);
4541 
4542 	DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x\n",
4543 			 cmd_params->q_obj->cids[0],
4544 			 data->tx.tx_bd_page_base.lo,
4545 			 data->tx.tx_bd_page_base.hi);
4546 }
4547 
4548 /**
4549  * bnx2x_q_init - init HW/FW queue
4550  *
4551  * @bp:		device handle
4552  * @params:	queue state parameters
4553  *
4554  * HW/FW initial Queue configuration:
4555  *      - HC: Rx and Tx
4556  *      - CDU context validation
4557  *
4558  */
4559 static inline int bnx2x_q_init(struct bnx2x *bp,
4560 			       struct bnx2x_queue_state_params *params)
4561 {
4562 	struct bnx2x_queue_sp_obj *o = params->q_obj;
4563 	struct bnx2x_queue_init_params *init = &params->params.init;
4564 	u16 hc_usec;
4565 	u8 cos;
4566 
4567 	/* Tx HC configuration */
4568 	if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) &&
4569 	    test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) {
4570 		hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
4571 
4572 		bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id,
4573 			init->tx.sb_cq_index,
4574 			!test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags),
4575 			hc_usec);
4576 	}
4577 
4578 	/* Rx HC configuration */
4579 	if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) &&
4580 	    test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) {
4581 		hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
4582 
4583 		bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id,
4584 			init->rx.sb_cq_index,
4585 			!test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags),
4586 			hc_usec);
4587 	}
4588 
4589 	/* Set CDU context validation values */
4590 	for (cos = 0; cos < o->max_cos; cos++) {
4591 		DP(BNX2X_MSG_SP, "setting context validation. cid %d, cos %d\n",
4592 				 o->cids[cos], cos);
4593 		DP(BNX2X_MSG_SP, "context pointer %p\n", init->cxts[cos]);
4594 		bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]);
4595 	}
4596 
4597 	/* As no ramrod is sent, complete the command immediately  */
4598 	o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);
4599 
4600 	mmiowb();
4601 	smp_mb();
4602 
4603 	return 0;
4604 }
4605 
4606 static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
4607 					struct bnx2x_queue_state_params *params)
4608 {
4609 	struct bnx2x_queue_sp_obj *o = params->q_obj;
4610 	struct client_init_ramrod_data *rdata =
4611 		(struct client_init_ramrod_data *)o->rdata;
4612 	dma_addr_t data_mapping = o->rdata_mapping;
4613 	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4614 
4615 	/* Clear the ramrod data */
4616 	memset(rdata, 0, sizeof(*rdata));
4617 
4618 	/* Fill the ramrod data */
4619 	bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4620 
4621 	/*
4622 	 *  No need for an explicit memory barrier here: all we have to
4623 	 *  ensure is the ordering between the write to the SPQ element
4624 	 *  and the update of the SPQ producer, which involves a memory
4625 	 *  read, and a full memory barrier is already placed there
4626 	 *  (inside bnx2x_sp_post()).
4627 	 */
4628 
4629 	return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4630 			     U64_HI(data_mapping),
4631 			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4632 }
4633 
4634 static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
4635 					struct bnx2x_queue_state_params *params)
4636 {
4637 	struct bnx2x_queue_sp_obj *o = params->q_obj;
4638 	struct client_init_ramrod_data *rdata =
4639 		(struct client_init_ramrod_data *)o->rdata;
4640 	dma_addr_t data_mapping = o->rdata_mapping;
4641 	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4642 
4643 	/* Clear the ramrod data */
4644 	memset(rdata, 0, sizeof(*rdata));
4645 
4646 	/* Fill the ramrod data */
4647 	bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4648 	bnx2x_q_fill_setup_data_e2(bp, params, rdata);
4649 
4650 	/*
4651 	 *  No need for an explicit memory barrier here: all we have to
4652 	 *  ensure is the ordering between the write to the SPQ element
4653 	 *  and the update of the SPQ producer, which involves a memory
4654 	 *  read, and a full memory barrier is already placed there
4655 	 *  (inside bnx2x_sp_post()).
4656 	 */
4657 
4658 	return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4659 			     U64_HI(data_mapping),
4660 			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4661 }
4662 
4663 static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
4664 				  struct bnx2x_queue_state_params *params)
4665 {
4666 	struct bnx2x_queue_sp_obj *o = params->q_obj;
4667 	struct tx_queue_init_ramrod_data *rdata =
4668 		(struct tx_queue_init_ramrod_data *)o->rdata;
4669 	dma_addr_t data_mapping = o->rdata_mapping;
4670 	int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
4671 	struct bnx2x_queue_setup_tx_only_params *tx_only_params =
4672 		&params->params.tx_only;
4673 	u8 cid_index = tx_only_params->cid_index;
4674 
4675 
4676 	if (cid_index >= o->max_cos) {
4677 		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4678 			  o->cl_id, cid_index);
4679 		return -EINVAL;
4680 	}
4681 
4682 	DP(BNX2X_MSG_SP, "parameters received: cos: %d sp-id: %d\n",
4683 			 tx_only_params->gen_params.cos,
4684 			 tx_only_params->gen_params.spcl_id);
4685 
4686 	/* Clear the ramrod data */
4687 	memset(rdata, 0, sizeof(*rdata));
4688 
4689 	/* Fill the ramrod data */
4690 	bnx2x_q_fill_setup_tx_only(bp, params, rdata);
4691 
4692 	DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d\n",
4693 			 o->cids[cid_index], rdata->general.client_id,
4694 			 rdata->general.sp_client_id, rdata->general.cos);
4695 
4696 	/*
4697 	 *  No need for an explicit memory barrier here: all we have to
4698 	 *  ensure is the ordering between the write to the SPQ element
4699 	 *  and the update of the SPQ producer, which involves a memory
4700 	 *  read, and a full memory barrier is already placed there
4701 	 *  (inside bnx2x_sp_post()).
4702 	 */
4703 
4704 	return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
4705 			     U64_HI(data_mapping),
4706 			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4707 }
4708 
4709 static void bnx2x_q_fill_update_data(struct bnx2x *bp,
4710 				     struct bnx2x_queue_sp_obj *obj,
4711 				     struct bnx2x_queue_update_params *params,
4712 				     struct client_update_ramrod_data *data)
4713 {
4714 	/* Client ID of the client to update */
4715 	data->client_id = obj->cl_id;
4716 
4717 	/* Function ID of the client to update */
4718 	data->func_id = obj->func_id;
4719 
4720 	/* Default VLAN value */
4721 	data->default_vlan = cpu_to_le16(params->def_vlan);
4722 
4723 	/* Inner VLAN stripping */
4724 	data->inner_vlan_removal_enable_flg =
4725 		test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
4726 	data->inner_vlan_removal_change_flg =
4727 		test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
4728 			 &params->update_flags);
4729 
4730 	/* Outer VLAN stripping */
4731 	data->outer_vlan_removal_enable_flg =
4732 		test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
4733 	data->outer_vlan_removal_change_flg =
4734 		test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
4735 			 &params->update_flags);
4736 
4737 	/* Drop packets whose source MAC doesn't belong to this
4738 	 * Queue.
4739 	 */
4740 	data->anti_spoofing_enable_flg =
4741 		test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
4742 	data->anti_spoofing_change_flg =
4743 		test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, &params->update_flags);
4744 
4745 	/* Activate/Deactivate */
4746 	data->activate_flg =
4747 		test_bit(BNX2X_Q_UPDATE_ACTIVATE, &params->update_flags);
4748 	data->activate_change_flg =
4749 		test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);
4750 
4751 	/* Enable default VLAN */
4752 	data->default_vlan_enable_flg =
4753 		test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
4754 	data->default_vlan_change_flg =
4755 		test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
4756 			 &params->update_flags);
4757 
4758 	/* silent vlan removal */
4759 	data->silent_vlan_change_flg =
4760 		test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
4761 			 &params->update_flags);
4762 	data->silent_vlan_removal_flg =
4763 		test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags);
4764 	data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
4765 	data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);
4766 }
4767 
4768 static inline int bnx2x_q_send_update(struct bnx2x *bp,
4769 				      struct bnx2x_queue_state_params *params)
4770 {
4771 	struct bnx2x_queue_sp_obj *o = params->q_obj;
4772 	struct client_update_ramrod_data *rdata =
4773 		(struct client_update_ramrod_data *)o->rdata;
4774 	dma_addr_t data_mapping = o->rdata_mapping;
4775 	struct bnx2x_queue_update_params *update_params =
4776 		&params->params.update;
4777 	u8 cid_index = update_params->cid_index;
4778 
4779 	if (cid_index >= o->max_cos) {
4780 		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4781 			  o->cl_id, cid_index);
4782 		return -EINVAL;
4783 	}
4784 
4785 
4786 	/* Clear the ramrod data */
4787 	memset(rdata, 0, sizeof(*rdata));
4788 
4789 	/* Fill the ramrod data */
4790 	bnx2x_q_fill_update_data(bp, o, update_params, rdata);
4791 
4792 	/*
4793 	 *  No need for an explicit memory barrier here: all we have to
4794 	 *  ensure is the ordering between the write to the SPQ element
4795 	 *  and the update of the SPQ producer, which involves a memory
4796 	 *  read, and a full memory barrier is already placed there
4797 	 *  (inside bnx2x_sp_post()).
4798 	 */
4799 
4800 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
4801 			     o->cids[cid_index], U64_HI(data_mapping),
4802 			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4803 }
4804 
4805 /**
4806  * bnx2x_q_send_deactivate - send DEACTIVATE command
4807  *
4808  * @bp:		device handle
4809  * @params:	queue state parameters
4810  *
4811  * implemented using the UPDATE command.
4812  */
4813 static inline int bnx2x_q_send_deactivate(struct bnx2x *bp,
4814 					struct bnx2x_queue_state_params *params)
4815 {
4816 	struct bnx2x_queue_update_params *update = &params->params.update;
4817 
4818 	memset(update, 0, sizeof(*update));
4819 
4820 	__set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4821 
4822 	return bnx2x_q_send_update(bp, params);
4823 }
4824 
4825 /**
4826  * bnx2x_q_send_activate - send ACTIVATE command
4827  *
4828  * @bp:		device handle
4829  * @params:	queue state parameters
4830  *
4831  * implemented using the UPDATE command.
4832  */
4833 static inline int bnx2x_q_send_activate(struct bnx2x *bp,
4834 					struct bnx2x_queue_state_params *params)
4835 {
4836 	struct bnx2x_queue_update_params *update = &params->params.update;
4837 
4838 	memset(update, 0, sizeof(*update));
4839 
4840 	__set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags);
4841 	__set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4842 
4843 	return bnx2x_q_send_update(bp, params);
4844 }
4845 
4846 static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
4847 					struct bnx2x_queue_state_params *params)
4848 {
4849 	/* TODO: Not implemented yet. */
4850 	return -1;
4851 }
4852 
4853 static inline int bnx2x_q_send_halt(struct bnx2x *bp,
4854 				    struct bnx2x_queue_state_params *params)
4855 {
4856 	struct bnx2x_queue_sp_obj *o = params->q_obj;
4857 
4858 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT,
4859 			     o->cids[BNX2X_PRIMARY_CID_INDEX], 0, o->cl_id,
4860 			     ETH_CONNECTION_TYPE);
4861 }
4862 
4863 static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp,
4864 				       struct bnx2x_queue_state_params *params)
4865 {
4866 	struct bnx2x_queue_sp_obj *o = params->q_obj;
4867 	u8 cid_idx = params->params.cfc_del.cid_index;
4868 
4869 	if (cid_idx >= o->max_cos) {
4870 		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4871 			  o->cl_id, cid_idx);
4872 		return -EINVAL;
4873 	}
4874 
4875 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL,
4876 			     o->cids[cid_idx], 0, 0, NONE_CONNECTION_TYPE);
4877 }
4878 
4879 static inline int bnx2x_q_send_terminate(struct bnx2x *bp,
4880 					struct bnx2x_queue_state_params *params)
4881 {
4882 	struct bnx2x_queue_sp_obj *o = params->q_obj;
4883 	u8 cid_index = params->params.terminate.cid_index;
4884 
4885 	if (cid_index >= o->max_cos) {
4886 		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4887 			  o->cl_id, cid_index);
4888 		return -EINVAL;
4889 	}
4890 
4891 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE,
4892 			     o->cids[cid_index], 0, 0, ETH_CONNECTION_TYPE);
4893 }
4894 
4895 static inline int bnx2x_q_send_empty(struct bnx2x *bp,
4896 				     struct bnx2x_queue_state_params *params)
4897 {
4898 	struct bnx2x_queue_sp_obj *o = params->q_obj;
4899 
4900 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY,
4901 			     o->cids[BNX2X_PRIMARY_CID_INDEX], 0, 0,
4902 			     ETH_CONNECTION_TYPE);
4903 }
4904 
4905 static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp,
4906 					struct bnx2x_queue_state_params *params)
4907 {
4908 	switch (params->cmd) {
4909 	case BNX2X_Q_CMD_INIT:
4910 		return bnx2x_q_init(bp, params);
4911 	case BNX2X_Q_CMD_SETUP_TX_ONLY:
4912 		return bnx2x_q_send_setup_tx_only(bp, params);
4913 	case BNX2X_Q_CMD_DEACTIVATE:
4914 		return bnx2x_q_send_deactivate(bp, params);
4915 	case BNX2X_Q_CMD_ACTIVATE:
4916 		return bnx2x_q_send_activate(bp, params);
4917 	case BNX2X_Q_CMD_UPDATE:
4918 		return bnx2x_q_send_update(bp, params);
4919 	case BNX2X_Q_CMD_UPDATE_TPA:
4920 		return bnx2x_q_send_update_tpa(bp, params);
4921 	case BNX2X_Q_CMD_HALT:
4922 		return bnx2x_q_send_halt(bp, params);
4923 	case BNX2X_Q_CMD_CFC_DEL:
4924 		return bnx2x_q_send_cfc_del(bp, params);
4925 	case BNX2X_Q_CMD_TERMINATE:
4926 		return bnx2x_q_send_terminate(bp, params);
4927 	case BNX2X_Q_CMD_EMPTY:
4928 		return bnx2x_q_send_empty(bp, params);
4929 	default:
4930 		BNX2X_ERR("Unknown command: %d\n", params->cmd);
4931 		return -EINVAL;
4932 	}
4933 }
4934 
4935 static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp,
4936 				    struct bnx2x_queue_state_params *params)
4937 {
4938 	switch (params->cmd) {
4939 	case BNX2X_Q_CMD_SETUP:
4940 		return bnx2x_q_send_setup_e1x(bp, params);
4941 	case BNX2X_Q_CMD_INIT:
4942 	case BNX2X_Q_CMD_SETUP_TX_ONLY:
4943 	case BNX2X_Q_CMD_DEACTIVATE:
4944 	case BNX2X_Q_CMD_ACTIVATE:
4945 	case BNX2X_Q_CMD_UPDATE:
4946 	case BNX2X_Q_CMD_UPDATE_TPA:
4947 	case BNX2X_Q_CMD_HALT:
4948 	case BNX2X_Q_CMD_CFC_DEL:
4949 	case BNX2X_Q_CMD_TERMINATE:
4950 	case BNX2X_Q_CMD_EMPTY:
4951 		return bnx2x_queue_send_cmd_cmn(bp, params);
4952 	default:
4953 		BNX2X_ERR("Unknown command: %d\n", params->cmd);
4954 		return -EINVAL;
4955 	}
4956 }
4957 
4958 static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp,
4959 				   struct bnx2x_queue_state_params *params)
4960 {
4961 	switch (params->cmd) {
4962 	case BNX2X_Q_CMD_SETUP:
4963 		return bnx2x_q_send_setup_e2(bp, params);
4964 	case BNX2X_Q_CMD_INIT:
4965 	case BNX2X_Q_CMD_SETUP_TX_ONLY:
4966 	case BNX2X_Q_CMD_DEACTIVATE:
4967 	case BNX2X_Q_CMD_ACTIVATE:
4968 	case BNX2X_Q_CMD_UPDATE:
4969 	case BNX2X_Q_CMD_UPDATE_TPA:
4970 	case BNX2X_Q_CMD_HALT:
4971 	case BNX2X_Q_CMD_CFC_DEL:
4972 	case BNX2X_Q_CMD_TERMINATE:
4973 	case BNX2X_Q_CMD_EMPTY:
4974 		return bnx2x_queue_send_cmd_cmn(bp, params);
4975 	default:
4976 		BNX2X_ERR("Unknown command: %d\n", params->cmd);
4977 		return -EINVAL;
4978 	}
4979 }
4980 
4981 /**
4982  * bnx2x_queue_chk_transition - check state machine of a regular Queue
4983  *
4984  * @bp:		device handle
4985  * @o:		queue state object
4986  * @params:	queue state parameters
4987  *
4988  * (not a Forwarding queue)
4989  * It both checks if the requested command is legal in a current
4990  * state and, if it's legal, sets a `next_state' in the object
4991  * that will be used in the completion flow to set the `state'
4992  * of the object.
4993  *
4994  * returns 0 if a requested command is a legal transition,
4995  *         -EINVAL otherwise.
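 *
 * A sketch of the main legal transitions checked below (illustrative;
 * the UPDATE, EMPTY and UPDATE_TPA self-transitions are omitted):
 *
 *   RESET --INIT--> INITIALIZED --SETUP--> ACTIVE or INACTIVE
 *   ACTIVE --DEACTIVATE--> INACTIVE --ACTIVATE--> ACTIVE
 *   ACTIVE or MULTI_COS --SETUP_TX_ONLY--> MULTI_COS
 *   MULTI_COS --TERMINATE--> MCOS_TERMINATED --CFC_DEL--> ACTIVE or MULTI_COS
 *   ACTIVE or INACTIVE --HALT--> STOPPED --TERMINATE--> TERMINATED
 *   TERMINATED --CFC_DEL--> RESET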
4996  */
4997 static int bnx2x_queue_chk_transition(struct bnx2x *bp,
4998 				      struct bnx2x_queue_sp_obj *o,
4999 				      struct bnx2x_queue_state_params *params)
5000 {
5001 	enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX;
5002 	enum bnx2x_queue_cmd cmd = params->cmd;
5003 	struct bnx2x_queue_update_params *update_params =
5004 		 &params->params.update;
5005 	u8 next_tx_only = o->num_tx_only;
5006 
5007 	/*
5008 	 * Forget all pending-for-completion commands if a driver-only state
5009 	 * transition has been requested.
5010 	 */
5011 	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5012 		o->pending = 0;
5013 		o->next_state = BNX2X_Q_STATE_MAX;
5014 	}
5015 
5016 	/*
5017 	 * Don't allow a next state transition if we are in the middle of
5018 	 * the previous one.
5019 	 */
5020 	if (o->pending)
5021 		return -EBUSY;
5022 
5023 	switch (state) {
5024 	case BNX2X_Q_STATE_RESET:
5025 		if (cmd == BNX2X_Q_CMD_INIT)
5026 			next_state = BNX2X_Q_STATE_INITIALIZED;
5027 
5028 		break;
5029 	case BNX2X_Q_STATE_INITIALIZED:
5030 		if (cmd == BNX2X_Q_CMD_SETUP) {
5031 			if (test_bit(BNX2X_Q_FLG_ACTIVE,
5032 				     &params->params.setup.flags))
5033 				next_state = BNX2X_Q_STATE_ACTIVE;
5034 			else
5035 				next_state = BNX2X_Q_STATE_INACTIVE;
5036 		}
5037 
5038 		break;
5039 	case BNX2X_Q_STATE_ACTIVE:
5040 		if (cmd == BNX2X_Q_CMD_DEACTIVATE)
5041 			next_state = BNX2X_Q_STATE_INACTIVE;
5042 
5043 		else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5044 			 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5045 			next_state = BNX2X_Q_STATE_ACTIVE;
5046 
5047 		else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
5048 			next_state = BNX2X_Q_STATE_MULTI_COS;
5049 			next_tx_only = 1;
5050 		}
5051 
5052 		else if (cmd == BNX2X_Q_CMD_HALT)
5053 			next_state = BNX2X_Q_STATE_STOPPED;
5054 
5055 		else if (cmd == BNX2X_Q_CMD_UPDATE) {
5056 			/* If "active" state change is requested, update the
5057 			 *  state accordingly.
5058 			 */
5059 			if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5060 				     &update_params->update_flags) &&
5061 			    !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5062 				      &update_params->update_flags))
5063 				next_state = BNX2X_Q_STATE_INACTIVE;
5064 			else
5065 				next_state = BNX2X_Q_STATE_ACTIVE;
5066 		}
5067 
5068 		break;
5069 	case BNX2X_Q_STATE_MULTI_COS:
5070 		if (cmd == BNX2X_Q_CMD_TERMINATE)
5071 			next_state = BNX2X_Q_STATE_MCOS_TERMINATED;
5072 
5073 		else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
5074 			next_state = BNX2X_Q_STATE_MULTI_COS;
5075 			next_tx_only = o->num_tx_only + 1;
5076 		}
5077 
5078 		else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5079 			 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5080 			next_state = BNX2X_Q_STATE_MULTI_COS;
5081 
5082 		else if (cmd == BNX2X_Q_CMD_UPDATE) {
5083 			/* If "active" state change is requested, update the
5084 			 *  state accordingly.
5085 			 */
5086 			if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5087 				     &update_params->update_flags) &&
5088 			    !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5089 				      &update_params->update_flags))
5090 				next_state = BNX2X_Q_STATE_INACTIVE;
5091 			else
5092 				next_state = BNX2X_Q_STATE_MULTI_COS;
5093 		}
5094 
5095 		break;
5096 	case BNX2X_Q_STATE_MCOS_TERMINATED:
5097 		if (cmd == BNX2X_Q_CMD_CFC_DEL) {
5098 			next_tx_only = o->num_tx_only - 1;
5099 			if (next_tx_only == 0)
5100 				next_state = BNX2X_Q_STATE_ACTIVE;
5101 			else
5102 				next_state = BNX2X_Q_STATE_MULTI_COS;
5103 		}
5104 
5105 		break;
5106 	case BNX2X_Q_STATE_INACTIVE:
5107 		if (cmd == BNX2X_Q_CMD_ACTIVATE)
5108 			next_state = BNX2X_Q_STATE_ACTIVE;
5109 
5110 		else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5111 			 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5112 			next_state = BNX2X_Q_STATE_INACTIVE;
5113 
5114 		else if (cmd == BNX2X_Q_CMD_HALT)
5115 			next_state = BNX2X_Q_STATE_STOPPED;
5116 
5117 		else if (cmd == BNX2X_Q_CMD_UPDATE) {
5118 			/* If "active" state change is requested, update the
5119 			 * state accordingly.
5120 			 */
5121 			if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5122 				     &update_params->update_flags) &&
5123 			    test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5124 				     &update_params->update_flags)) {
5125 				if (o->num_tx_only == 0)
5126 					next_state = BNX2X_Q_STATE_ACTIVE;
5127 				else /* tx only queues exist for this queue */
5128 					next_state = BNX2X_Q_STATE_MULTI_COS;
5129 			} else
5130 				next_state = BNX2X_Q_STATE_INACTIVE;
5131 		}
5132 
5133 		break;
5134 	case BNX2X_Q_STATE_STOPPED:
5135 		if (cmd == BNX2X_Q_CMD_TERMINATE)
5136 			next_state = BNX2X_Q_STATE_TERMINATED;
5137 
5138 		break;
5139 	case BNX2X_Q_STATE_TERMINATED:
5140 		if (cmd == BNX2X_Q_CMD_CFC_DEL)
5141 			next_state = BNX2X_Q_STATE_RESET;
5142 
5143 		break;
5144 	default:
5145 		BNX2X_ERR("Illegal state: %d\n", state);
5146 	}
5147 
5148 	/* Transition is assured */
5149 	if (next_state != BNX2X_Q_STATE_MAX) {
5150 		DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n",
5151 				 state, cmd, next_state);
5152 		o->next_state = next_state;
5153 		o->next_tx_only = next_tx_only;
5154 		return 0;
5155 	}
5156 
5157 	DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd);
5158 
5159 	return -EINVAL;
5160 }
5161 
5162 void bnx2x_init_queue_obj(struct bnx2x *bp,
5163 			  struct bnx2x_queue_sp_obj *obj,
5164 			  u8 cl_id, u32 *cids, u8 cid_cnt, u8 func_id,
5165 			  void *rdata,
5166 			  dma_addr_t rdata_mapping, unsigned long type)
5167 {
5168 	memset(obj, 0, sizeof(*obj));
5169 
5170 	/* We support only BNX2X_MULTI_TX_COS Tx CoS at the moment */
5171 	BUG_ON(BNX2X_MULTI_TX_COS < cid_cnt);
5172 
5173 	memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
5174 	obj->max_cos = cid_cnt;
5175 	obj->cl_id = cl_id;
5176 	obj->func_id = func_id;
5177 	obj->rdata = rdata;
5178 	obj->rdata_mapping = rdata_mapping;
5179 	obj->type = type;
5180 	obj->next_state = BNX2X_Q_STATE_MAX;
5181 
5182 	if (CHIP_IS_E1x(bp))
5183 		obj->send_cmd = bnx2x_queue_send_cmd_e1x;
5184 	else
5185 		obj->send_cmd = bnx2x_queue_send_cmd_e2;
5186 
5187 	obj->check_transition = bnx2x_queue_chk_transition;
5188 
5189 	obj->complete_cmd = bnx2x_queue_comp_cmd;
5190 	obj->wait_comp = bnx2x_queue_wait_comp;
5191 	obj->set_pending = bnx2x_queue_set_pending;
5192 }
5193 
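/* Illustrative sketch (not part of the driver): minimal queue object
 * initialization for a single-CoS L2 queue, as the driver core does per
 * fastpath ring. The function name is hypothetical and the parameter
 * values are placeholders supplied by the caller.
 */
static void __maybe_unused bnx2x_init_queue_obj_example(struct bnx2x *bp,
				struct bnx2x_queue_sp_obj *q_obj,
				u8 cl_id, u32 cid, u8 func_id,
				void *rdata, dma_addr_t rdata_mapping)
{
	unsigned long q_type = 0;

	/* A regular L2 queue has both an Rx and a Tx side */
	__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
	__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);

	bnx2x_init_queue_obj(bp, q_obj, cl_id, &cid, 1 /* cid_cnt */,
			     func_id, rdata, rdata_mapping, q_type);
}
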
5194 /********************** Function state object *********************************/
5195 enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
5196 					   struct bnx2x_func_sp_obj *o)
5197 {
5198 	/* in the middle of a transaction - return INVALID state */
5199 	if (o->pending)
5200 		return BNX2X_F_STATE_MAX;
5201 
5202 	/*
5203 	 * Ensure the order of reading o->pending and o->state:
5204 	 * o->pending should be read first.
5205 	 */
5206 	rmb();
5207 
5208 	return o->state;
5209 }
5210 
5211 static int bnx2x_func_wait_comp(struct bnx2x *bp,
5212 				struct bnx2x_func_sp_obj *o,
5213 				enum bnx2x_func_cmd cmd)
5214 {
5215 	return bnx2x_state_wait(bp, cmd, &o->pending);
5216 }
5217 
5218 /**
5219  * bnx2x_func_state_change_comp - complete the state machine transition
5220  *
5221  * @bp:		device handle
5222  * @o:		function state object
5223  * @cmd:	command being completed
5224  *
5225  * Called on state change transition. Completes the state
5226  * machine transition only - no HW interaction.
5227  */
5228 static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
5229 					       struct bnx2x_func_sp_obj *o,
5230 					       enum bnx2x_func_cmd cmd)
5231 {
5232 	unsigned long cur_pending = o->pending;
5233 
5234 	if (!test_and_clear_bit(cmd, &cur_pending)) {
5235 		BNX2X_ERR("Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d\n",
5236 			  cmd, BP_FUNC(bp), o->state,
5237 			  cur_pending, o->next_state);
5238 		return -EINVAL;
5239 	}
5240 
5241 	DP(BNX2X_MSG_SP,
5242 	   "Completing command %d for func %d, setting state to %d\n",
5243 	   cmd, BP_FUNC(bp), o->next_state);
5244 
5245 	o->state = o->next_state;
5246 	o->next_state = BNX2X_F_STATE_MAX;
5247 
5248 	/* It's important that o->state and o->next_state are
5249 	 * updated before o->pending.
5250 	 */
5251 	wmb();
5252 
5253 	clear_bit(cmd, &o->pending);
5254 	smp_mb__after_clear_bit();
5255 
5256 	return 0;
5257 }
5258 
5259 /**
5260  * bnx2x_func_comp_cmd - complete the state change command
5261  *
5262  * @bp:		device handle
5263  * @o:		function state object
5264  * @cmd:	command to complete
5265  *
5266  * Checks that the arrived completion is expected.
5267  */
5268 static int bnx2x_func_comp_cmd(struct bnx2x *bp,
5269 			       struct bnx2x_func_sp_obj *o,
5270 			       enum bnx2x_func_cmd cmd)
5271 {
5272 	/* Complete the state machine part first, check if it's a
5273 	 * legal completion.
5274 	 */
5275 	int rc = bnx2x_func_state_change_comp(bp, o, cmd);
5276 	return rc;
5277 }
5278 
5279 /**
5280  * bnx2x_func_chk_transition - perform function state machine transition
5281  *
5282  * @bp:		device handle
5283  * @o:		function state object
5284  * @params:	function state parameters
5285  *
5286  * It both checks if the requested command is legal in a current
5287  * state and, if it's legal, sets a `next_state' in the object
5288  * that will be used in the completion flow to set the `state'
5289  * of the object.
5290  *
5291  * returns 0 if a requested command is a legal transition,
5292  *         -EINVAL otherwise.
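 *
 * A sketch of the legal transitions checked below (illustrative):
 *
 *   RESET --HW_INIT--> INITIALIZED --START--> STARTED
 *   INITIALIZED --HW_RESET--> RESET
 *   STARTED --STOP--> INITIALIZED
 *   STARTED --TX_STOP--> TX_STOPPED --TX_START--> STARTED
 *   STARTED --AFEX_UPDATE or AFEX_VIFLISTS--> STARTED (unless STOP pending)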
5293  */
5294 static int bnx2x_func_chk_transition(struct bnx2x *bp,
5295 				     struct bnx2x_func_sp_obj *o,
5296 				     struct bnx2x_func_state_params *params)
5297 {
5298 	enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX;
5299 	enum bnx2x_func_cmd cmd = params->cmd;
5300 
5301 	/*
5302 	 * Forget all pending-for-completion commands if a driver-only state
5303 	 * transition has been requested.
5304 	 */
5305 	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5306 		o->pending = 0;
5307 		o->next_state = BNX2X_F_STATE_MAX;
5308 	}
5309 
5310 	/*
5311 	 * Don't allow a next state transition if we are in the middle of
5312 	 * the previous one.
5313 	 */
5314 	if (o->pending)
5315 		return -EBUSY;
5316 
5317 	switch (state) {
5318 	case BNX2X_F_STATE_RESET:
5319 		if (cmd == BNX2X_F_CMD_HW_INIT)
5320 			next_state = BNX2X_F_STATE_INITIALIZED;
5321 
5322 		break;
5323 	case BNX2X_F_STATE_INITIALIZED:
5324 		if (cmd == BNX2X_F_CMD_START)
5325 			next_state = BNX2X_F_STATE_STARTED;
5326 
5327 		else if (cmd == BNX2X_F_CMD_HW_RESET)
5328 			next_state = BNX2X_F_STATE_RESET;
5329 
5330 		break;
5331 	case BNX2X_F_STATE_STARTED:
5332 		if (cmd == BNX2X_F_CMD_STOP)
5333 			next_state = BNX2X_F_STATE_INITIALIZED;
5334 		/* AFEX ramrods can be sent only in the STARTED state, and only
5335 		 * if a FUNCTION_STOP ramrod completion is not pending;
5336 		 * for these events the next state remains STARTED.
5337 		 */
5338 		else if ((cmd == BNX2X_F_CMD_AFEX_UPDATE) &&
5339 			 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5340 			next_state = BNX2X_F_STATE_STARTED;
5341 
5342 		else if ((cmd == BNX2X_F_CMD_AFEX_VIFLISTS) &&
5343 			 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5344 			next_state = BNX2X_F_STATE_STARTED;
5345 		else if (cmd == BNX2X_F_CMD_TX_STOP)
5346 			next_state = BNX2X_F_STATE_TX_STOPPED;
5347 
5348 		break;
5349 	case BNX2X_F_STATE_TX_STOPPED:
5350 		if (cmd == BNX2X_F_CMD_TX_START)
5351 			next_state = BNX2X_F_STATE_STARTED;
5352 
5353 		break;
5354 	default:
5355 		BNX2X_ERR("Unknown state: %d\n", state);
5356 	}
5357 
5358 	/* Transition is assured */
5359 	if (next_state != BNX2X_F_STATE_MAX) {
5360 		DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n",
5361 				 state, cmd, next_state);
5362 		o->next_state = next_state;
5363 		return 0;
5364 	}
5365 
5366 	DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n",
5367 			 state, cmd);
5368 
5369 	return -EINVAL;
5370 }
5371 
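/* For reference, the legal transitions checked above boil down to the
 * following (a summary derived from the switch statement, not used by
 * the code itself):
 *
 *	RESET       --HW_INIT------->  INITIALIZED
 *	INITIALIZED --START--------->  STARTED
 *	INITIALIZED --HW_RESET------>  RESET
 *	STARTED     --STOP---------->  INITIALIZED
 *	STARTED     --AFEX_UPDATE--->  STARTED     (if no STOP pending)
 *	STARTED     --AFEX_VIFLISTS->  STARTED     (if no STOP pending)
 *	STARTED     --TX_STOP------->  TX_STOPPED
 *	TX_STOPPED  --TX_START------>  STARTED
 */
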
5372 /**
5373  * bnx2x_func_init_func - performs HW init at function stage
5374  *
5375  * @bp:		device handle
5376  * @drv:	driver specific operations
5377  *
5378  * Init HW when the current phase is
5379  * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only
5380  * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize FUNCTION-only
5381  */
5382 static inline int bnx2x_func_init_func(struct bnx2x *bp,
5383 				       const struct bnx2x_func_sp_drv_ops *drv)
5384 {
5385 	return drv->init_hw_func(bp);
5386 }
5387 
5388 /**
5389  * bnx2x_func_init_port - performs HW init at port stage
5390  *
5391  * @bp:		device handle
5392  * @drv:	driver specific operations
5393  *
5394  * Init HW when the current phase is
5395  * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
5396  * FUNCTION-only HW blocks.
5397  *
5398  */
5399 static inline int bnx2x_func_init_port(struct bnx2x *bp,
5400 				       const struct bnx2x_func_sp_drv_ops *drv)
5401 {
5402 	int rc = drv->init_hw_port(bp);
5403 	if (rc)
5404 		return rc;
5405 
5406 	return bnx2x_func_init_func(bp, drv);
5407 }
5408 
5409 /**
5410  * bnx2x_func_init_cmn_chip - performs HW init at chip-common stage
5411  *
5412  * @bp:		device handle
5413  * @drv:	driver specific operations
5414  *
5415  * Init HW when the current phase is
5416  * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
5417  * PORT-only and FUNCTION-only HW blocks.
5418  */
5419 static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp,
5420 					const struct bnx2x_func_sp_drv_ops *drv)
5421 {
5422 	int rc = drv->init_hw_cmn_chip(bp);
5423 	if (rc)
5424 		return rc;
5425 
5426 	return bnx2x_func_init_port(bp, drv);
5427 }
5428 
5429 /**
5430  * bnx2x_func_init_cmn - performs HW init at common stage
5431  *
5432  * @bp:		device handle
5433  * @drv:	driver specific operations
5434  *
5435  * Init HW when the current phase is
5436  * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON,
5437  * PORT-only and FUNCTION-only HW blocks.
5438  */
5439 static inline int bnx2x_func_init_cmn(struct bnx2x *bp,
5440 				      const struct bnx2x_func_sp_drv_ops *drv)
5441 {
5442 	int rc = drv->init_hw_cmn(bp);
5443 	if (rc)
5444 		return rc;
5445 
5446 	return bnx2x_func_init_port(bp, drv);
5447 }
5448 
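/* The init helpers above nest, so each load phase also initializes every
 * narrower stage (summary only):
 *
 *	init_cmn_chip: COMMON_CHIP -> PORT -> FUNCTION
 *	init_cmn:      COMMON      -> PORT -> FUNCTION
 *	init_port:                    PORT -> FUNCTION
 *	init_func:                            FUNCTION
 */
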
5449 static int bnx2x_func_hw_init(struct bnx2x *bp,
5450 			      struct bnx2x_func_state_params *params)
5451 {
5452 	u32 load_code = params->params.hw_init.load_phase;
5453 	struct bnx2x_func_sp_obj *o = params->f_obj;
5454 	const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5455 	int rc = 0;
5456 
5457 	DP(BNX2X_MSG_SP, "function %d  load_code %x\n",
5458 			 BP_ABS_FUNC(bp), load_code);
5459 
5460 	/* Prepare buffers for unzipping the FW */
5461 	rc = drv->gunzip_init(bp);
5462 	if (rc)
5463 		return rc;
5464 
5465 	/* Prepare FW */
5466 	rc = drv->init_fw(bp);
5467 	if (rc) {
5468 		BNX2X_ERR("Error loading firmware\n");
5469 		goto init_err;
5470 	}
5471 
5472 	/* Handle the beginning of COMMON_XXX phases separately... */
5473 	switch (load_code) {
5474 	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5475 		rc = bnx2x_func_init_cmn_chip(bp, drv);
5476 		if (rc)
5477 			goto init_err;
5478 
5479 		break;
5480 	case FW_MSG_CODE_DRV_LOAD_COMMON:
5481 		rc = bnx2x_func_init_cmn(bp, drv);
5482 		if (rc)
5483 			goto init_err;
5484 
5485 		break;
5486 	case FW_MSG_CODE_DRV_LOAD_PORT:
5487 		rc = bnx2x_func_init_port(bp, drv);
5488 		if (rc)
5489 			goto init_err;
5490 
5491 		break;
5492 	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5493 		rc = bnx2x_func_init_func(bp, drv);
5494 		if (rc)
5495 			goto init_err;
5496 
5497 		break;
5498 	default:
5499 		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5500 		rc = -EINVAL;
5501 	}
5502 
5503 init_err:
5504 	drv->gunzip_end(bp);
5505 
5506 	/* In case of success, complete the command immediately: no ramrods
5507 	 * have been sent.
5508 	 */
5509 	if (!rc)
5510 		o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT);
5511 
5512 	return rc;
5513 }
5514 
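/* HW_INIT is a "driver only" command: all the work above happens
 * synchronously through the drv callbacks and no ramrod is posted, so
 * the state machine is completed inline via o->complete_cmd() rather
 * than from an event queue completion.
 */
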
5515 /**
5516  * bnx2x_func_reset_func - reset HW at function stage
5517  *
5518  * @bp:		device handle
5519  * @drv:	driver specific operations
5520  *
5521  * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
5522  * FUNCTION-only HW blocks.
5523  */
5524 static inline void bnx2x_func_reset_func(struct bnx2x *bp,
5525 					const struct bnx2x_func_sp_drv_ops *drv)
5526 {
5527 	drv->reset_hw_func(bp);
5528 }
5529 
5530 /**
5531  * bnx2x_func_reset_port - reset HW at port stage
5532  *
5533  * @bp:		device handle
5534  * @drv:	driver specific operations
5535  *
5536  * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
5537  * FUNCTION-only and PORT-only HW blocks.
5538  *
5539  *                 !!!IMPORTANT!!!
5540  *
5541  * It's important to call reset_port before reset_func() as the last thing
5542  * reset_func does is pf_disable(), thus disabling PGLUE_B, which
5543  * makes any further DMAE transactions impossible.
5544  */
5545 static inline void bnx2x_func_reset_port(struct bnx2x *bp,
5546 					const struct bnx2x_func_sp_drv_ops *drv)
5547 {
5548 	drv->reset_hw_port(bp);
5549 	bnx2x_func_reset_func(bp, drv);
5550 }
5551 
5552 /**
5553  * bnx2x_func_reset_cmn - reset HW at common stage
5554  *
5555  * @bp:		device handle
5556  * @drv:	driver specific operations
5557  *
5558  * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
5559  * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
5560  * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
5561  */
5562 static inline void bnx2x_func_reset_cmn(struct bnx2x *bp,
5563 					const struct bnx2x_func_sp_drv_ops *drv)
5564 {
5565 	bnx2x_func_reset_port(bp, drv);
5566 	drv->reset_hw_cmn(bp);
5567 }
5568 
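/* The reset helpers mirror the init cascade, with the port/function
 * ordering constraint described above (summary only):
 *
 *	reset_cmn:  PORT -> FUNCTION -> COMMON
 *	reset_port: PORT -> FUNCTION
 *	reset_func:         FUNCTION
 */
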
5570 static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
5571 				      struct bnx2x_func_state_params *params)
5572 {
5573 	u32 reset_phase = params->params.hw_reset.reset_phase;
5574 	struct bnx2x_func_sp_obj *o = params->f_obj;
5575 	const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5576 
5577 	DP(BNX2X_MSG_SP, "function %d  reset_phase %x\n", BP_ABS_FUNC(bp),
5578 			 reset_phase);
5579 
5580 	switch (reset_phase) {
5581 	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5582 		bnx2x_func_reset_cmn(bp, drv);
5583 		break;
5584 	case FW_MSG_CODE_DRV_UNLOAD_PORT:
5585 		bnx2x_func_reset_port(bp, drv);
5586 		break;
5587 	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5588 		bnx2x_func_reset_func(bp, drv);
5589 		break;
5590 	default:
5591 		BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n",
5592 			   reset_phase);
5593 		break;
5594 	}
5595 
5596 	/* Complete the command immediately: no ramrods have been sent. */
5597 	o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET);
5598 
5599 	return 0;
5600 }
5601 
5602 static inline int bnx2x_func_send_start(struct bnx2x *bp,
5603 					struct bnx2x_func_state_params *params)
5604 {
5605 	struct bnx2x_func_sp_obj *o = params->f_obj;
5606 	struct function_start_data *rdata =
5607 		(struct function_start_data *)o->rdata;
5608 	dma_addr_t data_mapping = o->rdata_mapping;
5609 	struct bnx2x_func_start_params *start_params = &params->params.start;
5610 
5611 	memset(rdata, 0, sizeof(*rdata));
5612 
5613 	/* Fill the ramrod data with provided parameters */
5614 	rdata->function_mode = cpu_to_le16(start_params->mf_mode);
5615 	rdata->sd_vlan_tag   = cpu_to_le16(start_params->sd_vlan_tag);
5616 	rdata->path_id       = BP_PATH(bp);
5617 	rdata->network_cos_mode = start_params->network_cos_mode;
5618 
5619 	/*
5620 	 *  No need for an explicit memory barrier here as long as we
5621 	 *  ensure the ordering of writing to the SPQ element
5622 	 *  and updating of the SPQ producer, which involves a memory
5623 	 *  read.  If the memory read is removed we will have to put a
5624 	 *  full memory barrier there (inside bnx2x_sp_post()).
5625 	 */
5626 
5627 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
5628 			     U64_HI(data_mapping),
5629 			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5630 }
5631 
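/* bnx2x_func_send_start() above and the send helpers below share one
 * pattern: build the ramrod payload in the DMA-coherent rdata buffer,
 * then post a single SPQ element carrying the buffer's physical address
 * split into U64_HI()/U64_LO() halves.  The completion is expected to
 * surface later through o->complete_cmd() (bnx2x_func_comp_cmd() above).
 */
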
5632 static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
5633 					 struct bnx2x_func_state_params *params)
5634 {
5635 	struct bnx2x_func_sp_obj *o = params->f_obj;
5636 	struct function_update_data *rdata =
5637 		(struct function_update_data *)o->afex_rdata;
5638 	dma_addr_t data_mapping = o->afex_rdata_mapping;
5639 	struct bnx2x_func_afex_update_params *afex_update_params =
5640 		&params->params.afex_update;
5641 
5642 	memset(rdata, 0, sizeof(*rdata));
5643 
5644 	/* Fill the ramrod data with provided parameters */
5645 	rdata->vif_id_change_flg = 1;
5646 	rdata->vif_id = cpu_to_le16(afex_update_params->vif_id);
5647 	rdata->afex_default_vlan_change_flg = 1;
5648 	rdata->afex_default_vlan =
5649 		cpu_to_le16(afex_update_params->afex_default_vlan);
5650 	rdata->allowed_priorities_change_flg = 1;
5651 	rdata->allowed_priorities = afex_update_params->allowed_priorities;
5652 
5653 	/*  No need for an explicit memory barrier here as long as we
5654 	 *  ensure the ordering of writing to the SPQ element
5655 	 *  and updating of the SPQ producer, which involves a memory
5656 	 *  read.  If the memory read is removed we will have to put a
5657 	 *  full memory barrier there (inside bnx2x_sp_post()).
5658 	 */
5659 	DP(BNX2X_MSG_SP,
5660 	   "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
5661 	   rdata->vif_id,
5662 	   rdata->afex_default_vlan, rdata->allowed_priorities);
5663 
5664 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5665 			     U64_HI(data_mapping),
5666 			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5667 }
5668 
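/* Each value in function_update_data is paired with a *_change_flg bit;
 * setting the flag, as done above, marks the corresponding field as one
 * the firmware should actually apply.
 */
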
5669 static
5670 inline int bnx2x_func_send_afex_viflists(struct bnx2x *bp,
5671 					 struct bnx2x_func_state_params *params)
5672 {
5673 	struct bnx2x_func_sp_obj *o = params->f_obj;
5674 	struct afex_vif_list_ramrod_data *rdata =
5675 		(struct afex_vif_list_ramrod_data *)o->afex_rdata;
5676 	struct bnx2x_func_afex_viflists_params *afex_viflist_params =
5677 		&params->params.afex_viflists;
5678 	u64 *p_rdata = (u64 *)rdata;
5679 
5680 	memset(rdata, 0, sizeof(*rdata));
5681 
5682 	/* Fill the ramrod data with provided parameters */
5683 	rdata->vif_list_index = afex_viflist_params->vif_list_index;
5684 	rdata->func_bit_map = afex_viflist_params->func_bit_map;
5685 	rdata->afex_vif_list_command =
5686 		afex_viflist_params->afex_vif_list_command;
5687 	rdata->func_to_clear = afex_viflist_params->func_to_clear;
5688 
5689 	/* send the sub-command type in the echo field */
5690 	rdata->echo = afex_viflist_params->afex_vif_list_command;
5691 
5692 	/*  No need for an explicit memory barrier here as long as we
5693 	 *  ensure the ordering of writing to the SPQ element
5694 	 *  and updating of the SPQ producer, which involves a memory
5695 	 *  read.  If the memory read is removed we will have to put a
5696 	 *  full memory barrier there (inside bnx2x_sp_post()).
5697 	 */
5698 
5699 	DP(BNX2X_MSG_SP, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x\n",
5700 	   rdata->afex_vif_list_command, rdata->vif_list_index,
5701 	   rdata->func_bit_map, rdata->func_to_clear);
5702 
5703 	/* this ramrod sends data directly and not through DMA mapping */
5704 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
5705 			     U64_HI(*p_rdata), U64_LO(*p_rdata),
5706 			     NONE_CONNECTION_TYPE);
5707 }
5708 
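/* Note: struct afex_vif_list_ramrod_data fits in a single u64, which is
 * why it can travel inside the hi/lo address words of the SPQ element
 * itself rather than being referenced through a DMA address like the
 * other ramrods in this file.
 */
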
5709 static inline int bnx2x_func_send_stop(struct bnx2x *bp,
5710 				       struct bnx2x_func_state_params *params)
5711 {
5712 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0,
5713 			     NONE_CONNECTION_TYPE);
5714 }
5715 
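/* FUNCTION_STOP (and STOP_TRAFFIC below) carry no parameters, hence the
 * zeroed data-address words in these posts.
 */
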
5716 static inline int bnx2x_func_send_tx_stop(struct bnx2x *bp,
5717 				       struct bnx2x_func_state_params *params)
5718 {
5719 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, 0,
5720 			     NONE_CONNECTION_TYPE);
5721 }

5722 static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
5723 				       struct bnx2x_func_state_params *params)
5724 {
5725 	struct bnx2x_func_sp_obj *o = params->f_obj;
5726 	struct flow_control_configuration *rdata =
5727 		(struct flow_control_configuration *)o->rdata;
5728 	dma_addr_t data_mapping = o->rdata_mapping;
5729 	struct bnx2x_func_tx_start_params *tx_start_params =
5730 		&params->params.tx_start;
5731 	int i;
5732 
5733 	memset(rdata, 0, sizeof(*rdata));
5734 
5735 	rdata->dcb_enabled = tx_start_params->dcb_enabled;
5736 	rdata->dcb_version = tx_start_params->dcb_version;
5737 	rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0_en;
5738 
5739 	for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
5740 		rdata->traffic_type_to_priority_cos[i] =
5741 			tx_start_params->traffic_type_to_priority_cos[i];
5742 
5743 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
5744 			     U64_HI(data_mapping),
5745 			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5746 }
5747 
5748 static int bnx2x_func_send_cmd(struct bnx2x *bp,
5749 			       struct bnx2x_func_state_params *params)
5750 {
5751 	switch (params->cmd) {
5752 	case BNX2X_F_CMD_HW_INIT:
5753 		return bnx2x_func_hw_init(bp, params);
5754 	case BNX2X_F_CMD_START:
5755 		return bnx2x_func_send_start(bp, params);
5756 	case BNX2X_F_CMD_STOP:
5757 		return bnx2x_func_send_stop(bp, params);
5758 	case BNX2X_F_CMD_HW_RESET:
5759 		return bnx2x_func_hw_reset(bp, params);
5760 	case BNX2X_F_CMD_AFEX_UPDATE:
5761 		return bnx2x_func_send_afex_update(bp, params);
5762 	case BNX2X_F_CMD_AFEX_VIFLISTS:
5763 		return bnx2x_func_send_afex_viflists(bp, params);
5764 	case BNX2X_F_CMD_TX_STOP:
5765 		return bnx2x_func_send_tx_stop(bp, params);
5766 	case BNX2X_F_CMD_TX_START:
5767 		return bnx2x_func_send_tx_start(bp, params);
5768 	default:
5769 		BNX2X_ERR("Unknown command: %d\n", params->cmd);
5770 		return -EINVAL;
5771 	}
5772 }
5773 
5774 void bnx2x_init_func_obj(struct bnx2x *bp,
5775 			 struct bnx2x_func_sp_obj *obj,
5776 			 void *rdata, dma_addr_t rdata_mapping,
5777 			 void *afex_rdata, dma_addr_t afex_rdata_mapping,
5778 			 struct bnx2x_func_sp_drv_ops *drv_iface)
5779 {
5780 	memset(obj, 0, sizeof(*obj));
5781 
5782 	mutex_init(&obj->one_pending_mutex);
5783 
5784 	obj->rdata = rdata;
5785 	obj->rdata_mapping = rdata_mapping;
5786 	obj->afex_rdata = afex_rdata;
5787 	obj->afex_rdata_mapping = afex_rdata_mapping;
5788 	obj->send_cmd = bnx2x_func_send_cmd;
5789 	obj->check_transition = bnx2x_func_chk_transition;
5790 	obj->complete_cmd = bnx2x_func_comp_cmd;
5791 	obj->wait_comp = bnx2x_func_wait_comp;
5792 
5793 	obj->drv = drv_iface;
5794 }
5795 
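/* Usage sketch (an illustration assuming the bnx2x_sp()/bnx2x_sp_mapping()
 * slowpath-buffer accessors and a driver ops table named bnx2x_func_sp_drv;
 * the actual call site lives elsewhere in the driver):
 *
 *	bnx2x_init_func_obj(bp, &bp->func_obj,
 *			    bnx2x_sp(bp, func_rdata),
 *			    bnx2x_sp_mapping(bp, func_rdata),
 *			    bnx2x_sp(bp, func_afex_rdata),
 *			    bnx2x_sp_mapping(bp, func_afex_rdata),
 *			    &bnx2x_func_sp_drv);
 */
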
5796 /**
5797  * bnx2x_func_state_change - perform Function state change transition
5798  *
5799  * @bp:		device handle
5800  * @params:	parameters to perform the transaction
5801  *
5802  * returns 0 in case of successfully completed transition,
5803  *         negative error code in case of failure, positive
5804  *         (EBUSY) value if there is a completion that is
5805  *         still pending (possible only if RAMROD_COMP_WAIT is
5806  *         not set in params->ramrod_flags for asynchronous
5807  *         commands).
5808  */
5809 int bnx2x_func_state_change(struct bnx2x *bp,
5810 			    struct bnx2x_func_state_params *params)
5811 {
5812 	struct bnx2x_func_sp_obj *o = params->f_obj;
5813 	int rc;
5814 	enum bnx2x_func_cmd cmd = params->cmd;
5815 	unsigned long *pending = &o->pending;
5816 
5817 	mutex_lock(&o->one_pending_mutex);
5818 
5819 	/* Check that the requested transition is legal */
5820 	if (o->check_transition(bp, o, params)) {
5821 		mutex_unlock(&o->one_pending_mutex);
5822 		return -EINVAL;
5823 	}
5824 
5825 	/* Set "pending" bit */
5826 	set_bit(cmd, pending);
5827 
5828 	/* Don't send a command if only driver cleanup was requested */
5829 	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5830 		bnx2x_func_state_change_comp(bp, o, cmd);
5831 		mutex_unlock(&o->one_pending_mutex);
5832 	} else {
5833 		/* Send a ramrod */
5834 		rc = o->send_cmd(bp, params);
5835 
5836 		mutex_unlock(&o->one_pending_mutex);
5837 
5838 		if (rc) {
5839 			o->next_state = BNX2X_F_STATE_MAX;
5840 			clear_bit(cmd, pending);
5841 			smp_mb__after_clear_bit();
5842 			return rc;
5843 		}
5844 
5845 		if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
5846 			rc = o->wait_comp(bp, o, cmd);
5847 			if (rc)
5848 				return rc;
5849 
5850 			return 0;
5851 		}
5852 	}
5853 
5854 	return !!test_bit(cmd, pending);
5855 }
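
/* Caller sketch (illustrative; the command and flags chosen here are
 * assumptions for the example, showing a fully synchronous request):
 *
 *	struct bnx2x_func_state_params func_params = {NULL};
 *
 *	func_params.f_obj = &bp->func_obj;
 *	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
 *	func_params.cmd = BNX2X_F_CMD_TX_STOP;
 *
 *	return bnx2x_func_state_change(bp, &func_params);
 */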
5856