xref: /linux/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c (revision b43ab901d671e3e3cad425ea5e9a3c74e266dcdd)
1 /* bnx2x_sp.c: Broadcom Everest network driver.
2  *
3  * Copyright 2011 Broadcom Corporation
4  *
5  * Unless you and Broadcom execute a separate written software license
6  * agreement governing use of this software, this software is licensed to you
7  * under the terms of the GNU General Public License version 2, available
8  * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
9  *
10  * Notwithstanding the above, under no circumstances may you combine this
11  * software in any way with any other Broadcom software provided under a
12  * license other than the GPL, without Broadcom's express prior written
13  * consent.
14  *
15  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
16  * Written by: Vladislav Zolotarov
17  *
18  */
19 
20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21 
22 #include <linux/module.h>
23 #include <linux/crc32.h>
24 #include <linux/netdevice.h>
25 #include <linux/etherdevice.h>
26 #include <linux/crc32c.h>
27 #include "bnx2x.h"
28 #include "bnx2x_cmn.h"
29 #include "bnx2x_sp.h"
30 
31 #define BNX2X_MAX_EMUL_MULTI		16
32 
33 #define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
34 
35 /**** Exe Queue interfaces ****/
36 
37 /**
38  * bnx2x_exe_queue_init - init the Exe Queue object
39  *
40  * @o:		poiter to the object
41  * @exe_len:	length
42  * @owner:	poiter to the owner
43  * @validate:	validate function pointer
44  * @optimize:	optimize function pointer
45  * @exec:	execute function pointer
46  * @get:	get function pointer
47  */
48 static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
49 					struct bnx2x_exe_queue_obj *o,
50 					int exe_len,
51 					union bnx2x_qable_obj *owner,
52 					exe_q_validate validate,
53 					exe_q_optimize optimize,
54 					exe_q_execute exec,
55 					exe_q_get get)
56 {
57 	memset(o, 0, sizeof(*o));
58 
59 	INIT_LIST_HEAD(&o->exe_queue);
60 	INIT_LIST_HEAD(&o->pending_comp);
61 
62 	spin_lock_init(&o->lock);
63 
64 	o->exe_chunk_len = exe_len;
65 	o->owner         = owner;
66 
67 	/* Owner specific callbacks */
68 	o->validate      = validate;
69 	o->optimize      = optimize;
70 	o->execute       = exec;
71 	o->get           = get;
72 
73 	DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk "
74 			 "length of %d\n", exe_len);
75 }
76 
77 static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp,
78 					     struct bnx2x_exeq_elem *elem)
79 {
80 	DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n");
81 	kfree(elem);
82 }
83 
84 static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o)
85 {
86 	struct bnx2x_exeq_elem *elem;
87 	int cnt = 0;
88 
89 	spin_lock_bh(&o->lock);
90 
91 	list_for_each_entry(elem, &o->exe_queue, link)
92 		cnt++;
93 
94 	spin_unlock_bh(&o->lock);
95 
96 	return cnt;
97 }
98 
99 /**
100  * bnx2x_exe_queue_add - add a new element to the execution queue
101  *
102  * @bp:		driver handle
103  * @o:		queue
104  * @cmd:	new command to add
105  * @restore:	true - do not optimize the command
106  *
107  * If the element is optimized or is illegal, frees it.
108  */
109 static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
110 				      struct bnx2x_exe_queue_obj *o,
111 				      struct bnx2x_exeq_elem *elem,
112 				      bool restore)
113 {
114 	int rc;
115 
116 	spin_lock_bh(&o->lock);
117 
118 	if (!restore) {
119 		/* Try to cancel this element queue */
120 		rc = o->optimize(bp, o->owner, elem);
121 		if (rc)
122 			goto free_and_exit;
123 
124 		/* Check if this request is ok */
125 		rc = o->validate(bp, o->owner, elem);
126 		if (rc) {
127 			BNX2X_ERR("Preamble failed: %d\n", rc);
128 			goto free_and_exit;
129 		}
130 	}
131 
132 	/* If so, add it to the execution queue */
133 	list_add_tail(&elem->link, &o->exe_queue);
134 
135 	spin_unlock_bh(&o->lock);
136 
137 	return 0;
138 
139 free_and_exit:
140 	bnx2x_exe_queue_free_elem(bp, elem);
141 
142 	spin_unlock_bh(&o->lock);
143 
144 	return rc;
145 
146 }
147 
148 static inline void __bnx2x_exe_queue_reset_pending(
149 	struct bnx2x *bp,
150 	struct bnx2x_exe_queue_obj *o)
151 {
152 	struct bnx2x_exeq_elem *elem;
153 
154 	while (!list_empty(&o->pending_comp)) {
155 		elem = list_first_entry(&o->pending_comp,
156 					struct bnx2x_exeq_elem, link);
157 
158 		list_del(&elem->link);
159 		bnx2x_exe_queue_free_elem(bp, elem);
160 	}
161 }
162 
163 static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
164 						 struct bnx2x_exe_queue_obj *o)
165 {
166 
167 	spin_lock_bh(&o->lock);
168 
169 	__bnx2x_exe_queue_reset_pending(bp, o);
170 
171 	spin_unlock_bh(&o->lock);
172 
173 }
174 
175 /**
176  * bnx2x_exe_queue_step - execute one execution chunk atomically
177  *
178  * @bp:			driver handle
179  * @o:			queue
180  * @ramrod_flags:	flags
181  *
182  * (Atomicy is ensured using the exe_queue->lock).
183  */
184 static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
185 				       struct bnx2x_exe_queue_obj *o,
186 				       unsigned long *ramrod_flags)
187 {
188 	struct bnx2x_exeq_elem *elem, spacer;
189 	int cur_len = 0, rc;
190 
191 	memset(&spacer, 0, sizeof(spacer));
192 
193 	spin_lock_bh(&o->lock);
194 
195 	/*
196 	 * Next step should not be performed until the current is finished,
197 	 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
198 	 * properly clear object internals without sending any command to the FW
199 	 * which also implies there won't be any completion to clear the
200 	 * 'pending' list.
201 	 */
202 	if (!list_empty(&o->pending_comp)) {
203 		if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
204 			DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: "
205 					 "resetting pending_comp\n");
206 			__bnx2x_exe_queue_reset_pending(bp, o);
207 		} else {
208 			spin_unlock_bh(&o->lock);
209 			return 1;
210 		}
211 	}
212 
213 	/*
214 	 * Run through the pending commands list and create a next
215 	 * execution chunk.
216 	 */
217 	while (!list_empty(&o->exe_queue)) {
218 		elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem,
219 					link);
220 		WARN_ON(!elem->cmd_len);
221 
222 		if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
223 			cur_len += elem->cmd_len;
224 			/*
225 			 * Prevent from both lists being empty when moving an
226 			 * element. This will allow the call of
227 			 * bnx2x_exe_queue_empty() without locking.
228 			 */
229 			list_add_tail(&spacer.link, &o->pending_comp);
230 			mb();
231 			list_del(&elem->link);
232 			list_add_tail(&elem->link, &o->pending_comp);
233 			list_del(&spacer.link);
234 		} else
235 			break;
236 	}
237 
238 	/* Sanity check */
239 	if (!cur_len) {
240 		spin_unlock_bh(&o->lock);
241 		return 0;
242 	}
243 
244 	rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
245 	if (rc < 0)
246 		/*
247 		 *  In case of an error return the commands back to the queue
248 		 *  and reset the pending_comp.
249 		 */
250 		list_splice_init(&o->pending_comp, &o->exe_queue);
251 	else if (!rc)
252 		/*
253 		 * If zero is returned, means there are no outstanding pending
254 		 * completions and we may dismiss the pending list.
255 		 */
256 		__bnx2x_exe_queue_reset_pending(bp, o);
257 
258 	spin_unlock_bh(&o->lock);
259 	return rc;
260 }
261 
262 static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o)
263 {
264 	bool empty = list_empty(&o->exe_queue);
265 
266 	/* Don't reorder!!! */
267 	mb();
268 
269 	return empty && list_empty(&o->pending_comp);
270 }
271 
272 static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem(
273 	struct bnx2x *bp)
274 {
275 	DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n");
276 	return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC);
277 }
278 
279 /************************ raw_obj functions ***********************************/
280 static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
281 {
282 	return !!test_bit(o->state, o->pstate);
283 }
284 
285 static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
286 {
287 	smp_mb__before_clear_bit();
288 	clear_bit(o->state, o->pstate);
289 	smp_mb__after_clear_bit();
290 }
291 
292 static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
293 {
294 	smp_mb__before_clear_bit();
295 	set_bit(o->state, o->pstate);
296 	smp_mb__after_clear_bit();
297 }
298 
299 /**
300  * bnx2x_state_wait - wait until the given bit(state) is cleared
301  *
302  * @bp:		device handle
303  * @state:	state which is to be cleared
304  * @state_p:	state buffer
305  *
306  */
307 static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
308 				   unsigned long *pstate)
309 {
310 	/* can take a while if any port is running */
311 	int cnt = 5000;
312 
313 
314 	if (CHIP_REV_IS_EMUL(bp))
315 		cnt *= 20;
316 
317 	DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state);
318 
319 	might_sleep();
320 	while (cnt--) {
321 		if (!test_bit(state, pstate)) {
322 #ifdef BNX2X_STOP_ON_ERROR
323 			DP(BNX2X_MSG_SP, "exit  (cnt %d)\n", 5000 - cnt);
324 #endif
325 			return 0;
326 		}
327 
328 		usleep_range(1000, 1000);
329 
330 		if (bp->panic)
331 			return -EIO;
332 	}
333 
334 	/* timeout! */
335 	BNX2X_ERR("timeout waiting for state %d\n", state);
336 #ifdef BNX2X_STOP_ON_ERROR
337 	bnx2x_panic();
338 #endif
339 
340 	return -EBUSY;
341 }
342 
343 static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw)
344 {
345 	return bnx2x_state_wait(bp, raw->state, raw->pstate);
346 }
347 
348 /***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
349 /* credit handling callbacks */
350 static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset)
351 {
352 	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
353 
354 	WARN_ON(!mp);
355 
356 	return mp->get_entry(mp, offset);
357 }
358 
359 static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o)
360 {
361 	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
362 
363 	WARN_ON(!mp);
364 
365 	return mp->get(mp, 1);
366 }
367 
368 static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset)
369 {
370 	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
371 
372 	WARN_ON(!vp);
373 
374 	return vp->get_entry(vp, offset);
375 }
376 
377 static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
378 {
379 	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
380 
381 	WARN_ON(!vp);
382 
383 	return vp->get(vp, 1);
384 }
385 
386 static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
387 {
388 	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
389 	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
390 
391 	if (!mp->get(mp, 1))
392 		return false;
393 
394 	if (!vp->get(vp, 1)) {
395 		mp->put(mp, 1);
396 		return false;
397 	}
398 
399 	return true;
400 }
401 
402 static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
403 {
404 	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
405 
406 	return mp->put_entry(mp, offset);
407 }
408 
409 static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o)
410 {
411 	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
412 
413 	return mp->put(mp, 1);
414 }
415 
416 static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset)
417 {
418 	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
419 
420 	return vp->put_entry(vp, offset);
421 }
422 
423 static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
424 {
425 	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
426 
427 	return vp->put(vp, 1);
428 }
429 
430 static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
431 {
432 	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
433 	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
434 
435 	if (!mp->put(mp, 1))
436 		return false;
437 
438 	if (!vp->put(vp, 1)) {
439 		mp->get(mp, 1);
440 		return false;
441 	}
442 
443 	return true;
444 }
445 
446 static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
447 				int n, u8 *buf)
448 {
449 	struct bnx2x_vlan_mac_registry_elem *pos;
450 	u8 *next = buf;
451 	int counter = 0;
452 
453 	/* traverse list */
454 	list_for_each_entry(pos, &o->head, link) {
455 		if (counter < n) {
456 			/* place leading zeroes in buffer */
457 			memset(next, 0, MAC_LEADING_ZERO_CNT);
458 
459 			/* place mac after leading zeroes*/
460 			memcpy(next + MAC_LEADING_ZERO_CNT, pos->u.mac.mac,
461 			       ETH_ALEN);
462 
463 			/* calculate address of next element and
464 			 * advance counter
465 			 */
466 			counter++;
467 			next = buf + counter * ALIGN(ETH_ALEN, sizeof(u32));
468 
469 			DP(BNX2X_MSG_SP, "copied element number %d to address %p element was %pM\n",
470 			   counter, next, pos->u.mac.mac);
471 		}
472 	}
473 	return counter * ETH_ALEN;
474 }
475 
476 /* check_add() callbacks */
477 static int bnx2x_check_mac_add(struct bnx2x_vlan_mac_obj *o,
478 			       union bnx2x_classification_ramrod_data *data)
479 {
480 	struct bnx2x_vlan_mac_registry_elem *pos;
481 
482 	if (!is_valid_ether_addr(data->mac.mac))
483 		return -EINVAL;
484 
485 	/* Check if a requested MAC already exists */
486 	list_for_each_entry(pos, &o->head, link)
487 		if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
488 			return -EEXIST;
489 
490 	return 0;
491 }
492 
493 static int bnx2x_check_vlan_add(struct bnx2x_vlan_mac_obj *o,
494 				union bnx2x_classification_ramrod_data *data)
495 {
496 	struct bnx2x_vlan_mac_registry_elem *pos;
497 
498 	list_for_each_entry(pos, &o->head, link)
499 		if (data->vlan.vlan == pos->u.vlan.vlan)
500 			return -EEXIST;
501 
502 	return 0;
503 }
504 
505 static int bnx2x_check_vlan_mac_add(struct bnx2x_vlan_mac_obj *o,
506 				   union bnx2x_classification_ramrod_data *data)
507 {
508 	struct bnx2x_vlan_mac_registry_elem *pos;
509 
510 	list_for_each_entry(pos, &o->head, link)
511 		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
512 		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
513 			     ETH_ALEN)))
514 			return -EEXIST;
515 
516 	return 0;
517 }
518 
519 
520 /* check_del() callbacks */
521 static struct bnx2x_vlan_mac_registry_elem *
522 	bnx2x_check_mac_del(struct bnx2x_vlan_mac_obj *o,
523 			    union bnx2x_classification_ramrod_data *data)
524 {
525 	struct bnx2x_vlan_mac_registry_elem *pos;
526 
527 	list_for_each_entry(pos, &o->head, link)
528 		if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
529 			return pos;
530 
531 	return NULL;
532 }
533 
534 static struct bnx2x_vlan_mac_registry_elem *
535 	bnx2x_check_vlan_del(struct bnx2x_vlan_mac_obj *o,
536 			     union bnx2x_classification_ramrod_data *data)
537 {
538 	struct bnx2x_vlan_mac_registry_elem *pos;
539 
540 	list_for_each_entry(pos, &o->head, link)
541 		if (data->vlan.vlan == pos->u.vlan.vlan)
542 			return pos;
543 
544 	return NULL;
545 }
546 
547 static struct bnx2x_vlan_mac_registry_elem *
548 	bnx2x_check_vlan_mac_del(struct bnx2x_vlan_mac_obj *o,
549 				 union bnx2x_classification_ramrod_data *data)
550 {
551 	struct bnx2x_vlan_mac_registry_elem *pos;
552 
553 	list_for_each_entry(pos, &o->head, link)
554 		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
555 		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
556 			     ETH_ALEN)))
557 			return pos;
558 
559 	return NULL;
560 }
561 
562 /* check_move() callback */
563 static bool bnx2x_check_move(struct bnx2x_vlan_mac_obj *src_o,
564 			     struct bnx2x_vlan_mac_obj *dst_o,
565 			     union bnx2x_classification_ramrod_data *data)
566 {
567 	struct bnx2x_vlan_mac_registry_elem *pos;
568 	int rc;
569 
570 	/* Check if we can delete the requested configuration from the first
571 	 * object.
572 	 */
573 	pos = src_o->check_del(src_o, data);
574 
575 	/*  check if configuration can be added */
576 	rc = dst_o->check_add(dst_o, data);
577 
578 	/* If this classification can not be added (is already set)
579 	 * or can't be deleted - return an error.
580 	 */
581 	if (rc || !pos)
582 		return false;
583 
584 	return true;
585 }
586 
587 static bool bnx2x_check_move_always_err(
588 	struct bnx2x_vlan_mac_obj *src_o,
589 	struct bnx2x_vlan_mac_obj *dst_o,
590 	union bnx2x_classification_ramrod_data *data)
591 {
592 	return false;
593 }
594 
595 
596 static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
597 {
598 	struct bnx2x_raw_obj *raw = &o->raw;
599 	u8 rx_tx_flag = 0;
600 
601 	if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
602 	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
603 		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;
604 
605 	if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
606 	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
607 		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;
608 
609 	return rx_tx_flag;
610 }
611 
612 /* LLH CAM line allocations */
613 enum {
614 	LLH_CAM_ISCSI_ETH_LINE = 0,
615 	LLH_CAM_ETH_LINE,
616 	LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2
617 };
618 
619 static inline void bnx2x_set_mac_in_nig(struct bnx2x *bp,
620 				 bool add, unsigned char *dev_addr, int index)
621 {
622 	u32 wb_data[2];
623 	u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
624 			 NIG_REG_LLH0_FUNC_MEM;
625 
626 	if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE)
627 		return;
628 
629 	DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
630 			 (add ? "ADD" : "DELETE"), index);
631 
632 	if (add) {
633 		/* LLH_FUNC_MEM is a u64 WB register */
634 		reg_offset += 8*index;
635 
636 		wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
637 			      (dev_addr[4] <<  8) |  dev_addr[5]);
638 		wb_data[1] = ((dev_addr[0] <<  8) |  dev_addr[1]);
639 
640 		REG_WR_DMAE(bp, reg_offset, wb_data, 2);
641 	}
642 
643 	REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
644 				  NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
645 }
646 
647 /**
648  * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
649  *
650  * @bp:		device handle
651  * @o:		queue for which we want to configure this rule
652  * @add:	if true the command is an ADD command, DEL otherwise
653  * @opcode:	CLASSIFY_RULE_OPCODE_XXX
654  * @hdr:	pointer to a header to setup
655  *
656  */
657 static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
658 	struct bnx2x_vlan_mac_obj *o, bool add, int opcode,
659 	struct eth_classify_cmd_header *hdr)
660 {
661 	struct bnx2x_raw_obj *raw = &o->raw;
662 
663 	hdr->client_id = raw->cl_id;
664 	hdr->func_id = raw->func_id;
665 
666 	/* Rx or/and Tx (internal switching) configuration ? */
667 	hdr->cmd_general_data |=
668 		bnx2x_vlan_mac_get_rx_tx_flag(o);
669 
670 	if (add)
671 		hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;
672 
673 	hdr->cmd_general_data |=
674 		(opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
675 }
676 
677 /**
678  * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
679  *
680  * @cid:	connection id
681  * @type:	BNX2X_FILTER_XXX_PENDING
682  * @hdr:	poiter to header to setup
683  * @rule_cnt:
684  *
685  * currently we always configure one rule and echo field to contain a CID and an
686  * opcode type.
687  */
688 static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
689 				struct eth_classify_header *hdr, int rule_cnt)
690 {
691 	hdr->echo = (cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT);
692 	hdr->rule_cnt = (u8)rule_cnt;
693 }
694 
695 
696 /* hw_config() callbacks */
697 static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
698 				 struct bnx2x_vlan_mac_obj *o,
699 				 struct bnx2x_exeq_elem *elem, int rule_idx,
700 				 int cam_offset)
701 {
702 	struct bnx2x_raw_obj *raw = &o->raw;
703 	struct eth_classify_rules_ramrod_data *data =
704 		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
705 	int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
706 	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
707 	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
708 	unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
709 	u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;
710 
711 	/*
712 	 * Set LLH CAM entry: currently only iSCSI and ETH macs are
713 	 * relevant. In addition, current implementation is tuned for a
714 	 * single ETH MAC.
715 	 *
716 	 * When multiple unicast ETH MACs PF configuration in switch
717 	 * independent mode is required (NetQ, multiple netdev MACs,
718 	 * etc.), consider better utilisation of 8 per function MAC
719 	 * entries in the LLH register. There is also
720 	 * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that complete the
721 	 * total number of CAM entries to 16.
722 	 *
723 	 * Currently we won't configure NIG for MACs other than a primary ETH
724 	 * MAC and iSCSI L2 MAC.
725 	 *
726 	 * If this MAC is moving from one Queue to another, no need to change
727 	 * NIG configuration.
728 	 */
729 	if (cmd != BNX2X_VLAN_MAC_MOVE) {
730 		if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags))
731 			bnx2x_set_mac_in_nig(bp, add, mac,
732 					     LLH_CAM_ISCSI_ETH_LINE);
733 		else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags))
734 			bnx2x_set_mac_in_nig(bp, add, mac, LLH_CAM_ETH_LINE);
735 	}
736 
737 	/* Reset the ramrod data buffer for the first rule */
738 	if (rule_idx == 0)
739 		memset(data, 0, sizeof(*data));
740 
741 	/* Setup a command header */
742 	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC,
743 				      &rule_entry->mac.header);
744 
745 	DP(BNX2X_MSG_SP, "About to %s MAC %pM for Queue %d\n",
746 			 add ? "add" : "delete", mac, raw->cl_id);
747 
748 	/* Set a MAC itself */
749 	bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
750 			      &rule_entry->mac.mac_mid,
751 			      &rule_entry->mac.mac_lsb, mac);
752 
753 	/* MOVE: Add a rule that will add this MAC to the target Queue */
754 	if (cmd == BNX2X_VLAN_MAC_MOVE) {
755 		rule_entry++;
756 		rule_cnt++;
757 
758 		/* Setup ramrod data */
759 		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
760 					elem->cmd_data.vlan_mac.target_obj,
761 					      true, CLASSIFY_RULE_OPCODE_MAC,
762 					      &rule_entry->mac.header);
763 
764 		/* Set a MAC itself */
765 		bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
766 				      &rule_entry->mac.mac_mid,
767 				      &rule_entry->mac.mac_lsb, mac);
768 	}
769 
770 	/* Set the ramrod data header */
771 	/* TODO: take this to the higher level in order to prevent multiple
772 		 writing */
773 	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
774 					rule_cnt);
775 }
776 
777 /**
778  * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
779  *
780  * @bp:		device handle
781  * @o:		queue
782  * @type:
783  * @cam_offset:	offset in cam memory
784  * @hdr:	pointer to a header to setup
785  *
786  * E1/E1H
787  */
788 static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp,
789 	struct bnx2x_vlan_mac_obj *o, int type, int cam_offset,
790 	struct mac_configuration_hdr *hdr)
791 {
792 	struct bnx2x_raw_obj *r = &o->raw;
793 
794 	hdr->length = 1;
795 	hdr->offset = (u8)cam_offset;
796 	hdr->client_id = 0xff;
797 	hdr->echo = ((r->cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT));
798 }
799 
800 static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp,
801 	struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac,
802 	u16 vlan_id, struct mac_configuration_entry *cfg_entry)
803 {
804 	struct bnx2x_raw_obj *r = &o->raw;
805 	u32 cl_bit_vec = (1 << r->cl_id);
806 
807 	cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec);
808 	cfg_entry->pf_id = r->func_id;
809 	cfg_entry->vlan_id = cpu_to_le16(vlan_id);
810 
811 	if (add) {
812 		SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
813 			 T_ETH_MAC_COMMAND_SET);
814 		SET_FLAG(cfg_entry->flags,
815 			 MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode);
816 
817 		/* Set a MAC in a ramrod data */
818 		bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
819 				      &cfg_entry->middle_mac_addr,
820 				      &cfg_entry->lsb_mac_addr, mac);
821 	} else
822 		SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
823 			 T_ETH_MAC_COMMAND_INVALIDATE);
824 }
825 
826 static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp,
827 	struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add,
828 	u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config)
829 {
830 	struct mac_configuration_entry *cfg_entry = &config->config_table[0];
831 	struct bnx2x_raw_obj *raw = &o->raw;
832 
833 	bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset,
834 					 &config->hdr);
835 	bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id,
836 					 cfg_entry);
837 
838 	DP(BNX2X_MSG_SP, "%s MAC %pM CLID %d CAM offset %d\n",
839 			 add ? "setting" : "clearing",
840 			 mac, raw->cl_id, cam_offset);
841 }
842 
843 /**
844  * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data
845  *
846  * @bp:		device handle
847  * @o:		bnx2x_vlan_mac_obj
848  * @elem:	bnx2x_exeq_elem
849  * @rule_idx:	rule_idx
850  * @cam_offset: cam_offset
851  */
852 static void bnx2x_set_one_mac_e1x(struct bnx2x *bp,
853 				  struct bnx2x_vlan_mac_obj *o,
854 				  struct bnx2x_exeq_elem *elem, int rule_idx,
855 				  int cam_offset)
856 {
857 	struct bnx2x_raw_obj *raw = &o->raw;
858 	struct mac_configuration_cmd *config =
859 		(struct mac_configuration_cmd *)(raw->rdata);
860 	/*
861 	 * 57710 and 57711 do not support MOVE command,
862 	 * so it's either ADD or DEL
863 	 */
864 	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
865 		true : false;
866 
867 	/* Reset the ramrod data buffer */
868 	memset(config, 0, sizeof(*config));
869 
870 	bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_MAC_PENDING,
871 				     cam_offset, add,
872 				     elem->cmd_data.vlan_mac.u.mac.mac, 0,
873 				     ETH_VLAN_FILTER_ANY_VLAN, config);
874 }
875 
876 static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
877 				  struct bnx2x_vlan_mac_obj *o,
878 				  struct bnx2x_exeq_elem *elem, int rule_idx,
879 				  int cam_offset)
880 {
881 	struct bnx2x_raw_obj *raw = &o->raw;
882 	struct eth_classify_rules_ramrod_data *data =
883 		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
884 	int rule_cnt = rule_idx + 1;
885 	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
886 	int cmd = elem->cmd_data.vlan_mac.cmd;
887 	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
888 	u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;
889 
890 	/* Reset the ramrod data buffer for the first rule */
891 	if (rule_idx == 0)
892 		memset(data, 0, sizeof(*data));
893 
894 	/* Set a rule header */
895 	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN,
896 				      &rule_entry->vlan.header);
897 
898 	DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? "add" : "delete"),
899 			 vlan);
900 
901 	/* Set a VLAN itself */
902 	rule_entry->vlan.vlan = cpu_to_le16(vlan);
903 
904 	/* MOVE: Add a rule that will add this MAC to the target Queue */
905 	if (cmd == BNX2X_VLAN_MAC_MOVE) {
906 		rule_entry++;
907 		rule_cnt++;
908 
909 		/* Setup ramrod data */
910 		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
911 					elem->cmd_data.vlan_mac.target_obj,
912 					      true, CLASSIFY_RULE_OPCODE_VLAN,
913 					      &rule_entry->vlan.header);
914 
915 		/* Set a VLAN itself */
916 		rule_entry->vlan.vlan = cpu_to_le16(vlan);
917 	}
918 
919 	/* Set the ramrod data header */
920 	/* TODO: take this to the higher level in order to prevent multiple
921 		 writing */
922 	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
923 					rule_cnt);
924 }
925 
926 static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
927 				      struct bnx2x_vlan_mac_obj *o,
928 				      struct bnx2x_exeq_elem *elem,
929 				      int rule_idx, int cam_offset)
930 {
931 	struct bnx2x_raw_obj *raw = &o->raw;
932 	struct eth_classify_rules_ramrod_data *data =
933 		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
934 	int rule_cnt = rule_idx + 1;
935 	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
936 	int cmd = elem->cmd_data.vlan_mac.cmd;
937 	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
938 	u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
939 	u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;
940 
941 
942 	/* Reset the ramrod data buffer for the first rule */
943 	if (rule_idx == 0)
944 		memset(data, 0, sizeof(*data));
945 
946 	/* Set a rule header */
947 	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
948 				      &rule_entry->pair.header);
949 
950 	/* Set VLAN and MAC themselvs */
951 	rule_entry->pair.vlan = cpu_to_le16(vlan);
952 	bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
953 			      &rule_entry->pair.mac_mid,
954 			      &rule_entry->pair.mac_lsb, mac);
955 
956 	/* MOVE: Add a rule that will add this MAC to the target Queue */
957 	if (cmd == BNX2X_VLAN_MAC_MOVE) {
958 		rule_entry++;
959 		rule_cnt++;
960 
961 		/* Setup ramrod data */
962 		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
963 					elem->cmd_data.vlan_mac.target_obj,
964 					      true, CLASSIFY_RULE_OPCODE_PAIR,
965 					      &rule_entry->pair.header);
966 
967 		/* Set a VLAN itself */
968 		rule_entry->pair.vlan = cpu_to_le16(vlan);
969 		bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
970 				      &rule_entry->pair.mac_mid,
971 				      &rule_entry->pair.mac_lsb, mac);
972 	}
973 
974 	/* Set the ramrod data header */
975 	/* TODO: take this to the higher level in order to prevent multiple
976 		 writing */
977 	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
978 					rule_cnt);
979 }
980 
981 /**
982  * bnx2x_set_one_vlan_mac_e1h -
983  *
984  * @bp:		device handle
985  * @o:		bnx2x_vlan_mac_obj
986  * @elem:	bnx2x_exeq_elem
987  * @rule_idx:	rule_idx
988  * @cam_offset:	cam_offset
989  */
990 static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
991 				       struct bnx2x_vlan_mac_obj *o,
992 				       struct bnx2x_exeq_elem *elem,
993 				       int rule_idx, int cam_offset)
994 {
995 	struct bnx2x_raw_obj *raw = &o->raw;
996 	struct mac_configuration_cmd *config =
997 		(struct mac_configuration_cmd *)(raw->rdata);
998 	/*
999 	 * 57710 and 57711 do not support MOVE command,
1000 	 * so it's either ADD or DEL
1001 	 */
1002 	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
1003 		true : false;
1004 
1005 	/* Reset the ramrod data buffer */
1006 	memset(config, 0, sizeof(*config));
1007 
1008 	bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
1009 				     cam_offset, add,
1010 				     elem->cmd_data.vlan_mac.u.vlan_mac.mac,
1011 				     elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
1012 				     ETH_VLAN_FILTER_CLASSIFY, config);
1013 }
1014 
1015 #define list_next_entry(pos, member) \
1016 	list_entry((pos)->member.next, typeof(*(pos)), member)
1017 
1018 /**
1019  * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
1020  *
1021  * @bp:		device handle
1022  * @p:		command parameters
1023  * @ppos:	pointer to the cooky
1024  *
1025  * reconfigure next MAC/VLAN/VLAN-MAC element from the
1026  * previously configured elements list.
1027  *
1028  * from command parameters only RAMROD_COMP_WAIT bit in ramrod_flags is	taken
1029  * into an account
1030  *
1031  * pointer to the cooky  - that should be given back in the next call to make
1032  * function handle the next element. If *ppos is set to NULL it will restart the
1033  * iterator. If returned *ppos == NULL this means that the last element has been
1034  * handled.
1035  *
1036  */
1037 static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
1038 			   struct bnx2x_vlan_mac_ramrod_params *p,
1039 			   struct bnx2x_vlan_mac_registry_elem **ppos)
1040 {
1041 	struct bnx2x_vlan_mac_registry_elem *pos;
1042 	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1043 
1044 	/* If list is empty - there is nothing to do here */
1045 	if (list_empty(&o->head)) {
1046 		*ppos = NULL;
1047 		return 0;
1048 	}
1049 
1050 	/* make a step... */
1051 	if (*ppos == NULL)
1052 		*ppos = list_first_entry(&o->head,
1053 					 struct bnx2x_vlan_mac_registry_elem,
1054 					 link);
1055 	else
1056 		*ppos = list_next_entry(*ppos, link);
1057 
1058 	pos = *ppos;
1059 
1060 	/* If it's the last step - return NULL */
1061 	if (list_is_last(&pos->link, &o->head))
1062 		*ppos = NULL;
1063 
1064 	/* Prepare a 'user_req' */
1065 	memcpy(&p->user_req.u, &pos->u, sizeof(pos->u));
1066 
1067 	/* Set the command */
1068 	p->user_req.cmd = BNX2X_VLAN_MAC_ADD;
1069 
1070 	/* Set vlan_mac_flags */
1071 	p->user_req.vlan_mac_flags = pos->vlan_mac_flags;
1072 
1073 	/* Set a restore bit */
1074 	__set_bit(RAMROD_RESTORE, &p->ramrod_flags);
1075 
1076 	return bnx2x_config_vlan_mac(bp, p);
1077 }
1078 
1079 /*
1080  * bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
1081  * pointer to an element with a specific criteria and NULL if such an element
1082  * hasn't been found.
1083  */
1084 static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac(
1085 	struct bnx2x_exe_queue_obj *o,
1086 	struct bnx2x_exeq_elem *elem)
1087 {
1088 	struct bnx2x_exeq_elem *pos;
1089 	struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;
1090 
1091 	/* Check pending for execution commands */
1092 	list_for_each_entry(pos, &o->exe_queue, link)
1093 		if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data,
1094 			      sizeof(*data)) &&
1095 		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1096 			return pos;
1097 
1098 	return NULL;
1099 }
1100 
1101 static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
1102 	struct bnx2x_exe_queue_obj *o,
1103 	struct bnx2x_exeq_elem *elem)
1104 {
1105 	struct bnx2x_exeq_elem *pos;
1106 	struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;
1107 
1108 	/* Check pending for execution commands */
1109 	list_for_each_entry(pos, &o->exe_queue, link)
1110 		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data,
1111 			      sizeof(*data)) &&
1112 		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1113 			return pos;
1114 
1115 	return NULL;
1116 }
1117 
1118 static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
1119 	struct bnx2x_exe_queue_obj *o,
1120 	struct bnx2x_exeq_elem *elem)
1121 {
1122 	struct bnx2x_exeq_elem *pos;
1123 	struct bnx2x_vlan_mac_ramrod_data *data =
1124 		&elem->cmd_data.vlan_mac.u.vlan_mac;
1125 
1126 	/* Check pending for execution commands */
1127 	list_for_each_entry(pos, &o->exe_queue, link)
1128 		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
1129 			      sizeof(*data)) &&
1130 		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1131 			return pos;
1132 
1133 	return NULL;
1134 }
1135 
1136 /**
1137  * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
1138  *
1139  * @bp:		device handle
1140  * @qo:		bnx2x_qable_obj
1141  * @elem:	bnx2x_exeq_elem
1142  *
1143  * Checks that the requested configuration can be added. If yes and if
1144  * requested, consume CAM credit.
1145  *
1146  * The 'validate' is run after the 'optimize'.
1147  *
1148  */
1149 static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
1150 					      union bnx2x_qable_obj *qo,
1151 					      struct bnx2x_exeq_elem *elem)
1152 {
1153 	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1154 	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1155 	int rc;
1156 
1157 	/* Check the registry */
1158 	rc = o->check_add(o, &elem->cmd_data.vlan_mac.u);
1159 	if (rc) {
1160 		DP(BNX2X_MSG_SP, "ADD command is not allowed considering "
1161 				 "current registry state\n");
1162 		return rc;
1163 	}
1164 
1165 	/*
1166 	 * Check if there is a pending ADD command for this
1167 	 * MAC/VLAN/VLAN-MAC. Return an error if there is.
1168 	 */
1169 	if (exeq->get(exeq, elem)) {
1170 		DP(BNX2X_MSG_SP, "There is a pending ADD command already\n");
1171 		return -EEXIST;
1172 	}
1173 
1174 	/*
1175 	 * TODO: Check the pending MOVE from other objects where this
1176 	 * object is a destination object.
1177 	 */
1178 
1179 	/* Consume the credit if not requested not to */
1180 	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1181 		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1182 	    o->get_credit(o)))
1183 		return -EINVAL;
1184 
1185 	return 0;
1186 }
1187 
1188 /**
1189  * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed
1190  *
1191  * @bp:		device handle
1192  * @qo:		quable object to check
1193  * @elem:	element that needs to be deleted
1194  *
1195  * Checks that the requested configuration can be deleted. If yes and if
1196  * requested, returns a CAM credit.
1197  *
1198  * The 'validate' is run after the 'optimize'.
1199  */
1200 static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
1201 					      union bnx2x_qable_obj *qo,
1202 					      struct bnx2x_exeq_elem *elem)
1203 {
1204 	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1205 	struct bnx2x_vlan_mac_registry_elem *pos;
1206 	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1207 	struct bnx2x_exeq_elem query_elem;
1208 
1209 	/* If this classification can not be deleted (doesn't exist)
1210 	 * - return a BNX2X_EXIST.
1211 	 */
1212 	pos = o->check_del(o, &elem->cmd_data.vlan_mac.u);
1213 	if (!pos) {
1214 		DP(BNX2X_MSG_SP, "DEL command is not allowed considering "
1215 				 "current registry state\n");
1216 		return -EEXIST;
1217 	}
1218 
1219 	/*
1220 	 * Check if there are pending DEL or MOVE commands for this
1221 	 * MAC/VLAN/VLAN-MAC. Return an error if so.
1222 	 */
1223 	memcpy(&query_elem, elem, sizeof(query_elem));
1224 
1225 	/* Check for MOVE commands */
1226 	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE;
1227 	if (exeq->get(exeq, &query_elem)) {
1228 		BNX2X_ERR("There is a pending MOVE command already\n");
1229 		return -EINVAL;
1230 	}
1231 
1232 	/* Check for DEL commands */
1233 	if (exeq->get(exeq, elem)) {
1234 		DP(BNX2X_MSG_SP, "There is a pending DEL command already\n");
1235 		return -EEXIST;
1236 	}
1237 
1238 	/* Return the credit to the credit pool if not requested not to */
1239 	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1240 		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1241 	    o->put_credit(o))) {
1242 		BNX2X_ERR("Failed to return a credit\n");
1243 		return -EINVAL;
1244 	}
1245 
1246 	return 0;
1247 }
1248 
1249 /**
1250  * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed
1251  *
1252  * @bp:		device handle
1253  * @qo:		quable object to check (source)
1254  * @elem:	element that needs to be moved
1255  *
1256  * Checks that the requested configuration can be moved. If yes and if
1257  * requested, returns a CAM credit.
1258  *
1259  * The 'validate' is run after the 'optimize'.
1260  */
1261 static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
1262 					       union bnx2x_qable_obj *qo,
1263 					       struct bnx2x_exeq_elem *elem)
1264 {
1265 	struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac;
1266 	struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
1267 	struct bnx2x_exeq_elem query_elem;
1268 	struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue;
1269 	struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;
1270 
1271 	/*
1272 	 * Check if we can perform this operation based on the current registry
1273 	 * state.
1274 	 */
1275 	if (!src_o->check_move(src_o, dest_o, &elem->cmd_data.vlan_mac.u)) {
1276 		DP(BNX2X_MSG_SP, "MOVE command is not allowed considering "
1277 				 "current registry state\n");
1278 		return -EINVAL;
1279 	}
1280 
1281 	/*
1282 	 * Check if there is an already pending DEL or MOVE command for the
1283 	 * source object or ADD command for a destination object. Return an
1284 	 * error if so.
1285 	 */
1286 	memcpy(&query_elem, elem, sizeof(query_elem));
1287 
1288 	/* Check DEL on source */
1289 	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
1290 	if (src_exeq->get(src_exeq, &query_elem)) {
1291 		BNX2X_ERR("There is a pending DEL command on the source "
1292 			  "queue already\n");
1293 		return -EINVAL;
1294 	}
1295 
1296 	/* Check MOVE on source */
1297 	if (src_exeq->get(src_exeq, elem)) {
1298 		DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n");
1299 		return -EEXIST;
1300 	}
1301 
1302 	/* Check ADD on destination */
1303 	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
1304 	if (dest_exeq->get(dest_exeq, &query_elem)) {
1305 		BNX2X_ERR("There is a pending ADD command on the "
1306 			  "destination queue already\n");
1307 		return -EINVAL;
1308 	}
1309 
1310 	/* Consume the credit if not requested not to */
1311 	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
1312 		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1313 	    dest_o->get_credit(dest_o)))
1314 		return -EINVAL;
1315 
1316 	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1317 		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1318 	    src_o->put_credit(src_o))) {
1319 		/* return the credit taken from dest... */
1320 		dest_o->put_credit(dest_o);
1321 		return -EINVAL;
1322 	}
1323 
1324 	return 0;
1325 }
1326 
1327 static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
1328 				   union bnx2x_qable_obj *qo,
1329 				   struct bnx2x_exeq_elem *elem)
1330 {
1331 	switch (elem->cmd_data.vlan_mac.cmd) {
1332 	case BNX2X_VLAN_MAC_ADD:
1333 		return bnx2x_validate_vlan_mac_add(bp, qo, elem);
1334 	case BNX2X_VLAN_MAC_DEL:
1335 		return bnx2x_validate_vlan_mac_del(bp, qo, elem);
1336 	case BNX2X_VLAN_MAC_MOVE:
1337 		return bnx2x_validate_vlan_mac_move(bp, qo, elem);
1338 	default:
1339 		return -EINVAL;
1340 	}
1341 }
1342 
1343 /**
1344  * bnx2x_wait_vlan_mac - passivly wait for 5 seconds until all work completes.
1345  *
1346  * @bp:		device handle
1347  * @o:		bnx2x_vlan_mac_obj
1348  *
1349  */
1350 static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
1351 			       struct bnx2x_vlan_mac_obj *o)
1352 {
1353 	int cnt = 5000, rc;
1354 	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1355 	struct bnx2x_raw_obj *raw = &o->raw;
1356 
1357 	while (cnt--) {
1358 		/* Wait for the current command to complete */
1359 		rc = raw->wait_comp(bp, raw);
1360 		if (rc)
1361 			return rc;
1362 
1363 		/* Wait until there are no pending commands */
1364 		if (!bnx2x_exe_queue_empty(exeq))
1365 			usleep_range(1000, 1000);
1366 		else
1367 			return 0;
1368 	}
1369 
1370 	return -EBUSY;
1371 }
1372 
1373 /**
1374  * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
1375  *
1376  * @bp:		device handle
1377  * @o:		bnx2x_vlan_mac_obj
1378  * @cqe:
1379  * @cont:	if true schedule next execution chunk
1380  *
1381  */
1382 static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
1383 				   struct bnx2x_vlan_mac_obj *o,
1384 				   union event_ring_elem *cqe,
1385 				   unsigned long *ramrod_flags)
1386 {
1387 	struct bnx2x_raw_obj *r = &o->raw;
1388 	int rc;
1389 
1390 	/* Reset pending list */
1391 	bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);
1392 
1393 	/* Clear pending */
1394 	r->clear_pending(r);
1395 
1396 	/* If ramrod failed this is most likely a SW bug */
1397 	if (cqe->message.error)
1398 		return -EINVAL;
1399 
1400 	/* Run the next bulk of pending commands if requeted */
1401 	if (test_bit(RAMROD_CONT, ramrod_flags)) {
1402 		rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
1403 		if (rc < 0)
1404 			return rc;
1405 	}
1406 
1407 	/* If there is more work to do return PENDING */
1408 	if (!bnx2x_exe_queue_empty(&o->exe_queue))
1409 		return 1;
1410 
1411 	return 0;
1412 }
1413 
1414 /**
1415  * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands.
1416  *
1417  * @bp:		device handle
1418  * @o:		bnx2x_qable_obj
1419  * @elem:	bnx2x_exeq_elem
1420  */
1421 static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
1422 				   union bnx2x_qable_obj *qo,
1423 				   struct bnx2x_exeq_elem *elem)
1424 {
1425 	struct bnx2x_exeq_elem query, *pos;
1426 	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1427 	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1428 
1429 	memcpy(&query, elem, sizeof(query));
1430 
1431 	switch (elem->cmd_data.vlan_mac.cmd) {
1432 	case BNX2X_VLAN_MAC_ADD:
1433 		query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
1434 		break;
1435 	case BNX2X_VLAN_MAC_DEL:
1436 		query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
1437 		break;
1438 	default:
1439 		/* Don't handle anything other than ADD or DEL */
1440 		return 0;
1441 	}
1442 
1443 	/* If we found the appropriate element - delete it */
1444 	pos = exeq->get(exeq, &query);
1445 	if (pos) {
1446 
1447 		/* Return the credit of the optimized command */
1448 		if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1449 			      &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
1450 			if ((query.cmd_data.vlan_mac.cmd ==
1451 			     BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) {
1452 				BNX2X_ERR("Failed to return the credit for the "
1453 					  "optimized ADD command\n");
1454 				return -EINVAL;
1455 			} else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
1456 				BNX2X_ERR("Failed to recover the credit from "
1457 					  "the optimized DEL command\n");
1458 				return -EINVAL;
1459 			}
1460 		}
1461 
1462 		DP(BNX2X_MSG_SP, "Optimizing %s command\n",
1463 			   (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
1464 			   "ADD" : "DEL");
1465 
1466 		list_del(&pos->link);
1467 		bnx2x_exe_queue_free_elem(bp, pos);
1468 		return 1;
1469 	}
1470 
1471 	return 0;
1472 }
1473 
1474 /**
1475  * bnx2x_vlan_mac_get_registry_elem - prepare a registry element
1476  *
1477  * @bp:	  device handle
1478  * @o:
1479  * @elem:
1480  * @restore:
1481  * @re:
1482  *
1483  * prepare a registry element according to the current command request.
1484  */
1485 static inline int bnx2x_vlan_mac_get_registry_elem(
1486 	struct bnx2x *bp,
1487 	struct bnx2x_vlan_mac_obj *o,
1488 	struct bnx2x_exeq_elem *elem,
1489 	bool restore,
1490 	struct bnx2x_vlan_mac_registry_elem **re)
1491 {
1492 	int cmd = elem->cmd_data.vlan_mac.cmd;
1493 	struct bnx2x_vlan_mac_registry_elem *reg_elem;
1494 
1495 	/* Allocate a new registry element if needed. */
1496 	if (!restore &&
1497 	    ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) {
1498 		reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC);
1499 		if (!reg_elem)
1500 			return -ENOMEM;
1501 
1502 		/* Get a new CAM offset */
1503 		if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
1504 			/*
1505 			 * This shell never happen, because we have checked the
1506 			 * CAM availiability in the 'validate'.
1507 			 */
1508 			WARN_ON(1);
1509 			kfree(reg_elem);
1510 			return -EINVAL;
1511 		}
1512 
1513 		DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset);
1514 
1515 		/* Set a VLAN-MAC data */
1516 		memcpy(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
1517 			  sizeof(reg_elem->u));
1518 
1519 		/* Copy the flags (needed for DEL and RESTORE flows) */
1520 		reg_elem->vlan_mac_flags =
1521 			elem->cmd_data.vlan_mac.vlan_mac_flags;
1522 	} else /* DEL, RESTORE */
1523 		reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u);
1524 
1525 	*re = reg_elem;
1526 	return 0;
1527 }
1528 
1529 /**
1530  * bnx2x_execute_vlan_mac - execute vlan mac command
1531  *
1532  * @bp:			device handle
1533  * @qo:
1534  * @exe_chunk:
1535  * @ramrod_flags:
1536  *
1537  * go and send a ramrod!
1538  */
1539 static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
1540 				  union bnx2x_qable_obj *qo,
1541 				  struct list_head *exe_chunk,
1542 				  unsigned long *ramrod_flags)
1543 {
1544 	struct bnx2x_exeq_elem *elem;
1545 	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
1546 	struct bnx2x_raw_obj *r = &o->raw;
1547 	int rc, idx = 0;
1548 	bool restore = test_bit(RAMROD_RESTORE, ramrod_flags);
1549 	bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags);
1550 	struct bnx2x_vlan_mac_registry_elem *reg_elem;
1551 	int cmd;
1552 
1553 	/*
1554 	 * If DRIVER_ONLY execution is requested, cleanup a registry
1555 	 * and exit. Otherwise send a ramrod to FW.
1556 	 */
1557 	if (!drv_only) {
1558 		WARN_ON(r->check_pending(r));
1559 
1560 		/* Set pending */
1561 		r->set_pending(r);
1562 
1563 		/* Fill tha ramrod data */
1564 		list_for_each_entry(elem, exe_chunk, link) {
1565 			cmd = elem->cmd_data.vlan_mac.cmd;
1566 			/*
1567 			 * We will add to the target object in MOVE command, so
1568 			 * change the object for a CAM search.
1569 			 */
1570 			if (cmd == BNX2X_VLAN_MAC_MOVE)
1571 				cam_obj = elem->cmd_data.vlan_mac.target_obj;
1572 			else
1573 				cam_obj = o;
1574 
1575 			rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj,
1576 							      elem, restore,
1577 							      &reg_elem);
1578 			if (rc)
1579 				goto error_exit;
1580 
1581 			WARN_ON(!reg_elem);
1582 
1583 			/* Push a new entry into the registry */
1584 			if (!restore &&
1585 			    ((cmd == BNX2X_VLAN_MAC_ADD) ||
1586 			    (cmd == BNX2X_VLAN_MAC_MOVE)))
1587 				list_add(&reg_elem->link, &cam_obj->head);
1588 
1589 			/* Configure a single command in a ramrod data buffer */
1590 			o->set_one_rule(bp, o, elem, idx,
1591 					reg_elem->cam_offset);
1592 
1593 			/* MOVE command consumes 2 entries in the ramrod data */
1594 			if (cmd == BNX2X_VLAN_MAC_MOVE)
1595 				idx += 2;
1596 			else
1597 				idx++;
1598 		}
1599 
1600 		/*
1601 		 *  No need for an explicit memory barrier here as long we would
1602 		 *  need to ensure the ordering of writing to the SPQ element
1603 		 *  and updating of the SPQ producer which involves a memory
1604 		 *  read and we will have to put a full memory barrier there
1605 		 *  (inside bnx2x_sp_post()).
1606 		 */
1607 
1608 		rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
1609 				   U64_HI(r->rdata_mapping),
1610 				   U64_LO(r->rdata_mapping),
1611 				   ETH_CONNECTION_TYPE);
1612 		if (rc)
1613 			goto error_exit;
1614 	}
1615 
1616 	/* Now, when we are done with the ramrod - clean up the registry */
1617 	list_for_each_entry(elem, exe_chunk, link) {
1618 		cmd = elem->cmd_data.vlan_mac.cmd;
1619 		if ((cmd == BNX2X_VLAN_MAC_DEL) ||
1620 		    (cmd == BNX2X_VLAN_MAC_MOVE)) {
1621 			reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u);
1622 
1623 			WARN_ON(!reg_elem);
1624 
1625 			o->put_cam_offset(o, reg_elem->cam_offset);
1626 			list_del(&reg_elem->link);
1627 			kfree(reg_elem);
1628 		}
1629 	}
1630 
1631 	if (!drv_only)
1632 		return 1;
1633 	else
1634 		return 0;
1635 
1636 error_exit:
1637 	r->clear_pending(r);
1638 
1639 	/* Cleanup a registry in case of a failure */
1640 	list_for_each_entry(elem, exe_chunk, link) {
1641 		cmd = elem->cmd_data.vlan_mac.cmd;
1642 
1643 		if (cmd == BNX2X_VLAN_MAC_MOVE)
1644 			cam_obj = elem->cmd_data.vlan_mac.target_obj;
1645 		else
1646 			cam_obj = o;
1647 
1648 		/* Delete all newly added above entries */
1649 		if (!restore &&
1650 		    ((cmd == BNX2X_VLAN_MAC_ADD) ||
1651 		    (cmd == BNX2X_VLAN_MAC_MOVE))) {
1652 			reg_elem = o->check_del(cam_obj,
1653 						&elem->cmd_data.vlan_mac.u);
1654 			if (reg_elem) {
1655 				list_del(&reg_elem->link);
1656 				kfree(reg_elem);
1657 			}
1658 		}
1659 	}
1660 
1661 	return rc;
1662 }
1663 
1664 static inline int bnx2x_vlan_mac_push_new_cmd(
1665 	struct bnx2x *bp,
1666 	struct bnx2x_vlan_mac_ramrod_params *p)
1667 {
1668 	struct bnx2x_exeq_elem *elem;
1669 	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1670 	bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags);
1671 
1672 	/* Allocate the execution queue element */
1673 	elem = bnx2x_exe_queue_alloc_elem(bp);
1674 	if (!elem)
1675 		return -ENOMEM;
1676 
1677 	/* Set the command 'length' */
1678 	switch (p->user_req.cmd) {
1679 	case BNX2X_VLAN_MAC_MOVE:
1680 		elem->cmd_len = 2;
1681 		break;
1682 	default:
1683 		elem->cmd_len = 1;
1684 	}
1685 
1686 	/* Fill the object specific info */
1687 	memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));
1688 
1689 	/* Try to add a new command to the pending list */
1690 	return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore);
1691 }
1692 
1693 /**
1694  * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
1695  *
1696  * @bp:	  device handle
1697  * @p:
1698  *
1699  */
1700 int bnx2x_config_vlan_mac(
1701 	struct bnx2x *bp,
1702 	struct bnx2x_vlan_mac_ramrod_params *p)
1703 {
1704 	int rc = 0;
1705 	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1706 	unsigned long *ramrod_flags = &p->ramrod_flags;
1707 	bool cont = test_bit(RAMROD_CONT, ramrod_flags);
1708 	struct bnx2x_raw_obj *raw = &o->raw;
1709 
1710 	/*
1711 	 * Add new elements to the execution list for commands that require it.
1712 	 */
1713 	if (!cont) {
1714 		rc = bnx2x_vlan_mac_push_new_cmd(bp, p);
1715 		if (rc)
1716 			return rc;
1717 	}
1718 
1719 	/*
1720 	 * If nothing will be executed further in this iteration we want to
1721 	 * return PENDING if there are pending commands
1722 	 */
1723 	if (!bnx2x_exe_queue_empty(&o->exe_queue))
1724 		rc = 1;
1725 
1726 	if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags))  {
1727 		DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: "
1728 				 "clearing a pending bit.\n");
1729 		raw->clear_pending(raw);
1730 	}
1731 
1732 	/* Execute commands if required */
1733 	if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
1734 	    test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
1735 		rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
1736 		if (rc < 0)
1737 			return rc;
1738 	}
1739 
1740 	/*
1741 	 * RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
1742 	 * then user want to wait until the last command is done.
1743 	 */
1744 	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
1745 		/*
1746 		 * Wait maximum for the current exe_queue length iterations plus
1747 		 * one (for the current pending command).
1748 		 */
1749 		int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;
1750 
1751 		while (!bnx2x_exe_queue_empty(&o->exe_queue) &&
1752 		       max_iterations--) {
1753 
1754 			/* Wait for the current command to complete */
1755 			rc = raw->wait_comp(bp, raw);
1756 			if (rc)
1757 				return rc;
1758 
1759 			/* Make a next step */
1760 			rc = bnx2x_exe_queue_step(bp, &o->exe_queue,
1761 						  ramrod_flags);
1762 			if (rc < 0)
1763 				return rc;
1764 		}
1765 
1766 		return 0;
1767 	}
1768 
1769 	return rc;
1770 }
1771 
1772 
1773 
1774 /**
1775  * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
1776  *
1777  * @bp:			device handle
1778  * @o:
1779  * @vlan_mac_flags:
1780  * @ramrod_flags:	execution flags to be used for this deletion
1781  *
1782  * if the last operation has completed successfully and there are no
1783  * moreelements left, positive value if the last operation has completed
1784  * successfully and there are more previously configured elements, negative
1785  * value is current operation has failed.
1786  */
1787 static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
1788 				  struct bnx2x_vlan_mac_obj *o,
1789 				  unsigned long *vlan_mac_flags,
1790 				  unsigned long *ramrod_flags)
1791 {
1792 	struct bnx2x_vlan_mac_registry_elem *pos = NULL;
1793 	int rc = 0;
1794 	struct bnx2x_vlan_mac_ramrod_params p;
1795 	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1796 	struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
1797 
1798 	/* Clear pending commands first */
1799 
1800 	spin_lock_bh(&exeq->lock);
1801 
1802 	list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
1803 		if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
1804 		    *vlan_mac_flags)
1805 			list_del(&exeq_pos->link);
1806 	}
1807 
1808 	spin_unlock_bh(&exeq->lock);
1809 
1810 	/* Prepare a command request */
1811 	memset(&p, 0, sizeof(p));
1812 	p.vlan_mac_obj = o;
1813 	p.ramrod_flags = *ramrod_flags;
1814 	p.user_req.cmd = BNX2X_VLAN_MAC_DEL;
1815 
1816 	/*
1817 	 * Add all but the last VLAN-MAC to the execution queue without actually
1818 	 * execution anything.
1819 	 */
1820 	__clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
1821 	__clear_bit(RAMROD_EXEC, &p.ramrod_flags);
1822 	__clear_bit(RAMROD_CONT, &p.ramrod_flags);
1823 
1824 	list_for_each_entry(pos, &o->head, link) {
1825 		if (pos->vlan_mac_flags == *vlan_mac_flags) {
1826 			p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
1827 			memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
1828 			rc = bnx2x_config_vlan_mac(bp, &p);
1829 			if (rc < 0) {
1830 				BNX2X_ERR("Failed to add a new DEL command\n");
1831 				return rc;
1832 			}
1833 		}
1834 	}
1835 
1836 	p.ramrod_flags = *ramrod_flags;
1837 	__set_bit(RAMROD_CONT, &p.ramrod_flags);
1838 
1839 	return bnx2x_config_vlan_mac(bp, &p);
1840 }
1841 
1842 static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id,
1843 	u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state,
1844 	unsigned long *pstate, bnx2x_obj_type type)
1845 {
1846 	raw->func_id = func_id;
1847 	raw->cid = cid;
1848 	raw->cl_id = cl_id;
1849 	raw->rdata = rdata;
1850 	raw->rdata_mapping = rdata_mapping;
1851 	raw->state = state;
1852 	raw->pstate = pstate;
1853 	raw->obj_type = type;
1854 	raw->check_pending = bnx2x_raw_check_pending;
1855 	raw->clear_pending = bnx2x_raw_clear_pending;
1856 	raw->set_pending = bnx2x_raw_set_pending;
1857 	raw->wait_comp = bnx2x_raw_wait;
1858 }
1859 
1860 static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
1861 	u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping,
1862 	int state, unsigned long *pstate, bnx2x_obj_type type,
1863 	struct bnx2x_credit_pool_obj *macs_pool,
1864 	struct bnx2x_credit_pool_obj *vlans_pool)
1865 {
1866 	INIT_LIST_HEAD(&o->head);
1867 
1868 	o->macs_pool = macs_pool;
1869 	o->vlans_pool = vlans_pool;
1870 
1871 	o->delete_all = bnx2x_vlan_mac_del_all;
1872 	o->restore = bnx2x_vlan_mac_restore;
1873 	o->complete = bnx2x_complete_vlan_mac;
1874 	o->wait = bnx2x_wait_vlan_mac;
1875 
1876 	bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
1877 			   state, pstate, type);
1878 }
1879 
1880 
1881 void bnx2x_init_mac_obj(struct bnx2x *bp,
1882 			struct bnx2x_vlan_mac_obj *mac_obj,
1883 			u8 cl_id, u32 cid, u8 func_id, void *rdata,
1884 			dma_addr_t rdata_mapping, int state,
1885 			unsigned long *pstate, bnx2x_obj_type type,
1886 			struct bnx2x_credit_pool_obj *macs_pool)
1887 {
1888 	union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj;
1889 
1890 	bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
1891 				   rdata_mapping, state, pstate, type,
1892 				   macs_pool, NULL);
1893 
1894 	/* CAM credit pool handling */
1895 	mac_obj->get_credit = bnx2x_get_credit_mac;
1896 	mac_obj->put_credit = bnx2x_put_credit_mac;
1897 	mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
1898 	mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
1899 
1900 	if (CHIP_IS_E1x(bp)) {
1901 		mac_obj->set_one_rule      = bnx2x_set_one_mac_e1x;
1902 		mac_obj->check_del         = bnx2x_check_mac_del;
1903 		mac_obj->check_add         = bnx2x_check_mac_add;
1904 		mac_obj->check_move        = bnx2x_check_move_always_err;
1905 		mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;
1906 
1907 		/* Exe Queue */
1908 		bnx2x_exe_queue_init(bp,
1909 				     &mac_obj->exe_queue, 1, qable_obj,
1910 				     bnx2x_validate_vlan_mac,
1911 				     bnx2x_optimize_vlan_mac,
1912 				     bnx2x_execute_vlan_mac,
1913 				     bnx2x_exeq_get_mac);
1914 	} else {
1915 		mac_obj->set_one_rule      = bnx2x_set_one_mac_e2;
1916 		mac_obj->check_del         = bnx2x_check_mac_del;
1917 		mac_obj->check_add         = bnx2x_check_mac_add;
1918 		mac_obj->check_move        = bnx2x_check_move;
1919 		mac_obj->ramrod_cmd        =
1920 			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
1921 		mac_obj->get_n_elements    = bnx2x_get_n_elements;
1922 
1923 		/* Exe Queue */
1924 		bnx2x_exe_queue_init(bp,
1925 				     &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
1926 				     qable_obj, bnx2x_validate_vlan_mac,
1927 				     bnx2x_optimize_vlan_mac,
1928 				     bnx2x_execute_vlan_mac,
1929 				     bnx2x_exeq_get_mac);
1930 	}
1931 }
1932 
1933 void bnx2x_init_vlan_obj(struct bnx2x *bp,
1934 			 struct bnx2x_vlan_mac_obj *vlan_obj,
1935 			 u8 cl_id, u32 cid, u8 func_id, void *rdata,
1936 			 dma_addr_t rdata_mapping, int state,
1937 			 unsigned long *pstate, bnx2x_obj_type type,
1938 			 struct bnx2x_credit_pool_obj *vlans_pool)
1939 {
1940 	union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj;
1941 
1942 	bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
1943 				   rdata_mapping, state, pstate, type, NULL,
1944 				   vlans_pool);
1945 
1946 	vlan_obj->get_credit = bnx2x_get_credit_vlan;
1947 	vlan_obj->put_credit = bnx2x_put_credit_vlan;
1948 	vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan;
1949 	vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan;
1950 
1951 	if (CHIP_IS_E1x(bp)) {
1952 		BNX2X_ERR("Do not support chips others than E2 and newer\n");
1953 		BUG();
1954 	} else {
1955 		vlan_obj->set_one_rule      = bnx2x_set_one_vlan_e2;
1956 		vlan_obj->check_del         = bnx2x_check_vlan_del;
1957 		vlan_obj->check_add         = bnx2x_check_vlan_add;
1958 		vlan_obj->check_move        = bnx2x_check_move;
1959 		vlan_obj->ramrod_cmd        =
1960 			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
1961 
1962 		/* Exe Queue */
1963 		bnx2x_exe_queue_init(bp,
1964 				     &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
1965 				     qable_obj, bnx2x_validate_vlan_mac,
1966 				     bnx2x_optimize_vlan_mac,
1967 				     bnx2x_execute_vlan_mac,
1968 				     bnx2x_exeq_get_vlan);
1969 	}
1970 }
1971 
1972 void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
1973 			     struct bnx2x_vlan_mac_obj *vlan_mac_obj,
1974 			     u8 cl_id, u32 cid, u8 func_id, void *rdata,
1975 			     dma_addr_t rdata_mapping, int state,
1976 			     unsigned long *pstate, bnx2x_obj_type type,
1977 			     struct bnx2x_credit_pool_obj *macs_pool,
1978 			     struct bnx2x_credit_pool_obj *vlans_pool)
1979 {
1980 	union bnx2x_qable_obj *qable_obj =
1981 		(union bnx2x_qable_obj *)vlan_mac_obj;
1982 
1983 	bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
1984 				   rdata_mapping, state, pstate, type,
1985 				   macs_pool, vlans_pool);
1986 
1987 	/* CAM pool handling */
1988 	vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
1989 	vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
1990 	/*
1991 	 * CAM offset is relevant for 57710 and 57711 chips only which have a
1992 	 * single CAM for both MACs and VLAN-MAC pairs. So the offset
1993 	 * will be taken from MACs' pool object only.
1994 	 */
1995 	vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
1996 	vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
1997 
1998 	if (CHIP_IS_E1(bp)) {
1999 		BNX2X_ERR("Do not support chips others than E2\n");
2000 		BUG();
2001 	} else if (CHIP_IS_E1H(bp)) {
2002 		vlan_mac_obj->set_one_rule      = bnx2x_set_one_vlan_mac_e1h;
2003 		vlan_mac_obj->check_del         = bnx2x_check_vlan_mac_del;
2004 		vlan_mac_obj->check_add         = bnx2x_check_vlan_mac_add;
2005 		vlan_mac_obj->check_move        = bnx2x_check_move_always_err;
2006 		vlan_mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;
2007 
2008 		/* Exe Queue */
2009 		bnx2x_exe_queue_init(bp,
2010 				     &vlan_mac_obj->exe_queue, 1, qable_obj,
2011 				     bnx2x_validate_vlan_mac,
2012 				     bnx2x_optimize_vlan_mac,
2013 				     bnx2x_execute_vlan_mac,
2014 				     bnx2x_exeq_get_vlan_mac);
2015 	} else {
2016 		vlan_mac_obj->set_one_rule      = bnx2x_set_one_vlan_mac_e2;
2017 		vlan_mac_obj->check_del         = bnx2x_check_vlan_mac_del;
2018 		vlan_mac_obj->check_add         = bnx2x_check_vlan_mac_add;
2019 		vlan_mac_obj->check_move        = bnx2x_check_move;
2020 		vlan_mac_obj->ramrod_cmd        =
2021 			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2022 
2023 		/* Exe Queue */
2024 		bnx2x_exe_queue_init(bp,
2025 				     &vlan_mac_obj->exe_queue,
2026 				     CLASSIFY_RULES_COUNT,
2027 				     qable_obj, bnx2x_validate_vlan_mac,
2028 				     bnx2x_optimize_vlan_mac,
2029 				     bnx2x_execute_vlan_mac,
2030 				     bnx2x_exeq_get_vlan_mac);
2031 	}
2032 
2033 }
2034 
2035 /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
2036 static inline void __storm_memset_mac_filters(struct bnx2x *bp,
2037 			struct tstorm_eth_mac_filter_config *mac_filters,
2038 			u16 pf_id)
2039 {
2040 	size_t size = sizeof(struct tstorm_eth_mac_filter_config);
2041 
2042 	u32 addr = BAR_TSTRORM_INTMEM +
2043 			TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
2044 
2045 	__storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
2046 }
2047 
2048 static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
2049 				 struct bnx2x_rx_mode_ramrod_params *p)
2050 {
2051 	/* update the bp MAC filter structure  */
2052 	u32 mask = (1 << p->cl_id);
2053 
2054 	struct tstorm_eth_mac_filter_config *mac_filters =
2055 		(struct tstorm_eth_mac_filter_config *)p->rdata;
2056 
2057 	/* initial seeting is drop-all */
2058 	u8 drop_all_ucast = 1, drop_all_mcast = 1;
2059 	u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2060 	u8 unmatched_unicast = 0;
2061 
2062     /* In e1x there we only take into account rx acceot flag since tx switching
2063      * isn't enabled. */
2064 	if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
2065 		/* accept matched ucast */
2066 		drop_all_ucast = 0;
2067 
2068 	if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags))
2069 		/* accept matched mcast */
2070 		drop_all_mcast = 0;
2071 
2072 	if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
2073 		/* accept all mcast */
2074 		drop_all_ucast = 0;
2075 		accp_all_ucast = 1;
2076 	}
2077 	if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
2078 		/* accept all mcast */
2079 		drop_all_mcast = 0;
2080 		accp_all_mcast = 1;
2081 	}
2082 	if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags))
2083 		/* accept (all) bcast */
2084 		accp_all_bcast = 1;
2085 	if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags))
2086 		/* accept unmatched unicasts */
2087 		unmatched_unicast = 1;
2088 
2089 	mac_filters->ucast_drop_all = drop_all_ucast ?
2090 		mac_filters->ucast_drop_all | mask :
2091 		mac_filters->ucast_drop_all & ~mask;
2092 
2093 	mac_filters->mcast_drop_all = drop_all_mcast ?
2094 		mac_filters->mcast_drop_all | mask :
2095 		mac_filters->mcast_drop_all & ~mask;
2096 
2097 	mac_filters->ucast_accept_all = accp_all_ucast ?
2098 		mac_filters->ucast_accept_all | mask :
2099 		mac_filters->ucast_accept_all & ~mask;
2100 
2101 	mac_filters->mcast_accept_all = accp_all_mcast ?
2102 		mac_filters->mcast_accept_all | mask :
2103 		mac_filters->mcast_accept_all & ~mask;
2104 
2105 	mac_filters->bcast_accept_all = accp_all_bcast ?
2106 		mac_filters->bcast_accept_all | mask :
2107 		mac_filters->bcast_accept_all & ~mask;
2108 
2109 	mac_filters->unmatched_unicast = unmatched_unicast ?
2110 		mac_filters->unmatched_unicast | mask :
2111 		mac_filters->unmatched_unicast & ~mask;
2112 
2113 	DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\n accp_ucast 0x%x\n"
2114 			 "accp_mcast 0x%x\naccp_bcast 0x%x\n",
2115 			 mac_filters->ucast_drop_all,
2116 			 mac_filters->mcast_drop_all,
2117 			 mac_filters->ucast_accept_all,
2118 			 mac_filters->mcast_accept_all,
2119 			 mac_filters->bcast_accept_all);
2120 
2121 	/* write the MAC filter structure*/
2122 	__storm_memset_mac_filters(bp, mac_filters, p->func_id);
2123 
2124 	/* The operation is completed */
2125 	clear_bit(p->state, p->pstate);
2126 	smp_mb__after_clear_bit();
2127 
2128 	return 0;
2129 }
2130 
2131 /* Setup ramrod data */
2132 static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid,
2133 				struct eth_classify_header *hdr,
2134 				u8 rule_cnt)
2135 {
2136 	hdr->echo = cid;
2137 	hdr->rule_cnt = rule_cnt;
2138 }
2139 
2140 static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
2141 				unsigned long accept_flags,
2142 				struct eth_filter_rules_cmd *cmd,
2143 				bool clear_accept_all)
2144 {
2145 	u16 state;
2146 
2147 	/* start with 'drop-all' */
2148 	state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
2149 		ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2150 
2151 	if (accept_flags) {
2152 		if (test_bit(BNX2X_ACCEPT_UNICAST, &accept_flags))
2153 			state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2154 
2155 		if (test_bit(BNX2X_ACCEPT_MULTICAST, &accept_flags))
2156 			state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2157 
2158 		if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept_flags)) {
2159 			state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2160 			state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2161 		}
2162 
2163 		if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags)) {
2164 			state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2165 			state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2166 		}
2167 		if (test_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags))
2168 			state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2169 
2170 		if (test_bit(BNX2X_ACCEPT_UNMATCHED, &accept_flags)) {
2171 			state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2172 			state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2173 		}
2174 		if (test_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags))
2175 			state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
2176 	}
2177 
2178 	/* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
2179 	if (clear_accept_all) {
2180 		state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2181 		state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2182 		state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2183 		state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2184 	}
2185 
2186 	cmd->state = cpu_to_le16(state);
2187 
2188 }
2189 
2190 static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
2191 				struct bnx2x_rx_mode_ramrod_params *p)
2192 {
2193 	struct eth_filter_rules_ramrod_data *data = p->rdata;
2194 	int rc;
2195 	u8 rule_idx = 0;
2196 
2197 	/* Reset the ramrod data buffer */
2198 	memset(data, 0, sizeof(*data));
2199 
2200 	/* Setup ramrod data */
2201 
2202 	/* Tx (internal switching) */
2203 	if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2204 		data->rules[rule_idx].client_id = p->cl_id;
2205 		data->rules[rule_idx].func_id = p->func_id;
2206 
2207 		data->rules[rule_idx].cmd_general_data =
2208 			ETH_FILTER_RULES_CMD_TX_CMD;
2209 
2210 		bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
2211 			&(data->rules[rule_idx++]), false);
2212 	}
2213 
2214 	/* Rx */
2215 	if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2216 		data->rules[rule_idx].client_id = p->cl_id;
2217 		data->rules[rule_idx].func_id = p->func_id;
2218 
2219 		data->rules[rule_idx].cmd_general_data =
2220 			ETH_FILTER_RULES_CMD_RX_CMD;
2221 
2222 		bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
2223 			&(data->rules[rule_idx++]), false);
2224 	}
2225 
2226 
2227 	/*
2228 	 * If FCoE Queue configuration has been requested configure the Rx and
2229 	 * internal switching modes for this queue in separate rules.
2230 	 *
2231 	 * FCoE queue shell never be set to ACCEPT_ALL packets of any sort:
2232 	 * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
2233 	 */
2234 	if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
2235 		/*  Tx (internal switching) */
2236 		if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2237 			data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2238 			data->rules[rule_idx].func_id = p->func_id;
2239 
2240 			data->rules[rule_idx].cmd_general_data =
2241 						ETH_FILTER_RULES_CMD_TX_CMD;
2242 
2243 			bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
2244 						     &(data->rules[rule_idx++]),
2245 						       true);
2246 		}
2247 
2248 		/* Rx */
2249 		if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2250 			data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2251 			data->rules[rule_idx].func_id = p->func_id;
2252 
2253 			data->rules[rule_idx].cmd_general_data =
2254 						ETH_FILTER_RULES_CMD_RX_CMD;
2255 
2256 			bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
2257 						     &(data->rules[rule_idx++]),
2258 						       true);
2259 		}
2260 	}
2261 
2262 	/*
2263 	 * Set the ramrod header (most importantly - number of rules to
2264 	 * configure).
2265 	 */
2266 	bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
2267 
2268 	DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, "
2269 			 "tx_accept_flags 0x%lx\n",
2270 			 data->header.rule_cnt, p->rx_accept_flags,
2271 			 p->tx_accept_flags);
2272 
2273 	/*
2274 	 *  No need for an explicit memory barrier here as long we would
2275 	 *  need to ensure the ordering of writing to the SPQ element
2276 	 *  and updating of the SPQ producer which involves a memory
2277 	 *  read and we will have to put a full memory barrier there
2278 	 *  (inside bnx2x_sp_post()).
2279 	 */
2280 
2281 	/* Send a ramrod */
2282 	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid,
2283 			   U64_HI(p->rdata_mapping),
2284 			   U64_LO(p->rdata_mapping),
2285 			   ETH_CONNECTION_TYPE);
2286 	if (rc)
2287 		return rc;
2288 
2289 	/* Ramrod completion is pending */
2290 	return 1;
2291 }
2292 
2293 static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp,
2294 				      struct bnx2x_rx_mode_ramrod_params *p)
2295 {
2296 	return bnx2x_state_wait(bp, p->state, p->pstate);
2297 }
2298 
2299 static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp,
2300 				    struct bnx2x_rx_mode_ramrod_params *p)
2301 {
2302 	/* Do nothing */
2303 	return 0;
2304 }
2305 
2306 int bnx2x_config_rx_mode(struct bnx2x *bp,
2307 			 struct bnx2x_rx_mode_ramrod_params *p)
2308 {
2309 	int rc;
2310 
2311 	/* Configure the new classification in the chip */
2312 	rc = p->rx_mode_obj->config_rx_mode(bp, p);
2313 	if (rc < 0)
2314 		return rc;
2315 
2316 	/* Wait for a ramrod completion if was requested */
2317 	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2318 		rc = p->rx_mode_obj->wait_comp(bp, p);
2319 		if (rc)
2320 			return rc;
2321 	}
2322 
2323 	return rc;
2324 }
2325 
2326 void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
2327 			    struct bnx2x_rx_mode_obj *o)
2328 {
2329 	if (CHIP_IS_E1x(bp)) {
2330 		o->wait_comp      = bnx2x_empty_rx_mode_wait;
2331 		o->config_rx_mode = bnx2x_set_rx_mode_e1x;
2332 	} else {
2333 		o->wait_comp      = bnx2x_wait_rx_mode_comp_e2;
2334 		o->config_rx_mode = bnx2x_set_rx_mode_e2;
2335 	}
2336 }
2337 
2338 /********************* Multicast verbs: SET, CLEAR ****************************/
2339 static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac)
2340 {
2341 	return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
2342 }
2343 
2344 struct bnx2x_mcast_mac_elem {
2345 	struct list_head link;
2346 	u8 mac[ETH_ALEN];
2347 	u8 pad[2]; /* For a natural alignment of the following buffer */
2348 };
2349 
2350 struct bnx2x_pending_mcast_cmd {
2351 	struct list_head link;
2352 	int type; /* BNX2X_MCAST_CMD_X */
2353 	union {
2354 		struct list_head macs_head;
2355 		u32 macs_num; /* Needed for DEL command */
2356 		int next_bin; /* Needed for RESTORE flow with aprox match */
2357 	} data;
2358 
2359 	bool done; /* set to true, when the command has been handled,
2360 		    * practically used in 57712 handling only, where one pending
2361 		    * command may be handled in a few operations. As long as for
2362 		    * other chips every operation handling is completed in a
2363 		    * single ramrod, there is no need to utilize this field.
2364 		    */
2365 };
2366 
2367 static int bnx2x_mcast_wait(struct bnx2x *bp,
2368 			    struct bnx2x_mcast_obj *o)
2369 {
2370 	if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) ||
2371 			o->raw.wait_comp(bp, &o->raw))
2372 		return -EBUSY;
2373 
2374 	return 0;
2375 }
2376 
2377 static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
2378 				   struct bnx2x_mcast_obj *o,
2379 				   struct bnx2x_mcast_ramrod_params *p,
2380 				   int cmd)
2381 {
2382 	int total_sz;
2383 	struct bnx2x_pending_mcast_cmd *new_cmd;
2384 	struct bnx2x_mcast_mac_elem *cur_mac = NULL;
2385 	struct bnx2x_mcast_list_elem *pos;
2386 	int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ?
2387 			     p->mcast_list_len : 0);
2388 
2389 	/* If the command is empty ("handle pending commands only"), break */
2390 	if (!p->mcast_list_len)
2391 		return 0;
2392 
2393 	total_sz = sizeof(*new_cmd) +
2394 		macs_list_len * sizeof(struct bnx2x_mcast_mac_elem);
2395 
2396 	/* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
2397 	new_cmd = kzalloc(total_sz, GFP_ATOMIC);
2398 
2399 	if (!new_cmd)
2400 		return -ENOMEM;
2401 
2402 	DP(BNX2X_MSG_SP, "About to enqueue a new %d command. "
2403 			 "macs_list_len=%d\n", cmd, macs_list_len);
2404 
2405 	INIT_LIST_HEAD(&new_cmd->data.macs_head);
2406 
2407 	new_cmd->type = cmd;
2408 	new_cmd->done = false;
2409 
2410 	switch (cmd) {
2411 	case BNX2X_MCAST_CMD_ADD:
2412 		cur_mac = (struct bnx2x_mcast_mac_elem *)
2413 			  ((u8 *)new_cmd + sizeof(*new_cmd));
2414 
2415 		/* Push the MACs of the current command into the pendig command
2416 		 * MACs list: FIFO
2417 		 */
2418 		list_for_each_entry(pos, &p->mcast_list, link) {
2419 			memcpy(cur_mac->mac, pos->mac, ETH_ALEN);
2420 			list_add_tail(&cur_mac->link, &new_cmd->data.macs_head);
2421 			cur_mac++;
2422 		}
2423 
2424 		break;
2425 
2426 	case BNX2X_MCAST_CMD_DEL:
2427 		new_cmd->data.macs_num = p->mcast_list_len;
2428 		break;
2429 
2430 	case BNX2X_MCAST_CMD_RESTORE:
2431 		new_cmd->data.next_bin = 0;
2432 		break;
2433 
2434 	default:
2435 		BNX2X_ERR("Unknown command: %d\n", cmd);
2436 		return -EINVAL;
2437 	}
2438 
2439 	/* Push the new pending command to the tail of the pending list: FIFO */
2440 	list_add_tail(&new_cmd->link, &o->pending_cmds_head);
2441 
2442 	o->set_sched(o);
2443 
2444 	return 1;
2445 }
2446 
2447 /**
2448  * bnx2x_mcast_get_next_bin - get the next set bin (index)
2449  *
2450  * @o:
2451  * @last:	index to start looking from (including)
2452  *
2453  * returns the next found (set) bin or a negative value if none is found.
2454  */
2455 static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last)
2456 {
2457 	int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
2458 
2459 	for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) {
2460 		if (o->registry.aprox_match.vec[i])
2461 			for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
2462 				int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
2463 				if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
2464 						       vec, cur_bit)) {
2465 					return cur_bit;
2466 				}
2467 			}
2468 		inner_start = 0;
2469 	}
2470 
2471 	/* None found */
2472 	return -1;
2473 }
2474 
2475 /**
2476  * bnx2x_mcast_clear_first_bin - find the first set bin and clear it
2477  *
2478  * @o:
2479  *
2480  * returns the index of the found bin or -1 if none is found
2481  */
2482 static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o)
2483 {
2484 	int cur_bit = bnx2x_mcast_get_next_bin(o, 0);
2485 
2486 	if (cur_bit >= 0)
2487 		BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
2488 
2489 	return cur_bit;
2490 }
2491 
2492 static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o)
2493 {
2494 	struct bnx2x_raw_obj *raw = &o->raw;
2495 	u8 rx_tx_flag = 0;
2496 
2497 	if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
2498 	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2499 		rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
2500 
2501 	if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
2502 	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2503 		rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
2504 
2505 	return rx_tx_flag;
2506 }
2507 
2508 static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
2509 					struct bnx2x_mcast_obj *o, int idx,
2510 					union bnx2x_mcast_config_data *cfg_data,
2511 					int cmd)
2512 {
2513 	struct bnx2x_raw_obj *r = &o->raw;
2514 	struct eth_multicast_rules_ramrod_data *data =
2515 		(struct eth_multicast_rules_ramrod_data *)(r->rdata);
2516 	u8 func_id = r->func_id;
2517 	u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o);
2518 	int bin;
2519 
2520 	if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE))
2521 		rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
2522 
2523 	data->rules[idx].cmd_general_data |= rx_tx_add_flag;
2524 
2525 	/* Get a bin and update a bins' vector */
2526 	switch (cmd) {
2527 	case BNX2X_MCAST_CMD_ADD:
2528 		bin = bnx2x_mcast_bin_from_mac(cfg_data->mac);
2529 		BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
2530 		break;
2531 
2532 	case BNX2X_MCAST_CMD_DEL:
2533 		/* If there were no more bins to clear
2534 		 * (bnx2x_mcast_clear_first_bin() returns -1) then we would
2535 		 * clear any (0xff) bin.
2536 		 * See bnx2x_mcast_validate_e2() for explanation when it may
2537 		 * happen.
2538 		 */
2539 		bin = bnx2x_mcast_clear_first_bin(o);
2540 		break;
2541 
2542 	case BNX2X_MCAST_CMD_RESTORE:
2543 		bin = cfg_data->bin;
2544 		break;
2545 
2546 	default:
2547 		BNX2X_ERR("Unknown command: %d\n", cmd);
2548 		return;
2549 	}
2550 
2551 	DP(BNX2X_MSG_SP, "%s bin %d\n",
2552 			 ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
2553 			 "Setting"  : "Clearing"), bin);
2554 
2555 	data->rules[idx].bin_id    = (u8)bin;
2556 	data->rules[idx].func_id   = func_id;
2557 	data->rules[idx].engine_id = o->engine_id;
2558 }
2559 
2560 /**
2561  * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry
2562  *
2563  * @bp:		device handle
2564  * @o:
2565  * @start_bin:	index in the registry to start from (including)
2566  * @rdata_idx:	index in the ramrod data to start from
2567  *
2568  * returns last handled bin index or -1 if all bins have been handled
2569  */
2570 static inline int bnx2x_mcast_handle_restore_cmd_e2(
2571 	struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_bin,
2572 	int *rdata_idx)
2573 {
2574 	int cur_bin, cnt = *rdata_idx;
2575 	union bnx2x_mcast_config_data cfg_data = {0};
2576 
2577 	/* go through the registry and configure the bins from it */
2578 	for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
2579 	    cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) {
2580 
2581 		cfg_data.bin = (u8)cur_bin;
2582 		o->set_one_rule(bp, o, cnt, &cfg_data,
2583 				BNX2X_MCAST_CMD_RESTORE);
2584 
2585 		cnt++;
2586 
2587 		DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin);
2588 
2589 		/* Break if we reached the maximum number
2590 		 * of rules.
2591 		 */
2592 		if (cnt >= o->max_cmd_len)
2593 			break;
2594 	}
2595 
2596 	*rdata_idx = cnt;
2597 
2598 	return cur_bin;
2599 }
2600 
2601 static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp,
2602 	struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2603 	int *line_idx)
2604 {
2605 	struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
2606 	int cnt = *line_idx;
2607 	union bnx2x_mcast_config_data cfg_data = {0};
2608 
2609 	list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
2610 				 link) {
2611 
2612 		cfg_data.mac = &pmac_pos->mac[0];
2613 		o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
2614 
2615 		cnt++;
2616 
2617 		DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
2618 				 pmac_pos->mac);
2619 
2620 		list_del(&pmac_pos->link);
2621 
2622 		/* Break if we reached the maximum number
2623 		 * of rules.
2624 		 */
2625 		if (cnt >= o->max_cmd_len)
2626 			break;
2627 	}
2628 
2629 	*line_idx = cnt;
2630 
2631 	/* if no more MACs to configure - we are done */
2632 	if (list_empty(&cmd_pos->data.macs_head))
2633 		cmd_pos->done = true;
2634 }
2635 
2636 static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp,
2637 	struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2638 	int *line_idx)
2639 {
2640 	int cnt = *line_idx;
2641 
2642 	while (cmd_pos->data.macs_num) {
2643 		o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type);
2644 
2645 		cnt++;
2646 
2647 		cmd_pos->data.macs_num--;
2648 
2649 		  DP(BNX2X_MSG_SP, "Deleting MAC. %d left,cnt is %d\n",
2650 				   cmd_pos->data.macs_num, cnt);
2651 
2652 		/* Break if we reached the maximum
2653 		 * number of rules.
2654 		 */
2655 		if (cnt >= o->max_cmd_len)
2656 			break;
2657 	}
2658 
2659 	*line_idx = cnt;
2660 
2661 	/* If we cleared all bins - we are done */
2662 	if (!cmd_pos->data.macs_num)
2663 		cmd_pos->done = true;
2664 }
2665 
2666 static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp,
2667 	struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2668 	int *line_idx)
2669 {
2670 	cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin,
2671 						line_idx);
2672 
2673 	if (cmd_pos->data.next_bin < 0)
2674 		/* If o->set_restore returned -1 we are done */
2675 		cmd_pos->done = true;
2676 	else
2677 		/* Start from the next bin next time */
2678 		cmd_pos->data.next_bin++;
2679 }
2680 
2681 static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp,
2682 				struct bnx2x_mcast_ramrod_params *p)
2683 {
2684 	struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
2685 	int cnt = 0;
2686 	struct bnx2x_mcast_obj *o = p->mcast_obj;
2687 
2688 	list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head,
2689 				 link) {
2690 		switch (cmd_pos->type) {
2691 		case BNX2X_MCAST_CMD_ADD:
2692 			bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt);
2693 			break;
2694 
2695 		case BNX2X_MCAST_CMD_DEL:
2696 			bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt);
2697 			break;
2698 
2699 		case BNX2X_MCAST_CMD_RESTORE:
2700 			bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos,
2701 							   &cnt);
2702 			break;
2703 
2704 		default:
2705 			BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
2706 			return -EINVAL;
2707 		}
2708 
2709 		/* If the command has been completed - remove it from the list
2710 		 * and free the memory
2711 		 */
2712 		if (cmd_pos->done) {
2713 			list_del(&cmd_pos->link);
2714 			kfree(cmd_pos);
2715 		}
2716 
2717 		/* Break if we reached the maximum number of rules */
2718 		if (cnt >= o->max_cmd_len)
2719 			break;
2720 	}
2721 
2722 	return cnt;
2723 }
2724 
2725 static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
2726 	struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2727 	int *line_idx)
2728 {
2729 	struct bnx2x_mcast_list_elem *mlist_pos;
2730 	union bnx2x_mcast_config_data cfg_data = {0};
2731 	int cnt = *line_idx;
2732 
2733 	list_for_each_entry(mlist_pos, &p->mcast_list, link) {
2734 		cfg_data.mac = mlist_pos->mac;
2735 		o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD);
2736 
2737 		cnt++;
2738 
2739 		DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
2740 				 mlist_pos->mac);
2741 	}
2742 
2743 	*line_idx = cnt;
2744 }
2745 
2746 static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
2747 	struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2748 	int *line_idx)
2749 {
2750 	int cnt = *line_idx, i;
2751 
2752 	for (i = 0; i < p->mcast_list_len; i++) {
2753 		o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL);
2754 
2755 		cnt++;
2756 
2757 		DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n",
2758 				 p->mcast_list_len - i - 1);
2759 	}
2760 
2761 	*line_idx = cnt;
2762 }
2763 
2764 /**
2765  * bnx2x_mcast_handle_current_cmd -
2766  *
2767  * @bp:		device handle
2768  * @p:
2769  * @cmd:
2770  * @start_cnt:	first line in the ramrod data that may be used
2771  *
2772  * This function is called iff there is enough place for the current command in
2773  * the ramrod data.
2774  * Returns number of lines filled in the ramrod data in total.
2775  */
2776 static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
2777 			struct bnx2x_mcast_ramrod_params *p, int cmd,
2778 			int start_cnt)
2779 {
2780 	struct bnx2x_mcast_obj *o = p->mcast_obj;
2781 	int cnt = start_cnt;
2782 
2783 	DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
2784 
2785 	switch (cmd) {
2786 	case BNX2X_MCAST_CMD_ADD:
2787 		bnx2x_mcast_hdl_add(bp, o, p, &cnt);
2788 		break;
2789 
2790 	case BNX2X_MCAST_CMD_DEL:
2791 		bnx2x_mcast_hdl_del(bp, o, p, &cnt);
2792 		break;
2793 
2794 	case BNX2X_MCAST_CMD_RESTORE:
2795 		o->hdl_restore(bp, o, 0, &cnt);
2796 		break;
2797 
2798 	default:
2799 		BNX2X_ERR("Unknown command: %d\n", cmd);
2800 		return -EINVAL;
2801 	}
2802 
2803 	/* The current command has been handled */
2804 	p->mcast_list_len = 0;
2805 
2806 	return cnt;
2807 }
2808 
2809 static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
2810 				   struct bnx2x_mcast_ramrod_params *p,
2811 				   int cmd)
2812 {
2813 	struct bnx2x_mcast_obj *o = p->mcast_obj;
2814 	int reg_sz = o->get_registry_size(o);
2815 
2816 	switch (cmd) {
2817 	/* DEL command deletes all currently configured MACs */
2818 	case BNX2X_MCAST_CMD_DEL:
2819 		o->set_registry_size(o, 0);
2820 		/* Don't break */
2821 
2822 	/* RESTORE command will restore the entire multicast configuration */
2823 	case BNX2X_MCAST_CMD_RESTORE:
2824 		/* Here we set the approximate amount of work to do, which in
2825 		 * fact may be only less as some MACs in postponed ADD
2826 		 * command(s) scheduled before this command may fall into
2827 		 * the same bin and the actual number of bins set in the
2828 		 * registry would be less than we estimated here. See
2829 		 * bnx2x_mcast_set_one_rule_e2() for further details.
2830 		 */
2831 		p->mcast_list_len = reg_sz;
2832 		break;
2833 
2834 	case BNX2X_MCAST_CMD_ADD:
2835 	case BNX2X_MCAST_CMD_CONT:
2836 		/* Here we assume that all new MACs will fall into new bins.
2837 		 * However we will correct the real registry size after we
2838 		 * handle all pending commands.
2839 		 */
2840 		o->set_registry_size(o, reg_sz + p->mcast_list_len);
2841 		break;
2842 
2843 	default:
2844 		BNX2X_ERR("Unknown command: %d\n", cmd);
2845 		return -EINVAL;
2846 
2847 	}
2848 
2849 	/* Increase the total number of MACs pending to be configured */
2850 	o->total_pending_num += p->mcast_list_len;
2851 
2852 	return 0;
2853 }
2854 
2855 static void bnx2x_mcast_revert_e2(struct bnx2x *bp,
2856 				      struct bnx2x_mcast_ramrod_params *p,
2857 				      int old_num_bins)
2858 {
2859 	struct bnx2x_mcast_obj *o = p->mcast_obj;
2860 
2861 	o->set_registry_size(o, old_num_bins);
2862 	o->total_pending_num -= p->mcast_list_len;
2863 }
2864 
2865 /**
2866  * bnx2x_mcast_set_rdata_hdr_e2 - sets a header values
2867  *
2868  * @bp:		device handle
2869  * @p:
2870  * @len:	number of rules to handle
2871  */
2872 static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
2873 					struct bnx2x_mcast_ramrod_params *p,
2874 					u8 len)
2875 {
2876 	struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
2877 	struct eth_multicast_rules_ramrod_data *data =
2878 		(struct eth_multicast_rules_ramrod_data *)(r->rdata);
2879 
2880 	data->header.echo = ((r->cid & BNX2X_SWCID_MASK) |
2881 			  (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT));
2882 	data->header.rule_cnt = len;
2883 }
2884 
2885 /**
2886  * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins
2887  *
2888  * @bp:		device handle
2889  * @o:
2890  *
2891  * Recalculate the actual number of set bins in the registry using Brian
2892  * Kernighan's algorithm: it's execution complexity is as a number of set bins.
2893  *
2894  * returns 0 for the compliance with bnx2x_mcast_refresh_registry_e1().
2895  */
2896 static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp,
2897 						  struct bnx2x_mcast_obj *o)
2898 {
2899 	int i, cnt = 0;
2900 	u64 elem;
2901 
2902 	for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) {
2903 		elem = o->registry.aprox_match.vec[i];
2904 		for (; elem; cnt++)
2905 			elem &= elem - 1;
2906 	}
2907 
2908 	o->set_registry_size(o, cnt);
2909 
2910 	return 0;
2911 }
2912 
2913 static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
2914 				struct bnx2x_mcast_ramrod_params *p,
2915 				int cmd)
2916 {
2917 	struct bnx2x_raw_obj *raw = &p->mcast_obj->raw;
2918 	struct bnx2x_mcast_obj *o = p->mcast_obj;
2919 	struct eth_multicast_rules_ramrod_data *data =
2920 		(struct eth_multicast_rules_ramrod_data *)(raw->rdata);
2921 	int cnt = 0, rc;
2922 
2923 	/* Reset the ramrod data buffer */
2924 	memset(data, 0, sizeof(*data));
2925 
2926 	cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p);
2927 
2928 	/* If there are no more pending commands - clear SCHEDULED state */
2929 	if (list_empty(&o->pending_cmds_head))
2930 		o->clear_sched(o);
2931 
2932 	/* The below may be true iff there was enough room in ramrod
2933 	 * data for all pending commands and for the current
2934 	 * command. Otherwise the current command would have been added
2935 	 * to the pending commands and p->mcast_list_len would have been
2936 	 * zeroed.
2937 	 */
2938 	if (p->mcast_list_len > 0)
2939 		cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt);
2940 
2941 	/* We've pulled out some MACs - update the total number of
2942 	 * outstanding.
2943 	 */
2944 	o->total_pending_num -= cnt;
2945 
2946 	/* send a ramrod */
2947 	WARN_ON(o->total_pending_num < 0);
2948 	WARN_ON(cnt > o->max_cmd_len);
2949 
2950 	bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt);
2951 
2952 	/* Update a registry size if there are no more pending operations.
2953 	 *
2954 	 * We don't want to change the value of the registry size if there are
2955 	 * pending operations because we want it to always be equal to the
2956 	 * exact or the approximate number (see bnx2x_mcast_validate_e2()) of
2957 	 * set bins after the last requested operation in order to properly
2958 	 * evaluate the size of the next DEL/RESTORE operation.
2959 	 *
2960 	 * Note that we update the registry itself during command(s) handling
2961 	 * - see bnx2x_mcast_set_one_rule_e2(). That's because for 57712 we
2962 	 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
2963 	 * with a limited amount of update commands (per MAC/bin) and we don't
2964 	 * know in this scope what the actual state of bins configuration is
2965 	 * going to be after this ramrod.
2966 	 */
2967 	if (!o->total_pending_num)
2968 		bnx2x_mcast_refresh_registry_e2(bp, o);
2969 
2970 	/*
2971 	 * If CLEAR_ONLY was requested - don't send a ramrod and clear
2972 	 * RAMROD_PENDING status immediately.
2973 	 */
2974 	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
2975 		raw->clear_pending(raw);
2976 		return 0;
2977 	} else {
2978 		/*
2979 		 *  No need for an explicit memory barrier here as long we would
2980 		 *  need to ensure the ordering of writing to the SPQ element
2981 		 *  and updating of the SPQ producer which involves a memory
2982 		 *  read and we will have to put a full memory barrier there
2983 		 *  (inside bnx2x_sp_post()).
2984 		 */
2985 
2986 		/* Send a ramrod */
2987 		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES,
2988 				   raw->cid, U64_HI(raw->rdata_mapping),
2989 				   U64_LO(raw->rdata_mapping),
2990 				   ETH_CONNECTION_TYPE);
2991 		if (rc)
2992 			return rc;
2993 
2994 		/* Ramrod completion is pending */
2995 		return 1;
2996 	}
2997 }
2998 
2999 static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
3000 				    struct bnx2x_mcast_ramrod_params *p,
3001 				    int cmd)
3002 {
3003 	/* Mark, that there is a work to do */
3004 	if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
3005 		p->mcast_list_len = 1;
3006 
3007 	return 0;
3008 }
3009 
3010 static void bnx2x_mcast_revert_e1h(struct bnx2x *bp,
3011 				       struct bnx2x_mcast_ramrod_params *p,
3012 				       int old_num_bins)
3013 {
3014 	/* Do nothing */
3015 }
3016 
3017 #define BNX2X_57711_SET_MC_FILTER(filter, bit) \
3018 do { \
3019 	(filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
3020 } while (0)
3021 
3022 static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp,
3023 					   struct bnx2x_mcast_obj *o,
3024 					   struct bnx2x_mcast_ramrod_params *p,
3025 					   u32 *mc_filter)
3026 {
3027 	struct bnx2x_mcast_list_elem *mlist_pos;
3028 	int bit;
3029 
3030 	list_for_each_entry(mlist_pos, &p->mcast_list, link) {
3031 		bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac);
3032 		BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3033 
3034 		DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC, bin %d\n",
3035 				 mlist_pos->mac, bit);
3036 
3037 		/* bookkeeping... */
3038 		BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
3039 				  bit);
3040 	}
3041 }
3042 
3043 static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
3044 	struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
3045 	u32 *mc_filter)
3046 {
3047 	int bit;
3048 
3049 	for (bit = bnx2x_mcast_get_next_bin(o, 0);
3050 	     bit >= 0;
3051 	     bit = bnx2x_mcast_get_next_bin(o, bit + 1)) {
3052 		BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3053 		DP(BNX2X_MSG_SP, "About to set bin %d\n", bit);
3054 	}
3055 }
3056 
3057 /* On 57711 we write the multicast MACs' aproximate match
3058  * table by directly into the TSTORM's internal RAM. So we don't
3059  * really need to handle any tricks to make it work.
3060  */
3061 static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
3062 				 struct bnx2x_mcast_ramrod_params *p,
3063 				 int cmd)
3064 {
3065 	int i;
3066 	struct bnx2x_mcast_obj *o = p->mcast_obj;
3067 	struct bnx2x_raw_obj *r = &o->raw;
3068 
3069 	/* If CLEAR_ONLY has been requested - clear the registry
3070 	 * and clear a pending bit.
3071 	 */
3072 	if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3073 		u32 mc_filter[MC_HASH_SIZE] = {0};
3074 
3075 		/* Set the multicast filter bits before writing it into
3076 		 * the internal memory.
3077 		 */
3078 		switch (cmd) {
3079 		case BNX2X_MCAST_CMD_ADD:
3080 			bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter);
3081 			break;
3082 
3083 		case BNX2X_MCAST_CMD_DEL:
3084 			DP(BNX2X_MSG_SP,
3085 			   "Invalidating multicast MACs configuration\n");
3086 
3087 			/* clear the registry */
3088 			memset(o->registry.aprox_match.vec, 0,
3089 			       sizeof(o->registry.aprox_match.vec));
3090 			break;
3091 
3092 		case BNX2X_MCAST_CMD_RESTORE:
3093 			bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter);
3094 			break;
3095 
3096 		default:
3097 			BNX2X_ERR("Unknown command: %d\n", cmd);
3098 			return -EINVAL;
3099 		}
3100 
3101 		/* Set the mcast filter in the internal memory */
3102 		for (i = 0; i < MC_HASH_SIZE; i++)
3103 			REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]);
3104 	} else
3105 		/* clear the registry */
3106 		memset(o->registry.aprox_match.vec, 0,
3107 		       sizeof(o->registry.aprox_match.vec));
3108 
3109 	/* We are done */
3110 	r->clear_pending(r);
3111 
3112 	return 0;
3113 }
3114 
3115 static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
3116 				   struct bnx2x_mcast_ramrod_params *p,
3117 				   int cmd)
3118 {
3119 	struct bnx2x_mcast_obj *o = p->mcast_obj;
3120 	int reg_sz = o->get_registry_size(o);
3121 
3122 	switch (cmd) {
3123 	/* DEL command deletes all currently configured MACs */
3124 	case BNX2X_MCAST_CMD_DEL:
3125 		o->set_registry_size(o, 0);
3126 		/* Don't break */
3127 
3128 	/* RESTORE command will restore the entire multicast configuration */
3129 	case BNX2X_MCAST_CMD_RESTORE:
3130 		p->mcast_list_len = reg_sz;
3131 		  DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
3132 				   cmd, p->mcast_list_len);
3133 		break;
3134 
3135 	case BNX2X_MCAST_CMD_ADD:
3136 	case BNX2X_MCAST_CMD_CONT:
3137 		/* Multicast MACs on 57710 are configured as unicast MACs and
3138 		 * there is only a limited number of CAM entries for that
3139 		 * matter.
3140 		 */
3141 		if (p->mcast_list_len > o->max_cmd_len) {
3142 			BNX2X_ERR("Can't configure more than %d multicast MACs"
3143 				   "on 57710\n", o->max_cmd_len);
3144 			return -EINVAL;
3145 		}
3146 		/* Every configured MAC should be cleared if DEL command is
3147 		 * called. Only the last ADD command is relevant as long as
3148 		 * every ADD commands overrides the previous configuration.
3149 		 */
3150 		DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
3151 		if (p->mcast_list_len > 0)
3152 			o->set_registry_size(o, p->mcast_list_len);
3153 
3154 		break;
3155 
3156 	default:
3157 		BNX2X_ERR("Unknown command: %d\n", cmd);
3158 		return -EINVAL;
3159 
3160 	}
3161 
3162 	/* We want to ensure that commands are executed one by one for 57710.
3163 	 * Therefore each none-empty command will consume o->max_cmd_len.
3164 	 */
3165 	if (p->mcast_list_len)
3166 		o->total_pending_num += o->max_cmd_len;
3167 
3168 	return 0;
3169 }
3170 
3171 static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
3172 				      struct bnx2x_mcast_ramrod_params *p,
3173 				      int old_num_macs)
3174 {
3175 	struct bnx2x_mcast_obj *o = p->mcast_obj;
3176 
3177 	o->set_registry_size(o, old_num_macs);
3178 
3179 	/* If current command hasn't been handled yet and we are
3180 	 * here means that it's meant to be dropped and we have to
3181 	 * update the number of outstandling MACs accordingly.
3182 	 */
3183 	if (p->mcast_list_len)
3184 		o->total_pending_num -= o->max_cmd_len;
3185 }
3186 
3187 static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp,
3188 					struct bnx2x_mcast_obj *o, int idx,
3189 					union bnx2x_mcast_config_data *cfg_data,
3190 					int cmd)
3191 {
3192 	struct bnx2x_raw_obj *r = &o->raw;
3193 	struct mac_configuration_cmd *data =
3194 		(struct mac_configuration_cmd *)(r->rdata);
3195 
3196 	/* copy mac */
3197 	if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) {
3198 		bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
3199 				      &data->config_table[idx].middle_mac_addr,
3200 				      &data->config_table[idx].lsb_mac_addr,
3201 				      cfg_data->mac);
3202 
3203 		data->config_table[idx].vlan_id = 0;
3204 		data->config_table[idx].pf_id = r->func_id;
3205 		data->config_table[idx].clients_bit_vector =
3206 			cpu_to_le32(1 << r->cl_id);
3207 
3208 		SET_FLAG(data->config_table[idx].flags,
3209 			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3210 			 T_ETH_MAC_COMMAND_SET);
3211 	}
3212 }
3213 
3214 /**
3215  * bnx2x_mcast_set_rdata_hdr_e1  - set header values in mac_configuration_cmd
3216  *
3217  * @bp:		device handle
3218  * @p:
3219  * @len:	number of rules to handle
3220  */
3221 static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
3222 					struct bnx2x_mcast_ramrod_params *p,
3223 					u8 len)
3224 {
3225 	struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
3226 	struct mac_configuration_cmd *data =
3227 		(struct mac_configuration_cmd *)(r->rdata);
3228 
3229 	u8 offset = (CHIP_REV_IS_SLOW(bp) ?
3230 		     BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) :
3231 		     BNX2X_MAX_MULTICAST*(1 + r->func_id));
3232 
3233 	data->hdr.offset = offset;
3234 	data->hdr.client_id = 0xff;
3235 	data->hdr.echo = ((r->cid & BNX2X_SWCID_MASK) |
3236 			  (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT));
3237 	data->hdr.length = len;
3238 }
3239 
3240 /**
3241  * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710
3242  *
3243  * @bp:		device handle
3244  * @o:
3245  * @start_idx:	index in the registry to start from
3246  * @rdata_idx:	index in the ramrod data to start from
3247  *
3248  * restore command for 57710 is like all other commands - always a stand alone
3249  * command - start_idx and rdata_idx will always be 0. This function will always
3250  * succeed.
3251  * returns -1 to comply with 57712 variant.
3252  */
3253 static inline int bnx2x_mcast_handle_restore_cmd_e1(
3254 	struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_idx,
3255 	int *rdata_idx)
3256 {
3257 	struct bnx2x_mcast_mac_elem *elem;
3258 	int i = 0;
3259 	union bnx2x_mcast_config_data cfg_data = {0};
3260 
3261 	/* go through the registry and configure the MACs from it. */
3262 	list_for_each_entry(elem, &o->registry.exact_match.macs, link) {
3263 		cfg_data.mac = &elem->mac[0];
3264 		o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE);
3265 
3266 		i++;
3267 
3268 		  DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
3269 				   cfg_data.mac);
3270 	}
3271 
3272 	*rdata_idx = i;
3273 
3274 	return -1;
3275 }
3276 
3277 
3278 static inline int bnx2x_mcast_handle_pending_cmds_e1(
3279 	struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p)
3280 {
3281 	struct bnx2x_pending_mcast_cmd *cmd_pos;
3282 	struct bnx2x_mcast_mac_elem *pmac_pos;
3283 	struct bnx2x_mcast_obj *o = p->mcast_obj;
3284 	union bnx2x_mcast_config_data cfg_data = {0};
3285 	int cnt = 0;
3286 
3287 
3288 	/* If nothing to be done - return */
3289 	if (list_empty(&o->pending_cmds_head))
3290 		return 0;
3291 
3292 	/* Handle the first command */
3293 	cmd_pos = list_first_entry(&o->pending_cmds_head,
3294 				   struct bnx2x_pending_mcast_cmd, link);
3295 
3296 	switch (cmd_pos->type) {
3297 	case BNX2X_MCAST_CMD_ADD:
3298 		list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) {
3299 			cfg_data.mac = &pmac_pos->mac[0];
3300 			o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
3301 
3302 			cnt++;
3303 
3304 			DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
3305 					 pmac_pos->mac);
3306 		}
3307 		break;
3308 
3309 	case BNX2X_MCAST_CMD_DEL:
3310 		cnt = cmd_pos->data.macs_num;
3311 		DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt);
3312 		break;
3313 
3314 	case BNX2X_MCAST_CMD_RESTORE:
3315 		o->hdl_restore(bp, o, 0, &cnt);
3316 		break;
3317 
3318 	default:
3319 		BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
3320 		return -EINVAL;
3321 	}
3322 
3323 	list_del(&cmd_pos->link);
3324 	kfree(cmd_pos);
3325 
3326 	return cnt;
3327 }
3328 
3329 /**
3330  * bnx2x_get_fw_mac_addr - revert the bnx2x_set_fw_mac_addr().
3331  *
3332  * @fw_hi:
3333  * @fw_mid:
3334  * @fw_lo:
3335  * @mac:
3336  */
3337 static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
3338 					 __le16 *fw_lo, u8 *mac)
3339 {
3340 	mac[1] = ((u8 *)fw_hi)[0];
3341 	mac[0] = ((u8 *)fw_hi)[1];
3342 	mac[3] = ((u8 *)fw_mid)[0];
3343 	mac[2] = ((u8 *)fw_mid)[1];
3344 	mac[5] = ((u8 *)fw_lo)[0];
3345 	mac[4] = ((u8 *)fw_lo)[1];
3346 }
3347 
3348 /**
3349  * bnx2x_mcast_refresh_registry_e1 -
3350  *
3351  * @bp:		device handle
3352  * @cnt:
3353  *
3354  * Check the ramrod data first entry flag to see if it's a DELETE or ADD command
3355  * and update the registry correspondingly: if ADD - allocate a memory and add
3356  * the entries to the registry (list), if DELETE - clear the registry and free
3357  * the memory.
3358  */
3359 static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
3360 						  struct bnx2x_mcast_obj *o)
3361 {
3362 	struct bnx2x_raw_obj *raw = &o->raw;
3363 	struct bnx2x_mcast_mac_elem *elem;
3364 	struct mac_configuration_cmd *data =
3365 			(struct mac_configuration_cmd *)(raw->rdata);
3366 
3367 	/* If first entry contains a SET bit - the command was ADD,
3368 	 * otherwise - DEL_ALL
3369 	 */
3370 	if (GET_FLAG(data->config_table[0].flags,
3371 			MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
3372 		int i, len = data->hdr.length;
3373 
3374 		/* Break if it was a RESTORE command */
3375 		if (!list_empty(&o->registry.exact_match.macs))
3376 			return 0;
3377 
3378 		elem = kcalloc(len, sizeof(*elem), GFP_ATOMIC);
3379 		if (!elem) {
3380 			BNX2X_ERR("Failed to allocate registry memory\n");
3381 			return -ENOMEM;
3382 		}
3383 
3384 		for (i = 0; i < len; i++, elem++) {
3385 			bnx2x_get_fw_mac_addr(
3386 				&data->config_table[i].msb_mac_addr,
3387 				&data->config_table[i].middle_mac_addr,
3388 				&data->config_table[i].lsb_mac_addr,
3389 				elem->mac);
3390 			DP(BNX2X_MSG_SP, "Adding registry entry for [%pM]\n",
3391 					 elem->mac);
3392 			list_add_tail(&elem->link,
3393 				      &o->registry.exact_match.macs);
3394 		}
3395 	} else {
3396 		elem = list_first_entry(&o->registry.exact_match.macs,
3397 					struct bnx2x_mcast_mac_elem, link);
3398 		DP(BNX2X_MSG_SP, "Deleting a registry\n");
3399 		kfree(elem);
3400 		INIT_LIST_HEAD(&o->registry.exact_match.macs);
3401 	}
3402 
3403 	return 0;
3404 }
3405 
3406 static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
3407 				struct bnx2x_mcast_ramrod_params *p,
3408 				int cmd)
3409 {
3410 	struct bnx2x_mcast_obj *o = p->mcast_obj;
3411 	struct bnx2x_raw_obj *raw = &o->raw;
3412 	struct mac_configuration_cmd *data =
3413 		(struct mac_configuration_cmd *)(raw->rdata);
3414 	int cnt = 0, i, rc;
3415 
3416 	/* Reset the ramrod data buffer */
3417 	memset(data, 0, sizeof(*data));
3418 
3419 	/* First set all entries as invalid */
3420 	for (i = 0; i < o->max_cmd_len ; i++)
3421 		SET_FLAG(data->config_table[i].flags,
3422 			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3423 			 T_ETH_MAC_COMMAND_INVALIDATE);
3424 
3425 	/* Handle pending commands first */
3426 	cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p);
3427 
3428 	/* If there are no more pending commands - clear SCHEDULED state */
3429 	if (list_empty(&o->pending_cmds_head))
3430 		o->clear_sched(o);
3431 
3432 	/* The below may be true iff there were no pending commands */
3433 	if (!cnt)
3434 		cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0);
3435 
3436 	/* For 57710 every command has o->max_cmd_len length to ensure that
3437 	 * commands are done one at a time.
3438 	 */
3439 	o->total_pending_num -= o->max_cmd_len;
3440 
3441 	/* send a ramrod */
3442 
3443 	WARN_ON(cnt > o->max_cmd_len);
3444 
3445 	/* Set ramrod header (in particular, a number of entries to update) */
3446 	bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt);
3447 
3448 	/* update a registry: we need the registry contents to be always up
3449 	 * to date in order to be able to execute a RESTORE opcode. Here
3450 	 * we use the fact that for 57710 we sent one command at a time
3451 	 * hence we may take the registry update out of the command handling
3452 	 * and do it in a simpler way here.
3453 	 */
3454 	rc = bnx2x_mcast_refresh_registry_e1(bp, o);
3455 	if (rc)
3456 		return rc;
3457 
3458 	/*
3459 	 * If CLEAR_ONLY was requested - don't send a ramrod and clear
3460 	 * RAMROD_PENDING status immediately.
3461 	 */
3462 	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3463 		raw->clear_pending(raw);
3464 		return 0;
3465 	} else {
3466 		/*
3467 		 *  No need for an explicit memory barrier here as long we would
3468 		 *  need to ensure the ordering of writing to the SPQ element
3469 		 *  and updating of the SPQ producer which involves a memory
3470 		 *  read and we will have to put a full memory barrier there
3471 		 *  (inside bnx2x_sp_post()).
3472 		 */
3473 
3474 		/* Send a ramrod */
3475 		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid,
3476 				   U64_HI(raw->rdata_mapping),
3477 				   U64_LO(raw->rdata_mapping),
3478 				   ETH_CONNECTION_TYPE);
3479 		if (rc)
3480 			return rc;
3481 
3482 		/* Ramrod completion is pending */
3483 		return 1;
3484 	}
3485 
3486 }
3487 
3488 static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o)
3489 {
3490 	return o->registry.exact_match.num_macs_set;
3491 }
3492 
3493 static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o)
3494 {
3495 	return o->registry.aprox_match.num_bins_set;
3496 }
3497 
3498 static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o,
3499 						int n)
3500 {
3501 	o->registry.exact_match.num_macs_set = n;
3502 }
3503 
3504 static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o,
3505 						int n)
3506 {
3507 	o->registry.aprox_match.num_bins_set = n;
3508 }
3509 
3510 int bnx2x_config_mcast(struct bnx2x *bp,
3511 		       struct bnx2x_mcast_ramrod_params *p,
3512 		       int cmd)
3513 {
3514 	struct bnx2x_mcast_obj *o = p->mcast_obj;
3515 	struct bnx2x_raw_obj *r = &o->raw;
3516 	int rc = 0, old_reg_size;
3517 
3518 	/* This is needed to recover number of currently configured mcast macs
3519 	 * in case of failure.
3520 	 */
3521 	old_reg_size = o->get_registry_size(o);
3522 
3523 	/* Do some calculations and checks */
3524 	rc = o->validate(bp, p, cmd);
3525 	if (rc)
3526 		return rc;
3527 
3528 	/* Return if there is no work to do */
3529 	if ((!p->mcast_list_len) && (!o->check_sched(o)))
3530 		return 0;
3531 
3532 	DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d "
3533 			 "o->max_cmd_len=%d\n", o->total_pending_num,
3534 			 p->mcast_list_len, o->max_cmd_len);
3535 
3536 	/* Enqueue the current command to the pending list if we can't complete
3537 	 * it in the current iteration
3538 	 */
3539 	if (r->check_pending(r) ||
3540 	    ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
3541 		rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd);
3542 		if (rc < 0)
3543 			goto error_exit1;
3544 
3545 		/* As long as the current command is in a command list we
3546 		 * don't need to handle it separately.
3547 		 */
3548 		p->mcast_list_len = 0;
3549 	}
3550 
3551 	if (!r->check_pending(r)) {
3552 
3553 		/* Set 'pending' state */
3554 		r->set_pending(r);
3555 
3556 		/* Configure the new classification in the chip */
3557 		rc = o->config_mcast(bp, p, cmd);
3558 		if (rc < 0)
3559 			goto error_exit2;
3560 
3561 		/* Wait for a ramrod completion if was requested */
3562 		if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
3563 			rc = o->wait_comp(bp, o);
3564 	}
3565 
3566 	return rc;
3567 
3568 error_exit2:
3569 	r->clear_pending(r);
3570 
3571 error_exit1:
3572 	o->revert(bp, p, old_reg_size);
3573 
3574 	return rc;
3575 }
3576 
3577 static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
3578 {
3579 	smp_mb__before_clear_bit();
3580 	clear_bit(o->sched_state, o->raw.pstate);
3581 	smp_mb__after_clear_bit();
3582 }
3583 
3584 static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
3585 {
3586 	smp_mb__before_clear_bit();
3587 	set_bit(o->sched_state, o->raw.pstate);
3588 	smp_mb__after_clear_bit();
3589 }
3590 
3591 static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
3592 {
3593 	return !!test_bit(o->sched_state, o->raw.pstate);
3594 }
3595 
3596 static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o)
3597 {
3598 	return o->raw.check_pending(&o->raw) || o->check_sched(o);
3599 }
3600 
3601 void bnx2x_init_mcast_obj(struct bnx2x *bp,
3602 			  struct bnx2x_mcast_obj *mcast_obj,
3603 			  u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
3604 			  u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
3605 			  int state, unsigned long *pstate, bnx2x_obj_type type)
3606 {
3607 	memset(mcast_obj, 0, sizeof(*mcast_obj));
3608 
3609 	bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
3610 			   rdata, rdata_mapping, state, pstate, type);
3611 
3612 	mcast_obj->engine_id = engine_id;
3613 
3614 	INIT_LIST_HEAD(&mcast_obj->pending_cmds_head);
3615 
3616 	mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED;
3617 	mcast_obj->check_sched = bnx2x_mcast_check_sched;
3618 	mcast_obj->set_sched = bnx2x_mcast_set_sched;
3619 	mcast_obj->clear_sched = bnx2x_mcast_clear_sched;
3620 
3621 	if (CHIP_IS_E1(bp)) {
3622 		mcast_obj->config_mcast      = bnx2x_mcast_setup_e1;
3623 		mcast_obj->enqueue_cmd       = bnx2x_mcast_enqueue_cmd;
3624 		mcast_obj->hdl_restore       =
3625 			bnx2x_mcast_handle_restore_cmd_e1;
3626 		mcast_obj->check_pending     = bnx2x_mcast_check_pending;
3627 
3628 		if (CHIP_REV_IS_SLOW(bp))
3629 			mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI;
3630 		else
3631 			mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST;
3632 
3633 		mcast_obj->wait_comp         = bnx2x_mcast_wait;
3634 		mcast_obj->set_one_rule      = bnx2x_mcast_set_one_rule_e1;
3635 		mcast_obj->validate          = bnx2x_mcast_validate_e1;
3636 		mcast_obj->revert            = bnx2x_mcast_revert_e1;
3637 		mcast_obj->get_registry_size =
3638 			bnx2x_mcast_get_registry_size_exact;
3639 		mcast_obj->set_registry_size =
3640 			bnx2x_mcast_set_registry_size_exact;
3641 
3642 		/* 57710 is the only chip that uses the exact match for mcast
3643 		 * at the moment.
3644 		 */
3645 		INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs);
3646 
3647 	} else if (CHIP_IS_E1H(bp)) {
3648 		mcast_obj->config_mcast  = bnx2x_mcast_setup_e1h;
3649 		mcast_obj->enqueue_cmd   = NULL;
3650 		mcast_obj->hdl_restore   = NULL;
3651 		mcast_obj->check_pending = bnx2x_mcast_check_pending;
3652 
3653 		/* 57711 doesn't send a ramrod, so it has unlimited credit
3654 		 * for one command.
3655 		 */
3656 		mcast_obj->max_cmd_len       = -1;
3657 		mcast_obj->wait_comp         = bnx2x_mcast_wait;
3658 		mcast_obj->set_one_rule      = NULL;
3659 		mcast_obj->validate          = bnx2x_mcast_validate_e1h;
3660 		mcast_obj->revert            = bnx2x_mcast_revert_e1h;
3661 		mcast_obj->get_registry_size =
3662 			bnx2x_mcast_get_registry_size_aprox;
3663 		mcast_obj->set_registry_size =
3664 			bnx2x_mcast_set_registry_size_aprox;
3665 	} else {
3666 		mcast_obj->config_mcast      = bnx2x_mcast_setup_e2;
3667 		mcast_obj->enqueue_cmd       = bnx2x_mcast_enqueue_cmd;
3668 		mcast_obj->hdl_restore       =
3669 			bnx2x_mcast_handle_restore_cmd_e2;
3670 		mcast_obj->check_pending     = bnx2x_mcast_check_pending;
3671 		/* TODO: There should be a proper HSI define for this number!!!
3672 		 */
3673 		mcast_obj->max_cmd_len       = 16;
3674 		mcast_obj->wait_comp         = bnx2x_mcast_wait;
3675 		mcast_obj->set_one_rule      = bnx2x_mcast_set_one_rule_e2;
3676 		mcast_obj->validate          = bnx2x_mcast_validate_e2;
3677 		mcast_obj->revert            = bnx2x_mcast_revert_e2;
3678 		mcast_obj->get_registry_size =
3679 			bnx2x_mcast_get_registry_size_aprox;
3680 		mcast_obj->set_registry_size =
3681 			bnx2x_mcast_set_registry_size_aprox;
3682 	}
3683 }
3684 
3685 /*************************** Credit handling **********************************/
3686 
3687 /**
3688  * atomic_add_ifless - add if the result is less than a given value.
3689  *
3690  * @v:	pointer of type atomic_t
3691  * @a:	the amount to add to v...
3692  * @u:	...if (v + a) is less than u.
3693  *
3694  * returns true if (v + a) was less than u, and false otherwise.
3695  *
3696  */
3697 static inline bool __atomic_add_ifless(atomic_t *v, int a, int u)
3698 {
3699 	int c, old;
3700 
3701 	c = atomic_read(v);
3702 	for (;;) {
3703 		if (unlikely(c + a >= u))
3704 			return false;
3705 
3706 		old = atomic_cmpxchg((v), c, c + a);
3707 		if (likely(old == c))
3708 			break;
3709 		c = old;
3710 	}
3711 
3712 	return true;
3713 }
3714 
3715 /**
3716  * atomic_dec_ifmoe - dec if the result is more or equal than a given value.
3717  *
3718  * @v:	pointer of type atomic_t
3719  * @a:	the amount to dec from v...
3720  * @u:	...if (v - a) is more or equal than u.
3721  *
3722  * returns true if (v - a) was more or equal than u, and false
3723  * otherwise.
3724  */
3725 static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u)
3726 {
3727 	int c, old;
3728 
3729 	c = atomic_read(v);
3730 	for (;;) {
3731 		if (unlikely(c - a < u))
3732 			return false;
3733 
3734 		old = atomic_cmpxchg((v), c, c - a);
3735 		if (likely(old == c))
3736 			break;
3737 		c = old;
3738 	}
3739 
3740 	return true;
3741 }
3742 
3743 static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt)
3744 {
3745 	bool rc;
3746 
3747 	smp_mb();
3748 	rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
3749 	smp_mb();
3750 
3751 	return rc;
3752 }
3753 
3754 static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt)
3755 {
3756 	bool rc;
3757 
3758 	smp_mb();
3759 
3760 	/* Don't let to refill if credit + cnt > pool_sz */
3761 	rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
3762 
3763 	smp_mb();
3764 
3765 	return rc;
3766 }
3767 
3768 static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o)
3769 {
3770 	int cur_credit;
3771 
3772 	smp_mb();
3773 	cur_credit = atomic_read(&o->credit);
3774 
3775 	return cur_credit;
3776 }
3777 
3778 static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o,
3779 					  int cnt)
3780 {
3781 	return true;
3782 }
3783 
3784 
3785 static bool bnx2x_credit_pool_get_entry(
3786 	struct bnx2x_credit_pool_obj *o,
3787 	int *offset)
3788 {
3789 	int idx, vec, i;
3790 
3791 	*offset = -1;
3792 
3793 	/* Find "internal cam-offset" then add to base for this object... */
3794 	for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) {
3795 
3796 		/* Skip the current vector if there are no free entries in it */
3797 		if (!o->pool_mirror[vec])
3798 			continue;
3799 
3800 		/* If we've got here we are going to find a free entry */
3801 		for (idx = vec * BNX2X_POOL_VEC_SIZE, i = 0;
3802 		      i < BIT_VEC64_ELEM_SZ; idx++, i++)
3803 
3804 			if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
3805 				/* Got one!! */
3806 				BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
3807 				*offset = o->base_pool_offset + idx;
3808 				return true;
3809 			}
3810 	}
3811 
3812 	return false;
3813 }
3814 
3815 static bool bnx2x_credit_pool_put_entry(
3816 	struct bnx2x_credit_pool_obj *o,
3817 	int offset)
3818 {
3819 	if (offset < o->base_pool_offset)
3820 		return false;
3821 
3822 	offset -= o->base_pool_offset;
3823 
3824 	if (offset >= o->pool_sz)
3825 		return false;
3826 
3827 	/* Return the entry to the pool */
3828 	BIT_VEC64_SET_BIT(o->pool_mirror, offset);
3829 
3830 	return true;
3831 }
3832 
3833 static bool bnx2x_credit_pool_put_entry_always_true(
3834 	struct bnx2x_credit_pool_obj *o,
3835 	int offset)
3836 {
3837 	return true;
3838 }
3839 
3840 static bool bnx2x_credit_pool_get_entry_always_true(
3841 	struct bnx2x_credit_pool_obj *o,
3842 	int *offset)
3843 {
3844 	*offset = -1;
3845 	return true;
3846 }
3847 /**
3848  * bnx2x_init_credit_pool - initialize credit pool internals.
3849  *
3850  * @p:
3851  * @base:	Base entry in the CAM to use.
3852  * @credit:	pool size.
3853  *
3854  * If base is negative no CAM entries handling will be performed.
3855  * If credit is negative pool operations will always succeed (unlimited pool).
3856  *
3857  */
3858 static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
3859 					  int base, int credit)
3860 {
3861 	/* Zero the object first */
3862 	memset(p, 0, sizeof(*p));
3863 
3864 	/* Set the table to all 1s */
3865 	memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
3866 
3867 	/* Init a pool as full */
3868 	atomic_set(&p->credit, credit);
3869 
3870 	/* The total poll size */
3871 	p->pool_sz = credit;
3872 
3873 	p->base_pool_offset = base;
3874 
3875 	/* Commit the change */
3876 	smp_mb();
3877 
3878 	p->check = bnx2x_credit_pool_check;
3879 
3880 	/* if pool credit is negative - disable the checks */
3881 	if (credit >= 0) {
3882 		p->put      = bnx2x_credit_pool_put;
3883 		p->get      = bnx2x_credit_pool_get;
3884 		p->put_entry = bnx2x_credit_pool_put_entry;
3885 		p->get_entry = bnx2x_credit_pool_get_entry;
3886 	} else {
3887 		p->put      = bnx2x_credit_pool_always_true;
3888 		p->get      = bnx2x_credit_pool_always_true;
3889 		p->put_entry = bnx2x_credit_pool_put_entry_always_true;
3890 		p->get_entry = bnx2x_credit_pool_get_entry_always_true;
3891 	}
3892 
3893 	/* If base is negative - disable entries handling */
3894 	if (base < 0) {
3895 		p->put_entry = bnx2x_credit_pool_put_entry_always_true;
3896 		p->get_entry = bnx2x_credit_pool_get_entry_always_true;
3897 	}
3898 }
3899 
3900 void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
3901 				struct bnx2x_credit_pool_obj *p, u8 func_id,
3902 				u8 func_num)
3903 {
3904 /* TODO: this will be defined in consts as well... */
3905 #define BNX2X_CAM_SIZE_EMUL 5
3906 
3907 	int cam_sz;
3908 
3909 	if (CHIP_IS_E1(bp)) {
3910 		/* In E1, Multicast is saved in cam... */
3911 		if (!CHIP_REV_IS_SLOW(bp))
3912 			cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST;
3913 		else
3914 			cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI;
3915 
3916 		bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
3917 
3918 	} else if (CHIP_IS_E1H(bp)) {
3919 		/* CAM credit is equaly divided between all active functions
3920 		 * on the PORT!.
3921 		 */
3922 		if ((func_num > 0)) {
3923 			if (!CHIP_REV_IS_SLOW(bp))
3924 				cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num));
3925 			else
3926 				cam_sz = BNX2X_CAM_SIZE_EMUL;
3927 			bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
3928 		} else {
3929 			/* this should never happen! Block MAC operations. */
3930 			bnx2x_init_credit_pool(p, 0, 0);
3931 		}
3932 
3933 	} else {
3934 
3935 		/*
3936 		 * CAM credit is equaly divided between all active functions
3937 		 * on the PATH.
3938 		 */
3939 		if ((func_num > 0)) {
3940 			if (!CHIP_REV_IS_SLOW(bp))
3941 				cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
3942 			else
3943 				cam_sz = BNX2X_CAM_SIZE_EMUL;
3944 
3945 			/*
3946 			 * No need for CAM entries handling for 57712 and
3947 			 * newer.
3948 			 */
3949 			bnx2x_init_credit_pool(p, -1, cam_sz);
3950 		} else {
3951 			/* this should never happen! Block MAC operations. */
3952 			bnx2x_init_credit_pool(p, 0, 0);
3953 		}
3954 
3955 	}
3956 }
3957 
3958 void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
3959 				 struct bnx2x_credit_pool_obj *p,
3960 				 u8 func_id,
3961 				 u8 func_num)
3962 {
3963 	if (CHIP_IS_E1x(bp)) {
3964 		/*
3965 		 * There is no VLAN credit in HW on 57710 and 57711 only
3966 		 * MAC / MAC-VLAN can be set
3967 		 */
3968 		bnx2x_init_credit_pool(p, 0, -1);
3969 	} else {
3970 		/*
3971 		 * CAM credit is equaly divided between all active functions
3972 		 * on the PATH.
3973 		 */
3974 		if (func_num > 0) {
3975 			int credit = MAX_VLAN_CREDIT_E2 / func_num;
3976 			bnx2x_init_credit_pool(p, func_id * credit, credit);
3977 		} else
3978 			/* this should never happen! Block VLAN operations. */
3979 			bnx2x_init_credit_pool(p, 0, 0);
3980 	}
3981 }
3982 
3983 /****************** RSS Configuration ******************/
3984 /**
3985  * bnx2x_debug_print_ind_table - prints the indirection table configuration.
3986  *
3987  * @bp:		driver hanlde
3988  * @p:		pointer to rss configuration
3989  *
3990  * Prints it when NETIF_MSG_IFUP debug level is configured.
3991  */
3992 static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp,
3993 					struct bnx2x_config_rss_params *p)
3994 {
3995 	int i;
3996 
3997 	DP(BNX2X_MSG_SP, "Setting indirection table to:\n");
3998 	DP(BNX2X_MSG_SP, "0x0000: ");
3999 	for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
4000 		DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]);
4001 
4002 		/* Print 4 bytes in a line */
4003 		if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
4004 		    (((i + 1) & 0x3) == 0)) {
4005 			DP_CONT(BNX2X_MSG_SP, "\n");
4006 			DP(BNX2X_MSG_SP, "0x%04x: ", i + 1);
4007 		}
4008 	}
4009 
4010 	DP_CONT(BNX2X_MSG_SP, "\n");
4011 }
4012 
4013 /**
4014  * bnx2x_setup_rss - configure RSS
4015  *
4016  * @bp:		device handle
4017  * @p:		rss configuration
4018  *
4019  * sends on UPDATE ramrod for that matter.
4020  */
4021 static int bnx2x_setup_rss(struct bnx2x *bp,
4022 			   struct bnx2x_config_rss_params *p)
4023 {
4024 	struct bnx2x_rss_config_obj *o = p->rss_obj;
4025 	struct bnx2x_raw_obj *r = &o->raw;
4026 	struct eth_rss_update_ramrod_data *data =
4027 		(struct eth_rss_update_ramrod_data *)(r->rdata);
4028 	u8 rss_mode = 0;
4029 	int rc;
4030 
4031 	memset(data, 0, sizeof(*data));
4032 
4033 	DP(BNX2X_MSG_SP, "Configuring RSS\n");
4034 
4035 	/* Set an echo field */
4036 	data->echo = (r->cid & BNX2X_SWCID_MASK) |
4037 		     (r->state << BNX2X_SWCID_SHIFT);
4038 
4039 	/* RSS mode */
4040 	if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags))
4041 		rss_mode = ETH_RSS_MODE_DISABLED;
4042 	else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags))
4043 		rss_mode = ETH_RSS_MODE_REGULAR;
4044 	else if (test_bit(BNX2X_RSS_MODE_VLAN_PRI, &p->rss_flags))
4045 		rss_mode = ETH_RSS_MODE_VLAN_PRI;
4046 	else if (test_bit(BNX2X_RSS_MODE_E1HOV_PRI, &p->rss_flags))
4047 		rss_mode = ETH_RSS_MODE_E1HOV_PRI;
4048 	else if (test_bit(BNX2X_RSS_MODE_IP_DSCP, &p->rss_flags))
4049 		rss_mode = ETH_RSS_MODE_IP_DSCP;
4050 
4051 	data->rss_mode = rss_mode;
4052 
4053 	DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode);
4054 
4055 	/* RSS capabilities */
4056 	if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags))
4057 		data->capabilities |=
4058 			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
4059 
4060 	if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags))
4061 		data->capabilities |=
4062 			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
4063 
4064 	if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
4065 		data->capabilities |=
4066 			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
4067 
4068 	if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags))
4069 		data->capabilities |=
4070 			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
4071 
4072 	/* Hashing mask */
4073 	data->rss_result_mask = p->rss_result_mask;
4074 
4075 	/* RSS engine ID */
4076 	data->rss_engine_id = o->engine_id;
4077 
4078 	DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id);
4079 
4080 	/* Indirection table */
4081 	memcpy(data->indirection_table, p->ind_table,
4082 		  T_ETH_INDIRECTION_TABLE_SIZE);
4083 
4084 	/* Remember the last configuration */
4085 	memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
4086 
4087 	/* Print the indirection table */
4088 	if (netif_msg_ifup(bp))
4089 		bnx2x_debug_print_ind_table(bp, p);
4090 
4091 	/* RSS keys */
4092 	if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
4093 		memcpy(&data->rss_key[0], &p->rss_key[0],
4094 		       sizeof(data->rss_key));
4095 		data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
4096 	}
4097 
4098 	/*
4099 	 *  No need for an explicit memory barrier here as long we would
4100 	 *  need to ensure the ordering of writing to the SPQ element
4101 	 *  and updating of the SPQ producer which involves a memory
4102 	 *  read and we will have to put a full memory barrier there
4103 	 *  (inside bnx2x_sp_post()).
4104 	 */
4105 
4106 	/* Send a ramrod */
4107 	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid,
4108 			   U64_HI(r->rdata_mapping),
4109 			   U64_LO(r->rdata_mapping),
4110 			   ETH_CONNECTION_TYPE);
4111 
4112 	if (rc < 0)
4113 		return rc;
4114 
4115 	return 1;
4116 }
4117 
4118 void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
4119 			     u8 *ind_table)
4120 {
4121 	memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
4122 }
4123 
4124 int bnx2x_config_rss(struct bnx2x *bp,
4125 		     struct bnx2x_config_rss_params *p)
4126 {
4127 	int rc;
4128 	struct bnx2x_rss_config_obj *o = p->rss_obj;
4129 	struct bnx2x_raw_obj *r = &o->raw;
4130 
4131 	/* Do nothing if only driver cleanup was requested */
4132 	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
4133 		return 0;
4134 
4135 	r->set_pending(r);
4136 
4137 	rc = o->config_rss(bp, p);
4138 	if (rc < 0) {
4139 		r->clear_pending(r);
4140 		return rc;
4141 	}
4142 
4143 	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
4144 		rc = r->wait_comp(bp, r);
4145 
4146 	return rc;
4147 }
4148 
4149 
4150 void bnx2x_init_rss_config_obj(struct bnx2x *bp,
4151 			       struct bnx2x_rss_config_obj *rss_obj,
4152 			       u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
4153 			       void *rdata, dma_addr_t rdata_mapping,
4154 			       int state, unsigned long *pstate,
4155 			       bnx2x_obj_type type)
4156 {
4157 	bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
4158 			   rdata_mapping, state, pstate, type);
4159 
4160 	rss_obj->engine_id  = engine_id;
4161 	rss_obj->config_rss = bnx2x_setup_rss;
4162 }
4163 
4164 /********************** Queue state object ***********************************/
4165 
4166 /**
4167  * bnx2x_queue_state_change - perform Queue state change transition
4168  *
4169  * @bp:		device handle
4170  * @params:	parameters to perform the transition
4171  *
4172  * returns 0 in case of successfully completed transition, negative error
4173  * code in case of failure, positive (EBUSY) value if there is a completion
4174  * to that is still pending (possible only if RAMROD_COMP_WAIT is
4175  * not set in params->ramrod_flags for asynchronous commands).
4176  *
4177  */
4178 int bnx2x_queue_state_change(struct bnx2x *bp,
4179 			     struct bnx2x_queue_state_params *params)
4180 {
4181 	struct bnx2x_queue_sp_obj *o = params->q_obj;
4182 	int rc, pending_bit;
4183 	unsigned long *pending = &o->pending;
4184 
4185 	/* Check that the requested transition is legal */
4186 	if (o->check_transition(bp, o, params))
4187 		return -EINVAL;
4188 
4189 	/* Set "pending" bit */
4190 	pending_bit = o->set_pending(o, params);
4191 
4192 	/* Don't send a command if only driver cleanup was requested */
4193 	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
4194 		o->complete_cmd(bp, o, pending_bit);
4195 	else {
4196 		/* Send a ramrod */
4197 		rc = o->send_cmd(bp, params);
4198 		if (rc) {
4199 			o->next_state = BNX2X_Q_STATE_MAX;
4200 			clear_bit(pending_bit, pending);
4201 			smp_mb__after_clear_bit();
4202 			return rc;
4203 		}
4204 
4205 		if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
4206 			rc = o->wait_comp(bp, o, pending_bit);
4207 			if (rc)
4208 				return rc;
4209 
4210 			return 0;
4211 		}
4212 	}
4213 
4214 	return !!test_bit(pending_bit, pending);
4215 }
4216 
4217 
4218 static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj,
4219 				   struct bnx2x_queue_state_params *params)
4220 {
4221 	enum bnx2x_queue_cmd cmd = params->cmd, bit;
4222 
4223 	/* ACTIVATE and DEACTIVATE commands are implemented on top of
4224 	 * UPDATE command.
4225 	 */
4226 	if ((cmd == BNX2X_Q_CMD_ACTIVATE) ||
4227 	    (cmd == BNX2X_Q_CMD_DEACTIVATE))
4228 		bit = BNX2X_Q_CMD_UPDATE;
4229 	else
4230 		bit = cmd;
4231 
4232 	set_bit(bit, &obj->pending);
4233 	return bit;
4234 }
4235 
4236 static int bnx2x_queue_wait_comp(struct bnx2x *bp,
4237 				 struct bnx2x_queue_sp_obj *o,
4238 				 enum bnx2x_queue_cmd cmd)
4239 {
4240 	return bnx2x_state_wait(bp, cmd, &o->pending);
4241 }
4242 
4243 /**
4244  * bnx2x_queue_comp_cmd - complete the state change command.
4245  *
4246  * @bp:		device handle
4247  * @o:
4248  * @cmd:
4249  *
4250  * Checks that the arrived completion is expected.
4251  */
4252 static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
4253 				struct bnx2x_queue_sp_obj *o,
4254 				enum bnx2x_queue_cmd cmd)
4255 {
4256 	unsigned long cur_pending = o->pending;
4257 
4258 	if (!test_and_clear_bit(cmd, &cur_pending)) {
4259 		BNX2X_ERR("Bad MC reply %d for queue %d in state %d "
4260 			  "pending 0x%lx, next_state %d\n", cmd,
4261 			  o->cids[BNX2X_PRIMARY_CID_INDEX],
4262 			  o->state, cur_pending, o->next_state);
4263 		return -EINVAL;
4264 	}
4265 
4266 	if (o->next_tx_only >= o->max_cos)
4267 		/* >= becuase tx only must always be smaller than cos since the
4268 		 * primary connection suports COS 0
4269 		 */
4270 		BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d",
4271 			   o->next_tx_only, o->max_cos);
4272 
4273 	DP(BNX2X_MSG_SP, "Completing command %d for queue %d, "
4274 			 "setting state to %d\n", cmd,
4275 			 o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state);
4276 
4277 	if (o->next_tx_only)  /* print num tx-only if any exist */
4278 		DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d\n",
4279 			   o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only);
4280 
4281 	o->state = o->next_state;
4282 	o->num_tx_only = o->next_tx_only;
4283 	o->next_state = BNX2X_Q_STATE_MAX;
4284 
4285 	/* It's important that o->state and o->next_state are
4286 	 * updated before o->pending.
4287 	 */
4288 	wmb();
4289 
4290 	clear_bit(cmd, &o->pending);
4291 	smp_mb__after_clear_bit();
4292 
4293 	return 0;
4294 }
4295 
4296 static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp,
4297 				struct bnx2x_queue_state_params *cmd_params,
4298 				struct client_init_ramrod_data *data)
4299 {
4300 	struct bnx2x_queue_setup_params *params = &cmd_params->params.setup;
4301 
4302 	/* Rx data */
4303 
4304 	/* IPv6 TPA supported for E2 and above only */
4305 	data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA_IPV6, &params->flags) *
4306 				CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
4307 }
4308 
4309 static void bnx2x_q_fill_init_general_data(struct bnx2x *bp,
4310 				struct bnx2x_queue_sp_obj *o,
4311 				struct bnx2x_general_setup_params *params,
4312 				struct client_init_general_data *gen_data,
4313 				unsigned long *flags)
4314 {
4315 	gen_data->client_id = o->cl_id;
4316 
4317 	if (test_bit(BNX2X_Q_FLG_STATS, flags)) {
4318 		gen_data->statistics_counter_id =
4319 					params->stat_id;
4320 		gen_data->statistics_en_flg = 1;
4321 		gen_data->statistics_zero_flg =
4322 			test_bit(BNX2X_Q_FLG_ZERO_STATS, flags);
4323 	} else
4324 		gen_data->statistics_counter_id =
4325 					DISABLE_STATISTIC_COUNTER_ID_VALUE;
4326 
4327 	gen_data->is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, flags);
4328 	gen_data->activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE, flags);
4329 	gen_data->sp_client_id = params->spcl_id;
4330 	gen_data->mtu = cpu_to_le16(params->mtu);
4331 	gen_data->func_id = o->func_id;
4332 
4333 
4334 	gen_data->cos = params->cos;
4335 
4336 	gen_data->traffic_type =
4337 		test_bit(BNX2X_Q_FLG_FCOE, flags) ?
4338 		LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
4339 
4340 	DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n",
4341 	   gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
4342 }
4343 
4344 static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
4345 				struct bnx2x_txq_setup_params *params,
4346 				struct client_init_tx_data *tx_data,
4347 				unsigned long *flags)
4348 {
4349 	tx_data->enforce_security_flg =
4350 		test_bit(BNX2X_Q_FLG_TX_SEC, flags);
4351 	tx_data->default_vlan =
4352 		cpu_to_le16(params->default_vlan);
4353 	tx_data->default_vlan_flg =
4354 		test_bit(BNX2X_Q_FLG_DEF_VLAN, flags);
4355 	tx_data->tx_switching_flg =
4356 		test_bit(BNX2X_Q_FLG_TX_SWITCH, flags);
4357 	tx_data->anti_spoofing_flg =
4358 		test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
4359 	tx_data->tx_status_block_id = params->fw_sb_id;
4360 	tx_data->tx_sb_index_number = params->sb_cq_index;
4361 	tx_data->tss_leading_client_id = params->tss_leading_cl_id;
4362 
4363 	tx_data->tx_bd_page_base.lo =
4364 		cpu_to_le32(U64_LO(params->dscr_map));
4365 	tx_data->tx_bd_page_base.hi =
4366 		cpu_to_le32(U64_HI(params->dscr_map));
4367 
4368 	/* Don't configure any Tx switching mode during queue SETUP */
4369 	tx_data->state = 0;
4370 }
4371 
4372 static void bnx2x_q_fill_init_pause_data(struct bnx2x_queue_sp_obj *o,
4373 				struct rxq_pause_params *params,
4374 				struct client_init_rx_data *rx_data)
4375 {
4376 	/* flow control data */
4377 	rx_data->cqe_pause_thr_low = cpu_to_le16(params->rcq_th_lo);
4378 	rx_data->cqe_pause_thr_high = cpu_to_le16(params->rcq_th_hi);
4379 	rx_data->bd_pause_thr_low = cpu_to_le16(params->bd_th_lo);
4380 	rx_data->bd_pause_thr_high = cpu_to_le16(params->bd_th_hi);
4381 	rx_data->sge_pause_thr_low = cpu_to_le16(params->sge_th_lo);
4382 	rx_data->sge_pause_thr_high = cpu_to_le16(params->sge_th_hi);
4383 	rx_data->rx_cos_mask = cpu_to_le16(params->pri_map);
4384 }
4385 
4386 static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o,
4387 				struct bnx2x_rxq_setup_params *params,
4388 				struct client_init_rx_data *rx_data,
4389 				unsigned long *flags)
4390 {
4391 		/* Rx data */
4392 	rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) *
4393 				CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
4394 	rx_data->vmqueue_mode_en_flg = 0;
4395 
4396 	rx_data->cache_line_alignment_log_size =
4397 		params->cache_line_log;
4398 	rx_data->enable_dynamic_hc =
4399 		test_bit(BNX2X_Q_FLG_DHC, flags);
4400 	rx_data->max_sges_for_packet = params->max_sges_pkt;
4401 	rx_data->client_qzone_id = params->cl_qzone_id;
4402 	rx_data->max_agg_size = cpu_to_le16(params->tpa_agg_sz);
4403 
4404 	/* Always start in DROP_ALL mode */
4405 	rx_data->state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
4406 				     CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
4407 
4408 	/* We don't set drop flags */
4409 	rx_data->drop_ip_cs_err_flg = 0;
4410 	rx_data->drop_tcp_cs_err_flg = 0;
4411 	rx_data->drop_ttl0_flg = 0;
4412 	rx_data->drop_udp_cs_err_flg = 0;
4413 	rx_data->inner_vlan_removal_enable_flg =
4414 		test_bit(BNX2X_Q_FLG_VLAN, flags);
4415 	rx_data->outer_vlan_removal_enable_flg =
4416 		test_bit(BNX2X_Q_FLG_OV, flags);
4417 	rx_data->status_block_id = params->fw_sb_id;
4418 	rx_data->rx_sb_index_number = params->sb_cq_index;
4419 	rx_data->max_tpa_queues = params->max_tpa_queues;
4420 	rx_data->max_bytes_on_bd = cpu_to_le16(params->buf_sz);
4421 	rx_data->sge_buff_size = cpu_to_le16(params->sge_buf_sz);
4422 	rx_data->bd_page_base.lo =
4423 		cpu_to_le32(U64_LO(params->dscr_map));
4424 	rx_data->bd_page_base.hi =
4425 		cpu_to_le32(U64_HI(params->dscr_map));
4426 	rx_data->sge_page_base.lo =
4427 		cpu_to_le32(U64_LO(params->sge_map));
4428 	rx_data->sge_page_base.hi =
4429 		cpu_to_le32(U64_HI(params->sge_map));
4430 	rx_data->cqe_page_base.lo =
4431 		cpu_to_le32(U64_LO(params->rcq_map));
4432 	rx_data->cqe_page_base.hi =
4433 		cpu_to_le32(U64_HI(params->rcq_map));
4434 	rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags);
4435 
4436 	if (test_bit(BNX2X_Q_FLG_MCAST, flags)) {
4437 		rx_data->approx_mcast_engine_id = o->func_id;
4438 		rx_data->is_approx_mcast = 1;
4439 	}
4440 
4441 	rx_data->rss_engine_id = params->rss_engine_id;
4442 
4443 	/* silent vlan removal */
4444 	rx_data->silent_vlan_removal_flg =
4445 		test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, flags);
4446 	rx_data->silent_vlan_value =
4447 		cpu_to_le16(params->silent_removal_value);
4448 	rx_data->silent_vlan_mask =
4449 		cpu_to_le16(params->silent_removal_mask);
4450 
4451 }
4452 
4453 /* initialize the general, tx and rx parts of a queue object */
4454 static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp,
4455 				struct bnx2x_queue_state_params *cmd_params,
4456 				struct client_init_ramrod_data *data)
4457 {
4458 	bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4459 				       &cmd_params->params.setup.gen_params,
4460 				       &data->general,
4461 				       &cmd_params->params.setup.flags);
4462 
4463 	bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4464 				  &cmd_params->params.setup.txq_params,
4465 				  &data->tx,
4466 				  &cmd_params->params.setup.flags);
4467 
4468 	bnx2x_q_fill_init_rx_data(cmd_params->q_obj,
4469 				  &cmd_params->params.setup.rxq_params,
4470 				  &data->rx,
4471 				  &cmd_params->params.setup.flags);
4472 
4473 	bnx2x_q_fill_init_pause_data(cmd_params->q_obj,
4474 				     &cmd_params->params.setup.pause_params,
4475 				     &data->rx);
4476 }
4477 
4478 /* initialize the general and tx parts of a tx-only queue object */
4479 static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp,
4480 				struct bnx2x_queue_state_params *cmd_params,
4481 				struct tx_queue_init_ramrod_data *data)
4482 {
4483 	bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4484 				       &cmd_params->params.tx_only.gen_params,
4485 				       &data->general,
4486 				       &cmd_params->params.tx_only.flags);
4487 
4488 	bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4489 				  &cmd_params->params.tx_only.txq_params,
4490 				  &data->tx,
4491 				  &cmd_params->params.tx_only.flags);
4492 
4493 	DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x\n",cmd_params->q_obj->cids[0],
4494 	   data->tx.tx_bd_page_base.lo, data->tx.tx_bd_page_base.hi);
4495 }
4496 
4497 /**
4498  * bnx2x_q_init - init HW/FW queue
4499  *
4500  * @bp:		device handle
4501  * @params:
4502  *
4503  * HW/FW initial Queue configuration:
4504  *      - HC: Rx and Tx
4505  *      - CDU context validation
4506  *
4507  */
4508 static inline int bnx2x_q_init(struct bnx2x *bp,
4509 			       struct bnx2x_queue_state_params *params)
4510 {
4511 	struct bnx2x_queue_sp_obj *o = params->q_obj;
4512 	struct bnx2x_queue_init_params *init = &params->params.init;
4513 	u16 hc_usec;
4514 	u8 cos;
4515 
4516 	/* Tx HC configuration */
4517 	if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) &&
4518 	    test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) {
4519 		hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
4520 
4521 		bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id,
4522 			init->tx.sb_cq_index,
4523 			!test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags),
4524 			hc_usec);
4525 	}
4526 
4527 	/* Rx HC configuration */
4528 	if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) &&
4529 	    test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) {
4530 		hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
4531 
4532 		bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id,
4533 			init->rx.sb_cq_index,
4534 			!test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags),
4535 			hc_usec);
4536 	}
4537 
4538 	/* Set CDU context validation values */
4539 	for (cos = 0; cos < o->max_cos; cos++) {
4540 		DP(BNX2X_MSG_SP, "setting context validation. cid %d, cos %d\n",
4541 				 o->cids[cos], cos);
4542 		DP(BNX2X_MSG_SP, "context pointer %p\n", init->cxts[cos]);
4543 		bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]);
4544 	}
4545 
4546 	/* As no ramrod is sent, complete the command immediately  */
4547 	o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);
4548 
4549 	mmiowb();
4550 	smp_mb();
4551 
4552 	return 0;
4553 }
4554 
4555 static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
4556 					struct bnx2x_queue_state_params *params)
4557 {
4558 	struct bnx2x_queue_sp_obj *o = params->q_obj;
4559 	struct client_init_ramrod_data *rdata =
4560 		(struct client_init_ramrod_data *)o->rdata;
4561 	dma_addr_t data_mapping = o->rdata_mapping;
4562 	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4563 
4564 	/* Clear the ramrod data */
4565 	memset(rdata, 0, sizeof(*rdata));
4566 
4567 	/* Fill the ramrod data */
4568 	bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4569 
4570 	/*
4571 	 *  No need for an explicit memory barrier here as long we would
4572 	 *  need to ensure the ordering of writing to the SPQ element
4573 	 *  and updating of the SPQ producer which involves a memory
4574 	 *  read and we will have to put a full memory barrier there
4575 	 *  (inside bnx2x_sp_post()).
4576 	 */
4577 
4578 	return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4579 			     U64_HI(data_mapping),
4580 			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4581 }
4582 
4583 static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
4584 					struct bnx2x_queue_state_params *params)
4585 {
4586 	struct bnx2x_queue_sp_obj *o = params->q_obj;
4587 	struct client_init_ramrod_data *rdata =
4588 		(struct client_init_ramrod_data *)o->rdata;
4589 	dma_addr_t data_mapping = o->rdata_mapping;
4590 	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4591 
4592 	/* Clear the ramrod data */
4593 	memset(rdata, 0, sizeof(*rdata));
4594 
4595 	/* Fill the ramrod data */
4596 	bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4597 	bnx2x_q_fill_setup_data_e2(bp, params, rdata);
4598 
4599 	/*
4600 	 *  No need for an explicit memory barrier here as long we would
4601 	 *  need to ensure the ordering of writing to the SPQ element
4602 	 *  and updating of the SPQ producer which involves a memory
4603 	 *  read and we will have to put a full memory barrier there
4604 	 *  (inside bnx2x_sp_post()).
4605 	 */
4606 
4607 	return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4608 			     U64_HI(data_mapping),
4609 			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4610 }
4611 
4612 static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
4613 				  struct bnx2x_queue_state_params *params)
4614 {
4615 	struct bnx2x_queue_sp_obj *o = params->q_obj;
4616 	struct tx_queue_init_ramrod_data *rdata =
4617 		(struct tx_queue_init_ramrod_data *)o->rdata;
4618 	dma_addr_t data_mapping = o->rdata_mapping;
4619 	int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
4620 	struct bnx2x_queue_setup_tx_only_params *tx_only_params =
4621 		&params->params.tx_only;
4622 	u8 cid_index = tx_only_params->cid_index;
4623 
4624 
4625 	if (cid_index >= o->max_cos) {
4626 		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4627 			  o->cl_id, cid_index);
4628 		return -EINVAL;
4629 	}
4630 
4631 	DP(BNX2X_MSG_SP, "parameters received: cos: %d sp-id: %d\n",
4632 			 tx_only_params->gen_params.cos,
4633 			 tx_only_params->gen_params.spcl_id);
4634 
4635 	/* Clear the ramrod data */
4636 	memset(rdata, 0, sizeof(*rdata));
4637 
4638 	/* Fill the ramrod data */
4639 	bnx2x_q_fill_setup_tx_only(bp, params, rdata);
4640 
4641 	DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d,"
4642 			 "sp-client id %d, cos %d\n",
4643 			 o->cids[cid_index],
4644 			 rdata->general.client_id,
4645 			 rdata->general.sp_client_id, rdata->general.cos);
4646 
4647 	/*
4648 	 *  No need for an explicit memory barrier here as long we would
4649 	 *  need to ensure the ordering of writing to the SPQ element
4650 	 *  and updating of the SPQ producer which involves a memory
4651 	 *  read and we will have to put a full memory barrier there
4652 	 *  (inside bnx2x_sp_post()).
4653 	 */
4654 
4655 	return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
4656 			     U64_HI(data_mapping),
4657 			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4658 }
4659 
4660 static void bnx2x_q_fill_update_data(struct bnx2x *bp,
4661 				     struct bnx2x_queue_sp_obj *obj,
4662 				     struct bnx2x_queue_update_params *params,
4663 				     struct client_update_ramrod_data *data)
4664 {
4665 	/* Client ID of the client to update */
4666 	data->client_id = obj->cl_id;
4667 
4668 	/* Function ID of the client to update */
4669 	data->func_id = obj->func_id;
4670 
4671 	/* Default VLAN value */
4672 	data->default_vlan = cpu_to_le16(params->def_vlan);
4673 
4674 	/* Inner VLAN stripping */
4675 	data->inner_vlan_removal_enable_flg =
4676 		test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
4677 	data->inner_vlan_removal_change_flg =
4678 		test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
4679 			 &params->update_flags);
4680 
4681 	/* Outer VLAN sripping */
4682 	data->outer_vlan_removal_enable_flg =
4683 		test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
4684 	data->outer_vlan_removal_change_flg =
4685 		test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
4686 			 &params->update_flags);
4687 
4688 	/* Drop packets that have source MAC that doesn't belong to this
4689 	 * Queue.
4690 	 */
4691 	data->anti_spoofing_enable_flg =
4692 		test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
4693 	data->anti_spoofing_change_flg =
4694 		test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, &params->update_flags);
4695 
4696 	/* Activate/Deactivate */
4697 	data->activate_flg =
4698 		test_bit(BNX2X_Q_UPDATE_ACTIVATE, &params->update_flags);
4699 	data->activate_change_flg =
4700 		test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);
4701 
4702 	/* Enable default VLAN */
4703 	data->default_vlan_enable_flg =
4704 		test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
4705 	data->default_vlan_change_flg =
4706 		test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
4707 			 &params->update_flags);
4708 
4709 	/* silent vlan removal */
4710 	data->silent_vlan_change_flg =
4711 		test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
4712 			 &params->update_flags);
4713 	data->silent_vlan_removal_flg =
4714 		test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags);
4715 	data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
4716 	data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);
4717 }
4718 
4719 static inline int bnx2x_q_send_update(struct bnx2x *bp,
4720 				      struct bnx2x_queue_state_params *params)
4721 {
4722 	struct bnx2x_queue_sp_obj *o = params->q_obj;
4723 	struct client_update_ramrod_data *rdata =
4724 		(struct client_update_ramrod_data *)o->rdata;
4725 	dma_addr_t data_mapping = o->rdata_mapping;
4726 	struct bnx2x_queue_update_params *update_params =
4727 		&params->params.update;
4728 	u8 cid_index = update_params->cid_index;
4729 
4730 	if (cid_index >= o->max_cos) {
4731 		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4732 			  o->cl_id, cid_index);
4733 		return -EINVAL;
4734 	}
4735 
4736 
4737 	/* Clear the ramrod data */
4738 	memset(rdata, 0, sizeof(*rdata));
4739 
4740 	/* Fill the ramrod data */
4741 	bnx2x_q_fill_update_data(bp, o, update_params, rdata);
4742 
4743 	/*
4744 	 *  No need for an explicit memory barrier here as long we would
4745 	 *  need to ensure the ordering of writing to the SPQ element
4746 	 *  and updating of the SPQ producer which involves a memory
4747 	 *  read and we will have to put a full memory barrier there
4748 	 *  (inside bnx2x_sp_post()).
4749 	 */
4750 
4751 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
4752 			     o->cids[cid_index], U64_HI(data_mapping),
4753 			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4754 }
4755 
4756 /**
4757  * bnx2x_q_send_deactivate - send DEACTIVATE command
4758  *
4759  * @bp:		device handle
4760  * @params:
4761  *
4762  * implemented using the UPDATE command.
4763  */
4764 static inline int bnx2x_q_send_deactivate(struct bnx2x *bp,
4765 					struct bnx2x_queue_state_params *params)
4766 {
4767 	struct bnx2x_queue_update_params *update = &params->params.update;
4768 
4769 	memset(update, 0, sizeof(*update));
4770 
4771 	__set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4772 
4773 	return bnx2x_q_send_update(bp, params);
4774 }
4775 
4776 /**
4777  * bnx2x_q_send_activate - send ACTIVATE command
4778  *
4779  * @bp:		device handle
4780  * @params:
4781  *
4782  * implemented using the UPDATE command.
4783  */
4784 static inline int bnx2x_q_send_activate(struct bnx2x *bp,
4785 					struct bnx2x_queue_state_params *params)
4786 {
4787 	struct bnx2x_queue_update_params *update = &params->params.update;
4788 
4789 	memset(update, 0, sizeof(*update));
4790 
4791 	__set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags);
4792 	__set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4793 
4794 	return bnx2x_q_send_update(bp, params);
4795 }
4796 
4797 static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
4798 					struct bnx2x_queue_state_params *params)
4799 {
4800 	/* TODO: Not implemented yet. */
4801 	return -1;
4802 }
4803 
4804 static inline int bnx2x_q_send_halt(struct bnx2x *bp,
4805 				    struct bnx2x_queue_state_params *params)
4806 {
4807 	struct bnx2x_queue_sp_obj *o = params->q_obj;
4808 
4809 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT,
4810 			     o->cids[BNX2X_PRIMARY_CID_INDEX], 0, o->cl_id,
4811 			     ETH_CONNECTION_TYPE);
4812 }
4813 
4814 static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp,
4815 				       struct bnx2x_queue_state_params *params)
4816 {
4817 	struct bnx2x_queue_sp_obj *o = params->q_obj;
4818 	u8 cid_idx = params->params.cfc_del.cid_index;
4819 
4820 	if (cid_idx >= o->max_cos) {
4821 		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4822 			  o->cl_id, cid_idx);
4823 		return -EINVAL;
4824 	}
4825 
4826 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL,
4827 			     o->cids[cid_idx], 0, 0, NONE_CONNECTION_TYPE);
4828 }
4829 
4830 static inline int bnx2x_q_send_terminate(struct bnx2x *bp,
4831 					struct bnx2x_queue_state_params *params)
4832 {
4833 	struct bnx2x_queue_sp_obj *o = params->q_obj;
4834 	u8 cid_index = params->params.terminate.cid_index;
4835 
4836 	if (cid_index >= o->max_cos) {
4837 		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4838 			  o->cl_id, cid_index);
4839 		return -EINVAL;
4840 	}
4841 
4842 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE,
4843 			     o->cids[cid_index], 0, 0, ETH_CONNECTION_TYPE);
4844 }
4845 
4846 static inline int bnx2x_q_send_empty(struct bnx2x *bp,
4847 				     struct bnx2x_queue_state_params *params)
4848 {
4849 	struct bnx2x_queue_sp_obj *o = params->q_obj;
4850 
4851 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY,
4852 			     o->cids[BNX2X_PRIMARY_CID_INDEX], 0, 0,
4853 			     ETH_CONNECTION_TYPE);
4854 }
4855 
4856 static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp,
4857 					struct bnx2x_queue_state_params *params)
4858 {
4859 	switch (params->cmd) {
4860 	case BNX2X_Q_CMD_INIT:
4861 		return bnx2x_q_init(bp, params);
4862 	case BNX2X_Q_CMD_SETUP_TX_ONLY:
4863 		return bnx2x_q_send_setup_tx_only(bp, params);
4864 	case BNX2X_Q_CMD_DEACTIVATE:
4865 		return bnx2x_q_send_deactivate(bp, params);
4866 	case BNX2X_Q_CMD_ACTIVATE:
4867 		return bnx2x_q_send_activate(bp, params);
4868 	case BNX2X_Q_CMD_UPDATE:
4869 		return bnx2x_q_send_update(bp, params);
4870 	case BNX2X_Q_CMD_UPDATE_TPA:
4871 		return bnx2x_q_send_update_tpa(bp, params);
4872 	case BNX2X_Q_CMD_HALT:
4873 		return bnx2x_q_send_halt(bp, params);
4874 	case BNX2X_Q_CMD_CFC_DEL:
4875 		return bnx2x_q_send_cfc_del(bp, params);
4876 	case BNX2X_Q_CMD_TERMINATE:
4877 		return bnx2x_q_send_terminate(bp, params);
4878 	case BNX2X_Q_CMD_EMPTY:
4879 		return bnx2x_q_send_empty(bp, params);
4880 	default:
4881 		BNX2X_ERR("Unknown command: %d\n", params->cmd);
4882 		return -EINVAL;
4883 	}
4884 }
4885 
4886 static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp,
4887 				    struct bnx2x_queue_state_params *params)
4888 {
4889 	switch (params->cmd) {
4890 	case BNX2X_Q_CMD_SETUP:
4891 		return bnx2x_q_send_setup_e1x(bp, params);
4892 	case BNX2X_Q_CMD_INIT:
4893 	case BNX2X_Q_CMD_SETUP_TX_ONLY:
4894 	case BNX2X_Q_CMD_DEACTIVATE:
4895 	case BNX2X_Q_CMD_ACTIVATE:
4896 	case BNX2X_Q_CMD_UPDATE:
4897 	case BNX2X_Q_CMD_UPDATE_TPA:
4898 	case BNX2X_Q_CMD_HALT:
4899 	case BNX2X_Q_CMD_CFC_DEL:
4900 	case BNX2X_Q_CMD_TERMINATE:
4901 	case BNX2X_Q_CMD_EMPTY:
4902 		return bnx2x_queue_send_cmd_cmn(bp, params);
4903 	default:
4904 		BNX2X_ERR("Unknown command: %d\n", params->cmd);
4905 		return -EINVAL;
4906 	}
4907 }
4908 
4909 static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp,
4910 				   struct bnx2x_queue_state_params *params)
4911 {
4912 	switch (params->cmd) {
4913 	case BNX2X_Q_CMD_SETUP:
4914 		return bnx2x_q_send_setup_e2(bp, params);
4915 	case BNX2X_Q_CMD_INIT:
4916 	case BNX2X_Q_CMD_SETUP_TX_ONLY:
4917 	case BNX2X_Q_CMD_DEACTIVATE:
4918 	case BNX2X_Q_CMD_ACTIVATE:
4919 	case BNX2X_Q_CMD_UPDATE:
4920 	case BNX2X_Q_CMD_UPDATE_TPA:
4921 	case BNX2X_Q_CMD_HALT:
4922 	case BNX2X_Q_CMD_CFC_DEL:
4923 	case BNX2X_Q_CMD_TERMINATE:
4924 	case BNX2X_Q_CMD_EMPTY:
4925 		return bnx2x_queue_send_cmd_cmn(bp, params);
4926 	default:
4927 		BNX2X_ERR("Unknown command: %d\n", params->cmd);
4928 		return -EINVAL;
4929 	}
4930 }
4931 
4932 /**
4933  * bnx2x_queue_chk_transition - check state machine of a regular Queue
4934  *
4935  * @bp:		device handle
4936  * @o:
4937  * @params:
4938  *
4939  * (not Forwarding)
4940  * It both checks if the requested command is legal in a current
4941  * state and, if it's legal, sets a `next_state' in the object
4942  * that will be used in the completion flow to set the `state'
4943  * of the object.
4944  *
4945  * returns 0 if a requested command is a legal transition,
4946  *         -EINVAL otherwise.
4947  */
4948 static int bnx2x_queue_chk_transition(struct bnx2x *bp,
4949 				      struct bnx2x_queue_sp_obj *o,
4950 				      struct bnx2x_queue_state_params *params)
4951 {
4952 	enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX;
4953 	enum bnx2x_queue_cmd cmd = params->cmd;
4954 	struct bnx2x_queue_update_params *update_params =
4955 		 &params->params.update;
4956 	u8 next_tx_only = o->num_tx_only;
4957 
4958 	/*
4959 	 * Forget all pending for completion commands if a driver only state
4960 	 * transition has been requested.
4961 	 */
4962 	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
4963 		o->pending = 0;
4964 		o->next_state = BNX2X_Q_STATE_MAX;
4965 	}
4966 
4967 	/*
4968 	 * Don't allow a next state transition if we are in the middle of
4969 	 * the previous one.
4970 	 */
4971 	if (o->pending)
4972 		return -EBUSY;
4973 
4974 	switch (state) {
4975 	case BNX2X_Q_STATE_RESET:
4976 		if (cmd == BNX2X_Q_CMD_INIT)
4977 			next_state = BNX2X_Q_STATE_INITIALIZED;
4978 
4979 		break;
4980 	case BNX2X_Q_STATE_INITIALIZED:
4981 		if (cmd == BNX2X_Q_CMD_SETUP) {
4982 			if (test_bit(BNX2X_Q_FLG_ACTIVE,
4983 				     &params->params.setup.flags))
4984 				next_state = BNX2X_Q_STATE_ACTIVE;
4985 			else
4986 				next_state = BNX2X_Q_STATE_INACTIVE;
4987 		}
4988 
4989 		break;
4990 	case BNX2X_Q_STATE_ACTIVE:
4991 		if (cmd == BNX2X_Q_CMD_DEACTIVATE)
4992 			next_state = BNX2X_Q_STATE_INACTIVE;
4993 
4994 		else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
4995 			 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
4996 			next_state = BNX2X_Q_STATE_ACTIVE;
4997 
4998 		else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
4999 			next_state = BNX2X_Q_STATE_MULTI_COS;
5000 			next_tx_only = 1;
5001 		}
5002 
5003 		else if (cmd == BNX2X_Q_CMD_HALT)
5004 			next_state = BNX2X_Q_STATE_STOPPED;
5005 
5006 		else if (cmd == BNX2X_Q_CMD_UPDATE) {
5007 			/* If "active" state change is requested, update the
5008 			 *  state accordingly.
5009 			 */
5010 			if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5011 				     &update_params->update_flags) &&
5012 			    !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5013 				      &update_params->update_flags))
5014 				next_state = BNX2X_Q_STATE_INACTIVE;
5015 			else
5016 				next_state = BNX2X_Q_STATE_ACTIVE;
5017 		}
5018 
5019 		break;
5020 	case BNX2X_Q_STATE_MULTI_COS:
5021 		if (cmd == BNX2X_Q_CMD_TERMINATE)
5022 			next_state = BNX2X_Q_STATE_MCOS_TERMINATED;
5023 
5024 		else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
5025 			next_state = BNX2X_Q_STATE_MULTI_COS;
5026 			next_tx_only = o->num_tx_only + 1;
5027 		}
5028 
5029 		else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5030 			 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5031 			next_state = BNX2X_Q_STATE_MULTI_COS;
5032 
5033 		else if (cmd == BNX2X_Q_CMD_UPDATE) {
5034 			/* If "active" state change is requested, update the
5035 			 *  state accordingly.
5036 			 */
5037 			if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5038 				     &update_params->update_flags) &&
5039 			    !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5040 				      &update_params->update_flags))
5041 				next_state = BNX2X_Q_STATE_INACTIVE;
5042 			else
5043 				next_state = BNX2X_Q_STATE_MULTI_COS;
5044 		}
5045 
5046 		break;
5047 	case BNX2X_Q_STATE_MCOS_TERMINATED:
5048 		if (cmd == BNX2X_Q_CMD_CFC_DEL) {
5049 			next_tx_only = o->num_tx_only - 1;
5050 			if (next_tx_only == 0)
5051 				next_state = BNX2X_Q_STATE_ACTIVE;
5052 			else
5053 				next_state = BNX2X_Q_STATE_MULTI_COS;
5054 		}
5055 
5056 		break;
5057 	case BNX2X_Q_STATE_INACTIVE:
5058 		if (cmd == BNX2X_Q_CMD_ACTIVATE)
5059 			next_state = BNX2X_Q_STATE_ACTIVE;
5060 
5061 		else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5062 			 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5063 			next_state = BNX2X_Q_STATE_INACTIVE;
5064 
5065 		else if (cmd == BNX2X_Q_CMD_HALT)
5066 			next_state = BNX2X_Q_STATE_STOPPED;
5067 
5068 		else if (cmd == BNX2X_Q_CMD_UPDATE) {
5069 			/* If "active" state change is requested, update the
5070 			 * state accordingly.
5071 			 */
5072 			if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5073 				     &update_params->update_flags) &&
5074 			    test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5075 				     &update_params->update_flags)){
5076 				if (o->num_tx_only == 0)
5077 					next_state = BNX2X_Q_STATE_ACTIVE;
5078 				else /* tx only queues exist for this queue */
5079 					next_state = BNX2X_Q_STATE_MULTI_COS;
5080 			} else
5081 				next_state = BNX2X_Q_STATE_INACTIVE;
5082 		}
5083 
5084 		break;
5085 	case BNX2X_Q_STATE_STOPPED:
5086 		if (cmd == BNX2X_Q_CMD_TERMINATE)
5087 			next_state = BNX2X_Q_STATE_TERMINATED;
5088 
5089 		break;
5090 	case BNX2X_Q_STATE_TERMINATED:
5091 		if (cmd == BNX2X_Q_CMD_CFC_DEL)
5092 			next_state = BNX2X_Q_STATE_RESET;
5093 
5094 		break;
5095 	default:
5096 		BNX2X_ERR("Illegal state: %d\n", state);
5097 	}
5098 
5099 	/* Transition is assured */
5100 	if (next_state != BNX2X_Q_STATE_MAX) {
5101 		DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n",
5102 				 state, cmd, next_state);
5103 		o->next_state = next_state;
5104 		o->next_tx_only = next_tx_only;
5105 		return 0;
5106 	}
5107 
5108 	DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd);
5109 
5110 	return -EINVAL;
5111 }
5112 
5113 void bnx2x_init_queue_obj(struct bnx2x *bp,
5114 			  struct bnx2x_queue_sp_obj *obj,
5115 			  u8 cl_id, u32 *cids, u8 cid_cnt, u8 func_id,
5116 			  void *rdata,
5117 			  dma_addr_t rdata_mapping, unsigned long type)
5118 {
5119 	memset(obj, 0, sizeof(*obj));
5120 
5121 	/* We support only BNX2X_MULTI_TX_COS Tx CoS at the moment */
5122 	BUG_ON(BNX2X_MULTI_TX_COS < cid_cnt);
5123 
5124 	memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
5125 	obj->max_cos = cid_cnt;
5126 	obj->cl_id = cl_id;
5127 	obj->func_id = func_id;
5128 	obj->rdata = rdata;
5129 	obj->rdata_mapping = rdata_mapping;
5130 	obj->type = type;
5131 	obj->next_state = BNX2X_Q_STATE_MAX;
5132 
5133 	if (CHIP_IS_E1x(bp))
5134 		obj->send_cmd = bnx2x_queue_send_cmd_e1x;
5135 	else
5136 		obj->send_cmd = bnx2x_queue_send_cmd_e2;
5137 
5138 	obj->check_transition = bnx2x_queue_chk_transition;
5139 
5140 	obj->complete_cmd = bnx2x_queue_comp_cmd;
5141 	obj->wait_comp = bnx2x_queue_wait_comp;
5142 	obj->set_pending = bnx2x_queue_set_pending;
5143 }
5144 
5145 void bnx2x_queue_set_cos_cid(struct bnx2x *bp,
5146 			     struct bnx2x_queue_sp_obj *obj,
5147 			     u32 cid, u8 index)
5148 {
5149 	obj->cids[index] = cid;
5150 }
5151 
5152 /********************** Function state object *********************************/
5153 enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
5154 					   struct bnx2x_func_sp_obj *o)
5155 {
5156 	/* in the middle of transaction - return INVALID state */
5157 	if (o->pending)
5158 		return BNX2X_F_STATE_MAX;
5159 
5160 	/*
5161 	 * unsure the order of reading of o->pending and o->state
5162 	 * o->pending should be read first
5163 	 */
5164 	rmb();
5165 
5166 	return o->state;
5167 }
5168 
5169 static int bnx2x_func_wait_comp(struct bnx2x *bp,
5170 				struct bnx2x_func_sp_obj *o,
5171 				enum bnx2x_func_cmd cmd)
5172 {
5173 	return bnx2x_state_wait(bp, cmd, &o->pending);
5174 }
5175 
5176 /**
5177  * bnx2x_func_state_change_comp - complete the state machine transition
5178  *
5179  * @bp:		device handle
5180  * @o:
5181  * @cmd:
5182  *
5183  * Called on state change transition. Completes the state
5184  * machine transition only - no HW interaction.
5185  */
5186 static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
5187 					       struct bnx2x_func_sp_obj *o,
5188 					       enum bnx2x_func_cmd cmd)
5189 {
5190 	unsigned long cur_pending = o->pending;
5191 
5192 	if (!test_and_clear_bit(cmd, &cur_pending)) {
5193 		BNX2X_ERR("Bad MC reply %d for func %d in state %d "
5194 			  "pending 0x%lx, next_state %d\n", cmd, BP_FUNC(bp),
5195 			  o->state, cur_pending, o->next_state);
5196 		return -EINVAL;
5197 	}
5198 
5199 	DP(BNX2X_MSG_SP,
5200 	   "Completing command %d for func %d, setting state to %d\n",
5201 	   cmd, BP_FUNC(bp), o->next_state);
5202 
5203 	o->state = o->next_state;
5204 	o->next_state = BNX2X_F_STATE_MAX;
5205 
5206 	/* It's important that o->state and o->next_state are
5207 	 * updated before o->pending.
5208 	 */
5209 	wmb();
5210 
5211 	clear_bit(cmd, &o->pending);
5212 	smp_mb__after_clear_bit();
5213 
5214 	return 0;
5215 }
5216 
5217 /**
5218  * bnx2x_func_comp_cmd - complete the state change command
5219  *
5220  * @bp:		device handle
5221  * @o:
5222  * @cmd:
5223  *
5224  * Checks that the arrived completion is expected.
5225  */
5226 static int bnx2x_func_comp_cmd(struct bnx2x *bp,
5227 			       struct bnx2x_func_sp_obj *o,
5228 			       enum bnx2x_func_cmd cmd)
5229 {
5230 	/* Complete the state machine part first, check if it's a
5231 	 * legal completion.
5232 	 */
5233 	int rc = bnx2x_func_state_change_comp(bp, o, cmd);
5234 	return rc;
5235 }
5236 
5237 /**
5238  * bnx2x_func_chk_transition - perform function state machine transition
5239  *
5240  * @bp:		device handle
5241  * @o:
5242  * @params:
5243  *
5244  * It both checks if the requested command is legal in a current
5245  * state and, if it's legal, sets a `next_state' in the object
5246  * that will be used in the completion flow to set the `state'
5247  * of the object.
5248  *
5249  * returns 0 if a requested command is a legal transition,
5250  *         -EINVAL otherwise.
5251  */
5252 static int bnx2x_func_chk_transition(struct bnx2x *bp,
5253 				     struct bnx2x_func_sp_obj *o,
5254 				     struct bnx2x_func_state_params *params)
5255 {
5256 	enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX;
5257 	enum bnx2x_func_cmd cmd = params->cmd;
5258 
5259 	/*
5260 	 * Forget all pending for completion commands if a driver only state
5261 	 * transition has been requested.
5262 	 */
5263 	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5264 		o->pending = 0;
5265 		o->next_state = BNX2X_F_STATE_MAX;
5266 	}
5267 
5268 	/*
5269 	 * Don't allow a next state transition if we are in the middle of
5270 	 * the previous one.
5271 	 */
5272 	if (o->pending)
5273 		return -EBUSY;
5274 
5275 	switch (state) {
5276 	case BNX2X_F_STATE_RESET:
5277 		if (cmd == BNX2X_F_CMD_HW_INIT)
5278 			next_state = BNX2X_F_STATE_INITIALIZED;
5279 
5280 		break;
5281 	case BNX2X_F_STATE_INITIALIZED:
5282 		if (cmd == BNX2X_F_CMD_START)
5283 			next_state = BNX2X_F_STATE_STARTED;
5284 
5285 		else if (cmd == BNX2X_F_CMD_HW_RESET)
5286 			next_state = BNX2X_F_STATE_RESET;
5287 
5288 		break;
5289 	case BNX2X_F_STATE_STARTED:
5290 		if (cmd == BNX2X_F_CMD_STOP)
5291 			next_state = BNX2X_F_STATE_INITIALIZED;
5292 		else if (cmd == BNX2X_F_CMD_TX_STOP)
5293 			next_state = BNX2X_F_STATE_TX_STOPPED;
5294 
5295 		break;
5296 	case BNX2X_F_STATE_TX_STOPPED:
5297 		if (cmd == BNX2X_F_CMD_TX_START)
5298 			next_state = BNX2X_F_STATE_STARTED;
5299 
5300 		break;
5301 	default:
5302 		BNX2X_ERR("Unknown state: %d\n", state);
5303 	}
5304 
5305 	/* Transition is assured */
5306 	if (next_state != BNX2X_F_STATE_MAX) {
5307 		DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n",
5308 				 state, cmd, next_state);
5309 		o->next_state = next_state;
5310 		return 0;
5311 	}
5312 
5313 	DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n",
5314 			 state, cmd);
5315 
5316 	return -EINVAL;
5317 }
5318 
5319 /**
5320  * bnx2x_func_init_func - performs HW init at function stage
5321  *
5322  * @bp:		device handle
5323  * @drv:
5324  *
5325  * Init HW when the current phase is
5326  * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only
5327  * HW blocks.
5328  */
5329 static inline int bnx2x_func_init_func(struct bnx2x *bp,
5330 				       const struct bnx2x_func_sp_drv_ops *drv)
5331 {
5332 	return drv->init_hw_func(bp);
5333 }
5334 
5335 /**
5336  * bnx2x_func_init_port - performs HW init at port stage
5337  *
5338  * @bp:		device handle
5339  * @drv:
5340  *
5341  * Init HW when the current phase is
5342  * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
5343  * FUNCTION-only HW blocks.
5344  *
5345  */
5346 static inline int bnx2x_func_init_port(struct bnx2x *bp,
5347 				       const struct bnx2x_func_sp_drv_ops *drv)
5348 {
5349 	int rc = drv->init_hw_port(bp);
5350 	if (rc)
5351 		return rc;
5352 
5353 	return bnx2x_func_init_func(bp, drv);
5354 }
5355 
5356 /**
5357  * bnx2x_func_init_cmn_chip - performs HW init at chip-common stage
5358  *
5359  * @bp:		device handle
5360  * @drv:
5361  *
5362  * Init HW when the current phase is
5363  * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
5364  * PORT-only and FUNCTION-only HW blocks.
5365  */
5366 static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp,
5367 					const struct bnx2x_func_sp_drv_ops *drv)
5368 {
5369 	int rc = drv->init_hw_cmn_chip(bp);
5370 	if (rc)
5371 		return rc;
5372 
5373 	return bnx2x_func_init_port(bp, drv);
5374 }
5375 
5376 /**
5377  * bnx2x_func_init_cmn - performs HW init at common stage
5378  *
5379  * @bp:		device handle
5380  * @drv:
5381  *
5382  * Init HW when the current phase is
5383  * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON,
5384  * PORT-only and FUNCTION-only HW blocks.
5385  */
5386 static inline int bnx2x_func_init_cmn(struct bnx2x *bp,
5387 				      const struct bnx2x_func_sp_drv_ops *drv)
5388 {
5389 	int rc = drv->init_hw_cmn(bp);
5390 	if (rc)
5391 		return rc;
5392 
5393 	return bnx2x_func_init_port(bp, drv);
5394 }
5395 
5396 static int bnx2x_func_hw_init(struct bnx2x *bp,
5397 			      struct bnx2x_func_state_params *params)
5398 {
5399 	u32 load_code = params->params.hw_init.load_phase;
5400 	struct bnx2x_func_sp_obj *o = params->f_obj;
5401 	const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5402 	int rc = 0;
5403 
5404 	DP(BNX2X_MSG_SP, "function %d  load_code %x\n",
5405 			 BP_ABS_FUNC(bp), load_code);
5406 
5407 	/* Prepare buffers for unzipping the FW */
5408 	rc = drv->gunzip_init(bp);
5409 	if (rc)
5410 		return rc;
5411 
5412 	/* Prepare FW */
5413 	rc = drv->init_fw(bp);
5414 	if (rc) {
5415 		BNX2X_ERR("Error loading firmware\n");
5416 		goto init_err;
5417 	}
5418 
5419 	/* Handle the beginning of COMMON_XXX pases separatelly... */
5420 	switch (load_code) {
5421 	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5422 		rc = bnx2x_func_init_cmn_chip(bp, drv);
5423 		if (rc)
5424 			goto init_err;
5425 
5426 		break;
5427 	case FW_MSG_CODE_DRV_LOAD_COMMON:
5428 		rc = bnx2x_func_init_cmn(bp, drv);
5429 		if (rc)
5430 			goto init_err;
5431 
5432 		break;
5433 	case FW_MSG_CODE_DRV_LOAD_PORT:
5434 		rc = bnx2x_func_init_port(bp, drv);
5435 		if (rc)
5436 			goto init_err;
5437 
5438 		break;
5439 	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5440 		rc = bnx2x_func_init_func(bp, drv);
5441 		if (rc)
5442 			goto init_err;
5443 
5444 		break;
5445 	default:
5446 		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5447 		rc = -EINVAL;
5448 	}
5449 
5450 init_err:
5451 	drv->gunzip_end(bp);
5452 
5453 	/* In case of success, complete the comand immediatelly: no ramrods
5454 	 * have been sent.
5455 	 */
5456 	if (!rc)
5457 		o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT);
5458 
5459 	return rc;
5460 }
5461 
5462 /**
5463  * bnx2x_func_reset_func - reset HW at function stage
5464  *
5465  * @bp:		device handle
5466  * @drv:
5467  *
5468  * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
5469  * FUNCTION-only HW blocks.
5470  */
5471 static inline void bnx2x_func_reset_func(struct bnx2x *bp,
5472 					const struct bnx2x_func_sp_drv_ops *drv)
5473 {
5474 	drv->reset_hw_func(bp);
5475 }
5476 
5477 /**
5478  * bnx2x_func_reset_port - reser HW at port stage
5479  *
5480  * @bp:		device handle
5481  * @drv:
5482  *
5483  * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
5484  * FUNCTION-only and PORT-only HW blocks.
5485  *
5486  *                 !!!IMPORTANT!!!
5487  *
5488  * It's important to call reset_port before reset_func() as the last thing
5489  * reset_func does is pf_disable() thus disabling PGLUE_B, which
5490  * makes impossible any DMAE transactions.
5491  */
5492 static inline void bnx2x_func_reset_port(struct bnx2x *bp,
5493 					const struct bnx2x_func_sp_drv_ops *drv)
5494 {
5495 	drv->reset_hw_port(bp);
5496 	bnx2x_func_reset_func(bp, drv);
5497 }
5498 
5499 /**
5500  * bnx2x_func_reset_cmn - reser HW at common stage
5501  *
5502  * @bp:		device handle
5503  * @drv:
5504  *
5505  * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
5506  * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
5507  * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
5508  */
5509 static inline void bnx2x_func_reset_cmn(struct bnx2x *bp,
5510 					const struct bnx2x_func_sp_drv_ops *drv)
5511 {
5512 	bnx2x_func_reset_port(bp, drv);
5513 	drv->reset_hw_cmn(bp);
5514 }
5515 
5516 
5517 static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
5518 				      struct bnx2x_func_state_params *params)
5519 {
5520 	u32 reset_phase = params->params.hw_reset.reset_phase;
5521 	struct bnx2x_func_sp_obj *o = params->f_obj;
5522 	const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5523 
5524 	DP(BNX2X_MSG_SP, "function %d  reset_phase %x\n", BP_ABS_FUNC(bp),
5525 			 reset_phase);
5526 
5527 	switch (reset_phase) {
5528 	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5529 		bnx2x_func_reset_cmn(bp, drv);
5530 		break;
5531 	case FW_MSG_CODE_DRV_UNLOAD_PORT:
5532 		bnx2x_func_reset_port(bp, drv);
5533 		break;
5534 	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5535 		bnx2x_func_reset_func(bp, drv);
5536 		break;
5537 	default:
5538 		BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n",
5539 			   reset_phase);
5540 		break;
5541 	}
5542 
5543 	/* Complete the comand immediatelly: no ramrods have been sent. */
5544 	o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET);
5545 
5546 	return 0;
5547 }
5548 
5549 static inline int bnx2x_func_send_start(struct bnx2x *bp,
5550 					struct bnx2x_func_state_params *params)
5551 {
5552 	struct bnx2x_func_sp_obj *o = params->f_obj;
5553 	struct function_start_data *rdata =
5554 		(struct function_start_data *)o->rdata;
5555 	dma_addr_t data_mapping = o->rdata_mapping;
5556 	struct bnx2x_func_start_params *start_params = &params->params.start;
5557 
5558 	memset(rdata, 0, sizeof(*rdata));
5559 
5560 	/* Fill the ramrod data with provided parameters */
5561 	rdata->function_mode = cpu_to_le16(start_params->mf_mode);
5562 	rdata->sd_vlan_tag   = start_params->sd_vlan_tag;
5563 	rdata->path_id       = BP_PATH(bp);
5564 	rdata->network_cos_mode = start_params->network_cos_mode;
5565 
5566 	/*
5567 	 *  No need for an explicit memory barrier here as long we would
5568 	 *  need to ensure the ordering of writing to the SPQ element
5569 	 *  and updating of the SPQ producer which involves a memory
5570 	 *  read and we will have to put a full memory barrier there
5571 	 *  (inside bnx2x_sp_post()).
5572 	 */
5573 
5574 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
5575 			     U64_HI(data_mapping),
5576 			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5577 }
5578 
5579 static inline int bnx2x_func_send_stop(struct bnx2x *bp,
5580 				       struct bnx2x_func_state_params *params)
5581 {
5582 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0,
5583 			     NONE_CONNECTION_TYPE);
5584 }
5585 
5586 static inline int bnx2x_func_send_tx_stop(struct bnx2x *bp,
5587 				       struct bnx2x_func_state_params *params)
5588 {
5589 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, 0,
5590 			     NONE_CONNECTION_TYPE);
5591 }
5592 static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
5593 				       struct bnx2x_func_state_params *params)
5594 {
5595 	struct bnx2x_func_sp_obj *o = params->f_obj;
5596 	struct flow_control_configuration *rdata =
5597 		(struct flow_control_configuration *)o->rdata;
5598 	dma_addr_t data_mapping = o->rdata_mapping;
5599 	struct bnx2x_func_tx_start_params *tx_start_params =
5600 		&params->params.tx_start;
5601 	int i;
5602 
5603 	memset(rdata, 0, sizeof(*rdata));
5604 
5605 	rdata->dcb_enabled = tx_start_params->dcb_enabled;
5606 	rdata->dcb_version = tx_start_params->dcb_version;
5607 	rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0_en;
5608 
5609 	for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
5610 		rdata->traffic_type_to_priority_cos[i] =
5611 			tx_start_params->traffic_type_to_priority_cos[i];
5612 
5613 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
5614 			     U64_HI(data_mapping),
5615 			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5616 }
5617 
5618 static int bnx2x_func_send_cmd(struct bnx2x *bp,
5619 			       struct bnx2x_func_state_params *params)
5620 {
5621 	switch (params->cmd) {
5622 	case BNX2X_F_CMD_HW_INIT:
5623 		return bnx2x_func_hw_init(bp, params);
5624 	case BNX2X_F_CMD_START:
5625 		return bnx2x_func_send_start(bp, params);
5626 	case BNX2X_F_CMD_STOP:
5627 		return bnx2x_func_send_stop(bp, params);
5628 	case BNX2X_F_CMD_HW_RESET:
5629 		return bnx2x_func_hw_reset(bp, params);
5630 	case BNX2X_F_CMD_TX_STOP:
5631 		return bnx2x_func_send_tx_stop(bp, params);
5632 	case BNX2X_F_CMD_TX_START:
5633 		return bnx2x_func_send_tx_start(bp, params);
5634 	default:
5635 		BNX2X_ERR("Unknown command: %d\n", params->cmd);
5636 		return -EINVAL;
5637 	}
5638 }
5639 
5640 void bnx2x_init_func_obj(struct bnx2x *bp,
5641 			 struct bnx2x_func_sp_obj *obj,
5642 			 void *rdata, dma_addr_t rdata_mapping,
5643 			 struct bnx2x_func_sp_drv_ops *drv_iface)
5644 {
5645 	memset(obj, 0, sizeof(*obj));
5646 
5647 	mutex_init(&obj->one_pending_mutex);
5648 
5649 	obj->rdata = rdata;
5650 	obj->rdata_mapping = rdata_mapping;
5651 
5652 	obj->send_cmd = bnx2x_func_send_cmd;
5653 	obj->check_transition = bnx2x_func_chk_transition;
5654 	obj->complete_cmd = bnx2x_func_comp_cmd;
5655 	obj->wait_comp = bnx2x_func_wait_comp;
5656 
5657 	obj->drv = drv_iface;
5658 }
5659 
5660 /**
5661  * bnx2x_func_state_change - perform Function state change transition
5662  *
5663  * @bp:		device handle
5664  * @params:	parameters to perform the transaction
5665  *
5666  * returns 0 in case of successfully completed transition,
5667  *         negative error code in case of failure, positive
5668  *         (EBUSY) value if there is a completion to that is
5669  *         still pending (possible only if RAMROD_COMP_WAIT is
5670  *         not set in params->ramrod_flags for asynchronous
5671  *         commands).
5672  */
5673 int bnx2x_func_state_change(struct bnx2x *bp,
5674 			    struct bnx2x_func_state_params *params)
5675 {
5676 	struct bnx2x_func_sp_obj *o = params->f_obj;
5677 	int rc;
5678 	enum bnx2x_func_cmd cmd = params->cmd;
5679 	unsigned long *pending = &o->pending;
5680 
5681 	mutex_lock(&o->one_pending_mutex);
5682 
5683 	/* Check that the requested transition is legal */
5684 	if (o->check_transition(bp, o, params)) {
5685 		mutex_unlock(&o->one_pending_mutex);
5686 		return -EINVAL;
5687 	}
5688 
5689 	/* Set "pending" bit */
5690 	set_bit(cmd, pending);
5691 
5692 	/* Don't send a command if only driver cleanup was requested */
5693 	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5694 		bnx2x_func_state_change_comp(bp, o, cmd);
5695 		mutex_unlock(&o->one_pending_mutex);
5696 	} else {
5697 		/* Send a ramrod */
5698 		rc = o->send_cmd(bp, params);
5699 
5700 		mutex_unlock(&o->one_pending_mutex);
5701 
5702 		if (rc) {
5703 			o->next_state = BNX2X_F_STATE_MAX;
5704 			clear_bit(cmd, pending);
5705 			smp_mb__after_clear_bit();
5706 			return rc;
5707 		}
5708 
5709 		if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
5710 			rc = o->wait_comp(bp, o, cmd);
5711 			if (rc)
5712 				return rc;
5713 
5714 			return 0;
5715 		}
5716 	}
5717 
5718 	return !!test_bit(cmd, pending);
5719 }
5720