/*-
 * Copyright (c) 2007-2013 Broadcom Corporation. All rights reserved.
 *
 * Eric Davis        <edavis@broadcom.com>
 * David Christensen <davidch@broadcom.com>
 * Gary Zambrano     <zambrano@broadcom.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Broadcom Corporation nor the name of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written consent.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "bxe.h"
#include "ecore_init.h"

/**** Exe Queue interfaces ****/

/**
 * ecore_exe_queue_init - init the Exe Queue object
 *
 * @sc:		driver handle
 * @o:		pointer to the object
 * @exe_len:	length of an execution chunk
 * @owner:	pointer to the owner
 * @validate:	validate function pointer
 * @remove:	remove function pointer
 * @optimize:	optimize function pointer
 * @exec:	execute function pointer
 * @get:	get function pointer
 */
static inline void ecore_exe_queue_init(struct bxe_softc *sc,
					struct ecore_exe_queue_obj *o,
					int exe_len,
					union ecore_qable_obj *owner,
					exe_q_validate validate,
					exe_q_remove remove,
					exe_q_optimize optimize,
					exe_q_execute exec,
					exe_q_get get)
{
	ECORE_MEMSET(o, 0, sizeof(*o));

	ECORE_LIST_INIT(&o->exe_queue);
	ECORE_LIST_INIT(&o->pending_comp);

	ECORE_SPIN_LOCK_INIT(&o->lock, sc);

	o->exe_chunk_len = exe_len;
	o->owner         = owner;

	/* Owner specific callbacks */
	o->validate      = validate;
	o->remove        = remove;
	o->optimize      = optimize;
	o->execute       = exec;
	o->get           = get;

	ECORE_MSG(sc, "Setup the execution queue with the chunk length of %d\n",
		  exe_len);
}

static inline void ecore_exe_queue_free_elem(struct bxe_softc *sc,
					     struct ecore_exeq_elem *elem)
{
	ECORE_MSG(sc, "Deleting an exe_queue element\n");
	ECORE_FREE(sc, elem, sizeof(*elem));
}

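/* Count the elements currently on the execution queue; takes and releases
 * the queue lock around the walk.
 */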
static inline int ecore_exe_queue_length(struct ecore_exe_queue_obj *o)
{
	struct ecore_exeq_elem *elem;
	int cnt = 0;

	ECORE_SPIN_LOCK_BH(&o->lock);

	ECORE_LIST_FOR_EACH_ENTRY(elem, &o->exe_queue, link,
				  struct ecore_exeq_elem)
		cnt++;

	ECORE_SPIN_UNLOCK_BH(&o->lock);

	return cnt;
}

/**
 * ecore_exe_queue_add - add a new element to the execution queue
 *
 * @sc:		driver handle
 * @o:		queue
 * @elem:	new command to add
 * @restore:	true - do not optimize the command
 *
 * If the element is optimized or is illegal, frees it.
 */
static inline int ecore_exe_queue_add(struct bxe_softc *sc,
				      struct ecore_exe_queue_obj *o,
				      struct ecore_exeq_elem *elem,
				      bool restore)
{
	int rc;

	ECORE_SPIN_LOCK_BH(&o->lock);

	if (!restore) {
		/* Try to cancel this element's command by optimizing it
		 * against a pending opposite one.
		 */
		rc = o->optimize(sc, o->owner, elem);
		if (rc)
			goto free_and_exit;

		/* Check if this request is ok */
		rc = o->validate(sc, o->owner, elem);
		if (rc) {
			ECORE_MSG(sc, "Preamble failed: %d\n", rc);
			goto free_and_exit;
		}
	}

	/* If the command passed, add it to the execution queue */
	ECORE_LIST_PUSH_TAIL(&elem->link, &o->exe_queue);

	ECORE_SPIN_UNLOCK_BH(&o->lock);

	return ECORE_SUCCESS;

free_and_exit:
	ecore_exe_queue_free_elem(sc, elem);

	ECORE_SPIN_UNLOCK_BH(&o->lock);

	return rc;
}

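/* Drain the pending-completion list and free its elements. Must be called
 * while holding the execution queue lock; ecore_exe_queue_reset_pending()
 * below is the self-locking wrapper.
 */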
static inline void __ecore_exe_queue_reset_pending(
	struct bxe_softc *sc,
	struct ecore_exe_queue_obj *o)
{
	struct ecore_exeq_elem *elem;

	while (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) {
		elem = ECORE_LIST_FIRST_ENTRY(&o->pending_comp,
					      struct ecore_exeq_elem,
					      link);

		ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->pending_comp);
		ecore_exe_queue_free_elem(sc, elem);
	}
}

static inline void ecore_exe_queue_reset_pending(struct bxe_softc *sc,
						 struct ecore_exe_queue_obj *o)
{
	ECORE_SPIN_LOCK_BH(&o->lock);

	__ecore_exe_queue_reset_pending(sc, o);

	ECORE_SPIN_UNLOCK_BH(&o->lock);
}

/**
 * ecore_exe_queue_step - execute one execution chunk atomically
 *
 * @sc:			driver handle
 * @vobj:		owning vlan_mac object
 * @o:			queue
 * @ramrod_flags:	flags
 *
 * (Should be called while holding the exe_queue->lock).
 */
static inline int ecore_exe_queue_step(struct bxe_softc *sc,
				       struct ecore_vlan_mac_obj *vobj,
				       struct ecore_exe_queue_obj *o,
				       unsigned long *ramrod_flags)
{
	struct ecore_exeq_elem *elem, spacer;
	int cur_len = 0, rc;

	ECORE_MEMSET(&spacer, 0, sizeof(spacer));

	/* Next step should not be performed until the current is finished,
	 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
	 * properly clear object internals without sending any command to the FW
	 * which also implies there won't be any completion to clear the
	 * 'pending' list.
	 */
	if (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) {
		if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
			ECORE_MSG(sc, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
			__ecore_exe_queue_reset_pending(sc, o);
		} else {
			return ECORE_PENDING;
		}
	}

	/* Run through the pending commands list and create a next
	 * execution chunk.
	 */
	while (!ECORE_LIST_IS_EMPTY(&o->exe_queue)) {
		elem = ECORE_LIST_FIRST_ENTRY(&o->exe_queue,
					      struct ecore_exeq_elem,
					      link);
		ECORE_DBG_BREAK_IF(!elem->cmd_len);

		if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
			cur_len += elem->cmd_len;
			/* Prevent both lists from being empty when moving an
			 * element. This allows calling
			 * ecore_exe_queue_empty() without locking.
			 */
			ECORE_LIST_PUSH_TAIL(&spacer.link, &o->pending_comp);
			mb();
			ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->exe_queue);
			ECORE_LIST_PUSH_TAIL(&elem->link, &o->pending_comp);
			ECORE_LIST_REMOVE_ENTRY(&spacer.link, &o->pending_comp);
		} else
			break;
	}

	/* Sanity check */
	if (!cur_len)
		return ECORE_SUCCESS;

	rc = o->execute(sc, o->owner, &o->pending_comp, ramrod_flags);
	if (rc < 0)
		/* In case of an error return the commands back to the queue
		 * and reset the pending_comp.
		 */
		ECORE_LIST_SPLICE_INIT(&o->pending_comp, &o->exe_queue);
	else if (!rc)
		/* If zero is returned, it means there are no outstanding
		 * pending completions and we may dismiss the pending list.
		 */
		__ecore_exe_queue_reset_pending(sc, o);

	return rc;
}

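/* Check that both the execution queue and the pending-completion list are
 * empty. Safe to call without the queue lock thanks to the spacer trick in
 * ecore_exe_queue_step().
 */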
static inline bool ecore_exe_queue_empty(struct ecore_exe_queue_obj *o)
{
	bool empty = ECORE_LIST_IS_EMPTY(&o->exe_queue);

	/* Don't reorder!!! */
	mb();

	return empty && ECORE_LIST_IS_EMPTY(&o->pending_comp);
}

static inline struct ecore_exeq_elem *ecore_exe_queue_alloc_elem(
	struct bxe_softc *sc)
{
	ECORE_MSG(sc, "Allocating a new exe_queue element\n");
	return ECORE_ZALLOC(sizeof(struct ecore_exeq_elem), GFP_ATOMIC,
			    sc);
}

/************************ raw_obj functions ***********************************/
static bool ecore_raw_check_pending(struct ecore_raw_obj *o)
{
	/* !! converts the value returned by ECORE_TEST_BIT such that it
	 * is guaranteed not to be truncated regardless of bool definition.
	 *
	 * Note we cannot simply define the function's return value type
	 * to match the type returned by ECORE_TEST_BIT, as it varies by
	 * platform/implementation.
	 */

	return !!ECORE_TEST_BIT(o->state, o->pstate);
}

static void ecore_raw_clear_pending(struct ecore_raw_obj *o)
{
	ECORE_SMP_MB_BEFORE_CLEAR_BIT();
	ECORE_CLEAR_BIT(o->state, o->pstate);
	ECORE_SMP_MB_AFTER_CLEAR_BIT();
}

static void ecore_raw_set_pending(struct ecore_raw_obj *o)
{
	ECORE_SMP_MB_BEFORE_CLEAR_BIT();
	ECORE_SET_BIT(o->state, o->pstate);
	ECORE_SMP_MB_AFTER_CLEAR_BIT();
}

/**
 * ecore_state_wait - wait until the given bit (state) is cleared
 *
 * @sc:		device handle
 * @state:	state which is to be cleared
 * @pstate:	state buffer
 *
 */
static inline int ecore_state_wait(struct bxe_softc *sc, int state,
				   unsigned long *pstate)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	if (CHIP_REV_IS_EMUL(sc))
		cnt *= 20;

	ECORE_MSG(sc, "waiting for state to become %d\n", state);

	ECORE_MIGHT_SLEEP();
	while (cnt--) {
		if (!ECORE_TEST_BIT(state, pstate)) {
#ifdef ECORE_STOP_ON_ERROR
			ECORE_MSG(sc, "exit  (cnt %d)\n", 5000 - cnt);
#endif
			return ECORE_SUCCESS;
		}

		ECORE_WAIT(sc, delay_us);

		if (sc->panic)
			return ECORE_IO;
	}

	/* timeout! */
	ECORE_ERR("timeout waiting for state %d\n", state);
#ifdef ECORE_STOP_ON_ERROR
	ecore_panic();
#endif

	return ECORE_TIMEOUT;
}

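/* Wait for the raw object's pending bit to clear. */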
static int ecore_raw_wait(struct bxe_softc *sc, struct ecore_raw_obj *raw)
{
	return ecore_state_wait(sc, raw->state, raw->pstate);
}

/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
/* credit handling callbacks */
static bool ecore_get_cam_offset_mac(struct ecore_vlan_mac_obj *o, int *offset)
{
	struct ecore_credit_pool_obj *mp = o->macs_pool;

	ECORE_DBG_BREAK_IF(!mp);

	return mp->get_entry(mp, offset);
}

static bool ecore_get_credit_mac(struct ecore_vlan_mac_obj *o)
{
	struct ecore_credit_pool_obj *mp = o->macs_pool;

	ECORE_DBG_BREAK_IF(!mp);

	return mp->get(mp, 1);
}

static bool ecore_get_cam_offset_vlan(struct ecore_vlan_mac_obj *o, int *offset)
{
	struct ecore_credit_pool_obj *vp = o->vlans_pool;

	ECORE_DBG_BREAK_IF(!vp);

	return vp->get_entry(vp, offset);
}

static bool ecore_get_credit_vlan(struct ecore_vlan_mac_obj *o)
{
	struct ecore_credit_pool_obj *vp = o->vlans_pool;

	ECORE_DBG_BREAK_IF(!vp);

	return vp->get(vp, 1);
}

static bool ecore_get_credit_vlan_mac(struct ecore_vlan_mac_obj *o)
{
	struct ecore_credit_pool_obj *mp = o->macs_pool;
	struct ecore_credit_pool_obj *vp = o->vlans_pool;

	if (!mp->get(mp, 1))
		return FALSE;

	if (!vp->get(vp, 1)) {
		mp->put(mp, 1);
		return FALSE;
	}

	return TRUE;
}

static bool ecore_put_cam_offset_mac(struct ecore_vlan_mac_obj *o, int offset)
{
	struct ecore_credit_pool_obj *mp = o->macs_pool;

	return mp->put_entry(mp, offset);
}

static bool ecore_put_credit_mac(struct ecore_vlan_mac_obj *o)
{
	struct ecore_credit_pool_obj *mp = o->macs_pool;

	return mp->put(mp, 1);
}

static bool ecore_put_cam_offset_vlan(struct ecore_vlan_mac_obj *o, int offset)
{
	struct ecore_credit_pool_obj *vp = o->vlans_pool;

	return vp->put_entry(vp, offset);
}

static bool ecore_put_credit_vlan(struct ecore_vlan_mac_obj *o)
{
	struct ecore_credit_pool_obj *vp = o->vlans_pool;

	return vp->put(vp, 1);
}

static bool ecore_put_credit_vlan_mac(struct ecore_vlan_mac_obj *o)
{
	struct ecore_credit_pool_obj *mp = o->macs_pool;
	struct ecore_credit_pool_obj *vp = o->vlans_pool;

	if (!mp->put(mp, 1))
		return FALSE;

	if (!vp->put(vp, 1)) {
		mp->get(mp, 1);
		return FALSE;
	}

	return TRUE;
}

/**
 * __ecore_vlan_mac_h_write_trylock - try getting the writer lock on vlan mac
 * head list.
 *
 * @sc:		device handle
 * @o:		vlan_mac object
 *
 * @details Non-blocking implementation; should be called under execution
 *          queue lock.
 */
static int __ecore_vlan_mac_h_write_trylock(struct bxe_softc *sc,
					    struct ecore_vlan_mac_obj *o)
{
	if (o->head_reader) {
		ECORE_MSG(sc, "vlan_mac_lock writer - There are readers; Busy\n");
		return ECORE_BUSY;
	}

	ECORE_MSG(sc, "vlan_mac_lock writer - Taken\n");
	return ECORE_SUCCESS;
}

/**
 * __ecore_vlan_mac_h_exec_pending - execute the step that was pended because
 * the vlan mac head list lock was taken.
 *
 * @sc:		device handle
 * @o:		vlan_mac object
 *
 * @details Should be called under execution queue lock; notice it might release
 *          and reclaim it during its run.
 */
static void __ecore_vlan_mac_h_exec_pending(struct bxe_softc *sc,
					    struct ecore_vlan_mac_obj *o)
{
	int rc;
	unsigned long ramrod_flags = o->saved_ramrod_flags;

	ECORE_MSG(sc, "vlan_mac_lock execute pending command with ramrod flags %lu\n",
		  ramrod_flags);
	o->head_exe_request = FALSE;
	o->saved_ramrod_flags = 0;
	rc = ecore_exe_queue_step(sc, o, &o->exe_queue, &ramrod_flags);
	if (rc != ECORE_SUCCESS) {
		ECORE_ERR("execution of pending commands failed with rc %d\n",
			  rc);
#ifdef ECORE_STOP_ON_ERROR
		ecore_panic();
#endif
	}
}

/**
 * __ecore_vlan_mac_h_pend - pend an execution step which couldn't run
 * because the vlan mac head list lock was taken.
 *
 * @sc:			device handle
 * @o:			vlan_mac object
 * @ramrod_flags:	ramrod flags of missed execution
 *
 * @details Should be called under execution queue lock.
 */
static void __ecore_vlan_mac_h_pend(struct bxe_softc *sc,
				    struct ecore_vlan_mac_obj *o,
				    unsigned long ramrod_flags)
{
	o->head_exe_request = TRUE;
	o->saved_ramrod_flags = ramrod_flags;
	ECORE_MSG(sc, "Placing pending execution with ramrod flags %lu\n",
		  ramrod_flags);
}

/**
 * __ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
 *
 * @sc:			device handle
 * @o:			vlan_mac object
 *
 * @details Should be called under execution queue lock. Notice if a pending
 *          execution exists, it would perform it - possibly releasing and
 *          reclaiming the execution queue lock.
 */
static void __ecore_vlan_mac_h_write_unlock(struct bxe_softc *sc,
					    struct ecore_vlan_mac_obj *o)
{
	/* It's possible a new pending execution was added since this writer
	 * executed. If so, execute again. [Ad infinitum]
	 */
	while (o->head_exe_request) {
		ECORE_MSG(sc, "vlan_mac_lock - writer release encountered a pending request\n");
		__ecore_vlan_mac_h_exec_pending(sc, o);
	}
}

/**
 * ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
 *
 * @sc:			device handle
 * @o:			vlan_mac object
 *
 * @details Notice if a pending execution exists, it would perform it -
 *          possibly releasing and reclaiming the execution queue lock.
 */
void ecore_vlan_mac_h_write_unlock(struct bxe_softc *sc,
				   struct ecore_vlan_mac_obj *o)
{
	ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
	__ecore_vlan_mac_h_write_unlock(sc, o);
	ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
}

/**
 * __ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
 *
 * @sc:			device handle
 * @o:			vlan_mac object
 *
 * @details Should be called under the execution queue lock. May sleep. May
 *          release and reclaim execution queue lock during its run.
 */
static int __ecore_vlan_mac_h_read_lock(struct bxe_softc *sc,
					struct ecore_vlan_mac_obj *o)
{
	/* If we got here, we're holding lock --> no WRITER exists */
	o->head_reader++;
	ECORE_MSG(sc, "vlan_mac_lock - locked reader - number %d\n",
		  o->head_reader);

	return ECORE_SUCCESS;
}

/**
 * ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
 *
 * @sc:			device handle
 * @o:			vlan_mac object
 *
 * @details May sleep. Claims and releases execution queue lock during its run.
 */
int ecore_vlan_mac_h_read_lock(struct bxe_softc *sc,
			       struct ecore_vlan_mac_obj *o)
{
	int rc;

	ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
	rc = __ecore_vlan_mac_h_read_lock(sc, o);
	ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);

	return rc;
}

/**
 * __ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
 *
 * @sc:			device handle
 * @o:			vlan_mac object
 *
 * @details Should be called under execution queue lock. Notice if a pending
 *          execution exists, it would be performed if this was the last
 *          reader, possibly releasing and reclaiming the execution queue lock.
 */
static void __ecore_vlan_mac_h_read_unlock(struct bxe_softc *sc,
					  struct ecore_vlan_mac_obj *o)
{
	if (!o->head_reader) {
		ECORE_ERR("Need to release vlan mac reader lock, but lock isn't taken\n");
#ifdef ECORE_STOP_ON_ERROR
		ecore_panic();
#endif
	} else {
		o->head_reader--;
		ECORE_MSG(sc, "vlan_mac_lock - decreased readers to %d\n",
			  o->head_reader);
	}

	/* It's possible a new pending execution was added, and that this reader
	 * was the last one - if so we need to execute the command.
	 */
	if (!o->head_reader && o->head_exe_request) {
		ECORE_MSG(sc, "vlan_mac_lock - reader release encountered a pending request\n");

		/* Writer release will do the trick */
		__ecore_vlan_mac_h_write_unlock(sc, o);
	}
}

/**
 * ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
 *
 * @sc:			device handle
 * @o:			vlan_mac object
 *
 * @details Notice if a pending execution exists, it would be performed if this
 *          was the last reader. Claims and releases the execution queue lock
 *          during its run.
 */
void ecore_vlan_mac_h_read_unlock(struct bxe_softc *sc,
				  struct ecore_vlan_mac_obj *o)
{
	ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
	__ecore_vlan_mac_h_read_unlock(sc, o);
	ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
}

655 
656 /**
657  * ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
658  *
659  * @sc:			device handle
660  * @o:			vlan_mac object
661  * @n:			number of elements to get
662  * @base:		base address for element placement
663  * @stride:		stride between elements (in bytes)
664  */
665 static int ecore_get_n_elements(struct bxe_softc *sc, struct ecore_vlan_mac_obj *o,
666 				 int n, uint8_t *base, uint8_t stride, uint8_t size)
667 {
668 	struct ecore_vlan_mac_registry_elem *pos;
669 	uint8_t *next = base;
670 	int counter = 0, read_lock;
671 
672 	ECORE_MSG(sc, "get_n_elements - taking vlan_mac_lock (reader)\n");
673 	read_lock = ecore_vlan_mac_h_read_lock(sc, o);
674 	if (read_lock != ECORE_SUCCESS)
675 		ECORE_ERR("get_n_elements failed to get vlan mac reader lock; Access without lock\n");
676 
677 	/* traverse list */
678 	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
679 				  struct ecore_vlan_mac_registry_elem) {
680 		if (counter < n) {
681 			ECORE_MEMCPY(next, &pos->u, size);
682 			counter++;
683 			ECORE_MSG(sc, "copied element number %d to address %p element was:",
684 				  counter, next);
685 			next += stride + size;
686 		}
687 	}
688 
689 	if (read_lock == ECORE_SUCCESS) {
690 		ECORE_MSG(sc, "get_n_elements - releasing vlan_mac_lock (reader)\n");
691 		ecore_vlan_mac_h_read_unlock(sc, o);
692 	}
693 
694 	return counter * ETH_ALEN;
695 }
696 
/* check_add() callbacks */
static int ecore_check_mac_add(struct bxe_softc *sc,
			       struct ecore_vlan_mac_obj *o,
			       union ecore_classification_ramrod_data *data)
{
	struct ecore_vlan_mac_registry_elem *pos;

	ECORE_MSG(sc, "Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for ADD command\n",
		  data->mac.mac[0], data->mac.mac[1], data->mac.mac[2],
		  data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]);

	if (!ECORE_IS_VALID_ETHER_ADDR(data->mac.mac))
		return ECORE_INVAL;

	/* Check if a requested MAC already exists */
	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
				  struct ecore_vlan_mac_registry_elem)
		if (!ECORE_MEMCMP(data->mac.mac, pos->u.mac.mac, ETH_ALEN) &&
		    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
			return ECORE_EXISTS;

	return ECORE_SUCCESS;
}

static int ecore_check_vlan_add(struct bxe_softc *sc,
				struct ecore_vlan_mac_obj *o,
				union ecore_classification_ramrod_data *data)
{
	struct ecore_vlan_mac_registry_elem *pos;

	ECORE_MSG(sc, "Checking VLAN %d for ADD command\n", data->vlan.vlan);

	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
				  struct ecore_vlan_mac_registry_elem)
		if (data->vlan.vlan == pos->u.vlan.vlan)
			return ECORE_EXISTS;

	return ECORE_SUCCESS;
}

static int ecore_check_vlan_mac_add(struct bxe_softc *sc,
				    struct ecore_vlan_mac_obj *o,
				    union ecore_classification_ramrod_data *data)
{
	struct ecore_vlan_mac_registry_elem *pos;

	ECORE_MSG(sc, "Checking VLAN_MAC (%02x:%02x:%02x:%02x:%02x:%02x, %d) for ADD command\n",
		  data->vlan_mac.mac[0], data->vlan_mac.mac[1],
		  data->vlan_mac.mac[2], data->vlan_mac.mac[3],
		  data->vlan_mac.mac[4], data->vlan_mac.mac[5],
		  data->vlan_mac.vlan);

	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
				  struct ecore_vlan_mac_registry_elem)
		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
		    (!ECORE_MEMCMP(data->vlan_mac.mac, pos->u.vlan_mac.mac,
				  ETH_ALEN)) &&
		    (data->vlan_mac.is_inner_mac ==
		     pos->u.vlan_mac.is_inner_mac))
			return ECORE_EXISTS;

	return ECORE_SUCCESS;
}

/* check_del() callbacks */
static struct ecore_vlan_mac_registry_elem *
	ecore_check_mac_del(struct bxe_softc *sc,
			    struct ecore_vlan_mac_obj *o,
			    union ecore_classification_ramrod_data *data)
{
	struct ecore_vlan_mac_registry_elem *pos;

	ECORE_MSG(sc, "Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for DEL command\n",
		  data->mac.mac[0], data->mac.mac[1], data->mac.mac[2],
		  data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]);

	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
				  struct ecore_vlan_mac_registry_elem)
		if ((!ECORE_MEMCMP(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) &&
		    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
			return pos;

	return NULL;
}

static struct ecore_vlan_mac_registry_elem *
	ecore_check_vlan_del(struct bxe_softc *sc,
			     struct ecore_vlan_mac_obj *o,
			     union ecore_classification_ramrod_data *data)
{
	struct ecore_vlan_mac_registry_elem *pos;

	ECORE_MSG(sc, "Checking VLAN %d for DEL command\n", data->vlan.vlan);

	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
				  struct ecore_vlan_mac_registry_elem)
		if (data->vlan.vlan == pos->u.vlan.vlan)
			return pos;

	return NULL;
}

static struct ecore_vlan_mac_registry_elem *
	ecore_check_vlan_mac_del(struct bxe_softc *sc,
				 struct ecore_vlan_mac_obj *o,
				 union ecore_classification_ramrod_data *data)
{
	struct ecore_vlan_mac_registry_elem *pos;

	ECORE_MSG(sc, "Checking VLAN_MAC (%02x:%02x:%02x:%02x:%02x:%02x, %d) for DEL command\n",
		  data->vlan_mac.mac[0], data->vlan_mac.mac[1],
		  data->vlan_mac.mac[2], data->vlan_mac.mac[3],
		  data->vlan_mac.mac[4], data->vlan_mac.mac[5],
		  data->vlan_mac.vlan);

	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
				  struct ecore_vlan_mac_registry_elem)
		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
		    (!ECORE_MEMCMP(data->vlan_mac.mac, pos->u.vlan_mac.mac,
			     ETH_ALEN)) &&
		    (data->vlan_mac.is_inner_mac ==
		     pos->u.vlan_mac.is_inner_mac))
			return pos;

	return NULL;
}

/* check_move() callback */
static bool ecore_check_move(struct bxe_softc *sc,
			     struct ecore_vlan_mac_obj *src_o,
			     struct ecore_vlan_mac_obj *dst_o,
			     union ecore_classification_ramrod_data *data)
{
	struct ecore_vlan_mac_registry_elem *pos;
	int rc;

	/* Check if we can delete the requested configuration from the first
	 * object.
	 */
	pos = src_o->check_del(sc, src_o, data);

	/* Check if the configuration can be added */
	rc = dst_o->check_add(sc, dst_o, data);

	/* If this classification cannot be added (is already set)
	 * or can't be deleted - return an error.
	 */
	if (rc || !pos)
		return FALSE;

	return TRUE;
}

static bool ecore_check_move_always_err(
	struct bxe_softc *sc,
	struct ecore_vlan_mac_obj *src_o,
	struct ecore_vlan_mac_obj *dst_o,
	union ecore_classification_ramrod_data *data)
{
	return FALSE;
}

static inline uint8_t ecore_vlan_mac_get_rx_tx_flag(struct ecore_vlan_mac_obj *o)
{
	struct ecore_raw_obj *raw = &o->raw;
	uint8_t rx_tx_flag = 0;

	if ((raw->obj_type == ECORE_OBJ_TYPE_TX) ||
	    (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;

	if ((raw->obj_type == ECORE_OBJ_TYPE_RX) ||
	    (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;

	return rx_tx_flag;
}

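/**
 * ecore_set_mac_in_nig - configure a MAC filter in the NIG LLH CAM
 *
 * @sc:		device handle
 * @add:	if TRUE add the entry, otherwise disable it
 * @dev_addr:	MAC address to set
 * @index:	LLH CAM line to use
 */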
void ecore_set_mac_in_nig(struct bxe_softc *sc,
			  bool add, unsigned char *dev_addr, int index)
{
	uint32_t wb_data[2];
	uint32_t reg_offset = ECORE_PORT_ID(sc) ? NIG_REG_LLH1_FUNC_MEM :
			 NIG_REG_LLH0_FUNC_MEM;

	if (!ECORE_IS_MF_SI_MODE(sc) && !IS_MF_AFEX(sc))
		return;

	if (index > ECORE_LLH_CAM_MAX_PF_LINE)
		return;

	ECORE_MSG(sc, "Going to %s LLH configuration at entry %d\n",
		  (add ? "ADD" : "DELETE"), index);

	if (add) {
		/* LLH_FUNC_MEM is a uint64_t WB register */
		reg_offset += 8*index;

		wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
			      (dev_addr[4] <<  8) |  dev_addr[5]);
		wb_data[1] = ((dev_addr[0] <<  8) |  dev_addr[1]);

		ECORE_REG_WR_DMAE_LEN(sc, reg_offset, wb_data, 2);
	}

	REG_WR(sc, (ECORE_PORT_ID(sc) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
				  NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
}

/**
 * ecore_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
 *
 * @sc:		device handle
 * @o:		queue for which we want to configure this rule
 * @add:	if TRUE the command is an ADD command, DEL otherwise
 * @opcode:	CLASSIFY_RULE_OPCODE_XXX
 * @hdr:	pointer to a header to setup
 *
 */
static inline void ecore_vlan_mac_set_cmd_hdr_e2(struct bxe_softc *sc,
	struct ecore_vlan_mac_obj *o, bool add, int opcode,
	struct eth_classify_cmd_header *hdr)
{
	struct ecore_raw_obj *raw = &o->raw;

	hdr->client_id = raw->cl_id;
	hdr->func_id = raw->func_id;

	/* Rx and/or Tx (internal switching) configuration? */
	hdr->cmd_general_data |=
		ecore_vlan_mac_get_rx_tx_flag(o);

	if (add)
		hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;

	hdr->cmd_general_data |=
		(opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
}

/**
 * ecore_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
 *
 * @cid:	connection id
 * @type:	ECORE_FILTER_XXX_PENDING
 * @hdr:	pointer to header to setup
 * @rule_cnt:	number of rules in the data buffer
 *
 * Currently we always configure one rule; the echo field is set to contain
 * the CID and the opcode type.
 */
static inline void ecore_vlan_mac_set_rdata_hdr_e2(uint32_t cid, int type,
				struct eth_classify_header *hdr, int rule_cnt)
{
	hdr->echo = ECORE_CPU_TO_LE32((cid & ECORE_SWCID_MASK) |
				(type << ECORE_SWCID_SHIFT));
	hdr->rule_cnt = (uint8_t)rule_cnt;
}

/* hw_config() callbacks */
static void ecore_set_one_mac_e2(struct bxe_softc *sc,
				 struct ecore_vlan_mac_obj *o,
				 struct ecore_exeq_elem *elem, int rule_idx,
				 int cam_offset)
{
	struct ecore_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	bool add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE;
	unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
	uint8_t *mac = elem->cmd_data.vlan_mac.u.mac.mac;

	/* Set LLH CAM entry: currently only iSCSI and ETH macs are
	 * relevant. In addition, current implementation is tuned for a
	 * single ETH MAC.
	 *
	 * When a PF configuration with multiple unicast ETH MACs in switch
	 * independent mode is required (NetQ, multiple netdev MACs,
	 * etc.), consider better utilisation of the 8 per-function MAC
	 * entries in the LLH register. There are also
	 * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that bring the total
	 * number of CAM entries to 16.
	 *
	 * Currently we won't configure NIG for MACs other than a primary ETH
	 * MAC and iSCSI L2 MAC.
	 *
	 * If this MAC is moving from one Queue to another, no need to change
	 * NIG configuration.
	 */
	if (cmd != ECORE_VLAN_MAC_MOVE) {
		if (ECORE_TEST_BIT(ECORE_ISCSI_ETH_MAC, vlan_mac_flags))
			ecore_set_mac_in_nig(sc, add, mac,
					     ECORE_LLH_CAM_ISCSI_ETH_LINE);
		else if (ECORE_TEST_BIT(ECORE_ETH_MAC, vlan_mac_flags))
			ecore_set_mac_in_nig(sc, add, mac,
					     ECORE_LLH_CAM_ETH_LINE);
	}

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		ECORE_MEMSET(data, 0, sizeof(*data));

	/* Setup a command header */
	ecore_vlan_mac_set_cmd_hdr_e2(sc, o, add, CLASSIFY_RULE_OPCODE_MAC,
				      &rule_entry->mac.header);

	ECORE_MSG(sc, "About to %s MAC %02x:%02x:%02x:%02x:%02x:%02x for Queue %d\n",
		  (add ? "add" : "delete"), mac[0], mac[1], mac[2], mac[3],
		  mac[4], mac[5], raw->cl_id);

	/* Set a MAC itself */
	ecore_set_fw_mac_addr(&rule_entry->mac.mac_msb,
			      &rule_entry->mac.mac_mid,
			      &rule_entry->mac.mac_lsb, mac);
	rule_entry->mac.inner_mac =
		elem->cmd_data.vlan_mac.u.mac.is_inner_mac;

	/* MOVE: Add a rule that will add this MAC to the target Queue */
	if (cmd == ECORE_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data */
		ecore_vlan_mac_set_cmd_hdr_e2(sc,
					      elem->cmd_data.vlan_mac.target_obj,
					      TRUE, CLASSIFY_RULE_OPCODE_MAC,
					      &rule_entry->mac.header);

		/* Set a MAC itself */
		ecore_set_fw_mac_addr(&rule_entry->mac.mac_msb,
				      &rule_entry->mac.mac_mid,
				      &rule_entry->mac.mac_lsb, mac);
		rule_entry->mac.inner_mac =
			elem->cmd_data.vlan_mac.u.mac.is_inner_mac;
	}

	/* Set the ramrod data header */
	/* TODO: take this to the higher level in order to prevent multiple
	 * writing
	 */
	ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}

/**
 * ecore_vlan_mac_set_rdata_hdr_e1x - set a header in the MAC configuration
 * ramrod data
 *
 * @sc:		device handle
 * @o:		queue
 * @type:	ECORE_FILTER_XXX_PENDING
 * @cam_offset:	offset in cam memory
 * @hdr:	pointer to a header to setup
 *
 * E1/E1H
 */
static inline void ecore_vlan_mac_set_rdata_hdr_e1x(struct bxe_softc *sc,
	struct ecore_vlan_mac_obj *o, int type, int cam_offset,
	struct mac_configuration_hdr *hdr)
{
	struct ecore_raw_obj *r = &o->raw;

	hdr->length = 1;
	hdr->offset = (uint8_t)cam_offset;
	hdr->client_id = ECORE_CPU_TO_LE16(0xff);
	hdr->echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
				(type << ECORE_SWCID_SHIFT));
}

static inline void ecore_vlan_mac_set_cfg_entry_e1x(struct bxe_softc *sc,
	struct ecore_vlan_mac_obj *o, bool add, int opcode, uint8_t *mac,
	uint16_t vlan_id, struct mac_configuration_entry *cfg_entry)
{
	struct ecore_raw_obj *r = &o->raw;
	uint32_t cl_bit_vec = (1 << r->cl_id);

	cfg_entry->clients_bit_vector = ECORE_CPU_TO_LE32(cl_bit_vec);
	cfg_entry->pf_id = r->func_id;
	cfg_entry->vlan_id = ECORE_CPU_TO_LE16(vlan_id);

	if (add) {
		ECORE_SET_FLAG(cfg_entry->flags,
			       MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			       T_ETH_MAC_COMMAND_SET);
		ECORE_SET_FLAG(cfg_entry->flags,
			       MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE,
			       opcode);

		/* Set a MAC in a ramrod data */
		ecore_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
				      &cfg_entry->middle_mac_addr,
				      &cfg_entry->lsb_mac_addr, mac);
	} else
		ECORE_SET_FLAG(cfg_entry->flags,
			       MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			       T_ETH_MAC_COMMAND_INVALIDATE);
}

static inline void ecore_vlan_mac_set_rdata_e1x(struct bxe_softc *sc,
	struct ecore_vlan_mac_obj *o, int type, int cam_offset, bool add,
	uint8_t *mac, uint16_t vlan_id, int opcode, struct mac_configuration_cmd *config)
{
	struct mac_configuration_entry *cfg_entry = &config->config_table[0];
	struct ecore_raw_obj *raw = &o->raw;

	ecore_vlan_mac_set_rdata_hdr_e1x(sc, o, type, cam_offset,
					 &config->hdr);
	ecore_vlan_mac_set_cfg_entry_e1x(sc, o, add, opcode, mac, vlan_id,
					 cfg_entry);

	ECORE_MSG(sc, "%s MAC %02x:%02x:%02x:%02x:%02x:%02x CLID %d CAM offset %d\n",
		  (add ? "setting" : "clearing"),
		  mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
		  raw->cl_id, cam_offset);
}

/**
 * ecore_set_one_mac_e1x - fill a single MAC rule ramrod data
 *
 * @sc:		device handle
 * @o:		ecore_vlan_mac_obj
 * @elem:	ecore_exeq_elem
 * @rule_idx:	rule index
 * @cam_offset: CAM offset
 */
static void ecore_set_one_mac_e1x(struct bxe_softc *sc,
				  struct ecore_vlan_mac_obj *o,
				  struct ecore_exeq_elem *elem, int rule_idx,
				  int cam_offset)
{
	struct ecore_raw_obj *raw = &o->raw;
	struct mac_configuration_cmd *config =
		(struct mac_configuration_cmd *)(raw->rdata);
	/* 57710 and 57711 do not support MOVE command,
	 * so it's either ADD or DEL
	 */
	bool add = (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
		TRUE : FALSE;

	/* Reset the ramrod data buffer */
	ECORE_MEMSET(config, 0, sizeof(*config));

	ecore_vlan_mac_set_rdata_e1x(sc, o, raw->state,
				     cam_offset, add,
				     elem->cmd_data.vlan_mac.u.mac.mac, 0,
				     ETH_VLAN_FILTER_ANY_VLAN, config);
}

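/**
 * ecore_set_one_vlan_e2 - fill a single VLAN rule ramrod data
 *
 * @sc:		device handle
 * @o:		ecore_vlan_mac_obj
 * @elem:	ecore_exeq_elem
 * @rule_idx:	rule index
 * @cam_offset:	CAM offset
 */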
static void ecore_set_one_vlan_e2(struct bxe_softc *sc,
				  struct ecore_vlan_mac_obj *o,
				  struct ecore_exeq_elem *elem, int rule_idx,
				  int cam_offset)
{
	struct ecore_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
	bool add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE;
	uint16_t vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		ECORE_MEMSET(data, 0, sizeof(*data));

	/* Set a rule header */
	ecore_vlan_mac_set_cmd_hdr_e2(sc, o, add, CLASSIFY_RULE_OPCODE_VLAN,
				      &rule_entry->vlan.header);

	ECORE_MSG(sc, "About to %s VLAN %d\n", (add ? "add" : "delete"),
		  vlan);

	/* Set a VLAN itself */
	rule_entry->vlan.vlan = ECORE_CPU_TO_LE16(vlan);

	/* MOVE: Add a rule that will add this VLAN to the target Queue */
	if (cmd == ECORE_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data */
		ecore_vlan_mac_set_cmd_hdr_e2(sc,
					      elem->cmd_data.vlan_mac.target_obj,
					      TRUE, CLASSIFY_RULE_OPCODE_VLAN,
					      &rule_entry->vlan.header);

		/* Set a VLAN itself */
		rule_entry->vlan.vlan = ECORE_CPU_TO_LE16(vlan);
	}

	/* Set the ramrod data header */
	/* TODO: take this to the higher level in order to prevent multiple
	 * writing
	 */
	ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}

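/**
 * ecore_set_one_vlan_mac_e2 - fill a single VLAN-MAC pair rule ramrod data
 *
 * @sc:		device handle
 * @o:		ecore_vlan_mac_obj
 * @elem:	ecore_exeq_elem
 * @rule_idx:	rule index
 * @cam_offset:	CAM offset
 */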
static void ecore_set_one_vlan_mac_e2(struct bxe_softc *sc,
				      struct ecore_vlan_mac_obj *o,
				      struct ecore_exeq_elem *elem,
				      int rule_idx, int cam_offset)
{
	struct ecore_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
	bool add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE;
	uint16_t vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
	uint8_t *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		ECORE_MEMSET(data, 0, sizeof(*data));

	/* Set a rule header */
	ecore_vlan_mac_set_cmd_hdr_e2(sc, o, add, CLASSIFY_RULE_OPCODE_PAIR,
				      &rule_entry->pair.header);

	/* Set VLAN and MAC themselves */
	rule_entry->pair.vlan = ECORE_CPU_TO_LE16(vlan);
	ecore_set_fw_mac_addr(&rule_entry->pair.mac_msb,
			      &rule_entry->pair.mac_mid,
			      &rule_entry->pair.mac_lsb, mac);
	rule_entry->pair.inner_mac =
			elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac;
	/* MOVE: Add a rule that will add this VLAN-MAC pair to the target
	 * Queue.
	 */
	if (cmd == ECORE_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data */
		ecore_vlan_mac_set_cmd_hdr_e2(sc,
					      elem->cmd_data.vlan_mac.target_obj,
					      TRUE, CLASSIFY_RULE_OPCODE_PAIR,
					      &rule_entry->pair.header);

		/* Set VLAN and MAC themselves */
		rule_entry->pair.vlan = ECORE_CPU_TO_LE16(vlan);
		ecore_set_fw_mac_addr(&rule_entry->pair.mac_msb,
				      &rule_entry->pair.mac_mid,
				      &rule_entry->pair.mac_lsb, mac);
		rule_entry->pair.inner_mac =
			elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac;
	}

	/* Set the ramrod data header */
	/* TODO: take this to the higher level in order to prevent multiple
	 * writing
	 */
	ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}

/**
 * ecore_set_one_vlan_mac_e1h - fill a single VLAN-MAC rule ramrod data (E1H)
 *
 * @sc:		device handle
 * @o:		ecore_vlan_mac_obj
 * @elem:	ecore_exeq_elem
 * @rule_idx:	rule index
 * @cam_offset:	CAM offset
 */
static void ecore_set_one_vlan_mac_e1h(struct bxe_softc *sc,
				       struct ecore_vlan_mac_obj *o,
				       struct ecore_exeq_elem *elem,
				       int rule_idx, int cam_offset)
{
	struct ecore_raw_obj *raw = &o->raw;
	struct mac_configuration_cmd *config =
		(struct mac_configuration_cmd *)(raw->rdata);
	/* 57710 and 57711 do not support MOVE command,
	 * so it's either ADD or DEL
	 */
	bool add = (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
		TRUE : FALSE;

	/* Reset the ramrod data buffer */
	ECORE_MEMSET(config, 0, sizeof(*config));

	ecore_vlan_mac_set_rdata_e1x(sc, o, ECORE_FILTER_VLAN_MAC_PENDING,
				     cam_offset, add,
				     elem->cmd_data.vlan_mac.u.vlan_mac.mac,
				     elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
				     ETH_VLAN_FILTER_CLASSIFY, config);
}

#define list_next_entry(pos, member) \
	list_entry((pos)->member.next, typeof(*(pos)), member)

/**
 * ecore_vlan_mac_restore - reconfigure the next MAC/VLAN/VLAN-MAC element
 *
 * @sc:		device handle
 * @p:		command parameters
 * @ppos:	pointer to the cookie
 *
 * Reconfigure the next MAC/VLAN/VLAN-MAC element from the
 * previously configured elements list.
 *
 * From the command parameters only the RAMROD_COMP_WAIT bit in ramrod_flags
 * is taken into account.
 *
 * The cookie should be given back in the next call to make the function
 * handle the next element. If *ppos is set to NULL it will restart the
 * iterator. If returned *ppos == NULL this means that the last element has
 * been handled.
 */
static int ecore_vlan_mac_restore(struct bxe_softc *sc,
			   struct ecore_vlan_mac_ramrod_params *p,
			   struct ecore_vlan_mac_registry_elem **ppos)
{
	struct ecore_vlan_mac_registry_elem *pos;
	struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;

	/* If list is empty - there is nothing to do here */
	if (ECORE_LIST_IS_EMPTY(&o->head)) {
		*ppos = NULL;
		return 0;
	}

	/* make a step... */
	if (*ppos == NULL)
		*ppos = ECORE_LIST_FIRST_ENTRY(&o->head,
					       struct ecore_vlan_mac_registry_elem,
					       link);
	else
		*ppos = ECORE_LIST_NEXT(*ppos, link,
					struct ecore_vlan_mac_registry_elem);

	pos = *ppos;

	/* If it's the last step - return NULL */
	if (ECORE_LIST_IS_LAST(&pos->link, &o->head))
		*ppos = NULL;

	/* Prepare a 'user_req' */
	ECORE_MEMCPY(&p->user_req.u, &pos->u, sizeof(pos->u));

	/* Set the command */
	p->user_req.cmd = ECORE_VLAN_MAC_ADD;

	/* Set vlan_mac_flags */
	p->user_req.vlan_mac_flags = pos->vlan_mac_flags;

	/* Set a restore bit */
	ECORE_SET_BIT_NA(RAMROD_RESTORE, &p->ramrod_flags);

	return ecore_config_vlan_mac(sc, p);
}

/* ecore_exeq_get_mac/ecore_exeq_get_vlan/ecore_exeq_get_vlan_mac return a
 * pointer to an element matching the given criteria, or NULL if no such
 * element has been found.
 */
static struct ecore_exeq_elem *ecore_exeq_get_mac(
	struct ecore_exe_queue_obj *o,
	struct ecore_exeq_elem *elem)
{
	struct ecore_exeq_elem *pos;
	struct ecore_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;

	/* Check pending for execution commands */
	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link,
				  struct ecore_exeq_elem)
		if (!ECORE_MEMCMP(&pos->cmd_data.vlan_mac.u.mac, data,
			      sizeof(*data)) &&
		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
			return pos;

	return NULL;
}

static struct ecore_exeq_elem *ecore_exeq_get_vlan(
	struct ecore_exe_queue_obj *o,
	struct ecore_exeq_elem *elem)
{
	struct ecore_exeq_elem *pos;
	struct ecore_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;

	/* Check pending for execution commands */
	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link,
				  struct ecore_exeq_elem)
		if (!ECORE_MEMCMP(&pos->cmd_data.vlan_mac.u.vlan, data,
			      sizeof(*data)) &&
		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
			return pos;

	return NULL;
}

static struct ecore_exeq_elem *ecore_exeq_get_vlan_mac(
	struct ecore_exe_queue_obj *o,
	struct ecore_exeq_elem *elem)
{
	struct ecore_exeq_elem *pos;
	struct ecore_vlan_mac_ramrod_data *data =
		&elem->cmd_data.vlan_mac.u.vlan_mac;

	/* Check pending for execution commands */
	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link,
				  struct ecore_exeq_elem)
		if (!ECORE_MEMCMP(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
			      sizeof(*data)) &&
		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
			return pos;

	return NULL;
}

/**
 * ecore_validate_vlan_mac_add - check if an ADD command can be executed
 *
 * @sc:		device handle
 * @qo:		ecore_qable_obj
 * @elem:	ecore_exeq_elem
 *
 * Checks that the requested configuration can be added. If yes and if
 * requested, consume CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static inline int ecore_validate_vlan_mac_add(struct bxe_softc *sc,
					      union ecore_qable_obj *qo,
					      struct ecore_exeq_elem *elem)
{
	struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
	struct ecore_exe_queue_obj *exeq = &o->exe_queue;
	int rc;

	/* Check the registry */
	rc = o->check_add(sc, o, &elem->cmd_data.vlan_mac.u);
	if (rc) {
		ECORE_MSG(sc, "ADD command is not allowed considering current registry state.\n");
		return rc;
	}

	/* Check if there is a pending ADD command for this
	 * MAC/VLAN/VLAN-MAC. Return an error if there is.
	 */
	if (exeq->get(exeq, elem)) {
		ECORE_MSG(sc, "There is a pending ADD command already\n");
		return ECORE_EXISTS;
	}

	/* TODO: Check the pending MOVE from other objects where this
	 * object is a destination object.
	 */

	/* Consume the credit unless asked not to */
	if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
			     &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	    o->get_credit(o)))
		return ECORE_INVAL;

	return ECORE_SUCCESS;
}

/**
 * ecore_validate_vlan_mac_del - check if the DEL command can be executed
 *
 * @sc:		device handle
 * @qo:		quable object to check
 * @elem:	element that needs to be deleted
 *
 * Checks that the requested configuration can be deleted. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static inline int ecore_validate_vlan_mac_del(struct bxe_softc *sc,
					      union ecore_qable_obj *qo,
					      struct ecore_exeq_elem *elem)
{
	struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
	struct ecore_vlan_mac_registry_elem *pos;
	struct ecore_exe_queue_obj *exeq = &o->exe_queue;
	struct ecore_exeq_elem query_elem;

	/* If this classification cannot be deleted (doesn't exist)
	 * - return ECORE_EXISTS.
	 */
	pos = o->check_del(sc, o, &elem->cmd_data.vlan_mac.u);
	if (!pos) {
		ECORE_MSG(sc, "DEL command is not allowed considering current registry state\n");
		return ECORE_EXISTS;
	}

	/* Check if there are pending DEL or MOVE commands for this
	 * MAC/VLAN/VLAN-MAC. Return an error if so.
	 */
	ECORE_MEMCPY(&query_elem, elem, sizeof(query_elem));

	/* Check for MOVE commands */
	query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_MOVE;
	if (exeq->get(exeq, &query_elem)) {
		ECORE_ERR("There is a pending MOVE command already\n");
		return ECORE_INVAL;
	}

	/* Check for DEL commands */
	if (exeq->get(exeq, elem)) {
		ECORE_MSG(sc, "There is a pending DEL command already\n");
		return ECORE_EXISTS;
	}

	/* Return the credit to the credit pool unless asked not to */
	if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
			     &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	    o->put_credit(o))) {
		ECORE_ERR("Failed to return a credit\n");
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}

/**
 * ecore_validate_vlan_mac_move - check if the MOVE command can be executed
 *
 * @sc:		device handle
 * @qo:		quable object to check (source)
 * @elem:	element that needs to be moved
 *
 * Checks that the requested configuration can be moved. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static inline int ecore_validate_vlan_mac_move(struct bxe_softc *sc,
					       union ecore_qable_obj *qo,
					       struct ecore_exeq_elem *elem)
{
	struct ecore_vlan_mac_obj *src_o = &qo->vlan_mac;
	struct ecore_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
	struct ecore_exeq_elem query_elem;
	struct ecore_exe_queue_obj *src_exeq = &src_o->exe_queue;
	struct ecore_exe_queue_obj *dest_exeq = &dest_o->exe_queue;

	/* Check if we can perform this operation based on the current registry
	 * state.
	 */
	if (!src_o->check_move(sc, src_o, dest_o,
			       &elem->cmd_data.vlan_mac.u)) {
		ECORE_MSG(sc, "MOVE command is not allowed considering current registry state\n");
		return ECORE_INVAL;
	}

	/* Check if there is an already pending DEL or MOVE command for the
	 * source object or ADD command for a destination object. Return an
	 * error if so.
	 */
	ECORE_MEMCPY(&query_elem, elem, sizeof(query_elem));

	/* Check DEL on source */
	query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL;
	if (src_exeq->get(src_exeq, &query_elem)) {
		ECORE_ERR("There is a pending DEL command on the source queue already\n");
		return ECORE_INVAL;
	}

	/* Check MOVE on source */
	if (src_exeq->get(src_exeq, elem)) {
		ECORE_MSG(sc, "There is a pending MOVE command already\n");
		return ECORE_EXISTS;
	}

	/* Check ADD on destination */
	query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD;
	if (dest_exeq->get(dest_exeq, &query_elem)) {
		ECORE_ERR("There is a pending ADD command on the destination queue already\n");
		return ECORE_INVAL;
	}

	/* Consume the credit unless asked not to */
	if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT_DEST,
			     &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	    dest_o->get_credit(dest_o)))
		return ECORE_INVAL;

	if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
			     &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	    src_o->put_credit(src_o))) {
		/* return the credit taken from dest... */
		dest_o->put_credit(dest_o);
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}

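/* Dispatch 'validate' to the command-specific handler. */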
static int ecore_validate_vlan_mac(struct bxe_softc *sc,
				   union ecore_qable_obj *qo,
				   struct ecore_exeq_elem *elem)
{
	switch (elem->cmd_data.vlan_mac.cmd) {
	case ECORE_VLAN_MAC_ADD:
		return ecore_validate_vlan_mac_add(sc, qo, elem);
	case ECORE_VLAN_MAC_DEL:
		return ecore_validate_vlan_mac_del(sc, qo, elem);
	case ECORE_VLAN_MAC_MOVE:
		return ecore_validate_vlan_mac_move(sc, qo, elem);
	default:
		return ECORE_INVAL;
	}
}

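/* Undo the CAM credit bookkeeping of a command that is being removed from
 * the execution queue: return the credit for ADD/MOVE, take it again for DEL.
 */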
static int ecore_remove_vlan_mac(struct bxe_softc *sc,
				  union ecore_qable_obj *qo,
				  struct ecore_exeq_elem *elem)
{
	int rc = 0;

	/* If consumption wasn't required, nothing to do */
	if (ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
			   &elem->cmd_data.vlan_mac.vlan_mac_flags))
		return ECORE_SUCCESS;

	switch (elem->cmd_data.vlan_mac.cmd) {
	case ECORE_VLAN_MAC_ADD:
	case ECORE_VLAN_MAC_MOVE:
		rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
		break;
	case ECORE_VLAN_MAC_DEL:
		rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
		break;
	default:
		return ECORE_INVAL;
	}

	if (rc != TRUE)
		return ECORE_INVAL;

	return ECORE_SUCCESS;
}

/**
 * ecore_wait_vlan_mac - passively wait up to 5 seconds until all work
 * completes.
 *
 * @sc:		device handle
 * @o:		ecore_vlan_mac_obj
 *
 */
static int ecore_wait_vlan_mac(struct bxe_softc *sc,
			       struct ecore_vlan_mac_obj *o)
{
	int cnt = 5000, rc;
	struct ecore_exe_queue_obj *exeq = &o->exe_queue;
	struct ecore_raw_obj *raw = &o->raw;

	while (cnt--) {
		/* Wait for the current command to complete */
		rc = raw->wait_comp(sc, raw);
		if (rc)
			return rc;

		/* Wait until there are no pending commands */
		if (!ecore_exe_queue_empty(exeq))
			ECORE_WAIT(sc, 1000);
		else
			return ECORE_SUCCESS;
	}

	return ECORE_TIMEOUT;
}

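/* Try to take the writer lock on the vlan mac head list and run one
 * execution chunk; if the lock is busy, pend the request instead and
 * report ECORE_PENDING.
 */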
static int __ecore_vlan_mac_execute_step(struct bxe_softc *sc,
					 struct ecore_vlan_mac_obj *o,
					 unsigned long *ramrod_flags)
{
	int rc = ECORE_SUCCESS;

	ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);

	ECORE_MSG(sc, "vlan_mac_execute_step - trying to take writer lock\n");
	rc = __ecore_vlan_mac_h_write_trylock(sc, o);

	if (rc != ECORE_SUCCESS) {
		__ecore_vlan_mac_h_pend(sc, o, *ramrod_flags);

		/* Calling function should not differentiate between this case
		 * and the case in which there is already a pending ramrod.
		 */
		rc = ECORE_PENDING;
	} else {
		rc = ecore_exe_queue_step(sc, o, &o->exe_queue, ramrod_flags);
	}
	ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);

	return rc;
}

/**
 * ecore_complete_vlan_mac - complete one VLAN-MAC ramrod
 *
 * @sc:			device handle
 * @o:			ecore_vlan_mac_obj
 * @cqe:		completion element we are handling
 * @ramrod_flags:	if RAMROD_CONT is set, schedule the next execution chunk
 *
 */
static int ecore_complete_vlan_mac(struct bxe_softc *sc,
				   struct ecore_vlan_mac_obj *o,
				   union event_ring_elem *cqe,
				   unsigned long *ramrod_flags)
{
	struct ecore_raw_obj *r = &o->raw;
	int rc;

	/* Reset pending list */
	ecore_exe_queue_reset_pending(sc, &o->exe_queue);

	/* Clear pending */
	r->clear_pending(r);

	/* If ramrod failed this is most likely a SW bug */
	if (cqe->message.error)
		return ECORE_INVAL;

	/* Run the next bulk of pending commands if requested */
	if (ECORE_TEST_BIT(RAMROD_CONT, ramrod_flags)) {
		rc = __ecore_vlan_mac_execute_step(sc, o, ramrod_flags);
		if (rc < 0)
			return rc;
	}

	/* If there is more work to do return PENDING */
	if (!ecore_exe_queue_empty(&o->exe_queue))
		return ECORE_PENDING;

	return ECORE_SUCCESS;
}

1720 /**
1721  * ecore_optimize_vlan_mac - optimize ADD and DEL commands.
1722  *
1723  * @sc:		device handle
1724  * @o:		ecore_qable_obj
1725  * @elem:	ecore_exeq_elem
1726  */
1727 static int ecore_optimize_vlan_mac(struct bxe_softc *sc,
1728 				   union ecore_qable_obj *qo,
1729 				   struct ecore_exeq_elem *elem)
1730 {
1731 	struct ecore_exeq_elem query, *pos;
1732 	struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
1733 	struct ecore_exe_queue_obj *exeq = &o->exe_queue;
1734 
1735 	ECORE_MEMCPY(&query, elem, sizeof(query));
1736 
1737 	switch (elem->cmd_data.vlan_mac.cmd) {
1738 	case ECORE_VLAN_MAC_ADD:
1739 		query.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL;
1740 		break;
1741 	case ECORE_VLAN_MAC_DEL:
1742 		query.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD;
1743 		break;
1744 	default:
1745 		/* Don't handle anything other than ADD or DEL */
1746 		return 0;
1747 	}
1748 
1749 	/* If we found the appropriate element - delete it */
1750 	pos = exeq->get(exeq, &query);
1751 	if (pos) {
1752 
1753 		/* Return the credit of the optimized command */
1754 		if (!ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
1755 				     &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
1756 			if ((query.cmd_data.vlan_mac.cmd ==
1757 			     ECORE_VLAN_MAC_ADD) && !o->put_credit(o)) {
1758 				ECORE_ERR("Failed to return the credit for the optimized ADD command\n");
1759 				return ECORE_INVAL;
1760 			} else if ((query.cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_DEL) && !o->get_credit(o)) {
1761 				ECORE_ERR("Failed to recover the credit from the optimized DEL command\n");
1762 				return ECORE_INVAL;
1763 			}
1764 		}
1765 
1766 		ECORE_MSG(sc, "Optimizing %s command\n",
1767 			  (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
1768 			  "ADD" : "DEL");
1769 
1770 		ECORE_LIST_REMOVE_ENTRY(&pos->link, &exeq->exe_queue);
1771 		ecore_exe_queue_free_elem(sc, pos);
1772 		return 1;
1773 	}
1774 
1775 	return 0;
1776 }
1777 
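/*
 * Illustrative sketch (not part of the driver): the optimizer above lets a
 * queued ADD be cancelled by a later DEL for the same classification entry
 * before anything is posted to the FW. Assuming 'mac_obj' is an initialized
 * ecore_vlan_mac_obj and the caller enqueues without RAMROD_EXEC/RAMROD_CONT
 * (so commands only accumulate in the exe queue), the pair below would be
 * optimized away and no ramrod would be sent:
 *
 *	struct ecore_vlan_mac_ramrod_params p;
 *
 *	ECORE_MEMSET(&p, 0, sizeof(p));
 *	p.vlan_mac_obj = mac_obj;
 *	p.user_req.cmd = ECORE_VLAN_MAC_ADD;
 *	(void)ecore_config_vlan_mac(sc, &p);	(queued, returns ECORE_PENDING)
 *
 *	p.user_req.cmd = ECORE_VLAN_MAC_DEL;	(same classification data)
 *	(void)ecore_config_vlan_mac(sc, &p);	(ADD and DEL cancel out here)
 */
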
1778 /**
1779  * ecore_vlan_mac_get_registry_elem - prepare a registry element
1780  *
1781  * @sc:	  device handle
1782  * @o:
1783  * @elem:
1784  * @restore:
1785  * @re:
1786  *
1787  * prepare a registry element according to the current command request.
1788  */
1789 static inline int ecore_vlan_mac_get_registry_elem(
1790 	struct bxe_softc *sc,
1791 	struct ecore_vlan_mac_obj *o,
1792 	struct ecore_exeq_elem *elem,
1793 	bool restore,
1794 	struct ecore_vlan_mac_registry_elem **re)
1795 {
1796 	enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
1797 	struct ecore_vlan_mac_registry_elem *reg_elem;
1798 
1799 	/* Allocate a new registry element if needed. */
1800 	if (!restore &&
1801 	    ((cmd == ECORE_VLAN_MAC_ADD) || (cmd == ECORE_VLAN_MAC_MOVE))) {
1802 		reg_elem = ECORE_ZALLOC(sizeof(*reg_elem), GFP_ATOMIC, sc);
1803 		if (!reg_elem)
1804 			return ECORE_NOMEM;
1805 
1806 		/* Get a new CAM offset */
1807 		if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
1808 			/* This shall never happen, because we have checked
1809 			 * CAM availability in the 'validate' step.
1810 			 */
1811 			ECORE_DBG_BREAK_IF(1);
1812 			ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
1813 			return ECORE_INVAL;
1814 		}
1815 
1816 		ECORE_MSG(sc, "Got cam offset %d\n", reg_elem->cam_offset);
1817 
1818 		/* Set a VLAN-MAC data */
1819 		ECORE_MEMCPY(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
1820 			  sizeof(reg_elem->u));
1821 
1822 		/* Copy the flags (needed for DEL and RESTORE flows) */
1823 		reg_elem->vlan_mac_flags =
1824 			elem->cmd_data.vlan_mac.vlan_mac_flags;
1825 	} else /* DEL, RESTORE */
1826 		reg_elem = o->check_del(sc, o, &elem->cmd_data.vlan_mac.u);
1827 
1828 	*re = reg_elem;
1829 	return ECORE_SUCCESS;
1830 }
1831 
1832 /**
1833  * ecore_execute_vlan_mac - execute vlan mac command
1834  *
1835  * @sc:			device handle
1836  * @qo:
1837  * @exe_chunk:
1838  * @ramrod_flags:
1839  *
1840  * go and send a ramrod!
1841  */
1842 static int ecore_execute_vlan_mac(struct bxe_softc *sc,
1843 				  union ecore_qable_obj *qo,
1844 				  ecore_list_t *exe_chunk,
1845 				  unsigned long *ramrod_flags)
1846 {
1847 	struct ecore_exeq_elem *elem;
1848 	struct ecore_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
1849 	struct ecore_raw_obj *r = &o->raw;
1850 	int rc, idx = 0;
1851 	bool restore = ECORE_TEST_BIT(RAMROD_RESTORE, ramrod_flags);
1852 	bool drv_only = ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags);
1853 	struct ecore_vlan_mac_registry_elem *reg_elem;
1854 	enum ecore_vlan_mac_cmd cmd;
1855 
1856 	/* If DRIVER_ONLY execution is requested, clean up the registry
1857 	 * and exit. Otherwise send a ramrod to FW.
1858 	 */
1859 	if (!drv_only) {
1860 
1861 		/* Set pending */
1862 		r->set_pending(r);
1863 
1864 		/* Fill the ramrod data */
1865 		ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link,
1866 					  struct ecore_exeq_elem) {
1867 			cmd = elem->cmd_data.vlan_mac.cmd;
1868 			/* A MOVE command adds to the target object, so
1869 			 * switch objects for the CAM search.
1870 			 */
1871 			if (cmd == ECORE_VLAN_MAC_MOVE)
1872 				cam_obj = elem->cmd_data.vlan_mac.target_obj;
1873 			else
1874 				cam_obj = o;
1875 
1876 			rc = ecore_vlan_mac_get_registry_elem(sc, cam_obj,
1877 							      elem, restore,
1878 							      &reg_elem);
1879 			if (rc)
1880 				goto error_exit;
1881 
1882 			ECORE_DBG_BREAK_IF(!reg_elem);
1883 
1884 			/* Push a new entry into the registry */
1885 			if (!restore &&
1886 			    ((cmd == ECORE_VLAN_MAC_ADD) ||
1887 			    (cmd == ECORE_VLAN_MAC_MOVE)))
1888 				ECORE_LIST_PUSH_HEAD(&reg_elem->link,
1889 						     &cam_obj->head);
1890 
1891 			/* Configure a single command in a ramrod data buffer */
1892 			o->set_one_rule(sc, o, elem, idx,
1893 					reg_elem->cam_offset);
1894 
1895 			/* MOVE command consumes 2 entries in the ramrod data */
1896 			if (cmd == ECORE_VLAN_MAC_MOVE)
1897 				idx += 2;
1898 			else
1899 				idx++;
1900 		}
1901 
1902 		/*
1903 		 * No need for an explicit memory barrier here: the ordering of
1904 		 * writing to the SPQ element and updating the SPQ producer
1905 		 * (which involves a memory read) must be ensured anyway, and
1906 		 * a full memory barrier is therefore placed inside
1907 		 * ecore_sp_post().
1908 		 */
1909 
1910 		rc = ecore_sp_post(sc, o->ramrod_cmd, r->cid,
1911 				   r->rdata_mapping,
1912 				   ETH_CONNECTION_TYPE);
1913 		if (rc)
1914 			goto error_exit;
1915 	}
1916 
1917 	/* Now, when we are done with the ramrod - clean up the registry */
1918 	ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link,
1919 				  struct ecore_exeq_elem) {
1920 		cmd = elem->cmd_data.vlan_mac.cmd;
1921 		if ((cmd == ECORE_VLAN_MAC_DEL) ||
1922 		    (cmd == ECORE_VLAN_MAC_MOVE)) {
1923 			reg_elem = o->check_del(sc, o,
1924 						&elem->cmd_data.vlan_mac.u);
1925 
1926 			ECORE_DBG_BREAK_IF(!reg_elem);
1927 
1928 			o->put_cam_offset(o, reg_elem->cam_offset);
1929 			ECORE_LIST_REMOVE_ENTRY(&reg_elem->link, &o->head);
1930 			ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
1931 		}
1932 	}
1933 
1934 	if (!drv_only)
1935 		return ECORE_PENDING;
1936 	else
1937 		return ECORE_SUCCESS;
1938 
1939 error_exit:
1940 	r->clear_pending(r);
1941 
1942 	/* Cleanup a registry in case of a failure */
1943 	ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link,
1944 				  struct ecore_exeq_elem) {
1945 		cmd = elem->cmd_data.vlan_mac.cmd;
1946 
1947 		if (cmd == ECORE_VLAN_MAC_MOVE)
1948 			cam_obj = elem->cmd_data.vlan_mac.target_obj;
1949 		else
1950 			cam_obj = o;
1951 
1952 		/* Delete all entries newly added above */
1953 		if (!restore &&
1954 		    ((cmd == ECORE_VLAN_MAC_ADD) ||
1955 		    (cmd == ECORE_VLAN_MAC_MOVE))) {
1956 			reg_elem = o->check_del(sc, cam_obj,
1957 						&elem->cmd_data.vlan_mac.u);
1958 			if (reg_elem) {
1959 				ECORE_LIST_REMOVE_ENTRY(&reg_elem->link,
1960 							&cam_obj->head);
1961 				ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
1962 			}
1963 		}
1964 	}
1965 
1966 	return rc;
1967 }
1968 
1969 static inline int ecore_vlan_mac_push_new_cmd(
1970 	struct bxe_softc *sc,
1971 	struct ecore_vlan_mac_ramrod_params *p)
1972 {
1973 	struct ecore_exeq_elem *elem;
1974 	struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
1975 	bool restore = ECORE_TEST_BIT(RAMROD_RESTORE, &p->ramrod_flags);
1976 
1977 	/* Allocate the execution queue element */
1978 	elem = ecore_exe_queue_alloc_elem(sc);
1979 	if (!elem)
1980 		return ECORE_NOMEM;
1981 
1982 	/* Set the command 'length' */
1983 	switch (p->user_req.cmd) {
1984 	case ECORE_VLAN_MAC_MOVE:
1985 		elem->cmd_len = 2;
1986 		break;
1987 	default:
1988 		elem->cmd_len = 1;
1989 	}
1990 
1991 	/* Fill the object specific info */
1992 	ECORE_MEMCPY(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));
1993 
1994 	/* Try to add a new command to the pending list */
1995 	return ecore_exe_queue_add(sc, &o->exe_queue, elem, restore);
1996 }
1997 
1998 /**
1999  * ecore_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
2000  *
2001  * @sc:	  device handle
2002  * @p:
2003  *
2004  */
2005 int ecore_config_vlan_mac(struct bxe_softc *sc,
2006 			   struct ecore_vlan_mac_ramrod_params *p)
2007 {
2008 	int rc = ECORE_SUCCESS;
2009 	struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
2010 	unsigned long *ramrod_flags = &p->ramrod_flags;
2011 	bool cont = ECORE_TEST_BIT(RAMROD_CONT, ramrod_flags);
2012 	struct ecore_raw_obj *raw = &o->raw;
2013 
2014 	/*
2015 	 * Add new elements to the execution list for commands that require it.
2016 	 */
2017 	if (!cont) {
2018 		rc = ecore_vlan_mac_push_new_cmd(sc, p);
2019 		if (rc)
2020 			return rc;
2021 	}
2022 
2023 	/* If nothing more will be executed in this iteration, return
2024 	 * PENDING when there are still pending commands.
2025 	 */
2026 	if (!ecore_exe_queue_empty(&o->exe_queue))
2027 		rc = ECORE_PENDING;
2028 
2029 	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags))  {
2030 		ECORE_MSG(sc, "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.\n");
2031 		raw->clear_pending(raw);
2032 	}
2033 
2034 	/* Execute commands if required */
2035 	if (cont || ECORE_TEST_BIT(RAMROD_EXEC, ramrod_flags) ||
2036 	    ECORE_TEST_BIT(RAMROD_COMP_WAIT, ramrod_flags)) {
2037 		rc = __ecore_vlan_mac_execute_step(sc, p->vlan_mac_obj,
2038 						   &p->ramrod_flags);
2039 		if (rc < 0)
2040 			return rc;
2041 	}
2042 
2043 	/* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set,
2044 	 * the user wants to wait until the last command is done.
2045 	 */
2046 	if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2047 		/* Wait at most for the current exe_queue length plus one
2048 		 * iterations (one extra for the currently pending command).
2049 		 */
2050 		int max_iterations = ecore_exe_queue_length(&o->exe_queue) + 1;
2051 
2052 		while (!ecore_exe_queue_empty(&o->exe_queue) &&
2053 		       max_iterations--) {
2054 
2055 			/* Wait for the current command to complete */
2056 			rc = raw->wait_comp(sc, raw);
2057 			if (rc)
2058 				return rc;
2059 
2060 			/* Make a next step */
2061 			rc = __ecore_vlan_mac_execute_step(sc,
2062 							   p->vlan_mac_obj,
2063 							   &p->ramrod_flags);
2064 			if (rc < 0)
2065 				return rc;
2066 		}
2067 
2068 		return ECORE_SUCCESS;
2069 	}
2070 
2071 	return rc;
2072 }
2073 
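/*
 * Usage sketch (illustrative): adding a single MAC synchronously through the
 * function above. 'mac_obj' stands for an already initialized
 * ecore_vlan_mac_obj, and the 'u.mac.mac' member name of the classification
 * union is assumed from the ecore headers:
 *
 *	struct ecore_vlan_mac_ramrod_params p;
 *	int rc;
 *
 *	ECORE_MEMSET(&p, 0, sizeof(p));
 *	p.vlan_mac_obj = mac_obj;
 *	p.user_req.cmd = ECORE_VLAN_MAC_ADD;
 *	ECORE_MEMCPY(p.user_req.u.mac.mac, addr, ETH_ALEN);
 *	ECORE_SET_BIT_NA(RAMROD_COMP_WAIT, &p.ramrod_flags);
 *
 *	rc = ecore_config_vlan_mac(sc, &p);
 *
 * With RAMROD_COMP_WAIT set the call only returns once the exe queue has
 * drained, so rc is ECORE_SUCCESS or a negative error value.
 */
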
2074 /**
2075  * ecore_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
2076  *
2077  * @sc:			device handle
2078  * @o:
2079  * @vlan_mac_flags:
2080  * @ramrod_flags:	execution flags to be used for this deletion
2081  *
2082  * Returns 0 if the last operation has completed successfully and there are
2083  * no more elements left, a positive value if the last operation has
2084  * completed successfully and there are more previously configured elements,
2085  * and a negative value if the current operation has failed.
2086  */
2087 static int ecore_vlan_mac_del_all(struct bxe_softc *sc,
2088 				  struct ecore_vlan_mac_obj *o,
2089 				  unsigned long *vlan_mac_flags,
2090 				  unsigned long *ramrod_flags)
2091 {
2092 	struct ecore_vlan_mac_registry_elem *pos = NULL;
2093 	int rc = 0, read_lock;
2094 	struct ecore_vlan_mac_ramrod_params p;
2095 	struct ecore_exe_queue_obj *exeq = &o->exe_queue;
2096 	struct ecore_exeq_elem *exeq_pos, *exeq_pos_n;
2097 
2098 	/* Clear pending commands first */
2099 
2100 	ECORE_SPIN_LOCK_BH(&exeq->lock);
2101 
2102 	ECORE_LIST_FOR_EACH_ENTRY_SAFE(exeq_pos, exeq_pos_n,
2103 				       &exeq->exe_queue, link,
2104 				       struct ecore_exeq_elem) {
2105 		if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
2106 		    *vlan_mac_flags) {
2107 			rc = exeq->remove(sc, exeq->owner, exeq_pos);
2108 			if (rc) {
2109 				ECORE_ERR("Failed to remove command\n");
2110 				ECORE_SPIN_UNLOCK_BH(&exeq->lock);
2111 				return rc;
2112 			}
2113 			ECORE_LIST_REMOVE_ENTRY(&exeq_pos->link,
2114 						&exeq->exe_queue);
2115 			ecore_exe_queue_free_elem(sc, exeq_pos);
2116 		}
2117 	}
2118 
2119 	ECORE_SPIN_UNLOCK_BH(&exeq->lock);
2120 
2121 	/* Prepare a command request */
2122 	ECORE_MEMSET(&p, 0, sizeof(p));
2123 	p.vlan_mac_obj = o;
2124 	p.ramrod_flags = *ramrod_flags;
2125 	p.user_req.cmd = ECORE_VLAN_MAC_DEL;
2126 
2127 	/* Add all but the last VLAN-MAC to the execution queue without
2128 	 * actually executing anything.
2129 	 */
2130 	ECORE_CLEAR_BIT_NA(RAMROD_COMP_WAIT, &p.ramrod_flags);
2131 	ECORE_CLEAR_BIT_NA(RAMROD_EXEC, &p.ramrod_flags);
2132 	ECORE_CLEAR_BIT_NA(RAMROD_CONT, &p.ramrod_flags);
2133 
2134 	ECORE_MSG(sc, "vlan_mac_del_all -- taking vlan_mac_lock (reader)\n");
2135 	read_lock = ecore_vlan_mac_h_read_lock(sc, o);
2136 	if (read_lock != ECORE_SUCCESS)
2137 		return read_lock;
2138 
2139 	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
2140 				  struct ecore_vlan_mac_registry_elem) {
2141 		if (pos->vlan_mac_flags == *vlan_mac_flags) {
2142 			p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
2143 			ECORE_MEMCPY(&p.user_req.u, &pos->u, sizeof(pos->u));
2144 			rc = ecore_config_vlan_mac(sc, &p);
2145 			if (rc < 0) {
2146 				ECORE_ERR("Failed to add a new DEL command\n");
2147 				ecore_vlan_mac_h_read_unlock(sc, o);
2148 				return rc;
2149 			}
2150 		}
2151 	}
2152 
2153 	ECORE_MSG(sc, "vlan_mac_del_all -- releasing vlan_mac_lock (reader)\n");
2154 	ecore_vlan_mac_h_read_unlock(sc, o);
2155 
2156 	p.ramrod_flags = *ramrod_flags;
2157 	ECORE_SET_BIT_NA(RAMROD_CONT, &p.ramrod_flags);
2158 
2159 	return ecore_config_vlan_mac(sc, &p);
2160 }
2161 
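/*
 * Usage sketch (illustrative): flushing every entry that was configured with
 * a given vlan_mac_flags value, e.g. on an unload path. 'mac_obj' is an
 * assumed, already initialized object whose delete_all callback points at
 * ecore_vlan_mac_del_all() (see ecore_init_vlan_mac_common() below):
 *
 *	unsigned long vlan_mac_flags = 0;
 *	unsigned long ramrod_flags = 0;
 *	int rc;
 *
 *	ECORE_SET_BIT_NA(RAMROD_COMP_WAIT, &ramrod_flags);
 *	rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags,
 *				 &ramrod_flags);
 */
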
2162 static inline void ecore_init_raw_obj(struct ecore_raw_obj *raw, uint8_t cl_id,
2163 	uint32_t cid, uint8_t func_id, void *rdata, ecore_dma_addr_t rdata_mapping, int state,
2164 	unsigned long *pstate, ecore_obj_type type)
2165 {
2166 	raw->func_id = func_id;
2167 	raw->cid = cid;
2168 	raw->cl_id = cl_id;
2169 	raw->rdata = rdata;
2170 	raw->rdata_mapping = rdata_mapping;
2171 	raw->state = state;
2172 	raw->pstate = pstate;
2173 	raw->obj_type = type;
2174 	raw->check_pending = ecore_raw_check_pending;
2175 	raw->clear_pending = ecore_raw_clear_pending;
2176 	raw->set_pending = ecore_raw_set_pending;
2177 	raw->wait_comp = ecore_raw_wait;
2178 }
2179 
2180 static inline void ecore_init_vlan_mac_common(struct ecore_vlan_mac_obj *o,
2181 	uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata, ecore_dma_addr_t rdata_mapping,
2182 	int state, unsigned long *pstate, ecore_obj_type type,
2183 	struct ecore_credit_pool_obj *macs_pool,
2184 	struct ecore_credit_pool_obj *vlans_pool)
2185 {
2186 	ECORE_LIST_INIT(&o->head);
2187 	o->head_reader = 0;
2188 	o->head_exe_request = FALSE;
2189 	o->saved_ramrod_flags = 0;
2190 
2191 	o->macs_pool = macs_pool;
2192 	o->vlans_pool = vlans_pool;
2193 
2194 	o->delete_all = ecore_vlan_mac_del_all;
2195 	o->restore = ecore_vlan_mac_restore;
2196 	o->complete = ecore_complete_vlan_mac;
2197 	o->wait = ecore_wait_vlan_mac;
2198 
2199 	ecore_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
2200 			   state, pstate, type);
2201 }
2202 
2203 void ecore_init_mac_obj(struct bxe_softc *sc,
2204 			struct ecore_vlan_mac_obj *mac_obj,
2205 			uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata,
2206 			ecore_dma_addr_t rdata_mapping, int state,
2207 			unsigned long *pstate, ecore_obj_type type,
2208 			struct ecore_credit_pool_obj *macs_pool)
2209 {
2210 	union ecore_qable_obj *qable_obj = (union ecore_qable_obj *)mac_obj;
2211 
2212 	ecore_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
2213 				   rdata_mapping, state, pstate, type,
2214 				   macs_pool, NULL);
2215 
2216 	/* CAM credit pool handling */
2217 	mac_obj->get_credit = ecore_get_credit_mac;
2218 	mac_obj->put_credit = ecore_put_credit_mac;
2219 	mac_obj->get_cam_offset = ecore_get_cam_offset_mac;
2220 	mac_obj->put_cam_offset = ecore_put_cam_offset_mac;
2221 
2222 	if (CHIP_IS_E1x(sc)) {
2223 		mac_obj->set_one_rule      = ecore_set_one_mac_e1x;
2224 		mac_obj->check_del         = ecore_check_mac_del;
2225 		mac_obj->check_add         = ecore_check_mac_add;
2226 		mac_obj->check_move        = ecore_check_move_always_err;
2227 		mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;
2228 
2229 		/* Exe Queue */
2230 		ecore_exe_queue_init(sc,
2231 				     &mac_obj->exe_queue, 1, qable_obj,
2232 				     ecore_validate_vlan_mac,
2233 				     ecore_remove_vlan_mac,
2234 				     ecore_optimize_vlan_mac,
2235 				     ecore_execute_vlan_mac,
2236 				     ecore_exeq_get_mac);
2237 	} else {
2238 		mac_obj->set_one_rule      = ecore_set_one_mac_e2;
2239 		mac_obj->check_del         = ecore_check_mac_del;
2240 		mac_obj->check_add         = ecore_check_mac_add;
2241 		mac_obj->check_move        = ecore_check_move;
2242 		mac_obj->ramrod_cmd        =
2243 			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2244 		mac_obj->get_n_elements    = ecore_get_n_elements;
2245 
2246 		/* Exe Queue */
2247 		ecore_exe_queue_init(sc,
2248 				     &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
2249 				     qable_obj, ecore_validate_vlan_mac,
2250 				     ecore_remove_vlan_mac,
2251 				     ecore_optimize_vlan_mac,
2252 				     ecore_execute_vlan_mac,
2253 				     ecore_exeq_get_mac);
2254 	}
2255 }
2256 
2257 void ecore_init_vlan_obj(struct bxe_softc *sc,
2258 			 struct ecore_vlan_mac_obj *vlan_obj,
2259 			 uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata,
2260 			 ecore_dma_addr_t rdata_mapping, int state,
2261 			 unsigned long *pstate, ecore_obj_type type,
2262 			 struct ecore_credit_pool_obj *vlans_pool)
2263 {
2264 	union ecore_qable_obj *qable_obj = (union ecore_qable_obj *)vlan_obj;
2265 
2266 	ecore_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
2267 				   rdata_mapping, state, pstate, type, NULL,
2268 				   vlans_pool);
2269 
2270 	vlan_obj->get_credit = ecore_get_credit_vlan;
2271 	vlan_obj->put_credit = ecore_put_credit_vlan;
2272 	vlan_obj->get_cam_offset = ecore_get_cam_offset_vlan;
2273 	vlan_obj->put_cam_offset = ecore_put_cam_offset_vlan;
2274 
2275 	if (CHIP_IS_E1x(sc)) {
2276 		ECORE_ERR("Chips other than E2 and newer are not supported\n");
2277 		ECORE_BUG();
2278 	} else {
2279 		vlan_obj->set_one_rule      = ecore_set_one_vlan_e2;
2280 		vlan_obj->check_del         = ecore_check_vlan_del;
2281 		vlan_obj->check_add         = ecore_check_vlan_add;
2282 		vlan_obj->check_move        = ecore_check_move;
2283 		vlan_obj->ramrod_cmd        =
2284 			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2285 		vlan_obj->get_n_elements    = ecore_get_n_elements;
2286 
2287 		/* Exe Queue */
2288 		ecore_exe_queue_init(sc,
2289 				     &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
2290 				     qable_obj, ecore_validate_vlan_mac,
2291 				     ecore_remove_vlan_mac,
2292 				     ecore_optimize_vlan_mac,
2293 				     ecore_execute_vlan_mac,
2294 				     ecore_exeq_get_vlan);
2295 	}
2296 }
2297 
2298 void ecore_init_vlan_mac_obj(struct bxe_softc *sc,
2299 			     struct ecore_vlan_mac_obj *vlan_mac_obj,
2300 			     uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata,
2301 			     ecore_dma_addr_t rdata_mapping, int state,
2302 			     unsigned long *pstate, ecore_obj_type type,
2303 			     struct ecore_credit_pool_obj *macs_pool,
2304 			     struct ecore_credit_pool_obj *vlans_pool)
2305 {
2306 	union ecore_qable_obj *qable_obj =
2307 		(union ecore_qable_obj *)vlan_mac_obj;
2308 
2309 	ecore_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
2310 				   rdata_mapping, state, pstate, type,
2311 				   macs_pool, vlans_pool);
2312 
2313 	/* CAM pool handling */
2314 	vlan_mac_obj->get_credit = ecore_get_credit_vlan_mac;
2315 	vlan_mac_obj->put_credit = ecore_put_credit_vlan_mac;
2316 	/* The CAM offset is relevant only for the 57710 and 57711 chips, which
2317 	 * have a single CAM for both MACs and VLAN-MAC pairs. So the offset
2318 	 * will be taken from the MACs' pool object only.
2319 	 */
2320 	vlan_mac_obj->get_cam_offset = ecore_get_cam_offset_mac;
2321 	vlan_mac_obj->put_cam_offset = ecore_put_cam_offset_mac;
2322 
2323 	if (CHIP_IS_E1(sc)) {
2324 		ECORE_ERR("E1 chips are not supported\n");
2325 		ECORE_BUG();
2326 	} else if (CHIP_IS_E1H(sc)) {
2327 		vlan_mac_obj->set_one_rule      = ecore_set_one_vlan_mac_e1h;
2328 		vlan_mac_obj->check_del         = ecore_check_vlan_mac_del;
2329 		vlan_mac_obj->check_add         = ecore_check_vlan_mac_add;
2330 		vlan_mac_obj->check_move        = ecore_check_move_always_err;
2331 		vlan_mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;
2332 
2333 		/* Exe Queue */
2334 		ecore_exe_queue_init(sc,
2335 				     &vlan_mac_obj->exe_queue, 1, qable_obj,
2336 				     ecore_validate_vlan_mac,
2337 				     ecore_remove_vlan_mac,
2338 				     ecore_optimize_vlan_mac,
2339 				     ecore_execute_vlan_mac,
2340 				     ecore_exeq_get_vlan_mac);
2341 	} else {
2342 		vlan_mac_obj->set_one_rule      = ecore_set_one_vlan_mac_e2;
2343 		vlan_mac_obj->check_del         = ecore_check_vlan_mac_del;
2344 		vlan_mac_obj->check_add         = ecore_check_vlan_mac_add;
2345 		vlan_mac_obj->check_move        = ecore_check_move;
2346 		vlan_mac_obj->ramrod_cmd        =
2347 			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2348 
2349 		/* Exe Queue */
2350 		ecore_exe_queue_init(sc,
2351 				     &vlan_mac_obj->exe_queue,
2352 				     CLASSIFY_RULES_COUNT,
2353 				     qable_obj, ecore_validate_vlan_mac,
2354 				     ecore_remove_vlan_mac,
2355 				     ecore_optimize_vlan_mac,
2356 				     ecore_execute_vlan_mac,
2357 				     ecore_exeq_get_vlan_mac);
2358 	}
2359 }
2360 
2361 /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
2362 static inline void __storm_memset_mac_filters(struct bxe_softc *sc,
2363 			struct tstorm_eth_mac_filter_config *mac_filters,
2364 			uint16_t pf_id)
2365 {
2366 	size_t size = sizeof(struct tstorm_eth_mac_filter_config);
2367 
2368 	uint32_t addr = BAR_TSTRORM_INTMEM +
2369 			TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
2370 
2371 	ecore_storm_memset_struct(sc, addr, size, (uint32_t *)mac_filters);
2372 }
2373 
2374 static int ecore_set_rx_mode_e1x(struct bxe_softc *sc,
2375 				 struct ecore_rx_mode_ramrod_params *p)
2376 {
2377 	/* update the sc MAC filter structure */
2378 	uint32_t mask = (1 << p->cl_id);
2379 
2380 	struct tstorm_eth_mac_filter_config *mac_filters =
2381 		(struct tstorm_eth_mac_filter_config *)p->rdata;
2382 
2383 	/* initial setting is drop-all */
2384 	uint8_t drop_all_ucast = 1, drop_all_mcast = 1;
2385 	uint8_t accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2386 	uint8_t unmatched_unicast = 0;
2387 
2388 	/* In E1x we only take the RX accept flags into account since TX
2389 	 * switching isn't enabled. */
2390 	if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST, &p->rx_accept_flags))
2391 		/* accept matched ucast */
2392 		drop_all_ucast = 0;
2393 
2394 	if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST, &p->rx_accept_flags))
2395 		/* accept matched mcast */
2396 		drop_all_mcast = 0;
2397 
2398 	if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
2399 		/* accept all ucast */
2400 		drop_all_ucast = 0;
2401 		accp_all_ucast = 1;
2402 	}
2403 	if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
2404 		/* accept all mcast */
2405 		drop_all_mcast = 0;
2406 		accp_all_mcast = 1;
2407 	}
2408 	if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST, &p->rx_accept_flags))
2409 		/* accept (all) bcast */
2410 		accp_all_bcast = 1;
2411 	if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED, &p->rx_accept_flags))
2412 		/* accept unmatched unicasts */
2413 		unmatched_unicast = 1;
2414 
2415 	mac_filters->ucast_drop_all = drop_all_ucast ?
2416 		mac_filters->ucast_drop_all | mask :
2417 		mac_filters->ucast_drop_all & ~mask;
2418 
2419 	mac_filters->mcast_drop_all = drop_all_mcast ?
2420 		mac_filters->mcast_drop_all | mask :
2421 		mac_filters->mcast_drop_all & ~mask;
2422 
2423 	mac_filters->ucast_accept_all = accp_all_ucast ?
2424 		mac_filters->ucast_accept_all | mask :
2425 		mac_filters->ucast_accept_all & ~mask;
2426 
2427 	mac_filters->mcast_accept_all = accp_all_mcast ?
2428 		mac_filters->mcast_accept_all | mask :
2429 		mac_filters->mcast_accept_all & ~mask;
2430 
2431 	mac_filters->bcast_accept_all = accp_all_bcast ?
2432 		mac_filters->bcast_accept_all | mask :
2433 		mac_filters->bcast_accept_all & ~mask;
2434 
2435 	mac_filters->unmatched_unicast = unmatched_unicast ?
2436 		mac_filters->unmatched_unicast | mask :
2437 		mac_filters->unmatched_unicast & ~mask;
2438 
2439 	ECORE_MSG(sc, "drop_ucast 0x%x\ndrop_mcast 0x%x\naccp_ucast 0x%x\n"
2440 		  "accp_mcast 0x%x\naccp_bcast 0x%x\n",
2441 	   mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
2442 	   mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
2443 	   mac_filters->bcast_accept_all);
2444 
2445 	/* write the MAC filter structure */
2446 	__storm_memset_mac_filters(sc, mac_filters, p->func_id);
2447 
2448 	/* The operation is completed */
2449 	ECORE_CLEAR_BIT(p->state, p->pstate);
2450 	ECORE_SMP_MB_AFTER_CLEAR_BIT();
2451 
2452 	return ECORE_SUCCESS;
2453 }
2454 
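/*
 * Every per-client update above follows one pattern: set or clear this
 * client's bit in a shared filter word while leaving the other clients' bits
 * untouched. A minimal standalone sketch of that pattern (illustrative,
 * independent of the driver):
 *
 *	#include <stdint.h>
 *
 *	static uint16_t set_client_bit(uint16_t reg, int cl_id, int on)
 *	{
 *		uint16_t mask = 1u << cl_id;
 *
 *		return on ? (reg | mask) : (reg & (uint16_t)~mask);
 *	}
 *
 * set_client_bit(0x0000, 3, 1) yields 0x0008 and
 * set_client_bit(0x0008, 3, 0) yields 0x0000, while the bits of other
 * clients are preserved in both cases.
 */
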
2455 /* Setup ramrod data */
2456 static inline void ecore_rx_mode_set_rdata_hdr_e2(uint32_t cid,
2457 				struct eth_classify_header *hdr,
2458 				uint8_t rule_cnt)
2459 {
2460 	hdr->echo = ECORE_CPU_TO_LE32(cid);
2461 	hdr->rule_cnt = rule_cnt;
2462 }
2463 
2464 static inline void ecore_rx_mode_set_cmd_state_e2(struct bxe_softc *sc,
2465 				unsigned long *accept_flags,
2466 				struct eth_filter_rules_cmd *cmd,
2467 				bool clear_accept_all)
2468 {
2469 	uint16_t state;
2470 
2471 	/* start with 'drop-all' */
2472 	state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
2473 		ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2474 
2475 	if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST, accept_flags))
2476 		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2477 
2478 	if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST, accept_flags))
2479 		state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2480 
2481 	if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST, accept_flags)) {
2482 		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2483 		state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2484 	}
2485 
2486 	if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST, accept_flags)) {
2487 		state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2488 		state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2489 	}
2490 	if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST, accept_flags))
2491 		state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2492 
2493 	if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED, accept_flags)) {
2494 		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2495 		state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2496 	}
2497 	if (ECORE_TEST_BIT(ECORE_ACCEPT_ANY_VLAN, accept_flags))
2498 		state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
2499 
2500 	/* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
2501 	if (clear_accept_all) {
2502 		state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2503 		state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2504 		state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2505 		state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2506 	}
2507 
2508 	cmd->state = ECORE_CPU_TO_LE16(state);
2509 }
2510 
2511 static int ecore_set_rx_mode_e2(struct bxe_softc *sc,
2512 				struct ecore_rx_mode_ramrod_params *p)
2513 {
2514 	struct eth_filter_rules_ramrod_data *data = p->rdata;
2515 	int rc;
2516 	uint8_t rule_idx = 0;
2517 
2518 	/* Reset the ramrod data buffer */
2519 	ECORE_MEMSET(data, 0, sizeof(*data));
2520 
2521 	/* Setup ramrod data */
2522 
2523 	/* Tx (internal switching) */
2524 	if (ECORE_TEST_BIT(RAMROD_TX, &p->ramrod_flags)) {
2525 		data->rules[rule_idx].client_id = p->cl_id;
2526 		data->rules[rule_idx].func_id = p->func_id;
2527 
2528 		data->rules[rule_idx].cmd_general_data =
2529 			ETH_FILTER_RULES_CMD_TX_CMD;
2530 
2531 		ecore_rx_mode_set_cmd_state_e2(sc, &p->tx_accept_flags,
2532 			&(data->rules[rule_idx++]), FALSE);
2533 	}
2534 
2535 	/* Rx */
2536 	if (ECORE_TEST_BIT(RAMROD_RX, &p->ramrod_flags)) {
2537 		data->rules[rule_idx].client_id = p->cl_id;
2538 		data->rules[rule_idx].func_id = p->func_id;
2539 
2540 		data->rules[rule_idx].cmd_general_data =
2541 			ETH_FILTER_RULES_CMD_RX_CMD;
2542 
2543 		ecore_rx_mode_set_cmd_state_e2(sc, &p->rx_accept_flags,
2544 			&(data->rules[rule_idx++]), FALSE);
2545 	}
2546 
2547 	/* If FCoE Queue configuration has been requested, configure the Rx and
2548 	 * internal switching modes for this queue in separate rules.
2549 	 *
2550 	 * The FCoE queue shall never be set to ACCEPT_ALL packets of any sort:
2551 	 * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
2552 	 */
2553 	if (ECORE_TEST_BIT(ECORE_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
2554 		/*  Tx (internal switching) */
2555 		if (ECORE_TEST_BIT(RAMROD_TX, &p->ramrod_flags)) {
2556 			data->rules[rule_idx].client_id = ECORE_FCOE_CID(sc);
2557 			data->rules[rule_idx].func_id = p->func_id;
2558 
2559 			data->rules[rule_idx].cmd_general_data =
2560 						ETH_FILTER_RULES_CMD_TX_CMD;
2561 
2562 			ecore_rx_mode_set_cmd_state_e2(sc,
2563 							 &p->tx_accept_flags,
2564 						     &(data->rules[rule_idx++]),
2565 						       TRUE);
2566 		}
2567 
2568 		/* Rx */
2569 		if (ECORE_TEST_BIT(RAMROD_RX, &p->ramrod_flags)) {
2570 			data->rules[rule_idx].client_id = ECORE_FCOE_CID(sc);
2571 			data->rules[rule_idx].func_id = p->func_id;
2572 
2573 			data->rules[rule_idx].cmd_general_data =
2574 						ETH_FILTER_RULES_CMD_RX_CMD;
2575 
2576 			ecore_rx_mode_set_cmd_state_e2(sc,
2577 							 &p->rx_accept_flags,
2578 						     &(data->rules[rule_idx++]),
2579 						       TRUE);
2580 		}
2581 	}
2582 
2583 	/* Set the ramrod header (most importantly - number of rules to
2584 	 * configure).
2585 	 */
2586 	ecore_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
2587 
2588 	ECORE_MSG(sc, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx\n",
2589 		  data->header.rule_cnt, p->rx_accept_flags,
2590 		  p->tx_accept_flags);
2591 
2592 	/* No need for an explicit memory barrier here: the ordering of
2593 	 * writing to the SPQ element and updating the SPQ producer
2594 	 * (which involves a memory read) must be ensured anyway, and a
2595 	 * full memory barrier is therefore placed inside
2596 	 * ecore_sp_post().
2597 	 */
2598 
2599 	/* Send a ramrod */
2600 	rc = ecore_sp_post(sc,
2601 			   RAMROD_CMD_ID_ETH_FILTER_RULES,
2602 			   p->cid,
2603 			   p->rdata_mapping,
2604 			   ETH_CONNECTION_TYPE);
2605 	if (rc)
2606 		return rc;
2607 
2608 	/* Ramrod completion is pending */
2609 	return ECORE_PENDING;
2610 }
2611 
2612 static int ecore_wait_rx_mode_comp_e2(struct bxe_softc *sc,
2613 				      struct ecore_rx_mode_ramrod_params *p)
2614 {
2615 	return ecore_state_wait(sc, p->state, p->pstate);
2616 }
2617 
2618 static int ecore_empty_rx_mode_wait(struct bxe_softc *sc,
2619 				    struct ecore_rx_mode_ramrod_params *p)
2620 {
2621 	/* Do nothing */
2622 	return ECORE_SUCCESS;
2623 }
2624 
2625 int ecore_config_rx_mode(struct bxe_softc *sc,
2626 			 struct ecore_rx_mode_ramrod_params *p)
2627 {
2628 	int rc;
2629 
2630 	/* Configure the new classification in the chip */
2631 	rc = p->rx_mode_obj->config_rx_mode(sc, p);
2632 	if (rc < 0)
2633 		return rc;
2634 
2635 	/* Wait for a ramrod completion if it was requested */
2636 	if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2637 		rc = p->rx_mode_obj->wait_comp(sc, p);
2638 		if (rc)
2639 			return rc;
2640 	}
2641 
2642 	return rc;
2643 }
2644 
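/*
 * Usage sketch (illustrative): requesting "accept matched unicast plus
 * broadcast" on the Rx path and waiting for the ramrod to complete. The
 * identification fields of 'p' (cl_id, func_id, cid, rdata, rdata_mapping,
 * state, pstate, rx_mode_obj) are assumed to be filled in beforehand:
 *
 *	struct ecore_rx_mode_ramrod_params p;
 *	int rc;
 *
 *	ECORE_MEMSET(&p, 0, sizeof(p));
 *	(fill in the identification fields here)
 *	ECORE_SET_BIT_NA(RAMROD_RX, &p.ramrod_flags);
 *	ECORE_SET_BIT_NA(RAMROD_COMP_WAIT, &p.ramrod_flags);
 *	ECORE_SET_BIT_NA(ECORE_ACCEPT_UNICAST, &p.rx_accept_flags);
 *	ECORE_SET_BIT_NA(ECORE_ACCEPT_BROADCAST, &p.rx_accept_flags);
 *
 *	rc = ecore_config_rx_mode(sc, &p);
 */
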
2645 void ecore_init_rx_mode_obj(struct bxe_softc *sc,
2646 			    struct ecore_rx_mode_obj *o)
2647 {
2648 	if (CHIP_IS_E1x(sc)) {
2649 		o->wait_comp      = ecore_empty_rx_mode_wait;
2650 		o->config_rx_mode = ecore_set_rx_mode_e1x;
2651 	} else {
2652 		o->wait_comp      = ecore_wait_rx_mode_comp_e2;
2653 		o->config_rx_mode = ecore_set_rx_mode_e2;
2654 	}
2655 }
2656 
2657 /********************* Multicast verbs: SET, CLEAR ****************************/
2658 static inline uint8_t ecore_mcast_bin_from_mac(uint8_t *mac)
2659 {
2660 	return (ECORE_CRC32_LE(0, mac, ETH_ALEN) >> 24) & 0xff;
2661 }
2662 
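/*
 * A standalone sketch of the bin mapping above (illustrative): the top byte
 * of a little-endian CRC-32 of the MAC selects one of 256 bins. The CRC
 * routine below is a generic reflected CRC-32 (polynomial 0xedb88320) used
 * purely as a stand-in for ECORE_CRC32_LE, whose exact conventions live in
 * the bxe headers:
 *
 *	#include <stddef.h>
 *	#include <stdint.h>
 *
 *	static uint32_t crc32_le_demo(uint32_t crc, const uint8_t *p, size_t len)
 *	{
 *		int i;
 *
 *		crc = ~crc;
 *		while (len--) {
 *			crc ^= *p++;
 *			for (i = 0; i < 8; i++)
 *				crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320u : 0);
 *		}
 *		return ~crc;
 *	}
 *
 *	static uint8_t mcast_bin_demo(const uint8_t mac[6])
 *	{
 *		return (crc32_le_demo(0, mac, 6) >> 24) & 0xff;
 *	}
 *
 * Distinct MACs may land in the same bin, which is why the resulting match
 * is only approximate.
 */
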
2663 struct ecore_mcast_mac_elem {
2664 	ecore_list_entry_t link;
2665 	uint8_t mac[ETH_ALEN];
2666 	uint8_t pad[2]; /* For a natural alignment of the following buffer */
2667 };
2668 
2669 struct ecore_pending_mcast_cmd {
2670 	ecore_list_entry_t link;
2671 	int type; /* ECORE_MCAST_CMD_X */
2672 	union {
2673 		ecore_list_t macs_head;
2674 		uint32_t macs_num; /* Needed for DEL command */
2675 		int next_bin; /* Needed for RESTORE flow with aprox match */
2676 	} data;
2677 
2678 	bool done; /* set to TRUE when the command has been handled. It is
2679 		    * practically used in 57712 handling only, where one pending
2680 		    * command may be handled in a few operations. Since for
2681 		    * other chips every operation is completed in a single
2682 		    * ramrod, there is no need to utilize this field.
2683 		    */
2684 };
2685 
2686 static int ecore_mcast_wait(struct bxe_softc *sc,
2687 			    struct ecore_mcast_obj *o)
2688 {
2689 	if (ecore_state_wait(sc, o->sched_state, o->raw.pstate) ||
2690 			o->raw.wait_comp(sc, &o->raw))
2691 		return ECORE_TIMEOUT;
2692 
2693 	return ECORE_SUCCESS;
2694 }
2695 
2696 static int ecore_mcast_enqueue_cmd(struct bxe_softc *sc,
2697 				   struct ecore_mcast_obj *o,
2698 				   struct ecore_mcast_ramrod_params *p,
2699 				   enum ecore_mcast_cmd cmd)
2700 {
2701 	int total_sz;
2702 	struct ecore_pending_mcast_cmd *new_cmd;
2703 	struct ecore_mcast_mac_elem *cur_mac = NULL;
2704 	struct ecore_mcast_list_elem *pos;
2705 	int macs_list_len = ((cmd == ECORE_MCAST_CMD_ADD) ?
2706 			     p->mcast_list_len : 0);
2707 
2708 	/* If the command is empty ("handle pending commands only"), break */
2709 	if (!p->mcast_list_len)
2710 		return ECORE_SUCCESS;
2711 
2712 	total_sz = sizeof(*new_cmd) +
2713 		macs_list_len * sizeof(struct ecore_mcast_mac_elem);
2714 
2715 	/* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
2716 	new_cmd = ECORE_ZALLOC(total_sz, GFP_ATOMIC, sc);
2717 
2718 	if (!new_cmd)
2719 		return ECORE_NOMEM;
2720 
2721 	ECORE_MSG(sc, "About to enqueue a new %d command. macs_list_len=%d\n",
2722 		  cmd, macs_list_len);
2723 
2724 	ECORE_LIST_INIT(&new_cmd->data.macs_head);
2725 
2726 	new_cmd->type = cmd;
2727 	new_cmd->done = FALSE;
2728 
2729 	switch (cmd) {
2730 	case ECORE_MCAST_CMD_ADD:
2731 		cur_mac = (struct ecore_mcast_mac_elem *)
2732 			  ((uint8_t *)new_cmd + sizeof(*new_cmd));
2733 
2734 		/* Push the MACs of the current command into the pending command
2735 		 * MACs list: FIFO
2736 		 */
2737 		ECORE_LIST_FOR_EACH_ENTRY(pos, &p->mcast_list, link,
2738 					  struct ecore_mcast_list_elem) {
2739 			ECORE_MEMCPY(cur_mac->mac, pos->mac, ETH_ALEN);
2740 			ECORE_LIST_PUSH_TAIL(&cur_mac->link,
2741 					     &new_cmd->data.macs_head);
2742 			cur_mac++;
2743 		}
2744 
2745 		break;
2746 
2747 	case ECORE_MCAST_CMD_DEL:
2748 		new_cmd->data.macs_num = p->mcast_list_len;
2749 		break;
2750 
2751 	case ECORE_MCAST_CMD_RESTORE:
2752 		new_cmd->data.next_bin = 0;
2753 		break;
2754 
2755 	default:
2756 		ECORE_FREE(sc, new_cmd, total_sz);
2757 		ECORE_ERR("Unknown command: %d\n", cmd);
2758 		return ECORE_INVAL;
2759 	}
2760 
2761 	/* Push the new pending command to the tail of the pending list: FIFO */
2762 	ECORE_LIST_PUSH_TAIL(&new_cmd->link, &o->pending_cmds_head);
2763 
2764 	o->set_sched(o);
2765 
2766 	return ECORE_PENDING;
2767 }
2768 
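/*
 * Note on the allocation above: the pending command header and its MAC
 * elements are carved from a single allocation, with cur_mac pointing just
 * past the header. A minimal standalone sketch of the same layout
 * (illustrative):
 *
 *	#include <stdint.h>
 *	#include <stdlib.h>
 *	#include <string.h>
 *
 *	struct cmd_hdr { int type; int n; };
 *	struct mac_elem { uint8_t mac[6]; };
 *
 *	static struct cmd_hdr *alloc_cmd(const uint8_t (*macs)[6], int n)
 *	{
 *		struct cmd_hdr *h;
 *		struct mac_elem *e;
 *		int i;
 *
 *		h = calloc(1, sizeof(*h) + n * sizeof(struct mac_elem));
 *		if (h == NULL)
 *			return NULL;
 *		h->n = n;
 *		e = (struct mac_elem *)((uint8_t *)h + sizeof(*h));
 *		for (i = 0; i < n; i++)
 *			memcpy(e[i].mac, macs[i], 6);
 *		return h;
 *	}
 *
 * A single free() then releases the header together with all elements, which
 * is why one ECORE_FREE() is enough to drop a whole pending command.
 */
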
2769 /**
2770  * ecore_mcast_get_next_bin - get the next set bin (index)
2771  *
2772  * @o:
2773  * @last:	index to start looking from (including)
2774  * @last:	index to start looking from (inclusive)
2775  * returns the next found (set) bin or a negative value if none is found.
2776  */
2777 static inline int ecore_mcast_get_next_bin(struct ecore_mcast_obj *o, int last)
2778 {
2779 	int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
2780 
2781 	for (i = last / BIT_VEC64_ELEM_SZ; i < ECORE_MCAST_VEC_SZ; i++) {
2782 		if (o->registry.aprox_match.vec[i])
2783 			for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
2784 				int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
2785 				if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
2786 						       vec, cur_bit)) {
2787 					return cur_bit;
2788 				}
2789 			}
2790 		inner_start = 0;
2791 	}
2792 
2793 	/* None found */
2794 	return -1;
2795 }
2796 
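/*
 * A standalone sketch of the two-level scan above (illustrative): whole
 * 64-bit words that are zero are skipped, then individual bits are probed,
 * resuming from an arbitrary starting bit:
 *
 *	#include <stdint.h>
 *
 *	#define DEMO_ELEM_SZ 64
 *
 *	static int next_set_bit(const uint64_t *vec, int nwords, int last)
 *	{
 *		int i, j, inner = last % DEMO_ELEM_SZ;
 *
 *		for (i = last / DEMO_ELEM_SZ; i < nwords; i++) {
 *			if (vec[i])
 *				for (j = inner; j < DEMO_ELEM_SZ; j++)
 *					if (vec[i] & (1ULL << j))
 *						return i * DEMO_ELEM_SZ + j;
 *			inner = 0;
 *		}
 *		return -1;
 *	}
 *
 * With vec = { 0x0, 0x10 } and last = 0 this returns 68, i.e. bit 4 of the
 * second word; with last = 69 it returns -1.
 */
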
2797 /**
2798  * ecore_mcast_clear_first_bin - find the first set bin and clear it
2799  *
2800  * @o:
2801  *
2802  * returns the index of the found bin or -1 if none is found
2803  */
2804 static inline int ecore_mcast_clear_first_bin(struct ecore_mcast_obj *o)
2805 {
2806 	int cur_bit = ecore_mcast_get_next_bin(o, 0);
2807 
2808 	if (cur_bit >= 0)
2809 		BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
2810 
2811 	return cur_bit;
2812 }
2813 
2814 static inline uint8_t ecore_mcast_get_rx_tx_flag(struct ecore_mcast_obj *o)
2815 {
2816 	struct ecore_raw_obj *raw = &o->raw;
2817 	uint8_t rx_tx_flag = 0;
2818 
2819 	if ((raw->obj_type == ECORE_OBJ_TYPE_TX) ||
2820 	    (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
2821 		rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
2822 
2823 	if ((raw->obj_type == ECORE_OBJ_TYPE_RX) ||
2824 	    (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
2825 		rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
2826 
2827 	return rx_tx_flag;
2828 }
2829 
2830 static void ecore_mcast_set_one_rule_e2(struct bxe_softc *sc,
2831 					struct ecore_mcast_obj *o, int idx,
2832 					union ecore_mcast_config_data *cfg_data,
2833 					enum ecore_mcast_cmd cmd)
2834 {
2835 	struct ecore_raw_obj *r = &o->raw;
2836 	struct eth_multicast_rules_ramrod_data *data =
2837 		(struct eth_multicast_rules_ramrod_data *)(r->rdata);
2838 	uint8_t func_id = r->func_id;
2839 	uint8_t rx_tx_add_flag = ecore_mcast_get_rx_tx_flag(o);
2840 	int bin;
2841 
2842 	if ((cmd == ECORE_MCAST_CMD_ADD) || (cmd == ECORE_MCAST_CMD_RESTORE))
2843 		rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
2844 
2845 	data->rules[idx].cmd_general_data |= rx_tx_add_flag;
2846 
2847 	/* Get a bin and update a bins' vector */
2848 	switch (cmd) {
2849 	case ECORE_MCAST_CMD_ADD:
2850 		bin = ecore_mcast_bin_from_mac(cfg_data->mac);
2851 		BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
2852 		break;
2853 
2854 	case ECORE_MCAST_CMD_DEL:
2855 		/* If there were no more bins to clear
2856 		 * (ecore_mcast_clear_first_bin() returns -1) then we would
2857 		 * clear any (0xff) bin.
2858 		 * See ecore_mcast_validate_e2() for explanation when it may
2859 		 * happen.
2860 		 */
2861 		bin = ecore_mcast_clear_first_bin(o);
2862 		break;
2863 
2864 	case ECORE_MCAST_CMD_RESTORE:
2865 		bin = cfg_data->bin;
2866 		break;
2867 
2868 	default:
2869 		ECORE_ERR("Unknown command: %d\n", cmd);
2870 		return;
2871 	}
2872 
2873 	ECORE_MSG(sc, "%s bin %d\n",
2874 		  ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
2875 		   "Setting"  : "Clearing"), bin);
2876 
2877 	data->rules[idx].bin_id    = (uint8_t)bin;
2878 	data->rules[idx].func_id   = func_id;
2879 	data->rules[idx].engine_id = o->engine_id;
2880 }
2881 
2882 /**
2883  * ecore_mcast_handle_restore_cmd_e2 - restore configuration from the registry
2884  *
2885  * @sc:		device handle
2886  * @o:
2887  * @start_bin:	index in the registry to start from (inclusive)
2888  * @rdata_idx:	index in the ramrod data to start from
2889  *
2890  * returns last handled bin index or -1 if all bins have been handled
2891  */
2892 static inline int ecore_mcast_handle_restore_cmd_e2(
2893 	struct bxe_softc *sc, struct ecore_mcast_obj *o, int start_bin,
2894 	int *rdata_idx)
2895 {
2896 	int cur_bin, cnt = *rdata_idx;
2897 	union ecore_mcast_config_data cfg_data = {NULL};
2898 
2899 	/* go through the registry and configure the bins from it */
2900 	for (cur_bin = ecore_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
2901 	    cur_bin = ecore_mcast_get_next_bin(o, cur_bin + 1)) {
2902 
2903 		cfg_data.bin = (uint8_t)cur_bin;
2904 		o->set_one_rule(sc, o, cnt, &cfg_data,
2905 				ECORE_MCAST_CMD_RESTORE);
2906 
2907 		cnt++;
2908 
2909 		ECORE_MSG(sc, "About to configure a bin %d\n", cur_bin);
2910 
2911 		/* Break if we reached the maximum number
2912 		 * of rules.
2913 		 */
2914 		if (cnt >= o->max_cmd_len)
2915 			break;
2916 	}
2917 
2918 	*rdata_idx = cnt;
2919 
2920 	return cur_bin;
2921 }
2922 
2923 static inline void ecore_mcast_hdl_pending_add_e2(struct bxe_softc *sc,
2924 	struct ecore_mcast_obj *o, struct ecore_pending_mcast_cmd *cmd_pos,
2925 	int *line_idx)
2926 {
2927 	struct ecore_mcast_mac_elem *pmac_pos, *pmac_pos_n;
2928 	int cnt = *line_idx;
2929 	union ecore_mcast_config_data cfg_data = {NULL};
2930 
2931 	ECORE_LIST_FOR_EACH_ENTRY_SAFE(pmac_pos, pmac_pos_n,
2932 		&cmd_pos->data.macs_head, link, struct ecore_mcast_mac_elem) {
2933 
2934 		cfg_data.mac = &pmac_pos->mac[0];
2935 		o->set_one_rule(sc, o, cnt, &cfg_data, cmd_pos->type);
2936 
2937 		cnt++;
2938 
2939 		ECORE_MSG(sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC\n",
2940 			  pmac_pos->mac[0], pmac_pos->mac[1], pmac_pos->mac[2], pmac_pos->mac[3], pmac_pos->mac[4], pmac_pos->mac[5]);
2941 
2942 		ECORE_LIST_REMOVE_ENTRY(&pmac_pos->link,
2943 					&cmd_pos->data.macs_head);
2944 
2945 		/* Break if we reached the maximum number
2946 		 * of rules.
2947 		 */
2948 		if (cnt >= o->max_cmd_len)
2949 			break;
2950 	}
2951 
2952 	*line_idx = cnt;
2953 
2954 	/* if no more MACs to configure - we are done */
2955 	if (ECORE_LIST_IS_EMPTY(&cmd_pos->data.macs_head))
2956 		cmd_pos->done = TRUE;
2957 }
2958 
2959 static inline void ecore_mcast_hdl_pending_del_e2(struct bxe_softc *sc,
2960 	struct ecore_mcast_obj *o, struct ecore_pending_mcast_cmd *cmd_pos,
2961 	int *line_idx)
2962 {
2963 	int cnt = *line_idx;
2964 
2965 	while (cmd_pos->data.macs_num) {
2966 		o->set_one_rule(sc, o, cnt, NULL, cmd_pos->type);
2967 
2968 		cnt++;
2969 
2970 		cmd_pos->data.macs_num--;
2971 
2972 		ECORE_MSG(sc, "Deleting MAC. %d left, cnt is %d\n",
2973 			  cmd_pos->data.macs_num, cnt);
2974 
2975 		/* Break if we reached the maximum
2976 		 * number of rules.
2977 		 */
2978 		if (cnt >= o->max_cmd_len)
2979 			break;
2980 	}
2981 
2982 	*line_idx = cnt;
2983 
2984 	/* If we cleared all bins - we are done */
2985 	/* If there are no more MACs to delete - we are done */
2986 		cmd_pos->done = TRUE;
2987 }
2988 
2989 static inline void ecore_mcast_hdl_pending_restore_e2(struct bxe_softc *sc,
2990 	struct ecore_mcast_obj *o, struct ecore_pending_mcast_cmd *cmd_pos,
2991 	int *line_idx)
2992 {
2993 	cmd_pos->data.next_bin = o->hdl_restore(sc, o, cmd_pos->data.next_bin,
2994 						line_idx);
2995 
2996 	if (cmd_pos->data.next_bin < 0)
2997 		/* If o->set_restore returned -1 we are done */
2998 		cmd_pos->done = TRUE;
2999 	else
3000 		/* Start from the next bin next time */
3001 		cmd_pos->data.next_bin++;
3002 }
3003 
3004 static inline int ecore_mcast_handle_pending_cmds_e2(struct bxe_softc *sc,
3005 				struct ecore_mcast_ramrod_params *p)
3006 {
3007 	struct ecore_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
3008 	int cnt = 0;
3009 	struct ecore_mcast_obj *o = p->mcast_obj;
3010 
3011 	ECORE_LIST_FOR_EACH_ENTRY_SAFE(cmd_pos, cmd_pos_n,
3012 		&o->pending_cmds_head, link, struct ecore_pending_mcast_cmd) {
3013 		switch (cmd_pos->type) {
3014 		case ECORE_MCAST_CMD_ADD:
3015 			ecore_mcast_hdl_pending_add_e2(sc, o, cmd_pos, &cnt);
3016 			break;
3017 
3018 		case ECORE_MCAST_CMD_DEL:
3019 			ecore_mcast_hdl_pending_del_e2(sc, o, cmd_pos, &cnt);
3020 			break;
3021 
3022 		case ECORE_MCAST_CMD_RESTORE:
3023 			ecore_mcast_hdl_pending_restore_e2(sc, o, cmd_pos,
3024 							   &cnt);
3025 			break;
3026 
3027 		default:
3028 			ECORE_ERR("Unknown command: %d\n", cmd_pos->type);
3029 			return ECORE_INVAL;
3030 		}
3031 
3032 		/* If the command has been completed - remove it from the list
3033 		 * and free the memory
3034 		 */
3035 		if (cmd_pos->done) {
3036 			ECORE_LIST_REMOVE_ENTRY(&cmd_pos->link,
3037 						&o->pending_cmds_head);
3038 			ECORE_FREE(sc, cmd_pos, cmd_pos->alloc_len);
3039 		}
3040 
3041 		/* Break if we reached the maximum number of rules */
3042 		if (cnt >= o->max_cmd_len)
3043 			break;
3044 	}
3045 
3046 	return cnt;
3047 }
3048 
3049 static inline void ecore_mcast_hdl_add(struct bxe_softc *sc,
3050 	struct ecore_mcast_obj *o, struct ecore_mcast_ramrod_params *p,
3051 	int *line_idx)
3052 {
3053 	struct ecore_mcast_list_elem *mlist_pos;
3054 	union ecore_mcast_config_data cfg_data = {NULL};
3055 	int cnt = *line_idx;
3056 
3057 	ECORE_LIST_FOR_EACH_ENTRY(mlist_pos, &p->mcast_list, link,
3058 				  struct ecore_mcast_list_elem) {
3059 		cfg_data.mac = mlist_pos->mac;
3060 		o->set_one_rule(sc, o, cnt, &cfg_data, ECORE_MCAST_CMD_ADD);
3061 
3062 		cnt++;
3063 
3064 		ECORE_MSG(sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC\n",
3065 			  mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2], mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5]);
3066 	}
3067 
3068 	*line_idx = cnt;
3069 }
3070 
3071 static inline void ecore_mcast_hdl_del(struct bxe_softc *sc,
3072 	struct ecore_mcast_obj *o, struct ecore_mcast_ramrod_params *p,
3073 	int *line_idx)
3074 {
3075 	int cnt = *line_idx, i;
3076 
3077 	for (i = 0; i < p->mcast_list_len; i++) {
3078 		o->set_one_rule(sc, o, cnt, NULL, ECORE_MCAST_CMD_DEL);
3079 
3080 		cnt++;
3081 
3082 		ECORE_MSG(sc, "Deleting MAC. %d left\n",
3083 			  p->mcast_list_len - i - 1);
3084 	}
3085 
3086 	*line_idx = cnt;
3087 }
3088 
3089 /**
3090  * ecore_mcast_handle_current_cmd - handle the current (new) command
3091  *
3092  * @sc:		device handle
3093  * @p:
3094  * @cmd:
3095  * @start_cnt:	first line in the ramrod data that may be used
3096  *
3097  * This function is called iff there is enough place for the current command in
3098  * This function is called iff there is enough room for the current command in
3099  * Returns number of lines filled in the ramrod data in total.
3100  */
3101 static inline int ecore_mcast_handle_current_cmd(struct bxe_softc *sc,
3102 			struct ecore_mcast_ramrod_params *p,
3103 			enum ecore_mcast_cmd cmd,
3104 			int start_cnt)
3105 {
3106 	struct ecore_mcast_obj *o = p->mcast_obj;
3107 	int cnt = start_cnt;
3108 
3109 	ECORE_MSG(sc, "p->mcast_list_len=%d\n", p->mcast_list_len);
3110 
3111 	switch (cmd) {
3112 	case ECORE_MCAST_CMD_ADD:
3113 		ecore_mcast_hdl_add(sc, o, p, &cnt);
3114 		break;
3115 
3116 	case ECORE_MCAST_CMD_DEL:
3117 		ecore_mcast_hdl_del(sc, o, p, &cnt);
3118 		break;
3119 
3120 	case ECORE_MCAST_CMD_RESTORE:
3121 		o->hdl_restore(sc, o, 0, &cnt);
3122 		break;
3123 
3124 	default:
3125 		ECORE_ERR("Unknown command: %d\n", cmd);
3126 		return ECORE_INVAL;
3127 	}
3128 
3129 	/* The current command has been handled */
3130 	p->mcast_list_len = 0;
3131 
3132 	return cnt;
3133 }
3134 
3135 static int ecore_mcast_validate_e2(struct bxe_softc *sc,
3136 				   struct ecore_mcast_ramrod_params *p,
3137 				   enum ecore_mcast_cmd cmd)
3138 {
3139 	struct ecore_mcast_obj *o = p->mcast_obj;
3140 	int reg_sz = o->get_registry_size(o);
3141 
3142 	switch (cmd) {
3143 	/* DEL command deletes all currently configured MACs */
3144 	case ECORE_MCAST_CMD_DEL:
3145 		o->set_registry_size(o, 0);
3146 		/* Don't break */
3147 
3148 	/* RESTORE command will restore the entire multicast configuration */
3149 	case ECORE_MCAST_CMD_RESTORE:
3150 		/* Here we set the approximate amount of work to do, which may
3151 		 * in fact turn out to be less: some MACs in postponed ADD
3152 		 * command(s) scheduled before this command may fall into
3153 		 * the same bin, so the actual number of bins set in the
3154 		 * registry would be less than estimated here. See
3155 		 * ecore_mcast_set_one_rule_e2() for further details.
3156 		 */
3157 		p->mcast_list_len = reg_sz;
3158 		break;
3159 
3160 	case ECORE_MCAST_CMD_ADD:
3161 	case ECORE_MCAST_CMD_CONT:
3162 		/* Here we assume that all new MACs will fall into new bins.
3163 		 * However we will correct the real registry size after we
3164 		 * handle all pending commands.
3165 		 */
3166 		o->set_registry_size(o, reg_sz + p->mcast_list_len);
3167 		break;
3168 
3169 	default:
3170 		ECORE_ERR("Unknown command: %d\n", cmd);
3171 		return ECORE_INVAL;
3172 	}
3173 
3174 	/* Increase the total number of MACs pending to be configured */
3175 	o->total_pending_num += p->mcast_list_len;
3176 
3177 	return ECORE_SUCCESS;
3178 }
3179 
3180 static void ecore_mcast_revert_e2(struct bxe_softc *sc,
3181 				      struct ecore_mcast_ramrod_params *p,
3182 				      int old_num_bins)
3183 {
3184 	struct ecore_mcast_obj *o = p->mcast_obj;
3185 
3186 	o->set_registry_size(o, old_num_bins);
3187 	o->total_pending_num -= p->mcast_list_len;
3188 }
3189 
3190 /**
3191  * ecore_mcast_set_rdata_hdr_e2 - sets a header values
3192  * ecore_mcast_set_rdata_hdr_e2 - sets the header values
3193  * @sc:		device handle
3194  * @p:
3195  * @len:	number of rules to handle
3196  */
3197 static inline void ecore_mcast_set_rdata_hdr_e2(struct bxe_softc *sc,
3198 					struct ecore_mcast_ramrod_params *p,
3199 					uint8_t len)
3200 {
3201 	struct ecore_raw_obj *r = &p->mcast_obj->raw;
3202 	struct eth_multicast_rules_ramrod_data *data =
3203 		(struct eth_multicast_rules_ramrod_data *)(r->rdata);
3204 
3205 	data->header.echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
3206 					(ECORE_FILTER_MCAST_PENDING <<
3207 					 ECORE_SWCID_SHIFT));
3208 	data->header.rule_cnt = len;
3209 }
3210 
3211 /**
3212  * ecore_mcast_refresh_registry_e2 - recalculate the actual number of set bins
3213  *
3214  * @sc:		device handle
3215  * @o:
3216  *
3217  * Recalculate the actual number of set bins in the registry using Brian
3218  * Kernighan's algorithm: its run time is proportional to the number of set bins.
3219  *
3220  * returns 0 for compliance with ecore_mcast_refresh_registry_e1().
3221  */
3222 static inline int ecore_mcast_refresh_registry_e2(struct bxe_softc *sc,
3223 						  struct ecore_mcast_obj *o)
3224 {
3225 	int i, cnt = 0;
3226 	uint64_t elem;
3227 
3228 	for (i = 0; i < ECORE_MCAST_VEC_SZ; i++) {
3229 		elem = o->registry.aprox_match.vec[i];
3230 		for (; elem; cnt++)
3231 			elem &= elem - 1;
3232 	}
3233 
3234 	o->set_registry_size(o, cnt);
3235 
3236 	return ECORE_SUCCESS;
3237 }
3238 
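/*
 * A standalone sketch of the counting loop above (illustrative): the
 * expression "elem &= elem - 1" clears the lowest set bit, so the loop body
 * runs exactly once per set bit regardless of the word width:
 *
 *	#include <stdint.h>
 *
 *	static int count_bits(uint64_t elem)
 *	{
 *		int cnt = 0;
 *
 *		for (; elem; cnt++)
 *			elem &= elem - 1;
 *		return cnt;
 *	}
 *
 * count_bits(0xf0) is 4, count_bits(0) is 0.
 */
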
3239 static int ecore_mcast_setup_e2(struct bxe_softc *sc,
3240 				struct ecore_mcast_ramrod_params *p,
3241 				enum ecore_mcast_cmd cmd)
3242 {
3243 	struct ecore_raw_obj *raw = &p->mcast_obj->raw;
3244 	struct ecore_mcast_obj *o = p->mcast_obj;
3245 	struct eth_multicast_rules_ramrod_data *data =
3246 		(struct eth_multicast_rules_ramrod_data *)(raw->rdata);
3247 	int cnt = 0, rc;
3248 
3249 	/* Reset the ramrod data buffer */
3250 	ECORE_MEMSET(data, 0, sizeof(*data));
3251 
3252 	cnt = ecore_mcast_handle_pending_cmds_e2(sc, p);
3253 
3254 	/* If there are no more pending commands - clear SCHEDULED state */
3255 	if (ECORE_LIST_IS_EMPTY(&o->pending_cmds_head))
3256 		o->clear_sched(o);
3257 
3258 	/* The below may be TRUE iff there was enough room in ramrod
3259 	 * data for all pending commands and for the current
3260 	 * command. Otherwise the current command would have been added
3261 	 * to the pending commands and p->mcast_list_len would have been
3262 	 * zeroed.
3263 	 */
3264 	if (p->mcast_list_len > 0)
3265 		cnt = ecore_mcast_handle_current_cmd(sc, p, cmd, cnt);
3266 
3267 	/* We've pulled out some MACs - update the total number of
3268 	 * outstanding.
3269 	 */
3270 	o->total_pending_num -= cnt;
3271 
3272 	/* send a ramrod */
3273 	ECORE_DBG_BREAK_IF(o->total_pending_num < 0);
3274 	ECORE_DBG_BREAK_IF(cnt > o->max_cmd_len);
3275 
3276 	ecore_mcast_set_rdata_hdr_e2(sc, p, (uint8_t)cnt);
3277 
3278 	/* Update a registry size if there are no more pending operations.
3279 	 *
3280 	 * We don't want to change the value of the registry size if there are
3281 	 * pending operations because we want it to always be equal to the
3282 	 * exact or the approximate number (see ecore_mcast_validate_e2()) of
3283 	 * set bins after the last requested operation in order to properly
3284 	 * evaluate the size of the next DEL/RESTORE operation.
3285 	 *
3286 	 * Note that we update the registry itself during command(s) handling
3287 	 * - see ecore_mcast_set_one_rule_e2(). That's because for 57712 we
3288 	 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
3289 	 * with a limited amount of update commands (per MAC/bin) and we don't
3290 	 * know in this scope what the actual state of bins configuration is
3291 	 * going to be after this ramrod.
3292 	 */
3293 	if (!o->total_pending_num)
3294 		ecore_mcast_refresh_registry_e2(sc, o);
3295 
3296 	/* If CLEAR_ONLY was requested - don't send a ramrod and clear
3297 	 * RAMROD_PENDING status immediately.
3298 	 */
3299 	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3300 		raw->clear_pending(raw);
3301 		return ECORE_SUCCESS;
3302 	} else {
3303 		/* No need for an explicit memory barrier here: the ordering
3304 		 * of writing to the SPQ element and updating the SPQ producer
3305 		 * (which involves a memory read) must be ensured anyway, and
3306 		 * a full memory barrier is therefore placed inside
3307 		 * ecore_sp_post().
3308 		 */
3309 
3310 		/* Send a ramrod */
3311 		rc = ecore_sp_post( sc,
3312 				    RAMROD_CMD_ID_ETH_MULTICAST_RULES,
3313 				    raw->cid,
3314 				    raw->rdata_mapping,
3315 				    ETH_CONNECTION_TYPE);
3316 		if (rc)
3317 			return rc;
3318 
3319 		/* Ramrod completion is pending */
3320 		return ECORE_PENDING;
3321 	}
3322 }
3323 
3324 static int ecore_mcast_validate_e1h(struct bxe_softc *sc,
3325 				    struct ecore_mcast_ramrod_params *p,
3326 				    enum ecore_mcast_cmd cmd)
3327 {
3328 	/* Mark that there is work to do */
3329 	if ((cmd == ECORE_MCAST_CMD_DEL) || (cmd == ECORE_MCAST_CMD_RESTORE))
3330 		p->mcast_list_len = 1;
3331 
3332 	return ECORE_SUCCESS;
3333 }
3334 
3335 static void ecore_mcast_revert_e1h(struct bxe_softc *sc,
3336 				       struct ecore_mcast_ramrod_params *p,
3337 				       int old_num_bins)
3338 {
3339 	/* Do nothing */
3340 }
3341 
3342 #define ECORE_57711_SET_MC_FILTER(filter, bit) \
3343 do { \
3344 	(filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
3345 } while (0)
3346 
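/*
 * The macro above indexes a flat array of 32-bit words: "bit >> 5" picks the
 * word and "bit & 0x1f" the bit within it. A standalone sketch
 * (illustrative) of setting and testing a bin in such a filter; eight 32-bit
 * words cover the 256 possible bins:
 *
 *	#include <stdint.h>
 *
 *	#define DEMO_MC_HASH_WORDS 8
 *
 *	static void set_bin(uint32_t *filter, int bin)
 *	{
 *		filter[bin >> 5] |= 1u << (bin & 0x1f);
 *	}
 *
 *	static int test_bin(const uint32_t *filter, int bin)
 *	{
 *		return (filter[bin >> 5] >> (bin & 0x1f)) & 1;
 *	}
 *
 * For bin 68: 68 >> 5 selects word 2 and 68 & 0x1f is bit 4, so set_bin()
 * ORs 0x10 into filter[2].
 */
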
3347 static inline void ecore_mcast_hdl_add_e1h(struct bxe_softc *sc,
3348 					   struct ecore_mcast_obj *o,
3349 					   struct ecore_mcast_ramrod_params *p,
3350 					   uint32_t *mc_filter)
3351 {
3352 	struct ecore_mcast_list_elem *mlist_pos;
3353 	int bit;
3354 
3355 	ECORE_LIST_FOR_EACH_ENTRY(mlist_pos, &p->mcast_list, link,
3356 				  struct ecore_mcast_list_elem) {
3357 		bit = ecore_mcast_bin_from_mac(mlist_pos->mac);
3358 		ECORE_57711_SET_MC_FILTER(mc_filter, bit);
3359 
3360 		ECORE_MSG(sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC, bin %d\n",
3361 			  mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2], mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5], bit);
3362 
3363 		/* bookkeeping... */
3364 		BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
3365 				  bit);
3366 	}
3367 }
3368 
3369 static inline void ecore_mcast_hdl_restore_e1h(struct bxe_softc *sc,
3370 	struct ecore_mcast_obj *o, struct ecore_mcast_ramrod_params *p,
3371 	uint32_t *mc_filter)
3372 {
3373 	int bit;
3374 
3375 	for (bit = ecore_mcast_get_next_bin(o, 0);
3376 	     bit >= 0;
3377 	     bit = ecore_mcast_get_next_bin(o, bit + 1)) {
3378 		ECORE_57711_SET_MC_FILTER(mc_filter, bit);
3379 		ECORE_MSG(sc, "About to set bin %d\n", bit);
3380 	}
3381 }
3382 
3383 /* On 57711 we write the multicast MACs' approximate match
3384  * table directly into the TSTORM's internal RAM, so we don't
3385  * really need any tricks to make it work.
3386  */
3387 static int ecore_mcast_setup_e1h(struct bxe_softc *sc,
3388 				 struct ecore_mcast_ramrod_params *p,
3389 				 enum ecore_mcast_cmd cmd)
3390 {
3391 	int i;
3392 	struct ecore_mcast_obj *o = p->mcast_obj;
3393 	struct ecore_raw_obj *r = &o->raw;
3394 
3395 	/* Unless CLEAR_ONLY has been requested, build the multicast filter
3396 	 * and write it into the chip; for CLEAR_ONLY just clear the registry.
3397 	 */
3398 	if (!ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3399 		uint32_t mc_filter[ECORE_MC_HASH_SIZE] = {0};
3400 
3401 		/* Set the multicast filter bits before writing it into
3402 		 * the internal memory.
3403 		 */
3404 		switch (cmd) {
3405 		case ECORE_MCAST_CMD_ADD:
3406 			ecore_mcast_hdl_add_e1h(sc, o, p, mc_filter);
3407 			break;
3408 
3409 		case ECORE_MCAST_CMD_DEL:
3410 			ECORE_MSG(sc,
3411 				  "Invalidating multicast MACs configuration\n");
3412 
3413 			/* clear the registry */
3414 			ECORE_MEMSET(o->registry.aprox_match.vec, 0,
3415 			       sizeof(o->registry.aprox_match.vec));
3416 			break;
3417 
3418 		case ECORE_MCAST_CMD_RESTORE:
3419 			ecore_mcast_hdl_restore_e1h(sc, o, p, mc_filter);
3420 			break;
3421 
3422 		default:
3423 			ECORE_ERR("Unknown command: %d\n", cmd);
3424 			return ECORE_INVAL;
3425 		}
3426 
3427 		/* Set the mcast filter in the internal memory */
3428 		for (i = 0; i < ECORE_MC_HASH_SIZE; i++)
3429 			REG_WR(sc, ECORE_MC_HASH_OFFSET(sc, i), mc_filter[i]);
3430 	} else
3431 		/* clear the registry */
3432 		ECORE_MEMSET(o->registry.aprox_match.vec, 0,
3433 		       sizeof(o->registry.aprox_match.vec));
3434 
3435 	/* We are done */
3436 	r->clear_pending(r);
3437 
3438 	return ECORE_SUCCESS;
3439 }
3440 
3441 static int ecore_mcast_validate_e1(struct bxe_softc *sc,
3442 				   struct ecore_mcast_ramrod_params *p,
3443 				   enum ecore_mcast_cmd cmd)
3444 {
3445 	struct ecore_mcast_obj *o = p->mcast_obj;
3446 	int reg_sz = o->get_registry_size(o);
3447 
3448 	switch (cmd) {
3449 	/* DEL command deletes all currently configured MACs */
3450 	case ECORE_MCAST_CMD_DEL:
3451 		o->set_registry_size(o, 0);
3452 		/* Don't break */
3453 		/* FALLTHROUGH */
3454 	/* RESTORE command will restore the entire multicast configuration */
3455 	case ECORE_MCAST_CMD_RESTORE:
3456 		p->mcast_list_len = reg_sz;
3457 		ECORE_MSG(sc, "Command %d, p->mcast_list_len=%d\n",
3458 			  cmd, p->mcast_list_len);
3459 		break;
3460 
3461 	case ECORE_MCAST_CMD_ADD:
3462 	case ECORE_MCAST_CMD_CONT:
3463 		/* Multicast MACs on 57710 are configured as unicast MACs and
3464 		 * there is only a limited number of CAM entries for that
3465 		 * matter.
3466 		 */
3467 		if (p->mcast_list_len > o->max_cmd_len) {
3468 			ECORE_ERR("Can't configure more than %d multicast MACs on 57710\n",
3469 				  o->max_cmd_len);
3470 			return ECORE_INVAL;
3471 		}
3472 		/* Every configured MAC should be cleared if the DEL command is
3473 		 * called. Only the last ADD command is relevant since every
3474 		 * ADD command overrides the previous configuration.
3475 		 */
3476 		ECORE_MSG(sc, "p->mcast_list_len=%d\n", p->mcast_list_len);
3477 		if (p->mcast_list_len > 0)
3478 			o->set_registry_size(o, p->mcast_list_len);
3479 
3480 		break;
3481 
3482 	default:
3483 		ECORE_ERR("Unknown command: %d\n", cmd);
3484 		return ECORE_INVAL;
3485 	}
3486 
3487 	/* We want to ensure that commands are executed one by one for 57710.
3488 	 * Therefore each non-empty command will consume o->max_cmd_len.
3489 	 */
3490 	if (p->mcast_list_len)
3491 		o->total_pending_num += o->max_cmd_len;
3492 
3493 	return ECORE_SUCCESS;
3494 }
3495 
3496 static void ecore_mcast_revert_e1(struct bxe_softc *sc,
3497 				      struct ecore_mcast_ramrod_params *p,
3498 				      int old_num_macs)
3499 {
3500 	struct ecore_mcast_obj *o = p->mcast_obj;
3501 
3502 	o->set_registry_size(o, old_num_macs);
3503 
3504 	/* If the current command hasn't been handled yet and we are
3505 	 * here, it means the command is meant to be dropped, and we have
3506 	 * to update the number of outstanding MACs accordingly.
3507 	 */
3508 	if (p->mcast_list_len)
3509 		o->total_pending_num -= o->max_cmd_len;
3510 }
3511 
3512 static void ecore_mcast_set_one_rule_e1(struct bxe_softc *sc,
3513 					struct ecore_mcast_obj *o, int idx,
3514 					union ecore_mcast_config_data *cfg_data,
3515 					enum ecore_mcast_cmd cmd)
3516 {
3517 	struct ecore_raw_obj *r = &o->raw;
3518 	struct mac_configuration_cmd *data =
3519 		(struct mac_configuration_cmd *)(r->rdata);
3520 
3521 	/* copy mac */
3522 	if ((cmd == ECORE_MCAST_CMD_ADD) || (cmd == ECORE_MCAST_CMD_RESTORE)) {
3523 		ecore_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
3524 				      &data->config_table[idx].middle_mac_addr,
3525 				      &data->config_table[idx].lsb_mac_addr,
3526 				      cfg_data->mac);
3527 
3528 		data->config_table[idx].vlan_id = 0;
3529 		data->config_table[idx].pf_id = r->func_id;
3530 		data->config_table[idx].clients_bit_vector =
3531 			ECORE_CPU_TO_LE32(1 << r->cl_id);
3532 
3533 		ECORE_SET_FLAG(data->config_table[idx].flags,
3534 			       MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3535 			       T_ETH_MAC_COMMAND_SET);
3536 	}
3537 }
3538 
3539 /**
3540  * ecore_mcast_set_rdata_hdr_e1  - set header values in mac_configuration_cmd
3541  *
3542  * @sc:		device handle
3543  * @p:		ramrod parameters
3544  * @len:	number of rules to handle
3545  */
3546 static inline void ecore_mcast_set_rdata_hdr_e1(struct bxe_softc *sc,
3547 					struct ecore_mcast_ramrod_params *p,
3548 					uint8_t len)
3549 {
3550 	struct ecore_raw_obj *r = &p->mcast_obj->raw;
3551 	struct mac_configuration_cmd *data =
3552 		(struct mac_configuration_cmd *)(r->rdata);
3553 
3554 	uint8_t offset = (CHIP_REV_IS_SLOW(sc) ?
3555 		     ECORE_MAX_EMUL_MULTI*(1 + r->func_id) :
3556 		     ECORE_MAX_MULTICAST*(1 + r->func_id));
3557 
3558 	data->hdr.offset = offset;
3559 	data->hdr.client_id = ECORE_CPU_TO_LE16(0xff);
3560 	data->hdr.echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
3561 				     (ECORE_FILTER_MCAST_PENDING <<
3562 				      ECORE_SWCID_SHIFT));
3563 	data->hdr.length = len;
3564 }
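
/* Worked example (illustrative): on a non-emulation chip the header offset
 * is ECORE_MAX_MULTICAST * (1 + r->func_id), i.e. every function gets its
 * own ECORE_MAX_MULTICAST-sized window in the configuration table. Assuming
 * ECORE_MAX_MULTICAST is 64, func_id 0 yields offset 64 and func_id 2
 * yields offset 192.
 */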
3565 
3566 /**
3567  * ecore_mcast_handle_restore_cmd_e1 - restore command for 57710
3568  *
3569  * @sc:		device handle
3570  * @o:		multicast object
3571  * @start_idx:	index in the registry to start from
3572  * @rdata_idx:	index in the ramrod data to start from
3573  *
3574  * The restore command for 57710 is, like all other commands, always a
3575  * stand-alone command - start_idx and rdata_idx will always be 0. This
3576  * function will always succeed.
3577  * Returns -1 to comply with the 57712 variant.
3578  */
3579 static inline int ecore_mcast_handle_restore_cmd_e1(
3580 	struct bxe_softc *sc, struct ecore_mcast_obj *o, int start_idx,
3581 	int *rdata_idx)
3582 {
3583 	struct ecore_mcast_mac_elem *elem;
3584 	int i = 0;
3585 	union ecore_mcast_config_data cfg_data = {NULL};
3586 
3587 	/* go through the registry and configure the MACs from it. */
3588 	ECORE_LIST_FOR_EACH_ENTRY(elem, &o->registry.exact_match.macs, link,
3589 				  struct ecore_mcast_mac_elem) {
3590 		cfg_data.mac = &elem->mac[0];
3591 		o->set_one_rule(sc, o, i, &cfg_data, ECORE_MCAST_CMD_RESTORE);
3592 
3593 		i++;
3594 
3595 		ECORE_MSG(sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC\n",
3596 			  cfg_data.mac[0], cfg_data.mac[1], cfg_data.mac[2], cfg_data.mac[3], cfg_data.mac[4], cfg_data.mac[5]);
3597 	}
3598 
3599 	*rdata_idx = i;
3600 
3601 	return -1;
3602 }
3603 
3604 static inline int ecore_mcast_handle_pending_cmds_e1(
3605 	struct bxe_softc *sc, struct ecore_mcast_ramrod_params *p)
3606 {
3607 	struct ecore_pending_mcast_cmd *cmd_pos;
3608 	struct ecore_mcast_mac_elem *pmac_pos;
3609 	struct ecore_mcast_obj *o = p->mcast_obj;
3610 	union ecore_mcast_config_data cfg_data = {NULL};
3611 	int cnt = 0;
3612 
3613 	/* If nothing to be done - return */
3614 	if (ECORE_LIST_IS_EMPTY(&o->pending_cmds_head))
3615 		return 0;
3616 
3617 	/* Handle the first command */
3618 	cmd_pos = ECORE_LIST_FIRST_ENTRY(&o->pending_cmds_head,
3619 					 struct ecore_pending_mcast_cmd, link);
3620 
3621 	switch (cmd_pos->type) {
3622 	case ECORE_MCAST_CMD_ADD:
3623 		ECORE_LIST_FOR_EACH_ENTRY(pmac_pos, &cmd_pos->data.macs_head,
3624 					  link, struct ecore_mcast_mac_elem) {
3625 			cfg_data.mac = &pmac_pos->mac[0];
3626 			o->set_one_rule(sc, o, cnt, &cfg_data, cmd_pos->type);
3627 
3628 			cnt++;
3629 
3630 			ECORE_MSG(sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC\n",
3631 				  pmac_pos->mac[0], pmac_pos->mac[1], pmac_pos->mac[2], pmac_pos->mac[3], pmac_pos->mac[4], pmac_pos->mac[5]);
3632 		}
3633 		break;
3634 
3635 	case ECORE_MCAST_CMD_DEL:
3636 		cnt = cmd_pos->data.macs_num;
3637 		ECORE_MSG(sc, "About to delete %d multicast MACs\n", cnt);
3638 		break;
3639 
3640 	case ECORE_MCAST_CMD_RESTORE:
3641 		o->hdl_restore(sc, o, 0, &cnt);
3642 		break;
3643 
3644 	default:
3645 		ECORE_ERR("Unknown command: %d\n", cmd_pos->type);
3646 		return ECORE_INVAL;
3647 	}
3648 
3649 	ECORE_LIST_REMOVE_ENTRY(&cmd_pos->link, &o->pending_cmds_head);
3650 	ECORE_FREE(sc, cmd_pos, cmd_pos->alloc_len);
3651 
3652 	return cnt;
3653 }
3654 
3655 /**
3656  * ecore_get_fw_mac_addr - revert the ecore_set_fw_mac_addr().
3657  *
3658  * @fw_hi:
3659  * @fw_mid:
3660  * @fw_lo:
3661  * @mac:
3662  */
3663 static inline void ecore_get_fw_mac_addr(uint16_t *fw_hi, uint16_t *fw_mid,
3664 					 uint16_t *fw_lo, uint8_t *mac)
3665 {
3666 	mac[1] = ((uint8_t *)fw_hi)[0];
3667 	mac[0] = ((uint8_t *)fw_hi)[1];
3668 	mac[3] = ((uint8_t *)fw_mid)[0];
3669 	mac[2] = ((uint8_t *)fw_mid)[1];
3670 	mac[5] = ((uint8_t *)fw_lo)[0];
3671 	mac[4] = ((uint8_t *)fw_lo)[1];
3672 }
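
/* Sketch (illustrative only): ecore_set_fw_mac_addr() packs a MAC address
 * into three 16-bit words and this helper unpacks them, so the two are
 * inverses of each other:
 *
 *	uint16_t hi, mid, lo;
 *	uint8_t mac[6] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55};
 *	uint8_t out[6];
 *
 *	ecore_set_fw_mac_addr(&hi, &mid, &lo, mac);
 *	ecore_get_fw_mac_addr(&hi, &mid, &lo, out);
 *	// out[] now holds the same six bytes as mac[]
 */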
3673 
3674 /**
3675  * ecore_mcast_refresh_registry_e1 - update the exact match registry
3676  *
3677  * @sc:		device handle
3678  * @o:		multicast object
3679  *
3680  * Check the ramrod data first entry flag to see if it's a DELETE or ADD command
3681  * and update the registry correspondingly: if ADD - allocate memory and add
3682  * the entries to the registry (list), if DELETE - clear the registry and free
3683  * the memory.
3684  */
3685 static inline int ecore_mcast_refresh_registry_e1(struct bxe_softc *sc,
3686 						  struct ecore_mcast_obj *o)
3687 {
3688 	struct ecore_raw_obj *raw = &o->raw;
3689 	struct ecore_mcast_mac_elem *elem;
3690 	struct mac_configuration_cmd *data =
3691 			(struct mac_configuration_cmd *)(raw->rdata);
3692 
3693 	/* If first entry contains a SET bit - the command was ADD,
3694 	 * otherwise - DEL_ALL
3695 	 */
3696 	if (ECORE_GET_FLAG(data->config_table[0].flags,
3697 			MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
3698 		int i, len = data->hdr.length;
3699 
3700 		/* Break if it was a RESTORE command */
3701 		if (!ECORE_LIST_IS_EMPTY(&o->registry.exact_match.macs))
3702 			return ECORE_SUCCESS;
3703 
3704 		elem = ECORE_CALLOC(len, sizeof(*elem), GFP_ATOMIC, sc);
3705 		if (!elem) {
3706 			ECORE_ERR("Failed to allocate registry memory\n");
3707 			return ECORE_NOMEM;
3708 		}
3709 
3710 		for (i = 0; i < len; i++, elem++) {
3711 			ecore_get_fw_mac_addr(
3712 				&data->config_table[i].msb_mac_addr,
3713 				&data->config_table[i].middle_mac_addr,
3714 				&data->config_table[i].lsb_mac_addr,
3715 				elem->mac);
3716 			ECORE_MSG(sc, "Adding registry entry for [%02x:%02x:%02x:%02x:%02x:%02x]\n",
3717 				  elem->mac[0], elem->mac[1], elem->mac[2], elem->mac[3], elem->mac[4], elem->mac[5]);
3718 			ECORE_LIST_PUSH_TAIL(&elem->link,
3719 					     &o->registry.exact_match.macs);
3720 		}
3721 	} else {
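		/* Note (clarifying comment): the registry entries were
		 * allocated above (ADD path) as one contiguous array and
		 * pushed to the list in order, so the first list entry is
		 * the base pointer; freeing it releases the whole registry.
		 */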
3722 		elem = ECORE_LIST_FIRST_ENTRY(&o->registry.exact_match.macs,
3723 					      struct ecore_mcast_mac_elem,
3724 					      link);
3725 		ECORE_MSG(sc, "Deleting a registry\n");
3726 		ECORE_FREE(sc, elem, sizeof(*elem));
3727 		ECORE_LIST_INIT(&o->registry.exact_match.macs);
3728 	}
3729 
3730 	return ECORE_SUCCESS;
3731 }
3732 
3733 static int ecore_mcast_setup_e1(struct bxe_softc *sc,
3734 				struct ecore_mcast_ramrod_params *p,
3735 				enum ecore_mcast_cmd cmd)
3736 {
3737 	struct ecore_mcast_obj *o = p->mcast_obj;
3738 	struct ecore_raw_obj *raw = &o->raw;
3739 	struct mac_configuration_cmd *data =
3740 		(struct mac_configuration_cmd *)(raw->rdata);
3741 	int cnt = 0, i, rc;
3742 
3743 	/* Reset the ramrod data buffer */
3744 	ECORE_MEMSET(data, 0, sizeof(*data));
3745 
3746 	/* First set all entries as invalid */
3747 	for (i = 0; i < o->max_cmd_len; i++)
3748 		ECORE_SET_FLAG(data->config_table[i].flags,
3749 			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3750 			T_ETH_MAC_COMMAND_INVALIDATE);
3751 
3752 	/* Handle pending commands first */
3753 	cnt = ecore_mcast_handle_pending_cmds_e1(sc, p);
3754 
3755 	/* If there are no more pending commands - clear SCHEDULED state */
3756 	if (ECORE_LIST_IS_EMPTY(&o->pending_cmds_head))
3757 		o->clear_sched(o);
3758 
3759 	/* The below may be TRUE only if there were no pending commands */
3760 	if (!cnt)
3761 		cnt = ecore_mcast_handle_current_cmd(sc, p, cmd, 0);
3762 
3763 	/* For 57710 every command consumes o->max_cmd_len credits to ensure
3764 	 * that commands are done one at a time.
3765 	 */
3766 	o->total_pending_num -= o->max_cmd_len;
3767 
3768 	/* send a ramrod */
3769 
3770 	ECORE_DBG_BREAK_IF(cnt > o->max_cmd_len);
3771 
3772 	/* Set ramrod header (in particular, a number of entries to update) */
3773 	ecore_mcast_set_rdata_hdr_e1(sc, p, (uint8_t)cnt);
3774 
3775 	/* update a registry: we need the registry contents to be always up
3776 	 * to date in order to be able to execute a RESTORE opcode. Here
3777 	 * we use the fact that for 57710 we send one command at a time,
3778 	 * hence we may take the registry update out of the command handling
3779 	 * and do it in a simpler way here.
3780 	 */
3781 	rc = ecore_mcast_refresh_registry_e1(sc, o);
3782 	if (rc)
3783 		return rc;
3784 
3785 	/* If CLEAR_ONLY was requested - don't send a ramrod and clear
3786 	 * RAMROD_PENDING status immediately.
3787 	 */
3788 	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3789 		raw->clear_pending(raw);
3790 		return ECORE_SUCCESS;
3791 	} else {
3792 		/* No need for an explicit memory barrier here as long as we
3793 		 * ensure the ordering of writing to the SPQ element
3794 		 * and updating of the SPQ producer, which involves a memory
3795 		 * read. If the memory read is removed we will have to put a
3796 		 * full memory barrier there (inside ecore_sp_post()).
3797 		 */
3798 
3799 		/* Send a ramrod */
3800 		rc = ecore_sp_post( sc,
3801 				    RAMROD_CMD_ID_ETH_SET_MAC,
3802 				    raw->cid,
3803 				    raw->rdata_mapping,
3804 				    ETH_CONNECTION_TYPE);
3805 		if (rc)
3806 			return rc;
3807 
3808 		/* Ramrod completion is pending */
3809 		return ECORE_PENDING;
3810 	}
3811 }
3812 
3813 static int ecore_mcast_get_registry_size_exact(struct ecore_mcast_obj *o)
3814 {
3815 	return o->registry.exact_match.num_macs_set;
3816 }
3817 
3818 static int ecore_mcast_get_registry_size_aprox(struct ecore_mcast_obj *o)
3819 {
3820 	return o->registry.aprox_match.num_bins_set;
3821 }
3822 
3823 static void ecore_mcast_set_registry_size_exact(struct ecore_mcast_obj *o,
3824 						int n)
3825 {
3826 	o->registry.exact_match.num_macs_set = n;
3827 }
3828 
3829 static void ecore_mcast_set_registry_size_aprox(struct ecore_mcast_obj *o,
3830 						int n)
3831 {
3832 	o->registry.aprox_match.num_bins_set = n;
3833 }
3834 
3835 int ecore_config_mcast(struct bxe_softc *sc,
3836 		       struct ecore_mcast_ramrod_params *p,
3837 		       enum ecore_mcast_cmd cmd)
3838 {
3839 	struct ecore_mcast_obj *o = p->mcast_obj;
3840 	struct ecore_raw_obj *r = &o->raw;
3841 	int rc = 0, old_reg_size;
3842 
3843 	/* This is needed to recover the number of currently configured
3844 	 * mcast MACs in case of failure.
3845 	 */
3846 	old_reg_size = o->get_registry_size(o);
3847 
3848 	/* Do some calculations and checks */
3849 	rc = o->validate(sc, p, cmd);
3850 	if (rc)
3851 		return rc;
3852 
3853 	/* Return if there is no work to do */
3854 	if ((!p->mcast_list_len) && (!o->check_sched(o)))
3855 		return ECORE_SUCCESS;
3856 
3857 	ECORE_MSG(sc, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d\n",
3858 		  o->total_pending_num, p->mcast_list_len, o->max_cmd_len);
3859 
3860 	/* Enqueue the current command to the pending list if we can't complete
3861 	 * it in the current iteration
3862 	 */
3863 	if (r->check_pending(r) ||
3864 	    ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
3865 		rc = o->enqueue_cmd(sc, p->mcast_obj, p, cmd);
3866 		if (rc < 0)
3867 			goto error_exit1;
3868 
3869 		/* Now that the current command is in the command list, we
3870 		 * don't need to handle it separately.
3871 		 */
3872 		p->mcast_list_len = 0;
3873 	}
3874 
3875 	if (!r->check_pending(r)) {
3876 
3877 		/* Set 'pending' state */
3878 		r->set_pending(r);
3879 
3880 		/* Configure the new classification in the chip */
3881 		rc = o->config_mcast(sc, p, cmd);
3882 		if (rc < 0)
3883 			goto error_exit2;
3884 
3885 		/* Wait for a ramrod completion if was requested */
3886 		if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags))
3887 			rc = o->wait_comp(sc, o);
3888 	}
3889 
3890 	return rc;
3891 
3892 error_exit2:
3893 	r->clear_pending(r);
3894 
3895 error_exit1:
3896 	o->revert(sc, p, old_reg_size);
3897 
3898 	return rc;
3899 }
3900 
3901 static void ecore_mcast_clear_sched(struct ecore_mcast_obj *o)
3902 {
3903 	ECORE_SMP_MB_BEFORE_CLEAR_BIT();
3904 	ECORE_CLEAR_BIT(o->sched_state, o->raw.pstate);
3905 	ECORE_SMP_MB_AFTER_CLEAR_BIT();
3906 }
3907 
3908 static void ecore_mcast_set_sched(struct ecore_mcast_obj *o)
3909 {
3910 	ECORE_SMP_MB_BEFORE_CLEAR_BIT();
3911 	ECORE_SET_BIT(o->sched_state, o->raw.pstate);
3912 	ECORE_SMP_MB_AFTER_CLEAR_BIT();
3913 }
3914 
3915 static bool ecore_mcast_check_sched(struct ecore_mcast_obj *o)
3916 {
3917 	return !!ECORE_TEST_BIT(o->sched_state, o->raw.pstate);
3918 }
3919 
3920 static bool ecore_mcast_check_pending(struct ecore_mcast_obj *o)
3921 {
3922 	return o->raw.check_pending(&o->raw) || o->check_sched(o);
3923 }
3924 
3925 void ecore_init_mcast_obj(struct bxe_softc *sc,
3926 			  struct ecore_mcast_obj *mcast_obj,
3927 			  uint8_t mcast_cl_id, uint32_t mcast_cid, uint8_t func_id,
3928 			  uint8_t engine_id, void *rdata, ecore_dma_addr_t rdata_mapping,
3929 			  int state, unsigned long *pstate, ecore_obj_type type)
3930 {
3931 	ECORE_MEMSET(mcast_obj, 0, sizeof(*mcast_obj));
3932 
3933 	ecore_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
3934 			   rdata, rdata_mapping, state, pstate, type);
3935 
3936 	mcast_obj->engine_id = engine_id;
3937 
3938 	ECORE_LIST_INIT(&mcast_obj->pending_cmds_head);
3939 
3940 	mcast_obj->sched_state = ECORE_FILTER_MCAST_SCHED;
3941 	mcast_obj->check_sched = ecore_mcast_check_sched;
3942 	mcast_obj->set_sched = ecore_mcast_set_sched;
3943 	mcast_obj->clear_sched = ecore_mcast_clear_sched;
3944 
3945 	if (CHIP_IS_E1(sc)) {
3946 		mcast_obj->config_mcast      = ecore_mcast_setup_e1;
3947 		mcast_obj->enqueue_cmd       = ecore_mcast_enqueue_cmd;
3948 		mcast_obj->hdl_restore       =
3949 			ecore_mcast_handle_restore_cmd_e1;
3950 		mcast_obj->check_pending     = ecore_mcast_check_pending;
3951 
3952 		if (CHIP_REV_IS_SLOW(sc))
3953 			mcast_obj->max_cmd_len = ECORE_MAX_EMUL_MULTI;
3954 		else
3955 			mcast_obj->max_cmd_len = ECORE_MAX_MULTICAST;
3956 
3957 		mcast_obj->wait_comp         = ecore_mcast_wait;
3958 		mcast_obj->set_one_rule      = ecore_mcast_set_one_rule_e1;
3959 		mcast_obj->validate          = ecore_mcast_validate_e1;
3960 		mcast_obj->revert            = ecore_mcast_revert_e1;
3961 		mcast_obj->get_registry_size =
3962 			ecore_mcast_get_registry_size_exact;
3963 		mcast_obj->set_registry_size =
3964 			ecore_mcast_set_registry_size_exact;
3965 
3966 		/* 57710 is the only chip that uses the exact match for mcast
3967 		 * at the moment.
3968 		 */
3969 		ECORE_LIST_INIT(&mcast_obj->registry.exact_match.macs);
3970 
3971 	} else if (CHIP_IS_E1H(sc)) {
3972 		mcast_obj->config_mcast  = ecore_mcast_setup_e1h;
3973 		mcast_obj->enqueue_cmd   = NULL;
3974 		mcast_obj->hdl_restore   = NULL;
3975 		mcast_obj->check_pending = ecore_mcast_check_pending;
3976 
3977 		/* 57711 doesn't send a ramrod, so it has unlimited credit
3978 		 * for one command.
3979 		 */
3980 		mcast_obj->max_cmd_len       = -1;
3981 		mcast_obj->wait_comp         = ecore_mcast_wait;
3982 		mcast_obj->set_one_rule      = NULL;
3983 		mcast_obj->validate          = ecore_mcast_validate_e1h;
3984 		mcast_obj->revert            = ecore_mcast_revert_e1h;
3985 		mcast_obj->get_registry_size =
3986 			ecore_mcast_get_registry_size_aprox;
3987 		mcast_obj->set_registry_size =
3988 			ecore_mcast_set_registry_size_aprox;
3989 	} else {
3990 		mcast_obj->config_mcast      = ecore_mcast_setup_e2;
3991 		mcast_obj->enqueue_cmd       = ecore_mcast_enqueue_cmd;
3992 		mcast_obj->hdl_restore       =
3993 			ecore_mcast_handle_restore_cmd_e2;
3994 		mcast_obj->check_pending     = ecore_mcast_check_pending;
3995 		/* TODO: There should be a proper HSI define for this number!!!
3996 		 */
3997 		mcast_obj->max_cmd_len       = 16;
3998 		mcast_obj->wait_comp         = ecore_mcast_wait;
3999 		mcast_obj->set_one_rule      = ecore_mcast_set_one_rule_e2;
4000 		mcast_obj->validate          = ecore_mcast_validate_e2;
4001 		mcast_obj->revert            = ecore_mcast_revert_e2;
4002 		mcast_obj->get_registry_size =
4003 			ecore_mcast_get_registry_size_aprox;
4004 		mcast_obj->set_registry_size =
4005 			ecore_mcast_set_registry_size_aprox;
4006 	}
4007 }
4008 
4009 /*************************** Credit handling **********************************/
4010 
4011 /**
4012  * __atomic_add_ifless - add if the result is less than a given value.
4013  *
4014  * @v:	pointer of type ecore_atomic_t
4015  * @a:	the amount to add to v...
4016  * @u:	...if (v + a) is less than u.
4017  *
4018  * returns TRUE if (v + a) was less than u, and FALSE otherwise.
4019  *
4020  */
4021 static inline bool __atomic_add_ifless(ecore_atomic_t *v, int a, int u)
4022 {
4023 	int c, old;
4024 
4025 	c = ECORE_ATOMIC_READ(v);
4026 	for (;;) {
4027 		if (ECORE_UNLIKELY(c + a >= u))
4028 			return FALSE;
4029 
4030 		old = ECORE_ATOMIC_CMPXCHG((v), c, c + a);
4031 		if (ECORE_LIKELY(old == c))
4032 			break;
4033 		c = old;
4034 	}
4035 
4036 	return TRUE;
4037 }
4038 
4039 /**
4040  * __atomic_dec_ifmoe - dec if the result is greater than or equal to u.
4041  *
4042  * @v:	pointer of type ecore_atomic_t
4043  * @a:	the amount to dec from v...
4044  * @u:	...if (v - a) is greater than or equal to u.
4045  *
4046  * returns TRUE if (v - a) was greater than or equal to u, and FALSE
4047  * otherwise.
4048  */
4049 static inline bool __atomic_dec_ifmoe(ecore_atomic_t *v, int a, int u)
4050 {
4051 	int c, old;
4052 
4053 	c = ECORE_ATOMIC_READ(v);
4054 	for (;;) {
4055 		if (ECORE_UNLIKELY(c - a < u))
4056 			return FALSE;
4057 
4058 		old = ECORE_ATOMIC_CMPXCHG((v), c, c - a);
4059 		if (ECORE_LIKELY(old == c))
4060 			break;
4061 		c = old;
4062 	}
4063 
4064 	return TRUE;
4065 }
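
/* Example (illustrative): with a counter v initially holding 5,
 *
 *	__atomic_dec_ifmoe(&v, 3, 0);	// TRUE,  v becomes 2
 *	__atomic_dec_ifmoe(&v, 3, 0);	// FALSE, 2 - 3 < 0, v stays 2
 *	__atomic_add_ifless(&v, 4, 6);	// FALSE, 2 + 4 is not < 6
 *	__atomic_add_ifless(&v, 3, 6);	// TRUE,  v becomes 5
 *
 * The CMPXCHG loop retries only if another thread changed the counter
 * between the read and the compare-and-swap, making the check and the
 * update atomic as a pair.
 */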
4066 
4067 static bool ecore_credit_pool_get(struct ecore_credit_pool_obj *o, int cnt)
4068 {
4069 	bool rc;
4070 
4071 	ECORE_SMP_MB();
4072 	rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
4073 	ECORE_SMP_MB();
4074 
4075 	return rc;
4076 }
4077 
4078 static bool ecore_credit_pool_put(struct ecore_credit_pool_obj *o, int cnt)
4079 {
4080 	bool rc;
4081 
4082 	ECORE_SMP_MB();
4083 
4084 	/* Don't allow a refill if credit + cnt > pool_sz */
4085 	rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
4086 
4087 	ECORE_SMP_MB();
4088 
4089 	return rc;
4090 }
4091 
4092 static int ecore_credit_pool_check(struct ecore_credit_pool_obj *o)
4093 {
4094 	int cur_credit;
4095 
4096 	ECORE_SMP_MB();
4097 	cur_credit = ECORE_ATOMIC_READ(&o->credit);
4098 
4099 	return cur_credit;
4100 }
4101 
4102 static bool ecore_credit_pool_always_TRUE(struct ecore_credit_pool_obj *o,
4103 					  int cnt)
4104 {
4105 	return TRUE;
4106 }
4107 
4108 static bool ecore_credit_pool_get_entry(
4109 	struct ecore_credit_pool_obj *o,
4110 	int *offset)
4111 {
4112 	int idx, vec, i;
4113 
4114 	*offset = -1;
4115 
4116 	/* Find "internal cam-offset" then add to base for this object... */
4117 	for (vec = 0; vec < ECORE_POOL_VEC_SIZE; vec++) {
4118 
4119 		/* Skip the current vector if there are no free entries in it */
4120 		if (!o->pool_mirror[vec])
4121 			continue;
4122 
4123 		/* If we've got here we are going to find a free entry */
4124 		for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
4125 		      i < BIT_VEC64_ELEM_SZ; idx++, i++)
4126 
4127 			if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
4128 				/* Got one!! */
4129 				BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
4130 				*offset = o->base_pool_offset + idx;
4131 				return TRUE;
4132 			}
4133 	}
4134 
4135 	return FALSE;
4136 }
4137 
4138 static bool ecore_credit_pool_put_entry(
4139 	struct ecore_credit_pool_obj *o,
4140 	int offset)
4141 {
4142 	if (offset < o->base_pool_offset)
4143 		return FALSE;
4144 
4145 	offset -= o->base_pool_offset;
4146 
4147 	if (offset >= o->pool_sz)
4148 		return FALSE;
4149 
4150 	/* Return the entry to the pool */
4151 	BIT_VEC64_SET_BIT(o->pool_mirror, offset);
4152 
4153 	return TRUE;
4154 }
4155 
4156 static bool ecore_credit_pool_put_entry_always_TRUE(
4157 	struct ecore_credit_pool_obj *o,
4158 	int offset)
4159 {
4160 	return TRUE;
4161 }
4162 
4163 static bool ecore_credit_pool_get_entry_always_TRUE(
4164 	struct ecore_credit_pool_obj *o,
4165 	int *offset)
4166 {
4167 	*offset = -1;
4168 	return TRUE;
4169 }
4170 /**
4171  * ecore_init_credit_pool - initialize credit pool internals.
4172  *
4173  * @p:		credit pool object
4174  * @base:	Base entry in the CAM to use.
4175  * @credit:	pool size.
4176  *
4177  * If base is negative, no CAM entry handling will be performed.
4178  * If credit is negative, pool operations will always succeed (unlimited pool).
4179  *
4180  */
4181 static inline void ecore_init_credit_pool(struct ecore_credit_pool_obj *p,
4182 					  int base, int credit)
4183 {
4184 	/* Zero the object first */
4185 	ECORE_MEMSET(p, 0, sizeof(*p));
4186 
4187 	/* Set the table to all 1s */
4188 	ECORE_MEMSET(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
4189 
4190 	/* Init a pool as full */
4191 	ECORE_ATOMIC_SET(&p->credit, credit);
4192 
4193 	/* The total pool size */
4194 	p->pool_sz = credit;
4195 
4196 	p->base_pool_offset = base;
4197 
4198 	/* Commit the change */
4199 	ECORE_SMP_MB();
4200 
4201 	p->check = ecore_credit_pool_check;
4202 
4203 	/* if pool credit is negative - disable the checks */
4204 	if (credit >= 0) {
4205 		p->put      = ecore_credit_pool_put;
4206 		p->get      = ecore_credit_pool_get;
4207 		p->put_entry = ecore_credit_pool_put_entry;
4208 		p->get_entry = ecore_credit_pool_get_entry;
4209 	} else {
4210 		p->put      = ecore_credit_pool_always_TRUE;
4211 		p->get      = ecore_credit_pool_always_TRUE;
4212 		p->put_entry = ecore_credit_pool_put_entry_always_TRUE;
4213 		p->get_entry = ecore_credit_pool_get_entry_always_TRUE;
4214 	}
4215 
4216 	/* If base is negative - disable entries handling */
4217 	if (base < 0) {
4218 		p->put_entry = ecore_credit_pool_put_entry_always_TRUE;
4219 		p->get_entry = ecore_credit_pool_get_entry_always_TRUE;
4220 	}
4221 }
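
/* Usage sketch (illustrative only): a pool initialized with
 * ecore_init_credit_pool(p, 0, 4) starts with 4 credits and CAM entries
 * 0..3 marked free. A consumer would typically do:
 *
 *	int off;
 *
 *	if (p->get(p, 1) && p->get_entry(p, &off)) {
 *		// "off" is a free CAM offset - program the entry
 *	}
 *	...
 *	p->put_entry(p, off);
 *	p->put(p, 1);
 *
 * With a negative credit or base the respective callbacks degrade to the
 * always-TRUE variants set up above.
 */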
4222 
4223 void ecore_init_mac_credit_pool(struct bxe_softc *sc,
4224 				struct ecore_credit_pool_obj *p, uint8_t func_id,
4225 				uint8_t func_num)
4226 {
4227 /* TODO: this will be defined in consts as well... */
4228 #define ECORE_CAM_SIZE_EMUL 5
4229 
4230 	int cam_sz;
4231 
4232 	if (CHIP_IS_E1(sc)) {
4233 		/* In E1, Multicast is saved in cam... */
4234 		if (!CHIP_REV_IS_SLOW(sc))
4235 			cam_sz = (MAX_MAC_CREDIT_E1 / 2) - ECORE_MAX_MULTICAST;
4236 		else
4237 			cam_sz = ECORE_CAM_SIZE_EMUL - ECORE_MAX_EMUL_MULTI;
4238 
4239 		ecore_init_credit_pool(p, func_id * cam_sz, cam_sz);
4240 
4241 	} else if (CHIP_IS_E1H(sc)) {
4242 		/* CAM credit is equally divided between all active functions
4243 		 * on the PORT.
4244 		 */
4245 		if ((func_num > 0)) {
4246 			if (!CHIP_REV_IS_SLOW(sc))
4247 				cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num));
4248 			else
4249 				cam_sz = ECORE_CAM_SIZE_EMUL;
4250 			ecore_init_credit_pool(p, func_id * cam_sz, cam_sz);
4251 		} else {
4252 			/* this should never happen! Block MAC operations. */
4253 			ecore_init_credit_pool(p, 0, 0);
4254 		}
4255 
4256 	} else {
4257 
4258 		/*
4259 		 * CAM credit is equally divided between all active functions
4260 		 * on the PATH.
4261 		 */
4262 		if ((func_num > 0)) {
4263 			if (!CHIP_REV_IS_SLOW(sc))
4264 				cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
4265 			else
4266 				cam_sz = ECORE_CAM_SIZE_EMUL;
4267 
4268 			/* No need for CAM entries handling for 57712 and
4269 			 * newer.
4270 			 */
4271 			ecore_init_credit_pool(p, -1, cam_sz);
4272 		} else {
4273 			/* this should never happen! Block MAC operations. */
4274 			ecore_init_credit_pool(p, 0, 0);
4275 		}
4276 	}
4277 }
4278 
4279 void ecore_init_vlan_credit_pool(struct bxe_softc *sc,
4280 				 struct ecore_credit_pool_obj *p,
4281 				 uint8_t func_id,
4282 				 uint8_t func_num)
4283 {
4284 	if (CHIP_IS_E1x(sc)) {
4285 		/* There is no VLAN credit in HW on 57710 and 57711; only
4286 		 * MAC / MAC-VLAN can be set.
4287 		 */
4288 		ecore_init_credit_pool(p, 0, -1);
4289 	} else {
4290 		/* CAM credit is equally divided between all active functions
4291 		 * on the PATH.
4292 		 */
4293 		if (func_num > 0) {
4294 			int credit = MAX_VLAN_CREDIT_E2 / func_num;
4295 			ecore_init_credit_pool(p, func_id * credit, credit);
4296 		} else
4297 			/* this should never happen! Block VLAN operations. */
4298 			ecore_init_credit_pool(p, 0, 0);
4299 	}
4300 }
4301 
4302 /****************** RSS Configuration ******************/
4303 
4304 /**
4305  * ecore_setup_rss - configure RSS
4306  *
4307  * @sc:		device handle
4308  * @p:		rss configuration
4309  *
4310  * Sends an RSS_UPDATE ramrod to the chip.
4311  */
4312 static int ecore_setup_rss(struct bxe_softc *sc,
4313 			   struct ecore_config_rss_params *p)
4314 {
4315 	struct ecore_rss_config_obj *o = p->rss_obj;
4316 	struct ecore_raw_obj *r = &o->raw;
4317 	struct eth_rss_update_ramrod_data *data =
4318 		(struct eth_rss_update_ramrod_data *)(r->rdata);
4319 	uint8_t rss_mode = 0;
4320 	int rc;
4321 
4322 	ECORE_MEMSET(data, 0, sizeof(*data));
4323 
4324 	ECORE_MSG(sc, "Configuring RSS\n");
4325 
4326 	/* Set an echo field */
4327 	data->echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
4328 				 (r->state << ECORE_SWCID_SHIFT));
4329 
4330 	/* RSS mode */
4331 	if (ECORE_TEST_BIT(ECORE_RSS_MODE_DISABLED, &p->rss_flags))
4332 		rss_mode = ETH_RSS_MODE_DISABLED;
4333 	else if (ECORE_TEST_BIT(ECORE_RSS_MODE_REGULAR, &p->rss_flags))
4334 		rss_mode = ETH_RSS_MODE_REGULAR;
4335 
4336 	data->rss_mode = rss_mode;
4337 
4338 	ECORE_MSG(sc, "rss_mode=%d\n", rss_mode);
4339 
4340 	/* RSS capabilities */
4341 	if (ECORE_TEST_BIT(ECORE_RSS_IPV4, &p->rss_flags))
4342 		data->capabilities |=
4343 			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
4344 
4345 	if (ECORE_TEST_BIT(ECORE_RSS_IPV4_TCP, &p->rss_flags))
4346 		data->capabilities |=
4347 			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
4348 
4349 	if (ECORE_TEST_BIT(ECORE_RSS_IPV4_UDP, &p->rss_flags))
4350 		data->capabilities |=
4351 			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
4352 
4353 	if (ECORE_TEST_BIT(ECORE_RSS_IPV6, &p->rss_flags))
4354 		data->capabilities |=
4355 			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
4356 
4357 	if (ECORE_TEST_BIT(ECORE_RSS_IPV6_TCP, &p->rss_flags))
4358 		data->capabilities |=
4359 			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
4360 
4361 	if (ECORE_TEST_BIT(ECORE_RSS_IPV6_UDP, &p->rss_flags))
4362 		data->capabilities |=
4363 			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
4364 
4365 	if (ECORE_TEST_BIT(ECORE_RSS_TUNNELING, &p->rss_flags)) {
4366 		data->udp_4tuple_dst_port_mask = ECORE_CPU_TO_LE16(p->tunnel_mask);
4367 		data->udp_4tuple_dst_port_value =
4368 			ECORE_CPU_TO_LE16(p->tunnel_value);
4369 	}
4370 
4371 	/* Hashing mask */
4372 	data->rss_result_mask = p->rss_result_mask;
4373 
4374 	/* RSS engine ID */
4375 	data->rss_engine_id = o->engine_id;
4376 
4377 	ECORE_MSG(sc, "rss_engine_id=%d\n", data->rss_engine_id);
4378 
4379 	/* Indirection table */
4380 	ECORE_MEMCPY(data->indirection_table, p->ind_table,
4381 		  T_ETH_INDIRECTION_TABLE_SIZE);
4382 
4383 	/* Remember the last configuration */
4384 	ECORE_MEMCPY(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
4385 
4386 
4387 	/* RSS keys */
4388 	if (ECORE_TEST_BIT(ECORE_RSS_SET_SRCH, &p->rss_flags)) {
4389 		ECORE_MEMCPY(&data->rss_key[0], &p->rss_key[0],
4390 		       sizeof(data->rss_key));
4391 		data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
4392 	}
4393 
4394 	/* No need for an explicit memory barrier here as long as we
4395 	 * ensure the ordering of writing to the SPQ element
4396 	 * and updating of the SPQ producer, which involves a memory
4397 	 * read. If the memory read is removed we will have to put a
4398 	 * full memory barrier there (inside ecore_sp_post()).
4399 	 */
4400 
4401 	/* Send a ramrod */
4402 	rc = ecore_sp_post(sc,
4403 			     RAMROD_CMD_ID_ETH_RSS_UPDATE,
4404 			     r->cid,
4405 			     r->rdata_mapping,
4406 			     ETH_CONNECTION_TYPE);
4407 
4408 	if (rc < 0)
4409 		return rc;
4410 
4411 	return ECORE_PENDING;
4412 }
4413 
4414 void ecore_get_rss_ind_table(struct ecore_rss_config_obj *rss_obj,
4415 			     uint8_t *ind_table)
4416 {
4417 	ECORE_MEMCPY(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
4418 }
4419 
4420 int ecore_config_rss(struct bxe_softc *sc,
4421 		     struct ecore_config_rss_params *p)
4422 {
4423 	int rc;
4424 	struct ecore_rss_config_obj *o = p->rss_obj;
4425 	struct ecore_raw_obj *r = &o->raw;
4426 
4427 	/* Do nothing if only driver cleanup was requested */
4428 	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
4429 		return ECORE_SUCCESS;
4430 
4431 	r->set_pending(r);
4432 
4433 	rc = o->config_rss(sc, p);
4434 	if (rc < 0) {
4435 		r->clear_pending(r);
4436 		return rc;
4437 	}
4438 
4439 	if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags))
4440 		rc = r->wait_comp(sc, r);
4441 
4442 	return rc;
4443 }
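
/* Usage sketch (illustrative; the object location and mask value are
 * assumptions, not taken from this file): a caller fills a
 * struct ecore_config_rss_params and invokes ecore_config_rss():
 *
 *	struct ecore_config_rss_params params = { 0 };
 *
 *	params.rss_obj = &sc->rss_conf_obj;	// assumed softc member
 *	ECORE_SET_BIT(ECORE_RSS_MODE_REGULAR, &params.rss_flags);
 *	ECORE_SET_BIT(ECORE_RSS_IPV4, &params.rss_flags);
 *	ECORE_SET_BIT(ECORE_RSS_IPV4_TCP, &params.rss_flags);
 *	params.rss_result_mask = 0x7;		// hash mask, example value
 *	ECORE_MEMCPY(params.ind_table, ind_table,
 *		     T_ETH_INDIRECTION_TABLE_SIZE);
 *	ECORE_SET_BIT(RAMROD_COMP_WAIT, &params.ramrod_flags);
 *
 *	rc = ecore_config_rss(sc, &params);
 */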
4444 
4445 void ecore_init_rss_config_obj(struct bxe_softc *sc,
4446 			       struct ecore_rss_config_obj *rss_obj,
4447 			       uint8_t cl_id, uint32_t cid, uint8_t func_id, uint8_t engine_id,
4448 			       void *rdata, ecore_dma_addr_t rdata_mapping,
4449 			       int state, unsigned long *pstate,
4450 			       ecore_obj_type type)
4451 {
4452 	ecore_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
4453 			   rdata_mapping, state, pstate, type);
4454 
4455 	rss_obj->engine_id  = engine_id;
4456 	rss_obj->config_rss = ecore_setup_rss;
4457 }
4458 
4459 /********************** Queue state object ***********************************/
4460 
4461 /**
4462  * ecore_queue_state_change - perform Queue state change transition
4463  *
4464  * @sc:		device handle
4465  * @params:	parameters to perform the transition
4466  *
4467  * returns 0 in case of a successfully completed transition, a negative error
4468  * code in case of failure, or a positive (EBUSY) value if there is a
4469  * completion that is still pending (possible only if RAMROD_COMP_WAIT is
4470  * not set in params->ramrod_flags for asynchronous commands).
4471  *
4472  */
4473 int ecore_queue_state_change(struct bxe_softc *sc,
4474 			     struct ecore_queue_state_params *params)
4475 {
4476 	struct ecore_queue_sp_obj *o = params->q_obj;
4477 	int rc, pending_bit;
4478 	unsigned long *pending = &o->pending;
4479 
4480 	/* Check that the requested transition is legal */
4481 	rc = o->check_transition(sc, o, params);
4482 	if (rc) {
4483 		ECORE_ERR("check transition returned an error. rc %d\n", rc);
4484 		return ECORE_INVAL;
4485 	}
4486 
4487 	/* Set "pending" bit */
4488 	ECORE_MSG(sc, "pending bit was=%lx\n", o->pending);
4489 	pending_bit = o->set_pending(o, params);
4490 	ECORE_MSG(sc, "pending bit now=%lx\n", o->pending);
4491 
4492 	/* Don't send a command if only driver cleanup was requested */
4493 	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
4494 		o->complete_cmd(sc, o, pending_bit);
4495 	else {
4496 		/* Send a ramrod */
4497 		rc = o->send_cmd(sc, params);
4498 		if (rc) {
4499 			o->next_state = ECORE_Q_STATE_MAX;
4500 			ECORE_CLEAR_BIT(pending_bit, pending);
4501 			ECORE_SMP_MB_AFTER_CLEAR_BIT();
4502 			return rc;
4503 		}
4504 
4505 		if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
4506 			rc = o->wait_comp(sc, o, pending_bit);
4507 			if (rc)
4508 				return rc;
4509 
4510 			return ECORE_SUCCESS;
4511 		}
4512 	}
4513 
4514 	return ECORE_RET_PENDING(pending_bit, pending);
4515 }
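
/* Usage sketch (illustrative; the q_obj location is an assumption): a
 * synchronous queue command is issued by filling the parameters and
 * waiting for the completion via RAMROD_COMP_WAIT:
 *
 *	struct ecore_queue_state_params q_params = { 0 };
 *
 *	q_params.q_obj = &fp->q_obj;	// assumed fastpath member
 *	q_params.cmd = ECORE_Q_CMD_ACTIVATE;
 *	ECORE_SET_BIT(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
 *
 *	rc = ecore_queue_state_change(sc, &q_params);
 *	// rc == ECORE_SUCCESS once the UPDATE completion has arrived
 */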
4516 
4517 static int ecore_queue_set_pending(struct ecore_queue_sp_obj *obj,
4518 				   struct ecore_queue_state_params *params)
4519 {
4520 	enum ecore_queue_cmd cmd = params->cmd, bit;
4521 
4522 	/* ACTIVATE and DEACTIVATE commands are implemented on top of
4523 	 * the UPDATE command.
4524 	 */
4525 	if ((cmd == ECORE_Q_CMD_ACTIVATE) ||
4526 	    (cmd == ECORE_Q_CMD_DEACTIVATE))
4527 		bit = ECORE_Q_CMD_UPDATE;
4528 	else
4529 		bit = cmd;
4530 
4531 	ECORE_SET_BIT(bit, &obj->pending);
4532 	return bit;
4533 }
4534 
4535 static int ecore_queue_wait_comp(struct bxe_softc *sc,
4536 				 struct ecore_queue_sp_obj *o,
4537 				 enum ecore_queue_cmd cmd)
4538 {
4539 	return ecore_state_wait(sc, cmd, &o->pending);
4540 }
4541 
4542 /**
4543  * ecore_queue_comp_cmd - complete the state change command.
4544  *
4545  * @sc:		device handle
4546  * @o:		queue state object
4547  * @cmd:	command that has completed
4548  *
4549  * Checks that the arrived completion is expected.
4550  */
4551 static int ecore_queue_comp_cmd(struct bxe_softc *sc,
4552 				struct ecore_queue_sp_obj *o,
4553 				enum ecore_queue_cmd cmd)
4554 {
4555 	unsigned long cur_pending = o->pending;
4556 
4557 	if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) {
4558 		ECORE_ERR("Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d\n",
4559 			  cmd, o->cids[ECORE_PRIMARY_CID_INDEX],
4560 			  o->state, cur_pending, o->next_state);
4561 		return ECORE_INVAL;
4562 	}
4563 
4564 	if (o->next_tx_only >= o->max_cos)
4565 		/* >= because the tx-only count must always be smaller than
4566 		 * max_cos, since the primary connection supports COS 0
4567 		 */
4568 		ECORE_ERR("illegal value for next tx_only: %d. max cos was %d\n",
4569 			  o->next_tx_only, o->max_cos);
4570 
4571 	ECORE_MSG(sc,
4572 		  "Completing command %d for queue %d, setting state to %d\n",
4573 		  cmd, o->cids[ECORE_PRIMARY_CID_INDEX], o->next_state);
4574 
4575 	if (o->next_tx_only)  /* print num tx-only if any exist */
4576 		ECORE_MSG(sc, "primary cid %d: num tx-only cons %d\n",
4577 			  o->cids[ECORE_PRIMARY_CID_INDEX], o->next_tx_only);
4578 
4579 	o->state = o->next_state;
4580 	o->num_tx_only = o->next_tx_only;
4581 	o->next_state = ECORE_Q_STATE_MAX;
4582 
4583 	/* It's important that o->state and o->next_state are
4584 	 * updated before o->pending.
4585 	 */
4586 	wmb();
4587 
4588 	ECORE_CLEAR_BIT(cmd, &o->pending);
4589 	ECORE_SMP_MB_AFTER_CLEAR_BIT();
4590 
4591 	return ECORE_SUCCESS;
4592 }
4593 
4594 static void ecore_q_fill_setup_data_e2(struct bxe_softc *sc,
4595 				struct ecore_queue_state_params *cmd_params,
4596 				struct client_init_ramrod_data *data)
4597 {
4598 	struct ecore_queue_setup_params *params = &cmd_params->params.setup;
4599 
4600 	/* Rx data */
4601 
4602 	/* IPv6 TPA supported for E2 and above only */
4603 	data->rx.tpa_en |= ECORE_TEST_BIT(ECORE_Q_FLG_TPA_IPV6,
4604 					  &params->flags) *
4605 				CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
4606 }
4607 
4608 static void ecore_q_fill_init_general_data(struct bxe_softc *sc,
4609 				struct ecore_queue_sp_obj *o,
4610 				struct ecore_general_setup_params *params,
4611 				struct client_init_general_data *gen_data,
4612 				unsigned long *flags)
4613 {
4614 	gen_data->client_id = o->cl_id;
4615 
4616 	if (ECORE_TEST_BIT(ECORE_Q_FLG_STATS, flags)) {
4617 		gen_data->statistics_counter_id =
4618 					params->stat_id;
4619 		gen_data->statistics_en_flg = 1;
4620 		gen_data->statistics_zero_flg =
4621 			ECORE_TEST_BIT(ECORE_Q_FLG_ZERO_STATS, flags);
4622 	} else
4623 		gen_data->statistics_counter_id =
4624 					DISABLE_STATISTIC_COUNTER_ID_VALUE;
4625 
4626 	gen_data->is_fcoe_flg = ECORE_TEST_BIT(ECORE_Q_FLG_FCOE,
4627 						   flags);
4628 	gen_data->activate_flg = ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE,
4629 						    flags);
4630 	gen_data->sp_client_id = params->spcl_id;
4631 	gen_data->mtu = ECORE_CPU_TO_LE16(params->mtu);
4632 	gen_data->func_id = o->func_id;
4633 
4634 	gen_data->cos = params->cos;
4635 
4636 	gen_data->traffic_type =
4637 		ECORE_TEST_BIT(ECORE_Q_FLG_FCOE, flags) ?
4638 		LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
4639 
4640 	ECORE_MSG(sc, "flags: active %d, cos %d, stats en %d\n",
4641 		  gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
4642 }
4643 
4644 static void ecore_q_fill_init_tx_data(struct ecore_queue_sp_obj *o,
4645 				struct ecore_txq_setup_params *params,
4646 				struct client_init_tx_data *tx_data,
4647 				unsigned long *flags)
4648 {
4649 	tx_data->enforce_security_flg =
4650 		ECORE_TEST_BIT(ECORE_Q_FLG_TX_SEC, flags);
4651 	tx_data->default_vlan =
4652 		ECORE_CPU_TO_LE16(params->default_vlan);
4653 	tx_data->default_vlan_flg =
4654 		ECORE_TEST_BIT(ECORE_Q_FLG_DEF_VLAN, flags);
4655 	tx_data->tx_switching_flg =
4656 		ECORE_TEST_BIT(ECORE_Q_FLG_TX_SWITCH, flags);
4657 	tx_data->anti_spoofing_flg =
4658 		ECORE_TEST_BIT(ECORE_Q_FLG_ANTI_SPOOF, flags);
4659 	tx_data->force_default_pri_flg =
4660 		ECORE_TEST_BIT(ECORE_Q_FLG_FORCE_DEFAULT_PRI, flags);
4661 	tx_data->refuse_outband_vlan_flg =
4662 		ECORE_TEST_BIT(ECORE_Q_FLG_REFUSE_OUTBAND_VLAN, flags);
4663 	tx_data->tunnel_lso_inc_ip_id =
4664 		ECORE_TEST_BIT(ECORE_Q_FLG_TUN_INC_INNER_IP_ID, flags);
4665 	tx_data->tunnel_non_lso_pcsum_location =
4666 		ECORE_TEST_BIT(ECORE_Q_FLG_PCSUM_ON_PKT, flags) ? CSUM_ON_PKT :
4667 							    CSUM_ON_BD;
4668 
4669 	tx_data->tx_status_block_id = params->fw_sb_id;
4670 	tx_data->tx_sb_index_number = params->sb_cq_index;
4671 	tx_data->tss_leading_client_id = params->tss_leading_cl_id;
4672 
4673 	tx_data->tx_bd_page_base.lo =
4674 		ECORE_CPU_TO_LE32(U64_LO(params->dscr_map));
4675 	tx_data->tx_bd_page_base.hi =
4676 		ECORE_CPU_TO_LE32(U64_HI(params->dscr_map));
4677 
4678 	/* Don't configure any Tx switching mode during queue SETUP */
4679 	tx_data->state = 0;
4680 }
4681 
4682 static void ecore_q_fill_init_pause_data(struct ecore_queue_sp_obj *o,
4683 				struct rxq_pause_params *params,
4684 				struct client_init_rx_data *rx_data)
4685 {
4686 	/* flow control data */
4687 	rx_data->cqe_pause_thr_low = ECORE_CPU_TO_LE16(params->rcq_th_lo);
4688 	rx_data->cqe_pause_thr_high = ECORE_CPU_TO_LE16(params->rcq_th_hi);
4689 	rx_data->bd_pause_thr_low = ECORE_CPU_TO_LE16(params->bd_th_lo);
4690 	rx_data->bd_pause_thr_high = ECORE_CPU_TO_LE16(params->bd_th_hi);
4691 	rx_data->sge_pause_thr_low = ECORE_CPU_TO_LE16(params->sge_th_lo);
4692 	rx_data->sge_pause_thr_high = ECORE_CPU_TO_LE16(params->sge_th_hi);
4693 	rx_data->rx_cos_mask = ECORE_CPU_TO_LE16(params->pri_map);
4694 }
4695 
4696 static void ecore_q_fill_init_rx_data(struct ecore_queue_sp_obj *o,
4697 				struct ecore_rxq_setup_params *params,
4698 				struct client_init_rx_data *rx_data,
4699 				unsigned long *flags)
4700 {
4701 	rx_data->tpa_en = ECORE_TEST_BIT(ECORE_Q_FLG_TPA, flags) *
4702 				CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
4703 	rx_data->tpa_en |= ECORE_TEST_BIT(ECORE_Q_FLG_TPA_GRO, flags) *
4704 				CLIENT_INIT_RX_DATA_TPA_MODE;
4705 	rx_data->vmqueue_mode_en_flg = 0;
4706 
4707 	rx_data->extra_data_over_sgl_en_flg =
4708 		ECORE_TEST_BIT(ECORE_Q_FLG_OOO, flags);
4709 	rx_data->cache_line_alignment_log_size =
4710 		params->cache_line_log;
4711 	rx_data->enable_dynamic_hc =
4712 		ECORE_TEST_BIT(ECORE_Q_FLG_DHC, flags);
4713 	rx_data->max_sges_for_packet = params->max_sges_pkt;
4714 	rx_data->client_qzone_id = params->cl_qzone_id;
4715 	rx_data->max_agg_size = ECORE_CPU_TO_LE16(params->tpa_agg_sz);
4716 
4717 	/* Always start in DROP_ALL mode */
4718 	rx_data->state = ECORE_CPU_TO_LE16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
4719 				     CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
4720 
4721 	/* We don't set drop flags */
4722 	rx_data->drop_ip_cs_err_flg = 0;
4723 	rx_data->drop_tcp_cs_err_flg = 0;
4724 	rx_data->drop_ttl0_flg = 0;
4725 	rx_data->drop_udp_cs_err_flg = 0;
4726 	rx_data->inner_vlan_removal_enable_flg =
4727 		ECORE_TEST_BIT(ECORE_Q_FLG_VLAN, flags);
4728 	rx_data->outer_vlan_removal_enable_flg =
4729 		ECORE_TEST_BIT(ECORE_Q_FLG_OV, flags);
4730 	rx_data->status_block_id = params->fw_sb_id;
4731 	rx_data->rx_sb_index_number = params->sb_cq_index;
4732 	rx_data->max_tpa_queues = params->max_tpa_queues;
4733 	rx_data->max_bytes_on_bd = ECORE_CPU_TO_LE16(params->buf_sz);
4734 	rx_data->sge_buff_size = ECORE_CPU_TO_LE16(params->sge_buf_sz);
4735 	rx_data->bd_page_base.lo =
4736 		ECORE_CPU_TO_LE32(U64_LO(params->dscr_map));
4737 	rx_data->bd_page_base.hi =
4738 		ECORE_CPU_TO_LE32(U64_HI(params->dscr_map));
4739 	rx_data->sge_page_base.lo =
4740 		ECORE_CPU_TO_LE32(U64_LO(params->sge_map));
4741 	rx_data->sge_page_base.hi =
4742 		ECORE_CPU_TO_LE32(U64_HI(params->sge_map));
4743 	rx_data->cqe_page_base.lo =
4744 		ECORE_CPU_TO_LE32(U64_LO(params->rcq_map));
4745 	rx_data->cqe_page_base.hi =
4746 		ECORE_CPU_TO_LE32(U64_HI(params->rcq_map));
4747 	rx_data->is_leading_rss = ECORE_TEST_BIT(ECORE_Q_FLG_LEADING_RSS,
4748 						 flags);
4749 
4750 	if (ECORE_TEST_BIT(ECORE_Q_FLG_MCAST, flags)) {
4751 		rx_data->approx_mcast_engine_id = params->mcast_engine_id;
4752 		rx_data->is_approx_mcast = 1;
4753 	}
4754 
4755 	rx_data->rss_engine_id = params->rss_engine_id;
4756 
4757 	/* silent vlan removal */
4758 	rx_data->silent_vlan_removal_flg =
4759 		ECORE_TEST_BIT(ECORE_Q_FLG_SILENT_VLAN_REM, flags);
4760 	rx_data->silent_vlan_value =
4761 		ECORE_CPU_TO_LE16(params->silent_removal_value);
4762 	rx_data->silent_vlan_mask =
4763 		ECORE_CPU_TO_LE16(params->silent_removal_mask);
4764 }
4765 
4766 /* initialize the general, tx and rx parts of a queue object */
4767 static void ecore_q_fill_setup_data_cmn(struct bxe_softc *sc,
4768 				struct ecore_queue_state_params *cmd_params,
4769 				struct client_init_ramrod_data *data)
4770 {
4771 	ecore_q_fill_init_general_data(sc, cmd_params->q_obj,
4772 				       &cmd_params->params.setup.gen_params,
4773 				       &data->general,
4774 				       &cmd_params->params.setup.flags);
4775 
4776 	ecore_q_fill_init_tx_data(cmd_params->q_obj,
4777 				  &cmd_params->params.setup.txq_params,
4778 				  &data->tx,
4779 				  &cmd_params->params.setup.flags);
4780 
4781 	ecore_q_fill_init_rx_data(cmd_params->q_obj,
4782 				  &cmd_params->params.setup.rxq_params,
4783 				  &data->rx,
4784 				  &cmd_params->params.setup.flags);
4785 
4786 	ecore_q_fill_init_pause_data(cmd_params->q_obj,
4787 				     &cmd_params->params.setup.pause_params,
4788 				     &data->rx);
4789 }
4790 
4791 /* initialize the general and tx parts of a tx-only queue object */
4792 static void ecore_q_fill_setup_tx_only(struct bxe_softc *sc,
4793 				struct ecore_queue_state_params *cmd_params,
4794 				struct tx_queue_init_ramrod_data *data)
4795 {
4796 	ecore_q_fill_init_general_data(sc, cmd_params->q_obj,
4797 				       &cmd_params->params.tx_only.gen_params,
4798 				       &data->general,
4799 				       &cmd_params->params.tx_only.flags);
4800 
4801 	ecore_q_fill_init_tx_data(cmd_params->q_obj,
4802 				  &cmd_params->params.tx_only.txq_params,
4803 				  &data->tx,
4804 				  &cmd_params->params.tx_only.flags);
4805 
4806 	ECORE_MSG(sc, "cid %d, tx bd page lo %x hi %x\n",
4807 		  cmd_params->q_obj->cids[0],
4808 		  data->tx.tx_bd_page_base.lo,
4809 		  data->tx.tx_bd_page_base.hi);
4810 }
4811 
4812 /**
4813  * ecore_q_init - init HW/FW queue
4814  *
4815  * @sc:		device handle
4816  * @params:	queue state parameters
4817  *
4818  * HW/FW initial Queue configuration:
4819  *      - HC: Rx and Tx
4820  *      - CDU context validation
4821  *
4822  */
4823 static inline int ecore_q_init(struct bxe_softc *sc,
4824 			       struct ecore_queue_state_params *params)
4825 {
4826 	struct ecore_queue_sp_obj *o = params->q_obj;
4827 	struct ecore_queue_init_params *init = &params->params.init;
4828 	uint16_t hc_usec;
4829 	uint8_t cos;
4830 
4831 	/* Tx HC configuration */
4832 	if (ECORE_TEST_BIT(ECORE_Q_TYPE_HAS_TX, &o->type) &&
4833 	    ECORE_TEST_BIT(ECORE_Q_FLG_HC, &init->tx.flags)) {
4834 		hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
4835 
4836 		ECORE_UPDATE_COALESCE_SB_INDEX(sc, init->tx.fw_sb_id,
4837 			init->tx.sb_cq_index,
4838 			!ECORE_TEST_BIT(ECORE_Q_FLG_HC_EN, &init->tx.flags),
4839 			hc_usec);
4840 	}
4841 
4842 	/* Rx HC configuration */
4843 	if (ECORE_TEST_BIT(ECORE_Q_TYPE_HAS_RX, &o->type) &&
4844 	    ECORE_TEST_BIT(ECORE_Q_FLG_HC, &init->rx.flags)) {
4845 		hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
4846 
4847 		ECORE_UPDATE_COALESCE_SB_INDEX(sc, init->rx.fw_sb_id,
4848 			init->rx.sb_cq_index,
4849 			!ECORE_TEST_BIT(ECORE_Q_FLG_HC_EN, &init->rx.flags),
4850 			hc_usec);
4851 	}
4852 
4853 	/* Set CDU context validation values */
4854 	for (cos = 0; cos < o->max_cos; cos++) {
4855 		ECORE_MSG(sc, "setting context validation. cid %d, cos %d\n",
4856 			  o->cids[cos], cos);
4857 		ECORE_MSG(sc, "context pointer %p\n", init->cxts[cos]);
4858 		ECORE_SET_CTX_VALIDATION(sc, init->cxts[cos], o->cids[cos]);
4859 	}
4860 
4861 	/* As no ramrod is sent, complete the command immediately  */
4862 	/* As no ramrod is sent, complete the command immediately */
4863 
4864 	ECORE_MMIOWB();
4865 	ECORE_SMP_MB();
4866 
4867 	return ECORE_SUCCESS;
4868 }
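
/* Worked example (illustrative): hc_rate is in interrupts per second, so
 * a rate of 25000 gives hc_usec = 1000000 / 25000 = 40, i.e. the status
 * block index is coalesced to at most one interrupt every 40 usec; a rate
 * of 0 disables the coalescing timeout.
 */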
4869 
4870 static inline int ecore_q_send_setup_e1x(struct bxe_softc *sc,
4871 					struct ecore_queue_state_params *params)
4872 {
4873 	struct ecore_queue_sp_obj *o = params->q_obj;
4874 	struct client_init_ramrod_data *rdata =
4875 		(struct client_init_ramrod_data *)o->rdata;
4876 	ecore_dma_addr_t data_mapping = o->rdata_mapping;
4877 	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4878 
4879 	/* Clear the ramrod data */
4880 	ECORE_MEMSET(rdata, 0, sizeof(*rdata));
4881 
4882 	/* Fill the ramrod data */
4883 	ecore_q_fill_setup_data_cmn(sc, params, rdata);
4884 
4885 	/* No need for an explicit memory barrier here as long as we
4886 	 * ensure the ordering of writing to the SPQ element
4887 	 * and updating of the SPQ producer, which involves a memory
4888 	 * read. If the memory read is removed we will have to put a
4889 	 * full memory barrier there (inside ecore_sp_post()).
4890 	 */
4891 
4892 	return ecore_sp_post(sc,
4893 			     ramrod,
4894 			     o->cids[ECORE_PRIMARY_CID_INDEX],
4895 			     data_mapping,
4896 			     ETH_CONNECTION_TYPE);
4897 }
4898 
4899 static inline int ecore_q_send_setup_e2(struct bxe_softc *sc,
4900 					struct ecore_queue_state_params *params)
4901 {
4902 	struct ecore_queue_sp_obj *o = params->q_obj;
4903 	struct client_init_ramrod_data *rdata =
4904 		(struct client_init_ramrod_data *)o->rdata;
4905 	ecore_dma_addr_t data_mapping = o->rdata_mapping;
4906 	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4907 
4908 	/* Clear the ramrod data */
4909 	ECORE_MEMSET(rdata, 0, sizeof(*rdata));
4910 
4911 	/* Fill the ramrod data */
4912 	ecore_q_fill_setup_data_cmn(sc, params, rdata);
4913 	ecore_q_fill_setup_data_e2(sc, params, rdata);
4914 
4915 	/* No need for an explicit memory barrier here as long as we
4916 	 * ensure the ordering of writing to the SPQ element
4917 	 * and updating of the SPQ producer, which involves a memory
4918 	 * read. If the memory read is removed we will have to put a
4919 	 * full memory barrier there (inside ecore_sp_post()).
4920 	 */
4921 
4922 	return ecore_sp_post(sc,
4923 			     ramrod,
4924 			     o->cids[ECORE_PRIMARY_CID_INDEX],
4925 			     data_mapping,
4926 			     ETH_CONNECTION_TYPE);
4927 }
4928 
4929 static inline int ecore_q_send_setup_tx_only(struct bxe_softc *sc,
4930 				  struct ecore_queue_state_params *params)
4931 {
4932 	struct ecore_queue_sp_obj *o = params->q_obj;
4933 	struct tx_queue_init_ramrod_data *rdata =
4934 		(struct tx_queue_init_ramrod_data *)o->rdata;
4935 	ecore_dma_addr_t data_mapping = o->rdata_mapping;
4936 	int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
4937 	struct ecore_queue_setup_tx_only_params *tx_only_params =
4938 		&params->params.tx_only;
4939 	uint8_t cid_index = tx_only_params->cid_index;
4940 
4941 	if (ECORE_TEST_BIT(ECORE_Q_TYPE_FWD, &o->type)) {
4942 		ramrod = RAMROD_CMD_ID_ETH_FORWARD_SETUP;
4943 		ECORE_MSG(sc, "sending forward tx-only ramrod\n");
	}
4944 
4945 	if (cid_index >= o->max_cos) {
4946 		ECORE_ERR("queue[%d]: cid_index (%d) is out of range\n",
4947 			  o->cl_id, cid_index);
4948 		return ECORE_INVAL;
4949 	}
4950 
4951 	ECORE_MSG(sc, "parameters received: cos: %d sp-id: %d\n",
4952 		  tx_only_params->gen_params.cos,
4953 		  tx_only_params->gen_params.spcl_id);
4954 
4955 	/* Clear the ramrod data */
4956 	ECORE_MEMSET(rdata, 0, sizeof(*rdata));
4957 
4958 	/* Fill the ramrod data */
4959 	ecore_q_fill_setup_tx_only(sc, params, rdata);
4960 
4961 	ECORE_MSG(sc, "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d\n",
4962 		  o->cids[cid_index], rdata->general.client_id,
4963 		  rdata->general.sp_client_id, rdata->general.cos);
4964 
4965 	/* No need for an explicit memory barrier here as long as we
4966 	 * ensure the ordering of writing to the SPQ element
4967 	 * and updating of the SPQ producer, which involves a memory
4968 	 * read. If the memory read is removed we will have to put a
4969 	 * full memory barrier there (inside ecore_sp_post()).
4970 	 */
4971 
4972 	return ecore_sp_post(sc, ramrod, o->cids[cid_index],
4973 			     data_mapping, ETH_CONNECTION_TYPE);
4974 }
4975 
4976 static void ecore_q_fill_update_data(struct bxe_softc *sc,
4977 				     struct ecore_queue_sp_obj *obj,
4978 				     struct ecore_queue_update_params *params,
4979 				     struct client_update_ramrod_data *data)
4980 {
4981 	/* Client ID of the client to update */
4982 	data->client_id = obj->cl_id;
4983 
4984 	/* Function ID of the client to update */
4985 	data->func_id = obj->func_id;
4986 
4987 	/* Default VLAN value */
4988 	data->default_vlan = ECORE_CPU_TO_LE16(params->def_vlan);
4989 
4990 	/* Inner VLAN stripping */
4991 	data->inner_vlan_removal_enable_flg =
4992 		ECORE_TEST_BIT(ECORE_Q_UPDATE_IN_VLAN_REM,
4993 			       &params->update_flags);
4994 	data->inner_vlan_removal_change_flg =
4995 		ECORE_TEST_BIT(ECORE_Q_UPDATE_IN_VLAN_REM_CHNG,
4996 		       &params->update_flags);
4997 
4998 	/* Outer VLAN stripping */
4999 	data->outer_vlan_removal_enable_flg =
5000 		ECORE_TEST_BIT(ECORE_Q_UPDATE_OUT_VLAN_REM,
5001 			       &params->update_flags);
5002 	data->outer_vlan_removal_change_flg =
5003 		ECORE_TEST_BIT(ECORE_Q_UPDATE_OUT_VLAN_REM_CHNG,
5004 		       &params->update_flags);
5005 
5006 	/* Drop packets that have a source MAC that doesn't belong to this
5007 	 * Queue.
5008 	 */
5009 	data->anti_spoofing_enable_flg =
5010 		ECORE_TEST_BIT(ECORE_Q_UPDATE_ANTI_SPOOF,
5011 			       &params->update_flags);
5012 	data->anti_spoofing_change_flg =
5013 		ECORE_TEST_BIT(ECORE_Q_UPDATE_ANTI_SPOOF_CHNG,
5014 		       &params->update_flags);
5015 
5016 	/* Activate/Deactivate */
5017 	data->activate_flg =
5018 		ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE, &params->update_flags);
5019 	data->activate_change_flg =
5020 		ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
5021 			       &params->update_flags);
5022 
5023 	/* Enable default VLAN */
5024 	data->default_vlan_enable_flg =
5025 		ECORE_TEST_BIT(ECORE_Q_UPDATE_DEF_VLAN_EN,
5026 			       &params->update_flags);
5027 	data->default_vlan_change_flg =
5028 		ECORE_TEST_BIT(ECORE_Q_UPDATE_DEF_VLAN_EN_CHNG,
5029 		       &params->update_flags);
5030 
5031 	/* silent vlan removal */
5032 	data->silent_vlan_change_flg =
5033 		ECORE_TEST_BIT(ECORE_Q_UPDATE_SILENT_VLAN_REM_CHNG,
5034 			       &params->update_flags);
5035 	data->silent_vlan_removal_flg =
5036 		ECORE_TEST_BIT(ECORE_Q_UPDATE_SILENT_VLAN_REM,
5037 			       &params->update_flags);
5038 	data->silent_vlan_value = ECORE_CPU_TO_LE16(params->silent_removal_value);
5039 	data->silent_vlan_mask = ECORE_CPU_TO_LE16(params->silent_removal_mask);
5040 
5041 	/* tx switching */
5042 	data->tx_switching_flg =
5043 		ECORE_TEST_BIT(ECORE_Q_UPDATE_TX_SWITCHING,
5044 			       &params->update_flags);
5045 	data->tx_switching_change_flg =
5046 		ECORE_TEST_BIT(ECORE_Q_UPDATE_TX_SWITCHING_CHNG,
5047 			       &params->update_flags);
5048 }
5049 
5050 static inline int ecore_q_send_update(struct bxe_softc *sc,
5051 				      struct ecore_queue_state_params *params)
5052 {
5053 	struct ecore_queue_sp_obj *o = params->q_obj;
5054 	struct client_update_ramrod_data *rdata =
5055 		(struct client_update_ramrod_data *)o->rdata;
5056 	ecore_dma_addr_t data_mapping = o->rdata_mapping;
5057 	struct ecore_queue_update_params *update_params =
5058 		&params->params.update;
5059 	uint8_t cid_index = update_params->cid_index;
5060 
5061 	if (cid_index >= o->max_cos) {
5062 		ECORE_ERR("queue[%d]: cid_index (%d) is out of range\n",
5063 			  o->cl_id, cid_index);
5064 		return ECORE_INVAL;
5065 	}
5066 
5067 	/* Clear the ramrod data */
5068 	ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5069 
5070 	/* Fill the ramrod data */
5071 	ecore_q_fill_update_data(sc, o, update_params, rdata);
5072 
5073 	/* No need for an explicit memory barrier here as long as we
5074 	 * ensure the ordering of writing to the SPQ element
5075 	 * and updating of the SPQ producer, which involves a memory
5076 	 * read; if the memory read is removed we will have to put a
5077 	 * full memory barrier there (inside ecore_sp_post()).
5078 	 */
5079 
5080 	return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
5081 			     o->cids[cid_index], data_mapping,
5082 			     ETH_CONNECTION_TYPE);
5083 }
5084 
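/*
 * Usage sketch (illustrative; `q_obj' and `rc' are assumed caller-side
 * names): driver paths do not call ecore_q_send_update() directly but go
 * through the generic queue state-change entry point, e.g. to program
 * silent VLAN removal on CoS 0:
 *
 *	struct ecore_queue_state_params qparams = { 0 };
 *	struct ecore_queue_update_params *up = &qparams.params.update;
 *
 *	qparams.q_obj = q_obj;
 *	qparams.cmd = ECORE_Q_CMD_UPDATE;
 *	up->cid_index = 0;
 *	up->silent_removal_value = 0;
 *	up->silent_removal_mask = 0xfff;
 *	ECORE_SET_BIT_NA(ECORE_Q_UPDATE_SILENT_VLAN_REM, &up->update_flags);
 *	ECORE_SET_BIT_NA(ECORE_Q_UPDATE_SILENT_VLAN_REM_CHNG,
 *			 &up->update_flags);
 *	ECORE_SET_BIT(RAMROD_COMP_WAIT, &qparams.ramrod_flags);
 *	rc = ecore_queue_state_change(sc, &qparams);
 */
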
5085 /**
5086  * ecore_q_send_deactivate - send DEACTIVATE command
5087  *
5088  * @sc:		device handle
5089  * @params:	queue state params
5090  *
5091  * Implemented using the UPDATE command.
5092  */
5093 static inline int ecore_q_send_deactivate(struct bxe_softc *sc,
5094 					struct ecore_queue_state_params *params)
5095 {
5096 	struct ecore_queue_update_params *update = &params->params.update;
5097 
5098 	ECORE_MEMSET(update, 0, sizeof(*update));
5099 
5100 	ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
5101 
5102 	return ecore_q_send_update(sc, params);
5103 }
5104 
5105 /**
5106  * ecore_q_send_activate - send ACTIVATE command
5107  *
5108  * @sc:		device handle
5109  * @params:	queue state params
5110  *
5111  * Implemented using the UPDATE command.
5112  */
5113 static inline int ecore_q_send_activate(struct bxe_softc *sc,
5114 					struct ecore_queue_state_params *params)
5115 {
5116 	struct ecore_queue_update_params *update = &params->params.update;
5117 
5118 	ECORE_MEMSET(update, 0, sizeof(*update));
5119 
5120 	ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE, &update->update_flags);
5121 	ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
5122 
5123 	return ecore_q_send_update(sc, params);
5124 }
5125 
5126 static inline int ecore_q_send_update_tpa(struct bxe_softc *sc,
5127 					struct ecore_queue_state_params *params)
5128 {
5129 	/* TODO: Not implemented yet. */
5130 	return -1;
5131 }
5132 
5133 static inline int ecore_q_send_halt(struct bxe_softc *sc,
5134 				    struct ecore_queue_state_params *params)
5135 {
5136 	struct ecore_queue_sp_obj *o = params->q_obj;
5137 
5138 	/* build eth_halt_ramrod_data.client_id in a big-endian friendly way;
5139 	 * cl_id rides in the ramrod data field, so no DMA buffer is needed */
5140 	ecore_dma_addr_t data_mapping = (ecore_dma_addr_t)o->cl_id;
5141 
5142 	return ecore_sp_post(sc,
5143 			     RAMROD_CMD_ID_ETH_HALT,
5144 			     o->cids[ECORE_PRIMARY_CID_INDEX],
5145 			     data_mapping,
5146 			     ETH_CONNECTION_TYPE);
5147 }
5148 
5149 static inline int ecore_q_send_cfc_del(struct bxe_softc *sc,
5150 				       struct ecore_queue_state_params *params)
5151 {
5152 	struct ecore_queue_sp_obj *o = params->q_obj;
5153 	uint8_t cid_idx = params->params.cfc_del.cid_index;
5154 
5155 	if (cid_idx >= o->max_cos) {
5156 		ECORE_ERR("queue[%d]: cid_index (%d) is out of range\n",
5157 			  o->cl_id, cid_idx);
5158 		return ECORE_INVAL;
5159 	}
5160 
5161 	return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_CFC_DEL,
5162 			     o->cids[cid_idx], 0,
5163 			     NONE_CONNECTION_TYPE);
5164 }
5165 
5166 static inline int ecore_q_send_terminate(struct bxe_softc *sc,
5167 					struct ecore_queue_state_params *params)
5168 {
5169 	struct ecore_queue_sp_obj *o = params->q_obj;
5170 	uint8_t cid_index = params->params.terminate.cid_index;
5171 
5172 	if (cid_index >= o->max_cos) {
5173 		ECORE_ERR("queue[%d]: cid_index (%d) is out of range\n",
5174 			  o->cl_id, cid_index);
5175 		return ECORE_INVAL;
5176 	}
5177 
5178 	return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_TERMINATE,
5179 			     o->cids[cid_index], 0,
5180 			     ETH_CONNECTION_TYPE);
5181 }
5182 
5183 static inline int ecore_q_send_empty(struct bxe_softc *sc,
5184 				     struct ecore_queue_state_params *params)
5185 {
5186 	struct ecore_queue_sp_obj *o = params->q_obj;
5187 
5188 	return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_EMPTY,
5189 			     o->cids[ECORE_PRIMARY_CID_INDEX], 0,
5190 			     ETH_CONNECTION_TYPE);
5191 }
5192 
5193 static inline int ecore_queue_send_cmd_cmn(struct bxe_softc *sc,
5194 					struct ecore_queue_state_params *params)
5195 {
5196 	switch (params->cmd) {
5197 	case ECORE_Q_CMD_INIT:
5198 		return ecore_q_init(sc, params);
5199 	case ECORE_Q_CMD_SETUP_TX_ONLY:
5200 		return ecore_q_send_setup_tx_only(sc, params);
5201 	case ECORE_Q_CMD_DEACTIVATE:
5202 		return ecore_q_send_deactivate(sc, params);
5203 	case ECORE_Q_CMD_ACTIVATE:
5204 		return ecore_q_send_activate(sc, params);
5205 	case ECORE_Q_CMD_UPDATE:
5206 		return ecore_q_send_update(sc, params);
5207 	case ECORE_Q_CMD_UPDATE_TPA:
5208 		return ecore_q_send_update_tpa(sc, params);
5209 	case ECORE_Q_CMD_HALT:
5210 		return ecore_q_send_halt(sc, params);
5211 	case ECORE_Q_CMD_CFC_DEL:
5212 		return ecore_q_send_cfc_del(sc, params);
5213 	case ECORE_Q_CMD_TERMINATE:
5214 		return ecore_q_send_terminate(sc, params);
5215 	case ECORE_Q_CMD_EMPTY:
5216 		return ecore_q_send_empty(sc, params);
5217 	default:
5218 		ECORE_ERR("Unknown command: %d\n", params->cmd);
5219 		return ECORE_INVAL;
5220 	}
5221 }
5222 
5223 static int ecore_queue_send_cmd_e1x(struct bxe_softc *sc,
5224 				    struct ecore_queue_state_params *params)
5225 {
5226 	switch (params->cmd) {
5227 	case ECORE_Q_CMD_SETUP:
5228 		return ecore_q_send_setup_e1x(sc, params);
5229 	case ECORE_Q_CMD_INIT:
5230 	case ECORE_Q_CMD_SETUP_TX_ONLY:
5231 	case ECORE_Q_CMD_DEACTIVATE:
5232 	case ECORE_Q_CMD_ACTIVATE:
5233 	case ECORE_Q_CMD_UPDATE:
5234 	case ECORE_Q_CMD_UPDATE_TPA:
5235 	case ECORE_Q_CMD_HALT:
5236 	case ECORE_Q_CMD_CFC_DEL:
5237 	case ECORE_Q_CMD_TERMINATE:
5238 	case ECORE_Q_CMD_EMPTY:
5239 		return ecore_queue_send_cmd_cmn(sc, params);
5240 	default:
5241 		ECORE_ERR("Unknown command: %d\n", params->cmd);
5242 		return ECORE_INVAL;
5243 	}
5244 }
5245 
5246 static int ecore_queue_send_cmd_e2(struct bxe_softc *sc,
5247 				   struct ecore_queue_state_params *params)
5248 {
5249 	switch (params->cmd) {
5250 	case ECORE_Q_CMD_SETUP:
5251 		return ecore_q_send_setup_e2(sc, params);
5252 	case ECORE_Q_CMD_INIT:
5253 	case ECORE_Q_CMD_SETUP_TX_ONLY:
5254 	case ECORE_Q_CMD_DEACTIVATE:
5255 	case ECORE_Q_CMD_ACTIVATE:
5256 	case ECORE_Q_CMD_UPDATE:
5257 	case ECORE_Q_CMD_UPDATE_TPA:
5258 	case ECORE_Q_CMD_HALT:
5259 	case ECORE_Q_CMD_CFC_DEL:
5260 	case ECORE_Q_CMD_TERMINATE:
5261 	case ECORE_Q_CMD_EMPTY:
5262 		return ecore_queue_send_cmd_cmn(sc, params);
5263 	default:
5264 		ECORE_ERR("Unknown command: %d\n", params->cmd);
5265 		return ECORE_INVAL;
5266 	}
5267 }
5268 
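/*
 * Note: the E1x and E2 dispatch tables above differ only in which SETUP
 * handler they use; every other command funnels into
 * ecore_queue_send_cmd_cmn().  ecore_init_queue_obj() below picks one of
 * the two per chip via CHIP_IS_E1x(sc).
 */
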
5269 /**
5270  * ecore_queue_chk_transition - check state machine of a regular
5271  * (not Forwarding) Queue
5272  *
5273  * @sc:		device handle
5274  * @o:		queue state object
5275  * @params:	queue state params
5276  *
5277  * It both checks if the requested command is legal in a current
5278  * state and, if it's legal, sets a `next_state' in the object
5279  * that will be used in the completion flow to set the `state'
5280  * of the object.
5281  *
5282  * returns 0 if a requested command is a legal transition,
5283  *         ECORE_INVAL otherwise.
5284  */
5285 static int ecore_queue_chk_transition(struct bxe_softc *sc,
5286 				      struct ecore_queue_sp_obj *o,
5287 				      struct ecore_queue_state_params *params)
5288 {
5289 	enum ecore_q_state state = o->state, next_state = ECORE_Q_STATE_MAX;
5290 	enum ecore_queue_cmd cmd = params->cmd;
5291 	struct ecore_queue_update_params *update_params =
5292 		 &params->params.update;
5293 	uint8_t next_tx_only = o->num_tx_only;
5294 
5295 	/* Forget all pending-for-completion commands if a driver-only state
5296 	 * transition has been requested.
5297 	 */
5298 	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5299 		o->pending = 0;
5300 		o->next_state = ECORE_Q_STATE_MAX;
5301 	}
5302 
5303 	/* Don't allow a next state transition if we are in the middle of
5304 	 * the previous one.
5305 	 */
5306 	if (o->pending) {
5307 		ECORE_ERR("Blocking transition since pending was %lx\n",
5308 			  o->pending);
5309 		return ECORE_BUSY;
5310 	}
5311 
5312 	switch (state) {
5313 	case ECORE_Q_STATE_RESET:
5314 		if (cmd == ECORE_Q_CMD_INIT)
5315 			next_state = ECORE_Q_STATE_INITIALIZED;
5316 
5317 		break;
5318 	case ECORE_Q_STATE_INITIALIZED:
5319 		if (cmd == ECORE_Q_CMD_SETUP) {
5320 			if (ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE,
5321 					   &params->params.setup.flags))
5322 				next_state = ECORE_Q_STATE_ACTIVE;
5323 			else
5324 				next_state = ECORE_Q_STATE_INACTIVE;
5325 		}
5326 
5327 		break;
5328 	case ECORE_Q_STATE_ACTIVE:
5329 		if (cmd == ECORE_Q_CMD_DEACTIVATE)
5330 			next_state = ECORE_Q_STATE_INACTIVE;
5331 
5332 		else if ((cmd == ECORE_Q_CMD_EMPTY) ||
5333 			 (cmd == ECORE_Q_CMD_UPDATE_TPA))
5334 			next_state = ECORE_Q_STATE_ACTIVE;
5335 
5336 		else if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
5337 			next_state = ECORE_Q_STATE_MULTI_COS;
5338 			next_tx_only = 1;
5339 		}
5340 
5341 		else if (cmd == ECORE_Q_CMD_HALT)
5342 			next_state = ECORE_Q_STATE_STOPPED;
5343 
5344 		else if (cmd == ECORE_Q_CMD_UPDATE) {
5345 			/* If "active" state change is requested, update the
5346 			 *  state accordingly.
5347 			 */
5348 			if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
5349 					   &update_params->update_flags) &&
5350 			    !ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
5351 					    &update_params->update_flags))
5352 				next_state = ECORE_Q_STATE_INACTIVE;
5353 			else
5354 				next_state = ECORE_Q_STATE_ACTIVE;
5355 		}
5356 
5357 		break;
5358 	case ECORE_Q_STATE_MULTI_COS:
5359 		if (cmd == ECORE_Q_CMD_TERMINATE)
5360 			next_state = ECORE_Q_STATE_MCOS_TERMINATED;
5361 
5362 		else if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
5363 			next_state = ECORE_Q_STATE_MULTI_COS;
5364 			next_tx_only = o->num_tx_only + 1;
5365 		}
5366 
5367 		else if ((cmd == ECORE_Q_CMD_EMPTY) ||
5368 			 (cmd == ECORE_Q_CMD_UPDATE_TPA))
5369 			next_state = ECORE_Q_STATE_MULTI_COS;
5370 
5371 		else if (cmd == ECORE_Q_CMD_UPDATE) {
5372 			/* If "active" state change is requested, update the
5373 			 *  state accordingly.
5374 			 */
5375 			if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
5376 					   &update_params->update_flags) &&
5377 			    !ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
5378 					    &update_params->update_flags))
5379 				next_state = ECORE_Q_STATE_INACTIVE;
5380 			else
5381 				next_state = ECORE_Q_STATE_MULTI_COS;
5382 		}
5383 
5384 		break;
5385 	case ECORE_Q_STATE_MCOS_TERMINATED:
5386 		if (cmd == ECORE_Q_CMD_CFC_DEL) {
5387 			next_tx_only = o->num_tx_only - 1;
5388 			if (next_tx_only == 0)
5389 				next_state = ECORE_Q_STATE_ACTIVE;
5390 			else
5391 				next_state = ECORE_Q_STATE_MULTI_COS;
5392 		}
5393 
5394 		break;
5395 	case ECORE_Q_STATE_INACTIVE:
5396 		if (cmd == ECORE_Q_CMD_ACTIVATE)
5397 			next_state = ECORE_Q_STATE_ACTIVE;
5398 
5399 		else if ((cmd == ECORE_Q_CMD_EMPTY) ||
5400 			 (cmd == ECORE_Q_CMD_UPDATE_TPA))
5401 			next_state = ECORE_Q_STATE_INACTIVE;
5402 
5403 		else if (cmd == ECORE_Q_CMD_HALT)
5404 			next_state = ECORE_Q_STATE_STOPPED;
5405 
5406 		else if (cmd == ECORE_Q_CMD_UPDATE) {
5407 			/* If "active" state change is requested, update the
5408 			 * state accordingly.
5409 			 */
5410 			if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
5411 					   &update_params->update_flags) &&
5412 			    ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
5413 					   &update_params->update_flags)) {
5414 				if (o->num_tx_only == 0)
5415 					next_state = ECORE_Q_STATE_ACTIVE;
5416 				else /* tx only queues exist for this queue */
5417 					next_state = ECORE_Q_STATE_MULTI_COS;
5418 			} else
5419 				next_state = ECORE_Q_STATE_INACTIVE;
5420 		}
5421 
5422 		break;
5423 	case ECORE_Q_STATE_STOPPED:
5424 		if (cmd == ECORE_Q_CMD_TERMINATE)
5425 			next_state = ECORE_Q_STATE_TERMINATED;
5426 
5427 		break;
5428 	case ECORE_Q_STATE_TERMINATED:
5429 		if (cmd == ECORE_Q_CMD_CFC_DEL)
5430 			next_state = ECORE_Q_STATE_RESET;
5431 
5432 		break;
5433 	default:
5434 		ECORE_ERR("Illegal state: %d\n", state);
5435 	}
5436 
5437 	/* Transition is assured */
5438 	if (next_state != ECORE_Q_STATE_MAX) {
5439 		ECORE_MSG(sc, "Good state transition: %d(%d)->%d\n",
5440 			  state, cmd, next_state);
5441 		o->next_state = next_state;
5442 		o->next_tx_only = next_tx_only;
5443 		return ECORE_SUCCESS;
5444 	}
5445 
5446 	ECORE_MSG(sc, "Bad state transition request: %d %d\n", state, cmd);
5447 
5448 	return ECORE_INVAL;
5449 }
5450 
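/*
 * The regular Queue FSM encoded above, in summary form (any edge not
 * listed is rejected; EMPTY and UPDATE_TPA are self-loops in the ACTIVE,
 * INACTIVE and MULTI_COS states):
 *
 *	RESET --INIT--> INITIALIZED --SETUP--> ACTIVE or INACTIVE
 *	ACTIVE <--ACTIVATE/DEACTIVATE/UPDATE--> INACTIVE
 *	ACTIVE --SETUP_TX_ONLY--> MULTI_COS --TERMINATE--> MCOS_TERMINATED
 *	MCOS_TERMINATED --CFC_DEL--> ACTIVE (last tx-only) or MULTI_COS
 *	ACTIVE/INACTIVE --HALT--> STOPPED --TERMINATE--> TERMINATED
 *	TERMINATED --CFC_DEL--> RESET
 */
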
5451 /**
5452  * ecore_queue_chk_fwd_transition - check state machine of a Forwarding Queue.
5453  *
5454  * @sc:		device handle
5455  * @o:		queue state object
5456  * @params:	queue state params
5457  *
5458  * It both checks if the requested command is legal in a current
5459  * state and, if it's legal, sets a `next_state' in the object
5460  * that will be used in the completion flow to set the `state'
5461  * of the object.
5462  *
5463  * returns 0 if a requested command is a legal transition,
5464  *         ECORE_INVAL otherwise.
5465  */
5466 static int ecore_queue_chk_fwd_transition(struct bxe_softc *sc,
5467 					  struct ecore_queue_sp_obj *o,
5468 					struct ecore_queue_state_params *params)
5469 {
5470 	enum ecore_q_state state = o->state, next_state = ECORE_Q_STATE_MAX;
5471 	enum ecore_queue_cmd cmd = params->cmd;
5472 
5473 	switch (state) {
5474 	case ECORE_Q_STATE_RESET:
5475 		if (cmd == ECORE_Q_CMD_INIT)
5476 			next_state = ECORE_Q_STATE_INITIALIZED;
5477 
5478 		break;
5479 	case ECORE_Q_STATE_INITIALIZED:
5480 		if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
5481 			if (ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE,
5482 					   &params->params.tx_only.flags))
5483 				next_state = ECORE_Q_STATE_ACTIVE;
5484 			else
5485 				next_state = ECORE_Q_STATE_INACTIVE;
5486 		}
5487 
5488 		break;
5489 	case ECORE_Q_STATE_ACTIVE:
5490 	case ECORE_Q_STATE_INACTIVE:
5491 		if (cmd == ECORE_Q_CMD_CFC_DEL)
5492 			next_state = ECORE_Q_STATE_RESET;
5493 
5494 		break;
5495 	default:
5496 		ECORE_ERR("Illegal state: %d\n", state);
5497 	}
5498 
5499 	/* Transition is assured */
5500 	if (next_state != ECORE_Q_STATE_MAX) {
5501 		ECORE_MSG(sc, "Good state transition: %d(%d)->%d\n",
5502 			  state, cmd, next_state);
5503 		o->next_state = next_state;
5504 		return ECORE_SUCCESS;
5505 	}
5506 
5507 	ECORE_MSG(sc, "Bad state transition request: %d %d\n", state, cmd);
5508 	return ECORE_INVAL;
5509 }
5510 
5511 void ecore_init_queue_obj(struct bxe_softc *sc,
5512 			  struct ecore_queue_sp_obj *obj,
5513 			  uint8_t cl_id, uint32_t *cids, uint8_t cid_cnt, uint8_t func_id,
5514 			  void *rdata,
5515 			  ecore_dma_addr_t rdata_mapping, unsigned long type)
5516 {
5517 	ECORE_MEMSET(obj, 0, sizeof(*obj));
5518 
5519 	/* We currently support at most ECORE_MULTI_TX_COS Tx CoS values */
5520 	ECORE_BUG_ON(ECORE_MULTI_TX_COS < cid_cnt);
5521 
5522 	memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
5523 	obj->max_cos = cid_cnt;
5524 	obj->cl_id = cl_id;
5525 	obj->func_id = func_id;
5526 	obj->rdata = rdata;
5527 	obj->rdata_mapping = rdata_mapping;
5528 	obj->type = type;
5529 	obj->next_state = ECORE_Q_STATE_MAX;
5530 
5531 	if (CHIP_IS_E1x(sc))
5532 		obj->send_cmd = ecore_queue_send_cmd_e1x;
5533 	else
5534 		obj->send_cmd = ecore_queue_send_cmd_e2;
5535 
5536 	if (ECORE_TEST_BIT(ECORE_Q_TYPE_FWD, &type))
5537 		obj->check_transition = ecore_queue_chk_fwd_transition;
5538 	else
5539 		obj->check_transition = ecore_queue_chk_transition;
5540 
5541 	obj->complete_cmd = ecore_queue_comp_cmd;
5542 	obj->wait_comp = ecore_queue_wait_comp;
5543 	obj->set_pending = ecore_queue_set_pending;
5544 }
5545 
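/*
 * Illustrative call (a sketch; `q_obj', `cid', `cl_id', `rdata' and
 * `rdata_mapping' are assumed to have been allocated/chosen by the caller,
 * and the ECORE_Q_TYPE_HAS_* flags come from ecore_sp.h): initializing a
 * single-CoS L2 queue object:
 *
 *	uint32_t cids[ECORE_MULTI_TX_COS];
 *	unsigned long q_type = 0;
 *
 *	cids[0] = cid;
 *	ECORE_SET_BIT(ECORE_Q_TYPE_HAS_RX, &q_type);
 *	ECORE_SET_BIT(ECORE_Q_TYPE_HAS_TX, &q_type);
 *	ecore_init_queue_obj(sc, &q_obj, cl_id, cids, 1, ECORE_FUNC_ID(sc),
 *			     rdata, rdata_mapping, q_type);
 */
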
5546 /* return a queue object's logical state */
5547 int ecore_get_q_logical_state(struct bxe_softc *sc,
5548 			       struct ecore_queue_sp_obj *obj)
5549 {
5550 	switch (obj->state) {
5551 	case ECORE_Q_STATE_ACTIVE:
5552 	case ECORE_Q_STATE_MULTI_COS:
5553 		return ECORE_Q_LOGICAL_STATE_ACTIVE;
5554 	case ECORE_Q_STATE_RESET:
5555 	case ECORE_Q_STATE_INITIALIZED:
5556 	case ECORE_Q_STATE_MCOS_TERMINATED:
5557 	case ECORE_Q_STATE_INACTIVE:
5558 	case ECORE_Q_STATE_STOPPED:
5559 	case ECORE_Q_STATE_TERMINATED:
5560 	case ECORE_Q_STATE_FLRED:
5561 		return ECORE_Q_LOGICAL_STATE_STOPPED;
5562 	default:
5563 		return ECORE_INVAL;
5564 	}
5565 }
5566 
5567 /********************** Function state object *********************************/
5568 enum ecore_func_state ecore_func_get_state(struct bxe_softc *sc,
5569 					   struct ecore_func_sp_obj *o)
5570 {
5571 	/* in the middle of a transaction - return INVALID state */
5572 	if (o->pending)
5573 		return ECORE_F_STATE_MAX;
5574 
5575 	/* ensure the order of reading o->pending and o->state:
5576 	 * o->pending should be read first
5577 	 */
5578 	rmb();
5579 
5580 	return o->state;
5581 }
5582 
5583 static int ecore_func_wait_comp(struct bxe_softc *sc,
5584 				struct ecore_func_sp_obj *o,
5585 				enum ecore_func_cmd cmd)
5586 {
5587 	return ecore_state_wait(sc, cmd, &o->pending);
5588 }
5589 
5590 /**
5591  * ecore_func_state_change_comp - complete the state machine transition
5592  *
5593  * @sc:		device handle
5594  * @o:		function state object
5595  * @cmd:	command that was completed
5596  *
5597  * Called on state change transition. Completes the state
5598  * machine transition only - no HW interaction.
5599  */
5600 static inline int ecore_func_state_change_comp(struct bxe_softc *sc,
5601 					       struct ecore_func_sp_obj *o,
5602 					       enum ecore_func_cmd cmd)
5603 {
5604 	unsigned long cur_pending = o->pending;
5605 
5606 	if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) {
5607 		ECORE_ERR("Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d\n",
5608 			  cmd, ECORE_FUNC_ID(sc), o->state,
5609 			  cur_pending, o->next_state);
5610 		return ECORE_INVAL;
5611 	}
5612 
5613 	ECORE_MSG(sc,
5614 		  "Completing command %d for func %d, setting state to %d\n",
5615 		  cmd, ECORE_FUNC_ID(sc), o->next_state);
5616 
5617 	o->state = o->next_state;
5618 	o->next_state = ECORE_F_STATE_MAX;
5619 
5620 	/* It's important that o->state and o->next_state are
5621 	 * updated before o->pending.
5622 	 */
5623 	wmb();
5624 
5625 	ECORE_CLEAR_BIT(cmd, &o->pending);
5626 	ECORE_SMP_MB_AFTER_CLEAR_BIT();
5627 
5628 	return ECORE_SUCCESS;
5629 }
5630 
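/*
 * Ordering note: the wmb() above pairs with the rmb() in
 * ecore_func_get_state() earlier in this file.  The writer publishes the
 * new state before clearing the pending bit; the reader checks the
 * pending bit before trusting the state:
 *
 *	writer (here)				reader (ecore_func_get_state)
 *	o->state = o->next_state;		if (o->pending)
 *	wmb();					        return ECORE_F_STATE_MAX;
 *	ECORE_CLEAR_BIT(cmd, &o->pending);	rmb();
 *						return o->state;
 */
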
5631 /**
5632  * ecore_func_comp_cmd - complete the state change command
5633  *
5634  * @sc:		device handle
5635  * @o:		function state object
5636  * @cmd:	command to complete
5637  *
5638  * Checks that the arrived completion is expected.
5639  */
5640 static int ecore_func_comp_cmd(struct bxe_softc *sc,
5641 			       struct ecore_func_sp_obj *o,
5642 			       enum ecore_func_cmd cmd)
5643 {
5644 	/* Complete the state machine part first and check that the
5645 	 * completion is legal.
5646 	 */
5647 	int rc = ecore_func_state_change_comp(sc, o, cmd);
5648 	return rc;
5649 }
5650 
5651 /**
5652  * ecore_func_chk_transition - perform function state machine transition
5653  *
5654  * @sc:		device handle
5655  * @o:		function state object
5656  * @params:	function state params
5657  *
5658  * It both checks if the requested command is legal in a current
5659  * state and, if it's legal, sets a `next_state' in the object
5660  * that will be used in the completion flow to set the `state'
5661  * of the object.
5662  *
5663  * returns 0 if a requested command is a legal transition,
5664  *         ECORE_INVAL otherwise.
5665  */
5666 static int ecore_func_chk_transition(struct bxe_softc *sc,
5667 				     struct ecore_func_sp_obj *o,
5668 				     struct ecore_func_state_params *params)
5669 {
5670 	enum ecore_func_state state = o->state, next_state = ECORE_F_STATE_MAX;
5671 	enum ecore_func_cmd cmd = params->cmd;
5672 
5673 	/* Forget all pending for completion commands if a driver only state
5674 	 * transition has been requested.
5675 	 */
5676 	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5677 		o->pending = 0;
5678 		o->next_state = ECORE_F_STATE_MAX;
5679 	}
5680 
5681 	/* Don't allow a next state transition if we are in the middle of
5682 	 * the previous one.
5683 	 */
5684 	if (o->pending)
5685 		return ECORE_BUSY;
5686 
5687 	switch (state) {
5688 	case ECORE_F_STATE_RESET:
5689 		if (cmd == ECORE_F_CMD_HW_INIT)
5690 			next_state = ECORE_F_STATE_INITIALIZED;
5691 
5692 		break;
5693 	case ECORE_F_STATE_INITIALIZED:
5694 		if (cmd == ECORE_F_CMD_START)
5695 			next_state = ECORE_F_STATE_STARTED;
5696 
5697 		else if (cmd == ECORE_F_CMD_HW_RESET)
5698 			next_state = ECORE_F_STATE_RESET;
5699 
5700 		break;
5701 	case ECORE_F_STATE_STARTED:
5702 		if (cmd == ECORE_F_CMD_STOP)
5703 			next_state = ECORE_F_STATE_INITIALIZED;
5704 		/* afex ramrods can be sent only in started mode, and only
5705 		 * if there is no pending function_stop ramrod completion;
5706 		 * for these events the next state remains STARTED.
5707 		 */
5708 		else if ((cmd == ECORE_F_CMD_AFEX_UPDATE) &&
5709 			 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
5710 			next_state = ECORE_F_STATE_STARTED;
5711 
5712 		else if ((cmd == ECORE_F_CMD_AFEX_VIFLISTS) &&
5713 			 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
5714 			next_state = ECORE_F_STATE_STARTED;
5715 
5716 		/* Switch_update ramrod can be sent in either started or
5717 		 * tx_stopped state, and it doesn't change the state.
5718 		 */
5719 		else if ((cmd == ECORE_F_CMD_SWITCH_UPDATE) &&
5720 			 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
5721 			next_state = ECORE_F_STATE_STARTED;
5722 
5723 		else if (cmd == ECORE_F_CMD_TX_STOP)
5724 			next_state = ECORE_F_STATE_TX_STOPPED;
5725 
5726 		break;
5727 	case ECORE_F_STATE_TX_STOPPED:
5728 		if ((cmd == ECORE_F_CMD_SWITCH_UPDATE) &&
5729 		    (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
5730 			next_state = ECORE_F_STATE_TX_STOPPED;
5731 
5732 		else if (cmd == ECORE_F_CMD_TX_START)
5733 			next_state = ECORE_F_STATE_STARTED;
5734 
5735 		break;
5736 	default:
5737 		ECORE_ERR("Unknown state: %d\n", state);
5738 	}
5739 
5740 	/* Transition is assured */
5741 	if (next_state != ECORE_F_STATE_MAX) {
5742 		ECORE_MSG(sc, "Good function state transition: %d(%d)->%d\n",
5743 			  state, cmd, next_state);
5744 		o->next_state = next_state;
5745 		return ECORE_SUCCESS;
5746 	}
5747 
5748 	ECORE_MSG(sc, "Bad function state transition request: %d %d\n",
5749 		  state, cmd);
5750 
5751 	return ECORE_INVAL;
5752 }
5753 
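/*
 * The function FSM above, in summary (AFEX_UPDATE, AFEX_VIFLISTS and
 * SWITCH_UPDATE are self-loops in STARTED, SWITCH_UPDATE also in
 * TX_STOPPED, all gated on no STOP being pending):
 *
 *	RESET --HW_INIT--> INITIALIZED --START--> STARTED
 *	INITIALIZED --HW_RESET--> RESET
 *	STARTED --STOP--> INITIALIZED
 *	STARTED --TX_STOP--> TX_STOPPED --TX_START--> STARTED
 */
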
5754 /**
5755  * ecore_func_init_func - performs HW init at function stage
5756  *
5757  * @sc:		device handle
5758  * @drv:	driver-specific HW init/reset callbacks
5759  *
5760  * Init HW when the current phase is
5761  * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only
5762  * HW blocks.
5763  */
5764 static inline int ecore_func_init_func(struct bxe_softc *sc,
5765 				       const struct ecore_func_sp_drv_ops *drv)
5766 {
5767 	return drv->init_hw_func(sc);
5768 }
5769 
5770 /**
5771  * ecore_func_init_port - performs HW init at port stage
5772  *
5773  * @sc:		device handle
5774  * @drv:	driver-specific HW init/reset callbacks
5775  *
5776  * Init HW when the current phase is
5777  * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
5778  * FUNCTION-only HW blocks.
5779  */
5781 static inline int ecore_func_init_port(struct bxe_softc *sc,
5782 				       const struct ecore_func_sp_drv_ops *drv)
5783 {
5784 	int rc = drv->init_hw_port(sc);
5785 	if (rc)
5786 		return rc;
5787 
5788 	return ecore_func_init_func(sc, drv);
5789 }
5790 
5791 /**
5792  * ecore_func_init_cmn_chip - performs HW init at chip-common stage
5793  *
5794  * @sc:		device handle
5795  * @drv:	driver-specific HW init/reset callbacks
5796  *
5797  * Init HW when the current phase is
5798  * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
5799  * PORT-only and FUNCTION-only HW blocks.
5800  */
5801 static inline int ecore_func_init_cmn_chip(struct bxe_softc *sc,
5802 					const struct ecore_func_sp_drv_ops *drv)
5803 {
5804 	int rc = drv->init_hw_cmn_chip(sc);
5805 	if (rc)
5806 		return rc;
5807 
5808 	return ecore_func_init_port(sc, drv);
5809 }
5810 
5811 /**
5812  * ecore_func_init_cmn - performs HW init at common stage
5813  *
5814  * @sc:		device handle
5815  * @drv:	driver-specific HW init/reset callbacks
5816  *
5817  * Init HW when the current phase is
5818  * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON,
5819  * PORT-only and FUNCTION-only HW blocks.
5820  */
5821 static inline int ecore_func_init_cmn(struct bxe_softc *sc,
5822 				      const struct ecore_func_sp_drv_ops *drv)
5823 {
5824 	int rc = drv->init_hw_cmn(sc);
5825 	if (rc)
5826 		return rc;
5827 
5828 	return ecore_func_init_port(sc, drv);
5829 }
5830 
5831 static int ecore_func_hw_init(struct bxe_softc *sc,
5832 			      struct ecore_func_state_params *params)
5833 {
5834 	uint32_t load_code = params->params.hw_init.load_phase;
5835 	struct ecore_func_sp_obj *o = params->f_obj;
5836 	const struct ecore_func_sp_drv_ops *drv = o->drv;
5837 	int rc = 0;
5838 
5839 	ECORE_MSG(sc, "function %d  load_code %x\n",
5840 		  ECORE_ABS_FUNC_ID(sc), load_code);
5841 
5842 	/* Prepare buffers for unzipping the FW */
5843 	rc = drv->gunzip_init(sc);
5844 	if (rc)
5845 		return rc;
5846 
5847 	/* Prepare FW */
5848 	rc = drv->init_fw(sc);
5849 	if (rc) {
5850 		ECORE_ERR("Error loading firmware\n");
5851 		goto init_err;
5852 	}
5853 
5854 	/* Handle the beginning of COMMON_XXX phases separately... */
5855 	switch (load_code) {
5856 	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5857 		rc = ecore_func_init_cmn_chip(sc, drv);
5858 		if (rc)
5859 			goto init_err;
5860 
5861 		break;
5862 	case FW_MSG_CODE_DRV_LOAD_COMMON:
5863 		rc = ecore_func_init_cmn(sc, drv);
5864 		if (rc)
5865 			goto init_err;
5866 
5867 		break;
5868 	case FW_MSG_CODE_DRV_LOAD_PORT:
5869 		rc = ecore_func_init_port(sc, drv);
5870 		if (rc)
5871 			goto init_err;
5872 
5873 		break;
5874 	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5875 		rc = ecore_func_init_func(sc, drv);
5876 		if (rc)
5877 			goto init_err;
5878 
5879 		break;
5880 	default:
5881 		ECORE_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5882 		rc = ECORE_INVAL;
5883 	}
5884 
5885 init_err:
5886 	drv->gunzip_end(sc);
5887 
5888 	/* In case of success, complete the command immediately: no ramrods
5889 	 * have been sent.
5890 	 */
5891 	if (!rc)
5892 		o->complete_cmd(sc, o, ECORE_F_CMD_HW_INIT);
5893 
5894 	return rc;
5895 }
5896 
5897 /**
5898  * ecore_func_reset_func - reset HW at function stage
5899  *
5900  * @sc:		device handle
5901  * @drv:	driver-specific HW init/reset callbacks
5902  *
5903  * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
5904  * FUNCTION-only HW blocks.
5905  */
5906 static inline void ecore_func_reset_func(struct bxe_softc *sc,
5907 					const struct ecore_func_sp_drv_ops *drv)
5908 {
5909 	drv->reset_hw_func(sc);
5910 }
5911 
5912 /**
5913  * ecore_func_reset_port - reset HW at port stage
5914  *
5915  * @sc:		device handle
5916  * @drv:	driver-specific HW init/reset callbacks
5917  *
5918  * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
5919  * FUNCTION-only and PORT-only HW blocks.
5920  *
5921  *                 !!!IMPORTANT!!!
5922  *
5923  * It's important to call reset_port before reset_func(): the last thing
5924  * reset_func does is pf_disable(), which shuts down PGLUE_B and
5925  * thereby makes any further DMAE transactions impossible.
5926  */
5927 static inline void ecore_func_reset_port(struct bxe_softc *sc,
5928 					const struct ecore_func_sp_drv_ops *drv)
5929 {
5930 	drv->reset_hw_port(sc);
5931 	ecore_func_reset_func(sc, drv);
5932 }
5933 
5934 /**
5935  * ecore_func_reset_cmn - reset HW at common stage
5936  *
5937  * @sc:		device handle
5938  * @drv:	driver-specific HW init/reset callbacks
5939  *
5940  * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
5941  * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
5942  * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
5943  */
5944 static inline void ecore_func_reset_cmn(struct bxe_softc *sc,
5945 					const struct ecore_func_sp_drv_ops *drv)
5946 {
5947 	ecore_func_reset_port(sc, drv);
5948 	drv->reset_hw_cmn(sc);
5949 }
5950 
5951 static inline int ecore_func_hw_reset(struct bxe_softc *sc,
5952 				      struct ecore_func_state_params *params)
5953 {
5954 	uint32_t reset_phase = params->params.hw_reset.reset_phase;
5955 	struct ecore_func_sp_obj *o = params->f_obj;
5956 	const struct ecore_func_sp_drv_ops *drv = o->drv;
5957 
5958 	ECORE_MSG(sc, "function %d  reset_phase %x\n", ECORE_ABS_FUNC_ID(sc),
5959 		  reset_phase);
5960 
5961 	switch (reset_phase) {
5962 	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5963 		ecore_func_reset_cmn(sc, drv);
5964 		break;
5965 	case FW_MSG_CODE_DRV_UNLOAD_PORT:
5966 		ecore_func_reset_port(sc, drv);
5967 		break;
5968 	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5969 		ecore_func_reset_func(sc, drv);
5970 		break;
5971 	default:
5972 		ECORE_ERR("Unknown reset_phase (0x%x) from MCP\n",
5973 			  reset_phase);
5974 		break;
5975 	}
5976 
5977 	/* Complete the command immediately: no ramrods have been sent. */
5978 	o->complete_cmd(sc, o, ECORE_F_CMD_HW_RESET);
5979 
5980 	return ECORE_SUCCESS;
5981 }
5982 
5983 static inline int ecore_func_send_start(struct bxe_softc *sc,
5984 					struct ecore_func_state_params *params)
5985 {
5986 	struct ecore_func_sp_obj *o = params->f_obj;
5987 	struct function_start_data *rdata =
5988 		(struct function_start_data *)o->rdata;
5989 	ecore_dma_addr_t data_mapping = o->rdata_mapping;
5990 	struct ecore_func_start_params *start_params = &params->params.start;
5991 
5992 	ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5993 
5994 	/* Fill the ramrod data with provided parameters */
5995 	rdata->function_mode	= (uint8_t)start_params->mf_mode;
5996 	rdata->sd_vlan_tag	= ECORE_CPU_TO_LE16(start_params->sd_vlan_tag);
5997 	rdata->path_id		= ECORE_PATH_ID(sc);
5998 	rdata->network_cos_mode	= start_params->network_cos_mode;
5999 	rdata->gre_tunnel_mode	= start_params->gre_tunnel_mode;
6000 	rdata->gre_tunnel_rss	= start_params->gre_tunnel_rss;
6001 
6002 	/*
6003 	 *  No need for an explicit memory barrier here as long as we
6004 	 *  ensure the ordering of writing to the SPQ element
6005 	 *  and updating of the SPQ producer, which involves a memory
6006 	 *  read; if the memory read is removed we will have to put a
6007 	 *  full memory barrier there (inside ecore_sp_post()).
6008 	 */
6009 
6010 	return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
6011 			     data_mapping, NONE_CONNECTION_TYPE);
6012 }
6013 
6014 static inline int ecore_func_send_switch_update(struct bxe_softc *sc,
6015 					struct ecore_func_state_params *params)
6016 {
6017 	struct ecore_func_sp_obj *o = params->f_obj;
6018 	struct function_update_data *rdata =
6019 		(struct function_update_data *)o->rdata;
6020 	ecore_dma_addr_t data_mapping = o->rdata_mapping;
6021 	struct ecore_func_switch_update_params *switch_update_params =
6022 		&params->params.switch_update;
6023 
6024 	ECORE_MEMSET(rdata, 0, sizeof(*rdata));
6025 
6026 	/* Fill the ramrod data with provided parameters */
6027 	rdata->tx_switch_suspend_change_flg = 1;
6028 	rdata->tx_switch_suspend = switch_update_params->suspend;
6029 	rdata->echo = SWITCH_UPDATE;
6030 
6031 	return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
6032 			     data_mapping, NONE_CONNECTION_TYPE);
6033 }
6034 
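/*
 * Note: SWITCH_UPDATE and AFEX_UPDATE (below) both ride on
 * RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE; the rdata->echo tag is what
 * allows the completion path to tell the two apart.
 */
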
6035 static inline int ecore_func_send_afex_update(struct bxe_softc *sc,
6036 					 struct ecore_func_state_params *params)
6037 {
6038 	struct ecore_func_sp_obj *o = params->f_obj;
6039 	struct function_update_data *rdata =
6040 		(struct function_update_data *)o->afex_rdata;
6041 	ecore_dma_addr_t data_mapping = o->afex_rdata_mapping;
6042 	struct ecore_func_afex_update_params *afex_update_params =
6043 		&params->params.afex_update;
6044 
6045 	ECORE_MEMSET(rdata, 0, sizeof(*rdata));
6046 
6047 	/* Fill the ramrod data with provided parameters */
6048 	rdata->vif_id_change_flg = 1;
6049 	rdata->vif_id = ECORE_CPU_TO_LE16(afex_update_params->vif_id);
6050 	rdata->afex_default_vlan_change_flg = 1;
6051 	rdata->afex_default_vlan =
6052 		ECORE_CPU_TO_LE16(afex_update_params->afex_default_vlan);
6053 	rdata->allowed_priorities_change_flg = 1;
6054 	rdata->allowed_priorities = afex_update_params->allowed_priorities;
6055 	rdata->echo = AFEX_UPDATE;
6056 
6057 	/*  No need for an explicit memory barrier here as long as we
6058 	 *  ensure the ordering of writing to the SPQ element
6059 	 *  and updating of the SPQ producer, which involves a memory
6060 	 *  read; if the memory read is removed we will have to put a
6061 	 *  full memory barrier there (inside ecore_sp_post()).
6062 	 */
6063 	ECORE_MSG(sc,
6064 		  "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
6065 		  rdata->vif_id,
6066 		  rdata->afex_default_vlan, rdata->allowed_priorities);
6067 
6068 	return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
6069 			     data_mapping, NONE_CONNECTION_TYPE);
6070 }
6071 
6072 static inline int ecore_func_send_afex_viflists(struct bxe_softc *sc,
6074 					 struct ecore_func_state_params *params)
6075 {
6076 	struct ecore_func_sp_obj *o = params->f_obj;
6077 	struct afex_vif_list_ramrod_data *rdata =
6078 		(struct afex_vif_list_ramrod_data *)o->afex_rdata;
6079 	struct ecore_func_afex_viflists_params *afex_vif_params =
6080 		&params->params.afex_viflists;
6081 	uint64_t *p_rdata = (uint64_t *)rdata;
6082 
6083 	ECORE_MEMSET(rdata, 0, sizeof(*rdata));
6084 
6085 	/* Fill the ramrod data with provided parameters */
6086 	rdata->vif_list_index = ECORE_CPU_TO_LE16(afex_vif_params->vif_list_index);
6087 	rdata->func_bit_map          = afex_vif_params->func_bit_map;
6088 	rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command;
6089 	rdata->func_to_clear         = afex_vif_params->func_to_clear;
6090 
6091 	/* send in echo type of sub command */
6092 	rdata->echo = afex_vif_params->afex_vif_list_command;
6093 
6094 	/*  No need for an explicit memory barrier here as long as we
6095 	 *  ensure the ordering of writing to the SPQ element
6096 	 *  and updating of the SPQ producer, which involves a memory
6097 	 *  read; if the memory read is removed we will have to put a
6098 	 *  full memory barrier there (inside ecore_sp_post()).
6099 	 */
6100 
6101 	ECORE_MSG(sc, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x\n",
6102 		  rdata->afex_vif_list_command, rdata->vif_list_index,
6103 		  rdata->func_bit_map, rdata->func_to_clear);
6104 
6105 	/* this ramrod sends data directly and not through DMA mapping */
6106 	return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
6107 			     *p_rdata, NONE_CONNECTION_TYPE);
6108 }
6109 
6110 static inline int ecore_func_send_stop(struct bxe_softc *sc,
6111 				       struct ecore_func_state_params *params)
6112 {
6113 	return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0,
6114 			     NONE_CONNECTION_TYPE);
6115 }
6116 
6117 static inline int ecore_func_send_tx_stop(struct bxe_softc *sc,
6118 				       struct ecore_func_state_params *params)
6119 {
6120 	return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0,
6121 			     NONE_CONNECTION_TYPE);
6122 }

6123 static inline int ecore_func_send_tx_start(struct bxe_softc *sc,
6124 				       struct ecore_func_state_params *params)
6125 {
6126 	struct ecore_func_sp_obj *o = params->f_obj;
6127 	struct flow_control_configuration *rdata =
6128 		(struct flow_control_configuration *)o->rdata;
6129 	ecore_dma_addr_t data_mapping = o->rdata_mapping;
6130 	struct ecore_func_tx_start_params *tx_start_params =
6131 		&params->params.tx_start;
6132 	int i;
6133 
6134 	ECORE_MEMSET(rdata, 0, sizeof(*rdata));
6135 
6136 	rdata->dcb_enabled = tx_start_params->dcb_enabled;
6137 	rdata->dcb_version = tx_start_params->dcb_version;
6138 	rdata->dont_add_pri_0 = tx_start_params->dont_add_pri_0;
6139 
6140 	for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
6141 		rdata->traffic_type_to_priority_cos[i] =
6142 			tx_start_params->traffic_type_to_priority_cos[i];
6143 
6144 	return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
6145 			     data_mapping, NONE_CONNECTION_TYPE);
6146 }
6147 
6148 static int ecore_func_send_cmd(struct bxe_softc *sc,
6149 			       struct ecore_func_state_params *params)
6150 {
6151 	switch (params->cmd) {
6152 	case ECORE_F_CMD_HW_INIT:
6153 		return ecore_func_hw_init(sc, params);
6154 	case ECORE_F_CMD_START:
6155 		return ecore_func_send_start(sc, params);
6156 	case ECORE_F_CMD_STOP:
6157 		return ecore_func_send_stop(sc, params);
6158 	case ECORE_F_CMD_HW_RESET:
6159 		return ecore_func_hw_reset(sc, params);
6160 	case ECORE_F_CMD_AFEX_UPDATE:
6161 		return ecore_func_send_afex_update(sc, params);
6162 	case ECORE_F_CMD_AFEX_VIFLISTS:
6163 		return ecore_func_send_afex_viflists(sc, params);
6164 	case ECORE_F_CMD_TX_STOP:
6165 		return ecore_func_send_tx_stop(sc, params);
6166 	case ECORE_F_CMD_TX_START:
6167 		return ecore_func_send_tx_start(sc, params);
6168 	case ECORE_F_CMD_SWITCH_UPDATE:
6169 		return ecore_func_send_switch_update(sc, params);
6170 	default:
6171 		ECORE_ERR("Unknown command: %d\n", params->cmd);
6172 		return ECORE_INVAL;
6173 	}
6174 }
6175 
6176 void ecore_init_func_obj(struct bxe_softc *sc,
6177 			 struct ecore_func_sp_obj *obj,
6178 			 void *rdata, ecore_dma_addr_t rdata_mapping,
6179 			 void *afex_rdata, ecore_dma_addr_t afex_rdata_mapping,
6180 			 struct ecore_func_sp_drv_ops *drv_iface)
6181 {
6182 	ECORE_MEMSET(obj, 0, sizeof(*obj));
6183 
6184 	ECORE_MUTEX_INIT(&obj->one_pending_mutex);
6185 
6186 	obj->rdata = rdata;
6187 	obj->rdata_mapping = rdata_mapping;
6188 	obj->afex_rdata = afex_rdata;
6189 	obj->afex_rdata_mapping = afex_rdata_mapping;
6190 	obj->send_cmd = ecore_func_send_cmd;
6191 	obj->check_transition = ecore_func_chk_transition;
6192 	obj->complete_cmd = ecore_func_comp_cmd;
6193 	obj->wait_comp = ecore_func_wait_comp;
6194 	obj->drv = drv_iface;
6195 }
6196 
6197 /**
6198  * ecore_func_state_change - perform Function state change transition
6199  *
6200  * @sc:		device handle
6201  * @params:	parameters to perform the transaction
6202  *
6203  * returns 0 in case of successfully completed transition,
6204  *         negative error code in case of failure, positive
6205  *         (EBUSY) value if there is a completion that is
6206  *         still pending (possible only if RAMROD_COMP_WAIT is
6207  *         not set in params->ramrod_flags for asynchronous
6208  *         commands).
6209  */
6210 int ecore_func_state_change(struct bxe_softc *sc,
6211 			    struct ecore_func_state_params *params)
6212 {
6213 	struct ecore_func_sp_obj *o = params->f_obj;
6214 	int rc, cnt = 300;
6215 	enum ecore_func_cmd cmd = params->cmd;
6216 	unsigned long *pending = &o->pending;
6217 
6218 	ECORE_MUTEX_LOCK(&o->one_pending_mutex);
6219 
6220 	/* Check that the requested transition is legal */
6221 	rc = o->check_transition(sc, o, params);
6222 	if ((rc == ECORE_BUSY) &&
6223 	    (ECORE_TEST_BIT(RAMROD_RETRY, &params->ramrod_flags))) {
6224 		while ((rc == ECORE_BUSY) && (--cnt > 0)) {
6225 			ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
6226 			ECORE_MSLEEP(10);
6227 			ECORE_MUTEX_LOCK(&o->one_pending_mutex);
6228 			rc = o->check_transition(sc, o, params);
6229 		}
6230 		if (rc == ECORE_BUSY) {
6231 			ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
6232 			ECORE_ERR("timeout waiting for previous ramrod completion\n");
6233 			return rc;
6234 		}
6235 	} else if (rc) {
6236 		ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
6237 		return rc;
6238 	}
6239 
6240 	/* Set "pending" bit */
6241 	ECORE_SET_BIT(cmd, pending);
6242 
6243 	/* Don't send a command if only driver cleanup was requested */
6244 	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
6245 		ecore_func_state_change_comp(sc, o, cmd);
6246 		ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
6247 	} else {
6248 		/* Send a ramrod */
6249 		rc = o->send_cmd(sc, params);
6250 
6251 		ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
6252 
6253 		if (rc) {
6254 			o->next_state = ECORE_F_STATE_MAX;
6255 			ECORE_CLEAR_BIT(cmd, pending);
6256 			ECORE_SMP_MB_AFTER_CLEAR_BIT();
6257 			return rc;
6258 		}
6259 
6260 		if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
6261 			rc = o->wait_comp(sc, o, cmd);
6262 			if (rc)
6263 				return rc;
6264 
6265 			return ECORE_SUCCESS;
6266 		}
6267 	}
6268 
6269 	return ECORE_RET_PENDING(cmd, pending);
6270 }
6271
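/*
 * End-to-end usage sketch (illustrative; `sc->func_obj' is an assumption
 * about the softc layout): driving the function FSM through HW_INIT and
 * waiting for completion.  HW_INIT sends no ramrod, so complete_cmd()
 * fires synchronously from ecore_func_hw_init():
 *
 *	struct ecore_func_state_params func_params = { 0 };
 *	int rc;
 *
 *	func_params.f_obj = &sc->func_obj;
 *	func_params.cmd = ECORE_F_CMD_HW_INIT;
 *	func_params.params.hw_init.load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;
 *	ECORE_SET_BIT(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
 *	rc = ecore_func_state_change(sc, &func_params);
 */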