xref: /freebsd/sys/dev/bxe/ecore_sp.h (revision 3a92d97ff0f22d21608e1c19b83104c4937523b6)
1 /*-
2  * Copyright (c) 2007-2013 Broadcom Corporation. All rights reserved.
3  *
4  * Eric Davis        <edavis@broadcom.com>
5  * David Christensen <davidch@broadcom.com>
6  * Gary Zambrano     <zambrano@broadcom.com>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  *
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. Neither the name of Broadcom Corporation nor the name of its contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written consent.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31  * THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36 
37 #ifndef ECORE_SP_H
38 #define ECORE_SP_H
39 
40 
41 #include <sys/types.h>
42 #include <sys/endian.h>
43 #include <sys/param.h>
44 #include <sys/lock.h>
45 #include <sys/mutex.h>
46 #include <sys/malloc.h>
47 #include <sys/kernel.h>
48 #include <machine/bus.h>
49 #include <net/ethernet.h>
50 
51 #if _BYTE_ORDER == _LITTLE_ENDIAN
52 #ifndef LITTLE_ENDIAN
53 #define LITTLE_ENDIAN
54 #endif
55 #ifndef __LITTLE_ENDIAN
56 #define __LITTLE_ENDIAN
57 #endif
58 #undef BIG_ENDIAN
59 #undef __BIG_ENDIAN
60 #else /* _BIG_ENDIAN */
61 #ifndef BIG_ENDIAN
62 #define BIG_ENDIAN
63 #endif
64 #ifndef __BIG_ENDIAN
65 #define __BIG_ENDIAN
66 #endif
67 #undef LITTLE_ENDIAN
68 #undef __LITTLE_ENDIAN
69 #endif
70 
71 #include "ecore_mfw_req.h"
72 #include "ecore_fw_defs.h"
73 #include "ecore_hsi.h"
74 #include "ecore_reg.h"
75 
76 struct bxe_softc;
77 typedef bus_addr_t ecore_dma_addr_t; /* expected to be 64 bit wide */
78 typedef volatile int ecore_atomic_t;
79 
80 #ifndef __bool_true_false_are_defined
81 #ifndef __cplusplus
82 #define bool _Bool
83 #if __STDC_VERSION__ < 199901L && __GNUC__ < 3 && !defined(__INTEL_COMPILER)
84 typedef _Bool bool;
85 #endif
86 #endif /* !__cplusplus */
87 #endif /* !__bool_true_false_are_defined */
88 
89 #define ETH_ALEN ETHER_ADDR_LEN /* 6 */
90 
91 #define ECORE_SWCID_SHIFT   17
92 #define ECORE_SWCID_MASK    ((0x1 << ECORE_SWCID_SHIFT) - 1)
93 
94 #define ECORE_MC_HASH_SIZE 8
95 #define ECORE_MC_HASH_OFFSET(sc, i)                                          \
96     (BAR_TSTRORM_INTMEM +                                                    \
97      TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(FUNC_ID(sc)) + i*4)
98 
99 #define ECORE_MAX_MULTICAST   64
100 #define ECORE_MAX_EMUL_MULTI  1
101 
102 #define IRO sc->iro_array
103 
104 typedef struct mtx ECORE_MUTEX;
105 #define ECORE_MUTEX_INIT(_mutex) \
106     mtx_init(_mutex, "ecore_lock", "ECORE Lock", MTX_DEF)
107 #define ECORE_MUTEX_LOCK(_mutex)   mtx_lock(_mutex)
108 #define ECORE_MUTEX_UNLOCK(_mutex) mtx_unlock(_mutex)
109 
110 typedef struct mtx ECORE_MUTEX_SPIN;
111 #define ECORE_SPIN_LOCK_INIT(_spin, _sc) \
112     mtx_init(_spin, "ecore_lock", "ECORE Lock", MTX_DEF)
113 #define ECORE_SPIN_LOCK_BH(_spin)   mtx_lock(_spin) /* bh = bottom-half */
114 #define ECORE_SPIN_UNLOCK_BH(_spin) mtx_unlock(_spin) /* bh = bottom-half */
115 
116 #define ECORE_SMP_MB_AFTER_CLEAR_BIT()  mb()
117 #define ECORE_SMP_MB_BEFORE_CLEAR_BIT() mb()
118 #define ECORE_SMP_MB()                  mb()
119 #define ECORE_SMP_RMB()                 rmb()
120 #define ECORE_SMP_WMB()                 wmb()
121 #define ECORE_MMIOWB()                  wmb()
122 
123 #define ECORE_SET_BIT_NA(bit, var)   bit_set(var, bit) /* non-atomic */
124 #define ECORE_CLEAR_BIT_NA(bit, var) bit_clear(var, bit) /* non-atomic */
125 #define ECORE_TEST_BIT(bit, var)     bxe_test_bit(bit, var)
126 #define ECORE_SET_BIT(bit, var)      bxe_set_bit(bit, var)
127 #define ECORE_CLEAR_BIT(bit, var)    bxe_clear_bit(bit, var)
128 #define ECORE_TEST_AND_CLEAR_BIT(bit, var) bxe_test_and_clear_bit(bit, var)
129 
130 #define ECORE_ATOMIC_READ(a) atomic_load_acq_int((volatile int *)a)
131 #define ECORE_ATOMIC_SET(a, v) atomic_store_rel_int((volatile int *)a, v)
132 #define ECORE_ATOMIC_CMPXCHG(a, o, n) bxe_cmpxchg((volatile int *)a, o, n)
133 
134 #define ECORE_RET_PENDING(pending_bit, pending) \
135     (ECORE_TEST_BIT(pending_bit, pending) ? ECORE_PENDING : ECORE_SUCCESS)
136 
137 #define ECORE_SET_FLAG(value, mask, flag)      \
138     do {                                       \
139         (value) &= ~(mask);                    \
140         (value) |= ((flag) << (mask##_SHIFT)); \
141     } while (0)
142 
143 #define ECORE_GET_FLAG(value, mask) \
144     (((value) & (mask)) >> (mask##_SHIFT))
145 
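/*
 * Usage sketch (annotation added here, not from the original sources): the
 * flag helpers above expect a mask macro with a matching <mask>_SHIFT
 * companion, as the HSI headers provide. With a hypothetical pair
 *
 *     #define EXAMPLE_FIELD        (0x3 << 4)
 *     #define EXAMPLE_FIELD_SHIFT  4
 *
 * a two-bit field is packed and read back like so:
 *
 *     uint32_t flags = 0, val;
 *     ECORE_SET_FLAG(flags, EXAMPLE_FIELD, 2);
 *     val = ECORE_GET_FLAG(flags, EXAMPLE_FIELD);    now val == 2
 */
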
146 #define ECORE_MIGHT_SLEEP()
147 
148 #define ECORE_FCOE_CID(sc) ((sc)->fp[FCOE_IDX(sc)].cl_id)
149 
150 #define ECORE_MEMCMP(_a, _b, _s) memcmp(_a, _b, _s)
151 #define ECORE_MEMCPY(_a, _b, _s) memcpy(_a, _b, _s)
152 #define ECORE_MEMSET(_a, _c, _s) memset(_a, _c, _s)
153 
154 #define ECORE_CPU_TO_LE16(x) htole16(x)
155 #define ECORE_CPU_TO_LE32(x) htole32(x)
156 
157 #define ECORE_WAIT(_s, _t) DELAY(1000)
158 #define ECORE_MSLEEP(_t)   DELAY((_t) * 1000)
159 
160 #define ECORE_LIKELY(x)   __predict_true(x)
161 #define ECORE_UNLIKELY(x) __predict_false(x)
162 
163 #define ECORE_ZALLOC(_size, _flags, _sc) \
164     malloc(_size, M_TEMP, (M_NOWAIT | M_ZERO))
165 
166 #define ECORE_CALLOC(_len, _size, _flags, _sc) \
167     malloc(_len * _size, M_TEMP, (M_NOWAIT | M_ZERO))
168 
169 #define ECORE_FREE(_s, _buf, _size) free(_buf, M_TEMP)
170 
171 #define SC_ILT(sc)  ((sc)->ilt)
172 #define ILOG2(x)    bxe_ilog2(x)
173 
174 #define ECORE_ILT_ZALLOC(x, y, size)                                       \
175     do {                                                                   \
176         x = malloc(sizeof(struct bxe_dma), M_DEVBUF, (M_NOWAIT | M_ZERO)); \
177         if (x) {                                                           \
178             if (bxe_dma_alloc((struct bxe_softc *)sc,                      \
179                               size, (struct bxe_dma *)x,                   \
180                               "ECORE_ILT") != 0) {                         \
181                 free(x, M_DEVBUF);                                         \
182                 x = NULL;                                                  \
183                 *y = 0;                                                    \
184             } else {                                                       \
185                 *y = ((struct bxe_dma *)x)->paddr;                         \
186             }                                                              \
187         }                                                                  \
188     } while (0)
189 
190 #define ECORE_ILT_FREE(x, y, size)                   \
191     do {                                             \
192         if (x) {                                     \
193             bxe_dma_free((struct bxe_softc *)sc, x); \
194             free(x, M_DEVBUF);                       \
195             x = NULL;                                \
196             y = 0;                                   \
197         }                                            \
198     } while (0)
199 
200 #define ECORE_IS_VALID_ETHER_ADDR(_mac) TRUE
201 
202 #define ECORE_IS_MF_SD_MODE   IS_MF_SD_MODE
203 #define ECORE_IS_MF_SI_MODE   IS_MF_SI_MODE
204 #define ECORE_IS_MF_AFEX_MODE IS_MF_AFEX_MODE
205 
206 #define ECORE_SET_CTX_VALIDATION bxe_set_ctx_validation
207 
208 #define ECORE_UPDATE_COALESCE_SB_INDEX bxe_update_coalesce_sb_index
209 
210 #define ECORE_ALIGN(x, a) ((((x) + (a) - 1) / (a)) * (a))
211 
212 #define ECORE_REG_WR_DMAE_LEN REG_WR_DMAE_LEN
213 
214 #define ECORE_PATH_ID     SC_PATH
215 #define ECORE_PORT_ID     SC_PORT
216 #define ECORE_FUNC_ID     SC_FUNC
217 #define ECORE_ABS_FUNC_ID SC_ABS_FUNC
218 
219 uint32_t calc_crc32(uint8_t *crc32_packet, uint32_t crc32_length,
220                     uint32_t crc32_seed, uint8_t complement);
221 static inline uint32_t
222 ECORE_CRC32_LE(uint32_t seed, uint8_t *mac, uint32_t len)
223 {
224     uint32_t packet_buf[2] = {0};
225     memcpy(((uint8_t *)(&packet_buf[0]))+2, &mac[0], 2);
226     memcpy(&packet_buf[1], &mac[2], 4);
227     return bswap32(calc_crc32((uint8_t *)packet_buf, 8, seed, 0));
228 }
229 
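/*
 * Usage sketch (annotation, not from the original sources): the multicast
 * approximate-match code derives its bin index (0..ECORE_MCAST_BINS_NUM-1)
 * from this CRC roughly as follows:
 *
 *     uint8_t mac[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
 *     uint8_t bin = (ECORE_CRC32_LE(0, mac, ETH_ALEN) >> 24) & 0xff;
 */
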
230 #define ecore_sp_post(_sc, _a, _b, _c, _d) \
231     bxe_sp_post(_sc, _a, _b, U64_HI(_c), U64_LO(_c), _d)
232 
233 #define ECORE_DBG_BREAK_IF(exp)     \
234     do {                            \
235         if (__predict_false(exp)) { \
236             panic("ECORE");         \
237         }                           \
238     } while (0)
239 
240 #define ECORE_BUG()                               \
241     do {                                          \
242         panic("BUG (%s:%d)", __FILE__, __LINE__); \
243     } while (0)
244 
245 #define ECORE_BUG_ON(exp)                                \
246     do {                                                 \
247         if (__predict_false(exp)) {                      \
248             panic("BUG_ON (%s:%d)", __FILE__, __LINE__); \
249         }                                                \
250     } while (0)
251 
252 #define ECORE_ERR(str, ...) \
253     BLOGE(sc, "ECORE: " str, ##__VA_ARGS__)
254 
255 #define DBG_SP 0x00000004 /* defined in bxe.h */
256 
257 #define ECORE_MSG(sc, m, ...) \
258     BLOGD(sc, DBG_SP, "ECORE: " m, ##__VA_ARGS__)
259 
260 typedef struct _ecore_list_entry_t
261 {
262     struct _ecore_list_entry_t *next, *prev;
263 } ecore_list_entry_t;
264 
265 typedef struct ecore_list_t
266 {
267     ecore_list_entry_t *head, *tail;
268     unsigned long cnt;
269 } ecore_list_t;
270 
271 /* initialize the list */
272 #define ECORE_LIST_INIT(_list) \
273     do {                       \
274         (_list)->head = NULL;  \
275         (_list)->tail = NULL;  \
276         (_list)->cnt  = 0;     \
277     } while (0)
278 
279 /* return TRUE if the element is the last on the list */
280 #define ECORE_LIST_IS_LAST(_elem, _list) \
281     (_elem == (_list)->tail)
282 
283 /* return TRUE if the list is empty */
284 #define ECORE_LIST_IS_EMPTY(_list) \
285     ((_list)->cnt == 0)
286 
287 /* return the first element */
288 #define ECORE_LIST_FIRST_ENTRY(_list, cast, _link) \
289     (cast *)((_list)->head)
290 
291 /* return the next element */
292 #define ECORE_LIST_NEXT(_elem, _link, cast) \
293     (cast *)((&((_elem)->_link))->next)
294 
295 /* push an element on the head of the list */
296 #define ECORE_LIST_PUSH_HEAD(_elem, _list)              \
297     do {                                                \
298         (_elem)->prev = (ecore_list_entry_t *)0;        \
299         (_elem)->next = (_list)->head;                  \
300         if ((_list)->tail == (ecore_list_entry_t *)0) { \
301             (_list)->tail = (_elem);                    \
302         } else {                                        \
303             (_list)->head->prev = (_elem);              \
304         }                                               \
305         (_list)->head = (_elem);                        \
306         (_list)->cnt++;                                 \
307     } while (0)
308 
309 /* push an element on the tail of the list */
310 #define ECORE_LIST_PUSH_TAIL(_elem, _list)       \
311     do {                                         \
312         (_elem)->next = (ecore_list_entry_t *)0; \
313         (_elem)->prev = (_list)->tail;           \
314         if ((_list)->tail) {                     \
315             (_list)->tail->next = (_elem);       \
316         } else {                                 \
317             (_list)->head = (_elem);             \
318         }                                        \
319         (_list)->tail = (_elem);                 \
320         (_list)->cnt++;                          \
321     } while (0)
322 
323 /* push list1 on the head of list2 and return with list1 as empty */
324 #define ECORE_LIST_SPLICE_INIT(_list1, _list2)     \
325     do {                                           \
326         (_list1)->tail->next = (_list2)->head;     \
327         if ((_list2)->head) {                      \
328             (_list2)->head->prev = (_list1)->tail; \
329         } else {                                   \
330             (_list2)->tail = (_list1)->tail;       \
331         }                                          \
332         (_list2)->head = (_list1)->head;           \
333         (_list2)->cnt += (_list1)->cnt;            \
334         (_list1)->head = NULL;                     \
335         (_list1)->tail = NULL;                     \
336         (_list1)->cnt  = 0;                        \
337     } while (0)
338 
339 /* remove an element from the list */
340 #define ECORE_LIST_REMOVE_ENTRY(_elem, _list)                      \
341     do {                                                           \
342         if ((_list)->head == (_elem)) {                            \
343             if ((_list)->head) {                                   \
344                 (_list)->head = (_list)->head->next;               \
345                 if ((_list)->head) {                               \
346                     (_list)->head->prev = (ecore_list_entry_t *)0; \
347                 } else {                                           \
348                     (_list)->tail = (ecore_list_entry_t *)0;       \
349                 }                                                  \
350                 (_list)->cnt--;                                    \
351             }                                                      \
352         } else if ((_list)->tail == (_elem)) {                     \
353             if ((_list)->tail) {                                   \
354                 (_list)->tail = (_list)->tail->prev;               \
355                 if ((_list)->tail) {                               \
356                     (_list)->tail->next = (ecore_list_entry_t *)0; \
357                 } else {                                           \
358                     (_list)->head = (ecore_list_entry_t *)0;       \
359                 }                                                  \
360                 (_list)->cnt--;                                    \
361             }                                                      \
362         } else {                                                   \
363             (_elem)->prev->next = (_elem)->next;                   \
364             (_elem)->next->prev = (_elem)->prev;                   \
365             (_list)->cnt--;                                        \
366         }                                                          \
367     } while (0)
368 
369 /* walk the list */
370 #define ECORE_LIST_FOR_EACH_ENTRY(pos, _list, _link, cast) \
371     for (pos = ECORE_LIST_FIRST_ENTRY(_list, cast, _link); \
372          pos;                                              \
373          pos = ECORE_LIST_NEXT(pos, _link, cast))
374 
375 /* walk the list (safely) */
376 #define ECORE_LIST_FOR_EACH_ENTRY_SAFE(pos, n, _list, _link, cast) \
377      for (pos = ECORE_LIST_FIRST_ENTRY(_list, cast, _link),        \
378           n = (pos) ? ECORE_LIST_NEXT(pos, _link, cast) : NULL;    \
379           pos != NULL;                                             \
380           pos = (cast *)n,                                         \
381           n = (pos) ? ECORE_LIST_NEXT(pos, _link, cast) : NULL)
382 
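/*
 * Usage sketch (annotation, not from the original sources): list elements
 * embed an ecore_list_entry_t as their FIRST member so that the casts in
 * ECORE_LIST_FIRST_ENTRY/ECORE_LIST_NEXT are valid; ecore_exeq_elem below
 * follows exactly this layout. The example_elem type and use() call are
 * hypothetical.
 *
 *     struct example_elem {
 *         ecore_list_entry_t link;
 *         int payload;
 *     };
 *
 *     ecore_list_t lst;
 *     struct example_elem elem, *pos;
 *
 *     ECORE_LIST_INIT(&lst);
 *     elem.payload = 42;
 *     ECORE_LIST_PUSH_TAIL(&elem.link, &lst);
 *     ECORE_LIST_FOR_EACH_ENTRY(pos, &lst, link, struct example_elem)
 *         use(pos->payload);
 */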
383 
384 /* Manipulate a bit vector defined as an array of uint64_t */
385 
386 /* Number of bits in one sge_mask array element */
387 #define BIT_VEC64_ELEM_SZ     64
388 #define BIT_VEC64_ELEM_SHIFT  6
389 #define BIT_VEC64_ELEM_MASK   ((uint64_t)BIT_VEC64_ELEM_SZ - 1)
390 
391 #define __BIT_VEC64_SET_BIT(el, bit)            \
392     do {                                        \
393         el = ((el) | ((uint64_t)0x1 << (bit))); \
394     } while (0)
395 
396 #define __BIT_VEC64_CLEAR_BIT(el, bit)             \
397     do {                                           \
398         el = ((el) & (~((uint64_t)0x1 << (bit)))); \
399     } while (0)
400 
401 #define BIT_VEC64_SET_BIT(vec64, idx)                           \
402     __BIT_VEC64_SET_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \
403                         (idx) & BIT_VEC64_ELEM_MASK)
404 
405 #define BIT_VEC64_CLEAR_BIT(vec64, idx)                           \
406     __BIT_VEC64_CLEAR_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \
407                           (idx) & BIT_VEC64_ELEM_MASK)
408 
409 #define BIT_VEC64_TEST_BIT(vec64, idx)          \
410     (((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT] >> \
411       ((idx) & BIT_VEC64_ELEM_MASK)) & 0x1)
412 
413 /*
414  * Creates a bitmask of all ones in less significant bits.
415  * idx - index of the most significant bit in the created mask
416  */
417 #define BIT_VEC64_ONES_MASK(idx)                                 \
418     (((uint64_t)0x1 << (((idx) & BIT_VEC64_ELEM_MASK) + 1)) - 1)
419 #define BIT_VEC64_ELEM_ONE_MASK ((uint64_t)(~0))
420 
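/*
 * Usage sketch (annotation, not from the original sources): a two-element
 * array gives a 128-bit vector; bit 70 lands in element 1, bit 6:
 *
 *     uint64_t vec[2] = { 0, 0 };
 *     BIT_VEC64_SET_BIT(vec, 70);
 *     if (BIT_VEC64_TEST_BIT(vec, 70))
 *         BIT_VEC64_CLEAR_BIT(vec, 70);
 */
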
421 /* fill in a MAC address the way the FW likes it */
422 static inline void
423 ecore_set_fw_mac_addr(uint16_t *fw_hi,
424                       uint16_t *fw_mid,
425                       uint16_t *fw_lo,
426                       uint8_t  *mac)
427 {
428     ((uint8_t *)fw_hi)[0]  = mac[1];
429     ((uint8_t *)fw_hi)[1]  = mac[0];
430     ((uint8_t *)fw_mid)[0] = mac[3];
431     ((uint8_t *)fw_mid)[1] = mac[2];
432     ((uint8_t *)fw_lo)[0]  = mac[5];
433     ((uint8_t *)fw_lo)[1]  = mac[4];
434 }
435 
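/*
 * Byte-order sketch (annotation, not from the original sources): for
 * mac = 00:11:22:33:44:55 the three 16-bit words hold, byte by byte,
 * hi = { 0x11, 0x00 }, mid = { 0x33, 0x22 }, lo = { 0x55, 0x44 }:
 *
 *     uint8_t mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
 *     uint16_t hi, mid, lo;
 *     ecore_set_fw_mac_addr(&hi, &mid, &lo, mac);
 */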
436 
437 enum ecore_status_t {
438     ECORE_EXISTS  = -6,
439     ECORE_IO      = -5,
440     ECORE_TIMEOUT = -4,
441     ECORE_INVAL   = -3,
442     ECORE_BUSY    = -2,
443     ECORE_NOMEM   = -1,
444     ECORE_SUCCESS = 0,
445     /* PENDING is not an error and should be positive */
446     ECORE_PENDING = 1,
447 };
448 
449 enum {
450     SWITCH_UPDATE,
451     AFEX_UPDATE,
452 };
453 
454 
455 
456 
457 struct bxe_softc;
458 struct eth_context;
459 
460 /* Bits representing general command's configuration */
461 enum {
462 	RAMROD_TX,
463 	RAMROD_RX,
464 	/* Wait until all pending commands complete */
465 	RAMROD_COMP_WAIT,
466 	/* Don't send a ramrod, only update a registry */
467 	RAMROD_DRV_CLR_ONLY,
468 	/* Configure HW according to the current object state */
469 	RAMROD_RESTORE,
470 	 /* Execute the next command now */
471 	RAMROD_EXEC,
472 	/* Don't add a new command and continue execution of postponed
473 	 * commands. If not set, a new command will be added to the
474 	 * pending commands list.
475 	 */
476 	RAMROD_CONT,
477 	/* If there is another pending ramrod, wait until it finishes and
478 	 * re-try to submit this one. This flag can be set only in sleepable
479 	 * context, and should not be set from the context that completes the
480 	 * ramrods as deadlock will occur.
481 	 */
482 	RAMROD_RETRY,
483 };
484 
485 typedef enum {
486 	ECORE_OBJ_TYPE_RX,
487 	ECORE_OBJ_TYPE_TX,
488 	ECORE_OBJ_TYPE_RX_TX,
489 } ecore_obj_type;
490 
491 /* Public slow path states */
492 enum {
493 	ECORE_FILTER_MAC_PENDING,
494 	ECORE_FILTER_VLAN_PENDING,
495 	ECORE_FILTER_VLAN_MAC_PENDING,
496 	ECORE_FILTER_RX_MODE_PENDING,
497 	ECORE_FILTER_RX_MODE_SCHED,
498 	ECORE_FILTER_ISCSI_ETH_START_SCHED,
499 	ECORE_FILTER_ISCSI_ETH_STOP_SCHED,
500 	ECORE_FILTER_FCOE_ETH_START_SCHED,
501 	ECORE_FILTER_FCOE_ETH_STOP_SCHED,
502 	ECORE_FILTER_BYPASS_RX_MODE_PENDING,
503 	ECORE_FILTER_BYPASS_MAC_PENDING,
504 	ECORE_FILTER_BYPASS_RSS_CONF_PENDING,
505 	ECORE_FILTER_MCAST_PENDING,
506 	ECORE_FILTER_MCAST_SCHED,
507 	ECORE_FILTER_RSS_CONF_PENDING,
508 	ECORE_AFEX_FCOE_Q_UPDATE_PENDING,
509 	ECORE_AFEX_PENDING_VIFSET_MCP_ACK
510 };
511 
512 struct ecore_raw_obj {
513 	uint8_t		func_id;
514 
515 	/* Queue params */
516 	uint8_t		cl_id;
517 	uint32_t		cid;
518 
519 	/* Ramrod data buffer params */
520 	void		*rdata;
521 	ecore_dma_addr_t	rdata_mapping;
522 
523 	/* Ramrod state params */
524 	int		state;   /* "ramrod is pending" state bit */
525 	unsigned long	*pstate; /* pointer to state buffer */
526 
527 	ecore_obj_type	obj_type;
528 
529 	int (*wait_comp)(struct bxe_softc *sc,
530 			 struct ecore_raw_obj *o);
531 
532 	bool (*check_pending)(struct ecore_raw_obj *o);
533 	void (*clear_pending)(struct ecore_raw_obj *o);
534 	void (*set_pending)(struct ecore_raw_obj *o);
535 };
536 
537 /************************* VLAN-MAC commands related parameters ***************/
538 struct ecore_mac_ramrod_data {
539 	uint8_t mac[ETH_ALEN];
540 	uint8_t is_inner_mac;
541 };
542 
543 struct ecore_vlan_ramrod_data {
544 	uint16_t vlan;
545 };
546 
547 struct ecore_vlan_mac_ramrod_data {
548 	uint8_t mac[ETH_ALEN];
549 	uint8_t is_inner_mac;
550 	uint16_t vlan;
551 };
552 
553 union ecore_classification_ramrod_data {
554 	struct ecore_mac_ramrod_data mac;
555 	struct ecore_vlan_ramrod_data vlan;
556 	struct ecore_vlan_mac_ramrod_data vlan_mac;
557 };
558 
559 /* VLAN_MAC commands */
560 enum ecore_vlan_mac_cmd {
561 	ECORE_VLAN_MAC_ADD,
562 	ECORE_VLAN_MAC_DEL,
563 	ECORE_VLAN_MAC_MOVE,
564 };
565 
566 struct ecore_vlan_mac_data {
567 	/* Requested command: ECORE_VLAN_MAC_XX */
568 	enum ecore_vlan_mac_cmd cmd;
569 	/* used to contain the data-related vlan_mac_flags bits from
570 	 * ramrod parameters.
571 	 */
572 	unsigned long vlan_mac_flags;
573 
574 	/* Needed for MOVE command */
575 	struct ecore_vlan_mac_obj *target_obj;
576 
577 	union ecore_classification_ramrod_data u;
578 };
579 
580 /*************************** Exe Queue obj ************************************/
581 union ecore_exe_queue_cmd_data {
582 	struct ecore_vlan_mac_data vlan_mac;
583 
584 	struct {
585 		/* TODO */
586 	} mcast;
587 };
588 
589 struct ecore_exeq_elem {
590 	ecore_list_entry_t		link;
591 
592 	/* Length of this element in the exe_chunk. */
593 	int				cmd_len;
594 
595 	union ecore_exe_queue_cmd_data	cmd_data;
596 };
597 
598 union ecore_qable_obj;
599 
600 union ecore_exeq_comp_elem {
601 	union event_ring_elem *elem;
602 };
603 
604 struct ecore_exe_queue_obj;
605 
606 typedef int (*exe_q_validate)(struct bxe_softc *sc,
607 			      union ecore_qable_obj *o,
608 			      struct ecore_exeq_elem *elem);
609 
610 typedef int (*exe_q_remove)(struct bxe_softc *sc,
611 			    union ecore_qable_obj *o,
612 			    struct ecore_exeq_elem *elem);
613 
614 /* Return positive if entry was optimized, 0 - if not, negative
615  * in case of an error.
616  */
617 typedef int (*exe_q_optimize)(struct bxe_softc *sc,
618 			      union ecore_qable_obj *o,
619 			      struct ecore_exeq_elem *elem);
620 typedef int (*exe_q_execute)(struct bxe_softc *sc,
621 			     union ecore_qable_obj *o,
622 			     ecore_list_t *exe_chunk,
623 			     unsigned long *ramrod_flags);
624 typedef struct ecore_exeq_elem *
625 			(*exe_q_get)(struct ecore_exe_queue_obj *o,
626 				     struct ecore_exeq_elem *elem);
627 
628 struct ecore_exe_queue_obj {
629 	/* Commands pending for execution. */
630 	ecore_list_t	exe_queue;
631 
632 	/* Commands pending for completion. */
633 	ecore_list_t	pending_comp;
634 
635 	ECORE_MUTEX_SPIN		lock;
636 
637 	/* Maximum length of commands' list for one execution */
638 	int			exe_chunk_len;
639 
640 	union ecore_qable_obj	*owner;
641 
642 	/****** Virtual functions ******/
643 	/**
644 	 * Called before commands execution for commands that are really
645 	 * going to be executed (after 'optimize').
646 	 *
647 	 * Must run under exe_queue->lock
648 	 */
649 	exe_q_validate		validate;
650 
651 	/**
652 	 * Called before removing pending commands, cleaning allocated
653 	 * resources (e.g., credits from validate)
654 	 */
655 	 exe_q_remove		remove;
656 
657 	/**
658 	 * This will try to cancel the current pending commands list
659 	 * considering the new command.
660 	 *
661 	 * Returns the number of optimized commands or a negative error code
662 	 *
663 	 * Must run under exe_queue->lock
664 	 */
665 	exe_q_optimize		optimize;
666 
667 	/**
668 	 * Run the next commands chunk (owner specific).
669 	 */
670 	exe_q_execute		execute;
671 
672 	/**
673 	 * Return the exe_queue element containing the specific command
674 	 * if any. Otherwise return NULL.
675 	 */
676 	exe_q_get		get;
677 };
678 /***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
679 /*
680  * Element in the VLAN_MAC registry list having all current configured
681  * rules.
682  */
683 struct ecore_vlan_mac_registry_elem {
684 	ecore_list_entry_t	link;
685 
686 	/* Used to store the cam offset used for the mac/vlan/vlan-mac.
687 	 * Relevant for 57710 and 57711 only. VLANs and MACs share the
688 	 * same CAM for these chips.
689 	 */
690 	int			cam_offset;
691 
692 	/* Needed for DEL and RESTORE flows */
693 	unsigned long		vlan_mac_flags;
694 
695 	union ecore_classification_ramrod_data u;
696 };
697 
698 /* Bits representing VLAN_MAC commands specific flags */
699 enum {
700 	ECORE_UC_LIST_MAC,
701 	ECORE_ETH_MAC,
702 	ECORE_ISCSI_ETH_MAC,
703 	ECORE_NETQ_ETH_MAC,
704 	ECORE_DONT_CONSUME_CAM_CREDIT,
705 	ECORE_DONT_CONSUME_CAM_CREDIT_DEST,
706 };
707 
708 struct ecore_vlan_mac_ramrod_params {
709 	/* Object to run the command from */
710 	struct ecore_vlan_mac_obj *vlan_mac_obj;
711 
712 	/* General command flags: COMP_WAIT, etc. */
713 	unsigned long ramrod_flags;
714 
715 	/* Command specific configuration request */
716 	struct ecore_vlan_mac_data user_req;
717 };
718 
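/*
 * Usage sketch (annotation, not from the original sources): adding one
 * unicast MAC and waiting for the ramrod to complete, in the style of the
 * bxe MAC configuration path. Here mac_obj is the driver's per-queue
 * ecore_vlan_mac_obj and addr is the 6-byte address to add (both assumed);
 * ecore_config_vlan_mac() is declared near the end of this file.
 *
 *     struct ecore_vlan_mac_ramrod_params p;
 *     int rc;
 *
 *     memset(&p, 0, sizeof(p));
 *     p.vlan_mac_obj = mac_obj;
 *     p.user_req.cmd = ECORE_VLAN_MAC_ADD;
 *     memcpy(p.user_req.u.mac.mac, addr, ETH_ALEN);
 *     ECORE_SET_BIT(ECORE_ETH_MAC, &p.user_req.vlan_mac_flags);
 *     ECORE_SET_BIT(RAMROD_COMP_WAIT, &p.ramrod_flags);
 *     rc = ecore_config_vlan_mac(sc, &p);
 */
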
719 struct ecore_vlan_mac_obj {
720 	struct ecore_raw_obj raw;
721 
722 	/* Bookkeeping list: will prevent the addition of already existing
723 	 * entries.
724 	 */
725 	ecore_list_t		head;
726 	/* Implement a simple reader/writer lock on the head list.
727 	 * all these fields should only be accessed under the exe_queue lock
728 	 */
729 	uint8_t		head_reader; /* Num. of readers accessing head list */
730 	bool		head_exe_request; /* Pending execution request. */
731 	unsigned long	saved_ramrod_flags; /* Ramrods of pending execution */
732 
733 	/* Execution queue interface instance */
734 	struct ecore_exe_queue_obj	exe_queue;
735 
736 	/* MACs credit pool */
737 	struct ecore_credit_pool_obj	*macs_pool;
738 
739 	/* VLANs credit pool */
740 	struct ecore_credit_pool_obj	*vlans_pool;
741 
742 	/* RAMROD command to be used */
743 	int				ramrod_cmd;
744 
745 	/* copy first n elements onto preallocated buffer
746 	 *
747 	 * @param n number of elements to get
748 	 * @param base buffer preallocated by caller into which elements
749 	 *            will be copied. Note elements are 4-byte aligned
750 	 *            so buffer size must be able to accommodate the
751 	 *            aligned elements.
752 	 *
753 	 * @return number of copied bytes
754 	 */
755 
756 	int (*get_n_elements)(struct bxe_softc *sc,
757 			      struct ecore_vlan_mac_obj *o, int n, uint8_t *base,
758 			      uint8_t stride, uint8_t size);
759 
760 	/**
761 	 * Checks if ADD-ramrod with the given params may be performed.
762 	 *
763 	 * @return zero if the element may be added
764 	 */
765 
766 	int (*check_add)(struct bxe_softc *sc,
767 			 struct ecore_vlan_mac_obj *o,
768 			 union ecore_classification_ramrod_data *data);
769 
770 	/**
771 	 * Checks if DEL-ramrod with the given params may be performed.
772 	 *
773 	 * @return the registry element that may be deleted, or NULL otherwise
774 	 */
775 	struct ecore_vlan_mac_registry_elem *
776 		(*check_del)(struct bxe_softc *sc,
777 			     struct ecore_vlan_mac_obj *o,
778 			     union ecore_classification_ramrod_data *data);
779 
780 	/**
781 	 * Checks if MOVE-ramrod with the given params may be performed.
782 	 *
783 	 * @return TRUE if the element may be moved
784 	 */
785 	bool (*check_move)(struct bxe_softc *sc,
786 			   struct ecore_vlan_mac_obj *src_o,
787 			   struct ecore_vlan_mac_obj *dst_o,
788 			   union ecore_classification_ramrod_data *data);
789 
790 	/**
791 	 *  Update the relevant credit object(s) (consume/return
792 	 *  correspondingly).
793 	 */
794 	bool (*get_credit)(struct ecore_vlan_mac_obj *o);
795 	bool (*put_credit)(struct ecore_vlan_mac_obj *o);
796 	bool (*get_cam_offset)(struct ecore_vlan_mac_obj *o, int *offset);
797 	bool (*put_cam_offset)(struct ecore_vlan_mac_obj *o, int offset);
798 
799 	/**
800 	 * Configures one rule in the ramrod data buffer.
801 	 */
802 	void (*set_one_rule)(struct bxe_softc *sc,
803 			     struct ecore_vlan_mac_obj *o,
804 			     struct ecore_exeq_elem *elem, int rule_idx,
805 			     int cam_offset);
806 
807 	/**
808 	 * Delete all configured elements having the given
809 	 * vlan_mac_flags specification. Assumes there are no commands
810 	 * pending for execution. Will schedule all currently
811 	 * configured MACs/VLANs/VLAN-MACs matching the vlan_mac_flags
812 	 * specification for deletion and will use the given
813 	 * ramrod_flags for the last DEL operation.
814 	 *
815 	 * @param sc
816 	 * @param o
817 	 * @param ramrod_flags RAMROD_XX flags
818 	 *
819 	 * @return 0 if the last operation has completed successfully
820 	 *         and there are no more elements left, positive value
821 	 *         if there are pending for completion commands,
822 	 *         negative value in case of failure.
823 	 */
824 	int (*delete_all)(struct bxe_softc *sc,
825 			  struct ecore_vlan_mac_obj *o,
826 			  unsigned long *vlan_mac_flags,
827 			  unsigned long *ramrod_flags);
828 
829 	/**
830 	 * Reconfigures the next MAC/VLAN/VLAN-MAC element from the previously
831 	 * configured elements list.
832 	 *
833 	 * @param sc
834 	 * @param p Command parameters (RAMROD_COMP_WAIT bit in
835 	 *          ramrod_flags is only taken into account)
836 	 * @param ppos a pointer to the cookie that should be given back in the
837 	 *        next call to make function handle the next element. If
838 	 *        *ppos is set to NULL it will restart the iterator.
839 	 *        If returned *ppos == NULL this means that the last
840 	 *        element has been handled.
841 	 *
842 	 * @return int
843 	 */
844 	int (*restore)(struct bxe_softc *sc,
845 		       struct ecore_vlan_mac_ramrod_params *p,
846 		       struct ecore_vlan_mac_registry_elem **ppos);
847 
848 	/**
849 	 * Should be called on a completion arrival.
850 	 *
851 	 * @param sc
852 	 * @param o
853 	 * @param cqe Completion element we are handling
854 	 * @param ramrod_flags if RAMROD_CONT is set the next bulk of
855 	 *		       pending commands will be executed.
856 	 *		       RAMROD_DRV_CLR_ONLY and RAMROD_RESTORE
857 	 *		       may also be set if needed.
858 	 *
859 	 * @return 0 if there are neither pending nor waiting for
860 	 *         completion commands. Positive value if there are
861 	 *         pending for execution or for completion commands.
862 	 *         Negative value in case of an error (including an
863 	 *         error in the cqe).
864 	 */
865 	int (*complete)(struct bxe_softc *sc, struct ecore_vlan_mac_obj *o,
866 			union event_ring_elem *cqe,
867 			unsigned long *ramrod_flags);
868 
869 	/**
870 	 * Wait for completion of all commands. Don't schedule new ones,
871 	 * just wait. It assumes that the completion code will schedule
872 	 * for new commands.
873 	 */
874 	int (*wait)(struct bxe_softc *sc, struct ecore_vlan_mac_obj *o);
875 };
876 
877 enum {
878 	ECORE_LLH_CAM_ISCSI_ETH_LINE = 0,
879 	ECORE_LLH_CAM_ETH_LINE,
880 	ECORE_LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2
881 };
882 
883 void ecore_set_mac_in_nig(struct bxe_softc *sc,
884 			  bool add, unsigned char *dev_addr, int index);
885 
886 /** RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
887 
888 /* RX_MODE ramrod special flags: set in rx_mode_flags field in
889  * an ecore_rx_mode_ramrod_params.
890  */
891 enum {
892 	ECORE_RX_MODE_FCOE_ETH,
893 	ECORE_RX_MODE_ISCSI_ETH,
894 };
895 
896 enum {
897 	ECORE_ACCEPT_UNICAST,
898 	ECORE_ACCEPT_MULTICAST,
899 	ECORE_ACCEPT_ALL_UNICAST,
900 	ECORE_ACCEPT_ALL_MULTICAST,
901 	ECORE_ACCEPT_BROADCAST,
902 	ECORE_ACCEPT_UNMATCHED,
903 	ECORE_ACCEPT_ANY_VLAN
904 };
905 
906 struct ecore_rx_mode_ramrod_params {
907 	struct ecore_rx_mode_obj *rx_mode_obj;
908 	unsigned long *pstate;
909 	int state;
910 	uint8_t cl_id;
911 	uint32_t cid;
912 	uint8_t func_id;
913 	unsigned long ramrod_flags;
914 	unsigned long rx_mode_flags;
915 
916 	/* rdata is either a pointer to eth_filter_rules_ramrod_data(e2) or to
917 	 * a tstorm_eth_mac_filter_config (e1x).
918 	 */
919 	void *rdata;
920 	ecore_dma_addr_t rdata_mapping;
921 
922 	/* Rx mode settings */
923 	unsigned long rx_accept_flags;
924 
925 	/* internal switching settings */
926 	unsigned long tx_accept_flags;
927 };
928 
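/*
 * Usage sketch (annotation, not from the original sources): given an
 * otherwise initialized struct ecore_rx_mode_ramrod_params p, a "normal"
 * unicast RX filter is built roughly like this before calling
 * ecore_config_rx_mode() (declared near the end of this file); the exact
 * per-mode bit selection lives in the driver, not in this header.
 *
 *     unsigned long rx_accept = 0, tx_accept = 0;
 *
 *     ECORE_SET_BIT(ECORE_ACCEPT_UNICAST, &rx_accept);
 *     ECORE_SET_BIT(ECORE_ACCEPT_MULTICAST, &rx_accept);
 *     ECORE_SET_BIT(ECORE_ACCEPT_BROADCAST, &rx_accept);
 *     ECORE_SET_BIT(ECORE_ACCEPT_UNICAST, &tx_accept);
 *     ECORE_SET_BIT(ECORE_ACCEPT_MULTICAST, &tx_accept);
 *     ECORE_SET_BIT(ECORE_ACCEPT_BROADCAST, &tx_accept);
 *
 *     p.rx_accept_flags = rx_accept;
 *     p.tx_accept_flags = tx_accept;
 */
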
929 struct ecore_rx_mode_obj {
930 	int (*config_rx_mode)(struct bxe_softc *sc,
931 			      struct ecore_rx_mode_ramrod_params *p);
932 
933 	int (*wait_comp)(struct bxe_softc *sc,
934 			 struct ecore_rx_mode_ramrod_params *p);
935 };
936 
937 /********************** Set multicast group ***********************************/
938 
939 struct ecore_mcast_list_elem {
940 	ecore_list_entry_t link;
941 	uint8_t *mac;
942 };
943 
944 union ecore_mcast_config_data {
945 	uint8_t *mac;
946 	uint8_t bin; /* used in a RESTORE flow */
947 };
948 
949 struct ecore_mcast_ramrod_params {
950 	struct ecore_mcast_obj *mcast_obj;
951 
952 	/* Relevant options are RAMROD_COMP_WAIT and RAMROD_DRV_CLR_ONLY */
953 	unsigned long ramrod_flags;
954 
955 	ecore_list_t mcast_list; /* list of struct ecore_mcast_list_elem */
956 	/** TODO:
957 	 *      - rename it to macs_num.
958 	 *      - Add a new command type for handling pending commands
959 	 *        (remove "zero semantics").
960 	 *
961 	 *  Length of mcast_list. If zero and ADD_CONT command - post
962 	 *  pending commands.
963 	 */
964 	int mcast_list_len;
965 };
966 
967 enum ecore_mcast_cmd {
968 	ECORE_MCAST_CMD_ADD,
969 	ECORE_MCAST_CMD_CONT,
970 	ECORE_MCAST_CMD_DEL,
971 	ECORE_MCAST_CMD_RESTORE,
972 };
973 
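/*
 * Usage sketch (annotation, not from the original sources): the driver
 * typically wipes the old approximate-match configuration with
 * ECORE_MCAST_CMD_DEL and then programs the new list with
 * ECORE_MCAST_CMD_ADD. One caller-allocated ecore_mcast_list_elem per
 * address is linked into mcast_list (allocation elided here); the
 * mcast_obj location in bxe_softc and the addr variable are assumed.
 *
 *     struct ecore_mcast_ramrod_params rparam;
 *     struct ecore_mcast_list_elem *elem;
 *     int rc;
 *
 *     memset(&rparam, 0, sizeof(rparam));
 *     rparam.mcast_obj = &sc->mcast_obj;
 *     rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
 *
 *     ECORE_LIST_INIT(&rparam.mcast_list);
 *     elem->mac = addr;
 *     ECORE_LIST_PUSH_TAIL(&elem->link, &rparam.mcast_list);
 *     rparam.mcast_list_len = 1;
 *     if (rc == 0)
 *         rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_ADD);
 */
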
974 struct ecore_mcast_obj {
975 	struct ecore_raw_obj raw;
976 
977 	union {
978 		struct {
979 		#define ECORE_MCAST_BINS_NUM	256
980 		#define ECORE_MCAST_VEC_SZ	(ECORE_MCAST_BINS_NUM / 64)
981 			uint64_t vec[ECORE_MCAST_VEC_SZ];
982 
983 			/** Number of BINs to clear. Should be updated
984 			 *  immediately when a command arrives in order to
985 			 *  properly create DEL commands.
986 			 */
987 			int num_bins_set;
988 		} aprox_match;
989 
990 		struct {
991 			ecore_list_t macs;
992 			int num_macs_set;
993 		} exact_match;
994 	} registry;
995 
996 	/* Pending commands */
997 	ecore_list_t pending_cmds_head;
998 
999 	/* A state that is set in raw.pstate, when there are pending commands */
1000 	int sched_state;
1001 
1002 	/* Maximal number of mcast MACs configured in one command */
1003 	int max_cmd_len;
1004 
1005 	/* Total number of currently pending MACs to configure: both
1006 	 * in the pending commands list and in the current command.
1007 	 */
1008 	int total_pending_num;
1009 
1010 	uint8_t engine_id;
1011 
1012 	/**
1013 	 * @param cmd command to execute (ECORE_MCAST_CMD_X, see above)
1014 	 */
1015 	int (*config_mcast)(struct bxe_softc *sc,
1016 			    struct ecore_mcast_ramrod_params *p,
1017 			    enum ecore_mcast_cmd cmd);
1018 
1019 	/**
1020 	 * Fills the ramrod data during the RESTORE flow.
1021 	 *
1022 	 * @param sc
1023 	 * @param o
1024 	 * @param start_idx Registry index to start from
1025 	 * @param rdata_idx Index in the ramrod data to start from
1026 	 *
1027 	 * @return -1 if we handled the whole registry or index of the last
1028 	 *         handled registry element.
1029 	 */
1030 	int (*hdl_restore)(struct bxe_softc *sc, struct ecore_mcast_obj *o,
1031 			   int start_bin, int *rdata_idx);
1032 
1033 	int (*enqueue_cmd)(struct bxe_softc *sc, struct ecore_mcast_obj *o,
1034 			   struct ecore_mcast_ramrod_params *p,
1035 			   enum ecore_mcast_cmd cmd);
1036 
1037 	void (*set_one_rule)(struct bxe_softc *sc,
1038 			     struct ecore_mcast_obj *o, int idx,
1039 			     union ecore_mcast_config_data *cfg_data,
1040 			     enum ecore_mcast_cmd cmd);
1041 
1042 	/** Checks if there are more mcast MACs to be set or a previous
1043 	 *  command is still pending.
1044 	 */
1045 	bool (*check_pending)(struct ecore_mcast_obj *o);
1046 
1047 	/**
1048 	 * Set/Clear/Check SCHEDULED state of the object
1049 	 */
1050 	void (*set_sched)(struct ecore_mcast_obj *o);
1051 	void (*clear_sched)(struct ecore_mcast_obj *o);
1052 	bool (*check_sched)(struct ecore_mcast_obj *o);
1053 
1054 	/* Wait until all pending commands complete */
1055 	int (*wait_comp)(struct bxe_softc *sc, struct ecore_mcast_obj *o);
1056 
1057 	/**
1058 	 * Handle the internal object counters needed for proper
1059 	 * commands handling. Checks that the provided parameters are
1060 	 * feasible.
1061 	 */
1062 	int (*validate)(struct bxe_softc *sc,
1063 			struct ecore_mcast_ramrod_params *p,
1064 			enum ecore_mcast_cmd cmd);
1065 
1066 	/**
1067 	 * Restore the values of internal counters in case of a failure.
1068 	 */
1069 	void (*revert)(struct bxe_softc *sc,
1070 		       struct ecore_mcast_ramrod_params *p,
1071 		       int old_num_bins);
1072 
1073 	int (*get_registry_size)(struct ecore_mcast_obj *o);
1074 	void (*set_registry_size)(struct ecore_mcast_obj *o, int n);
1075 };
1076 
1077 /*************************** Credit handling **********************************/
1078 struct ecore_credit_pool_obj {
1079 
1080 	/* Current amount of credit in the pool */
1081 	ecore_atomic_t	credit;
1082 
1083 	/* Maximum allowed credit. put() will check against it. */
1084 	int		pool_sz;
1085 
1086 	/* Allocate a pool table statically.
1087 	 *
1088 	 * Currently the maximum allowed size is MAX_MAC_CREDIT_E2(272)
1089 	 *
1090 	 * The set bit in the table will mean that the entry is available.
1091 	 */
1092 #define ECORE_POOL_VEC_SIZE	(MAX_MAC_CREDIT_E2 / 64)
1093 	uint64_t		pool_mirror[ECORE_POOL_VEC_SIZE];
1094 
1095 	/* Base pool offset (initialized differently) */
1096 	int		base_pool_offset;
1097 
1098 	/**
1099 	 * Get the next free pool entry.
1100 	 *
1101 	 * @return TRUE if there was a free entry in the pool
1102 	 */
1103 	bool (*get_entry)(struct ecore_credit_pool_obj *o, int *entry);
1104 
1105 	/**
1106 	 * Return the entry back to the pool.
1107 	 *
1108 	 * @return TRUE if entry is legal and has been successfully
1109 	 *         returned to the pool.
1110 	 */
1111 	bool (*put_entry)(struct ecore_credit_pool_obj *o, int entry);
1112 
1113 	/**
1114 	 * Get the requested amount of credit from the pool.
1115 	 *
1116 	 * @param cnt Amount of requested credit
1117 	 * @return TRUE if the operation is successful
1118 	 */
1119 	bool (*get)(struct ecore_credit_pool_obj *o, int cnt);
1120 
1121 	/**
1122 	 * Returns the credit to the pool.
1123 	 *
1124 	 * @param cnt Amount of credit to return
1125 	 * @return TRUE if the operation is successful
1126 	 */
1127 	bool (*put)(struct ecore_credit_pool_obj *o, int cnt);
1128 
1129 	/**
1130 	 * Reads the current amount of credit.
1131 	 */
1132 	int (*check)(struct ecore_credit_pool_obj *o);
1133 };
1134 
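/*
 * Usage sketch (annotation, not from the original sources): classification
 * code consumes one credit per CAM entry before issuing an ADD and returns
 * it on the rollback path; pool points at one of the objects initialized by
 * ecore_init_mac_credit_pool()/ecore_init_vlan_credit_pool() below.
 *
 *     if (!pool->get(pool, 1))
 *         return ECORE_NOMEM;
 *     ...
 *     pool->put(pool, 1);
 */
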
1135 /*************************** RSS configuration ********************************/
1136 enum {
1137 	/* RSS_MODE bits are mutually exclusive */
1138 	ECORE_RSS_MODE_DISABLED,
1139 	ECORE_RSS_MODE_REGULAR,
1140 
1141 	ECORE_RSS_SET_SRCH, /* Setup searcher, E1x specific flag */
1142 
1143 	ECORE_RSS_IPV4,
1144 	ECORE_RSS_IPV4_TCP,
1145 	ECORE_RSS_IPV4_UDP,
1146 	ECORE_RSS_IPV6,
1147 	ECORE_RSS_IPV6_TCP,
1148 	ECORE_RSS_IPV6_UDP,
1149 
1150 	ECORE_RSS_TUNNELING,
1151 #if defined(__VMKLNX__) && (VMWARE_ESX_DDK_VERSION < 55000) /* ! BNX2X_UPSTREAM */
1152 	ECORE_RSS_MODE_ESX51,
1153 #endif
1154 };
1155 
1156 struct ecore_config_rss_params {
1157 	struct ecore_rss_config_obj *rss_obj;
1158 
1159 	/* may have RAMROD_COMP_WAIT set only */
1160 	unsigned long	ramrod_flags;
1161 
1162 	/* ECORE_RSS_X bits */
1163 	unsigned long	rss_flags;
1164 
1165 	/* Number of hash bits to take into account */
1166 	uint8_t		rss_result_mask;
1167 
1168 	/* Indirection table */
1169 	uint8_t		ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
1170 
1171 	/* RSS hash values */
1172 	uint32_t		rss_key[10];
1173 
1174 	/* valid only iff ECORE_RSS_UPDATE_TOE is set */
1175 	uint16_t		toe_rss_bitmap;
1176 
1177 	/* valid iff ECORE_RSS_TUNNELING is set */
1178 	uint16_t		tunnel_value;
1179 	uint16_t		tunnel_mask;
1180 };
1181 
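/*
 * Usage sketch (annotation, not from the original sources): enabling regular
 * RSS over IPv4/IPv6 TCP, roughly as the bxe driver does; rss_obj is the
 * driver's ecore_rss_config_obj (location assumed) and the 40-byte rss_key[]
 * fill is elided.
 *
 *     struct ecore_config_rss_params params;
 *
 *     memset(&params, 0, sizeof(params));
 *     params.rss_obj = rss_obj;
 *     ECORE_SET_BIT(RAMROD_COMP_WAIT, &params.ramrod_flags);
 *     ECORE_SET_BIT(ECORE_RSS_MODE_REGULAR, &params.rss_flags);
 *     ECORE_SET_BIT(ECORE_RSS_IPV4, &params.rss_flags);
 *     ECORE_SET_BIT(ECORE_RSS_IPV4_TCP, &params.rss_flags);
 *     ECORE_SET_BIT(ECORE_RSS_IPV6, &params.rss_flags);
 *     ECORE_SET_BIT(ECORE_RSS_IPV6_TCP, &params.rss_flags);
 *     memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
 *     rc = ecore_config_rss(sc, &params);
 */
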
1182 struct ecore_rss_config_obj {
1183 	struct ecore_raw_obj	raw;
1184 
1185 	/* RSS engine to use */
1186 	uint8_t			engine_id;
1187 
1188 	/* Last configured indirection table */
1189 	uint8_t			ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
1190 
1191 	/* flags for enabling 4-tuple hash on UDP */
1192 	uint8_t			udp_rss_v4;
1193 	uint8_t			udp_rss_v6;
1194 
1195 	int (*config_rss)(struct bxe_softc *sc,
1196 			  struct ecore_config_rss_params *p);
1197 };
1198 
1199 /*********************** Queue state update ***********************************/
1200 
1201 /* UPDATE command options */
1202 enum {
1203 	ECORE_Q_UPDATE_IN_VLAN_REM,
1204 	ECORE_Q_UPDATE_IN_VLAN_REM_CHNG,
1205 	ECORE_Q_UPDATE_OUT_VLAN_REM,
1206 	ECORE_Q_UPDATE_OUT_VLAN_REM_CHNG,
1207 	ECORE_Q_UPDATE_ANTI_SPOOF,
1208 	ECORE_Q_UPDATE_ANTI_SPOOF_CHNG,
1209 	ECORE_Q_UPDATE_ACTIVATE,
1210 	ECORE_Q_UPDATE_ACTIVATE_CHNG,
1211 	ECORE_Q_UPDATE_DEF_VLAN_EN,
1212 	ECORE_Q_UPDATE_DEF_VLAN_EN_CHNG,
1213 	ECORE_Q_UPDATE_SILENT_VLAN_REM_CHNG,
1214 	ECORE_Q_UPDATE_SILENT_VLAN_REM,
1215 	ECORE_Q_UPDATE_TX_SWITCHING_CHNG,
1216 	ECORE_Q_UPDATE_TX_SWITCHING,
1217 };
1218 
1219 /* Allowed Queue states */
1220 enum ecore_q_state {
1221 	ECORE_Q_STATE_RESET,
1222 	ECORE_Q_STATE_INITIALIZED,
1223 	ECORE_Q_STATE_ACTIVE,
1224 	ECORE_Q_STATE_MULTI_COS,
1225 	ECORE_Q_STATE_MCOS_TERMINATED,
1226 	ECORE_Q_STATE_INACTIVE,
1227 	ECORE_Q_STATE_STOPPED,
1228 	ECORE_Q_STATE_TERMINATED,
1229 	ECORE_Q_STATE_FLRED,
1230 	ECORE_Q_STATE_MAX,
1231 };
1232 
1233 /* Allowed Queue states */
1234 enum ecore_q_logical_state {
1235 	ECORE_Q_LOGICAL_STATE_ACTIVE,
1236 	ECORE_Q_LOGICAL_STATE_STOPPED,
1237 };
1238 
1239 /* Allowed commands */
1240 enum ecore_queue_cmd {
1241 	ECORE_Q_CMD_INIT,
1242 	ECORE_Q_CMD_SETUP,
1243 	ECORE_Q_CMD_SETUP_TX_ONLY,
1244 	ECORE_Q_CMD_DEACTIVATE,
1245 	ECORE_Q_CMD_ACTIVATE,
1246 	ECORE_Q_CMD_UPDATE,
1247 	ECORE_Q_CMD_UPDATE_TPA,
1248 	ECORE_Q_CMD_HALT,
1249 	ECORE_Q_CMD_CFC_DEL,
1250 	ECORE_Q_CMD_TERMINATE,
1251 	ECORE_Q_CMD_EMPTY,
1252 	ECORE_Q_CMD_MAX,
1253 };
1254 
1255 /* queue SETUP + INIT flags */
1256 enum {
1257 	ECORE_Q_FLG_TPA,
1258 	ECORE_Q_FLG_TPA_IPV6,
1259 	ECORE_Q_FLG_TPA_GRO,
1260 	ECORE_Q_FLG_STATS,
1261 	ECORE_Q_FLG_ZERO_STATS,
1262 	ECORE_Q_FLG_ACTIVE,
1263 	ECORE_Q_FLG_OV,
1264 	ECORE_Q_FLG_VLAN,
1265 	ECORE_Q_FLG_COS,
1266 	ECORE_Q_FLG_HC,
1267 	ECORE_Q_FLG_HC_EN,
1268 	ECORE_Q_FLG_DHC,
1269 	ECORE_Q_FLG_OOO,
1270 	ECORE_Q_FLG_FCOE,
1271 	ECORE_Q_FLG_LEADING_RSS,
1272 	ECORE_Q_FLG_MCAST,
1273 	ECORE_Q_FLG_DEF_VLAN,
1274 	ECORE_Q_FLG_TX_SWITCH,
1275 	ECORE_Q_FLG_TX_SEC,
1276 	ECORE_Q_FLG_ANTI_SPOOF,
1277 	ECORE_Q_FLG_SILENT_VLAN_REM,
1278 	ECORE_Q_FLG_FORCE_DEFAULT_PRI,
1279 	ECORE_Q_FLG_REFUSE_OUTBAND_VLAN,
1280 	ECORE_Q_FLG_PCSUM_ON_PKT,
1281 	ECORE_Q_FLG_TUN_INC_INNER_IP_ID
1282 };
1283 
1284 /* Queue type options: queue type may be a combination of below. */
1285 enum ecore_q_type {
1286 	ECORE_Q_TYPE_FWD,
1287 	/** TODO: Consider moving both these flags into the init()
1288 	 *        ramrod params.
1289 	 */
1290 	ECORE_Q_TYPE_HAS_RX,
1291 	ECORE_Q_TYPE_HAS_TX,
1292 };
1293 
1294 #define ECORE_PRIMARY_CID_INDEX			0
1295 #define ECORE_MULTI_TX_COS_E1X			3 /* QM only */
1296 #define ECORE_MULTI_TX_COS_E2_E3A0		2
1297 #define ECORE_MULTI_TX_COS_E3B0			3
1298 #define ECORE_MULTI_TX_COS			3 /* Maximum possible */
1299 #define MAC_PAD (ECORE_ALIGN(ETH_ALEN, sizeof(uint32_t)) - ETH_ALEN)
1300 
1301 struct ecore_queue_init_params {
1302 	struct {
1303 		unsigned long	flags;
1304 		uint16_t		hc_rate;
1305 		uint8_t		fw_sb_id;
1306 		uint8_t		sb_cq_index;
1307 	} tx;
1308 
1309 	struct {
1310 		unsigned long	flags;
1311 		uint16_t		hc_rate;
1312 		uint8_t		fw_sb_id;
1313 		uint8_t		sb_cq_index;
1314 	} rx;
1315 
1316 	/* CID context in the host memory */
1317 	struct eth_context *cxts[ECORE_MULTI_TX_COS];
1318 
1319 	/* maximum number of cos supported by hardware */
1320 	uint8_t max_cos;
1321 };
1322 
1323 struct ecore_queue_terminate_params {
1324 	/* index within the tx_only cids of this queue object */
1325 	uint8_t cid_index;
1326 };
1327 
1328 struct ecore_queue_cfc_del_params {
1329 	/* index within the tx_only cids of this queue object */
1330 	uint8_t cid_index;
1331 };
1332 
1333 struct ecore_queue_update_params {
1334 	unsigned long	update_flags; /* ECORE_Q_UPDATE_XX bits */
1335 	uint16_t		def_vlan;
1336 	uint16_t		silent_removal_value;
1337 	uint16_t		silent_removal_mask;
1338 /* index within the tx_only cids of this queue object */
1339 	uint8_t		cid_index;
1340 };
1341 
1342 struct rxq_pause_params {
1343 	uint16_t		bd_th_lo;
1344 	uint16_t		bd_th_hi;
1345 	uint16_t		rcq_th_lo;
1346 	uint16_t		rcq_th_hi;
1347 	uint16_t		sge_th_lo; /* valid iff ECORE_Q_FLG_TPA */
1348 	uint16_t		sge_th_hi; /* valid iff ECORE_Q_FLG_TPA */
1349 	uint16_t		pri_map;
1350 };
1351 
1352 /* general */
1353 struct ecore_general_setup_params {
1354 	/* valid iff ECORE_Q_FLG_STATS */
1355 	uint8_t		stat_id;
1356 
1357 	uint8_t		spcl_id;
1358 	uint16_t		mtu;
1359 	uint8_t		cos;
1360 };
1361 
1362 struct ecore_rxq_setup_params {
1363 	/* dma */
1364 	ecore_dma_addr_t	dscr_map;
1365 	ecore_dma_addr_t	sge_map;
1366 	ecore_dma_addr_t	rcq_map;
1367 	ecore_dma_addr_t	rcq_np_map;
1368 
1369 	uint16_t		drop_flags;
1370 	uint16_t		buf_sz;
1371 	uint8_t		fw_sb_id;
1372 	uint8_t		cl_qzone_id;
1373 
1374 	/* valid iff ECORE_Q_FLG_TPA */
1375 	uint16_t		tpa_agg_sz;
1376 	uint16_t		sge_buf_sz;
1377 	uint8_t		max_sges_pkt;
1378 	uint8_t		max_tpa_queues;
1379 	uint8_t		rss_engine_id;
1380 
1381 	/* valid iff ECORE_Q_FLG_MCAST */
1382 	uint8_t		mcast_engine_id;
1383 
1384 	uint8_t		cache_line_log;
1385 
1386 	uint8_t		sb_cq_index;
1387 
1388 	/* valid iff ECORE_Q_FLG_SILENT_VLAN_REM */
1389 	uint16_t silent_removal_value;
1390 	uint16_t silent_removal_mask;
1391 };
1392 
1393 struct ecore_txq_setup_params {
1394 	/* dma */
1395 	ecore_dma_addr_t	dscr_map;
1396 
1397 	uint8_t		fw_sb_id;
1398 	uint8_t		sb_cq_index;
1399 	uint8_t		cos;		/* valid iff ECORE_Q_FLG_COS */
1400 	uint16_t		traffic_type;
1401 	/* equals the leading rss client id, used for TX classification */
1402 	uint8_t		tss_leading_cl_id;
1403 
1404 	/* valid iff ECORE_Q_FLG_DEF_VLAN */
1405 	uint16_t		default_vlan;
1406 };
1407 
1408 struct ecore_queue_setup_params {
1409 	struct ecore_general_setup_params gen_params;
1410 	struct ecore_txq_setup_params txq_params;
1411 	struct ecore_rxq_setup_params rxq_params;
1412 	struct rxq_pause_params pause_params;
1413 	unsigned long flags;
1414 };
1415 
1416 struct ecore_queue_setup_tx_only_params {
1417 	struct ecore_general_setup_params	gen_params;
1418 	struct ecore_txq_setup_params		txq_params;
1419 	unsigned long				flags;
1420 	/* index within the tx_only cids of this queue object */
1421 	uint8_t					cid_index;
1422 };
1423 
1424 struct ecore_queue_state_params {
1425 	struct ecore_queue_sp_obj *q_obj;
1426 
1427 	/* Current command */
1428 	enum ecore_queue_cmd cmd;
1429 
1430 	/* may have RAMROD_COMP_WAIT set only */
1431 	unsigned long ramrod_flags;
1432 
1433 	/* Params according to the current command */
1434 	union {
1435 		struct ecore_queue_update_params	update;
1436 		struct ecore_queue_setup_params		setup;
1437 		struct ecore_queue_init_params		init;
1438 		struct ecore_queue_setup_tx_only_params	tx_only;
1439 		struct ecore_queue_terminate_params	terminate;
1440 		struct ecore_queue_cfc_del_params	cfc_del;
1441 	} params;
1442 };
1443 
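/*
 * Usage sketch (annotation, not from the original sources): every queue
 * transition goes through ecore_queue_state_change() with cmd and the
 * matching params member filled in; e.g. halting a queue and waiting for
 * the completion (q_obj location in the driver's per-queue state is
 * assumed):
 *
 *     struct ecore_queue_state_params qparams;
 *
 *     memset(&qparams, 0, sizeof(qparams));
 *     qparams.q_obj = q_obj;
 *     qparams.cmd = ECORE_Q_CMD_HALT;
 *     ECORE_SET_BIT(RAMROD_COMP_WAIT, &qparams.ramrod_flags);
 *     rc = ecore_queue_state_change(sc, &qparams);
 */
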
1444 struct ecore_viflist_params {
1445 	uint8_t echo_res;
1446 	uint8_t func_bit_map_res;
1447 };
1448 
1449 struct ecore_queue_sp_obj {
1450 	uint32_t		cids[ECORE_MULTI_TX_COS];
1451 	uint8_t		cl_id;
1452 	uint8_t		func_id;
1453 
1454 	/* number of traffic classes supported by queue.
1455 	 * The primary connection of the queue supports the first traffic
1456 	 * class. Any further traffic class is supported by a tx-only
1457 	 * connection.
1458 	 *
1459 	 * Therefore max_cos is also a number of valid entries in the cids
1460 	 * array.
1461 	 */
1462 	uint8_t max_cos;
1463 	uint8_t num_tx_only, next_tx_only;
1464 
1465 	enum ecore_q_state state, next_state;
1466 
1467 	/* bits from enum ecore_q_type */
1468 	unsigned long	type;
1469 
1470 	/* ECORE_Q_CMD_XX bits. This object implements "one
1471 	 * pending" paradigm but for debug and tracing purposes it's
1472 	 * more convenient to have different bits for different
1473 	 * commands.
1474 	 */
1475 	unsigned long	pending;
1476 
1477 	/* Buffer to use as a ramrod data and its mapping */
1478 	void		*rdata;
1479 	ecore_dma_addr_t	rdata_mapping;
1480 
1481 	/**
1482 	 * Performs one state change according to the given parameters.
1483 	 *
1484 	 * @return 0 in case of success and negative value otherwise.
1485 	 */
1486 	int (*send_cmd)(struct bxe_softc *sc,
1487 			struct ecore_queue_state_params *params);
1488 
1489 	/**
1490 	 * Sets the pending bit according to the requested transition.
1491 	 */
1492 	int (*set_pending)(struct ecore_queue_sp_obj *o,
1493 			   struct ecore_queue_state_params *params);
1494 
1495 	/**
1496 	 * Checks that the requested state transition is legal.
1497 	 */
1498 	int (*check_transition)(struct bxe_softc *sc,
1499 				struct ecore_queue_sp_obj *o,
1500 				struct ecore_queue_state_params *params);
1501 
1502 	/**
1503 	 * Completes the pending command.
1504 	 */
1505 	int (*complete_cmd)(struct bxe_softc *sc,
1506 			    struct ecore_queue_sp_obj *o,
1507 			    enum ecore_queue_cmd);
1508 
1509 	int (*wait_comp)(struct bxe_softc *sc,
1510 			 struct ecore_queue_sp_obj *o,
1511 			 enum ecore_queue_cmd cmd);
1512 };
1513 
1514 /********************** Function state update *********************************/
1515 /* Allowed Function states */
1516 enum ecore_func_state {
1517 	ECORE_F_STATE_RESET,
1518 	ECORE_F_STATE_INITIALIZED,
1519 	ECORE_F_STATE_STARTED,
1520 	ECORE_F_STATE_TX_STOPPED,
1521 	ECORE_F_STATE_MAX,
1522 };
1523 
1524 /* Allowed Function commands */
1525 enum ecore_func_cmd {
1526 	ECORE_F_CMD_HW_INIT,
1527 	ECORE_F_CMD_START,
1528 	ECORE_F_CMD_STOP,
1529 	ECORE_F_CMD_HW_RESET,
1530 	ECORE_F_CMD_AFEX_UPDATE,
1531 	ECORE_F_CMD_AFEX_VIFLISTS,
1532 	ECORE_F_CMD_TX_STOP,
1533 	ECORE_F_CMD_TX_START,
1534 	ECORE_F_CMD_SWITCH_UPDATE,
1535 	ECORE_F_CMD_MAX,
1536 };
1537 
1538 struct ecore_func_hw_init_params {
1539 	/* A load phase returned by MCP.
1540 	 *
1541 	 * May be:
1542 	 *		FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
1543 	 *		FW_MSG_CODE_DRV_LOAD_COMMON
1544 	 *		FW_MSG_CODE_DRV_LOAD_PORT
1545 	 *		FW_MSG_CODE_DRV_LOAD_FUNCTION
1546 	 */
1547 	uint32_t load_phase;
1548 };
1549 
1550 struct ecore_func_hw_reset_params {
1551 	/* A load phase returned by MCP.
1552 	 *
1553 	 * May be:
1554 	 *		FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
1555 	 *		FW_MSG_CODE_DRV_LOAD_COMMON
1556 	 *		FW_MSG_CODE_DRV_LOAD_PORT
1557 	 *		FW_MSG_CODE_DRV_LOAD_FUNCTION
1558 	 */
1559 	uint32_t reset_phase;
1560 };
1561 
1562 struct ecore_func_start_params {
1563 	/* Multi Function mode:
1564 	 *	- Single Function
1565 	 *	- Switch Dependent
1566 	 *	- Switch Independent
1567 	 */
1568 	uint16_t mf_mode;
1569 
1570 	/* Switch Dependent mode outer VLAN tag */
1571 	uint16_t sd_vlan_tag;
1572 
1573 	/* Function cos mode */
1574 	uint8_t network_cos_mode;
1575 
1576 	/* NVGRE classification enablement */
1577 	uint8_t nvgre_clss_en;
1578 
1579 	/* NO_GRE_TUNNEL/NVGRE_TUNNEL/L2GRE_TUNNEL/IPGRE_TUNNEL */
1580 	uint8_t gre_tunnel_mode;
1581 
1582 	/* GRE_OUTER_HEADERS_RSS/GRE_INNER_HEADERS_RSS/NVGRE_KEY_ENTROPY_RSS */
1583 	uint8_t gre_tunnel_rss;
1584 
1585 };
1586 
1587 struct ecore_func_switch_update_params {
1588 	uint8_t suspend;
1589 };
1590 
1591 struct ecore_func_afex_update_params {
1592 	uint16_t vif_id;
1593 	uint16_t afex_default_vlan;
1594 	uint8_t allowed_priorities;
1595 };
1596 
1597 struct ecore_func_afex_viflists_params {
1598 	uint16_t vif_list_index;
1599 	uint8_t func_bit_map;
1600 	uint8_t afex_vif_list_command;
1601 	uint8_t func_to_clear;
1602 };
1603 struct ecore_func_tx_start_params {
1604 	struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES];
1605 	uint8_t dcb_enabled;
1606 	uint8_t dcb_version;
1607 	uint8_t dont_add_pri_0;
1608 };
1609 
1610 struct ecore_func_state_params {
1611 	struct ecore_func_sp_obj *f_obj;
1612 
1613 	/* Current command */
1614 	enum ecore_func_cmd cmd;
1615 
1616 	/* may have RAMROD_COMP_WAIT set only */
1617 	unsigned long	ramrod_flags;
1618 
1619 	/* Params according to the current command */
1620 	union {
1621 		struct ecore_func_hw_init_params hw_init;
1622 		struct ecore_func_hw_reset_params hw_reset;
1623 		struct ecore_func_start_params start;
1624 		struct ecore_func_switch_update_params switch_update;
1625 		struct ecore_func_afex_update_params afex_update;
1626 		struct ecore_func_afex_viflists_params afex_viflists;
1627 		struct ecore_func_tx_start_params tx_start;
1628 	} params;
1629 };
1630 
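/*
 * Usage sketch (annotation, not from the original sources): function-level
 * transitions follow the same pattern as queue transitions; e.g. stopping
 * the function via ecore_func_state_change(), declared below (the func_obj
 * location in bxe_softc is assumed):
 *
 *     struct ecore_func_state_params fparams;
 *
 *     memset(&fparams, 0, sizeof(fparams));
 *     fparams.f_obj = &sc->func_obj;
 *     fparams.cmd = ECORE_F_CMD_STOP;
 *     ECORE_SET_BIT(RAMROD_COMP_WAIT, &fparams.ramrod_flags);
 *     rc = ecore_func_state_change(sc, &fparams);
 */
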
1631 struct ecore_func_sp_drv_ops {
1632 	/* Init tool + runtime initialization:
1633 	 *      - Common Chip
1634 	 *      - Common (per Path)
1635 	 *      - Port
1636 	 *      - Function phases
1637 	 */
1638 	int (*init_hw_cmn_chip)(struct bxe_softc *sc);
1639 	int (*init_hw_cmn)(struct bxe_softc *sc);
1640 	int (*init_hw_port)(struct bxe_softc *sc);
1641 	int (*init_hw_func)(struct bxe_softc *sc);
1642 
1643 	/* Reset Function HW: Common, Port, Function phases. */
1644 	void (*reset_hw_cmn)(struct bxe_softc *sc);
1645 	void (*reset_hw_port)(struct bxe_softc *sc);
1646 	void (*reset_hw_func)(struct bxe_softc *sc);
1647 
1648 	/* Init/Free GUNZIP resources */
1649 	int (*gunzip_init)(struct bxe_softc *sc);
1650 	void (*gunzip_end)(struct bxe_softc *sc);
1651 
1652 	/* Prepare/Release FW resources */
1653 	int (*init_fw)(struct bxe_softc *sc);
1654 	void (*release_fw)(struct bxe_softc *sc);
1655 };
1656 
1657 struct ecore_func_sp_obj {
1658 	enum ecore_func_state	state, next_state;
1659 
1660 	/* ECORE_F_CMD_XX bits. This object implements "one
1661 	 * pending" paradigm but for debug and tracing purposes it's
1662 	 * more convenient to have different bits for different
1663 	 * commands.
1664 	 */
1665 	unsigned long		pending;
1666 
1667 	/* Buffer to use as a ramrod data and its mapping */
1668 	void			*rdata;
1669 	ecore_dma_addr_t		rdata_mapping;
1670 
1671 	/* Buffer to use as an afex ramrod data and its mapping.
1672 	 * This can't be the same rdata as above because afex ramrod requests
1673 	 * can arrive at the object in parallel with other ramrod requests.
1674 	 */
1675 	void			*afex_rdata;
1676 	ecore_dma_addr_t		afex_rdata_mapping;
1677 
1678 	/* this mutex validates that when the pending flag is taken, the next
1679 	 * ramrod to be sent will be the one that set the pending bit
1680 	 */
1681 	ECORE_MUTEX		one_pending_mutex;
1682 
1683 	/* Driver interface */
1684 	struct ecore_func_sp_drv_ops	*drv;
1685 
1686 	/**
1687 	 * Performs one state change according to the given parameters.
1688 	 *
1689 	 * @return 0 in case of success and negative value otherwise.
1690 	 */
1691 	int (*send_cmd)(struct bxe_softc *sc,
1692 			struct ecore_func_state_params *params);
1693 
1694 	/**
1695 	 * Checks that the requested state transition is legal.
1696 	 */
1697 	int (*check_transition)(struct bxe_softc *sc,
1698 				struct ecore_func_sp_obj *o,
1699 				struct ecore_func_state_params *params);
1700 
1701 	/**
1702 	 * Completes the pending command.
1703 	 */
1704 	int (*complete_cmd)(struct bxe_softc *sc,
1705 			    struct ecore_func_sp_obj *o,
1706 			    enum ecore_func_cmd cmd);
1707 
1708 	int (*wait_comp)(struct bxe_softc *sc, struct ecore_func_sp_obj *o,
1709 			 enum ecore_func_cmd cmd);
1710 };
1711 
1712 /********************** Interfaces ********************************************/
1713 /* Queueable objects set */
1714 union ecore_qable_obj {
1715 	struct ecore_vlan_mac_obj vlan_mac;
1716 };
1717 /************** Function state update *********/
1718 void ecore_init_func_obj(struct bxe_softc *sc,
1719 			 struct ecore_func_sp_obj *obj,
1720 			 void *rdata, ecore_dma_addr_t rdata_mapping,
1721 			 void *afex_rdata, ecore_dma_addr_t afex_rdata_mapping,
1722 			 struct ecore_func_sp_drv_ops *drv_iface);
1723 
1724 int ecore_func_state_change(struct bxe_softc *sc,
1725 			    struct ecore_func_state_params *params);
1726 
1727 enum ecore_func_state ecore_func_get_state(struct bxe_softc *sc,
1728 					   struct ecore_func_sp_obj *o);
1729 /******************* Queue State **************/
1730 void ecore_init_queue_obj(struct bxe_softc *sc,
1731 			  struct ecore_queue_sp_obj *obj, uint8_t cl_id, uint32_t *cids,
1732 			  uint8_t cid_cnt, uint8_t func_id, void *rdata,
1733 			  ecore_dma_addr_t rdata_mapping, unsigned long type);
1734 
1735 int ecore_queue_state_change(struct bxe_softc *sc,
1736 			     struct ecore_queue_state_params *params);
1737 
1738 int ecore_get_q_logical_state(struct bxe_softc *sc,
1739 			       struct ecore_queue_sp_obj *obj);
1740 
1741 /********************* VLAN-MAC ****************/
1742 void ecore_init_mac_obj(struct bxe_softc *sc,
1743 			struct ecore_vlan_mac_obj *mac_obj,
1744 			uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata,
1745 			ecore_dma_addr_t rdata_mapping, int state,
1746 			unsigned long *pstate, ecore_obj_type type,
1747 			struct ecore_credit_pool_obj *macs_pool);
1748 
1749 void ecore_init_vlan_obj(struct bxe_softc *sc,
1750 			 struct ecore_vlan_mac_obj *vlan_obj,
1751 			 uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata,
1752 			 ecore_dma_addr_t rdata_mapping, int state,
1753 			 unsigned long *pstate, ecore_obj_type type,
1754 			 struct ecore_credit_pool_obj *vlans_pool);
1755 
1756 void ecore_init_vlan_mac_obj(struct bxe_softc *sc,
1757 			     struct ecore_vlan_mac_obj *vlan_mac_obj,
1758 			     uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata,
1759 			     ecore_dma_addr_t rdata_mapping, int state,
1760 			     unsigned long *pstate, ecore_obj_type type,
1761 			     struct ecore_credit_pool_obj *macs_pool,
1762 			     struct ecore_credit_pool_obj *vlans_pool);
1763 
1764 int ecore_vlan_mac_h_read_lock(struct bxe_softc *sc,
1765 					struct ecore_vlan_mac_obj *o);
1766 void ecore_vlan_mac_h_read_unlock(struct bxe_softc *sc,
1767 				  struct ecore_vlan_mac_obj *o);
1768 int ecore_vlan_mac_h_write_lock(struct bxe_softc *sc,
1769 				struct ecore_vlan_mac_obj *o);
1770 void ecore_vlan_mac_h_write_unlock(struct bxe_softc *sc,
1771 					  struct ecore_vlan_mac_obj *o);
1772 int ecore_config_vlan_mac(struct bxe_softc *sc,
1773 			   struct ecore_vlan_mac_ramrod_params *p);
1774 
1775 int ecore_vlan_mac_move(struct bxe_softc *sc,
1776 			struct ecore_vlan_mac_ramrod_params *p,
1777 			struct ecore_vlan_mac_obj *dest_o);
1778 
1779 /********************* RX MODE ****************/
1780 
1781 void ecore_init_rx_mode_obj(struct bxe_softc *sc,
1782 			    struct ecore_rx_mode_obj *o);
1783 
1784 /**
1785  * ecore_config_rx_mode - Send an RX_MODE ramrod according to the provided parameters.
1786  *
1787  * @p: Command parameters
1788  *
1789  * Return: 0 - if operation was successful and there are no pending completions,
1790  *         positive number - if there are pending completions,
1791  *         negative - if there were errors
1792  */
1793 int ecore_config_rx_mode(struct bxe_softc *sc,
1794 			 struct ecore_rx_mode_ramrod_params *p);
1795 
1796 /****************** MULTICASTS ****************/
1797 
1798 void ecore_init_mcast_obj(struct bxe_softc *sc,
1799 			  struct ecore_mcast_obj *mcast_obj,
1800 			  uint8_t mcast_cl_id, uint32_t mcast_cid, uint8_t func_id,
1801 			  uint8_t engine_id, void *rdata, ecore_dma_addr_t rdata_mapping,
1802 			  int state, unsigned long *pstate,
1803 			  ecore_obj_type type);
1804 
1805 /**
1806  * ecore_config_mcast - Configure multicast MACs list.
1807  *
1808  * @cmd: command to execute: ECORE_MCAST_CMD_X
1809  *
1810  * May configure a new list
1811  * provided in p->mcast_list (ECORE_MCAST_CMD_ADD), clean up
1812  * (ECORE_MCAST_CMD_DEL) or restore (ECORE_MCAST_CMD_RESTORE) a current
1813  * configuration, continue to execute the pending commands
1814  * (ECORE_MCAST_CMD_CONT).
1815  *
1816  * If a previous command is still pending or if the number of MACs to
1817  * configure is more than the maximum number of MACs in one command,
1818  * the current command will be enqueued to the tail of the
1819  * pending commands list.
1820  *
1821  * Return: 0 if operation was successful and there are no pending completions,
1822  *         negative if there were errors, positive if there are pending
1823  *         completions.
1824  */
1825 int ecore_config_mcast(struct bxe_softc *sc,
1826 		       struct ecore_mcast_ramrod_params *p,
1827 		       enum ecore_mcast_cmd cmd);
1828 
1829 /****************** CREDIT POOL ****************/
1830 void ecore_init_mac_credit_pool(struct bxe_softc *sc,
1831 				struct ecore_credit_pool_obj *p, uint8_t func_id,
1832 				uint8_t func_num);
1833 void ecore_init_vlan_credit_pool(struct bxe_softc *sc,
1834 				 struct ecore_credit_pool_obj *p, uint8_t func_id,
1835 				 uint8_t func_num);
1836 
1837 /****************** RSS CONFIGURATION ****************/
1838 void ecore_init_rss_config_obj(struct bxe_softc *sc,
1839 			       struct ecore_rss_config_obj *rss_obj,
1840 			       uint8_t cl_id, uint32_t cid, uint8_t func_id, uint8_t engine_id,
1841 			       void *rdata, ecore_dma_addr_t rdata_mapping,
1842 			       int state, unsigned long *pstate,
1843 			       ecore_obj_type type);
1844 
1845 /**
1846  * ecore_config_rss - Updates RSS configuration according to provided parameters
1847  *
1848  * Return: 0 in case of success
1849  */
1850 int ecore_config_rss(struct bxe_softc *sc,
1851 		     struct ecore_config_rss_params *p);
1852 
1853 /**
1854  * ecore_get_rss_ind_table - Return the current ind_table configuration.
1855  *
1856  * @ind_table: buffer to fill with the current indirection
1857  *                  table content. Should be at least
1858  *                  T_ETH_INDIRECTION_TABLE_SIZE bytes long.
1859  */
1860 void ecore_get_rss_ind_table(struct ecore_rss_config_obj *rss_obj,
1861 			     uint8_t *ind_table);
1862 
1863 /* set as inline so printout will show the offending function */
1864 int validate_vlan_mac(struct bxe_softc *sc,
1865 		      struct ecore_vlan_mac_obj *vlan_mac);
1866 
1867 #endif /* ECORE_SP_H */
1868 
1869