xref: /titanic_41/usr/src/uts/common/io/bnxe/577xx/drivers/common/ecore/ecore_sp_verbs.h (revision f391a51a4e9639750045473dba1cc2831267c93e)
1 #ifndef ECORE_SP_VERBS
2 #define ECORE_SP_VERBS
3 
4 #ifndef ECORE_ERASE
5 #define ETH_ALEN 6
6 
7 #include "lm_defs.h"
8 #include "listq.h"
9 #include "eth_constants.h"
10 #include "bcm_utils.h"
11 #include "mm.h"
12 
13 #ifdef __LINUX
14 #include <linux/time.h>
15 #include <linux/mutex.h>
16 #define ECORE_ALIGN(x, a) ALIGN(x, a)
17 #else
18 #define ECORE_ALIGN(x, a) ((((x) + (a) - 1) / (a)) * (a))
19 
20 typedef volatile unsigned long atomic_t;
21 #endif
22 
23 /* FIXME (MichalS): move to bcmtypes.h 26-Sep-10 */
24 typedef int BOOL;
25 
26 /* None-atomic macros */
27 #define ECORE_SET_BIT_NA(bit, var) SET_BIT(*(var), bit)
28 #define ECORE_CLEAR_BIT_NA(bit, var) RESET_BIT(*(var), bit)
29 
30 #ifdef __LINUX
31 typedef struct mutex ECORE_MUTEX;
32 
33 /* Bits tweaking */
34 #define ECORE_SET_BIT(bit, var)   mm_atomic_or(var, (1<<bit))
35 #define ECORE_CLEAR_BIT(bit, var) mm_atomic_and(var, ~(1<<bit))
36 
37 #elif defined(USER_LINUX)
38 typedef int ECORE_MUTEX;
39 
40 /* Bits tweaking */
41 #define ECORE_SET_BIT(bit, var)   set_bit(bit, var)
42 #define ECORE_CLEAR_BIT(bit, var) clear_bit(bit, var)
43 #else /* VBD */
44 
45 typedef int ECORE_MUTEX;
46 
47 /* Bits tweaking */
48 #define ECORE_SET_BIT(bit, var)   mm_atomic_long_or(var, (1<<bit))
49 #define ECORE_CLEAR_BIT(bit, var) mm_atomic_long_and(var, ~(1<<bit))
50 #endif
51 
/************************ Types used in ecore *********************************/

/* Ecore return codes: negative values are errors (loosely mirroring -errno
 * conventions), zero is success, and positive values mean the operation is
 * still in progress.
 */
enum _ecore_status_t {
	ECORE_EXISTS  = -6,	/* entry already configured */
	ECORE_IO      = -5,	/* I/O error */
	ECORE_TIMEOUT = -4,	/* operation timed out */
	ECORE_INVAL   = -3,	/* invalid argument */
	ECORE_BUSY    = -2,	/* resource busy / command pending */
	ECORE_NOMEM   = -1,	/* allocation failure */
	ECORE_SUCCESS = 0,
	/* PENDING is not an error and should be positive */
	ECORE_PENDING = 1,
};

#endif /* !ECORE_ERASE */
67 
/* Opaque types defined elsewhere in the driver. */
struct _lm_device_t;
struct eth_context;

/* Bits representing general command's configuration (ramrod_flags) */
enum {
	RAMROD_TX,
	RAMROD_RX,
	/* Wait until all pending commands complete */
	RAMROD_COMP_WAIT,
	/* Don't send a ramrod, only update a registry */
	RAMROD_DRV_CLR_ONLY,
	/* Configure HW according to the current object state */
	RAMROD_RESTORE,
	 /* Execute the next command now */
	RAMROD_EXEC,
	/* Don't add a new command and continue execution of postponed
	 * commands. If not set a new command will be added to the
	 * pending commands list.
	 */
	RAMROD_CONT,
	/* If there is another pending ramrod, wait until it finishes and
	 * re-try to submit this one. This flag can be set only in sleepable
	 * context, and should not be set from the context that completes the
	 * ramrods as deadlock will occur.
	 */
	RAMROD_RETRY,
};

/* Which traffic direction(s) an object services. */
typedef enum {
	ECORE_OBJ_TYPE_RX,
	ECORE_OBJ_TYPE_TX,
	ECORE_OBJ_TYPE_RX_TX,
} ecore_obj_type;
101 
/* Public slow path states.
 * Bit indices set in an object's pstate word while the corresponding
 * ramrod is pending or scheduled (see ecore_raw_obj.state / pstate).
 */
enum {
	ECORE_FILTER_MAC_PENDING,
	ECORE_FILTER_VLAN_PENDING,
	ECORE_FILTER_VLAN_MAC_PENDING,
	ECORE_FILTER_RX_MODE_PENDING,
	ECORE_FILTER_RX_MODE_SCHED,
	ECORE_FILTER_ISCSI_ETH_START_SCHED,
	ECORE_FILTER_ISCSI_ETH_STOP_SCHED,
	ECORE_FILTER_FCOE_ETH_START_SCHED,
	ECORE_FILTER_FCOE_ETH_STOP_SCHED,
#ifdef ECORE_CHAR_DEV /* ! ECORE_UPSTREAM */
	ECORE_FILTER_BYPASS_RX_MODE_PENDING,
	ECORE_FILTER_BYPASS_MAC_PENDING,
	ECORE_FILTER_BYPASS_RSS_CONF_PENDING,
#endif
	ECORE_FILTER_MCAST_PENDING,
	ECORE_FILTER_MCAST_SCHED,
	ECORE_FILTER_RSS_CONF_PENDING,
	ECORE_AFEX_FCOE_Q_UPDATE_PENDING,
	ECORE_AFEX_PENDING_VIFSET_MCP_ACK
};
124 
/* Common state shared by all the configuration objects below: queue
 * identity, the ramrod data buffer, and pending-state bookkeeping.
 */
struct ecore_raw_obj {
	u8		func_id;

	/* Queue params */
	u8		cl_id;
	u32		cid;

	/* Ramrod data buffer params */
	void		*rdata;
	lm_address_t	rdata_mapping;	/* DMA address of rdata */

	/* Ramrod state params */
	int		state;   /* "ramrod is pending" state bit */
	unsigned long	*pstate; /* pointer to state buffer */

	ecore_obj_type	obj_type;

	/* Sleeps until the pending state bit is cleared. */
	int (*wait_comp)(struct _lm_device_t *pdev,
			 struct ecore_raw_obj *o);

	/* Test / clear / set the 'state' bit in *pstate. */
	BOOL (*check_pending)(struct ecore_raw_obj *o);
	void (*clear_pending)(struct ecore_raw_obj *o);
	void (*set_pending)(struct ecore_raw_obj *o);
};
149 
/************************* VLAN-MAC commands related parameters ***************/
struct ecore_mac_ramrod_data {
	u8 mac[ETH_ALEN];
	u8 is_inner_mac;	/* classify on the inner (tunneled) MAC */
};

struct ecore_vlan_ramrod_data {
	u16 vlan;
};

/* Paired VLAN+MAC classification rule. */
struct ecore_vlan_mac_ramrod_data {
	u8 mac[ETH_ALEN];
	u8 is_inner_mac;
	u16 vlan;
};

/* One of the three classification rule payloads above. */
union ecore_classification_ramrod_data {
	struct ecore_mac_ramrod_data mac;
	struct ecore_vlan_ramrod_data vlan;
	struct ecore_vlan_mac_ramrod_data vlan_mac;
};

/* VLAN_MAC commands */
enum ecore_vlan_mac_cmd {
	ECORE_VLAN_MAC_ADD,
	ECORE_VLAN_MAC_DEL,
	ECORE_VLAN_MAC_MOVE,
};

struct ecore_vlan_mac_data {
	/* Requested command: ECORE_VLAN_MAC_XX */
	enum ecore_vlan_mac_cmd cmd;
	/* used to contain the data related vlan_mac_flags bits from
	 * ramrod parameters.
	 */
	unsigned long vlan_mac_flags;

	/* Needed for MOVE command */
	struct ecore_vlan_mac_obj *target_obj;

	union ecore_classification_ramrod_data u;
};
192 
/*************************** Exe Queue obj ************************************/
/* Per-command payload carried by an execution queue element. */
union ecore_exe_queue_cmd_data {
	struct ecore_vlan_mac_data vlan_mac;

	struct {
		/* TODO */
#ifndef ECORE_ERASE
		int TODO;
#endif
	} mcast;
};

/* A single command queued for execution. */
struct ecore_exeq_elem {
	d_list_entry_t		link;

	/* Length of this element in the exe_chunk. */
	int				cmd_len;

	union ecore_exe_queue_cmd_data	cmd_data;
};

union ecore_qable_obj;

/* Completion event handed back when a ramrod completes. */
union ecore_exeq_comp_elem {
	union event_ring_elem *elem;
};

struct ecore_exe_queue_obj;

/* Owner-implemented callbacks of an execution queue; their contracts are
 * documented on the corresponding members of struct ecore_exe_queue_obj.
 */
typedef int (*exe_q_validate)(struct _lm_device_t *pdev,
			      union ecore_qable_obj *o,
			      struct ecore_exeq_elem *elem);

typedef int (*exe_q_remove)(struct _lm_device_t *pdev,
			    union ecore_qable_obj *o,
			    struct ecore_exeq_elem *elem);

/* Return positive if entry was optimized, 0 - if not, negative
 * in case of an error.
 */
typedef int (*exe_q_optimize)(struct _lm_device_t *pdev,
			      union ecore_qable_obj *o,
			      struct ecore_exeq_elem *elem);
typedef int (*exe_q_execute)(struct _lm_device_t *pdev,
			     union ecore_qable_obj *o,
			     d_list_t *exe_chunk,
			     unsigned long *ramrod_flags);
typedef struct ecore_exeq_elem *
			(*exe_q_get)(struct ecore_exe_queue_obj *o,
				     struct ecore_exeq_elem *elem);
243 
/* Generic queue of slow-path commands: commands are queued, optionally
 * optimized away, executed in chunks and tracked until completion.
 */
struct ecore_exe_queue_obj {
	/* Commands pending for an execution. */
	d_list_t	exe_queue;

	/* Commands pending for a completion. */
	d_list_t	pending_comp;

	mm_spin_lock_t		lock;

	/* Maximum length of commands' list for one execution */
	int			exe_chunk_len;

	/* The object (e.g. a vlan_mac object) this queue belongs to. */
	union ecore_qable_obj	*owner;

	/****** Virtual functions ******/
	/**
	 * Called before commands execution for commands that are really
	 * going to be executed (after 'optimize').
	 *
	 * Must run under exe_queue->lock
	 */
	exe_q_validate		validate;

	/**
	 * Called before removing pending commands, cleaning allocated
	 * resources (e.g., credits from validate)
	 */
	 exe_q_remove		remove;

	/**
	 * This will try to cancel the current pending commands list
	 * considering the new command.
	 *
	 * Returns the number of optimized commands or a negative error code
	 *
	 * Must run under exe_queue->lock
	 */
	exe_q_optimize		optimize;

	/**
	 * Run the next commands chunk (owner specific).
	 */
	exe_q_execute		execute;

	/**
	 * Return the exe_queue element containing the specific command
	 * if any. Otherwise return NULL.
	 */
	exe_q_get		get;
};
/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
/*
 * Element in the VLAN_MAC registry list having all current configured
 * rules.
 */
struct ecore_vlan_mac_registry_elem {
	d_list_entry_t	link;

	/* Used to store the cam offset used for the mac/vlan/vlan-mac.
	 * Relevant for 57710 and 57711 only. VLANs and MACs share the
	 * same CAM for these chips.
	 */
	int			cam_offset;

	/* Needed for DEL and RESTORE flows */
	unsigned long		vlan_mac_flags;

	union ecore_classification_ramrod_data u;
};

/* Bits representing VLAN_MAC commands specific flags */
enum {
	ECORE_UC_LIST_MAC,
	ECORE_ETH_MAC,
	ECORE_ISCSI_ETH_MAC,
	ECORE_NETQ_ETH_MAC,
	ECORE_DONT_CONSUME_CAM_CREDIT,
	ECORE_DONT_CONSUME_CAM_CREDIT_DEST,
};
/* When looking for matching filters, some flags are not interesting */
#define ECORE_VLAN_MAC_CMP_MASK	(1 << ECORE_UC_LIST_MAC | \
				 1 << ECORE_ETH_MAC | \
				 1 << ECORE_ISCSI_ETH_MAC | \
				 1 << ECORE_NETQ_ETH_MAC)
/* Reduce a vlan_mac_flags word to only the bits relevant for matching. */
#define ECORE_VLAN_MAC_CMP_FLAGS(flags) \
	((flags) & ECORE_VLAN_MAC_CMP_MASK)
330 
/* Caller-supplied parameters for a single VLAN/MAC configuration request. */
struct ecore_vlan_mac_ramrod_params {
	/* Object to run the command from */
	struct ecore_vlan_mac_obj *vlan_mac_obj;

	/* General command flags: COMP_WAIT, etc. */
	unsigned long ramrod_flags;

	/* Command specific configuration request */
	struct ecore_vlan_mac_data user_req;
};
341 
/* Classification object: manages the set of configured MAC/VLAN/VLAN-MAC
 * rules for one client via an execution queue and a registry list.
 */
struct ecore_vlan_mac_obj {
	struct ecore_raw_obj raw;

	/* Bookkeeping list: will prevent the addition of already existing
	 * entries.
	 */
	d_list_t		head;
	/* Implement a simple reader/writer lock on the head list.
	 * all these fields should only be accessed under the exe_queue lock
	 */
	u8		head_reader; /* Num. of readers accessing head list */
	BOOL		head_exe_request; /* Pending execution request. */
	unsigned long	saved_ramrod_flags; /* Ramrods of pending execution */

	/* Execution queue interface instance */
	struct ecore_exe_queue_obj	exe_queue;

	/* MACs credit pool */
	struct ecore_credit_pool_obj	*macs_pool;

	/* VLANs credit pool */
	struct ecore_credit_pool_obj	*vlans_pool;

	/* RAMROD command to be used */
	int				ramrod_cmd;

	/* copy first n elements onto preallocated buffer
	 *
	 * @param n number of elements to get
	 * @param buf buffer preallocated by caller into which elements
	 *            will be copied. Note elements are 4-byte aligned
	 *            so buffer size must be able to accommodate the
	 *            aligned elements.
	 *
	 * @return number of copied bytes
	 */

	int (*get_n_elements)(struct _lm_device_t *pdev,
			      struct ecore_vlan_mac_obj *o, int n, u8 *base,
			      u8 stride, u8 size);

	/**
	 * Checks if ADD-ramrod with the given params may be performed.
	 *
	 * @return zero if the element may be added
	 */

	int (*check_add)(struct _lm_device_t *pdev,
			 struct ecore_vlan_mac_obj *o,
			 union ecore_classification_ramrod_data *data);

	/**
	 * Checks if DEL-ramrod with the given params may be performed.
	 *
	 * @return the matching registry element if the element may be
	 *         deleted, NULL otherwise
	 */
	struct ecore_vlan_mac_registry_elem *
		(*check_del)(struct _lm_device_t *pdev,
			     struct ecore_vlan_mac_obj *o,
			     union ecore_classification_ramrod_data *data);

	/**
	 * Checks if MOVE-ramrod with the given params may be performed.
	 *
	 * @return TRUE if the element may be moved from src_o to dst_o
	 */
	BOOL (*check_move)(struct _lm_device_t *pdev,
			   struct ecore_vlan_mac_obj *src_o,
			   struct ecore_vlan_mac_obj *dst_o,
			   union ecore_classification_ramrod_data *data);

	/**
	 *  Update the relevant credit object(s) (consume/return
	 *  correspondingly).
	 */
	BOOL (*get_credit)(struct ecore_vlan_mac_obj *o);
	BOOL (*put_credit)(struct ecore_vlan_mac_obj *o);
	BOOL (*get_cam_offset)(struct ecore_vlan_mac_obj *o, int *offset);
	BOOL (*put_cam_offset)(struct ecore_vlan_mac_obj *o, int offset);

	/**
	 * Configures one rule in the ramrod data buffer.
	 */
	void (*set_one_rule)(struct _lm_device_t *pdev,
			     struct ecore_vlan_mac_obj *o,
			     struct ecore_exeq_elem *elem, int rule_idx,
			     int cam_offset);

	/**
	*  Delete all configured elements having the given
	*  vlan_mac_flags specification. Assumes no pending for
	*  execution commands. Will schedule all currently
	*  configured MACs/VLANs/VLAN-MACs matching the vlan_mac_flags
	*  specification for deletion and will use the given
	*  ramrod_flags for the last DEL operation.
	 *
	 * @param pdev
	 * @param o
	 * @param ramrod_flags RAMROD_XX flags
	 *
	 * @return 0 if the last operation has completed successfully
	 *         and there are no more elements left, positive value
	 *         if there are pending for completion commands,
	 *         negative value in case of failure.
	 */
	int (*delete_all)(struct _lm_device_t *pdev,
			  struct ecore_vlan_mac_obj *o,
			  unsigned long *vlan_mac_flags,
			  unsigned long *ramrod_flags);

	/**
	 * Reconfigures the next MAC/VLAN/VLAN-MAC element from the previously
	 * configured elements list.
	 *
	 * @param pdev
	 * @param p Command parameters (RAMROD_COMP_WAIT bit in
	 *          ramrod_flags is only taken into an account)
	 * @param ppos a pointer to the cookie that should be given back in the
	 *        next call to make function handle the next element. If
	 *        *ppos is set to NULL it will restart the iterator.
	 *        If returned *ppos == NULL this means that the last
	 *        element has been handled.
	 *
	 * @return int
	 */
	int (*restore)(struct _lm_device_t *pdev,
		       struct ecore_vlan_mac_ramrod_params *p,
		       struct ecore_vlan_mac_registry_elem **ppos);

	/**
	 * Should be called on a completion arrival.
	 *
	 * @param pdev
	 * @param o
	 * @param cqe Completion element we are handling
	 * @param ramrod_flags if RAMROD_CONT is set the next bulk of
	 *		       pending commands will be executed.
	 *		       RAMROD_DRV_CLR_ONLY and RAMROD_RESTORE
	 *		       may also be set if needed.
	 *
	 * @return 0 if there are neither pending nor waiting for
	 *         completion commands. Positive value if there are
	 *         pending for execution or for completion commands.
	 *         Negative value in case of an error (including an
	 *         error in the cqe).
	 */
	int (*complete)(struct _lm_device_t *pdev, struct ecore_vlan_mac_obj *o,
			union event_ring_elem *cqe,
			unsigned long *ramrod_flags);

	/**
	 * Wait for completion of all commands. Don't schedule new ones,
	 * just wait. It assumes that the completion code will schedule
	 * for new commands.
	 */
	int (*wait)(struct _lm_device_t *pdev, struct ecore_vlan_mac_obj *o);
};
499 
/* NIG LLH CAM line assignments (per-PF MAC filtering in the NIG block). */
enum {
	ECORE_LLH_CAM_ISCSI_ETH_LINE = 0,
	ECORE_LLH_CAM_ETH_LINE,
	ECORE_LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2
};

/* Add (add == TRUE) or remove a MAC at the given NIG LLH CAM line. */
void ecore_set_mac_in_nig(struct _lm_device_t *pdev,
			  BOOL add, unsigned char *dev_addr, int index);
508 
/** RX_MODE verbs:DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */

/* RX_MODE ramrod special flags: set in rx_mode_flags field in
 * a ecore_rx_mode_ramrod_params.
 */
enum {
	ECORE_RX_MODE_FCOE_ETH,
	ECORE_RX_MODE_ISCSI_ETH,
};

/* Accept-filter bits used in rx_accept_flags / tx_accept_flags. */
enum {
	ECORE_ACCEPT_UNICAST,
	ECORE_ACCEPT_MULTICAST,
	ECORE_ACCEPT_ALL_UNICAST,
	ECORE_ACCEPT_ALL_MULTICAST,
	ECORE_ACCEPT_BROADCAST,
	ECORE_ACCEPT_UNMATCHED,
	ECORE_ACCEPT_ANY_VLAN
};
528 
/* Parameters for an RX_MODE configuration ramrod. */
struct ecore_rx_mode_ramrod_params {
	struct ecore_rx_mode_obj *rx_mode_obj;
	unsigned long *pstate;		/* state buffer (see ecore_raw_obj) */
	int state;			/* pending-state bit index */
	u8 cl_id;
	u32 cid;
	u8 func_id;
	unsigned long ramrod_flags;	/* RAMROD_XX bits */
	unsigned long rx_mode_flags;	/* ECORE_RX_MODE_XX bits */

	/* rdata is either a pointer to eth_filter_rules_ramrod_data(e2) or to
	 * a tstorm_eth_mac_filter_config (e1x).
	 */
	void *rdata;
	lm_address_t rdata_mapping;

	/* Rx mode settings */
	unsigned long rx_accept_flags;

	/* internal switching settings */
	unsigned long tx_accept_flags;
};

struct ecore_rx_mode_obj {
	/* Sends the rx-mode configuration to the chip. */
	int (*config_rx_mode)(struct _lm_device_t *pdev,
			      struct ecore_rx_mode_ramrod_params *p);

	/* Waits for a previously issued rx-mode ramrod to complete. */
	int (*wait_comp)(struct _lm_device_t *pdev,
			 struct ecore_rx_mode_ramrod_params *p);
};
559 
/********************** Set multicast group ***********************************/

/* One multicast MAC in a caller-provided list. */
struct ecore_mcast_list_elem {
	d_list_entry_t link;
	u8 *mac;
};

union ecore_mcast_config_data {
	u8 *mac;
	u8 bin; /* used in a RESTORE flow */
};

struct ecore_mcast_ramrod_params {
	struct ecore_mcast_obj *mcast_obj;

	/* Relevant options are RAMROD_COMP_WAIT and RAMROD_DRV_CLR_ONLY */
	unsigned long ramrod_flags;

	d_list_t mcast_list; /* list of struct ecore_mcast_list_elem */
	/** TODO:
	 *      - rename it to macs_num.
	 *      - Add a new command type for handling pending commands
	 *        (remove "zero semantics").
	 *
	 *  Length of mcast_list. If zero and ADD_CONT command - post
	 *  pending commands.
	 */
	int mcast_list_len;
};

/* Multicast configuration commands. */
enum ecore_mcast_cmd {
	ECORE_MCAST_CMD_ADD,
	ECORE_MCAST_CMD_CONT,	/* continue posting pending commands */
	ECORE_MCAST_CMD_DEL,
	ECORE_MCAST_CMD_RESTORE,
};
596 
/* Multicast configuration object: keeps a registry of configured
 * bins/MACs and a list of pending commands.
 */
struct ecore_mcast_obj {
	struct ecore_raw_obj raw;

	union {
		/* Approximate-match registry: a bit vector of hashed bins. */
		struct {
		#define ECORE_MCAST_BINS_NUM	256
		#define ECORE_MCAST_VEC_SZ	(ECORE_MCAST_BINS_NUM / 64)
			u64 vec[ECORE_MCAST_VEC_SZ];

			/** Number of BINs to clear. Should be updated
			 *  immediately when a command arrives in order to
			 *  properly create DEL commands.
			 */
			int num_bins_set;
		} aprox_match;

		/* Exact-match registry: a list of configured MACs. */
		struct {
			d_list_t macs;
			int num_macs_set;
		} exact_match;
	} registry;

	/* Pending commands */
	d_list_t pending_cmds_head;

	/* A state that is set in raw.pstate, when there are pending commands */
	int sched_state;

	/* Maximal number of mcast MACs configured in one command */
	int max_cmd_len;

	/* Total number of currently pending MACs to configure: both
	 * in the pending commands list and in the current command.
	 */
	int total_pending_num;

	u8 engine_id;

	/**
	 * @param cmd command to execute (ECORE_MCAST_CMD_X, see above)
	 */
	int (*config_mcast)(struct _lm_device_t *pdev,
			    struct ecore_mcast_ramrod_params *p,
			    enum ecore_mcast_cmd cmd);

	/**
	 * Fills the ramrod data during the RESTORE flow.
	 *
	 * @param pdev
	 * @param o
	 * @param start_idx Registry index to start from
	 * @param rdata_idx Index in the ramrod data to start from
	 *
	 * @return -1 if we handled the whole registry or index of the last
	 *         handled registry element.
	 */
	int (*hdl_restore)(struct _lm_device_t *pdev, struct ecore_mcast_obj *o,
			   int start_bin, int *rdata_idx);

	/* Adds the command to the pending commands list. */
	int (*enqueue_cmd)(struct _lm_device_t *pdev, struct ecore_mcast_obj *o,
			   struct ecore_mcast_ramrod_params *p,
			   enum ecore_mcast_cmd cmd);

	/* Writes one rule into the ramrod data buffer at index idx. */
	void (*set_one_rule)(struct _lm_device_t *pdev,
			     struct ecore_mcast_obj *o, int idx,
			     union ecore_mcast_config_data *cfg_data,
			     enum ecore_mcast_cmd cmd);

	/** Checks if there are more mcast MACs to be set or a previous
	 *  command is still pending.
	 */
	BOOL (*check_pending)(struct ecore_mcast_obj *o);

	/**
	 * Set/Clear/Check SCHEDULED state of the object
	 */
	void (*set_sched)(struct ecore_mcast_obj *o);
	void (*clear_sched)(struct ecore_mcast_obj *o);
	BOOL (*check_sched)(struct ecore_mcast_obj *o);

	/* Wait until all pending commands complete */
	int (*wait_comp)(struct _lm_device_t *pdev, struct ecore_mcast_obj *o);

	/**
	 * Handle the internal object counters needed for proper
	 * commands handling. Checks that the provided parameters are
	 * feasible.
	 */
	int (*validate)(struct _lm_device_t *pdev,
			struct ecore_mcast_ramrod_params *p,
			enum ecore_mcast_cmd cmd);

	/**
	 * Restore the values of internal counters in case of a failure.
	 */
	void (*revert)(struct _lm_device_t *pdev,
		       struct ecore_mcast_ramrod_params *p,
		       int old_num_bins);

	/* Accessors for the registry size (bins or MACs, per match mode). */
	int (*get_registry_size)(struct ecore_mcast_obj *o);
	void (*set_registry_size)(struct ecore_mcast_obj *o, int n);
};
699 
/*************************** Credit handling **********************************/
/* Tracks available CAM entries (credit) shared by classification objects. */
struct ecore_credit_pool_obj {

	/* Current amount of credit in the pool */
	atomic_t	credit;

	/* Maximum allowed credit. put() will check against it. */
	int		pool_sz;

	/* Allocate a pool table statically.
	 *
	 * Currently the maximum allowed size is MAX_MAC_CREDIT_E2(272)
	 *
	 * The set bit in the table will mean that the entry is available.
	 */
#define ECORE_POOL_VEC_SIZE	(MAX_MAC_CREDIT_E2 / 64)
	u64		pool_mirror[ECORE_POOL_VEC_SIZE];

	/* Base pool offset (initialized differently per pool user) */
	int		base_pool_offset;

	/**
	 * Get the next free pool entry.
	 *
	 * @return TRUE if there was a free entry in the pool
	 */
	BOOL (*get_entry)(struct ecore_credit_pool_obj *o, int *entry);

	/**
	 * Return the entry back to the pool.
	 *
	 * @return TRUE if entry is legal and has been successfully
	 *         returned to the pool.
	 */
	BOOL (*put_entry)(struct ecore_credit_pool_obj *o, int entry);

	/**
	 * Get the requested amount of credit from the pool.
	 *
	 * @param cnt Amount of requested credit
	 * @return TRUE if the operation is successful
	 */
	BOOL (*get)(struct ecore_credit_pool_obj *o, int cnt);

	/**
	 * Returns the credit to the pool.
	 *
	 * @param cnt Amount of credit to return
	 * @return TRUE if the operation is successful
	 */
	BOOL (*put)(struct ecore_credit_pool_obj *o, int cnt);

	/**
	 * Reads the current amount of credit.
	 */
	int (*check)(struct ecore_credit_pool_obj *o);
};
757 
/*************************** RSS configuration ********************************/
/* Bits used in ecore_config_rss_params.rss_flags. */
enum {
	/* RSS_MODE bits are mutually exclusive */
	ECORE_RSS_MODE_DISABLED,
	ECORE_RSS_MODE_REGULAR,

	ECORE_RSS_SET_SRCH, /* Setup searcher, E1x specific flag */

	ECORE_RSS_IPV4,
	ECORE_RSS_IPV4_TCP,
	ECORE_RSS_IPV4_UDP,
	ECORE_RSS_IPV6,
	ECORE_RSS_IPV6_TCP,
	ECORE_RSS_IPV6_UDP,

#if defined(__VMKLNX__) && (VMWARE_ESX_DDK_VERSION < 55000) /* ! BNX2X_UPSTREAM */
	ECORE_RSS_MODE_ESX51,
#endif
	ECORE_RSS_IPV4_VXLAN,
	ECORE_RSS_IPV6_VXLAN,
	ECORE_RSS_NVGRE_KEY_ENTROPY,
	ECORE_RSS_GRE_INNER_HDRS,
};

struct ecore_config_rss_params {
	struct ecore_rss_config_obj *rss_obj;

	/* may have RAMROD_COMP_WAIT set only */
	unsigned long	ramrod_flags;

	/* ECORE_RSS_X bits */
	unsigned long	rss_flags;

	/* Number hash bits to take into an account */
	u8		rss_result_mask;

	/* Indirection table */
	u8		ind_table[T_ETH_INDIRECTION_TABLE_SIZE];

	/* RSS hash values */
	u32		rss_key[10];

	/* valid only iff ECORE_RSS_UPDATE_TOE is set */
	u16		toe_rss_bitmap;
};

struct ecore_rss_config_obj {
	struct ecore_raw_obj	raw;

	/* RSS engine to use */
	u8			engine_id;

	/* Last configured indirection table */
	u8			ind_table[T_ETH_INDIRECTION_TABLE_SIZE];

	/* flags for enabling 4-tupple hash on UDP */
	u8			udp_rss_v4;
	u8			udp_rss_v6;

	/* Sends the RSS configuration to the chip. */
	int (*config_rss)(struct _lm_device_t *pdev,
			  struct ecore_config_rss_params *p);
};
820 
/*********************** Queue state update ***********************************/

/* UPDATE command options.
 * Each _CHNG bit states that the matching value bit carries a new setting;
 * without it the current setting is preserved.
 */
enum {
	ECORE_Q_UPDATE_IN_VLAN_REM,
	ECORE_Q_UPDATE_IN_VLAN_REM_CHNG,
	ECORE_Q_UPDATE_OUT_VLAN_REM,
	ECORE_Q_UPDATE_OUT_VLAN_REM_CHNG,
	ECORE_Q_UPDATE_ANTI_SPOOF,
	ECORE_Q_UPDATE_ANTI_SPOOF_CHNG,
	ECORE_Q_UPDATE_ACTIVATE,
	ECORE_Q_UPDATE_ACTIVATE_CHNG,
	ECORE_Q_UPDATE_DEF_VLAN_EN,
	ECORE_Q_UPDATE_DEF_VLAN_EN_CHNG,
	ECORE_Q_UPDATE_SILENT_VLAN_REM_CHNG,
	ECORE_Q_UPDATE_SILENT_VLAN_REM,
	ECORE_Q_UPDATE_TX_SWITCHING_CHNG,
	ECORE_Q_UPDATE_TX_SWITCHING,
	ECORE_Q_UPDATE_PTP_PKTS_CHNG,
	ECORE_Q_UPDATE_PTP_PKTS,
};

/* Allowed Queue states */
enum ecore_q_state {
	ECORE_Q_STATE_RESET,
	ECORE_Q_STATE_INITIALIZED,
	ECORE_Q_STATE_ACTIVE,
	ECORE_Q_STATE_MULTI_COS,
	ECORE_Q_STATE_MCOS_TERMINATED,
	ECORE_Q_STATE_INACTIVE,
	ECORE_Q_STATE_STOPPED,
	ECORE_Q_STATE_TERMINATED,
	ECORE_Q_STATE_FLRED,
	ECORE_Q_STATE_MAX,
};

/* Allowed Queue logical states (coarse active/stopped view) */
enum ecore_q_logical_state {
	ECORE_Q_LOGICAL_STATE_ACTIVE,
	ECORE_Q_LOGICAL_STATE_STOPPED,
};

/* Allowed commands */
enum ecore_queue_cmd {
	ECORE_Q_CMD_INIT,
	ECORE_Q_CMD_SETUP,
	ECORE_Q_CMD_SETUP_TX_ONLY,
	ECORE_Q_CMD_DEACTIVATE,
	ECORE_Q_CMD_ACTIVATE,
	ECORE_Q_CMD_UPDATE,
	ECORE_Q_CMD_UPDATE_TPA,
	ECORE_Q_CMD_HALT,
	ECORE_Q_CMD_CFC_DEL,
	ECORE_Q_CMD_TERMINATE,
	ECORE_Q_CMD_EMPTY,
	ECORE_Q_CMD_MAX,
};
878 
/* queue SETUP + INIT flags */
enum {
	ECORE_Q_FLG_TPA,
	ECORE_Q_FLG_TPA_IPV6,
	ECORE_Q_FLG_TPA_GRO,
	ECORE_Q_FLG_STATS,
#ifndef ECORE_UPSTREAM /* ! ECORE_UPSTREAM */
	ECORE_Q_FLG_VMQUEUE_MODE,
#endif
	ECORE_Q_FLG_ZERO_STATS,
	ECORE_Q_FLG_ACTIVE,
	ECORE_Q_FLG_OV,
	ECORE_Q_FLG_VLAN,
	ECORE_Q_FLG_COS,
	ECORE_Q_FLG_HC,
	ECORE_Q_FLG_HC_EN,
	ECORE_Q_FLG_DHC,
#ifdef ECORE_OOO /* ! ECORE_UPSTREAM */
	ECORE_Q_FLG_OOO,
#endif
	ECORE_Q_FLG_FCOE,
	ECORE_Q_FLG_LEADING_RSS,
	ECORE_Q_FLG_MCAST,
	ECORE_Q_FLG_DEF_VLAN,
	ECORE_Q_FLG_TX_SWITCH,
	ECORE_Q_FLG_TX_SEC,
	ECORE_Q_FLG_ANTI_SPOOF,
	ECORE_Q_FLG_SILENT_VLAN_REM,
	ECORE_Q_FLG_FORCE_DEFAULT_PRI,
	ECORE_Q_FLG_REFUSE_OUTBAND_VLAN,
	ECORE_Q_FLG_PCSUM_ON_PKT,
	ECORE_Q_FLG_TUN_INC_INNER_IP_ID
};

/* Queue type options: queue type may be a combination of below. */
enum ecore_q_type {
#ifdef ECORE_OOO /* ! ECORE_UPSTREAM */
	ECORE_Q_TYPE_FWD,
#endif
	/** TODO: Consider moving both these flags into the init()
	 *        ramrod params.
	 */
	ECORE_Q_TYPE_HAS_RX,
	ECORE_Q_TYPE_HAS_TX,
};
924 
#define ECORE_PRIMARY_CID_INDEX			0
#define ECORE_MULTI_TX_COS_E1X			3 /* QM only */
#define ECORE_MULTI_TX_COS_E2_E3A0		2
#define ECORE_MULTI_TX_COS_E3B0			3
#define ECORE_MULTI_TX_COS			3 /* Maximum possible */
/* Pad bytes needed to round an ETH MAC address up to a u32 boundary. */
#define MAC_PAD (ECORE_ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
/* DMAE channel to be used by FW for timesync workaround. A driver that sends
 * timesync-related ramrods must not use this DMAE command ID.
 */
#define FW_DMAE_CMD_ID 6
935 
/* Parameters for the INIT command (per-direction flags and HC settings). */
struct ecore_queue_init_params {
	struct {
		unsigned long	flags;		/* ECORE_Q_FLG_XX bits */
		u16		hc_rate;	/* host coalescing rate */
		u8		fw_sb_id;	/* FW status block id */
		u8		sb_cq_index;
	} tx;

	struct {
		unsigned long	flags;		/* ECORE_Q_FLG_XX bits */
		u16		hc_rate;	/* host coalescing rate */
		u8		fw_sb_id;	/* FW status block id */
		u8		sb_cq_index;
	} rx;

	/* CID context in the host memory */
	struct eth_context *cxts[ECORE_MULTI_TX_COS];

	/* maximum number of cos supported by hardware */
	u8 max_cos;
};

struct ecore_queue_terminate_params {
	/* index within the tx_only cids of this queue object */
	u8 cid_index;
};

struct ecore_queue_cfc_del_params {
	/* index within the tx_only cids of this queue object */
	u8 cid_index;
};

/* Parameters for the UPDATE command. */
struct ecore_queue_update_params {
	unsigned long	update_flags; /* ECORE_Q_UPDATE_XX bits */
	u16		def_vlan;
	u16		silent_removal_value;
	u16		silent_removal_mask;
/* index within the tx_only cids of this queue object */
	u8		cid_index;
};

/* Parameters for the UPDATE_TPA command (aggregation settings). */
struct ecore_queue_update_tpa_params {
	lm_address_t sge_map;		/* DMA address of the SGE ring */
	u8 update_ipv4;
	u8 update_ipv6;
	u8 max_tpa_queues;
	u8 max_sges_pkt;
	u8 complete_on_both_clients;
	u8 dont_verify_thr;
	u8 tpa_mode;
	u8 _pad;

	u16 sge_buff_sz;
	u16 max_agg_sz;

	u16 sge_pause_thr_low;
	u16 sge_pause_thr_high;
};
994 
/* Rx ring pause thresholds (low/high watermarks per ring type). */
struct rxq_pause_params {
	u16		bd_th_lo;
	u16		bd_th_hi;
	u16		rcq_th_lo;
	u16		rcq_th_hi;
	u16		sge_th_lo; /* valid iff ECORE_Q_FLG_TPA */
	u16		sge_th_hi; /* valid iff ECORE_Q_FLG_TPA */
	u16		pri_map;
};
1004 
/* general */
struct ecore_general_setup_params {
	/* valid iff ECORE_Q_FLG_STATS */
	u8		stat_id;

	u8		spcl_id;
	u16		mtu;
	u8		cos;
};

/* Rx-side SETUP parameters: ring DMA addresses and TPA settings. */
struct ecore_rxq_setup_params {
	/* dma */
	lm_address_t	dscr_map;	/* Rx BD ring */
	lm_address_t	sge_map;	/* SGE ring */
	lm_address_t	rcq_map;	/* Rx completion queue */
	lm_address_t	rcq_np_map;	/* RCQ next-page table */

	u16		drop_flags;
	u16		buf_sz;
	u8		fw_sb_id;
	u8		cl_qzone_id;

	/* valid iff ECORE_Q_FLG_TPA */
	u16		tpa_agg_sz;
	u16		sge_buf_sz;
	u8		max_sges_pkt;
	u8		max_tpa_queues;
	u8		rss_engine_id;

	/* valid iff ECORE_Q_FLG_MCAST */
	u8		mcast_engine_id;

	u8		cache_line_log;

	u8		sb_cq_index;

	/* valid iff BXN2X_Q_FLG_SILENT_VLAN_REM */
	u16 silent_removal_value;
	u16 silent_removal_mask;
};

/* Tx-side SETUP parameters. */
struct ecore_txq_setup_params {
	/* dma */
	lm_address_t	dscr_map;	/* Tx BD ring */

	u8		fw_sb_id;
	u8		sb_cq_index;
	u8		cos;		/* valid iff ECORE_Q_FLG_COS */
	u16		traffic_type;
	/* equals to the leading rss client id, used for TX classification*/
	u8		tss_leading_cl_id;

	/* valid iff ECORE_Q_FLG_DEF_VLAN */
	u16		default_vlan;
};

/* Full SETUP parameters for a queue with both Rx and Tx sides. */
struct ecore_queue_setup_params {
	struct ecore_general_setup_params gen_params;
	struct ecore_txq_setup_params txq_params;
	struct ecore_rxq_setup_params rxq_params;
	struct rxq_pause_params pause_params;
	unsigned long flags;	/* ECORE_Q_FLG_XX bits */
};

/* SETUP parameters for an additional tx-only connection. */
struct ecore_queue_setup_tx_only_params {
	struct ecore_general_setup_params	gen_params;
	struct ecore_txq_setup_params		txq_params;
	unsigned long				flags;
	/* index within the tx_only cids of this queue object */
	u8					cid_index;
};
1076 
/* A single queue state-change request: the target object, the command
 * to execute and that command's specific parameters.
 */
struct ecore_queue_state_params {
	struct ecore_queue_sp_obj *q_obj;

	/* Current command */
	enum ecore_queue_cmd cmd;

	/* may have RAMROD_COMP_WAIT set only */
	unsigned long ramrod_flags;

	/* Params according to the current command */
	union {
		struct ecore_queue_update_params	update;
		struct ecore_queue_update_tpa_params    update_tpa;
		struct ecore_queue_setup_params		setup;
		struct ecore_queue_init_params		init;
		struct ecore_queue_setup_tx_only_params	tx_only;
		struct ecore_queue_terminate_params	terminate;
		struct ecore_queue_cfc_del_params	cfc_del;
	} params;
};
1097 
/* Results of an AFEX VIF-list operation -- presumably copied from the
 * ramrod completion (see ECORE_F_CMD_AFEX_VIFLISTS); confirm in the
 * implementation.
 */
struct ecore_viflist_params {
	u8 echo_res;
	u8 func_bit_map_res;
};
1102 
1103 struct ecore_queue_sp_obj {
1104 	u32		cids[ECORE_MULTI_TX_COS];
1105 	u8		cl_id;
1106 	u8		func_id;
1107 
1108 	/* number of traffic classes supported by queue.
1109 	 * The primary connection of the queue supports the first traffic
1110 	 * class. Any further traffic class is supported by a tx-only
1111 	 * connection.
1112 	 *
1113 	 * Therefore max_cos is also a number of valid entries in the cids
1114 	 * array.
1115 	 */
1116 	u8 max_cos;
1117 	u8 num_tx_only, next_tx_only;
1118 
1119 	enum ecore_q_state state, next_state;
1120 
1121 	/* bits from enum ecore_q_type */
1122 	unsigned long	type;
1123 
1124 	/* ECORE_Q_CMD_XX bits. This object implements "one
1125 	 * pending" paradigm but for debug and tracing purposes it's
1126 	 * more convenient to have different bits for different
1127 	 * commands.
1128 	 */
1129 	unsigned long	pending;
1130 
1131 	/* Buffer to use as a ramrod data and its mapping */
1132 	void		*rdata;
1133 	lm_address_t	rdata_mapping;
1134 
1135 	/**
1136 	 * Performs one state change according to the given parameters.
1137 	 *
1138 	 * @return 0 in case of success and negative value otherwise.
1139 	 */
1140 	int (*send_cmd)(struct _lm_device_t *pdev,
1141 			struct ecore_queue_state_params *params);
1142 
1143 	/**
1144 	 * Sets the pending bit according to the requested transition.
1145 	 */
1146 	int (*set_pending)(struct ecore_queue_sp_obj *o,
1147 			   struct ecore_queue_state_params *params);
1148 
1149 	/**
1150 	 * Checks that the requested state transition is legal.
1151 	 */
1152 	int (*check_transition)(struct _lm_device_t *pdev,
1153 				struct ecore_queue_sp_obj *o,
1154 				struct ecore_queue_state_params *params);
1155 
1156 	/**
1157 	 * Completes the pending command.
1158 	 */
1159 	int (*complete_cmd)(struct _lm_device_t *pdev,
1160 			    struct ecore_queue_sp_obj *o,
1161 			    enum ecore_queue_cmd);
1162 
1163 	int (*wait_comp)(struct _lm_device_t *pdev,
1164 			 struct ecore_queue_sp_obj *o,
1165 			 enum ecore_queue_cmd cmd);
1166 };
1167 
1168 /********************** Function state update *********************************/
1169 
/* UPDATE command option bits, set in
 * ecore_func_switch_update_params.changes (the *_CHNG bits mark which
 * fields are being changed; the companion bits carry boolean values).
 */
enum {
	ECORE_F_UPDATE_TX_SWITCH_SUSPEND_CHNG,
	ECORE_F_UPDATE_TX_SWITCH_SUSPEND,
	ECORE_F_UPDATE_SD_VLAN_TAG_CHNG,
	ECORE_F_UPDATE_SD_VLAN_ETH_TYPE_CHNG,
	ECORE_F_UPDATE_VLAN_FORCE_PRIO_CHNG,
	ECORE_F_UPDATE_VLAN_FORCE_PRIO_FLAG,
	ECORE_F_UPDATE_TUNNEL_CFG_CHNG,
	ECORE_F_UPDATE_TUNNEL_CLSS_EN,
	ECORE_F_UPDATE_TUNNEL_INNER_GRE_RSS_EN,
};
1182 
/* Allowed Function states; transitions are driven by ecore_func_cmd
 * commands through ecore_func_state_change().
 */
enum ecore_func_state {
	ECORE_F_STATE_RESET,
	ECORE_F_STATE_INITIALIZED,
	ECORE_F_STATE_STARTED,
	ECORE_F_STATE_TX_STOPPED,
	ECORE_F_STATE_MAX,
};
1191 
/* Allowed Function commands; each command's parameters live in the
 * matching member of ecore_func_state_params.params.
 */
enum ecore_func_cmd {
	ECORE_F_CMD_HW_INIT,
	ECORE_F_CMD_START,
	ECORE_F_CMD_STOP,
	ECORE_F_CMD_HW_RESET,
	ECORE_F_CMD_AFEX_UPDATE,
	ECORE_F_CMD_AFEX_VIFLISTS,
	ECORE_F_CMD_TX_STOP,
	ECORE_F_CMD_TX_START,
	ECORE_F_CMD_SWITCH_UPDATE,
	ECORE_F_CMD_SET_TIMESYNC,
	ECORE_F_CMD_MAX,
};
1206 
/* Parameters for ECORE_F_CMD_HW_INIT. */
struct ecore_func_hw_init_params {
	/* A load phase returned by MCP.
	 *
	 * May be:
	 *		FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
	 *		FW_MSG_CODE_DRV_LOAD_COMMON
	 *		FW_MSG_CODE_DRV_LOAD_PORT
	 *		FW_MSG_CODE_DRV_LOAD_FUNCTION
	 */
	u32 load_phase;
};
1218 
/* Parameters for ECORE_F_CMD_HW_RESET. */
struct ecore_func_hw_reset_params {
	/* A load phase returned by MCP.
	 *
	 * May be:
	 *		FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
	 *		FW_MSG_CODE_DRV_LOAD_COMMON
	 *		FW_MSG_CODE_DRV_LOAD_PORT
	 *		FW_MSG_CODE_DRV_LOAD_FUNCTION
	 *
	 * NOTE(review): same encoding as hw_init.load_phase; here it
	 * presumably selects how much of the HW to reset -- confirm in
	 * the reset_hw_* driver callbacks.
	 */
	u32 reset_phase;
};
1230 
/* Parameters for ECORE_F_CMD_START. */
struct ecore_func_start_params {
	/* Multi Function mode:
	 *	- Single Function
	 *	- Switch Dependent
	 *	- Switch Independent
	 */
	u16 mf_mode;

	/* Switch Dependent mode outer VLAN tag */
	u16 sd_vlan_tag;

	/* Function cos mode */
	u8 network_cos_mode;

	/* TUNN_MODE_NONE/TUNN_MODE_VXLAN/TUNN_MODE_GRE */
	u8 tunnel_mode;

	/* tunneling classification enablement */
	u8 tunn_clss_en;

	/* NVGRE_TUNNEL/L2GRE_TUNNEL/IPGRE_TUNNEL */
	u8 gre_tunnel_type;

	/* Enables Inner GRE RSS on the function, depends on the client RSS
	 * capabilities
	 */
	u8 inner_gre_rss_en;

	/* UDP dest port for VXLAN */
	u16 vxlan_dst_port;

	/** Allows accepting of packets failing MF classification, possibly
	 * only matching a given ethertype
	 */
	u8 class_fail;
	u16 class_fail_ethtype;

	/* Override priority of output packets */
	u8 sd_vlan_force_pri;
	u8 sd_vlan_force_pri_val;

	/* Replace vlan's ethertype */
	u16 sd_vlan_eth_type;

	/* Prevent inner vlans from being added by FW */
	u8 no_added_tags;
};
1278 
/* Parameters for ECORE_F_CMD_SWITCH_UPDATE; 'changes' selects which of
 * the remaining fields take effect.
 */
struct ecore_func_switch_update_params {
	unsigned long changes; /* ECORE_F_UPDATE_XX bits */
	u16 vlan;
	u16 vlan_eth_type;
	u8 vlan_force_prio;
	u8 tunnel_mode;
	u8 gre_tunnel_type;
	u16 vxlan_dst_port;

};
1289 
/* Parameters for ECORE_F_CMD_AFEX_UPDATE. */
struct ecore_func_afex_update_params {
	u16 vif_id;
	u16 afex_default_vlan;
	u8 allowed_priorities;
};
1295 
/* Parameters for ECORE_F_CMD_AFEX_VIFLISTS. */
struct ecore_func_afex_viflists_params {
	u16 vif_list_index;
	u8 func_bit_map;
	u8 afex_vif_list_command;
	u8 func_to_clear;
};
1302 
/* Parameters for ECORE_F_CMD_TX_START (DCB traffic-class mapping). */
struct ecore_func_tx_start_params {
	struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES];
	u8 dcb_enabled;
	u8 dcb_version;
	u8 dont_add_pri_0_en;
};
1309 
/* Parameters for ECORE_F_CMD_SET_TIMESYNC. */
struct ecore_func_set_timesync_params {
	/* Reset, set or keep the current drift value */
	u8 drift_adjust_cmd;
	/* Dec, inc or keep the current offset */
	u8 offset_cmd;
	/* Drift value direction */
	u8 add_sub_drift_adjust_value;
	/* Drift, period and offset values to be used according to the commands
	 * above.
	 */
	u8 drift_adjust_value;
	u32 drift_adjust_period;
	u64 offset_delta;
};
1324 
/* A single function state-change request: the target object, the
 * command to execute and that command's specific parameters.
 */
struct ecore_func_state_params {
	struct ecore_func_sp_obj *f_obj;

	/* Current command */
	enum ecore_func_cmd cmd;

	/* may have RAMROD_COMP_WAIT set only */
	unsigned long	ramrod_flags;

	/* Params according to the current command */
	union {
		struct ecore_func_hw_init_params hw_init;
		struct ecore_func_hw_reset_params hw_reset;
		struct ecore_func_start_params start;
		struct ecore_func_switch_update_params switch_update;
		struct ecore_func_afex_update_params afex_update;
		struct ecore_func_afex_viflists_params afex_viflists;
		struct ecore_func_tx_start_params tx_start;
		struct ecore_func_set_timesync_params set_timesync;
	} params;
};
1346 
/* Driver-supplied callback table used by the function state machine
 * for HW init/reset and FW resource management.
 */
struct ecore_func_sp_drv_ops {
	/* Init tool + runtime initialization:
	 *      - Common Chip
	 *      - Common (per Path)
	 *      - Port
	 *      - Function phases
	 */
	int (*init_hw_cmn_chip)(struct _lm_device_t *pdev);
	int (*init_hw_cmn)(struct _lm_device_t *pdev);
	int (*init_hw_port)(struct _lm_device_t *pdev);
	int (*init_hw_func)(struct _lm_device_t *pdev);

	/* Reset Function HW: Common, Port, Function phases. */
	void (*reset_hw_cmn)(struct _lm_device_t *pdev);
	void (*reset_hw_port)(struct _lm_device_t *pdev);
	void (*reset_hw_func)(struct _lm_device_t *pdev);

	/* Init/Free GUNZIP resources */
	int (*gunzip_init)(struct _lm_device_t *pdev);
	void (*gunzip_end)(struct _lm_device_t *pdev);

	/* Prepare/Release FW resources */
	int (*init_fw)(struct _lm_device_t *pdev);
	void (*release_fw)(struct _lm_device_t *pdev);
};
1372 
/* Slow-path state object for the whole function: current/next state,
 * the single pending command, the ramrod buffers (regular and AFEX),
 * the driver callback table and the state-machine callbacks.
 */
struct ecore_func_sp_obj {
	enum ecore_func_state	state, next_state;

	/* ECORE_FUNC_CMD_XX bits. This object implements "one
	 * pending" paradigm but for debug and tracing purposes it's
	 * more convenient to have different bits for different
	 * commands.
	 */
	unsigned long		pending;

	/* Buffer to use as a ramrod data and its mapping */
	void			*rdata;
	lm_address_t		rdata_mapping;

	/* Buffer to use as a afex ramrod data and its mapping.
	 * This can't be same rdata as above because afex ramrod requests
	 * can arrive to the object in parallel to other ramrod requests.
	 */
	void			*afex_rdata;
	lm_address_t		afex_rdata_mapping;

	/* this mutex validates that when pending flag is taken, the next
	 * ramrod to be sent will be the one set the pending bit
	 */
	ECORE_MUTEX		one_pending_mutex;

	/* Driver interface */
	struct ecore_func_sp_drv_ops	*drv;

	/**
	 * Performs one state change according to the given parameters.
	 *
	 * @return 0 in case of success and negative value otherwise.
	 */
	int (*send_cmd)(struct _lm_device_t *pdev,
			struct ecore_func_state_params *params);

	/**
	 * Checks that the requested state transition is legal.
	 */
	int (*check_transition)(struct _lm_device_t *pdev,
				struct ecore_func_sp_obj *o,
				struct ecore_func_state_params *params);

	/**
	 * Completes the pending command.
	 */
	int (*complete_cmd)(struct _lm_device_t *pdev,
			    struct ecore_func_sp_obj *o,
			    enum ecore_func_cmd cmd);

	/**
	 * Waits until the pending command completes.
	 */
	int (*wait_comp)(struct _lm_device_t *pdev, struct ecore_func_sp_obj *o,
			 enum ecore_func_cmd cmd);
};
1427 
1428 /********************** Interfaces ********************************************/
/* Queueable objects set: objects whose commands may be queued for
 * deferred execution.
 */
union ecore_qable_obj {
	struct ecore_vlan_mac_obj vlan_mac;
};
/************** Function state update *********/
/* Initialize a function state object with its ramrod data buffers
 * (regular and AFEX) and the driver callback table.
 */
void ecore_init_func_obj(struct _lm_device_t *pdev,
			 struct ecore_func_sp_obj *obj,
			 void *rdata, lm_address_t rdata_mapping,
			 void *afex_rdata, lm_address_t afex_rdata_mapping,
			 struct ecore_func_sp_drv_ops *drv_iface);

/* Perform one function state transition described by 'params'.
 * Return: presumably an ecore status (0 success, ECORE_PENDING if a
 * completion is still outstanding, negative on error) -- confirm in
 * the implementation.
 */
int ecore_func_state_change(struct _lm_device_t *pdev,
			    struct ecore_func_state_params *params);

/* Report the function object's current state. */
enum ecore_func_state ecore_func_get_state(struct _lm_device_t *pdev,
					   struct ecore_func_sp_obj *o);
/******************* Queue State **************/
/* Initialize a queue state object; 'cids' holds 'cid_cnt' connection
 * ids (primary first, then tx-only), 'type' carries ecore_q_type bits.
 */
void ecore_init_queue_obj(struct _lm_device_t *pdev,
			  struct ecore_queue_sp_obj *obj, u8 cl_id, u32 *cids,
			  u8 cid_cnt, u8 func_id, void *rdata,
			  lm_address_t rdata_mapping, unsigned long type);

/* Perform one queue state transition described by 'params'. */
int ecore_queue_state_change(struct _lm_device_t *pdev,
			     struct ecore_queue_state_params *params);

/* Query the queue's logical state -- exact return encoding is defined
 * by the implementation.
 */
int ecore_get_q_logical_state(struct _lm_device_t *pdev,
			       struct ecore_queue_sp_obj *obj);
1456 
/********************* VLAN-MAC ****************/
/* Initialize a MAC classification object drawing credits from
 * 'macs_pool'.
 */
void ecore_init_mac_obj(struct _lm_device_t *pdev,
			struct ecore_vlan_mac_obj *mac_obj,
			u8 cl_id, u32 cid, u8 func_id, void *rdata,
			lm_address_t rdata_mapping, int state,
			unsigned long *pstate, ecore_obj_type type,
			struct ecore_credit_pool_obj *macs_pool);

/* Initialize a VLAN classification object drawing credits from
 * 'vlans_pool'.
 */
void ecore_init_vlan_obj(struct _lm_device_t *pdev,
			 struct ecore_vlan_mac_obj *vlan_obj,
			 u8 cl_id, u32 cid, u8 func_id, void *rdata,
			 lm_address_t rdata_mapping, int state,
			 unsigned long *pstate, ecore_obj_type type,
			 struct ecore_credit_pool_obj *vlans_pool);

/* Initialize a paired VLAN+MAC classification object drawing credits
 * from both pools.
 */
void ecore_init_vlan_mac_obj(struct _lm_device_t *pdev,
			     struct ecore_vlan_mac_obj *vlan_mac_obj,
			     u8 cl_id, u32 cid, u8 func_id, void *rdata,
			     lm_address_t rdata_mapping, int state,
			     unsigned long *pstate, ecore_obj_type type,
			     struct ecore_credit_pool_obj *macs_pool,
			     struct ecore_credit_pool_obj *vlans_pool);

/* Reader/writer access protection for the object's internal lists;
 * locking discipline is defined by the implementation.
 */
int ecore_vlan_mac_h_read_lock(struct _lm_device_t *pdev,
					struct ecore_vlan_mac_obj *o);
void ecore_vlan_mac_h_read_unlock(struct _lm_device_t *pdev,
				  struct ecore_vlan_mac_obj *o);
int ecore_vlan_mac_h_write_lock(struct _lm_device_t *pdev,
				struct ecore_vlan_mac_obj *o);
void ecore_vlan_mac_h_write_unlock(struct _lm_device_t *pdev,
					  struct ecore_vlan_mac_obj *o);
/* Execute a vlan/mac configuration command described by 'p'. */
int ecore_config_vlan_mac(struct _lm_device_t *pdev,
			   struct ecore_vlan_mac_ramrod_params *p);

/* Move a vlan/mac entry from the object referenced in 'p' to 'dest_o'. */
int ecore_vlan_mac_move(struct _lm_device_t *pdev,
			struct ecore_vlan_mac_ramrod_params *p,
			struct ecore_vlan_mac_obj *dest_o);
1494 
/********************* RX MODE ****************/

void ecore_init_rx_mode_obj(struct _lm_device_t *pdev,
			    struct ecore_rx_mode_obj *o);

/**
 * ecore_config_rx_mode - Send an RX_MODE ramrod according to the provided parameters.
 *
 * @p: Command parameters
 *
 * Return: 0 - if operation was successful and there is no pending completions,
 *         positive number - if there are pending completions,
 *         negative - if there were errors
 */
int ecore_config_rx_mode(struct _lm_device_t *pdev,
			 struct ecore_rx_mode_ramrod_params *p);
1511 
/****************** MULTICASTS ****************/

void ecore_init_mcast_obj(struct _lm_device_t *pdev,
			  struct ecore_mcast_obj *mcast_obj,
			  u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
			  u8 engine_id, void *rdata, lm_address_t rdata_mapping,
			  int state, unsigned long *pstate,
			  ecore_obj_type type);

/**
 * ecore_config_mcast - Configure multicast MACs list.
 *
 * @cmd: command to execute: ECORE_MCAST_CMD_X
 *
 * May configure a new list
 * provided in p->mcast_list (ECORE_MCAST_CMD_ADD), clean up
 * (ECORE_MCAST_CMD_DEL) or restore (ECORE_MCAST_CMD_RESTORE) a current
 * configuration, continue to execute the pending commands
 * (ECORE_MCAST_CMD_CONT).
 *
 * If previous command is still pending or if number of MACs to
 * configure is more than maximum number of MACs in one command,
 * the current command will be enqueued to the tail of the
 * pending commands list.
 *
 * Return: 0 if operation was successful and there are no pending completions,
 *         negative if there were errors, positive if there are pending
 *         completions.
 */
int ecore_config_mcast(struct _lm_device_t *pdev,
		       struct ecore_mcast_ramrod_params *p,
		       enum ecore_mcast_cmd cmd);
1544 
/****************** CREDIT POOL ****************/
/* Initialize the MAC credit pool -- presumably splitting the available
 * credits among 'func_num' functions; confirm in the implementation.
 */
void ecore_init_mac_credit_pool(struct _lm_device_t *pdev,
				struct ecore_credit_pool_obj *p, u8 func_id,
				u8 func_num);
/* Initialize the VLAN credit pool (same sharing scheme as above). */
void ecore_init_vlan_credit_pool(struct _lm_device_t *pdev,
				 struct ecore_credit_pool_obj *p, u8 func_id,
				 u8 func_num);
1552 
/****************** RSS CONFIGURATION ****************/
void ecore_init_rss_config_obj(struct _lm_device_t *pdev,
			       struct ecore_rss_config_obj *rss_obj,
			       u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
			       void *rdata, lm_address_t rdata_mapping,
			       int state, unsigned long *pstate,
			       ecore_obj_type type);

/**
 * ecore_config_rss - Updates RSS configuration according to provided parameters
 *
 * Return: 0 in case of success
 */
int ecore_config_rss(struct _lm_device_t *pdev,
		     struct ecore_config_rss_params *p);

/**
 * ecore_get_rss_ind_table - Return the current ind_table configuration.
 *
 * @ind_table: buffer to fill with the current indirection
 *                  table content. Should be at least
 *                  T_ETH_INDIRECTION_TABLE_SIZE bytes long.
 */
void ecore_get_rss_ind_table(struct ecore_rss_config_obj *rss_obj,
			     u8 *ind_table);
1578 
1579 #endif /* ECORE_SP_VERBS */
1580