/* QLogic qed NIC Driver
 *
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#ifndef _QED_IF_H
#define _QED_IF_H

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/qed/common_hsi.h>
#include <linux/qed/qed_chain.h>

enum dcbx_protocol_type {
	DCBX_PROTOCOL_ISCSI,
	DCBX_PROTOCOL_FCOE,
	DCBX_PROTOCOL_ROCE,
	DCBX_PROTOCOL_ROCE_V2,
	DCBX_PROTOCOL_ETH,
	DCBX_MAX_PROTOCOL_TYPE
};

#ifdef CONFIG_DCB
#define QED_LLDP_CHASSIS_ID_STAT_LEN 4
#define QED_LLDP_PORT_ID_STAT_LEN 4
#define QED_DCBX_MAX_APP_PROTOCOL 32
#define QED_MAX_PFC_PRIORITIES 8
#define QED_DCBX_DSCP_SIZE 64

struct qed_dcbx_lldp_remote {
	u32 peer_chassis_id[QED_LLDP_CHASSIS_ID_STAT_LEN];
	u32 peer_port_id[QED_LLDP_PORT_ID_STAT_LEN];
	bool enable_rx;
	bool enable_tx;
	u32 tx_interval;
	u32 max_credit;
};

struct qed_dcbx_lldp_local {
	u32 local_chassis_id[QED_LLDP_CHASSIS_ID_STAT_LEN];
	u32 local_port_id[QED_LLDP_PORT_ID_STAT_LEN];
};

struct qed_dcbx_app_prio {
	u8 roce;
	u8 roce_v2;
	u8 fcoe;
	u8 iscsi;
	u8 eth;
};

struct qed_dbcx_pfc_params {
	bool willing;
	bool enabled;
	u8 prio[QED_MAX_PFC_PRIORITIES];
	u8 max_tc;
};

enum qed_dcbx_sf_ieee_type {
	QED_DCBX_SF_IEEE_ETHTYPE,
	QED_DCBX_SF_IEEE_TCP_PORT,
	QED_DCBX_SF_IEEE_UDP_PORT,
	QED_DCBX_SF_IEEE_TCP_UDP_PORT
};

struct qed_app_entry {
	bool ethtype;
	enum qed_dcbx_sf_ieee_type sf_ieee;
	bool enabled;
	u8 prio;
	u16 proto_id;
	enum dcbx_protocol_type proto_type;
};

struct qed_dcbx_params {
	struct qed_app_entry app_entry[QED_DCBX_MAX_APP_PROTOCOL];
	u16 num_app_entries;
	bool app_willing;
	bool app_valid;
	bool app_error;
	bool ets_willing;
	bool ets_enabled;
	bool ets_cbs;
	bool valid;
	u8 ets_pri_tc_tbl[QED_MAX_PFC_PRIORITIES];
	u8 ets_tc_bw_tbl[QED_MAX_PFC_PRIORITIES];
	u8 ets_tc_tsa_tbl[QED_MAX_PFC_PRIORITIES];
	struct qed_dbcx_pfc_params pfc;
	u8 max_ets_tc;
};

struct qed_dcbx_admin_params {
	struct qed_dcbx_params params;
	bool valid;
};

struct qed_dcbx_remote_params {
	struct qed_dcbx_params params;
	bool valid;
};

struct qed_dcbx_operational_params {
	struct qed_dcbx_app_prio app_prio;
	struct qed_dcbx_params params;
	bool valid;
	bool enabled;
	bool ieee;
	bool cee;
	u32 err;
};

struct qed_dcbx_get {
	struct qed_dcbx_operational_params operational;
	struct qed_dcbx_lldp_remote lldp_remote;
	struct qed_dcbx_lldp_local lldp_local;
	struct qed_dcbx_remote_params remote;
	struct qed_dcbx_admin_params local;
};
#endif

enum qed_led_mode {
	QED_LED_MODE_OFF,
	QED_LED_MODE_ON,
	QED_LED_MODE_RESTORE
};

#define DIRECT_REG_WR(reg_addr, val) writel((u32)val, \
					    (void __iomem *)(reg_addr))

#define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr))

#define QED_COALESCE_MAX 0xFF

/* forward */
struct qed_dev;

struct qed_eth_pf_params {
	/* The following parameters are used during HW-init and need to be
	 * passed as arguments to the update_pf_params routine, which is
	 * invoked before slowpath start.
	 */
	u16 num_cons;
};

/* Most of the parameters below are described in the FW iSCSI / TCP HSI */
struct qed_iscsi_pf_params {
	u64 glbl_q_params_addr;
	u64 bdq_pbl_base_addr[2];
	u32 max_cwnd;
	u16 cq_num_entries;
	u16 cmdq_num_entries;
	u16 dup_ack_threshold;
	u16 tx_sws_timer;
	u16 min_rto;
	u16 min_rto_rt;
	u16 max_rto;

	/* The following parameters are used during HW-init and need to be
	 * passed as arguments to the update_pf_params routine, which is
	 * invoked before slowpath start.
	 */
	u16 num_cons;
	u16 num_tasks;

	/* The following parameters are used during protocol-init */
	u16 half_way_close_timeout;
	u16 bdq_xoff_threshold[2];
	u16 bdq_xon_threshold[2];
	u16 cmdq_xoff_threshold;
	u16 cmdq_xon_threshold;
	u16 rq_buffer_size;

	u8 num_sq_pages_in_ring;
	u8 num_r2tq_pages_in_ring;
	u8 num_uhq_pages_in_ring;
	u8 num_queues;
	u8 log_page_size;
	u8 rqe_log_size;
	u8 max_fin_rt;
	u8 gl_rq_pi;
	u8 gl_cmd_pi;
	u8 debug_mode;
	u8 ll2_ooo_queue_id;
	u8 ooo_enable;

	u8 is_target;
	u8 bdq_pbl_num_entries[2];
};

struct qed_rdma_pf_params {
	/* Supplied to QED during resource allocation (may affect the ILT and
	 * the doorbell BAR).
	 */
	u32 min_dpis;		/* number of requested DPIs */
	u32 num_mrs;		/* number of requested memory regions */
	u32 num_qps;		/* number of requested Queue Pairs */
	u32 num_srqs;		/* number of requested SRQs */
	u8 roce_edpm_mode;	/* see QED_ROCE_EDPM_MODE_ENABLE */
	u8 gl_pi;		/* protocol index */

	/* Will allocate rate limiters to be used with QPs */
	u8 enable_dcqcn;
};

struct qed_pf_params {
	struct qed_eth_pf_params eth_pf_params;
	struct qed_iscsi_pf_params iscsi_pf_params;
	struct qed_rdma_pf_params rdma_pf_params;
};
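
/* Illustrative sketch only (not part of this interface): before starting the
 * slowpath, a client driver is expected to fill the sub-structure relevant to
 * its protocol and hand it to the update_pf_params() callback of struct
 * qed_common_ops (defined further below). The 'common' pointer and the value
 * 64 are assumptions made for this example.
 *
 *	struct qed_pf_params pf_params;
 *
 *	memset(&pf_params, 0, sizeof(pf_params));
 *	pf_params.eth_pf_params.num_cons = 64;
 *	common->update_pf_params(cdev, &pf_params);
 *	common->slowpath_start(cdev, &sp_params);
 */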

enum qed_int_mode {
	QED_INT_MODE_INTA,
	QED_INT_MODE_MSIX,
	QED_INT_MODE_MSI,
	QED_INT_MODE_POLL,
};

struct qed_sb_info {
	struct status_block	*sb_virt;
	dma_addr_t		sb_phys;
	u32			sb_ack; /* Last given ack */
	u16			igu_sb_id;
	void __iomem		*igu_addr;
	u8			flags;
#define QED_SB_INFO_INIT        0x1
#define QED_SB_INFO_SETUP       0x2

	struct qed_dev		*cdev;
};

struct qed_dev_info {
	unsigned long	pci_mem_start;
	unsigned long	pci_mem_end;
	unsigned int	pci_irq;
	u8		num_hwfns;

	u8		hw_mac[ETH_ALEN];
	bool		is_mf_default;

	/* FW version */
	u16		fw_major;
	u16		fw_minor;
	u16		fw_rev;
	u16		fw_eng;

	/* MFW version */
	u32		mfw_rev;

	bool rdma_supported;

	u32		flash_size;
	u8		mf_mode;
	bool		tx_switching;
};

enum qed_sb_type {
	QED_SB_TYPE_L2_QUEUE,
};

enum qed_protocol {
	QED_PROTOCOL_ETH,
	QED_PROTOCOL_ISCSI,
};

struct qed_link_params {
	bool	link_up;

#define QED_LINK_OVERRIDE_SPEED_AUTONEG         BIT(0)
#define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS      BIT(1)
#define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED    BIT(2)
#define QED_LINK_OVERRIDE_PAUSE_CONFIG          BIT(3)
#define QED_LINK_OVERRIDE_LOOPBACK_MODE         BIT(4)
	u32	override_flags;
	bool	autoneg;
	u32	adv_speeds;
	u32	forced_speed;
#define QED_LINK_PAUSE_AUTONEG_ENABLE           BIT(0)
#define QED_LINK_PAUSE_RX_ENABLE                BIT(1)
#define QED_LINK_PAUSE_TX_ENABLE                BIT(2)
	u32	pause_config;
#define QED_LINK_LOOPBACK_NONE                  BIT(0)
#define QED_LINK_LOOPBACK_INT_PHY               BIT(1)
#define QED_LINK_LOOPBACK_EXT_PHY               BIT(2)
#define QED_LINK_LOOPBACK_EXT                   BIT(3)
#define QED_LINK_LOOPBACK_MAC                   BIT(4)
	u32	loopback_mode;
};
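
/* Illustrative sketch only: a possible qed_link_params setup forcing the speed
 * and enabling Rx/Tx pause, as a client might pass to the set_link() callback
 * below. The value 10000 is an assumption (forced_speed is taken here to be in
 * Mb/s, matching the 'speed' field of struct qed_link_output).
 *
 *	struct qed_link_params params;
 *
 *	memset(&params, 0, sizeof(params));
 *	params.link_up = true;
 *	params.override_flags = QED_LINK_OVERRIDE_SPEED_AUTONEG |
 *				QED_LINK_OVERRIDE_SPEED_FORCED_SPEED |
 *				QED_LINK_OVERRIDE_PAUSE_CONFIG;
 *	params.autoneg = false;
 *	params.forced_speed = 10000;
 *	params.pause_config = QED_LINK_PAUSE_RX_ENABLE |
 *			      QED_LINK_PAUSE_TX_ENABLE;
 */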

struct qed_link_output {
	bool	link_up;

	u32	supported_caps;         /* In SUPPORTED defs */
	u32	advertised_caps;        /* In ADVERTISED defs */
	u32	lp_caps;                /* In ADVERTISED defs */
	u32	speed;                  /* In Mb/s */
	u8	duplex;                 /* In DUPLEX defs */
	u8	port;                   /* In PORT defs */
	bool	autoneg;
	u32	pause_config;
};

struct qed_probe_params {
	enum qed_protocol protocol;
	u32 dp_module;
	u8 dp_level;
	bool is_vf;
};

#define QED_DRV_VER_STR_SIZE 12
struct qed_slowpath_params {
	u32	int_mode;
	u8	drv_major;
	u8	drv_minor;
	u8	drv_rev;
	u8	drv_eng;
	u8	name[QED_DRV_VER_STR_SIZE];
};

#define ILT_PAGE_SIZE_TCFC 0x8000 /* 32KB */

struct qed_int_info {
	struct msix_entry	*msix;
	u8			msix_cnt;

	/* This should be updated by the protocol driver */
	u8			used_cnt;
};

struct qed_common_cb_ops {
	void	(*link_update)(void			*dev,
			       struct qed_link_output	*link);
};

struct qed_selftest_ops {
/**
 * @brief selftest_interrupt - Perform interrupt test
 *
 * @param cdev
 *
 * @return 0 on success, error otherwise.
 */
	int (*selftest_interrupt)(struct qed_dev *cdev);

/**
 * @brief selftest_memory - Perform memory test
 *
 * @param cdev
 *
 * @return 0 on success, error otherwise.
 */
	int (*selftest_memory)(struct qed_dev *cdev);

/**
 * @brief selftest_register - Perform register test
 *
 * @param cdev
 *
 * @return 0 on success, error otherwise.
 */
	int (*selftest_register)(struct qed_dev *cdev);

/**
 * @brief selftest_clock - Perform clock test
 *
 * @param cdev
 *
 * @return 0 on success, error otherwise.
 */
	int (*selftest_clock)(struct qed_dev *cdev);
};

struct qed_common_ops {
	struct qed_selftest_ops *selftest;

	struct qed_dev*	(*probe)(struct pci_dev *dev,
				 struct qed_probe_params *params);

	void		(*remove)(struct qed_dev *cdev);

	int		(*set_power_state)(struct qed_dev *cdev,
					   pci_power_t state);

	void		(*set_id)(struct qed_dev *cdev,
				  char name[],
				  char ver_str[]);

	/* Client drivers need to make this call before slowpath_start.
	 * The PF params required for the call are documented within the
	 * qed_pf_params structure definition.
	 */
	void		(*update_pf_params)(struct qed_dev *cdev,
					    struct qed_pf_params *params);
	int		(*slowpath_start)(struct qed_dev *cdev,
					  struct qed_slowpath_params *params);

	int		(*slowpath_stop)(struct qed_dev *cdev);

	/* Requests to use `cnt' interrupts for fastpath.
	 * Upon success, returns the number of interrupts allocated for
	 * fastpath.
	 */
	int		(*set_fp_int)(struct qed_dev *cdev,
				      u16 cnt);

	/* Fills `info' with pointers required for utilizing interrupts */
	int		(*get_fp_int)(struct qed_dev *cdev,
				      struct qed_int_info *info);

	u32		(*sb_init)(struct qed_dev *cdev,
				   struct qed_sb_info *sb_info,
				   void *sb_virt_addr,
				   dma_addr_t sb_phy_addr,
				   u16 sb_id,
				   enum qed_sb_type type);

	u32		(*sb_release)(struct qed_dev *cdev,
				      struct qed_sb_info *sb_info,
				      u16 sb_id);

	void		(*simd_handler_config)(struct qed_dev *cdev,
					       void *token,
					       int index,
					       void (*handler)(void *));

	void		(*simd_handler_clean)(struct qed_dev *cdev,
					      int index);

/**
 * @brief can_link_change - can the instance change the link or not
 *
 * @param cdev
 *
 * @return true if link-change is allowed, false otherwise.
 */
	bool (*can_link_change)(struct qed_dev *cdev);

/**
 * @brief set_link - set the link according to params
 *
 * @param cdev
 * @param params - values used to override the default link configuration
 *
 * @return 0 on success, error otherwise.
 */
	int		(*set_link)(struct qed_dev *cdev,
				    struct qed_link_params *params);

/**
 * @brief get_link - returns the current link state.
 *
 * @param cdev
 * @param if_link - structure to be filled with the current link configuration.
 */
	void		(*get_link)(struct qed_dev *cdev,
				    struct qed_link_output *if_link);

/**
 * @brief drain - drains the chip in case Tx completions fail to arrive due to
 *        pause.
 *
 * @param cdev
 */
	int		(*drain)(struct qed_dev *cdev);

/**
 * @brief update_msglvl - update module debug level
 *
 * @param cdev
 * @param dp_module
 * @param dp_level
 */
	void		(*update_msglvl)(struct qed_dev *cdev,
					 u32 dp_module,
					 u8 dp_level);

	int		(*chain_alloc)(struct qed_dev *cdev,
				       enum qed_chain_use_mode intended_use,
				       enum qed_chain_mode mode,
				       enum qed_chain_cnt_type cnt_type,
				       u32 num_elems,
				       size_t elem_size,
				       struct qed_chain *p_chain);

	void		(*chain_free)(struct qed_dev *cdev,
				      struct qed_chain *p_chain);

/**
 * @brief get_coalesce - Get coalesce parameters in usec
 *
 * @param cdev
 * @param rx_coal - Rx coalesce value in usec
 * @param tx_coal - Tx coalesce value in usec
 *
 */
	void (*get_coalesce)(struct qed_dev *cdev, u16 *rx_coal, u16 *tx_coal);

/**
 * @brief set_coalesce - Configure Rx and Tx coalesce values in usec
 *
 * @param cdev
 * @param rx_coal - Rx coalesce value in usec
 * @param tx_coal - Tx coalesce value in usec
 * @param qid - Queue index
 * @param sb_id - Status Block Id
 *
 * @return 0 on success, error otherwise.
 */
	int (*set_coalesce)(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
			    u8 qid, u16 sb_id);

/**
 * @brief set_led - Configure LED mode
 *
 * @param cdev
 * @param mode - LED mode
 *
 * @return 0 on success, error otherwise.
 */
	int (*set_led)(struct qed_dev *cdev,
		       enum qed_led_mode mode);
};
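
/* Illustrative sketch only of the intended calling order for the ops above,
 * assuming a client driver that has already obtained a qed_common_ops pointer
 * named 'common' through its protocol-specific interface (not part of this
 * header). Error handling is omitted; num_queues, sb_virt, sb_phys and the
 * other variables are assumptions made for this example.
 *
 *	struct qed_int_info int_info;
 *	int rc;
 *
 *	common->update_pf_params(cdev, &pf_params);
 *	rc = common->slowpath_start(cdev, &sp_params);
 *	rc = common->set_fp_int(cdev, num_queues);
 *	rc = common->get_fp_int(cdev, &int_info);
 *	common->sb_init(cdev, sb_info, sb_virt, sb_phys, sb_id,
 *			QED_SB_TYPE_L2_QUEUE);
 *	...
 *	common->sb_release(cdev, sb_info, sb_id);
 *	common->slowpath_stop(cdev);
 */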

#define MASK_FIELD(_name, _value) \
	((_value) &= (_name ## _MASK))

#define FIELD_VALUE(_name, _value) \
	((_value & _name ## _MASK) << _name ## _SHIFT)

#define SET_FIELD(value, name, flag)			       \
	do {						       \
		(value) &= ~(name ## _MASK << name ## _SHIFT); \
		(value) |= (((u64)flag) << (name ## _SHIFT));  \
	} while (0)

#define GET_FIELD(value, name) \
	(((value) >> (name ## _SHIFT)) & name ## _MASK)
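
/* Usage sketch for SET_FIELD()/GET_FIELD(), assuming a field described by a
 * hypothetical FOO_BAR_MASK/FOO_BAR_SHIFT pair (the names and values below
 * are for illustration only and are not defined by the driver):
 *
 *	#define FOO_BAR_MASK	0xF
 *	#define FOO_BAR_SHIFT	4
 *
 *	u32 reg = 0;
 *
 *	SET_FIELD(reg, FOO_BAR, 0x5);		reg now holds 0x50
 *	u32 val = GET_FIELD(reg, FOO_BAR);	val is 0x5
 */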

/* Debug print definitions */
#define DP_ERR(cdev, fmt, ...)						     \
		pr_err("[%s:%d(%s)]" fmt,				     \
		       __func__, __LINE__,				     \
		       DP_NAME(cdev) ? DP_NAME(cdev) : "",		     \
		       ## __VA_ARGS__)					     \

#define DP_NOTICE(cdev, fmt, ...)				      \
	do {							      \
		if (unlikely((cdev)->dp_level <= QED_LEVEL_NOTICE)) { \
			pr_notice("[%s:%d(%s)]" fmt,		      \
				  __func__, __LINE__,		      \
				  DP_NAME(cdev) ? DP_NAME(cdev) : "", \
				  ## __VA_ARGS__);		      \
								      \
		}						      \
	} while (0)

#define DP_INFO(cdev, fmt, ...)					      \
	do {							      \
		if (unlikely((cdev)->dp_level <= QED_LEVEL_INFO)) {   \
			pr_notice("[%s:%d(%s)]" fmt,		      \
				  __func__, __LINE__,		      \
				  DP_NAME(cdev) ? DP_NAME(cdev) : "", \
				  ## __VA_ARGS__);		      \
		}						      \
	} while (0)

#define DP_VERBOSE(cdev, module, fmt, ...)				\
	do {								\
		if (unlikely(((cdev)->dp_level <= QED_LEVEL_VERBOSE) &&	\
			     ((cdev)->dp_module & module))) {		\
			pr_notice("[%s:%d(%s)]" fmt,			\
				  __func__, __LINE__,			\
				  DP_NAME(cdev) ? DP_NAME(cdev) : "",	\
				  ## __VA_ARGS__);			\
		}							\
	} while (0)

enum DP_LEVEL {
	QED_LEVEL_VERBOSE	= 0x0,
	QED_LEVEL_INFO		= 0x1,
	QED_LEVEL_NOTICE	= 0x2,
	QED_LEVEL_ERR		= 0x3,
};

#define QED_LOG_LEVEL_SHIFT     (30)
#define QED_LOG_VERBOSE_MASK    (0x3fffffff)
#define QED_LOG_INFO_MASK       (0x40000000)
#define QED_LOG_NOTICE_MASK     (0x80000000)

enum DP_MODULE {
	QED_MSG_SPQ	= 0x10000,
	QED_MSG_STATS	= 0x20000,
	QED_MSG_DCB	= 0x40000,
	QED_MSG_IOV	= 0x80000,
	QED_MSG_SP	= 0x100000,
	QED_MSG_STORAGE = 0x200000,
	QED_MSG_CXT	= 0x800000,
	QED_MSG_ILT	= 0x2000000,
	QED_MSG_ROCE	= 0x4000000,
	QED_MSG_DEBUG	= 0x8000000,
	/* to be added...up to 0x8000000 */
};
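
/* Sketch of how debug verbosity is typically composed: dp_level selects the
 * severity threshold and dp_module selects which verbose modules DP_VERBOSE()
 * above will print. For example, requesting verbose SPQ and DCB output could
 * look as follows (illustrative only; how the values reach dp_module/dp_level
 * is driver-specific):
 *
 *	u32 dp_module = QED_MSG_SPQ | QED_MSG_DCB;
 *	u8 dp_level = QED_LEVEL_VERBOSE;
 *
 *	common->update_msglvl(cdev, dp_module, dp_level);
 *
 * The QED_LOG_*_MASK/QED_LOG_LEVEL_SHIFT definitions above suggest a packing
 * where the two top bits of a single 32-bit word encode the INFO/NOTICE
 * levels and the lower 30 bits carry the verbose module mask.
 */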

enum qed_mf_mode {
	QED_MF_DEFAULT,
	QED_MF_OVLAN,
	QED_MF_NPAR,
};

struct qed_eth_stats {
	u64	no_buff_discards;
	u64	packet_too_big_discard;
	u64	ttl0_discard;
	u64	rx_ucast_bytes;
	u64	rx_mcast_bytes;
	u64	rx_bcast_bytes;
	u64	rx_ucast_pkts;
	u64	rx_mcast_pkts;
	u64	rx_bcast_pkts;
	u64	mftag_filter_discards;
	u64	mac_filter_discards;
	u64	tx_ucast_bytes;
	u64	tx_mcast_bytes;
	u64	tx_bcast_bytes;
	u64	tx_ucast_pkts;
	u64	tx_mcast_pkts;
	u64	tx_bcast_pkts;
	u64	tx_err_drop_pkts;
	u64	tpa_coalesced_pkts;
	u64	tpa_coalesced_events;
	u64	tpa_aborts_num;
	u64	tpa_not_coalesced_pkts;
	u64	tpa_coalesced_bytes;

	/* port */
	u64	rx_64_byte_packets;
	u64	rx_65_to_127_byte_packets;
	u64	rx_128_to_255_byte_packets;
	u64	rx_256_to_511_byte_packets;
	u64	rx_512_to_1023_byte_packets;
	u64	rx_1024_to_1518_byte_packets;
	u64	rx_1519_to_1522_byte_packets;
	u64	rx_1519_to_2047_byte_packets;
	u64	rx_2048_to_4095_byte_packets;
	u64	rx_4096_to_9216_byte_packets;
	u64	rx_9217_to_16383_byte_packets;
	u64	rx_crc_errors;
	u64	rx_mac_crtl_frames;
	u64	rx_pause_frames;
	u64	rx_pfc_frames;
	u64	rx_align_errors;
	u64	rx_carrier_errors;
	u64	rx_oversize_packets;
	u64	rx_jabbers;
	u64	rx_undersize_packets;
	u64	rx_fragments;
	u64	tx_64_byte_packets;
	u64	tx_65_to_127_byte_packets;
	u64	tx_128_to_255_byte_packets;
	u64	tx_256_to_511_byte_packets;
	u64	tx_512_to_1023_byte_packets;
	u64	tx_1024_to_1518_byte_packets;
	u64	tx_1519_to_2047_byte_packets;
	u64	tx_2048_to_4095_byte_packets;
	u64	tx_4096_to_9216_byte_packets;
	u64	tx_9217_to_16383_byte_packets;
	u64	tx_pause_frames;
	u64	tx_pfc_frames;
	u64	tx_lpi_entry_count;
	u64	tx_total_collisions;
	u64	brb_truncates;
	u64	brb_discards;
	u64	rx_mac_bytes;
	u64	rx_mac_uc_packets;
	u64	rx_mac_mc_packets;
	u64	rx_mac_bc_packets;
	u64	rx_mac_frames_ok;
	u64	tx_mac_bytes;
	u64	tx_mac_uc_packets;
	u64	tx_mac_mc_packets;
	u64	tx_mac_bc_packets;
	u64	tx_mac_ctrl_frames;
};

#define QED_SB_IDX              0x0002

#define RX_PI           0
#define TX_PI(tc)       (RX_PI + 1 + tc)

struct qed_sb_cnt_info {
	int	sb_cnt;
	int	sb_iov_cnt;
	int	sb_free_blk;
};

static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info)
{
	u32 prod = 0;
	u16 rc = 0;

	prod = le32_to_cpu(sb_info->sb_virt->prod_index) &
	       STATUS_BLOCK_PROD_INDEX_MASK;
	if (sb_info->sb_ack != prod) {
		sb_info->sb_ack = prod;
		rc |= QED_SB_IDX;
	}

	/* Let SB update */
	mmiowb();
	return rc;
}

/**
 * @brief qed_sb_ack - Creates an update command for interrupts and writes it
 *        to the IGU.
 *
 * @param sb_info       - the structure allocated and initialized per status
 *                        block; it is assumed to have been initialized using
 *                        qed_sb_init
 * @param int_cmd       - Enable/Disable/Nop
 * @param upd_flg       - whether the IGU consumer should be updated
 */
static inline void qed_sb_ack(struct qed_sb_info *sb_info,
			      enum igu_int_cmd int_cmd,
			      u8 upd_flg)
{
	struct igu_prod_cons_update igu_ack = { 0 };

	igu_ack.sb_id_and_flags =
		((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
		 (upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
		 (int_cmd << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
		 (IGU_SEG_ACCESS_REG <<
		  IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));

	DIRECT_REG_WR(sb_info->igu_addr, igu_ack.sb_id_and_flags);

	/* Both segments (interrupts & acks) are written to the same address;
	 * we need to guarantee that all commands are received (in-order) by
	 * the HW.
	 */
	mmiowb();
	barrier();
}
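
/* Typical fastpath usage sketch for the two helpers above (illustrative only;
 * the surrounding poll loop and the completion processing are assumptions,
 * not part of this header):
 *
 *	u16 rc = qed_sb_update_sb_idx(sb_info);
 *
 *	if (rc & QED_SB_IDX) {
 *		... process the completions indicated by the new producer ...
 *	}
 *
 *	when done, re-arm the interrupt and update the IGU consumer:
 *	qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
 */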

static inline void __internal_ram_wr(void *p_hwfn,
				     void __iomem *addr,
				     int size,
				     u32 *data)
{
	unsigned int i;

	for (i = 0; i < size / sizeof(*data); i++)
		DIRECT_REG_WR(&((u32 __iomem *)addr)[i], data[i]);
}

static inline void internal_ram_wr(void __iomem *addr,
				   int size,
				   u32 *data)
{
	__internal_ram_wr(NULL, addr, size, data);
}

enum qed_rss_caps {
	QED_RSS_IPV4		= 0x1,
	QED_RSS_IPV6		= 0x2,
	QED_RSS_IPV4_TCP	= 0x4,
	QED_RSS_IPV6_TCP	= 0x8,
	QED_RSS_IPV4_UDP	= 0x10,
	QED_RSS_IPV6_UDP	= 0x20,
};

#define QED_RSS_IND_TABLE_SIZE 128
#define QED_RSS_KEY_SIZE 10 /* size in 32b chunks */
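
/* Illustrative example of an RSS capability mask covering TCP over both IP
 * versions; the structures consuming such a mask belong to the L2 interface,
 * which is outside this header:
 *
 *	rss_caps = QED_RSS_IPV4 | QED_RSS_IPV6 |
 *		   QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP;
 */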
#endif