/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

/*
 * File: qlnx_def.h
 * Author: David C Somayajulu, Cavium Inc., San Jose, CA 95131.
 */

#ifndef _QLNX_DEF_H_
#define _QLNX_DEF_H_

#define VER_SIZE 16

struct qlnx_ivec {
        uint32_t                rss_idx;
        void                    *ha;
        struct resource         *irq;
        void                    *handle;
        int                     irq_rid;
};

typedef struct qlnx_ivec qlnx_ivec_t;

//#define QLNX_MAX_RSS		30
#define QLNX_MAX_VF_RSS		4
#define QLNX_MAX_RSS		36
#define QLNX_DEFAULT_RSS	16
#define QLNX_MAX_TC		1

enum QLNX_STATE {
        QLNX_STATE_CLOSED,
        QLNX_STATE_OPEN,
};

#define HILO_U64(hi, lo)                ((((u64)(hi)) << 32) + (lo))
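
/*
 * Usage sketch (illustrative, not part of the original source):
 * HILO_U64() reassembles a 64-bit value from the 32-bit halves carried
 * in hardware descriptors, e.g. a DMA address split across the hi/lo
 * words of a BD; the bd variable below is hypothetical.
 *
 *	bus_addr_t paddr;
 *
 *	paddr = HILO_U64(le32toh(bd->addr.hi), le32toh(bd->addr.lo));
 */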

#define MAX_NUM_TC      8
#define MAX_NUM_PRI     8

#ifndef BITS_PER_BYTE
#define BITS_PER_BYTE	8
#endif /* #ifndef BITS_PER_BYTE */

/*
 * An RX ring buffer entry holds a pointer to the preallocated receive
 * data buffer only.
 */
struct sw_rx_data {
        void		*data;
	bus_dmamap_t	map;
	dma_addr_t	dma_addr;
};

enum qlnx_agg_state {
        QLNX_AGG_STATE_NONE  = 0,
        QLNX_AGG_STATE_START = 1,
        QLNX_AGG_STATE_ERROR = 2
};

struct qlnx_agg_info {
        /*
         * rx_buf is a data buffer that can be placed on/consumed from the
         * rx bd chain. It has two purposes: we preallocate the data buffer
         * for each aggregation when we open the interface, and we place this
         * buffer on the rx-bd-ring when we receive TPA_START. We don't want
         * to be in a state where allocation fails, as we can't reuse the
         * consumed buffer in the rx-chain since the FW may still be writing
         * to it (the header needs to be modified for TPA).
         * The second purpose is to keep a pointer to the bd buffer during
         * aggregation.
         */
        struct sw_rx_data       rx_buf;
        enum qlnx_agg_state     agg_state;
	uint16_t		placement_offset;
        struct mbuf             *mpf; /* first mbuf in chain */
        struct mbuf             *mpl; /* last mbuf in chain */
};

#define RX_RING_SIZE_POW        13
#define RX_RING_SIZE            (1 << RX_RING_SIZE_POW)

#define TX_RING_SIZE_POW        14
#define TX_RING_SIZE            (1 << TX_RING_SIZE_POW)

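/*
 * Illustrative note (not part of the original source): the rings hold
 * 2^13 = 8192 RX entries and 2^14 = 16384 TX entries. Since both sizes
 * are powers of two, a free-running 16-bit producer/consumer index can
 * be mapped to a ring slot by masking, e.g.:
 *
 *	struct sw_rx_data *rx_buf;
 *
 *	rx_buf = &rxq->sw_rx_ring[rxq->sw_rx_cons & (RX_RING_SIZE - 1)];
 */
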
struct qlnx_rx_queue {
        volatile __le16         *hw_cons_ptr;
        struct sw_rx_data       sw_rx_ring[RX_RING_SIZE];
        uint16_t		sw_rx_cons;
        uint16_t		sw_rx_prod;
        struct ecore_chain      rx_bd_ring;
        struct ecore_chain      rx_comp_ring;
        void __iomem            *hw_rxq_prod_addr;
	void			*handle;

        /* LRO */
        struct qlnx_agg_info    tpa_info[ETH_TPA_MAX_AGGS_NUM];

        uint32_t		rx_buf_size;

        uint16_t		num_rx_buffers;
        uint16_t		rxq_id;

#ifdef QLNX_SOFT_LRO
	struct lro_ctrl		lro;
#endif
};

union db_prod {
        struct eth_db_data	data;
        uint32_t		raw;
};

struct sw_tx_bd {
        struct mbuf		*mp;
	bus_dmamap_t		map;
        uint8_t			flags;
	int			nsegs;

/* Set on the first BD descriptor when there is a split BD */
#define QLNX_TSO_SPLIT_BD               (1<<0)
};

#define QLNX_MAX_SEGMENTS		255
struct qlnx_tx_queue {
        int                     index; /* Queue index */
        volatile __le16         *hw_cons_ptr;
        struct sw_tx_bd         sw_tx_ring[TX_RING_SIZE];
        uint16_t		sw_tx_cons;
        uint16_t		sw_tx_prod;
        struct ecore_chain	tx_pbl;
        void __iomem            *doorbell_addr;
	void			*handle;
        union db_prod           tx_db;

	bus_dma_segment_t	segs[QLNX_MAX_SEGMENTS];

        uint16_t		num_tx_buffers;
};

#define BD_UNMAP_ADDR(bd)	HILO_U64(le32toh((bd)->addr.hi), \
					le32toh((bd)->addr.lo))
#define BD_UNMAP_LEN(bd)	(le16toh((bd)->nbytes))

#define BD_SET_UNMAP_ADDR_LEN(bd, maddr, len) \
        do { \
                (bd)->addr.hi = htole32(U64_HI(maddr)); \
                (bd)->addr.lo = htole32(U64_LO(maddr)); \
                (bd)->nbytes = htole16(len); \
        } while (0)

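/*
 * Usage sketch (illustrative, not part of the original source): a tx
 * routine would typically program one BD per DMA segment; tx_bd and
 * segs are hypothetical names here.
 *
 *	BD_SET_UNMAP_ADDR_LEN(tx_bd, segs[i].ds_addr, segs[i].ds_len);
 *
 * and on completion recover the mapping for unmap/accounting:
 *
 *	bus_addr_t addr = BD_UNMAP_ADDR(tx_bd);
 *	uint16_t len = BD_UNMAP_LEN(tx_bd);
 */
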
#define QLNX_FP_MAX_SEGS	24

struct qlnx_fastpath {
        void			*edev;
        uint8_t			rss_id;
        struct ecore_sb_info    *sb_info;
        struct qlnx_rx_queue    *rxq;
        struct qlnx_tx_queue    *txq[MAX_NUM_TC];
	char			name[64];

	struct mtx		tx_mtx;
	char			tx_mtx_name[32];
	struct buf_ring		*tx_br;
	uint32_t		tx_ring_full;

	struct task		fp_task;
	struct taskqueue	*fp_taskqueue;

	/* transmit statistics */
	uint64_t		tx_pkts_processed;
	uint64_t		tx_pkts_freed;
	uint64_t		tx_pkts_transmitted;
	uint64_t		tx_pkts_completed;
	uint64_t		tx_tso_pkts;
	uint64_t		tx_non_tso_pkts;

#ifdef QLNX_TRACE_PERF_DATA
	uint64_t		tx_pkts_trans_ctx;
	uint64_t		tx_pkts_compl_ctx;
	uint64_t		tx_pkts_trans_fp;
	uint64_t		tx_pkts_compl_fp;
	uint64_t		tx_pkts_compl_intr;
#endif

	uint64_t		tx_lso_wnd_min_len;
	uint64_t		tx_defrag;
	uint64_t		tx_nsegs_gt_elem_left;
	uint32_t		tx_tso_max_nsegs;
	uint32_t		tx_tso_min_nsegs;
	uint32_t		tx_tso_max_pkt_len;
	uint32_t		tx_tso_min_pkt_len;
	uint64_t		tx_pkts[QLNX_FP_MAX_SEGS];

#ifdef QLNX_TRACE_PERF_DATA
	uint64_t		tx_pkts_hist[QLNX_FP_MAX_SEGS];
	uint64_t		tx_comInt[QLNX_FP_MAX_SEGS];
	uint64_t		tx_pkts_q[QLNX_FP_MAX_SEGS];
#endif

	uint64_t		err_tx_nsegs_gt_elem_left;
        uint64_t                err_tx_dmamap_create;
        uint64_t                err_tx_defrag_dmamap_load;
        uint64_t                err_tx_non_tso_max_seg;
        uint64_t                err_tx_dmamap_load;
        uint64_t                err_tx_defrag;
        uint64_t                err_tx_free_pkt_null;
        uint64_t                err_tx_cons_idx_conflict;

        uint64_t                lro_cnt_64;
        uint64_t                lro_cnt_128;
        uint64_t                lro_cnt_256;
        uint64_t                lro_cnt_512;
        uint64_t                lro_cnt_1024;

	/* receive statistics */
	uint64_t		rx_pkts;
	uint64_t		tpa_start;
	uint64_t		tpa_cont;
	uint64_t		tpa_end;
        uint64_t                err_m_getcl;
        uint64_t                err_m_getjcl;
        uint64_t		err_rx_hw_errors;
        uint64_t		err_rx_alloc_errors;
	uint64_t		err_rx_jumbo_chain_pkts;
	uint64_t		err_rx_mp_null;
	uint64_t		err_rx_tpa_invalid_agg_num;
};

struct qlnx_update_vport_params {
        uint8_t			vport_id;
        uint8_t			update_vport_active_rx_flg;
        uint8_t			vport_active_rx_flg;
        uint8_t			update_vport_active_tx_flg;
        uint8_t			vport_active_tx_flg;
        uint8_t			update_inner_vlan_removal_flg;
        uint8_t			inner_vlan_removal_flg;
        struct ecore_rss_params	*rss_params;
	struct ecore_sge_tpa_params *sge_tpa_params;
};

/*
 * link related
 */
struct qlnx_link_output {
	bool		link_up;
	uint32_t	supported_caps;
	uint32_t	advertised_caps;
	uint32_t	link_partner_caps;
	uint32_t	speed; /* In Mb/s */
	bool		autoneg;
	uint32_t	media_type;
	uint32_t	duplex;
};
typedef struct qlnx_link_output qlnx_link_output_t;

#define QLNX_LINK_DUPLEX			0x0001

#define QLNX_LINK_CAP_FIBRE			0x0001
#define QLNX_LINK_CAP_Autoneg			0x0002
#define QLNX_LINK_CAP_Pause			0x0004
#define QLNX_LINK_CAP_Asym_Pause		0x0008
#define QLNX_LINK_CAP_1000baseT_Half		0x0010
#define QLNX_LINK_CAP_1000baseT_Full		0x0020
#define QLNX_LINK_CAP_10000baseKR_Full		0x0040
#define QLNX_LINK_CAP_25000baseKR_Full		0x0080
#define QLNX_LINK_CAP_40000baseLR4_Full		0x0100
#define QLNX_LINK_CAP_50000baseKR2_Full		0x0200
#define QLNX_LINK_CAP_100000baseKR4_Full	0x0400

304 
305 #define XMIT_PLAIN              0
306 #define XMIT_L4_CSUM            (1 << 0)
307 #define XMIT_LSO                (1 << 1)
308 
#define CQE_FLAGS_ERR   (PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<       \
                         PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT |       \
                         PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<     \
                         PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT |     \
                         PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK << \
                         PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT | \
                         PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK << \
                         PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT)
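
/*
 * Usage sketch (illustrative, not part of the original source): an rx
 * completion handler would drop CQEs whose parsing flags carry any of
 * the error bits above; the cqe and flags names are hypothetical.
 *
 *	if (le16toh(flags) & CQE_FLAGS_ERR) {
 *		fp->err_rx_hw_errors++;
 *		... recycle the rx buffer instead of passing it up ...
 *	}
 */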

#define RX_COPY_THRESH          92
#define ETH_MAX_PACKET_SIZE     1500

#define QLNX_MFW_VERSION_LENGTH 32
#define QLNX_STORMFW_VERSION_LENGTH 32

#define QLNX_TX_ELEM_RESERVE		2
#define QLNX_TX_ELEM_THRESH		128
#define QLNX_TX_ELEM_MAX_THRESH		512
#define QLNX_TX_ELEM_MIN_THRESH		32
#define QLNX_TX_COMPL_THRESH		32

#define QLNX_TPA_MAX_AGG_BUFFERS             (20)

#define QLNX_MAX_NUM_MULTICAST_ADDRS	ECORE_MAX_MC_ADDRS
typedef struct _qlnx_mcast {
        uint16_t        rsrvd;
        uint8_t         addr[6];
} __packed qlnx_mcast_t;

typedef struct _qlnx_vf_attr {
	uint8_t		mac_addr[ETHER_ADDR_LEN];
	uint32_t	num_rings;
} qlnx_vf_attr_t;

typedef struct _qlnx_sriov_task {
	struct task		pf_task;
	struct taskqueue	*pf_taskqueue;

#define QLNX_SRIOV_TASK_FLAGS_VF_PF_MSG		0x01
#define QLNX_SRIOV_TASK_FLAGS_VF_FLR_UPDATE	0x02
#define QLNX_SRIOV_TASK_FLAGS_BULLETIN_UPDATE	0x04
	volatile uint32_t	flags;
} qlnx_sriov_task_t;

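/*
 * Usage sketch (illustrative, not part of the original source): a
 * slowpath event would set a flag bit atomically and kick the task;
 * the sriov_task pointer is hypothetical.
 *
 *	atomic_set_32(&sriov_task->flags, QLNX_SRIOV_TASK_FLAGS_VF_PF_MSG);
 *	taskqueue_enqueue(sriov_task->pf_taskqueue, &sriov_task->pf_task);
 *
 * The task handler then tests and clears the flag bits it services.
 */
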
/*
 * The adapter structure contains the hardware-independent information of
 * the PCI function.
 */
struct qlnx_host {
	/* interface to ecore */

	struct ecore_dev	cdev;

	uint32_t		state;

	/* some flags */
        volatile struct {
                volatile uint32_t
			hw_init			:1,
			callout_init		:1,
                        slowpath_start		:1,
                        parent_tag		:1,
                        lock_init		:1;
        } flags;

	/* interface to the OS */

	device_t		pci_dev;
	uint8_t			pci_func;
	uint8_t			dev_unit;
	uint16_t		device_id;

	struct ifnet		*ifp;
	int			if_flags;
	volatile int		link_up;
	struct ifmedia		media;
	uint16_t		max_frame_size;

	struct cdev		*ioctl_dev;

	/* resources */
        struct resource         *pci_reg;
        int                     reg_rid;

        struct resource         *pci_dbells;
        int                     dbells_rid;
	uint64_t		dbells_phys_addr;
	uint32_t		dbells_size;

        struct resource         *msix_bar;
        int                     msix_rid;

	int			msix_count;

	struct mtx		hw_lock;

	/* debug */

	uint32_t                dbg_level;
	uint32_t                dbg_trace_lro_cnt;
	uint32_t                dbg_trace_tso_pkt_len;
	uint32_t                dp_level;
	uint32_t                dp_module;

	/* misc */
	uint8_t 		mfw_ver[QLNX_MFW_VERSION_LENGTH];
	uint8_t 		stormfw_ver[QLNX_STORMFW_VERSION_LENGTH];
	uint32_t		flash_size;

	/* dma related */

	bus_dma_tag_t		parent_tag;
	bus_dma_tag_t		tx_tag;
	bus_dma_tag_t		rx_tag;

        struct ecore_sb_info    sb_array[QLNX_MAX_RSS];
        struct qlnx_rx_queue    rxq_array[QLNX_MAX_RSS];
        struct qlnx_tx_queue    txq_array[(QLNX_MAX_RSS * MAX_NUM_TC)];
        struct qlnx_fastpath    fp_array[QLNX_MAX_RSS];

	/* tx related */
	struct callout		tx_callout;
	uint32_t		txr_idx;

	/* rx related */
	uint32_t		rx_pkt_threshold;
	uint32_t		rx_jumbo_buf_eq_mtu;

	/* slow path related */
        struct resource         *sp_irq[MAX_HWFNS_PER_DEVICE];
        void                    *sp_handle[MAX_HWFNS_PER_DEVICE];
        int                     sp_irq_rid[MAX_HWFNS_PER_DEVICE];
	struct task		sp_task[MAX_HWFNS_PER_DEVICE];
	struct taskqueue	*sp_taskqueue[MAX_HWFNS_PER_DEVICE];

	struct callout          qlnx_callout;

	/* fast path related */
	int			num_rss;
	int			num_tc;

#define QLNX_MAX_TSS_CNT(ha)	((ha->num_rss) * (ha->num_tc))

	qlnx_ivec_t		irq_vec[QLNX_MAX_RSS];

	uint8_t			filter;
	uint32_t                nmcast;
	qlnx_mcast_t            mcast[QLNX_MAX_NUM_MULTICAST_ADDRS];
	struct ecore_filter_mcast ecore_mcast;
	uint8_t			primary_mac[ETH_ALEN];
	uint8_t			prio_to_tc[MAX_NUM_PRI];
	struct ecore_eth_stats	hw_stats;
	struct ecore_rss_params	rss_params;
        uint32_t		rx_buf_size;
        bool			rx_csum_offload;

	uint32_t		rx_coalesce_usecs;
	uint32_t		tx_coalesce_usecs;

	/* link related */
	qlnx_link_output_t	if_link;

	/* global counters */
	uint64_t		sp_interrupts;
	uint64_t		err_illegal_intr;
	uint64_t		err_fp_null;
	uint64_t		err_get_proto_invalid_type;

	/* error recovery related */
	uint32_t		error_recovery;
	struct task		err_task;
	struct taskqueue	*err_taskqueue;

	/* grcdump related */
	uint32_t		err_inject;
	uint32_t		grcdump_taken;
	uint32_t		grcdump_dwords[QLNX_MAX_HW_FUNCS];
	uint32_t		grcdump_size[QLNX_MAX_HW_FUNCS];
	void			*grcdump[QLNX_MAX_HW_FUNCS];

	uint32_t		idle_chk_taken;
	uint32_t		idle_chk_dwords[QLNX_MAX_HW_FUNCS];
	uint32_t		idle_chk_size[QLNX_MAX_HW_FUNCS];
	void			*idle_chk[QLNX_MAX_HW_FUNCS];

	/* storm stats related */
#define QLNX_STORM_STATS_TOTAL \
		(QLNX_MAX_HW_FUNCS * QLNX_STORM_STATS_SAMPLES_PER_HWFN)
	qlnx_storm_stats_t	storm_stats[QLNX_STORM_STATS_TOTAL];
	uint32_t		storm_stats_index;
	uint32_t		storm_stats_enable;
	uint32_t		storm_stats_gather;

	uint32_t		personality;

	uint16_t		sriov_initialized;
	uint16_t		num_vfs;
	qlnx_vf_attr_t		*vf_attr;
	qlnx_sriov_task_t	sriov_task[MAX_HWFNS_PER_DEVICE];
	uint32_t		curr_vf;

	void			*next;
	void			*qlnx_rdma;
	volatile int		qlnxr_debug;
};

typedef struct qlnx_host qlnx_host_t;

/* note that align has to be a power of 2 */
#define QL_ALIGN(size, align) (((size) + ((align) - 1)) & (~((align) - 1)))
#define QL_MIN(x, y) (((x) < (y)) ? (x) : (y))

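/*
 * Worked example (illustrative, not part of the original source):
 * QL_ALIGN() rounds up to the next multiple of a power-of-2 boundary:
 *
 *	QL_ALIGN(1500, 64)  -> 1536
 *	QL_ALIGN(1536, 64)  -> 1536
 *	QL_MIN(9000, 16384) -> 9000
 */
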
#define QL_RUNNING(ifp) \
		(((ifp)->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) == \
			IFF_DRV_RUNNING)

#define QLNX_MAX_MTU			9000
#define QLNX_MAX_SEGMENTS_NON_TSO	(ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1)
//#define QLNX_MAX_TSO_FRAME_SIZE		((64 * 1024 - 1) + 22)
#define QLNX_MAX_TSO_FRAME_SIZE		65536
#define QLNX_MAX_TX_MBUF_SIZE		65536    /* bytes; the bd nbytes field is 16 bits */

#define QL_MAC_CMP(mac1, mac2)    \
        (((*(uint32_t *)(mac1) == *(uint32_t *)(mac2)) && \
        (*(uint16_t *)((mac1) + 4) == *(uint16_t *)((mac2) + 4))) ? 0 : 1)
#define for_each_rss(i) for (i = 0; i < ha->num_rss; i++)

/*
 * Debug Related
 */

#ifdef QLNX_DEBUG

#define QL_DPRINT1(ha, x, ...)					\
	do {							\
		if ((ha)->dbg_level & 0x0001) {			\
			device_printf((ha)->pci_dev,		\
				"[%s:%d]" x,			\
				__func__, __LINE__,		\
				## __VA_ARGS__);		\
		}						\
	} while (0)

#define QL_DPRINT2(ha, x, ...)					\
	do {							\
		if ((ha)->dbg_level & 0x0002) {			\
			device_printf((ha)->pci_dev,		\
				"[%s:%d]" x,			\
				__func__, __LINE__,		\
				## __VA_ARGS__);		\
		}						\
	} while (0)

#define QL_DPRINT3(ha, x, ...)					\
	do {							\
		if ((ha)->dbg_level & 0x0004) {			\
			device_printf((ha)->pci_dev,		\
				"[%s:%d]" x,			\
				__func__, __LINE__,		\
				## __VA_ARGS__);		\
		}						\
	} while (0)

#define QL_DPRINT4(ha, x, ...)					\
	do {							\
		if ((ha)->dbg_level & 0x0008) {			\
			device_printf((ha)->pci_dev,		\
				"[%s:%d]" x,			\
				__func__, __LINE__,		\
				## __VA_ARGS__);		\
		}						\
	} while (0)

#define QL_DPRINT5(ha, x, ...)					\
	do {							\
		if ((ha)->dbg_level & 0x0010) {			\
			device_printf((ha)->pci_dev,		\
				"[%s:%d]" x,			\
				__func__, __LINE__,		\
				## __VA_ARGS__);		\
		}						\
	} while (0)

#define QL_DPRINT6(ha, x, ...)					\
	do {							\
		if ((ha)->dbg_level & 0x0020) {			\
			device_printf((ha)->pci_dev,		\
				"[%s:%d]" x,			\
				__func__, __LINE__,		\
				## __VA_ARGS__);		\
		}						\
	} while (0)

#define QL_DPRINT7(ha, x, ...)					\
	do {							\
		if ((ha)->dbg_level & 0x0040) {			\
			device_printf((ha)->pci_dev,		\
				"[%s:%d]" x,			\
				__func__, __LINE__,		\
				## __VA_ARGS__);		\
		}						\
	} while (0)

#define QL_DPRINT8(ha, x, ...)					\
	do {							\
		if ((ha)->dbg_level & 0x0080) {			\
			device_printf((ha)->pci_dev,		\
				"[%s:%d]" x,			\
				__func__, __LINE__,		\
				## __VA_ARGS__);		\
		}						\
	} while (0)

#define QL_DPRINT9(ha, x, ...)					\
	do {							\
		if ((ha)->dbg_level & 0x0100) {			\
			device_printf((ha)->pci_dev,		\
				"[%s:%d]" x,			\
				__func__, __LINE__,		\
				## __VA_ARGS__);		\
		}						\
	} while (0)

#define QL_DPRINT11(ha, x, ...)					\
	do {							\
		if ((ha)->dbg_level & 0x0400) {			\
			device_printf((ha)->pci_dev,		\
				"[%s:%d]" x,			\
				__func__, __LINE__,		\
				## __VA_ARGS__);		\
		}						\
	} while (0)

#define QL_DPRINT12(ha, x, ...)					\
	do {							\
		if ((ha)->dbg_level & 0x0800) {			\
			device_printf((ha)->pci_dev,		\
				"[%s:%d]" x,			\
				__func__, __LINE__,		\
				## __VA_ARGS__);		\
		}						\
	} while (0)

#define QL_DPRINT13(ha, x, ...)					\
	do {							\
		if ((ha)->dbg_level & 0x1000) {			\
			device_printf((ha)->pci_dev,		\
				"[%s:%d]" x,			\
				__func__, __LINE__,		\
				## __VA_ARGS__);		\
		}						\
	} while (0)

#else

#define QL_DPRINT1(ha, x, ...)
#define QL_DPRINT2(ha, x, ...)
#define QL_DPRINT3(ha, x, ...)
#define QL_DPRINT4(ha, x, ...)
#define QL_DPRINT5(ha, x, ...)
#define QL_DPRINT6(ha, x, ...)
#define QL_DPRINT7(ha, x, ...)
#define QL_DPRINT8(ha, x, ...)
#define QL_DPRINT9(ha, x, ...)
#define QL_DPRINT11(ha, x, ...)
#define QL_DPRINT12(ha, x, ...)
#define QL_DPRINT13(ha, x, ...)

#endif /* #ifdef QLNX_DEBUG */

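/*
 * Usage sketch (illustrative, not part of the original source): callers
 * pass the per-port qlnx_host_t and a printf-style format; output is
 * gated by the corresponding dbg_level bit, e.g.:
 *
 *	QL_DPRINT1(ha, "slowpath start failed [%d]\n", rc);
 */
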
#define QL_ASSERT(ha, x, y)     if (!(x)) panic y

#define QL_ERR_INJECT(ha, val)		((ha)->err_inject == (val))
#define QL_RESET_ERR_INJECT(ha, val) \
	do { if ((ha)->err_inject == (val)) (ha)->err_inject = 0; } while (0)
#define QL_ERR_INJCT_TX_INT_DIFF	0x0001
#define QL_ERR_INJCT_TX_INT_MBUF_NULL	0x0002

/*
 * exported functions
 */
extern int qlnx_make_cdev(qlnx_host_t *ha);
extern void qlnx_del_cdev(qlnx_host_t *ha);
extern int qlnx_grc_dump(qlnx_host_t *ha, uint32_t *num_dumped_dwords,
		int hwfn_index);
extern int qlnx_idle_chk(qlnx_host_t *ha, uint32_t *num_dumped_dwords,
		int hwfn_index);
extern uint8_t *qlnx_get_mac_addr(qlnx_host_t *ha);
extern void qlnx_fill_link(qlnx_host_t *ha, struct ecore_hwfn *hwfn,
		struct qlnx_link_output *if_link);
extern int qlnx_set_lldp_tlvx(qlnx_host_t *ha, qlnx_lldp_sys_tlvs_t *lldp_tlvs);
extern int qlnx_vf_device(qlnx_host_t *ha);
extern void qlnx_free_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info);
extern int qlnx_alloc_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info,
		u16 sb_id);

/*
 * Some OS-specific stuff
 */

#if (defined IFM_100G_SR4)
#define QLNX_IFM_100G_SR4 IFM_100G_SR4
#define QLNX_IFM_100G_LR4 IFM_100G_LR4
#define QLNX_IFM_100G_CR4 IFM_100G_CR4
#else
#define QLNX_IFM_100G_SR4 IFM_UNKNOWN
#define QLNX_IFM_100G_LR4 IFM_UNKNOWN
#define QLNX_IFM_100G_CR4 IFM_UNKNOWN
#endif /* #if (defined IFM_100G_SR4) */

#if (defined IFM_25G_SR)
#define QLNX_IFM_25G_SR IFM_25G_SR
#define QLNX_IFM_25G_CR IFM_25G_CR
#else
#define QLNX_IFM_25G_SR IFM_UNKNOWN
#define QLNX_IFM_25G_CR IFM_UNKNOWN
#endif /* #if (defined IFM_25G_SR) */

#if __FreeBSD_version < 1100000

#define QLNX_INC_IERRORS(ifp)		ifp->if_ierrors++
#define QLNX_INC_IQDROPS(ifp)		ifp->if_iqdrops++
#define QLNX_INC_IPACKETS(ifp)		ifp->if_ipackets++
#define QLNX_INC_OPACKETS(ifp)		ifp->if_opackets++
#define QLNX_INC_OBYTES(ifp, len)	ifp->if_obytes += len
#define QLNX_INC_IBYTES(ifp, len)	ifp->if_ibytes += len

747 
748 #define QLNX_INC_IERRORS(ifp)	if_inc_counter(ifp, IFCOUNTER_IERRORS, 1)
749 #define QLNX_INC_IQDROPS(ifp)	if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1)
750 #define QLNX_INC_IPACKETS(ifp)	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1)
751 #define QLNX_INC_OPACKETS(ifp)	if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1)
752 
753 #define QLNX_INC_OBYTES(ifp, len)	\
754 			if_inc_counter(ifp, IFCOUNTER_OBYTES, len)
755 #define QLNX_INC_IBYTES(ifp, len)	\
756 			if_inc_counter(ha->ifp, IFCOUNTER_IBYTES, len)
757 
758 #endif /* #if __FreeBSD_version < 1100000 */
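
/*
 * Usage sketch (illustrative, not part of the original source): the rx
 * completion path would account each delivered packet as:
 *
 *	QLNX_INC_IPACKETS(ifp);
 *	QLNX_INC_IBYTES(ifp, mp->m_pkthdr.len);
 */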

#define CQE_L3_PACKET(flags)    \
        ((((flags) & PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == e_l3_type_ipv4) || \
        (((flags) & PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == e_l3_type_ipv6))

#define CQE_IP_HDR_ERR(flags) \
        ((flags) & (PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK \
                << PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT))

#define CQE_L4_HAS_CSUM(flags) \
        ((flags) & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK \
                << PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT))

#define CQE_HAS_VLAN(flags) \
        ((flags) & (PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK \
                << PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT))

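/*
 * Usage sketch (illustrative, not part of the original source): an rx
 * handler could combine these predicates to set mbuf checksum state,
 * using the standard FreeBSD rx-checksum idiom; the names below are
 * hypothetical.
 *
 *	if (CQE_L3_PACKET(flags) && !CQE_IP_HDR_ERR(flags) &&
 *	    CQE_L4_HAS_CSUM(flags)) {
 *		mp->m_pkthdr.csum_flags |=
 *		    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
 *		mp->m_pkthdr.csum_data = 0xFFFF;
 *	}
 */
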
#ifndef QLNX_RDMA
#if defined(__i386__) || defined(__amd64__)

static __inline
void prefetch(void *x)
{
        __asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
}

#else
#define prefetch(x)
#endif
#endif

#endif /* #ifndef _QLNX_DEF_H_ */