/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

/*
 * File: qlnx_def.h
 * Author: David C Somayajulu, Cavium Inc., San Jose, CA 95131.
 */

#ifndef _QLNX_DEF_H_
#define _QLNX_DEF_H_

#define VER_SIZE 16

struct qlnx_ivec {
        uint32_t                rss_idx;
        void                    *ha;
        struct resource         *irq;
        void                    *handle;
        int                     irq_rid;
};

typedef struct qlnx_ivec qlnx_ivec_t;

//#define QLNX_MAX_RSS	30
#define QLNX_MAX_RSS	16
#define QLNX_MAX_TC	1

enum QLNX_STATE {
        QLNX_STATE_CLOSED,
        QLNX_STATE_OPEN,
};

#define HILO_U64(hi, lo)                ((((u64)(hi)) << 32) + (lo))

#define MAX_NUM_TC      8
#define MAX_NUM_PRI     8

#ifndef BITS_PER_BYTE
#define BITS_PER_BYTE	8
#endif /* #ifndef BITS_PER_BYTE */

/*
 * An RX ring buffer entry holds only a pointer to the receive data buffer
 * and its DMA mapping.
 */
struct sw_rx_data {
        void		*data;
	bus_dmamap_t	map;
	dma_addr_t	dma_addr;
};

enum qlnx_agg_state {
        QLNX_AGG_STATE_NONE  = 0,
        QLNX_AGG_STATE_START = 1,
        QLNX_AGG_STATE_ERROR = 2
};

struct qlnx_agg_info {
        /* rx_buf is a data buffer that can be placed on / consumed from the
         * rx bd chain. It serves two purposes: we preallocate a data buffer
         * for each aggregation when the interface is opened and place this
         * buffer on the rx-bd-ring when a TPA_START arrives. We never want
         * to be in a state where allocation fails, since we cannot reuse the
         * consumer buffer in the rx-chain: the firmware may still be writing
         * to it (the header needs to be modified for TPA).
         * The second purpose is to keep a pointer to the bd buffer during
         * the aggregation.
         */
        struct sw_rx_data       rx_buf;
        enum qlnx_agg_state     agg_state;
	uint16_t		placement_offset;
        struct mbuf             *mpf; /* first mbuf in chain */
        struct mbuf             *mpl; /* last mbuf in chain */
};
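
/*
 * Illustrative sketch (hypothetical helper, not part of this header): how the
 * tpa_info fields are typically used across an aggregation. While agg_state
 * is START, continuation buffers are appended to the mbuf chain tracked by
 * mpf/mpl; the real TPA start/cont/end handling lives in the driver proper.
 *
 *	static void
 *	qlnx_example_tpa_cont(struct qlnx_agg_info *tpa, struct mbuf *mp)
 *	{
 *		if (tpa->agg_state != QLNX_AGG_STATE_START)
 *			return;			(aggregation already aborted)
 *
 *		if (tpa->mpf == NULL)
 *			tpa->mpf = mp;		(first mbuf in the chain)
 *		else
 *			tpa->mpl->m_next = mp;	(link after the current tail)
 *		tpa->mpl = mp;
 *	}
 */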

#define RX_RING_SIZE_POW        13
#define RX_RING_SIZE            (1 << RX_RING_SIZE_POW)

#define TX_RING_SIZE_POW        14
#define TX_RING_SIZE            (1 << TX_RING_SIZE_POW)

struct qlnx_rx_queue {
        volatile __le16         *hw_cons_ptr;
        struct sw_rx_data       sw_rx_ring[RX_RING_SIZE];
        uint16_t		sw_rx_cons;
        uint16_t		sw_rx_prod;
        struct ecore_chain      rx_bd_ring;
        struct ecore_chain      rx_comp_ring;
        void __iomem            *hw_rxq_prod_addr;
	void 			*handle;

        /* LRO */
        struct qlnx_agg_info    tpa_info[ETH_TPA_MAX_AGGS_NUM];

        uint32_t		rx_buf_size;

        uint16_t		num_rx_buffers;
        uint16_t		rxq_id;

#ifdef QLNX_SOFT_LRO
	struct lro_ctrl		lro;
#endif
};

union db_prod {
        struct eth_db_data	data;
        uint32_t		raw;
};

struct sw_tx_bd {
        struct mbuf		*mp;
	bus_dmamap_t		map;
        uint8_t			flags;
	int			nsegs;

/* Set on the first BD descriptor when there is a split BD */
#define QLNX_TSO_SPLIT_BD               (1<<0)
};

#define QLNX_MAX_SEGMENTS		255
struct qlnx_tx_queue {
        int                     index; /* Queue index */
        volatile __le16         *hw_cons_ptr;
        struct sw_tx_bd         sw_tx_ring[TX_RING_SIZE];
        uint16_t		sw_tx_cons;
        uint16_t		sw_tx_prod;
        struct ecore_chain	tx_pbl;
        void __iomem            *doorbell_addr;
	void 			*handle;
        union db_prod           tx_db;

	bus_dma_segment_t	segs[QLNX_MAX_SEGMENTS];

        uint16_t		num_tx_buffers;
};

#define BD_UNMAP_ADDR(bd)	HILO_U64(le32toh((bd)->addr.hi), \
					le32toh((bd)->addr.lo))
#define BD_UNMAP_LEN(bd)	(le16toh((bd)->nbytes))

#define BD_SET_UNMAP_ADDR_LEN(bd, maddr, len) \
        do { \
                (bd)->addr.hi = htole32(U64_HI(maddr)); \
                (bd)->addr.lo = htole32(U64_LO(maddr)); \
                (bd)->nbytes = htole16(len); \
        } while (0)
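
/*
 * A minimal usage sketch (hypothetical helper, not part of this header):
 * BD_SET_UNMAP_ADDR_LEN() stores a bus address as little-endian hi/lo halves
 * plus a byte count in a BD, and BD_UNMAP_ADDR()/BD_UNMAP_LEN() read the same
 * fields back, e.g. for unmapping or accounting. The BD type name is assumed
 * here; any descriptor with addr.hi/addr.lo/nbytes works the same way.
 *
 *	static void
 *	qlnx_example_fill_bd(struct eth_tx_1st_bd *bd, bus_addr_t paddr,
 *	    uint16_t len)
 *	{
 *		BD_SET_UNMAP_ADDR_LEN(bd, paddr, len);
 *
 *		KASSERT(BD_UNMAP_ADDR(bd) == (uint64_t)paddr &&
 *		    BD_UNMAP_LEN(bd) == len, ("BD round trip mismatch"));
 *	}
 */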

#define QLNX_FP_MAX_SEGS	24

struct qlnx_fastpath {
        void			*edev;
        uint8_t			rss_id;
        struct ecore_sb_info    *sb_info;
        struct qlnx_rx_queue    *rxq;
        struct qlnx_tx_queue    *txq[MAX_NUM_TC];
	char			name[64];

	struct mtx		tx_mtx;
	char			tx_mtx_name[32];
	struct buf_ring		*tx_br;

	struct task		fp_task;
	struct taskqueue	*fp_taskqueue;

	/* transmit statistics */
	uint64_t		tx_pkts_processed;
	uint64_t		tx_pkts_freed;
	uint64_t		tx_pkts_transmitted;
	uint64_t		tx_pkts_completed;
	uint64_t		tx_lso_wnd_min_len;
	uint64_t		tx_defrag;
	uint64_t		tx_nsegs_gt_elem_left;
	uint32_t		tx_tso_max_nsegs;
	uint32_t		tx_tso_min_nsegs;
	uint32_t		tx_tso_max_pkt_len;
	uint32_t		tx_tso_min_pkt_len;
	uint64_t		tx_pkts[QLNX_FP_MAX_SEGS];
	uint64_t		err_tx_nsegs_gt_elem_left;
	uint64_t		err_tx_dmamap_create;
	uint64_t		err_tx_defrag_dmamap_load;
	uint64_t		err_tx_non_tso_max_seg;
	uint64_t		err_tx_dmamap_load;
	uint64_t		err_tx_defrag;
	uint64_t		err_tx_free_pkt_null;
	uint64_t		err_tx_cons_idx_conflict;

	uint64_t		lro_cnt_64;
	uint64_t		lro_cnt_128;
	uint64_t		lro_cnt_256;
	uint64_t		lro_cnt_512;
	uint64_t		lro_cnt_1024;

	/* receive statistics */
	uint64_t		rx_pkts;
	uint64_t		tpa_start;
	uint64_t		tpa_cont;
	uint64_t		tpa_end;
	uint64_t		err_m_getcl;
	uint64_t		err_m_getjcl;
	uint64_t		err_rx_hw_errors;
	uint64_t		err_rx_alloc_errors;
	uint64_t		err_rx_jumbo_chain_pkts;
	uint64_t		err_rx_mp_null;
	uint64_t		err_rx_tpa_invalid_agg_num;
};

struct qlnx_update_vport_params {
        uint8_t			vport_id;
        uint8_t			update_vport_active_rx_flg;
        uint8_t			vport_active_rx_flg;
        uint8_t			update_vport_active_tx_flg;
        uint8_t			vport_active_tx_flg;
        uint8_t			update_inner_vlan_removal_flg;
        uint8_t			inner_vlan_removal_flg;
        struct ecore_rss_params	*rss_params;
	struct ecore_sge_tpa_params *sge_tpa_params;
};

/*
 * link related
 */
struct qlnx_link_output {
	bool		link_up;
	uint32_t	supported_caps;
	uint32_t	advertised_caps;
	uint32_t	link_partner_caps;
	uint32_t	speed; /* In Mb/s */
	bool		autoneg;
	uint32_t	media_type;
	uint32_t	duplex;
};
typedef struct qlnx_link_output qlnx_link_output_t;

#define QLNX_LINK_DUPLEX			0x0001

#define QLNX_LINK_CAP_FIBRE			0x0001
#define QLNX_LINK_CAP_Autoneg			0x0002
#define QLNX_LINK_CAP_Pause			0x0004
#define QLNX_LINK_CAP_Asym_Pause		0x0008
#define QLNX_LINK_CAP_1000baseT_Half		0x0010
#define QLNX_LINK_CAP_1000baseT_Full		0x0020
#define QLNX_LINK_CAP_10000baseKR_Full		0x0040
#define QLNX_LINK_CAP_25000baseKR_Full		0x0080
#define QLNX_LINK_CAP_40000baseLR4_Full		0x0100
#define QLNX_LINK_CAP_50000baseKR2_Full		0x0200
#define QLNX_LINK_CAP_100000baseKR4_Full	0x0400

/* Function definitions */

#define XMIT_PLAIN              0
#define XMIT_L4_CSUM            (1 << 0)
#define XMIT_LSO                (1 << 1)

#define CQE_FLAGS_ERR   (PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<       \
                         PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT |       \
                         PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<     \
                         PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT |     \
                         PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK << \
                         PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT | \
                         PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK << \
                         PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT)

#define RX_COPY_THRESH          92
#define ETH_MAX_PACKET_SIZE     1500

#define QLNX_MFW_VERSION_LENGTH 32
#define QLNX_STORMFW_VERSION_LENGTH 32

#define QLNX_TX_ELEM_RESERVE	2

#define QLNX_TPA_MAX_AGG_BUFFERS             (20)

#define QLNX_MAX_NUM_MULTICAST_ADDRS	ECORE_MAX_MC_ADDRS
typedef struct _qlnx_mcast {
        uint16_t        rsrvd;
        uint8_t         addr[6];
} __packed qlnx_mcast_t;

/*
 * The adapter structure holds the hardware-independent state of the
 * PCI function.
 */
struct qlnx_host {
	/* interface to ecore */

	struct ecore_dev	cdev;

	uint32_t		state;

	/* some flags */
        volatile struct {
                volatile uint32_t
			callout_init		:1,
                        slowpath_start		:1,
                        parent_tag		:1,
                        lock_init		:1;
        } flags;

	/* interface to the OS */

	device_t		pci_dev;
	uint8_t			pci_func;
	uint8_t			dev_unit;

	struct ifnet		*ifp;
	int			if_flags;
	volatile int		link_up;
	struct ifmedia		media;
	uint16_t		max_frame_size;

	struct cdev		*ioctl_dev;

	/* resources */
	struct resource		*pci_reg;
	int			reg_rid;

	struct resource		*pci_dbells;
	int			dbells_rid;
	uint64_t		dbells_phys_addr;
	uint32_t		dbells_size;

	struct resource		*msix_bar;
	int			msix_rid;

	int			msix_count;

	struct mtx		hw_lock;

	/* debug */

	uint32_t		dbg_level;
	uint32_t		dp_level;
	uint32_t		dp_module;

	/* misc */
	uint8_t			mfw_ver[QLNX_MFW_VERSION_LENGTH];
	uint8_t			stormfw_ver[QLNX_STORMFW_VERSION_LENGTH];
	uint32_t		flash_size;

	/* dma related */

	bus_dma_tag_t		parent_tag;
	bus_dma_tag_t		tx_tag;
	bus_dma_tag_t		rx_tag;

	struct ecore_sb_info	sb_array[QLNX_MAX_RSS];
	struct qlnx_rx_queue	rxq_array[QLNX_MAX_RSS];
	struct qlnx_tx_queue	txq_array[(QLNX_MAX_RSS * MAX_NUM_TC)];
	struct qlnx_fastpath	fp_array[QLNX_MAX_RSS];

	/* tx related */
	struct callout		tx_callout;
	struct mtx		tx_lock;
	uint32_t		txr_idx;

	/* rx related */
	uint32_t		rx_pkt_threshold;
	uint32_t		rx_jumbo_buf_eq_mtu;

	/* slow path related */
	struct resource		*sp_irq[MAX_HWFNS_PER_DEVICE];
	void			*sp_handle[MAX_HWFNS_PER_DEVICE];
	int			sp_irq_rid[MAX_HWFNS_PER_DEVICE];
	struct task		sp_task[MAX_HWFNS_PER_DEVICE];
	struct taskqueue	*sp_taskqueue[MAX_HWFNS_PER_DEVICE];

	struct callout		qlnx_callout;

	/* fast path related */
	int			num_rss;
	int			num_tc;

#define QLNX_MAX_TSS_CNT(ha)	((ha->num_rss) * (ha->num_tc))

	qlnx_ivec_t		irq_vec[QLNX_MAX_RSS];

	uint8_t			filter;
	uint32_t		nmcast;
	qlnx_mcast_t		mcast[QLNX_MAX_NUM_MULTICAST_ADDRS];
	struct ecore_filter_mcast ecore_mcast;
	uint8_t			primary_mac[ETH_ALEN];
	uint8_t			prio_to_tc[MAX_NUM_PRI];
	struct ecore_eth_stats	hw_stats;
	struct ecore_rss_params	rss_params;
	uint32_t		rx_buf_size;
	bool			rx_csum_offload;

	uint32_t		rx_coalesce_usecs;
	uint32_t		tx_coalesce_usecs;

	/* link related */
	qlnx_link_output_t	if_link;

	/* global counters */
	uint64_t		sp_interrupts;
	uint64_t		err_illegal_intr;
	uint64_t		err_fp_null;
	uint64_t		err_get_proto_invalid_type;

	/* grcdump related */
	uint32_t		err_inject;
	uint32_t		grcdump_taken;
	uint32_t		grcdump_dwords[QLNX_MAX_HW_FUNCS];
	uint32_t		grcdump_size[QLNX_MAX_HW_FUNCS];
	void			*grcdump[QLNX_MAX_HW_FUNCS];

	uint32_t		idle_chk_taken;
	uint32_t		idle_chk_dwords[QLNX_MAX_HW_FUNCS];
	uint32_t		idle_chk_size[QLNX_MAX_HW_FUNCS];
	void			*idle_chk[QLNX_MAX_HW_FUNCS];

	/* storm stats related */
#define QLNX_STORM_STATS_TOTAL \
		(QLNX_MAX_HW_FUNCS * QLNX_STORM_STATS_SAMPLES_PER_HWFN)
	qlnx_storm_stats_t	storm_stats[QLNX_STORM_STATS_TOTAL];
	uint32_t		storm_stats_index;
	uint32_t		storm_stats_enable;

	uint32_t		personality;
};

typedef struct qlnx_host qlnx_host_t;

/* note that align has to be a power of 2 */
#define QL_ALIGN(size, align) (((size) + ((align) - 1)) & ~((align) - 1))
#define QL_MIN(x, y) (((x) < (y)) ? (x) : (y))
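
/*
 * Quick sanity check of the alignment math (illustrative values only): with
 * a power-of-two alignment, QL_ALIGN() rounds up to the next multiple.
 *
 *	QL_ALIGN(100, 64)  == 128
 *	QL_ALIGN(128, 64)  == 128
 *	QL_MIN(9000, 4096) == 4096
 */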

#define QL_RUNNING(ifp) \
		((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) == \
			IFF_DRV_RUNNING)

#define QLNX_MAX_MTU			9000
#define QLNX_MAX_SEGMENTS_NON_TSO	(ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1)
#define QLNX_MAX_TSO_FRAME_SIZE		((64 * 1024 - 1) + 22)

#define QL_MAC_CMP(mac1, mac2)    \
        ((((*(uint32_t *) mac1) == (*(uint32_t *) mac2) && \
        (*(uint16_t *)(mac1 + 4)) == (*(uint16_t *)(mac2 + 4)))) ? 0 : 1)

#define for_each_rss(i) for (i = 0; i < ha->num_rss; i++)

/*
 * Debug Related
 */

#ifdef QLNX_DEBUG

#define QL_DPRINT1(ha, x)       if (ha->dbg_level & 0x0001) device_printf x
#define QL_DPRINT2(ha, x)       if (ha->dbg_level & 0x0002) device_printf x
#define QL_DPRINT3(ha, x)       if (ha->dbg_level & 0x0004) device_printf x
#define QL_DPRINT4(ha, x)       if (ha->dbg_level & 0x0008) device_printf x
#define QL_DPRINT5(ha, x)       if (ha->dbg_level & 0x0010) device_printf x
#define QL_DPRINT6(ha, x)       if (ha->dbg_level & 0x0020) device_printf x
#define QL_DPRINT7(ha, x)       if (ha->dbg_level & 0x0040) device_printf x
#define QL_DPRINT8(ha, x)       if (ha->dbg_level & 0x0080) device_printf x
#define QL_DPRINT9(ha, x)       if (ha->dbg_level & 0x0100) device_printf x
#define QL_DPRINT11(ha, x)      if (ha->dbg_level & 0x0400) device_printf x
#define QL_DPRINT12(ha, x)      if (ha->dbg_level & 0x0800) device_printf x
#define QL_DPRINT13(ha, x)      if (ha->dbg_level & 0x1000) device_printf x
#define QL_DPRINT14(ha, x)      if (ha->dbg_level & 0x2000) device_printf x

#else

#define QL_DPRINT1(ha, x)
#define QL_DPRINT2(ha, x)
#define QL_DPRINT3(ha, x)
#define QL_DPRINT4(ha, x)
#define QL_DPRINT5(ha, x)
#define QL_DPRINT6(ha, x)
#define QL_DPRINT7(ha, x)
#define QL_DPRINT8(ha, x)
#define QL_DPRINT9(ha, x)
#define QL_DPRINT11(ha, x)
#define QL_DPRINT12(ha, x)
#define QL_DPRINT13(ha, x)
#define QL_DPRINT14(ha, x)

#endif /* #ifdef QLNX_DEBUG */
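
/*
 * Usage sketch for the QL_DPRINT* macros: the second argument is a complete,
 * parenthesized device_printf() argument list, so a (hypothetical) call site
 * looks like the following and compiles to nothing when QLNX_DEBUG is unset.
 *
 *	QL_DPRINT1(ha, (ha->pci_dev, "%s: link is %s\n", __func__,
 *	    ha->if_link.link_up ? "up" : "down"));
 */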

#define QL_ASSERT(ha, x, y)     if (!(x)) panic y

#define QL_ERR_INJECT(ha, val)		(ha->err_inject == val)
#define QL_RESET_ERR_INJECT(ha, val)	{if (ha->err_inject == val) ha->err_inject = 0;}
#define QL_ERR_INJCT_TX_INT_DIFF	0x0001
#define QL_ERR_INJCT_TX_INT_MBUF_NULL	0x0002
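
/*
 * Illustrative sketch (hypothetical code, not part of the driver): err_inject
 * is set by debug/test code to force a specific failure path; a consumer
 * tests for its value with QL_ERR_INJECT() and clears it with
 * QL_RESET_ERR_INJECT() so the fault fires only once.
 *
 *	if (QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL)) {
 *		mp = NULL;			(simulate a lost mbuf)
 *		QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL);
 *	}
 */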

/*
 * exported functions
 */
extern int qlnx_make_cdev(qlnx_host_t *ha);
extern void qlnx_del_cdev(qlnx_host_t *ha);
extern int qlnx_grc_dump(qlnx_host_t *ha, uint32_t *num_dumped_dwords,
		int hwfn_index);
extern int qlnx_idle_chk(qlnx_host_t *ha, uint32_t *num_dumped_dwords,
		int hwfn_index);
extern uint8_t *qlnx_get_mac_addr(qlnx_host_t *ha);
extern void qlnx_fill_link(struct ecore_hwfn *hwfn,
                          struct qlnx_link_output *if_link);

/*
 * Some OS specific stuff
 */

#if (defined IFM_100G_SR4)
#define QLNX_IFM_100G_SR4 IFM_100G_SR4
#define QLNX_IFM_100G_LR4 IFM_100G_LR4
#define QLNX_IFM_100G_CR4 IFM_100G_CR4
#else
#define QLNX_IFM_100G_SR4 IFM_UNKNOWN
#define QLNX_IFM_100G_LR4 IFM_UNKNOWN
#define QLNX_IFM_100G_CR4 IFM_UNKNOWN
#endif

#if (defined IFM_25G_SR)
#define QLNX_IFM_25G_SR IFM_25G_SR
#define QLNX_IFM_25G_CR IFM_25G_CR
#else
#define QLNX_IFM_25G_SR IFM_UNKNOWN
#define QLNX_IFM_25G_CR IFM_UNKNOWN
#endif

#if __FreeBSD_version < 1100000

#define QLNX_INC_IERRORS(ifp)		ifp->if_ierrors++
#define QLNX_INC_IQDROPS(ifp)		ifp->if_iqdrops++
#define QLNX_INC_IPACKETS(ifp)		ifp->if_ipackets++
#define QLNX_INC_OPACKETS(ifp)		ifp->if_opackets++
#define QLNX_INC_OBYTES(ifp, len)	ifp->if_obytes += len
#define QLNX_INC_IBYTES(ifp, len)	ifp->if_ibytes += len

#else

#define QLNX_INC_IERRORS(ifp)	if_inc_counter(ifp, IFCOUNTER_IERRORS, 1)
#define QLNX_INC_IQDROPS(ifp)	if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1)
#define QLNX_INC_IPACKETS(ifp)	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1)
#define QLNX_INC_OPACKETS(ifp)	if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1)

#define QLNX_INC_OBYTES(ifp, len)	\
			if_inc_counter(ifp, IFCOUNTER_OBYTES, len)
#define QLNX_INC_IBYTES(ifp, len)	\
			if_inc_counter(ifp, IFCOUNTER_IBYTES, len)

#endif /* #if __FreeBSD_version < 1100000 */

#define CQE_L3_PACKET(flags)    \
        ((((flags) & PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == e_l3Type_ipv4) || \
        (((flags) & PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == e_l3Type_ipv6))

#define CQE_IP_HDR_ERR(flags) \
        ((flags) & (PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK \
                << PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT))

#define CQE_L4_HAS_CSUM(flags) \
        ((flags) & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK \
                << PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT))

#define CQE_HAS_VLAN(flags) \
        ((flags) & (PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK \
                << PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT))
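
/*
 * Illustrative sketch (hypothetical helper): how the CQE parsing-flags macros
 * above fit together on the RX path. A completion whose flags match
 * CQE_FLAGS_ERR would be dropped; otherwise the L3/L4 bits decide which
 * CSUM_* flags the mbuf gets. The real logic lives in the RX handler.
 *
 *	static void
 *	qlnx_example_rx_csum(uint16_t flags, struct mbuf *mp)
 *	{
 *		if (!CQE_L3_PACKET(flags) || CQE_IP_HDR_ERR(flags))
 *			return;			(not IP, or bad IP header)
 *
 *		mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;
 *
 *		if (CQE_L4_HAS_CSUM(flags)) {
 *			mp->m_pkthdr.csum_flags |=
 *			    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
 *			mp->m_pkthdr.csum_data = 0xffff;
 *		}
 *	}
 */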

#endif /* #ifndef _QLNX_DEF_H_ */