/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 * File: qlnx_def.h
 * Author: David C Somayajulu, Cavium Inc., San Jose, CA 95131.
 */

#ifndef _QLNX_DEF_H_
#define _QLNX_DEF_H_

#define VER_SIZE	16

struct qlnx_ivec {
	uint32_t		rss_idx;
	void			*ha;
	struct resource		*irq;
	void			*handle;
	int			irq_rid;
};

typedef struct qlnx_ivec qlnx_ivec_t;

//#define QLNX_MAX_RSS		30
#define QLNX_MAX_VF_RSS		4
#define QLNX_MAX_RSS		36
#define QLNX_DEFAULT_RSS	16
#define QLNX_MAX_TC		1

enum QLNX_STATE {
	QLNX_STATE_CLOSED,
	QLNX_STATE_OPEN,
};

#define HILO_U64(hi, lo)	((((u64)(hi)) << 32) + (lo))
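
/*
 * Worked example: with hi = 0x00000001 and lo = 0x80000000,
 * HILO_U64(hi, lo) evaluates to 0x0000000180000000 -- the reassembled
 * 64-bit value used by BD_UNMAP_ADDR() below to recover DMA addresses
 * split across 32-bit descriptor fields.
 */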

#define MAX_NUM_TC	8
#define MAX_NUM_PRI	8

#ifndef BITS_PER_BYTE
#define BITS_PER_BYTE	8
#endif /* #ifndef BITS_PER_BYTE */

/*
 * The RX ring buffer contains pointers to kmalloc()'ed data only.
 */
struct sw_rx_data {
	void		*data;
	bus_dmamap_t	map;
	dma_addr_t	dma_addr;
};

enum qlnx_agg_state {
	QLNX_AGG_STATE_NONE  = 0,
	QLNX_AGG_STATE_START = 1,
	QLNX_AGG_STATE_ERROR = 2
};

struct qlnx_agg_info {
	/* rx_buf is a data buffer that can be placed/consumed from the rx bd
	 * chain. It has two purposes: we preallocate the data buffer for each
	 * aggregation when we open the interface and place this buffer on the
	 * rx-bd-ring when we receive TPA_START. We do not want to be in a
	 * state where allocation fails, since we cannot reuse the consumer
	 * buffer in the rx-chain while the FW may still be writing to it
	 * (the header needs to be modified for TPA).
	 * The second purpose is to keep a pointer to the bd buffer during
	 * aggregation.
	 */
	struct sw_rx_data	rx_buf;
	enum qlnx_agg_state	agg_state;
	uint16_t		placement_offset;
	struct mbuf		*mpf; /* first mbuf in chain */
	struct mbuf		*mpl; /* last mbuf in chain */
};

#define RX_RING_SIZE_POW	13
#define RX_RING_SIZE		(1 << RX_RING_SIZE_POW)

#define TX_RING_SIZE_POW	14
#define TX_RING_SIZE		(1 << TX_RING_SIZE_POW)
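/* RX_RING_SIZE is 8192 entries; TX_RING_SIZE is 16384 entries. */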

struct qlnx_rx_queue {
	volatile __le16		*hw_cons_ptr;
	struct sw_rx_data	sw_rx_ring[RX_RING_SIZE];
	uint16_t		sw_rx_cons;
	uint16_t		sw_rx_prod;
	struct ecore_chain	rx_bd_ring;
	struct ecore_chain	rx_comp_ring;
	void __iomem		*hw_rxq_prod_addr;
	void			*handle;

	/* LRO */
	struct qlnx_agg_info	tpa_info[ETH_TPA_MAX_AGGS_NUM];

	uint32_t		rx_buf_size;

	uint16_t		num_rx_buffers;
	uint16_t		rxq_id;

#ifdef QLNX_SOFT_LRO
	struct lro_ctrl		lro;
#endif
};

union db_prod {
	struct eth_db_data	data;
	uint32_t		raw;
};

struct sw_tx_bd {
	struct mbuf		*mp;
	bus_dmamap_t		map;
	uint8_t			flags;
	int			nsegs;

/* Set on the first BD descriptor when there is a split BD */
#define QLNX_TSO_SPLIT_BD	(1<<0)
};

#define QLNX_MAX_SEGMENTS	255
struct qlnx_tx_queue {
	int			index; /* Queue index */
	volatile __le16		*hw_cons_ptr;
	struct sw_tx_bd		sw_tx_ring[TX_RING_SIZE];
	uint16_t		sw_tx_cons;
	uint16_t		sw_tx_prod;
	struct ecore_chain	tx_pbl;
	void __iomem		*doorbell_addr;
	void			*handle;
	union db_prod		tx_db;

	bus_dma_segment_t	segs[QLNX_MAX_SEGMENTS];

	uint16_t		num_tx_buffers;
};

#define BD_UNMAP_ADDR(bd)	HILO_U64(le32toh((bd)->addr.hi), \
					 le32toh((bd)->addr.lo))
#define BD_UNMAP_LEN(bd)	(le16toh((bd)->nbytes))

#define BD_SET_UNMAP_ADDR_LEN(bd, maddr, len) \
	do { \
		(bd)->addr.hi = htole32(U64_HI(maddr)); \
		(bd)->addr.lo = htole32(U64_LO(maddr)); \
		(bd)->nbytes = htole16(len); \
	} while (0)
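
/*
 * Illustrative (hedged) use of the BD accessors above; "txbd", "paddr" and
 * "len" are hypothetical locals, with paddr/len obtained from a successful
 * bus_dmamap_load():
 *
 *	BD_SET_UNMAP_ADDR_LEN(txbd, paddr, len);
 *	...
 *	bus_addr_t unmap_addr = BD_UNMAP_ADDR(txbd);
 *	uint16_t   unmap_len  = BD_UNMAP_LEN(txbd);
 */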

#define QLNX_FP_MAX_SEGS	24

struct qlnx_fastpath {
	void			*edev;
	uint8_t			rss_id;
	struct ecore_sb_info	*sb_info;
	struct qlnx_rx_queue	*rxq;
	struct qlnx_tx_queue	*txq[MAX_NUM_TC];
	char			name[64];

	struct mtx		tx_mtx;
	char			tx_mtx_name[32];
	struct buf_ring		*tx_br;
	uint32_t		tx_ring_full;

	struct task		fp_task;
	struct taskqueue	*fp_taskqueue;

	/* transmit statistics */
	uint64_t		tx_pkts_processed;
	uint64_t		tx_pkts_freed;
	uint64_t		tx_pkts_transmitted;
	uint64_t		tx_pkts_completed;
	uint64_t		tx_tso_pkts;
	uint64_t		tx_non_tso_pkts;

#ifdef QLNX_TRACE_PERF_DATA
	uint64_t		tx_pkts_trans_ctx;
	uint64_t		tx_pkts_compl_ctx;
	uint64_t		tx_pkts_trans_fp;
	uint64_t		tx_pkts_compl_fp;
	uint64_t		tx_pkts_compl_intr;
#endif

	uint64_t		tx_lso_wnd_min_len;
	uint64_t		tx_defrag;
	uint64_t		tx_nsegs_gt_elem_left;
	uint32_t		tx_tso_max_nsegs;
	uint32_t		tx_tso_min_nsegs;
	uint32_t		tx_tso_max_pkt_len;
	uint32_t		tx_tso_min_pkt_len;
	uint64_t		tx_pkts[QLNX_FP_MAX_SEGS];

#ifdef QLNX_TRACE_PERF_DATA
	uint64_t		tx_pkts_hist[QLNX_FP_MAX_SEGS];
	uint64_t		tx_comInt[QLNX_FP_MAX_SEGS];
	uint64_t		tx_pkts_q[QLNX_FP_MAX_SEGS];
#endif

	uint64_t		err_tx_nsegs_gt_elem_left;
	uint64_t		err_tx_dmamap_create;
	uint64_t		err_tx_defrag_dmamap_load;
	uint64_t		err_tx_non_tso_max_seg;
	uint64_t		err_tx_dmamap_load;
	uint64_t		err_tx_defrag;
	uint64_t		err_tx_free_pkt_null;
	uint64_t		err_tx_cons_idx_conflict;

	uint64_t		lro_cnt_64;
	uint64_t		lro_cnt_128;
	uint64_t		lro_cnt_256;
	uint64_t		lro_cnt_512;
	uint64_t		lro_cnt_1024;

	/* receive statistics */
	uint64_t		rx_pkts;
	uint64_t		tpa_start;
	uint64_t		tpa_cont;
	uint64_t		tpa_end;
	uint64_t		err_m_getcl;
	uint64_t		err_m_getjcl;
	uint64_t		err_rx_hw_errors;
	uint64_t		err_rx_alloc_errors;
	uint64_t		err_rx_jumbo_chain_pkts;
	uint64_t		err_rx_mp_null;
	uint64_t		err_rx_tpa_invalid_agg_num;
};

struct qlnx_update_vport_params {
	uint8_t			vport_id;
	uint8_t			update_vport_active_rx_flg;
	uint8_t			vport_active_rx_flg;
	uint8_t			update_vport_active_tx_flg;
	uint8_t			vport_active_tx_flg;
	uint8_t			update_inner_vlan_removal_flg;
	uint8_t			inner_vlan_removal_flg;
	struct ecore_rss_params		*rss_params;
	struct ecore_sge_tpa_params	*sge_tpa_params;
};

/*
 * link related
 */
struct qlnx_link_output {
	bool		link_up;
	uint32_t	supported_caps;
	uint32_t	advertised_caps;
	uint32_t	link_partner_caps;
	uint32_t	speed; /* In Mb/s */
	bool		autoneg;
	uint32_t	media_type;
	uint32_t	duplex;
};
typedef struct qlnx_link_output qlnx_link_output_t;

#define QLNX_LINK_DUPLEX			0x0001

#define QLNX_LINK_CAP_FIBRE			0x0001
#define QLNX_LINK_CAP_Autoneg			0x0002
#define QLNX_LINK_CAP_Pause			0x0004
#define QLNX_LINK_CAP_Asym_Pause		0x0008
#define QLNX_LINK_CAP_1000baseT_Half		0x0010
#define QLNX_LINK_CAP_1000baseT_Full		0x0020
#define QLNX_LINK_CAP_10000baseKR_Full		0x0040
#define QLNX_LINK_CAP_25000baseKR_Full		0x0080
#define QLNX_LINK_CAP_40000baseLR4_Full		0x0100
#define QLNX_LINK_CAP_50000baseKR2_Full		0x0200
#define QLNX_LINK_CAP_100000baseKR4_Full	0x0400

/* Function definitions */

#define XMIT_PLAIN	0
#define XMIT_L4_CSUM	(1 << 0)
#define XMIT_LSO	(1 << 1)

#define CQE_FLAGS_ERR	(PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK << \
			 PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT | \
			 PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK << \
			 PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT | \
			 PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK << \
			 PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT | \
			 PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK << \
			 PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT)
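
/*
 * Illustrative (hedged) error check on a received CQE; "pars_flags" is a
 * hypothetical local holding the CQE's little-endian parsing/error flags:
 *
 *	if (le16toh(pars_flags) & CQE_FLAGS_ERR)
 *		treat the frame as bad and count it in err_rx_hw_errors;
 */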

#define RX_COPY_THRESH			92
#define ETH_MAX_PACKET_SIZE		1500

#define QLNX_MFW_VERSION_LENGTH		32
#define QLNX_STORMFW_VERSION_LENGTH	32

#define QLNX_TX_ELEM_RESERVE		2
#define QLNX_TX_ELEM_THRESH		128
#define QLNX_TX_ELEM_MAX_THRESH		512
#define QLNX_TX_ELEM_MIN_THRESH		32
#define QLNX_TX_COMPL_THRESH		32

#define QLNX_TPA_MAX_AGG_BUFFERS	(20)

#define QLNX_MAX_NUM_MULTICAST_ADDRS	ECORE_MAX_MC_ADDRS
typedef struct _qlnx_mcast {
	uint16_t	rsrvd;
	uint8_t		addr[6];
} __packed qlnx_mcast_t;

typedef struct _qlnx_vf_attr {
	uint8_t		mac_addr[ETHER_ADDR_LEN];
	uint32_t	num_rings;
} qlnx_vf_attr_t;

typedef struct _qlnx_sriov_task {
	struct task		pf_task;
	struct taskqueue	*pf_taskqueue;

#define QLNX_SRIOV_TASK_FLAGS_VF_PF_MSG		0x01
#define QLNX_SRIOV_TASK_FLAGS_VF_FLR_UPDATE	0x02
#define QLNX_SRIOV_TASK_FLAGS_BULLETIN_UPDATE	0x04
	volatile uint32_t	flags;

} qlnx_sriov_task_t;

/*
 * The adapter structure contains the hardware-independent state of the
 * PCI function.
 */
struct qlnx_host {
	/* interface to ecore */

	struct ecore_dev	cdev;

	uint32_t		state;

	/* some flags */
	volatile struct {
		volatile uint32_t
			hw_init			:1,
			callout_init		:1,
			slowpath_start		:1,
			parent_tag		:1,
			lock_init		:1;
	} flags;

	/* interface to the OS */

	device_t		pci_dev;
	uint8_t			pci_func;
	uint8_t			dev_unit;
	uint16_t		device_id;

	if_t			ifp;
	int			if_flags;
	volatile int		link_up;
	struct ifmedia		media;
	uint16_t		max_frame_size;

	struct cdev		*ioctl_dev;

	/* resources */
	struct resource		*pci_reg;
	int			reg_rid;

	struct resource		*pci_dbells;
	int			dbells_rid;
	uint64_t		dbells_phys_addr;
	uint32_t		dbells_size;

	struct resource		*msix_bar;
	int			msix_rid;

	int			msix_count;

	struct sx		hw_lock;

	/* debug */

	uint32_t		dbg_level;
	uint32_t		dbg_trace_lro_cnt;
	uint32_t		dbg_trace_tso_pkt_len;
	uint32_t		dp_level;
	uint32_t		dp_module;

	/* misc */
	uint8_t			mfw_ver[QLNX_MFW_VERSION_LENGTH];
	uint8_t			stormfw_ver[QLNX_STORMFW_VERSION_LENGTH];
	uint32_t		flash_size;

	/* dma related */

	bus_dma_tag_t		parent_tag;
	bus_dma_tag_t		tx_tag;
	bus_dma_tag_t		rx_tag;

	struct ecore_sb_info	sb_array[QLNX_MAX_RSS];
	struct qlnx_rx_queue	rxq_array[QLNX_MAX_RSS];
	struct qlnx_tx_queue	txq_array[(QLNX_MAX_RSS * MAX_NUM_TC)];
	struct qlnx_fastpath	fp_array[QLNX_MAX_RSS];

	/* tx related */
	struct callout		tx_callout;
	uint32_t		txr_idx;

	/* rx related */
	uint32_t		rx_pkt_threshold;
	uint32_t		rx_jumbo_buf_eq_mtu;

	/* slow path related */
	struct resource		*sp_irq[MAX_HWFNS_PER_DEVICE];
	void			*sp_handle[MAX_HWFNS_PER_DEVICE];
	int			sp_irq_rid[MAX_HWFNS_PER_DEVICE];
	struct task		sp_task[MAX_HWFNS_PER_DEVICE];
	struct taskqueue	*sp_taskqueue[MAX_HWFNS_PER_DEVICE];

	struct callout		qlnx_callout;

	/* fast path related */
	int			num_rss;
	int			num_tc;

#define QLNX_MAX_TSS_CNT(ha)	(((ha)->num_rss) * ((ha)->num_tc))

	qlnx_ivec_t		irq_vec[QLNX_MAX_RSS];

	uint8_t			filter;
	uint32_t		nmcast;
	qlnx_mcast_t		mcast[QLNX_MAX_NUM_MULTICAST_ADDRS];
	struct ecore_filter_mcast ecore_mcast;
	uint8_t			primary_mac[ETH_ALEN];
	uint8_t			prio_to_tc[MAX_NUM_PRI];
	struct ecore_eth_stats	hw_stats;
	struct ecore_rss_params	rss_params;
	uint32_t		rx_buf_size;
	bool			rx_csum_offload;

	uint32_t		rx_coalesce_usecs;
	uint32_t		tx_coalesce_usecs;

	/* link related */
	qlnx_link_output_t	if_link;

	/* global counters */
	uint64_t		sp_interrupts;
	uint64_t		err_illegal_intr;
	uint64_t		err_fp_null;
	uint64_t		err_get_proto_invalid_type;

	/* error recovery related */
	uint32_t		error_recovery;
	struct task		err_task;
	struct taskqueue	*err_taskqueue;

	/* grcdump related */
	uint32_t		err_inject;
	uint32_t		grcdump_taken;
	uint32_t		grcdump_dwords[QLNX_MAX_HW_FUNCS];
	uint32_t		grcdump_size[QLNX_MAX_HW_FUNCS];
	void			*grcdump[QLNX_MAX_HW_FUNCS];

	uint32_t		idle_chk_taken;
	uint32_t		idle_chk_dwords[QLNX_MAX_HW_FUNCS];
	uint32_t		idle_chk_size[QLNX_MAX_HW_FUNCS];
	void			*idle_chk[QLNX_MAX_HW_FUNCS];

	/* storm stats related */
#define QLNX_STORM_STATS_TOTAL \
		(QLNX_MAX_HW_FUNCS * QLNX_STORM_STATS_SAMPLES_PER_HWFN)
	qlnx_storm_stats_t	storm_stats[QLNX_STORM_STATS_TOTAL];
	uint32_t		storm_stats_index;
	uint32_t		storm_stats_enable;
	uint32_t		storm_stats_gather;

	uint32_t		personality;

	uint16_t		sriov_initialized;
	uint16_t		num_vfs;
	qlnx_vf_attr_t		*vf_attr;
	qlnx_sriov_task_t	sriov_task[MAX_HWFNS_PER_DEVICE];
	uint32_t		curr_vf;

	void			*next;
	void			*qlnx_rdma;
	volatile int		qlnxr_debug;
};

typedef struct qlnx_host qlnx_host_t;

/* note that align has to be a power of 2 */
#define QL_ALIGN(size, align)	(((size) + ((align) - 1)) & (~((align) - 1)))
#define QL_MIN(x, y)		(((x) < (y)) ? (x) : (y))
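
/* Worked example: QL_ALIGN(100, 64) == (100 + 63) & ~63 == 128. */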

#define QL_RUNNING(ifp) \
	((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) == \
		IFF_DRV_RUNNING)

#define QLNX_MAX_MTU			9000
#define QLNX_MAX_SEGMENTS_NON_TSO	(ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1)
//#define QLNX_MAX_TSO_FRAME_SIZE	((64 * 1024 - 1) + 22)
#define QLNX_MAX_TSO_FRAME_SIZE		65536
#define QLNX_MAX_TX_MBUF_SIZE		65536	/* bytes - bd_len = 16bits */

#define QL_MAC_CMP(mac1, mac2) \
	(((*(uint32_t *)(mac1) == *(uint32_t *)(mac2)) && \
	  (*(uint16_t *)((mac1) + 4) == *(uint16_t *)((mac2) + 4))) ? 0 : 1)
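/* Like memcmp(), QL_MAC_CMP() returns 0 when the two 6-byte MAC addresses
 * match and 1 otherwise. */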
#define for_each_rss(i) for (i = 0; i < ha->num_rss; i++)

/*
 * Debug Related
 */

#ifdef QLNX_DEBUG

#define QL_DPRINT1(ha, x, ...)					\
	do {							\
		if ((ha)->dbg_level & 0x0001) {			\
			device_printf((ha)->pci_dev,		\
				"[%s:%d]" x,			\
				__func__, __LINE__,		\
				## __VA_ARGS__);		\
		}						\
	} while (0)

#define QL_DPRINT2(ha, x, ...)					\
	do {							\
		if ((ha)->dbg_level & 0x0002) {			\
			device_printf((ha)->pci_dev,		\
				"[%s:%d]" x,			\
				__func__, __LINE__,		\
				## __VA_ARGS__);		\
		}						\
	} while (0)

#define QL_DPRINT3(ha, x, ...)					\
	do {							\
		if ((ha)->dbg_level & 0x0004) {			\
			device_printf((ha)->pci_dev,		\
				"[%s:%d]" x,			\
				__func__, __LINE__,		\
				## __VA_ARGS__);		\
		}						\
	} while (0)

#define QL_DPRINT4(ha, x, ...)					\
	do {							\
		if ((ha)->dbg_level & 0x0008) {			\
			device_printf((ha)->pci_dev,		\
				"[%s:%d]" x,			\
				__func__, __LINE__,		\
				## __VA_ARGS__);		\
		}						\
	} while (0)

#define QL_DPRINT5(ha, x, ...)					\
	do {							\
		if ((ha)->dbg_level & 0x0010) {			\
			device_printf((ha)->pci_dev,		\
				"[%s:%d]" x,			\
				__func__, __LINE__,		\
				## __VA_ARGS__);		\
		}						\
	} while (0)

#define QL_DPRINT6(ha, x, ...)					\
	do {							\
		if ((ha)->dbg_level & 0x0020) {			\
			device_printf((ha)->pci_dev,		\
				"[%s:%d]" x,			\
				__func__, __LINE__,		\
				## __VA_ARGS__);		\
		}						\
	} while (0)

#define QL_DPRINT7(ha, x, ...)					\
	do {							\
		if ((ha)->dbg_level & 0x0040) {			\
			device_printf((ha)->pci_dev,		\
				"[%s:%d]" x,			\
				__func__, __LINE__,		\
				## __VA_ARGS__);		\
		}						\
	} while (0)

#define QL_DPRINT8(ha, x, ...)					\
	do {							\
		if ((ha)->dbg_level & 0x0080) {			\
			device_printf((ha)->pci_dev,		\
				"[%s:%d]" x,			\
				__func__, __LINE__,		\
				## __VA_ARGS__);		\
		}						\
	} while (0)

#define QL_DPRINT9(ha, x, ...)					\
	do {							\
		if ((ha)->dbg_level & 0x0100) {			\
			device_printf((ha)->pci_dev,		\
				"[%s:%d]" x,			\
				__func__, __LINE__,		\
				## __VA_ARGS__);		\
		}						\
	} while (0)

#define QL_DPRINT11(ha, x, ...)					\
	do {							\
		if ((ha)->dbg_level & 0x0400) {			\
			device_printf((ha)->pci_dev,		\
				"[%s:%d]" x,			\
				__func__, __LINE__,		\
				## __VA_ARGS__);		\
		}						\
	} while (0)

#define QL_DPRINT12(ha, x, ...)					\
	do {							\
		if ((ha)->dbg_level & 0x0800) {			\
			device_printf((ha)->pci_dev,		\
				"[%s:%d]" x,			\
				__func__, __LINE__,		\
				## __VA_ARGS__);		\
		}						\
	} while (0)

#define QL_DPRINT13(ha, x, ...)					\
	do {							\
		if ((ha)->dbg_level & 0x1000) {			\
			device_printf((ha)->pci_dev,		\
				"[%s:%d]" x,			\
				__func__, __LINE__,		\
				## __VA_ARGS__);		\
		}						\
	} while (0)
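
/*
 * Illustrative (hedged) usage, assuming (ha)->dbg_level has bit 0x0001 set
 * and "rxq" is a hypothetical local; the message is prefixed with the
 * function name and line number:
 *
 *	QL_DPRINT1(ha, "rxq[%d] buffer allocation failed\n", rxq->rxq_id);
 */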

#else

#define QL_DPRINT1(ha, x, ...)
#define QL_DPRINT2(ha, x, ...)
#define QL_DPRINT3(ha, x, ...)
#define QL_DPRINT4(ha, x, ...)
#define QL_DPRINT5(ha, x, ...)
#define QL_DPRINT6(ha, x, ...)
#define QL_DPRINT7(ha, x, ...)
#define QL_DPRINT8(ha, x, ...)
#define QL_DPRINT9(ha, x, ...)
#define QL_DPRINT11(ha, x, ...)
#define QL_DPRINT12(ha, x, ...)
#define QL_DPRINT13(ha, x, ...)

#endif /* #ifdef QLNX_DEBUG */

#define QL_ASSERT(ha, x, y)	if (!(x)) panic y

#define QL_ERR_INJECT(ha, val)		((ha)->err_inject == (val))
#define QL_RESET_ERR_INJECT(ha, val)	{if ((ha)->err_inject == (val)) (ha)->err_inject = 0;}
#define QL_ERR_INJCT_TX_INT_DIFF	0x0001
#define QL_ERR_INJCT_TX_INT_MBUF_NULL	0x0002
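
/*
 * Illustrative (hedged) fault-injection point, e.g. in a TX completion
 * path:
 *
 *	if (QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL))
 *		simulate a NULL mbuf and bump err_tx_free_pkt_null;
 *	QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL);
 */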

/*
 * exported functions
 */
extern int qlnx_make_cdev(qlnx_host_t *ha);
extern void qlnx_del_cdev(qlnx_host_t *ha);
extern int qlnx_grc_dump(qlnx_host_t *ha, uint32_t *num_dumped_dwords,
		int hwfn_index);
extern int qlnx_idle_chk(qlnx_host_t *ha, uint32_t *num_dumped_dwords,
		int hwfn_index);
extern uint8_t *qlnx_get_mac_addr(qlnx_host_t *ha);
extern void qlnx_fill_link(qlnx_host_t *ha, struct ecore_hwfn *hwfn,
		struct qlnx_link_output *if_link);
extern int qlnx_set_lldp_tlvx(qlnx_host_t *ha, qlnx_lldp_sys_tlvs_t *lldp_tlvs);
extern int qlnx_vf_device(qlnx_host_t *ha);
extern void qlnx_free_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info);
extern int qlnx_alloc_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info,
		u16 sb_id);

/*
 * Some OS specific stuff
 */

#if (defined IFM_100G_SR4)
#define QLNX_IFM_100G_SR4 IFM_100G_SR4
#define QLNX_IFM_100G_LR4 IFM_100G_LR4
#define QLNX_IFM_100G_CR4 IFM_100G_CR4
#else
#define QLNX_IFM_100G_SR4 IFM_UNKNOWN
#define QLNX_IFM_100G_LR4 IFM_UNKNOWN
#define QLNX_IFM_100G_CR4 IFM_UNKNOWN
#endif /* #if (defined IFM_100G_SR4) */

#if (defined IFM_25G_SR)
#define QLNX_IFM_25G_SR IFM_25G_SR
#define QLNX_IFM_25G_CR IFM_25G_CR
#else
#define QLNX_IFM_25G_SR IFM_UNKNOWN
#define QLNX_IFM_25G_CR IFM_UNKNOWN
#endif /* #if (defined IFM_25G_SR) */

#define QLNX_INC_IERRORS(ifp)	if_inc_counter(ifp, IFCOUNTER_IERRORS, 1)
#define QLNX_INC_IQDROPS(ifp)	if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1)
#define QLNX_INC_IPACKETS(ifp)	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1)
#define QLNX_INC_OPACKETS(ifp)	if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1)

#define QLNX_INC_OBYTES(ifp, len) \
		if_inc_counter(ifp, IFCOUNTER_OBYTES, len)
#define QLNX_INC_IBYTES(ifp, len) \
		if_inc_counter(ifp, IFCOUNTER_IBYTES, len)

#define CQE_L3_PACKET(flags) \
	((((flags) & PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == e_l3_type_ipv4) || \
	 (((flags) & PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == e_l3_type_ipv6))

#define CQE_IP_HDR_ERR(flags) \
	((flags) & (PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK \
			<< PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT))

#define CQE_L4_HAS_CSUM(flags) \
	((flags) & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK \
			<< PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT))

#define CQE_HAS_VLAN(flags) \
	((flags) & (PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK \
			<< PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT))
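
/*
 * Illustrative (hedged) RX checksum decision built from the helpers above;
 * "flags" is a hypothetical local holding the le16toh()'d parsing/error
 * flags of a CQE:
 *
 *	if (CQE_L3_PACKET(flags) && !CQE_IP_HDR_ERR(flags) &&
 *	    CQE_L4_HAS_CSUM(flags))
 *		mark the mbuf's checksum as hardware-verified;
 */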

#ifndef QLNX_RDMA
#if defined(__i386__) || defined(__amd64__)

static __inline
void prefetch(void *x)
{
	__asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
}
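/* prefetcht0 hints the CPU to pull the cache line holding *x into all
 * levels of the cache hierarchy. */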

#else
#define prefetch(x)
#endif
#endif

#endif /* #ifndef _QLNX_DEF_H_ */