/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 1999 - 2024 Intel Corporation. */

#ifndef _IXGBE_H_
#define _IXGBE_H_

#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/cpumask.h>
#include <linux/if_vlan.h>
#include <linux/jiffies.h>
#include <linux/phy.h>

#include <linux/timecounter.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#include <net/devlink.h>

#include "ixgbe_type.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb.h"
#include "ixgbe_e610.h"
#if IS_ENABLED(CONFIG_FCOE)
#define IXGBE_FCOE
#include "ixgbe_fcoe.h"
#endif /* IS_ENABLED(CONFIG_FCOE) */
#ifdef CONFIG_IXGBE_DCA
#include <linux/dca.h>
#endif
#include "ixgbe_ipsec.h"

#include <net/xdp.h>

/* common prefix used by pr_<> macros */
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

/* TX/RX descriptor defines */
#define IXGBE_DEFAULT_TXD 512
#define IXGBE_DEFAULT_TX_WORK 256
#define IXGBE_MAX_TXD_82598 4096
#define IXGBE_MAX_TXD_82599 8192
#define IXGBE_MAX_TXD_X540 8192
#define IXGBE_MAX_TXD_X550 32768
#define IXGBE_MIN_TXD 64

#if (PAGE_SIZE < 8192)
#define IXGBE_DEFAULT_RXD 512
#else
#define IXGBE_DEFAULT_RXD 128
#endif
#define IXGBE_MAX_RXD_82598 4096
#define IXGBE_MAX_RXD_82599 8192
#define IXGBE_MAX_RXD_X540 8192
#define IXGBE_MAX_RXD_X550 32768
#define IXGBE_MIN_RXD 64

/* flow control */
#define IXGBE_MIN_FCRTL 0x40
#define IXGBE_MAX_FCRTL 0x7FF80
#define IXGBE_MIN_FCRTH 0x600
#define IXGBE_MAX_FCRTH 0x7FFF0
#define IXGBE_DEFAULT_FCPAUSE 0xFFFF
#define IXGBE_MIN_FCPAUSE 0
#define IXGBE_MAX_FCPAUSE 0xFFFF

/* Supported Rx Buffer Sizes */
#define IXGBE_RXBUFFER_256 256 /* Used for skb receive header */
#define IXGBE_RXBUFFER_1536 1536
#define IXGBE_RXBUFFER_2K 2048
#define IXGBE_RXBUFFER_3K 3072
#define IXGBE_RXBUFFER_4K 4096
#define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */

#define IXGBE_PKT_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))

/* Attempt to maximize the headroom available for incoming frames. We
 * use a 2K buffer for receives and need 1536/1534 to store the data for
 * the frame. This leaves us with 512 bytes of room. From that we need
 * to deduct the space needed for the shared info and the padding needed
 * to IP align the frame.
 *
 * Note: For cache line sizes 256 or larger this value is going to end
 * up negative. In these cases we should fall back to the 3K
 * buffers.
 */
#if (PAGE_SIZE < 8192)
#define IXGBE_MAX_2K_FRAME_BUILD_SKB (IXGBE_RXBUFFER_1536 - NET_IP_ALIGN)
#define IXGBE_2K_TOO_SMALL_WITH_PADDING \
        ((NET_SKB_PAD + IXGBE_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IXGBE_RXBUFFER_2K))
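
/* A worked example of the check above (a hedged sketch assuming a typical
 * 64-bit build with 64-byte cache lines, where NET_SKB_PAD is 64 and the
 * cacheline-aligned skb_shared_info is roughly 320 bytes):
 *
 *     SKB_WITH_OVERHEAD(2048) ~= 2048 - 320 = 1728
 *     NET_SKB_PAD + 1536      =  1600 <= 1728
 *
 * so a 2K buffer fits and the check evaluates false. With 256-byte cache
 * lines both NET_SKB_PAD and the shared-info alignment grow, the right-hand
 * side shrinks below the left, and the driver falls back to 3K buffers as
 * described in the note above.
 */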

static inline int ixgbe_compute_pad(int rx_buf_len)
{
        int page_size, pad_size;

        page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
        pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;

        return pad_size;
}

static inline int ixgbe_skb_pad(void)
{
        int rx_buf_len;

        /* If a 2K buffer cannot handle a standard Ethernet frame then
         * optimize padding for a 3K buffer instead of a 1.5K buffer.
         *
         * For a 3K buffer we need to add enough padding to allow for
         * tailroom due to NET_IP_ALIGN possibly shifting us out of
         * cache-line alignment.
         */
        if (IXGBE_2K_TOO_SMALL_WITH_PADDING)
                rx_buf_len = IXGBE_RXBUFFER_3K + SKB_DATA_ALIGN(NET_IP_ALIGN);
        else
                rx_buf_len = IXGBE_RXBUFFER_1536;

        /* if needed make room for NET_IP_ALIGN */
        rx_buf_len -= NET_IP_ALIGN;

        return ixgbe_compute_pad(rx_buf_len);
}

#define IXGBE_SKB_PAD ixgbe_skb_pad()
#else
#define IXGBE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#endif

/*
 * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
 * reserve 64 more, and skb_shared_info adds an additional 320 bytes more,
 * this adds up to 448 bytes of extra data.
 *
 * Since netdev_alloc_skb now allocates a page fragment we can use a value
 * of 256 and the resultant skb will have a truesize of 960 or less.
 */
#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_256

/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IXGBE_RX_BUFFER_WRITE 16 /* Must be power of 2 */

#define IXGBE_RX_DMA_ATTR \
        (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

enum ixgbe_tx_flags {
        /* cmd_type flags */
        IXGBE_TX_FLAGS_HW_VLAN = 0x01,
        IXGBE_TX_FLAGS_TSO = 0x02,
        IXGBE_TX_FLAGS_TSTAMP = 0x04,

        /* olinfo flags */
        IXGBE_TX_FLAGS_CC = 0x08,
        IXGBE_TX_FLAGS_IPV4 = 0x10,
        IXGBE_TX_FLAGS_CSUM = 0x20,
        IXGBE_TX_FLAGS_IPSEC = 0x40,

        /* software defined flags */
        IXGBE_TX_FLAGS_SW_VLAN = 0x80,
        IXGBE_TX_FLAGS_FCOE = 0x100,
};

/* VLAN info */
#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000
#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29
#define IXGBE_TX_FLAGS_VLAN_SHIFT 16

#define IXGBE_MAX_VF_MC_ENTRIES 30
#define IXGBE_MAX_VF_FUNCTIONS 64
#define IXGBE_MAX_VFTA_ENTRIES 128
#define MAX_EMULATION_MAC_ADDRS 16
#define IXGBE_MAX_PF_MACVLANS 15
#define VMDQ_P(p) ((p) + adapter->ring_feature[RING_F_VMDQ].offset)
#define IXGBE_82599_VF_DEVICE_ID 0x10ED
#define IXGBE_X540_VF_DEVICE_ID 0x1515
#define IXGBE_E610_VF_DEVICE_ID 0x57AD

#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
        { \
                u32 current_counter = IXGBE_READ_REG(hw, reg); \
                if (current_counter < last_counter) \
                        counter += 0x100000000LL; \
                last_counter = current_counter; \
                counter &= 0xFFFFFFFF00000000LL; \
                counter |= current_counter; \
        }

#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
        { \
                u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \
                u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \
                u64 current_counter = (current_counter_msb << 32) | \
                                      current_counter_lsb; \
                if (current_counter < last_counter) \
                        counter += 0x1000000000LL; \
                last_counter = current_counter; \
                counter &= 0xFFFFFFF000000000LL; \
                counter |= current_counter; \
        }
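
/* A hedged usage sketch for the macros above. Callers are expected to have
 * a local "struct ixgbe_hw *hw" in scope, since the macros reference it by
 * name. With hypothetical per-VF register accessors, folding a 36-bit
 * hardware octet counter into its software-extended 64-bit mirror might
 * look like:
 *
 *     UPDATE_VF_COUNTER_36bit(IXGBE_PVFGORC_LSB(vf), IXGBE_PVFGORC_MSB(vf),
 *                             vfinfo->last_vfstats.gorc,
 *                             vfinfo->vfstats.gorc);
 *
 * A hardware wrap is detected when the freshly read value is below the last
 * snapshot, and the macro then carries one unit into the upper software bits.
 */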

struct vf_stats {
        u64 gprc;
        u64 gorc;
        u64 gptc;
        u64 gotc;
        u64 mprc;
};

struct vf_data_storage {
        struct pci_dev *vfdev;
        unsigned char vf_mac_addresses[ETH_ALEN];
        u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
        u16 num_vf_mc_hashes;
        bool clear_to_send;
        struct vf_stats vfstats;
        struct vf_stats last_vfstats;
        struct vf_stats saved_rst_vfstats;
        bool pf_set_mac;
        u16 pf_vlan; /* When set, guest VLAN config not allowed. */
        u16 pf_qos;
        u16 tx_rate;
        int link_enable;
        int link_state;
        u8 spoofchk_enabled;
        bool rss_query_enabled;
        u8 trusted;
        int xcast_mode;
        unsigned int vf_api;
        u8 primary_abort_count;
};

enum ixgbevf_xcast_modes {
        IXGBEVF_XCAST_MODE_NONE = 0,
        IXGBEVF_XCAST_MODE_MULTI,
        IXGBEVF_XCAST_MODE_ALLMULTI,
        IXGBEVF_XCAST_MODE_PROMISC,
};

struct vf_macvlans {
        struct list_head l;
        int vf;
        bool free;
        bool is_macvlan;
        u8 vf_macvlan[ETH_ALEN];
};

#define IXGBE_MAX_TXD_PWR 14
#define IXGBE_MAX_DATA_PER_TXD (1u << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
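
/* A hedged reading of the worst case above: a single data descriptor can
 * carry at most IXGBE_MAX_DATA_PER_TXD = 16KB, so TXD_USE_COUNT(S) yields
 * the number of descriptors a buffer of S bytes consumes, e.g.
 * TXD_USE_COUNT(17000) == 2. DESC_NEEDED then budgets one descriptor per
 * page fragment plus slack for the linear skb head and a context
 * descriptor, which is what the transmit path checks for before queueing.
 */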

/* wrapper around a pointer to a socket buffer,
 * so a DMA handle can be stored along with the buffer */
struct ixgbe_tx_buffer {
        union ixgbe_adv_tx_desc *next_to_watch;
        unsigned long time_stamp;
        union {
                struct sk_buff *skb;
                struct xdp_frame *xdpf;
        };
        unsigned int bytecount;
        unsigned short gso_segs;
        __be16 protocol;
        DEFINE_DMA_UNMAP_ADDR(dma);
        DEFINE_DMA_UNMAP_LEN(len);
        u32 tx_flags;
};

struct ixgbe_rx_buffer {
        union {
                struct {
                        struct sk_buff *skb;
                        dma_addr_t dma;
                        struct page *page;
                        __u32 page_offset;
                        __u16 pagecnt_bias;
                };
                struct {
                        bool discard;
                        struct xdp_buff *xdp;
                };
        };
};

struct ixgbe_queue_stats {
        u64 packets;
        u64 bytes;
};

struct ixgbe_tx_queue_stats {
        u64 restart_queue;
        u64 tx_busy;
        u64 tx_done_old;
};

struct ixgbe_rx_queue_stats {
        u64 rsc_count;
        u64 rsc_flush;
        u64 non_eop_descs;
        u64 alloc_rx_page;
        u64 alloc_rx_page_failed;
        u64 alloc_rx_buff_failed;
        u64 csum_err;
};

#define IXGBE_TS_HDR_LEN 8

enum ixgbe_ring_state_t {
        __IXGBE_RX_3K_BUFFER,
        __IXGBE_RX_BUILD_SKB_ENABLED,
        __IXGBE_RX_RSC_ENABLED,
        __IXGBE_RX_CSUM_UDP_ZERO_ERR,
        __IXGBE_RX_FCOE,
        __IXGBE_TX_FDIR_INIT_DONE,
        __IXGBE_TX_XPS_INIT_DONE,
        __IXGBE_TX_DETECT_HANG,
        __IXGBE_HANG_CHECK_ARMED,
        __IXGBE_TX_XDP_RING,
        __IXGBE_TX_DISABLED,
        __IXGBE_RING_STATE_NBITS, /* must be last */
};

#define ring_uses_build_skb(ring) \
        test_bit(__IXGBE_RX_BUILD_SKB_ENABLED, (ring)->state)

struct ixgbe_fwd_adapter {
        unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
        struct net_device *netdev;
        unsigned int tx_base_queue;
        unsigned int rx_base_queue;
        int pool;
};

#define check_for_tx_hang(ring) \
        test_bit(__IXGBE_TX_DETECT_HANG, (ring)->state)
#define set_check_for_tx_hang(ring) \
        set_bit(__IXGBE_TX_DETECT_HANG, (ring)->state)
#define clear_check_for_tx_hang(ring) \
        clear_bit(__IXGBE_TX_DETECT_HANG, (ring)->state)
#define ring_is_rsc_enabled(ring) \
        test_bit(__IXGBE_RX_RSC_ENABLED, (ring)->state)
#define set_ring_rsc_enabled(ring) \
        set_bit(__IXGBE_RX_RSC_ENABLED, (ring)->state)
#define clear_ring_rsc_enabled(ring) \
        clear_bit(__IXGBE_RX_RSC_ENABLED, (ring)->state)
#define ring_is_xdp(ring) \
        test_bit(__IXGBE_TX_XDP_RING, (ring)->state)
#define set_ring_xdp(ring) \
        set_bit(__IXGBE_TX_XDP_RING, (ring)->state)
#define clear_ring_xdp(ring) \
        clear_bit(__IXGBE_TX_XDP_RING, (ring)->state)
struct ixgbe_ring {
        struct ixgbe_ring *next; /* pointer to next ring in q_vector */
        struct ixgbe_q_vector *q_vector; /* backpointer to host q_vector */
        struct net_device *netdev; /* netdev ring belongs to */
        struct bpf_prog *xdp_prog;
        struct device *dev; /* device for DMA mapping */
        void *desc; /* descriptor ring memory */
        union {
                struct ixgbe_tx_buffer *tx_buffer_info;
                struct ixgbe_rx_buffer *rx_buffer_info;
        };
        DECLARE_BITMAP(state, __IXGBE_RING_STATE_NBITS);
        u8 __iomem *tail;
        dma_addr_t dma; /* phys. address of descriptor ring */
        unsigned int size; /* length in bytes */

        u16 count; /* amount of descriptors */

        u8 queue_index; /* needed for multiqueue queue management */
        u8 reg_idx; /* holds the special value that gets
                     * the hardware register offset
                     * associated with this ring, which is
                     * different for DCB and RSS modes
                     */
        u16 next_to_use;
        u16 next_to_clean;

        unsigned long last_rx_timestamp;

        union {
                u16 next_to_alloc;
                struct {
                        u8 atr_sample_rate;
                        u8 atr_count;
                };
        };

        u8 dcb_tc;
        struct ixgbe_queue_stats stats;
        struct u64_stats_sync syncp;
        union {
                struct ixgbe_tx_queue_stats tx_stats;
                struct ixgbe_rx_queue_stats rx_stats;
        };
        u16 rx_offset;
        struct xdp_rxq_info xdp_rxq;
        spinlock_t tx_lock; /* used in XDP mode */
        struct xsk_buff_pool *xsk_pool;
        u16 ring_idx; /* {rx,tx,xdp}_ring back reference idx */
        u16 rx_buf_len;
} ____cacheline_internodealigned_in_smp;

enum ixgbe_ring_f_enum {
        RING_F_NONE = 0,
        RING_F_VMDQ, /* SR-IOV uses the same ring feature */
        RING_F_RSS,
        RING_F_FDIR,
#ifdef IXGBE_FCOE
        RING_F_FCOE,
#endif /* IXGBE_FCOE */

        RING_F_ARRAY_SIZE /* must be last in enum set */
};

#define IXGBE_MAX_RSS_INDICES 16
#define IXGBE_MAX_RSS_INDICES_X550 63
#define IXGBE_MAX_VMDQ_INDICES 64
#define IXGBE_MAX_FDIR_INDICES 63 /* based on q_vector limit */
#define IXGBE_MAX_FCOE_INDICES 8
#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
#define IXGBE_MAX_XDP_QS (IXGBE_MAX_FDIR_INDICES + 1)
#define IXGBE_MAX_L2A_QUEUES 4
#define IXGBE_BAD_L2A_QUEUE 3
#define IXGBE_MAX_MACVLANS 63

#define IXGBE_MAX_TX_QUEUES 128
#define IXGBE_MAX_TX_DESCRIPTORS 40
#define IXGBE_MAX_TX_VF_HANGS 4

DECLARE_STATIC_KEY_FALSE(ixgbe_xdp_locking_key);

struct ixgbe_ring_feature {
        u16 limit; /* upper limit on feature indices */
        u16 indices; /* current value of indices */
        u16 mask; /* Mask used for feature to ring mapping */
        u16 offset; /* offset to start of feature */
} ____cacheline_internodealigned_in_smp;

#define IXGBE_82599_VMDQ_8Q_MASK 0x78
#define IXGBE_82599_VMDQ_4Q_MASK 0x7C
#define IXGBE_82599_VMDQ_2Q_MASK 0x7E

/*
 * FCoE requires that all Rx buffers be over 2200 bytes in length. Since
 * this is twice the size of a half page we need to double the page order
 * for FCoE enabled Rx queues.
 */
static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring)
{
        if (test_bit(__IXGBE_RX_3K_BUFFER, ring->state))
                return IXGBE_RXBUFFER_3K;
#if (PAGE_SIZE < 8192)
        if (ring_uses_build_skb(ring))
                return IXGBE_MAX_2K_FRAME_BUILD_SKB;
#endif
        return IXGBE_RXBUFFER_2K;
}

static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
{
#if (PAGE_SIZE < 8192)
        if (test_bit(__IXGBE_RX_3K_BUFFER, ring->state))
                return 1;
#endif
        return 0;
}
#define ixgbe_rx_pg_size(_ring) (PAGE_SIZE << ixgbe_rx_pg_order(_ring))
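
/* A worked example of the page-order policy above (hedged, assuming 4K
 * pages): a 3K buffer cannot be halved into a single 4K page, so such
 * rings get order-1 (8K) pages and ixgbe_rx_pg_size() returns 8192,
 * letting two 3K buffers share one allocation; 2K buffers already pack
 * two per order-0 page, so ixgbe_rx_pg_size() returns 4096. On systems
 * with 8K or larger pages the order stays 0 for every buffer size.
 */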

#define IXGBE_ITR_ADAPTIVE_MIN_INC 2
#define IXGBE_ITR_ADAPTIVE_MIN_USECS 10
#define IXGBE_ITR_ADAPTIVE_MAX_USECS 126
#define IXGBE_ITR_ADAPTIVE_LATENCY 0x80
#define IXGBE_ITR_ADAPTIVE_BULK 0x00

struct ixgbe_ring_container {
        struct ixgbe_ring *ring; /* pointer to linked list of rings */
        unsigned long next_update; /* jiffies value of last update */
        unsigned int total_bytes; /* total bytes processed this int */
        unsigned int total_packets; /* total packets processed this int */
        u16 work_limit; /* total work allowed per interrupt */
        u8 count; /* total number of rings in vector */
        u8 itr; /* current ITR setting for ring */
};

/* iterator for handling rings in ring container */
#define ixgbe_for_each_ring(pos, head) \
        for (pos = (head).ring; pos != NULL; pos = pos->next)

#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
                               ? 8 : 1)
#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS

/* MAX_Q_VECTORS of these are allocated,
 * but we only use one per queue-specific vector.
 */
struct ixgbe_q_vector {
        struct ixgbe_adapter *adapter;
#ifdef CONFIG_IXGBE_DCA
        int cpu; /* CPU for DCA */
#endif
        u16 v_idx; /* index of q_vector within array, also used for
                    * finding the bit in EICR and friends that
                    * represents the vector for this ring */
        u16 itr; /* Interrupt throttle rate written to EITR */
        struct ixgbe_ring_container rx, tx;

        struct napi_struct napi;
        struct rcu_head rcu; /* to avoid race with update stats on free */

        cpumask_t affinity_mask;
        int numa_node;
        char name[IFNAMSIZ + 9];

        /* for dynamic allocation of rings associated with this q_vector */
        struct ixgbe_ring ring[] ____cacheline_internodealigned_in_smp;
};

#ifdef CONFIG_IXGBE_HWMON

#define IXGBE_HWMON_TYPE_LOC 0
#define IXGBE_HWMON_TYPE_TEMP 1
#define IXGBE_HWMON_TYPE_CAUTION 2
#define IXGBE_HWMON_TYPE_MAX 3

struct hwmon_attr {
        struct device_attribute dev_attr;
        struct ixgbe_hw *hw;
        struct ixgbe_thermal_diode_data *sensor;
        char name[12];
};

struct hwmon_buff {
        struct attribute_group group;
        const struct attribute_group *groups[2];
        struct attribute *attrs[IXGBE_MAX_SENSORS * 4 + 1];
        struct hwmon_attr hwmon_list[IXGBE_MAX_SENSORS * 4];
        unsigned int n_hwmon;
};
#endif /* CONFIG_IXGBE_HWMON */

/*
 * microsecond values for various ITR rates shifted by 2 to fit itr register
 * with the first 3 bits reserved 0
 */
#define IXGBE_MIN_RSC_ITR 24
#define IXGBE_100K_ITR 40
#define IXGBE_20K_ITR 200
#define IXGBE_12K_ITR 336
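
/* Decoding the values above: shifting right by 2 recovers the interrupt
 * interval in microseconds, e.g. 40 >> 2 = 10 us (~100K interrupts/s),
 * 200 >> 2 = 50 us (~20K/s) and 336 >> 2 = 84 us (~11.9K/s, hence "12K").
 */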

/* ixgbe_test_staterr - tests bits in Rx descriptor status and error fields */
static inline __le32 ixgbe_test_staterr(union ixgbe_adv_rx_desc *rx_desc,
                                        const u32 stat_err_bits)
{
        return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
}

static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
{
        u16 ntc = ring->next_to_clean;
        u16 ntu = ring->next_to_use;

        return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
}
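
/* A quick worked example: with count = 512 and next_to_clean equal to
 * next_to_use the ring reports 511 unused descriptors, never 512 -- one
 * slot is always sacrificed so that a completely full ring can be told
 * apart from a completely empty one.
 */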

#define IXGBE_RX_DESC(R, i) \
        (&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
#define IXGBE_TX_DESC(R, i) \
        (&(((union ixgbe_adv_tx_desc *)((R)->desc))[i]))
#define IXGBE_TX_CTXTDESC(R, i) \
        (&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))

#define IXGBE_MAX_JUMBO_FRAME_SIZE 9728 /* Maximum Supported Size 9.5KB */
#ifdef IXGBE_FCOE
/* Use 3K as the baby jumbo frame size for FCoE */
#define IXGBE_FCOE_JUMBO_FRAME_SIZE 3072
#endif /* IXGBE_FCOE */

#define OTHER_VECTOR 1
#define NON_Q_VECTORS (OTHER_VECTOR)

#define MAX_MSIX_VECTORS_82599 64
#define MAX_Q_VECTORS_82599 64
#define MAX_MSIX_VECTORS_82598 18
#define MAX_Q_VECTORS_82598 16

struct ixgbe_mac_addr {
        u8 addr[ETH_ALEN];
        u16 pool;
        u16 state; /* bitmask */
};

#define IXGBE_MAC_STATE_DEFAULT 0x1
#define IXGBE_MAC_STATE_MODIFIED 0x2
#define IXGBE_MAC_STATE_IN_USE 0x4

#define MAX_Q_VECTORS MAX_Q_VECTORS_82599
#define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82599

#define MIN_MSIX_Q_VECTORS 1
#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)

/* default to trying for four seconds */
#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
#define IXGBE_SFP_POLL_JIFFIES (2 * HZ) /* SFP poll every 2 seconds */

#define IXGBE_PRIMARY_ABORT_LIMIT 5

/* board specific private data structure */
struct ixgbe_adapter {
        unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
        /* OS defined structs */
        struct net_device *netdev;
        struct bpf_prog *xdp_prog;
        struct pci_dev *pdev;
        struct mii_bus *mii_bus;
        struct devlink *devlink;
        struct devlink_port devlink_port;
        struct devlink_region *nvm_region;
        struct devlink_region *sram_region;
        struct devlink_region *devcaps_region;

        unsigned long state;

        /* Some features need tri-state capability,
         * thus the additional *_CAPABLE flags.
         */
        u32 flags;
#define IXGBE_FLAG_MSI_ENABLED BIT(1)
#define IXGBE_FLAG_MSIX_ENABLED BIT(3)
#define IXGBE_FLAG_RX_1BUF_CAPABLE BIT(4)
#define IXGBE_FLAG_RX_PS_CAPABLE BIT(5)
#define IXGBE_FLAG_RX_PS_ENABLED BIT(6)
#define IXGBE_FLAG_DCA_ENABLED BIT(8)
#define IXGBE_FLAG_DCA_CAPABLE BIT(9)
#define IXGBE_FLAG_IMIR_ENABLED BIT(10)
#define IXGBE_FLAG_MQ_CAPABLE BIT(11)
#define IXGBE_FLAG_DCB_ENABLED BIT(12)
#define IXGBE_FLAG_VMDQ_CAPABLE BIT(13)
#define IXGBE_FLAG_VMDQ_ENABLED BIT(14)
#define IXGBE_FLAG_FAN_FAIL_CAPABLE BIT(15)
#define IXGBE_FLAG_NEED_LINK_UPDATE BIT(16)
#define IXGBE_FLAG_NEED_LINK_CONFIG BIT(17)
#define IXGBE_FLAG_FDIR_HASH_CAPABLE BIT(18)
#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE BIT(19)
#define IXGBE_FLAG_FCOE_CAPABLE BIT(20)
#define IXGBE_FLAG_FCOE_ENABLED BIT(21)
#define IXGBE_FLAG_SRIOV_CAPABLE BIT(22)
#define IXGBE_FLAG_SRIOV_ENABLED BIT(23)
#define IXGBE_FLAG_RX_HWTSTAMP_ENABLED BIT(25)
#define IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER BIT(26)
#define IXGBE_FLAG_DCB_CAPABLE BIT(27)

        u32 flags2;
#define IXGBE_FLAG2_RSC_CAPABLE BIT(0)
#define IXGBE_FLAG2_RSC_ENABLED BIT(1)
#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE BIT(2)
#define IXGBE_FLAG2_TEMP_SENSOR_EVENT BIT(3)
#define IXGBE_FLAG2_SEARCH_FOR_SFP BIT(4)
#define IXGBE_FLAG2_SFP_NEEDS_RESET BIT(5)
#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT BIT(7)
#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP BIT(8)
#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP BIT(9)
#define IXGBE_FLAG2_PTP_PPS_ENABLED BIT(10)
#define IXGBE_FLAG2_PHY_INTERRUPT BIT(11)
#define IXGBE_FLAG2_FW_ASYNC_EVENT BIT(12)
#define IXGBE_FLAG2_VLAN_PROMISC BIT(13)
#define IXGBE_FLAG2_EEE_CAPABLE BIT(14)
#define IXGBE_FLAG2_EEE_ENABLED BIT(15)
#define IXGBE_FLAG2_RX_LEGACY BIT(16)
#define IXGBE_FLAG2_IPSEC_ENABLED BIT(17)
#define IXGBE_FLAG2_VF_IPSEC_ENABLED BIT(18)
#define IXGBE_FLAG2_AUTO_DISABLE_VF BIT(19)
#define IXGBE_FLAG2_PHY_FW_LOAD_FAILED BIT(20)
#define IXGBE_FLAG2_NO_MEDIA BIT(21)
#define IXGBE_FLAG2_MOD_POWER_UNSUPPORTED BIT(22)
#define IXGBE_FLAG2_API_MISMATCH BIT(23)
#define IXGBE_FLAG2_FW_ROLLBACK BIT(24)

        /* Tx fast path data */
        int num_tx_queues;
        u16 tx_itr_setting;
        u16 tx_work_limit;
        u64 tx_ipsec;

        /* Rx fast path data */
        int num_rx_queues;
        u16 rx_itr_setting;
        u64 rx_ipsec;

        /* Port number used to identify VXLAN traffic */
        __be16 vxlan_port;
        __be16 geneve_port;

        /* XDP */
        int num_xdp_queues;
        struct ixgbe_ring *xdp_ring[IXGBE_MAX_XDP_QS];
        unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled rings */

        /* TX */
        struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;

        u64 restart_queue;
        u64 lsc_int;
        u32 tx_timeout_count;

        /* RX */
        struct ixgbe_ring *rx_ring[MAX_RX_QUEUES];
        int num_rx_pools; /* == num_rx_queues in 82598 */
        int num_rx_queues_per_pool; /* 1 if 82598, can be many if 82599 */
        u64 hw_csum_rx_error;
        u64 hw_rx_no_dma_resources;
        u64 rsc_total_count;
        u64 rsc_total_flush;
        u64 non_eop_descs;
        u32 alloc_rx_page;
        u32 alloc_rx_page_failed;
        u32 alloc_rx_buff_failed;

        struct ixgbe_q_vector *q_vector[MAX_Q_VECTORS];

        /* DCB parameters */
        struct ieee_pfc *ixgbe_ieee_pfc;
        struct ieee_ets *ixgbe_ieee_ets;
        struct ixgbe_dcb_config dcb_cfg;
        struct ixgbe_dcb_config temp_dcb_cfg;
        u8 hw_tcs;
        u8 dcb_set_bitmap;
        u8 dcbx_cap;
        enum ixgbe_fc_mode last_lfc_mode;

        int num_q_vectors; /* current number of q_vectors for device */
        int max_q_vectors; /* true count of q_vectors for device */
        struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE];
        struct msix_entry *msix_entries;

        u32 test_icr;
        struct ixgbe_ring test_tx_ring;
        struct ixgbe_ring test_rx_ring;

        /* structs defined in ixgbe_hw.h */
        struct ixgbe_hw hw;
        u16 msg_enable;
        struct ixgbe_hw_stats stats;

        u64 tx_busy;
        unsigned int tx_ring_count;
        unsigned int xdp_ring_count;
        unsigned int rx_ring_count;

        u32 link_speed;
        bool link_up;
        unsigned long sfp_poll_time;
        unsigned long link_check_timeout;
        u32 link_down_events;

        struct timer_list service_timer;
        struct work_struct service_task;

        struct hlist_head fdir_filter_list;
        unsigned long fdir_overflow; /* number of times ATR was backed off */
        union ixgbe_atr_input fdir_mask;
        int fdir_filter_count;
        u32 fdir_pballoc;
        u32 atr_sample_rate;
        spinlock_t fdir_perfect_lock;

        bool fw_emp_reset_disabled;

#ifdef IXGBE_FCOE
        struct ixgbe_fcoe fcoe;
#endif /* IXGBE_FCOE */
        u8 __iomem *io_addr; /* Mainly for iounmap use */
        u32 wol;

        u16 bridge_mode;

        char eeprom_id[NVM_VER_SIZE];
        u16 eeprom_cap;

        u32 interrupt_event;
        u32 led_reg;

        struct ptp_clock *ptp_clock;
        struct ptp_clock_info ptp_caps;
        struct work_struct ptp_tx_work;
        struct sk_buff *ptp_tx_skb;
        struct kernel_hwtstamp_config tstamp_config;
        unsigned long ptp_tx_start;
        unsigned long last_overflow_check;
        unsigned long last_rx_ptp_check;
        unsigned long last_rx_timestamp;
        spinlock_t tmreg_lock;
        struct cyclecounter hw_cc;
        struct timecounter hw_tc;
        u32 base_incval;
        u32 tx_hwtstamp_timeouts;
        u32 tx_hwtstamp_skipped;
        u32 rx_hwtstamp_cleared;
        void (*ptp_setup_sdp)(struct ixgbe_adapter *);

        /* SR-IOV */
        DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
        unsigned int num_vfs;
        struct vf_data_storage *vfinfo;
        int vf_rate_link_speed;
        struct vf_macvlans vf_mvs;
        struct vf_macvlans *mv_list;

        u32 timer_event_accumulator;
        u32 vferr_refcount;
        struct ixgbe_mac_addr *mac_table;
        u8 tx_hang_count[IXGBE_MAX_TX_QUEUES];
        struct kobject *info_kobj;
        u16 lse_mask;
#ifdef CONFIG_IXGBE_HWMON
        struct hwmon_buff *ixgbe_hwmon_buff;
#endif /* CONFIG_IXGBE_HWMON */
        struct dentry *ixgbe_dbg_adapter;

        u8 default_up;
        /* Bitmask indicating in use pools */
        DECLARE_BITMAP(fwd_bitmask, IXGBE_MAX_MACVLANS + 1);

#define IXGBE_MAX_LINK_HANDLE 10
        struct ixgbe_jump_table *jump_tables[IXGBE_MAX_LINK_HANDLE];
        unsigned long tables;

        /* maximum number of RETA entries among all devices supported by ixgbe
         * driver: currently it's x550 device in non-SRIOV mode
         */
#define IXGBE_MAX_RETA_ENTRIES 512
        u8 rss_indir_tbl[IXGBE_MAX_RETA_ENTRIES];

#define IXGBE_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */
        u32 *rss_key;

#ifdef CONFIG_IXGBE_IPSEC
        struct ixgbe_ipsec *ipsec;
#endif /* CONFIG_IXGBE_IPSEC */
        spinlock_t vfs_lock;
};

struct ixgbe_netdevice_priv {
        struct ixgbe_adapter *adapter;
};

static inline struct ixgbe_adapter *ixgbe_from_netdev(struct net_device *netdev)
{
        struct ixgbe_netdevice_priv *priv = netdev_priv(netdev);

        return priv->adapter;
}

static inline int ixgbe_determine_xdp_q_idx(int cpu)
{
        if (static_key_enabled(&ixgbe_xdp_locking_key))
                return cpu % IXGBE_MAX_XDP_QS;
        else
                return cpu;
}
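
/* The locking key above is flipped on by the setup code when the machine
 * has more CPUs than the IXGBE_MAX_XDP_QS XDP rings can cover. In that
 * mode several CPUs share a ring via the modulo mapping and the per-ring
 * tx_lock in struct ixgbe_ring serializes their transmits; with enough
 * rings each CPU owns one exclusively and no lock is taken.
 */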

static inline
struct ixgbe_ring *ixgbe_determine_xdp_ring(struct ixgbe_adapter *adapter)
{
        int index = ixgbe_determine_xdp_q_idx(smp_processor_id());

        return adapter->xdp_ring[index];
}

static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
{
        switch (adapter->hw.mac.type) {
        case ixgbe_mac_82598EB:
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X540:
                return IXGBE_MAX_RSS_INDICES;
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
        case ixgbe_mac_x550em_a:
        case ixgbe_mac_e610:
                return IXGBE_MAX_RSS_INDICES_X550;
        default:
                return 0;
        }
}

struct ixgbe_fdir_filter {
        struct hlist_node fdir_node;
        union ixgbe_atr_input filter;
        u16 sw_idx;
        u64 action;
};

enum ixgbe_state_t {
        __IXGBE_TESTING,
        __IXGBE_RESETTING,
        __IXGBE_DOWN,
        __IXGBE_DISABLED,
        __IXGBE_REMOVING,
        __IXGBE_SERVICE_SCHED,
        __IXGBE_SERVICE_INITED,
        __IXGBE_IN_SFP_INIT,
        __IXGBE_PTP_RUNNING,
        __IXGBE_PTP_TX_IN_PROGRESS,
        __IXGBE_RESET_REQUESTED,
        __IXGBE_PHY_INIT_COMPLETE,
};

struct ixgbe_cb {
        union { /* Union defining head/tail partner */
                struct sk_buff *head;
                struct sk_buff *tail;
        };
        dma_addr_t dma;
        u16 append_cnt;
        bool page_released;
};
#define IXGBE_CB(skb) ((struct ixgbe_cb *)(skb)->cb)

enum ixgbe_boards {
        board_82598,
        board_82599,
        board_X540,
        board_X550,
        board_X550EM_x,
        board_x550em_x_fw,
        board_x550em_a,
        board_x550em_a_fw,
        board_e610,
};

extern const struct ixgbe_info ixgbe_82598_info;
extern const struct ixgbe_info ixgbe_82599_info;
extern const struct ixgbe_info ixgbe_X540_info;
extern const struct ixgbe_info ixgbe_X550_info;
extern const struct ixgbe_info ixgbe_X550EM_x_info;
extern const struct ixgbe_info ixgbe_x550em_x_fw_info;
extern const struct ixgbe_info ixgbe_x550em_a_info;
extern const struct ixgbe_info ixgbe_x550em_a_fw_info;
extern const struct ixgbe_info ixgbe_e610_info;
#ifdef CONFIG_IXGBE_DCB
extern const struct dcbnl_rtnl_ops ixgbe_dcbnl_ops;
#endif

extern char ixgbe_driver_name[];
#ifdef IXGBE_FCOE
extern char ixgbe_default_device_descr[];
#endif /* IXGBE_FCOE */

int ixgbe_open(struct net_device *netdev);
int ixgbe_close(struct net_device *netdev);
void ixgbe_up(struct ixgbe_adapter *adapter);
void ixgbe_down(struct ixgbe_adapter *adapter);
void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
void ixgbe_reset(struct ixgbe_adapter *adapter);
void ixgbe_set_ethtool_ops(struct net_device *netdev);
int ixgbe_setup_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
int ixgbe_setup_tx_resources(struct ixgbe_ring *);
void ixgbe_free_rx_resources(struct ixgbe_ring *);
void ixgbe_free_tx_resources(struct ixgbe_ring *);
void ixgbe_configure_rx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
void ixgbe_configure_tx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
void ixgbe_disable_rx(struct ixgbe_adapter *adapter);
void ixgbe_disable_tx(struct ixgbe_adapter *adapter);
void ixgbe_update_stats(struct ixgbe_adapter *adapter);
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
                         u16 subdevice_id);
void ixgbe_set_fw_version_e610(struct ixgbe_adapter *adapter);
int ixgbe_refresh_fw_version(struct ixgbe_adapter *adapter);
#ifdef CONFIG_PCI_IOV
void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter);
#endif
int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
                         const u8 *addr, u16 queue);
int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
                         const u8 *addr, u16 queue);
void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid);
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, struct ixgbe_adapter *,
                                  struct ixgbe_ring *);
void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
void ixgbe_write_eitr(struct ixgbe_q_vector *);
int ixgbe_poll(struct napi_struct *napi, int budget);
int ethtool_ioctl(struct ifreq *ifr);
int ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
int ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
int ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
int ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
                                          union ixgbe_atr_hash_dword input,
                                          union ixgbe_atr_hash_dword common,
                                          u8 queue);
int ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
                                    union ixgbe_atr_input *input_mask);
int ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
                                          union ixgbe_atr_input *input,
                                          u16 soft_id, u8 queue);
int ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
                                          union ixgbe_atr_input *input,
                                          u16 soft_id);
void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
                                          union ixgbe_atr_input *mask);
int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
                                    struct ixgbe_fdir_filter *input,
                                    u16 sw_idx);
void ixgbe_set_rx_mode(struct net_device *netdev);
#ifdef CONFIG_IXGBE_DCB
void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
#endif
int ixgbe_setup_tc(struct net_device *dev, u8 tc);
void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
void ixgbe_do_reset(struct net_device *netdev);
#ifdef CONFIG_IXGBE_HWMON
void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter);
int ixgbe_sysfs_init(struct ixgbe_adapter *adapter);
#endif /* CONFIG_IXGBE_HWMON */
#ifdef IXGBE_FCOE
void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
int ixgbe_fso(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first,
              u8 *hdr_len);
int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
                   union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb);
int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
                       struct scatterlist *sgl, unsigned int sgc);
int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
                          struct scatterlist *sgl, unsigned int sgc);
int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
int ixgbe_fcoe_enable(struct net_device *netdev);
int ixgbe_fcoe_disable(struct net_device *netdev);
int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
                           struct netdev_fcoe_hbainfo *info);
u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter);
#endif /* IXGBE_FCOE */
#ifdef CONFIG_DEBUG_FS
void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter);
void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter);
void ixgbe_dbg_init(void);
void ixgbe_dbg_exit(void);
#else
static inline void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter) {}
static inline void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter) {}
static inline void ixgbe_dbg_init(void) {}
static inline void ixgbe_dbg_exit(void) {}
#endif /* CONFIG_DEBUG_FS */
static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
{
        return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}

void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter);
void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter);
void ixgbe_ptp_tx_hang(struct ixgbe_adapter *adapter);
void ixgbe_ptp_rx_pktstamp(struct ixgbe_q_vector *, struct sk_buff *);
void ixgbe_ptp_rx_rgtstamp(struct ixgbe_q_vector *, struct sk_buff *skb);
static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
                                         union ixgbe_adv_rx_desc *rx_desc,
                                         struct sk_buff *skb)
{
        if (unlikely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_TSIP))) {
                ixgbe_ptp_rx_pktstamp(rx_ring->q_vector, skb);
                return;
        }

        if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))
                return;

        ixgbe_ptp_rx_rgtstamp(rx_ring->q_vector, skb);

        /* Update the last_rx_timestamp timer in order to enable watchdog check
         * for error case of latched timestamp on a dropped packet.
         */
        rx_ring->last_rx_timestamp = jiffies;
}
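
/* Two hardware timestamp paths meet above: parts that set TSIP prepend the
 * timestamp (IXGBE_TS_HDR_LEN bytes) to the packet buffer itself, so
 * ixgbe_ptp_rx_pktstamp() pulls it out of the data, while other parts
 * latch it in registers that ixgbe_ptp_rx_rgtstamp() reads back -- hence
 * the watchdog refresh, since a timestamp latched for a packet that was
 * then dropped would otherwise block further timestamps until cleared.
 */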

int ixgbe_ptp_hwtstamp_get(struct net_device *netdev,
                           struct kernel_hwtstamp_config *config);
int ixgbe_ptp_hwtstamp_set(struct net_device *netdev,
                           struct kernel_hwtstamp_config *config,
                           struct netlink_ext_ack *extack);
void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
void ixgbe_ptp_reset(struct ixgbe_adapter *adapter);
void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter);
#ifdef CONFIG_PCI_IOV
void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter);
#endif

netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
                                  struct ixgbe_adapter *adapter,
                                  struct ixgbe_ring *tx_ring);
u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter);
void ixgbe_store_key(struct ixgbe_adapter *adapter);
void ixgbe_store_reta(struct ixgbe_adapter *adapter);
int ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
                       u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
#ifdef CONFIG_IXGBE_IPSEC
void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter);
void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter);
void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter);
void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
                    union ixgbe_adv_rx_desc *rx_desc,
                    struct sk_buff *skb);
int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first,
                   struct ixgbe_ipsec_tx_data *itd);
void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter, u32 vf);
int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *mbuf, u32 vf);
int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter, u32 *mbuf, u32 vf);
#else
static inline void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter) { }
static inline void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter) { }
static inline void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter) { }
static inline void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
                                  union ixgbe_adv_rx_desc *rx_desc,
                                  struct sk_buff *skb) { }
static inline int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
                                 struct ixgbe_tx_buffer *first,
                                 struct ixgbe_ipsec_tx_data *itd) { return 0; }
static inline void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter,
                                        u32 vf) { }
static inline int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter,
                                        u32 *mbuf, u32 vf) { return -EACCES; }
static inline int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter,
                                        u32 *mbuf, u32 vf) { return -EACCES; }
#endif /* CONFIG_IXGBE_IPSEC */

static inline bool ixgbe_enabled_xdp_adapter(struct ixgbe_adapter *adapter)
{
        return !!adapter->xdp_prog;
}

#endif /* _IXGBE_H_ */