1 /* SPDX-License-Identifier: GPL-2.0 */
2 /* Copyright(c) 1999 - 2018 Intel Corporation. */
3 
4 #ifndef _IXGBE_H_
5 #define _IXGBE_H_
6 
7 #include <linux/bitops.h>
8 #include <linux/types.h>
9 #include <linux/pci.h>
10 #include <linux/netdevice.h>
11 #include <linux/cpumask.h>
12 #include <linux/aer.h>
13 #include <linux/if_vlan.h>
14 #include <linux/jiffies.h>
15 
16 #include <linux/timecounter.h>
17 #include <linux/net_tstamp.h>
18 #include <linux/ptp_clock_kernel.h>
19 
20 #include "ixgbe_type.h"
21 #include "ixgbe_common.h"
22 #include "ixgbe_dcb.h"
23 #if IS_ENABLED(CONFIG_FCOE)
24 #define IXGBE_FCOE
25 #include "ixgbe_fcoe.h"
26 #endif /* IS_ENABLED(CONFIG_FCOE) */
27 #ifdef CONFIG_IXGBE_DCA
28 #include <linux/dca.h>
29 #endif
30 #include "ixgbe_ipsec.h"
31 
32 #include <net/xdp.h>
33 #include <net/busy_poll.h>
34 
35 /* common prefix used by pr_<> macros */
36 #undef pr_fmt
37 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
38 
39 /* TX/RX descriptor defines */
40 #define IXGBE_DEFAULT_TXD		    512
41 #define IXGBE_DEFAULT_TX_WORK		    256
42 #define IXGBE_MAX_TXD			   4096
43 #define IXGBE_MIN_TXD			     64
44 
45 #if (PAGE_SIZE < 8192)
46 #define IXGBE_DEFAULT_RXD		    512
47 #else
48 #define IXGBE_DEFAULT_RXD		    128
49 #endif
50 #define IXGBE_MAX_RXD			   4096
51 #define IXGBE_MIN_RXD			     64
52 
53 #define IXGBE_ETH_P_LLDP		 0x88CC
54 
55 /* flow control */
56 #define IXGBE_MIN_FCRTL			   0x40
57 #define IXGBE_MAX_FCRTL			0x7FF80
58 #define IXGBE_MIN_FCRTH			  0x600
59 #define IXGBE_MAX_FCRTH			0x7FFF0
60 #define IXGBE_DEFAULT_FCPAUSE		 0xFFFF
61 #define IXGBE_MIN_FCPAUSE		      0
62 #define IXGBE_MAX_FCPAUSE		 0xFFFF
63 
64 /* Supported Rx Buffer Sizes */
65 #define IXGBE_RXBUFFER_256    256  /* Used for skb receive header */
66 #define IXGBE_RXBUFFER_1536  1536
67 #define IXGBE_RXBUFFER_2K    2048
68 #define IXGBE_RXBUFFER_3K    3072
69 #define IXGBE_RXBUFFER_4K    4096
70 #define IXGBE_MAX_RXBUFFER  16384  /* largest size for a single descriptor */
71 
72 /* Attempt to maximize the headroom available for incoming frames.  We
73  * use a 2K buffer for receives and need 1536/1534 to store the data for
74  * the frame.  This leaves us with 512 bytes of room.  From that we need
75  * to deduct the space needed for the shared info and the padding needed
76  * to IP align the frame.
77  *
78  * Note: For cache line sizes 256 or larger this value is going to end
79  *	 up negative.  In these cases we should fall back to the 3K
80  *	 buffers.
81  */
82 #if (PAGE_SIZE < 8192)
83 #define IXGBE_MAX_2K_FRAME_BUILD_SKB (IXGBE_RXBUFFER_1536 - NET_IP_ALIGN)
84 #define IXGBE_2K_TOO_SMALL_WITH_PADDING \
85 ((NET_SKB_PAD + IXGBE_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IXGBE_RXBUFFER_2K))
86 
87 static inline int ixgbe_compute_pad(int rx_buf_len)
88 {
89 	int page_size, pad_size;
90 
91 	page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
92 	pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;
93 
94 	return pad_size;
95 }
96 
97 static inline int ixgbe_skb_pad(void)
98 {
99 	int rx_buf_len;
100 
101 	/* If a 2K buffer cannot handle a standard Ethernet frame then
102 	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
103 	 *
104 	 * For a 3K buffer we need to add enough padding to allow for
105 	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
106 	 * cache-line alignment.
107 	 */
108 	if (IXGBE_2K_TOO_SMALL_WITH_PADDING)
109 		rx_buf_len = IXGBE_RXBUFFER_3K + SKB_DATA_ALIGN(NET_IP_ALIGN);
110 	else
111 		rx_buf_len = IXGBE_RXBUFFER_1536;
112 
113 	/* if needed make room for NET_IP_ALIGN */
114 	rx_buf_len -= NET_IP_ALIGN;
115 
116 	return ixgbe_compute_pad(rx_buf_len);
117 }
118 
119 #define IXGBE_SKB_PAD	ixgbe_skb_pad()
120 #else
121 #define IXGBE_SKB_PAD	(NET_SKB_PAD + NET_IP_ALIGN)
122 #endif
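
/* Illustrative arithmetic only (not used by the code): on a typical x86_64
 * build with 4K pages, 64-byte cache lines and NET_IP_ALIGN == 0,
 * IXGBE_2K_TOO_SMALL_WITH_PADDING is false, so IXGBE_SKB_PAD evaluates
 * roughly as
 *
 *	rx_buf_len = IXGBE_RXBUFFER_1536 - NET_IP_ALIGN	= 1536
 *	page_size  = ALIGN(1536, 2048)			= 2048
 *	pad_size   = SKB_WITH_OVERHEAD(2048) - 1536
 *		   = (2048 - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
 *		     - 1536
 *
 * which, with the ~320 byte skb_shared_info mentioned in the note below,
 * leaves about 192 bytes of headroom in front of the frame.  Treat these
 * numbers as a sketch; the exact value depends on kernel configuration.
 */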
123 
124 /*
125  * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
126  * reserve 64 more, and skb_shared_info adds an additional 320 bytes more;
127  * this adds up to 448 bytes of extra data.
128  *
129  * Since netdev_alloc_skb now allocates a page fragment we can use a value
130  * of 256 and the resultant skb will have a truesize of 960 or less.
131  */
132 #define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_256
133 
134 /* How many Rx buffers do we bundle into one write to the hardware? */
135 #define IXGBE_RX_BUFFER_WRITE	16	/* Must be power of 2 */
136 
137 #define IXGBE_RX_DMA_ATTR \
138 	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
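
/* A minimal sketch (assumptions: the standard dma_map_page_attrs() API,
 * not necessarily the exact call site used by the driver) of how these
 * attributes are passed when mapping a receive page:
 *
 *	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
 *				 ixgbe_rx_pg_size(rx_ring),
 *				 DMA_FROM_DEVICE, IXGBE_RX_DMA_ATTR);
 *
 * DMA_ATTR_SKIP_CPU_SYNC lets the driver sync only the region it actually
 * touches, and DMA_ATTR_WEAK_ORDERING allows relaxed-ordering platforms to
 * reorder the DMA writes for throughput.
 */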
139 
140 enum ixgbe_tx_flags {
141 	/* cmd_type flags */
142 	IXGBE_TX_FLAGS_HW_VLAN	= 0x01,
143 	IXGBE_TX_FLAGS_TSO	= 0x02,
144 	IXGBE_TX_FLAGS_TSTAMP	= 0x04,
145 
146 	/* olinfo flags */
147 	IXGBE_TX_FLAGS_CC	= 0x08,
148 	IXGBE_TX_FLAGS_IPV4	= 0x10,
149 	IXGBE_TX_FLAGS_CSUM	= 0x20,
150 	IXGBE_TX_FLAGS_IPSEC	= 0x40,
151 
152 	/* software defined flags */
153 	IXGBE_TX_FLAGS_SW_VLAN	= 0x80,
154 	IXGBE_TX_FLAGS_FCOE	= 0x100,
155 };
156 
157 /* VLAN info */
158 #define IXGBE_TX_FLAGS_VLAN_MASK	0xffff0000
159 #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK	0xe0000000
160 #define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT  29
161 #define IXGBE_TX_FLAGS_VLAN_SHIFT	16
162 
163 #define IXGBE_MAX_VF_MC_ENTRIES         30
164 #define IXGBE_MAX_VF_FUNCTIONS          64
165 #define IXGBE_MAX_VFTA_ENTRIES          128
166 #define MAX_EMULATION_MAC_ADDRS         16
167 #define IXGBE_MAX_PF_MACVLANS           15
168 #define VMDQ_P(p)   ((p) + adapter->ring_feature[RING_F_VMDQ].offset)
169 #define IXGBE_82599_VF_DEVICE_ID        0x10ED
170 #define IXGBE_X540_VF_DEVICE_ID         0x1515
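
/* VMDQ_P() turns a PF-relative pool index into an absolute pool number.
 * Illustrative example (the exact layout depends on the SR-IOV
 * configuration): with SR-IOV enabled the VMDQ offset typically equals the
 * number of VFs, so VMDQ_P(0), the PF's default pool, lands just past the
 * VF pools.
 */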
171 
172 struct vf_data_storage {
173 	struct pci_dev *vfdev;
174 	unsigned char vf_mac_addresses[ETH_ALEN];
175 	u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
176 	u16 num_vf_mc_hashes;
177 	bool clear_to_send;
178 	bool pf_set_mac;
179 	u16 pf_vlan; /* When set, guest VLAN config not allowed. */
180 	u16 pf_qos;
181 	u16 tx_rate;
182 	u8 spoofchk_enabled;
183 	bool rss_query_enabled;
184 	u8 trusted;
185 	int xcast_mode;
186 	unsigned int vf_api;
187 };
188 
189 enum ixgbevf_xcast_modes {
190 	IXGBEVF_XCAST_MODE_NONE = 0,
191 	IXGBEVF_XCAST_MODE_MULTI,
192 	IXGBEVF_XCAST_MODE_ALLMULTI,
193 	IXGBEVF_XCAST_MODE_PROMISC,
194 };
195 
196 struct vf_macvlans {
197 	struct list_head l;
198 	int vf;
199 	bool free;
200 	bool is_macvlan;
201 	u8 vf_macvlan[ETH_ALEN];
202 };
203 
204 #define IXGBE_MAX_TXD_PWR	14
205 #define IXGBE_MAX_DATA_PER_TXD	(1u << IXGBE_MAX_TXD_PWR)
206 
207 /* Tx Descriptors needed, worst case */
208 #define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
209 #define DESC_NEEDED (MAX_SKB_FRAGS + 4)
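
/* Worked example (illustrative): a single 64KB fragment costs
 * TXD_USE_COUNT(65536) = DIV_ROUND_UP(65536, 16384) = 4 data descriptors.
 * DESC_NEEDED is the worst case used when checking for ring space before
 * queueing a frame: one descriptor per possible fragment plus a little
 * slack (the + 4) for things such as the head of the frame and a context
 * descriptor.
 */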
210 
211 /* wrapper around a pointer to a socket buffer,
212  * so a DMA handle can be stored along with the buffer */
213 struct ixgbe_tx_buffer {
214 	union ixgbe_adv_tx_desc *next_to_watch;
215 	unsigned long time_stamp;
216 	union {
217 		struct sk_buff *skb;
218 		struct xdp_frame *xdpf;
219 	};
220 	unsigned int bytecount;
221 	unsigned short gso_segs;
222 	__be16 protocol;
223 	DEFINE_DMA_UNMAP_ADDR(dma);
224 	DEFINE_DMA_UNMAP_LEN(len);
225 	u32 tx_flags;
226 };
227 
228 struct ixgbe_rx_buffer {
229 	struct sk_buff *skb;
230 	dma_addr_t dma;
231 	union {
232 		struct {
233 			struct page *page;
234 			__u32 page_offset;
235 			__u16 pagecnt_bias;
236 		};
237 		struct {
238 			void *addr;
239 			u64 handle;
240 		};
241 	};
242 };
243 
244 struct ixgbe_queue_stats {
245 	u64 packets;
246 	u64 bytes;
247 };
248 
249 struct ixgbe_tx_queue_stats {
250 	u64 restart_queue;
251 	u64 tx_busy;
252 	u64 tx_done_old;
253 };
254 
255 struct ixgbe_rx_queue_stats {
256 	u64 rsc_count;
257 	u64 rsc_flush;
258 	u64 non_eop_descs;
259 	u64 alloc_rx_page;
260 	u64 alloc_rx_page_failed;
261 	u64 alloc_rx_buff_failed;
262 	u64 csum_err;
263 };
264 
265 #define IXGBE_TS_HDR_LEN 8
266 
267 enum ixgbe_ring_state_t {
268 	__IXGBE_RX_3K_BUFFER,
269 	__IXGBE_RX_BUILD_SKB_ENABLED,
270 	__IXGBE_RX_RSC_ENABLED,
271 	__IXGBE_RX_CSUM_UDP_ZERO_ERR,
272 	__IXGBE_RX_FCOE,
273 	__IXGBE_TX_FDIR_INIT_DONE,
274 	__IXGBE_TX_XPS_INIT_DONE,
275 	__IXGBE_TX_DETECT_HANG,
276 	__IXGBE_HANG_CHECK_ARMED,
277 	__IXGBE_TX_XDP_RING,
278 	__IXGBE_TX_DISABLED,
279 };
280 
281 #define ring_uses_build_skb(ring) \
282 	test_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &(ring)->state)
283 
284 struct ixgbe_fwd_adapter {
285 	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
286 	struct net_device *netdev;
287 	unsigned int tx_base_queue;
288 	unsigned int rx_base_queue;
289 	int pool;
290 };
291 
292 #define check_for_tx_hang(ring) \
293 	test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
294 #define set_check_for_tx_hang(ring) \
295 	set_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
296 #define clear_check_for_tx_hang(ring) \
297 	clear_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
298 #define ring_is_rsc_enabled(ring) \
299 	test_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
300 #define set_ring_rsc_enabled(ring) \
301 	set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
302 #define clear_ring_rsc_enabled(ring) \
303 	clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
304 #define ring_is_xdp(ring) \
305 	test_bit(__IXGBE_TX_XDP_RING, &(ring)->state)
306 #define set_ring_xdp(ring) \
307 	set_bit(__IXGBE_TX_XDP_RING, &(ring)->state)
308 #define clear_ring_xdp(ring) \
309 	clear_bit(__IXGBE_TX_XDP_RING, &(ring)->state)
310 struct ixgbe_ring {
311 	struct ixgbe_ring *next;	/* pointer to next ring in q_vector */
312 	struct ixgbe_q_vector *q_vector; /* backpointer to host q_vector */
313 	struct net_device *netdev;	/* netdev ring belongs to */
314 	struct bpf_prog *xdp_prog;
315 	struct device *dev;		/* device for DMA mapping */
316 	void *desc;			/* descriptor ring memory */
317 	union {
318 		struct ixgbe_tx_buffer *tx_buffer_info;
319 		struct ixgbe_rx_buffer *rx_buffer_info;
320 	};
321 	unsigned long state;
322 	u8 __iomem *tail;
323 	dma_addr_t dma;			/* phys. address of descriptor ring */
324 	unsigned int size;		/* length in bytes */
325 
326 	u16 count;			/* number of descriptors */
327 
328 	u8 queue_index; /* needed for multiqueue queue management */
329 	u8 reg_idx;			/* holds the special value that gets
330 					 * the hardware register offset
331 					 * associated with this ring, which is
332 					 * different for DCB and RSS modes
333 					 */
334 	u16 next_to_use;
335 	u16 next_to_clean;
336 
337 	unsigned long last_rx_timestamp;
338 
339 	union {
340 		u16 next_to_alloc;
341 		struct {
342 			u8 atr_sample_rate;
343 			u8 atr_count;
344 		};
345 	};
346 
347 	u8 dcb_tc;
348 	struct ixgbe_queue_stats stats;
349 	struct u64_stats_sync syncp;
350 	union {
351 		struct ixgbe_tx_queue_stats tx_stats;
352 		struct ixgbe_rx_queue_stats rx_stats;
353 	};
354 	struct xdp_rxq_info xdp_rxq;
355 	struct xdp_umem *xsk_umem;
356 	struct zero_copy_allocator zca; /* ZC allocator anchor */
357 	u16 ring_idx;		/* {rx,tx,xdp}_ring back reference idx */
358 	u16 rx_buf_len;
359 } ____cacheline_internodealigned_in_smp;
360 
361 enum ixgbe_ring_f_enum {
362 	RING_F_NONE = 0,
363 	RING_F_VMDQ,  /* SR-IOV uses the same ring feature */
364 	RING_F_RSS,
365 	RING_F_FDIR,
366 #ifdef IXGBE_FCOE
367 	RING_F_FCOE,
368 #endif /* IXGBE_FCOE */
369 
370 	RING_F_ARRAY_SIZE      /* must be last in enum set */
371 };
372 
373 #define IXGBE_MAX_RSS_INDICES		16
374 #define IXGBE_MAX_RSS_INDICES_X550	63
375 #define IXGBE_MAX_VMDQ_INDICES		64
376 #define IXGBE_MAX_FDIR_INDICES		63	/* based on q_vector limit */
377 #define IXGBE_MAX_FCOE_INDICES		8
378 #define MAX_RX_QUEUES			(IXGBE_MAX_FDIR_INDICES + 1)
379 #define MAX_TX_QUEUES			(IXGBE_MAX_FDIR_INDICES + 1)
380 #define MAX_XDP_QUEUES			(IXGBE_MAX_FDIR_INDICES + 1)
381 #define IXGBE_MAX_L2A_QUEUES		4
382 #define IXGBE_BAD_L2A_QUEUE		3
383 #define IXGBE_MAX_MACVLANS		63
384 
385 struct ixgbe_ring_feature {
386 	u16 limit;	/* upper limit on feature indices */
387 	u16 indices;	/* current value of indices */
388 	u16 mask;	/* Mask used for feature to ring mapping */
389 	u16 offset;	/* offset to start of feature */
390 } ____cacheline_internodealigned_in_smp;
391 
392 #define IXGBE_82599_VMDQ_8Q_MASK 0x78
393 #define IXGBE_82599_VMDQ_4Q_MASK 0x7C
394 #define IXGBE_82599_VMDQ_2Q_MASK 0x7E
395 
396 /*
397  * FCoE requires that all Rx buffers be over 2200 bytes in length.  Since
398  * such a buffer no longer fits in half of a 4K page, we use 3K buffers and
399  * double the page order for FCoE-enabled Rx queues.
400  */
401 static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring)
402 {
403 	if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
404 		return IXGBE_RXBUFFER_3K;
405 #if (PAGE_SIZE < 8192)
406 	if (ring_uses_build_skb(ring))
407 		return IXGBE_MAX_2K_FRAME_BUILD_SKB;
408 #endif
409 	return IXGBE_RXBUFFER_2K;
410 }
411 
412 static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
413 {
414 #if (PAGE_SIZE < 8192)
415 	if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
416 		return 1;
417 #endif
418 	return 0;
419 }
420 #define ixgbe_rx_pg_size(_ring) (PAGE_SIZE << ixgbe_rx_pg_order(_ring))
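
/* Illustrative summary of the combinations above on a 4K-page system: when
 * __IXGBE_RX_3K_BUFFER is set (e.g. for FCoE, or when padding no longer
 * fits in 2K) the page order becomes 1, so ixgbe_rx_pg_size() == 8192 and
 * two buffers can still be carved out of each page; otherwise order-0 4K
 * pages are split into two 2K halves.  With pages of 8K or more, order-0
 * pages are always used.
 */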
421 
422 #define IXGBE_ITR_ADAPTIVE_MIN_INC	2
423 #define IXGBE_ITR_ADAPTIVE_MIN_USECS	10
424 #define IXGBE_ITR_ADAPTIVE_MAX_USECS	126
425 #define IXGBE_ITR_ADAPTIVE_LATENCY	0x80
426 #define IXGBE_ITR_ADAPTIVE_BULK		0x00
427 
428 struct ixgbe_ring_container {
429 	struct ixgbe_ring *ring;	/* pointer to linked list of rings */
430 	unsigned long next_update;	/* jiffies value of last update */
431 	unsigned int total_bytes;	/* total bytes processed this int */
432 	unsigned int total_packets;	/* total packets processed this int */
433 	u16 work_limit;			/* total work allowed per interrupt */
434 	u8 count;			/* total number of rings in vector */
435 	u8 itr;				/* current ITR setting for ring */
436 };
437 
438 /* iterator for handling rings in ring container */
439 #define ixgbe_for_each_ring(pos, head) \
440 	for (pos = (head).ring; pos != NULL; pos = pos->next)
441 
442 #define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
443 			      ? 8 : 1)
444 #define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS
445 
446 /* MAX_Q_VECTORS of these are allocated,
447  * but we only use one per queue-specific vector.
448  */
449 struct ixgbe_q_vector {
450 	struct ixgbe_adapter *adapter;
451 #ifdef CONFIG_IXGBE_DCA
452 	int cpu;	    /* CPU for DCA */
453 #endif
454 	u16 v_idx;		/* index of q_vector within array, also used for
455 				 * finding the bit in EICR and friends that
456 				 * represents the vector for this ring */
457 	u16 itr;		/* Interrupt throttle rate written to EITR */
458 	struct ixgbe_ring_container rx, tx;
459 
460 	struct napi_struct napi;
461 	cpumask_t affinity_mask;
462 	int numa_node;
463 	struct rcu_head rcu;	/* to avoid race with update stats on free */
464 	char name[IFNAMSIZ + 9];
465 
466 	/* for dynamic allocation of rings associated with this q_vector */
467 	struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp;
468 };
469 
470 #ifdef CONFIG_IXGBE_HWMON
471 
472 #define IXGBE_HWMON_TYPE_LOC		0
473 #define IXGBE_HWMON_TYPE_TEMP		1
474 #define IXGBE_HWMON_TYPE_CAUTION	2
475 #define IXGBE_HWMON_TYPE_MAX		3
476 
477 struct hwmon_attr {
478 	struct device_attribute dev_attr;
479 	struct ixgbe_hw *hw;
480 	struct ixgbe_thermal_diode_data *sensor;
481 	char name[12];
482 };
483 
484 struct hwmon_buff {
485 	struct attribute_group group;
486 	const struct attribute_group *groups[2];
487 	struct attribute *attrs[IXGBE_MAX_SENSORS * 4 + 1];
488 	struct hwmon_attr hwmon_list[IXGBE_MAX_SENSORS * 4];
489 	unsigned int n_hwmon;
490 };
491 #endif /* CONFIG_IXGBE_HWMON */
492 
493 /*
494  * Microsecond values for various ITR rates, shifted left by 2 so they fit
495  * the ITR register, whose lowest 3 bits are reserved as 0.
496  */
497 #define IXGBE_MIN_RSC_ITR	24
498 #define IXGBE_100K_ITR		40
499 #define IXGBE_20K_ITR		200
500 #define IXGBE_12K_ITR		336
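
/* Example of the encoding (illustrative): a 20K interrupts/sec target is a
 * 50 usec interval, and 50 << 2 == 200 == IXGBE_20K_ITR; likewise
 * 100K/sec -> 10 usec -> 40 and 12K/sec -> ~84 usec -> 336.
 */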
501 
502 /* ixgbe_test_staterr - tests bits in Rx descriptor status and error fields */
503 static inline __le32 ixgbe_test_staterr(union ixgbe_adv_rx_desc *rx_desc,
504 					const u32 stat_err_bits)
505 {
506 	return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
507 }
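
/* Sketch of the typical caller pattern (the real Rx clean-up loop lives in
 * ixgbe_main.c): peek at the next descriptor and stop as soon as the
 * Descriptor Done bit has not yet been written back by hardware:
 *
 *	union ixgbe_adv_rx_desc *rx_desc = IXGBE_RX_DESC(rx_ring, ntc);
 *
 *	if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
 *		break;
 *
 * The return value is a masked __le32, so callers only test it for
 * zero/non-zero instead of comparing against host-endian constants.
 */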
508 
509 static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
510 {
511 	u16 ntc = ring->next_to_clean;
512 	u16 ntu = ring->next_to_use;
513 
514 	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
515 }
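
/* Example (illustrative): with count == 512, next_to_clean == 10 and
 * next_to_use == 200 this returns 512 + 10 - 200 - 1 == 321 unused
 * descriptors.  One slot is always left unused so that next_to_use ==
 * next_to_clean unambiguously means "empty" rather than "full".
 */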
516 
517 #define IXGBE_RX_DESC(R, i)	    \
518 	(&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
519 #define IXGBE_TX_DESC(R, i)	    \
520 	(&(((union ixgbe_adv_tx_desc *)((R)->desc))[i]))
521 #define IXGBE_TX_CTXTDESC(R, i)	    \
522 	(&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))
523 
524 #define IXGBE_MAX_JUMBO_FRAME_SIZE	9728 /* Maximum Supported Size 9.5KB */
525 #ifdef IXGBE_FCOE
526 /* Use 3K as the baby jumbo frame size for FCoE */
527 #define IXGBE_FCOE_JUMBO_FRAME_SIZE       3072
528 #endif /* IXGBE_FCOE */
529 
530 #define OTHER_VECTOR 1
531 #define NON_Q_VECTORS (OTHER_VECTOR)
532 
533 #define MAX_MSIX_VECTORS_82599 64
534 #define MAX_Q_VECTORS_82599 64
535 #define MAX_MSIX_VECTORS_82598 18
536 #define MAX_Q_VECTORS_82598 16
537 
538 struct ixgbe_mac_addr {
539 	u8 addr[ETH_ALEN];
540 	u16 pool;
541 	u16 state; /* bitmask */
542 };
543 
544 #define IXGBE_MAC_STATE_DEFAULT		0x1
545 #define IXGBE_MAC_STATE_MODIFIED	0x2
546 #define IXGBE_MAC_STATE_IN_USE		0x4
547 
548 #define MAX_Q_VECTORS MAX_Q_VECTORS_82599
549 #define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82599
550 
551 #define MIN_MSIX_Q_VECTORS 1
552 #define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
553 
554 /* default to trying for four seconds */
555 #define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
556 #define IXGBE_SFP_POLL_JIFFIES (2 * HZ)	/* SFP poll every 2 seconds */
557 
558 /* board specific private data structure */
559 struct ixgbe_adapter {
560 	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
561 	/* OS defined structs */
562 	struct net_device *netdev;
563 	struct bpf_prog *xdp_prog;
564 	struct pci_dev *pdev;
565 
566 	unsigned long state;
567 
568 	/* Some features need tri-state capability,
569 	 * thus the additional *_CAPABLE flags.
570 	 */
571 	u32 flags;
572 #define IXGBE_FLAG_MSI_ENABLED			BIT(1)
573 #define IXGBE_FLAG_MSIX_ENABLED			BIT(3)
574 #define IXGBE_FLAG_RX_1BUF_CAPABLE		BIT(4)
575 #define IXGBE_FLAG_RX_PS_CAPABLE		BIT(5)
576 #define IXGBE_FLAG_RX_PS_ENABLED		BIT(6)
577 #define IXGBE_FLAG_DCA_ENABLED			BIT(8)
578 #define IXGBE_FLAG_DCA_CAPABLE			BIT(9)
579 #define IXGBE_FLAG_IMIR_ENABLED			BIT(10)
580 #define IXGBE_FLAG_MQ_CAPABLE			BIT(11)
581 #define IXGBE_FLAG_DCB_ENABLED			BIT(12)
582 #define IXGBE_FLAG_VMDQ_CAPABLE			BIT(13)
583 #define IXGBE_FLAG_VMDQ_ENABLED			BIT(14)
584 #define IXGBE_FLAG_FAN_FAIL_CAPABLE		BIT(15)
585 #define IXGBE_FLAG_NEED_LINK_UPDATE		BIT(16)
586 #define IXGBE_FLAG_NEED_LINK_CONFIG		BIT(17)
587 #define IXGBE_FLAG_FDIR_HASH_CAPABLE		BIT(18)
588 #define IXGBE_FLAG_FDIR_PERFECT_CAPABLE		BIT(19)
589 #define IXGBE_FLAG_FCOE_CAPABLE			BIT(20)
590 #define IXGBE_FLAG_FCOE_ENABLED			BIT(21)
591 #define IXGBE_FLAG_SRIOV_CAPABLE		BIT(22)
592 #define IXGBE_FLAG_SRIOV_ENABLED		BIT(23)
593 #define IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE	BIT(24)
594 #define IXGBE_FLAG_RX_HWTSTAMP_ENABLED		BIT(25)
595 #define IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER	BIT(26)
596 #define IXGBE_FLAG_DCB_CAPABLE			BIT(27)
597 #define IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE	BIT(28)
598 
599 	u32 flags2;
600 #define IXGBE_FLAG2_RSC_CAPABLE			BIT(0)
601 #define IXGBE_FLAG2_RSC_ENABLED			BIT(1)
602 #define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE		BIT(2)
603 #define IXGBE_FLAG2_TEMP_SENSOR_EVENT		BIT(3)
604 #define IXGBE_FLAG2_SEARCH_FOR_SFP		BIT(4)
605 #define IXGBE_FLAG2_SFP_NEEDS_RESET		BIT(5)
606 #define IXGBE_FLAG2_FDIR_REQUIRES_REINIT	BIT(7)
607 #define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP		BIT(8)
608 #define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP		BIT(9)
609 #define IXGBE_FLAG2_PTP_PPS_ENABLED		BIT(10)
610 #define IXGBE_FLAG2_PHY_INTERRUPT		BIT(11)
611 #define IXGBE_FLAG2_UDP_TUN_REREG_NEEDED	BIT(12)
612 #define IXGBE_FLAG2_VLAN_PROMISC		BIT(13)
613 #define IXGBE_FLAG2_EEE_CAPABLE			BIT(14)
614 #define IXGBE_FLAG2_EEE_ENABLED			BIT(15)
615 #define IXGBE_FLAG2_RX_LEGACY			BIT(16)
616 #define IXGBE_FLAG2_IPSEC_ENABLED		BIT(17)
617 #define IXGBE_FLAG2_VF_IPSEC_ENABLED		BIT(18)
618 
619 	/* Tx fast path data */
620 	int num_tx_queues;
621 	u16 tx_itr_setting;
622 	u16 tx_work_limit;
623 	u64 tx_ipsec;
624 
625 	/* Rx fast path data */
626 	int num_rx_queues;
627 	u16 rx_itr_setting;
628 	u64 rx_ipsec;
629 
630 	/* Port number used to identify VXLAN traffic */
631 	__be16 vxlan_port;
632 	__be16 geneve_port;
633 
634 	/* XDP */
635 	int num_xdp_queues;
636 	struct ixgbe_ring *xdp_ring[MAX_XDP_QUEUES];
637 
638 	/* TX */
639 	struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
640 
641 	u64 restart_queue;
642 	u64 lsc_int;
643 	u32 tx_timeout_count;
644 
645 	/* RX */
646 	struct ixgbe_ring *rx_ring[MAX_RX_QUEUES];
647 	int num_rx_pools;		/* == num_rx_queues in 82598 */
648 	int num_rx_queues_per_pool;	/* 1 if 82598, can be many if 82599 */
649 	u64 hw_csum_rx_error;
650 	u64 hw_rx_no_dma_resources;
651 	u64 rsc_total_count;
652 	u64 rsc_total_flush;
653 	u64 non_eop_descs;
654 	u32 alloc_rx_page;
655 	u32 alloc_rx_page_failed;
656 	u32 alloc_rx_buff_failed;
657 
658 	struct ixgbe_q_vector *q_vector[MAX_Q_VECTORS];
659 
660 	/* DCB parameters */
661 	struct ieee_pfc *ixgbe_ieee_pfc;
662 	struct ieee_ets *ixgbe_ieee_ets;
663 	struct ixgbe_dcb_config dcb_cfg;
664 	struct ixgbe_dcb_config temp_dcb_cfg;
665 	u8 hw_tcs;
666 	u8 dcb_set_bitmap;
667 	u8 dcbx_cap;
668 	enum ixgbe_fc_mode last_lfc_mode;
669 
670 	int num_q_vectors;	/* current number of q_vectors for device */
671 	int max_q_vectors;	/* true count of q_vectors for device */
672 	struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE];
673 	struct msix_entry *msix_entries;
674 
675 	u32 test_icr;
676 	struct ixgbe_ring test_tx_ring;
677 	struct ixgbe_ring test_rx_ring;
678 
679 	/* structs defined in ixgbe_hw.h */
680 	struct ixgbe_hw hw;
681 	u16 msg_enable;
682 	struct ixgbe_hw_stats stats;
683 
684 	u64 tx_busy;
685 	unsigned int tx_ring_count;
686 	unsigned int xdp_ring_count;
687 	unsigned int rx_ring_count;
688 
689 	u32 link_speed;
690 	bool link_up;
691 	unsigned long sfp_poll_time;
692 	unsigned long link_check_timeout;
693 
694 	struct timer_list service_timer;
695 	struct work_struct service_task;
696 
697 	struct hlist_head fdir_filter_list;
698 	unsigned long fdir_overflow; /* number of times ATR was backed off */
699 	union ixgbe_atr_input fdir_mask;
700 	int fdir_filter_count;
701 	u32 fdir_pballoc;
702 	u32 atr_sample_rate;
703 	spinlock_t fdir_perfect_lock;
704 
705 #ifdef IXGBE_FCOE
706 	struct ixgbe_fcoe fcoe;
707 #endif /* IXGBE_FCOE */
708 	u8 __iomem *io_addr; /* Mainly for iounmap use */
709 	u32 wol;
710 
711 	u16 bridge_mode;
712 
713 	char eeprom_id[NVM_VER_SIZE];
714 	u16 eeprom_cap;
715 
716 	u32 interrupt_event;
717 	u32 led_reg;
718 
719 	struct ptp_clock *ptp_clock;
720 	struct ptp_clock_info ptp_caps;
721 	struct work_struct ptp_tx_work;
722 	struct sk_buff *ptp_tx_skb;
723 	struct hwtstamp_config tstamp_config;
724 	unsigned long ptp_tx_start;
725 	unsigned long last_overflow_check;
726 	unsigned long last_rx_ptp_check;
727 	unsigned long last_rx_timestamp;
728 	spinlock_t tmreg_lock;
729 	struct cyclecounter hw_cc;
730 	struct timecounter hw_tc;
731 	u32 base_incval;
732 	u32 tx_hwtstamp_timeouts;
733 	u32 tx_hwtstamp_skipped;
734 	u32 rx_hwtstamp_cleared;
735 	void (*ptp_setup_sdp)(struct ixgbe_adapter *);
736 
737 	/* SR-IOV */
738 	DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
739 	unsigned int num_vfs;
740 	struct vf_data_storage *vfinfo;
741 	int vf_rate_link_speed;
742 	struct vf_macvlans vf_mvs;
743 	struct vf_macvlans *mv_list;
744 
745 	u32 timer_event_accumulator;
746 	u32 vferr_refcount;
747 	struct ixgbe_mac_addr *mac_table;
748 	struct kobject *info_kobj;
749 #ifdef CONFIG_IXGBE_HWMON
750 	struct hwmon_buff *ixgbe_hwmon_buff;
751 #endif /* CONFIG_IXGBE_HWMON */
752 #ifdef CONFIG_DEBUG_FS
753 	struct dentry *ixgbe_dbg_adapter;
754 #endif /*CONFIG_DEBUG_FS*/
755 
756 	u8 default_up;
757 	/* Bitmask indicating in-use pools */
758 	DECLARE_BITMAP(fwd_bitmask, IXGBE_MAX_MACVLANS + 1);
759 
760 #define IXGBE_MAX_LINK_HANDLE 10
761 	struct ixgbe_jump_table *jump_tables[IXGBE_MAX_LINK_HANDLE];
762 	unsigned long tables;
763 
764 /* Maximum number of RETA entries among all devices supported by the ixgbe
765  * driver: currently this is the X550 family in non-SR-IOV mode.
766  */
767 #define IXGBE_MAX_RETA_ENTRIES 512
768 	u8 rss_indir_tbl[IXGBE_MAX_RETA_ENTRIES];
769 
770 #define IXGBE_RSS_KEY_SIZE     40  /* size of RSS Hash Key in bytes */
771 	u32 *rss_key;
772 
773 #ifdef CONFIG_XFRM_OFFLOAD
774 	struct ixgbe_ipsec *ipsec;
775 #endif /* CONFIG_XFRM_OFFLOAD */
776 
777 	/* AF_XDP zero-copy */
778 	struct xdp_umem **xsk_umems;
779 	u16 num_xsk_umems_used;
780 	u16 num_xsk_umems;
781 };
782 
783 static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
784 {
785 	switch (adapter->hw.mac.type) {
786 	case ixgbe_mac_82598EB:
787 	case ixgbe_mac_82599EB:
788 	case ixgbe_mac_X540:
789 		return IXGBE_MAX_RSS_INDICES;
790 	case ixgbe_mac_X550:
791 	case ixgbe_mac_X550EM_x:
792 	case ixgbe_mac_x550em_a:
793 		return IXGBE_MAX_RSS_INDICES_X550;
794 	default:
795 		return 0;
796 	}
797 }
798 
799 struct ixgbe_fdir_filter {
800 	struct hlist_node fdir_node;
801 	union ixgbe_atr_input filter;
802 	u16 sw_idx;
803 	u64 action;
804 };
805 
806 enum ixgbe_state_t {
807 	__IXGBE_TESTING,
808 	__IXGBE_RESETTING,
809 	__IXGBE_DOWN,
810 	__IXGBE_DISABLED,
811 	__IXGBE_REMOVING,
812 	__IXGBE_SERVICE_SCHED,
813 	__IXGBE_SERVICE_INITED,
814 	__IXGBE_IN_SFP_INIT,
815 	__IXGBE_PTP_RUNNING,
816 	__IXGBE_PTP_TX_IN_PROGRESS,
817 	__IXGBE_RESET_REQUESTED,
818 };
819 
820 struct ixgbe_cb {
821 	union {				/* Union defining head/tail partner */
822 		struct sk_buff *head;
823 		struct sk_buff *tail;
824 	};
825 	dma_addr_t dma;
826 	u16 append_cnt;
827 	bool page_released;
828 };
829 #define IXGBE_CB(skb) ((struct ixgbe_cb *)(skb)->cb)
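
/* Sketch of typical usage (illustrative): per-skb DMA/RSC state is stashed
 * in the skb control buffer, e.g.
 *
 *	IXGBE_CB(skb)->dma = rx_buffer->dma;
 *	IXGBE_CB(skb)->append_cnt++;
 *
 * so struct ixgbe_cb must stay within the 48 bytes of skb->cb.
 */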
830 
831 enum ixgbe_boards {
832 	board_82598,
833 	board_82599,
834 	board_X540,
835 	board_X550,
836 	board_X550EM_x,
837 	board_x550em_x_fw,
838 	board_x550em_a,
839 	board_x550em_a_fw,
840 };
841 
842 extern const struct ixgbe_info ixgbe_82598_info;
843 extern const struct ixgbe_info ixgbe_82599_info;
844 extern const struct ixgbe_info ixgbe_X540_info;
845 extern const struct ixgbe_info ixgbe_X550_info;
846 extern const struct ixgbe_info ixgbe_X550EM_x_info;
847 extern const struct ixgbe_info ixgbe_x550em_x_fw_info;
848 extern const struct ixgbe_info ixgbe_x550em_a_info;
849 extern const struct ixgbe_info ixgbe_x550em_a_fw_info;
850 #ifdef CONFIG_IXGBE_DCB
851 extern const struct dcbnl_rtnl_ops ixgbe_dcbnl_ops;
852 #endif
853 
854 extern char ixgbe_driver_name[];
855 extern const char ixgbe_driver_version[];
856 #ifdef IXGBE_FCOE
857 extern char ixgbe_default_device_descr[];
858 #endif /* IXGBE_FCOE */
859 
860 int ixgbe_open(struct net_device *netdev);
861 int ixgbe_close(struct net_device *netdev);
862 void ixgbe_up(struct ixgbe_adapter *adapter);
863 void ixgbe_down(struct ixgbe_adapter *adapter);
864 void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
865 void ixgbe_reset(struct ixgbe_adapter *adapter);
866 void ixgbe_set_ethtool_ops(struct net_device *netdev);
867 int ixgbe_setup_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
868 int ixgbe_setup_tx_resources(struct ixgbe_ring *);
869 void ixgbe_free_rx_resources(struct ixgbe_ring *);
870 void ixgbe_free_tx_resources(struct ixgbe_ring *);
871 void ixgbe_configure_rx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
872 void ixgbe_configure_tx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
873 void ixgbe_disable_rx(struct ixgbe_adapter *adapter);
874 void ixgbe_disable_tx(struct ixgbe_adapter *adapter);
875 void ixgbe_update_stats(struct ixgbe_adapter *adapter);
876 int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
877 bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
878 			 u16 subdevice_id);
879 #ifdef CONFIG_PCI_IOV
880 void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter);
881 #endif
882 int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
883 			 const u8 *addr, u16 queue);
884 int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
885 			 const u8 *addr, u16 queue);
886 void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid);
887 void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
888 netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, struct ixgbe_adapter *,
889 				  struct ixgbe_ring *);
890 void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
891 				      struct ixgbe_tx_buffer *);
892 void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
893 void ixgbe_write_eitr(struct ixgbe_q_vector *);
894 int ixgbe_poll(struct napi_struct *napi, int budget);
895 int ethtool_ioctl(struct ifreq *ifr);
896 s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
897 s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
898 s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
899 s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
900 					  union ixgbe_atr_hash_dword input,
901 					  union ixgbe_atr_hash_dword common,
902 					  u8 queue);
903 s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
904 				    union ixgbe_atr_input *input_mask);
905 s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
906 					  union ixgbe_atr_input *input,
907 					  u16 soft_id, u8 queue);
908 s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
909 					  union ixgbe_atr_input *input,
910 					  u16 soft_id);
911 void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
912 					  union ixgbe_atr_input *mask);
913 int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
914 				    struct ixgbe_fdir_filter *input,
915 				    u16 sw_idx);
916 void ixgbe_set_rx_mode(struct net_device *netdev);
917 #ifdef CONFIG_IXGBE_DCB
918 void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
919 #endif
920 int ixgbe_setup_tc(struct net_device *dev, u8 tc);
921 void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
922 void ixgbe_do_reset(struct net_device *netdev);
923 #ifdef CONFIG_IXGBE_HWMON
924 void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter);
925 int ixgbe_sysfs_init(struct ixgbe_adapter *adapter);
926 #endif /* CONFIG_IXGBE_HWMON */
927 #ifdef IXGBE_FCOE
928 void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
929 int ixgbe_fso(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first,
930 	      u8 *hdr_len);
931 int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
932 		   union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb);
933 int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
934 		       struct scatterlist *sgl, unsigned int sgc);
935 int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
936 			  struct scatterlist *sgl, unsigned int sgc);
937 int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
938 int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
939 void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
940 int ixgbe_fcoe_enable(struct net_device *netdev);
941 int ixgbe_fcoe_disable(struct net_device *netdev);
942 #ifdef CONFIG_IXGBE_DCB
943 u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter);
944 u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
945 #endif /* CONFIG_IXGBE_DCB */
946 int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
947 int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
948 			   struct netdev_fcoe_hbainfo *info);
949 u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter);
950 #endif /* IXGBE_FCOE */
951 #ifdef CONFIG_DEBUG_FS
952 void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter);
953 void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter);
954 void ixgbe_dbg_init(void);
955 void ixgbe_dbg_exit(void);
956 #else
957 static inline void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter) {}
958 static inline void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter) {}
959 static inline void ixgbe_dbg_init(void) {}
960 static inline void ixgbe_dbg_exit(void) {}
961 #endif /* CONFIG_DEBUG_FS */
962 static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
963 {
964 	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
965 }
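
/* Typical use (illustrative): flow-controlling a Tx ring through its
 * matching netdev queue, e.g.
 *
 *	netif_tx_stop_queue(txring_txq(tx_ring));
 *
 * which is how the transmit and clean paths pause and resume a ring when
 * it runs low on descriptors.
 */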
966 
967 void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
968 void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter);
969 void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
970 void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
971 void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter);
972 void ixgbe_ptp_tx_hang(struct ixgbe_adapter *adapter);
973 void ixgbe_ptp_rx_pktstamp(struct ixgbe_q_vector *, struct sk_buff *);
974 void ixgbe_ptp_rx_rgtstamp(struct ixgbe_q_vector *, struct sk_buff *skb);
975 static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
976 					 union ixgbe_adv_rx_desc *rx_desc,
977 					 struct sk_buff *skb)
978 {
979 	if (unlikely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_TSIP))) {
980 		ixgbe_ptp_rx_pktstamp(rx_ring->q_vector, skb);
981 		return;
982 	}
983 
984 	if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))
985 		return;
986 
987 	ixgbe_ptp_rx_rgtstamp(rx_ring->q_vector, skb);
988 
989 	/* Update the last_rx_timestamp timer in order to enable watchdog check
990 	 * for error case of latched timestamp on a dropped packet.
991 	 */
992 	rx_ring->last_rx_timestamp = jiffies;
993 }
994 
995 int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
996 int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
997 void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
998 void ixgbe_ptp_reset(struct ixgbe_adapter *adapter);
999 void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter);
1000 #ifdef CONFIG_PCI_IOV
1001 void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter);
1002 #endif
1003 
1004 netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
1005 				  struct ixgbe_adapter *adapter,
1006 				  struct ixgbe_ring *tx_ring);
1007 u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter);
1008 void ixgbe_store_key(struct ixgbe_adapter *adapter);
1009 void ixgbe_store_reta(struct ixgbe_adapter *adapter);
1010 s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
1011 		       u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
1012 #ifdef CONFIG_XFRM_OFFLOAD
1013 void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter);
1014 void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter);
1015 void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter);
1016 void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
1017 		    union ixgbe_adv_rx_desc *rx_desc,
1018 		    struct sk_buff *skb);
1019 int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first,
1020 		   struct ixgbe_ipsec_tx_data *itd);
1021 void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter, u32 vf);
1022 int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *mbuf, u32 vf);
1023 int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter, u32 *mbuf, u32 vf);
1024 #else
1025 static inline void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter) { }
1026 static inline void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter) { }
1027 static inline void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter) { }
1028 static inline void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
1029 				  union ixgbe_adv_rx_desc *rx_desc,
1030 				  struct sk_buff *skb) { }
1031 static inline int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
1032 				 struct ixgbe_tx_buffer *first,
1033 				 struct ixgbe_ipsec_tx_data *itd) { return 0; }
1034 static inline void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter,
1035 					u32 vf) { }
1036 static inline int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter,
1037 					u32 *mbuf, u32 vf) { return -EACCES; }
1038 static inline int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter,
1039 					u32 *mbuf, u32 vf) { return -EACCES; }
1040 #endif /* CONFIG_XFRM_OFFLOAD */
1041 #endif /* _IXGBE_H_ */
1042