/* SPDX-License-Identifier: GPL-2.0 */
/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2016 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#ifndef _IXGBE_H_
#define _IXGBE_H_

#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/cpumask.h>
#include <linux/aer.h>
#include <linux/if_vlan.h>
#include <linux/jiffies.h>

#include <linux/timecounter.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#include "ixgbe_type.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb.h"
#if IS_ENABLED(CONFIG_FCOE)
#define IXGBE_FCOE
#include "ixgbe_fcoe.h"
#endif /* IS_ENABLED(CONFIG_FCOE) */
#ifdef CONFIG_IXGBE_DCA
#include <linux/dca.h>
#endif
#include "ixgbe_ipsec.h"

#include <net/xdp.h>
#include <net/busy_poll.h>

/* common prefix used by pr_<> macros */
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

/* TX/RX descriptor defines */
#define IXGBE_DEFAULT_TXD		512
#define IXGBE_DEFAULT_TX_WORK		256
#define IXGBE_MAX_TXD			4096
#define IXGBE_MIN_TXD			64

#if (PAGE_SIZE < 8192)
#define IXGBE_DEFAULT_RXD		512
#else
#define IXGBE_DEFAULT_RXD		128
#endif
#define IXGBE_MAX_RXD			4096
#define IXGBE_MIN_RXD			64

#define IXGBE_ETH_P_LLDP		0x88CC

/* flow control */
#define IXGBE_MIN_FCRTL			0x40
#define IXGBE_MAX_FCRTL			0x7FF80
#define IXGBE_MIN_FCRTH			0x600
#define IXGBE_MAX_FCRTH			0x7FFF0
#define IXGBE_DEFAULT_FCPAUSE		0xFFFF
#define IXGBE_MIN_FCPAUSE		0
#define IXGBE_MAX_FCPAUSE		0xFFFF

/* Supported Rx Buffer Sizes */
#define IXGBE_RXBUFFER_256	256	/* Used for skb receive header */
#define IXGBE_RXBUFFER_1536	1536
#define IXGBE_RXBUFFER_2K	2048
#define IXGBE_RXBUFFER_3K	3072
#define IXGBE_RXBUFFER_4K	4096
#define IXGBE_MAX_RXBUFFER	16384	/* largest size for a single descriptor */

/* Attempt to maximize the headroom available for incoming frames.  We
 * use a 2K buffer for receives and need 1536/1534 to store the data for
 * the frame.  This leaves us with 512 bytes of room.  From that we need
 * to deduct the space needed for the shared info and the padding needed
 * to IP align the frame.
 *
 * Note: For cache line sizes 256 or larger this value is going to end
 *	 up negative.  In these cases we should fall back to the 3K
 *	 buffers.
 */
#if (PAGE_SIZE < 8192)
#define IXGBE_MAX_2K_FRAME_BUILD_SKB (IXGBE_RXBUFFER_1536 - NET_IP_ALIGN)
#define IXGBE_2K_TOO_SMALL_WITH_PADDING \
((NET_SKB_PAD + IXGBE_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IXGBE_RXBUFFER_2K))

static inline int ixgbe_compute_pad(int rx_buf_len)
{
	int page_size, pad_size;

	page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
	pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;

	return pad_size;
}

static inline int ixgbe_skb_pad(void)
{
	int rx_buf_len;

	/* If a 2K buffer cannot handle a standard Ethernet frame then
	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
	 *
	 * For a 3K buffer we need to add enough padding to allow for
	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
	 * cache-line alignment.
	 */
	if (IXGBE_2K_TOO_SMALL_WITH_PADDING)
		rx_buf_len = IXGBE_RXBUFFER_3K + SKB_DATA_ALIGN(NET_IP_ALIGN);
	else
		rx_buf_len = IXGBE_RXBUFFER_1536;

	/* if needed make room for NET_IP_ALIGN */
	rx_buf_len -= NET_IP_ALIGN;

	return ixgbe_compute_pad(rx_buf_len);
}

#define IXGBE_SKB_PAD	ixgbe_skb_pad()
#else
#define IXGBE_SKB_PAD	(NET_SKB_PAD + NET_IP_ALIGN)
#endif

/*
 * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
 * reserve 64 more, and skb_shared_info adds an additional 320 bytes more,
 * this adds up to 448 bytes of extra data.
 *
 * Since netdev_alloc_skb now allocates a page fragment we can use a value
 * of 256 and the resultant skb will have a truesize of 960 or less.
 */
#define IXGBE_RX_HDR_SIZE	IXGBE_RXBUFFER_256

/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IXGBE_RX_BUFFER_WRITE	16	/* Must be power of 2 */

#define IXGBE_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

enum ixgbe_tx_flags {
	/* cmd_type flags */
	IXGBE_TX_FLAGS_HW_VLAN	= 0x01,
	IXGBE_TX_FLAGS_TSO	= 0x02,
	IXGBE_TX_FLAGS_TSTAMP	= 0x04,

	/* olinfo flags */
	IXGBE_TX_FLAGS_CC	= 0x08,
	IXGBE_TX_FLAGS_IPV4	= 0x10,
	IXGBE_TX_FLAGS_CSUM	= 0x20,
	IXGBE_TX_FLAGS_IPSEC	= 0x40,

	/* software defined flags */
	IXGBE_TX_FLAGS_SW_VLAN	= 0x80,
	IXGBE_TX_FLAGS_FCOE	= 0x100,
};

/* VLAN info */
#define IXGBE_TX_FLAGS_VLAN_MASK	0xffff0000
#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK	0xe0000000
#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT	29
#define IXGBE_TX_FLAGS_VLAN_SHIFT	16

#define IXGBE_MAX_VF_MC_ENTRIES		30
#define IXGBE_MAX_VF_FUNCTIONS		64
#define IXGBE_MAX_VFTA_ENTRIES		128
#define MAX_EMULATION_MAC_ADDRS		16
#define IXGBE_MAX_PF_MACVLANS		15
#define VMDQ_P(p)	((p) + adapter->ring_feature[RING_F_VMDQ].offset)
#define IXGBE_82599_VF_DEVICE_ID	0x10ED
#define IXGBE_X540_VF_DEVICE_ID		0x1515

struct vf_data_storage {
	struct pci_dev *vfdev;
	unsigned char vf_mac_addresses[ETH_ALEN];
	u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
	u16 num_vf_mc_hashes;
	bool clear_to_send;
	bool pf_set_mac;
	u16 pf_vlan; /* When set, guest VLAN config not allowed. */
	u16 pf_qos;
	u16 tx_rate;
	u8 spoofchk_enabled;
	bool rss_query_enabled;
	u8 trusted;
	int xcast_mode;
	unsigned int vf_api;
};
enum ixgbevf_xcast_modes {
	IXGBEVF_XCAST_MODE_NONE = 0,
	IXGBEVF_XCAST_MODE_MULTI,
	IXGBEVF_XCAST_MODE_ALLMULTI,
	IXGBEVF_XCAST_MODE_PROMISC,
};

struct vf_macvlans {
	struct list_head l;
	int vf;
	bool free;
	bool is_macvlan;
	u8 vf_macvlan[ETH_ALEN];
};

#define IXGBE_MAX_TXD_PWR	14
#define IXGBE_MAX_DATA_PER_TXD	(1u << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
#define DESC_NEEDED	(MAX_SKB_FRAGS + 4)

/* wrapper around a pointer to a socket buffer,
 * so a DMA handle can be stored along with the buffer */
struct ixgbe_tx_buffer {
	union ixgbe_adv_tx_desc *next_to_watch;
	unsigned long time_stamp;
	union {
		struct sk_buff *skb;
		struct xdp_frame *xdpf;
	};
	unsigned int bytecount;
	unsigned short gso_segs;
	__be16 protocol;
	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
	u32 tx_flags;
};

struct ixgbe_rx_buffer {
	struct sk_buff *skb;
	dma_addr_t dma;
	struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 page_offset;
#else
	__u16 page_offset;
#endif
	__u16 pagecnt_bias;
};

struct ixgbe_queue_stats {
	u64 packets;
	u64 bytes;
};

struct ixgbe_tx_queue_stats {
	u64 restart_queue;
	u64 tx_busy;
	u64 tx_done_old;
};

struct ixgbe_rx_queue_stats {
	u64 rsc_count;
	u64 rsc_flush;
	u64 non_eop_descs;
	u64 alloc_rx_page;
	u64 alloc_rx_page_failed;
	u64 alloc_rx_buff_failed;
	u64 csum_err;
};

#define IXGBE_TS_HDR_LEN 8

enum ixgbe_ring_state_t {
	__IXGBE_RX_3K_BUFFER,
	__IXGBE_RX_BUILD_SKB_ENABLED,
	__IXGBE_RX_RSC_ENABLED,
	__IXGBE_RX_CSUM_UDP_ZERO_ERR,
	__IXGBE_RX_FCOE,
	__IXGBE_TX_FDIR_INIT_DONE,
	__IXGBE_TX_XPS_INIT_DONE,
	__IXGBE_TX_DETECT_HANG,
	__IXGBE_HANG_CHECK_ARMED,
	__IXGBE_TX_XDP_RING,
};

#define ring_uses_build_skb(ring) \
	test_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &(ring)->state)

struct ixgbe_fwd_adapter {
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
	struct net_device *netdev;
	unsigned int tx_base_queue;
	unsigned int rx_base_queue;
	int pool;
};

#define check_for_tx_hang(ring) \
	test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
#define set_check_for_tx_hang(ring) \
	set_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
#define clear_check_for_tx_hang(ring) \
	clear_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
#define ring_is_rsc_enabled(ring) \
	test_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
#define set_ring_rsc_enabled(ring) \
	set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
#define clear_ring_rsc_enabled(ring) \
	clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
#define ring_is_xdp(ring) \
	test_bit(__IXGBE_TX_XDP_RING, &(ring)->state)
#define set_ring_xdp(ring) \
	set_bit(__IXGBE_TX_XDP_RING, &(ring)->state)
#define clear_ring_xdp(ring) \
	clear_bit(__IXGBE_TX_XDP_RING, &(ring)->state)
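/* Software state for a single Tx, Rx, or XDP descriptor ring: the buffer
 * info array, producer/consumer indices and per-ring statistics.  Kept
 * cacheline aligned since it is touched for every packet.
 */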
struct ixgbe_ring {
	struct ixgbe_ring *next;	/* pointer to next ring in q_vector */
	struct ixgbe_q_vector *q_vector; /* backpointer to host q_vector */
	struct net_device *netdev;	/* netdev ring belongs to */
	struct bpf_prog *xdp_prog;
	struct device *dev;		/* device for DMA mapping */
	void *desc;			/* descriptor ring memory */
	union {
		struct ixgbe_tx_buffer *tx_buffer_info;
		struct ixgbe_rx_buffer *rx_buffer_info;
	};
	unsigned long state;
	u8 __iomem *tail;
	dma_addr_t dma;			/* phys. address of descriptor ring */
	unsigned int size;		/* length in bytes */

	u16 count;			/* amount of descriptors */

	u8 queue_index; /* needed for multiqueue queue management */
	u8 reg_idx;			/* holds the special value that gets
					 * the hardware register offset
					 * associated with this ring, which is
					 * different for DCB and RSS modes
					 */
	u16 next_to_use;
	u16 next_to_clean;

	unsigned long last_rx_timestamp;

	union {
		u16 next_to_alloc;
		struct {
			u8 atr_sample_rate;
			u8 atr_count;
		};
	};

	u8 dcb_tc;
	struct ixgbe_queue_stats stats;
	struct u64_stats_sync syncp;
	union {
		struct ixgbe_tx_queue_stats tx_stats;
		struct ixgbe_rx_queue_stats rx_stats;
	};
	struct xdp_rxq_info xdp_rxq;
} ____cacheline_internodealigned_in_smp;

enum ixgbe_ring_f_enum {
	RING_F_NONE = 0,
	RING_F_VMDQ,	/* SR-IOV uses the same ring feature */
	RING_F_RSS,
	RING_F_FDIR,
#ifdef IXGBE_FCOE
	RING_F_FCOE,
#endif /* IXGBE_FCOE */

	RING_F_ARRAY_SIZE	/* must be last in enum set */
};

#define IXGBE_MAX_RSS_INDICES		16
#define IXGBE_MAX_RSS_INDICES_X550	63
#define IXGBE_MAX_VMDQ_INDICES		64
#define IXGBE_MAX_FDIR_INDICES		63	/* based on q_vector limit */
#define IXGBE_MAX_FCOE_INDICES		8
#define MAX_RX_QUEUES			(IXGBE_MAX_FDIR_INDICES + 1)
#define MAX_TX_QUEUES			(IXGBE_MAX_FDIR_INDICES + 1)
#define MAX_XDP_QUEUES			(IXGBE_MAX_FDIR_INDICES + 1)
#define IXGBE_MAX_L2A_QUEUES		4
#define IXGBE_BAD_L2A_QUEUE		3
#define IXGBE_MAX_MACVLANS		63

struct ixgbe_ring_feature {
	u16 limit;	/* upper limit on feature indices */
	u16 indices;	/* current value of indices */
	u16 mask;	/* Mask used for feature to ring mapping */
	u16 offset;	/* offset to start of feature */
} ____cacheline_internodealigned_in_smp;

#define IXGBE_82599_VMDQ_8Q_MASK 0x78
#define IXGBE_82599_VMDQ_4Q_MASK 0x7C
#define IXGBE_82599_VMDQ_2Q_MASK 0x7E
/*
 * FCoE requires that all Rx buffers be over 2200 bytes in length.  Since
 * this is twice the size of a half page we need to double the page order
 * for FCoE enabled Rx queues.
 */
static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring)
{
	if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
		return IXGBE_RXBUFFER_3K;
#if (PAGE_SIZE < 8192)
	if (ring_uses_build_skb(ring))
		return IXGBE_MAX_2K_FRAME_BUILD_SKB;
#endif
	return IXGBE_RXBUFFER_2K;
}

static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
		return 1;
#endif
	return 0;
}
#define ixgbe_rx_pg_size(_ring) (PAGE_SIZE << ixgbe_rx_pg_order(_ring))

#define IXGBE_ITR_ADAPTIVE_MIN_INC	2
#define IXGBE_ITR_ADAPTIVE_MIN_USECS	10
#define IXGBE_ITR_ADAPTIVE_MAX_USECS	126
#define IXGBE_ITR_ADAPTIVE_LATENCY	0x80
#define IXGBE_ITR_ADAPTIVE_BULK		0x00

struct ixgbe_ring_container {
	struct ixgbe_ring *ring;	/* pointer to linked list of rings */
	unsigned long next_update;	/* jiffies value of last update */
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	u16 work_limit;			/* total work allowed per interrupt */
	u8 count;			/* total number of rings in vector */
	u8 itr;				/* current ITR setting for ring */
};

/* iterator for handling rings in ring container */
#define ixgbe_for_each_ring(pos, head) \
	for (pos = (head).ring; pos != NULL; pos = pos->next)

#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
			      ? 8 : 1)
#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS

/* MAX_Q_VECTORS of these are allocated,
 * but we only use one per queue-specific vector.
 */
struct ixgbe_q_vector {
	struct ixgbe_adapter *adapter;
#ifdef CONFIG_IXGBE_DCA
	int cpu;	/* CPU for DCA */
#endif
	u16 v_idx;		/* index of q_vector within array, also used for
				 * finding the bit in EICR and friends that
				 * represents the vector for this ring */
	u16 itr;		/* Interrupt throttle rate written to EITR */
	struct ixgbe_ring_container rx, tx;

	struct napi_struct napi;
	cpumask_t affinity_mask;
	int numa_node;
	struct rcu_head rcu;	/* to avoid race with update stats on free */
	char name[IFNAMSIZ + 9];

	/* for dynamic allocation of rings associated with this q_vector */
	struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp;
};

#ifdef CONFIG_IXGBE_HWMON

#define IXGBE_HWMON_TYPE_LOC		0
#define IXGBE_HWMON_TYPE_TEMP		1
#define IXGBE_HWMON_TYPE_CAUTION	2
#define IXGBE_HWMON_TYPE_MAX		3

struct hwmon_attr {
	struct device_attribute dev_attr;
	struct ixgbe_hw *hw;
	struct ixgbe_thermal_diode_data *sensor;
	char name[12];
};

struct hwmon_buff {
	struct attribute_group group;
	const struct attribute_group *groups[2];
	struct attribute *attrs[IXGBE_MAX_SENSORS * 4 + 1];
	struct hwmon_attr hwmon_list[IXGBE_MAX_SENSORS * 4];
	unsigned int n_hwmon;
};
#endif /* CONFIG_IXGBE_HWMON */

/*
 * microsecond values for various ITR rates shifted by 2 to fit itr register
 * with the first 3 bits reserved 0
 */
#define IXGBE_MIN_RSC_ITR	24
#define IXGBE_100K_ITR		40
#define IXGBE_20K_ITR		200
#define IXGBE_12K_ITR		336
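/* For example, IXGBE_20K_ITR = 200 encodes a 50 usec interval (200 >> 2),
 * i.e. roughly 20K interrupts per second, and IXGBE_100K_ITR = 40 encodes
 * 10 usecs, or about 100K interrupts per second.
 */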
/* ixgbe_test_staterr - tests bits in Rx descriptor status and error fields */
static inline __le32 ixgbe_test_staterr(union ixgbe_adv_rx_desc *rx_desc,
					const u32 stat_err_bits)
{
	return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
}

static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
{
	u16 ntc = ring->next_to_clean;
	u16 ntu = ring->next_to_use;

	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
}

#define IXGBE_RX_DESC(R, i)	    \
	(&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
#define IXGBE_TX_DESC(R, i)	    \
	(&(((union ixgbe_adv_tx_desc *)((R)->desc))[i]))
#define IXGBE_TX_CTXTDESC(R, i)	    \
	(&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))

#define IXGBE_MAX_JUMBO_FRAME_SIZE	9728 /* Maximum Supported Size 9.5KB */
#ifdef IXGBE_FCOE
/* Use 3K as the baby jumbo frame size for FCoE */
#define IXGBE_FCOE_JUMBO_FRAME_SIZE	3072
#endif /* IXGBE_FCOE */

#define OTHER_VECTOR 1
#define NON_Q_VECTORS (OTHER_VECTOR)

#define MAX_MSIX_VECTORS_82599 64
#define MAX_Q_VECTORS_82599 64
#define MAX_MSIX_VECTORS_82598 18
#define MAX_Q_VECTORS_82598 16

struct ixgbe_mac_addr {
	u8 addr[ETH_ALEN];
	u16 pool;
	u16 state; /* bitmask */
};

#define IXGBE_MAC_STATE_DEFAULT		0x1
#define IXGBE_MAC_STATE_MODIFIED	0x2
#define IXGBE_MAC_STATE_IN_USE		0x4

#define MAX_Q_VECTORS MAX_Q_VECTORS_82599
#define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82599

#define MIN_MSIX_Q_VECTORS 1
#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)

/* default to trying for four seconds */
#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
#define IXGBE_SFP_POLL_JIFFIES (2 * HZ)	/* SFP poll every 2 seconds */
/* board specific private data structure */
struct ixgbe_adapter {
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
	/* OS defined structs */
	struct net_device *netdev;
	struct bpf_prog *xdp_prog;
	struct pci_dev *pdev;

	unsigned long state;

	/* Some features need tri-state capability,
	 * thus the additional *_CAPABLE flags.
	 */
	u32 flags;
#define IXGBE_FLAG_MSI_ENABLED			BIT(1)
#define IXGBE_FLAG_MSIX_ENABLED			BIT(3)
#define IXGBE_FLAG_RX_1BUF_CAPABLE		BIT(4)
#define IXGBE_FLAG_RX_PS_CAPABLE		BIT(5)
#define IXGBE_FLAG_RX_PS_ENABLED		BIT(6)
#define IXGBE_FLAG_DCA_ENABLED			BIT(8)
#define IXGBE_FLAG_DCA_CAPABLE			BIT(9)
#define IXGBE_FLAG_IMIR_ENABLED			BIT(10)
#define IXGBE_FLAG_MQ_CAPABLE			BIT(11)
#define IXGBE_FLAG_DCB_ENABLED			BIT(12)
#define IXGBE_FLAG_VMDQ_CAPABLE			BIT(13)
#define IXGBE_FLAG_VMDQ_ENABLED			BIT(14)
#define IXGBE_FLAG_FAN_FAIL_CAPABLE		BIT(15)
#define IXGBE_FLAG_NEED_LINK_UPDATE		BIT(16)
#define IXGBE_FLAG_NEED_LINK_CONFIG		BIT(17)
#define IXGBE_FLAG_FDIR_HASH_CAPABLE		BIT(18)
#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE		BIT(19)
#define IXGBE_FLAG_FCOE_CAPABLE			BIT(20)
#define IXGBE_FLAG_FCOE_ENABLED			BIT(21)
#define IXGBE_FLAG_SRIOV_CAPABLE		BIT(22)
#define IXGBE_FLAG_SRIOV_ENABLED		BIT(23)
#define IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE	BIT(24)
#define IXGBE_FLAG_RX_HWTSTAMP_ENABLED		BIT(25)
#define IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER	BIT(26)
#define IXGBE_FLAG_DCB_CAPABLE			BIT(27)
#define IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE	BIT(28)

	u32 flags2;
#define IXGBE_FLAG2_RSC_CAPABLE			BIT(0)
#define IXGBE_FLAG2_RSC_ENABLED			BIT(1)
#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE		BIT(2)
#define IXGBE_FLAG2_TEMP_SENSOR_EVENT		BIT(3)
#define IXGBE_FLAG2_SEARCH_FOR_SFP		BIT(4)
#define IXGBE_FLAG2_SFP_NEEDS_RESET		BIT(5)
#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT	BIT(7)
#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP		BIT(8)
#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP		BIT(9)
#define IXGBE_FLAG2_PTP_PPS_ENABLED		BIT(10)
#define IXGBE_FLAG2_PHY_INTERRUPT		BIT(11)
#define IXGBE_FLAG2_UDP_TUN_REREG_NEEDED	BIT(12)
#define IXGBE_FLAG2_VLAN_PROMISC		BIT(13)
#define IXGBE_FLAG2_EEE_CAPABLE			BIT(14)
#define IXGBE_FLAG2_EEE_ENABLED			BIT(15)
#define IXGBE_FLAG2_RX_LEGACY			BIT(16)
#define IXGBE_FLAG2_IPSEC_ENABLED		BIT(17)

	/* Tx fast path data */
	int num_tx_queues;
	u16 tx_itr_setting;
	u16 tx_work_limit;
	u64 tx_ipsec;

	/* Rx fast path data */
	int num_rx_queues;
	u16 rx_itr_setting;
	u64 rx_ipsec;

	/* Port number used to identify VXLAN traffic */
	__be16 vxlan_port;
	__be16 geneve_port;

	/* XDP */
	int num_xdp_queues;
	struct ixgbe_ring *xdp_ring[MAX_XDP_QUEUES];

	/* TX */
	struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;

	u64 restart_queue;
	u64 lsc_int;
	u32 tx_timeout_count;

	/* RX */
	struct ixgbe_ring *rx_ring[MAX_RX_QUEUES];
	int num_rx_pools;		/* == num_rx_queues in 82598 */
	int num_rx_queues_per_pool;	/* 1 if 82598, can be many if 82599 */
	u64 hw_csum_rx_error;
	u64 hw_rx_no_dma_resources;
	u64 rsc_total_count;
	u64 rsc_total_flush;
	u64 non_eop_descs;
	u32 alloc_rx_page;
	u32 alloc_rx_page_failed;
	u32 alloc_rx_buff_failed;

	struct ixgbe_q_vector *q_vector[MAX_Q_VECTORS];

	/* DCB parameters */
	struct ieee_pfc *ixgbe_ieee_pfc;
	struct ieee_ets *ixgbe_ieee_ets;
	struct ixgbe_dcb_config dcb_cfg;
	struct ixgbe_dcb_config temp_dcb_cfg;
	u8 hw_tcs;
	u8 dcb_set_bitmap;
	u8 dcbx_cap;
	enum ixgbe_fc_mode last_lfc_mode;

	int num_q_vectors;	/* current number of q_vectors for device */
	int max_q_vectors;	/* true count of q_vectors for device */
	struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE];
	struct msix_entry *msix_entries;

	u32 test_icr;
	struct ixgbe_ring test_tx_ring;
	struct ixgbe_ring test_rx_ring;

	/* structs defined in ixgbe_hw.h */
	struct ixgbe_hw hw;
	u16 msg_enable;
	struct ixgbe_hw_stats stats;

	u64 tx_busy;
	unsigned int tx_ring_count;
	unsigned int xdp_ring_count;
	unsigned int rx_ring_count;

	u32 link_speed;
	bool link_up;
	unsigned long sfp_poll_time;
	unsigned long link_check_timeout;

	struct timer_list service_timer;
	struct work_struct service_task;

	struct hlist_head fdir_filter_list;
	unsigned long fdir_overflow; /* number of times ATR was backed off */
	union ixgbe_atr_input fdir_mask;
	int fdir_filter_count;
	u32 fdir_pballoc;
	u32 atr_sample_rate;
	spinlock_t fdir_perfect_lock;

#ifdef IXGBE_FCOE
	struct ixgbe_fcoe fcoe;
#endif /* IXGBE_FCOE */
	u8 __iomem *io_addr; /* Mainly for iounmap use */
	u32 wol;

	u16 bridge_mode;

	char eeprom_id[NVM_VER_SIZE];
	u16 eeprom_cap;

	u32 interrupt_event;
	u32 led_reg;

	struct ptp_clock *ptp_clock;
	struct ptp_clock_info ptp_caps;
	struct work_struct ptp_tx_work;
	struct sk_buff *ptp_tx_skb;
	struct hwtstamp_config tstamp_config;
	unsigned long ptp_tx_start;
	unsigned long last_overflow_check;
	unsigned long last_rx_ptp_check;
	unsigned long last_rx_timestamp;
	spinlock_t tmreg_lock;
	struct cyclecounter hw_cc;
	struct timecounter hw_tc;
	u32 base_incval;
	u32 tx_hwtstamp_timeouts;
	u32 tx_hwtstamp_skipped;
	u32 rx_hwtstamp_cleared;
	void (*ptp_setup_sdp)(struct ixgbe_adapter *);

	/* SR-IOV */
	DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
	unsigned int num_vfs;
	struct vf_data_storage *vfinfo;
	int vf_rate_link_speed;
	struct vf_macvlans vf_mvs;
	struct vf_macvlans *mv_list;

	u32 timer_event_accumulator;
	u32 vferr_refcount;
	struct ixgbe_mac_addr *mac_table;
	struct kobject *info_kobj;
#ifdef CONFIG_IXGBE_HWMON
	struct hwmon_buff *ixgbe_hwmon_buff;
#endif /* CONFIG_IXGBE_HWMON */
#ifdef CONFIG_DEBUG_FS
	struct dentry *ixgbe_dbg_adapter;
#endif /*CONFIG_DEBUG_FS*/

	u8 default_up;
	/* Bitmask indicating in use pools */
	DECLARE_BITMAP(fwd_bitmask, IXGBE_MAX_MACVLANS + 1);

#define IXGBE_MAX_LINK_HANDLE 10
	struct ixgbe_jump_table *jump_tables[IXGBE_MAX_LINK_HANDLE];
	unsigned long tables;

	/* maximum number of RETA entries among all devices supported by ixgbe
	 * driver: currently it's x550 device in non-SRIOV mode
	 */
#define IXGBE_MAX_RETA_ENTRIES 512
	u8 rss_indir_tbl[IXGBE_MAX_RETA_ENTRIES];

#define IXGBE_RSS_KEY_SIZE	40 /* size of RSS Hash Key in bytes */
	u32 *rss_key;

#ifdef CONFIG_XFRM
	struct ixgbe_ipsec *ipsec;
#endif /* CONFIG_XFRM */
};

static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
{
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		return IXGBE_MAX_RSS_INDICES;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		return IXGBE_MAX_RSS_INDICES_X550;
	default:
		return 0;
	}
}
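/* Software shadow of one Flow Director filter as added through ethtool;
 * entries are linked into the adapter's fdir_filter_list.
 */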
struct ixgbe_fdir_filter {
	struct hlist_node fdir_node;
	union ixgbe_atr_input filter;
	u16 sw_idx;
	u64 action;
};

enum ixgbe_state_t {
	__IXGBE_TESTING,
	__IXGBE_RESETTING,
	__IXGBE_DOWN,
	__IXGBE_DISABLED,
	__IXGBE_REMOVING,
	__IXGBE_SERVICE_SCHED,
	__IXGBE_SERVICE_INITED,
	__IXGBE_IN_SFP_INIT,
	__IXGBE_PTP_RUNNING,
	__IXGBE_PTP_TX_IN_PROGRESS,
	__IXGBE_RESET_REQUESTED,
};

struct ixgbe_cb {
	union {				/* Union defining head/tail partner */
		struct sk_buff *head;
		struct sk_buff *tail;
	};
	dma_addr_t dma;
	u16 append_cnt;
	bool page_released;
};
#define IXGBE_CB(skb) ((struct ixgbe_cb *)(skb)->cb)

enum ixgbe_boards {
	board_82598,
	board_82599,
	board_X540,
	board_X550,
	board_X550EM_x,
	board_x550em_x_fw,
	board_x550em_a,
	board_x550em_a_fw,
};

extern const struct ixgbe_info ixgbe_82598_info;
extern const struct ixgbe_info ixgbe_82599_info;
extern const struct ixgbe_info ixgbe_X540_info;
extern const struct ixgbe_info ixgbe_X550_info;
extern const struct ixgbe_info ixgbe_X550EM_x_info;
extern const struct ixgbe_info ixgbe_x550em_x_fw_info;
extern const struct ixgbe_info ixgbe_x550em_a_info;
extern const struct ixgbe_info ixgbe_x550em_a_fw_info;
#ifdef CONFIG_IXGBE_DCB
extern const struct dcbnl_rtnl_ops ixgbe_dcbnl_ops;
#endif

extern char ixgbe_driver_name[];
extern const char ixgbe_driver_version[];
#ifdef IXGBE_FCOE
extern char ixgbe_default_device_descr[];
#endif /* IXGBE_FCOE */

int ixgbe_open(struct net_device *netdev);
int ixgbe_close(struct net_device *netdev);
void ixgbe_up(struct ixgbe_adapter *adapter);
void ixgbe_down(struct ixgbe_adapter *adapter);
void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
void ixgbe_reset(struct ixgbe_adapter *adapter);
void ixgbe_set_ethtool_ops(struct net_device *netdev);
int ixgbe_setup_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
int ixgbe_setup_tx_resources(struct ixgbe_ring *);
void ixgbe_free_rx_resources(struct ixgbe_ring *);
void ixgbe_free_tx_resources(struct ixgbe_ring *);
void ixgbe_configure_rx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
void ixgbe_configure_tx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_ring *);
void ixgbe_update_stats(struct ixgbe_adapter *adapter);
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
			 u16 subdevice_id);
#ifdef CONFIG_PCI_IOV
void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter);
#endif
int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
			 const u8 *addr, u16 queue);
int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
			 const u8 *addr, u16 queue);
void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid);
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, struct ixgbe_adapter *,
				  struct ixgbe_ring *);
void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
				      struct ixgbe_tx_buffer *);
void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
void ixgbe_write_eitr(struct ixgbe_q_vector *);
int ixgbe_poll(struct napi_struct *napi, int budget);
int ethtool_ioctl(struct ifreq *ifr);
s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_hash_dword input,
					  union ixgbe_atr_hash_dword common,
					  u8 queue);
s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
				    union ixgbe_atr_input *input_mask);
s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id, u8 queue);
s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id);
void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
					  union ixgbe_atr_input *mask);
int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
				    struct ixgbe_fdir_filter *input,
				    u16 sw_idx);
void ixgbe_set_rx_mode(struct net_device *netdev);
#ifdef CONFIG_IXGBE_DCB
void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
#endif
int ixgbe_setup_tc(struct net_device *dev, u8 tc);
void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
void ixgbe_do_reset(struct net_device *netdev);
#ifdef CONFIG_IXGBE_HWMON
void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter);
int ixgbe_sysfs_init(struct ixgbe_adapter *adapter);
#endif /* CONFIG_IXGBE_HWMON */
#ifdef IXGBE_FCOE
void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
int ixgbe_fso(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first,
	      u8 *hdr_len);
int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
		   union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb);
int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
		       struct scatterlist *sgl, unsigned int sgc);
int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
			  struct scatterlist *sgl, unsigned int sgc);
int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
int ixgbe_fcoe_enable(struct net_device *netdev);
int ixgbe_fcoe_disable(struct net_device *netdev);
#ifdef CONFIG_IXGBE_DCB
u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter);
u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
#endif /* CONFIG_IXGBE_DCB */
int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
			   struct netdev_fcoe_hbainfo *info);
u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter);
#endif /* IXGBE_FCOE */
#ifdef CONFIG_DEBUG_FS
void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter);
void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter);
void ixgbe_dbg_init(void);
void ixgbe_dbg_exit(void);
#else
static inline void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter) {}
static inline void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter) {}
static inline void ixgbe_dbg_init(void) {}
static inline void ixgbe_dbg_exit(void) {}
#endif /* CONFIG_DEBUG_FS */
static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}

void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter);
void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter);
void ixgbe_ptp_tx_hang(struct ixgbe_adapter *adapter);
void ixgbe_ptp_rx_pktstamp(struct ixgbe_q_vector *, struct sk_buff *);
void ixgbe_ptp_rx_rgtstamp(struct ixgbe_q_vector *, struct sk_buff *skb);
static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
					 union ixgbe_adv_rx_desc *rx_desc,
					 struct sk_buff *skb)
{
	if (unlikely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_TSIP))) {
		ixgbe_ptp_rx_pktstamp(rx_ring->q_vector, skb);
		return;
	}

	if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))
		return;

	ixgbe_ptp_rx_rgtstamp(rx_ring->q_vector, skb);

	/* Update the last_rx_timestamp timer in order to enable watchdog check
	 * for error case of latched timestamp on a dropped packet.
	 */
	rx_ring->last_rx_timestamp = jiffies;
}

int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
void ixgbe_ptp_reset(struct ixgbe_adapter *adapter);
void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter);
#ifdef CONFIG_PCI_IOV
void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter);
#endif

netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
				  struct ixgbe_adapter *adapter,
				  struct ixgbe_ring *tx_ring);
u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter);
void ixgbe_store_key(struct ixgbe_adapter *adapter);
void ixgbe_store_reta(struct ixgbe_adapter *adapter);
s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
		       u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
#ifdef CONFIG_XFRM_OFFLOAD
void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter);
void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter);
void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter);
void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
		    union ixgbe_adv_rx_desc *rx_desc,
		    struct sk_buff *skb);
int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first,
		   struct ixgbe_ipsec_tx_data *itd);
#else
static inline void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter) { };
static inline void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter) { };
static inline void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter) { };
static inline void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
				  union ixgbe_adv_rx_desc *rx_desc,
				  struct sk_buff *skb) { };
static inline int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
				 struct ixgbe_tx_buffer *first,
				 struct ixgbe_ipsec_tx_data *itd) { return 0; };
#endif /* CONFIG_XFRM_OFFLOAD */
#endif /* _IXGBE_H_ */