/******************************************************************************

  Copyright (c) 2001-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/


#ifndef _IXGBE_H_
#define _IXGBE_H_


#include <sys/param.h>
#include <sys/systm.h>
#ifndef IXGBE_LEGACY_TX
#include <sys/buf_ring.h>
#endif
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/sockio.h>
#include <sys/eventhandler.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/bpf.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/tcp_lro.h>
#include <netinet/udp.h>

#include <machine/in_cksum.h>

#include <sys/bus.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <machine/resource.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/clock.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/endian.h>
#include <sys/taskqueue.h>
#include <sys/pcpu.h>
#include <sys/smp.h>
#include <machine/smp.h>
#include <sys/sbuf.h>

#ifdef PCI_IOV
#include <sys/nv.h>
#include <sys/iov_schema.h>
#include <dev/pci/pci_iov.h>
#endif

#include "ixgbe_api.h"
#include "ixgbe_common.h"
#include "ixgbe_phy.h"
#include "ixgbe_vf.h"

#ifdef PCI_IOV
#include "ixgbe_common.h"
#include "ixgbe_mbx.h"
#endif

/* Tunables */
/*
 * TxDescriptors Valid Range: 64-4096  Default Value: 1024
 *
 * This value is the number of transmit descriptors allocated by the driver.
 * Increasing this value allows the driver to queue more transmits. Each
 * descriptor is 16 bytes. Performance tests have shown the 2K value to be
 * optimal for top performance.
 */
#define DEFAULT_TXD     1024
#define PERFORM_TXD     2048
#define MAX_TXD         4096
#define MIN_TXD         64

/*
 * RxDescriptors Valid Range: 64-4096  Default Value: 1024
 *
 * This value is the number of receive descriptors allocated for each RX
 * queue. Increasing this value allows the driver to buffer more incoming
 * packets. Each descriptor is 16 bytes. A receive buffer is also allocated
 * for each descriptor.
 *
 * Note: with 8 rings and a dual port card, it is possible to bump up
 *       against the system mbuf pool limit, you can tune nmbclusters
 *       to adjust for this.
 */
#define DEFAULT_RXD     1024
#define PERFORM_RXD     2048
#define MAX_RXD         4096
#define MIN_RXD         64

/* Alignment for rings */
#define DBA_ALIGN       128

/*
 * This parameter controls the maximum number of times the driver will loop
 * in the ISR. Minimum Value = 1
 */
#define MAX_LOOP        10

/*
 * This is the max watchdog interval, i.e. the time that can pass between
 * any two TX clean operations; such cleaning only happens while the TX
 * hardware is functioning.
 */
#define IXGBE_WATCHDOG  (10 * hz)

/*
 * These parameters control when the driver calls the routine to reclaim
 * transmit descriptors.
 */
#define IXGBE_TX_CLEANUP_THRESHOLD      (adapter->num_tx_desc / 8)
#define IXGBE_TX_OP_THRESHOLD           (adapter->num_tx_desc / 32)

/* These defines are used in MTU calculations */
#define IXGBE_MAX_FRAME_SIZE    9728
#define IXGBE_MTU_HDR           (ETHER_HDR_LEN + ETHER_CRC_LEN + \
                                 ETHER_VLAN_ENCAP_LEN)
#define IXGBE_MAX_MTU           (IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR)

/* Flow control constants */
#define IXGBE_FC_PAUSE          0xFFFF
#define IXGBE_FC_HI             0x20000
#define IXGBE_FC_LO             0x10000

/*
 * Used for optimizing small rx mbufs. Effort is made to keep the copy
 * small and aligned for the CPU L1 cache.
 *
 * MHLEN is typically 168 bytes, giving us 8-byte alignment. Getting
 * 32 byte alignment needed for the fast bcopy results in 8 bytes being
 * wasted. Getting 64 byte alignment, which _should_ be ideal for
 * modern Intel CPUs, results in 40 bytes wasted and a significant drop
 * in observed efficiency of the optimization, 97.9% -> 81.8%.
 */
#define IXGBE_RX_COPY_HDR_PADDED        ((((MPKTHSIZE - 1) / 32) + 1) * 32)
#define IXGBE_RX_COPY_LEN               (MSIZE - IXGBE_RX_COPY_HDR_PADDED)
#define IXGBE_RX_COPY_ALIGN             (IXGBE_RX_COPY_HDR_PADDED - MPKTHSIZE)
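
/*
 * Illustrative arithmetic only (not from the original sources): assuming,
 * for the sake of example, MSIZE = 256 and MPKTHSIZE = 176 -- the real
 * values depend on the mbuf layout of the kernel being built -- the macros
 * above work out to:
 *
 *      IXGBE_RX_COPY_HDR_PADDED = (((176 - 1) / 32) + 1) * 32 = 192
 *      IXGBE_RX_COPY_LEN        = 256 - 192                   = 64
 *      IXGBE_RX_COPY_ALIGN      = 192 - 176                   = 16
 *
 * i.e. small received frames (up to IXGBE_RX_COPY_LEN bytes) can be copied
 * into a fresh mbuf at a 32-byte-aligned offset instead of handing the
 * original receive buffer up the stack.
 */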

/* Keep older OS drivers building... */
#if !defined(SYSCTL_ADD_UQUAD)
#define SYSCTL_ADD_UQUAD SYSCTL_ADD_QUAD
#endif

/* Defines for printing debug information */
#define DEBUG_INIT  0
#define DEBUG_IOCTL 0
#define DEBUG_HW    0

#define INIT_DEBUGOUT(S)            if (DEBUG_INIT)  printf(S "\n")
#define INIT_DEBUGOUT1(S, A)        if (DEBUG_INIT)  printf(S "\n", A)
#define INIT_DEBUGOUT2(S, A, B)     if (DEBUG_INIT)  printf(S "\n", A, B)
#define IOCTL_DEBUGOUT(S)           if (DEBUG_IOCTL) printf(S "\n")
#define IOCTL_DEBUGOUT1(S, A)       if (DEBUG_IOCTL) printf(S "\n", A)
#define IOCTL_DEBUGOUT2(S, A, B)    if (DEBUG_IOCTL) printf(S "\n", A, B)
#define HW_DEBUGOUT(S)              if (DEBUG_HW) printf(S "\n")
#define HW_DEBUGOUT1(S, A)          if (DEBUG_HW) printf(S "\n", A)
#define HW_DEBUGOUT2(S, A, B)       if (DEBUG_HW) printf(S "\n", A, B)

#define MAX_NUM_MULTICAST_ADDRESSES     128
#define IXGBE_82598_SCATTER             100
#define IXGBE_82599_SCATTER             32
#define MSIX_82598_BAR                  3
#define MSIX_82599_BAR                  4
#define IXGBE_TSO_SIZE                  262140
#define IXGBE_TX_BUFFER_SIZE            ((u32) 1514)
#define IXGBE_RX_HDR                    128
#define IXGBE_VFTA_SIZE                 128
#define IXGBE_BR_SIZE                   4096
#define IXGBE_QUEUE_MIN_FREE            32
#define IXGBE_MAX_TX_BUSY               10
#define IXGBE_QUEUE_HUNG                0x80000000

#define IXV_EITR_DEFAULT                128

/* Offload bits in mbuf flag */
#if __FreeBSD_version >= 800000
#define CSUM_OFFLOAD            (CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP)
#else
#define CSUM_OFFLOAD            (CSUM_IP|CSUM_TCP|CSUM_UDP)
#endif

/* Backward compatibility items for very old versions */
#ifndef pci_find_cap
#define pci_find_cap pci_find_extcap
#endif

#ifndef DEVMETHOD_END
#define DEVMETHOD_END { NULL, NULL }
#endif

/*
 * Interrupt Moderation parameters
 */
#define IXGBE_LOW_LATENCY       128
#define IXGBE_AVE_LATENCY       400
#define IXGBE_BULK_LATENCY      1200
#define IXGBE_LINK_ITR          2000

/* MAC type macros */
#define IXGBE_IS_X550VF(_adapter) \
        ((_adapter->hw.mac.type == ixgbe_mac_X550_vf) || \
         (_adapter->hw.mac.type == ixgbe_mac_X550EM_x_vf))

#define IXGBE_IS_VF(_adapter) \
        (IXGBE_IS_X550VF(_adapter) || \
         (_adapter->hw.mac.type == ixgbe_mac_X540_vf) || \
         (_adapter->hw.mac.type == ixgbe_mac_82599_vf))

#ifdef PCI_IOV
#define IXGBE_VF_INDEX(vmdq)    ((vmdq) / 32)
#define IXGBE_VF_BIT(vmdq)      (1 << ((vmdq) % 32))

#define IXGBE_VT_MSG_MASK       0xFFFF

#define IXGBE_VT_MSGINFO(msg)   \
        (((msg) & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT)

#define IXGBE_VF_GET_QUEUES_RESP_LEN    5

#define IXGBE_API_VER_1_0       0
#define IXGBE_API_VER_2_0       1       /* Solaris API.  Not supported. */
#define IXGBE_API_VER_1_1       2
#define IXGBE_API_VER_UNKNOWN   UINT16_MAX

enum ixgbe_iov_mode {
        IXGBE_64_VM,
        IXGBE_32_VM,
        IXGBE_NO_VM
};
#endif /* PCI_IOV */
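
/*
 * Illustrative arithmetic only (not from the original sources): the
 * IXGBE_VF_INDEX()/IXGBE_VF_BIT() macros above (PCI_IOV builds) split a
 * VMDq pool number into a 32-bit register word index and a bit within that
 * word. For pool 37, for example:
 *
 *      IXGBE_VF_INDEX(37) = 37 / 32        = 1
 *      IXGBE_VF_BIT(37)   = 1 << (37 % 32) = 1 << 5 = 0x20
 *
 * so pool 37 maps to bit 5 of the second 32-bit word of such a bitmap.
 */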

/*
 *****************************************************************************
 * vendor_info_array
 *
 * This array contains the list of Subvendor/Subdevice IDs on which the
 * driver should load.
 *
 *****************************************************************************
 */
typedef struct _ixgbe_vendor_info_t {
        unsigned int    vendor_id;
        unsigned int    device_id;
        unsigned int    subvendor_id;
        unsigned int    subdevice_id;
        unsigned int    index;
} ixgbe_vendor_info_t;


struct ixgbe_tx_buf {
        union ixgbe_adv_tx_desc *eop;
        struct mbuf             *m_head;
        bus_dmamap_t            map;
};

struct ixgbe_rx_buf {
        struct mbuf     *buf;
        struct mbuf     *fmp;
        bus_dmamap_t    pmap;
        u_int           flags;
#define IXGBE_RX_COPY   0x01
        uint64_t        addr;
};

/*
 * Bus dma allocation structure used by ixgbe_dma_malloc and ixgbe_dma_free.
 */
struct ixgbe_dma_alloc {
        bus_addr_t              dma_paddr;
        caddr_t                 dma_vaddr;
        bus_dma_tag_t           dma_tag;
        bus_dmamap_t            dma_map;
        bus_dma_segment_t       dma_seg;
        bus_size_t              dma_size;
        int                     dma_nseg;
};

struct ixgbe_mc_addr {
        u8      addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
        u32     vmdq;
};

/*
** Driver queue struct: this is the interrupt container
** for the associated tx and rx ring.
*/
struct ix_queue {
        struct adapter          *adapter;
        u32                     msix;           /* This queue's MSIX vector */
        u32                     eims;           /* This queue's EIMS bit */
        u32                     eitr_setting;
        u32                     me;
        struct resource         *res;
        void                    *tag;
        int                     busy;
        struct tx_ring          *txr;
        struct rx_ring          *rxr;
        struct task             que_task;
        struct taskqueue        *tq;
        u64                     irqs;
};

/*
 * The transmit ring, one per queue
 */
struct tx_ring {
        struct adapter          *adapter;
        struct mtx              tx_mtx;
        u32                     me;
        u32                     tail;
        int                     busy;
        union ixgbe_adv_tx_desc *tx_base;
        struct ixgbe_tx_buf     *tx_buffers;
        struct ixgbe_dma_alloc  txdma;
        volatile u16            tx_avail;
        u16                     next_avail_desc;
        u16                     next_to_clean;
        u16                     process_limit;
        u16                     num_desc;
        u32                     txd_cmd;
        bus_dma_tag_t           txtag;
        char                    mtx_name[16];
#ifndef IXGBE_LEGACY_TX
        struct buf_ring         *br;
        struct task             txq_task;
#endif
#ifdef IXGBE_FDIR
        u16                     atr_sample;
        u16                     atr_count;
#endif
        u32                     bytes;          /* used for AIM */
        u32                     packets;
        /* Soft Stats */
        unsigned long           tso_tx;
        unsigned long           no_tx_map_avail;
        unsigned long           no_tx_dma_setup;
        u64                     no_desc_avail;
        u64                     total_packets;
};


/*
 * The Receive ring, one per rx queue
 */
struct rx_ring {
        struct adapter          *adapter;
        struct mtx              rx_mtx;
        u32                     me;
        u32                     tail;
        union ixgbe_adv_rx_desc *rx_base;
        struct ixgbe_dma_alloc  rxdma;
        struct lro_ctrl         lro;
        bool                    lro_enabled;
        bool                    hw_rsc;
        bool                    vtag_strip;
        u16                     next_to_refresh;
        u16                     next_to_check;
        u16                     num_desc;
        u16                     mbuf_sz;
        u16                     process_limit;
        char                    mtx_name[16];
        struct ixgbe_rx_buf     *rx_buffers;
        bus_dma_tag_t           ptag;

        u32                     bytes;          /* Used for AIM calc */
        u32                     packets;

        /* Soft stats */
        u64                     rx_irq;
        u64                     rx_copies;
        u64                     rx_packets;
        u64                     rx_bytes;
        u64                     rx_discarded;
        u64                     rsc_num;
#ifdef IXGBE_FDIR
        u64                     flm;
#endif
};

#ifdef PCI_IOV
#define IXGBE_VF_CTS            (1 << 0) /* VF is clear to send. */
#define IXGBE_VF_CAP_MAC        (1 << 1) /* VF is permitted to change MAC. */
#define IXGBE_VF_CAP_VLAN       (1 << 2) /* VF is permitted to join vlans. */
#define IXGBE_VF_ACTIVE         (1 << 3) /* VF is active. */

#define IXGBE_MAX_VF_MC 30      /* Max number of multicast entries */

struct ixgbe_vf {
        u_int           pool;
        u_int           rar_index;
        u_int           max_frame_size;
        uint32_t        flags;
        uint8_t         ether_addr[ETHER_ADDR_LEN];
        uint16_t        mc_hash[IXGBE_MAX_VF_MC];
        uint16_t        num_mc_hashes;
        uint16_t        default_vlan;
        uint16_t        vlan_tag;
        uint16_t        api_ver;
};
#endif /* PCI_IOV */
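
/*
 * Illustrative usage only (not from the original sources): PF-side code
 * that wants to communicate with a VF would typically require both of the
 * flag bits defined above (PCI_IOV builds) to be set, e.g.
 *
 *      if ((vf->flags & (IXGBE_VF_ACTIVE | IXGBE_VF_CTS)) ==
 *          (IXGBE_VF_ACTIVE | IXGBE_VF_CTS))
 *              ... the VF is active and clear to send ...
 */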

/* Our adapter structure */
struct adapter {
        struct ifnet            *ifp;
        struct ixgbe_hw         hw;

        struct ixgbe_osdep      osdep;
        struct device           *dev;

        struct resource         *pci_mem;
        struct resource         *msix_mem;

        /*
         * Interrupt resources: this set is
         * either used for legacy, or for Link
         * when doing MSIX
         */
        void                    *tag;
        struct resource         *res;

        struct ifmedia          media;
        struct callout          timer;
        int                     msix;
        int                     if_flags;

        struct mtx              core_mtx;

        eventhandler_tag        vlan_attach;
        eventhandler_tag        vlan_detach;

        u16                     num_vlans;
        u16                     num_queues;

        /*
        ** Shadow VFTA table: this is needed because
        ** the real vlan filter table gets cleared during
        ** a soft reset and the driver needs to be able
        ** to repopulate it.
        */
        u32                     shadow_vfta[IXGBE_VFTA_SIZE];

        /* Info about the interface */
        u32                     optics;
        u32                     fc;             /* local flow ctrl setting */
        int                     advertise;      /* link speeds */
        bool                    link_active;
        u16                     max_frame_size;
        u16                     num_segs;
        u32                     link_speed;
        bool                    link_up;
        u32                     vector;
        u16                     dmac;
        bool                    eee_enabled;
        u32                     phy_layer;

        /* Power management-related */
        bool                    wol_support;
        u32                     wufc;

        /* Mbuf cluster size */
        u32                     rx_mbuf_sz;

        /* Support for pluggable optics */
        bool                    sfp_probe;
        struct task             link_task;      /* Link tasklet */
        struct task             mod_task;       /* SFP tasklet */
        struct task             msf_task;       /* Multispeed Fiber */
#ifdef PCI_IOV
        struct task             mbx_task;       /* VF -> PF mailbox interrupt */
#endif /* PCI_IOV */
#ifdef IXGBE_FDIR
        int                     fdir_reinit;
        struct task             fdir_task;
#endif
        struct task             phy_task;       /* PHY intr tasklet */
        struct taskqueue        *tq;

        /*
        ** Queues:
        ** This is the irq holder, it has
        ** an RX/TX pair of rings associated
        ** with it.
        */
        struct ix_queue         *queues;

        /*
         * Transmit rings:
         *      Allocated at run time, an array of rings.
         */
        struct tx_ring          *tx_rings;
        u32                     num_tx_desc;

        /*
         * Receive rings:
         *      Allocated at run time, an array of rings.
         */
        struct rx_ring          *rx_rings;
        u64                     active_queues;
        u32                     num_rx_desc;

        /* Multicast array memory */
        struct ixgbe_mc_addr    *mta;
        int                     num_vfs;
        int                     pool;
#ifdef PCI_IOV
        struct ixgbe_vf         *vfs;
#endif

        /* Misc stats maintained by the driver */
        unsigned long           dropped_pkts;
        unsigned long           mbuf_defrag_failed;
        unsigned long           mbuf_header_failed;
        unsigned long           mbuf_packet_failed;
        unsigned long           watchdog_events;
        unsigned long           link_irq;
        union {
                struct ixgbe_hw_stats pf;
                struct ixgbevf_hw_stats vf;
        } stats;
#if __FreeBSD_version >= 1100036
        /* counter(9) stats */
        u64                     ipackets;
        u64                     ierrors;
        u64                     opackets;
        u64                     oerrors;
        u64                     ibytes;
        u64                     obytes;
        u64                     imcasts;
        u64                     omcasts;
        u64                     iqdrops;
        u64                     noproto;
#endif
};


/* Precision Time Sync (IEEE 1588) defines */
#define ETHERTYPE_IEEE1588      0x88F7
#define PICOSECS_PER_TICK       20833
#define TSYNC_UDP_PORT          319 /* UDP port for the protocol */
#define IXGBE_ADVTXD_TSTAMP     0x00080000


#define IXGBE_CORE_LOCK_INIT(_sc, _name) \
        mtx_init(&(_sc)->core_mtx, _name, "IXGBE Core Lock", MTX_DEF)
#define IXGBE_CORE_LOCK_DESTROY(_sc)    mtx_destroy(&(_sc)->core_mtx)
#define IXGBE_TX_LOCK_DESTROY(_sc)      mtx_destroy(&(_sc)->tx_mtx)
#define IXGBE_RX_LOCK_DESTROY(_sc)      mtx_destroy(&(_sc)->rx_mtx)
#define IXGBE_CORE_LOCK(_sc)            mtx_lock(&(_sc)->core_mtx)
#define IXGBE_TX_LOCK(_sc)              mtx_lock(&(_sc)->tx_mtx)
#define IXGBE_TX_TRYLOCK(_sc)           mtx_trylock(&(_sc)->tx_mtx)
#define IXGBE_RX_LOCK(_sc)              mtx_lock(&(_sc)->rx_mtx)
#define IXGBE_CORE_UNLOCK(_sc)          mtx_unlock(&(_sc)->core_mtx)
#define IXGBE_TX_UNLOCK(_sc)            mtx_unlock(&(_sc)->tx_mtx)
#define IXGBE_RX_UNLOCK(_sc)            mtx_unlock(&(_sc)->rx_mtx)
#define IXGBE_CORE_LOCK_ASSERT(_sc)     mtx_assert(&(_sc)->core_mtx, MA_OWNED)
#define IXGBE_TX_LOCK_ASSERT(_sc)       mtx_assert(&(_sc)->tx_mtx, MA_OWNED)
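
/*
 * Illustrative usage only (not from the original sources): the lock macros
 * above expect the structure that owns the mutex, i.e. the adapter for the
 * core lock and a tx_ring/rx_ring for the TX/RX locks, roughly:
 *
 *      IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(adapter->dev));
 *      ...
 *      IXGBE_TX_LOCK(txr);
 *      ... manipulate the transmit ring ...
 *      IXGBE_TX_UNLOCK(txr);
 */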

/* For backward compatibility */
#if !defined(PCIER_LINK_STA)
#define PCIER_LINK_STA PCIR_EXPRESS_LINK_STA
#endif

/* Stats macros */
#if __FreeBSD_version >= 1100036
#define IXGBE_SET_IPACKETS(sc, count)   (sc)->ipackets = (count)
#define IXGBE_SET_IERRORS(sc, count)    (sc)->ierrors = (count)
#define IXGBE_SET_OPACKETS(sc, count)   (sc)->opackets = (count)
#define IXGBE_SET_OERRORS(sc, count)    (sc)->oerrors = (count)
#define IXGBE_SET_COLLISIONS(sc, count)
#define IXGBE_SET_IBYTES(sc, count)     (sc)->ibytes = (count)
#define IXGBE_SET_OBYTES(sc, count)     (sc)->obytes = (count)
#define IXGBE_SET_IMCASTS(sc, count)    (sc)->imcasts = (count)
#define IXGBE_SET_OMCASTS(sc, count)    (sc)->omcasts = (count)
#define IXGBE_SET_IQDROPS(sc, count)    (sc)->iqdrops = (count)
#else
#define IXGBE_SET_IPACKETS(sc, count)   (sc)->ifp->if_ipackets = (count)
#define IXGBE_SET_IERRORS(sc, count)    (sc)->ifp->if_ierrors = (count)
#define IXGBE_SET_OPACKETS(sc, count)   (sc)->ifp->if_opackets = (count)
#define IXGBE_SET_OERRORS(sc, count)    (sc)->ifp->if_oerrors = (count)
#define IXGBE_SET_COLLISIONS(sc, count) (sc)->ifp->if_collisions = (count)
#define IXGBE_SET_IBYTES(sc, count)     (sc)->ifp->if_ibytes = (count)
#define IXGBE_SET_OBYTES(sc, count)     (sc)->ifp->if_obytes = (count)
#define IXGBE_SET_IMCASTS(sc, count)    (sc)->ifp->if_imcasts = (count)
#define IXGBE_SET_OMCASTS(sc, count)    (sc)->ifp->if_omcasts = (count)
#define IXGBE_SET_IQDROPS(sc, count)    (sc)->ifp->if_iqdrops = (count)
#endif

/* External PHY register addresses */
#define IXGBE_PHY_CURRENT_TEMP          0xC820
#define IXGBE_PHY_OVERTEMP_STATUS       0xC830

/* Sysctl help messages; displayed with sysctl -d */
#define IXGBE_SYSCTL_DESC_ADV_SPEED \
        "\nControl advertised link speed using these flags:\n" \
        "\t0x1 - advertise 100M\n" \
        "\t0x2 - advertise 1G\n" \
        "\t0x4 - advertise 10G\n\n" \
        "\t100M is only supported on certain 10GBaseT adapters.\n"

#define IXGBE_SYSCTL_DESC_SET_FC \
        "\nSet flow control mode using these values:\n" \
        "\t0 - off\n" \
        "\t1 - rx pause\n" \
        "\t2 - tx pause\n" \
        "\t3 - tx and rx pause"

static inline bool
ixgbe_is_sfp(struct ixgbe_hw *hw)
{
        switch (hw->phy.type) {
        case ixgbe_phy_sfp_avago:
        case ixgbe_phy_sfp_ftl:
        case ixgbe_phy_sfp_intel:
        case ixgbe_phy_sfp_unknown:
        case ixgbe_phy_sfp_passive_tyco:
        case ixgbe_phy_sfp_passive_unknown:
        case ixgbe_phy_qsfp_passive_unknown:
        case ixgbe_phy_qsfp_active_unknown:
        case ixgbe_phy_qsfp_intel:
        case ixgbe_phy_qsfp_unknown:
                return TRUE;
        default:
                return FALSE;
        }
}

/* Workaround to make 8.0 buildable */
#if __FreeBSD_version >= 800000 && __FreeBSD_version < 800504
static __inline int
drbr_needs_enqueue(struct ifnet *ifp, struct buf_ring *br)
{
#ifdef ALTQ
        if (ALTQ_IS_ENABLED(&ifp->if_snd))
                return (1);
#endif
        return (!buf_ring_empty(br));
}
#endif

/*
** Find the number of unrefreshed RX descriptors
*/
static inline u16
ixgbe_rx_unrefreshed(struct rx_ring *rxr)
{
        if (rxr->next_to_check > rxr->next_to_refresh)
                return (rxr->next_to_check - rxr->next_to_refresh - 1);
        else
                return ((rxr->num_desc + rxr->next_to_check) -
                    rxr->next_to_refresh - 1);
}
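
/*
 * Illustrative arithmetic only (not from the original sources): with a
 * 1024-entry ring, next_to_check = 10 and next_to_refresh = 1000, the
 * wrap-around branch above gives
 *
 *      (1024 + 10) - 1000 - 1 = 33
 *
 * i.e. roughly the descriptors between next_to_refresh and next_to_check
 * that still need fresh mbufs; the "- 1" keeps the refresh index from
 * catching up with the check index.
 */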

/*
** This checks for a zero MAC address, which is likely
** unless the Admin on the Host has assigned one.
*/
static inline bool
ixv_check_ether_addr(u8 *addr)
{
        bool status = TRUE;

        if ((addr[0] == 0 && addr[1] == 0 && addr[2] == 0 &&
            addr[3] == 0 && addr[4] == 0 && addr[5] == 0))
                status = FALSE;
        return (status);
}

/* Shared Prototypes */

#ifdef IXGBE_LEGACY_TX
void    ixgbe_start(struct ifnet *);
void    ixgbe_start_locked(struct tx_ring *, struct ifnet *);
#else /* ! IXGBE_LEGACY_TX */
int     ixgbe_mq_start(struct ifnet *, struct mbuf *);
int     ixgbe_mq_start_locked(struct ifnet *, struct tx_ring *);
void    ixgbe_qflush(struct ifnet *);
void    ixgbe_deferred_mq_start(void *, int);
#endif /* IXGBE_LEGACY_TX */

int     ixgbe_allocate_queues(struct adapter *);
int     ixgbe_allocate_transmit_buffers(struct tx_ring *);
int     ixgbe_setup_transmit_structures(struct adapter *);
void    ixgbe_free_transmit_structures(struct adapter *);
int     ixgbe_allocate_receive_buffers(struct rx_ring *);
int     ixgbe_setup_receive_structures(struct adapter *);
void    ixgbe_free_receive_structures(struct adapter *);
void    ixgbe_txeof(struct tx_ring *);
bool    ixgbe_rxeof(struct ix_queue *);

int     ixgbe_dma_malloc(struct adapter *,
            bus_size_t, struct ixgbe_dma_alloc *, int);
void    ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);
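
/*
 * Illustrative usage only (not from the original sources): a descriptor
 * ring would typically be obtained through the two helpers declared above,
 * roughly (ring_size is a hypothetical local here):
 *
 *      struct ixgbe_dma_alloc txdma;
 *
 *      if (ixgbe_dma_malloc(adapter, ring_size, &txdma, BUS_DMA_NOWAIT))
 *              return (ENOMEM);
 *      txr->tx_base = (union ixgbe_adv_tx_desc *)txdma.dma_vaddr;
 *      ... program txdma.dma_paddr into the hardware ...
 *      ixgbe_dma_free(adapter, &txdma);        (on teardown)
 *
 * where ring_size would be the descriptor count times the 16-byte
 * descriptor size, rounded up to DBA_ALIGN.
 */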

#ifdef PCI_IOV

static inline boolean_t
ixgbe_vf_mac_changed(struct ixgbe_vf *vf, const uint8_t *mac)
{
        return (bcmp(mac, vf->ether_addr, ETHER_ADDR_LEN) != 0);
}

static inline void
ixgbe_send_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
{

        if (vf->flags & IXGBE_VF_CTS)
                msg |= IXGBE_VT_MSGTYPE_CTS;

        ixgbe_write_mbx(&adapter->hw, &msg, 1, vf->pool);
}

static inline void
ixgbe_send_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
{
        msg &= IXGBE_VT_MSG_MASK;
        ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_ACK);
}

static inline void
ixgbe_send_vf_nack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
{
        msg &= IXGBE_VT_MSG_MASK;
        ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_NACK);
}

static inline void
ixgbe_process_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf)
{
        if (!(vf->flags & IXGBE_VF_CTS))
                ixgbe_send_vf_nack(adapter, vf, 0);
}

static inline enum ixgbe_iov_mode
ixgbe_get_iov_mode(struct adapter *adapter)
{
        if (adapter->num_vfs == 0)
                return (IXGBE_NO_VM);
        if (adapter->num_queues <= 2)
                return (IXGBE_64_VM);
        else if (adapter->num_queues <= 4)
                return (IXGBE_32_VM);
        else
                return (IXGBE_NO_VM);
}

static inline u16
ixgbe_max_vfs(enum ixgbe_iov_mode mode)
{
        /*
         * We return odd numbers below because we
         * reserve 1 VM's worth of queues for the PF.
         */
        switch (mode) {
        case IXGBE_64_VM:
                return (63);
        case IXGBE_32_VM:
                return (31);
        case IXGBE_NO_VM:
        default:
                return (0);
        }
}

static inline int
ixgbe_vf_queues(enum ixgbe_iov_mode mode)
{
        switch (mode) {
        case IXGBE_64_VM:
                return (2);
        case IXGBE_32_VM:
                return (4);
        case IXGBE_NO_VM:
        default:
                return (0);
        }
}

static inline int
ixgbe_vf_que_index(enum ixgbe_iov_mode mode, u32 vfnum, int num)
{
        return ((vfnum * ixgbe_vf_queues(mode)) + num);
}

static inline int
ixgbe_pf_que_index(enum ixgbe_iov_mode mode, int num)
{
        return (ixgbe_vf_que_index(mode, ixgbe_max_vfs(mode), num));
}

static inline void
ixgbe_update_max_frame(struct adapter * adapter, int max_frame)
{
        if (adapter->max_frame_size < max_frame)
                adapter->max_frame_size = max_frame;
}

static inline u32
ixgbe_get_mrqc(enum ixgbe_iov_mode mode)
{
        u32 mrqc = 0;
        switch (mode) {
        case IXGBE_64_VM:
                mrqc = IXGBE_MRQC_VMDQRSS64EN;
                break;
        case IXGBE_32_VM:
                mrqc = IXGBE_MRQC_VMDQRSS32EN;
                break;
        case IXGBE_NO_VM:
                mrqc = 0;
                break;
        default:
                panic("Unexpected SR-IOV mode %d", mode);
        }
        return (mrqc);
}


static inline u32
ixgbe_get_mtqc(enum ixgbe_iov_mode mode)
{
        uint32_t mtqc = 0;
        switch (mode) {
        case IXGBE_64_VM:
                mtqc |= IXGBE_MTQC_64VF | IXGBE_MTQC_VT_ENA;
                break;
        case IXGBE_32_VM:
                mtqc |= IXGBE_MTQC_32VF | IXGBE_MTQC_VT_ENA;
                break;
        case IXGBE_NO_VM:
                mtqc = IXGBE_MTQC_64Q_1PB;
                break;
        default:
                panic("Unexpected SR-IOV mode %d", mode);
        }
        return (mtqc);
}
#endif /* PCI_IOV */

#endif /* _IXGBE_H_ */