1 /****************************************************************************** 2 3 Copyright (c) 2001-2017, Intel Corporation 4 All rights reserved. 5 6 Redistribution and use in source and binary forms, with or without 7 modification, are permitted provided that the following conditions are met: 8 9 1. Redistributions of source code must retain the above copyright notice, 10 this list of conditions and the following disclaimer. 11 12 2. Redistributions in binary form must reproduce the above copyright 13 notice, this list of conditions and the following disclaimer in the 14 documentation and/or other materials provided with the distribution. 15 16 3. Neither the name of the Intel Corporation nor the names of its 17 contributors may be used to endorse or promote products derived from 18 this software without specific prior written permission. 19 20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 POSSIBILITY OF SUCH DAMAGE. 31 32 ******************************************************************************/ 33 /*$FreeBSD$*/ 34 35 36 #include "opt_inet.h" 37 #include "opt_inet6.h" 38 #include "opt_rss.h" 39 40 #include "ixgbe.h" 41 #include "ifdi_if.h" 42 43 #include <net/netmap.h> 44 #include <dev/netmap/netmap_kern.h> 45 46 /************************************************************************ 47 * Driver version 48 ************************************************************************/ 49 char ixv_driver_version[] = "2.0.1-k"; 50 51 /************************************************************************ 52 * PCI Device ID Table 53 * 54 * Used by probe to select devices to load on 55 * Last field stores an index into ixv_strings 56 * Last entry must be all 0s 57 * 58 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } 59 ************************************************************************/ 60 static pci_vendor_info_t ixv_vendor_info_array[] = 61 { 62 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, "Intel(R) X520 82599 Virtual Function"), 63 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, "Intel(R) X540 Virtual Function"), 64 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, "Intel(R) X550 Virtual Function"), 65 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, "Intel(R) X552 Virtual Function"), 66 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, "Intel(R) X553 Virtual Function"), 67 /* required last entry */ 68 PVID_END 69 }; 70 71 /************************************************************************ 72 * Function prototypes 73 ************************************************************************/ 74 static void *ixv_register(device_t); 75 static int ixv_if_attach_pre(if_ctx_t); 76 static int ixv_if_attach_post(if_ctx_t); 77 static int ixv_if_detach(if_ctx_t); 78 79 static int ixv_if_rx_queue_intr_enable(if_ctx_t, uint16_t); 80 static int ixv_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int); 81 static int ixv_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int); 82 static void ixv_if_queues_free(if_ctx_t); 83 static void ixv_identify_hardware(if_ctx_t); 84 static void ixv_init_device_features(struct ixgbe_softc *); 85 static int ixv_allocate_pci_resources(if_ctx_t); 86 static void ixv_free_pci_resources(if_ctx_t); 87 static int ixv_setup_interface(if_ctx_t); 88 static void ixv_if_media_status(if_ctx_t, struct ifmediareq *); 89 static int ixv_if_media_change(if_ctx_t); 90 static void ixv_if_update_admin_status(if_ctx_t); 91 static int ixv_if_msix_intr_assign(if_ctx_t, int); 92 93 static int ixv_if_mtu_set(if_ctx_t, uint32_t); 94 static void ixv_if_init(if_ctx_t); 95 static void ixv_if_local_timer(if_ctx_t, uint16_t); 96 static void ixv_if_stop(if_ctx_t); 97 static int ixv_negotiate_api(struct ixgbe_softc *); 98 99 static void ixv_initialize_transmit_units(if_ctx_t); 100 static void ixv_initialize_receive_units(if_ctx_t); 101 static void ixv_initialize_rss_mapping(struct ixgbe_softc *); 102 103 static void ixv_setup_vlan_support(if_ctx_t); 104 static void ixv_configure_ivars(struct ixgbe_softc *); 105 static void ixv_if_enable_intr(if_ctx_t); 106 static void ixv_if_disable_intr(if_ctx_t); 107 static void ixv_if_multi_set(if_ctx_t); 108 109 static void ixv_if_register_vlan(if_ctx_t, u16); 110 static void ixv_if_unregister_vlan(if_ctx_t, u16); 111 112 static uint64_t ixv_if_get_counter(if_ctx_t, ift_counter); 113 static bool ixv_if_needs_restart(if_ctx_t, enum iflib_restart_event); 114 115 static void ixv_save_stats(struct ixgbe_softc *); 116 static void ixv_init_stats(struct ixgbe_softc *); 117 static void ixv_update_stats(struct ixgbe_softc *); 118 static void ixv_add_stats_sysctls(struct ixgbe_softc *); 119 120 static int ixv_sysctl_debug(SYSCTL_HANDLER_ARGS); 121 static void ixv_set_ivar(struct ixgbe_softc *, u8, u8, s8); 122 123 static u8 *ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *); 124 125 /* The MSI-X Interrupt handlers */ 126 static int ixv_msix_que(void *); 127 static int ixv_msix_mbx(void *); 128 129 /************************************************************************ 130 * FreeBSD Device Interface Entry Points 131 ************************************************************************/ 132 static device_method_t ixv_methods[] = { 133 /* Device interface */ 134 DEVMETHOD(device_register, ixv_register), 135 DEVMETHOD(device_probe, iflib_device_probe), 136 DEVMETHOD(device_attach, iflib_device_attach), 137 DEVMETHOD(device_detach, iflib_device_detach), 138 DEVMETHOD(device_shutdown, iflib_device_shutdown), 139 DEVMETHOD_END 140 }; 141 142 static driver_t ixv_driver = { 143 "ixv", ixv_methods, sizeof(struct ixgbe_softc), 144 }; 145 146 DRIVER_MODULE(ixv, pci, ixv_driver, 0, 0); 147 IFLIB_PNP_INFO(pci, ixv_driver, ixv_vendor_info_array); 148 MODULE_DEPEND(ixv, iflib, 1, 1, 1); 149 MODULE_DEPEND(ixv, pci, 1, 1, 1); 150 MODULE_DEPEND(ixv, ether, 1, 1, 1); 151 152 static device_method_t ixv_if_methods[] = { 153 DEVMETHOD(ifdi_attach_pre, ixv_if_attach_pre), 154 DEVMETHOD(ifdi_attach_post, ixv_if_attach_post), 155 DEVMETHOD(ifdi_detach, ixv_if_detach), 156 DEVMETHOD(ifdi_init, ixv_if_init), 157 DEVMETHOD(ifdi_stop, ixv_if_stop), 158 DEVMETHOD(ifdi_msix_intr_assign, ixv_if_msix_intr_assign), 159 DEVMETHOD(ifdi_intr_enable, ixv_if_enable_intr), 160 DEVMETHOD(ifdi_intr_disable, ixv_if_disable_intr), 161 DEVMETHOD(ifdi_tx_queue_intr_enable, ixv_if_rx_queue_intr_enable), 162 DEVMETHOD(ifdi_rx_queue_intr_enable, ixv_if_rx_queue_intr_enable), 163 DEVMETHOD(ifdi_tx_queues_alloc, ixv_if_tx_queues_alloc), 164 DEVMETHOD(ifdi_rx_queues_alloc, ixv_if_rx_queues_alloc), 165 DEVMETHOD(ifdi_queues_free, ixv_if_queues_free), 166 DEVMETHOD(ifdi_update_admin_status, ixv_if_update_admin_status), 167 DEVMETHOD(ifdi_multi_set, ixv_if_multi_set), 168 DEVMETHOD(ifdi_mtu_set, ixv_if_mtu_set), 169 DEVMETHOD(ifdi_media_status, ixv_if_media_status), 170 DEVMETHOD(ifdi_media_change, ixv_if_media_change), 171 DEVMETHOD(ifdi_timer, ixv_if_local_timer), 172 DEVMETHOD(ifdi_vlan_register, ixv_if_register_vlan), 173 DEVMETHOD(ifdi_vlan_unregister, ixv_if_unregister_vlan), 174 DEVMETHOD(ifdi_get_counter, ixv_if_get_counter), 175 DEVMETHOD(ifdi_needs_restart, ixv_if_needs_restart), 176 DEVMETHOD_END 177 }; 178 179 static driver_t ixv_if_driver = { 180 "ixv_if", ixv_if_methods, sizeof(struct ixgbe_softc) 181 }; 182 183 /* 184 * TUNEABLE PARAMETERS: 185 */ 186 187 /* Flow control setting, default to full */ 188 static int ixv_flow_control = ixgbe_fc_full; 189 TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control); 190 191 /* 192 * Header split: this causes the hardware to DMA 193 * the header into a separate mbuf from the payload, 194 * it can be a performance win in some workloads, but 195 * in others it actually hurts, its off by default. 196 */ 197 static int ixv_header_split = false; 198 TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split); 199 200 /* 201 * Shadow VFTA table, this is needed because 202 * the real filter table gets cleared during 203 * a soft reset and we need to repopulate it. 204 */ 205 static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE]; 206 extern struct if_txrx ixgbe_txrx; 207 208 static struct if_shared_ctx ixv_sctx_init = { 209 .isc_magic = IFLIB_MAGIC, 210 .isc_q_align = PAGE_SIZE,/* max(DBA_ALIGN, PAGE_SIZE) */ 211 .isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header), 212 .isc_tx_maxsegsize = PAGE_SIZE, 213 .isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header), 214 .isc_tso_maxsegsize = PAGE_SIZE, 215 .isc_rx_maxsize = MJUM16BYTES, 216 .isc_rx_nsegments = 1, 217 .isc_rx_maxsegsize = MJUM16BYTES, 218 .isc_nfl = 1, 219 .isc_ntxqs = 1, 220 .isc_nrxqs = 1, 221 .isc_admin_intrcnt = 1, 222 .isc_vendor_info = ixv_vendor_info_array, 223 .isc_driver_version = ixv_driver_version, 224 .isc_driver = &ixv_if_driver, 225 .isc_flags = IFLIB_IS_VF | IFLIB_TSO_INIT_IP, 226 227 .isc_nrxd_min = {MIN_RXD}, 228 .isc_ntxd_min = {MIN_TXD}, 229 .isc_nrxd_max = {MAX_RXD}, 230 .isc_ntxd_max = {MAX_TXD}, 231 .isc_nrxd_default = {DEFAULT_RXD}, 232 .isc_ntxd_default = {DEFAULT_TXD}, 233 }; 234 235 static void * 236 ixv_register(device_t dev) 237 { 238 return (&ixv_sctx_init); 239 } 240 241 /************************************************************************ 242 * ixv_if_tx_queues_alloc 243 ************************************************************************/ 244 static int 245 ixv_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, 246 int ntxqs, int ntxqsets) 247 { 248 struct ixgbe_softc *sc = iflib_get_softc(ctx); 249 if_softc_ctx_t scctx = sc->shared; 250 struct ix_tx_queue *que; 251 int i, j, error; 252 253 MPASS(sc->num_tx_queues == ntxqsets); 254 MPASS(ntxqs == 1); 255 256 /* Allocate queue structure memory */ 257 sc->tx_queues = 258 (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets, 259 M_DEVBUF, M_NOWAIT | M_ZERO); 260 if (!sc->tx_queues) { 261 device_printf(iflib_get_dev(ctx), 262 "Unable to allocate TX ring memory\n"); 263 return (ENOMEM); 264 } 265 266 for (i = 0, que = sc->tx_queues; i < ntxqsets; i++, que++) { 267 struct tx_ring *txr = &que->txr; 268 269 txr->me = i; 270 txr->sc = que->sc = sc; 271 272 /* Allocate report status array */ 273 if (!(txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_DEVBUF, M_NOWAIT | M_ZERO))) { 274 error = ENOMEM; 275 goto fail; 276 } 277 for (j = 0; j < scctx->isc_ntxd[0]; j++) 278 txr->tx_rsq[j] = QIDX_INVALID; 279 /* get the virtual and physical address of the hardware queues */ 280 txr->tail = IXGBE_VFTDT(txr->me); 281 txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i*ntxqs]; 282 txr->tx_paddr = paddrs[i*ntxqs]; 283 284 txr->bytes = 0; 285 txr->total_packets = 0; 286 287 } 288 289 device_printf(iflib_get_dev(ctx), "allocated for %d queues\n", 290 sc->num_tx_queues); 291 292 return (0); 293 294 fail: 295 ixv_if_queues_free(ctx); 296 297 return (error); 298 } /* ixv_if_tx_queues_alloc */ 299 300 /************************************************************************ 301 * ixv_if_rx_queues_alloc 302 ************************************************************************/ 303 static int 304 ixv_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, 305 int nrxqs, int nrxqsets) 306 { 307 struct ixgbe_softc *sc = iflib_get_softc(ctx); 308 struct ix_rx_queue *que; 309 int i, error; 310 311 MPASS(sc->num_rx_queues == nrxqsets); 312 MPASS(nrxqs == 1); 313 314 /* Allocate queue structure memory */ 315 sc->rx_queues = 316 (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue) * nrxqsets, 317 M_DEVBUF, M_NOWAIT | M_ZERO); 318 if (!sc->rx_queues) { 319 device_printf(iflib_get_dev(ctx), 320 "Unable to allocate TX ring memory\n"); 321 error = ENOMEM; 322 goto fail; 323 } 324 325 for (i = 0, que = sc->rx_queues; i < nrxqsets; i++, que++) { 326 struct rx_ring *rxr = &que->rxr; 327 rxr->me = i; 328 rxr->sc = que->sc = sc; 329 330 331 /* get the virtual and physical address of the hw queues */ 332 rxr->tail = IXGBE_VFRDT(rxr->me); 333 rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i]; 334 rxr->rx_paddr = paddrs[i*nrxqs]; 335 rxr->bytes = 0; 336 rxr->que = que; 337 } 338 339 device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n", 340 sc->num_rx_queues); 341 342 return (0); 343 344 fail: 345 ixv_if_queues_free(ctx); 346 347 return (error); 348 } /* ixv_if_rx_queues_alloc */ 349 350 /************************************************************************ 351 * ixv_if_queues_free 352 ************************************************************************/ 353 static void 354 ixv_if_queues_free(if_ctx_t ctx) 355 { 356 struct ixgbe_softc *sc = iflib_get_softc(ctx); 357 struct ix_tx_queue *que = sc->tx_queues; 358 int i; 359 360 if (que == NULL) 361 goto free; 362 363 for (i = 0; i < sc->num_tx_queues; i++, que++) { 364 struct tx_ring *txr = &que->txr; 365 if (txr->tx_rsq == NULL) 366 break; 367 368 free(txr->tx_rsq, M_DEVBUF); 369 txr->tx_rsq = NULL; 370 } 371 if (sc->tx_queues != NULL) 372 free(sc->tx_queues, M_DEVBUF); 373 free: 374 if (sc->rx_queues != NULL) 375 free(sc->rx_queues, M_DEVBUF); 376 sc->tx_queues = NULL; 377 sc->rx_queues = NULL; 378 } /* ixv_if_queues_free */ 379 380 /************************************************************************ 381 * ixv_if_attach_pre - Device initialization routine 382 * 383 * Called when the driver is being loaded. 384 * Identifies the type of hardware, allocates all resources 385 * and initializes the hardware. 386 * 387 * return 0 on success, positive on failure 388 ************************************************************************/ 389 static int 390 ixv_if_attach_pre(if_ctx_t ctx) 391 { 392 struct ixgbe_softc *sc; 393 device_t dev; 394 if_softc_ctx_t scctx; 395 struct ixgbe_hw *hw; 396 int error = 0; 397 398 INIT_DEBUGOUT("ixv_attach: begin"); 399 400 /* Allocate, clear, and link in our sc structure */ 401 dev = iflib_get_dev(ctx); 402 sc = iflib_get_softc(ctx); 403 sc->dev = dev; 404 sc->ctx = ctx; 405 sc->hw.back = sc; 406 scctx = sc->shared = iflib_get_softc_ctx(ctx); 407 sc->media = iflib_get_media(ctx); 408 hw = &sc->hw; 409 410 /* Do base PCI setup - map BAR0 */ 411 if (ixv_allocate_pci_resources(ctx)) { 412 device_printf(dev, "ixv_allocate_pci_resources() failed!\n"); 413 error = ENXIO; 414 goto err_out; 415 } 416 417 /* SYSCTL APIs */ 418 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 419 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug", 420 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 421 sc, 0, ixv_sysctl_debug, "I", "Debug Info"); 422 423 /* Determine hardware revision */ 424 ixv_identify_hardware(ctx); 425 ixv_init_device_features(sc); 426 427 /* Initialize the shared code */ 428 error = ixgbe_init_ops_vf(hw); 429 if (error) { 430 device_printf(dev, "ixgbe_init_ops_vf() failed!\n"); 431 error = EIO; 432 goto err_out; 433 } 434 435 /* Setup the mailbox */ 436 ixgbe_init_mbx_params_vf(hw); 437 438 error = hw->mac.ops.reset_hw(hw); 439 if (error == IXGBE_ERR_RESET_FAILED) 440 device_printf(dev, "...reset_hw() failure: Reset Failed!\n"); 441 else if (error) 442 device_printf(dev, "...reset_hw() failed with error %d\n", 443 error); 444 if (error) { 445 error = EIO; 446 goto err_out; 447 } 448 449 error = hw->mac.ops.init_hw(hw); 450 if (error) { 451 device_printf(dev, "...init_hw() failed with error %d\n", 452 error); 453 error = EIO; 454 goto err_out; 455 } 456 457 /* Negotiate mailbox API version */ 458 error = ixv_negotiate_api(sc); 459 if (error) { 460 device_printf(dev, 461 "Mailbox API negotiation failed during attach!\n"); 462 goto err_out; 463 } 464 465 /* Check if VF was disabled by PF */ 466 error = hw->mac.ops.get_link_state(hw, &sc->link_enabled); 467 if (error) { 468 /* PF is not capable of controlling VF state. Enable the link. */ 469 sc->link_enabled = true; 470 } 471 472 /* If no mac address was assigned, make a random one */ 473 if (!ixv_check_ether_addr(hw->mac.addr)) { 474 ether_gen_addr(iflib_get_ifp(ctx), 475 (struct ether_addr *)hw->mac.addr); 476 bcopy(hw->mac.addr, hw->mac.perm_addr, 477 sizeof(hw->mac.perm_addr)); 478 } 479 480 /* Most of the iflib initialization... */ 481 482 iflib_set_mac(ctx, hw->mac.addr); 483 switch (sc->hw.mac.type) { 484 case ixgbe_mac_X550_vf: 485 case ixgbe_mac_X550EM_x_vf: 486 case ixgbe_mac_X550EM_a_vf: 487 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 2; 488 break; 489 default: 490 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 1; 491 } 492 scctx->isc_txqsizes[0] = 493 roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) + 494 sizeof(u32), DBA_ALIGN); 495 scctx->isc_rxqsizes[0] = 496 roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc), 497 DBA_ALIGN); 498 /* XXX */ 499 scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO | 500 CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO; 501 scctx->isc_tx_nsegments = IXGBE_82599_SCATTER; 502 scctx->isc_msix_bar = pci_msix_table_bar(dev); 503 scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments; 504 scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE; 505 scctx->isc_tx_tso_segsize_max = PAGE_SIZE; 506 507 scctx->isc_txrx = &ixgbe_txrx; 508 509 /* 510 * Tell the upper layer(s) we support everything the PF 511 * driver does except... 512 * Wake-on-LAN 513 */ 514 scctx->isc_capabilities = IXGBE_CAPS; 515 scctx->isc_capabilities ^= IFCAP_WOL; 516 scctx->isc_capenable = scctx->isc_capabilities; 517 518 INIT_DEBUGOUT("ixv_if_attach_pre: end"); 519 520 return (0); 521 522 err_out: 523 ixv_free_pci_resources(ctx); 524 525 return (error); 526 } /* ixv_if_attach_pre */ 527 528 static int 529 ixv_if_attach_post(if_ctx_t ctx) 530 { 531 struct ixgbe_softc *sc = iflib_get_softc(ctx); 532 device_t dev = iflib_get_dev(ctx); 533 int error = 0; 534 535 /* Setup OS specific network interface */ 536 error = ixv_setup_interface(ctx); 537 if (error) { 538 device_printf(dev, "Interface setup failed: %d\n", error); 539 goto end; 540 } 541 542 /* Do the stats setup */ 543 ixv_save_stats(sc); 544 ixv_init_stats(sc); 545 ixv_add_stats_sysctls(sc); 546 547 end: 548 return error; 549 } /* ixv_if_attach_post */ 550 551 /************************************************************************ 552 * ixv_detach - Device removal routine 553 * 554 * Called when the driver is being removed. 555 * Stops the adapter and deallocates all the resources 556 * that were allocated for driver operation. 557 * 558 * return 0 on success, positive on failure 559 ************************************************************************/ 560 static int 561 ixv_if_detach(if_ctx_t ctx) 562 { 563 INIT_DEBUGOUT("ixv_detach: begin"); 564 565 ixv_free_pci_resources(ctx); 566 567 return (0); 568 } /* ixv_if_detach */ 569 570 /************************************************************************ 571 * ixv_if_mtu_set 572 ************************************************************************/ 573 static int 574 ixv_if_mtu_set(if_ctx_t ctx, uint32_t mtu) 575 { 576 struct ixgbe_softc *sc = iflib_get_softc(ctx); 577 struct ifnet *ifp = iflib_get_ifp(ctx); 578 int error = 0; 579 580 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)"); 581 if (mtu > IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR) { 582 error = EINVAL; 583 } else { 584 ifp->if_mtu = mtu; 585 sc->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR; 586 } 587 588 return error; 589 } /* ixv_if_mtu_set */ 590 591 /************************************************************************ 592 * ixv_if_init - Init entry point 593 * 594 * Used in two ways: It is used by the stack as an init entry 595 * point in network interface structure. It is also used 596 * by the driver as a hw/sw initialization routine to get 597 * to a consistent state. 598 * 599 * return 0 on success, positive on failure 600 ************************************************************************/ 601 static void 602 ixv_if_init(if_ctx_t ctx) 603 { 604 struct ixgbe_softc *sc = iflib_get_softc(ctx); 605 struct ifnet *ifp = iflib_get_ifp(ctx); 606 device_t dev = iflib_get_dev(ctx); 607 struct ixgbe_hw *hw = &sc->hw; 608 int error = 0; 609 610 INIT_DEBUGOUT("ixv_if_init: begin"); 611 hw->adapter_stopped = false; 612 hw->mac.ops.stop_adapter(hw); 613 614 /* reprogram the RAR[0] in case user changed it. */ 615 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); 616 617 /* Get the latest mac address, User can use a LAA */ 618 bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS); 619 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1); 620 621 /* Reset VF and renegotiate mailbox API version */ 622 hw->mac.ops.reset_hw(hw); 623 hw->mac.ops.start_hw(hw); 624 error = ixv_negotiate_api(sc); 625 if (error) { 626 device_printf(dev, 627 "Mailbox API negotiation failed in if_init!\n"); 628 return; 629 } 630 631 ixv_initialize_transmit_units(ctx); 632 633 /* Setup Multicast table */ 634 ixv_if_multi_set(ctx); 635 636 sc->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx); 637 638 /* Configure RX settings */ 639 ixv_initialize_receive_units(ctx); 640 641 /* Set up VLAN offload and filter */ 642 ixv_setup_vlan_support(ctx); 643 644 /* Set up MSI-X routing */ 645 ixv_configure_ivars(sc); 646 647 /* Set up auto-mask */ 648 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE); 649 650 /* Set moderation on the Link interrupt */ 651 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(sc->vector), IXGBE_LINK_ITR); 652 653 /* Stats init */ 654 ixv_init_stats(sc); 655 656 /* Config/Enable Link */ 657 error = hw->mac.ops.get_link_state(hw, &sc->link_enabled); 658 if (error) { 659 /* PF is not capable of controlling VF state. Enable the link. */ 660 sc->link_enabled = true; 661 } else if (sc->link_enabled == false) 662 device_printf(dev, "VF is disabled by PF\n"); 663 664 hw->mac.ops.check_link(hw, &sc->link_speed, &sc->link_up, 665 false); 666 667 /* And now turn on interrupts */ 668 ixv_if_enable_intr(ctx); 669 670 return; 671 } /* ixv_if_init */ 672 673 /************************************************************************ 674 * ixv_enable_queue 675 ************************************************************************/ 676 static inline void 677 ixv_enable_queue(struct ixgbe_softc *sc, u32 vector) 678 { 679 struct ixgbe_hw *hw = &sc->hw; 680 u32 queue = 1 << vector; 681 u32 mask; 682 683 mask = (IXGBE_EIMS_RTX_QUEUE & queue); 684 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask); 685 } /* ixv_enable_queue */ 686 687 /************************************************************************ 688 * ixv_disable_queue 689 ************************************************************************/ 690 static inline void 691 ixv_disable_queue(struct ixgbe_softc *sc, u32 vector) 692 { 693 struct ixgbe_hw *hw = &sc->hw; 694 u64 queue = (u64)(1 << vector); 695 u32 mask; 696 697 mask = (IXGBE_EIMS_RTX_QUEUE & queue); 698 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask); 699 } /* ixv_disable_queue */ 700 701 702 /************************************************************************ 703 * ixv_msix_que - MSI-X Queue Interrupt Service routine 704 ************************************************************************/ 705 static int 706 ixv_msix_que(void *arg) 707 { 708 struct ix_rx_queue *que = arg; 709 struct ixgbe_softc *sc = que->sc; 710 711 ixv_disable_queue(sc, que->msix); 712 ++que->irqs; 713 714 return (FILTER_SCHEDULE_THREAD); 715 } /* ixv_msix_que */ 716 717 /************************************************************************ 718 * ixv_msix_mbx 719 ************************************************************************/ 720 static int 721 ixv_msix_mbx(void *arg) 722 { 723 struct ixgbe_softc *sc = arg; 724 struct ixgbe_hw *hw = &sc->hw; 725 u32 reg; 726 727 ++sc->link_irq; 728 729 /* First get the cause */ 730 reg = IXGBE_READ_REG(hw, IXGBE_VTEICS); 731 /* Clear interrupt with write */ 732 IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg); 733 734 /* Link status change */ 735 if (reg & IXGBE_EICR_LSC) 736 iflib_admin_intr_deferred(sc->ctx); 737 738 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER); 739 740 return (FILTER_HANDLED); 741 } /* ixv_msix_mbx */ 742 743 /************************************************************************ 744 * ixv_media_status - Media Ioctl callback 745 * 746 * Called whenever the user queries the status of 747 * the interface using ifconfig. 748 ************************************************************************/ 749 static void 750 ixv_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr) 751 { 752 struct ixgbe_softc *sc = iflib_get_softc(ctx); 753 754 INIT_DEBUGOUT("ixv_media_status: begin"); 755 756 iflib_admin_intr_deferred(ctx); 757 758 ifmr->ifm_status = IFM_AVALID; 759 ifmr->ifm_active = IFM_ETHER; 760 761 if (!sc->link_active) 762 return; 763 764 ifmr->ifm_status |= IFM_ACTIVE; 765 766 switch (sc->link_speed) { 767 case IXGBE_LINK_SPEED_1GB_FULL: 768 ifmr->ifm_active |= IFM_1000_T | IFM_FDX; 769 break; 770 case IXGBE_LINK_SPEED_10GB_FULL: 771 ifmr->ifm_active |= IFM_10G_T | IFM_FDX; 772 break; 773 case IXGBE_LINK_SPEED_100_FULL: 774 ifmr->ifm_active |= IFM_100_TX | IFM_FDX; 775 break; 776 case IXGBE_LINK_SPEED_10_FULL: 777 ifmr->ifm_active |= IFM_10_T | IFM_FDX; 778 break; 779 } 780 } /* ixv_if_media_status */ 781 782 /************************************************************************ 783 * ixv_if_media_change - Media Ioctl callback 784 * 785 * Called when the user changes speed/duplex using 786 * media/mediopt option with ifconfig. 787 ************************************************************************/ 788 static int 789 ixv_if_media_change(if_ctx_t ctx) 790 { 791 struct ixgbe_softc *sc = iflib_get_softc(ctx); 792 struct ifmedia *ifm = iflib_get_media(ctx); 793 794 INIT_DEBUGOUT("ixv_media_change: begin"); 795 796 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 797 return (EINVAL); 798 799 switch (IFM_SUBTYPE(ifm->ifm_media)) { 800 case IFM_AUTO: 801 break; 802 default: 803 device_printf(sc->dev, "Only auto media type\n"); 804 return (EINVAL); 805 } 806 807 return (0); 808 } /* ixv_if_media_change */ 809 810 811 /************************************************************************ 812 * ixv_negotiate_api 813 * 814 * Negotiate the Mailbox API with the PF; 815 * start with the most featured API first. 816 ************************************************************************/ 817 static int 818 ixv_negotiate_api(struct ixgbe_softc *sc) 819 { 820 struct ixgbe_hw *hw = &sc->hw; 821 int mbx_api[] = { ixgbe_mbox_api_12, 822 ixgbe_mbox_api_11, 823 ixgbe_mbox_api_10, 824 ixgbe_mbox_api_unknown }; 825 int i = 0; 826 827 while (mbx_api[i] != ixgbe_mbox_api_unknown) { 828 if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0) 829 return (0); 830 i++; 831 } 832 833 return (EINVAL); 834 } /* ixv_negotiate_api */ 835 836 837 /************************************************************************ 838 * ixv_if_multi_set - Multicast Update 839 * 840 * Called whenever multicast address list is updated. 841 ************************************************************************/ 842 static void 843 ixv_if_multi_set(if_ctx_t ctx) 844 { 845 u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS]; 846 struct ixgbe_softc *sc = iflib_get_softc(ctx); 847 u8 *update_ptr; 848 struct ifmultiaddr *ifma; 849 if_t ifp = iflib_get_ifp(ctx); 850 int mcnt = 0; 851 852 IOCTL_DEBUGOUT("ixv_if_multi_set: begin"); 853 854 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 855 if (ifma->ifma_addr->sa_family != AF_LINK) 856 continue; 857 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 858 &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS], 859 IXGBE_ETH_LENGTH_OF_ADDRESS); 860 mcnt++; 861 } 862 863 update_ptr = mta; 864 865 sc->hw.mac.ops.update_mc_addr_list(&sc->hw, update_ptr, mcnt, 866 ixv_mc_array_itr, true); 867 } /* ixv_if_multi_set */ 868 869 /************************************************************************ 870 * ixv_mc_array_itr 871 * 872 * An iterator function needed by the multicast shared code. 873 * It feeds the shared code routine the addresses in the 874 * array of ixv_set_multi() one by one. 875 ************************************************************************/ 876 static u8 * 877 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq) 878 { 879 u8 *addr = *update_ptr; 880 u8 *newptr; 881 882 *vmdq = 0; 883 884 newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS; 885 *update_ptr = newptr; 886 887 return addr; 888 } /* ixv_mc_array_itr */ 889 890 /************************************************************************ 891 * ixv_if_local_timer - Timer routine 892 * 893 * Checks for link status, updates statistics, 894 * and runs the watchdog check. 895 ************************************************************************/ 896 static void 897 ixv_if_local_timer(if_ctx_t ctx, uint16_t qid) 898 { 899 if (qid != 0) 900 return; 901 902 /* Fire off the adminq task */ 903 iflib_admin_intr_deferred(ctx); 904 } /* ixv_if_local_timer */ 905 906 /************************************************************************ 907 * ixv_if_update_admin_status - Update OS on link state 908 * 909 * Note: Only updates the OS on the cached link state. 910 * The real check of the hardware only happens with 911 * a link interrupt. 912 ************************************************************************/ 913 static void 914 ixv_if_update_admin_status(if_ctx_t ctx) 915 { 916 struct ixgbe_softc *sc = iflib_get_softc(ctx); 917 device_t dev = iflib_get_dev(ctx); 918 s32 status; 919 920 sc->hw.mac.get_link_status = true; 921 922 status = ixgbe_check_link(&sc->hw, &sc->link_speed, 923 &sc->link_up, false); 924 925 if (status != IXGBE_SUCCESS && sc->hw.adapter_stopped == false) { 926 /* Mailbox's Clear To Send status is lost or timeout occurred. 927 * We need reinitialization. */ 928 iflib_get_ifp(ctx)->if_init(ctx); 929 } 930 931 if (sc->link_up && sc->link_enabled) { 932 if (sc->link_active == false) { 933 if (bootverbose) 934 device_printf(dev, "Link is up %d Gbps %s \n", 935 ((sc->link_speed == 128) ? 10 : 1), 936 "Full Duplex"); 937 sc->link_active = true; 938 iflib_link_state_change(ctx, LINK_STATE_UP, 939 IF_Gbps(10)); 940 } 941 } else { /* Link down */ 942 if (sc->link_active == true) { 943 if (bootverbose) 944 device_printf(dev, "Link is Down\n"); 945 iflib_link_state_change(ctx, LINK_STATE_DOWN, 0); 946 sc->link_active = false; 947 } 948 } 949 950 /* Stats Update */ 951 ixv_update_stats(sc); 952 } /* ixv_if_update_admin_status */ 953 954 955 /************************************************************************ 956 * ixv_if_stop - Stop the hardware 957 * 958 * Disables all traffic on the adapter by issuing a 959 * global reset on the MAC and deallocates TX/RX buffers. 960 ************************************************************************/ 961 static void 962 ixv_if_stop(if_ctx_t ctx) 963 { 964 struct ixgbe_softc *sc = iflib_get_softc(ctx); 965 struct ixgbe_hw *hw = &sc->hw; 966 967 INIT_DEBUGOUT("ixv_stop: begin\n"); 968 969 ixv_if_disable_intr(ctx); 970 971 hw->mac.ops.reset_hw(hw); 972 sc->hw.adapter_stopped = false; 973 hw->mac.ops.stop_adapter(hw); 974 975 /* Update the stack */ 976 sc->link_up = false; 977 ixv_if_update_admin_status(ctx); 978 979 /* reprogram the RAR[0] in case user changed it. */ 980 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); 981 } /* ixv_if_stop */ 982 983 984 /************************************************************************ 985 * ixv_identify_hardware - Determine hardware revision. 986 ************************************************************************/ 987 static void 988 ixv_identify_hardware(if_ctx_t ctx) 989 { 990 struct ixgbe_softc *sc = iflib_get_softc(ctx); 991 device_t dev = iflib_get_dev(ctx); 992 struct ixgbe_hw *hw = &sc->hw; 993 994 /* Save off the information about this board */ 995 hw->vendor_id = pci_get_vendor(dev); 996 hw->device_id = pci_get_device(dev); 997 hw->revision_id = pci_get_revid(dev); 998 hw->subsystem_vendor_id = pci_get_subvendor(dev); 999 hw->subsystem_device_id = pci_get_subdevice(dev); 1000 1001 /* A subset of set_mac_type */ 1002 switch (hw->device_id) { 1003 case IXGBE_DEV_ID_82599_VF: 1004 hw->mac.type = ixgbe_mac_82599_vf; 1005 break; 1006 case IXGBE_DEV_ID_X540_VF: 1007 hw->mac.type = ixgbe_mac_X540_vf; 1008 break; 1009 case IXGBE_DEV_ID_X550_VF: 1010 hw->mac.type = ixgbe_mac_X550_vf; 1011 break; 1012 case IXGBE_DEV_ID_X550EM_X_VF: 1013 hw->mac.type = ixgbe_mac_X550EM_x_vf; 1014 break; 1015 case IXGBE_DEV_ID_X550EM_A_VF: 1016 hw->mac.type = ixgbe_mac_X550EM_a_vf; 1017 break; 1018 default: 1019 device_printf(dev, "unknown mac type\n"); 1020 hw->mac.type = ixgbe_mac_unknown; 1021 break; 1022 } 1023 } /* ixv_identify_hardware */ 1024 1025 /************************************************************************ 1026 * ixv_if_msix_intr_assign - Setup MSI-X Interrupt resources and handlers 1027 ************************************************************************/ 1028 static int 1029 ixv_if_msix_intr_assign(if_ctx_t ctx, int msix) 1030 { 1031 struct ixgbe_softc *sc = iflib_get_softc(ctx); 1032 device_t dev = iflib_get_dev(ctx); 1033 struct ix_rx_queue *rx_que = sc->rx_queues; 1034 struct ix_tx_queue *tx_que; 1035 int error, rid, vector = 0; 1036 char buf[16]; 1037 1038 for (int i = 0; i < sc->num_rx_queues; i++, vector++, rx_que++) { 1039 rid = vector + 1; 1040 1041 snprintf(buf, sizeof(buf), "rxq%d", i); 1042 error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid, 1043 IFLIB_INTR_RXTX, ixv_msix_que, rx_que, rx_que->rxr.me, buf); 1044 1045 if (error) { 1046 device_printf(iflib_get_dev(ctx), 1047 "Failed to allocate que int %d err: %d", i, error); 1048 sc->num_rx_queues = i + 1; 1049 goto fail; 1050 } 1051 1052 rx_que->msix = vector; 1053 } 1054 1055 for (int i = 0; i < sc->num_tx_queues; i++) { 1056 snprintf(buf, sizeof(buf), "txq%d", i); 1057 tx_que = &sc->tx_queues[i]; 1058 tx_que->msix = i % sc->num_rx_queues; 1059 iflib_softirq_alloc_generic(ctx, 1060 &sc->rx_queues[tx_que->msix].que_irq, 1061 IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf); 1062 } 1063 rid = vector + 1; 1064 error = iflib_irq_alloc_generic(ctx, &sc->irq, rid, 1065 IFLIB_INTR_ADMIN, ixv_msix_mbx, sc, 0, "aq"); 1066 if (error) { 1067 device_printf(iflib_get_dev(ctx), 1068 "Failed to register admin handler"); 1069 return (error); 1070 } 1071 1072 sc->vector = vector; 1073 /* 1074 * Due to a broken design QEMU will fail to properly 1075 * enable the guest for MSIX unless the vectors in 1076 * the table are all set up, so we must rewrite the 1077 * ENABLE in the MSIX control register again at this 1078 * point to cause it to successfully initialize us. 1079 */ 1080 if (sc->hw.mac.type == ixgbe_mac_82599_vf) { 1081 int msix_ctrl; 1082 pci_find_cap(dev, PCIY_MSIX, &rid); 1083 rid += PCIR_MSIX_CTRL; 1084 msix_ctrl = pci_read_config(dev, rid, 2); 1085 msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE; 1086 pci_write_config(dev, rid, msix_ctrl, 2); 1087 } 1088 1089 return (0); 1090 1091 fail: 1092 iflib_irq_free(ctx, &sc->irq); 1093 rx_que = sc->rx_queues; 1094 for (int i = 0; i < sc->num_rx_queues; i++, rx_que++) 1095 iflib_irq_free(ctx, &rx_que->que_irq); 1096 1097 return (error); 1098 } /* ixv_if_msix_intr_assign */ 1099 1100 /************************************************************************ 1101 * ixv_allocate_pci_resources 1102 ************************************************************************/ 1103 static int 1104 ixv_allocate_pci_resources(if_ctx_t ctx) 1105 { 1106 struct ixgbe_softc *sc = iflib_get_softc(ctx); 1107 device_t dev = iflib_get_dev(ctx); 1108 int rid; 1109 1110 rid = PCIR_BAR(0); 1111 sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 1112 RF_ACTIVE); 1113 1114 if (!(sc->pci_mem)) { 1115 device_printf(dev, "Unable to allocate bus resource: memory\n"); 1116 return (ENXIO); 1117 } 1118 1119 sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->pci_mem); 1120 sc->osdep.mem_bus_space_handle = 1121 rman_get_bushandle(sc->pci_mem); 1122 sc->hw.hw_addr = (u8 *)&sc->osdep.mem_bus_space_handle; 1123 1124 return (0); 1125 } /* ixv_allocate_pci_resources */ 1126 1127 /************************************************************************ 1128 * ixv_free_pci_resources 1129 ************************************************************************/ 1130 static void 1131 ixv_free_pci_resources(if_ctx_t ctx) 1132 { 1133 struct ixgbe_softc *sc = iflib_get_softc(ctx); 1134 struct ix_rx_queue *que = sc->rx_queues; 1135 device_t dev = iflib_get_dev(ctx); 1136 1137 /* Release all MSI-X queue resources */ 1138 if (sc->intr_type == IFLIB_INTR_MSIX) 1139 iflib_irq_free(ctx, &sc->irq); 1140 1141 if (que != NULL) { 1142 for (int i = 0; i < sc->num_rx_queues; i++, que++) { 1143 iflib_irq_free(ctx, &que->que_irq); 1144 } 1145 } 1146 1147 if (sc->pci_mem != NULL) 1148 bus_release_resource(dev, SYS_RES_MEMORY, 1149 rman_get_rid(sc->pci_mem), sc->pci_mem); 1150 } /* ixv_free_pci_resources */ 1151 1152 /************************************************************************ 1153 * ixv_setup_interface 1154 * 1155 * Setup networking device structure and register an interface. 1156 ************************************************************************/ 1157 static int 1158 ixv_setup_interface(if_ctx_t ctx) 1159 { 1160 struct ixgbe_softc *sc = iflib_get_softc(ctx); 1161 if_softc_ctx_t scctx = sc->shared; 1162 struct ifnet *ifp = iflib_get_ifp(ctx); 1163 1164 INIT_DEBUGOUT("ixv_setup_interface: begin"); 1165 1166 if_setbaudrate(ifp, IF_Gbps(10)); 1167 ifp->if_snd.ifq_maxlen = scctx->isc_ntxd[0] - 2; 1168 1169 1170 sc->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR; 1171 ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL); 1172 ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO); 1173 1174 return 0; 1175 } /* ixv_setup_interface */ 1176 1177 /************************************************************************ 1178 * ixv_if_get_counter 1179 ************************************************************************/ 1180 static uint64_t 1181 ixv_if_get_counter(if_ctx_t ctx, ift_counter cnt) 1182 { 1183 struct ixgbe_softc *sc = iflib_get_softc(ctx); 1184 if_t ifp = iflib_get_ifp(ctx); 1185 1186 switch (cnt) { 1187 case IFCOUNTER_IPACKETS: 1188 return (sc->ipackets); 1189 case IFCOUNTER_OPACKETS: 1190 return (sc->opackets); 1191 case IFCOUNTER_IBYTES: 1192 return (sc->ibytes); 1193 case IFCOUNTER_OBYTES: 1194 return (sc->obytes); 1195 case IFCOUNTER_IMCASTS: 1196 return (sc->imcasts); 1197 default: 1198 return (if_get_counter_default(ifp, cnt)); 1199 } 1200 } /* ixv_if_get_counter */ 1201 1202 /* ixv_if_needs_restart - Tell iflib when the driver needs to be reinitialized 1203 * @ctx: iflib context 1204 * @event: event code to check 1205 * 1206 * Defaults to returning true for every event. 1207 * 1208 * @returns true if iflib needs to reinit the interface 1209 */ 1210 static bool 1211 ixv_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event) 1212 { 1213 switch (event) { 1214 case IFLIB_RESTART_VLAN_CONFIG: 1215 /* XXX: This may not need to return true */ 1216 default: 1217 return (true); 1218 } 1219 } 1220 1221 /************************************************************************ 1222 * ixv_initialize_transmit_units - Enable transmit unit. 1223 ************************************************************************/ 1224 static void 1225 ixv_initialize_transmit_units(if_ctx_t ctx) 1226 { 1227 struct ixgbe_softc *sc = iflib_get_softc(ctx); 1228 struct ixgbe_hw *hw = &sc->hw; 1229 if_softc_ctx_t scctx = sc->shared; 1230 struct ix_tx_queue *que = sc->tx_queues; 1231 int i; 1232 1233 for (i = 0; i < sc->num_tx_queues; i++, que++) { 1234 struct tx_ring *txr = &que->txr; 1235 u64 tdba = txr->tx_paddr; 1236 u32 txctrl, txdctl; 1237 int j = txr->me; 1238 1239 /* Set WTHRESH to 8, burst writeback */ 1240 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j)); 1241 txdctl |= (8 << 16); 1242 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl); 1243 1244 /* Set the HW Tx Head and Tail indices */ 1245 IXGBE_WRITE_REG(&sc->hw, IXGBE_VFTDH(j), 0); 1246 IXGBE_WRITE_REG(&sc->hw, IXGBE_VFTDT(j), 0); 1247 1248 /* Set Tx Tail register */ 1249 txr->tail = IXGBE_VFTDT(j); 1250 1251 txr->tx_rs_cidx = txr->tx_rs_pidx; 1252 /* Initialize the last processed descriptor to be the end of 1253 * the ring, rather than the start, so that we avoid an 1254 * off-by-one error when calculating how many descriptors are 1255 * done in the credits_update function. 1256 */ 1257 txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1; 1258 for (int k = 0; k < scctx->isc_ntxd[0]; k++) 1259 txr->tx_rsq[k] = QIDX_INVALID; 1260 1261 /* Set Ring parameters */ 1262 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j), 1263 (tdba & 0x00000000ffffffffULL)); 1264 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32)); 1265 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), 1266 scctx->isc_ntxd[0] * sizeof(struct ixgbe_legacy_tx_desc)); 1267 txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j)); 1268 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; 1269 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl); 1270 1271 /* Now enable */ 1272 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j)); 1273 txdctl |= IXGBE_TXDCTL_ENABLE; 1274 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl); 1275 } 1276 1277 return; 1278 } /* ixv_initialize_transmit_units */ 1279 1280 /************************************************************************ 1281 * ixv_initialize_rss_mapping 1282 ************************************************************************/ 1283 static void 1284 ixv_initialize_rss_mapping(struct ixgbe_softc *sc) 1285 { 1286 struct ixgbe_hw *hw = &sc->hw; 1287 u32 reta = 0, mrqc, rss_key[10]; 1288 int queue_id; 1289 int i, j; 1290 u32 rss_hash_config; 1291 1292 if (sc->feat_en & IXGBE_FEATURE_RSS) { 1293 /* Fetch the configured RSS key */ 1294 rss_getkey((uint8_t *)&rss_key); 1295 } else { 1296 /* set up random bits */ 1297 arc4rand(&rss_key, sizeof(rss_key), 0); 1298 } 1299 1300 /* Now fill out hash function seeds */ 1301 for (i = 0; i < 10; i++) 1302 IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]); 1303 1304 /* Set up the redirection table */ 1305 for (i = 0, j = 0; i < 64; i++, j++) { 1306 if (j == sc->num_rx_queues) 1307 j = 0; 1308 1309 if (sc->feat_en & IXGBE_FEATURE_RSS) { 1310 /* 1311 * Fetch the RSS bucket id for the given indirection 1312 * entry. Cap it at the number of configured buckets 1313 * (which is num_rx_queues.) 1314 */ 1315 queue_id = rss_get_indirection_to_bucket(i); 1316 queue_id = queue_id % sc->num_rx_queues; 1317 } else 1318 queue_id = j; 1319 1320 /* 1321 * The low 8 bits are for hash value (n+0); 1322 * The next 8 bits are for hash value (n+1), etc. 1323 */ 1324 reta >>= 8; 1325 reta |= ((uint32_t)queue_id) << 24; 1326 if ((i & 3) == 3) { 1327 IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta); 1328 reta = 0; 1329 } 1330 } 1331 1332 /* Perform hash on these packet types */ 1333 if (sc->feat_en & IXGBE_FEATURE_RSS) 1334 rss_hash_config = rss_gethashconfig(); 1335 else { 1336 /* 1337 * Disable UDP - IP fragments aren't currently being handled 1338 * and so we end up with a mix of 2-tuple and 4-tuple 1339 * traffic. 1340 */ 1341 rss_hash_config = RSS_HASHTYPE_RSS_IPV4 1342 | RSS_HASHTYPE_RSS_TCP_IPV4 1343 | RSS_HASHTYPE_RSS_IPV6 1344 | RSS_HASHTYPE_RSS_TCP_IPV6; 1345 } 1346 1347 mrqc = IXGBE_MRQC_RSSEN; 1348 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4) 1349 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4; 1350 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4) 1351 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP; 1352 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6) 1353 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6; 1354 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6) 1355 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP; 1356 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX) 1357 device_printf(sc->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n", 1358 __func__); 1359 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX) 1360 device_printf(sc->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n", 1361 __func__); 1362 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4) 1363 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; 1364 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6) 1365 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; 1366 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX) 1367 device_printf(sc->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n", 1368 __func__); 1369 IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc); 1370 } /* ixv_initialize_rss_mapping */ 1371 1372 1373 /************************************************************************ 1374 * ixv_initialize_receive_units - Setup receive registers and features. 1375 ************************************************************************/ 1376 static void 1377 ixv_initialize_receive_units(if_ctx_t ctx) 1378 { 1379 struct ixgbe_softc *sc = iflib_get_softc(ctx); 1380 if_softc_ctx_t scctx; 1381 struct ixgbe_hw *hw = &sc->hw; 1382 struct ifnet *ifp = iflib_get_ifp(ctx); 1383 struct ix_rx_queue *que = sc->rx_queues; 1384 u32 bufsz, psrtype; 1385 1386 if (ifp->if_mtu > ETHERMTU) 1387 bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; 1388 else 1389 bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; 1390 1391 psrtype = IXGBE_PSRTYPE_TCPHDR 1392 | IXGBE_PSRTYPE_UDPHDR 1393 | IXGBE_PSRTYPE_IPV4HDR 1394 | IXGBE_PSRTYPE_IPV6HDR 1395 | IXGBE_PSRTYPE_L2HDR; 1396 1397 if (sc->num_rx_queues > 1) 1398 psrtype |= 1 << 29; 1399 1400 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype); 1401 1402 /* Tell PF our max_frame size */ 1403 if (ixgbevf_rlpml_set_vf(hw, sc->max_frame_size) != 0) { 1404 device_printf(sc->dev, "There is a problem with the PF setup. It is likely the receive unit for this VF will not function correctly.\n"); 1405 } 1406 scctx = sc->shared; 1407 1408 for (int i = 0; i < sc->num_rx_queues; i++, que++) { 1409 struct rx_ring *rxr = &que->rxr; 1410 u64 rdba = rxr->rx_paddr; 1411 u32 reg, rxdctl; 1412 int j = rxr->me; 1413 1414 /* Disable the queue */ 1415 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)); 1416 rxdctl &= ~IXGBE_RXDCTL_ENABLE; 1417 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl); 1418 for (int k = 0; k < 10; k++) { 1419 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & 1420 IXGBE_RXDCTL_ENABLE) 1421 msec_delay(1); 1422 else 1423 break; 1424 } 1425 wmb(); 1426 /* Setup the Base and Length of the Rx Descriptor Ring */ 1427 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j), 1428 (rdba & 0x00000000ffffffffULL)); 1429 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32)); 1430 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), 1431 scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc)); 1432 1433 /* Reset the ring indices */ 1434 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0); 1435 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0); 1436 1437 /* Set up the SRRCTL register */ 1438 reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(j)); 1439 reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; 1440 reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; 1441 reg |= bufsz; 1442 reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; 1443 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(j), reg); 1444 1445 /* Capture Rx Tail index */ 1446 rxr->tail = IXGBE_VFRDT(rxr->me); 1447 1448 /* Do the queue enabling last */ 1449 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME; 1450 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl); 1451 for (int l = 0; l < 10; l++) { 1452 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & 1453 IXGBE_RXDCTL_ENABLE) 1454 break; 1455 msec_delay(1); 1456 } 1457 wmb(); 1458 1459 /* Set the Tail Pointer */ 1460 #ifdef DEV_NETMAP 1461 /* 1462 * In netmap mode, we must preserve the buffers made 1463 * available to userspace before the if_init() 1464 * (this is true by default on the TX side, because 1465 * init makes all buffers available to userspace). 1466 * 1467 * netmap_reset() and the device specific routines 1468 * (e.g. ixgbe_setup_receive_rings()) map these 1469 * buffers at the end of the NIC ring, so here we 1470 * must set the RDT (tail) register to make sure 1471 * they are not overwritten. 1472 * 1473 * In this driver the NIC ring starts at RDH = 0, 1474 * RDT points to the last slot available for reception (?), 1475 * so RDT = num_rx_desc - 1 means the whole ring is available. 1476 */ 1477 if (ifp->if_capenable & IFCAP_NETMAP) { 1478 struct netmap_adapter *na = NA(ifp); 1479 struct netmap_kring *kring = na->rx_rings[j]; 1480 int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring); 1481 1482 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t); 1483 } else 1484 #endif /* DEV_NETMAP */ 1485 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 1486 scctx->isc_nrxd[0] - 1); 1487 } 1488 1489 /* 1490 * Do not touch RSS and RETA settings for older hardware 1491 * as those are shared among PF and all VF. 1492 */ 1493 if (sc->hw.mac.type >= ixgbe_mac_X550_vf) 1494 ixv_initialize_rss_mapping(sc); 1495 } /* ixv_initialize_receive_units */ 1496 1497 /************************************************************************ 1498 * ixv_setup_vlan_support 1499 ************************************************************************/ 1500 static void 1501 ixv_setup_vlan_support(if_ctx_t ctx) 1502 { 1503 struct ifnet *ifp = iflib_get_ifp(ctx); 1504 struct ixgbe_softc *sc = iflib_get_softc(ctx); 1505 struct ixgbe_hw *hw = &sc->hw; 1506 u32 ctrl, vid, vfta, retry; 1507 1508 /* 1509 * We get here thru if_init, meaning 1510 * a soft reset, this has already cleared 1511 * the VFTA and other state, so if there 1512 * have been no vlan's registered do nothing. 1513 */ 1514 if (sc->num_vlans == 0) 1515 return; 1516 1517 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) { 1518 /* Enable the queues */ 1519 for (int i = 0; i < sc->num_rx_queues; i++) { 1520 ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)); 1521 ctrl |= IXGBE_RXDCTL_VME; 1522 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl); 1523 /* 1524 * Let Rx path know that it needs to store VLAN tag 1525 * as part of extra mbuf info. 1526 */ 1527 sc->rx_queues[i].rxr.vtag_strip = true; 1528 } 1529 } 1530 1531 /* 1532 * If filtering VLAN tags is disabled, 1533 * there is no need to fill VLAN Filter Table Array (VFTA). 1534 */ 1535 if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0) 1536 return; 1537 1538 /* 1539 * A soft reset zero's out the VFTA, so 1540 * we need to repopulate it now. 1541 */ 1542 for (int i = 0; i < IXGBE_VFTA_SIZE; i++) { 1543 if (ixv_shadow_vfta[i] == 0) 1544 continue; 1545 vfta = ixv_shadow_vfta[i]; 1546 /* 1547 * Reconstruct the vlan id's 1548 * based on the bits set in each 1549 * of the array ints. 1550 */ 1551 for (int j = 0; j < 32; j++) { 1552 retry = 0; 1553 if ((vfta & (1 << j)) == 0) 1554 continue; 1555 vid = (i * 32) + j; 1556 /* Call the shared code mailbox routine */ 1557 while (hw->mac.ops.set_vfta(hw, vid, 0, true, false)) { 1558 if (++retry > 5) 1559 break; 1560 } 1561 } 1562 } 1563 } /* ixv_setup_vlan_support */ 1564 1565 /************************************************************************ 1566 * ixv_if_register_vlan 1567 * 1568 * Run via a vlan config EVENT, it enables us to use the 1569 * HW Filter table since we can get the vlan id. This just 1570 * creates the entry in the soft version of the VFTA, init 1571 * will repopulate the real table. 1572 ************************************************************************/ 1573 static void 1574 ixv_if_register_vlan(if_ctx_t ctx, u16 vtag) 1575 { 1576 struct ixgbe_softc *sc = iflib_get_softc(ctx); 1577 u16 index, bit; 1578 1579 index = (vtag >> 5) & 0x7F; 1580 bit = vtag & 0x1F; 1581 ixv_shadow_vfta[index] |= (1 << bit); 1582 ++sc->num_vlans; 1583 } /* ixv_if_register_vlan */ 1584 1585 /************************************************************************ 1586 * ixv_if_unregister_vlan 1587 * 1588 * Run via a vlan unconfig EVENT, remove our entry 1589 * in the soft vfta. 1590 ************************************************************************/ 1591 static void 1592 ixv_if_unregister_vlan(if_ctx_t ctx, u16 vtag) 1593 { 1594 struct ixgbe_softc *sc = iflib_get_softc(ctx); 1595 u16 index, bit; 1596 1597 index = (vtag >> 5) & 0x7F; 1598 bit = vtag & 0x1F; 1599 ixv_shadow_vfta[index] &= ~(1 << bit); 1600 --sc->num_vlans; 1601 } /* ixv_if_unregister_vlan */ 1602 1603 /************************************************************************ 1604 * ixv_if_enable_intr 1605 ************************************************************************/ 1606 static void 1607 ixv_if_enable_intr(if_ctx_t ctx) 1608 { 1609 struct ixgbe_softc *sc = iflib_get_softc(ctx); 1610 struct ixgbe_hw *hw = &sc->hw; 1611 struct ix_rx_queue *que = sc->rx_queues; 1612 u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE); 1613 1614 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask); 1615 1616 mask = IXGBE_EIMS_ENABLE_MASK; 1617 mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC); 1618 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask); 1619 1620 for (int i = 0; i < sc->num_rx_queues; i++, que++) 1621 ixv_enable_queue(sc, que->msix); 1622 1623 IXGBE_WRITE_FLUSH(hw); 1624 } /* ixv_if_enable_intr */ 1625 1626 /************************************************************************ 1627 * ixv_if_disable_intr 1628 ************************************************************************/ 1629 static void 1630 ixv_if_disable_intr(if_ctx_t ctx) 1631 { 1632 struct ixgbe_softc *sc = iflib_get_softc(ctx); 1633 IXGBE_WRITE_REG(&sc->hw, IXGBE_VTEIAC, 0); 1634 IXGBE_WRITE_REG(&sc->hw, IXGBE_VTEIMC, ~0); 1635 IXGBE_WRITE_FLUSH(&sc->hw); 1636 } /* ixv_if_disable_intr */ 1637 1638 /************************************************************************ 1639 * ixv_if_rx_queue_intr_enable 1640 ************************************************************************/ 1641 static int 1642 ixv_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid) 1643 { 1644 struct ixgbe_softc *sc = iflib_get_softc(ctx); 1645 struct ix_rx_queue *que = &sc->rx_queues[rxqid]; 1646 1647 ixv_enable_queue(sc, que->rxr.me); 1648 1649 return (0); 1650 } /* ixv_if_rx_queue_intr_enable */ 1651 1652 /************************************************************************ 1653 * ixv_set_ivar 1654 * 1655 * Setup the correct IVAR register for a particular MSI-X interrupt 1656 * - entry is the register array entry 1657 * - vector is the MSI-X vector for this queue 1658 * - type is RX/TX/MISC 1659 ************************************************************************/ 1660 static void 1661 ixv_set_ivar(struct ixgbe_softc *sc, u8 entry, u8 vector, s8 type) 1662 { 1663 struct ixgbe_hw *hw = &sc->hw; 1664 u32 ivar, index; 1665 1666 vector |= IXGBE_IVAR_ALLOC_VAL; 1667 1668 if (type == -1) { /* MISC IVAR */ 1669 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC); 1670 ivar &= ~0xFF; 1671 ivar |= vector; 1672 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar); 1673 } else { /* RX/TX IVARS */ 1674 index = (16 * (entry & 1)) + (8 * type); 1675 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1)); 1676 ivar &= ~(0xFF << index); 1677 ivar |= (vector << index); 1678 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar); 1679 } 1680 } /* ixv_set_ivar */ 1681 1682 /************************************************************************ 1683 * ixv_configure_ivars 1684 ************************************************************************/ 1685 static void 1686 ixv_configure_ivars(struct ixgbe_softc *sc) 1687 { 1688 struct ix_rx_queue *que = sc->rx_queues; 1689 1690 MPASS(sc->num_rx_queues == sc->num_tx_queues); 1691 1692 for (int i = 0; i < sc->num_rx_queues; i++, que++) { 1693 /* First the RX queue entry */ 1694 ixv_set_ivar(sc, i, que->msix, 0); 1695 /* ... and the TX */ 1696 ixv_set_ivar(sc, i, que->msix, 1); 1697 /* Set an initial value in EITR */ 1698 IXGBE_WRITE_REG(&sc->hw, IXGBE_VTEITR(que->msix), 1699 IXGBE_EITR_DEFAULT); 1700 } 1701 1702 /* For the mailbox interrupt */ 1703 ixv_set_ivar(sc, 1, sc->vector, -1); 1704 } /* ixv_configure_ivars */ 1705 1706 /************************************************************************ 1707 * ixv_save_stats 1708 * 1709 * The VF stats registers never have a truly virgin 1710 * starting point, so this routine tries to make an 1711 * artificial one, marking ground zero on attach as 1712 * it were. 1713 ************************************************************************/ 1714 static void 1715 ixv_save_stats(struct ixgbe_softc *sc) 1716 { 1717 if (sc->stats.vf.vfgprc || sc->stats.vf.vfgptc) { 1718 sc->stats.vf.saved_reset_vfgprc += 1719 sc->stats.vf.vfgprc - sc->stats.vf.base_vfgprc; 1720 sc->stats.vf.saved_reset_vfgptc += 1721 sc->stats.vf.vfgptc - sc->stats.vf.base_vfgptc; 1722 sc->stats.vf.saved_reset_vfgorc += 1723 sc->stats.vf.vfgorc - sc->stats.vf.base_vfgorc; 1724 sc->stats.vf.saved_reset_vfgotc += 1725 sc->stats.vf.vfgotc - sc->stats.vf.base_vfgotc; 1726 sc->stats.vf.saved_reset_vfmprc += 1727 sc->stats.vf.vfmprc - sc->stats.vf.base_vfmprc; 1728 } 1729 } /* ixv_save_stats */ 1730 1731 /************************************************************************ 1732 * ixv_init_stats 1733 ************************************************************************/ 1734 static void 1735 ixv_init_stats(struct ixgbe_softc *sc) 1736 { 1737 struct ixgbe_hw *hw = &sc->hw; 1738 1739 sc->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC); 1740 sc->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB); 1741 sc->stats.vf.last_vfgorc |= 1742 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32); 1743 1744 sc->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC); 1745 sc->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB); 1746 sc->stats.vf.last_vfgotc |= 1747 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32); 1748 1749 sc->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC); 1750 1751 sc->stats.vf.base_vfgprc = sc->stats.vf.last_vfgprc; 1752 sc->stats.vf.base_vfgorc = sc->stats.vf.last_vfgorc; 1753 sc->stats.vf.base_vfgptc = sc->stats.vf.last_vfgptc; 1754 sc->stats.vf.base_vfgotc = sc->stats.vf.last_vfgotc; 1755 sc->stats.vf.base_vfmprc = sc->stats.vf.last_vfmprc; 1756 } /* ixv_init_stats */ 1757 1758 #define UPDATE_STAT_32(reg, last, count) \ 1759 { \ 1760 u32 current = IXGBE_READ_REG(hw, reg); \ 1761 if (current < last) \ 1762 count += 0x100000000LL; \ 1763 last = current; \ 1764 count &= 0xFFFFFFFF00000000LL; \ 1765 count |= current; \ 1766 } 1767 1768 #define UPDATE_STAT_36(lsb, msb, last, count) \ 1769 { \ 1770 u64 cur_lsb = IXGBE_READ_REG(hw, lsb); \ 1771 u64 cur_msb = IXGBE_READ_REG(hw, msb); \ 1772 u64 current = ((cur_msb << 32) | cur_lsb); \ 1773 if (current < last) \ 1774 count += 0x1000000000LL; \ 1775 last = current; \ 1776 count &= 0xFFFFFFF000000000LL; \ 1777 count |= current; \ 1778 } 1779 1780 /************************************************************************ 1781 * ixv_update_stats - Update the board statistics counters. 1782 ************************************************************************/ 1783 void 1784 ixv_update_stats(struct ixgbe_softc *sc) 1785 { 1786 struct ixgbe_hw *hw = &sc->hw; 1787 struct ixgbevf_hw_stats *stats = &sc->stats.vf; 1788 1789 UPDATE_STAT_32(IXGBE_VFGPRC, sc->stats.vf.last_vfgprc, 1790 sc->stats.vf.vfgprc); 1791 UPDATE_STAT_32(IXGBE_VFGPTC, sc->stats.vf.last_vfgptc, 1792 sc->stats.vf.vfgptc); 1793 UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, 1794 sc->stats.vf.last_vfgorc, sc->stats.vf.vfgorc); 1795 UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, 1796 sc->stats.vf.last_vfgotc, sc->stats.vf.vfgotc); 1797 UPDATE_STAT_32(IXGBE_VFMPRC, sc->stats.vf.last_vfmprc, 1798 sc->stats.vf.vfmprc); 1799 1800 /* Fill out the OS statistics structure */ 1801 IXGBE_SET_IPACKETS(sc, stats->vfgprc); 1802 IXGBE_SET_OPACKETS(sc, stats->vfgptc); 1803 IXGBE_SET_IBYTES(sc, stats->vfgorc); 1804 IXGBE_SET_OBYTES(sc, stats->vfgotc); 1805 IXGBE_SET_IMCASTS(sc, stats->vfmprc); 1806 } /* ixv_update_stats */ 1807 1808 /************************************************************************ 1809 * ixv_add_stats_sysctls - Add statistic sysctls for the VF. 1810 ************************************************************************/ 1811 static void 1812 ixv_add_stats_sysctls(struct ixgbe_softc *sc) 1813 { 1814 device_t dev = sc->dev; 1815 struct ix_tx_queue *tx_que = sc->tx_queues; 1816 struct ix_rx_queue *rx_que = sc->rx_queues; 1817 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 1818 struct sysctl_oid *tree = device_get_sysctl_tree(dev); 1819 struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); 1820 struct ixgbevf_hw_stats *stats = &sc->stats.vf; 1821 struct sysctl_oid *stat_node, *queue_node; 1822 struct sysctl_oid_list *stat_list, *queue_list; 1823 1824 #define QUEUE_NAME_LEN 32 1825 char namebuf[QUEUE_NAME_LEN]; 1826 1827 /* Driver Statistics */ 1828 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events", 1829 CTLFLAG_RD, &sc->watchdog_events, "Watchdog timeouts"); 1830 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq", 1831 CTLFLAG_RD, &sc->link_irq, "Link MSI-X IRQ Handled"); 1832 1833 for (int i = 0; i < sc->num_tx_queues; i++, tx_que++) { 1834 struct tx_ring *txr = &tx_que->txr; 1835 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i); 1836 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, 1837 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name"); 1838 queue_list = SYSCTL_CHILDREN(queue_node); 1839 1840 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx", 1841 CTLFLAG_RD, &(txr->tso_tx), "TSO Packets"); 1842 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets", 1843 CTLFLAG_RD, &(txr->total_packets), "TX Packets"); 1844 } 1845 1846 for (int i = 0; i < sc->num_rx_queues; i++, rx_que++) { 1847 struct rx_ring *rxr = &rx_que->rxr; 1848 snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i); 1849 queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, 1850 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name"); 1851 queue_list = SYSCTL_CHILDREN(queue_node); 1852 1853 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs", 1854 CTLFLAG_RD, &(rx_que->irqs), "IRQs on queue"); 1855 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets", 1856 CTLFLAG_RD, &(rxr->rx_packets), "RX packets"); 1857 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes", 1858 CTLFLAG_RD, &(rxr->rx_bytes), "RX bytes"); 1859 SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded", 1860 CTLFLAG_RD, &(rxr->rx_discarded), "Discarded RX packets"); 1861 } 1862 1863 stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac", 1864 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 1865 "VF Statistics (read from HW registers)"); 1866 stat_list = SYSCTL_CHILDREN(stat_node); 1867 1868 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd", 1869 CTLFLAG_RD, &stats->vfgprc, "Good Packets Received"); 1870 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd", 1871 CTLFLAG_RD, &stats->vfgorc, "Good Octets Received"); 1872 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd", 1873 CTLFLAG_RD, &stats->vfmprc, "Multicast Packets Received"); 1874 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd", 1875 CTLFLAG_RD, &stats->vfgptc, "Good Packets Transmitted"); 1876 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd", 1877 CTLFLAG_RD, &stats->vfgotc, "Good Octets Transmitted"); 1878 } /* ixv_add_stats_sysctls */ 1879 1880 /************************************************************************ 1881 * ixv_print_debug_info 1882 * 1883 * Called only when em_display_debug_stats is enabled. 1884 * Provides a way to take a look at important statistics 1885 * maintained by the driver and hardware. 1886 ************************************************************************/ 1887 static void 1888 ixv_print_debug_info(struct ixgbe_softc *sc) 1889 { 1890 device_t dev = sc->dev; 1891 struct ixgbe_hw *hw = &sc->hw; 1892 1893 device_printf(dev, "Error Byte Count = %u \n", 1894 IXGBE_READ_REG(hw, IXGBE_ERRBC)); 1895 1896 device_printf(dev, "MBX IRQ Handled: %lu\n", (long)sc->link_irq); 1897 } /* ixv_print_debug_info */ 1898 1899 /************************************************************************ 1900 * ixv_sysctl_debug 1901 ************************************************************************/ 1902 static int 1903 ixv_sysctl_debug(SYSCTL_HANDLER_ARGS) 1904 { 1905 struct ixgbe_softc *sc; 1906 int error, result; 1907 1908 result = -1; 1909 error = sysctl_handle_int(oidp, &result, 0, req); 1910 1911 if (error || !req->newptr) 1912 return (error); 1913 1914 if (result == 1) { 1915 sc = (struct ixgbe_softc *)arg1; 1916 ixv_print_debug_info(sc); 1917 } 1918 1919 return error; 1920 } /* ixv_sysctl_debug */ 1921 1922 /************************************************************************ 1923 * ixv_init_device_features 1924 ************************************************************************/ 1925 static void 1926 ixv_init_device_features(struct ixgbe_softc *sc) 1927 { 1928 sc->feat_cap = IXGBE_FEATURE_NETMAP 1929 | IXGBE_FEATURE_VF 1930 | IXGBE_FEATURE_LEGACY_TX; 1931 1932 /* A tad short on feature flags for VFs, atm. */ 1933 switch (sc->hw.mac.type) { 1934 case ixgbe_mac_82599_vf: 1935 break; 1936 case ixgbe_mac_X540_vf: 1937 break; 1938 case ixgbe_mac_X550_vf: 1939 case ixgbe_mac_X550EM_x_vf: 1940 case ixgbe_mac_X550EM_a_vf: 1941 sc->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD; 1942 sc->feat_cap |= IXGBE_FEATURE_RSS; 1943 break; 1944 default: 1945 break; 1946 } 1947 1948 /* Enabled by default... */ 1949 /* Is a virtual function (VF) */ 1950 if (sc->feat_cap & IXGBE_FEATURE_VF) 1951 sc->feat_en |= IXGBE_FEATURE_VF; 1952 /* Netmap */ 1953 if (sc->feat_cap & IXGBE_FEATURE_NETMAP) 1954 sc->feat_en |= IXGBE_FEATURE_NETMAP; 1955 /* Receive-Side Scaling (RSS) */ 1956 if (sc->feat_cap & IXGBE_FEATURE_RSS) 1957 sc->feat_en |= IXGBE_FEATURE_RSS; 1958 /* Needs advanced context descriptor regardless of offloads req'd */ 1959 if (sc->feat_cap & IXGBE_FEATURE_NEEDS_CTXD) 1960 sc->feat_en |= IXGBE_FEATURE_NEEDS_CTXD; 1961 } /* ixv_init_device_features */ 1962 1963