/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/


#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"

#include "ixgbe.h"
#include "ifdi_if.h"

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>

/************************************************************************
 * Driver version
 ************************************************************************/
char ixv_driver_version[] = "2.0.1-k";

/************************************************************************
 * PCI Device ID Table
 *
 *   Used by probe to select devices to load on
 *   Last field stores an index into ixv_strings
 *   Last entry must be all 0s
 *
 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 ************************************************************************/
static pci_vendor_info_t ixv_vendor_info_array[] =
{
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, "Intel(R) X520 82599 Virtual Function"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, "Intel(R) X540 Virtual Function"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, "Intel(R) X550 Virtual Function"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, "Intel(R) X552 Virtual Function"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, "Intel(R) X553 Virtual Function"),
	/* required last entry */
	PVID_END
};

/************************************************************************
 * Function prototypes
 ************************************************************************/
static void *ixv_register(device_t);
static int ixv_if_attach_pre(if_ctx_t);
static int ixv_if_attach_post(if_ctx_t);
static int ixv_if_detach(if_ctx_t);

static int ixv_if_rx_queue_intr_enable(if_ctx_t, uint16_t);
static int ixv_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
static int ixv_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
static void ixv_if_queues_free(if_ctx_t);
static void ixv_identify_hardware(if_ctx_t);
static void ixv_init_device_features(struct ixgbe_softc *);
static int ixv_allocate_pci_resources(if_ctx_t);
static void ixv_free_pci_resources(if_ctx_t);
static int ixv_setup_interface(if_ctx_t);
static void ixv_if_media_status(if_ctx_t, struct ifmediareq *);
static int ixv_if_media_change(if_ctx_t);
static void ixv_if_update_admin_status(if_ctx_t);
static int ixv_if_msix_intr_assign(if_ctx_t, int);

static int ixv_if_mtu_set(if_ctx_t, uint32_t);
static void ixv_if_init(if_ctx_t);
static void ixv_if_local_timer(if_ctx_t, uint16_t);
static void ixv_if_stop(if_ctx_t);
static int ixv_negotiate_api(struct ixgbe_softc *);

static void ixv_initialize_transmit_units(if_ctx_t);
static void ixv_initialize_receive_units(if_ctx_t);
static void ixv_initialize_rss_mapping(struct ixgbe_softc *);

static void ixv_setup_vlan_support(if_ctx_t);
static void ixv_configure_ivars(struct ixgbe_softc *);
static void ixv_if_enable_intr(if_ctx_t);
static void ixv_if_disable_intr(if_ctx_t);
static void ixv_if_multi_set(if_ctx_t);

static void ixv_if_register_vlan(if_ctx_t, u16);
static void ixv_if_unregister_vlan(if_ctx_t, u16);

static uint64_t ixv_if_get_counter(if_ctx_t, ift_counter);
static bool ixv_if_needs_restart(if_ctx_t, enum iflib_restart_event);

static void ixv_save_stats(struct ixgbe_softc *);
static void ixv_init_stats(struct ixgbe_softc *);
static void ixv_update_stats(struct ixgbe_softc *);
static void ixv_add_stats_sysctls(struct ixgbe_softc *);

static int ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
static void ixv_set_ivar(struct ixgbe_softc *, u8, u8, s8);

static u8 *ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);

/* The MSI-X Interrupt handlers */
static int ixv_msix_que(void *);
static int ixv_msix_mbx(void *);

/************************************************************************
 * FreeBSD Device Interface Entry Points
 ************************************************************************/
static device_method_t ixv_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, ixv_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
	DEVMETHOD_END
};

static driver_t ixv_driver = {
	"ixv", ixv_methods, sizeof(struct ixgbe_softc),
};

DRIVER_MODULE(ixv, pci, ixv_driver, 0, 0);
IFLIB_PNP_INFO(pci, ixv_driver, ixv_vendor_info_array);
MODULE_DEPEND(ixv, iflib, 1, 1, 1);
MODULE_DEPEND(ixv, pci, 1, 1, 1);
MODULE_DEPEND(ixv, ether, 1, 1, 1);

static device_method_t ixv_if_methods[] = {
	DEVMETHOD(ifdi_attach_pre, ixv_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, ixv_if_attach_post),
	DEVMETHOD(ifdi_detach, ixv_if_detach),
	DEVMETHOD(ifdi_init, ixv_if_init),
	DEVMETHOD(ifdi_stop, ixv_if_stop),
	DEVMETHOD(ifdi_msix_intr_assign, ixv_if_msix_intr_assign),
	DEVMETHOD(ifdi_intr_enable, ixv_if_enable_intr),
	DEVMETHOD(ifdi_intr_disable, ixv_if_disable_intr),
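	/*
	 * TX and RX queues share MSI-X vectors in this driver, so the TX
	 * entry point deliberately reuses the RX interrupt-enable routine.
	 */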
	DEVMETHOD(ifdi_tx_queue_intr_enable, ixv_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_rx_queue_intr_enable, ixv_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queues_alloc, ixv_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, ixv_if_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, ixv_if_queues_free),
	DEVMETHOD(ifdi_update_admin_status, ixv_if_update_admin_status),
	DEVMETHOD(ifdi_multi_set, ixv_if_multi_set),
	DEVMETHOD(ifdi_mtu_set, ixv_if_mtu_set),
	DEVMETHOD(ifdi_media_status, ixv_if_media_status),
	DEVMETHOD(ifdi_media_change, ixv_if_media_change),
	DEVMETHOD(ifdi_timer, ixv_if_local_timer),
	DEVMETHOD(ifdi_vlan_register, ixv_if_register_vlan),
	DEVMETHOD(ifdi_vlan_unregister, ixv_if_unregister_vlan),
	DEVMETHOD(ifdi_get_counter, ixv_if_get_counter),
	DEVMETHOD(ifdi_needs_restart, ixv_if_needs_restart),
	DEVMETHOD_END
};

static driver_t ixv_if_driver = {
	"ixv_if", ixv_if_methods, sizeof(struct ixgbe_softc)
};

/*
 * TUNEABLE PARAMETERS:
 */

/* Flow control setting, default to full */
static int ixv_flow_control = ixgbe_fc_full;
TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);

/*
 * Header split: this causes the hardware to DMA
 * the header into a separate mbuf from the payload,
 * it can be a performance win in some workloads, but
 * in others it actually hurts, it's off by default.
 */
static int ixv_header_split = false;
TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);

extern struct if_txrx ixgbe_txrx;

static struct if_shared_ctx ixv_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_q_align = PAGE_SIZE, /* max(DBA_ALIGN, PAGE_SIZE) */
	.isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tx_maxsegsize = PAGE_SIZE,
	.isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = PAGE_SIZE,
	.isc_rx_maxsize = MJUM16BYTES,
	.isc_rx_nsegments = 1,
	.isc_rx_maxsegsize = MJUM16BYTES,
	.isc_nfl = 1,
	.isc_ntxqs = 1,
	.isc_nrxqs = 1,
	.isc_admin_intrcnt = 1,
	.isc_vendor_info = ixv_vendor_info_array,
	.isc_driver_version = ixv_driver_version,
	.isc_driver = &ixv_if_driver,
	.isc_flags = IFLIB_IS_VF | IFLIB_TSO_INIT_IP,

	.isc_nrxd_min = {MIN_RXD},
	.isc_ntxd_min = {MIN_TXD},
	.isc_nrxd_max = {MAX_RXD},
	.isc_ntxd_max = {MAX_TXD},
	.isc_nrxd_default = {DEFAULT_RXD},
	.isc_ntxd_default = {DEFAULT_TXD},
};

static void *
ixv_register(device_t dev)
{
	return (&ixv_sctx_init);
}

/************************************************************************
 * ixv_if_tx_queues_alloc
 ************************************************************************/
static int
ixv_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int ntxqs, int ntxqsets)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_softc_ctx_t scctx = sc->shared;
	struct ix_tx_queue *que;
	int i, j, error;

	MPASS(sc->num_tx_queues == ntxqsets);
	MPASS(ntxqs == 1);

	/* Allocate queue structure memory */
	sc->tx_queues =
	    (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!sc->tx_queues) {
		device_printf(iflib_get_dev(ctx),
		    "Unable to allocate TX ring memory\n");
		return (ENOMEM);
	}

	for (i = 0, que = sc->tx_queues; i < ntxqsets; i++, que++) {
		struct tx_ring *txr = &que->txr;

		txr->me = i;
		txr->sc = que->sc = sc;

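		/*
		 * Allocate the report status array: tx_rsq records, in ring
		 * order, the indices of descriptors written with the RS
		 * (report status) bit so the completion path knows which
		 * entries to examine; unused slots hold QIDX_INVALID.
		 */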
		if (!(txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) *
		    scctx->isc_ntxd[0], M_DEVBUF, M_NOWAIT | M_ZERO))) {
			error = ENOMEM;
			goto fail;
		}
		for (j = 0; j < scctx->isc_ntxd[0]; j++)
			txr->tx_rsq[j] = QIDX_INVALID;
		/* get the virtual and physical address of the hardware queues */
		txr->tail = IXGBE_VFTDT(txr->me);
		txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i*ntxqs];
		txr->tx_paddr = paddrs[i*ntxqs];

		txr->bytes = 0;
		txr->total_packets = 0;
	}

	device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
	    sc->num_tx_queues);

	return (0);

fail:
	ixv_if_queues_free(ctx);

	return (error);
} /* ixv_if_tx_queues_alloc */

/************************************************************************
 * ixv_if_rx_queues_alloc
 ************************************************************************/
static int
ixv_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int nrxqs, int nrxqsets)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ix_rx_queue *que;
	int i, error;

	MPASS(sc->num_rx_queues == nrxqsets);
	MPASS(nrxqs == 1);

	/* Allocate queue structure memory */
	sc->rx_queues =
	    (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue) * nrxqsets,
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!sc->rx_queues) {
		device_printf(iflib_get_dev(ctx),
		    "Unable to allocate RX ring memory\n");
		error = ENOMEM;
		goto fail;
	}

	for (i = 0, que = sc->rx_queues; i < nrxqsets; i++, que++) {
		struct rx_ring *rxr = &que->rxr;
		rxr->me = i;
		rxr->sc = que->sc = sc;

		/* get the virtual and physical address of the hw queues */
		rxr->tail = IXGBE_VFRDT(rxr->me);
		rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
		rxr->rx_paddr = paddrs[i*nrxqs];
		rxr->bytes = 0;
		rxr->que = que;
	}

	device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
	    sc->num_rx_queues);

	return (0);

fail:
	ixv_if_queues_free(ctx);

	return (error);
} /* ixv_if_rx_queues_alloc */

/************************************************************************
 * ixv_if_queues_free
 ************************************************************************/
static void
ixv_if_queues_free(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ix_tx_queue *que = sc->tx_queues;
	int i;

	if (que == NULL)
		goto free;

	for (i = 0; i < sc->num_tx_queues; i++, que++) {
		struct tx_ring *txr = &que->txr;
		if (txr->tx_rsq == NULL)
			break;

		free(txr->tx_rsq, M_DEVBUF);
		txr->tx_rsq = NULL;
	}
	if (sc->tx_queues != NULL)
		free(sc->tx_queues, M_DEVBUF);
free:
	if (sc->rx_queues != NULL)
		free(sc->rx_queues, M_DEVBUF);
	sc->tx_queues = NULL;
	sc->rx_queues = NULL;
} /* ixv_if_queues_free */

/************************************************************************
 * ixv_if_attach_pre - Device initialization routine
 *
 *   Called when the driver is being loaded.
 *   Identifies the type of hardware, allocates all resources
 *   and initializes the hardware.
 *
 *   return 0 on success, positive on failure
 ************************************************************************/
static int
ixv_if_attach_pre(if_ctx_t ctx)
{
	struct ixgbe_softc *sc;
	device_t dev;
	if_softc_ctx_t scctx;
	struct ixgbe_hw *hw;
	int error = 0;

	INIT_DEBUGOUT("ixv_attach: begin");

	/* Allocate, clear, and link in our sc structure */
	dev = iflib_get_dev(ctx);
	sc = iflib_get_softc(ctx);
	sc->dev = dev;
	sc->ctx = ctx;
	sc->hw.back = sc;
	scctx = sc->shared = iflib_get_softc_ctx(ctx);
	sc->media = iflib_get_media(ctx);
	hw = &sc->hw;

	/* Do base PCI setup - map BAR0 */
	if (ixv_allocate_pci_resources(ctx)) {
		device_printf(dev, "ixv_allocate_pci_resources() failed!\n");
		error = ENXIO;
		goto err_out;
	}

	/* SYSCTL APIs */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    sc, 0, ixv_sysctl_debug, "I", "Debug Info");

	/* Determine hardware revision */
	ixv_identify_hardware(ctx);
	ixv_init_device_features(sc);

	/* Initialize the shared code */
	error = ixgbe_init_ops_vf(hw);
	if (error) {
		device_printf(dev, "ixgbe_init_ops_vf() failed!\n");
		error = EIO;
		goto err_out;
	}

	/* Setup the mailbox */
	ixgbe_init_mbx_params_vf(hw);

	error = hw->mac.ops.reset_hw(hw);
	if (error == IXGBE_ERR_RESET_FAILED)
		device_printf(dev, "...reset_hw() failure: Reset Failed!\n");
	else if (error)
		device_printf(dev, "...reset_hw() failed with error %d\n",
		    error);
	if (error) {
		error = EIO;
		goto err_out;
	}

	error = hw->mac.ops.init_hw(hw);
	if (error) {
		device_printf(dev, "...init_hw() failed with error %d\n",
		    error);
		error = EIO;
		goto err_out;
	}

	/* Negotiate mailbox API version */
	error = ixv_negotiate_api(sc);
	if (error) {
		device_printf(dev,
		    "Mailbox API negotiation failed during attach!\n");
		goto err_out;
	}

	/* Check if VF was disabled by PF */
	error = hw->mac.ops.get_link_state(hw, &sc->link_enabled);
	if (error) {
		/* PF is not capable of controlling VF state. Enable the link. */
		sc->link_enabled = true;
	}

	/* If no mac address was assigned, make a random one */
	if (!ixv_check_ether_addr(hw->mac.addr)) {
		ether_gen_addr(iflib_get_ifp(ctx),
		    (struct ether_addr *)hw->mac.addr);
		bcopy(hw->mac.addr, hw->mac.perm_addr,
		    sizeof(hw->mac.perm_addr));
	}

	/* Most of the iflib initialization... */

	iflib_set_mac(ctx, hw->mac.addr);
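	/* X550-class VFs expose two queue pairs; everything older gets one */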
	switch (sc->hw.mac.type) {
	case ixgbe_mac_X550_vf:
	case ixgbe_mac_X550EM_x_vf:
	case ixgbe_mac_X550EM_a_vf:
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 2;
		break;
	default:
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 1;
	}
	scctx->isc_txqsizes[0] =
	    roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
	    sizeof(u32), DBA_ALIGN);
	scctx->isc_rxqsizes[0] =
	    roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
	    DBA_ALIGN);
	/* XXX */
	scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
	    CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
	scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
	scctx->isc_msix_bar = pci_msix_table_bar(dev);
	scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
	scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
	scctx->isc_tx_tso_segsize_max = PAGE_SIZE;

	scctx->isc_txrx = &ixgbe_txrx;

	/*
	 * Tell the upper layer(s) we support everything the PF
	 * driver does except...
	 *   Wake-on-LAN
	 */
	scctx->isc_capabilities = IXGBE_CAPS;
	scctx->isc_capabilities ^= IFCAP_WOL;
	scctx->isc_capenable = scctx->isc_capabilities;

	INIT_DEBUGOUT("ixv_if_attach_pre: end");

	return (0);

err_out:
	ixv_free_pci_resources(ctx);

	return (error);
} /* ixv_if_attach_pre */

static int
ixv_if_attach_post(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	device_t dev = iflib_get_dev(ctx);
	int error = 0;

	/* Setup OS specific network interface */
	error = ixv_setup_interface(ctx);
	if (error) {
		device_printf(dev, "Interface setup failed: %d\n", error);
		goto end;
	}

	/* Do the stats setup */
	ixv_save_stats(sc);
	ixv_init_stats(sc);
	ixv_add_stats_sysctls(sc);

end:
	return error;
} /* ixv_if_attach_post */

/************************************************************************
 * ixv_if_detach - Device removal routine
 *
 *   Called when the driver is being removed.
 *   Stops the adapter and deallocates all the resources
 *   that were allocated for driver operation.
 *
 *   return 0 on success, positive on failure
 ************************************************************************/
static int
ixv_if_detach(if_ctx_t ctx)
{
	INIT_DEBUGOUT("ixv_detach: begin");

	ixv_free_pci_resources(ctx);

	return (0);
} /* ixv_if_detach */

/************************************************************************
 * ixv_if_mtu_set
 ************************************************************************/
static int
ixv_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_t ifp = iflib_get_ifp(ctx);
	int error = 0;

	IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
	if (mtu > IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR) {
		error = EINVAL;
	} else {
		if_setmtu(ifp, mtu);
		sc->max_frame_size = if_getmtu(ifp) + IXGBE_MTU_HDR;
	}

	return error;
} /* ixv_if_mtu_set */

/************************************************************************
 * ixv_if_init - Init entry point
 *
 *   Used in two ways: It is used by the stack as an init entry
 *   point in network interface structure. It is also used
 *   by the driver as a hw/sw initialization routine to get
 *   to a consistent state.
 ************************************************************************/
static void
ixv_if_init(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_t ifp = iflib_get_ifp(ctx);
	device_t dev = iflib_get_dev(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	int error = 0;

	INIT_DEBUGOUT("ixv_if_init: begin");
	hw->adapter_stopped = false;
	hw->mac.ops.stop_adapter(hw);

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	bcopy(if_getlladdr(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);

	/* Reset VF and renegotiate mailbox API version */
	hw->mac.ops.reset_hw(hw);
	hw->mac.ops.start_hw(hw);
	error = ixv_negotiate_api(sc);
	if (error) {
		device_printf(dev,
		    "Mailbox API negotiation failed in if_init!\n");
		return;
	}

	ixv_initialize_transmit_units(ctx);

	/* Setup Multicast table */
	ixv_if_multi_set(ctx);

	sc->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);

	/* Configure RX settings */
	ixv_initialize_receive_units(ctx);

	/* Set up VLAN offload and filter */
	ixv_setup_vlan_support(ctx);

	/* Set up MSI-X routing */
	ixv_configure_ivars(sc);

	/* Set up auto-mask */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(sc->vector), IXGBE_LINK_ITR);

	/* Stats init */
	ixv_init_stats(sc);

	/* Config/Enable Link */
	error = hw->mac.ops.get_link_state(hw, &sc->link_enabled);
	if (error) {
		/* PF is not capable of controlling VF state. Enable the link. */
		sc->link_enabled = true;
	} else if (sc->link_enabled == false)
		device_printf(dev, "VF is disabled by PF\n");

	hw->mac.ops.check_link(hw, &sc->link_speed, &sc->link_up,
	    false);

	/* And now turn on interrupts */
	ixv_if_enable_intr(ctx);

	return;
} /* ixv_if_init */

/************************************************************************
 * ixv_enable_queue
 ************************************************************************/
static inline void
ixv_enable_queue(struct ixgbe_softc *sc, u32 vector)
{
	struct ixgbe_hw *hw = &sc->hw;
	u32 queue = 1 << vector;
	u32 mask;

	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
} /* ixv_enable_queue */

/************************************************************************
 * ixv_disable_queue
 ************************************************************************/
static inline void
ixv_disable_queue(struct ixgbe_softc *sc, u32 vector)
{
	struct ixgbe_hw *hw = &sc->hw;
	u64 queue = (u64)(1 << vector);
	u32 mask;

	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
} /* ixv_disable_queue */


/************************************************************************
 * ixv_msix_que - MSI-X Queue Interrupt Service routine
 ************************************************************************/
static int
ixv_msix_que(void *arg)
{
	struct ix_rx_queue *que = arg;
	struct ixgbe_softc *sc = que->sc;

	ixv_disable_queue(sc, que->msix);
	++que->irqs;

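	/*
	 * The vector was masked above, so ask iflib to run the RX/TX
	 * service for this queue in its taskqueue context; the vector is
	 * unmasked again through ifdi_rx_queue_intr_enable.
	 */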
	return (FILTER_SCHEDULE_THREAD);
} /* ixv_msix_que */

/************************************************************************
 * ixv_msix_mbx
 ************************************************************************/
static int
ixv_msix_mbx(void *arg)
{
	struct ixgbe_softc *sc = arg;
	struct ixgbe_hw *hw = &sc->hw;
	u32 reg;

	++sc->link_irq;

	/* First get the cause */
	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);

	/* Link status change */
	if (reg & IXGBE_EICR_LSC)
		iflib_admin_intr_deferred(sc->ctx);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);

	return (FILTER_HANDLED);
} /* ixv_msix_mbx */

/************************************************************************
 * ixv_if_media_status - Media Ioctl callback
 *
 *   Called whenever the user queries the status of
 *   the interface using ifconfig.
 ************************************************************************/
static void
ixv_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);

	INIT_DEBUGOUT("ixv_media_status: begin");

	iflib_admin_intr_deferred(ctx);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!sc->link_active)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	switch (sc->link_speed) {
	case IXGBE_LINK_SPEED_1GB_FULL:
		ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_10GB_FULL:
		ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_100_FULL:
		ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_10_FULL:
		ifmr->ifm_active |= IFM_10_T | IFM_FDX;
		break;
	}
} /* ixv_if_media_status */

/************************************************************************
 * ixv_if_media_change - Media Ioctl callback
 *
 *   Called when the user changes speed/duplex using
 *   media/mediaopt option with ifconfig.
 ************************************************************************/
static int
ixv_if_media_change(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ifmedia *ifm = iflib_get_media(ctx);

	INIT_DEBUGOUT("ixv_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		break;
	default:
		device_printf(sc->dev, "Only auto media type\n");
		return (EINVAL);
	}

	return (0);
} /* ixv_if_media_change */


/************************************************************************
 * ixv_negotiate_api
 *
 *   Negotiate the Mailbox API with the PF;
 *   start with the most featured API first.
 ************************************************************************/
static int
ixv_negotiate_api(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	int mbx_api[] = { ixgbe_mbox_api_12,
	                  ixgbe_mbox_api_11,
	                  ixgbe_mbox_api_10,
	                  ixgbe_mbox_api_unknown };
	int i = 0;

	while (mbx_api[i] != ixgbe_mbox_api_unknown) {
		if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
			return (0);
		i++;
	}

	return (EINVAL);
} /* ixv_negotiate_api */

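/*
 * if_foreach_llmaddr() callback: copies one link-level multicast address
 * into the flat mta array and advances the running count.
 */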
static u_int
ixv_if_multi_set_cb(void *cb_arg, struct sockaddr_dl *addr, u_int cnt)
{
	bcopy(LLADDR(addr), &((u8 *)cb_arg)[cnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
	    IXGBE_ETH_LENGTH_OF_ADDRESS);

	return (++cnt);
}

/************************************************************************
 * ixv_if_multi_set - Multicast Update
 *
 *   Called whenever multicast address list is updated.
 ************************************************************************/
static void
ixv_if_multi_set(if_ctx_t ctx)
{
	u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	u8 *update_ptr;
	if_t ifp = iflib_get_ifp(ctx);
	int mcnt = 0;

	IOCTL_DEBUGOUT("ixv_if_multi_set: begin");

	mcnt = if_foreach_llmaddr(ifp, ixv_if_multi_set_cb, mta);

	update_ptr = mta;

	sc->hw.mac.ops.update_mc_addr_list(&sc->hw, update_ptr, mcnt,
	    ixv_mc_array_itr, true);
} /* ixv_if_multi_set */

/************************************************************************
 * ixv_mc_array_itr
 *
 *   An iterator function needed by the multicast shared code.
 *   It feeds the shared code routine the addresses in the
 *   array of ixv_if_multi_set() one by one.
 ************************************************************************/
static u8 *
ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
{
	u8 *addr = *update_ptr;
	u8 *newptr;

	*vmdq = 0;

	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
	*update_ptr = newptr;

	return addr;
} /* ixv_mc_array_itr */

/************************************************************************
 * ixv_if_local_timer - Timer routine
 *
 *   Checks for link status, updates statistics,
 *   and runs the watchdog check.
 ************************************************************************/
static void
ixv_if_local_timer(if_ctx_t ctx, uint16_t qid)
{
	if (qid != 0)
		return;

	/* Fire off the adminq task */
	iflib_admin_intr_deferred(ctx);
} /* ixv_if_local_timer */

/************************************************************************
 * ixv_if_update_admin_status - Update OS on link state
 *
 *   Note: Only updates the OS on the cached link state.
 *         The real check of the hardware only happens with
 *         a link interrupt.
 ************************************************************************/
static void
ixv_if_update_admin_status(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	device_t dev = iflib_get_dev(ctx);
	s32 status;

	sc->hw.mac.get_link_status = true;

	status = ixgbe_check_link(&sc->hw, &sc->link_speed,
	    &sc->link_up, false);

	if (status != IXGBE_SUCCESS && sc->hw.adapter_stopped == false) {
		/* Mailbox's Clear To Send status is lost or timeout occurred.
		 * We need reinitialization. */
		if_init(iflib_get_ifp(ctx), ctx);
	}

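	/*
	 * IXGBE_LINK_SPEED_10GB_FULL is 0x80, hence the 128-to-10 Gbps
	 * mapping in the bootverbose message below.
	 */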
	if (sc->link_up && sc->link_enabled) {
		if (sc->link_active == false) {
			if (bootverbose)
				device_printf(dev, "Link is up %d Gbps %s \n",
				    ((sc->link_speed == 128) ? 10 : 1),
				    "Full Duplex");
			sc->link_active = true;
			iflib_link_state_change(ctx, LINK_STATE_UP,
			    ixgbe_link_speed_to_baudrate(sc->link_speed));
		}
	} else { /* Link down */
		if (sc->link_active == true) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
			sc->link_active = false;
		}
	}

	/* Stats Update */
	ixv_update_stats(sc);
} /* ixv_if_update_admin_status */


/************************************************************************
 * ixv_if_stop - Stop the hardware
 *
 *   Disables all traffic on the adapter by issuing a
 *   global reset on the MAC and deallocates TX/RX buffers.
 ************************************************************************/
static void
ixv_if_stop(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;

	INIT_DEBUGOUT("ixv_stop: begin\n");

	ixv_if_disable_intr(ctx);

	hw->mac.ops.reset_hw(hw);
	/* Clear the stopped flag so stop_adapter() actually runs */
	sc->hw.adapter_stopped = false;
	hw->mac.ops.stop_adapter(hw);

	/* Update the stack */
	sc->link_up = false;
	ixv_if_update_admin_status(ctx);

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
} /* ixv_if_stop */


/************************************************************************
 * ixv_identify_hardware - Determine hardware revision.
 ************************************************************************/
static void
ixv_identify_hardware(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	device_t dev = iflib_get_dev(ctx);
	struct ixgbe_hw *hw = &sc->hw;

	/* Save off the information about this board */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_get_revid(dev);
	hw->subsystem_vendor_id = pci_get_subvendor(dev);
	hw->subsystem_device_id = pci_get_subdevice(dev);

	/* A subset of set_mac_type */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_VF:
		hw->mac.type = ixgbe_mac_82599_vf;
		break;
	case IXGBE_DEV_ID_X540_VF:
		hw->mac.type = ixgbe_mac_X540_vf;
		break;
	case IXGBE_DEV_ID_X550_VF:
		hw->mac.type = ixgbe_mac_X550_vf;
		break;
	case IXGBE_DEV_ID_X550EM_X_VF:
		hw->mac.type = ixgbe_mac_X550EM_x_vf;
		break;
	case IXGBE_DEV_ID_X550EM_A_VF:
		hw->mac.type = ixgbe_mac_X550EM_a_vf;
		break;
	default:
		device_printf(dev, "unknown mac type\n");
		hw->mac.type = ixgbe_mac_unknown;
		break;
	}
} /* ixv_identify_hardware */

/************************************************************************
 * ixv_if_msix_intr_assign - Setup MSI-X Interrupt resources and handlers
 ************************************************************************/
static int
ixv_if_msix_intr_assign(if_ctx_t ctx, int msix)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	device_t dev = iflib_get_dev(ctx);
	struct ix_rx_queue *rx_que = sc->rx_queues;
	struct ix_tx_queue *tx_que;
	int error, rid, vector = 0;
	char buf[16];

	for (int i = 0; i < sc->num_rx_queues; i++, vector++, rx_que++) {
		rid = vector + 1;

		snprintf(buf, sizeof(buf), "rxq%d", i);
		error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
		    IFLIB_INTR_RXTX, ixv_msix_que, rx_que, rx_que->rxr.me, buf);

		if (error) {
			device_printf(iflib_get_dev(ctx),
			    "Failed to allocate que int %d err: %d", i, error);
			sc->num_rx_queues = i + 1;
			goto fail;
		}

		rx_que->msix = vector;
	}

	for (int i = 0; i < sc->num_tx_queues; i++) {
		snprintf(buf, sizeof(buf), "txq%d", i);
		tx_que = &sc->tx_queues[i];
		tx_que->msix = i % sc->num_rx_queues;
		iflib_softirq_alloc_generic(ctx,
		    &sc->rx_queues[tx_que->msix].que_irq,
		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
	}
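	/* The last vector, one past the queue vectors, services the mailbox */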
	rid = vector + 1;
	error = iflib_irq_alloc_generic(ctx, &sc->irq, rid,
	    IFLIB_INTR_ADMIN, ixv_msix_mbx, sc, 0, "aq");
	if (error) {
		device_printf(iflib_get_dev(ctx),
		    "Failed to register admin handler");
		return (error);
	}

	sc->vector = vector;
	/*
	 * Due to a broken design QEMU will fail to properly
	 * enable the guest for MSIX unless the vectors in
	 * the table are all set up, so we must rewrite the
	 * ENABLE in the MSIX control register again at this
	 * point to cause it to successfully initialize us.
	 */
	if (sc->hw.mac.type == ixgbe_mac_82599_vf) {
		int msix_ctrl;
		pci_find_cap(dev, PCIY_MSIX, &rid);
		rid += PCIR_MSIX_CTRL;
		msix_ctrl = pci_read_config(dev, rid, 2);
		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
		pci_write_config(dev, rid, msix_ctrl, 2);
	}

	return (0);

fail:
	iflib_irq_free(ctx, &sc->irq);
	rx_que = sc->rx_queues;
	for (int i = 0; i < sc->num_rx_queues; i++, rx_que++)
		iflib_irq_free(ctx, &rx_que->que_irq);

	return (error);
} /* ixv_if_msix_intr_assign */

/************************************************************************
 * ixv_allocate_pci_resources
 ************************************************************************/
static int
ixv_allocate_pci_resources(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	device_t dev = iflib_get_dev(ctx);
	int rid;

	rid = PCIR_BAR(0);
	sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);

	if (!(sc->pci_mem)) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		return (ENXIO);
	}

	sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->pci_mem);
	sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->pci_mem);
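	/*
	 * hw_addr is not a directly-mapped register window here; register
	 * access goes through the bus_space tag/handle above, so just
	 * record the handle's address for the osdep accessors.
	 */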
	sc->hw.hw_addr = (u8 *)&sc->osdep.mem_bus_space_handle;

	return (0);
} /* ixv_allocate_pci_resources */

/************************************************************************
 * ixv_free_pci_resources
 ************************************************************************/
static void
ixv_free_pci_resources(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ix_rx_queue *que = sc->rx_queues;
	device_t dev = iflib_get_dev(ctx);

	/* Release all MSI-X queue resources */
	if (sc->intr_type == IFLIB_INTR_MSIX)
		iflib_irq_free(ctx, &sc->irq);

	if (que != NULL) {
		for (int i = 0; i < sc->num_rx_queues; i++, que++) {
			iflib_irq_free(ctx, &que->que_irq);
		}
	}

	if (sc->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->pci_mem), sc->pci_mem);
} /* ixv_free_pci_resources */

/************************************************************************
 * ixv_setup_interface
 *
 *   Setup networking device structure and register an interface.
 ************************************************************************/
static int
ixv_setup_interface(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_softc_ctx_t scctx = sc->shared;
	if_t ifp = iflib_get_ifp(ctx);

	INIT_DEBUGOUT("ixv_setup_interface: begin");

	if_setbaudrate(ifp, IF_Gbps(10));
	if_setsendqlen(ifp, scctx->isc_ntxd[0] - 2);

	sc->max_frame_size = if_getmtu(ifp) + IXGBE_MTU_HDR;
	ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);

	return 0;
} /* ixv_setup_interface */

/************************************************************************
 * ixv_if_get_counter
 ************************************************************************/
static uint64_t
ixv_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_t ifp = iflib_get_ifp(ctx);

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (sc->ipackets);
	case IFCOUNTER_OPACKETS:
		return (sc->opackets);
	case IFCOUNTER_IBYTES:
		return (sc->ibytes);
	case IFCOUNTER_OBYTES:
		return (sc->obytes);
	case IFCOUNTER_IMCASTS:
		return (sc->imcasts);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
} /* ixv_if_get_counter */

/* ixv_if_needs_restart - Tell iflib when the driver needs to be reinitialized
 * @ctx: iflib context
 * @event: event code to check
 *
 * Defaults to returning true for every event.
 *
 * @returns true if iflib needs to reinit the interface
 */
static bool
ixv_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
{
	switch (event) {
	case IFLIB_RESTART_VLAN_CONFIG:
		/* XXX: This may not need to return true */
	default:
		return (true);
	}
}

/************************************************************************
 * ixv_initialize_transmit_units - Enable transmit unit.
 ************************************************************************/
static void
ixv_initialize_transmit_units(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	if_softc_ctx_t scctx = sc->shared;
	struct ix_tx_queue *que = sc->tx_queues;
	int i;

	for (i = 0; i < sc->num_tx_queues; i++, que++) {
		struct tx_ring *txr = &que->txr;
		u64 tdba = txr->tx_paddr;
		u32 txctrl, txdctl;
		int j = txr->me;

		/* Set WTHRESH to 8, burst writeback */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);

		/* Set the HW Tx Head and Tail indices */
		IXGBE_WRITE_REG(&sc->hw, IXGBE_VFTDH(j), 0);
		IXGBE_WRITE_REG(&sc->hw, IXGBE_VFTDT(j), 0);

		/* Set Tx Tail register */
		txr->tail = IXGBE_VFTDT(j);

		txr->tx_rs_cidx = txr->tx_rs_pidx;
		/* Initialize the last processed descriptor to be the end of
		 * the ring, rather than the start, so that we avoid an
		 * off-by-one error when calculating how many descriptors are
		 * done in the credits_update function.
		 */
		txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
		for (int k = 0; k < scctx->isc_ntxd[0]; k++)
			txr->tx_rsq[k] = QIDX_INVALID;

		/* Set Ring parameters */
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j),
		    scctx->isc_ntxd[0] * sizeof(struct ixgbe_legacy_tx_desc));
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);

		/* Now enable */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	return;
} /* ixv_initialize_transmit_units */

/************************************************************************
 * ixv_initialize_rss_mapping
 ************************************************************************/
static void
ixv_initialize_rss_mapping(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	u32 reta = 0, mrqc, rss_key[10];
	int queue_id;
	int i, j;
	u32 rss_hash_config;

	if (sc->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *)&rss_key);
	} else {
		/* set up random bits */
		arc4rand(&rss_key, sizeof(rss_key), 0);
	}

	/* Now fill out hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);

	/* Set up the redirection table */
	for (i = 0, j = 0; i < 64; i++, j++) {
		if (j == sc->num_rx_queues)
			j = 0;

		if (sc->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_rx_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % sc->num_rx_queues;
		} else
			queue_id = j;

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
		reta >>= 8;
		reta |= ((uint32_t)queue_id) << 24;
		if ((i & 3) == 3) {
			IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
			reta = 0;
		}
	}

	/* Perform hash on these packet types */
	if (sc->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
		                | RSS_HASHTYPE_RSS_TCP_IPV4
		                | RSS_HASHTYPE_RSS_IPV6
		                | RSS_HASHTYPE_RSS_TCP_IPV6;
	}

	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		device_printf(sc->dev,
		    "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		device_printf(sc->dev,
		    "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		device_printf(sc->dev,
		    "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
		    __func__);
	IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
} /* ixv_initialize_rss_mapping */


/************************************************************************
 * ixv_initialize_receive_units - Setup receive registers and features.
 ************************************************************************/
static void
ixv_initialize_receive_units(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_softc_ctx_t scctx;
	struct ixgbe_hw *hw = &sc->hw;
	if_t ifp = iflib_get_ifp(ctx);
	struct ix_rx_queue *que = sc->rx_queues;
	u32 bufsz, psrtype;

	if (if_getmtu(ifp) > ETHERMTU)
		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	else
		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	psrtype = IXGBE_PSRTYPE_TCPHDR
	        | IXGBE_PSRTYPE_UDPHDR
	        | IXGBE_PSRTYPE_IPV4HDR
	        | IXGBE_PSRTYPE_IPV6HDR
	        | IXGBE_PSRTYPE_L2HDR;

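	/* Bit 29 of PSRTYPE advertises more than one RSS queue per pool */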
	if (sc->num_rx_queues > 1)
		psrtype |= 1 << 29;

	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);

	/* Tell PF our max_frame size */
	if (ixgbevf_rlpml_set_vf(hw, sc->max_frame_size) != 0) {
		device_printf(sc->dev,
		    "There is a problem with the PF setup.  It is likely the "
		    "receive unit for this VF will not function correctly.\n");
	}
	scctx = sc->shared;

	for (int i = 0; i < sc->num_rx_queues; i++, que++) {
		struct rx_ring *rxr = &que->rxr;
		u64 rdba = rxr->rx_paddr;
		u32 reg, rxdctl;
		int j = rxr->me;

		/* Disable the queue */
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
		for (int k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
			    IXGBE_RXDCTL_ENABLE)
				msec_delay(1);
			else
				break;
		}
		wmb();
		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j),
		    scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));

		/* Reset the ring indices */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);

		/* Set up the SRRCTL register */
		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(j));
		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		reg |= bufsz;
		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(j), reg);

		/* Capture Rx Tail index */
		rxr->tail = IXGBE_VFRDT(rxr->me);

		/* Do the queue enabling last */
		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
		for (int l = 0; l < 10; l++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			msec_delay(1);
		}
		wmb();

		/* Set the Tail Pointer */
#ifdef DEV_NETMAP
		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
		if (if_getcapenable(ifp) & IFCAP_NETMAP) {
			struct netmap_adapter *na = NA(ifp);
			struct netmap_kring *kring = na->rx_rings[j];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
			    scctx->isc_nrxd[0] - 1);
	}

	/*
	 * Do not touch RSS and RETA settings for older hardware
	 * as those are shared among PF and all VF.
	 */
	if (sc->hw.mac.type >= ixgbe_mac_X550_vf)
		ixv_initialize_rss_mapping(sc);
} /* ixv_initialize_receive_units */

/************************************************************************
 * ixv_setup_vlan_support
 ************************************************************************/
static void
ixv_setup_vlan_support(if_ctx_t ctx)
{
	if_t ifp = iflib_get_ifp(ctx);
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	u32 ctrl, vid, vfta, retry;

	/*
	 * We get here through if_init, meaning
	 * a soft reset, this has already cleared
	 * the VFTA and other state, so if there
	 * have been no VLANs registered do nothing.
	 */
	if (sc->num_vlans == 0)
		return;

	if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) {
		/* Enable the queues */
		for (int i = 0; i < sc->num_rx_queues; i++) {
			ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
			ctrl |= IXGBE_RXDCTL_VME;
			IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
			/*
			 * Let Rx path know that it needs to store VLAN tag
			 * as part of extra mbuf info.
			 */
			sc->rx_queues[i].rxr.vtag_strip = true;
		}
	}

	/*
	 * If filtering VLAN tags is disabled,
	 * there is no need to fill VLAN Filter Table Array (VFTA).
	 */
	if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0)
		return;

	/*
	 * A soft reset zeroes out the VFTA, so
	 * we need to repopulate it now.
	 */
	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
		if (sc->shadow_vfta[i] == 0)
			continue;
		vfta = sc->shadow_vfta[i];
		/*
		 * Reconstruct the VLAN IDs
		 * based on the bits set in each
		 * of the array ints.
		 */
		for (int j = 0; j < 32; j++) {
			retry = 0;
			if ((vfta & (1 << j)) == 0)
				continue;
			vid = (i * 32) + j;
			/* Call the shared code mailbox routine */
			while (hw->mac.ops.set_vfta(hw, vid, 0, true, false)) {
				if (++retry > 5)
					break;
			}
		}
	}
} /* ixv_setup_vlan_support */

/************************************************************************
 * ixv_if_register_vlan
 *
 *   Run via a vlan config EVENT, it enables us to use the
 *   HW Filter table since we can get the vlan id. This just
 *   creates the entry in the soft version of the VFTA, init
 *   will repopulate the real table.
 ************************************************************************/
static void
ixv_if_register_vlan(if_ctx_t ctx, u16 vtag)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	u16 index, bit;

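	/* The 12-bit VLAN id selects one of 128 32-bit words and a bit within it */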
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	sc->shadow_vfta[index] |= (1 << bit);
	++sc->num_vlans;
} /* ixv_if_register_vlan */

/************************************************************************
 * ixv_if_unregister_vlan
 *
 *   Run via a vlan unconfig EVENT, remove our entry
 *   in the soft vfta.
 ************************************************************************/
static void
ixv_if_unregister_vlan(if_ctx_t ctx, u16 vtag)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	u16 index, bit;

	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	sc->shadow_vfta[index] &= ~(1 << bit);
	--sc->num_vlans;
} /* ixv_if_unregister_vlan */

/************************************************************************
 * ixv_if_enable_intr
 ************************************************************************/
static void
ixv_if_enable_intr(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	struct ix_rx_queue *que = sc->rx_queues;
	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);

	mask = IXGBE_EIMS_ENABLE_MASK;
	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);

	for (int i = 0; i < sc->num_rx_queues; i++, que++)
		ixv_enable_queue(sc, que->msix);

	IXGBE_WRITE_FLUSH(hw);
} /* ixv_if_enable_intr */

/************************************************************************
 * ixv_if_disable_intr
 ************************************************************************/
static void
ixv_if_disable_intr(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	IXGBE_WRITE_REG(&sc->hw, IXGBE_VTEIAC, 0);
	IXGBE_WRITE_REG(&sc->hw, IXGBE_VTEIMC, ~0);
	IXGBE_WRITE_FLUSH(&sc->hw);
} /* ixv_if_disable_intr */

/************************************************************************
 * ixv_if_rx_queue_intr_enable
 ************************************************************************/
static int
ixv_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ix_rx_queue *que = &sc->rx_queues[rxqid];

	ixv_enable_queue(sc, que->rxr.me);

	return (0);
} /* ixv_if_rx_queue_intr_enable */

/************************************************************************
 * ixv_set_ivar
 *
 *   Setup the correct IVAR register for a particular MSI-X interrupt
 *     - entry is the register array entry
 *     - vector is the MSI-X vector for this queue
 *     - type is RX/TX/MISC
 ************************************************************************/
static void
ixv_set_ivar(struct ixgbe_softc *sc, u8 entry, u8 vector, s8 type)
{
	struct ixgbe_hw *hw = &sc->hw;
	u32 ivar, index;

	vector |= IXGBE_IVAR_ALLOC_VAL;

	if (type == -1) { /* MISC IVAR */
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else { /* RX/TX IVARS */
		index = (16 * (entry & 1)) + (8 * type);
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
	}
} /* ixv_set_ivar */

/************************************************************************
 * ixv_configure_ivars
 ************************************************************************/
static void
ixv_configure_ivars(struct ixgbe_softc *sc)
{
	struct ix_rx_queue *que = sc->rx_queues;

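	/* Each RX/TX ring pair shares one vector, programmed into both IVARs */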
	MPASS(sc->num_rx_queues == sc->num_tx_queues);

	for (int i = 0; i < sc->num_rx_queues; i++, que++) {
		/* First the RX queue entry */
		ixv_set_ivar(sc, i, que->msix, 0);
		/* ... and the TX */
		ixv_set_ivar(sc, i, que->msix, 1);
		/* Set an initial value in EITR */
		IXGBE_WRITE_REG(&sc->hw, IXGBE_VTEITR(que->msix),
		    IXGBE_EITR_DEFAULT);
	}

	/* For the mailbox interrupt */
	ixv_set_ivar(sc, 1, sc->vector, -1);
} /* ixv_configure_ivars */

/************************************************************************
 * ixv_save_stats
 *
 *   The VF stats registers never have a truly virgin
 *   starting point, so this routine tries to make an
 *   artificial one, marking ground zero on attach as
 *   it were.
 ************************************************************************/
static void
ixv_save_stats(struct ixgbe_softc *sc)
{
	if (sc->stats.vf.vfgprc || sc->stats.vf.vfgptc) {
		sc->stats.vf.saved_reset_vfgprc +=
		    sc->stats.vf.vfgprc - sc->stats.vf.base_vfgprc;
		sc->stats.vf.saved_reset_vfgptc +=
		    sc->stats.vf.vfgptc - sc->stats.vf.base_vfgptc;
		sc->stats.vf.saved_reset_vfgorc +=
		    sc->stats.vf.vfgorc - sc->stats.vf.base_vfgorc;
		sc->stats.vf.saved_reset_vfgotc +=
		    sc->stats.vf.vfgotc - sc->stats.vf.base_vfgotc;
		sc->stats.vf.saved_reset_vfmprc +=
		    sc->stats.vf.vfmprc - sc->stats.vf.base_vfmprc;
	}
} /* ixv_save_stats */

/************************************************************************
 * ixv_init_stats
 ************************************************************************/
static void
ixv_init_stats(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;

	sc->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
	sc->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
	sc->stats.vf.last_vfgorc |=
	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);

	sc->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
	sc->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
	sc->stats.vf.last_vfgotc |=
	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);

	sc->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

	sc->stats.vf.base_vfgprc = sc->stats.vf.last_vfgprc;
	sc->stats.vf.base_vfgorc = sc->stats.vf.last_vfgorc;
	sc->stats.vf.base_vfgptc = sc->stats.vf.last_vfgptc;
	sc->stats.vf.base_vfgotc = sc->stats.vf.last_vfgotc;
	sc->stats.vf.base_vfmprc = sc->stats.vf.last_vfmprc;
} /* ixv_init_stats */

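/*
 * The VF statistics registers are free-running and only 32 (or 36) bits
 * wide; these helpers detect wrap-around and accumulate the values into
 * 64-bit software counters.
 */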
 ************************************************************************/
static void
ixv_update_stats(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	struct ixgbevf_hw_stats *stats = &sc->stats.vf;

	UPDATE_STAT_32(IXGBE_VFGPRC, sc->stats.vf.last_vfgprc,
	    sc->stats.vf.vfgprc);
	UPDATE_STAT_32(IXGBE_VFGPTC, sc->stats.vf.last_vfgptc,
	    sc->stats.vf.vfgptc);
	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
	    sc->stats.vf.last_vfgorc, sc->stats.vf.vfgorc);
	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
	    sc->stats.vf.last_vfgotc, sc->stats.vf.vfgotc);
	UPDATE_STAT_32(IXGBE_VFMPRC, sc->stats.vf.last_vfmprc,
	    sc->stats.vf.vfmprc);

	/* Fill out the OS statistics structure */
	IXGBE_SET_IPACKETS(sc, stats->vfgprc);
	IXGBE_SET_OPACKETS(sc, stats->vfgptc);
	IXGBE_SET_IBYTES(sc, stats->vfgorc);
	IXGBE_SET_OBYTES(sc, stats->vfgotc);
	IXGBE_SET_IMCASTS(sc, stats->vfmprc);
} /* ixv_update_stats */

/************************************************************************
 * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
 ************************************************************************/
static void
ixv_add_stats_sysctls(struct ixgbe_softc *sc)
{
	device_t dev = sc->dev;
	struct ix_tx_queue *tx_que = sc->tx_queues;
	struct ix_rx_queue *rx_que = sc->rx_queues;
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
	struct ixgbevf_hw_stats *stats = &sc->stats.vf;
	struct sysctl_oid *stat_node, *queue_node;
	struct sysctl_oid_list *stat_list, *queue_list;

#define QUEUE_NAME_LEN 32
	char namebuf[QUEUE_NAME_LEN];

	/* Driver Statistics */
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
	    CTLFLAG_RD, &sc->watchdog_events, "Watchdog timeouts");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
	    CTLFLAG_RD, &sc->link_irq, "Link MSI-X IRQ Handled");

	for (int i = 0; i < sc->num_tx_queues; i++, tx_que++) {
		struct tx_ring *txr = &tx_que->txr;

		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
		queue_list = SYSCTL_CHILDREN(queue_node);

		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
		    CTLFLAG_RD, &(txr->tso_tx), "TSO Packets");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
		    CTLFLAG_RD, &(txr->total_packets), "TX Packets");
	}

	for (int i = 0; i < sc->num_rx_queues; i++, rx_que++) {
		struct rx_ring *rxr = &rx_que->rxr;

		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
		queue_list = SYSCTL_CHILDREN(queue_node);

		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
		    CTLFLAG_RD, &(rx_que->irqs), "IRQs on queue");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
		    CTLFLAG_RD, &(rxr->rx_packets), "RX packets");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
		    CTLFLAG_RD, &(rxr->rx_bytes), "RX bytes");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
		    CTLFLAG_RD, &(rxr->rx_discarded), "Discarded RX packets");
	}
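
	/*
	 * The MAC-level statistics below export the 64-bit software
	 * counters accumulated by ixv_update_stats() from the VF
	 * hardware registers.
	 */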
"mac", 1859 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 1860 "VF Statistics (read from HW registers)"); 1861 stat_list = SYSCTL_CHILDREN(stat_node); 1862 1863 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd", 1864 CTLFLAG_RD, &stats->vfgprc, "Good Packets Received"); 1865 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd", 1866 CTLFLAG_RD, &stats->vfgorc, "Good Octets Received"); 1867 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd", 1868 CTLFLAG_RD, &stats->vfmprc, "Multicast Packets Received"); 1869 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd", 1870 CTLFLAG_RD, &stats->vfgptc, "Good Packets Transmitted"); 1871 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd", 1872 CTLFLAG_RD, &stats->vfgotc, "Good Octets Transmitted"); 1873 } /* ixv_add_stats_sysctls */ 1874 1875 /************************************************************************ 1876 * ixv_print_debug_info 1877 * 1878 * Called only when em_display_debug_stats is enabled. 1879 * Provides a way to take a look at important statistics 1880 * maintained by the driver and hardware. 1881 ************************************************************************/ 1882 static void 1883 ixv_print_debug_info(struct ixgbe_softc *sc) 1884 { 1885 device_t dev = sc->dev; 1886 struct ixgbe_hw *hw = &sc->hw; 1887 1888 device_printf(dev, "Error Byte Count = %u \n", 1889 IXGBE_READ_REG(hw, IXGBE_ERRBC)); 1890 1891 device_printf(dev, "MBX IRQ Handled: %lu\n", (long)sc->link_irq); 1892 } /* ixv_print_debug_info */ 1893 1894 /************************************************************************ 1895 * ixv_sysctl_debug 1896 ************************************************************************/ 1897 static int 1898 ixv_sysctl_debug(SYSCTL_HANDLER_ARGS) 1899 { 1900 struct ixgbe_softc *sc; 1901 int error, result; 1902 1903 result = -1; 1904 error = sysctl_handle_int(oidp, &result, 0, req); 1905 1906 if (error || !req->newptr) 1907 return (error); 1908 1909 if (result == 1) { 1910 sc = (struct ixgbe_softc *)arg1; 1911 ixv_print_debug_info(sc); 1912 } 1913 1914 return error; 1915 } /* ixv_sysctl_debug */ 1916 1917 /************************************************************************ 1918 * ixv_init_device_features 1919 ************************************************************************/ 1920 static void 1921 ixv_init_device_features(struct ixgbe_softc *sc) 1922 { 1923 sc->feat_cap = IXGBE_FEATURE_NETMAP 1924 | IXGBE_FEATURE_VF 1925 | IXGBE_FEATURE_LEGACY_TX; 1926 1927 /* A tad short on feature flags for VFs, atm. */ 1928 switch (sc->hw.mac.type) { 1929 case ixgbe_mac_82599_vf: 1930 break; 1931 case ixgbe_mac_X540_vf: 1932 break; 1933 case ixgbe_mac_X550_vf: 1934 case ixgbe_mac_X550EM_x_vf: 1935 case ixgbe_mac_X550EM_a_vf: 1936 sc->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD; 1937 sc->feat_cap |= IXGBE_FEATURE_RSS; 1938 break; 1939 default: 1940 break; 1941 } 1942 1943 /* Enabled by default... */ 1944 /* Is a virtual function (VF) */ 1945 if (sc->feat_cap & IXGBE_FEATURE_VF) 1946 sc->feat_en |= IXGBE_FEATURE_VF; 1947 /* Netmap */ 1948 if (sc->feat_cap & IXGBE_FEATURE_NETMAP) 1949 sc->feat_en |= IXGBE_FEATURE_NETMAP; 1950 /* Receive-Side Scaling (RSS) */ 1951 if (sc->feat_cap & IXGBE_FEATURE_RSS) 1952 sc->feat_en |= IXGBE_FEATURE_RSS; 1953 /* Needs advanced context descriptor regardless of offloads req'd */ 1954 if (sc->feat_cap & IXGBE_FEATURE_NEEDS_CTXD) 1955 sc->feat_en |= IXGBE_FEATURE_NEEDS_CTXD; 1956 } /* ixv_init_device_features */ 1957 1958