1 /****************************************************************************** 2 3 Copyright (c) 2001-2017, Intel Corporation 4 All rights reserved. 5 6 Redistribution and use in source and binary forms, with or without 7 modification, are permitted provided that the following conditions are met: 8 9 1. Redistributions of source code must retain the above copyright notice, 10 this list of conditions and the following disclaimer. 11 12 2. Redistributions in binary form must reproduce the above copyright 13 notice, this list of conditions and the following disclaimer in the 14 documentation and/or other materials provided with the distribution. 15 16 3. Neither the name of the Intel Corporation nor the names of its 17 contributors may be used to endorse or promote products derived from 18 this software without specific prior written permission. 19 20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 POSSIBILITY OF SUCH DAMAGE. 
31 32 ******************************************************************************/ 33 /*$FreeBSD$*/ 34 35 36 #include "opt_inet.h" 37 #include "opt_inet6.h" 38 39 #include "ixgbe.h" 40 #include "ifdi_if.h" 41 42 #include <net/netmap.h> 43 #include <dev/netmap/netmap_kern.h> 44 45 /************************************************************************ 46 * Driver version 47 ************************************************************************/ 48 char ixv_driver_version[] = "2.0.0-k"; 49 50 /************************************************************************ 51 * PCI Device ID Table 52 * 53 * Used by probe to select devices to load on 54 * Last field stores an index into ixv_strings 55 * Last entry must be all 0s 56 * 57 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } 58 ************************************************************************/ 59 static pci_vendor_info_t ixv_vendor_info_array[] = 60 { 61 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"), 62 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"), 63 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"), 64 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"), 65 PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"), 66 /* required last entry */ 67 PVID_END 68 }; 69 70 /************************************************************************ 71 * Function prototypes 72 ************************************************************************/ 73 static void *ixv_register(device_t dev); 74 static int ixv_if_attach_pre(if_ctx_t ctx); 75 static int ixv_if_attach_post(if_ctx_t ctx); 76 static int ixv_if_detach(if_ctx_t ctx); 77 78 static int ixv_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid); 79 static int 
ixv_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets); 80 static int ixv_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets); 81 static void ixv_if_queues_free(if_ctx_t ctx); 82 static void ixv_identify_hardware(if_ctx_t ctx); 83 static void ixv_init_device_features(struct adapter *); 84 static int ixv_allocate_pci_resources(if_ctx_t ctx); 85 static void ixv_free_pci_resources(if_ctx_t ctx); 86 static int ixv_setup_interface(if_ctx_t ctx); 87 static void ixv_if_media_status(if_ctx_t , struct ifmediareq *); 88 static int ixv_if_media_change(if_ctx_t ctx); 89 static void ixv_if_update_admin_status(if_ctx_t ctx); 90 static int ixv_if_msix_intr_assign(if_ctx_t ctx, int msix); 91 92 static int ixv_if_mtu_set(if_ctx_t ctx, uint32_t mtu); 93 static void ixv_if_init(if_ctx_t ctx); 94 static void ixv_if_local_timer(if_ctx_t ctx, uint16_t qid); 95 static void ixv_if_stop(if_ctx_t ctx); 96 static int ixv_negotiate_api(struct adapter *); 97 98 static void ixv_initialize_transmit_units(if_ctx_t ctx); 99 static void ixv_initialize_receive_units(if_ctx_t ctx); 100 static void ixv_initialize_rss_mapping(struct adapter *); 101 102 static void ixv_setup_vlan_support(if_ctx_t ctx); 103 static void ixv_configure_ivars(struct adapter *); 104 static void ixv_if_enable_intr(if_ctx_t ctx); 105 static void ixv_if_disable_intr(if_ctx_t ctx); 106 static void ixv_if_multi_set(if_ctx_t ctx); 107 108 static void ixv_if_register_vlan(if_ctx_t, u16); 109 static void ixv_if_unregister_vlan(if_ctx_t, u16); 110 111 static uint64_t ixv_if_get_counter(if_ctx_t, ift_counter); 112 113 static void ixv_save_stats(struct adapter *); 114 static void ixv_init_stats(struct adapter *); 115 static void ixv_update_stats(struct adapter *); 116 static void ixv_add_stats_sysctls(struct adapter *adapter); 117 118 static int ixv_sysctl_debug(SYSCTL_HANDLER_ARGS); 119 static void ixv_set_ivar(struct adapter *, u8, u8, s8); 120 121 static u8 
*ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *); 122 123 /* The MSI-X Interrupt handlers */ 124 static int ixv_msix_que(void *); 125 static int ixv_msix_mbx(void *); 126 127 /************************************************************************ 128 * FreeBSD Device Interface Entry Points 129 ************************************************************************/ 130 static device_method_t ixv_methods[] = { 131 /* Device interface */ 132 DEVMETHOD(device_register, ixv_register), 133 DEVMETHOD(device_probe, iflib_device_probe), 134 DEVMETHOD(device_attach, iflib_device_attach), 135 DEVMETHOD(device_detach, iflib_device_detach), 136 DEVMETHOD(device_shutdown, iflib_device_shutdown), 137 DEVMETHOD_END 138 }; 139 140 static driver_t ixv_driver = { 141 "ixv", ixv_methods, sizeof(struct adapter), 142 }; 143 144 devclass_t ixv_devclass; 145 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0); 146 MODULE_DEPEND(ixv, pci, 1, 1, 1); 147 MODULE_DEPEND(ixv, ether, 1, 1, 1); 148 #ifdef DEV_NETMAP 149 MODULE_DEPEND(ixv, netmap, 1, 1, 1); 150 #endif /* DEV_NETMAP */ 151 152 static device_method_t ixv_if_methods[] = { 153 DEVMETHOD(ifdi_attach_pre, ixv_if_attach_pre), 154 DEVMETHOD(ifdi_attach_post, ixv_if_attach_post), 155 DEVMETHOD(ifdi_detach, ixv_if_detach), 156 DEVMETHOD(ifdi_init, ixv_if_init), 157 DEVMETHOD(ifdi_stop, ixv_if_stop), 158 DEVMETHOD(ifdi_msix_intr_assign, ixv_if_msix_intr_assign), 159 DEVMETHOD(ifdi_intr_enable, ixv_if_enable_intr), 160 DEVMETHOD(ifdi_intr_disable, ixv_if_disable_intr), 161 DEVMETHOD(ifdi_tx_queue_intr_enable, ixv_if_rx_queue_intr_enable), 162 DEVMETHOD(ifdi_rx_queue_intr_enable, ixv_if_rx_queue_intr_enable), 163 DEVMETHOD(ifdi_tx_queues_alloc, ixv_if_tx_queues_alloc), 164 DEVMETHOD(ifdi_rx_queues_alloc, ixv_if_rx_queues_alloc), 165 DEVMETHOD(ifdi_queues_free, ixv_if_queues_free), 166 DEVMETHOD(ifdi_update_admin_status, ixv_if_update_admin_status), 167 DEVMETHOD(ifdi_multi_set, ixv_if_multi_set), 168 DEVMETHOD(ifdi_mtu_set, 
ixv_if_mtu_set), 169 DEVMETHOD(ifdi_media_status, ixv_if_media_status), 170 DEVMETHOD(ifdi_media_change, ixv_if_media_change), 171 DEVMETHOD(ifdi_timer, ixv_if_local_timer), 172 DEVMETHOD(ifdi_vlan_register, ixv_if_register_vlan), 173 DEVMETHOD(ifdi_vlan_unregister, ixv_if_unregister_vlan), 174 DEVMETHOD(ifdi_get_counter, ixv_if_get_counter), 175 DEVMETHOD_END 176 }; 177 178 static driver_t ixv_if_driver = { 179 "ixv_if", ixv_if_methods, sizeof(struct adapter) 180 }; 181 182 /* 183 * TUNEABLE PARAMETERS: 184 */ 185 186 /* Flow control setting, default to full */ 187 static int ixv_flow_control = ixgbe_fc_full; 188 TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control); 189 190 /* 191 * Header split: this causes the hardware to DMA 192 * the header into a separate mbuf from the payload, 193 * it can be a performance win in some workloads, but 194 * in others it actually hurts, its off by default. 195 */ 196 static int ixv_header_split = FALSE; 197 TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split); 198 199 /* 200 * Shadow VFTA table, this is needed because 201 * the real filter table gets cleared during 202 * a soft reset and we need to repopulate it. 
203 */ 204 static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE]; 205 extern struct if_txrx ixgbe_txrx; 206 207 static struct if_shared_ctx ixv_sctx_init = { 208 .isc_magic = IFLIB_MAGIC, 209 .isc_q_align = PAGE_SIZE,/* max(DBA_ALIGN, PAGE_SIZE) */ 210 .isc_tx_maxsize = IXGBE_TSO_SIZE, 211 212 .isc_tx_maxsegsize = PAGE_SIZE, 213 214 .isc_rx_maxsize = MJUM16BYTES, 215 .isc_rx_nsegments = 1, 216 .isc_rx_maxsegsize = MJUM16BYTES, 217 .isc_nfl = 1, 218 .isc_ntxqs = 1, 219 .isc_nrxqs = 1, 220 .isc_admin_intrcnt = 1, 221 .isc_vendor_info = ixv_vendor_info_array, 222 .isc_driver_version = ixv_driver_version, 223 .isc_driver = &ixv_if_driver, 224 225 .isc_nrxd_min = {MIN_RXD}, 226 .isc_ntxd_min = {MIN_TXD}, 227 .isc_nrxd_max = {MAX_RXD}, 228 .isc_ntxd_max = {MAX_TXD}, 229 .isc_nrxd_default = {DEFAULT_RXD}, 230 .isc_ntxd_default = {DEFAULT_TXD}, 231 }; 232 233 if_shared_ctx_t ixv_sctx = &ixv_sctx_init; 234 235 static void * 236 ixv_register(device_t dev) 237 { 238 return (ixv_sctx); 239 } 240 241 /************************************************************************ 242 * ixv_if_tx_queues_alloc 243 ************************************************************************/ 244 static int 245 ixv_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, 246 int ntxqs, int ntxqsets) 247 { 248 struct adapter *adapter = iflib_get_softc(ctx); 249 if_softc_ctx_t scctx = adapter->shared; 250 struct ix_tx_queue *que; 251 int i, j, error; 252 253 MPASS(adapter->num_tx_queues == ntxqsets); 254 MPASS(ntxqs == 1); 255 256 /* Allocate queue structure memory */ 257 adapter->tx_queues = 258 (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets, 259 M_DEVBUF, M_NOWAIT | M_ZERO); 260 if (!adapter->tx_queues) { 261 device_printf(iflib_get_dev(ctx), 262 "Unable to allocate TX ring memory\n"); 263 return (ENOMEM); 264 } 265 266 for (i = 0, que = adapter->tx_queues; i < ntxqsets; i++, que++) { 267 struct tx_ring *txr = &que->txr; 268 269 txr->me = i; 270 txr->adapter = 
que->adapter = adapter; 271 adapter->active_queues |= (u64)1 << txr->me; 272 273 /* Allocate report status array */ 274 if (!(txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_DEVBUF, M_NOWAIT | M_ZERO))) { 275 error = ENOMEM; 276 goto fail; 277 } 278 for (j = 0; j < scctx->isc_ntxd[0]; j++) 279 txr->tx_rsq[j] = QIDX_INVALID; 280 /* get the virtual and physical address of the hardware queues */ 281 txr->tail = IXGBE_VFTDT(txr->me); 282 txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i*ntxqs]; 283 txr->tx_paddr = paddrs[i*ntxqs]; 284 285 txr->bytes = 0; 286 txr->total_packets = 0; 287 288 } 289 290 device_printf(iflib_get_dev(ctx), "allocated for %d queues\n", 291 adapter->num_tx_queues); 292 293 return (0); 294 295 fail: 296 ixv_if_queues_free(ctx); 297 298 return (error); 299 } /* ixv_if_tx_queues_alloc */ 300 301 /************************************************************************ 302 * ixv_if_rx_queues_alloc 303 ************************************************************************/ 304 static int 305 ixv_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, 306 int nrxqs, int nrxqsets) 307 { 308 struct adapter *adapter = iflib_get_softc(ctx); 309 struct ix_rx_queue *que; 310 int i, error; 311 312 MPASS(adapter->num_rx_queues == nrxqsets); 313 MPASS(nrxqs == 1); 314 315 /* Allocate queue structure memory */ 316 adapter->rx_queues = 317 (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue) * nrxqsets, 318 M_DEVBUF, M_NOWAIT | M_ZERO); 319 if (!adapter->rx_queues) { 320 device_printf(iflib_get_dev(ctx), 321 "Unable to allocate TX ring memory\n"); 322 error = ENOMEM; 323 goto fail; 324 } 325 326 for (i = 0, que = adapter->rx_queues; i < nrxqsets; i++, que++) { 327 struct rx_ring *rxr = &que->rxr; 328 rxr->me = i; 329 rxr->adapter = que->adapter = adapter; 330 331 332 /* get the virtual and physical address of the hw queues */ 333 rxr->tail = IXGBE_VFRDT(rxr->me); 334 rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i]; 
335 rxr->rx_paddr = paddrs[i*nrxqs]; 336 rxr->bytes = 0; 337 rxr->que = que; 338 } 339 340 device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n", 341 adapter->num_rx_queues); 342 343 return (0); 344 345 fail: 346 ixv_if_queues_free(ctx); 347 348 return (error); 349 } /* ixv_if_rx_queues_alloc */ 350 351 /************************************************************************ 352 * ixv_if_queues_free 353 ************************************************************************/ 354 static void 355 ixv_if_queues_free(if_ctx_t ctx) 356 { 357 struct adapter *adapter = iflib_get_softc(ctx); 358 struct ix_tx_queue *que = adapter->tx_queues; 359 int i; 360 361 if (que == NULL) 362 goto free; 363 364 for (i = 0; i < adapter->num_tx_queues; i++, que++) { 365 struct tx_ring *txr = &que->txr; 366 if (txr->tx_rsq == NULL) 367 break; 368 369 free(txr->tx_rsq, M_DEVBUF); 370 txr->tx_rsq = NULL; 371 } 372 if (adapter->tx_queues != NULL) 373 free(adapter->tx_queues, M_DEVBUF); 374 free: 375 if (adapter->rx_queues != NULL) 376 free(adapter->rx_queues, M_DEVBUF); 377 adapter->tx_queues = NULL; 378 adapter->rx_queues = NULL; 379 } /* ixv_if_queues_free */ 380 381 /************************************************************************ 382 * ixv_if_attach_pre - Device initialization routine 383 * 384 * Called when the driver is being loaded. 385 * Identifies the type of hardware, allocates all resources 386 * and initializes the hardware. 
387 * 388 * return 0 on success, positive on failure 389 ************************************************************************/ 390 static int 391 ixv_if_attach_pre(if_ctx_t ctx) 392 { 393 struct adapter *adapter; 394 device_t dev; 395 if_softc_ctx_t scctx; 396 struct ixgbe_hw *hw; 397 int error = 0; 398 399 INIT_DEBUGOUT("ixv_attach: begin"); 400 401 /* Allocate, clear, and link in our adapter structure */ 402 dev = iflib_get_dev(ctx); 403 adapter = iflib_get_softc(ctx); 404 adapter->dev = dev; 405 adapter->ctx = ctx; 406 adapter->hw.back = adapter; 407 scctx = adapter->shared = iflib_get_softc_ctx(ctx); 408 adapter->media = iflib_get_media(ctx); 409 hw = &adapter->hw; 410 411 /* Do base PCI setup - map BAR0 */ 412 if (ixv_allocate_pci_resources(ctx)) { 413 device_printf(dev, "ixv_allocate_pci_resources() failed!\n"); 414 error = ENXIO; 415 goto err_out; 416 } 417 418 /* SYSCTL APIs */ 419 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 420 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug", 421 CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixv_sysctl_debug, "I", 422 "Debug Info"); 423 424 /* Determine hardware revision */ 425 ixv_identify_hardware(ctx); 426 ixv_init_device_features(adapter); 427 428 /* Initialize the shared code */ 429 error = ixgbe_init_ops_vf(hw); 430 if (error) { 431 device_printf(dev, "ixgbe_init_ops_vf() failed!\n"); 432 error = EIO; 433 goto err_out; 434 } 435 436 /* Setup the mailbox */ 437 ixgbe_init_mbx_params_vf(hw); 438 439 error = hw->mac.ops.reset_hw(hw); 440 if (error == IXGBE_ERR_RESET_FAILED) 441 device_printf(dev, "...reset_hw() failure: Reset Failed!\n"); 442 else if (error) 443 device_printf(dev, "...reset_hw() failed with error %d\n", 444 error); 445 if (error) { 446 error = EIO; 447 goto err_out; 448 } 449 450 error = hw->mac.ops.init_hw(hw); 451 if (error) { 452 device_printf(dev, "...init_hw() failed with error %d\n", 453 error); 454 error = EIO; 455 goto err_out; 456 } 457 458 /* Negotiate mailbox API version */ 459 
error = ixv_negotiate_api(adapter); 460 if (error) { 461 device_printf(dev, 462 "Mailbox API negotiation failed during attach!\n"); 463 goto err_out; 464 } 465 466 /* If no mac address was assigned, make a random one */ 467 if (!ixv_check_ether_addr(hw->mac.addr)) { 468 u8 addr[ETHER_ADDR_LEN]; 469 arc4rand(&addr, sizeof(addr), 0); 470 addr[0] &= 0xFE; 471 addr[0] |= 0x02; 472 bcopy(addr, hw->mac.addr, sizeof(addr)); 473 bcopy(addr, hw->mac.perm_addr, sizeof(addr)); 474 } 475 476 /* Most of the iflib initialization... */ 477 478 iflib_set_mac(ctx, hw->mac.addr); 479 switch (adapter->hw.mac.type) { 480 case ixgbe_mac_X550_vf: 481 case ixgbe_mac_X550EM_x_vf: 482 case ixgbe_mac_X550EM_a_vf: 483 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 2; 484 break; 485 default: 486 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 1; 487 } 488 scctx->isc_txqsizes[0] = 489 roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) + 490 sizeof(u32), DBA_ALIGN); 491 scctx->isc_rxqsizes[0] = 492 roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc), 493 DBA_ALIGN); 494 /* XXX */ 495 scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO | 496 CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO; 497 scctx->isc_tx_nsegments = IXGBE_82599_SCATTER; 498 scctx->isc_msix_bar = PCIR_BAR(MSIX_82598_BAR); 499 scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments; 500 scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE; 501 scctx->isc_tx_tso_segsize_max = PAGE_SIZE; 502 503 scctx->isc_txrx = &ixgbe_txrx; 504 505 /* 506 * Tell the upper layer(s) we support everything the PF 507 * driver does except... 
508 * hardware stats 509 * Wake-on-LAN 510 */ 511 scctx->isc_capenable = IXGBE_CAPS; 512 scctx->isc_capenable ^= IFCAP_HWSTATS | IFCAP_WOL; 513 514 INIT_DEBUGOUT("ixv_if_attach_pre: end"); 515 516 return (0); 517 518 err_out: 519 ixv_free_pci_resources(ctx); 520 521 return (error); 522 } /* ixv_if_attach_pre */ 523 524 static int 525 ixv_if_attach_post(if_ctx_t ctx) 526 { 527 struct adapter *adapter = iflib_get_softc(ctx); 528 device_t dev = iflib_get_dev(ctx); 529 int error = 0; 530 531 /* Setup OS specific network interface */ 532 error = ixv_setup_interface(ctx); 533 if (error) { 534 device_printf(dev, "Interface setup failed: %d\n", error); 535 goto end; 536 } 537 538 /* Do the stats setup */ 539 ixv_save_stats(adapter); 540 ixv_init_stats(adapter); 541 ixv_add_stats_sysctls(adapter); 542 543 end: 544 return error; 545 } /* ixv_if_attach_post */ 546 547 /************************************************************************ 548 * ixv_detach - Device removal routine 549 * 550 * Called when the driver is being removed. 551 * Stops the adapter and deallocates all the resources 552 * that were allocated for driver operation. 
553 * 554 * return 0 on success, positive on failure 555 ************************************************************************/ 556 static int 557 ixv_if_detach(if_ctx_t ctx) 558 { 559 INIT_DEBUGOUT("ixv_detach: begin"); 560 561 ixv_free_pci_resources(ctx); 562 563 return (0); 564 } /* ixv_if_detach */ 565 566 /************************************************************************ 567 * ixv_if_mtu_set 568 ************************************************************************/ 569 static int 570 ixv_if_mtu_set(if_ctx_t ctx, uint32_t mtu) 571 { 572 struct adapter *adapter = iflib_get_softc(ctx); 573 struct ifnet *ifp = iflib_get_ifp(ctx); 574 int error = 0; 575 576 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)"); 577 if (mtu > IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR) { 578 error = EINVAL; 579 } else { 580 ifp->if_mtu = mtu; 581 adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR; 582 } 583 584 return error; 585 } /* ixv_if_mtu_set */ 586 587 /************************************************************************ 588 * ixv_if_init - Init entry point 589 * 590 * Used in two ways: It is used by the stack as an init entry 591 * point in network interface structure. It is also used 592 * by the driver as a hw/sw initialization routine to get 593 * to a consistent state. 594 * 595 * return 0 on success, positive on failure 596 ************************************************************************/ 597 static void 598 ixv_if_init(if_ctx_t ctx) 599 { 600 struct adapter *adapter = iflib_get_softc(ctx); 601 struct ifnet *ifp = iflib_get_ifp(ctx); 602 device_t dev = iflib_get_dev(ctx); 603 struct ixgbe_hw *hw = &adapter->hw; 604 int error = 0; 605 606 INIT_DEBUGOUT("ixv_if_init: begin"); 607 hw->adapter_stopped = FALSE; 608 hw->mac.ops.stop_adapter(hw); 609 610 /* reprogram the RAR[0] in case user changed it. 
*/ 611 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); 612 613 /* Get the latest mac address, User can use a LAA */ 614 bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS); 615 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1); 616 617 /* Reset VF and renegotiate mailbox API version */ 618 hw->mac.ops.reset_hw(hw); 619 error = ixv_negotiate_api(adapter); 620 if (error) { 621 device_printf(dev, 622 "Mailbox API negotiation failed in if_init!\n"); 623 return; 624 } 625 626 ixv_initialize_transmit_units(ctx); 627 628 /* Setup Multicast table */ 629 ixv_if_multi_set(ctx); 630 631 /* 632 * Determine the correct mbuf pool 633 * for doing jumbo/headersplit 634 */ 635 if (ifp->if_mtu > ETHERMTU) 636 adapter->rx_mbuf_sz = MJUMPAGESIZE; 637 else 638 adapter->rx_mbuf_sz = MCLBYTES; 639 640 /* Configure RX settings */ 641 ixv_initialize_receive_units(ctx); 642 643 /* Set up VLAN offload and filter */ 644 ixv_setup_vlan_support(ctx); 645 646 /* Set up MSI-X routing */ 647 ixv_configure_ivars(adapter); 648 649 /* Set up auto-mask */ 650 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE); 651 652 /* Set moderation on the Link interrupt */ 653 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR); 654 655 /* Stats init */ 656 ixv_init_stats(adapter); 657 658 /* Config/Enable Link */ 659 hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up, 660 FALSE); 661 662 /* And now turn on interrupts */ 663 ixv_if_enable_intr(ctx); 664 665 /* Now inform the stack we're ready */ 666 ifp->if_drv_flags |= IFF_DRV_RUNNING; 667 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 668 669 return; 670 } /* ixv_if_init */ 671 672 /************************************************************************ 673 * ixv_enable_queue 674 ************************************************************************/ 675 static inline void 676 ixv_enable_queue(struct adapter *adapter, u32 vector) 677 { 678 struct ixgbe_hw *hw = &adapter->hw; 679 u32 queue = 1 << vector; 680 u32 
mask; 681 682 mask = (IXGBE_EIMS_RTX_QUEUE & queue); 683 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask); 684 } /* ixv_enable_queue */ 685 686 /************************************************************************ 687 * ixv_disable_queue 688 ************************************************************************/ 689 static inline void 690 ixv_disable_queue(struct adapter *adapter, u32 vector) 691 { 692 struct ixgbe_hw *hw = &adapter->hw; 693 u64 queue = (u64)(1 << vector); 694 u32 mask; 695 696 mask = (IXGBE_EIMS_RTX_QUEUE & queue); 697 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask); 698 } /* ixv_disable_queue */ 699 700 701 /************************************************************************ 702 * ixv_msix_que - MSI-X Queue Interrupt Service routine 703 ************************************************************************/ 704 static int 705 ixv_msix_que(void *arg) 706 { 707 struct ix_rx_queue *que = arg; 708 struct adapter *adapter = que->adapter; 709 710 ixv_disable_queue(adapter, que->msix); 711 ++que->irqs; 712 713 return (FILTER_SCHEDULE_THREAD); 714 } /* ixv_msix_que */ 715 716 /************************************************************************ 717 * ixv_msix_mbx 718 ************************************************************************/ 719 static int 720 ixv_msix_mbx(void *arg) 721 { 722 struct adapter *adapter = arg; 723 struct ixgbe_hw *hw = &adapter->hw; 724 u32 reg; 725 726 ++adapter->link_irq; 727 728 /* First get the cause */ 729 reg = IXGBE_READ_REG(hw, IXGBE_VTEICS); 730 /* Clear interrupt with write */ 731 IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg); 732 733 /* Link status change */ 734 if (reg & IXGBE_EICR_LSC) 735 iflib_admin_intr_deferred(adapter->ctx); 736 737 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER); 738 739 return (FILTER_HANDLED); 740 } /* ixv_msix_mbx */ 741 742 /************************************************************************ 743 * ixv_media_status - Media Ioctl callback 744 * 745 * Called whenever the user queries the 
status of 746 * the interface using ifconfig. 747 ************************************************************************/ 748 static void 749 ixv_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr) 750 { 751 struct adapter *adapter = iflib_get_softc(ctx); 752 753 INIT_DEBUGOUT("ixv_media_status: begin"); 754 755 iflib_admin_intr_deferred(ctx); 756 757 ifmr->ifm_status = IFM_AVALID; 758 ifmr->ifm_active = IFM_ETHER; 759 760 if (!adapter->link_active) 761 return; 762 763 ifmr->ifm_status |= IFM_ACTIVE; 764 765 switch (adapter->link_speed) { 766 case IXGBE_LINK_SPEED_1GB_FULL: 767 ifmr->ifm_active |= IFM_1000_T | IFM_FDX; 768 break; 769 case IXGBE_LINK_SPEED_10GB_FULL: 770 ifmr->ifm_active |= IFM_10G_T | IFM_FDX; 771 break; 772 case IXGBE_LINK_SPEED_100_FULL: 773 ifmr->ifm_active |= IFM_100_TX | IFM_FDX; 774 break; 775 case IXGBE_LINK_SPEED_10_FULL: 776 ifmr->ifm_active |= IFM_10_T | IFM_FDX; 777 break; 778 } 779 } /* ixv_if_media_status */ 780 781 /************************************************************************ 782 * ixv_if_media_change - Media Ioctl callback 783 * 784 * Called when the user changes speed/duplex using 785 * media/mediopt option with ifconfig. 786 ************************************************************************/ 787 static int 788 ixv_if_media_change(if_ctx_t ctx) 789 { 790 struct adapter *adapter = iflib_get_softc(ctx); 791 struct ifmedia *ifm = iflib_get_media(ctx); 792 793 INIT_DEBUGOUT("ixv_media_change: begin"); 794 795 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 796 return (EINVAL); 797 798 switch (IFM_SUBTYPE(ifm->ifm_media)) { 799 case IFM_AUTO: 800 break; 801 default: 802 device_printf(adapter->dev, "Only auto media type\n"); 803 return (EINVAL); 804 } 805 806 return (0); 807 } /* ixv_if_media_change */ 808 809 810 /************************************************************************ 811 * ixv_negotiate_api 812 * 813 * Negotiate the Mailbox API with the PF; 814 * start with the most featured API first. 
815 ************************************************************************/ 816 static int 817 ixv_negotiate_api(struct adapter *adapter) 818 { 819 struct ixgbe_hw *hw = &adapter->hw; 820 int mbx_api[] = { ixgbe_mbox_api_11, 821 ixgbe_mbox_api_10, 822 ixgbe_mbox_api_unknown }; 823 int i = 0; 824 825 while (mbx_api[i] != ixgbe_mbox_api_unknown) { 826 if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0) 827 return (0); 828 i++; 829 } 830 831 return (EINVAL); 832 } /* ixv_negotiate_api */ 833 834 835 /************************************************************************ 836 * ixv_if_multi_set - Multicast Update 837 * 838 * Called whenever multicast address list is updated. 839 ************************************************************************/ 840 static void 841 ixv_if_multi_set(if_ctx_t ctx) 842 { 843 u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS]; 844 struct adapter *adapter = iflib_get_softc(ctx); 845 u8 *update_ptr; 846 struct ifmultiaddr *ifma; 847 if_t ifp = iflib_get_ifp(ctx); 848 int mcnt = 0; 849 850 IOCTL_DEBUGOUT("ixv_if_multi_set: begin"); 851 852 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 853 if (ifma->ifma_addr->sa_family != AF_LINK) 854 continue; 855 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 856 &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS], 857 IXGBE_ETH_LENGTH_OF_ADDRESS); 858 mcnt++; 859 } 860 861 update_ptr = mta; 862 863 adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw, update_ptr, mcnt, 864 ixv_mc_array_itr, TRUE); 865 } /* ixv_if_multi_set */ 866 867 /************************************************************************ 868 * ixv_mc_array_itr 869 * 870 * An iterator function needed by the multicast shared code. 871 * It feeds the shared code routine the addresses in the 872 * array of ixv_set_multi() one by one. 
873 ************************************************************************/ 874 static u8 * 875 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq) 876 { 877 u8 *addr = *update_ptr; 878 u8 *newptr; 879 880 *vmdq = 0; 881 882 newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS; 883 *update_ptr = newptr; 884 885 return addr; 886 } /* ixv_mc_array_itr */ 887 888 /************************************************************************ 889 * ixv_if_local_timer - Timer routine 890 * 891 * Checks for link status, updates statistics, 892 * and runs the watchdog check. 893 ************************************************************************/ 894 static void 895 ixv_if_local_timer(if_ctx_t ctx, uint16_t qid) 896 { 897 if (qid != 0) 898 return; 899 900 /* Fire off the adminq task */ 901 iflib_admin_intr_deferred(ctx); 902 } /* ixv_if_local_timer */ 903 904 /************************************************************************ 905 * ixv_if_update_admin_status - Update OS on link state 906 * 907 * Note: Only updates the OS on the cached link state. 908 * The real check of the hardware only happens with 909 * a link interrupt. 910 ************************************************************************/ 911 static void 912 ixv_if_update_admin_status(if_ctx_t ctx) 913 { 914 struct adapter *adapter = iflib_get_softc(ctx); 915 device_t dev = iflib_get_dev(ctx); 916 917 adapter->hw.mac.get_link_status = TRUE; 918 ixgbe_check_link(&adapter->hw, &adapter->link_speed, &adapter->link_up, 919 FALSE); 920 921 if (adapter->link_up) { 922 if (adapter->link_active == FALSE) { 923 if (bootverbose) 924 device_printf(dev, "Link is up %d Gbps %s \n", 925 ((adapter->link_speed == 128) ? 
10 : 1), 926 "Full Duplex"); 927 adapter->link_active = TRUE; 928 iflib_link_state_change(ctx, LINK_STATE_UP, 929 IF_Gbps(10)); 930 } 931 } else { /* Link down */ 932 if (adapter->link_active == TRUE) { 933 if (bootverbose) 934 device_printf(dev, "Link is Down\n"); 935 iflib_link_state_change(ctx, LINK_STATE_DOWN, 0); 936 adapter->link_active = FALSE; 937 } 938 } 939 940 /* Stats Update */ 941 ixv_update_stats(adapter); 942 } /* ixv_if_update_admin_status */ 943 944 945 /************************************************************************ 946 * ixv_if_stop - Stop the hardware 947 * 948 * Disables all traffic on the adapter by issuing a 949 * global reset on the MAC and deallocates TX/RX buffers. 950 ************************************************************************/ 951 static void 952 ixv_if_stop(if_ctx_t ctx) 953 { 954 struct adapter *adapter = iflib_get_softc(ctx); 955 struct ixgbe_hw *hw = &adapter->hw; 956 957 INIT_DEBUGOUT("ixv_stop: begin\n"); 958 959 ixv_if_disable_intr(ctx); 960 961 hw->mac.ops.reset_hw(hw); 962 adapter->hw.adapter_stopped = FALSE; 963 hw->mac.ops.stop_adapter(hw); 964 965 /* Update the stack */ 966 adapter->link_up = FALSE; 967 ixv_if_update_admin_status(ctx); 968 969 /* reprogram the RAR[0] in case user changed it. */ 970 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); 971 } /* ixv_if_stop */ 972 973 974 /************************************************************************ 975 * ixv_identify_hardware - Determine hardware revision. 
 ************************************************************************/
static void
ixv_identify_hardware(if_ctx_t ctx)
{
    struct adapter *adapter = iflib_get_softc(ctx);
    device_t dev = iflib_get_dev(ctx);
    struct ixgbe_hw *hw = &adapter->hw;

    /* Save off the PCI identification of this board */
    hw->vendor_id = pci_get_vendor(dev);
    hw->device_id = pci_get_device(dev);
    hw->revision_id = pci_get_revid(dev);
    hw->subsystem_vendor_id = pci_get_subvendor(dev);
    hw->subsystem_device_id = pci_get_subdevice(dev);

    /* A subset of set_mac_type: map device id to VF mac type */
    switch (hw->device_id) {
    case IXGBE_DEV_ID_82599_VF:
        hw->mac.type = ixgbe_mac_82599_vf;
        break;
    case IXGBE_DEV_ID_X540_VF:
        hw->mac.type = ixgbe_mac_X540_vf;
        break;
    case IXGBE_DEV_ID_X550_VF:
        hw->mac.type = ixgbe_mac_X550_vf;
        break;
    case IXGBE_DEV_ID_X550EM_X_VF:
        hw->mac.type = ixgbe_mac_X550EM_x_vf;
        break;
    case IXGBE_DEV_ID_X550EM_A_VF:
        hw->mac.type = ixgbe_mac_X550EM_a_vf;
        break;
    default:
        device_printf(dev, "unknown mac type\n");
        hw->mac.type = ixgbe_mac_unknown;
        break;
    }
} /* ixv_identify_hardware */

/************************************************************************
 * ixv_if_msix_intr_assign - Setup MSI-X Interrupt resources and handlers
 *
 * One MSI-X vector per RX queue (vector 0..n-1, resource id = vector+1),
 * TX queues piggy-back on the RX queue irqs, and a final vector is
 * assigned to the admin/mailbox interrupt.
 ************************************************************************/
static int
ixv_if_msix_intr_assign(if_ctx_t ctx, int msix)
{
    struct adapter *adapter = iflib_get_softc(ctx);
    device_t dev = iflib_get_dev(ctx);
    struct ix_rx_queue *rx_que = adapter->rx_queues;
    struct ix_tx_queue *tx_que;
    int error, rid, vector = 0;
    char buf[16];

    for (int i = 0; i < adapter->num_rx_queues; i++, vector++, rx_que++) {
        rid = vector + 1;

        snprintf(buf, sizeof(buf), "rxq%d", i);
        error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
            IFLIB_INTR_RX, ixv_msix_que, rx_que, rx_que->rxr.me, buf);

        if (error) {
            device_printf(iflib_get_dev(ctx),
                "Failed to allocate que int %d err: %d", i, error);
            /* Shrink the queue count so the fail path frees 0..i */
            adapter->num_rx_queues = i + 1;
            goto fail;
        }

        rx_que->msix = vector;
        /*
         * NOTE(review): the shift is evaluated as int before the
         * cast; (u64)1 << rx_que->msix would be safer if msix could
         * ever exceed 31 -- benign for VF queue counts.
         */
        adapter->active_queues |= (u64)(1 << rx_que->msix);

    }

    /* TX queues share the RX queue interrupts (round-robin) */
    for (int i = 0; i < adapter->num_tx_queues; i++) {
        snprintf(buf, sizeof(buf), "txq%d", i);
        tx_que = &adapter->tx_queues[i];
        tx_que->msix = i % adapter->num_rx_queues;
        iflib_softirq_alloc_generic(ctx,
            &adapter->rx_queues[tx_que->msix].que_irq,
            IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
    }
    /* Last vector handles the admin/mailbox interrupt */
    rid = vector + 1;
    error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid,
        IFLIB_INTR_ADMIN, ixv_msix_mbx, adapter, 0, "aq");
    if (error) {
        device_printf(iflib_get_dev(ctx),
            "Failed to register admin handler");
        return (error);
    }

    adapter->vector = vector;
    /*
     * Due to a broken design QEMU will fail to properly
     * enable the guest for MSIX unless the vectors in
     * the table are all set up, so we must rewrite the
     * ENABLE in the MSIX control register again at this
     * point to cause it to successfully initialize us.
     */
    if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
        int msix_ctrl;
        /*
         * NOTE(review): pci_find_cap() return value is unchecked;
         * if the capability lookup failed, rid would be stale --
         * verify MSI-X capability is guaranteed present here.
         */
        pci_find_cap(dev, PCIY_MSIX, &rid);
        rid += PCIR_MSIX_CTRL;
        msix_ctrl = pci_read_config(dev, rid, 2);
        msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
        pci_write_config(dev, rid, msix_ctrl, 2);
    }

    return (0);

fail:
    iflib_irq_free(ctx, &adapter->irq);
    rx_que = adapter->rx_queues;
    for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++)
        iflib_irq_free(ctx, &rx_que->que_irq);

    return (error);
} /* ixv_if_msix_intr_assign */

/************************************************************************
 * ixv_allocate_pci_resources - map BAR(0) registers
 ************************************************************************/
static int
ixv_allocate_pci_resources(if_ctx_t ctx)
{
    struct adapter *adapter = iflib_get_softc(ctx);
    device_t dev = iflib_get_dev(ctx);
    int rid;

    rid = PCIR_BAR(0);
    adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
        RF_ACTIVE);

    if (!(adapter->pci_mem)) {
        device_printf(dev, "Unable to allocate bus resource: memory\n");
        return (ENXIO);
    }

    /* Hand the mapped register window to the shared code */
    adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
    adapter->osdep.mem_bus_space_handle =
        rman_get_bushandle(adapter->pci_mem);
    adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;

    return (0);
} /* ixv_allocate_pci_resources */

/************************************************************************
 * ixv_free_pci_resources - release irqs and the BAR(0) mapping
 ************************************************************************/
static void
ixv_free_pci_resources(if_ctx_t ctx)
{
    struct adapter *adapter = iflib_get_softc(ctx);
    struct ix_rx_queue *que = adapter->rx_queues;
    device_t dev = iflib_get_dev(ctx);

    /* Release the admin irq when running MSI-X */
    if (adapter->intr_type == IFLIB_INTR_MSIX)
        iflib_irq_free(ctx, &adapter->irq);

    /* Release all msix queue resources */
    if (que != NULL) {
        for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
            iflib_irq_free(ctx, &que->que_irq);
        }
    }

    /* Finally release the memory-mapped register BAR */
    if (adapter->pci_mem != NULL)
        bus_release_resource(dev, SYS_RES_MEMORY,
            PCIR_BAR(0), adapter->pci_mem);
} /* ixv_free_pci_resources */

/************************************************************************
 * ixv_setup_interface
 *
 * Setup networking device structure and register an interface.
 ************************************************************************/
static int
ixv_setup_interface(if_ctx_t ctx)
{
    struct adapter *adapter = iflib_get_softc(ctx);
    if_softc_ctx_t scctx = adapter->shared;
    struct ifnet *ifp = iflib_get_ifp(ctx);

    INIT_DEBUGOUT("ixv_setup_interface: begin");

    if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
    if_setbaudrate(ifp, IF_Gbps(10));
    ifp->if_snd.ifq_maxlen = scctx->isc_ntxd[0] - 2;


    adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
    /* Only autoselect media is advertised for a VF */
    ifmedia_add(adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
    ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);

    return 0;
} /* ixv_setup_interface */

/************************************************************************
 * ixv_if_get_counter - report soft interface counters to the stack
 ************************************************************************/
static uint64_t
ixv_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
    struct adapter *adapter = iflib_get_softc(ctx);
    if_t ifp = iflib_get_ifp(ctx);

    switch (cnt) {
    case IFCOUNTER_IPACKETS:
        return (adapter->ipackets);
    case IFCOUNTER_OPACKETS:
        return (adapter->opackets);
    case IFCOUNTER_IBYTES:
        return (adapter->ibytes);
    case IFCOUNTER_OBYTES:
        return (adapter->obytes);
    case IFCOUNTER_IMCASTS:
        return (adapter->imcasts);
    default:
        /* Everything else falls back to the generic accounting */
        return (if_get_counter_default(ifp, cnt));
    }
} /* ixv_if_get_counter */

/************************************************************************
 * ixv_initialize_transmit_units - Enable transmit unit.
 ************************************************************************/
static void
ixv_initialize_transmit_units(if_ctx_t ctx)
{
    struct adapter *adapter = iflib_get_softc(ctx);
    struct ixgbe_hw *hw = &adapter->hw;
    if_softc_ctx_t scctx = adapter->shared;
    struct ix_tx_queue *que = adapter->tx_queues;
    int i;

    for (i = 0; i < adapter->num_tx_queues; i++, que++) {
        struct tx_ring *txr = &que->txr;
        u64 tdba = txr->tx_paddr;
        u32 txctrl, txdctl;
        int j = txr->me;

        /* Set WTHRESH to 8, burst writeback */
        txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
        txdctl |= (8 << 16);
        IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);

        /* Set the HW Tx Head and Tail indices */
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(j), 0);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(j), 0);

        /* Set Tx Tail register */
        txr->tail = IXGBE_VFTDT(j);

        /* Reset the report-status bookkeeping for this ring */
        txr->tx_rs_cidx = txr->tx_rs_pidx = txr->tx_cidx_processed = 0;
        for (int k = 0; k < scctx->isc_ntxd[0]; k++)
            txr->tx_rsq[k] = QIDX_INVALID;

        /* Set Ring parameters */
        IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
            (tdba & 0x00000000ffffffffULL));
        IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
        IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j),
            scctx->isc_ntxd[0] * sizeof(struct ixgbe_legacy_tx_desc));
        /* Disable relaxed-ordering of descriptor writes */
        txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
        txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
        IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);

        /* Now enable */
        txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
        txdctl |= IXGBE_TXDCTL_ENABLE;
        IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
    }

    return;
} /* ixv_initialize_transmit_units */

/************************************************************************
 * ixv_initialize_rss_mapping - program VF RSS key, RETA and hash types
 ************************************************************************/
static void
ixv_initialize_rss_mapping(struct adapter *adapter)
{
    struct ixgbe_hw *hw = &adapter->hw;
    u32 reta = 0, mrqc, rss_key[10];
    int queue_id;
    int i, j;
    u32 rss_hash_config;

    if (adapter->feat_en & IXGBE_FEATURE_RSS) {
        /* Fetch the configured RSS key */
        rss_getkey((uint8_t *)&rss_key);
    } else {
        /* set up random bits */
        arc4rand(&rss_key, sizeof(rss_key), 0);
    }

    /* Now fill out hash function seeds */
    for (i = 0; i < 10; i++)
        IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);

    /* Set up the redirection table */
    for (i = 0, j = 0; i < 64; i++, j++) {
        if (j == adapter->num_rx_queues)
            j = 0;

        if (adapter->feat_en & IXGBE_FEATURE_RSS) {
            /*
             * Fetch the RSS bucket id for the given indirection
             * entry. Cap it at the number of configured buckets
             * (which is num_rx_queues.)
             */
            queue_id = rss_get_indirection_to_bucket(i);
            queue_id = queue_id % adapter->num_rx_queues;
        } else
            queue_id = j;

        /*
         * The low 8 bits are for hash value (n+0);
         * The next 8 bits are for hash value (n+1), etc.
         */
        reta >>= 8;
        reta |= ((uint32_t)queue_id) << 24;
        /* Flush the accumulated register every 4 entries */
        if ((i & 3) == 3) {
            IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
            reta = 0;
        }
    }

    /* Perform hash on these packet types */
    if (adapter->feat_en & IXGBE_FEATURE_RSS)
        rss_hash_config = rss_gethashconfig();
    else {
        /*
         * Disable UDP - IP fragments aren't currently being handled
         * and so we end up with a mix of 2-tuple and 4-tuple
         * traffic.
         */
        rss_hash_config = RSS_HASHTYPE_RSS_IPV4
                        | RSS_HASHTYPE_RSS_TCP_IPV4
                        | RSS_HASHTYPE_RSS_IPV6
                        | RSS_HASHTYPE_RSS_TCP_IPV6;
    }

    mrqc = IXGBE_MRQC_RSSEN;
    if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
    if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
    if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
    if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
    if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
        device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
            __func__);
    if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
        device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
            __func__);
    if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
    if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
    if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
        device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
            __func__);
    IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
} /* ixv_initialize_rss_mapping */


/************************************************************************
 * ixv_initialize_receive_units - Setup receive registers and features.
 ************************************************************************/
static void
ixv_initialize_receive_units(if_ctx_t ctx)
{
    struct adapter *adapter = iflib_get_softc(ctx);
    if_softc_ctx_t scctx;
    struct ixgbe_hw *hw = &adapter->hw;
    struct ifnet *ifp = iflib_get_ifp(ctx);
    struct ix_rx_queue *que = adapter->rx_queues;
    u32 bufsz, psrtype;

    /* Pick the receive buffer size based on MTU (SRRCTL units) */
    if (ifp->if_mtu > ETHERMTU)
        bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
    else
        bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;

    psrtype = IXGBE_PSRTYPE_TCPHDR
            | IXGBE_PSRTYPE_UDPHDR
            | IXGBE_PSRTYPE_IPV4HDR
            | IXGBE_PSRTYPE_IPV6HDR
            | IXGBE_PSRTYPE_L2HDR;

    if (adapter->num_rx_queues > 1)
        psrtype |= 1 << 29;

    IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);

    /* Tell PF our max_frame size */
    if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) {
        device_printf(adapter->dev, "There is a problem with the PF setup. It is likely the receive unit for this VF will not function correctly.\n");
    }
    scctx = adapter->shared;

    for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
        struct rx_ring *rxr = &que->rxr;
        u64 rdba = rxr->rx_paddr;
        u32 reg, rxdctl;
        int j = rxr->me;

        /* Disable the queue */
        rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
        rxdctl &= ~IXGBE_RXDCTL_ENABLE;
        IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
        /* Poll (up to 10ms) for the disable to take effect */
        for (int k = 0; k < 10; k++) {
            if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
                IXGBE_RXDCTL_ENABLE)
                msec_delay(1);
            else
                break;
        }
        wmb();
        /* Setup the Base and Length of the Rx Descriptor Ring */
        IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
            (rdba & 0x00000000ffffffffULL));
        IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
        IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j),
            scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));

        /* Reset the ring indices */
        IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
        IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);

        /* Set up the SRRCTL register */
        reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(j));
        reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
        reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
        reg |= bufsz;
        reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
        IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(j), reg);

        /* Capture Rx Tail index */
        rxr->tail = IXGBE_VFRDT(rxr->me);

        /* Do the queue enabling last */
        rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
        IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
        /* Poll (up to 10ms) for the enable to take effect */
        for (int l = 0; l < 10; l++) {
            if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
                IXGBE_RXDCTL_ENABLE)
                break;
            msec_delay(1);
        }
        wmb();

        /* Set the Tail Pointer */
#ifdef DEV_NETMAP
        /*
         * In netmap mode, we must preserve the buffers made
         * available to userspace before the if_init()
         * (this is true by default on the TX side, because
         * init makes all buffers available to userspace).
         *
         * netmap_reset() and the device specific routines
         * (e.g. ixgbe_setup_receive_rings()) map these
         * buffers at the end of the NIC ring, so here we
         * must set the RDT (tail) register to make sure
         * they are not overwritten.
         *
         * In this driver the NIC ring starts at RDH = 0,
         * RDT points to the last slot available for reception (?),
         * so RDT = num_rx_desc - 1 means the whole ring is available.
         */
        if (ifp->if_capenable & IFCAP_NETMAP) {
            struct netmap_adapter *na = NA(ifp);
            struct netmap_kring *kring = &na->rx_rings[j];
            int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

            IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
        } else
#endif /* DEV_NETMAP */
            IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
                scctx->isc_nrxd[0] - 1);
    }

    ixv_initialize_rss_mapping(adapter);
} /* ixv_initialize_receive_units */

/************************************************************************
 * ixv_setup_vlan_support - re-enable VLAN stripping and refill the VFTA
 ************************************************************************/
static void
ixv_setup_vlan_support(if_ctx_t ctx)
{
    struct adapter *adapter = iflib_get_softc(ctx);
    struct ixgbe_hw *hw = &adapter->hw;
    u32 ctrl, vid, vfta, retry;

    /*
     * We get here thru if_init, meaning
     * a soft reset, this has already cleared
     * the VFTA and other state, so if there
     * have been no vlan's registered do nothing.
     */
    if (adapter->num_vlans == 0)
        return;

    /* Enable VLAN stripping on every RX queue */
    for (int i = 0; i < adapter->num_rx_queues; i++) {
        ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
        ctrl |= IXGBE_RXDCTL_VME;
        IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
        /*
         * Let Rx path know that it needs to store VLAN tag
         * as part of extra mbuf info.
         */
        adapter->rx_queues[i].rxr.vtag_strip = TRUE;
    }

    /*
     * A soft reset zero's out the VFTA, so
     * we need to repopulate it now.
     */
    for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
        if (ixv_shadow_vfta[i] == 0)
            continue;
        vfta = ixv_shadow_vfta[i];
        /*
         * Reconstruct the vlan id's
         * based on the bits set in each
         * of the array ints.
         */
        for (int j = 0; j < 32; j++) {
            retry = 0;
            if ((vfta & (1 << j)) == 0)
                continue;
            vid = (i * 32) + j;
            /* Call the shared code mailbox routine; retry up to 5x */
            while (hw->mac.ops.set_vfta(hw, vid, 0, TRUE, FALSE)) {
                if (++retry > 5)
                    break;
            }
        }
    }
} /* ixv_setup_vlan_support */

/************************************************************************
 * ixv_if_register_vlan
 *
 * Run via a vlan config EVENT, it enables us to use the
 * HW Filter table since we can get the vlan id. This just
 * creates the entry in the soft version of the VFTA, init
 * will repopulate the real table.
 ************************************************************************/
static void
ixv_if_register_vlan(if_ctx_t ctx, u16 vtag)
{
    struct adapter *adapter = iflib_get_softc(ctx);
    u16 index, bit;

    /* 32 vlan ids per array word: word index, then bit within it */
    index = (vtag >> 5) & 0x7F;
    bit = vtag & 0x1F;
    ixv_shadow_vfta[index] |= (1 << bit);
    ++adapter->num_vlans;
} /* ixv_if_register_vlan */

/************************************************************************
 * ixv_if_unregister_vlan
 *
 * Run via a vlan unconfig EVENT, remove our entry
 * in the soft vfta.
 ************************************************************************/
static void
ixv_if_unregister_vlan(if_ctx_t ctx, u16 vtag)
{
    struct adapter *adapter = iflib_get_softc(ctx);
    u16 index, bit;

    /* 32 vlan ids per array word: word index, then bit within it */
    index = (vtag >> 5) & 0x7F;
    bit = vtag & 0x1F;
    ixv_shadow_vfta[index] &= ~(1 << bit);
    --adapter->num_vlans;
} /* ixv_if_unregister_vlan */

/************************************************************************
 * ixv_if_enable_intr - unmask mailbox and queue interrupts
 ************************************************************************/
static void
ixv_if_enable_intr(if_ctx_t ctx)
{
    struct adapter *adapter = iflib_get_softc(ctx);
    struct ixgbe_hw *hw = &adapter->hw;
    struct ix_rx_queue *que = adapter->rx_queues;
    u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

    IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);

    /* Auto-clear everything except the mailbox/link causes */
    mask = IXGBE_EIMS_ENABLE_MASK;
    mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
    IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);

    for (int i = 0; i < adapter->num_rx_queues; i++, que++)
        ixv_enable_queue(adapter, que->msix);

    IXGBE_WRITE_FLUSH(hw);
} /* ixv_if_enable_intr */

/************************************************************************
 * ixv_if_disable_intr - mask all interrupts
 ************************************************************************/
static void
ixv_if_disable_intr(if_ctx_t ctx)
{
    struct adapter *adapter = iflib_get_softc(ctx);
    IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
    IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
    IXGBE_WRITE_FLUSH(&adapter->hw);
} /* ixv_if_disable_intr */

/************************************************************************
 * ixv_if_rx_queue_intr_enable - re-arm one RX queue interrupt
 ************************************************************************/
static int
ixv_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
{
    struct adapter *adapter = iflib_get_softc(ctx);
    struct ix_rx_queue *que = &adapter->rx_queues[rxqid];

    ixv_enable_queue(adapter, que->rxr.me);

    return (0);
} /* ixv_if_rx_queue_intr_enable */

/************************************************************************
 * ixv_set_ivar
 *
 * Setup the correct IVAR register for a particular MSI-X interrupt
 *  - entry is the register array entry
 *  - vector is the MSI-X vector for this queue
 *  - type is RX/TX/MISC
 ************************************************************************/
static void
ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
{
    struct ixgbe_hw *hw = &adapter->hw;
    u32 ivar, index;

    vector |= IXGBE_IVAR_ALLOC_VAL;

    if (type == -1) { /* MISC IVAR */
        ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
        ivar &= ~0xFF;
        ivar |= vector;
        IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
    } else { /* RX/TX IVARS */
        /* Two entries per IVAR register, 8-bit field per type */
        index = (16 * (entry & 1)) + (8 * type);
        ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
        ivar &= ~(0xFF << index);
        ivar |= (vector << index);
        IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
    }
} /* ixv_set_ivar */

/************************************************************************
 * ixv_configure_ivars - map each queue pair and the mailbox to a vector
 ************************************************************************/
static void
ixv_configure_ivars(struct adapter *adapter)
{
    struct ix_rx_queue *que = adapter->rx_queues;

    MPASS(adapter->num_rx_queues == adapter->num_tx_queues);

    for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
        /* First the RX queue entry */
        ixv_set_ivar(adapter, i, que->msix, 0);
        /* ... and the TX */
        ixv_set_ivar(adapter, i, que->msix, 1);
        /* Set an initial value in EITR */
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
            IXGBE_EITR_DEFAULT);
    }

    /* For the mailbox interrupt */
    ixv_set_ivar(adapter, 1, adapter->vector, -1);
} /* ixv_configure_ivars */

/************************************************************************
 * ixv_save_stats
 *
 * The VF stats registers never have a truly virgin
 * starting point, so this routine tries to make an
 * artificial one, marking ground zero on attach as
 * it were.
 ************************************************************************/
static void
ixv_save_stats(struct adapter *adapter)
{
    if (adapter->stats.vf.vfgprc || adapter->stats.vf.vfgptc) {
        adapter->stats.vf.saved_reset_vfgprc +=
            adapter->stats.vf.vfgprc - adapter->stats.vf.base_vfgprc;
        adapter->stats.vf.saved_reset_vfgptc +=
            adapter->stats.vf.vfgptc - adapter->stats.vf.base_vfgptc;
        adapter->stats.vf.saved_reset_vfgorc +=
            adapter->stats.vf.vfgorc - adapter->stats.vf.base_vfgorc;
        adapter->stats.vf.saved_reset_vfgotc +=
            adapter->stats.vf.vfgotc - adapter->stats.vf.base_vfgotc;
        adapter->stats.vf.saved_reset_vfmprc +=
            adapter->stats.vf.vfmprc - adapter->stats.vf.base_vfmprc;
    }
} /* ixv_save_stats */

/************************************************************************
 * ixv_init_stats - snapshot the current HW counters as the baseline
 ************************************************************************/
static void
ixv_init_stats(struct adapter *adapter)
{
    struct ixgbe_hw *hw = &adapter->hw;

    adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
    adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
    adapter->stats.vf.last_vfgorc |=
        (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);

    adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
    adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
    adapter->stats.vf.last_vfgotc |=
        (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);

    adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

    adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
    adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
    adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
    adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
    adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
} /* ixv_init_stats */

/*
 * Fold a 32-bit rolling HW counter into a 64-bit soft counter,
 * detecting wraparound.  Both macros expect `hw` in scope.
 */
#define UPDATE_STAT_32(reg, last, count)                \
{                                                       \
	u32 current = IXGBE_READ_REG(hw, reg);          \
	if (current < last)                             \
		count += 0x100000000LL;                 \
	last = current;                                 \
	count &= 0xFFFFFFFF00000000LL;                  \
	count |= current;                               \
}

/* Same as above for the 36-bit (LSB+MSB register pair) counters */
#define UPDATE_STAT_36(lsb, msb, last, count)           \
{                                                       \
	u64 cur_lsb = IXGBE_READ_REG(hw, lsb);          \
	u64 cur_msb = IXGBE_READ_REG(hw, msb);          \
	u64 current = ((cur_msb << 32) | cur_lsb);      \
	if (current < last)                             \
		count += 0x1000000000LL;                \
	last = current;                                 \
	count &= 0xFFFFFFF000000000LL;                  \
	count |= current;                               \
}

/************************************************************************
 * ixv_update_stats - Update the board statistics counters.
1736 ************************************************************************/ 1737 void 1738 ixv_update_stats(struct adapter *adapter) 1739 { 1740 struct ixgbe_hw *hw = &adapter->hw; 1741 struct ixgbevf_hw_stats *stats = &adapter->stats.vf; 1742 1743 UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc, 1744 adapter->stats.vf.vfgprc); 1745 UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc, 1746 adapter->stats.vf.vfgptc); 1747 UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, 1748 adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc); 1749 UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, 1750 adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc); 1751 UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc, 1752 adapter->stats.vf.vfmprc); 1753 1754 /* Fill out the OS statistics structure */ 1755 IXGBE_SET_IPACKETS(adapter, stats->vfgprc); 1756 IXGBE_SET_OPACKETS(adapter, stats->vfgptc); 1757 IXGBE_SET_IBYTES(adapter, stats->vfgorc); 1758 IXGBE_SET_OBYTES(adapter, stats->vfgotc); 1759 IXGBE_SET_IMCASTS(adapter, stats->vfmprc); 1760 } /* ixv_update_stats */ 1761 1762 /************************************************************************ 1763 * ixv_add_stats_sysctls - Add statistic sysctls for the VF. 
 ************************************************************************/
static void
ixv_add_stats_sysctls(struct adapter *adapter)
{
    device_t dev = adapter->dev;
    struct ix_tx_queue *tx_que = adapter->tx_queues;
    struct ix_rx_queue *rx_que = adapter->rx_queues;
    struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
    struct sysctl_oid *tree = device_get_sysctl_tree(dev);
    struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
    struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
    struct sysctl_oid *stat_node, *queue_node;
    struct sysctl_oid_list *stat_list, *queue_list;

#define QUEUE_NAME_LEN 32
    char namebuf[QUEUE_NAME_LEN];

    /* Driver Statistics */
    SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
        CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
    SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
        CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");

    /* Per-TX-queue nodes: queueN.{tso_tx,tx_packets} */
    for (int i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
        struct tx_ring *txr = &tx_que->txr;
        snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
        queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
            CTLFLAG_RD, NULL, "Queue Name");
        queue_list = SYSCTL_CHILDREN(queue_node);

        SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
            CTLFLAG_RD, &(txr->tso_tx), "TSO Packets");
        SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
            CTLFLAG_RD, &(txr->total_packets), "TX Packets");
    }

    /* Per-RX-queue nodes: queueN.{irqs,rx_packets,rx_bytes,rx_discarded} */
    for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++) {
        struct rx_ring *rxr = &rx_que->rxr;
        snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
        queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
            CTLFLAG_RD, NULL, "Queue Name");
        queue_list = SYSCTL_CHILDREN(queue_node);

        SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
            CTLFLAG_RD, &(rx_que->irqs), "IRQs on queue");
        SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
            CTLFLAG_RD, &(rxr->rx_packets), "RX packets");
        SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
            CTLFLAG_RD, &(rxr->rx_bytes), "RX bytes");
        SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
            CTLFLAG_RD, &(rxr->rx_discarded), "Discarded RX packets");
    }

    /* MAC-level counters kept by ixv_update_stats() */
    stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
        CTLFLAG_RD, NULL, "VF Statistics (read from HW registers)");
    stat_list = SYSCTL_CHILDREN(stat_node);

    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
        CTLFLAG_RD, &stats->vfgprc, "Good Packets Received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
        CTLFLAG_RD, &stats->vfgorc, "Good Octets Received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
        CTLFLAG_RD, &stats->vfmprc, "Multicast Packets Received");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
        CTLFLAG_RD, &stats->vfgptc, "Good Packets Transmitted");
    SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
        CTLFLAG_RD, &stats->vfgotc, "Good Octets Transmitted");
} /* ixv_add_stats_sysctls */

/************************************************************************
 * ixv_print_debug_info
 *
 * Called only when the debug sysctl (ixv_sysctl_debug) is written.
 * Provides a way to take a look at important statistics
 * maintained by the driver and hardware.
1839 ************************************************************************/ 1840 static void 1841 ixv_print_debug_info(struct adapter *adapter) 1842 { 1843 device_t dev = adapter->dev; 1844 struct ixgbe_hw *hw = &adapter->hw; 1845 1846 device_printf(dev, "Error Byte Count = %u \n", 1847 IXGBE_READ_REG(hw, IXGBE_ERRBC)); 1848 1849 device_printf(dev, "MBX IRQ Handled: %lu\n", (long)adapter->link_irq); 1850 } /* ixv_print_debug_info */ 1851 1852 /************************************************************************ 1853 * ixv_sysctl_debug 1854 ************************************************************************/ 1855 static int 1856 ixv_sysctl_debug(SYSCTL_HANDLER_ARGS) 1857 { 1858 struct adapter *adapter; 1859 int error, result; 1860 1861 result = -1; 1862 error = sysctl_handle_int(oidp, &result, 0, req); 1863 1864 if (error || !req->newptr) 1865 return (error); 1866 1867 if (result == 1) { 1868 adapter = (struct adapter *)arg1; 1869 ixv_print_debug_info(adapter); 1870 } 1871 1872 return error; 1873 } /* ixv_sysctl_debug */ 1874 1875 /************************************************************************ 1876 * ixv_init_device_features 1877 ************************************************************************/ 1878 static void 1879 ixv_init_device_features(struct adapter *adapter) 1880 { 1881 adapter->feat_cap = IXGBE_FEATURE_NETMAP 1882 | IXGBE_FEATURE_VF 1883 | IXGBE_FEATURE_RSS 1884 | IXGBE_FEATURE_LEGACY_TX; 1885 1886 /* A tad short on feature flags for VFs, atm. */ 1887 switch (adapter->hw.mac.type) { 1888 case ixgbe_mac_82599_vf: 1889 break; 1890 case ixgbe_mac_X540_vf: 1891 break; 1892 case ixgbe_mac_X550_vf: 1893 case ixgbe_mac_X550EM_x_vf: 1894 case ixgbe_mac_X550EM_a_vf: 1895 adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD; 1896 break; 1897 default: 1898 break; 1899 } 1900 1901 /* Enabled by default... 
*/ 1902 /* Is a virtual function (VF) */ 1903 if (adapter->feat_cap & IXGBE_FEATURE_VF) 1904 adapter->feat_en |= IXGBE_FEATURE_VF; 1905 /* Netmap */ 1906 if (adapter->feat_cap & IXGBE_FEATURE_NETMAP) 1907 adapter->feat_en |= IXGBE_FEATURE_NETMAP; 1908 /* Receive-Side Scaling (RSS) */ 1909 if (adapter->feat_cap & IXGBE_FEATURE_RSS) 1910 adapter->feat_en |= IXGBE_FEATURE_RSS; 1911 /* Needs advanced context descriptor regardless of offloads req'd */ 1912 if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD) 1913 adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD; 1914 } /* ixv_init_device_features */ 1915 1916