1 /* SPDX-License-Identifier: BSD-3-Clause */ 2 /* Copyright (c) 2024, Intel Corporation 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are met: 7 * 8 * 1. Redistributions of source code must retain the above copyright notice, 9 * this list of conditions and the following disclaimer. 10 * 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * 3. Neither the name of the Intel Corporation nor the names of its 16 * contributors may be used to endorse or promote products derived from 17 * this software without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 */ 31 32 /** 33 * @file if_iavf_iflib.c 34 * @brief iflib driver implementation 35 * 36 * Contains the main entry point for the iflib driver implementation. It 37 * implements the various ifdi driver methods, and sets up the module and 38 * driver values to load an iflib driver. 
 */

#include "iavf_iflib.h"
#include "iavf_vc_common.h"

#include "iavf_drv_info.h"
#include "iavf_sysctls_iflib.h"

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static void	*iavf_register(device_t dev);
static int	 iavf_if_attach_pre(if_ctx_t ctx);
static int	 iavf_if_attach_post(if_ctx_t ctx);
static int	 iavf_if_detach(if_ctx_t ctx);
static int	 iavf_if_shutdown(if_ctx_t ctx);
static int	 iavf_if_suspend(if_ctx_t ctx);
static int	 iavf_if_resume(if_ctx_t ctx);
static int	 iavf_if_msix_intr_assign(if_ctx_t ctx, int msix);
static void	 iavf_if_enable_intr(if_ctx_t ctx);
static void	 iavf_if_disable_intr(if_ctx_t ctx);
static int	 iavf_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid);
static int	 iavf_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid);
static int	 iavf_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets);
static int	 iavf_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
static void	 iavf_if_queues_free(if_ctx_t ctx);
static void	 iavf_if_update_admin_status(if_ctx_t ctx);
static void	 iavf_if_multi_set(if_ctx_t ctx);
static int	 iavf_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
static void	 iavf_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr);
static int	 iavf_if_media_change(if_ctx_t ctx);
static int	 iavf_if_promisc_set(if_ctx_t ctx, int flags);
static void	 iavf_if_timer(if_ctx_t ctx, uint16_t qid);
static void	 iavf_if_vlan_register(if_ctx_t ctx, u16 vtag);
static void	 iavf_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
static uint64_t	 iavf_if_get_counter(if_ctx_t ctx, ift_counter cnt);
static void	 iavf_if_init(if_ctx_t ctx);
static void	 iavf_if_stop(if_ctx_t ctx);
static bool	 iavf_if_needs_restart(if_ctx_t, enum iflib_restart_event);

/* Internal helpers used during attach/detach and runtime operation */
static int	iavf_allocate_pci_resources(struct iavf_sc *);
static void	iavf_free_pci_resources(struct iavf_sc *);
static void	iavf_setup_interface(struct iavf_sc *);
static void	iavf_add_device_sysctls(struct iavf_sc *);
static void	iavf_enable_queue_irq(struct iavf_hw *, int);
static void	iavf_disable_queue_irq(struct iavf_hw *, int);
static void	iavf_stop(struct iavf_sc *);

static int	iavf_del_mac_filter(struct iavf_sc *sc, u8 *macaddr);
static int	iavf_msix_que(void *);
static int	iavf_msix_adminq(void *);
static void	iavf_configure_itr(struct iavf_sc *sc);

static int	iavf_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS);
#ifdef IAVF_DEBUG
static int	iavf_sysctl_vf_reset(SYSCTL_HANDLER_ARGS);
static int	iavf_sysctl_vflr_reset(SYSCTL_HANDLER_ARGS);
#endif

static enum iavf_status iavf_process_adminq(struct iavf_sc *, u16 *);
static void	iavf_vc_task(void *arg, int pending __unused);
static int	iavf_setup_vc_tq(struct iavf_sc *sc);
static int	iavf_vc_sleep_wait(struct iavf_sc *sc, u32 op);

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

/**
 * @var iavf_methods
 * @brief device methods for the iavf driver
 *
 * Device method callbacks used to interact with the driver. For iflib this
 * primarily resolves to the default iflib implementations.
 */
static device_method_t iavf_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, iavf_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
	DEVMETHOD_END
};

static driver_t iavf_driver = {
	"iavf", iavf_methods, sizeof(struct iavf_sc),
};

DRIVER_MODULE(iavf, pci, iavf_driver, 0, 0);
MODULE_VERSION(iavf, 1);

MODULE_DEPEND(iavf, pci, 1, 1, 1);
MODULE_DEPEND(iavf, ether, 1, 1, 1);
MODULE_DEPEND(iavf, iflib, 1, 1, 1);

IFLIB_PNP_INFO(pci, iavf, iavf_vendor_info_array);

/**
 * @var M_IAVF
 * @brief main iavf driver allocation type
 *
 * malloc(9) allocation type used by the majority of memory allocations in the
 * iavf iflib driver.
 */
MALLOC_DEFINE(M_IAVF, "iavf", "iavf driver allocations");

/* iflib device-interface (ifdi) method table */
static device_method_t iavf_if_methods[] = {
	DEVMETHOD(ifdi_attach_pre, iavf_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, iavf_if_attach_post),
	DEVMETHOD(ifdi_detach, iavf_if_detach),
	DEVMETHOD(ifdi_shutdown, iavf_if_shutdown),
	DEVMETHOD(ifdi_suspend, iavf_if_suspend),
	DEVMETHOD(ifdi_resume, iavf_if_resume),
	DEVMETHOD(ifdi_init, iavf_if_init),
	DEVMETHOD(ifdi_stop, iavf_if_stop),
	DEVMETHOD(ifdi_msix_intr_assign, iavf_if_msix_intr_assign),
	DEVMETHOD(ifdi_intr_enable, iavf_if_enable_intr),
	DEVMETHOD(ifdi_intr_disable, iavf_if_disable_intr),
	DEVMETHOD(ifdi_rx_queue_intr_enable, iavf_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queue_intr_enable, iavf_if_tx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queues_alloc, iavf_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, iavf_if_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, iavf_if_queues_free),
	DEVMETHOD(ifdi_update_admin_status, iavf_if_update_admin_status),
	DEVMETHOD(ifdi_multi_set, iavf_if_multi_set),
	DEVMETHOD(ifdi_mtu_set, iavf_if_mtu_set),
	DEVMETHOD(ifdi_media_status, iavf_if_media_status),
	DEVMETHOD(ifdi_media_change, iavf_if_media_change),
	DEVMETHOD(ifdi_promisc_set, iavf_if_promisc_set),
	DEVMETHOD(ifdi_timer, iavf_if_timer),
	DEVMETHOD(ifdi_vlan_register, iavf_if_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister, iavf_if_vlan_unregister),
	DEVMETHOD(ifdi_get_counter, iavf_if_get_counter),
	DEVMETHOD(ifdi_needs_restart, iavf_if_needs_restart),
	DEVMETHOD_END
};

static driver_t iavf_if_driver = {
	"iavf_if", iavf_if_methods, sizeof(struct iavf_sc)
};

/* Tx/Rx descriptor handling implementations: head write-back vs
 * descriptor write-back; selected at attach time. Defined elsewhere. */
extern struct if_txrx iavf_txrx_hwb;
extern struct if_txrx iavf_txrx_dwb;

/* Shared context describing device limits and features to iflib */
static struct if_shared_ctx iavf_sctx = {
	.isc_magic = IFLIB_MAGIC,
	.isc_q_align = PAGE_SIZE,
	.isc_tx_maxsize = IAVF_MAX_FRAME,
	.isc_tx_maxsegsize = IAVF_MAX_FRAME,
	.isc_tso_maxsize = IAVF_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = IAVF_MAX_DMA_SEG_SIZE,
	.isc_rx_maxsize = IAVF_MAX_FRAME,
	.isc_rx_nsegments = IAVF_MAX_RX_SEGS,
	.isc_rx_maxsegsize = IAVF_MAX_FRAME,
	.isc_nfl = 1,
	.isc_ntxqs = 1,
	.isc_nrxqs = 1,

	.isc_admin_intrcnt = 1,
	.isc_vendor_info = iavf_vendor_info_array,
	.isc_driver_version = __DECONST(char *, iavf_driver_version),
	.isc_driver = &iavf_if_driver,
	.isc_flags = IFLIB_NEED_SCRATCH | IFLIB_NEED_ZERO_CSUM | IFLIB_TSO_INIT_IP | IFLIB_IS_VF,

	.isc_nrxd_min = {IAVF_MIN_RING},
	.isc_ntxd_min = {IAVF_MIN_RING},
	.isc_nrxd_max = {IAVF_MAX_RING},
	.isc_ntxd_max = {IAVF_MAX_RING},
	.isc_nrxd_default = {IAVF_DEFAULT_RING},
	.isc_ntxd_default = {IAVF_DEFAULT_RING},
};

/*** Functions ***/

/**
 * iavf_register - iflib callback to obtain the shared context pointer
 * @dev: the device being registered
 *
 * Called when the device is first being attached to the driver.
This function 219 * is used by iflib to obtain a pointer to the shared context structure which 220 * describes the device features. 221 * 222 * @returns a pointer to the iavf shared context structure. 223 */ 224 static void * 225 iavf_register(device_t dev __unused) 226 { 227 return (&iavf_sctx); 228 } 229 230 /** 231 * iavf_allocate_pci_resources - Allocate PCI resources 232 * @sc: the device private softc 233 * 234 * Allocate PCI resources used by the iflib driver. 235 * 236 * @returns zero or a non-zero error code on failure 237 */ 238 static int 239 iavf_allocate_pci_resources(struct iavf_sc *sc) 240 { 241 return iavf_allocate_pci_resources_common(sc); 242 } 243 244 /** 245 * iavf_if_attach_pre - Begin attaching the device to the driver 246 * @ctx: the iflib context pointer 247 * 248 * Called by iflib to begin the attach process. Allocates resources and 249 * initializes the hardware for operation. 250 * 251 * @returns zero or a non-zero error code on failure. 252 */ 253 static int 254 iavf_if_attach_pre(if_ctx_t ctx) 255 { 256 device_t dev; 257 struct iavf_sc *sc; 258 struct iavf_hw *hw; 259 struct iavf_vsi *vsi; 260 if_softc_ctx_t scctx; 261 int error = 0; 262 263 /* Setup pointers */ 264 dev = iflib_get_dev(ctx); 265 sc = iavf_sc_from_ctx(ctx); 266 267 vsi = &sc->vsi; 268 vsi->back = sc; 269 sc->dev = sc->osdep.dev = dev; 270 hw = &sc->hw; 271 272 vsi->dev = dev; 273 vsi->hw = &sc->hw; 274 vsi->num_vlans = 0; 275 vsi->ctx = ctx; 276 sc->media = iflib_get_media(ctx); 277 vsi->ifp = iflib_get_ifp(ctx); 278 vsi->shared = scctx = iflib_get_softc_ctx(ctx); 279 280 iavf_save_tunables(sc); 281 282 /* Setup VC mutex */ 283 snprintf(sc->vc_mtx_name, sizeof(sc->vc_mtx_name), 284 "%s:vc", device_get_nameunit(dev)); 285 mtx_init(&sc->vc_mtx, sc->vc_mtx_name, NULL, MTX_DEF); 286 287 /* Do PCI setup - map BAR0, etc */ 288 error = iavf_allocate_pci_resources(sc); 289 if (error) { 290 device_printf(dev, "%s: Allocation of PCI resources failed\n", 291 __func__); 292 goto 
err_early; 293 } 294 295 iavf_dbg_init(sc, "Allocated PCI resources and MSI-X vectors\n"); 296 297 error = iavf_set_mac_type(hw); 298 if (error) { 299 device_printf(dev, "%s: set_mac_type failed: %d\n", 300 __func__, error); 301 goto err_pci_res; 302 } 303 304 error = iavf_reset_complete(hw); 305 if (error) { 306 device_printf(dev, "%s: Device is still being reset\n", 307 __func__); 308 goto err_pci_res; 309 } 310 311 iavf_dbg_init(sc, "VF Device is ready for configuration\n"); 312 313 /* Sets up Admin Queue */ 314 error = iavf_setup_vc(sc); 315 if (error) { 316 device_printf(dev, "%s: Error setting up PF comms, %d\n", 317 __func__, error); 318 goto err_pci_res; 319 } 320 321 iavf_dbg_init(sc, "PF API version verified\n"); 322 323 /* Need API version before sending reset message */ 324 error = iavf_reset(sc); 325 if (error) { 326 device_printf(dev, "VF reset failed; reload the driver\n"); 327 goto err_aq; 328 } 329 330 iavf_dbg_init(sc, "VF reset complete\n"); 331 332 /* Ask for VF config from PF */ 333 error = iavf_vf_config(sc); 334 if (error) { 335 device_printf(dev, "Error getting configuration from PF: %d\n", 336 error); 337 goto err_aq; 338 } 339 340 iavf_print_device_info(sc); 341 342 error = iavf_get_vsi_res_from_vf_res(sc); 343 if (error) 344 goto err_res_buf; 345 346 iavf_dbg_init(sc, "Resource Acquisition complete\n"); 347 348 /* Setup taskqueue to service VC messages */ 349 error = iavf_setup_vc_tq(sc); 350 if (error) 351 goto err_vc_tq; 352 353 iavf_set_mac_addresses(sc); 354 iflib_set_mac(ctx, hw->mac.addr); 355 356 /* Allocate filter lists */ 357 iavf_init_filters(sc); 358 359 /* Fill out more iflib parameters */ 360 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 361 sc->vsi_res->num_queue_pairs; 362 if (vsi->enable_head_writeback) { 363 scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0] 364 * sizeof(struct iavf_tx_desc) + sizeof(u32), DBA_ALIGN); 365 scctx->isc_txrx = &iavf_txrx_hwb; 366 } else { 367 scctx->isc_txqsizes[0] = 
roundup2(scctx->isc_ntxd[0] 368 * sizeof(struct iavf_tx_desc), DBA_ALIGN); 369 scctx->isc_txrx = &iavf_txrx_dwb; 370 } 371 scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0] 372 * sizeof(union iavf_32byte_rx_desc), DBA_ALIGN); 373 scctx->isc_msix_bar = pci_msix_table_bar(dev); 374 scctx->isc_tx_nsegments = IAVF_MAX_TX_SEGS; 375 scctx->isc_tx_tso_segments_max = IAVF_MAX_TSO_SEGS; 376 scctx->isc_tx_tso_size_max = IAVF_TSO_SIZE; 377 scctx->isc_tx_tso_segsize_max = IAVF_MAX_DMA_SEG_SIZE; 378 scctx->isc_rss_table_size = IAVF_RSS_VSI_LUT_SIZE; 379 scctx->isc_capabilities = scctx->isc_capenable = IAVF_CAPS; 380 scctx->isc_tx_csum_flags = CSUM_OFFLOAD; 381 382 return (0); 383 384 err_vc_tq: 385 taskqueue_free(sc->vc_tq); 386 err_res_buf: 387 free(sc->vf_res, M_IAVF); 388 err_aq: 389 iavf_shutdown_adminq(hw); 390 err_pci_res: 391 iavf_free_pci_resources(sc); 392 err_early: 393 IAVF_VC_LOCK_DESTROY(sc); 394 return (error); 395 } 396 397 /** 398 * iavf_vc_task - task used to process VC messages 399 * @arg: device softc 400 * @pending: unused 401 * 402 * Processes the admin queue, in order to process the virtual 403 * channel messages received from the PF. 404 */ 405 static void 406 iavf_vc_task(void *arg, int pending __unused) 407 { 408 struct iavf_sc *sc = (struct iavf_sc *)arg; 409 u16 var; 410 411 iavf_process_adminq(sc, &var); 412 } 413 414 /** 415 * iavf_setup_vc_tq - Setup task queues 416 * @sc: device softc 417 * 418 * Create taskqueue and tasklet for processing virtual channel messages. This 419 * is done in a separate non-iflib taskqueue so that the iflib context lock 420 * does not need to be held for VC messages to be processed. 421 * 422 * @returns zero on success, or an error code on failure. 
 */
static int
iavf_setup_vc_tq(struct iavf_sc *sc)
{
	device_t dev = sc->dev;
	int error = 0;

	TASK_INIT(&sc->vc_task, 0, iavf_vc_task, sc);

	/* The queue pointer doubles as the enqueue context so the
	 * thread-enqueue helper can find the taskqueue. */
	sc->vc_tq = taskqueue_create_fast("iavf_vc", M_NOWAIT,
	    taskqueue_thread_enqueue, &sc->vc_tq);
	if (!sc->vc_tq) {
		/* sc->vc_tq is NULL here; nothing to clean up */
		device_printf(dev, "taskqueue_create_fast (for VC task) returned NULL!\n");
		return (ENOMEM);
	}
	error = taskqueue_start_threads(&sc->vc_tq, 1, PI_NET, "%s vc",
	    device_get_nameunit(dev));
	if (error) {
		device_printf(dev, "taskqueue_start_threads (for VC task) error: %d\n",
		    error);
		/* This function owns cleanup of the taskqueue it created;
		 * callers must not free sc->vc_tq after a failure here. */
		taskqueue_free(sc->vc_tq);
		return (error);
	}

	return (error);
}

/**
 * iavf_if_attach_post - Finish attaching the device to the driver
 * @ctx: the iflib context pointer
 *
 * Called by iflib after it has setup queues and interrupts. Used to finish up
 * the attach process for a device. Attach logic which must occur after Tx and
 * Rx queues are setup belongs here.
 *
 * @returns zero or a non-zero error code on failure
 */
static int
iavf_if_attach_post(if_ctx_t ctx)
{
/* NOTE(review): guard uses IXL_DEBUG while the rest of this file uses
 * IAVF_DEBUG — likely a leftover from the ixl driver; confirm that
 * INIT_DBG_DEV is also compiled under IXL_DEBUG before changing it. */
#ifdef IXL_DEBUG
	device_t dev = iflib_get_dev(ctx);
#endif
	struct iavf_sc *sc;
	struct iavf_hw *hw;
	struct iavf_vsi *vsi;
	int error = 0;

	INIT_DBG_DEV(dev, "begin");

	sc = iavf_sc_from_ctx(ctx);
	vsi = &sc->vsi;
	hw = &sc->hw;

	/* Save off determined number of queues for interface */
	vsi->num_rx_queues = vsi->shared->isc_nrxqsets;
	vsi->num_tx_queues = vsi->shared->isc_ntxqsets;

	/* Setup the stack interface */
	iavf_setup_interface(sc);

	iavf_dbg_init(sc, "Interface setup complete\n");

	/* Initialize statistics & add sysctls */
	bzero(&sc->vsi.eth_stats, sizeof(struct iavf_eth_stats));
	iavf_add_device_sysctls(sc);

	atomic_store_rel_32(&sc->queues_enabled, 0);
	iavf_set_state(&sc->state, IAVF_STATE_INITIALIZED);

	/* We want AQ enabled early for init */
	iavf_enable_adminq_irq(hw);

	INIT_DBG_DEV(dev, "end");

	return (error);
}

/**
 * iavf_if_detach - Detach a device from the driver
 * @ctx: the iflib context of the device to detach
 *
 * Called by iflib to detach a given device from the driver. Clean up any
 * resources associated with the driver and shut the device down.
 *
 * @remark iflib always ignores the return value of IFDI_DETACH, so this
 * function is effectively not allowed to fail. Instead, it should clean up
 * and release as much as possible even if something goes wrong.
 *
 * @returns zero
 */
static int
iavf_if_detach(if_ctx_t ctx)
{
	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
	struct iavf_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	enum iavf_status status;

	INIT_DBG_DEV(dev, "begin");

	/* Mark the device as no longer initialized before tearing down */
	iavf_clear_state(&sc->state, IAVF_STATE_INITIALIZED);

	/* Drain admin queue taskqueue */
	taskqueue_free(sc->vc_tq);
	IAVF_VC_LOCK_DESTROY(sc);

	/* Remove all the media and link information */
	ifmedia_removeall(sc->media);

	/* Quiesce and shut down the admin queue; a failure here is only
	 * reported, since detach must proceed regardless. */
	iavf_disable_adminq_irq(hw);
	status = iavf_shutdown_adminq(&sc->hw);
	if (status != IAVF_SUCCESS) {
		device_printf(dev,
		    "iavf_shutdown_adminq() failed with status %s\n",
		    iavf_stat_str(hw, status));
	}

	free(sc->vf_res, M_IAVF);
	sc->vf_res = NULL;
	iavf_free_pci_resources(sc);
	iavf_free_filters(sc);

	INIT_DBG_DEV(dev, "end");
	return (0);
}

/**
 * iavf_if_shutdown - called by iflib to handle shutdown
 * @ctx: the iflib context pointer
 *
 * Callback for the IFDI_SHUTDOWN iflib function.
 *
 * @returns zero or an error code on failure
 */
static int
iavf_if_shutdown(if_ctx_t ctx __unused)
{
	return (0);
}

/**
 * iavf_if_suspend - called by iflib to handle suspend
 * @ctx: the iflib context pointer
 *
 * Callback for the IFDI_SUSPEND iflib function.
 *
 * @returns zero or an error code on failure
 */
static int
iavf_if_suspend(if_ctx_t ctx __unused)
{
	return (0);
}

/**
 * iavf_if_resume - called by iflib to handle resume
 * @ctx: the iflib context pointer
 *
 * Callback for the IFDI_RESUME iflib function.
 *
 * @returns zero or an error code on failure
 */
static int
iavf_if_resume(if_ctx_t ctx __unused)
{
	return (0);
}

/**
 * iavf_vc_sleep_wait - Sleep for a response from a VC message
 * @sc: device softc
 * @op: the op code to sleep on
 *
 * Sleep until a response from the PF for the VC message sent by the
 * given op.
 *
 * @returns zero on success, or EWOULDBLOCK if the sleep times out.
 */
static int
iavf_vc_sleep_wait(struct iavf_sc *sc, u32 op)
{
	int error = 0;

	/* Caller must hold the VC mutex; mtx_sleep drops and
	 * re-acquires it around the sleep. */
	IAVF_VC_LOCK_ASSERT(sc);

	iavf_dbg_vc(sc, "Sleeping for op %b\n", op, IAVF_FLAGS);

	error = mtx_sleep(iavf_vc_get_op_chan(sc, op),
	    &sc->vc_mtx, PRI_MAX, "iavf_vc", IAVF_AQ_TIMEOUT);

	return (error);
}

/**
 * iavf_send_vc_msg_sleep - Send a virtchnl message and wait for a response
 * @sc: device softc
 * @op: the op code to send
 *
 * Send a virtchnl message to the PF, and sleep or busy wait for a response
 * from the PF, depending on iflib context lock type.
 *
 * @remark this function does not wait if the device is detaching, on kernels
 * that support indicating to the driver that the device is detaching
 *
 * @returns zero or an error code on failure.
 */
int
iavf_send_vc_msg_sleep(struct iavf_sc *sc, u32 op)
{
	if_ctx_t ctx = sc->vsi.ctx;
	int error = 0;

	IAVF_VC_LOCK(sc);
	error = iavf_vc_send_cmd(sc, op);
	if (error != 0) {
		iavf_dbg_vc(sc, "Error sending %b: %d\n", op, IAVF_FLAGS, error);
		goto release_lock;
	}

	/* Don't wait for a response if the device is being detached. */
	if (!iflib_in_detach(ctx)) {
		error = iavf_vc_sleep_wait(sc, op);
		IAVF_VC_LOCK_ASSERT(sc);

		if (error == EWOULDBLOCK)
			device_printf(sc->dev, "%b timed out\n", op, IAVF_FLAGS);
	}
release_lock:
	IAVF_VC_UNLOCK(sc);
	return (error);
}

/**
 * iavf_send_vc_msg - Send a virtchnl message to the PF
 * @sc: device softc
 * @op: the op code to send
 *
 * Send a virtchnl message to the PF and do not wait for a response.
 *
 * @returns zero on success, or an error code on failure.
 */
int
iavf_send_vc_msg(struct iavf_sc *sc, u32 op)
{
	int error = 0;

	error = iavf_vc_send_cmd(sc, op);
	if (error != 0)
		iavf_dbg_vc(sc, "Error sending %b: %d\n", op, IAVF_FLAGS, error);

	return (error);
}

/**
 * iavf_init_queues - initialize Tx and Rx queues
 * @vsi: the VSI to initialize
 *
 * Refresh the Tx and Rx ring contents and update the tail pointers for each
 * queue.
 */
static void
iavf_init_queues(struct iavf_vsi *vsi)
{
	struct iavf_tx_queue *tx_que = vsi->tx_queues;
	struct iavf_rx_queue *rx_que = vsi->rx_queues;
	struct rx_ring *rxr;
	uint32_t mbuf_sz;

	/* rxr->mbuf_sz is a 16-bit field; iflib's value must fit */
	mbuf_sz = iflib_get_rx_mbuf_sz(vsi->ctx);
	MPASS(mbuf_sz <= UINT16_MAX);

	for (int i = 0; i < vsi->num_tx_queues; i++, tx_que++)
		iavf_init_tx_ring(vsi, tx_que);

	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) {
		rxr = &rx_que->rxr;

		rxr->mbuf_sz = mbuf_sz;
		/* Reset the hardware tail pointer for this Rx ring */
		wr32(vsi->hw, rxr->tail, 0);
	}
}

/**
 * iavf_if_init - Initialize device for operation
 * @ctx: the iflib context pointer
 *
 * Initializes a device for operation. Called by iflib in response to an
 * interface up event from the stack.
 *
 * @remark this function does not return a value and thus cannot indicate
 * failure to initialize.
 */
static void
iavf_if_init(if_ctx_t ctx)
{
	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
	struct iavf_vsi *vsi = &sc->vsi;
	struct iavf_hw *hw = &sc->hw;
	if_t ifp = iflib_get_ifp(ctx);
	u8 tmpaddr[ETHER_ADDR_LEN];
	enum iavf_status status;
	device_t dev = sc->dev;
	int error = 0;

	INIT_DBG_IF(ifp, "begin");

	/* iflib must be holding the context lock exclusively here */
	sx_assert(iflib_ctx_lock_get(ctx), SA_XLOCKED);

	/* A reset failure is only logged; init continues best-effort */
	error = iavf_reset_complete(hw);
	if (error) {
		device_printf(sc->dev, "%s: VF reset failed\n",
		    __func__);
	}

	if (!iavf_check_asq_alive(hw)) {
		/* The AQ died (e.g. across a reset); bring it back up */
		iavf_dbg_info(sc, "ASQ is not alive, re-initializing AQ\n");
		pci_enable_busmaster(dev);

		status = iavf_shutdown_adminq(hw);
		if (status != IAVF_SUCCESS) {
			device_printf(dev,
			    "%s: iavf_shutdown_adminq failed: %s\n",
			    __func__, iavf_stat_str(hw, status));
			return;
		}

		status = iavf_init_adminq(hw);
		if (status != IAVF_SUCCESS) {
			device_printf(dev,
			    "%s: iavf_init_adminq failed: %s\n",
			    __func__, iavf_stat_str(hw, status));
			return;
		}
	}

	/* Make sure queues are disabled */
	iavf_disable_queues_with_retries(sc);

	/* If the stack assigned a new, valid MAC, swap the filter over to it */
	bcopy(if_getlladdr(ifp), tmpaddr, ETHER_ADDR_LEN);
	if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
	    (iavf_validate_mac_addr(tmpaddr) == IAVF_SUCCESS)) {
		error = iavf_del_mac_filter(sc, hw->mac.addr);
		if (error == 0)
			iavf_send_vc_msg(sc, IAVF_FLAG_AQ_DEL_MAC_FILTER);

		bcopy(tmpaddr, hw->mac.addr, ETH_ALEN);
	}

	/* EEXIST means the filter is already present; still sync to the PF */
	error = iavf_add_mac_filter(sc, hw->mac.addr, 0);
	if (!error || error == EEXIST)
		iavf_send_vc_msg(sc, IAVF_FLAG_AQ_ADD_MAC_FILTER);
	iflib_set_mac(ctx, hw->mac.addr);

	/* Prepare the queues for operation */
	iavf_init_queues(vsi);

	/* Set initial ITR values */
	iavf_configure_itr(sc);

	iavf_send_vc_msg(sc, IAVF_FLAG_AQ_CONFIGURE_QUEUES);

	/* Set up RSS */
	iavf_config_rss(sc);

	/* Map vectors */
	iavf_send_vc_msg(sc, IAVF_FLAG_AQ_MAP_VECTORS);

	/* Init SW TX ring indices */
	if (vsi->enable_head_writeback)
		iavf_init_tx_cidx(vsi);
	else
		iavf_init_tx_rsqs(vsi);

	/* Configure promiscuous mode */
	iavf_config_promisc(sc, if_getflags(ifp));

	/* Enable queues */
	iavf_send_vc_msg_sleep(sc, IAVF_FLAG_AQ_ENABLE_QUEUES);

	iavf_set_state(&sc->state, IAVF_STATE_RUNNING);
}

/**
 * iavf_if_msix_intr_assign - Assign MSI-X interrupts
 * @ctx: the iflib context pointer
 * @msix: the number of MSI-X vectors available
 *
 * Called by iflib to assign MSI-X interrupt vectors to queues. Assigns and
 * sets up vectors for each Tx and Rx queue, as well as the administrative
 * control interrupt.
 *
 * @returns zero or an error code on failure
 */
static int
iavf_if_msix_intr_assign(if_ctx_t ctx, int msix __unused)
{
	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
	struct iavf_vsi *vsi = &sc->vsi;
	struct iavf_rx_queue *rx_que = vsi->rx_queues;
	struct iavf_tx_queue *tx_que = vsi->tx_queues;
	int err, i, rid, vector = 0;
	char buf[16];

	MPASS(vsi->shared->isc_nrxqsets > 0);
	MPASS(vsi->shared->isc_ntxqsets > 0);

	/* Admin Que is vector 0*/
	rid = vector + 1;
	err = iflib_irq_alloc_generic(ctx, &vsi->irq, rid, IFLIB_INTR_ADMIN,
	    iavf_msix_adminq, sc, 0, "aq");
	if (err) {
		iflib_irq_free(ctx, &vsi->irq);
		device_printf(iflib_get_dev(ctx),
		    "Failed to register Admin Que handler");
		return (err);
	}

	/* Now set up the stations */
	for (i = 0, vector = 1; i < vsi->shared->isc_nrxqsets; i++, vector++, rx_que++) {
		rid = vector + 1;

		snprintf(buf, sizeof(buf), "rxq%d", i);
		err = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
		    IFLIB_INTR_RXTX, iavf_msix_que, rx_que, rx_que->rxr.me, buf);
		if (err) {
			device_printf(iflib_get_dev(ctx),
			    "Failed to allocate queue RX int vector %d, err: %d\n",
			    i, err);
			/* Record how far we got so the fail path below
			 * frees each irq slot touched so far (including
			 * this partially set-up one). */
			vsi->num_rx_queues = i + 1;
			goto fail;
		}
		rx_que->msix = vector;
	}

	bzero(buf, sizeof(buf));

	/* Tx queues piggyback on the Rx queue interrupts (softirqs),
	 * distributed round-robin across the Rx vectors. */
	for (i = 0; i < vsi->shared->isc_ntxqsets; i++, tx_que++) {
		snprintf(buf, sizeof(buf), "txq%d", i);
		iflib_softirq_alloc_generic(ctx,
		    &vsi->rx_queues[i % vsi->shared->isc_nrxqsets].que_irq,
		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);

		tx_que->msix = (i % vsi->shared->isc_nrxqsets) + 1;
	}

	return (0);
fail:
	iflib_irq_free(ctx, &vsi->irq);
	rx_que = vsi->rx_queues;
	for (i = 0; i < vsi->num_rx_queues; i++, rx_que++)
		iflib_irq_free(ctx, &rx_que->que_irq);
	return (err);
}

/**
 * iavf_if_enable_intr - Enable all interrupts for a device
 * @ctx: the iflib context pointer
 *
 * Called by iflib to request enabling all interrupts.
 */
static void
iavf_if_enable_intr(if_ctx_t ctx)
{
	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
	struct iavf_vsi *vsi = &sc->vsi;

	iavf_enable_intr(vsi);
}

/**
 * iavf_if_disable_intr - Disable all interrupts for a device
 * @ctx: the iflib context pointer
 *
 * Called by iflib to request disabling all interrupts.
 */
static void
iavf_if_disable_intr(if_ctx_t ctx)
{
	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
	struct iavf_vsi *vsi = &sc->vsi;

	iavf_disable_intr(vsi);
}

/**
 * iavf_if_rx_queue_intr_enable - Enable one Rx queue interrupt
 * @ctx: the iflib context pointer
 * @rxqid: Rx queue index
 *
 * Enables the interrupt associated with a specified Rx queue.
913 * 914 * @returns zero 915 */ 916 static int 917 iavf_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid) 918 { 919 struct iavf_sc *sc = iavf_sc_from_ctx(ctx); 920 struct iavf_vsi *vsi = &sc->vsi; 921 struct iavf_hw *hw = vsi->hw; 922 struct iavf_rx_queue *rx_que = &vsi->rx_queues[rxqid]; 923 924 iavf_enable_queue_irq(hw, rx_que->msix - 1); 925 return (0); 926 } 927 928 /** 929 * iavf_if_tx_queue_intr_enable - Enable one Tx queue interrupt 930 * @ctx: the iflib context pointer 931 * @txqid: Tx queue index 932 * 933 * Enables the interrupt associated with a specified Tx queue. 934 * 935 * @returns zero 936 */ 937 static int 938 iavf_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid) 939 { 940 struct iavf_sc *sc = iavf_sc_from_ctx(ctx); 941 struct iavf_vsi *vsi = &sc->vsi; 942 struct iavf_hw *hw = vsi->hw; 943 struct iavf_tx_queue *tx_que = &vsi->tx_queues[txqid]; 944 945 iavf_enable_queue_irq(hw, tx_que->msix - 1); 946 return (0); 947 } 948 949 /** 950 * iavf_if_tx_queues_alloc - Allocate Tx queue memory 951 * @ctx: the iflib context pointer 952 * @vaddrs: Array of virtual addresses 953 * @paddrs: Array of physical addresses 954 * @ntxqs: the number of Tx queues per group (should always be 1) 955 * @ntxqsets: the number of Tx queues 956 * 957 * Allocates memory for the specified number of Tx queues. This includes 958 * memory for the queue structures and the report status array for the queues. 959 * The virtual and physical addresses are saved for later use during 960 * initialization. 
961 * 962 * @returns zero or a non-zero error code on failure 963 */ 964 static int 965 iavf_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets) 966 { 967 struct iavf_sc *sc = iavf_sc_from_ctx(ctx); 968 struct iavf_vsi *vsi = &sc->vsi; 969 if_softc_ctx_t scctx = vsi->shared; 970 struct iavf_tx_queue *que; 971 int i, j, error = 0; 972 973 MPASS(scctx->isc_ntxqsets > 0); 974 MPASS(ntxqs == 1); 975 MPASS(scctx->isc_ntxqsets == ntxqsets); 976 977 /* Allocate queue structure memory */ 978 if (!(vsi->tx_queues = 979 (struct iavf_tx_queue *)malloc(sizeof(struct iavf_tx_queue) *ntxqsets, M_IAVF, M_NOWAIT | M_ZERO))) { 980 device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n"); 981 return (ENOMEM); 982 } 983 984 for (i = 0, que = vsi->tx_queues; i < ntxqsets; i++, que++) { 985 struct tx_ring *txr = &que->txr; 986 987 txr->me = i; 988 que->vsi = vsi; 989 990 if (!vsi->enable_head_writeback) { 991 /* Allocate report status array */ 992 if (!(txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IAVF, M_NOWAIT))) { 993 device_printf(iflib_get_dev(ctx), "failed to allocate tx_rsq memory\n"); 994 error = ENOMEM; 995 goto fail; 996 } 997 /* Init report status array */ 998 for (j = 0; j < scctx->isc_ntxd[0]; j++) 999 txr->tx_rsq[j] = QIDX_INVALID; 1000 } 1001 /* get the virtual and physical address of the hardware queues */ 1002 txr->tail = IAVF_QTX_TAIL1(txr->me); 1003 txr->tx_base = (struct iavf_tx_desc *)vaddrs[i * ntxqs]; 1004 txr->tx_paddr = paddrs[i * ntxqs]; 1005 txr->que = que; 1006 } 1007 1008 return (0); 1009 fail: 1010 iavf_if_queues_free(ctx); 1011 return (error); 1012 } 1013 1014 /** 1015 * iavf_if_rx_queues_alloc - Allocate Rx queue memory 1016 * @ctx: the iflib context pointer 1017 * @vaddrs: Array of virtual addresses 1018 * @paddrs: Array of physical addresses 1019 * @nrxqs: number of Rx queues per group (should always be 1) 1020 * @nrxqsets: the number of Rx queues to allocate 1021 * 1022 
 * Called by iflib to allocate driver memory for a number of Rx queues.
 * Allocates memory for the drivers private Rx queue data structure, and saves
 * the physical and virtual addresses for later use.
 *
 * @returns zero or a non-zero error code on failure
 */
static int
iavf_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets)
{
	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
	struct iavf_vsi *vsi = &sc->vsi;
	struct iavf_rx_queue *que;
	int i, error = 0;

#ifdef INVARIANTS
	/* scctx is only referenced by the assertions below */
	if_softc_ctx_t scctx = vsi->shared;
	MPASS(scctx->isc_nrxqsets > 0);
	MPASS(nrxqs == 1);
	MPASS(scctx->isc_nrxqsets == nrxqsets);
#endif

	/* Allocate queue structure memory */
	if (!(vsi->rx_queues =
	    (struct iavf_rx_queue *) malloc(sizeof(struct iavf_rx_queue) *
	    nrxqsets, M_IAVF, M_NOWAIT | M_ZERO))) {
		device_printf(iflib_get_dev(ctx), "Unable to allocate RX ring memory\n");
		error = ENOMEM;
		goto fail;
	}

	for (i = 0, que = vsi->rx_queues; i < nrxqsets; i++, que++) {
		struct rx_ring *rxr = &que->rxr;

		rxr->me = i;
		que->vsi = vsi;

		/* get the virtual and physical address of the hardware queues */
		rxr->tail = IAVF_QRX_TAIL1(rxr->me);
		rxr->rx_base = (union iavf_rx_desc *)vaddrs[i * nrxqs];
		rxr->rx_paddr = paddrs[i * nrxqs];
		rxr->que = que;
	}

	return (0);
fail:
	iavf_if_queues_free(ctx);
	return (error);
}

/**
 * iavf_if_queues_free - Free driver queue memory
 * @ctx: the iflib context pointer
 *
 * Called by iflib to release memory allocated by the driver when setting up
 * Tx and Rx queues.
 *
 * @remark The ordering of this function and iavf_if_detach is not guaranteed.
 * It is possible for this function to be called either before or after the
 * iavf_if_detach.
 * Thus, care must be taken to ensure that either ordering of
 * iavf_if_detach and iavf_if_queues_free is safe.
 */
static void
iavf_if_queues_free(if_ctx_t ctx)
{
	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
	struct iavf_vsi *vsi = &sc->vsi;

	/* Report status arrays only exist when head writeback is disabled */
	if (!vsi->enable_head_writeback) {
		struct iavf_tx_queue *que;
		int i = 0;

		for (i = 0, que = vsi->tx_queues; i < vsi->shared->isc_ntxqsets; i++, que++) {
			struct tx_ring *txr = &que->txr;
			if (txr->tx_rsq != NULL) {
				free(txr->tx_rsq, M_IAVF);
				txr->tx_rsq = NULL;
			}
		}
	}

	/* NULL the pointers after freeing so a repeated call is harmless */
	if (vsi->tx_queues != NULL) {
		free(vsi->tx_queues, M_IAVF);
		vsi->tx_queues = NULL;
	}
	if (vsi->rx_queues != NULL) {
		free(vsi->rx_queues, M_IAVF);
		vsi->rx_queues = NULL;
	}
}

/**
 * iavf_check_aq_errors - Check for AdminQ errors
 * @sc: device softc
 *
 * Check the AdminQ registers for errors, and determine whether or not a reset
 * may be required to resolve them.
 *
 * @post if there are errors, the VF device will be stopped and a reset will
 * be requested.
 *
 * @returns zero if there are no issues, EBUSY if the device is resetting,
 * or EIO if there are any AQ errors.
 */
static int
iavf_check_aq_errors(struct iavf_sc *sc)
{
	struct iavf_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	u32 reg, oldreg;
	u8 aq_error = false;

	oldreg = reg = rd32(hw, hw->aq.arq.len);

	/* Check if device is in reset */
	if (reg == 0xdeadbeef || reg == 0xffffffff) {
		device_printf(dev, "VF in reset\n");
		return (EBUSY);
	}

	/* Check for Admin (receive) queue errors; clear each error bit seen */
	if (reg & IAVF_VF_ARQLEN1_ARQVFE_MASK) {
		device_printf(dev, "ARQ VF Error detected\n");
		reg &= ~IAVF_VF_ARQLEN1_ARQVFE_MASK;
		aq_error = true;
	}
	if (reg & IAVF_VF_ARQLEN1_ARQOVFL_MASK) {
		device_printf(dev, "ARQ Overflow Error detected\n");
		reg &= ~IAVF_VF_ARQLEN1_ARQOVFL_MASK;
		aq_error = true;
	}
	if (reg & IAVF_VF_ARQLEN1_ARQCRIT_MASK) {
		device_printf(dev, "ARQ Critical Error detected\n");
		reg &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK;
		aq_error = true;
	}
	/* Only write the register back if we cleared any error bits */
	if (oldreg != reg)
		wr32(hw, hw->aq.arq.len, reg);

	/* Repeat the same checks for the Admin send queue */
	oldreg = reg = rd32(hw, hw->aq.asq.len);
	if (reg & IAVF_VF_ATQLEN1_ATQVFE_MASK) {
		device_printf(dev, "ASQ VF Error detected\n");
		reg &= ~IAVF_VF_ATQLEN1_ATQVFE_MASK;
		aq_error = true;
	}
	if (reg & IAVF_VF_ATQLEN1_ATQOVFL_MASK) {
		device_printf(dev, "ASQ Overflow Error detected\n");
		reg &= ~IAVF_VF_ATQLEN1_ATQOVFL_MASK;
		aq_error = true;
	}
	if (reg & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
		device_printf(dev, "ASQ Critical Error detected\n");
		reg &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK;
		aq_error = true;
	}
	if (oldreg != reg)
		wr32(hw, hw->aq.asq.len, reg);

	return (aq_error ? EIO : 0);
}

/**
 * iavf_process_adminq - Process adminq responses from the PF
 * @sc: device softc
 * @pending: output parameter indicating how many messages remain
 *
 * Process the adminq to handle replies from the PF over the virtchnl
 * connection.
 *
 * @returns zero or an iavf_status code on failure
 */
static enum iavf_status
iavf_process_adminq(struct iavf_sc *sc, u16 *pending)
{
	enum iavf_status status = IAVF_SUCCESS;
	struct iavf_arq_event_info event;
	struct iavf_hw *hw = &sc->hw;
	struct virtchnl_msg *v_msg;
	int error = 0, loop = 0;
	u32 reg;

	/* Do not touch the AQ while a reset is pending */
	if (iavf_test_state(&sc->state, IAVF_STATE_RESET_PENDING)) {
		status = IAVF_ERR_ADMIN_QUEUE_ERROR;
		goto reenable_interrupt;
	}

	/* Bail out (but still re-enable the interrupt) on AQ errors */
	error = iavf_check_aq_errors(sc);
	if (error) {
		status = IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
		goto reenable_interrupt;
	}

	event.buf_len = IAVF_AQ_BUF_SZ;
	event.msg_buf = sc->aq_buffer;
	bzero(event.msg_buf, IAVF_AQ_BUF_SZ);
	v_msg = (struct virtchnl_msg *)&event.desc;

	IAVF_VC_LOCK(sc);
	/* clean and process any events, bounded by IAVF_ADM_LIMIT */
	do {
		status = iavf_clean_arq_element(hw, &event, pending);
		/*
		 * Also covers normal case when iavf_clean_arq_element()
		 * returns "IAVF_ERR_ADMIN_QUEUE_NO_WORK"
		 */
		if (status)
			break;
		iavf_vc_completion(sc, v_msg->v_opcode,
		    v_msg->v_retval, event.msg_buf, event.msg_len);
		/* Clear the buffer before reading the next message into it */
		bzero(event.msg_buf, IAVF_AQ_BUF_SZ);
	} while (*pending && (loop++ < IAVF_ADM_LIMIT));
	IAVF_VC_UNLOCK(sc);

reenable_interrupt:
	/* Re-enable admin queue interrupt cause */
	reg = rd32(hw, IAVF_VFINT_ICR0_ENA1);
	reg |= IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK;
	wr32(hw, IAVF_VFINT_ICR0_ENA1, reg);

	return (status);
}

/**
 * iavf_if_update_admin_status - Administrative status task
 * @ctx: iflib context
 *
 * Called by iflib to handle administrative status events. The iavf driver
 * uses this to process the adminq virtchnl messages outside of interrupt
 * context.
1250 */ 1251 static void 1252 iavf_if_update_admin_status(if_ctx_t ctx) 1253 { 1254 struct iavf_sc *sc = iavf_sc_from_ctx(ctx); 1255 struct iavf_hw *hw = &sc->hw; 1256 u16 pending = 0; 1257 1258 iavf_process_adminq(sc, &pending); 1259 iavf_update_link_status(sc); 1260 1261 /* 1262 * If there are still messages to process, reschedule. 1263 * Otherwise, re-enable the Admin Queue interrupt. 1264 */ 1265 if (pending > 0) 1266 iflib_admin_intr_deferred(ctx); 1267 else 1268 iavf_enable_adminq_irq(hw); 1269 } 1270 1271 /** 1272 * iavf_if_multi_set - Set multicast address filters 1273 * @ctx: iflib context 1274 * 1275 * Called by iflib to update the current list of multicast filters for the 1276 * device. 1277 */ 1278 static void 1279 iavf_if_multi_set(if_ctx_t ctx) 1280 { 1281 struct iavf_sc *sc = iavf_sc_from_ctx(ctx); 1282 1283 iavf_multi_set(sc); 1284 } 1285 1286 /** 1287 * iavf_if_mtu_set - Set the device MTU 1288 * @ctx: iflib context 1289 * @mtu: MTU value to set 1290 * 1291 * Called by iflib to set the device MTU. 1292 * 1293 * @returns zero on success, or EINVAL if the MTU is invalid. 1294 */ 1295 static int 1296 iavf_if_mtu_set(if_ctx_t ctx, uint32_t mtu) 1297 { 1298 struct iavf_sc *sc = iavf_sc_from_ctx(ctx); 1299 struct iavf_vsi *vsi = &sc->vsi; 1300 1301 IOCTL_DEBUGOUT("ioctl: SiOCSIFMTU (Set Interface MTU)"); 1302 if (mtu < IAVF_MIN_MTU || mtu > IAVF_MAX_MTU) { 1303 device_printf(sc->dev, "mtu %d is not in valid range [%d-%d]\n", 1304 mtu, IAVF_MIN_MTU, IAVF_MAX_MTU); 1305 return (EINVAL); 1306 } 1307 1308 vsi->shared->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + 1309 ETHER_VLAN_ENCAP_LEN; 1310 1311 return (0); 1312 } 1313 1314 /** 1315 * iavf_if_media_status - Report current media status 1316 * @ctx: iflib context 1317 * @ifmr: ifmedia request structure 1318 * 1319 * Called by iflib to report the current media status in the ifmr. 
 */
static void
iavf_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
{
	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);

	iavf_media_status_common(sc, ifmr);
}

/**
 * iavf_if_media_change - Change the current media settings
 * @ctx: iflib context
 *
 * Called by iflib to change the current media settings.
 *
 * @returns zero on success, or an error code on failure.
 */
static int
iavf_if_media_change(if_ctx_t ctx)
{
	return iavf_media_change_common(iflib_get_ifp(ctx));
}

/**
 * iavf_if_promisc_set - Set device promiscuous mode
 * @ctx: iflib context
 * @flags: promiscuous configuration
 *
 * Called by iflib to request that the device enter promiscuous mode.
 *
 * @returns zero on success, or an error code on failure.
 */
static int
iavf_if_promisc_set(if_ctx_t ctx, int flags)
{
	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);

	return iavf_config_promisc(sc, flags);
}

/**
 * iavf_if_timer - Periodic timer called by iflib
 * @ctx: iflib context
 * @qid: The queue being triggered
 *
 * Called by iflib periodically as a timer task, so that the driver can handle
 * periodic work.
 *
 * @remark this timer is only called while the interface is up, even if
 * IFLIB_ADMIN_ALWAYS_RUN is set.
 */
static void
iavf_if_timer(if_ctx_t ctx, uint16_t qid)
{
	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
	struct iavf_hw *hw = &sc->hw;
	u32 val;

	/* Only run the periodic work once per tick, on queue 0 */
	if (qid != 0)
		return;

	/* Check for when PF triggers a VF reset */
	val = rd32(hw, IAVF_VFGEN_RSTAT) &
	    IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
	if (val != VIRTCHNL_VFR_VFACTIVE
	    && val != VIRTCHNL_VFR_COMPLETED) {
		iavf_dbg_info(sc, "reset in progress! (%d)\n", val);
		return;
	}

	/* Fire off the adminq task */
	iflib_admin_intr_deferred(ctx);

	/* Update stats */
	iavf_request_stats(sc);
}

/**
 * iavf_if_vlan_register - Register a VLAN
 * @ctx: iflib context
 * @vtag: the VLAN to register
 *
 * Register a VLAN filter for a given vtag.
 */
static void
iavf_if_vlan_register(if_ctx_t ctx, u16 vtag)
{
	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
	struct iavf_vsi *vsi = &sc->vsi;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	/* Add VLAN 0 to list, for untagged traffic */
	if (vsi->num_vlans == 0)
		iavf_add_vlan_filter(sc, 0);

	iavf_add_vlan_filter(sc, vtag);

	++vsi->num_vlans;

	iavf_send_vc_msg(sc, IAVF_FLAG_AQ_ADD_VLAN_FILTER);
}

/**
 * iavf_if_vlan_unregister - Unregister a VLAN
 * @ctx: iflib context
 * @vtag: the VLAN to remove
 *
 * Unregister (remove) a VLAN filter for the given vtag.
 */
static void
iavf_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
{
	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
	struct iavf_vsi *vsi = &sc->vsi;
	int i = 0;

	if ((vtag == 0) || (vtag > 4095) || (vsi->num_vlans == 0))	/* Invalid */
		return;

	/* i is the number of filters actually marked for deletion */
	i = iavf_mark_del_vlan_filter(sc, vtag);
	vsi->num_vlans -= i;

	/* Remove VLAN filter 0 if the last VLAN is being removed */
	if (vsi->num_vlans == 0)
		i += iavf_mark_del_vlan_filter(sc, 0);

	if (i > 0)
		iavf_send_vc_msg(sc, IAVF_FLAG_AQ_DEL_VLAN_FILTER);
}

/**
 * iavf_if_get_counter - Get network statistic counters
 * @ctx: iflib context
 * @cnt: The counter to obtain
 *
 * Called by iflib to obtain the value of the specified counter.
 *
 * @returns the uint64_t counter value.
 */
static uint64_t
iavf_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
	struct iavf_vsi *vsi = &sc->vsi;
	if_t ifp = iflib_get_ifp(ctx);

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (vsi->ipackets);
	case IFCOUNTER_IERRORS:
		return (vsi->ierrors);
	case IFCOUNTER_OPACKETS:
		return (vsi->opackets);
	case IFCOUNTER_OERRORS:
		return (vsi->oerrors);
	case IFCOUNTER_COLLISIONS:
		/* Collisions are by standard impossible in 40G/10G Ethernet */
		return (0);
	case IFCOUNTER_IBYTES:
		return (vsi->ibytes);
	case IFCOUNTER_OBYTES:
		return (vsi->obytes);
	case IFCOUNTER_IMCASTS:
		return (vsi->imcasts);
	case IFCOUNTER_OMCASTS:
		return (vsi->omcasts);
	case IFCOUNTER_IQDROPS:
		return (vsi->iqdrops);
	case IFCOUNTER_OQDROPS:
		return (vsi->oqdrops);
	case IFCOUNTER_NOPROTO:
		return (vsi->noproto);
	default:
		/* Let the stack compute any counter we don't track */
		return (if_get_counter_default(ifp, cnt));
	}
}

/**
 * iavf_if_needs_restart - Tell iflib when the driver needs to be reinitialized
 * @ctx: iflib context
 * @event: event code to check
 *
 * Defaults to returning false for unknown events.
 *
 * @returns true if iflib needs to reinit the interface
 */
static bool
iavf_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
{
	switch (event) {
	case IFLIB_RESTART_VLAN_CONFIG:
		return (true);
	default:
		return (false);
	}
}

/**
 * iavf_free_pci_resources - Free PCI resources
 * @sc: device softc
 *
 * Called to release the PCI resources allocated during attach. May be called
 * in the error flow of attach_pre, or during detach as part of cleanup.
 */
static void
iavf_free_pci_resources(struct iavf_sc *sc)
{
	struct iavf_vsi *vsi = &sc->vsi;
	struct iavf_rx_queue *rx_que = vsi->rx_queues;
	device_t dev = sc->dev;

	/* We may get here before stations are set up */
	if (rx_que == NULL)
		goto early;

	/* Release all interrupts */
	iflib_irq_free(vsi->ctx, &vsi->irq);

	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
		iflib_irq_free(vsi->ctx, &rx_que->que_irq);

early:
	if (sc->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->pci_mem), sc->pci_mem);
}

/**
 * iavf_setup_interface - Setup the device interface
 * @sc: device softc
 *
 * Called to setup some device interface settings, such as the ifmedia
 * structure.
 */
static void
iavf_setup_interface(struct iavf_sc *sc)
{
	struct iavf_vsi *vsi = &sc->vsi;
	if_ctx_t ctx = vsi->ctx;
	if_t ifp = iflib_get_ifp(ctx);

	iavf_dbg_init(sc, "begin\n");

	/* Max frame size includes the L2 header, CRC, and a VLAN tag */
	vsi->shared->isc_max_frame_size =
	    if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN
	    + ETHER_VLAN_ENCAP_LEN;

	iavf_set_initial_baudrate(ifp);

	/* VFs cannot control the link; advertise autoselect only */
	ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);
}

/**
 * iavf_msix_adminq - Admin Queue interrupt handler
 * @arg: void pointer to the device softc
 *
 * Interrupt handler for the non-queue interrupt causes. Primarily this will
 * be the adminq interrupt, but also includes other miscellaneous causes.
 *
 * @returns FILTER_SCHEDULE_THREAD if the admin task needs to be run, otherwise
 * returns FILTER_HANDLED.
 */
static int
iavf_msix_adminq(void *arg)
{
	struct iavf_sc *sc = (struct iavf_sc *)arg;
	struct iavf_hw *hw = &sc->hw;
	u32 reg, mask;

	++sc->admin_irq;

	/* Ignore spurious interrupts before initialization completes */
	if (!iavf_test_state(&sc->state, IAVF_STATE_INITIALIZED))
		return (FILTER_HANDLED);

	reg = rd32(hw, IAVF_VFINT_ICR01);
	/*
	 * For masking off interrupt causes that need to be handled before
	 * they can be re-enabled
	 */
	mask = rd32(hw, IAVF_VFINT_ICR0_ENA1);

	/* Check on the cause */
	if (reg & IAVF_VFINT_ICR01_ADMINQ_MASK) {
		mask &= ~IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK;

		/* Process messages outside of the iflib context lock */
		taskqueue_enqueue(sc->vc_tq, &sc->vc_task);
	}

	wr32(hw, IAVF_VFINT_ICR0_ENA1, mask);
	iavf_enable_adminq_irq(hw);

	return (FILTER_HANDLED);
}

/**
 * iavf_enable_intr - Enable device interrupts
 * @vsi: the main VSI
 *
 * Called to enable all queue interrupts.
 */
void
iavf_enable_intr(struct iavf_vsi *vsi)
{
	struct iavf_hw *hw = vsi->hw;
	struct iavf_rx_queue *que = vsi->rx_queues;

	iavf_enable_adminq_irq(hw);
	for (int i = 0; i < vsi->num_rx_queues; i++, que++)
		iavf_enable_queue_irq(hw, que->rxr.me);
}

/**
 * iavf_disable_intr - Disable device interrupts
 * @vsi: the main VSI
 *
 * Called to disable all interrupts
 *
 * @remark we never disable the admin status interrupt.
 */
void
iavf_disable_intr(struct iavf_vsi *vsi)
{
	struct iavf_hw *hw = vsi->hw;
	struct iavf_rx_queue *que = vsi->rx_queues;

	for (int i = 0; i < vsi->num_rx_queues; i++, que++)
		iavf_disable_queue_irq(hw, que->rxr.me);
}

/**
 * iavf_enable_queue_irq - Enable IRQ register for a queue interrupt
 * @hw: hardware structure
 * @id: IRQ vector to enable
 *
 * Writes the IAVF_VFINT_DYN_CTLN1 register to enable a given IRQ interrupt.
 */
static void
iavf_enable_queue_irq(struct iavf_hw *hw, int id)
{
	u32 reg;

	reg = IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
	    IAVF_VFINT_DYN_CTLN1_CLEARPBA_MASK |
	    IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK;
	wr32(hw, IAVF_VFINT_DYN_CTLN1(id), reg);
}

/**
 * iavf_disable_queue_irq - Disable IRQ register for a queue interrupt
 * @hw: hardware structure
 * @id: IRQ vector to disable
 *
 * Writes the IAVF_VFINT_DYN_CTLN1 register to disable a given IRQ interrupt.
 */
static void
iavf_disable_queue_irq(struct iavf_hw *hw, int id)
{
	wr32(hw, IAVF_VFINT_DYN_CTLN1(id),
	    IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
	/* Flush the write before returning */
	rd32(hw, IAVF_VFGEN_RSTAT);
}

/**
 * iavf_configure_itr - Get initial ITR values from tunable values.
 * @sc: device softc
 *
 * Load the initial tunable values for the ITR configuration.
 */
static void
iavf_configure_itr(struct iavf_sc *sc)
{
	iavf_configure_tx_itr(sc);
	iavf_configure_rx_itr(sc);
}

/**
 * iavf_set_queue_rx_itr - Update Rx ITR value
 * @que: Rx queue to update
 *
 * Provide an update to the queue RX interrupt moderation value.
 */
static void
iavf_set_queue_rx_itr(struct iavf_rx_queue *que)
{
	struct iavf_vsi *vsi = que->vsi;
	struct iavf_hw *hw = vsi->hw;
	struct rx_ring *rxr = &que->rxr;

	/* Idle, do nothing */
	if (rxr->bytes == 0)
		return;

	/* Update the hardware if needed */
	if (rxr->itr != vsi->rx_itr_setting) {
		rxr->itr = vsi->rx_itr_setting;
		wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR,
		    que->rxr.me), rxr->itr);
	}
}

/**
 * iavf_msix_que - Main Rx queue interrupt handler
 * @arg: void pointer to the Rx queue
 *
 * Main MSI-X interrupt handler for Rx queue interrupts
 *
 * @returns FILTER_SCHEDULE_THREAD if the main thread for Rx needs to run,
 * otherwise returns FILTER_HANDLED.
 */
static int
iavf_msix_que(void *arg)
{
	struct iavf_rx_queue *rx_que = (struct iavf_rx_queue *)arg;
	struct iavf_sc *sc = rx_que->vsi->back;

	++rx_que->irqs;

	/* Don't schedule Rx processing unless the interface is running */
	if (!iavf_test_state(&sc->state, IAVF_STATE_RUNNING))
		return (FILTER_HANDLED);

	iavf_set_queue_rx_itr(rx_que);

	return (FILTER_SCHEDULE_THREAD);
}

/**
 * iavf_update_link_status - Update iflib Link status
 * @sc: device softc
 *
 * Notify the iflib stack of changes in link status. Called after the device
 * receives a virtchnl message indicating a change in link status.
1754 */ 1755 void 1756 iavf_update_link_status(struct iavf_sc *sc) 1757 { 1758 struct iavf_vsi *vsi = &sc->vsi; 1759 u64 baudrate; 1760 1761 if (sc->link_up){ 1762 if (vsi->link_active == FALSE) { 1763 vsi->link_active = TRUE; 1764 baudrate = iavf_baudrate_from_link_speed(sc); 1765 iavf_dbg_info(sc, "baudrate: %llu\n", (unsigned long long)baudrate); 1766 iflib_link_state_change(vsi->ctx, LINK_STATE_UP, baudrate); 1767 } 1768 } else { /* Link down */ 1769 if (vsi->link_active == TRUE) { 1770 vsi->link_active = FALSE; 1771 iflib_link_state_change(vsi->ctx, LINK_STATE_DOWN, 0); 1772 } 1773 } 1774 } 1775 1776 /** 1777 * iavf_stop - Stop the interface 1778 * @sc: device softc 1779 * 1780 * This routine disables all traffic on the adapter by disabling interrupts 1781 * and sending a message to the PF to tell it to stop the hardware 1782 * Tx/Rx LAN queues. 1783 */ 1784 static void 1785 iavf_stop(struct iavf_sc *sc) 1786 { 1787 iavf_clear_state(&sc->state, IAVF_STATE_RUNNING); 1788 1789 iavf_disable_intr(&sc->vsi); 1790 1791 iavf_disable_queues_with_retries(sc); 1792 } 1793 1794 /** 1795 * iavf_if_stop - iflib stop handler 1796 * @ctx: iflib context 1797 * 1798 * Call iavf_stop to stop the interface. 1799 */ 1800 static void 1801 iavf_if_stop(if_ctx_t ctx) 1802 { 1803 struct iavf_sc *sc = iavf_sc_from_ctx(ctx); 1804 1805 iavf_stop(sc); 1806 } 1807 1808 /** 1809 * iavf_del_mac_filter - Delete a MAC filter 1810 * @sc: device softc 1811 * @macaddr: MAC address to remove 1812 * 1813 * Marks a MAC filter for deletion. 1814 * 1815 * @returns zero if the filter existed, or ENOENT if it did not. 
 */
static int
iavf_del_mac_filter(struct iavf_sc *sc, u8 *macaddr)
{
	struct iavf_mac_filter *f;

	f = iavf_find_mac_filter(sc, macaddr);
	if (f == NULL)
		return (ENOENT);

	/* Mark only; the actual removal happens via a later virtchnl message */
	f->flags |= IAVF_FILTER_DEL;
	return (0);
}

/**
 * iavf_init_tx_rsqs - Initialize Report Status array
 * @vsi: the main VSI
 *
 * Set the Report Status queue fields to zero in order to initialize the
 * queues for transmit.
 */
void
iavf_init_tx_rsqs(struct iavf_vsi *vsi)
{
	if_softc_ctx_t scctx = vsi->shared;
	struct iavf_tx_queue *tx_que;
	int i, j;

	for (i = 0, tx_que = vsi->tx_queues; i < vsi->num_tx_queues; i++, tx_que++) {
		struct tx_ring *txr = &tx_que->txr;

		txr->tx_rs_cidx = txr->tx_rs_pidx;

		/* Initialize the last processed descriptor to be the end of
		 * the ring, rather than the start, so that we avoid an
		 * off-by-one error when calculating how many descriptors are
		 * done in the credits_update function.
		 */
		txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;

		for (j = 0; j < scctx->isc_ntxd[0]; j++)
			txr->tx_rsq[j] = QIDX_INVALID;
	}
}

/**
 * iavf_init_tx_cidx - Initialize Tx cidx values
 * @vsi: the main VSI
 *
 * Initialize the tx_cidx_processed values for Tx queues in order to
 * initialize the Tx queues for transmit.
 */
void
iavf_init_tx_cidx(struct iavf_vsi *vsi)
{
	if_softc_ctx_t scctx = vsi->shared;
	struct iavf_tx_queue *tx_que;
	int i;

	for (i = 0, tx_que = vsi->tx_queues; i < vsi->num_tx_queues; i++, tx_que++) {
		struct tx_ring *txr = &tx_que->txr;

		/* End of ring, to avoid an off-by-one in credits_update */
		txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
	}
}

/**
 * iavf_add_device_sysctls - Add device sysctls for configuration
 * @sc: device softc
 *
 * Add the main sysctl nodes and sysctls for device configuration.
1887 */ 1888 static void 1889 iavf_add_device_sysctls(struct iavf_sc *sc) 1890 { 1891 struct iavf_vsi *vsi = &sc->vsi; 1892 device_t dev = sc->dev; 1893 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); 1894 struct sysctl_oid_list *debug_list; 1895 1896 iavf_add_device_sysctls_common(sc); 1897 1898 debug_list = iavf_create_debug_sysctl_tree(sc); 1899 1900 iavf_add_debug_sysctls_common(sc, debug_list); 1901 1902 SYSCTL_ADD_PROC(ctx, debug_list, 1903 OID_AUTO, "queue_interrupt_table", CTLTYPE_STRING | CTLFLAG_RD, 1904 sc, 0, iavf_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues"); 1905 1906 #ifdef IAVF_DEBUG 1907 SYSCTL_ADD_PROC(ctx, debug_list, 1908 OID_AUTO, "do_vf_reset", CTLTYPE_INT | CTLFLAG_WR, 1909 sc, 0, iavf_sysctl_vf_reset, "A", "Request a VF reset from PF"); 1910 1911 SYSCTL_ADD_PROC(ctx, debug_list, 1912 OID_AUTO, "do_vflr_reset", CTLTYPE_INT | CTLFLAG_WR, 1913 sc, 0, iavf_sysctl_vflr_reset, "A", "Request a VFLR reset from HW"); 1914 #endif 1915 1916 /* Add stats sysctls */ 1917 iavf_add_vsi_sysctls(dev, vsi, ctx, "vsi"); 1918 1919 iavf_add_queues_sysctls(dev, vsi); 1920 } 1921 1922 /** 1923 * iavf_add_queues_sysctls - Add per-queue sysctls 1924 * @dev: device pointer 1925 * @vsi: the main VSI 1926 * 1927 * Add sysctls for each Tx and Rx queue. 
 */
void
iavf_add_queues_sysctls(device_t dev, struct iavf_vsi *vsi)
{
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid_list *vsi_list, *queue_list;
	struct sysctl_oid *queue_node;
	char queue_namebuf[32];

	struct iavf_rx_queue *rx_que;
	struct iavf_tx_queue *tx_que;
	struct tx_ring *txr;
	struct rx_ring *rxr;

	vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);

	/* Queue statistics: one "rxqNN" node per Rx queue */
	for (int q = 0; q < vsi->num_rx_queues; q++) {
		bzero(queue_namebuf, sizeof(queue_namebuf));
		snprintf(queue_namebuf, IAVF_QUEUE_NAME_LEN, "rxq%02d", q);
		queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
		    OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "RX Queue #");
		queue_list = SYSCTL_CHILDREN(queue_node);

		rx_que = &(vsi->rx_queues[q]);
		rxr = &(rx_que->rxr);

		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
		    CTLFLAG_RD, &(rx_que->irqs),
		    "irqs on this queue (both Tx and Rx)");

		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "packets",
		    CTLFLAG_RD, &(rxr->rx_packets),
		    "Queue Packets Received");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "bytes",
		    CTLFLAG_RD, &(rxr->rx_bytes),
		    "Queue Bytes Received");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "desc_err",
		    CTLFLAG_RD, &(rxr->desc_errs),
		    "Queue Rx Descriptor Errors");
		SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "itr",
		    CTLFLAG_RD, &(rxr->itr), 0,
		    "Queue Rx ITR Interval");
	}
	/* One "txqNN" node per Tx queue */
	for (int q = 0; q < vsi->num_tx_queues; q++) {
		bzero(queue_namebuf, sizeof(queue_namebuf));
		snprintf(queue_namebuf, IAVF_QUEUE_NAME_LEN, "txq%02d", q);
		queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
		    OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "TX Queue #");
		queue_list = SYSCTL_CHILDREN(queue_node);

		tx_que = &(vsi->tx_queues[q]);
		txr = &(tx_que->txr);

		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso",
		    CTLFLAG_RD, &(tx_que->tso),
		    "TSO");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mss_too_small",
		    CTLFLAG_RD, &(txr->mss_too_small),
		    "TSO sends with an MSS less than 64");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "packets",
		    CTLFLAG_RD, &(txr->tx_packets),
		    "Queue Packets Transmitted");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "bytes",
		    CTLFLAG_RD, &(txr->tx_bytes),
		    "Queue Bytes Transmitted");
		SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "itr",
		    CTLFLAG_RD, &(txr->itr), 0,
		    "Queue Tx ITR Interval");
	}
}

/**
 * iavf_driver_is_detaching - Check if the driver is detaching/unloading
 * @sc: device private softc
 *
 * @returns true if the driver is detaching, false otherwise.
 *
 * @remark on newer kernels, take advantage of iflib_in_detach in order to
 * report detachment correctly as early as possible.
 *
 * @remark this function is used by various code paths that want to avoid
 * running if the driver is about to be removed. This includes sysctls and
 * other driver access points. Note that it does not fully resolve
 * detach-based race conditions as it is possible for a thread to race with
 * iflib_in_detach.
 */
bool
iavf_driver_is_detaching(struct iavf_sc *sc)
{
	return (!iavf_test_state(&sc->state, IAVF_STATE_INITIALIZED) ||
	    iflib_in_detach(sc->vsi.ctx));
}

/**
 * iavf_sysctl_queue_interrupt_table - Sysctl for displaying Tx queue mapping
 * @oidp: sysctl oid structure
 * @arg1: void pointer to device softc
 * @arg2: unused
 * @req: sysctl request pointer
 *
 * Print out mapping of TX queue indexes and Rx queue indexes to MSI-X vectors.
 *
 * @returns zero on success, or an error code on failure.
 */
static int
iavf_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS)
{
	struct iavf_sc *sc = (struct iavf_sc *)arg1;
	struct iavf_vsi *vsi = &sc->vsi;
	device_t dev = sc->dev;
	struct sbuf *buf;
	int error = 0;

	struct iavf_rx_queue *rx_que;
	struct iavf_tx_queue *tx_que;

	UNREFERENCED_2PARAMETER(arg2, oidp);

	/* Refuse sysctl access while the driver is going away */
	if (iavf_driver_is_detaching(sc))
		return (ESHUTDOWN);

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	sbuf_cat(buf, "\n");
	for (int i = 0; i < vsi->num_rx_queues; i++) {
		rx_que = &vsi->rx_queues[i];
		sbuf_printf(buf, "(rxq %3d): %d\n", i, rx_que->msix);
	}
	for (int i = 0; i < vsi->num_tx_queues; i++) {
		tx_que = &vsi->tx_queues[i];
		sbuf_printf(buf, "(txq %3d): %d\n", i, tx_que->msix);
	}

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);
	sbuf_delete(buf);

	return (error);
}

#ifdef IAVF_DEBUG
/* True when the interface is administratively up and running */
#define CTX_ACTIVE(ctx) ((if_getdrvflags(iflib_get_ifp(ctx)) & IFF_DRV_RUNNING))

/**
 * iavf_sysctl_vf_reset - Request a VF reset
 * @oidp: sysctl oid pointer
 * @arg1: void pointer to device softc
 * @arg2: unused
 * @req: sysctl request pointer
 *
 * Request a VF reset for the device.
 *
 * @returns zero on success, or an error code on failure.
 */
static int
iavf_sysctl_vf_reset(SYSCTL_HANDLER_ARGS)
{
	struct iavf_sc *sc = (struct iavf_sc *)arg1;
	int do_reset = 0, error = 0;

	UNREFERENCED_PARAMETER(arg2);

	if (iavf_driver_is_detaching(sc))
		return (ESHUTDOWN);

	error = sysctl_handle_int(oidp, &do_reset, 0, req);
	/* A read (no new value) just reports 0 and returns */
	if ((error) || (req->newptr == NULL))
		return (error);

	if (do_reset == 1) {
		iavf_reset(sc);
		/* Ask iflib to reinit if the interface was running */
		if (CTX_ACTIVE(sc->vsi.ctx))
			iflib_request_reset(sc->vsi.ctx);
	}

	return (error);
}

/**
 * iavf_sysctl_vflr_reset - Trigger a PCIe FLR for the device
 * @oidp: sysctl oid pointer
 * @arg1: void pointer to device softc
 * @arg2: unused
 * @req: sysctl request pointer
 *
 * Sysctl callback to trigger a PCIe FLR.
 *
 * @returns zero on success, or an error code on failure.
 */
static int
iavf_sysctl_vflr_reset(SYSCTL_HANDLER_ARGS)
{
	struct iavf_sc *sc = (struct iavf_sc *)arg1;
	device_t dev = sc->dev;
	int do_reset = 0, error = 0;

	UNREFERENCED_PARAMETER(arg2);

	if (iavf_driver_is_detaching(sc))
		return (ESHUTDOWN);

	error = sysctl_handle_int(oidp, &do_reset, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	if (do_reset == 1) {
		/* FLR timeout is in ms; wait at least 10ms */
		if (!pcie_flr(dev, max(pcie_get_max_completion_timeout(dev) / 1000, 10), true)) {
			device_printf(dev, "PCIE FLR failed\n");
			error = EIO;
		}
		else if (CTX_ACTIVE(sc->vsi.ctx))
			iflib_request_reset(sc->vsi.ctx);
	}

	return (error);
}
#undef CTX_ACTIVE
#endif