/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  1. Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  3. Neither the name of the Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived from
 *     this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file if_iavf_iflib.c
 * @brief iflib driver implementation
 *
 * Contains the main entry point for the iflib driver implementation. It
 * implements the various ifdi driver methods, and sets up the module and
 * driver values to load an iflib driver.
 */

#include "iavf_iflib.h"
#include "iavf_vc_common.h"

#include "iavf_drv_info.h"
#include "iavf_sysctls_iflib.h"

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static void	*iavf_register(device_t dev);
static int	 iavf_if_attach_pre(if_ctx_t ctx);
static int	 iavf_if_attach_post(if_ctx_t ctx);
static int	 iavf_if_detach(if_ctx_t ctx);
static int	 iavf_if_shutdown(if_ctx_t ctx);
static int	 iavf_if_suspend(if_ctx_t ctx);
static int	 iavf_if_resume(if_ctx_t ctx);
static int	 iavf_if_msix_intr_assign(if_ctx_t ctx, int msix);
static void	 iavf_if_enable_intr(if_ctx_t ctx);
static void	 iavf_if_disable_intr(if_ctx_t ctx);
static int	 iavf_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid);
static int	 iavf_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid);
static int	 iavf_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets);
static int	 iavf_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
static void	 iavf_if_queues_free(if_ctx_t ctx);
static void	 iavf_if_update_admin_status(if_ctx_t ctx);
static void	 iavf_if_multi_set(if_ctx_t ctx);
static int	 iavf_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
static void	 iavf_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr);
static int	 iavf_if_media_change(if_ctx_t ctx);
static int	 iavf_if_promisc_set(if_ctx_t ctx, int flags);
static void	 iavf_if_timer(if_ctx_t ctx, uint16_t qid);
static void	 iavf_if_vlan_register(if_ctx_t ctx, u16 vtag);
static void	 iavf_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
static uint64_t	 iavf_if_get_counter(if_ctx_t ctx, ift_counter cnt);
static void	 iavf_if_init(if_ctx_t ctx);
static void	 iavf_if_stop(if_ctx_t ctx);
static bool	 iavf_if_needs_restart(if_ctx_t, enum iflib_restart_event);

static int	iavf_allocate_pci_resources(struct iavf_sc *);
static void	iavf_free_pci_resources(struct iavf_sc *);
static void	iavf_setup_interface(struct iavf_sc *);
static void	iavf_add_device_sysctls(struct iavf_sc *);
static void	iavf_enable_queue_irq(struct iavf_hw *, int);
static void	iavf_disable_queue_irq(struct iavf_hw *, int);
static void	iavf_stop(struct iavf_sc *);

static int	iavf_del_mac_filter(struct iavf_sc *sc, u8 *macaddr);
static int	iavf_msix_que(void *);
static int	iavf_msix_adminq(void *);
static void	iavf_configure_itr(struct iavf_sc *sc);

static int	iavf_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS);
#ifdef IAVF_DEBUG
static int	iavf_sysctl_vf_reset(SYSCTL_HANDLER_ARGS);
static int	iavf_sysctl_vflr_reset(SYSCTL_HANDLER_ARGS);
#endif

static enum iavf_status iavf_process_adminq(struct iavf_sc *, u16 *);
static void	iavf_vc_task(void *arg, int pending __unused);
static int	iavf_setup_vc_tq(struct iavf_sc *sc);
static int	iavf_vc_sleep_wait(struct iavf_sc *sc, u32 op);

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

/**
 * @var iavf_methods
 * @brief device methods for the iavf driver
 *
 * Device method callbacks used to interact with the driver. For iflib this
 * primarily resolves to the default iflib implementations.
 */
static device_method_t iavf_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, iavf_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
	DEVMETHOD_END
};

static driver_t iavf_driver = {
	"iavf", iavf_methods, sizeof(struct iavf_sc),
};

DRIVER_MODULE(iavf, pci, iavf_driver, 0, 0);
MODULE_VERSION(iavf, 1);

MODULE_DEPEND(iavf, pci, 1, 1, 1);
MODULE_DEPEND(iavf, ether, 1, 1, 1);
MODULE_DEPEND(iavf, iflib, 1, 1, 1);

IFLIB_PNP_INFO(pci, iavf, iavf_vendor_info_array);

/**
 * @var M_IAVF
 * @brief main iavf driver allocation type
 *
 * malloc(9) allocation type used by the majority of memory allocations in the
 * iavf iflib driver.
 */
MALLOC_DEFINE(M_IAVF, "iavf", "iavf driver allocations");

static device_method_t iavf_if_methods[] = {
	DEVMETHOD(ifdi_attach_pre, iavf_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, iavf_if_attach_post),
	DEVMETHOD(ifdi_detach, iavf_if_detach),
	DEVMETHOD(ifdi_shutdown, iavf_if_shutdown),
	DEVMETHOD(ifdi_suspend, iavf_if_suspend),
	DEVMETHOD(ifdi_resume, iavf_if_resume),
	DEVMETHOD(ifdi_init, iavf_if_init),
	DEVMETHOD(ifdi_stop, iavf_if_stop),
	DEVMETHOD(ifdi_msix_intr_assign, iavf_if_msix_intr_assign),
	DEVMETHOD(ifdi_intr_enable, iavf_if_enable_intr),
	DEVMETHOD(ifdi_intr_disable, iavf_if_disable_intr),
	DEVMETHOD(ifdi_rx_queue_intr_enable, iavf_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queue_intr_enable, iavf_if_tx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queues_alloc, iavf_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, iavf_if_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, iavf_if_queues_free),
	DEVMETHOD(ifdi_update_admin_status, iavf_if_update_admin_status),
	DEVMETHOD(ifdi_multi_set, iavf_if_multi_set),
	DEVMETHOD(ifdi_mtu_set, iavf_if_mtu_set),
	DEVMETHOD(ifdi_media_status, iavf_if_media_status),
	DEVMETHOD(ifdi_media_change, iavf_if_media_change),
	DEVMETHOD(ifdi_promisc_set, iavf_if_promisc_set),
	DEVMETHOD(ifdi_timer, iavf_if_timer),
	DEVMETHOD(ifdi_vlan_register, iavf_if_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister, iavf_if_vlan_unregister),
	DEVMETHOD(ifdi_get_counter, iavf_if_get_counter),
	DEVMETHOD(ifdi_needs_restart, iavf_if_needs_restart),
	DEVMETHOD_END
};

static driver_t iavf_if_driver = {
	"iavf_if", iavf_if_methods, sizeof(struct iavf_sc)
};

extern struct if_txrx iavf_txrx_hwb;
extern struct if_txrx iavf_txrx_dwb;

static struct if_shared_ctx iavf_sctx = {
	.isc_magic = IFLIB_MAGIC,
	.isc_q_align = PAGE_SIZE,
	.isc_tx_maxsize = IAVF_MAX_FRAME,
	.isc_tx_maxsegsize = IAVF_MAX_FRAME,
	.isc_tso_maxsize = IAVF_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = IAVF_MAX_DMA_SEG_SIZE,
	.isc_rx_maxsize = IAVF_MAX_FRAME,
	.isc_rx_nsegments = IAVF_MAX_RX_SEGS,
	.isc_rx_maxsegsize = IAVF_MAX_FRAME,
	.isc_nfl = 1,
	.isc_ntxqs = 1,
	.isc_nrxqs = 1,

	.isc_admin_intrcnt = 1,
	.isc_vendor_info = iavf_vendor_info_array,
	.isc_driver_version = __DECONST(char *, iavf_driver_version),
	.isc_driver = &iavf_if_driver,
	.isc_flags = IFLIB_NEED_SCRATCH | IFLIB_NEED_ZERO_CSUM | IFLIB_TSO_INIT_IP | IFLIB_IS_VF,

	.isc_nrxd_min = {IAVF_MIN_RING},
	.isc_ntxd_min = {IAVF_MIN_RING},
	.isc_nrxd_max = {IAVF_MAX_RING},
	.isc_ntxd_max = {IAVF_MAX_RING},
	.isc_nrxd_default = {IAVF_DEFAULT_RING},
	.isc_ntxd_default = {IAVF_DEFAULT_RING},
};

/*** Functions ***/

/**
 * iavf_register - iflib callback to obtain the shared context pointer
 * @dev: the device being registered
 *
 * Called when the device is first being attached to the driver. This function
 * is used by iflib to obtain a pointer to the shared context structure which
 * describes the device features.
 *
 * @returns a pointer to the iavf shared context structure.
 */
static void *
iavf_register(device_t dev __unused)
{
	return (&iavf_sctx);
}

/**
 * iavf_allocate_pci_resources - Allocate PCI resources
 * @sc: the device private softc
 *
 * Allocate PCI resources used by the iflib driver.
 *
 * @returns zero or a non-zero error code on failure
 */
static int
iavf_allocate_pci_resources(struct iavf_sc *sc)
{
	return iavf_allocate_pci_resources_common(sc);
}

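/*
 * Note on attach ordering: iflib attaches in two phases. IFDI_ATTACH_PRE
 * (iavf_if_attach_pre(), below) runs before iflib sets up queue memory and
 * interrupt vectors; IFDI_ATTACH_POST (iavf_if_attach_post()) finishes the
 * attach once queues and interrupts have been set up.
 */
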
/**
 * iavf_if_attach_pre - Begin attaching the device to the driver
 * @ctx: the iflib context pointer
 *
 * Called by iflib to begin the attach process. Allocates resources and
 * initializes the hardware for operation.
 *
 * @returns zero or a non-zero error code on failure.
 */
static int
iavf_if_attach_pre(if_ctx_t ctx)
{
	device_t dev;
	struct iavf_sc *sc;
	struct iavf_hw *hw;
	struct iavf_vsi *vsi;
	if_softc_ctx_t scctx;
	int error = 0;

	/* Setup pointers */
	dev = iflib_get_dev(ctx);
	sc = iavf_sc_from_ctx(ctx);

	vsi = &sc->vsi;
	vsi->back = sc;
	sc->dev = sc->osdep.dev = dev;
	hw = &sc->hw;

	vsi->dev = dev;
	vsi->hw = &sc->hw;
	vsi->num_vlans = 0;
	vsi->ctx = ctx;
	sc->media = iflib_get_media(ctx);
	vsi->ifp = iflib_get_ifp(ctx);
	vsi->shared = scctx = iflib_get_softc_ctx(ctx);

	iavf_save_tunables(sc);

	/* Setup VC mutex */
	snprintf(sc->vc_mtx_name, sizeof(sc->vc_mtx_name),
	    "%s:vc", device_get_nameunit(dev));
	mtx_init(&sc->vc_mtx, sc->vc_mtx_name, NULL, MTX_DEF);

	/* Do PCI setup - map BAR0, etc */
	error = iavf_allocate_pci_resources(sc);
	if (error) {
		device_printf(dev, "%s: Allocation of PCI resources failed\n",
		    __func__);
		goto err_early;
	}

	iavf_dbg_init(sc, "Allocated PCI resources and MSI-X vectors\n");

	error = iavf_set_mac_type(hw);
	if (error) {
		device_printf(dev, "%s: set_mac_type failed: %d\n",
		    __func__, error);
		goto err_pci_res;
	}

	error = iavf_reset_complete(hw);
	if (error) {
		device_printf(dev, "%s: Device is still being reset\n",
		    __func__);
		goto err_pci_res;
	}

	iavf_dbg_init(sc, "VF Device is ready for configuration\n");

	/* Sets up Admin Queue */
	error = iavf_setup_vc(sc);
	if (error) {
		device_printf(dev, "%s: Error setting up PF comms, %d\n",
		    __func__, error);
		goto err_pci_res;
	}

	iavf_dbg_init(sc, "PF API version verified\n");

	/* Need API version before sending reset message */
	error = iavf_reset(sc);
	if (error) {
		device_printf(dev, "VF reset failed; reload the driver\n");
		goto err_aq;
	}

	iavf_dbg_init(sc, "VF reset complete\n");

	/* Ask for VF config from PF */
	error = iavf_vf_config(sc);
	if (error) {
		device_printf(dev, "Error getting configuration from PF: %d\n",
		    error);
		goto err_aq;
	}

	iavf_print_device_info(sc);

	error = iavf_get_vsi_res_from_vf_res(sc);
	if (error)
		goto err_res_buf;

	iavf_dbg_init(sc, "Resource Acquisition complete\n");

	/* Setup taskqueue to service VC messages */
	error = iavf_setup_vc_tq(sc);
	if (error)
		goto err_vc_tq;

	iavf_set_mac_addresses(sc);
	iflib_set_mac(ctx, hw->mac.addr);

	/* Allocate filter lists */
	iavf_init_filters(sc);

	/* Fill out more iflib parameters */
	scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max =
	    sc->vsi_res->num_queue_pairs;
	if (vsi->enable_head_writeback) {
		scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
		    * sizeof(struct iavf_tx_desc) + sizeof(u32), DBA_ALIGN);
		scctx->isc_txrx = &iavf_txrx_hwb;
	} else {
		scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
		    * sizeof(struct iavf_tx_desc), DBA_ALIGN);
		scctx->isc_txrx = &iavf_txrx_dwb;
	}
	scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0]
	    * sizeof(union iavf_32byte_rx_desc), DBA_ALIGN);
	scctx->isc_msix_bar = PCIR_BAR(IAVF_MSIX_BAR);
	scctx->isc_tx_nsegments = IAVF_MAX_TX_SEGS;
	scctx->isc_tx_tso_segments_max = IAVF_MAX_TSO_SEGS;
	scctx->isc_tx_tso_size_max = IAVF_TSO_SIZE;
	scctx->isc_tx_tso_segsize_max = IAVF_MAX_DMA_SEG_SIZE;
	scctx->isc_rss_table_size = IAVF_RSS_VSI_LUT_SIZE;
	scctx->isc_capabilities = scctx->isc_capenable = IAVF_CAPS;
	scctx->isc_tx_csum_flags = CSUM_OFFLOAD;

	/* Update OS cache of MSIX control register values */
	iavf_update_msix_devinfo(dev);

	return (0);

err_vc_tq:
	taskqueue_free(sc->vc_tq);
err_res_buf:
	free(sc->vf_res, M_IAVF);
err_aq:
	iavf_shutdown_adminq(hw);
err_pci_res:
	iavf_free_pci_resources(sc);
err_early:
	IAVF_VC_LOCK_DESTROY(sc);
	return (error);
}

/**
 * iavf_vc_task - task used to process VC messages
 * @arg: device softc
 * @pending: unused
 *
 * Processes the admin queue, in order to process the virtual
 * channel messages received from the PF.
 */
static void
iavf_vc_task(void *arg, int pending __unused)
{
	struct iavf_sc *sc = (struct iavf_sc *)arg;
	u16 var;

	iavf_process_adminq(sc, &var);
}

/**
 * iavf_setup_vc_tq - Setup task queues
 * @sc: device softc
 *
 * Create taskqueue and tasklet for processing virtual channel messages. This
 * is done in a separate non-iflib taskqueue so that the iflib context lock
 * does not need to be held for VC messages to be processed.
 *
 * @returns zero on success, or an error code on failure.
 */
static int
iavf_setup_vc_tq(struct iavf_sc *sc)
{
	device_t dev = sc->dev;
	int error = 0;

	TASK_INIT(&sc->vc_task, 0, iavf_vc_task, sc);

	sc->vc_tq = taskqueue_create_fast("iavf_vc", M_NOWAIT,
	    taskqueue_thread_enqueue, &sc->vc_tq);
	if (!sc->vc_tq) {
		device_printf(dev, "taskqueue_create_fast (for VC task) returned NULL!\n");
		return (ENOMEM);
	}
	error = taskqueue_start_threads(&sc->vc_tq, 1, PI_NET, "%s vc",
	    device_get_nameunit(dev));
	if (error) {
		device_printf(dev, "taskqueue_start_threads (for VC task) error: %d\n",
		    error);
		taskqueue_free(sc->vc_tq);
		return (error);
	}

	return (error);
}

/**
 * iavf_if_attach_post - Finish attaching the device to the driver
 * @ctx: the iflib context pointer
 *
 * Called by iflib after it has setup queues and interrupts. Used to finish up
 * the attach process for a device. Attach logic which must occur after Tx and
 * Rx queues are setup belongs here.
 *
 * @returns zero or a non-zero error code on failure
 */
static int
iavf_if_attach_post(if_ctx_t ctx)
{
#ifdef IXL_DEBUG
	device_t dev = iflib_get_dev(ctx);
#endif
	struct iavf_sc *sc;
	struct iavf_hw *hw;
	struct iavf_vsi *vsi;
	int error = 0;

	INIT_DBG_DEV(dev, "begin");

	sc = iavf_sc_from_ctx(ctx);
	vsi = &sc->vsi;
	hw = &sc->hw;

	/* Save off determined number of queues for interface */
	vsi->num_rx_queues = vsi->shared->isc_nrxqsets;
	vsi->num_tx_queues = vsi->shared->isc_ntxqsets;

	/* Setup the stack interface */
	iavf_setup_interface(sc);

	iavf_dbg_init(sc, "Interface setup complete\n");

	/* Initialize statistics & add sysctls */
	bzero(&sc->vsi.eth_stats, sizeof(struct iavf_eth_stats));
	iavf_add_device_sysctls(sc);

	atomic_store_rel_32(&sc->queues_enabled, 0);
	iavf_set_state(&sc->state, IAVF_STATE_INITIALIZED);

	/* We want AQ enabled early for init */
	iavf_enable_adminq_irq(hw);

	INIT_DBG_DEV(dev, "end");

	return (error);
}

/**
 * iavf_if_detach - Detach a device from the driver
 * @ctx: the iflib context of the device to detach
 *
 * Called by iflib to detach a given device from the driver. Clean up any
 * resources associated with the driver and shut the device down.
 *
 * @remark iflib always ignores the return value of IFDI_DETACH, so this
 * function is effectively not allowed to fail. Instead, it should clean up
 * and release as much as possible even if something goes wrong.
 *
 * @returns zero
 */
static int
iavf_if_detach(if_ctx_t ctx)
{
	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
	struct iavf_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	enum iavf_status status;

	INIT_DBG_DEV(dev, "begin");

	iavf_clear_state(&sc->state, IAVF_STATE_INITIALIZED);

	/* Drain admin queue taskqueue */
	taskqueue_free(sc->vc_tq);
	IAVF_VC_LOCK_DESTROY(sc);

	/* Remove all the media and link information */
	ifmedia_removeall(sc->media);

	iavf_disable_adminq_irq(hw);
	status = iavf_shutdown_adminq(&sc->hw);
	if (status != IAVF_SUCCESS) {
		device_printf(dev,
		    "iavf_shutdown_adminq() failed with status %s\n",
		    iavf_stat_str(hw, status));
	}

	free(sc->vf_res, M_IAVF);
	sc->vf_res = NULL;
	iavf_free_pci_resources(sc);
	iavf_free_filters(sc);

	INIT_DBG_DEV(dev, "end");
	return (0);
}

/**
 * iavf_if_shutdown - called by iflib to handle shutdown
 * @ctx: the iflib context pointer
 *
 * Callback for the IFDI_SHUTDOWN iflib function.
 *
 * @returns zero or an error code on failure
 */
static int
iavf_if_shutdown(if_ctx_t ctx __unused)
{
	return (0);
}

/**
 * iavf_if_suspend - called by iflib to handle suspend
 * @ctx: the iflib context pointer
 *
 * Callback for the IFDI_SUSPEND iflib function.
 *
 * @returns zero or an error code on failure
 */
static int
iavf_if_suspend(if_ctx_t ctx __unused)
{
	return (0);
}

/**
 * iavf_if_resume - called by iflib to handle resume
 * @ctx: the iflib context pointer
 *
 * Callback for the IFDI_RESUME iflib function.
 *
 * @returns zero or an error code on failure
 */
static int
iavf_if_resume(if_ctx_t ctx __unused)
{
	return (0);
}

/**
 * iavf_vc_sleep_wait - Sleep for a response from a VC message
 * @sc: device softc
 * @op: the op code to sleep on
 *
 * Sleep until a response is received from the PF for the VC message
 * identified by the given op code.
 *
 * @returns zero on success, or EWOULDBLOCK if the sleep times out.
 */
static int
iavf_vc_sleep_wait(struct iavf_sc *sc, u32 op)
{
	int error = 0;

	IAVF_VC_LOCK_ASSERT(sc);

	iavf_dbg_vc(sc, "Sleeping for op %b\n", op, IAVF_FLAGS);

	error = mtx_sleep(iavf_vc_get_op_chan(sc, op),
	    &sc->vc_mtx, PRI_MAX, "iavf_vc", IAVF_AQ_TIMEOUT);

	return (error);
}

/**
 * iavf_send_vc_msg_sleep - Send a virtchnl message and wait for a response
 * @sc: device softc
 * @op: the op code to send
 *
 * Send a virtchnl message to the PF, and sleep or busy wait for a response
 * from the PF, depending on iflib context lock type.
 *
 * @remark this function does not wait if the device is detaching, on kernels
 * that support indicating to the driver that the device is detaching
 *
 * @returns zero or an error code on failure.
 */
int
iavf_send_vc_msg_sleep(struct iavf_sc *sc, u32 op)
{
	if_ctx_t ctx = sc->vsi.ctx;
	int error = 0;

	IAVF_VC_LOCK(sc);
	error = iavf_vc_send_cmd(sc, op);
	if (error != 0) {
		iavf_dbg_vc(sc, "Error sending %b: %d\n", op, IAVF_FLAGS, error);
		goto release_lock;
	}

	/* Don't wait for a response if the device is being detached. */
	if (!iflib_in_detach(ctx)) {
		error = iavf_vc_sleep_wait(sc, op);
		IAVF_VC_LOCK_ASSERT(sc);

		if (error == EWOULDBLOCK)
			device_printf(sc->dev, "%b timed out\n", op, IAVF_FLAGS);
	}
release_lock:
	IAVF_VC_UNLOCK(sc);
	return (error);
}

/**
 * iavf_send_vc_msg - Send a virtchnl message to the PF
 * @sc: device softc
 * @op: the op code to send
 *
 * Send a virtchnl message to the PF and do not wait for a response.
 *
 * @returns zero on success, or an error code on failure.
 */
int
iavf_send_vc_msg(struct iavf_sc *sc, u32 op)
{
	int error = 0;

	error = iavf_vc_send_cmd(sc, op);
	if (error != 0)
		iavf_dbg_vc(sc, "Error sending %b: %d\n", op, IAVF_FLAGS, error);

	return (error);
}

/**
 * iavf_init_queues - initialize Tx and Rx queues
 * @vsi: the VSI to initialize
 *
 * Refresh the Tx and Rx ring contents and update the tail pointers for each
 * queue.
 */
static void
iavf_init_queues(struct iavf_vsi *vsi)
{
	struct iavf_tx_queue *tx_que = vsi->tx_queues;
	struct iavf_rx_queue *rx_que = vsi->rx_queues;
	struct rx_ring *rxr;
	uint32_t mbuf_sz;

	mbuf_sz = iflib_get_rx_mbuf_sz(vsi->ctx);
	MPASS(mbuf_sz <= UINT16_MAX);

	for (int i = 0; i < vsi->num_tx_queues; i++, tx_que++)
		iavf_init_tx_ring(vsi, tx_que);

	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) {
		rxr = &rx_que->rxr;

		rxr->mbuf_sz = mbuf_sz;
		wr32(vsi->hw, rxr->tail, 0);
	}
}

/**
 * iavf_if_init - Initialize device for operation
 * @ctx: the iflib context pointer
 *
 * Initializes a device for operation. Called by iflib in response to an
 * interface up event from the stack.
 *
 * @remark this function does not return a value and thus cannot indicate
 * failure to initialize.
 */
static void
iavf_if_init(if_ctx_t ctx)
{
	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
	struct iavf_vsi *vsi = &sc->vsi;
	struct iavf_hw *hw = &sc->hw;
	if_t ifp = iflib_get_ifp(ctx);
	u8 tmpaddr[ETHER_ADDR_LEN];
	enum iavf_status status;
	device_t dev = sc->dev;
	int error = 0;

	INIT_DBG_IF(ifp, "begin");

	sx_assert(iflib_ctx_lock_get(ctx), SA_XLOCKED);

	error = iavf_reset_complete(hw);
	if (error) {
		device_printf(sc->dev, "%s: VF reset failed\n",
		    __func__);
	}

	if (!iavf_check_asq_alive(hw)) {
		iavf_dbg_info(sc, "ASQ is not alive, re-initializing AQ\n");
		pci_enable_busmaster(dev);

		status = iavf_shutdown_adminq(hw);
		if (status != IAVF_SUCCESS) {
			device_printf(dev,
			    "%s: iavf_shutdown_adminq failed: %s\n",
			    __func__, iavf_stat_str(hw, status));
			return;
		}

		status = iavf_init_adminq(hw);
		if (status != IAVF_SUCCESS) {
			device_printf(dev,
			    "%s: iavf_init_adminq failed: %s\n",
			    __func__, iavf_stat_str(hw, status));
			return;
		}
	}

	/* Make sure queues are disabled */
	iavf_disable_queues_with_retries(sc);

	bcopy(if_getlladdr(ifp), tmpaddr, ETHER_ADDR_LEN);
	if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
	    (iavf_validate_mac_addr(tmpaddr) == IAVF_SUCCESS)) {
		error = iavf_del_mac_filter(sc, hw->mac.addr);
		if (error == 0)
			iavf_send_vc_msg(sc, IAVF_FLAG_AQ_DEL_MAC_FILTER);

		bcopy(tmpaddr, hw->mac.addr, ETH_ALEN);
	}

	error = iavf_add_mac_filter(sc, hw->mac.addr, 0);
	if (!error || error == EEXIST)
		iavf_send_vc_msg(sc, IAVF_FLAG_AQ_ADD_MAC_FILTER);
	iflib_set_mac(ctx, hw->mac.addr);

	/* Prepare the queues for operation */
	iavf_init_queues(vsi);

	/* Set initial ITR values */
	iavf_configure_itr(sc);

	iavf_send_vc_msg(sc, IAVF_FLAG_AQ_CONFIGURE_QUEUES);

	/* Set up RSS */
	iavf_config_rss(sc);

	/* Map vectors */
	iavf_send_vc_msg(sc, IAVF_FLAG_AQ_MAP_VECTORS);

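	/*
	 * Note: two TX completion tracking schemes are supported. With head
	 * write-back enabled, the extra u32 reserved at the end of each TX
	 * ring (see the isc_txqsizes setup in iavf_if_attach_pre()) appears
	 * to be where the hardware reports ring progress, tracked via
	 * iavf_init_tx_cidx(); otherwise completions are tracked through
	 * descriptor write-back and the report-status queue (tx_rsq)
	 * allocated in iavf_if_tx_queues_alloc().
	 */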
	/* Init SW TX ring indices */
	if (vsi->enable_head_writeback)
		iavf_init_tx_cidx(vsi);
	else
		iavf_init_tx_rsqs(vsi);

	/* Configure promiscuous mode */
	iavf_config_promisc(sc, if_getflags(ifp));

	/* Enable queues */
	iavf_send_vc_msg_sleep(sc, IAVF_FLAG_AQ_ENABLE_QUEUES);

	iavf_set_state(&sc->state, IAVF_STATE_RUNNING);
}

/**
 * iavf_if_msix_intr_assign - Assign MSI-X interrupts
 * @ctx: the iflib context pointer
 * @msix: the number of MSI-X vectors available
 *
 * Called by iflib to assign MSI-X interrupt vectors to queues. Assigns and
 * sets up vectors for each Tx and Rx queue, as well as the administrative
 * control interrupt.
 *
 * @returns zero or an error code on failure
 */
static int
iavf_if_msix_intr_assign(if_ctx_t ctx, int msix __unused)
{
	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
	struct iavf_vsi *vsi = &sc->vsi;
	struct iavf_rx_queue *rx_que = vsi->rx_queues;
	struct iavf_tx_queue *tx_que = vsi->tx_queues;
	int err, i, rid, vector = 0;
	char buf[16];

	MPASS(vsi->shared->isc_nrxqsets > 0);
	MPASS(vsi->shared->isc_ntxqsets > 0);

	/* Admin Que is vector 0 */
	rid = vector + 1;
	err = iflib_irq_alloc_generic(ctx, &vsi->irq, rid, IFLIB_INTR_ADMIN,
	    iavf_msix_adminq, sc, 0, "aq");
	if (err) {
		iflib_irq_free(ctx, &vsi->irq);
		device_printf(iflib_get_dev(ctx),
		    "Failed to register Admin Que handler");
		return (err);
	}

	/* Now set up the stations */
	for (i = 0, vector = 1; i < vsi->shared->isc_nrxqsets; i++, vector++, rx_que++) {
		rid = vector + 1;

		snprintf(buf, sizeof(buf), "rxq%d", i);
		err = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
		    IFLIB_INTR_RXTX, iavf_msix_que, rx_que, rx_que->rxr.me, buf);
		if (err) {
			device_printf(iflib_get_dev(ctx),
			    "Failed to allocate queue RX int vector %d, err: %d\n", i, err);
			vsi->num_rx_queues = i + 1;
			goto fail;
		}
		rx_que->msix = vector;
	}

	bzero(buf, sizeof(buf));

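	/*
	 * The TX queues do not receive dedicated MSI-X vectors; each TX queue
	 * is serviced as a softirq on one of the RX queue interrupts,
	 * distributed round-robin across the RX vectors below.
	 */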
	for (i = 0; i < vsi->shared->isc_ntxqsets; i++, tx_que++) {
		snprintf(buf, sizeof(buf), "txq%d", i);
		iflib_softirq_alloc_generic(ctx,
		    &vsi->rx_queues[i % vsi->shared->isc_nrxqsets].que_irq,
		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);

		tx_que->msix = (i % vsi->shared->isc_nrxqsets) + 1;
	}

	return (0);
fail:
	iflib_irq_free(ctx, &vsi->irq);
	rx_que = vsi->rx_queues;
	for (i = 0; i < vsi->num_rx_queues; i++, rx_que++)
		iflib_irq_free(ctx, &rx_que->que_irq);
	return (err);
}

/**
 * iavf_if_enable_intr - Enable all interrupts for a device
 * @ctx: the iflib context pointer
 *
 * Called by iflib to request enabling all interrupts.
 */
static void
iavf_if_enable_intr(if_ctx_t ctx)
{
	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
	struct iavf_vsi *vsi = &sc->vsi;

	iavf_enable_intr(vsi);
}

/**
 * iavf_if_disable_intr - Disable all interrupts for a device
 * @ctx: the iflib context pointer
 *
 * Called by iflib to request disabling all interrupts.
 */
static void
iavf_if_disable_intr(if_ctx_t ctx)
{
	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
	struct iavf_vsi *vsi = &sc->vsi;

	iavf_disable_intr(vsi);
}

/**
 * iavf_if_rx_queue_intr_enable - Enable one Rx queue interrupt
 * @ctx: the iflib context pointer
 * @rxqid: Rx queue index
 *
 * Enables the interrupt associated with a specified Rx queue.
 *
 * @returns zero
 */
static int
iavf_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
{
	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
	struct iavf_vsi *vsi = &sc->vsi;
	struct iavf_hw *hw = vsi->hw;
	struct iavf_rx_queue *rx_que = &vsi->rx_queues[rxqid];

	iavf_enable_queue_irq(hw, rx_que->msix - 1);
	return (0);
}

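/*
 * Note: queue MSI-X vectors are numbered starting at 1 (vector 0 services
 * the admin queue), while the per-queue interrupt enable registers are
 * indexed from 0; hence the "msix - 1" passed to iavf_enable_queue_irq()
 * above and in iavf_if_tx_queue_intr_enable() below.
 */
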
/**
 * iavf_if_tx_queue_intr_enable - Enable one Tx queue interrupt
 * @ctx: the iflib context pointer
 * @txqid: Tx queue index
 *
 * Enables the interrupt associated with a specified Tx queue.
 *
 * @returns zero
 */
static int
iavf_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
{
	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
	struct iavf_vsi *vsi = &sc->vsi;
	struct iavf_hw *hw = vsi->hw;
	struct iavf_tx_queue *tx_que = &vsi->tx_queues[txqid];

	iavf_enable_queue_irq(hw, tx_que->msix - 1);
	return (0);
}

/**
 * iavf_if_tx_queues_alloc - Allocate Tx queue memory
 * @ctx: the iflib context pointer
 * @vaddrs: Array of virtual addresses
 * @paddrs: Array of physical addresses
 * @ntxqs: the number of Tx queues per group (should always be 1)
 * @ntxqsets: the number of Tx queues
 *
 * Allocates memory for the specified number of Tx queues. This includes
 * memory for the queue structures and the report status array for the queues.
 * The virtual and physical addresses are saved for later use during
 * initialization.
 *
 * @returns zero or a non-zero error code on failure
 */
static int
iavf_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets)
{
	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
	struct iavf_vsi *vsi = &sc->vsi;
	if_softc_ctx_t scctx = vsi->shared;
	struct iavf_tx_queue *que;
	int i, j, error = 0;

	MPASS(scctx->isc_ntxqsets > 0);
	MPASS(ntxqs == 1);
	MPASS(scctx->isc_ntxqsets == ntxqsets);

	/* Allocate queue structure memory */
	if (!(vsi->tx_queues =
	    (struct iavf_tx_queue *)malloc(sizeof(struct iavf_tx_queue) * ntxqsets, M_IAVF, M_NOWAIT | M_ZERO))) {
		device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n");
		return (ENOMEM);
	}

	for (i = 0, que = vsi->tx_queues; i < ntxqsets; i++, que++) {
		struct tx_ring *txr = &que->txr;

		txr->me = i;
		que->vsi = vsi;

		if (!vsi->enable_head_writeback) {
			/* Allocate report status array */
			if (!(txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IAVF, M_NOWAIT))) {
				device_printf(iflib_get_dev(ctx), "failed to allocate tx_rsq memory\n");
				error = ENOMEM;
				goto fail;
			}
			/* Init report status array */
			for (j = 0; j < scctx->isc_ntxd[0]; j++)
				txr->tx_rsq[j] = QIDX_INVALID;
		}
		/* get the virtual and physical address of the hardware queues */
		txr->tail = IAVF_QTX_TAIL1(txr->me);
		txr->tx_base = (struct iavf_tx_desc *)vaddrs[i * ntxqs];
		txr->tx_paddr = paddrs[i * ntxqs];
		txr->que = que;
	}

	return (0);
fail:
	iavf_if_queues_free(ctx);
	return (error);
}

/**
 * iavf_if_rx_queues_alloc - Allocate Rx queue memory
 * @ctx: the iflib context pointer
 * @vaddrs: Array of virtual addresses
 * @paddrs: Array of physical addresses
 * @nrxqs: number of Rx queues per group (should always be 1)
 * @nrxqsets: the number of Rx queues to allocate
 *
 * Called by iflib to allocate driver memory for a number of Rx queues.
 * Allocates memory for the driver's private Rx queue data structure, and
 * saves the physical and virtual addresses for later use.
 *
 * @returns zero or a non-zero error code on failure
 */
static int
iavf_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets)
{
	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
	struct iavf_vsi *vsi = &sc->vsi;
	struct iavf_rx_queue *que;
	int i, error = 0;

#ifdef INVARIANTS
	if_softc_ctx_t scctx = vsi->shared;
	MPASS(scctx->isc_nrxqsets > 0);
	MPASS(nrxqs == 1);
	MPASS(scctx->isc_nrxqsets == nrxqsets);
#endif

	/* Allocate queue structure memory */
	if (!(vsi->rx_queues =
	    (struct iavf_rx_queue *) malloc(sizeof(struct iavf_rx_queue) *
	    nrxqsets, M_IAVF, M_NOWAIT | M_ZERO))) {
		device_printf(iflib_get_dev(ctx), "Unable to allocate RX ring memory\n");
		error = ENOMEM;
		goto fail;
	}

	for (i = 0, que = vsi->rx_queues; i < nrxqsets; i++, que++) {
		struct rx_ring *rxr = &que->rxr;

		rxr->me = i;
		que->vsi = vsi;

		/* get the virtual and physical address of the hardware queues */
		rxr->tail = IAVF_QRX_TAIL1(rxr->me);
		rxr->rx_base = (union iavf_rx_desc *)vaddrs[i * nrxqs];
		rxr->rx_paddr = paddrs[i * nrxqs];
		rxr->que = que;
	}

	return (0);
fail:
	iavf_if_queues_free(ctx);
	return (error);
}

/**
 * iavf_if_queues_free - Free driver queue memory
 * @ctx: the iflib context pointer
 *
 * Called by iflib to release memory allocated by the driver when setting up
 * Tx and Rx queues.
 *
 * @remark The ordering of this function and iavf_if_detach is not guaranteed.
 * It is possible for this function to be called either before or after
 * iavf_if_detach. Thus, care must be taken to ensure that either ordering of
 * iavf_if_detach and iavf_if_queues_free is safe.
 */
static void
iavf_if_queues_free(if_ctx_t ctx)
{
	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
	struct iavf_vsi *vsi = &sc->vsi;

	if (!vsi->enable_head_writeback) {
		struct iavf_tx_queue *que;
		int i = 0;

		for (i = 0, que = vsi->tx_queues; i < vsi->shared->isc_ntxqsets; i++, que++) {
			struct tx_ring *txr = &que->txr;
			if (txr->tx_rsq != NULL) {
				free(txr->tx_rsq, M_IAVF);
				txr->tx_rsq = NULL;
			}
		}
	}

	if (vsi->tx_queues != NULL) {
		free(vsi->tx_queues, M_IAVF);
		vsi->tx_queues = NULL;
	}
	if (vsi->rx_queues != NULL) {
		free(vsi->rx_queues, M_IAVF);
		vsi->rx_queues = NULL;
	}
}

/**
 * iavf_check_aq_errors - Check for AdminQ errors
 * @sc: device softc
 *
 * Check the AdminQ registers for errors, and determine whether or not a reset
 * may be required to resolve them.
 *
 * @post if there are errors, the VF device will be stopped and a reset will
 * be requested.
 *
 * @returns zero if there are no issues, EBUSY if the device is resetting,
 * or EIO if there are any AQ errors.
 */
static int
iavf_check_aq_errors(struct iavf_sc *sc)
{
	struct iavf_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	u32 reg, oldreg;
	u8 aq_error = false;

	oldreg = reg = rd32(hw, hw->aq.arq.len);

	/* Check if device is in reset */
	if (reg == 0xdeadbeef || reg == 0xffffffff) {
		device_printf(dev, "VF in reset\n");
		return (EBUSY);
	}

	/* Check for Admin queue errors */
	if (reg & IAVF_VF_ARQLEN1_ARQVFE_MASK) {
		device_printf(dev, "ARQ VF Error detected\n");
		reg &= ~IAVF_VF_ARQLEN1_ARQVFE_MASK;
		aq_error = true;
	}
	if (reg & IAVF_VF_ARQLEN1_ARQOVFL_MASK) {
		device_printf(dev, "ARQ Overflow Error detected\n");
		reg &= ~IAVF_VF_ARQLEN1_ARQOVFL_MASK;
		aq_error = true;
	}
	if (reg & IAVF_VF_ARQLEN1_ARQCRIT_MASK) {
		device_printf(dev, "ARQ Critical Error detected\n");
		reg &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK;
		aq_error = true;
	}
	if (oldreg != reg)
		wr32(hw, hw->aq.arq.len, reg);

	oldreg = reg = rd32(hw, hw->aq.asq.len);
	if (reg & IAVF_VF_ATQLEN1_ATQVFE_MASK) {
		device_printf(dev, "ASQ VF Error detected\n");
		reg &= ~IAVF_VF_ATQLEN1_ATQVFE_MASK;
		aq_error = true;
	}
	if (reg & IAVF_VF_ATQLEN1_ATQOVFL_MASK) {
		device_printf(dev, "ASQ Overflow Error detected\n");
		reg &= ~IAVF_VF_ATQLEN1_ATQOVFL_MASK;
		aq_error = true;
	}
	if (reg & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
		device_printf(dev, "ASQ Critical Error detected\n");
		reg &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK;
		aq_error = true;
	}
	if (oldreg != reg)
		wr32(hw, hw->aq.asq.len, reg);

	return (aq_error ? EIO : 0);
}
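/*
 * Illustration (a stand-alone sketch, not part of the driver): the two
 * blocks above follow the same pattern -- read a length register, log and
 * clear only the error bits that were observed, and write the register back
 * only if something actually changed.  The sketch below shows that pattern
 * in isolation.  The mock_rd/mock_wr accessors and the EXAMPLE_* masks are
 * hypothetical stand-ins, not real iavf symbols.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_ERR_A	0x10000000u	/* hypothetical error bit */
#define EXAMPLE_ERR_B	0x20000000u	/* hypothetical error bit */

static uint32_t fake_reg = 0x10000200u;	/* pretend hardware register */

static uint32_t mock_rd(void) { return (fake_reg); }
static void mock_wr(uint32_t v) { fake_reg = v; }

/* Returns true if any error bit was seen (and cleared). */
static bool
check_and_clear_errors(void)
{
	uint32_t reg, oldreg;
	bool err = false;

	oldreg = reg = mock_rd();
	if (reg & EXAMPLE_ERR_A) {
		printf("error A detected\n");
		reg &= ~EXAMPLE_ERR_A;
		err = true;
	}
	if (reg & EXAMPLE_ERR_B) {
		printf("error B detected\n");
		reg &= ~EXAMPLE_ERR_B;
		err = true;
	}
	/* Only touch the register when a bit actually needs clearing. */
	if (oldreg != reg)
		mock_wr(reg);
	return (err);
}

int
main(void)
{
	printf("errors: %d, reg now 0x%08x\n", check_and_clear_errors(), fake_reg);
	return (0);
}
#endif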
/**
 * iavf_process_adminq - Process adminq responses from the PF
 * @sc: device softc
 * @pending: output parameter indicating how many messages remain
 *
 * Process the adminq to handle replies from the PF over the virtchnl
 * connection.
 *
 * @returns zero or an iavf_status code on failure
 */
static enum iavf_status
iavf_process_adminq(struct iavf_sc *sc, u16 *pending)
{
	enum iavf_status status = IAVF_SUCCESS;
	struct iavf_arq_event_info event;
	struct iavf_hw *hw = &sc->hw;
	struct virtchnl_msg *v_msg;
	int error = 0, loop = 0;
	u32 reg;

	if (iavf_test_state(&sc->state, IAVF_STATE_RESET_PENDING)) {
		status = IAVF_ERR_ADMIN_QUEUE_ERROR;
		goto reenable_interrupt;
	}

	error = iavf_check_aq_errors(sc);
	if (error) {
		status = IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
		goto reenable_interrupt;
	}

	event.buf_len = IAVF_AQ_BUF_SZ;
	event.msg_buf = sc->aq_buffer;
	bzero(event.msg_buf, IAVF_AQ_BUF_SZ);
	v_msg = (struct virtchnl_msg *)&event.desc;

	IAVF_VC_LOCK(sc);
	/* clean and process any events */
	do {
		status = iavf_clean_arq_element(hw, &event, pending);
		/*
		 * Also covers normal case when iavf_clean_arq_element()
		 * returns "IAVF_ERR_ADMIN_QUEUE_NO_WORK"
		 */
		if (status)
			break;
		iavf_vc_completion(sc, v_msg->v_opcode,
		    v_msg->v_retval, event.msg_buf, event.msg_len);
		bzero(event.msg_buf, IAVF_AQ_BUF_SZ);
	} while (*pending && (loop++ < IAVF_ADM_LIMIT));
	IAVF_VC_UNLOCK(sc);

reenable_interrupt:
	/* Re-enable admin queue interrupt cause */
	reg = rd32(hw, IAVF_VFINT_ICR0_ENA1);
	reg |= IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK;
	wr32(hw, IAVF_VFINT_ICR0_ENA1, reg);

	return (status);
}

/**
 * iavf_if_update_admin_status - Administrative status task
 * @ctx: iflib context
 *
 * Called by iflib to handle administrative status events. The iavf driver
 * uses this to process the adminq virtchnl messages outside of interrupt
 * context.
 */
static void
iavf_if_update_admin_status(if_ctx_t ctx)
{
	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
	struct iavf_hw *hw = &sc->hw;
	u16 pending = 0;

	iavf_process_adminq(sc, &pending);
	iavf_update_link_status(sc);

	/*
	 * If there are still messages to process, reschedule.
	 * Otherwise, re-enable the Admin Queue interrupt.
	 */
	if (pending > 0)
		iflib_admin_intr_deferred(ctx);
	else
		iavf_enable_adminq_irq(hw);
}

/**
 * iavf_if_multi_set - Set multicast address filters
 * @ctx: iflib context
 *
 * Called by iflib to update the current list of multicast filters for the
 * device.
 */
static void
iavf_if_multi_set(if_ctx_t ctx)
{
	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);

	iavf_multi_set(sc);
}

/**
 * iavf_if_mtu_set - Set the device MTU
 * @ctx: iflib context
 * @mtu: MTU value to set
 *
 * Called by iflib to set the device MTU.
 *
 * @returns zero on success, or EINVAL if the MTU is invalid.
 */
static int
iavf_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
{
	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
	struct iavf_vsi *vsi = &sc->vsi;

	IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
	if (mtu < IAVF_MIN_MTU || mtu > IAVF_MAX_MTU) {
		device_printf(sc->dev, "mtu %d is not in valid range [%d-%d]\n",
		    mtu, IAVF_MIN_MTU, IAVF_MAX_MTU);
		return (EINVAL);
	}

	vsi->shared->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
	    ETHER_VLAN_ENCAP_LEN;

	return (0);
}
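/*
 * Illustration (not part of the driver): the maximum frame size handed to
 * iflib above is simply the MTU plus the fixed Ethernet overhead.  The
 * stand-alone sketch below walks through that arithmetic with the standard
 * sizes (14-byte Ethernet header, 4-byte FCS, 4-byte 802.1Q tag); the MTU
 * values used are only examples.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define EX_ETHER_HDR_LEN	14	/* matches ETHER_HDR_LEN */
#define EX_ETHER_CRC_LEN	4	/* matches ETHER_CRC_LEN */
#define EX_ETHER_VLAN_ENCAP_LEN	4	/* matches ETHER_VLAN_ENCAP_LEN */

static uint32_t
max_frame_size(uint32_t mtu)
{
	return (mtu + EX_ETHER_HDR_LEN + EX_ETHER_CRC_LEN +
	    EX_ETHER_VLAN_ENCAP_LEN);
}

int
main(void)
{
	/* 1500 -> 1522 bytes, 9000 -> 9022 bytes */
	printf("mtu 1500 -> frame %u\n", max_frame_size(1500));
	printf("mtu 9000 -> frame %u\n", max_frame_size(9000));
	return (0);
}
#endif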
/**
 * iavf_if_media_status - Report current media status
 * @ctx: iflib context
 * @ifmr: ifmedia request structure
 *
 * Called by iflib to report the current media status in the ifmr.
 */
static void
iavf_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
{
	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);

	iavf_media_status_common(sc, ifmr);
}

/**
 * iavf_if_media_change - Change the current media settings
 * @ctx: iflib context
 *
 * Called by iflib to change the current media settings.
 *
 * @returns zero on success, or an error code on failure.
 */
static int
iavf_if_media_change(if_ctx_t ctx)
{
	return iavf_media_change_common(iflib_get_ifp(ctx));
}

/**
 * iavf_if_promisc_set - Set device promiscuous mode
 * @ctx: iflib context
 * @flags: promiscuous configuration
 *
 * Called by iflib to request that the device enter promiscuous mode.
 *
 * @returns zero on success, or an error code on failure.
 */
static int
iavf_if_promisc_set(if_ctx_t ctx, int flags)
{
	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);

	return iavf_config_promisc(sc, flags);
}
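/*
 * Sketch (not the actual iavf_config_promisc, which is implemented
 * elsewhere): the flags argument carries interface flags, and a handler of
 * this kind typically distinguishes full promiscuous mode from
 * all-multicast.  The IFF_PROMISC / IFF_ALLMULTI semantics are assumed here;
 * the stand-in macros, enum, and function names below are hypothetical.
 */
#if 0
#include <stdio.h>

#define EX_IFF_PROMISC	0x100	/* stand-in for IFF_PROMISC */
#define EX_IFF_ALLMULTI	0x200	/* stand-in for IFF_ALLMULTI */

enum promisc_mode { PROMISC_OFF, PROMISC_MULTI, PROMISC_ALL };

static enum promisc_mode
decode_promisc_flags(int flags)
{
	if (flags & EX_IFF_PROMISC)
		return (PROMISC_ALL);	/* receive everything */
	if (flags & EX_IFF_ALLMULTI)
		return (PROMISC_MULTI);	/* receive all multicast */
	return (PROMISC_OFF);		/* normal filtering */
}

int
main(void)
{
	printf("%d %d %d\n",
	    decode_promisc_flags(0),
	    decode_promisc_flags(EX_IFF_ALLMULTI),
	    decode_promisc_flags(EX_IFF_PROMISC | EX_IFF_ALLMULTI));
	return (0);
}
#endif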
(%d)\n", val); 1390ca853deeSEric Joyner return; 1391ca853deeSEric Joyner } 1392ca853deeSEric Joyner 1393ca853deeSEric Joyner /* Fire off the adminq task */ 1394ca853deeSEric Joyner iflib_admin_intr_deferred(ctx); 1395ca853deeSEric Joyner 1396ca853deeSEric Joyner /* Update stats */ 1397ca853deeSEric Joyner iavf_request_stats(sc); 1398ca853deeSEric Joyner } 1399ca853deeSEric Joyner 1400ca853deeSEric Joyner /** 1401ca853deeSEric Joyner * iavf_if_vlan_register - Register a VLAN 1402ca853deeSEric Joyner * @ctx: iflib context 1403ca853deeSEric Joyner * @vtag: the VLAN to register 1404ca853deeSEric Joyner * 1405ca853deeSEric Joyner * Register a VLAN filter for a given vtag. 1406ca853deeSEric Joyner */ 1407ca853deeSEric Joyner static void 1408ca853deeSEric Joyner iavf_if_vlan_register(if_ctx_t ctx, u16 vtag) 1409ca853deeSEric Joyner { 1410ca853deeSEric Joyner struct iavf_sc *sc = iavf_sc_from_ctx(ctx); 1411ca853deeSEric Joyner struct iavf_vsi *vsi = &sc->vsi; 1412ca853deeSEric Joyner 1413ca853deeSEric Joyner if ((vtag == 0) || (vtag > 4095)) /* Invalid */ 1414ca853deeSEric Joyner return; 1415ca853deeSEric Joyner 1416ca853deeSEric Joyner /* Add VLAN 0 to list, for untagged traffic */ 1417ca853deeSEric Joyner if (vsi->num_vlans == 0) 1418ca853deeSEric Joyner iavf_add_vlan_filter(sc, 0); 1419ca853deeSEric Joyner 1420ca853deeSEric Joyner iavf_add_vlan_filter(sc, vtag); 1421ca853deeSEric Joyner 1422ca853deeSEric Joyner ++vsi->num_vlans; 1423ca853deeSEric Joyner 1424ca853deeSEric Joyner iavf_send_vc_msg(sc, IAVF_FLAG_AQ_ADD_VLAN_FILTER); 1425ca853deeSEric Joyner } 1426ca853deeSEric Joyner 1427ca853deeSEric Joyner /** 1428ca853deeSEric Joyner * iavf_if_vlan_unregister - Unregister a VLAN 1429ca853deeSEric Joyner * @ctx: iflib context 1430ca853deeSEric Joyner * @vtag: the VLAN to remove 1431ca853deeSEric Joyner * 1432ca853deeSEric Joyner * Unregister (remove) a VLAN filter for the given vtag. 1433ca853deeSEric Joyner */ 1434ca853deeSEric Joyner static void 1435ca853deeSEric Joyner iavf_if_vlan_unregister(if_ctx_t ctx, u16 vtag) 1436ca853deeSEric Joyner { 1437ca853deeSEric Joyner struct iavf_sc *sc = iavf_sc_from_ctx(ctx); 1438ca853deeSEric Joyner struct iavf_vsi *vsi = &sc->vsi; 1439ca853deeSEric Joyner int i = 0; 1440ca853deeSEric Joyner 1441ca853deeSEric Joyner if ((vtag == 0) || (vtag > 4095) || (vsi->num_vlans == 0)) /* Invalid */ 1442ca853deeSEric Joyner return; 1443ca853deeSEric Joyner 1444ca853deeSEric Joyner i = iavf_mark_del_vlan_filter(sc, vtag); 1445ca853deeSEric Joyner vsi->num_vlans -= i; 1446ca853deeSEric Joyner 1447ca853deeSEric Joyner /* Remove VLAN filter 0 if the last VLAN is being removed */ 1448ca853deeSEric Joyner if (vsi->num_vlans == 0) 1449ca853deeSEric Joyner i += iavf_mark_del_vlan_filter(sc, 0); 1450ca853deeSEric Joyner 1451ca853deeSEric Joyner if (i > 0) 1452ca853deeSEric Joyner iavf_send_vc_msg(sc, IAVF_FLAG_AQ_DEL_VLAN_FILTER); 1453ca853deeSEric Joyner } 1454ca853deeSEric Joyner 1455ca853deeSEric Joyner /** 1456ca853deeSEric Joyner * iavf_if_get_counter - Get network statistic counters 1457ca853deeSEric Joyner * @ctx: iflib context 1458ca853deeSEric Joyner * @cnt: The counter to obtain 1459ca853deeSEric Joyner * 1460ca853deeSEric Joyner * Called by iflib to obtain the value of the specified counter. 1461ca853deeSEric Joyner * 1462ca853deeSEric Joyner * @returns the uint64_t counter value. 
/**
 * iavf_if_get_counter - Get network statistic counters
 * @ctx: iflib context
 * @cnt: The counter to obtain
 *
 * Called by iflib to obtain the value of the specified counter.
 *
 * @returns the uint64_t counter value.
 */
static uint64_t
iavf_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
	struct iavf_vsi *vsi = &sc->vsi;
	if_t ifp = iflib_get_ifp(ctx);

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (vsi->ipackets);
	case IFCOUNTER_IERRORS:
		return (vsi->ierrors);
	case IFCOUNTER_OPACKETS:
		return (vsi->opackets);
	case IFCOUNTER_OERRORS:
		return (vsi->oerrors);
	case IFCOUNTER_COLLISIONS:
		/* Collisions are by standard impossible in 40G/10G Ethernet */
		return (0);
	case IFCOUNTER_IBYTES:
		return (vsi->ibytes);
	case IFCOUNTER_OBYTES:
		return (vsi->obytes);
	case IFCOUNTER_IMCASTS:
		return (vsi->imcasts);
	case IFCOUNTER_OMCASTS:
		return (vsi->omcasts);
	case IFCOUNTER_IQDROPS:
		return (vsi->iqdrops);
	case IFCOUNTER_OQDROPS:
		return (vsi->oqdrops);
	case IFCOUNTER_NOPROTO:
		return (vsi->noproto);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}
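/*
 * Usage illustration (userspace, not part of the driver): the values
 * returned above feed the generic ifnet counters, which a FreeBSD userland
 * program can read through getifaddrs(3); the AF_LINK entry for an
 * interface carries a struct if_data with the accumulated counters.  The
 * interface name "iavf0" is only an example.
 */
#if 0
#include <sys/types.h>
#include <sys/socket.h>
#include <net/if.h>
#include <ifaddrs.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	struct ifaddrs *ifap, *ifa;

	if (getifaddrs(&ifap) != 0) {
		perror("getifaddrs");
		return (1);
	}
	for (ifa = ifap; ifa != NULL; ifa = ifa->ifa_next) {
		if (ifa->ifa_addr == NULL ||
		    ifa->ifa_addr->sa_family != AF_LINK ||
		    strcmp(ifa->ifa_name, "iavf0") != 0)
			continue;
		struct if_data *ifd = ifa->ifa_data;
		printf("ipackets %ju opackets %ju ierrors %ju oerrors %ju\n",
		    (uintmax_t)ifd->ifi_ipackets, (uintmax_t)ifd->ifi_opackets,
		    (uintmax_t)ifd->ifi_ierrors, (uintmax_t)ifd->ifi_oerrors);
	}
	freeifaddrs(ifap);
	return (0);
}
#endif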
/**
 * iavf_if_needs_restart - Tell iflib when the driver needs to be reinitialized
 * @ctx: iflib context
 * @event: event code to check
 *
 * Defaults to returning false for unknown events.
 *
 * @returns true if iflib needs to reinit the interface
 */
static bool
iavf_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
{
	switch (event) {
	case IFLIB_RESTART_VLAN_CONFIG:
		return (true);
	default:
		return (false);
	}
}

/**
 * iavf_free_pci_resources - Free PCI resources
 * @sc: device softc
 *
 * Called to release the PCI resources allocated during attach. May be called
 * in the error flow of attach_pre, or during detach as part of cleanup.
 */
static void
iavf_free_pci_resources(struct iavf_sc *sc)
{
	struct iavf_vsi *vsi = &sc->vsi;
	struct iavf_rx_queue *rx_que = vsi->rx_queues;
	device_t dev = sc->dev;

	/* We may get here before stations are set up */
	if (rx_que == NULL)
		goto early;

	/* Release all interrupts */
	iflib_irq_free(vsi->ctx, &vsi->irq);

	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
		iflib_irq_free(vsi->ctx, &rx_que->que_irq);

early:
	if (sc->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->pci_mem), sc->pci_mem);
}

/**
 * iavf_setup_interface - Setup the device interface
 * @sc: device softc
 *
 * Called to setup some device interface settings, such as the ifmedia
 * structure.
 */
static void
iavf_setup_interface(struct iavf_sc *sc)
{
	struct iavf_vsi *vsi = &sc->vsi;
	if_ctx_t ctx = vsi->ctx;
	if_t ifp = iflib_get_ifp(ctx);

	iavf_dbg_init(sc, "begin\n");

	vsi->shared->isc_max_frame_size =
	    if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN
	    + ETHER_VLAN_ENCAP_LEN;

	iavf_set_initial_baudrate(ifp);

	ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);
}

/**
 * iavf_msix_adminq - Admin Queue interrupt handler
 * @arg: void pointer to the device softc
 *
 * Interrupt handler for the non-queue interrupt causes. Primarily this will
 * be the adminq interrupt, but also includes other miscellaneous causes.
 *
 * @returns FILTER_SCHEDULE_THREAD if the admin task needs to be run, otherwise
 * returns FILTER_HANDLED.
 */
static int
iavf_msix_adminq(void *arg)
{
	struct iavf_sc *sc = (struct iavf_sc *)arg;
	struct iavf_hw *hw = &sc->hw;
	u32 reg, mask;

	++sc->admin_irq;

	if (!iavf_test_state(&sc->state, IAVF_STATE_INITIALIZED))
		return (FILTER_HANDLED);

	reg = rd32(hw, IAVF_VFINT_ICR01);
	/*
	 * For masking off interrupt causes that need to be handled before
	 * they can be re-enabled
	 */
	mask = rd32(hw, IAVF_VFINT_ICR0_ENA1);

	/* Check on the cause */
	if (reg & IAVF_VFINT_ICR01_ADMINQ_MASK) {
		mask &= ~IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK;

		/* Process messages outside of the iflib context lock */
		taskqueue_enqueue(sc->vc_tq, &sc->vc_task);
	}

	wr32(hw, IAVF_VFINT_ICR0_ENA1, mask);
	iavf_enable_adminq_irq(hw);

	return (FILTER_HANDLED);
}

/**
 * iavf_enable_intr - Enable device interrupts
 * @vsi: the main VSI
 *
 * Called to enable all queue interrupts.
 */
void
iavf_enable_intr(struct iavf_vsi *vsi)
{
	struct iavf_hw *hw = vsi->hw;
	struct iavf_rx_queue *que = vsi->rx_queues;

	iavf_enable_adminq_irq(hw);
	for (int i = 0; i < vsi->num_rx_queues; i++, que++)
		iavf_enable_queue_irq(hw, que->rxr.me);
}

/**
 * iavf_disable_intr - Disable device interrupts
 * @vsi: the main VSI
 *
 * Called to disable all interrupts
 *
 * @remark we never disable the admin status interrupt.
 */
void
iavf_disable_intr(struct iavf_vsi *vsi)
{
	struct iavf_hw *hw = vsi->hw;
	struct iavf_rx_queue *que = vsi->rx_queues;

	for (int i = 0; i < vsi->num_rx_queues; i++, que++)
		iavf_disable_queue_irq(hw, que->rxr.me);
}

/**
 * iavf_enable_queue_irq - Enable IRQ register for a queue interrupt
 * @hw: hardware structure
 * @id: IRQ vector to enable
 *
 * Writes the IAVF_VFINT_DYN_CTLN1 register to enable a given IRQ interrupt.
 */
static void
iavf_enable_queue_irq(struct iavf_hw *hw, int id)
{
	u32 reg;

	reg = IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
	    IAVF_VFINT_DYN_CTLN1_CLEARPBA_MASK |
	    IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK;
	wr32(hw, IAVF_VFINT_DYN_CTLN1(id), reg);
}

/**
 * iavf_disable_queue_irq - Disable IRQ register for a queue interrupt
 * @hw: hardware structure
 * @id: IRQ vector to disable
 *
 * Writes the IAVF_VFINT_DYN_CTLN1 register to disable a given IRQ interrupt.
 */
static void
iavf_disable_queue_irq(struct iavf_hw *hw, int id)
{
	wr32(hw, IAVF_VFINT_DYN_CTLN1(id),
	    IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
	rd32(hw, IAVF_VFGEN_RSTAT);
}

/**
 * iavf_configure_itr - Get initial ITR values from tunable values.
 * @sc: device softc
 *
 * Load the initial tunable values for the ITR configuration.
 */
static void
iavf_configure_itr(struct iavf_sc *sc)
{
	iavf_configure_tx_itr(sc);
	iavf_configure_rx_itr(sc);
}

/**
 * iavf_set_queue_rx_itr - Update Rx ITR value
 * @que: Rx queue to update
 *
 * Provide an update to the queue RX interrupt moderation value.
 */
static void
iavf_set_queue_rx_itr(struct iavf_rx_queue *que)
{
	struct iavf_vsi *vsi = que->vsi;
	struct iavf_hw *hw = vsi->hw;
	struct rx_ring *rxr = &que->rxr;

	/* Idle, do nothing */
	if (rxr->bytes == 0)
		return;

	/* Update the hardware if needed */
	if (rxr->itr != vsi->rx_itr_setting) {
		rxr->itr = vsi->rx_itr_setting;
		wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR,
		    que->rxr.me), rxr->itr);
	}
}

/**
 * iavf_msix_que - Main Rx queue interrupt handler
 * @arg: void pointer to the Rx queue
 *
 * Main MSI-X interrupt handler for Rx queue interrupts
 *
 * @returns FILTER_SCHEDULE_THREAD if the main thread for Rx needs to run,
 * otherwise returns FILTER_HANDLED.
 */
static int
iavf_msix_que(void *arg)
{
	struct iavf_rx_queue *rx_que = (struct iavf_rx_queue *)arg;
	struct iavf_sc *sc = rx_que->vsi->back;

	++rx_que->irqs;

	if (!iavf_test_state(&sc->state, IAVF_STATE_RUNNING))
		return (FILTER_HANDLED);

	iavf_set_queue_rx_itr(rx_que);

	return (FILTER_SCHEDULE_THREAD);
}

/**
 * iavf_update_link_status - Update iflib Link status
 * @sc: device softc
 *
 * Notify the iflib stack of changes in link status. Called after the device
 * receives a virtchnl message indicating a change in link status.
 */
void
iavf_update_link_status(struct iavf_sc *sc)
{
	struct iavf_vsi *vsi = &sc->vsi;
	u64 baudrate;

	if (sc->link_up) {
		if (vsi->link_active == FALSE) {
			vsi->link_active = TRUE;
			baudrate = iavf_baudrate_from_link_speed(sc);
			iavf_dbg_info(sc, "baudrate: %llu\n", (unsigned long long)baudrate);
			iflib_link_state_change(vsi->ctx, LINK_STATE_UP, baudrate);
		}
	} else { /* Link down */
		if (vsi->link_active == TRUE) {
			vsi->link_active = FALSE;
			iflib_link_state_change(vsi->ctx, LINK_STATE_DOWN, 0);
		}
	}
}

/**
 * iavf_stop - Stop the interface
 * @sc: device softc
 *
 * This routine disables all traffic on the adapter by disabling interrupts
 * and sending a message to the PF to tell it to stop the hardware
 * Tx/Rx LAN queues.
 */
static void
iavf_stop(struct iavf_sc *sc)
{
	iavf_clear_state(&sc->state, IAVF_STATE_RUNNING);

	iavf_disable_intr(&sc->vsi);

	iavf_disable_queues_with_retries(sc);
}

/**
 * iavf_if_stop - iflib stop handler
 * @ctx: iflib context
 *
 * Call iavf_stop to stop the interface.
 */
static void
iavf_if_stop(if_ctx_t ctx)
{
	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);

	iavf_stop(sc);
}

/**
 * iavf_del_mac_filter - Delete a MAC filter
 * @sc: device softc
 * @macaddr: MAC address to remove
 *
 * Marks a MAC filter for deletion.
 *
 * @returns zero if the filter existed, or ENOENT if it did not.
 */
static int
iavf_del_mac_filter(struct iavf_sc *sc, u8 *macaddr)
{
	struct iavf_mac_filter *f;

	f = iavf_find_mac_filter(sc, macaddr);
	if (f == NULL)
		return (ENOENT);

	f->flags |= IAVF_FILTER_DEL;
	return (0);
}

/**
 * iavf_init_tx_rsqs - Initialize Report Status array
 * @vsi: the main VSI
 *
 * Set the Report Status queue fields to zero in order to initialize the
 * queues for transmit.
 */
void
iavf_init_tx_rsqs(struct iavf_vsi *vsi)
{
	if_softc_ctx_t scctx = vsi->shared;
	struct iavf_tx_queue *tx_que;
	int i, j;

	for (i = 0, tx_que = vsi->tx_queues; i < vsi->num_tx_queues; i++, tx_que++) {
		struct tx_ring *txr = &tx_que->txr;

		txr->tx_rs_cidx = txr->tx_rs_pidx;

		/* Initialize the last processed descriptor to be the end of
		 * the ring, rather than the start, so that we avoid an
		 * off-by-one error when calculating how many descriptors are
		 * done in the credits_update function.
		 */
		txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;

		for (j = 0; j < scctx->isc_ntxd[0]; j++)
			txr->tx_rsq[j] = QIDX_INVALID;
	}
}
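/*
 * Illustration (not the driver's actual credits_update logic): the comment
 * above refers to how "descriptors done" is computed as a modular distance
 * around the ring.  Seeding the last-processed index with ntxd - 1 means the
 * very first completion at descriptor 0 counts as one descriptor, whereas
 * seeding it with 0 would count as zero.  The sketch below shows only that
 * arithmetic; the names and ring size are hypothetical.
 */
#if 0
#include <stdio.h>

/* Descriptors completed when processing advances from 'prev' to 'cur'. */
static int
ring_done(int prev, int cur, int ntxd)
{
	return ((cur - prev + ntxd) % ntxd);
}

int
main(void)
{
	int ntxd = 1024;

	/* Seeded with ntxd - 1: first completion at index 0 counts as 1. */
	printf("seed ntxd-1: %d\n", ring_done(ntxd - 1, 0, ntxd));
	/* Seeded with 0: the same completion would be counted as 0. */
	printf("seed 0:      %d\n", ring_done(0, 0, ntxd));
	return (0);
}
#endif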
/**
 * iavf_init_tx_cidx - Initialize Tx cidx values
 * @vsi: the main VSI
 *
 * Initialize the tx_cidx_processed values for Tx queues in order to
 * initialize the Tx queues for transmit.
 */
void
iavf_init_tx_cidx(struct iavf_vsi *vsi)
{
	if_softc_ctx_t scctx = vsi->shared;
	struct iavf_tx_queue *tx_que;
	int i;

	for (i = 0, tx_que = vsi->tx_queues; i < vsi->num_tx_queues; i++, tx_que++) {
		struct tx_ring *txr = &tx_que->txr;

		txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
	}
}

/**
 * iavf_add_device_sysctls - Add device sysctls for configuration
 * @sc: device softc
 *
 * Add the main sysctl nodes and sysctls for device configuration.
 */
static void
iavf_add_device_sysctls(struct iavf_sc *sc)
{
	struct iavf_vsi *vsi = &sc->vsi;
	device_t dev = sc->dev;
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid_list *debug_list;

	iavf_add_device_sysctls_common(sc);

	debug_list = iavf_create_debug_sysctl_tree(sc);

	iavf_add_debug_sysctls_common(sc, debug_list);

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "queue_interrupt_table", CTLTYPE_STRING | CTLFLAG_RD,
	    sc, 0, iavf_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");

#ifdef IAVF_DEBUG
	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "do_vf_reset", CTLTYPE_INT | CTLFLAG_WR,
	    sc, 0, iavf_sysctl_vf_reset, "A", "Request a VF reset from PF");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "do_vflr_reset", CTLTYPE_INT | CTLFLAG_WR,
	    sc, 0, iavf_sysctl_vflr_reset, "A", "Request a VFLR reset from HW");
#endif

	/* Add stats sysctls */
	iavf_add_vsi_sysctls(dev, vsi, ctx, "vsi");

	iavf_add_queues_sysctls(dev, vsi);
}

/**
 * iavf_add_queues_sysctls - Add per-queue sysctls
 * @dev: device pointer
 * @vsi: the main VSI
 *
 * Add sysctls for each Tx and Rx queue.
 */
void
iavf_add_queues_sysctls(device_t dev, struct iavf_vsi *vsi)
{
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid_list *vsi_list, *queue_list;
	struct sysctl_oid *queue_node;
	char queue_namebuf[32];

	struct iavf_rx_queue *rx_que;
	struct iavf_tx_queue *tx_que;
	struct tx_ring *txr;
	struct rx_ring *rxr;

	vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);

	/* Queue statistics */
	for (int q = 0; q < vsi->num_rx_queues; q++) {
		bzero(queue_namebuf, sizeof(queue_namebuf));
		snprintf(queue_namebuf, IAVF_QUEUE_NAME_LEN, "rxq%02d", q);
		queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
		    OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "RX Queue #");
		queue_list = SYSCTL_CHILDREN(queue_node);

		rx_que = &(vsi->rx_queues[q]);
		rxr = &(rx_que->rxr);

		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
		    CTLFLAG_RD, &(rx_que->irqs),
		    "irqs on this queue (both Tx and Rx)");

		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "packets",
		    CTLFLAG_RD, &(rxr->rx_packets),
		    "Queue Packets Received");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "bytes",
		    CTLFLAG_RD, &(rxr->rx_bytes),
		    "Queue Bytes Received");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "desc_err",
		    CTLFLAG_RD, &(rxr->desc_errs),
		    "Queue Rx Descriptor Errors");
		SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "itr",
		    CTLFLAG_RD, &(rxr->itr), 0,
		    "Queue Rx ITR Interval");
	}
	for (int q = 0; q < vsi->num_tx_queues; q++) {
		bzero(queue_namebuf, sizeof(queue_namebuf));
		snprintf(queue_namebuf, IAVF_QUEUE_NAME_LEN, "txq%02d", q);
		queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
		    OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "TX Queue #");
		queue_list = SYSCTL_CHILDREN(queue_node);

		tx_que = &(vsi->tx_queues[q]);
		txr = &(tx_que->txr);

		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso",
		    CTLFLAG_RD, &(tx_que->tso),
		    "TSO");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mss_too_small",
		    CTLFLAG_RD, &(txr->mss_too_small),
		    "TSO sends with an MSS less than 64");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "packets",
		    CTLFLAG_RD, &(txr->tx_packets),
		    "Queue Packets Transmitted");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "bytes",
		    CTLFLAG_RD, &(txr->tx_bytes),
		    "Queue Bytes Transmitted");
		SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "itr",
		    CTLFLAG_RD, &(txr->itr), 0,
		    "Queue Tx ITR Interval");
	}
}

/**
 * iavf_driver_is_detaching - Check if the driver is detaching/unloading
 * @sc: device private softc
 *
 * @returns true if the driver is detaching, false otherwise.
 *
 * @remark on newer kernels, take advantage of iflib_in_detach in order to
 * report detachment correctly as early as possible.
 *
 * @remark this function is used by various code paths that want to avoid
 * running if the driver is about to be removed. This includes sysctls and
 * other driver access points. Note that it does not fully resolve
 * detach-based race conditions as it is possible for a thread to race with
 * iflib_in_detach.
 */
bool
iavf_driver_is_detaching(struct iavf_sc *sc)
{
	return (!iavf_test_state(&sc->state, IAVF_STATE_INITIALIZED) ||
	    iflib_in_detach(sc->vsi.ctx));
}

/**
 * iavf_sysctl_queue_interrupt_table - Sysctl for displaying queue mapping
 * @oidp: sysctl oid structure
 * @arg1: void pointer to device softc
 * @arg2: unused
 * @req: sysctl request pointer
 *
 * Print out mapping of Tx queue indexes and Rx queue indexes to MSI-X vectors.
 *
 * @returns zero on success, or an error code on failure.
 */
static int
iavf_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS)
{
	struct iavf_sc *sc = (struct iavf_sc *)arg1;
	struct iavf_vsi *vsi = &sc->vsi;
	device_t dev = sc->dev;
	struct sbuf *buf;
	int error = 0;

	struct iavf_rx_queue *rx_que;
	struct iavf_tx_queue *tx_que;

	UNREFERENCED_2PARAMETER(arg2, oidp);

	if (iavf_driver_is_detaching(sc))
		return (ESHUTDOWN);

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for output.\n");
		return (ENOMEM);
	}

	sbuf_cat(buf, "\n");
	for (int i = 0; i < vsi->num_rx_queues; i++) {
		rx_que = &vsi->rx_queues[i];
		sbuf_printf(buf, "(rxq %3d): %d\n", i, rx_que->msix);
	}
	for (int i = 0; i < vsi->num_tx_queues; i++) {
		tx_que = &vsi->tx_queues[i];
		sbuf_printf(buf, "(txq %3d): %d\n", i, tx_que->msix);
	}

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);
	sbuf_delete(buf);

	return (error);
}
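/*
 * Usage illustration (userspace, not part of the driver): the handler above
 * backs the read-only string sysctl registered on the debug tree.  Assuming
 * the node ends up at dev.iavf.<unit>.debug.queue_interrupt_table (the exact
 * parent depends on iavf_create_debug_sysctl_tree()), it can be read with
 * sysctlbyname(3) as sketched below; the OID string is an assumption.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	const char *oid = "dev.iavf.0.debug.queue_interrupt_table";
	size_t len = 0;
	char *buf;

	/* First call obtains the required buffer length. */
	if (sysctlbyname(oid, NULL, &len, NULL, 0) != 0) {
		perror(oid);
		return (1);
	}
	if ((buf = malloc(len)) == NULL)
		return (1);
	if (sysctlbyname(oid, buf, &len, NULL, 0) != 0) {
		perror(oid);
		free(buf);
		return (1);
	}
	/* Prints one "(rxq  N): vector" / "(txq  N): vector" line per queue. */
	printf("%s\n", buf);
	free(buf);
	return (0);
}
#endif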
#ifdef IAVF_DEBUG
#define CTX_ACTIVE(ctx) ((if_getdrvflags(iflib_get_ifp(ctx)) & IFF_DRV_RUNNING))

/**
 * iavf_sysctl_vf_reset - Request a VF reset
 * @oidp: sysctl oid pointer
 * @arg1: void pointer to device softc
 * @arg2: unused
 * @req: sysctl request pointer
 *
 * Request a VF reset for the device.
 *
 * @returns zero on success, or an error code on failure.
 */
static int
iavf_sysctl_vf_reset(SYSCTL_HANDLER_ARGS)
{
	struct iavf_sc *sc = (struct iavf_sc *)arg1;
	int do_reset = 0, error = 0;

	UNREFERENCED_PARAMETER(arg2);

	if (iavf_driver_is_detaching(sc))
		return (ESHUTDOWN);

	error = sysctl_handle_int(oidp, &do_reset, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	if (do_reset == 1) {
		iavf_reset(sc);
		if (CTX_ACTIVE(sc->vsi.ctx))
			iflib_request_reset(sc->vsi.ctx);
	}

	return (error);
}

/**
 * iavf_sysctl_vflr_reset - Trigger a PCIe FLR for the device
 * @oidp: sysctl oid pointer
 * @arg1: void pointer to device softc
 * @arg2: unused
 * @req: sysctl request pointer
 *
 * Sysctl callback to trigger a PCIe FLR.
 *
 * @returns zero on success, or an error code on failure.
 */
static int
iavf_sysctl_vflr_reset(SYSCTL_HANDLER_ARGS)
{
	struct iavf_sc *sc = (struct iavf_sc *)arg1;
	device_t dev = sc->dev;
	int do_reset = 0, error = 0;

	UNREFERENCED_PARAMETER(arg2);

	if (iavf_driver_is_detaching(sc))
		return (ESHUTDOWN);

	error = sysctl_handle_int(oidp, &do_reset, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	if (do_reset == 1) {
		if (!pcie_flr(dev, max(pcie_get_max_completion_timeout(dev) / 1000, 10), true)) {
			device_printf(dev, "PCIE FLR failed\n");
			error = EIO;
		}
		else if (CTX_ACTIVE(sc->vsi.ctx))
			iflib_request_reset(sc->vsi.ctx);
	}

	return (error);
}
#undef CTX_ACTIVE
#endif