/******************************************************************************

  Copyright (c) 2013-2018, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "ixl.h"
#include "ixl_pf.h"

#ifdef IXL_IW
#include "ixl_iw.h"
#include "ixl_iw_int.h"
#endif

#ifdef PCI_IOV
#include "ixl_pf_iov.h"
#endif

/*********************************************************************
 *  Driver version
 *********************************************************************/
#define IXL_DRIVER_VERSION_MAJOR	2
#define IXL_DRIVER_VERSION_MINOR	3
#define IXL_DRIVER_VERSION_BUILD	0

#define IXL_DRIVER_VERSION_STRING			\
    __XSTRING(IXL_DRIVER_VERSION_MAJOR) "."		\
    __XSTRING(IXL_DRIVER_VERSION_MINOR) "."		\
    __XSTRING(IXL_DRIVER_VERSION_BUILD) "-k"
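
/* With the values above, IXL_DRIVER_VERSION_STRING expands to "2.3.0-k". */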

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *
 *  ( Vendor ID, Device ID, Branding String )
 *********************************************************************/

static pci_vendor_info_t ixl_vendor_info_array[] =
{
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, "Intel(R) Ethernet Controller X710 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, "Intel(R) Ethernet Controller XL710 for 40GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, "Intel(R) Ethernet Controller X710 for 10GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, "Intel(R) Ethernet Controller X710 for 10GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, "Intel(R) Ethernet Controller X710 for 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, "Intel(R) Ethernet Controller X710/X557-AT 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722, "Intel(R) Ethernet Connection X722 for 10GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 1GbE"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B, "Intel(R) Ethernet Controller XXV710 for 25GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28, "Intel(R) Ethernet Controller XXV710 for 25GbE SFP28"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_BC, "Intel(R) Ethernet Controller X710 for 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_SFP, "Intel(R) Ethernet Controller X710 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_B, "Intel(R) Ethernet Controller X710 for 10GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_5G_BASE_T_BC, "Intel(R) Ethernet Controller V710 for 5GBASE-T"),
	/* required last entry */
	PVID_END
};

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
/*** IFLIB interface ***/
static void *ixl_register(device_t dev);
static int ixl_if_attach_pre(if_ctx_t ctx);
static int ixl_if_attach_post(if_ctx_t ctx);
static int ixl_if_detach(if_ctx_t ctx);
static int ixl_if_shutdown(if_ctx_t ctx);
static int ixl_if_suspend(if_ctx_t ctx);
static int ixl_if_resume(if_ctx_t ctx);
static int ixl_if_msix_intr_assign(if_ctx_t ctx, int msix);
static void ixl_if_enable_intr(if_ctx_t ctx);
static void ixl_if_disable_intr(if_ctx_t ctx);
static int ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid);
static int ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid);
static int ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets);
static int ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
static void ixl_if_queues_free(if_ctx_t ctx);
static void ixl_if_update_admin_status(if_ctx_t ctx);
static void ixl_if_multi_set(if_ctx_t ctx);
static int ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
static void ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr);
static int ixl_if_media_change(if_ctx_t ctx);
static int ixl_if_promisc_set(if_ctx_t ctx, int flags);
static void ixl_if_timer(if_ctx_t ctx, uint16_t qid);
static void ixl_if_vlan_register(if_ctx_t ctx, u16 vtag);
static void ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
static uint64_t ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt);
static int ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
static int ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data);
static bool ixl_if_needs_restart(if_ctx_t ctx, enum iflib_restart_event event);
#ifdef PCI_IOV
static void ixl_if_vflr_handle(if_ctx_t ctx);
#endif

/*** Other ***/
static void ixl_save_pf_tunables(struct ixl_pf *);
static int ixl_allocate_pci_resources(struct ixl_pf *);
static void ixl_setup_ssctx(struct ixl_pf *pf);
static void ixl_admin_timer(void *arg);

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixl_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, ixl_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
#ifdef PCI_IOV
	DEVMETHOD(pci_iov_init, iflib_device_iov_init),
	DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
#endif
	DEVMETHOD_END
};

static driver_t ixl_driver = {
	"ixl", ixl_methods, sizeof(struct ixl_pf),
};

devclass_t ixl_devclass;
DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0);
IFLIB_PNP_INFO(pci, ixl, ixl_vendor_info_array);
MODULE_VERSION(ixl, 3);

MODULE_DEPEND(ixl, pci, 1, 1, 1);
MODULE_DEPEND(ixl, ether, 1, 1, 1);
MODULE_DEPEND(ixl, iflib, 1, 1, 1);

static device_method_t ixl_if_methods[] = {
	DEVMETHOD(ifdi_attach_pre, ixl_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, ixl_if_attach_post),
	DEVMETHOD(ifdi_detach, ixl_if_detach),
	DEVMETHOD(ifdi_shutdown, ixl_if_shutdown),
	DEVMETHOD(ifdi_suspend, ixl_if_suspend),
	DEVMETHOD(ifdi_resume, ixl_if_resume),
	DEVMETHOD(ifdi_init, ixl_if_init),
	DEVMETHOD(ifdi_stop, ixl_if_stop),
	DEVMETHOD(ifdi_msix_intr_assign, ixl_if_msix_intr_assign),
	DEVMETHOD(ifdi_intr_enable, ixl_if_enable_intr),
	DEVMETHOD(ifdi_intr_disable, ixl_if_disable_intr),
	DEVMETHOD(ifdi_rx_queue_intr_enable, ixl_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queue_intr_enable, ixl_if_tx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queues_alloc, ixl_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, ixl_if_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, ixl_if_queues_free),
	DEVMETHOD(ifdi_update_admin_status, ixl_if_update_admin_status),
	DEVMETHOD(ifdi_multi_set, ixl_if_multi_set),
	DEVMETHOD(ifdi_mtu_set, ixl_if_mtu_set),
	DEVMETHOD(ifdi_media_status, ixl_if_media_status),
	DEVMETHOD(ifdi_media_change, ixl_if_media_change),
	DEVMETHOD(ifdi_promisc_set, ixl_if_promisc_set),
	DEVMETHOD(ifdi_timer, ixl_if_timer),
	DEVMETHOD(ifdi_vlan_register, ixl_if_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister, ixl_if_vlan_unregister),
	DEVMETHOD(ifdi_get_counter, ixl_if_get_counter),
	DEVMETHOD(ifdi_i2c_req, ixl_if_i2c_req),
	DEVMETHOD(ifdi_priv_ioctl, ixl_if_priv_ioctl),
	DEVMETHOD(ifdi_needs_restart, ixl_if_needs_restart),
#ifdef PCI_IOV
	DEVMETHOD(ifdi_iov_init, ixl_if_iov_init),
	DEVMETHOD(ifdi_iov_uninit, ixl_if_iov_uninit),
	DEVMETHOD(ifdi_iov_vf_add, ixl_if_iov_vf_add),
	DEVMETHOD(ifdi_vflr_handle, ixl_if_vflr_handle),
#endif
	// ifdi_led_func
	// ifdi_debug
	DEVMETHOD_END
};

static driver_t ixl_if_driver = {
	"ixl_if", ixl_if_methods, sizeof(struct ixl_pf)
};

/*
** TUNEABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "ixl driver parameters");

#ifdef IXL_DEBUG_FC
/*
 * Leave this on unless you need to send flow control
 * frames (or other control frames) from software
 */
static int ixl_enable_tx_fc_filter = 1;
TUNABLE_INT("hw.ixl.enable_tx_fc_filter",
    &ixl_enable_tx_fc_filter);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_tx_fc_filter, CTLFLAG_RDTUN,
    &ixl_enable_tx_fc_filter, 0,
    "Filter out packets with Ethertype 0x8808 from being sent out by non-HW sources");
#endif

#ifdef IXL_DEBUG
static int ixl_debug_recovery_mode = 0;
TUNABLE_INT("hw.ixl.debug_recovery_mode",
    &ixl_debug_recovery_mode);
SYSCTL_INT(_hw_ixl, OID_AUTO, debug_recovery_mode, CTLFLAG_RDTUN,
    &ixl_debug_recovery_mode, 0,
    "Act as if FW entered recovery mode (for debugging)");
#endif

static int ixl_i2c_access_method = 0;
TUNABLE_INT("hw.ixl.i2c_access_method",
    &ixl_i2c_access_method);
SYSCTL_INT(_hw_ixl, OID_AUTO, i2c_access_method, CTLFLAG_RDTUN,
    &ixl_i2c_access_method, 0,
    IXL_SYSCTL_HELP_I2C_METHOD);

static int ixl_enable_vf_loopback = 1;
TUNABLE_INT("hw.ixl.enable_vf_loopback",
    &ixl_enable_vf_loopback);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_vf_loopback, CTLFLAG_RDTUN,
    &ixl_enable_vf_loopback, 0,
    IXL_SYSCTL_HELP_VF_LOOPBACK);

/*
 * Different method for processing TX descriptor
 * completion.
 */
static int ixl_enable_head_writeback = 1;
TUNABLE_INT("hw.ixl.enable_head_writeback",
    &ixl_enable_head_writeback);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN,
    &ixl_enable_head_writeback, 0,
    "For detecting last completed TX descriptor by hardware, use value written by HW instead of checking descriptors");

static int ixl_core_debug_mask = 0;
TUNABLE_INT("hw.ixl.core_debug_mask",
    &ixl_core_debug_mask);
SYSCTL_INT(_hw_ixl, OID_AUTO, core_debug_mask, CTLFLAG_RDTUN,
    &ixl_core_debug_mask, 0,
    "Display debug statements that are printed in non-shared code");

static int ixl_shared_debug_mask = 0;
TUNABLE_INT("hw.ixl.shared_debug_mask",
    &ixl_shared_debug_mask);
SYSCTL_INT(_hw_ixl, OID_AUTO, shared_debug_mask, CTLFLAG_RDTUN,
    &ixl_shared_debug_mask, 0,
    "Display debug statements that are printed in shared code");

#if 0
/*
** Controls for Interrupt Throttling
**	- true/false for dynamic adjustment
**	- default values for static ITR
*/
static int ixl_dynamic_rx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");

static int ixl_dynamic_tx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
#endif

static int ixl_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
    &ixl_rx_itr, 0, "RX Interrupt Rate");

static int ixl_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
    &ixl_tx_itr, 0, "TX Interrupt Rate");

#ifdef IXL_IW
int ixl_enable_iwarp = 0;
TUNABLE_INT("hw.ixl.enable_iwarp", &ixl_enable_iwarp);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_iwarp, CTLFLAG_RDTUN,
    &ixl_enable_iwarp, 0, "iWARP enabled");

#if __FreeBSD_version < 1100000
int ixl_limit_iwarp_msix = 1;
#else
int ixl_limit_iwarp_msix = IXL_IW_MAX_MSIX;
#endif
TUNABLE_INT("hw.ixl.limit_iwarp_msix", &ixl_limit_iwarp_msix);
SYSCTL_INT(_hw_ixl, OID_AUTO, limit_iwarp_msix, CTLFLAG_RDTUN,
    &ixl_limit_iwarp_msix, 0, "Limit MSI-X vectors assigned to iWARP");
#endif
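
/*
 * All of the tunables above are CTLFLAG_RDTUN, so they can only be set
 * before the driver loads, e.g. from /boot/loader.conf. An illustrative
 * (not exhaustive) example; the values shown are arbitrary:
 *
 *   hw.ixl.enable_head_writeback="0"
 *   hw.ixl.core_debug_mask="0x1"
 *   hw.ixl.i2c_access_method="0"
 */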

extern struct if_txrx ixl_txrx_hwb;
extern struct if_txrx ixl_txrx_dwb;

static struct if_shared_ctx ixl_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_q_align = PAGE_SIZE,
	.isc_tx_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
	.isc_tso_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
	.isc_rx_maxsize = 16384,
	.isc_rx_nsegments = IXL_MAX_RX_SEGS,
	.isc_rx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
	.isc_nfl = 1,
	.isc_ntxqs = 1,
	.isc_nrxqs = 1,

	.isc_admin_intrcnt = 1,
	.isc_vendor_info = ixl_vendor_info_array,
	.isc_driver_version = IXL_DRIVER_VERSION_STRING,
	.isc_driver = &ixl_if_driver,
	.isc_flags = IFLIB_NEED_SCRATCH | IFLIB_NEED_ZERO_CSUM | IFLIB_TSO_INIT_IP | IFLIB_ADMIN_ALWAYS_RUN,

	.isc_nrxd_min = {IXL_MIN_RING},
	.isc_ntxd_min = {IXL_MIN_RING},
	.isc_nrxd_max = {IXL_MAX_RING},
	.isc_ntxd_max = {IXL_MAX_RING},
	.isc_nrxd_default = {IXL_DEFAULT_RING},
	.isc_ntxd_default = {IXL_DEFAULT_RING},
};

if_shared_ctx_t ixl_sctx = &ixl_sctx_init;

/*** Functions ***/
static void *
ixl_register(device_t dev)
{
	return (ixl_sctx);
}

static int
ixl_allocate_pci_resources(struct ixl_pf *pf)
{
	device_t dev = iflib_get_dev(pf->vsi.ctx);
	struct i40e_hw *hw = &pf->hw;
	int rid;

	/* Map BAR0 */
	rid = PCIR_BAR(0);
	pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);

	if (!(pf->pci_mem)) {
		device_printf(dev, "Unable to allocate bus resource: PCI memory\n");
		return (ENXIO);
	}

	/* Save off the PCI information */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
	hw->subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	hw->subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	hw->bus.device = pci_get_slot(dev);
	hw->bus.func = pci_get_function(dev);

	/* Save off register access information */
	pf->osdep.mem_bus_space_tag =
	    rman_get_bustag(pf->pci_mem);
	pf->osdep.mem_bus_space_handle =
	    rman_get_bushandle(pf->pci_mem);
	pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
	pf->osdep.flush_reg = I40E_GLGEN_STAT;
	pf->osdep.dev = dev;

	pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
	pf->hw.back = &pf->osdep;

	return (0);
}

static void
ixl_setup_ssctx(struct ixl_pf *pf)
{
	if_softc_ctx_t scctx = pf->vsi.shared;
	struct i40e_hw *hw = &pf->hw;

	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 1;
		scctx->isc_ntxqsets = scctx->isc_nrxqsets = 1;
	} else if (hw->mac.type == I40E_MAC_X722)
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 128;
	else
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;

	if (pf->vsi.enable_head_writeback) {
		scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
		    * sizeof(struct i40e_tx_desc) + sizeof(u32), DBA_ALIGN);
		scctx->isc_txrx = &ixl_txrx_hwb;
	} else {
		scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
		    * sizeof(struct i40e_tx_desc), DBA_ALIGN);
		scctx->isc_txrx = &ixl_txrx_dwb;
	}

	scctx->isc_txrx->ift_legacy_intr = ixl_intr;
	scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0]
	    * sizeof(union i40e_32byte_rx_desc), DBA_ALIGN);
	scctx->isc_msix_bar = PCIR_BAR(IXL_MSIX_BAR);
	scctx->isc_tx_nsegments = IXL_MAX_TX_SEGS;
	scctx->isc_tx_tso_segments_max = IXL_MAX_TSO_SEGS;
	scctx->isc_tx_tso_size_max = IXL_TSO_SIZE;
	scctx->isc_tx_tso_segsize_max = IXL_MAX_DMA_SEG_SIZE;
	scctx->isc_rss_table_size = pf->hw.func_caps.rss_table_size;
	scctx->isc_tx_csum_flags = CSUM_OFFLOAD;
	scctx->isc_capabilities = scctx->isc_capenable = IXL_CAPS;
}

static void
ixl_admin_timer(void *arg)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg;

	/* Fire off the admin task */
	iflib_admin_intr_deferred(pf->vsi.ctx);

	/* Reschedule the admin timer */
	callout_schedule(&pf->admin_timer, hz/2);
}
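
/*
 * Minimal attach path used when the firmware is in recovery mode: only the
 * MAC address, the admin queue interrupt, and the iflib shared context are
 * set up, so the device stays manageable while most functionality is
 * disabled.
 */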
static int
ixl_attach_pre_recovery_mode(struct ixl_pf *pf)
{
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;

	device_printf(dev, "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");

	i40e_get_mac_addr(hw, hw->mac.addr);

	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
		ixl_configure_intr0_msix(pf);
		ixl_enable_intr0(hw);
	}

	ixl_setup_ssctx(pf);

	return (0);
}

static int
ixl_if_attach_pre(if_ctx_t ctx)
{
	device_t dev;
	struct ixl_pf *pf;
	struct i40e_hw *hw;
	struct ixl_vsi *vsi;
	enum i40e_get_fw_lldp_status_resp lldp_status;
	struct i40e_filter_control_settings filter;
	enum i40e_status_code status;
	int error = 0;

	dev = iflib_get_dev(ctx);
	pf = iflib_get_softc(ctx);

	INIT_DBG_DEV(dev, "begin");

	vsi = &pf->vsi;
	vsi->back = pf;
	pf->dev = dev;
	hw = &pf->hw;

	vsi->dev = dev;
	vsi->hw = &pf->hw;
	vsi->id = 0;
	vsi->num_vlans = 0;
	vsi->ctx = ctx;
	vsi->media = iflib_get_media(ctx);
	vsi->shared = iflib_get_softc_ctx(ctx);

	snprintf(pf->admin_mtx_name, sizeof(pf->admin_mtx_name),
	    "%s:admin", device_get_nameunit(dev));
	mtx_init(&pf->admin_mtx, pf->admin_mtx_name, NULL, MTX_DEF);
	callout_init_mtx(&pf->admin_timer, &pf->admin_mtx, 0);

	/* Save tunable values */
	ixl_save_pf_tunables(pf);

	/* Do PCI setup - map BAR0, etc */
	if (ixl_allocate_pci_resources(pf)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_pci_res;
	}

	/* Establish a clean starting point */
	i40e_clear_hw(hw);
	i40e_set_mac_type(hw);

	error = ixl_pf_reset(pf);
	if (error)
		goto err_out;

	/* Initialize the shared code */
	status = i40e_init_shared_code(hw);
	if (status) {
		device_printf(dev, "Unable to initialize shared code, error %s\n",
		    i40e_stat_str(hw, status));
		error = EIO;
		goto err_out;
	}

	/* Set up the admin queue */
	hw->aq.num_arq_entries = IXL_AQ_LEN;
	hw->aq.num_asq_entries = IXL_AQ_LEN;
	hw->aq.arq_buf_size = IXL_AQ_BUF_SZ;
	hw->aq.asq_buf_size = IXL_AQ_BUF_SZ;

	status = i40e_init_adminq(hw);
	if (status != 0 && status != I40E_ERR_FIRMWARE_API_VERSION) {
		device_printf(dev, "Unable to initialize Admin Queue, error %s\n",
		    i40e_stat_str(hw, status));
		error = EIO;
		goto err_out;
	}
	ixl_print_nvm_version(pf);

	if (status == I40E_ERR_FIRMWARE_API_VERSION) {
		device_printf(dev, "The driver for the device stopped "
		    "because the NVM image is newer than expected.\n");
		device_printf(dev, "You must install the most recent version of "
		    "the network driver.\n");
		error = EIO;
		goto err_out;
	}

	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw)) {
		device_printf(dev, "The driver for the device detected "
		    "a newer version of the NVM image than expected.\n");
		device_printf(dev, "Please install the most recent version "
		    "of the network driver.\n");
	} else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4) {
		device_printf(dev, "The driver for the device detected "
		    "an older version of the NVM image than expected.\n");
		device_printf(dev, "Please update the NVM image.\n");
	}

	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
		error = ixl_attach_pre_recovery_mode(pf);
		if (error)
			goto err_out;
		return (error);
	}

	/* Clear PXE mode */
	i40e_clear_pxe_mode(hw);

	/* Get capabilities from the device */
	error = ixl_get_hw_capabilities(pf);
	if (error) {
		device_printf(dev, "get_hw_capabilities failed: %d\n",
		    error);
		goto err_get_cap;
	}

	/* Set up host memory cache */
	error = ixl_setup_hmc(pf);
	if (error)
		goto err_mac_hmc;

	/* Disable LLDP from the firmware for certain NVM versions */
	if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
	    (pf->hw.aq.fw_maj_ver < 4)) {
		i40e_aq_stop_lldp(hw, true, false, NULL);
		pf->state |= IXL_PF_STATE_FW_LLDP_DISABLED;
	}

	/* Try enabling Energy Efficient Ethernet (EEE) mode */
	if (i40e_enable_eee(hw, true) == I40E_SUCCESS)
		atomic_set_32(&pf->state, IXL_PF_STATE_EEE_ENABLED);
	else
		atomic_clear_32(&pf->state, IXL_PF_STATE_EEE_ENABLED);

	/* Get MAC addresses from hardware */
	i40e_get_mac_addr(hw, hw->mac.addr);
	error = i40e_validate_mac_addr(hw->mac.addr);
	if (error) {
		device_printf(dev, "validate_mac_addr failed: %d\n", error);
		goto err_mac_hmc;
	}
	bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
	iflib_set_mac(ctx, hw->mac.addr);
	i40e_get_port_mac_addr(hw, hw->mac.port_addr);

	/* Set up the device filtering */
	bzero(&filter, sizeof(filter));
	filter.enable_ethtype = TRUE;
	filter.enable_macvlan = TRUE;
	filter.enable_fdir = FALSE;
	filter.hash_lut_size = I40E_HASH_LUT_SIZE_512;
	if (i40e_set_filter_control(hw, &filter))
		device_printf(dev, "i40e_set_filter_control() failed\n");

	/* Query device FW LLDP status */
	if (i40e_get_fw_lldp_status(hw, &lldp_status) == I40E_SUCCESS) {
		if (lldp_status == I40E_GET_FW_LLDP_STATUS_DISABLED) {
			atomic_set_32(&pf->state,
			    IXL_PF_STATE_FW_LLDP_DISABLED);
		} else {
			atomic_clear_32(&pf->state,
			    IXL_PF_STATE_FW_LLDP_DISABLED);
		}
	}

	/* Tell FW to apply DCB config on link up */
	i40e_aq_set_dcb_parameters(hw, true, NULL);

	/* Fill out iflib parameters */
	ixl_setup_ssctx(pf);

	INIT_DBG_DEV(dev, "end");
	return (0);

err_mac_hmc:
	ixl_shutdown_hmc(pf);
err_get_cap:
	i40e_shutdown_adminq(hw);
err_out:
	ixl_free_pci_resources(pf);
err_pci_res:
	mtx_lock(&pf->admin_mtx);
	callout_stop(&pf->admin_timer);
	mtx_unlock(&pf->admin_mtx);
	mtx_destroy(&pf->admin_mtx);
	return (error);
}
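
/*
 * Second-stage attach, called by iflib after interrupt and queue setup:
 * creates the ifnet, configures the switch and filters, reserves queues for
 * the PF's VSI, sets up optional SR-IOV/iWARP support, and starts the admin
 * timer. On error, iflib calls ixl_if_detach() to unwind.
 */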
static int
ixl_if_attach_post(if_ctx_t ctx)
{
	device_t dev;
	struct ixl_pf *pf;
	struct i40e_hw *hw;
	struct ixl_vsi *vsi;
	int error = 0;
	enum i40e_status_code status;

	dev = iflib_get_dev(ctx);
	pf = iflib_get_softc(ctx);

	INIT_DBG_DEV(dev, "begin");

	vsi = &pf->vsi;
	vsi->ifp = iflib_get_ifp(ctx);
	hw = &pf->hw;

	/* Save off determined number of queues for interface */
	vsi->num_rx_queues = vsi->shared->isc_nrxqsets;
	vsi->num_tx_queues = vsi->shared->isc_ntxqsets;

	/* Setup OS network interface / ifnet */
	if (ixl_setup_interface(dev, pf)) {
		device_printf(dev, "interface setup failed!\n");
		error = EIO;
		goto err;
	}

	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
		/* Keep admin queue interrupts active while driver is loaded */
		if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
			ixl_configure_intr0_msix(pf);
			ixl_enable_intr0(hw);
		}

		ixl_add_sysctls_recovery_mode(pf);

		/* Start the admin timer */
		mtx_lock(&pf->admin_mtx);
		callout_reset(&pf->admin_timer, hz/2, ixl_admin_timer, pf);
		mtx_unlock(&pf->admin_mtx);
		return (0);
	}

	/* Determine link state */
	if (ixl_attach_get_link_status(pf)) {
		error = EINVAL;
		goto err;
	}

	error = ixl_switch_config(pf);
	if (error) {
		device_printf(dev, "Initial ixl_switch_config() failed: %d\n",
		    error);
		goto err;
	}

	/* Add protocol filters to list */
	ixl_init_filters(vsi);

	/* Init queue allocation manager */
	error = ixl_pf_qmgr_init(&pf->qmgr, hw->func_caps.num_tx_qp);
	if (error) {
		device_printf(dev, "Failed to init queue manager for PF queues, error %d\n",
		    error);
		goto err;
	}
	/* reserve a contiguous allocation for the PF's VSI */
	error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr,
	    max(vsi->num_rx_queues, vsi->num_tx_queues), &pf->qtag);
	if (error) {
		device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
		    error);
		goto err;
	}
	device_printf(dev, "Allocating %d queues for PF LAN VSI; %d queues active\n",
	    pf->qtag.num_allocated, pf->qtag.num_active);

	/* Limit PHY interrupts to link, autoneg, and modules failure */
	status = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
	    NULL);
	if (status) {
		device_printf(dev, "i40e_aq_set_phy_int_mask() failed: err %s,"
		    " aq_err %s\n", i40e_stat_str(hw, status),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		goto err;
	}

	/* Get the bus configuration and set the shared code */
	ixl_get_bus_info(pf);

	/* Keep admin queue interrupts active while driver is loaded */
	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
		ixl_configure_intr0_msix(pf);
		ixl_enable_intr0(hw);
	}

	/* Set initial advertised speed sysctl value */
	ixl_set_initial_advertised_speeds(pf);

	/* Initialize statistics & add sysctls */
	ixl_add_device_sysctls(pf);
	ixl_pf_reset_stats(pf);
	ixl_update_stats_counters(pf);
	ixl_add_hw_stats(pf);

	hw->phy.get_link_info = true;
	i40e_get_link_status(hw, &pf->link_up);
	ixl_update_link_status(pf);

#ifdef PCI_IOV
	ixl_initialize_sriov(pf);
#endif

#ifdef IXL_IW
	if (hw->func_caps.iwarp && ixl_enable_iwarp) {
		pf->iw_enabled = (pf->iw_msix > 0) ? true : false;
		if (pf->iw_enabled) {
			error = ixl_iw_pf_attach(pf);
			if (error) {
				device_printf(dev,
				    "interfacing to iWARP driver failed: %d\n",
				    error);
				goto err;
			} else
				device_printf(dev, "iWARP ready\n");
		} else
			device_printf(dev, "iWARP disabled on this device "
			    "(no MSI-X vectors)\n");
	} else {
		pf->iw_enabled = false;
		device_printf(dev, "The device is not iWARP enabled\n");
	}
#endif
	/* Start the admin timer */
	mtx_lock(&pf->admin_mtx);
	callout_reset(&pf->admin_timer, hz/2, ixl_admin_timer, pf);
	mtx_unlock(&pf->admin_mtx);

	INIT_DBG_DEV(dev, "end");
	return (0);

err:
	INIT_DEBUGOUT("end: error %d", error);
	/* ixl_if_detach() is called on error from this */
	return (error);
}

/**
 * XXX: iflib always ignores the return value of detach()
 * -> This means that this isn't allowed to fail
 */
static int
ixl_if_detach(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	enum i40e_status_code status;
#ifdef IXL_IW
	int error;
#endif

	INIT_DBG_DEV(dev, "begin");

	/* Stop the admin timer */
	mtx_lock(&pf->admin_mtx);
	callout_stop(&pf->admin_timer);
	mtx_unlock(&pf->admin_mtx);
	mtx_destroy(&pf->admin_mtx);

#ifdef IXL_IW
	if (ixl_enable_iwarp && pf->iw_enabled) {
		error = ixl_iw_pf_detach(pf);
		if (error == EBUSY) {
			device_printf(dev, "iwarp in use; stop it first.\n");
			//return (error);
		}
	}
#endif
	/* Remove all previously allocated media types */
	ifmedia_removeall(vsi->media);

	/* Shutdown LAN HMC */
	ixl_shutdown_hmc(pf);

	/* Shutdown admin queue */
	ixl_disable_intr0(hw);
	status = i40e_shutdown_adminq(hw);
	if (status)
		device_printf(dev,
		    "i40e_shutdown_adminq() failed with status %s\n",
		    i40e_stat_str(hw, status));

	ixl_pf_qmgr_destroy(&pf->qmgr);
	ixl_free_pci_resources(pf);
	ixl_free_filters(&vsi->ftl);
	INIT_DBG_DEV(dev, "end");
	return (0);
}

static int
ixl_if_shutdown(if_ctx_t ctx)
{
	int error = 0;

	INIT_DEBUGOUT("ixl_if_shutdown: begin");

	/* TODO: Call ixl_if_stop()? */

	/* TODO: Then setup low power mode */

	return (error);
}

static int
ixl_if_suspend(if_ctx_t ctx)
{
	int error = 0;

	INIT_DEBUGOUT("ixl_if_suspend: begin");

	/* TODO: Call ixl_if_stop()? */

	/* TODO: Then setup low power mode */

	return (error);
}

static int
ixl_if_resume(if_ctx_t ctx)
{
	struct ifnet *ifp = iflib_get_ifp(ctx);

	INIT_DEBUGOUT("ixl_if_resume: begin");

	/* Read & clear wake-up registers */

	/* Required after D3->D0 transition */
	if (ifp->if_flags & IFF_UP)
		ixl_if_init(ctx);

	return (0);
}

void
ixl_if_init(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = &pf->hw;
	struct ifnet *ifp = iflib_get_ifp(ctx);
	device_t dev = iflib_get_dev(ctx);
	u8 tmpaddr[ETHER_ADDR_LEN];
	int ret;

	if (IXL_PF_IN_RECOVERY_MODE(pf))
		return;
	/*
	 * If the aq is dead here, it probably means something outside of the driver
	 * did something to the adapter, like a PF reset.
	 * So, rebuild the driver's state here if that occurs.
	 */
	if (!i40e_check_asq_alive(&pf->hw)) {
		device_printf(dev, "Admin Queue is down; resetting...\n");
		ixl_teardown_hw_structs(pf);
		ixl_rebuild_hw_structs_after_reset(pf, false);
	}

	/* Get the latest mac address... User might use a LAA */
	bcopy(IF_LLADDR(vsi->ifp), tmpaddr, ETH_ALEN);
	if (!ixl_ether_is_equal(hw->mac.addr, tmpaddr) &&
	    (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
		ixl_del_all_vlan_filters(vsi, hw->mac.addr);
		bcopy(tmpaddr, hw->mac.addr, ETH_ALEN);
		ret = i40e_aq_mac_address_write(hw,
		    I40E_AQC_WRITE_TYPE_LAA_ONLY,
		    hw->mac.addr, NULL);
		if (ret) {
			device_printf(dev, "LLA address change failed!!\n");
			return;
		}
		/*
		 * New filters are configured by ixl_reconfigure_filters
		 * at the end of ixl_init_locked.
		 */
	}

	iflib_set_mac(ctx, hw->mac.addr);

	/* Prepare the VSI: rings, hmc contexts, etc... */
	if (ixl_initialize_vsi(vsi)) {
		device_printf(dev, "initialize vsi failed!!\n");
		return;
	}

	/* Reconfigure multicast filters in HW */
	ixl_if_multi_set(ctx);

	/* Set up RSS */
	ixl_config_rss(pf);

	/* Set up MSI-X routing and the ITR settings */
	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
		ixl_configure_queue_intr_msix(pf);
		ixl_configure_itr(pf);
	} else
		ixl_configure_legacy(pf);

	if (vsi->enable_head_writeback)
		ixl_init_tx_cidx(vsi);
	else
		ixl_init_tx_rsqs(vsi);

	ixl_enable_rings(vsi);

	i40e_aq_set_default_vsi(hw, vsi->seid, NULL);

	/* Re-add configured filters to HW */
	ixl_reconfigure_filters(vsi);

	/* Configure promiscuous mode */
	ixl_if_promisc_set(ctx, if_getflags(ifp));

#ifdef IXL_IW
	if (ixl_enable_iwarp && pf->iw_enabled) {
		ret = ixl_iw_pf_init(pf);
		if (ret)
			device_printf(dev,
			    "initialize iwarp failed, code %d\n", ret);
	}
#endif
}

void
ixl_if_stop(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;

	INIT_DEBUGOUT("ixl_if_stop: begin\n");

	if (IXL_PF_IN_RECOVERY_MODE(pf))
		return;

	// TODO: This may need to be reworked
#ifdef IXL_IW
	/* Stop iWARP device */
	if (ixl_enable_iwarp && pf->iw_enabled)
		ixl_iw_pf_stop(pf);
#endif

	ixl_disable_rings_intr(vsi);
	ixl_disable_rings(pf, vsi, &pf->qtag);
}
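
/*
 * MSI-X vector layout: vector 0 is reserved for the admin queue, and RX
 * queue vectors are assigned starting from vector 1. TX queues do not get
 * vectors of their own; each is serviced from an RX queue's vector,
 * assigned round-robin below.
 */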
static int
ixl_if_msix_intr_assign(if_ctx_t ctx, int msix)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_rx_queue *rx_que = vsi->rx_queues;
	struct ixl_tx_queue *tx_que = vsi->tx_queues;
	int err, i, rid, vector = 0;
	char buf[16];

	MPASS(vsi->shared->isc_nrxqsets > 0);
	MPASS(vsi->shared->isc_ntxqsets > 0);

	/* Admin Queue must use vector 0 */
	rid = vector + 1;
	err = iflib_irq_alloc_generic(ctx, &vsi->irq, rid, IFLIB_INTR_ADMIN,
	    ixl_msix_adminq, pf, 0, "aq");
	if (err) {
		iflib_irq_free(ctx, &vsi->irq);
		device_printf(iflib_get_dev(ctx),
		    "Failed to register Admin Queue handler\n");
		return (err);
	}
	/* Create soft IRQ for handling VFLRs */
	iflib_softirq_alloc_generic(ctx, NULL, IFLIB_INTR_IOV, pf, 0, "iov");

	/* Now set up the stations */
	for (i = 0, vector = 1; i < vsi->shared->isc_nrxqsets; i++, vector++, rx_que++) {
		rid = vector + 1;

		snprintf(buf, sizeof(buf), "rxq%d", i);
		err = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
		    IFLIB_INTR_RXTX, ixl_msix_que, rx_que, rx_que->rxr.me, buf);
		/* XXX: Does the driver work as expected if there are fewer num_rx_queues than
		 * what's expected in the iflib context? */
		if (err) {
			device_printf(iflib_get_dev(ctx),
			    "Failed to allocate queue RX int vector %d, err: %d\n", i, err);
			vsi->num_rx_queues = i + 1;
			goto fail;
		}
		rx_que->msix = vector;
	}

	bzero(buf, sizeof(buf));

	for (i = 0; i < vsi->shared->isc_ntxqsets; i++, tx_que++) {
		snprintf(buf, sizeof(buf), "txq%d", i);
		iflib_softirq_alloc_generic(ctx,
		    &vsi->rx_queues[i % vsi->shared->isc_nrxqsets].que_irq,
		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);

		/* TODO: Maybe call a strategy function for this to figure out which
		 * interrupts to map Tx queues to. I don't know if there's an immediately
		 * better way than this other than a user-supplied map, though. */
		tx_que->msix = (i % vsi->shared->isc_nrxqsets) + 1;
	}

	return (0);
fail:
	iflib_irq_free(ctx, &vsi->irq);
	rx_que = vsi->rx_queues;
	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
		iflib_irq_free(ctx, &rx_que->que_irq);
	return (err);
}

/*
 * Enable all interrupts
 *
 * Called in:
 * iflib_init_locked, after ixl_if_init()
 */
static void
ixl_if_enable_intr(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = vsi->hw;
	struct ixl_rx_queue *que = vsi->rx_queues;

	ixl_enable_intr0(hw);
	/* Enable queue interrupts */
	for (int i = 0; i < vsi->num_rx_queues; i++, que++)
		/* TODO: Queue index parameter is probably wrong */
		ixl_enable_queue(hw, que->rxr.me);
}

/*
 * Disable queue interrupts
 *
 * Other interrupt causes need to remain active.
 */
static void
ixl_if_disable_intr(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = vsi->hw;
	struct ixl_rx_queue *rx_que = vsi->rx_queues;

	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
		for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
			ixl_disable_queue(hw, rx_que->msix - 1);
	} else {
		// Set PFINT_LNKLST0 FIRSTQ_INDX to 0x7FF
		// stops queues from triggering interrupts
		wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
	}
}

static int
ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = vsi->hw;
	struct ixl_rx_queue *rx_que = &vsi->rx_queues[rxqid];

	ixl_enable_queue(hw, rx_que->msix - 1);
	return (0);
}

static int
ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = vsi->hw;
	struct ixl_tx_queue *tx_que = &vsi->tx_queues[txqid];

	ixl_enable_queue(hw, tx_que->msix - 1);
	return (0);
}

static int
ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	if_softc_ctx_t scctx = vsi->shared;
	struct ixl_tx_queue *que;
	int i, j, error = 0;

	MPASS(scctx->isc_ntxqsets > 0);
	MPASS(ntxqs == 1);
	MPASS(scctx->isc_ntxqsets == ntxqsets);

	/* Allocate queue structure memory */
	if (!(vsi->tx_queues =
	    (struct ixl_tx_queue *) malloc(sizeof(struct ixl_tx_queue) * ntxqsets, M_IXL, M_NOWAIT | M_ZERO))) {
		device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n");
		return (ENOMEM);
	}

	for (i = 0, que = vsi->tx_queues; i < ntxqsets; i++, que++) {
		struct tx_ring *txr = &que->txr;

		txr->me = i;
		que->vsi = vsi;

		if (!vsi->enable_head_writeback) {
			/* Allocate report status array */
			if (!(txr->tx_rsq = malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXL, M_NOWAIT))) {
				device_printf(iflib_get_dev(ctx), "failed to allocate tx_rsq memory\n");
				error = ENOMEM;
				goto fail;
			}
			/* Init report status array */
			for (j = 0; j < scctx->isc_ntxd[0]; j++)
				txr->tx_rsq[j] = QIDX_INVALID;
		}
		/* get the virtual and physical address of the hardware queues */
		txr->tail = I40E_QTX_TAIL(txr->me);
		txr->tx_base = (struct i40e_tx_desc *)vaddrs[i * ntxqs];
		txr->tx_paddr = paddrs[i * ntxqs];
		txr->que = que;
	}

	return (0);
fail:
	ixl_if_queues_free(ctx);
	return (error);
}

static int
ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_rx_queue *que;
	int i, error = 0;

#ifdef INVARIANTS
	if_softc_ctx_t scctx = vsi->shared;
	MPASS(scctx->isc_nrxqsets > 0);
	MPASS(nrxqs == 1);
	MPASS(scctx->isc_nrxqsets == nrxqsets);
#endif

	/* Allocate queue structure memory */
	if (!(vsi->rx_queues =
	    (struct ixl_rx_queue *) malloc(sizeof(struct ixl_rx_queue) *
	    nrxqsets, M_IXL, M_NOWAIT | M_ZERO))) {
		device_printf(iflib_get_dev(ctx), "Unable to allocate RX ring memory\n");
		error = ENOMEM;
		goto fail;
	}

	for (i = 0, que = vsi->rx_queues; i < nrxqsets; i++, que++) {
		struct rx_ring *rxr = &que->rxr;

		rxr->me = i;
		que->vsi = vsi;

		/* get the virtual and physical address of the hardware queues */
		rxr->tail = I40E_QRX_TAIL(rxr->me);
		rxr->rx_base = (union i40e_rx_desc *)vaddrs[i * nrxqs];
		rxr->rx_paddr = paddrs[i * nrxqs];
		rxr->que = que;
	}

	return (0);
fail:
	ixl_if_queues_free(ctx);
	return (error);
}

static void
ixl_if_queues_free(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;

	if (vsi->tx_queues != NULL && !vsi->enable_head_writeback) {
		struct ixl_tx_queue *que;
		int i = 0;

		for (i = 0, que = vsi->tx_queues; i < vsi->num_tx_queues; i++, que++) {
			struct tx_ring *txr = &que->txr;
			if (txr->tx_rsq != NULL) {
				free(txr->tx_rsq, M_IXL);
				txr->tx_rsq = NULL;
			}
		}
	}

	if (vsi->tx_queues != NULL) {
		free(vsi->tx_queues, M_IXL);
		vsi->tx_queues = NULL;
	}
	if (vsi->rx_queues != NULL) {
		free(vsi->rx_queues, M_IXL);
		vsi->rx_queues = NULL;
	}

	if (!IXL_PF_IN_RECOVERY_MODE(pf))
		sysctl_ctx_free(&vsi->sysctl_ctx);
}

void
ixl_update_link_status(struct ixl_pf *pf)
{
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = &pf->hw;
	u64 baudrate;

	if (pf->link_up) {
		if (vsi->link_active == FALSE) {
			vsi->link_active = TRUE;
			baudrate = ixl_max_aq_speed_to_value(hw->phy.link_info.link_speed);
			iflib_link_state_change(vsi->ctx, LINK_STATE_UP, baudrate);
			ixl_link_up_msg(pf);
#ifdef PCI_IOV
			ixl_broadcast_link_state(pf);
#endif
		}
	} else { /* Link down */
		if (vsi->link_active == TRUE) {
			vsi->link_active = FALSE;
			iflib_link_state_change(vsi->ctx, LINK_STATE_DOWN, 0);
#ifdef PCI_IOV
			ixl_broadcast_link_state(pf);
#endif
		}
	}
}

static void
ixl_handle_lan_overflow_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
{
	device_t dev = pf->dev;
	u32 rxq_idx, qtx_ctl;

	rxq_idx = (e->desc.params.external.param0 & I40E_PRTDCB_RUPTQ_RXQNUM_MASK) >>
	    I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT;
	qtx_ctl = e->desc.params.external.param1;

	device_printf(dev, "LAN overflow event: global rxq_idx %d\n", rxq_idx);
	device_printf(dev, "LAN overflow event: QTX_CTL 0x%08x\n", qtx_ctl);
}

static int
ixl_process_adminq(struct ixl_pf *pf, u16 *pending)
{
	enum i40e_status_code status = I40E_SUCCESS;
	struct i40e_arq_event_info event;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	u16 opcode;
	u32 loop = 0, reg;

	event.buf_len = IXL_AQ_BUF_SZ;
	event.msg_buf = malloc(event.buf_len, M_IXL, M_NOWAIT | M_ZERO);
	if (!event.msg_buf) {
		device_printf(dev, "%s: Unable to allocate memory for Admin"
		    " Queue event!\n", __func__);
		return (ENOMEM);
	}

	/* clean and process any events */
	do {
		status = i40e_clean_arq_element(hw, &event, pending);
		if (status)
			break;
		opcode = LE16_TO_CPU(event.desc.opcode);
		ixl_dbg(pf, IXL_DBG_AQ,
		    "Admin Queue event: %#06x\n", opcode);
		switch (opcode) {
		case i40e_aqc_opc_get_link_status:
			ixl_link_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_pf:
#ifdef PCI_IOV
			ixl_handle_vf_msg(pf, &event);
#endif
			break;
		/*
		 * This should only occur on no-drop queues, which
		 * aren't currently configured.
		 */
		case i40e_aqc_opc_event_lan_overflow:
			ixl_handle_lan_overflow_event(pf, &event);
			break;
		default:
			break;
		}
	} while (*pending && (loop++ < IXL_ADM_LIMIT));

	free(event.msg_buf, M_IXL);

	/* Re-enable admin queue interrupt cause */
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	return (status);
}

static void
ixl_if_update_admin_status(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct i40e_hw *hw = &pf->hw;
	u16 pending;

	if (IXL_PF_IS_RESETTING(pf))
		ixl_handle_empr_reset(pf);

	/*
	 * Admin Queue is shut down while handling reset.
	 * Don't proceed if it hasn't been re-initialized,
	 * e.g. due to an issue with new FW.
	 */
	if (!i40e_check_asq_alive(&pf->hw))
		return;

	if (pf->state & IXL_PF_STATE_MDD_PENDING)
		ixl_handle_mdd_event(pf);

	ixl_process_adminq(pf, &pending);
	ixl_update_link_status(pf);

	/*
	 * If there are still messages to process, reschedule ourselves.
	 * Otherwise, re-enable our interrupt and go to sleep.
	 */
	if (pending > 0)
		iflib_admin_intr_deferred(ctx);
	else
		ixl_enable_intr0(hw);
}

static void
ixl_if_multi_set(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = vsi->hw;
	int mcnt;

	IOCTL_DEBUGOUT("ixl_if_multi_set: begin");

	/* Delete filters for removed multicast addresses */
	ixl_del_multi(vsi, false);

	mcnt = min(if_llmaddr_count(iflib_get_ifp(ctx)), MAX_MULTICAST_ADDR);
	if (__predict_false(mcnt == MAX_MULTICAST_ADDR)) {
		/* Too many addresses to filter: use multicast promiscuous */
		i40e_aq_set_vsi_multicast_promiscuous(hw,
		    vsi->seid, TRUE, NULL);
		ixl_del_multi(vsi, true);
		return;
	}

	ixl_add_multi(vsi);
	IOCTL_DEBUGOUT("ixl_if_multi_set: end");
}

static int
ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;

	IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
	if (mtu > IXL_MAX_FRAME - ETHER_HDR_LEN - ETHER_CRC_LEN -
	    ETHER_VLAN_ENCAP_LEN)
		return (EINVAL);

	vsi->shared->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
	    ETHER_VLAN_ENCAP_LEN;

	return (0);
}
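
/*
 * Worked example of the MTU check above: the overhead reserved on top of
 * the MTU is the Ethernet header (14 bytes), CRC (4), and VLAN tag (4),
 * 22 bytes in total. Assuming the usual definition of IXL_MAX_FRAME as
 * 9728 bytes, the largest MTU accepted is 9728 - 22 = 9706.
 */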
static void
ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct i40e_hw *hw = &pf->hw;

	INIT_DEBUGOUT("ixl_media_status: begin");

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!pf->link_up) {
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	/* Hardware is always full-duplex */
	ifmr->ifm_active |= IFM_FDX;

	switch (hw->phy.link_info.phy_type) {
	/* 100 M */
	case I40E_PHY_TYPE_100BASE_TX:
		ifmr->ifm_active |= IFM_100_TX;
		break;
	/* 1 G */
	case I40E_PHY_TYPE_1000BASE_T:
		ifmr->ifm_active |= IFM_1000_T;
		break;
	case I40E_PHY_TYPE_1000BASE_SX:
		ifmr->ifm_active |= IFM_1000_SX;
		break;
	case I40E_PHY_TYPE_1000BASE_LX:
		ifmr->ifm_active |= IFM_1000_LX;
		break;
	case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
		ifmr->ifm_active |= IFM_1000_T;
		break;
	/* 2.5 G */
	case I40E_PHY_TYPE_2_5GBASE_T:
		ifmr->ifm_active |= IFM_2500_T;
		break;
	/* 5 G */
	case I40E_PHY_TYPE_5GBASE_T:
		ifmr->ifm_active |= IFM_5000_T;
		break;
	/* 10 G */
	case I40E_PHY_TYPE_10GBASE_SFPP_CU:
		ifmr->ifm_active |= IFM_10G_TWINAX;
		break;
	case I40E_PHY_TYPE_10GBASE_SR:
		ifmr->ifm_active |= IFM_10G_SR;
		break;
	case I40E_PHY_TYPE_10GBASE_LR:
		ifmr->ifm_active |= IFM_10G_LR;
		break;
	case I40E_PHY_TYPE_10GBASE_T:
		ifmr->ifm_active |= IFM_10G_T;
		break;
	case I40E_PHY_TYPE_XAUI:
	case I40E_PHY_TYPE_XFI:
		ifmr->ifm_active |= IFM_10G_TWINAX;
		break;
	case I40E_PHY_TYPE_10GBASE_AOC:
		ifmr->ifm_active |= IFM_10G_AOC;
		break;
	/* 25 G */
	case I40E_PHY_TYPE_25GBASE_KR:
		ifmr->ifm_active |= IFM_25G_KR;
		break;
	case I40E_PHY_TYPE_25GBASE_CR:
		ifmr->ifm_active |= IFM_25G_CR;
		break;
	case I40E_PHY_TYPE_25GBASE_SR:
		ifmr->ifm_active |= IFM_25G_SR;
		break;
	case I40E_PHY_TYPE_25GBASE_LR:
		ifmr->ifm_active |= IFM_25G_LR;
		break;
	case I40E_PHY_TYPE_25GBASE_AOC:
		ifmr->ifm_active |= IFM_25G_AOC;
		break;
	case I40E_PHY_TYPE_25GBASE_ACC:
		ifmr->ifm_active |= IFM_25G_ACC;
		break;
	/* 40 G */
	case I40E_PHY_TYPE_40GBASE_CR4:
	case I40E_PHY_TYPE_40GBASE_CR4_CU:
		ifmr->ifm_active |= IFM_40G_CR4;
		break;
	case I40E_PHY_TYPE_40GBASE_SR4:
		ifmr->ifm_active |= IFM_40G_SR4;
		break;
	case I40E_PHY_TYPE_40GBASE_LR4:
		ifmr->ifm_active |= IFM_40G_LR4;
		break;
	case I40E_PHY_TYPE_XLAUI:
		ifmr->ifm_active |= IFM_OTHER;
		break;
	case I40E_PHY_TYPE_1000BASE_KX:
		ifmr->ifm_active |= IFM_1000_KX;
		break;
	case I40E_PHY_TYPE_SGMII:
		ifmr->ifm_active |= IFM_1000_SGMII;
		break;
	/* ERJ: What's the difference between these? */
	case I40E_PHY_TYPE_10GBASE_CR1_CU:
	case I40E_PHY_TYPE_10GBASE_CR1:
		ifmr->ifm_active |= IFM_10G_CR1;
		break;
	case I40E_PHY_TYPE_10GBASE_KX4:
		ifmr->ifm_active |= IFM_10G_KX4;
		break;
	case I40E_PHY_TYPE_10GBASE_KR:
		ifmr->ifm_active |= IFM_10G_KR;
		break;
	case I40E_PHY_TYPE_SFI:
		ifmr->ifm_active |= IFM_10G_SFI;
		break;
	/* Our single 20G media type */
	case I40E_PHY_TYPE_20GBASE_KR2:
		ifmr->ifm_active |= IFM_20G_KR2;
		break;
	case I40E_PHY_TYPE_40GBASE_KR4:
		ifmr->ifm_active |= IFM_40G_KR4;
		break;
	case I40E_PHY_TYPE_XLPPI:
	case I40E_PHY_TYPE_40GBASE_AOC:
		ifmr->ifm_active |= IFM_40G_XLPPI;
		break;
	/* Unknown to driver */
	default:
		ifmr->ifm_active |= IFM_UNKNOWN;
		break;
	}
	/* Report flow control status as well */
	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
}

static int
ixl_if_media_change(if_ctx_t ctx)
{
	struct ifmedia *ifm = iflib_get_media(ctx);

	INIT_DEBUGOUT("ixl_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	if_printf(iflib_get_ifp(ctx), "Media change is not supported.\n");
	return (ENODEV);
}

static int
ixl_if_promisc_set(if_ctx_t ctx, int flags)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct ifnet *ifp = iflib_get_ifp(ctx);
	struct i40e_hw *hw = vsi->hw;
	int err;
	bool uni = FALSE, multi = FALSE;

	if (flags & IFF_PROMISC)
		uni = multi = TRUE;
	else if (flags & IFF_ALLMULTI || if_llmaddr_count(ifp) >=
	    MAX_MULTICAST_ADDR)
		multi = TRUE;

	err = i40e_aq_set_vsi_unicast_promiscuous(hw,
	    vsi->seid, uni, NULL, true);
	if (err)
		return (err);
	err = i40e_aq_set_vsi_multicast_promiscuous(hw,
	    vsi->seid, multi, NULL);
	return (err);
}

static void
ixl_if_timer(if_ctx_t ctx, uint16_t qid)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);

	if (qid != 0)
		return;

	ixl_update_stats_counters(pf);
}
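
/*
 * VLAN registration: the vlans_map bitmap and num_vlans track every VLAN
 * the stack registers. When more VLANs are registered than the hardware
 * has filters for, filtering falls back to a single IXL_VLAN_ANY filter
 * and per-VLAN HW filtering is effectively disabled.
 */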
static void
ixl_if_vlan_register(if_ctx_t ctx, u16 vtag)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = vsi->hw;
	if_t ifp = iflib_get_ifp(ctx);

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	/*
	 * Keep track of registered VLANS to know what
	 * filters have to be configured when VLAN_HWFILTER
	 * capability is enabled.
	 */
	++vsi->num_vlans;
	bit_set(vsi->vlans_map, vtag);

	if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0)
		return;

	if (vsi->num_vlans < IXL_MAX_VLAN_FILTERS)
		ixl_add_filter(vsi, hw->mac.addr, vtag);
	else if (vsi->num_vlans == IXL_MAX_VLAN_FILTERS) {
		/*
		 * There is not enough HW resources to add filters
		 * for all registered VLANs. Re-configure filtering
		 * to allow reception of all expected traffic.
		 */
		device_printf(vsi->dev,
		    "Not enough HW filters for all VLANs. VLAN HW filtering disabled\n");
		ixl_del_all_vlan_filters(vsi, hw->mac.addr);
		ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
	}
}

static void
ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = vsi->hw;
	if_t ifp = iflib_get_ifp(ctx);

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	--vsi->num_vlans;
	bit_clear(vsi->vlans_map, vtag);

	if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0)
		return;

	if (vsi->num_vlans < IXL_MAX_VLAN_FILTERS)
		ixl_del_filter(vsi, hw->mac.addr, vtag);
	else if (vsi->num_vlans == IXL_MAX_VLAN_FILTERS) {
		ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
		ixl_add_vlan_filters(vsi, hw->mac.addr);
	}
}

static uint64_t
ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	if_t ifp = iflib_get_ifp(ctx);

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (vsi->ipackets);
	case IFCOUNTER_IERRORS:
		return (vsi->ierrors);
	case IFCOUNTER_OPACKETS:
		return (vsi->opackets);
	case IFCOUNTER_OERRORS:
		return (vsi->oerrors);
	case IFCOUNTER_COLLISIONS:
		/* Collisions are by standard impossible in 40G/10G Ethernet */
		return (0);
	case IFCOUNTER_IBYTES:
		return (vsi->ibytes);
	case IFCOUNTER_OBYTES:
		return (vsi->obytes);
	case IFCOUNTER_IMCASTS:
		return (vsi->imcasts);
	case IFCOUNTER_OMCASTS:
		return (vsi->omcasts);
	case IFCOUNTER_IQDROPS:
		return (vsi->iqdrops);
	case IFCOUNTER_OQDROPS:
		return (vsi->oqdrops);
	case IFCOUNTER_NOPROTO:
		return (vsi->noproto);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}

#ifdef PCI_IOV
static void
ixl_if_vflr_handle(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);

	ixl_handle_vflr(pf);
}
#endif

static int
ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);

	if (pf->read_i2c_byte == NULL)
		return (EINVAL);

	for (int i = 0; i < req->len; i++)
		if (pf->read_i2c_byte(pf, req->offset + i,
		    req->dev_addr, &req->data[i]))
			return (EIO);
	return (0);
}

static int
ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ifdrv *ifd = (struct ifdrv *)data;
	int error = 0;

	/*
	 * The iflib_if_ioctl forwards SIOCxDRVSPEC and SIOCGPRIVATE_0 without
	 * performing privilege checks. It is important that this function
	 * perform the necessary checks for commands which should only be
	 * executed by privileged threads.
	 */

	switch (command) {
	case SIOCGDRVSPEC:
	case SIOCSDRVSPEC:
		/* NVM update command */
		if (ifd->ifd_cmd == I40E_NVM_ACCESS) {
			error = priv_check(curthread, PRIV_DRIVER);
			if (error)
				break;
			error = ixl_handle_nvmupd_cmd(pf, ifd);
		} else {
			error = EINVAL;
		}
		break;
	default:
		error = EOPNOTSUPP;
	}

	return (error);
}

/* ixl_if_needs_restart - Tell iflib when the driver needs to be reinitialized
 * @ctx: iflib context
 * @event: event code to check
 *
 * Defaults to returning false for every event.
 *
 * @returns true if iflib needs to reinit the interface, false otherwise
 */
static bool
ixl_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
{
	switch (event) {
	case IFLIB_RESTART_VLAN_CONFIG:
	default:
		return (false);
	}
}

/*
 * Sanity check and save off tunable values.
 */
static void
ixl_save_pf_tunables(struct ixl_pf *pf)
{
	device_t dev = pf->dev;

	/* Save tunable information */
#ifdef IXL_DEBUG_FC
	pf->enable_tx_fc_filter = ixl_enable_tx_fc_filter;
#endif
#ifdef IXL_DEBUG
	pf->recovery_mode = ixl_debug_recovery_mode;
#endif
	pf->dbg_mask = ixl_core_debug_mask;
	pf->hw.debug_mask = ixl_shared_debug_mask;
	pf->vsi.enable_head_writeback = !!(ixl_enable_head_writeback);
	pf->enable_vf_loopback = !!(ixl_enable_vf_loopback);
#if 0
	pf->dynamic_rx_itr = ixl_dynamic_rx_itr;
	pf->dynamic_tx_itr = ixl_dynamic_tx_itr;
#endif

	if (ixl_i2c_access_method > 3 || ixl_i2c_access_method < 0)
		pf->i2c_access_method = 0;
	else
		pf->i2c_access_method = ixl_i2c_access_method;

	if (ixl_tx_itr < 0 || ixl_tx_itr > IXL_MAX_ITR) {
		device_printf(dev, "Invalid tx_itr value of %d set!\n",
		    ixl_tx_itr);
		device_printf(dev, "tx_itr must be between %d and %d, "
		    "inclusive\n",
		    0, IXL_MAX_ITR);
		device_printf(dev, "Using default value of %d instead\n",
		    IXL_ITR_4K);
		pf->tx_itr = IXL_ITR_4K;
	} else
		pf->tx_itr = ixl_tx_itr;

	if (ixl_rx_itr < 0 || ixl_rx_itr > IXL_MAX_ITR) {
		device_printf(dev, "Invalid rx_itr value of %d set!\n",
		    ixl_rx_itr);
		device_printf(dev, "rx_itr must be between %d and %d, "
		    "inclusive\n",
		    0, IXL_MAX_ITR);
		device_printf(dev, "Using default value of %d instead\n",
		    IXL_ITR_8K);
		pf->rx_itr = IXL_ITR_8K;
	} else
		pf->rx_itr = ixl_rx_itr;
}