/******************************************************************************

  Copyright (c) 2013-2018, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "ixl.h"
#include "ixl_pf.h"

#ifdef IXL_IW
#include "ixl_iw.h"
#include "ixl_iw_int.h"
#endif

#ifdef PCI_IOV
#include "ixl_pf_iov.h"
#endif

/*********************************************************************
 *  Driver version
 *********************************************************************/
#define IXL_DRIVER_VERSION_MAJOR	2
#define IXL_DRIVER_VERSION_MINOR	3
#define IXL_DRIVER_VERSION_BUILD	0

#define IXL_DRIVER_VERSION_STRING			\
    __XSTRING(IXL_DRIVER_VERSION_MAJOR) "."		\
    __XSTRING(IXL_DRIVER_VERSION_MINOR) "."		\
    __XSTRING(IXL_DRIVER_VERSION_BUILD) "-k"

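/*
 * Note: __XSTRING() stringifies the macro values above, so with the
 * current numbers the version string expands to "2.3.0-k".
 */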

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *
 *  ( Vendor ID, Device ID, Branding String )
 *********************************************************************/

static pci_vendor_info_t ixl_vendor_info_array[] =
{
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, "Intel(R) Ethernet Controller X710 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, "Intel(R) Ethernet Controller XL710 for 40GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, "Intel(R) Ethernet Controller X710 for 10GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, "Intel(R) Ethernet Controller X710 for 10GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, "Intel(R) Ethernet Controller X710 for 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, "Intel(R) Ethernet Controller X710/X557-AT 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722, "Intel(R) Ethernet Connection X722 for 10GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 1GbE"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B, "Intel(R) Ethernet Controller XXV710 for 25GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28, "Intel(R) Ethernet Controller XXV710 for 25GbE SFP28"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_BC, "Intel(R) Ethernet Controller X710 for 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_SFP, "Intel(R) Ethernet Controller X710 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_B, "Intel(R) Ethernet Controller X710 for 10GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_5G_BASE_T_BC, "Intel(R) Ethernet Controller V710 for 5GBASE-T"),
	/* required last entry */
	PVID_END
};

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
/*** IFLIB interface ***/
static void	*ixl_register(device_t dev);
static int	 ixl_if_attach_pre(if_ctx_t ctx);
static int	 ixl_if_attach_post(if_ctx_t ctx);
static int	 ixl_if_detach(if_ctx_t ctx);
static int	 ixl_if_shutdown(if_ctx_t ctx);
static int	 ixl_if_suspend(if_ctx_t ctx);
static int	 ixl_if_resume(if_ctx_t ctx);
static int	 ixl_if_msix_intr_assign(if_ctx_t ctx, int msix);
static void	 ixl_if_enable_intr(if_ctx_t ctx);
static void	 ixl_if_disable_intr(if_ctx_t ctx);
static int	 ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid);
static int	 ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid);
static int	 ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets);
static int	 ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
static void	 ixl_if_queues_free(if_ctx_t ctx);
static void	 ixl_if_update_admin_status(if_ctx_t ctx);
static void	 ixl_if_multi_set(if_ctx_t ctx);
static int	 ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
static void	 ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr);
static int	 ixl_if_media_change(if_ctx_t ctx);
static int	 ixl_if_promisc_set(if_ctx_t ctx, int flags);
static void	 ixl_if_timer(if_ctx_t ctx, uint16_t qid);
static void	 ixl_if_vlan_register(if_ctx_t ctx, u16 vtag);
static void	 ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
static uint64_t	 ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt);
static int	 ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
static int	 ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data);
static bool	 ixl_if_needs_restart(if_ctx_t ctx, enum iflib_restart_event event);
#ifdef PCI_IOV
static void	 ixl_if_vflr_handle(if_ctx_t ctx);
#endif

/*** Other ***/
static u_int	 ixl_mc_filter_apply(void *, struct sockaddr_dl *, u_int);
static void	 ixl_save_pf_tunables(struct ixl_pf *);
static int	 ixl_allocate_pci_resources(struct ixl_pf *);
static void	 ixl_setup_ssctx(struct ixl_pf *pf);
static void	 ixl_admin_timer(void *arg);

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixl_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, ixl_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
#ifdef PCI_IOV
	DEVMETHOD(pci_iov_init, iflib_device_iov_init),
	DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
#endif
	DEVMETHOD_END
};

static driver_t ixl_driver = {
	"ixl", ixl_methods, sizeof(struct ixl_pf),
};

devclass_t ixl_devclass;
DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0);
IFLIB_PNP_INFO(pci, ixl, ixl_vendor_info_array);
MODULE_VERSION(ixl, 3);

MODULE_DEPEND(ixl, pci, 1, 1, 1);
MODULE_DEPEND(ixl, ether, 1, 1, 1);
MODULE_DEPEND(ixl, iflib, 1, 1, 1);

static device_method_t ixl_if_methods[] = {
	DEVMETHOD(ifdi_attach_pre, ixl_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, ixl_if_attach_post),
	DEVMETHOD(ifdi_detach, ixl_if_detach),
	DEVMETHOD(ifdi_shutdown, ixl_if_shutdown),
	DEVMETHOD(ifdi_suspend, ixl_if_suspend),
	DEVMETHOD(ifdi_resume, ixl_if_resume),
	DEVMETHOD(ifdi_init, ixl_if_init),
	DEVMETHOD(ifdi_stop, ixl_if_stop),
	DEVMETHOD(ifdi_msix_intr_assign, ixl_if_msix_intr_assign),
	DEVMETHOD(ifdi_intr_enable, ixl_if_enable_intr),
	DEVMETHOD(ifdi_intr_disable, ixl_if_disable_intr),
	DEVMETHOD(ifdi_rx_queue_intr_enable, ixl_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queue_intr_enable, ixl_if_tx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queues_alloc, ixl_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, ixl_if_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, ixl_if_queues_free),
	DEVMETHOD(ifdi_update_admin_status, ixl_if_update_admin_status),
	DEVMETHOD(ifdi_multi_set, ixl_if_multi_set),
	DEVMETHOD(ifdi_mtu_set, ixl_if_mtu_set),
	DEVMETHOD(ifdi_media_status, ixl_if_media_status),
	DEVMETHOD(ifdi_media_change, ixl_if_media_change),
	DEVMETHOD(ifdi_promisc_set, ixl_if_promisc_set),
	DEVMETHOD(ifdi_timer, ixl_if_timer),
	DEVMETHOD(ifdi_vlan_register, ixl_if_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister, ixl_if_vlan_unregister),
	DEVMETHOD(ifdi_get_counter, ixl_if_get_counter),
	DEVMETHOD(ifdi_i2c_req, ixl_if_i2c_req),
	DEVMETHOD(ifdi_priv_ioctl, ixl_if_priv_ioctl),
	DEVMETHOD(ifdi_needs_restart, ixl_if_needs_restart),
#ifdef PCI_IOV
	DEVMETHOD(ifdi_iov_init, ixl_if_iov_init),
	DEVMETHOD(ifdi_iov_uninit, ixl_if_iov_uninit),
	DEVMETHOD(ifdi_iov_vf_add, ixl_if_iov_vf_add),
	DEVMETHOD(ifdi_vflr_handle, ixl_if_vflr_handle),
#endif
	// ifdi_led_func
	// ifdi_debug
	DEVMETHOD_END
};

static driver_t ixl_if_driver = {
	"ixl_if", ixl_if_methods, sizeof(struct ixl_pf)
};

/*
** TUNEABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "ixl driver parameters");

#ifdef IXL_DEBUG_FC
/*
 * Leave this on unless you need to send flow control
 * frames (or other control frames) from software
 */
static int ixl_enable_tx_fc_filter = 1;
TUNABLE_INT("hw.ixl.enable_tx_fc_filter",
    &ixl_enable_tx_fc_filter);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_tx_fc_filter, CTLFLAG_RDTUN,
    &ixl_enable_tx_fc_filter, 0,
    "Filter out packets with Ethertype 0x8808 from being sent out by non-HW sources");
#endif

#ifdef IXL_DEBUG
static int ixl_debug_recovery_mode = 0;
TUNABLE_INT("hw.ixl.debug_recovery_mode",
    &ixl_debug_recovery_mode);
SYSCTL_INT(_hw_ixl, OID_AUTO, debug_recovery_mode, CTLFLAG_RDTUN,
    &ixl_debug_recovery_mode, 0,
    "Act as if FW entered recovery mode (for debugging)");
#endif

static int ixl_i2c_access_method = 0;
TUNABLE_INT("hw.ixl.i2c_access_method",
    &ixl_i2c_access_method);
SYSCTL_INT(_hw_ixl, OID_AUTO, i2c_access_method, CTLFLAG_RDTUN,
    &ixl_i2c_access_method, 0,
    IXL_SYSCTL_HELP_I2C_METHOD);

static int ixl_enable_vf_loopback = 1;
TUNABLE_INT("hw.ixl.enable_vf_loopback",
    &ixl_enable_vf_loopback);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_vf_loopback, CTLFLAG_RDTUN,
    &ixl_enable_vf_loopback, 0,
    IXL_SYSCTL_HELP_VF_LOOPBACK);

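/*
 * All of these parameters are CTLFLAG_RDTUN sysctls: read-only at
 * runtime and settable only as loader tunables. A minimal example,
 * with hypothetical values, in /boot/loader.conf:
 *
 *	hw.ixl.enable_head_writeback="0"
 *	hw.ixl.core_debug_mask="1"
 */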

/*
 * Different method for processing TX descriptor
 * completion.
 */
static int ixl_enable_head_writeback = 1;
TUNABLE_INT("hw.ixl.enable_head_writeback",
    &ixl_enable_head_writeback);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN,
    &ixl_enable_head_writeback, 0,
    "For detecting last completed TX descriptor by hardware, use value written by HW instead of checking descriptors");

static int ixl_core_debug_mask = 0;
TUNABLE_INT("hw.ixl.core_debug_mask",
    &ixl_core_debug_mask);
SYSCTL_INT(_hw_ixl, OID_AUTO, core_debug_mask, CTLFLAG_RDTUN,
    &ixl_core_debug_mask, 0,
    "Display debug statements that are printed in non-shared code");

static int ixl_shared_debug_mask = 0;
TUNABLE_INT("hw.ixl.shared_debug_mask",
    &ixl_shared_debug_mask);
SYSCTL_INT(_hw_ixl, OID_AUTO, shared_debug_mask, CTLFLAG_RDTUN,
    &ixl_shared_debug_mask, 0,
    "Display debug statements that are printed in shared code");

#if 0
/*
** Controls for Interrupt Throttling
**	- true/false for dynamic adjustment
**	- default values for static ITR
*/
static int ixl_dynamic_rx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");

static int ixl_dynamic_tx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
#endif

static int ixl_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
    &ixl_rx_itr, 0, "RX Interrupt Rate");

static int ixl_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
    &ixl_tx_itr, 0, "TX Interrupt Rate");

#ifdef IXL_IW
int ixl_enable_iwarp = 0;
TUNABLE_INT("hw.ixl.enable_iwarp", &ixl_enable_iwarp);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_iwarp, CTLFLAG_RDTUN,
    &ixl_enable_iwarp, 0, "iWARP enabled");

#if __FreeBSD_version < 1100000
int ixl_limit_iwarp_msix = 1;
#else
int ixl_limit_iwarp_msix = IXL_IW_MAX_MSIX;
#endif
TUNABLE_INT("hw.ixl.limit_iwarp_msix", &ixl_limit_iwarp_msix);
SYSCTL_INT(_hw_ixl, OID_AUTO, limit_iwarp_msix, CTLFLAG_RDTUN,
    &ixl_limit_iwarp_msix, 0, "Limit MSI-X vectors assigned to iWARP");
#endif

extern struct if_txrx ixl_txrx_hwb;
extern struct if_txrx ixl_txrx_dwb;

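/*
 * Shared context handed to iflib via ixl_register() at probe time.
 * Note that isc_admin_intrcnt reserves the single extra MSI-X vector
 * that ixl_if_msix_intr_assign() later binds to the Admin Queue.
 */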
static struct if_shared_ctx ixl_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_q_align = PAGE_SIZE,
	.isc_tx_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
	.isc_tso_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
	.isc_rx_maxsize = 16384,
	.isc_rx_nsegments = IXL_MAX_RX_SEGS,
	.isc_rx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
	.isc_nfl = 1,
	.isc_ntxqs = 1,
	.isc_nrxqs = 1,

	.isc_admin_intrcnt = 1,
	.isc_vendor_info = ixl_vendor_info_array,
	.isc_driver_version = IXL_DRIVER_VERSION_STRING,
	.isc_driver = &ixl_if_driver,
	.isc_flags = IFLIB_NEED_SCRATCH | IFLIB_NEED_ZERO_CSUM | IFLIB_TSO_INIT_IP | IFLIB_ADMIN_ALWAYS_RUN,

	.isc_nrxd_min = {IXL_MIN_RING},
	.isc_ntxd_min = {IXL_MIN_RING},
	.isc_nrxd_max = {IXL_MAX_RING},
	.isc_ntxd_max = {IXL_MAX_RING},
	.isc_nrxd_default = {IXL_DEFAULT_RING},
	.isc_ntxd_default = {IXL_DEFAULT_RING},
};

if_shared_ctx_t ixl_sctx = &ixl_sctx_init;

/*** Functions ***/
static void *
ixl_register(device_t dev)
{
	return (ixl_sctx);
}

static int
ixl_allocate_pci_resources(struct ixl_pf *pf)
{
	device_t dev = iflib_get_dev(pf->vsi.ctx);
	struct i40e_hw *hw = &pf->hw;
	int rid;

	/* Map BAR0 */
	rid = PCIR_BAR(0);
	pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);

	if (!(pf->pci_mem)) {
		device_printf(dev, "Unable to allocate bus resource: PCI memory\n");
		return (ENXIO);
	}

	/* Save off the PCI information */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
	hw->subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	hw->subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	hw->bus.device = pci_get_slot(dev);
	hw->bus.func = pci_get_function(dev);

	/* Save off register access information */
	pf->osdep.mem_bus_space_tag =
	    rman_get_bustag(pf->pci_mem);
	pf->osdep.mem_bus_space_handle =
	    rman_get_bushandle(pf->pci_mem);
	pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
	pf->osdep.flush_reg = I40E_GLGEN_STAT;
	pf->osdep.dev = dev;

	pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
	pf->hw.back = &pf->osdep;

	return (0);
}

static void
ixl_setup_ssctx(struct ixl_pf *pf)
{
	if_softc_ctx_t scctx = pf->vsi.shared;
	struct i40e_hw *hw = &pf->hw;

	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 1;
		scctx->isc_ntxqsets = scctx->isc_nrxqsets = 1;
	} else if (hw->mac.type == I40E_MAC_X722)
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 128;
	else
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;

	if (pf->vsi.enable_head_writeback) {
		scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
		    * sizeof(struct i40e_tx_desc) + sizeof(u32), DBA_ALIGN);
		scctx->isc_txrx = &ixl_txrx_hwb;
	} else {
		scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
		    * sizeof(struct i40e_tx_desc), DBA_ALIGN);
		scctx->isc_txrx = &ixl_txrx_dwb;
	}

	scctx->isc_txrx->ift_legacy_intr = ixl_intr;
	scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0]
	    * sizeof(union i40e_32byte_rx_desc), DBA_ALIGN);
	scctx->isc_msix_bar = PCIR_BAR(IXL_MSIX_BAR);
	scctx->isc_tx_nsegments = IXL_MAX_TX_SEGS;
	scctx->isc_tx_tso_segments_max = IXL_MAX_TSO_SEGS;
	scctx->isc_tx_tso_size_max = IXL_TSO_SIZE;
	scctx->isc_tx_tso_segsize_max = IXL_MAX_DMA_SEG_SIZE;
	scctx->isc_rss_table_size = pf->hw.func_caps.rss_table_size;
	scctx->isc_tx_csum_flags = CSUM_OFFLOAD;
	scctx->isc_capabilities = scctx->isc_capenable = IXL_CAPS;
}

static void
ixl_admin_timer(void *arg)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg;

	/* Fire off the admin task */
	iflib_admin_intr_deferred(pf->vsi.ctx);

	/* Reschedule the admin timer */
	callout_schedule(&pf->admin_timer, hz/2);
}

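/*
 * Limited attach path used when the firmware reports recovery mode:
 * only the MAC address, interrupt 0, and the iflib shared context are
 * set up, so that the adapter can still be recovered (e.g. via an NVM
 * update) while most other functionality stays disabled.
 */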
static int
ixl_attach_pre_recovery_mode(struct ixl_pf *pf)
{
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;

	device_printf(dev, "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");

	i40e_get_mac_addr(hw, hw->mac.addr);

	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
		ixl_configure_intr0_msix(pf);
		ixl_enable_intr0(hw);
	}

	ixl_setup_ssctx(pf);

	return (0);
}

static int
ixl_if_attach_pre(if_ctx_t ctx)
{
	device_t dev;
	struct ixl_pf *pf;
	struct i40e_hw *hw;
	struct ixl_vsi *vsi;
	enum i40e_get_fw_lldp_status_resp lldp_status;
	struct i40e_filter_control_settings filter;
	enum i40e_status_code status;
	int error = 0;

	dev = iflib_get_dev(ctx);
	pf = iflib_get_softc(ctx);

	INIT_DBG_DEV(dev, "begin");

	vsi = &pf->vsi;
	vsi->back = pf;
	pf->dev = dev;
	hw = &pf->hw;

	vsi->dev = dev;
	vsi->hw = &pf->hw;
	vsi->id = 0;
	vsi->num_vlans = 0;
	vsi->ctx = ctx;
	vsi->media = iflib_get_media(ctx);
	vsi->shared = iflib_get_softc_ctx(ctx);

	snprintf(pf->admin_mtx_name, sizeof(pf->admin_mtx_name),
	    "%s:admin", device_get_nameunit(dev));
	mtx_init(&pf->admin_mtx, pf->admin_mtx_name, NULL, MTX_DEF);
	callout_init_mtx(&pf->admin_timer, &pf->admin_mtx, 0);

	/* Save tunable values */
	ixl_save_pf_tunables(pf);

	/* Do PCI setup - map BAR0, etc */
	if (ixl_allocate_pci_resources(pf)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_pci_res;
	}

	/* Establish a clean starting point */
	i40e_clear_hw(hw);
	i40e_set_mac_type(hw);

	error = ixl_pf_reset(pf);
	if (error)
		goto err_out;

	/* Initialize the shared code */
	status = i40e_init_shared_code(hw);
	if (status) {
		device_printf(dev, "Unable to initialize shared code, error %s\n",
		    i40e_stat_str(hw, status));
		error = EIO;
		goto err_out;
	}

	/* Set up the admin queue */
	hw->aq.num_arq_entries = IXL_AQ_LEN;
	hw->aq.num_asq_entries = IXL_AQ_LEN;
	hw->aq.arq_buf_size = IXL_AQ_BUF_SZ;
	hw->aq.asq_buf_size = IXL_AQ_BUF_SZ;

	status = i40e_init_adminq(hw);
	if (status != 0 && status != I40E_ERR_FIRMWARE_API_VERSION) {
		device_printf(dev, "Unable to initialize Admin Queue, error %s\n",
		    i40e_stat_str(hw, status));
		error = EIO;
		goto err_out;
	}
	ixl_print_nvm_version(pf);

	if (status == I40E_ERR_FIRMWARE_API_VERSION) {
		device_printf(dev, "The driver for the device stopped "
		    "because the NVM image is newer than expected.\n");
		device_printf(dev, "You must install the most recent version of "
		    "the network driver.\n");
		error = EIO;
		goto err_out;
	}

	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw)) {
		device_printf(dev, "The driver for the device detected "
		    "a newer version of the NVM image than expected.\n");
		device_printf(dev, "Please install the most recent version "
		    "of the network driver.\n");
	} else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4) {
		device_printf(dev, "The driver for the device detected "
		    "an older version of the NVM image than expected.\n");
		device_printf(dev, "Please update the NVM image.\n");
	}

	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
		error = ixl_attach_pre_recovery_mode(pf);
		if (error)
			goto err_out;
		return (error);
	}

	/* Clear PXE mode */
	i40e_clear_pxe_mode(hw);

	/* Get capabilities from the device */
	error = ixl_get_hw_capabilities(pf);
	if (error) {
		device_printf(dev, "get_hw_capabilities failed: %d\n",
		    error);
		goto err_get_cap;
	}

	/* Set up host memory cache */
	error = ixl_setup_hmc(pf);
	if (error)
		goto err_mac_hmc;

	/* Disable LLDP from the firmware for certain NVM versions */
	if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
	    (pf->hw.aq.fw_maj_ver < 4)) {
		i40e_aq_stop_lldp(hw, true, false, NULL);
		pf->state |= IXL_PF_STATE_FW_LLDP_DISABLED;
	}

	/* Try enabling Energy Efficient Ethernet (EEE) mode */
	if (i40e_enable_eee(hw, true) == I40E_SUCCESS)
		atomic_set_32(&pf->state, IXL_PF_STATE_EEE_ENABLED);
	else
		atomic_clear_32(&pf->state, IXL_PF_STATE_EEE_ENABLED);

	/* Get MAC addresses from hardware */
	i40e_get_mac_addr(hw, hw->mac.addr);
	error = i40e_validate_mac_addr(hw->mac.addr);
	if (error) {
		device_printf(dev, "validate_mac_addr failed: %d\n", error);
		goto err_mac_hmc;
	}
	bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
	iflib_set_mac(ctx, hw->mac.addr);
	i40e_get_port_mac_addr(hw, hw->mac.port_addr);

	/* Set up the device filtering */
	bzero(&filter, sizeof(filter));
	filter.enable_ethtype = TRUE;
	filter.enable_macvlan = TRUE;
	filter.enable_fdir = FALSE;
	filter.hash_lut_size = I40E_HASH_LUT_SIZE_512;
	if (i40e_set_filter_control(hw, &filter))
		device_printf(dev, "i40e_set_filter_control() failed\n");

	/* Query device FW LLDP status */
	if (i40e_get_fw_lldp_status(hw, &lldp_status) == I40E_SUCCESS) {
		if (lldp_status == I40E_GET_FW_LLDP_STATUS_DISABLED) {
			atomic_set_32(&pf->state,
			    IXL_PF_STATE_FW_LLDP_DISABLED);
		} else {
			atomic_clear_32(&pf->state,
			    IXL_PF_STATE_FW_LLDP_DISABLED);
		}
	}

	/* Tell FW to apply DCB config on link up */
	i40e_aq_set_dcb_parameters(hw, true, NULL);

	/* Fill out iflib parameters */
	ixl_setup_ssctx(pf);

	INIT_DBG_DEV(dev, "end");
	return (0);

err_mac_hmc:
	ixl_shutdown_hmc(pf);
err_get_cap:
	i40e_shutdown_adminq(hw);
err_out:
	ixl_free_pci_resources(pf);
err_pci_res:
	mtx_lock(&pf->admin_mtx);
	callout_stop(&pf->admin_timer);
	mtx_unlock(&pf->admin_mtx);
	mtx_destroy(&pf->admin_mtx);
	return (error);
}

static int
ixl_if_attach_post(if_ctx_t ctx)
{
	device_t dev;
	struct ixl_pf *pf;
	struct i40e_hw *hw;
	struct ixl_vsi *vsi;
	int error = 0;
	enum i40e_status_code status;

	dev = iflib_get_dev(ctx);
	pf = iflib_get_softc(ctx);

	INIT_DBG_DEV(dev, "begin");

	vsi = &pf->vsi;
	vsi->ifp = iflib_get_ifp(ctx);
	hw = &pf->hw;

	/* Save off determined number of queues for interface */
	vsi->num_rx_queues = vsi->shared->isc_nrxqsets;
	vsi->num_tx_queues = vsi->shared->isc_ntxqsets;

	/* Setup OS network interface / ifnet */
	if (ixl_setup_interface(dev, pf)) {
		device_printf(dev, "interface setup failed!\n");
		error = EIO;
		goto err;
	}

	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
		/* Keep admin queue interrupts active while driver is loaded */
		if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
			ixl_configure_intr0_msix(pf);
			ixl_enable_intr0(hw);
		}

		ixl_add_sysctls_recovery_mode(pf);

		/* Start the admin timer */
		mtx_lock(&pf->admin_mtx);
		callout_reset(&pf->admin_timer, hz/2, ixl_admin_timer, pf);
		mtx_unlock(&pf->admin_mtx);
		return (0);
	}

	/* Determine link state */
	if (ixl_attach_get_link_status(pf)) {
		error = EINVAL;
		goto err;
	}

	error = ixl_switch_config(pf);
	if (error) {
		device_printf(dev, "Initial ixl_switch_config() failed: %d\n",
		    error);
		goto err;
	}

	/* Add protocol filters to list */
	ixl_init_filters(vsi);

	/* Init queue allocation manager */
	error = ixl_pf_qmgr_init(&pf->qmgr, hw->func_caps.num_tx_qp);
	if (error) {
		device_printf(dev, "Failed to init queue manager for PF queues, error %d\n",
		    error);
		goto err;
	}
	/* reserve a contiguous allocation for the PF's VSI */
	error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr,
	    max(vsi->num_rx_queues, vsi->num_tx_queues), &pf->qtag);
	if (error) {
		device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
		    error);
		goto err;
	}
	device_printf(dev, "Allocating %d queues for PF LAN VSI; %d queues active\n",
	    pf->qtag.num_allocated, pf->qtag.num_active);

	/* Limit PHY interrupts to link, autoneg, and modules failure */
	status = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
	    NULL);
	if (status) {
		device_printf(dev, "i40e_aq_set_phy_mask() failed: err %s,"
		    " aq_err %s\n", i40e_stat_str(hw, status),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		goto err;
	}

	/* Get the bus configuration and set the shared code */
	ixl_get_bus_info(pf);

	/* Keep admin queue interrupts active while driver is loaded */
	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
		ixl_configure_intr0_msix(pf);
		ixl_enable_intr0(hw);
	}

	/* Set initial advertised speed sysctl value */
	ixl_set_initial_advertised_speeds(pf);

	/* Initialize statistics & add sysctls */
	ixl_add_device_sysctls(pf);
	ixl_pf_reset_stats(pf);
	ixl_update_stats_counters(pf);
	ixl_add_hw_stats(pf);

	hw->phy.get_link_info = true;
	i40e_get_link_status(hw, &pf->link_up);
	ixl_update_link_status(pf);

#ifdef PCI_IOV
	ixl_initialize_sriov(pf);
#endif

#ifdef IXL_IW
	if (hw->func_caps.iwarp && ixl_enable_iwarp) {
		pf->iw_enabled = (pf->iw_msix > 0) ? true : false;
		if (pf->iw_enabled) {
			error = ixl_iw_pf_attach(pf);
			if (error) {
				device_printf(dev,
				    "interfacing to iWARP driver failed: %d\n",
				    error);
				goto err;
			} else
				device_printf(dev, "iWARP ready\n");
		} else
			device_printf(dev, "iWARP disabled on this device "
			    "(no MSI-X vectors)\n");
	} else {
		pf->iw_enabled = false;
		device_printf(dev, "The device is not iWARP enabled\n");
	}
#endif
	/* Start the admin timer */
	mtx_lock(&pf->admin_mtx);
	callout_reset(&pf->admin_timer, hz/2, ixl_admin_timer, pf);
	mtx_unlock(&pf->admin_mtx);

	INIT_DBG_DEV(dev, "end");
	return (0);

err:
	INIT_DEBUGOUT("end: error %d", error);
	/* ixl_if_detach() is called on error from this */
	return (error);
}

/**
 * XXX: iflib always ignores the return value of detach()
 * -> This means that this isn't allowed to fail
 */
static int
ixl_if_detach(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	enum i40e_status_code status;
#ifdef IXL_IW
	int error;
#endif

	INIT_DBG_DEV(dev, "begin");

	/* Stop the admin timer */
	mtx_lock(&pf->admin_mtx);
	callout_stop(&pf->admin_timer);
	mtx_unlock(&pf->admin_mtx);
	mtx_destroy(&pf->admin_mtx);

#ifdef IXL_IW
	if (ixl_enable_iwarp && pf->iw_enabled) {
		error = ixl_iw_pf_detach(pf);
		if (error == EBUSY) {
			device_printf(dev, "iwarp in use; stop it first.\n");
			//return (error);
		}
	}
#endif
	/* Remove all previously allocated media types */
	ifmedia_removeall(vsi->media);

	/* Shutdown LAN HMC */
	ixl_shutdown_hmc(pf);

	/* Shutdown admin queue */
	ixl_disable_intr0(hw);
	status = i40e_shutdown_adminq(hw);
	if (status)
		device_printf(dev,
		    "i40e_shutdown_adminq() failed with status %s\n",
		    i40e_stat_str(hw, status));

	ixl_pf_qmgr_destroy(&pf->qmgr);
	ixl_free_pci_resources(pf);
	ixl_free_mac_filters(vsi);
	INIT_DBG_DEV(dev, "end");
	return (0);
}

static int
ixl_if_shutdown(if_ctx_t ctx)
{
	int error = 0;

	INIT_DEBUGOUT("ixl_if_shutdown: begin");

	/* TODO: Call ixl_if_stop()? */

	/* TODO: Then setup low power mode */

	return (error);
}

static int
ixl_if_suspend(if_ctx_t ctx)
{
	int error = 0;

	INIT_DEBUGOUT("ixl_if_suspend: begin");

	/* TODO: Call ixl_if_stop()? */

	/* TODO: Then setup low power mode */

	return (error);
}

static int
ixl_if_resume(if_ctx_t ctx)
{
	struct ifnet *ifp = iflib_get_ifp(ctx);

	INIT_DEBUGOUT("ixl_if_resume: begin");

	/* Read & clear wake-up registers */

	/* Required after D3->D0 transition */
	if (ifp->if_flags & IFF_UP)
		ixl_if_init(ctx);

	return (0);
}

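/*
 * Not static: besides being registered as the ifdi_init method, this
 * is also called directly, e.g. from ixl_if_resume() above after a
 * D3->D0 transition.
 */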
void
ixl_if_init(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = &pf->hw;
	struct ifnet *ifp = iflib_get_ifp(ctx);
	device_t dev = iflib_get_dev(ctx);
	u8 tmpaddr[ETHER_ADDR_LEN];
	int ret;

	if (IXL_PF_IN_RECOVERY_MODE(pf))
		return;
	/*
	 * If the aq is dead here, it probably means something outside of the
	 * driver did something to the adapter, like a PF reset.
	 * So, rebuild the driver's state here if that occurs.
	 */
	if (!i40e_check_asq_alive(&pf->hw)) {
		device_printf(dev, "Admin Queue is down; resetting...\n");
		ixl_teardown_hw_structs(pf);
		ixl_rebuild_hw_structs_after_reset(pf, false);
	}

	/* Get the latest mac address... User might use a LAA */
	bcopy(IF_LLADDR(vsi->ifp), tmpaddr, ETH_ALEN);
	if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
	    (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
		ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
		bcopy(tmpaddr, hw->mac.addr, ETH_ALEN);
		ret = i40e_aq_mac_address_write(hw,
		    I40E_AQC_WRITE_TYPE_LAA_ONLY,
		    hw->mac.addr, NULL);
		if (ret) {
			device_printf(dev, "LLA address change failed!!\n");
			return;
		}
		ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
	}

	iflib_set_mac(ctx, hw->mac.addr);

	/* Prepare the VSI: rings, hmc contexts, etc... */
	if (ixl_initialize_vsi(vsi)) {
		device_printf(dev, "initialize vsi failed!!\n");
		return;
	}

	/* Reconfigure multicast filters in HW */
	ixl_if_multi_set(ctx);

	/* Set up RSS */
	ixl_config_rss(pf);

	/* Set up MSI-X routing and the ITR settings */
	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
		ixl_configure_queue_intr_msix(pf);
		ixl_configure_itr(pf);
	} else
		ixl_configure_legacy(pf);

	if (vsi->enable_head_writeback)
		ixl_init_tx_cidx(vsi);
	else
		ixl_init_tx_rsqs(vsi);

	ixl_enable_rings(vsi);

	i40e_aq_set_default_vsi(hw, vsi->seid, NULL);

	/* Re-add configure filters to HW */
	ixl_reconfigure_filters(vsi);

	/* Configure promiscuous mode */
	ixl_if_promisc_set(ctx, if_getflags(ifp));

#ifdef IXL_IW
	if (ixl_enable_iwarp && pf->iw_enabled) {
		ret = ixl_iw_pf_init(pf);
		if (ret)
			device_printf(dev,
			    "initialize iwarp failed, code %d\n", ret);
	}
#endif
}

void
ixl_if_stop(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;

	INIT_DEBUGOUT("ixl_if_stop: begin\n");

	if (IXL_PF_IN_RECOVERY_MODE(pf))
		return;

	// TODO: This may need to be reworked
#ifdef IXL_IW
	/* Stop iWARP device */
	if (ixl_enable_iwarp && pf->iw_enabled)
		ixl_iw_pf_stop(pf);
#endif

	ixl_disable_rings_intr(vsi);
	ixl_disable_rings(pf, vsi, &pf->qtag);
}

static int
ixl_if_msix_intr_assign(if_ctx_t ctx, int msix)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_rx_queue *rx_que = vsi->rx_queues;
	struct ixl_tx_queue *tx_que = vsi->tx_queues;
	int err, i, rid, vector = 0;
	char buf[16];

	MPASS(vsi->shared->isc_nrxqsets > 0);
	MPASS(vsi->shared->isc_ntxqsets > 0);

	/* Admin Queue must use vector 0 */
	rid = vector + 1;
	err = iflib_irq_alloc_generic(ctx, &vsi->irq, rid, IFLIB_INTR_ADMIN,
	    ixl_msix_adminq, pf, 0, "aq");
	if (err) {
		iflib_irq_free(ctx, &vsi->irq);
		device_printf(iflib_get_dev(ctx),
		    "Failed to register Admin Queue handler");
		return (err);
	}
	/* Create soft IRQ for handling VFLRs */
	iflib_softirq_alloc_generic(ctx, NULL, IFLIB_INTR_IOV, pf, 0, "iov");

	/* Now set up the stations */
	for (i = 0, vector = 1; i < vsi->shared->isc_nrxqsets; i++, vector++, rx_que++) {
		rid = vector + 1;

		snprintf(buf, sizeof(buf), "rxq%d", i);
		err = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
		    IFLIB_INTR_RX, ixl_msix_que, rx_que, rx_que->rxr.me, buf);
		/* XXX: Does the driver work as expected if there are fewer num_rx_queues than
		 * what's expected in the iflib context? */
		if (err) {
			device_printf(iflib_get_dev(ctx),
			    "Failed to allocate queue RX int vector %d, err: %d\n", i, err);
			vsi->num_rx_queues = i + 1;
			goto fail;
		}
		rx_que->msix = vector;
	}

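	/*
	 * TX queues get no dedicated MSI-X vectors; each is serviced in
	 * the softirq of the RX queue with the same index modulo the
	 * number of RX queue sets. For example, with 4 RX queues and 8
	 * TX queues, TX queues 0 and 4 both map onto vector 1.
	 */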
	bzero(buf, sizeof(buf));

	for (i = 0; i < vsi->shared->isc_ntxqsets; i++, tx_que++) {
		snprintf(buf, sizeof(buf), "txq%d", i);
		iflib_softirq_alloc_generic(ctx,
		    &vsi->rx_queues[i % vsi->shared->isc_nrxqsets].que_irq,
		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);

		/* TODO: Maybe call a strategy function for this to figure out which
		 * interrupts to map Tx queues to. I don't know if there's an immediately
		 * better way than this other than a user-supplied map, though. */
		tx_que->msix = (i % vsi->shared->isc_nrxqsets) + 1;
	}

	return (0);
fail:
	iflib_irq_free(ctx, &vsi->irq);
	rx_que = vsi->rx_queues;
	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
		iflib_irq_free(ctx, &rx_que->que_irq);
	return (err);
}

/*
 * Enable all interrupts
 *
 * Called in:
 * iflib_init_locked, after ixl_if_init()
 */
static void
ixl_if_enable_intr(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = vsi->hw;
	struct ixl_rx_queue *que = vsi->rx_queues;

	ixl_enable_intr0(hw);
	/* Enable queue interrupts */
	for (int i = 0; i < vsi->num_rx_queues; i++, que++)
		/* TODO: Queue index parameter is probably wrong */
		ixl_enable_queue(hw, que->rxr.me);
}

/*
 * Disable queue interrupts
 *
 * Other interrupt causes need to remain active.
 */
static void
ixl_if_disable_intr(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = vsi->hw;
	struct ixl_rx_queue *rx_que = vsi->rx_queues;

	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
		for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
			ixl_disable_queue(hw, rx_que->msix - 1);
	} else {
		// Set PFINT_LNKLST0 FIRSTQ_INDX to 0x7FF
		// stops queues from triggering interrupts
		wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
	}
}

static int
ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = vsi->hw;
	struct ixl_rx_queue *rx_que = &vsi->rx_queues[rxqid];

	ixl_enable_queue(hw, rx_que->msix - 1);
	return (0);
}

static int
ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = vsi->hw;
	struct ixl_tx_queue *tx_que = &vsi->tx_queues[txqid];

	ixl_enable_queue(hw, tx_que->msix - 1);
	return (0);
}

static int
ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	if_softc_ctx_t scctx = vsi->shared;
	struct ixl_tx_queue *que;
	int i, j, error = 0;

	MPASS(scctx->isc_ntxqsets > 0);
	MPASS(ntxqs == 1);
	MPASS(scctx->isc_ntxqsets == ntxqsets);

	/* Allocate queue structure memory */
	if (!(vsi->tx_queues =
	    (struct ixl_tx_queue *) malloc(sizeof(struct ixl_tx_queue) * ntxqsets, M_IXL, M_NOWAIT | M_ZERO))) {
		device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n");
		return (ENOMEM);
	}

	for (i = 0, que = vsi->tx_queues; i < ntxqsets; i++, que++) {
		struct tx_ring *txr = &que->txr;

		txr->me = i;
		que->vsi = vsi;

		if (!vsi->enable_head_writeback) {
			/* Allocate report status array */
			if (!(txr->tx_rsq = malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXL, M_NOWAIT))) {
				device_printf(iflib_get_dev(ctx), "failed to allocate tx_rsq memory\n");
				error = ENOMEM;
				goto fail;
			}
			/* Init report status array */
			for (j = 0; j < scctx->isc_ntxd[0]; j++)
				txr->tx_rsq[j] = QIDX_INVALID;
		}
		/* get the virtual and physical address of the hardware queues */
		txr->tail = I40E_QTX_TAIL(txr->me);
		txr->tx_base = (struct i40e_tx_desc *)vaddrs[i * ntxqs];
		txr->tx_paddr = paddrs[i * ntxqs];
		txr->que = que;
	}

	return (0);
fail:
	ixl_if_queues_free(ctx);
	return (error);
}

static int
ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_rx_queue *que;
	int i, error = 0;

#ifdef INVARIANTS
	if_softc_ctx_t scctx = vsi->shared;
	MPASS(scctx->isc_nrxqsets > 0);
	MPASS(nrxqs == 1);
	MPASS(scctx->isc_nrxqsets == nrxqsets);
#endif

	/* Allocate queue structure memory */
	if (!(vsi->rx_queues =
	    (struct ixl_rx_queue *) malloc(sizeof(struct ixl_rx_queue) *
	    nrxqsets, M_IXL, M_NOWAIT | M_ZERO))) {
		device_printf(iflib_get_dev(ctx), "Unable to allocate RX ring memory\n");
		error = ENOMEM;
		goto fail;
	}

	for (i = 0, que = vsi->rx_queues; i < nrxqsets; i++, que++) {
		struct rx_ring *rxr = &que->rxr;

		rxr->me = i;
		que->vsi = vsi;

		/* get the virtual and physical address of the hardware queues */
		rxr->tail = I40E_QRX_TAIL(rxr->me);
		rxr->rx_base = (union i40e_rx_desc *)vaddrs[i * nrxqs];
		rxr->rx_paddr = paddrs[i * nrxqs];
		rxr->que = que;
	}

	return (0);
fail:
	ixl_if_queues_free(ctx);
	return (error);
}

static void
ixl_if_queues_free(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;

	if (!vsi->enable_head_writeback) {
		struct ixl_tx_queue *que;
		int i = 0;

		for (i = 0, que = vsi->tx_queues; i < vsi->num_tx_queues; i++, que++) {
			struct tx_ring *txr = &que->txr;
			if (txr->tx_rsq != NULL) {
				free(txr->tx_rsq, M_IXL);
				txr->tx_rsq = NULL;
			}
		}
	}

	if (vsi->tx_queues != NULL) {
		free(vsi->tx_queues, M_IXL);
		vsi->tx_queues = NULL;
	}
	if (vsi->rx_queues != NULL) {
		free(vsi->rx_queues, M_IXL);
		vsi->rx_queues = NULL;
	}

	if (!IXL_PF_IN_RECOVERY_MODE(pf))
		sysctl_ctx_free(&vsi->sysctl_ctx);
}

void
ixl_update_link_status(struct ixl_pf *pf)
{
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = &pf->hw;
	u64 baudrate;

	if (pf->link_up) {
		if (vsi->link_active == FALSE) {
			vsi->link_active = TRUE;
			baudrate = ixl_max_aq_speed_to_value(hw->phy.link_info.link_speed);
			iflib_link_state_change(vsi->ctx, LINK_STATE_UP, baudrate);
			ixl_link_up_msg(pf);
#ifdef PCI_IOV
			ixl_broadcast_link_state(pf);
#endif
		}
	} else { /* Link down */
		if (vsi->link_active == TRUE) {
			vsi->link_active = FALSE;
			iflib_link_state_change(vsi->ctx, LINK_STATE_DOWN, 0);
#ifdef PCI_IOV
			ixl_broadcast_link_state(pf);
#endif
		}
	}
}

static void
ixl_handle_lan_overflow_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
{
	device_t dev = pf->dev;
	u32 rxq_idx, qtx_ctl;

	rxq_idx = (e->desc.params.external.param0 & I40E_PRTDCB_RUPTQ_RXQNUM_MASK) >>
	    I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT;
	qtx_ctl = e->desc.params.external.param1;

	device_printf(dev, "LAN overflow event: global rxq_idx %d\n", rxq_idx);
	device_printf(dev, "LAN overflow event: QTX_CTL 0x%08x\n", qtx_ctl);
}

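/*
 * Drain and dispatch up to IXL_ADM_LIMIT events from the Admin Receive
 * Queue; the caller (ixl_if_update_admin_status()) reschedules itself
 * through iflib if events are still pending when this returns.
 */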
static int
ixl_process_adminq(struct ixl_pf *pf, u16 *pending)
{
	enum i40e_status_code status = I40E_SUCCESS;
	struct i40e_arq_event_info event;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	u16 opcode;
	u32 loop = 0, reg;

	event.buf_len = IXL_AQ_BUF_SZ;
	event.msg_buf = malloc(event.buf_len, M_IXL, M_NOWAIT | M_ZERO);
	if (!event.msg_buf) {
		device_printf(dev, "%s: Unable to allocate memory for Admin"
		    " Queue event!\n", __func__);
		return (ENOMEM);
	}

	/* clean and process any events */
	do {
		status = i40e_clean_arq_element(hw, &event, pending);
		if (status)
			break;
		opcode = LE16_TO_CPU(event.desc.opcode);
		ixl_dbg(pf, IXL_DBG_AQ,
		    "Admin Queue event: %#06x\n", opcode);
		switch (opcode) {
		case i40e_aqc_opc_get_link_status:
			ixl_link_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_pf:
#ifdef PCI_IOV
			ixl_handle_vf_msg(pf, &event);
#endif
			break;
		/*
		 * This should only occur on no-drop queues, which
		 * aren't currently configured.
		 */
		case i40e_aqc_opc_event_lan_overflow:
			ixl_handle_lan_overflow_event(pf, &event);
			break;
		default:
			break;
		}
	} while (*pending && (loop++ < IXL_ADM_LIMIT));

	free(event.msg_buf, M_IXL);

	/* Re-enable admin queue interrupt cause */
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	return (status);
}

static void
ixl_if_update_admin_status(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct i40e_hw *hw = &pf->hw;
	u16 pending;

	if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING)
		ixl_handle_empr_reset(pf);

	/*
	 * Admin Queue is shut down while handling reset.
	 * Don't proceed if it hasn't been re-initialized
	 * e.g due to an issue with new FW.
	 */
	if (!i40e_check_asq_alive(&pf->hw))
		return;

	if (pf->state & IXL_PF_STATE_MDD_PENDING)
		ixl_handle_mdd_event(pf);

	ixl_process_adminq(pf, &pending);
	ixl_update_link_status(pf);

	/*
	 * If there are still messages to process, reschedule ourselves.
	 * Otherwise, re-enable our interrupt and go to sleep.
	 */
	if (pending > 0)
		iflib_admin_intr_deferred(ctx);
	else
		ixl_enable_intr0(hw);
}

static void
ixl_if_multi_set(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = vsi->hw;
	int mcnt, flags;
	int del_mcnt;

	IOCTL_DEBUGOUT("ixl_if_multi_set: begin");

	mcnt = min(if_llmaddr_count(iflib_get_ifp(ctx)), MAX_MULTICAST_ADDR);
	/* Delete filters for removed multicast addresses */
	del_mcnt = ixl_del_multi(vsi);
	vsi->num_macs -= del_mcnt;

	if (__predict_false(mcnt == MAX_MULTICAST_ADDR)) {
		i40e_aq_set_vsi_multicast_promiscuous(hw,
		    vsi->seid, TRUE, NULL);
		return;
	}
	/* (re-)install filters for all mcast addresses */
	/* XXX: This bypasses filter count tracking code! */
	mcnt = if_foreach_llmaddr(iflib_get_ifp(ctx), ixl_mc_filter_apply, vsi);
	if (mcnt > 0) {
		vsi->num_macs += mcnt;
		flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
		ixl_add_hw_filters(vsi, flags, mcnt);
	}

	ixl_dbg_filter(pf, "%s: filter mac total: %d\n",
	    __func__, vsi->num_macs);
	IOCTL_DEBUGOUT("ixl_if_multi_set: end");
}

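/*
 * The MTU check below leaves room in IXL_MAX_FRAME for the Ethernet
 * header, CRC, and one VLAN tag; the same overhead is added back when
 * isc_max_frame_size is programmed for iflib.
 */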
static int
ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;

	IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
	if (mtu > IXL_MAX_FRAME - ETHER_HDR_LEN - ETHER_CRC_LEN -
	    ETHER_VLAN_ENCAP_LEN)
		return (EINVAL);

	vsi->shared->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
	    ETHER_VLAN_ENCAP_LEN;

	return (0);
}

static void
ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct i40e_hw *hw = &pf->hw;

	INIT_DEBUGOUT("ixl_media_status: begin");

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!pf->link_up) {
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	/* Hardware is always full-duplex */
	ifmr->ifm_active |= IFM_FDX;

	switch (hw->phy.link_info.phy_type) {
	/* 100 M */
	case I40E_PHY_TYPE_100BASE_TX:
		ifmr->ifm_active |= IFM_100_TX;
		break;
	/* 1 G */
	case I40E_PHY_TYPE_1000BASE_T:
		ifmr->ifm_active |= IFM_1000_T;
		break;
	case I40E_PHY_TYPE_1000BASE_SX:
		ifmr->ifm_active |= IFM_1000_SX;
		break;
	case I40E_PHY_TYPE_1000BASE_LX:
		ifmr->ifm_active |= IFM_1000_LX;
		break;
	case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
		ifmr->ifm_active |= IFM_1000_T;
		break;
	/* 2.5 G */
	case I40E_PHY_TYPE_2_5GBASE_T:
		ifmr->ifm_active |= IFM_2500_T;
		break;
	/* 5 G */
	case I40E_PHY_TYPE_5GBASE_T:
		ifmr->ifm_active |= IFM_5000_T;
		break;
	/* 10 G */
	case I40E_PHY_TYPE_10GBASE_SFPP_CU:
		ifmr->ifm_active |= IFM_10G_TWINAX;
		break;
	case I40E_PHY_TYPE_10GBASE_SR:
		ifmr->ifm_active |= IFM_10G_SR;
		break;
	case I40E_PHY_TYPE_10GBASE_LR:
		ifmr->ifm_active |= IFM_10G_LR;
		break;
	case I40E_PHY_TYPE_10GBASE_T:
		ifmr->ifm_active |= IFM_10G_T;
		break;
	case I40E_PHY_TYPE_XAUI:
	case I40E_PHY_TYPE_XFI:
		ifmr->ifm_active |= IFM_10G_TWINAX;
		break;
	case I40E_PHY_TYPE_10GBASE_AOC:
		ifmr->ifm_active |= IFM_10G_AOC;
		break;
	/* 25 G */
	case I40E_PHY_TYPE_25GBASE_KR:
		ifmr->ifm_active |= IFM_25G_KR;
		break;
	case I40E_PHY_TYPE_25GBASE_CR:
		ifmr->ifm_active |= IFM_25G_CR;
		break;
	case I40E_PHY_TYPE_25GBASE_SR:
		ifmr->ifm_active |= IFM_25G_SR;
		break;
	case I40E_PHY_TYPE_25GBASE_LR:
		ifmr->ifm_active |= IFM_25G_LR;
		break;
	case I40E_PHY_TYPE_25GBASE_AOC:
		ifmr->ifm_active |= IFM_25G_AOC;
		break;
	case I40E_PHY_TYPE_25GBASE_ACC:
		ifmr->ifm_active |= IFM_25G_ACC;
		break;
	/* 40 G */
	case I40E_PHY_TYPE_40GBASE_CR4:
	case I40E_PHY_TYPE_40GBASE_CR4_CU:
		ifmr->ifm_active |= IFM_40G_CR4;
		break;
	case I40E_PHY_TYPE_40GBASE_SR4:
		ifmr->ifm_active |= IFM_40G_SR4;
		break;
	case I40E_PHY_TYPE_40GBASE_LR4:
		ifmr->ifm_active |= IFM_40G_LR4;
		break;
	case I40E_PHY_TYPE_XLAUI:
		ifmr->ifm_active |= IFM_OTHER;
		break;
	case I40E_PHY_TYPE_1000BASE_KX:
		ifmr->ifm_active |= IFM_1000_KX;
		break;
	case I40E_PHY_TYPE_SGMII:
		ifmr->ifm_active |= IFM_1000_SGMII;
		break;
	/* ERJ: What's the difference between these? */
	case I40E_PHY_TYPE_10GBASE_CR1_CU:
	case I40E_PHY_TYPE_10GBASE_CR1:
		ifmr->ifm_active |= IFM_10G_CR1;
		break;
	case I40E_PHY_TYPE_10GBASE_KX4:
		ifmr->ifm_active |= IFM_10G_KX4;
		break;
	case I40E_PHY_TYPE_10GBASE_KR:
		ifmr->ifm_active |= IFM_10G_KR;
		break;
	case I40E_PHY_TYPE_SFI:
		ifmr->ifm_active |= IFM_10G_SFI;
		break;
	/* Our single 20G media type */
	case I40E_PHY_TYPE_20GBASE_KR2:
		ifmr->ifm_active |= IFM_20G_KR2;
		break;
	case I40E_PHY_TYPE_40GBASE_KR4:
		ifmr->ifm_active |= IFM_40G_KR4;
		break;
	case I40E_PHY_TYPE_XLPPI:
	case I40E_PHY_TYPE_40GBASE_AOC:
		ifmr->ifm_active |= IFM_40G_XLPPI;
		break;
	/* Unknown to driver */
	default:
		ifmr->ifm_active |= IFM_UNKNOWN;
		break;
	}
	/* Report flow control status as well */
	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
}

static int
ixl_if_media_change(if_ctx_t ctx)
{
	struct ifmedia *ifm = iflib_get_media(ctx);

	INIT_DEBUGOUT("ixl_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	if_printf(iflib_get_ifp(ctx), "Media change is not supported.\n");
	return (ENODEV);
}

static int
ixl_if_promisc_set(if_ctx_t ctx, int flags)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct ifnet *ifp = iflib_get_ifp(ctx);
	struct i40e_hw *hw = vsi->hw;
	int err;
	bool uni = FALSE, multi = FALSE;

	if (flags & IFF_PROMISC)
		uni = multi = TRUE;
	else if (flags & IFF_ALLMULTI || if_llmaddr_count(ifp) >=
	    MAX_MULTICAST_ADDR)
		multi = TRUE;

	err = i40e_aq_set_vsi_unicast_promiscuous(hw,
	    vsi->seid, uni, NULL, true);
	if (err)
		return (err);
	err = i40e_aq_set_vsi_multicast_promiscuous(hw,
	    vsi->seid, multi, NULL);
	return (err);
}

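/*
 * iflib invokes the ifdi_timer method once per TX queue; the
 * statistics only need to be refreshed once per interval, hence the
 * early return for every qid other than 0.
 */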
static void
ixl_if_timer(if_ctx_t ctx, uint16_t qid)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);

	if (qid != 0)
		return;

	ixl_update_stats_counters(pf);
}

static void
ixl_if_vlan_register(if_ctx_t ctx, u16 vtag)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = vsi->hw;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	++vsi->num_vlans;
	ixl_add_filter(vsi, hw->mac.addr, vtag);
}

static void
ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = vsi->hw;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	--vsi->num_vlans;
	ixl_del_filter(vsi, hw->mac.addr, vtag);
}

static uint64_t
ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	if_t ifp = iflib_get_ifp(ctx);

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (vsi->ipackets);
	case IFCOUNTER_IERRORS:
		return (vsi->ierrors);
	case IFCOUNTER_OPACKETS:
		return (vsi->opackets);
	case IFCOUNTER_OERRORS:
		return (vsi->oerrors);
	case IFCOUNTER_COLLISIONS:
		/* Collisions are by standard impossible in 40G/10G Ethernet */
		return (0);
	case IFCOUNTER_IBYTES:
		return (vsi->ibytes);
	case IFCOUNTER_OBYTES:
		return (vsi->obytes);
	case IFCOUNTER_IMCASTS:
		return (vsi->imcasts);
	case IFCOUNTER_OMCASTS:
		return (vsi->omcasts);
	case IFCOUNTER_IQDROPS:
		return (vsi->iqdrops);
	case IFCOUNTER_OQDROPS:
		return (vsi->oqdrops);
	case IFCOUNTER_NOPROTO:
		return (vsi->noproto);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}

#ifdef PCI_IOV
static void
ixl_if_vflr_handle(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);

	ixl_handle_vflr(pf);
}
#endif

static int
ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);

	if (pf->read_i2c_byte == NULL)
		return (EINVAL);

	for (int i = 0; i < req->len; i++)
		if (pf->read_i2c_byte(pf, req->offset + i,
		    req->dev_addr, &req->data[i]))
			return (EIO);
	return (0);
}

static int
ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ifdrv *ifd = (struct ifdrv *)data;
	int error = 0;

	/*
	 * The iflib_if_ioctl forwards SIOCxDRVSPEC and SIOGPRIVATE_0 without
	 * performing privilege checks. It is important that this function
	 * perform the necessary checks for commands which should only be
	 * executed by privileged threads.
	 */

	switch (command) {
	case SIOCGDRVSPEC:
	case SIOCSDRVSPEC:
		/* NVM update command */
		if (ifd->ifd_cmd == I40E_NVM_ACCESS) {
			error = priv_check(curthread, PRIV_DRIVER);
			if (error)
				break;
			error = ixl_handle_nvmupd_cmd(pf, ifd);
		} else {
			error = EINVAL;
		}
		break;
	default:
		error = EOPNOTSUPP;
	}

	return (error);
}

/* ixl_if_needs_restart - Tell iflib when the driver needs to be reinitialized
 * @ctx: iflib context
 * @event: event code to check
 *
 * Defaults to returning false for every event.
 *
 * @returns true if iflib needs to reinit the interface, false otherwise
 */
static bool
ixl_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
{
	switch (event) {
	case IFLIB_RESTART_VLAN_CONFIG:
	default:
		return (false);
	}
}

static u_int
ixl_mc_filter_apply(void *arg, struct sockaddr_dl *sdl, u_int count __unused)
{
	struct ixl_vsi *vsi = arg;

	ixl_add_mc_filter(vsi, (u8*)LLADDR(sdl));
	return (1);
}

/*
 * Sanity check and save off tunable values.
 */
static void
ixl_save_pf_tunables(struct ixl_pf *pf)
{
	device_t dev = pf->dev;

	/* Save tunable information */
#ifdef IXL_DEBUG_FC
	pf->enable_tx_fc_filter = ixl_enable_tx_fc_filter;
#endif
#ifdef IXL_DEBUG
	pf->recovery_mode = ixl_debug_recovery_mode;
#endif
	pf->dbg_mask = ixl_core_debug_mask;
	pf->hw.debug_mask = ixl_shared_debug_mask;
	pf->vsi.enable_head_writeback = !!(ixl_enable_head_writeback);
	pf->enable_vf_loopback = !!(ixl_enable_vf_loopback);
#if 0
	pf->dynamic_rx_itr = ixl_dynamic_rx_itr;
	pf->dynamic_tx_itr = ixl_dynamic_tx_itr;
#endif

	if (ixl_i2c_access_method > 3 || ixl_i2c_access_method < 0)
		pf->i2c_access_method = 0;
	else
		pf->i2c_access_method = ixl_i2c_access_method;

	if (ixl_tx_itr < 0 || ixl_tx_itr > IXL_MAX_ITR) {
		device_printf(dev, "Invalid tx_itr value of %d set!\n",
		    ixl_tx_itr);
		device_printf(dev, "tx_itr must be between %d and %d, "
		    "inclusive\n",
		    0, IXL_MAX_ITR);
		device_printf(dev, "Using default value of %d instead\n",
		    IXL_ITR_4K);
		pf->tx_itr = IXL_ITR_4K;
	} else
		pf->tx_itr = ixl_tx_itr;

	if (ixl_rx_itr < 0 || ixl_rx_itr > IXL_MAX_ITR) {
		device_printf(dev, "Invalid rx_itr value of %d set!\n",
		    ixl_rx_itr);
		device_printf(dev, "rx_itr must be between %d and %d, "
		    "inclusive\n",
		    0, IXL_MAX_ITR);
		device_printf(dev, "Using default value of %d instead\n",
		    IXL_ITR_8K);
		pf->rx_itr = IXL_ITR_8K;
	} else
		pf->rx_itr = ixl_rx_itr;
}