/******************************************************************************

  Copyright (c) 2013-2018, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "ixl.h"
#include "ixl_pf.h"

#ifdef IXL_IW
#include "ixl_iw.h"
#include "ixl_iw_int.h"
#endif

#ifdef PCI_IOV
#include "ixl_pf_iov.h"
#endif

/*********************************************************************
 *  Driver version
 *********************************************************************/
#define IXL_DRIVER_VERSION_MAJOR	2
#define IXL_DRIVER_VERSION_MINOR	3
#define IXL_DRIVER_VERSION_BUILD	3

#define IXL_DRIVER_VERSION_STRING			\
    __XSTRING(IXL_DRIVER_VERSION_MAJOR) "."		\
    __XSTRING(IXL_DRIVER_VERSION_MINOR) "."		\
    __XSTRING(IXL_DRIVER_VERSION_BUILD) "-k"
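/*
 * With the values above, __XSTRING() stringifies each component and
 * string concatenation yields "2.3.3-k"; the "-k" suffix marks the
 * in-kernel build of the driver.
 */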
/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *
 *  ( Vendor ID, Device ID, Branding String )
 *********************************************************************/

static pci_vendor_info_t ixl_vendor_info_array[] =
{
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, "Intel(R) Ethernet Controller X710 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, "Intel(R) Ethernet Controller XL710 for 40GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, "Intel(R) Ethernet Controller X710 for 10GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, "Intel(R) Ethernet Controller X710 for 10GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, "Intel(R) Ethernet Controller X710 for 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, "Intel(R) Ethernet Controller X710/X557-AT 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722, "Intel(R) Ethernet Connection X722 for 10GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 1GbE"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B, "Intel(R) Ethernet Controller XXV710 for 25GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28, "Intel(R) Ethernet Controller XXV710 for 25GbE SFP28"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_BC, "Intel(R) Ethernet Controller X710 for 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_SFP, "Intel(R) Ethernet Controller X710 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_B, "Intel(R) Ethernet Controller X710 for 10GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_5G_BASE_T_BC, "Intel(R) Ethernet Controller V710 for 5GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_BC, "Intel(R) Ethernet Controller I710 for 1GBASE-T"),
	/* required last entry */
	PVID_END
};
/*********************************************************************
 *  Function prototypes
 *********************************************************************/
/*** IFLIB interface ***/
static void	*ixl_register(device_t dev);
static int	 ixl_if_attach_pre(if_ctx_t ctx);
static int	 ixl_if_attach_post(if_ctx_t ctx);
static int	 ixl_if_detach(if_ctx_t ctx);
static int	 ixl_if_shutdown(if_ctx_t ctx);
static int	 ixl_if_suspend(if_ctx_t ctx);
static int	 ixl_if_resume(if_ctx_t ctx);
static int	 ixl_if_msix_intr_assign(if_ctx_t ctx, int msix);
static void	 ixl_if_enable_intr(if_ctx_t ctx);
static void	 ixl_if_disable_intr(if_ctx_t ctx);
static int	 ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid);
static int	 ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid);
static int	 ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets);
static int	 ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
static void	 ixl_if_queues_free(if_ctx_t ctx);
static void	 ixl_if_update_admin_status(if_ctx_t ctx);
static void	 ixl_if_multi_set(if_ctx_t ctx);
static int	 ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
static void	 ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr);
static int	 ixl_if_media_change(if_ctx_t ctx);
static int	 ixl_if_promisc_set(if_ctx_t ctx, int flags);
static void	 ixl_if_timer(if_ctx_t ctx, uint16_t qid);
static void	 ixl_if_vlan_register(if_ctx_t ctx, u16 vtag);
static void	 ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
static uint64_t	 ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt);
static int	 ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
static int	 ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data);
static bool	 ixl_if_needs_restart(if_ctx_t ctx, enum iflib_restart_event event);
#ifdef PCI_IOV
static void	 ixl_if_vflr_handle(if_ctx_t ctx);
#endif

/*** Other ***/
static void	 ixl_save_pf_tunables(struct ixl_pf *);
static int	 ixl_allocate_pci_resources(struct ixl_pf *);
static void	 ixl_setup_ssctx(struct ixl_pf *pf);
static void	 ixl_admin_timer(void *arg);

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixl_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, ixl_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
#ifdef PCI_IOV
	DEVMETHOD(pci_iov_init, iflib_device_iov_init),
	DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
#endif
	DEVMETHOD_END
};

static driver_t ixl_driver = {
	"ixl", ixl_methods, sizeof(struct ixl_pf),
};

DRIVER_MODULE(ixl, pci, ixl_driver, 0, 0);
IFLIB_PNP_INFO(pci, ixl, ixl_vendor_info_array);
MODULE_VERSION(ixl, 3);

MODULE_DEPEND(ixl, pci, 1, 1, 1);
MODULE_DEPEND(ixl, ether, 1, 1, 1);
MODULE_DEPEND(ixl, iflib, 1, 1, 1);
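/*
 * The device methods above all forward into iflib; iflib then drives the
 * driver through the ifdi_* methods registered below.  IFLIB_PNP_INFO()
 * exports the vendor/device ID table so devmatch(8) can autoload the ixl
 * module when a matching adapter is present.
 */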
static device_method_t ixl_if_methods[] = {
	DEVMETHOD(ifdi_attach_pre, ixl_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, ixl_if_attach_post),
	DEVMETHOD(ifdi_detach, ixl_if_detach),
	DEVMETHOD(ifdi_shutdown, ixl_if_shutdown),
	DEVMETHOD(ifdi_suspend, ixl_if_suspend),
	DEVMETHOD(ifdi_resume, ixl_if_resume),
	DEVMETHOD(ifdi_init, ixl_if_init),
	DEVMETHOD(ifdi_stop, ixl_if_stop),
	DEVMETHOD(ifdi_msix_intr_assign, ixl_if_msix_intr_assign),
	DEVMETHOD(ifdi_intr_enable, ixl_if_enable_intr),
	DEVMETHOD(ifdi_intr_disable, ixl_if_disable_intr),
	DEVMETHOD(ifdi_rx_queue_intr_enable, ixl_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queue_intr_enable, ixl_if_tx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queues_alloc, ixl_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, ixl_if_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, ixl_if_queues_free),
	DEVMETHOD(ifdi_update_admin_status, ixl_if_update_admin_status),
	DEVMETHOD(ifdi_multi_set, ixl_if_multi_set),
	DEVMETHOD(ifdi_mtu_set, ixl_if_mtu_set),
	DEVMETHOD(ifdi_media_status, ixl_if_media_status),
	DEVMETHOD(ifdi_media_change, ixl_if_media_change),
	DEVMETHOD(ifdi_promisc_set, ixl_if_promisc_set),
	DEVMETHOD(ifdi_timer, ixl_if_timer),
	DEVMETHOD(ifdi_vlan_register, ixl_if_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister, ixl_if_vlan_unregister),
	DEVMETHOD(ifdi_get_counter, ixl_if_get_counter),
	DEVMETHOD(ifdi_i2c_req, ixl_if_i2c_req),
	DEVMETHOD(ifdi_priv_ioctl, ixl_if_priv_ioctl),
	DEVMETHOD(ifdi_needs_restart, ixl_if_needs_restart),
#ifdef PCI_IOV
	DEVMETHOD(ifdi_iov_init, ixl_if_iov_init),
	DEVMETHOD(ifdi_iov_uninit, ixl_if_iov_uninit),
	DEVMETHOD(ifdi_iov_vf_add, ixl_if_iov_vf_add),
	DEVMETHOD(ifdi_vflr_handle, ixl_if_vflr_handle),
#endif
	// ifdi_led_func
	// ifdi_debug
	DEVMETHOD_END
};

static driver_t ixl_if_driver = {
	"ixl_if", ixl_if_methods, sizeof(struct ixl_pf)
};
/*
** TUNEABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "ixl driver parameters");

#ifdef IXL_DEBUG_FC
/*
 * Leave this on unless you need to send flow control
 * frames (or other control frames) from software
 */
static int ixl_enable_tx_fc_filter = 1;
TUNABLE_INT("hw.ixl.enable_tx_fc_filter",
    &ixl_enable_tx_fc_filter);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_tx_fc_filter, CTLFLAG_RDTUN,
    &ixl_enable_tx_fc_filter, 0,
    "Filter out packets with Ethertype 0x8808 from being sent out by non-HW sources");
#endif

#ifdef IXL_DEBUG
static int ixl_debug_recovery_mode = 0;
TUNABLE_INT("hw.ixl.debug_recovery_mode",
    &ixl_debug_recovery_mode);
SYSCTL_INT(_hw_ixl, OID_AUTO, debug_recovery_mode, CTLFLAG_RDTUN,
    &ixl_debug_recovery_mode, 0,
    "Act as if the FW entered recovery mode (for debugging)");
#endif

static int ixl_i2c_access_method = 0;
TUNABLE_INT("hw.ixl.i2c_access_method",
    &ixl_i2c_access_method);
SYSCTL_INT(_hw_ixl, OID_AUTO, i2c_access_method, CTLFLAG_RDTUN,
    &ixl_i2c_access_method, 0,
    IXL_SYSCTL_HELP_I2C_METHOD);

static int ixl_enable_vf_loopback = 1;
TUNABLE_INT("hw.ixl.enable_vf_loopback",
    &ixl_enable_vf_loopback);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_vf_loopback, CTLFLAG_RDTUN,
    &ixl_enable_vf_loopback, 0,
    IXL_SYSCTL_HELP_VF_LOOPBACK);

/*
 * Different method for processing TX descriptor
 * completion.
 */
static int ixl_enable_head_writeback = 1;
TUNABLE_INT("hw.ixl.enable_head_writeback",
    &ixl_enable_head_writeback);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN,
    &ixl_enable_head_writeback, 0,
    "For detecting last completed TX descriptor by hardware, use value written by HW instead of checking descriptors");

static int ixl_core_debug_mask = 0;
TUNABLE_INT("hw.ixl.core_debug_mask",
    &ixl_core_debug_mask);
SYSCTL_INT(_hw_ixl, OID_AUTO, core_debug_mask, CTLFLAG_RDTUN,
    &ixl_core_debug_mask, 0,
    "Display debug statements that are printed in non-shared code");

static int ixl_shared_debug_mask = 0;
TUNABLE_INT("hw.ixl.shared_debug_mask",
    &ixl_shared_debug_mask);
SYSCTL_INT(_hw_ixl, OID_AUTO, shared_debug_mask, CTLFLAG_RDTUN,
    &ixl_shared_debug_mask, 0,
    "Display debug statements that are printed in shared code");

#if 0
/*
** Controls for Interrupt Throttling
**	- true/false for dynamic adjustment
**	- default values for static ITR
*/
static int ixl_dynamic_rx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");

static int ixl_dynamic_tx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
#endif

static int ixl_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
    &ixl_rx_itr, 0, "RX Interrupt Rate");

static int ixl_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
    &ixl_tx_itr, 0, "TX Interrupt Rate");

static int ixl_flow_control = -1;
SYSCTL_INT(_hw_ixl, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    &ixl_flow_control, 0, "Initial Flow Control setting");

#ifdef IXL_IW
int ixl_enable_iwarp = 0;
TUNABLE_INT("hw.ixl.enable_iwarp", &ixl_enable_iwarp);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_iwarp, CTLFLAG_RDTUN,
    &ixl_enable_iwarp, 0, "iWARP enabled");

int ixl_limit_iwarp_msix = IXL_IW_MAX_MSIX;
TUNABLE_INT("hw.ixl.limit_iwarp_msix", &ixl_limit_iwarp_msix);
SYSCTL_INT(_hw_ixl, OID_AUTO, limit_iwarp_msix, CTLFLAG_RDTUN,
    &ixl_limit_iwarp_msix, 0, "Limit MSI-X vectors assigned to iWARP");
#endif
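/*
 * All of the above are CTLFLAG_RDTUN sysctls: read-only at runtime, so
 * they must be set as boot-time tunables, e.g. in /boot/loader.conf
 * (example values for illustration only; the defaults above apply when
 * a tunable is left unset):
 *
 *	hw.ixl.enable_head_writeback="0"
 *	hw.ixl.core_debug_mask="1"
 */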
extern struct if_txrx ixl_txrx_hwb;
extern struct if_txrx ixl_txrx_dwb;

static struct if_shared_ctx ixl_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_q_align = PAGE_SIZE,
	.isc_tx_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
	.isc_tso_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
	.isc_rx_maxsize = 16384,
	.isc_rx_nsegments = IXL_MAX_RX_SEGS,
	.isc_rx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
	.isc_nfl = 1,
	.isc_ntxqs = 1,
	.isc_nrxqs = 1,

	.isc_admin_intrcnt = 1,
	.isc_vendor_info = ixl_vendor_info_array,
	.isc_driver_version = IXL_DRIVER_VERSION_STRING,
	.isc_driver = &ixl_if_driver,
	.isc_flags = IFLIB_NEED_SCRATCH | IFLIB_NEED_ZERO_CSUM | IFLIB_TSO_INIT_IP | IFLIB_ADMIN_ALWAYS_RUN,

	.isc_nrxd_min = {IXL_MIN_RING},
	.isc_ntxd_min = {IXL_MIN_RING},
	.isc_nrxd_max = {IXL_MAX_RING},
	.isc_ntxd_max = {IXL_MAX_RING},
	.isc_nrxd_default = {IXL_DEFAULT_RING},
	.isc_ntxd_default = {IXL_DEFAULT_RING},
};

/*** Functions ***/
static void *
ixl_register(device_t dev)
{
	return (&ixl_sctx_init);
}
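/*
 * ixl_txrx_hwb and ixl_txrx_dwb (declared above) are two alternative
 * TX/RX method tables: "hwb" detects TX completion from the head index
 * the hardware writes back to host memory, while "dwb" checks the
 * descriptor write-back status.  ixl_setup_ssctx() below selects one
 * based on the hw.ixl.enable_head_writeback tunable.
 */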
static int
ixl_allocate_pci_resources(struct ixl_pf *pf)
{
	device_t dev = iflib_get_dev(pf->vsi.ctx);
	struct i40e_hw *hw = &pf->hw;
	int rid;

	/* Map BAR0 */
	rid = PCIR_BAR(0);
	pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);

	if (!(pf->pci_mem)) {
		device_printf(dev, "Unable to allocate bus resource: PCI memory\n");
		return (ENXIO);
	}

	/* Save off the PCI information */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
	hw->subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	hw->subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	hw->bus.device = pci_get_slot(dev);
	hw->bus.func = pci_get_function(dev);

	/* Save off register access information */
	pf->osdep.mem_bus_space_tag =
	    rman_get_bustag(pf->pci_mem);
	pf->osdep.mem_bus_space_handle =
	    rman_get_bushandle(pf->pci_mem);
	pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
	pf->osdep.flush_reg = I40E_GLGEN_STAT;
	pf->osdep.dev = dev;

	pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
	pf->hw.back = &pf->osdep;

	return (0);
}

static void
ixl_setup_ssctx(struct ixl_pf *pf)
{
	if_softc_ctx_t scctx = pf->vsi.shared;
	struct i40e_hw *hw = &pf->hw;

	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 1;
		scctx->isc_ntxqsets = scctx->isc_nrxqsets = 1;
	} else if (hw->mac.type == I40E_MAC_X722)
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 128;
	else
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;

	if (pf->vsi.enable_head_writeback) {
		scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
		    * sizeof(struct i40e_tx_desc) + sizeof(u32), DBA_ALIGN);
		scctx->isc_txrx = &ixl_txrx_hwb;
	} else {
		scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
		    * sizeof(struct i40e_tx_desc), DBA_ALIGN);
		scctx->isc_txrx = &ixl_txrx_dwb;
	}

	scctx->isc_txrx->ift_legacy_intr = ixl_intr;
	scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0]
	    * sizeof(union i40e_32byte_rx_desc), DBA_ALIGN);
	scctx->isc_msix_bar = PCIR_BAR(IXL_MSIX_BAR);
	scctx->isc_tx_nsegments = IXL_MAX_TX_SEGS;
	scctx->isc_tx_tso_segments_max = IXL_MAX_TSO_SEGS;
	scctx->isc_tx_tso_size_max = IXL_TSO_SIZE;
	scctx->isc_tx_tso_segsize_max = IXL_MAX_DMA_SEG_SIZE;
	scctx->isc_rss_table_size = pf->hw.func_caps.rss_table_size;
	scctx->isc_tx_csum_flags = CSUM_OFFLOAD;
	scctx->isc_capabilities = scctx->isc_capenable = IXL_CAPS;
}
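/*
 * Ring sizing in ixl_setup_ssctx() above: a TX ring is isc_ntxd[0]
 * descriptors (struct i40e_tx_desc) rounded up to DBA_ALIGN, and
 * head-writeback mode reserves one extra u32 at the end of the ring for
 * the head index the hardware writes back.  RX rings use the 32-byte
 * descriptor format (union i40e_32byte_rx_desc).
 */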
static void
ixl_admin_timer(void *arg)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg;

	if (ixl_test_state(&pf->state, IXL_STATE_LINK_POLLING)) {
		struct i40e_hw *hw = &pf->hw;
		sbintime_t stime;
		enum i40e_status_code status;

		hw->phy.get_link_info = TRUE;
		status = i40e_get_link_status(hw, &pf->link_up);
		if (status == I40E_SUCCESS) {
			ixl_clear_state(&pf->state, IXL_STATE_LINK_POLLING);
			/* OS link info is updated in the admin task */
		} else {
			device_printf(pf->dev,
			    "%s: i40e_get_link_status status %s, aq error %s\n",
			    __func__, i40e_stat_str(hw, status),
			    i40e_aq_str(hw, hw->aq.asq_last_status));
			stime = getsbinuptime();
			if (stime - pf->link_poll_start > IXL_PF_MAX_LINK_POLL) {
				device_printf(pf->dev, "Polling link status failed\n");
				ixl_clear_state(&pf->state, IXL_STATE_LINK_POLLING);
			}
		}
	}

	/* Fire off the admin task */
	iflib_admin_intr_deferred(pf->vsi.ctx);

	/* Reschedule the admin timer */
	callout_schedule(&pf->admin_timer, hz/2);
}

static int
ixl_attach_pre_recovery_mode(struct ixl_pf *pf)
{
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;

	device_printf(dev, "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");

	i40e_get_mac_addr(hw, hw->mac.addr);

	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
		ixl_configure_intr0_msix(pf);
		ixl_enable_intr0(hw);
	}

	ixl_setup_ssctx(pf);

	return (0);
}
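/*
 * The admin timer above fires every hz/2 ticks (twice a second).  While
 * IXL_STATE_LINK_POLLING is set it queries link status directly, since FW
 * may not have finished configuring the PHY yet; otherwise it only defers
 * to the iflib admin task, which drains the Admin Queue and updates link
 * state.
 */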
static int
ixl_if_attach_pre(if_ctx_t ctx)
{
	device_t dev;
	struct ixl_pf *pf;
	struct i40e_hw *hw;
	struct ixl_vsi *vsi;
	enum i40e_get_fw_lldp_status_resp lldp_status;
	struct i40e_filter_control_settings filter;
	enum i40e_status_code status;
	int error = 0;

	dev = iflib_get_dev(ctx);
	pf = iflib_get_softc(ctx);

	INIT_DBG_DEV(dev, "begin");

	vsi = &pf->vsi;
	vsi->back = pf;
	pf->dev = dev;
	hw = &pf->hw;

	vsi->dev = dev;
	vsi->hw = &pf->hw;
	vsi->id = 0;
	vsi->num_vlans = 0;
	vsi->ctx = ctx;
	vsi->media = iflib_get_media(ctx);
	vsi->shared = iflib_get_softc_ctx(ctx);

	snprintf(pf->admin_mtx_name, sizeof(pf->admin_mtx_name),
	    "%s:admin", device_get_nameunit(dev));
	mtx_init(&pf->admin_mtx, pf->admin_mtx_name, NULL, MTX_DEF);
	callout_init_mtx(&pf->admin_timer, &pf->admin_mtx, 0);

	/* Save tunable values */
	ixl_save_pf_tunables(pf);

	/* Do PCI setup - map BAR0, etc */
	if (ixl_allocate_pci_resources(pf)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_pci_res;
	}

	/* Establish a clean starting point */
	i40e_clear_hw(hw);
	i40e_set_mac_type(hw);

	error = ixl_pf_reset(pf);
	if (error)
		goto err_out;

	/* Initialize the shared code */
	status = i40e_init_shared_code(hw);
	if (status) {
		device_printf(dev, "Unable to initialize shared code, error %s\n",
		    i40e_stat_str(hw, status));
		error = EIO;
		goto err_out;
	}

	/* Set up the admin queue */
	hw->aq.num_arq_entries = IXL_AQ_LEN;
	hw->aq.num_asq_entries = IXL_AQ_LEN;
	hw->aq.arq_buf_size = IXL_AQ_BUF_SZ;
	hw->aq.asq_buf_size = IXL_AQ_BUF_SZ;

	status = i40e_init_adminq(hw);
	if (status != 0 && status != I40E_ERR_FIRMWARE_API_VERSION) {
		device_printf(dev, "Unable to initialize Admin Queue, error %s\n",
		    i40e_stat_str(hw, status));
		error = EIO;
		goto err_out;
	}
	ixl_print_nvm_version(pf);

	if (status == I40E_ERR_FIRMWARE_API_VERSION) {
		device_printf(dev, "The driver for the device stopped "
		    "because the NVM image is newer than expected.\n");
		device_printf(dev, "You must install the most recent version of "
		    "the network driver.\n");
		error = EIO;
		goto err_out;
	}

	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw)) {
		device_printf(dev, "The driver for the device detected "
		    "a newer version of the NVM image than expected.\n");
		device_printf(dev, "Please install the most recent version "
		    "of the network driver.\n");
	} else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4) {
		device_printf(dev, "The driver for the device detected "
		    "an older version of the NVM image than expected.\n");
		device_printf(dev, "Please update the NVM image.\n");
	}

	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
		error = ixl_attach_pre_recovery_mode(pf);
		if (error)
			goto err_out;
		return (error);
	}

	/* Clear PXE mode */
	i40e_clear_pxe_mode(hw);

	/* Get capabilities from the device */
	error = ixl_get_hw_capabilities(pf);
	if (error) {
		device_printf(dev, "get_hw_capabilities failed: %d\n",
		    error);
		goto err_get_cap;
	}

	/* Set up host memory cache */
	error = ixl_setup_hmc(pf);
	if (error)
		goto err_mac_hmc;

	/* Disable LLDP from the firmware for certain NVM versions */
	if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
	    (pf->hw.aq.fw_maj_ver < 4)) {
		i40e_aq_stop_lldp(hw, true, false, NULL);
		ixl_set_state(&pf->state, IXL_STATE_FW_LLDP_DISABLED);
	}

	/* Try enabling Energy Efficient Ethernet (EEE) mode */
	if (i40e_enable_eee(hw, true) == I40E_SUCCESS)
		ixl_set_state(&pf->state, IXL_STATE_EEE_ENABLED);
	else
		ixl_clear_state(&pf->state, IXL_STATE_EEE_ENABLED);

	/* Get MAC addresses from hardware */
	i40e_get_mac_addr(hw, hw->mac.addr);
	error = i40e_validate_mac_addr(hw->mac.addr);
	if (error) {
		device_printf(dev, "validate_mac_addr failed: %d\n", error);
		goto err_mac_hmc;
	}
	bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
	iflib_set_mac(ctx, hw->mac.addr);
	i40e_get_port_mac_addr(hw, hw->mac.port_addr);

	/* Set up the device filtering */
	bzero(&filter, sizeof(filter));
	filter.enable_ethtype = TRUE;
	filter.enable_macvlan = TRUE;
	filter.enable_fdir = FALSE;
	filter.hash_lut_size = I40E_HASH_LUT_SIZE_512;
	if (i40e_set_filter_control(hw, &filter))
		device_printf(dev, "i40e_set_filter_control() failed\n");

	/* Query device FW LLDP status */
	if (i40e_get_fw_lldp_status(hw, &lldp_status) == I40E_SUCCESS) {
		if (lldp_status == I40E_GET_FW_LLDP_STATUS_DISABLED) {
			ixl_set_state(&pf->state,
			    IXL_STATE_FW_LLDP_DISABLED);
		} else {
			ixl_clear_state(&pf->state,
			    IXL_STATE_FW_LLDP_DISABLED);
		}
	}

	/* Tell FW to apply DCB config on link up */
	i40e_aq_set_dcb_parameters(hw, true, NULL);

	/* Fill out iflib parameters */
	ixl_setup_ssctx(pf);

	INIT_DBG_DEV(dev, "end");
	return (0);

err_mac_hmc:
	ixl_shutdown_hmc(pf);
err_get_cap:
	i40e_shutdown_adminq(hw);
err_out:
	ixl_free_pci_resources(pf);
err_pci_res:
	mtx_lock(&pf->admin_mtx);
	callout_stop(&pf->admin_timer);
	mtx_unlock(&pf->admin_mtx);
	mtx_destroy(&pf->admin_mtx);
	return (error);
}
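/*
 * Note the unwind order of the error labels above: HMC shutdown, then
 * Admin Queue shutdown, then PCI resource release, then admin
 * callout/mutex teardown.  Each label falls through to the next, so
 * later setup steps unwind all of the earlier ones.
 */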
static int
ixl_if_attach_post(if_ctx_t ctx)
{
	device_t dev;
	struct ixl_pf *pf;
	struct i40e_hw *hw;
	struct ixl_vsi *vsi;
	int error = 0;
	enum i40e_status_code status;

	dev = iflib_get_dev(ctx);
	pf = iflib_get_softc(ctx);

	INIT_DBG_DEV(dev, "begin");

	vsi = &pf->vsi;
	vsi->ifp = iflib_get_ifp(ctx);
	hw = &pf->hw;

	/* Save off determined number of queues for interface */
	vsi->num_rx_queues = vsi->shared->isc_nrxqsets;
	vsi->num_tx_queues = vsi->shared->isc_ntxqsets;

	/* Setup OS network interface / ifnet */
	if (ixl_setup_interface(dev, pf)) {
		device_printf(dev, "interface setup failed!\n");
		error = EIO;
		goto err;
	}

	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
		/* Keep admin queue interrupts active while driver is loaded */
		if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
			ixl_configure_intr0_msix(pf);
			ixl_enable_intr0(hw);
		}

		ixl_add_sysctls_recovery_mode(pf);

		/* Start the admin timer */
		mtx_lock(&pf->admin_mtx);
		callout_reset(&pf->admin_timer, hz/2, ixl_admin_timer, pf);
		mtx_unlock(&pf->admin_mtx);
		return (0);
	}

	error = ixl_switch_config(pf);
	if (error) {
		device_printf(dev, "Initial ixl_switch_config() failed: %d\n",
		    error);
		goto err;
	}

	/* Add protocol filters to list */
	ixl_init_filters(vsi);

	/* Init queue allocation manager */
	error = ixl_pf_qmgr_init(&pf->qmgr, hw->func_caps.num_tx_qp);
	if (error) {
		device_printf(dev, "Failed to init queue manager for PF queues, error %d\n",
		    error);
		goto err;
	}
	/* reserve a contiguous allocation for the PF's VSI */
	error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr,
	    max(vsi->num_rx_queues, vsi->num_tx_queues), &pf->qtag);
	if (error) {
		device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
		    error);
		goto err;
	}
	device_printf(dev, "Allocating %d queues for PF LAN VSI; %d queues active\n",
	    pf->qtag.num_allocated, pf->qtag.num_active);

	/* Determine link state */
	error = ixl_attach_get_link_status(pf);
	if (error == EINVAL)
		goto err;

	/* Limit PHY interrupts to link, autoneg, and modules failure */
	status = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
	    NULL);
	if (status) {
		device_printf(dev, "i40e_aq_set_phy_mask() failed: err %s,"
		    " aq_err %s\n", i40e_stat_str(hw, status),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		goto err;
	}

	/* Get the bus configuration and set the shared code */
	ixl_get_bus_info(pf);

	/* Keep admin queue interrupts active while driver is loaded */
	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
		ixl_configure_intr0_msix(pf);
		ixl_enable_intr0(hw);
	}

	/* Set initial advertised speed sysctl value */
	ixl_set_initial_advertised_speeds(pf);

	/* Initialize statistics & add sysctls */
	ixl_add_device_sysctls(pf);
	ixl_pf_reset_stats(pf);
	ixl_update_stats_counters(pf);
	ixl_add_hw_stats(pf);

	/*
	 * Driver may have been reloaded. Ensure that the link state
	 * is consistent with current settings.
	 */
	ixl_set_link(pf, ixl_test_state(&pf->state, IXL_STATE_LINK_ACTIVE_ON_DOWN));

	hw->phy.get_link_info = true;
	status = i40e_get_link_status(hw, &pf->link_up);
	if (status != I40E_SUCCESS) {
		device_printf(dev,
		    "%s get link status, status: %s aq_err=%s\n",
		    __func__, i40e_stat_str(hw, status),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		/*
		 * Most probably FW has not finished configuring PHY.
		 * Retry periodically in a timer callback.
		 */
		ixl_set_state(&pf->state, IXL_STATE_LINK_POLLING);
		pf->link_poll_start = getsbinuptime();
	} else
		ixl_update_link_status(pf);

#ifdef PCI_IOV
	ixl_initialize_sriov(pf);
#endif

#ifdef IXL_IW
	if (hw->func_caps.iwarp && ixl_enable_iwarp) {
		pf->iw_enabled = (pf->iw_msix > 0) ? true : false;
		if (pf->iw_enabled) {
			error = ixl_iw_pf_attach(pf);
			if (error) {
				device_printf(dev,
				    "interfacing to iWARP driver failed: %d\n",
				    error);
				goto err;
			} else
				device_printf(dev, "iWARP ready\n");
		} else
			device_printf(dev, "iWARP disabled on this device "
			    "(no MSI-X vectors)\n");
	} else {
		pf->iw_enabled = false;
		device_printf(dev, "The device is not iWARP enabled\n");
	}
#endif
	/* Start the admin timer */
	mtx_lock(&pf->admin_mtx);
	callout_reset(&pf->admin_timer, hz/2, ixl_admin_timer, pf);
	mtx_unlock(&pf->admin_mtx);

	INIT_DBG_DEV(dev, "end");
	return (0);

err:
	INIT_DEBUGOUT("end: error %d", error);
	/* ixl_if_detach() is called on error from this */
	return (error);
}
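/*
 * In recovery mode ixl_if_attach_post() returns early: only the Admin
 * Queue interrupt, the recovery-mode sysctls, and the admin timer are set
 * up, keeping the driver loaded just far enough that, for example, an NVM
 * update can still be performed through the private ioctl interface.
 */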
/**
 * XXX: iflib always ignores the return value of detach()
 * -> This means that this isn't allowed to fail
 */
static int
ixl_if_detach(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	enum i40e_status_code status;
#ifdef IXL_IW
	int error;
#endif

	INIT_DBG_DEV(dev, "begin");

	/* Stop the admin timer */
	mtx_lock(&pf->admin_mtx);
	callout_stop(&pf->admin_timer);
	mtx_unlock(&pf->admin_mtx);
	mtx_destroy(&pf->admin_mtx);

#ifdef IXL_IW
	if (ixl_enable_iwarp && pf->iw_enabled) {
		error = ixl_iw_pf_detach(pf);
		if (error == EBUSY) {
			device_printf(dev, "iwarp in use; stop it first.\n");
			//return (error);
		}
	}
#endif
	/* Remove all previously allocated media types */
	ifmedia_removeall(vsi->media);

	/* Shutdown LAN HMC */
	ixl_shutdown_hmc(pf);

	/* Shutdown admin queue */
	ixl_disable_intr0(hw);
	status = i40e_shutdown_adminq(hw);
	if (status)
		device_printf(dev,
		    "i40e_shutdown_adminq() failed with status %s\n",
		    i40e_stat_str(hw, status));

	ixl_pf_qmgr_destroy(&pf->qmgr);
	ixl_free_pci_resources(pf);
	ixl_free_filters(&vsi->ftl);
	INIT_DBG_DEV(dev, "end");
	return (0);
}

static int
ixl_if_shutdown(if_ctx_t ctx)
{
	int error = 0;

	INIT_DEBUGOUT("ixl_if_shutdown: begin");

	/* TODO: Call ixl_if_stop()? */

	/* TODO: Then setup low power mode */

	return (error);
}

static int
ixl_if_suspend(if_ctx_t ctx)
{
	int error = 0;

	INIT_DEBUGOUT("ixl_if_suspend: begin");

	/* TODO: Call ixl_if_stop()? */

	/* TODO: Then setup low power mode */

	return (error);
}

static int
ixl_if_resume(if_ctx_t ctx)
{
	if_t ifp = iflib_get_ifp(ctx);

	INIT_DEBUGOUT("ixl_if_resume: begin");

	/* Read & clear wake-up registers */

	/* Required after D3->D0 transition */
	if (if_getflags(ifp) & IFF_UP)
		ixl_if_init(ctx);

	return (0);
}
void
ixl_if_init(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = &pf->hw;
	if_t ifp = iflib_get_ifp(ctx);
	device_t dev = iflib_get_dev(ctx);
	u8 tmpaddr[ETHER_ADDR_LEN];
	int ret;

	if (IXL_PF_IN_RECOVERY_MODE(pf))
		return;
	/*
	 * If the aq is dead here, it probably means something outside of the driver
	 * did something to the adapter, like a PF reset.
	 * So, rebuild the driver's state here if that occurs.
	 */
	if (!i40e_check_asq_alive(&pf->hw)) {
		device_printf(dev, "Admin Queue is down; resetting...\n");
		ixl_teardown_hw_structs(pf);
		ixl_rebuild_hw_structs_after_reset(pf, false);
	}

	/* Get the latest mac address... User might use a LAA */
	bcopy(if_getlladdr(vsi->ifp), tmpaddr, ETH_ALEN);
	if (!ixl_ether_is_equal(hw->mac.addr, tmpaddr) &&
	    (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
		ixl_del_all_vlan_filters(vsi, hw->mac.addr);
		bcopy(tmpaddr, hw->mac.addr, ETH_ALEN);
		ret = i40e_aq_mac_address_write(hw,
		    I40E_AQC_WRITE_TYPE_LAA_ONLY,
		    hw->mac.addr, NULL);
		if (ret) {
			device_printf(dev, "LLA address change failed!!\n");
			return;
		}
		/*
		 * New filters are configured by ixl_reconfigure_filters
		 * at the end of this function.
		 */
	}

	iflib_set_mac(ctx, hw->mac.addr);

	/* Prepare the VSI: rings, hmc contexts, etc... */
	if (ixl_initialize_vsi(vsi)) {
		device_printf(dev, "initialize vsi failed!!\n");
		return;
	}

	ixl_set_link(pf, true);

	/* Reconfigure multicast filters in HW */
	ixl_if_multi_set(ctx);

	/* Set up RSS */
	ixl_config_rss(pf);

	/* Set up MSI-X routing and the ITR settings */
	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
		ixl_configure_queue_intr_msix(pf);
		ixl_configure_itr(pf);
	} else
		ixl_configure_legacy(pf);

	if (vsi->enable_head_writeback)
		ixl_init_tx_cidx(vsi);
	else
		ixl_init_tx_rsqs(vsi);

	ixl_enable_rings(vsi);

	i40e_aq_set_default_vsi(hw, vsi->seid, NULL);

	/* Re-add configure filters to HW */
	ixl_reconfigure_filters(vsi);

	/* Configure promiscuous mode */
	ixl_if_promisc_set(ctx, if_getflags(ifp));

#ifdef IXL_IW
	if (ixl_enable_iwarp && pf->iw_enabled) {
		ret = ixl_iw_pf_init(pf);
		if (ret)
			device_printf(dev,
			    "initialize iwarp failed, code %d\n", ret);
	}
#endif
}

void
ixl_if_stop(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	if_t ifp = iflib_get_ifp(ctx);
	struct ixl_vsi *vsi = &pf->vsi;

	INIT_DEBUGOUT("ixl_if_stop: begin\n");

	if (IXL_PF_IN_RECOVERY_MODE(pf))
		return;

	// TODO: This may need to be reworked
#ifdef IXL_IW
	/* Stop iWARP device */
	if (ixl_enable_iwarp && pf->iw_enabled)
		ixl_iw_pf_stop(pf);
#endif

	ixl_disable_rings_intr(vsi);
	ixl_disable_rings(pf, vsi, &pf->qtag);

	/*
	 * Don't set link state if only reconfiguring,
	 * e.g. on MTU change.
	 */
	if ((if_getflags(ifp) & IFF_UP) == 0 &&
	    !ixl_test_state(&pf->state, IXL_STATE_LINK_ACTIVE_ON_DOWN))
		ixl_set_link(pf, false);
}
static int
ixl_if_msix_intr_assign(if_ctx_t ctx, int msix)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_rx_queue *rx_que = vsi->rx_queues;
	struct ixl_tx_queue *tx_que = vsi->tx_queues;
	int err, i, rid, vector = 0;
	char buf[16];

	MPASS(vsi->shared->isc_nrxqsets > 0);
	MPASS(vsi->shared->isc_ntxqsets > 0);

	/* Admin Queue must use vector 0 */
	rid = vector + 1;
	err = iflib_irq_alloc_generic(ctx, &vsi->irq, rid, IFLIB_INTR_ADMIN,
	    ixl_msix_adminq, pf, 0, "aq");
	if (err) {
		iflib_irq_free(ctx, &vsi->irq);
		device_printf(iflib_get_dev(ctx),
		    "Failed to register Admin Queue handler\n");
		return (err);
	}

#ifdef PCI_IOV
	/* Create soft IRQ for handling VFLRs */
	iflib_softirq_alloc_generic(ctx, NULL, IFLIB_INTR_IOV, pf, 0, "iov");
#endif

	/* Now set up the stations */
	for (i = 0, vector = 1; i < vsi->shared->isc_nrxqsets; i++, vector++, rx_que++) {
		rid = vector + 1;

		snprintf(buf, sizeof(buf), "rxq%d", i);
		err = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
		    IFLIB_INTR_RXTX, ixl_msix_que, rx_que, rx_que->rxr.me, buf);
		/* XXX: Does the driver work as expected if there are fewer num_rx_queues than
		 * what's expected in the iflib context? */
		if (err) {
			device_printf(iflib_get_dev(ctx),
			    "Failed to allocate queue RX int vector %d, err: %d\n", i, err);
			vsi->num_rx_queues = i + 1;
			goto fail;
		}
		rx_que->msix = vector;
	}

	bzero(buf, sizeof(buf));

	for (i = 0; i < vsi->shared->isc_ntxqsets; i++, tx_que++) {
		snprintf(buf, sizeof(buf), "txq%d", i);
		iflib_softirq_alloc_generic(ctx,
		    &vsi->rx_queues[i % vsi->shared->isc_nrxqsets].que_irq,
		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);

		/* TODO: Maybe call a strategy function for this to figure out which
		 * interrupts to map Tx queues to. I don't know if there's an immediately
		 * better way than this other than a user-supplied map, though. */
		tx_que->msix = (i % vsi->shared->isc_nrxqsets) + 1;
	}

	return (0);
fail:
	iflib_irq_free(ctx, &vsi->irq);
	rx_que = vsi->rx_queues;
	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
		iflib_irq_free(ctx, &rx_que->que_irq);
	return (err);
}

/*
 * Enable all interrupts
 *
 * Called in:
 * iflib_init_locked, after ixl_if_init()
 */
static void
ixl_if_enable_intr(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = vsi->hw;
	struct ixl_rx_queue *que = vsi->rx_queues;

	ixl_enable_intr0(hw);
	/* Enable queue interrupts */
	for (int i = 0; i < vsi->num_rx_queues; i++, que++)
		/* TODO: Queue index parameter is probably wrong */
		ixl_enable_queue(hw, que->rxr.me);
}

/*
 * Disable queue interrupts
 *
 * Other interrupt causes need to remain active.
 */
static void
ixl_if_disable_intr(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = vsi->hw;
	struct ixl_rx_queue *rx_que = vsi->rx_queues;

	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
		for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
			ixl_disable_queue(hw, rx_que->msix - 1);
	} else {
		// Set PFINT_LNKLST0 FIRSTQ_INDX to 0x7FF
		// stops queues from triggering interrupts
		wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
	}
}
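/*
 * MSI-X layout used above: vector 0 services the Admin Queue, RX queue i
 * uses vector i + 1, and each TX queue shares the interrupt of RX queue
 * (i % nrxqsets).  The bus resource ID is always vector + 1, since MSI-X
 * resource IDs are 1-based on FreeBSD.
 */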
static int
ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = vsi->hw;
	struct ixl_rx_queue *rx_que = &vsi->rx_queues[rxqid];

	ixl_enable_queue(hw, rx_que->msix - 1);
	return (0);
}

static int
ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = vsi->hw;
	struct ixl_tx_queue *tx_que = &vsi->tx_queues[txqid];

	ixl_enable_queue(hw, tx_que->msix - 1);
	return (0);
}

static int
ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	if_softc_ctx_t scctx = vsi->shared;
	struct ixl_tx_queue *que;
	int i, j, error = 0;

	MPASS(scctx->isc_ntxqsets > 0);
	MPASS(ntxqs == 1);
	MPASS(scctx->isc_ntxqsets == ntxqsets);

	/* Allocate queue structure memory */
	if (!(vsi->tx_queues =
	    (struct ixl_tx_queue *) malloc(sizeof(struct ixl_tx_queue) * ntxqsets, M_IXL, M_NOWAIT | M_ZERO))) {
		device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n");
		return (ENOMEM);
	}

	for (i = 0, que = vsi->tx_queues; i < ntxqsets; i++, que++) {
		struct tx_ring *txr = &que->txr;

		txr->me = i;
		que->vsi = vsi;

		if (!vsi->enable_head_writeback) {
			/* Allocate report status array */
			if (!(txr->tx_rsq = malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXL, M_NOWAIT))) {
				device_printf(iflib_get_dev(ctx), "failed to allocate tx_rsq memory\n");
				error = ENOMEM;
				goto fail;
			}
			/* Init report status array */
			for (j = 0; j < scctx->isc_ntxd[0]; j++)
				txr->tx_rsq[j] = QIDX_INVALID;
		}
		/* get the virtual and physical address of the hardware queues */
		txr->tail = I40E_QTX_TAIL(txr->me);
		txr->tx_base = (struct i40e_tx_desc *)vaddrs[i * ntxqs];
		txr->tx_paddr = paddrs[i * ntxqs];
		txr->que = que;
	}

	return (0);
fail:
	ixl_if_queues_free(ctx);
	return (error);
}

static int
ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_rx_queue *que;
	int i, error = 0;

#ifdef INVARIANTS
	if_softc_ctx_t scctx = vsi->shared;
	MPASS(scctx->isc_nrxqsets > 0);
	MPASS(nrxqs == 1);
	MPASS(scctx->isc_nrxqsets == nrxqsets);
#endif

	/* Allocate queue structure memory */
	if (!(vsi->rx_queues =
	    (struct ixl_rx_queue *) malloc(sizeof(struct ixl_rx_queue) *
	    nrxqsets, M_IXL, M_NOWAIT | M_ZERO))) {
		device_printf(iflib_get_dev(ctx), "Unable to allocate RX ring memory\n");
		error = ENOMEM;
		goto fail;
	}

	for (i = 0, que = vsi->rx_queues; i < nrxqsets; i++, que++) {
		struct rx_ring *rxr = &que->rxr;

		rxr->me = i;
		que->vsi = vsi;

		/* get the virtual and physical address of the hardware queues */
		rxr->tail = I40E_QRX_TAIL(rxr->me);
		rxr->rx_base = (union i40e_rx_desc *)vaddrs[i * nrxqs];
		rxr->rx_paddr = paddrs[i * nrxqs];
		rxr->que = que;
	}

	return (0);
fail:
	ixl_if_queues_free(ctx);
	return (error);
}
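/*
 * tx_rsq above is the "report status queue": in descriptor write-back
 * mode it records, per software index, which descriptor slot must be
 * checked for completion status.  Head-writeback mode has no use for it,
 * which is why the array is only allocated when head writeback is
 * disabled.
 */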
static void
ixl_if_queues_free(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;

	if (vsi->tx_queues != NULL && !vsi->enable_head_writeback) {
		struct ixl_tx_queue *que;
		int i = 0;

		for (i = 0, que = vsi->tx_queues; i < vsi->num_tx_queues; i++, que++) {
			struct tx_ring *txr = &que->txr;
			if (txr->tx_rsq != NULL) {
				free(txr->tx_rsq, M_IXL);
				txr->tx_rsq = NULL;
			}
		}
	}

	if (vsi->tx_queues != NULL) {
		free(vsi->tx_queues, M_IXL);
		vsi->tx_queues = NULL;
	}
	if (vsi->rx_queues != NULL) {
		free(vsi->rx_queues, M_IXL);
		vsi->rx_queues = NULL;
	}

	if (!IXL_PF_IN_RECOVERY_MODE(pf))
		sysctl_ctx_free(&vsi->sysctl_ctx);
}

void
ixl_update_link_status(struct ixl_pf *pf)
{
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = &pf->hw;
	u64 baudrate;

	if (pf->link_up) {
		if (vsi->link_active == FALSE) {
			vsi->link_active = TRUE;
			baudrate = ixl_max_aq_speed_to_value(hw->phy.link_info.link_speed);
			iflib_link_state_change(vsi->ctx, LINK_STATE_UP, baudrate);
			ixl_link_up_msg(pf);
#ifdef PCI_IOV
			ixl_broadcast_link_state(pf);
#endif
		}
	} else { /* Link down */
		if (vsi->link_active == TRUE) {
			vsi->link_active = FALSE;
			iflib_link_state_change(vsi->ctx, LINK_STATE_DOWN, 0);
#ifdef PCI_IOV
			ixl_broadcast_link_state(pf);
#endif
		}
	}
}

static void
ixl_handle_lan_overflow_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
{
	device_t dev = pf->dev;
	u32 rxq_idx, qtx_ctl;

	rxq_idx = (e->desc.params.external.param0 & I40E_PRTDCB_RUPTQ_RXQNUM_MASK) >>
	    I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT;
	qtx_ctl = e->desc.params.external.param1;

	device_printf(dev, "LAN overflow event: global rxq_idx %d\n", rxq_idx);
	device_printf(dev, "LAN overflow event: QTX_CTL 0x%08x\n", qtx_ctl);
}

static int
ixl_process_adminq(struct ixl_pf *pf, u16 *pending)
{
	enum i40e_status_code status = I40E_SUCCESS;
	struct i40e_arq_event_info event;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	u16 opcode;
	u32 loop = 0, reg;

	event.buf_len = IXL_AQ_BUF_SZ;
	event.msg_buf = malloc(event.buf_len, M_IXL, M_NOWAIT | M_ZERO);
	if (!event.msg_buf) {
		device_printf(dev, "%s: Unable to allocate memory for Admin"
		    " Queue event!\n", __func__);
		return (ENOMEM);
	}

	/* clean and process any events */
	do {
		status = i40e_clean_arq_element(hw, &event, pending);
		if (status)
			break;
		opcode = LE16_TO_CPU(event.desc.opcode);
		ixl_dbg(pf, IXL_DBG_AQ,
		    "Admin Queue event: %#06x\n", opcode);
		switch (opcode) {
		case i40e_aqc_opc_get_link_status:
			ixl_link_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_pf:
#ifdef PCI_IOV
			ixl_handle_vf_msg(pf, &event);
#endif
			break;
		/*
		 * This should only occur on no-drop queues, which
		 * aren't currently configured.
		 */
		case i40e_aqc_opc_event_lan_overflow:
			ixl_handle_lan_overflow_event(pf, &event);
			break;
		default:
			break;
		}
	} while (*pending && (loop++ < IXL_ADM_LIMIT));

	free(event.msg_buf, M_IXL);

	/* Re-enable admin queue interrupt cause */
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	return (status);
}
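/*
 * ixl_process_adminq() above drains at most IXL_ADM_LIMIT events per
 * call.  If events remain, *pending is nonzero and
 * ixl_if_update_admin_status() below reschedules itself instead of
 * re-enabling the interrupt, so the queue is always drained without
 * monopolizing the admin task.
 */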
static void
ixl_if_update_admin_status(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct i40e_hw *hw = &pf->hw;
	u16 pending;

	if (IXL_PF_IS_RESETTING(pf))
		ixl_handle_empr_reset(pf);

	/*
	 * Admin Queue is shut down while handling reset.
	 * Don't proceed if it hasn't been re-initialized,
	 * e.g. due to an issue with new FW.
	 */
	if (!i40e_check_asq_alive(&pf->hw))
		return;

	if (ixl_test_state(&pf->state, IXL_STATE_MDD_PENDING))
		ixl_handle_mdd_event(pf);

	ixl_process_adminq(pf, &pending);
	ixl_update_link_status(pf);

	/*
	 * If there are still messages to process, reschedule ourselves.
	 * Otherwise, re-enable our interrupt and go to sleep.
	 */
	if (pending > 0)
		iflib_admin_intr_deferred(ctx);
	else
		ixl_enable_intr0(hw);
}

static void
ixl_if_multi_set(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = vsi->hw;
	int mcnt;

	IOCTL_DEBUGOUT("ixl_if_multi_set: begin");

	/* Delete filters for removed multicast addresses */
	ixl_del_multi(vsi, false);

	mcnt = min(if_llmaddr_count(iflib_get_ifp(ctx)), MAX_MULTICAST_ADDR);
	if (__predict_false(mcnt == MAX_MULTICAST_ADDR)) {
		i40e_aq_set_vsi_multicast_promiscuous(hw,
		    vsi->seid, TRUE, NULL);
		ixl_del_multi(vsi, true);
		return;
	}

	ixl_add_multi(vsi);
	IOCTL_DEBUGOUT("ixl_if_multi_set: end");
}

static int
ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;

	IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
	if (mtu > IXL_MAX_FRAME - ETHER_HDR_LEN - ETHER_CRC_LEN -
	    ETHER_VLAN_ENCAP_LEN)
		return (EINVAL);

	vsi->shared->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
	    ETHER_VLAN_ENCAP_LEN;

	return (0);
}
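/*
 * The MTU check above bounds the full frame size: isc_max_frame_size is
 * the MTU plus the Ethernet header (ETHER_HDR_LEN, 14 bytes), CRC (4)
 * and one VLAN tag (4), and that sum must not exceed IXL_MAX_FRAME.
 */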
static void
ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct i40e_hw *hw = &pf->hw;

	INIT_DEBUGOUT("ixl_media_status: begin");

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!pf->link_up) {
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	/* Hardware is always full-duplex */
	ifmr->ifm_active |= IFM_FDX;

	switch (hw->phy.link_info.phy_type) {
	/* 100 M */
	case I40E_PHY_TYPE_100BASE_TX:
		ifmr->ifm_active |= IFM_100_TX;
		break;
	/* 1 G */
	case I40E_PHY_TYPE_1000BASE_T:
		ifmr->ifm_active |= IFM_1000_T;
		break;
	case I40E_PHY_TYPE_1000BASE_SX:
		ifmr->ifm_active |= IFM_1000_SX;
		break;
	case I40E_PHY_TYPE_1000BASE_LX:
		ifmr->ifm_active |= IFM_1000_LX;
		break;
	case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
		ifmr->ifm_active |= IFM_1000_T;
		break;
	/* 2.5 G */
	case I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS:
		ifmr->ifm_active |= IFM_2500_T;
		break;
	/* 5 G */
	case I40E_PHY_TYPE_5GBASE_T_LINK_STATUS:
		ifmr->ifm_active |= IFM_5000_T;
		break;
	/* 10 G */
	case I40E_PHY_TYPE_10GBASE_SFPP_CU:
		ifmr->ifm_active |= IFM_10G_TWINAX;
		break;
	case I40E_PHY_TYPE_10GBASE_SR:
		ifmr->ifm_active |= IFM_10G_SR;
		break;
	case I40E_PHY_TYPE_10GBASE_LR:
		ifmr->ifm_active |= IFM_10G_LR;
		break;
	case I40E_PHY_TYPE_10GBASE_T:
		ifmr->ifm_active |= IFM_10G_T;
		break;
	case I40E_PHY_TYPE_XAUI:
	case I40E_PHY_TYPE_XFI:
		ifmr->ifm_active |= IFM_10G_TWINAX;
		break;
	case I40E_PHY_TYPE_10GBASE_AOC:
		ifmr->ifm_active |= IFM_10G_AOC;
		break;
	/* 25 G */
	case I40E_PHY_TYPE_25GBASE_KR:
		ifmr->ifm_active |= IFM_25G_KR;
		break;
	case I40E_PHY_TYPE_25GBASE_CR:
		ifmr->ifm_active |= IFM_25G_CR;
		break;
	case I40E_PHY_TYPE_25GBASE_SR:
		ifmr->ifm_active |= IFM_25G_SR;
		break;
	case I40E_PHY_TYPE_25GBASE_LR:
		ifmr->ifm_active |= IFM_25G_LR;
		break;
	case I40E_PHY_TYPE_25GBASE_AOC:
		ifmr->ifm_active |= IFM_25G_AOC;
		break;
	case I40E_PHY_TYPE_25GBASE_ACC:
		ifmr->ifm_active |= IFM_25G_ACC;
		break;
	/* 40 G */
	case I40E_PHY_TYPE_40GBASE_CR4:
	case I40E_PHY_TYPE_40GBASE_CR4_CU:
		ifmr->ifm_active |= IFM_40G_CR4;
		break;
	case I40E_PHY_TYPE_40GBASE_SR4:
		ifmr->ifm_active |= IFM_40G_SR4;
		break;
	case I40E_PHY_TYPE_40GBASE_LR4:
		ifmr->ifm_active |= IFM_40G_LR4;
		break;
	case I40E_PHY_TYPE_XLAUI:
		ifmr->ifm_active |= IFM_OTHER;
		break;
	case I40E_PHY_TYPE_1000BASE_KX:
		ifmr->ifm_active |= IFM_1000_KX;
		break;
	case I40E_PHY_TYPE_SGMII:
		ifmr->ifm_active |= IFM_1000_SGMII;
		break;
	/* ERJ: What's the difference between these? */
	case I40E_PHY_TYPE_10GBASE_CR1_CU:
	case I40E_PHY_TYPE_10GBASE_CR1:
		ifmr->ifm_active |= IFM_10G_CR1;
		break;
	case I40E_PHY_TYPE_10GBASE_KX4:
		ifmr->ifm_active |= IFM_10G_KX4;
		break;
	case I40E_PHY_TYPE_10GBASE_KR:
		ifmr->ifm_active |= IFM_10G_KR;
		break;
	case I40E_PHY_TYPE_SFI:
		ifmr->ifm_active |= IFM_10G_SFI;
		break;
	/* Our single 20G media type */
	case I40E_PHY_TYPE_20GBASE_KR2:
		ifmr->ifm_active |= IFM_20G_KR2;
		break;
	case I40E_PHY_TYPE_40GBASE_KR4:
		ifmr->ifm_active |= IFM_40G_KR4;
		break;
	case I40E_PHY_TYPE_XLPPI:
	case I40E_PHY_TYPE_40GBASE_AOC:
		ifmr->ifm_active |= IFM_40G_XLPPI;
		break;
	/* Unknown to driver */
	default:
		ifmr->ifm_active |= IFM_UNKNOWN;
		break;
	}
	/* Report flow control status as well */
	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
}

static int
ixl_if_media_change(if_ctx_t ctx)
{
	struct ifmedia *ifm = iflib_get_media(ctx);

	INIT_DEBUGOUT("ixl_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	if_printf(iflib_get_ifp(ctx), "Media change is not supported.\n");
	return (ENODEV);
}
static int
ixl_if_promisc_set(if_ctx_t ctx, int flags)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	if_t ifp = iflib_get_ifp(ctx);
	struct i40e_hw *hw = vsi->hw;
	int err;
	bool uni = FALSE, multi = FALSE;

	if (flags & IFF_PROMISC)
		uni = multi = TRUE;
	else if (flags & IFF_ALLMULTI || if_llmaddr_count(ifp) >=
	    MAX_MULTICAST_ADDR)
		multi = TRUE;

	err = i40e_aq_set_vsi_unicast_promiscuous(hw,
	    vsi->seid, uni, NULL, true);
	if (err)
		return (err);
	err = i40e_aq_set_vsi_multicast_promiscuous(hw,
	    vsi->seid, multi, NULL);
	return (err);
}

static void
ixl_if_timer(if_ctx_t ctx, uint16_t qid)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);

	if (qid != 0)
		return;

	ixl_update_stats_counters(pf);
}

static void
ixl_if_vlan_register(if_ctx_t ctx, u16 vtag)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = vsi->hw;
	if_t ifp = iflib_get_ifp(ctx);

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	/*
	 * Keep track of registered VLANS to know what
	 * filters have to be configured when VLAN_HWFILTER
	 * capability is enabled.
	 */
	++vsi->num_vlans;
	bit_set(vsi->vlans_map, vtag);

	if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0)
		return;

	if (vsi->num_vlans < IXL_MAX_VLAN_FILTERS)
		ixl_add_filter(vsi, hw->mac.addr, vtag);
	else if (vsi->num_vlans == IXL_MAX_VLAN_FILTERS) {
		/*
		 * There are not enough HW resources to add filters
		 * for all registered VLANs. Re-configure filtering
		 * to allow reception of all expected traffic.
		 */
		device_printf(vsi->dev,
		    "Not enough HW filters for all VLANs. VLAN HW filtering disabled\n");
		ixl_del_all_vlan_filters(vsi, hw->mac.addr);
		ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
	}
}

static void
ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = vsi->hw;
	if_t ifp = iflib_get_ifp(ctx);

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	--vsi->num_vlans;
	bit_clear(vsi->vlans_map, vtag);

	if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0)
		return;

	/* One filter is used for untagged frames */
	if (vsi->num_vlans < IXL_MAX_VLAN_FILTERS - 1)
		ixl_del_filter(vsi, hw->mac.addr, vtag);
	else if (vsi->num_vlans == IXL_MAX_VLAN_FILTERS - 1) {
		ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
		ixl_add_vlan_filters(vsi, hw->mac.addr);
	}
}

static uint64_t
ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	if_t ifp = iflib_get_ifp(ctx);

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (vsi->ipackets);
	case IFCOUNTER_IERRORS:
		return (vsi->ierrors);
	case IFCOUNTER_OPACKETS:
		return (vsi->opackets);
	case IFCOUNTER_OERRORS:
		return (vsi->oerrors);
	case IFCOUNTER_COLLISIONS:
		/* Collisions are by standard impossible in 40G/10G Ethernet */
		return (0);
	case IFCOUNTER_IBYTES:
		return (vsi->ibytes);
	case IFCOUNTER_OBYTES:
		return (vsi->obytes);
	case IFCOUNTER_IMCASTS:
		return (vsi->imcasts);
	case IFCOUNTER_OMCASTS:
		return (vsi->omcasts);
	case IFCOUNTER_IQDROPS:
		return (vsi->iqdrops);
	case IFCOUNTER_OQDROPS:
		return (vsi->oqdrops);
	case IFCOUNTER_NOPROTO:
		return (vsi->noproto);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}

#ifdef PCI_IOV
static void
ixl_if_vflr_handle(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);

	ixl_handle_vflr(pf);
}
#endif

static int
ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);

	if (pf->read_i2c_byte == NULL)
		return (EINVAL);

	for (int i = 0; i < req->len; i++)
		if (pf->read_i2c_byte(pf, req->offset + i,
		    req->dev_addr, &req->data[i]))
			return (EIO);
	return (0);
}
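/*
 * ixl_if_i2c_req() above serves SFF module EEPROM reads (used by e.g.
 * "ifconfig -v" transceiver reporting) one byte at a time through
 * pf->read_i2c_byte; that method pointer is chosen based on the
 * hw.ixl.i2c_access_method tunable and is NULL when the device offers no
 * I2C access, in which case EINVAL is returned.
 */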
static int
ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ifdrv *ifd = (struct ifdrv *)data;
	int error = 0;

	/*
	 * The iflib_if_ioctl forwards SIOCxDRVSPEC and SIOCGPRIVATE_0 without
	 * performing privilege checks. It is important that this function
	 * perform the necessary checks for commands which should only be
	 * executed by privileged threads.
	 */

	switch (command) {
	case SIOCGDRVSPEC:
	case SIOCSDRVSPEC:
		/* NVM update command */
		if (ifd->ifd_cmd == I40E_NVM_ACCESS) {
			error = priv_check(curthread, PRIV_DRIVER);
			if (error)
				break;
			error = ixl_handle_nvmupd_cmd(pf, ifd);
		} else {
			error = EINVAL;
		}
		break;
	default:
		error = EOPNOTSUPP;
	}

	return (error);
}

/* ixl_if_needs_restart - Tell iflib when the driver needs to be reinitialized
 * @ctx: iflib context
 * @event: event code to check
 *
 * Defaults to returning false for every event.
 *
 * @returns true if iflib needs to reinit the interface, false otherwise
 */
static bool
ixl_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
{
	switch (event) {
	case IFLIB_RESTART_VLAN_CONFIG:
	default:
		return (false);
	}
}

/*
 * Sanity check and save off tunable values.
 */
static void
ixl_save_pf_tunables(struct ixl_pf *pf)
{
	device_t dev = pf->dev;

	/* Save tunable information */
#ifdef IXL_DEBUG_FC
	pf->enable_tx_fc_filter = ixl_enable_tx_fc_filter;
#endif
#ifdef IXL_DEBUG
	pf->recovery_mode = ixl_debug_recovery_mode;
#endif
	pf->dbg_mask = ixl_core_debug_mask;
	pf->hw.debug_mask = ixl_shared_debug_mask;
	pf->vsi.enable_head_writeback = !!(ixl_enable_head_writeback);
	pf->enable_vf_loopback = !!(ixl_enable_vf_loopback);
#if 0
	pf->dynamic_rx_itr = ixl_dynamic_rx_itr;
	pf->dynamic_tx_itr = ixl_dynamic_tx_itr;
#endif

	if (ixl_i2c_access_method > 3 || ixl_i2c_access_method < 0)
		pf->i2c_access_method = 0;
	else
		pf->i2c_access_method = ixl_i2c_access_method;

	if (ixl_tx_itr < 0 || ixl_tx_itr > IXL_MAX_ITR) {
		device_printf(dev, "Invalid tx_itr value of %d set!\n",
		    ixl_tx_itr);
		device_printf(dev, "tx_itr must be between %d and %d, "
		    "inclusive\n",
		    0, IXL_MAX_ITR);
		device_printf(dev, "Using default value of %d instead\n",
		    IXL_ITR_4K);
		pf->tx_itr = IXL_ITR_4K;
	} else
		pf->tx_itr = ixl_tx_itr;

	if (ixl_rx_itr < 0 || ixl_rx_itr > IXL_MAX_ITR) {
		device_printf(dev, "Invalid rx_itr value of %d set!\n",
		    ixl_rx_itr);
		device_printf(dev, "rx_itr must be between %d and %d, "
		    "inclusive\n",
		    0, IXL_MAX_ITR);
		device_printf(dev, "Using default value of %d instead\n",
		    IXL_ITR_8K);
		pf->rx_itr = IXL_ITR_8K;
	} else
		pf->rx_itr = ixl_rx_itr;

	pf->fc = -1;
	if (ixl_flow_control != -1) {
		if (ixl_flow_control < 0 || ixl_flow_control > 3) {
			device_printf(dev,
			    "Invalid flow_control value of %d set!\n",
			    ixl_flow_control);
			device_printf(dev,
			    "flow_control must be between %d and %d, "
			    "inclusive\n", 0, 3);
			device_printf(dev,
			    "Using default configuration instead\n");
		} else
			pf->fc = ixl_flow_control;
	}
}