/******************************************************************************

  Copyright (c) 2013-2018, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "ixl.h"
#include "ixl_pf.h"

#ifdef IXL_IW
#include "ixl_iw.h"
#include "ixl_iw_int.h"
#endif

#ifdef PCI_IOV
#include "ixl_pf_iov.h"
#endif

/*********************************************************************
 *  Driver version
 *********************************************************************/
#define IXL_DRIVER_VERSION_MAJOR	2
#define IXL_DRIVER_VERSION_MINOR	0
#define IXL_DRIVER_VERSION_BUILD	0

#define IXL_DRIVER_VERSION_STRING			\
    __XSTRING(IXL_DRIVER_VERSION_MAJOR) "."		\
    __XSTRING(IXL_DRIVER_VERSION_MINOR) "."		\
    __XSTRING(IXL_DRIVER_VERSION_BUILD) "-k"

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *
 *  ( Vendor ID, Device ID, Branding String )
 *********************************************************************/

static pci_vendor_info_t ixl_vendor_info_array[] =
{
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, "Intel(R) Ethernet Controller X710 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, "Intel(R) Ethernet Controller XL710 for 40GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, "Intel(R) Ethernet Controller X710 for 10GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, "Intel(R) Ethernet Controller X710 for 10GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, "Intel(R) Ethernet Controller X710 for 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, "Intel(R) Ethernet Controller X710/X557-AT 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722, "Intel(R) Ethernet Connection X722 for 10GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 1GbE"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B, "Intel(R) Ethernet Controller XXV710 for 25GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28, "Intel(R) Ethernet Controller XXV710 for 25GbE SFP28"),
	/* required last entry */
	PVID_END
};

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
/*** IFLIB interface ***/
static void *ixl_register(device_t dev);
static int ixl_if_attach_pre(if_ctx_t ctx);
static int ixl_if_attach_post(if_ctx_t ctx);
static int ixl_if_detach(if_ctx_t ctx);
static int ixl_if_shutdown(if_ctx_t ctx);
static int ixl_if_suspend(if_ctx_t ctx);
static int ixl_if_resume(if_ctx_t ctx);
static int ixl_if_msix_intr_assign(if_ctx_t ctx, int msix);
static void ixl_if_enable_intr(if_ctx_t ctx);
static void ixl_if_disable_intr(if_ctx_t ctx);
static int ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid);
static int ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid);
static int ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets);
static int ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
static void ixl_if_queues_free(if_ctx_t ctx);
static void ixl_if_update_admin_status(if_ctx_t ctx);
static void ixl_if_multi_set(if_ctx_t ctx);
static int ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
static void ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr);
static int ixl_if_media_change(if_ctx_t ctx);
static int ixl_if_promisc_set(if_ctx_t ctx, int flags);
static void ixl_if_timer(if_ctx_t ctx, uint16_t qid);
static void ixl_if_vlan_register(if_ctx_t ctx, u16 vtag);
static void ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
static uint64_t ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt);
static void ixl_if_vflr_handle(if_ctx_t ctx);
// static void ixl_if_link_intr_enable(if_ctx_t ctx);
static int ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
static int ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data);

/*** Other ***/
static int ixl_mc_filter_apply(void *arg, struct ifmultiaddr *ifma, int);
static void ixl_save_pf_tunables(struct ixl_pf *);
static int ixl_allocate_pci_resources(struct ixl_pf *);

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixl_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, ixl_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
#ifdef PCI_IOV
	DEVMETHOD(pci_iov_init, ixl_iov_init),
	DEVMETHOD(pci_iov_uninit, ixl_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, ixl_add_vf),
#endif
	DEVMETHOD_END
};

static driver_t ixl_driver = {
	"ixl", ixl_methods, sizeof(struct ixl_pf),
};

devclass_t ixl_devclass;
DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0);
MODULE_VERSION(ixl, 3);

MODULE_DEPEND(ixl, pci, 1, 1, 1);
MODULE_DEPEND(ixl, ether, 1, 1, 1);
MODULE_DEPEND(ixl, iflib, 1, 1, 1);

static device_method_t ixl_if_methods[] = {
	DEVMETHOD(ifdi_attach_pre, ixl_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, ixl_if_attach_post),
	DEVMETHOD(ifdi_detach, ixl_if_detach),
	DEVMETHOD(ifdi_shutdown, ixl_if_shutdown),
	DEVMETHOD(ifdi_suspend, ixl_if_suspend),
	DEVMETHOD(ifdi_resume, ixl_if_resume),
	DEVMETHOD(ifdi_init, ixl_if_init),
	DEVMETHOD(ifdi_stop, ixl_if_stop),
	DEVMETHOD(ifdi_msix_intr_assign, ixl_if_msix_intr_assign),
	DEVMETHOD(ifdi_intr_enable, ixl_if_enable_intr),
	DEVMETHOD(ifdi_intr_disable, ixl_if_disable_intr),
	//DEVMETHOD(ifdi_link_intr_enable, ixl_if_link_intr_enable),
	DEVMETHOD(ifdi_rx_queue_intr_enable, ixl_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queue_intr_enable, ixl_if_tx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queues_alloc, ixl_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, ixl_if_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, ixl_if_queues_free),
	DEVMETHOD(ifdi_update_admin_status, ixl_if_update_admin_status),
	DEVMETHOD(ifdi_multi_set, ixl_if_multi_set),
	DEVMETHOD(ifdi_mtu_set, ixl_if_mtu_set),
	DEVMETHOD(ifdi_media_status, ixl_if_media_status),
	DEVMETHOD(ifdi_media_change, ixl_if_media_change),
	DEVMETHOD(ifdi_promisc_set, ixl_if_promisc_set),
	DEVMETHOD(ifdi_timer, ixl_if_timer),
	DEVMETHOD(ifdi_vlan_register, ixl_if_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister, ixl_if_vlan_unregister),
	DEVMETHOD(ifdi_get_counter, ixl_if_get_counter),
	DEVMETHOD(ifdi_vflr_handle, ixl_if_vflr_handle),
	DEVMETHOD(ifdi_i2c_req, ixl_if_i2c_req),
	DEVMETHOD(ifdi_priv_ioctl, ixl_if_priv_ioctl),
	// ifdi_led_func
	// ifdi_debug
	DEVMETHOD_END
};
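
/*
 * Flow sketch (illustrative, not part of the original sources): the newbus
 * methods in ixl_methods above defer probe/attach/detach to iflib, which
 * then calls back into this driver through the ifdi_* methods registered
 * in ixl_if_methods, roughly:
 *
 *   device_attach() -> iflib_device_attach() -> ixl_if_attach_pre()
 *                                            -> ixl_if_attach_post()
 */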

static driver_t ixl_if_driver = {
	"ixl_if", ixl_if_methods, sizeof(struct ixl_pf)
};

/*
** TUNEABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD, 0,
    "IXL driver parameters");

/*
 * Leave this on unless you need to send flow control
 * frames (or other control frames) from software
 */
static int ixl_enable_tx_fc_filter = 1;
TUNABLE_INT("hw.ixl.enable_tx_fc_filter",
    &ixl_enable_tx_fc_filter);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_tx_fc_filter, CTLFLAG_RDTUN,
    &ixl_enable_tx_fc_filter, 0,
    "Filter out packets with Ethertype 0x8808 from being sent out by non-HW sources");

static int ixl_i2c_access_method = 0;
TUNABLE_INT("hw.ixl.i2c_access_method",
    &ixl_i2c_access_method);
SYSCTL_INT(_hw_ixl, OID_AUTO, i2c_access_method, CTLFLAG_RDTUN,
    &ixl_i2c_access_method, 0,
    IXL_SYSCTL_HELP_I2C_METHOD);

/*
 * Different method for processing TX descriptor
 * completion.
 */
static int ixl_enable_head_writeback = 1;
TUNABLE_INT("hw.ixl.enable_head_writeback",
    &ixl_enable_head_writeback);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN,
    &ixl_enable_head_writeback, 0,
    "For detecting last completed TX descriptor by hardware, use value written by HW instead of checking descriptors");

static int ixl_core_debug_mask = 0;
TUNABLE_INT("hw.ixl.core_debug_mask",
    &ixl_core_debug_mask);
SYSCTL_INT(_hw_ixl, OID_AUTO, core_debug_mask, CTLFLAG_RDTUN,
    &ixl_core_debug_mask, 0,
    "Display debug statements that are printed in non-shared code");

static int ixl_shared_debug_mask = 0;
TUNABLE_INT("hw.ixl.shared_debug_mask",
    &ixl_shared_debug_mask);
SYSCTL_INT(_hw_ixl, OID_AUTO, shared_debug_mask, CTLFLAG_RDTUN,
    &ixl_shared_debug_mask, 0,
    "Display debug statements that are printed in shared code");

#if 0
/*
** Controls for Interrupt Throttling
**	- true/false for dynamic adjustment
**	- default values for static ITR
*/
static int ixl_dynamic_rx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");

static int ixl_dynamic_tx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
#endif

static int ixl_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
    &ixl_rx_itr, 0, "RX Interrupt Rate");

static int ixl_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
    &ixl_tx_itr, 0, "TX Interrupt Rate");

#ifdef IXL_IW
int ixl_enable_iwarp = 0;
TUNABLE_INT("hw.ixl.enable_iwarp", &ixl_enable_iwarp);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_iwarp, CTLFLAG_RDTUN,
    &ixl_enable_iwarp, 0, "iWARP enabled");

#if __FreeBSD_version < 1100000
int ixl_limit_iwarp_msix = 1;
#else
int ixl_limit_iwarp_msix = IXL_IW_MAX_MSIX;
#endif
TUNABLE_INT("hw.ixl.limit_iwarp_msix", &ixl_limit_iwarp_msix);
SYSCTL_INT(_hw_ixl, OID_AUTO, limit_iwarp_msix, CTLFLAG_RDTUN,
    &ixl_limit_iwarp_msix, 0, "Limit MSIX vectors assigned to iWARP");
#endif
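
/*
 * Usage example (illustrative, not in the original sources): CTLFLAG_RDTUN
 * parameters are read once when the module loads, so they are typically set
 * from /boot/loader.conf, e.g.:
 *
 *   hw.ixl.enable_head_writeback="0"
 *   hw.ixl.rx_itr="62"
 *
 * At runtime they remain visible, but read-only, through sysctl(8):
 *
 *   sysctl hw.ixl.rx_itr
 */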

extern struct if_txrx ixl_txrx_hwb;
extern struct if_txrx ixl_txrx_dwb;

static struct if_shared_ctx ixl_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_q_align = PAGE_SIZE,
	.isc_tx_maxsize = IXL_TSO_SIZE,
	.isc_tx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,

	.isc_rx_maxsize = 16384,
	.isc_rx_nsegments = IXL_MAX_RX_SEGS,
	.isc_rx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
	.isc_nfl = 1,
	.isc_ntxqs = 1,
	.isc_nrxqs = 1,

	.isc_admin_intrcnt = 1,
	.isc_vendor_info = ixl_vendor_info_array,
	.isc_driver_version = IXL_DRIVER_VERSION_STRING,
	.isc_driver = &ixl_if_driver,
	.isc_flags = IFLIB_NEED_SCRATCH | IFLIB_NEED_ZERO_CSUM | IFLIB_ADMIN_ALWAYS_RUN,

	.isc_nrxd_min = {IXL_MIN_RING},
	.isc_ntxd_min = {IXL_MIN_RING},
	.isc_nrxd_max = {IXL_MAX_RING},
	.isc_ntxd_max = {IXL_MAX_RING},
	.isc_nrxd_default = {IXL_DEFAULT_RING},
	.isc_ntxd_default = {IXL_DEFAULT_RING},
};

if_shared_ctx_t ixl_sctx = &ixl_sctx_init;

/*** Functions ***/
static void *
ixl_register(device_t dev)
{
	return (ixl_sctx);
}

static int
ixl_allocate_pci_resources(struct ixl_pf *pf)
{
	int rid;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = iflib_get_dev(pf->vsi.ctx);

	/* Map BAR0 */
	rid = PCIR_BAR(0);
	pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);

	if (!(pf->pci_mem)) {
		device_printf(dev, "Unable to allocate bus resource: PCI memory\n");
		return (ENXIO);
	}

	/* Save off the PCI information */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
	hw->subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	hw->subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	hw->bus.device = pci_get_slot(dev);
	hw->bus.func = pci_get_function(dev);

	/* Save off register access information */
	pf->osdep.mem_bus_space_tag =
	    rman_get_bustag(pf->pci_mem);
	pf->osdep.mem_bus_space_handle =
	    rman_get_bushandle(pf->pci_mem);
	pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
	pf->osdep.flush_reg = I40E_GLGEN_STAT;
	pf->osdep.dev = dev;

	pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
	pf->hw.back = &pf->osdep;

	return (0);
}
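
/*
 * Register access sketch (illustrative, not in the original sources):
 * hw->hw_addr above points at the stored bus-space handle rather than a
 * direct mapping, so the shared code's rd32()/wr32() wrappers in the osdep
 * layer are expected to go through the saved tag/handle, roughly:
 *
 *   val = bus_space_read_4(osdep->mem_bus_space_tag,
 *       osdep->mem_bus_space_handle, reg);
 */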

static int
ixl_if_attach_pre(if_ctx_t ctx)
{
	device_t dev;
	struct ixl_pf *pf;
	struct i40e_hw *hw;
	struct ixl_vsi *vsi;
	if_softc_ctx_t scctx;
	struct i40e_filter_control_settings filter;
	enum i40e_status_code status;
	int error = 0;

	INIT_DEBUGOUT("ixl_if_attach_pre: begin");

	/* Allocate, clear, and link in our primary soft structure */
	dev = iflib_get_dev(ctx);
	pf = iflib_get_softc(ctx);
	vsi = &pf->vsi;
	vsi->back = pf;
	pf->dev = dev;
	hw = &pf->hw;

	/*
	** Note this assumes we have a single embedded VSI,
	** this could be enhanced later to allocate multiple
	*/
	//vsi->dev = pf->dev;
	vsi->hw = &pf->hw;
	vsi->id = 0;
	vsi->num_vlans = 0;
	vsi->ctx = ctx;
	vsi->media = iflib_get_media(ctx);
	vsi->shared = scctx = iflib_get_softc_ctx(ctx);

	/* Save tunable values */
	ixl_save_pf_tunables(pf);

	/* Do PCI setup - map BAR0, etc */
	if (ixl_allocate_pci_resources(pf)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_pci_res;
	}

	/* Establish a clean starting point */
	i40e_clear_hw(hw);
	status = i40e_pf_reset(hw);
	if (status) {
		device_printf(dev, "PF reset failure %s\n",
		    i40e_stat_str(hw, status));
		error = EIO;
		goto err_out;
	}

	/* Initialize the shared code */
	status = i40e_init_shared_code(hw);
	if (status) {
		device_printf(dev, "Unable to initialize shared code, error %s\n",
		    i40e_stat_str(hw, status));
		error = EIO;
		goto err_out;
	}

	/* Set up the admin queue */
	hw->aq.num_arq_entries = IXL_AQ_LEN;
	hw->aq.num_asq_entries = IXL_AQ_LEN;
	hw->aq.arq_buf_size = IXL_AQ_BUF_SZ;
	hw->aq.asq_buf_size = IXL_AQ_BUF_SZ;

	status = i40e_init_adminq(hw);
	if (status != 0 && status != I40E_ERR_FIRMWARE_API_VERSION) {
		device_printf(dev, "Unable to initialize Admin Queue, error %s\n",
		    i40e_stat_str(hw, status));
		error = EIO;
		goto err_out;
	}
	ixl_print_nvm_version(pf);

	if (status == I40E_ERR_FIRMWARE_API_VERSION) {
		device_printf(dev, "The driver for the device stopped "
		    "because the NVM image is newer than expected.\n");
		device_printf(dev, "You must install the most recent version of "
		    "the network driver.\n");
		error = EIO;
		goto err_out;
	}

	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw)) {
		device_printf(dev, "The driver for the device detected "
		    "a newer version of the NVM image than expected.\n");
		device_printf(dev, "Please install the most recent version "
		    "of the network driver.\n");
	} else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4) {
		device_printf(dev, "The driver for the device detected "
		    "an older version of the NVM image than expected.\n");
		device_printf(dev, "Please update the NVM image.\n");
	}

	/* Clear PXE mode */
	i40e_clear_pxe_mode(hw);

	/* Get capabilities from the device */
	error = ixl_get_hw_capabilities(pf);
	if (error) {
		device_printf(dev, "get_hw_capabilities failed: %d\n",
		    error);
		goto err_get_cap;
	}

	/* Set up host memory cache */
	status = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp, 0, 0);
	if (status) {
		device_printf(dev, "init_lan_hmc failed: %s\n",
		    i40e_stat_str(hw, status));
		goto err_get_cap;
	}
	status = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (status) {
		device_printf(dev, "configure_lan_hmc failed: %s\n",
		    i40e_stat_str(hw, status));
		goto err_mac_hmc;
	}

	/* Disable LLDP from the firmware for certain NVM versions */
	if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
	    (pf->hw.aq.fw_maj_ver < 4)) {
		i40e_aq_stop_lldp(hw, TRUE, NULL);
		pf->state |= IXL_PF_STATE_FW_LLDP_DISABLED;
	}

	/* Get MAC addresses from hardware */
	i40e_get_mac_addr(hw, hw->mac.addr);
	error = i40e_validate_mac_addr(hw->mac.addr);
	if (error) {
		device_printf(dev, "validate_mac_addr failed: %d\n", error);
		goto err_mac_hmc;
	}
	bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
	iflib_set_mac(ctx, hw->mac.addr);
	i40e_get_port_mac_addr(hw, hw->mac.port_addr);

	/* Set up the device filtering */
	bzero(&filter, sizeof(filter));
	filter.enable_ethtype = TRUE;
	filter.enable_macvlan = TRUE;
	filter.enable_fdir = FALSE;
	filter.hash_lut_size = I40E_HASH_LUT_SIZE_512;
	if (i40e_set_filter_control(hw, &filter))
		device_printf(dev, "i40e_set_filter_control() failed\n");

	/* Query device FW LLDP status */
	ixl_get_fw_lldp_status(pf);
	/* Tell FW to apply DCB config on link up */
	i40e_aq_set_dcb_parameters(hw, true, NULL);

	/* Fill out iflib parameters */
	if (hw->mac.type == I40E_MAC_X722)
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 128;
	else
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
	if (vsi->enable_head_writeback) {
		scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
		    * sizeof(struct i40e_tx_desc) + sizeof(u32), DBA_ALIGN);
		scctx->isc_txrx = &ixl_txrx_hwb;
	} else {
		scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
		    * sizeof(struct i40e_tx_desc), DBA_ALIGN);
		scctx->isc_txrx = &ixl_txrx_dwb;
	}
	scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0]
	    * sizeof(union i40e_32byte_rx_desc), DBA_ALIGN);
	scctx->isc_msix_bar = PCIR_BAR(IXL_MSIX_BAR);
	scctx->isc_tx_nsegments = IXL_MAX_TX_SEGS;
	scctx->isc_tx_tso_segments_max = IXL_MAX_TSO_SEGS;
	scctx->isc_tx_tso_size_max = IXL_TSO_SIZE;
	scctx->isc_tx_tso_segsize_max = IXL_MAX_DMA_SEG_SIZE;
	scctx->isc_rss_table_size = pf->hw.func_caps.rss_table_size;
	scctx->isc_tx_csum_flags = CSUM_OFFLOAD;
	scctx->isc_capenable = IXL_CAPS;

	INIT_DEBUGOUT("ixl_if_attach_pre: end");
	return (0);

err_mac_hmc:
	i40e_shutdown_lan_hmc(hw);
err_get_cap:
	i40e_shutdown_adminq(hw);
err_out:
	ixl_free_pci_resources(pf);
err_pci_res:
	return (error);
}

static int
ixl_if_attach_post(if_ctx_t ctx)
{
	device_t dev;
	struct ixl_pf *pf;
	struct i40e_hw *hw;
	struct ixl_vsi *vsi;
	int error = 0;
	enum i40e_status_code status;

	INIT_DEBUGOUT("ixl_if_attach_post: begin");

	dev = iflib_get_dev(ctx);
	pf = iflib_get_softc(ctx);
	vsi = &pf->vsi;
	vsi->ifp = iflib_get_ifp(ctx);
	hw = &pf->hw;

	/* Setup OS network interface / ifnet */
	if (ixl_setup_interface(dev, pf)) {
		device_printf(dev, "interface setup failed!\n");
		error = EIO;
		goto err;
	}

	/* Determine link state */
	if (ixl_attach_get_link_status(pf)) {
		error = EINVAL;
		goto err;
	}

	error = ixl_switch_config(pf);
	if (error) {
		device_printf(dev, "Initial ixl_switch_config() failed: %d\n",
		    error);
		goto err;
	}

	/* Add protocol filters to list */
	ixl_init_filters(vsi);

	/* Init queue allocation manager */
	error = ixl_pf_qmgr_init(&pf->qmgr, hw->func_caps.num_tx_qp);
	if (error) {
		device_printf(dev, "Failed to init queue manager for PF queues, error %d\n",
		    error);
		goto err;
	}
	/* reserve a contiguous allocation for the PF's VSI */
	error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr,
	    max(vsi->num_rx_queues, vsi->num_tx_queues), &pf->qtag);
	if (error) {
		device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
		    error);
		goto err;
	}
	device_printf(dev, "Allocating %d queues for PF LAN VSI; %d queues active\n",
	    pf->qtag.num_allocated, pf->qtag.num_active);

	/* Limit PHY interrupts to link, autoneg, and module failure */
	status = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
	    NULL);
	if (status) {
		device_printf(dev, "i40e_aq_set_phy_int_mask() failed: err %s,"
		    " aq_err %s\n", i40e_stat_str(hw, status),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		goto err;
	}

	/* Get the bus configuration and set the shared code */
	ixl_get_bus_info(pf);

	/* Keep admin queue interrupts active while driver is loaded */
	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
		ixl_configure_intr0_msix(pf);
		ixl_enable_intr0(hw);
	}

	/* Set initial advertised speed sysctl value */
	ixl_set_initial_advertised_speeds(pf);

	/* Initialize statistics & add sysctls */
	ixl_add_device_sysctls(pf);
	ixl_pf_reset_stats(pf);
	ixl_update_stats_counters(pf);
	ixl_add_hw_stats(pf);

	hw->phy.get_link_info = true;
	i40e_get_link_status(hw, &pf->link_up);
	ixl_update_link_status(pf);

#ifdef PCI_IOV
	ixl_initialize_sriov(pf);
#endif

#ifdef IXL_IW
	if (hw->func_caps.iwarp && ixl_enable_iwarp) {
		pf->iw_enabled = (pf->iw_msix > 0) ? true : false;
		if (pf->iw_enabled) {
			error = ixl_iw_pf_attach(pf);
			if (error) {
				device_printf(dev,
				    "interfacing to iwarp driver failed: %d\n",
				    error);
				goto err;
			} else
				device_printf(dev, "iWARP ready\n");
		} else
			device_printf(dev,
			    "iwarp disabled on this device (no msix vectors)\n");
	} else {
		pf->iw_enabled = false;
		device_printf(dev, "The device is not iWARP enabled\n");
	}
#endif

	INIT_DBG_DEV(dev, "end");
	return (0);

err:
	INIT_DEBUGOUT("end: error %d", error);
	/* ixl_if_detach() is called on error from this */
	return (error);
}

static int
ixl_if_detach(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	enum i40e_status_code status;
#if defined(PCI_IOV) || defined(IXL_IW)
	int error;
#endif

	INIT_DBG_DEV(dev, "begin");

#ifdef IXL_IW
	if (ixl_enable_iwarp && pf->iw_enabled) {
		error = ixl_iw_pf_detach(pf);
		if (error == EBUSY) {
			device_printf(dev, "iwarp in use; stop it first.\n");
			return (error);
		}
	}
#endif
#ifdef PCI_IOV
	error = pci_iov_detach(dev);
	if (error != 0) {
		device_printf(dev, "SR-IOV in use; detach first.\n");
		return (error);
	}
#endif
	/* Remove all previously allocated media types */
	ifmedia_removeall(vsi->media);

	/* Shutdown LAN HMC */
	if (hw->hmc.hmc_obj) {
		status = i40e_shutdown_lan_hmc(hw);
		if (status)
			device_printf(dev,
			    "i40e_shutdown_lan_hmc() failed with status %s\n",
			    i40e_stat_str(hw, status));
	}

	/* Shutdown admin queue */
	ixl_disable_intr0(hw);
	status = i40e_shutdown_adminq(hw);
	if (status)
		device_printf(dev,
		    "i40e_shutdown_adminq() failed with status %s\n",
		    i40e_stat_str(hw, status));

	ixl_pf_qmgr_destroy(&pf->qmgr);
	ixl_free_pci_resources(pf);
	ixl_free_mac_filters(vsi);
	INIT_DBG_DEV(dev, "end");
	return (0);
}

/* TODO: Do shutdown-specific stuff here */
static int
ixl_if_shutdown(if_ctx_t ctx)
{
	int error = 0;

	INIT_DEBUGOUT("ixl_if_shutdown: begin");

	/* TODO: Call ixl_if_stop()? */

	/* TODO: Then setup low power mode */

	return (error);
}

static int
ixl_if_suspend(if_ctx_t ctx)
{
	int error = 0;

	INIT_DEBUGOUT("ixl_if_suspend: begin");

	/* TODO: Call ixl_if_stop()? */

	/* TODO: Then setup low power mode */

	return (error);
}

static int
ixl_if_resume(if_ctx_t ctx)
{
	struct ifnet *ifp = iflib_get_ifp(ctx);

	INIT_DEBUGOUT("ixl_if_resume: begin");

	/* Read & clear wake-up registers */

	/* Required after D3->D0 transition */
	if (ifp->if_flags & IFF_UP)
		ixl_if_init(ctx);

	return (0);
}

/* Set Report Status queue fields to 0 */
static void
ixl_init_tx_rsqs(struct ixl_vsi *vsi)
{
	if_softc_ctx_t scctx = vsi->shared;
	struct ixl_tx_queue *tx_que;
	int i, j;

	for (i = 0, tx_que = vsi->tx_queues; i < vsi->num_tx_queues; i++, tx_que++) {
		struct tx_ring *txr = &tx_que->txr;

		txr->tx_rs_cidx = txr->tx_rs_pidx = txr->tx_cidx_processed = 0;

		for (j = 0; j < scctx->isc_ntxd[0]; j++)
			txr->tx_rsq[j] = QIDX_INVALID;
	}
}

static void
ixl_init_tx_cidx(struct ixl_vsi *vsi)
{
	struct ixl_tx_queue *tx_que;
	int i;

	for (i = 0, tx_que = vsi->tx_queues; i < vsi->num_tx_queues; i++, tx_que++) {
		struct tx_ring *txr = &tx_que->txr;

		txr->tx_cidx_processed = 0;
	}
}
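
/*
 * Background (illustrative note, not in the original sources): the two
 * helpers above match the two TX completion schemes selected by the
 * hw.ixl.enable_head_writeback tunable. With head writeback the hardware
 * writes its consumer index to host memory, so only tx_cidx_processed needs
 * resetting; with descriptor writeback the driver tracks report-status
 * slots in tx_rsq[], which must start out as QIDX_INVALID.
 */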

void
ixl_if_init(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = iflib_get_dev(ctx);
	u8 tmpaddr[ETHER_ADDR_LEN];
	int ret;

	/*
	 * If the aq is dead here, it probably means something outside of the
	 * driver did something to the adapter, like a PF reset.
	 * So rebuild the driver's state here if that occurs.
	 */
	if (!i40e_check_asq_alive(&pf->hw)) {
		device_printf(dev, "Admin Queue is down; resetting...\n");
		ixl_teardown_hw_structs(pf);
		ixl_reset(pf);
	}

	/* Get the latest mac address... User might use a LAA */
	bcopy(IF_LLADDR(vsi->ifp), tmpaddr, ETH_ALEN);
	if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
	    (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
		ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
		bcopy(tmpaddr, hw->mac.addr, ETH_ALEN);
		ret = i40e_aq_mac_address_write(hw,
		    I40E_AQC_WRITE_TYPE_LAA_ONLY,
		    hw->mac.addr, NULL);
		if (ret) {
			device_printf(dev, "LAA address change failed!!\n");
			return;
		}
		ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
	}

	iflib_set_mac(ctx, hw->mac.addr);

	/* Prepare the VSI: rings, hmc contexts, etc... */
	if (ixl_initialize_vsi(vsi)) {
		device_printf(dev, "initialize vsi failed!!\n");
		return;
	}

	// TODO: Call iflib setup multicast filters here?
	// It's called in ixgbe in D5213
	ixl_if_multi_set(ctx);

	/* Set up RSS */
	ixl_config_rss(pf);

	/* Set up MSI/X routing and the ITR settings */
	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
		ixl_configure_queue_intr_msix(pf);
		ixl_configure_itr(pf);
	} else
		ixl_configure_legacy(pf);

	if (vsi->enable_head_writeback)
		ixl_init_tx_cidx(vsi);
	else
		ixl_init_tx_rsqs(vsi);

	ixl_enable_rings(vsi);

	i40e_aq_set_default_vsi(hw, vsi->seid, NULL);

	ixl_reconfigure_filters(vsi);

#ifdef IXL_IW
	if (ixl_enable_iwarp && pf->iw_enabled) {
		ret = ixl_iw_pf_init(pf);
		if (ret)
			device_printf(dev,
			    "initialize iwarp failed, code %d\n", ret);
	}
#endif
}

void
ixl_if_stop(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;

	INIT_DEBUGOUT("ixl_if_stop: begin\n");

	// TODO: This may need to be reworked
#ifdef IXL_IW
	/* Stop iWARP device */
	if (ixl_enable_iwarp && pf->iw_enabled)
		ixl_iw_pf_stop(pf);
#endif

	ixl_disable_rings_intr(vsi);
	ixl_disable_rings(vsi);
}
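
/*
 * Vector layout sketch (illustrative, derived from the code below but not
 * part of the original sources): ixl_if_msix_intr_assign() reserves MSI-X
 * vector 0 (rid 1) for the admin queue and gives RX queue i vector i + 1
 * (rid i + 2). TX queues do not get vectors of their own; TX queue i shares
 * the interrupt of RX queue (i % num_rx_queues). For example, with 4 RX and
 * 8 TX queues:
 *
 *   vector 0 -> adminq
 *   vector 1 -> rxq0, txq0, txq4
 *   vector 2 -> rxq1, txq1, txq5
 *   ...
 */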

static int
ixl_if_msix_intr_assign(if_ctx_t ctx, int msix)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_rx_queue *rx_que = vsi->rx_queues;
	struct ixl_tx_queue *tx_que = vsi->tx_queues;
	int err, i, rid, vector = 0;
	char buf[16];

	/* Admin Queue must use vector 0 */
	rid = vector + 1;
	err = iflib_irq_alloc_generic(ctx, &vsi->irq, rid, IFLIB_INTR_ADMIN,
	    ixl_msix_adminq, pf, 0, "aq");
	if (err) {
		iflib_irq_free(ctx, &vsi->irq);
		device_printf(iflib_get_dev(ctx),
		    "Failed to register Admin Queue handler");
		return (err);
	}
	// TODO: Re-enable this at some point
	// iflib_softirq_alloc_generic(ctx, rid, IFLIB_INTR_IOV, pf, 0, "ixl_iov");

	/* Now set up the stations */
	for (i = 0, vector = 1; i < vsi->num_rx_queues; i++, vector++, rx_que++) {
		rid = vector + 1;

		snprintf(buf, sizeof(buf), "rxq%d", i);
		err = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
		    IFLIB_INTR_RX, ixl_msix_que, rx_que, rx_que->rxr.me, buf);
		/* XXX: Does the driver work as expected if there are fewer
		 * num_rx_queues than what's expected in the iflib context? */
		if (err) {
			device_printf(iflib_get_dev(ctx),
			    "Failed to allocate q int %d err: %d", i, err);
			vsi->num_rx_queues = i + 1;
			goto fail;
		}
		rx_que->msix = vector;
	}

	bzero(buf, sizeof(buf));

	for (i = 0; i < vsi->num_tx_queues; i++, tx_que++) {
		snprintf(buf, sizeof(buf), "txq%d", i);
		iflib_softirq_alloc_generic(ctx,
		    &vsi->rx_queues[i % vsi->num_rx_queues].que_irq,
		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);

		/* TODO: Maybe call a strategy function for this to figure out
		 * which interrupts to map Tx queues to. I don't know if there's
		 * an immediately better way than this other than a
		 * user-supplied map, though. */
		tx_que->msix = (i % vsi->num_rx_queues) + 1;
	}

	return (0);
fail:
	iflib_irq_free(ctx, &vsi->irq);
	rx_que = vsi->rx_queues;
	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
		iflib_irq_free(ctx, &rx_que->que_irq);
	return (err);
}

/*
 * Enable all interrupts
 *
 * Called in:
 * iflib_init_locked, after ixl_if_init()
 */
static void
ixl_if_enable_intr(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = vsi->hw;
	struct ixl_rx_queue *que = vsi->rx_queues;

	ixl_enable_intr0(hw);
	/* Enable queue interrupts */
	for (int i = 0; i < vsi->num_rx_queues; i++, que++)
		/* TODO: Queue index parameter is probably wrong */
		ixl_enable_queue(hw, que->rxr.me);
}

/*
 * Disable queue interrupts
 *
 * Other interrupt causes need to remain active.
 */
static void
ixl_if_disable_intr(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = vsi->hw;
	struct ixl_rx_queue *rx_que = vsi->rx_queues;

	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
		for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
			ixl_disable_queue(hw, rx_que->msix - 1);
	} else {
		// Set PFINT_LNKLST0 FIRSTQ_INDX to 0x7FF
		// stops queues from triggering interrupts
		wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
	}
}

static int
ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = vsi->hw;
	struct ixl_rx_queue *rx_que = &vsi->rx_queues[rxqid];

	ixl_enable_queue(hw, rx_que->msix - 1);
	return (0);
}

static int
ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = vsi->hw;
	struct ixl_tx_queue *tx_que = &vsi->tx_queues[txqid];

	ixl_enable_queue(hw, tx_que->msix - 1);

	return (0);
}

static int
ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	if_softc_ctx_t scctx = vsi->shared;
	struct ixl_tx_queue *que;
	// int i;
	int i, j, error = 0;

	MPASS(vsi->num_tx_queues > 0);
	MPASS(ntxqs == 1);
	MPASS(vsi->num_tx_queues == ntxqsets);

	/* Allocate queue structure memory */
	if (!(vsi->tx_queues =
	    (struct ixl_tx_queue *) malloc(sizeof(struct ixl_tx_queue) * ntxqsets, M_IXL, M_NOWAIT | M_ZERO))) {
		device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n");
		return (ENOMEM);
	}

	for (i = 0, que = vsi->tx_queues; i < ntxqsets; i++, que++) {
		struct tx_ring *txr = &que->txr;

		txr->me = i;
		que->vsi = vsi;

		if (!vsi->enable_head_writeback) {
			/* Allocate report status array */
			if (!(txr->tx_rsq = malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXL, M_NOWAIT))) {
				device_printf(iflib_get_dev(ctx), "failed to allocate tx_rsq memory\n");
				error = ENOMEM;
				goto fail;
			}
			/* Init report status array */
			for (j = 0; j < scctx->isc_ntxd[0]; j++)
				txr->tx_rsq[j] = QIDX_INVALID;
		}
		/* get the virtual and physical address of the hardware queues */
		txr->tail = I40E_QTX_TAIL(txr->me);
		txr->tx_base = (struct i40e_tx_desc *)vaddrs[i * ntxqs];
		txr->tx_paddr = paddrs[i * ntxqs];
		txr->que = que;
	}

	return (0);
fail:
	ixl_if_queues_free(ctx);
	return (error);
}

static int
ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_rx_queue *que;
	int i, error = 0;

	MPASS(vsi->num_rx_queues > 0);
	MPASS(nrxqs == 1);
	MPASS(vsi->num_rx_queues == nrxqsets);

	/* Allocate queue structure memory */
	if (!(vsi->rx_queues =
	    (struct ixl_rx_queue *) malloc(sizeof(struct ixl_rx_queue) *
	    nrxqsets, M_IXL, M_NOWAIT | M_ZERO))) {
		device_printf(iflib_get_dev(ctx), "Unable to allocate RX ring memory\n");
		error = ENOMEM;
		goto fail;
	}

	for (i = 0, que = vsi->rx_queues; i < nrxqsets; i++, que++) {
		struct rx_ring *rxr = &que->rxr;

		rxr->me = i;
		que->vsi = vsi;

		/* get the virtual and physical address of the hardware queues */
		rxr->tail = I40E_QRX_TAIL(rxr->me);
		rxr->rx_base = (union i40e_rx_desc *)vaddrs[i * nrxqs];
		rxr->rx_paddr = paddrs[i * nrxqs];
		rxr->que = que;
	}

	return (0);
fail:
	ixl_if_queues_free(ctx);
	return (error);
}

static void
ixl_if_queues_free(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;

	/* tx_rsq arrays only exist in descriptor-writeback mode */
	if (!vsi->enable_head_writeback) {
		struct ixl_tx_queue *que;
		int i = 0;

		for (i = 0, que = vsi->tx_queues; i < vsi->num_tx_queues; i++, que++) {
			struct tx_ring *txr = &que->txr;
			if (txr->tx_rsq != NULL) {
				free(txr->tx_rsq, M_IXL);
				txr->tx_rsq = NULL;
			}
		}
	}

	if (vsi->tx_queues != NULL) {
		free(vsi->tx_queues, M_IXL);
		vsi->tx_queues = NULL;
	}
	if (vsi->rx_queues != NULL) {
		free(vsi->rx_queues, M_IXL);
		vsi->rx_queues = NULL;
	}
}

void
ixl_update_link_status(struct ixl_pf *pf)
{
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = &pf->hw;
	u64 baudrate;

	if (pf->link_up) {
		if (vsi->link_active == FALSE) {
			vsi->link_active = TRUE;
			baudrate = ixl_max_aq_speed_to_value(hw->phy.link_info.link_speed);
			iflib_link_state_change(vsi->ctx, LINK_STATE_UP, baudrate);
			ixl_link_up_msg(pf);
#ifdef PCI_IOV
			ixl_broadcast_link_state(pf);
#endif

		}
	} else { /* Link down */
		if (vsi->link_active == TRUE) {
			vsi->link_active = FALSE;
			iflib_link_state_change(vsi->ctx, LINK_STATE_DOWN, 0);
#ifdef PCI_IOV
			ixl_broadcast_link_state(pf);
#endif
		}
	}
}

static int
ixl_process_adminq(struct ixl_pf *pf, u16 *pending)
{
	enum i40e_status_code status = I40E_SUCCESS;
	struct i40e_arq_event_info event;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	u16 opcode;
	u32 loop = 0, reg;

	event.buf_len = IXL_AQ_BUF_SZ;
	event.msg_buf = malloc(event.buf_len, M_IXL, M_NOWAIT | M_ZERO);
	if (!event.msg_buf) {
		device_printf(dev, "%s: Unable to allocate memory for Admin"
		    " Queue event!\n", __func__);
		return (ENOMEM);
	}

	/* clean and process any events */
	do {
		status = i40e_clean_arq_element(hw, &event, pending);
		if (status)
			break;
		opcode = LE16_TO_CPU(event.desc.opcode);
		ixl_dbg(pf, IXL_DBG_AQ,
		    "Admin Queue event: %#06x\n", opcode);
		switch (opcode) {
		case i40e_aqc_opc_get_link_status:
			ixl_link_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_pf:
#ifdef PCI_IOV
			ixl_handle_vf_msg(pf, &event);
#endif
			break;
		/*
		 * This should only occur on no-drop queues, which
		 * aren't currently configured.
		 */
		case i40e_aqc_opc_event_lan_overflow:
			device_printf(dev, "LAN overflow event\n");
			break;
		default:
			break;
		}
	} while (*pending && (loop++ < IXL_ADM_LIMIT));

	free(event.msg_buf, M_IXL);

	/* Re-enable admin queue interrupt cause */
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	return (status);
}

static void
ixl_if_update_admin_status(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct i40e_hw *hw = &pf->hw;
	u16 pending;

	if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING)
		ixl_handle_empr_reset(pf);

	if (pf->state & IXL_PF_STATE_MDD_PENDING)
		ixl_handle_mdd_event(pf);

#ifdef PCI_IOV
	if (pf->state & IXL_PF_STATE_VF_RESET_REQ)
		iflib_iov_intr_deferred(ctx);
#endif

	ixl_process_adminq(pf, &pending);
	ixl_update_link_status(pf);

	/*
	 * If there are still messages to process, reschedule ourselves.
	 * Otherwise, re-enable our interrupt and go to sleep.
	 */
	if (pending > 0)
		iflib_admin_intr_deferred(ctx);
	else
		ixl_enable_intr0(hw);
}

static void
ixl_if_multi_set(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = vsi->hw;
	int mcnt = 0, flags;

	IOCTL_DEBUGOUT("ixl_if_multi_set: begin");

	mcnt = if_multiaddr_count(iflib_get_ifp(ctx), MAX_MULTICAST_ADDR);
	/* delete existing MC filters */
	ixl_del_multi(vsi);

	if (__predict_false(mcnt == MAX_MULTICAST_ADDR)) {
		i40e_aq_set_vsi_multicast_promiscuous(hw,
		    vsi->seid, TRUE, NULL);
		return;
	}
	/* (re-)install filters for all mcast addresses */
	mcnt = if_multi_apply(iflib_get_ifp(ctx), ixl_mc_filter_apply, vsi);

	if (mcnt > 0) {
		flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
		ixl_add_hw_filters(vsi, flags, mcnt);
	}

	IOCTL_DEBUGOUT("ixl_if_multi_set: end");
}

static int
ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;

	IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
	if (mtu > IXL_MAX_FRAME - ETHER_HDR_LEN - ETHER_CRC_LEN -
	    ETHER_VLAN_ENCAP_LEN)
		return (EINVAL);

	vsi->shared->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
	    ETHER_VLAN_ENCAP_LEN;

	return (0);
}
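
/*
 * Worked example (illustrative, not in the original sources):
 * ixl_if_mtu_set() above sizes frames as the MTU plus the Ethernet
 * overhead ETHER_HDR_LEN (14) + ETHER_CRC_LEN (4) + ETHER_VLAN_ENCAP_LEN
 * (4), so a 9000-byte MTU yields an isc_max_frame_size of 9022 bytes, and
 * the largest MTU accepted is IXL_MAX_FRAME - 22.
 */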

static void
ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct i40e_hw *hw = &pf->hw;

	INIT_DEBUGOUT("ixl_media_status: begin");

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!pf->link_up) {
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	/* Hardware is always full-duplex */
	ifmr->ifm_active |= IFM_FDX;

	switch (hw->phy.link_info.phy_type) {
		/* 100 M */
		case I40E_PHY_TYPE_100BASE_TX:
			ifmr->ifm_active |= IFM_100_TX;
			break;
		/* 1 G */
		case I40E_PHY_TYPE_1000BASE_T:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		case I40E_PHY_TYPE_1000BASE_SX:
			ifmr->ifm_active |= IFM_1000_SX;
			break;
		case I40E_PHY_TYPE_1000BASE_LX:
			ifmr->ifm_active |= IFM_1000_LX;
			break;
		case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		/* 10 G */
		case I40E_PHY_TYPE_10GBASE_SFPP_CU:
			ifmr->ifm_active |= IFM_10G_TWINAX;
			break;
		case I40E_PHY_TYPE_10GBASE_SR:
			ifmr->ifm_active |= IFM_10G_SR;
			break;
		case I40E_PHY_TYPE_10GBASE_LR:
			ifmr->ifm_active |= IFM_10G_LR;
			break;
		case I40E_PHY_TYPE_10GBASE_T:
			ifmr->ifm_active |= IFM_10G_T;
			break;
		case I40E_PHY_TYPE_XAUI:
		case I40E_PHY_TYPE_XFI:
			ifmr->ifm_active |= IFM_10G_TWINAX;
			break;
		case I40E_PHY_TYPE_10GBASE_AOC:
			ifmr->ifm_active |= IFM_10G_AOC;
			break;
		/* 25 G */
		case I40E_PHY_TYPE_25GBASE_KR:
			ifmr->ifm_active |= IFM_25G_KR;
			break;
		case I40E_PHY_TYPE_25GBASE_CR:
			ifmr->ifm_active |= IFM_25G_CR;
			break;
		case I40E_PHY_TYPE_25GBASE_SR:
			ifmr->ifm_active |= IFM_25G_SR;
			break;
		case I40E_PHY_TYPE_25GBASE_LR:
			ifmr->ifm_active |= IFM_25G_LR;
			break;
		case I40E_PHY_TYPE_25GBASE_AOC:
			ifmr->ifm_active |= IFM_25G_AOC;
			break;
		case I40E_PHY_TYPE_25GBASE_ACC:
			ifmr->ifm_active |= IFM_25G_ACC;
			break;
		/* 40 G */
		case I40E_PHY_TYPE_40GBASE_CR4:
		case I40E_PHY_TYPE_40GBASE_CR4_CU:
			ifmr->ifm_active |= IFM_40G_CR4;
			break;
		case I40E_PHY_TYPE_40GBASE_SR4:
			ifmr->ifm_active |= IFM_40G_SR4;
			break;
		case I40E_PHY_TYPE_40GBASE_LR4:
			ifmr->ifm_active |= IFM_40G_LR4;
			break;
		case I40E_PHY_TYPE_XLAUI:
			ifmr->ifm_active |= IFM_OTHER;
			break;
		case I40E_PHY_TYPE_1000BASE_KX:
			ifmr->ifm_active |= IFM_1000_KX;
			break;
		case I40E_PHY_TYPE_SGMII:
			ifmr->ifm_active |= IFM_1000_SGMII;
			break;
		/* ERJ: What's the difference between these? */
		case I40E_PHY_TYPE_10GBASE_CR1_CU:
		case I40E_PHY_TYPE_10GBASE_CR1:
			ifmr->ifm_active |= IFM_10G_CR1;
			break;
		case I40E_PHY_TYPE_10GBASE_KX4:
			ifmr->ifm_active |= IFM_10G_KX4;
			break;
		case I40E_PHY_TYPE_10GBASE_KR:
			ifmr->ifm_active |= IFM_10G_KR;
			break;
		case I40E_PHY_TYPE_SFI:
			ifmr->ifm_active |= IFM_10G_SFI;
			break;
		/* Our single 20G media type */
		case I40E_PHY_TYPE_20GBASE_KR2:
			ifmr->ifm_active |= IFM_20G_KR2;
			break;
		case I40E_PHY_TYPE_40GBASE_KR4:
			ifmr->ifm_active |= IFM_40G_KR4;
			break;
		case I40E_PHY_TYPE_XLPPI:
		case I40E_PHY_TYPE_40GBASE_AOC:
			ifmr->ifm_active |= IFM_40G_XLPPI;
			break;
		/* Unknown to driver */
		default:
			ifmr->ifm_active |= IFM_UNKNOWN;
			break;
	}
	/* Report flow control status as well */
	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
}

static int
ixl_if_media_change(if_ctx_t ctx)
{
	struct ifmedia *ifm = iflib_get_media(ctx);

	INIT_DEBUGOUT("ixl_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	if_printf(iflib_get_ifp(ctx), "Media change is not supported.\n");
	return (ENODEV);
}

static int
ixl_if_promisc_set(if_ctx_t ctx, int flags)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct ifnet *ifp = iflib_get_ifp(ctx);
	struct i40e_hw *hw = vsi->hw;
	int err;
	bool uni = FALSE, multi = FALSE;

	if (flags & IFF_PROMISC)
		uni = multi = TRUE;
	else if (flags & IFF_ALLMULTI ||
	    if_multiaddr_count(ifp, MAX_MULTICAST_ADDR) == MAX_MULTICAST_ADDR)
		multi = TRUE;

	err = i40e_aq_set_vsi_unicast_promiscuous(hw,
	    vsi->seid, uni, NULL, true);
	if (err)
		return (err);
	err = i40e_aq_set_vsi_multicast_promiscuous(hw,
	    vsi->seid, multi, NULL);
	return (err);
}

static void
ixl_if_timer(if_ctx_t ctx, uint16_t qid)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	//struct i40e_hw *hw = &pf->hw;
	//struct ixl_tx_queue *que = &vsi->tx_queues[qid];
#if 0
	u32 mask;

	/*
	** Check status of the queues
	*/
	mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
	    I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);

	/* If queue param has outstanding work, trigger sw irq */
	// TODO: TX queues in iflib don't use HW interrupts; does this do anything?
	if (que->busy)
		wr32(hw, I40E_PFINT_DYN_CTLN(que->txr.me), mask);
#endif

	if (qid != 0)
		return;

	/* Fire off the adminq task */
	iflib_admin_intr_deferred(ctx);

	/* Update stats */
	ixl_update_stats_counters(pf);
}

static void
ixl_if_vlan_register(if_ctx_t ctx, u16 vtag)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = vsi->hw;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	++vsi->num_vlans;
	ixl_add_filter(vsi, hw->mac.addr, vtag);
}

static void
ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = vsi->hw;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	--vsi->num_vlans;
	ixl_del_filter(vsi, hw->mac.addr, vtag);
}

static uint64_t
ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	if_t ifp = iflib_get_ifp(ctx);

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (vsi->ipackets);
	case IFCOUNTER_IERRORS:
		return (vsi->ierrors);
	case IFCOUNTER_OPACKETS:
		return (vsi->opackets);
	case IFCOUNTER_OERRORS:
		return (vsi->oerrors);
	case IFCOUNTER_COLLISIONS:
		/* Collisions are by standard impossible in 40G/10G Ethernet */
		return (0);
	case IFCOUNTER_IBYTES:
		return (vsi->ibytes);
	case IFCOUNTER_OBYTES:
		return (vsi->obytes);
	case IFCOUNTER_IMCASTS:
		return (vsi->imcasts);
	case IFCOUNTER_OMCASTS:
		return (vsi->omcasts);
	case IFCOUNTER_IQDROPS:
		return (vsi->iqdrops);
	case IFCOUNTER_OQDROPS:
		return (vsi->oqdrops);
	case IFCOUNTER_NOPROTO:
		return (vsi->noproto);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}

static void
ixl_if_vflr_handle(if_ctx_t ctx)
{
	IXL_DEV_ERR(iflib_get_dev(ctx), "");

	// TODO: call ixl_handle_vflr()
}

static int
ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);

	if (pf->read_i2c_byte == NULL)
		return (EINVAL);

	for (int i = 0; i < req->len; i++)
		if (pf->read_i2c_byte(pf, req->offset + i,
		    req->dev_addr, &req->data[i]))
			return (EIO);
	return (0);
}

static int
ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ifdrv *ifd = (struct ifdrv *)data;
	int error = 0;

	/* NVM update command */
	if (ifd->ifd_cmd == I40E_NVM_ACCESS)
		error = ixl_handle_nvmupd_cmd(pf, ifd);
	else
		error = EINVAL;

	return (error);
}

static int
ixl_mc_filter_apply(void *arg, struct ifmultiaddr *ifma, int count __unused)
{
	struct ixl_vsi *vsi = arg;

	if (ifma->ifma_addr->sa_family != AF_LINK)
		return (0);
	ixl_add_mc_filter(vsi,
	    (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
	return (1);
}
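
/*
 * Usage note (illustrative, not in the original sources):
 * ixl_mc_filter_apply() above is the callback handed to if_multi_apply()
 * by ixl_if_multi_set(). Returning 1 for each AF_LINK address makes
 * if_multi_apply()'s return value the number of filters staged, which
 * ixl_if_multi_set() then passes to ixl_add_hw_filters() as the count.
 */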

/*
 * Sanity check and save off tunable values.
 */
static void
ixl_save_pf_tunables(struct ixl_pf *pf)
{
	device_t dev = pf->dev;

	/* Save tunable information */
	pf->enable_tx_fc_filter = ixl_enable_tx_fc_filter;
	pf->dbg_mask = ixl_core_debug_mask;
	pf->hw.debug_mask = ixl_shared_debug_mask;
	pf->vsi.enable_head_writeback = !!(ixl_enable_head_writeback);
#if 0
	pf->dynamic_rx_itr = ixl_dynamic_rx_itr;
	pf->dynamic_tx_itr = ixl_dynamic_tx_itr;
#endif

	if (ixl_i2c_access_method > 3 || ixl_i2c_access_method < 0)
		pf->i2c_access_method = 0;
	else
		pf->i2c_access_method = ixl_i2c_access_method;

	if (ixl_tx_itr < 0 || ixl_tx_itr > IXL_MAX_ITR) {
		device_printf(dev, "Invalid tx_itr value of %d set!\n",
		    ixl_tx_itr);
		device_printf(dev, "tx_itr must be between %d and %d, "
		    "inclusive\n",
		    0, IXL_MAX_ITR);
		device_printf(dev, "Using default value of %d instead\n",
		    IXL_ITR_4K);
		pf->tx_itr = IXL_ITR_4K;
	} else
		pf->tx_itr = ixl_tx_itr;

	if (ixl_rx_itr < 0 || ixl_rx_itr > IXL_MAX_ITR) {
		device_printf(dev, "Invalid rx_itr value of %d set!\n",
		    ixl_rx_itr);
		device_printf(dev, "rx_itr must be between %d and %d, "
		    "inclusive\n",
		    0, IXL_MAX_ITR);
		device_printf(dev, "Using default value of %d instead\n",
		    IXL_ITR_8K);
		pf->rx_itr = IXL_ITR_8K;
	} else
		pf->rx_itr = ixl_rx_itr;
}