1 /****************************************************************************** 2 3 Copyright (c) 2013-2018, Intel Corporation 4 All rights reserved. 5 6 Redistribution and use in source and binary forms, with or without 7 modification, are permitted provided that the following conditions are met: 8 9 1. Redistributions of source code must retain the above copyright notice, 10 this list of conditions and the following disclaimer. 11 12 2. Redistributions in binary form must reproduce the above copyright 13 notice, this list of conditions and the following disclaimer in the 14 documentation and/or other materials provided with the distribution. 15 16 3. Neither the name of the Intel Corporation nor the names of its 17 contributors may be used to endorse or promote products derived from 18 this software without specific prior written permission. 19 20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 POSSIBILITY OF SUCH DAMAGE. 31 32 ******************************************************************************/ 33 /*$FreeBSD$*/ 34 35 #include "ixl.h" 36 #include "ixl_pf.h" 37 38 #ifdef IXL_IW 39 #include "ixl_iw.h" 40 #include "ixl_iw_int.h" 41 #endif 42 43 #ifdef PCI_IOV 44 #include "ixl_pf_iov.h" 45 #endif 46 47 /********************************************************************* 48 * Driver version 49 *********************************************************************/ 50 #define IXL_DRIVER_VERSION_MAJOR 2 51 #define IXL_DRIVER_VERSION_MINOR 0 52 #define IXL_DRIVER_VERSION_BUILD 0 53 54 #define IXL_DRIVER_VERSION_STRING \ 55 __XSTRING(IXL_DRIVER_VERSION_MAJOR) "." \ 56 __XSTRING(IXL_DRIVER_VERSION_MINOR) "." \ 57 __XSTRING(IXL_DRIVER_VERSION_BUILD) "-k" 58 59 /********************************************************************* 60 * PCI Device ID Table 61 * 62 * Used by probe to select devices to load on 63 * 64 * ( Vendor ID, Device ID, Branding String ) 65 *********************************************************************/ 66 67 static pci_vendor_info_t ixl_vendor_info_array[] = 68 { 69 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, "Intel(R) Ethernet Controller X710 for 10GbE SFP+"), 70 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, "Intel(R) Ethernet Controller XL710 for 40GbE backplane"), 71 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, "Intel(R) Ethernet Controller X710 for 10GbE backplane"), 72 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"), 73 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"), 74 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, "Intel(R) Ethernet Controller X710 for 10GbE QSFP+"), 75 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, "Intel(R) Ethernet Controller X710 for 10GBASE-T"), 76 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, "Intel(R) Ethernet Controller X710/X557-AT 10GBASE-T"), 77 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722, "Intel(R) Ethernet Connection X722 for 10GbE backplane"), 78 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE QSFP+"), 79 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"), 80 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 1GbE"), 81 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 10GBASE-T"), 82 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"), 83 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B, "Intel(R) Ethernet Controller XXV710 for 25GbE backplane"), 84 PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28, "Intel(R) Ethernet Controller XXV710 for 25GbE SFP28"), 85 /* required last entry */ 86 PVID_END 87 }; 88 89 /********************************************************************* 90 * Function prototypes 91 *********************************************************************/ 92 /*** IFLIB interface ***/ 93 static void *ixl_register(device_t dev); 94 static int ixl_if_attach_pre(if_ctx_t ctx); 95 static int ixl_if_attach_post(if_ctx_t ctx); 96 static int ixl_if_detach(if_ctx_t ctx); 97 static int ixl_if_shutdown(if_ctx_t ctx); 98 static int ixl_if_suspend(if_ctx_t ctx); 99 static int ixl_if_resume(if_ctx_t ctx); 100 static int ixl_if_msix_intr_assign(if_ctx_t ctx, int msix); 101 static void ixl_if_enable_intr(if_ctx_t ctx); 102 static void ixl_if_disable_intr(if_ctx_t ctx); 103 static int ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid); 104 static int ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid); 105 static int ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets); 106 static int ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets); 107 static void ixl_if_queues_free(if_ctx_t ctx); 108 static void ixl_if_update_admin_status(if_ctx_t ctx); 109 static void ixl_if_multi_set(if_ctx_t ctx); 110 static int ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu); 111 static void ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr); 112 static int ixl_if_media_change(if_ctx_t ctx); 113 static int ixl_if_promisc_set(if_ctx_t ctx, int flags); 114 static void ixl_if_timer(if_ctx_t ctx, uint16_t qid); 115 static void ixl_if_vlan_register(if_ctx_t ctx, u16 vtag); 116 static void ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag); 117 static uint64_t ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt); 118 static void ixl_if_vflr_handle(if_ctx_t ctx); 119 // static void ixl_if_link_intr_enable(if_ctx_t ctx); 120 static int ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req); 121 static int ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data); 122 123 /*** Other ***/ 124 static int ixl_mc_filter_apply(void *arg, struct ifmultiaddr *ifma, int); 125 static void ixl_save_pf_tunables(struct ixl_pf *); 126 static int ixl_allocate_pci_resources(struct ixl_pf *); 127 128 /********************************************************************* 129 * FreeBSD Device Interface Entry Points 130 *********************************************************************/ 131 132 static device_method_t ixl_methods[] = { 133 /* Device interface */ 134 DEVMETHOD(device_register, ixl_register), 135 DEVMETHOD(device_probe, iflib_device_probe), 136 DEVMETHOD(device_attach, iflib_device_attach), 137 DEVMETHOD(device_detach, iflib_device_detach), 138 DEVMETHOD(device_shutdown, iflib_device_shutdown), 139 #ifdef PCI_IOV 140 DEVMETHOD(pci_iov_init, ixl_iov_init), 141 DEVMETHOD(pci_iov_uninit, ixl_iov_uninit), 142 DEVMETHOD(pci_iov_add_vf, ixl_add_vf), 143 #endif 144 DEVMETHOD_END 145 }; 146 147 static driver_t ixl_driver = { 148 "ixl", ixl_methods, sizeof(struct ixl_pf), 149 }; 150 151 devclass_t ixl_devclass; 152 DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0); 153 MODULE_VERSION(ixl, 3); 154 155 MODULE_DEPEND(ixl, pci, 1, 1, 1); 156 MODULE_DEPEND(ixl, ether, 1, 1, 1); 157 MODULE_DEPEND(ixl, iflib, 1, 1, 1); 158 159 static device_method_t ixl_if_methods[] = { 160 DEVMETHOD(ifdi_attach_pre, ixl_if_attach_pre), 161 DEVMETHOD(ifdi_attach_post, ixl_if_attach_post), 162 DEVMETHOD(ifdi_detach, ixl_if_detach), 163 DEVMETHOD(ifdi_shutdown, ixl_if_shutdown), 164 DEVMETHOD(ifdi_suspend, ixl_if_suspend), 165 DEVMETHOD(ifdi_resume, ixl_if_resume), 166 DEVMETHOD(ifdi_init, ixl_if_init), 167 DEVMETHOD(ifdi_stop, ixl_if_stop), 168 DEVMETHOD(ifdi_msix_intr_assign, ixl_if_msix_intr_assign), 169 DEVMETHOD(ifdi_intr_enable, ixl_if_enable_intr), 170 DEVMETHOD(ifdi_intr_disable, ixl_if_disable_intr), 171 //DEVMETHOD(ifdi_link_intr_enable, ixl_if_link_intr_enable), 172 DEVMETHOD(ifdi_rx_queue_intr_enable, ixl_if_rx_queue_intr_enable), 173 DEVMETHOD(ifdi_tx_queue_intr_enable, ixl_if_tx_queue_intr_enable), 174 DEVMETHOD(ifdi_tx_queues_alloc, ixl_if_tx_queues_alloc), 175 DEVMETHOD(ifdi_rx_queues_alloc, ixl_if_rx_queues_alloc), 176 DEVMETHOD(ifdi_queues_free, ixl_if_queues_free), 177 DEVMETHOD(ifdi_update_admin_status, ixl_if_update_admin_status), 178 DEVMETHOD(ifdi_multi_set, ixl_if_multi_set), 179 DEVMETHOD(ifdi_mtu_set, ixl_if_mtu_set), 180 DEVMETHOD(ifdi_media_status, ixl_if_media_status), 181 DEVMETHOD(ifdi_media_change, ixl_if_media_change), 182 DEVMETHOD(ifdi_promisc_set, ixl_if_promisc_set), 183 DEVMETHOD(ifdi_timer, ixl_if_timer), 184 DEVMETHOD(ifdi_vlan_register, ixl_if_vlan_register), 185 DEVMETHOD(ifdi_vlan_unregister, ixl_if_vlan_unregister), 186 DEVMETHOD(ifdi_get_counter, ixl_if_get_counter), 187 DEVMETHOD(ifdi_vflr_handle, ixl_if_vflr_handle), 188 DEVMETHOD(ifdi_i2c_req, ixl_if_i2c_req), 189 DEVMETHOD(ifdi_priv_ioctl, ixl_if_priv_ioctl), 190 // ifdi_led_func 191 // ifdi_debug 192 DEVMETHOD_END 193 }; 194 195 static driver_t ixl_if_driver = { 196 "ixl_if", ixl_if_methods, sizeof(struct ixl_pf) 197 }; 198 199 /* 200 ** TUNEABLE PARAMETERS: 201 */ 202 203 static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD, 0, 204 "IXL driver parameters"); 205 206 /* 207 * Leave this on unless you need to send flow control 208 * frames (or other control frames) from software 209 */ 210 static int ixl_enable_tx_fc_filter = 1; 211 TUNABLE_INT("hw.ixl.enable_tx_fc_filter", 212 &ixl_enable_tx_fc_filter); 213 SYSCTL_INT(_hw_ixl, OID_AUTO, enable_tx_fc_filter, CTLFLAG_RDTUN, 214 &ixl_enable_tx_fc_filter, 0, 215 "Filter out packets with Ethertype 0x8808 from being sent out by non-HW sources"); 216 217 static int ixl_i2c_access_method = 0; 218 TUNABLE_INT("hw.ixl.i2c_access_method", 219 &ixl_i2c_access_method); 220 SYSCTL_INT(_hw_ixl, OID_AUTO, i2c_access_method, CTLFLAG_RDTUN, 221 &ixl_i2c_access_method, 0, 222 IXL_SYSCTL_HELP_I2C_METHOD); 223 224 /* 225 * Different method for processing TX descriptor 226 * completion. 227 */ 228 static int ixl_enable_head_writeback = 1; 229 TUNABLE_INT("hw.ixl.enable_head_writeback", 230 &ixl_enable_head_writeback); 231 SYSCTL_INT(_hw_ixl, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN, 232 &ixl_enable_head_writeback, 0, 233 "For detecting last completed TX descriptor by hardware, use value written by HW instead of checking descriptors"); 234 235 static int ixl_core_debug_mask = 0; 236 TUNABLE_INT("hw.ixl.core_debug_mask", 237 &ixl_core_debug_mask); 238 SYSCTL_INT(_hw_ixl, OID_AUTO, core_debug_mask, CTLFLAG_RDTUN, 239 &ixl_core_debug_mask, 0, 240 "Display debug statements that are printed in non-shared code"); 241 242 static int ixl_shared_debug_mask = 0; 243 TUNABLE_INT("hw.ixl.shared_debug_mask", 244 &ixl_shared_debug_mask); 245 SYSCTL_INT(_hw_ixl, OID_AUTO, shared_debug_mask, CTLFLAG_RDTUN, 246 &ixl_shared_debug_mask, 0, 247 "Display debug statements that are printed in shared code"); 248 249 #if 0 250 /* 251 ** Controls for Interrupt Throttling 252 ** - true/false for dynamic adjustment 253 ** - default values for static ITR 254 */ 255 static int ixl_dynamic_rx_itr = 0; 256 TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr); 257 SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN, 258 &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate"); 259 260 static int ixl_dynamic_tx_itr = 0; 261 TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr); 262 SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN, 263 &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate"); 264 #endif 265 266 static int ixl_rx_itr = IXL_ITR_8K; 267 TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr); 268 SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN, 269 &ixl_rx_itr, 0, "RX Interrupt Rate"); 270 271 static int ixl_tx_itr = IXL_ITR_4K; 272 TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr); 273 SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN, 274 &ixl_tx_itr, 0, "TX Interrupt Rate"); 275 276 #ifdef IXL_IW 277 int ixl_enable_iwarp = 0; 278 TUNABLE_INT("hw.ixl.enable_iwarp", &ixl_enable_iwarp); 279 SYSCTL_INT(_hw_ixl, OID_AUTO, enable_iwarp, CTLFLAG_RDTUN, 280 &ixl_enable_iwarp, 0, "iWARP enabled"); 281 282 #if __FreeBSD_version < 1100000 283 int ixl_limit_iwarp_msix = 1; 284 #else 285 int ixl_limit_iwarp_msix = IXL_IW_MAX_MSIX; 286 #endif 287 TUNABLE_INT("hw.ixl.limit_iwarp_msix", &ixl_limit_iwarp_msix); 288 SYSCTL_INT(_hw_ixl, OID_AUTO, limit_iwarp_msix, CTLFLAG_RDTUN, 289 &ixl_limit_iwarp_msix, 0, "Limit MSIX vectors assigned to iWARP"); 290 #endif 291 292 extern struct if_txrx ixl_txrx_hwb; 293 extern struct if_txrx ixl_txrx_dwb; 294 295 static struct if_shared_ctx ixl_sctx_init = { 296 .isc_magic = IFLIB_MAGIC, 297 .isc_q_align = PAGE_SIZE, 298 .isc_tx_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header), 299 .isc_tx_maxsegsize = IXL_MAX_DMA_SEG_SIZE, 300 .isc_tso_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header), 301 .isc_tso_maxsegsize = IXL_MAX_DMA_SEG_SIZE, 302 .isc_rx_maxsize = 16384, 303 .isc_rx_nsegments = IXL_MAX_RX_SEGS, 304 .isc_rx_maxsegsize = IXL_MAX_DMA_SEG_SIZE, 305 .isc_nfl = 1, 306 .isc_ntxqs = 1, 307 .isc_nrxqs = 1, 308 309 .isc_admin_intrcnt = 1, 310 .isc_vendor_info = ixl_vendor_info_array, 311 .isc_driver_version = IXL_DRIVER_VERSION_STRING, 312 .isc_driver = &ixl_if_driver, 313 .isc_flags = IFLIB_NEED_SCRATCH | IFLIB_NEED_ZERO_CSUM | IFLIB_ADMIN_ALWAYS_RUN, 314 315 .isc_nrxd_min = {IXL_MIN_RING}, 316 .isc_ntxd_min = {IXL_MIN_RING}, 317 .isc_nrxd_max = {IXL_MAX_RING}, 318 .isc_ntxd_max = {IXL_MAX_RING}, 319 .isc_nrxd_default = {IXL_DEFAULT_RING}, 320 .isc_ntxd_default = {IXL_DEFAULT_RING}, 321 }; 322 323 if_shared_ctx_t ixl_sctx = &ixl_sctx_init; 324 325 /*** Functions ***/ 326 static void * 327 ixl_register(device_t dev) 328 { 329 return (ixl_sctx); 330 } 331 332 static int 333 ixl_allocate_pci_resources(struct ixl_pf *pf) 334 { 335 int rid; 336 struct i40e_hw *hw = &pf->hw; 337 device_t dev = iflib_get_dev(pf->vsi.ctx); 338 339 /* Map BAR0 */ 340 rid = PCIR_BAR(0); 341 pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 342 &rid, RF_ACTIVE); 343 344 if (!(pf->pci_mem)) { 345 device_printf(dev, "Unable to allocate bus resource: PCI memory\n"); 346 return (ENXIO); 347 } 348 349 /* Save off the PCI information */ 350 hw->vendor_id = pci_get_vendor(dev); 351 hw->device_id = pci_get_device(dev); 352 hw->revision_id = pci_read_config(dev, PCIR_REVID, 1); 353 hw->subsystem_vendor_id = 354 pci_read_config(dev, PCIR_SUBVEND_0, 2); 355 hw->subsystem_device_id = 356 pci_read_config(dev, PCIR_SUBDEV_0, 2); 357 358 hw->bus.device = pci_get_slot(dev); 359 hw->bus.func = pci_get_function(dev); 360 361 /* Save off register access information */ 362 pf->osdep.mem_bus_space_tag = 363 rman_get_bustag(pf->pci_mem); 364 pf->osdep.mem_bus_space_handle = 365 rman_get_bushandle(pf->pci_mem); 366 pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem); 367 pf->osdep.flush_reg = I40E_GLGEN_STAT; 368 pf->osdep.dev = dev; 369 370 pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle; 371 pf->hw.back = &pf->osdep; 372 373 return (0); 374 } 375 376 static int 377 ixl_if_attach_pre(if_ctx_t ctx) 378 { 379 device_t dev; 380 struct ixl_pf *pf; 381 struct i40e_hw *hw; 382 struct ixl_vsi *vsi; 383 if_softc_ctx_t scctx; 384 struct i40e_filter_control_settings filter; 385 enum i40e_status_code status; 386 int error = 0; 387 388 INIT_DEBUGOUT("ixl_if_attach_pre: begin"); 389 390 /* Allocate, clear, and link in our primary soft structure */ 391 dev = iflib_get_dev(ctx); 392 pf = iflib_get_softc(ctx); 393 vsi = &pf->vsi; 394 vsi->back = pf; 395 pf->dev = dev; 396 hw = &pf->hw; 397 398 /* 399 ** Note this assumes we have a single embedded VSI, 400 ** this could be enhanced later to allocate multiple 401 */ 402 //vsi->dev = pf->dev; 403 vsi->hw = &pf->hw; 404 vsi->id = 0; 405 vsi->num_vlans = 0; 406 vsi->ctx = ctx; 407 vsi->media = iflib_get_media(ctx); 408 vsi->shared = scctx = iflib_get_softc_ctx(ctx); 409 410 /* Save tunable values */ 411 ixl_save_pf_tunables(pf); 412 413 /* Do PCI setup - map BAR0, etc */ 414 if (ixl_allocate_pci_resources(pf)) { 415 device_printf(dev, "Allocation of PCI resources failed\n"); 416 error = ENXIO; 417 goto err_pci_res; 418 } 419 420 /* Establish a clean starting point */ 421 i40e_clear_hw(hw); 422 status = i40e_pf_reset(hw); 423 if (status) { 424 device_printf(dev, "PF reset failure %s\n", 425 i40e_stat_str(hw, status)); 426 error = EIO; 427 goto err_out; 428 } 429 430 /* Initialize the shared code */ 431 status = i40e_init_shared_code(hw); 432 if (status) { 433 device_printf(dev, "Unable to initialize shared code, error %s\n", 434 i40e_stat_str(hw, status)); 435 error = EIO; 436 goto err_out; 437 } 438 439 /* Set up the admin queue */ 440 hw->aq.num_arq_entries = IXL_AQ_LEN; 441 hw->aq.num_asq_entries = IXL_AQ_LEN; 442 hw->aq.arq_buf_size = IXL_AQ_BUF_SZ; 443 hw->aq.asq_buf_size = IXL_AQ_BUF_SZ; 444 445 status = i40e_init_adminq(hw); 446 if (status != 0 && status != I40E_ERR_FIRMWARE_API_VERSION) { 447 device_printf(dev, "Unable to initialize Admin Queue, error %s\n", 448 i40e_stat_str(hw, status)); 449 error = EIO; 450 goto err_out; 451 } 452 ixl_print_nvm_version(pf); 453 454 if (status == I40E_ERR_FIRMWARE_API_VERSION) { 455 device_printf(dev, "The driver for the device stopped " 456 "because the NVM image is newer than expected.\n"); 457 device_printf(dev, "You must install the most recent version of " 458 "the network driver.\n"); 459 error = EIO; 460 goto err_out; 461 } 462 463 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && 464 hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw)) { 465 device_printf(dev, "The driver for the device detected " 466 "a newer version of the NVM image than expected.\n"); 467 device_printf(dev, "Please install the most recent version " 468 "of the network driver.\n"); 469 } else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4) { 470 device_printf(dev, "The driver for the device detected " 471 "an older version of the NVM image than expected.\n"); 472 device_printf(dev, "Please update the NVM image.\n"); 473 } 474 475 /* Clear PXE mode */ 476 i40e_clear_pxe_mode(hw); 477 478 /* Get capabilities from the device */ 479 error = ixl_get_hw_capabilities(pf); 480 if (error) { 481 device_printf(dev, "get_hw_capabilities failed: %d\n", 482 error); 483 goto err_get_cap; 484 } 485 486 /* Set up host memory cache */ 487 status = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, 488 hw->func_caps.num_rx_qp, 0, 0); 489 if (status) { 490 device_printf(dev, "init_lan_hmc failed: %s\n", 491 i40e_stat_str(hw, status)); 492 goto err_get_cap; 493 } 494 status = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY); 495 if (status) { 496 device_printf(dev, "configure_lan_hmc failed: %s\n", 497 i40e_stat_str(hw, status)); 498 goto err_mac_hmc; 499 } 500 501 /* Disable LLDP from the firmware for certain NVM versions */ 502 if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) || 503 (pf->hw.aq.fw_maj_ver < 4)) { 504 i40e_aq_stop_lldp(hw, TRUE, NULL); 505 pf->state |= IXL_PF_STATE_FW_LLDP_DISABLED; 506 } 507 508 /* Get MAC addresses from hardware */ 509 i40e_get_mac_addr(hw, hw->mac.addr); 510 error = i40e_validate_mac_addr(hw->mac.addr); 511 if (error) { 512 device_printf(dev, "validate_mac_addr failed: %d\n", error); 513 goto err_mac_hmc; 514 } 515 bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN); 516 iflib_set_mac(ctx, hw->mac.addr); 517 i40e_get_port_mac_addr(hw, hw->mac.port_addr); 518 519 /* Set up the device filtering */ 520 bzero(&filter, sizeof(filter)); 521 filter.enable_ethtype = TRUE; 522 filter.enable_macvlan = TRUE; 523 filter.enable_fdir = FALSE; 524 filter.hash_lut_size = I40E_HASH_LUT_SIZE_512; 525 if (i40e_set_filter_control(hw, &filter)) 526 device_printf(dev, "i40e_set_filter_control() failed\n"); 527 528 /* Query device FW LLDP status */ 529 ixl_get_fw_lldp_status(pf); 530 /* Tell FW to apply DCB config on link up */ 531 i40e_aq_set_dcb_parameters(hw, true, NULL); 532 533 /* Fill out iflib parameters */ 534 if (hw->mac.type == I40E_MAC_X722) 535 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 128; 536 else 537 scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64; 538 if (vsi->enable_head_writeback) { 539 scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0] 540 * sizeof(struct i40e_tx_desc) + sizeof(u32), DBA_ALIGN); 541 scctx->isc_txrx = &ixl_txrx_hwb; 542 } else { 543 scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0] 544 * sizeof(struct i40e_tx_desc), DBA_ALIGN); 545 scctx->isc_txrx = &ixl_txrx_dwb; 546 } 547 scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0] 548 * sizeof(union i40e_32byte_rx_desc), DBA_ALIGN); 549 scctx->isc_msix_bar = PCIR_BAR(IXL_MSIX_BAR); 550 scctx->isc_tx_nsegments = IXL_MAX_TX_SEGS; 551 scctx->isc_tx_tso_segments_max = IXL_MAX_TSO_SEGS; 552 scctx->isc_tx_tso_size_max = IXL_TSO_SIZE; 553 scctx->isc_tx_tso_segsize_max = IXL_MAX_DMA_SEG_SIZE; 554 scctx->isc_rss_table_size = pf->hw.func_caps.rss_table_size; 555 scctx->isc_tx_csum_flags = CSUM_OFFLOAD; 556 scctx->isc_capabilities = scctx->isc_capenable = IXL_CAPS; 557 558 INIT_DEBUGOUT("ixl_if_attach_pre: end"); 559 return (0); 560 561 err_mac_hmc: 562 i40e_shutdown_lan_hmc(hw); 563 err_get_cap: 564 i40e_shutdown_adminq(hw); 565 err_out: 566 ixl_free_pci_resources(pf); 567 err_pci_res: 568 return (error); 569 } 570 571 static int 572 ixl_if_attach_post(if_ctx_t ctx) 573 { 574 device_t dev; 575 struct ixl_pf *pf; 576 struct i40e_hw *hw; 577 struct ixl_vsi *vsi; 578 int error = 0; 579 enum i40e_status_code status; 580 581 INIT_DEBUGOUT("ixl_if_attach_post: begin"); 582 583 dev = iflib_get_dev(ctx); 584 pf = iflib_get_softc(ctx); 585 vsi = &pf->vsi; 586 vsi->ifp = iflib_get_ifp(ctx); 587 hw = &pf->hw; 588 589 /* Setup OS network interface / ifnet */ 590 if (ixl_setup_interface(dev, pf)) { 591 device_printf(dev, "interface setup failed!\n"); 592 error = EIO; 593 goto err; 594 } 595 596 /* Determine link state */ 597 if (ixl_attach_get_link_status(pf)) { 598 error = EINVAL; 599 goto err; 600 } 601 602 error = ixl_switch_config(pf); 603 if (error) { 604 device_printf(dev, "Initial ixl_switch_config() failed: %d\n", 605 error); 606 goto err; 607 } 608 609 /* Add protocol filters to list */ 610 ixl_init_filters(vsi); 611 612 /* Init queue allocation manager */ 613 error = ixl_pf_qmgr_init(&pf->qmgr, hw->func_caps.num_tx_qp); 614 if (error) { 615 device_printf(dev, "Failed to init queue manager for PF queues, error %d\n", 616 error); 617 goto err; 618 } 619 /* reserve a contiguous allocation for the PF's VSI */ 620 error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr, 621 max(vsi->num_rx_queues, vsi->num_tx_queues), &pf->qtag); 622 if (error) { 623 device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n", 624 error); 625 goto err; 626 } 627 device_printf(dev, "Allocating %d queues for PF LAN VSI; %d queues active\n", 628 pf->qtag.num_allocated, pf->qtag.num_active); 629 630 /* Limit PHY interrupts to link, autoneg, and modules failure */ 631 status = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK, 632 NULL); 633 if (status) { 634 device_printf(dev, "i40e_aq_set_phy_mask() failed: err %s," 635 " aq_err %s\n", i40e_stat_str(hw, status), 636 i40e_aq_str(hw, hw->aq.asq_last_status)); 637 goto err; 638 } 639 640 /* Get the bus configuration and set the shared code */ 641 ixl_get_bus_info(pf); 642 643 /* Keep admin queue interrupts active while driver is loaded */ 644 if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) { 645 ixl_configure_intr0_msix(pf); 646 ixl_enable_intr0(hw); 647 } 648 649 /* Set initial advertised speed sysctl value */ 650 ixl_set_initial_advertised_speeds(pf); 651 652 /* Initialize statistics & add sysctls */ 653 ixl_add_device_sysctls(pf); 654 ixl_pf_reset_stats(pf); 655 ixl_update_stats_counters(pf); 656 ixl_add_hw_stats(pf); 657 658 hw->phy.get_link_info = true; 659 i40e_get_link_status(hw, &pf->link_up); 660 ixl_update_link_status(pf); 661 662 #ifdef PCI_IOV 663 ixl_initialize_sriov(pf); 664 #endif 665 666 #ifdef IXL_IW 667 if (hw->func_caps.iwarp && ixl_enable_iwarp) { 668 pf->iw_enabled = (pf->iw_msix > 0) ? true : false; 669 if (pf->iw_enabled) { 670 error = ixl_iw_pf_attach(pf); 671 if (error) { 672 device_printf(dev, 673 "interfacing to iwarp driver failed: %d\n", 674 error); 675 goto err; 676 } else 677 device_printf(dev, "iWARP ready\n"); 678 } else 679 device_printf(dev, 680 "iwarp disabled on this device (no msix vectors)\n"); 681 } else { 682 pf->iw_enabled = false; 683 device_printf(dev, "The device is not iWARP enabled\n"); 684 } 685 #endif 686 687 INIT_DBG_DEV(dev, "end"); 688 return (0); 689 690 err: 691 INIT_DEBUGOUT("end: error %d", error); 692 /* ixl_if_detach() is called on error from this */ 693 return (error); 694 } 695 696 static int 697 ixl_if_detach(if_ctx_t ctx) 698 { 699 struct ixl_pf *pf = iflib_get_softc(ctx); 700 struct ixl_vsi *vsi = &pf->vsi; 701 struct i40e_hw *hw = &pf->hw; 702 device_t dev = pf->dev; 703 enum i40e_status_code status; 704 #if defined(PCI_IOV) || defined(IXL_IW) 705 int error; 706 #endif 707 708 INIT_DBG_DEV(dev, "begin"); 709 710 #ifdef IXL_IW 711 if (ixl_enable_iwarp && pf->iw_enabled) { 712 error = ixl_iw_pf_detach(pf); 713 if (error == EBUSY) { 714 device_printf(dev, "iwarp in use; stop it first.\n"); 715 return (error); 716 } 717 } 718 #endif 719 #ifdef PCI_IOV 720 error = pci_iov_detach(dev); 721 if (error != 0) { 722 device_printf(dev, "SR-IOV in use; detach first.\n"); 723 return (error); 724 } 725 #endif 726 /* Remove all previously allocated media types */ 727 ifmedia_removeall(vsi->media); 728 729 /* Shutdown LAN HMC */ 730 if (hw->hmc.hmc_obj) { 731 status = i40e_shutdown_lan_hmc(hw); 732 if (status) 733 device_printf(dev, 734 "i40e_shutdown_lan_hmc() failed with status %s\n", 735 i40e_stat_str(hw, status)); 736 } 737 738 /* Shutdown admin queue */ 739 ixl_disable_intr0(hw); 740 status = i40e_shutdown_adminq(hw); 741 if (status) 742 device_printf(dev, 743 "i40e_shutdown_adminq() failed with status %s\n", 744 i40e_stat_str(hw, status)); 745 746 ixl_pf_qmgr_destroy(&pf->qmgr); 747 ixl_free_pci_resources(pf); 748 ixl_free_mac_filters(vsi); 749 INIT_DBG_DEV(dev, "end"); 750 return (0); 751 } 752 753 /* TODO: Do shutdown-specific stuff here */ 754 static int 755 ixl_if_shutdown(if_ctx_t ctx) 756 { 757 int error = 0; 758 759 INIT_DEBUGOUT("ixl_if_shutdown: begin"); 760 761 /* TODO: Call ixl_if_stop()? */ 762 763 /* TODO: Then setup low power mode */ 764 765 return (error); 766 } 767 768 static int 769 ixl_if_suspend(if_ctx_t ctx) 770 { 771 int error = 0; 772 773 INIT_DEBUGOUT("ixl_if_suspend: begin"); 774 775 /* TODO: Call ixl_if_stop()? */ 776 777 /* TODO: Then setup low power mode */ 778 779 return (error); 780 } 781 782 static int 783 ixl_if_resume(if_ctx_t ctx) 784 { 785 struct ifnet *ifp = iflib_get_ifp(ctx); 786 787 INIT_DEBUGOUT("ixl_if_resume: begin"); 788 789 /* Read & clear wake-up registers */ 790 791 /* Required after D3->D0 transition */ 792 if (ifp->if_flags & IFF_UP) 793 ixl_if_init(ctx); 794 795 return (0); 796 } 797 798 /* Set Report Status queue fields to 0 */ 799 static void 800 ixl_init_tx_rsqs(struct ixl_vsi *vsi) 801 { 802 if_softc_ctx_t scctx = vsi->shared; 803 struct ixl_tx_queue *tx_que; 804 int i, j; 805 806 for (i = 0, tx_que = vsi->tx_queues; i < vsi->num_tx_queues; i++, tx_que++) { 807 struct tx_ring *txr = &tx_que->txr; 808 809 txr->tx_rs_cidx = txr->tx_rs_pidx = txr->tx_cidx_processed = 0; 810 811 for (j = 0; j < scctx->isc_ntxd[0]; j++) 812 txr->tx_rsq[j] = QIDX_INVALID; 813 } 814 } 815 816 static void 817 ixl_init_tx_cidx(struct ixl_vsi *vsi) 818 { 819 struct ixl_tx_queue *tx_que; 820 int i; 821 822 for (i = 0, tx_que = vsi->tx_queues; i < vsi->num_tx_queues; i++, tx_que++) { 823 struct tx_ring *txr = &tx_que->txr; 824 825 txr->tx_cidx_processed = 0; 826 } 827 } 828 829 void 830 ixl_if_init(if_ctx_t ctx) 831 { 832 struct ixl_pf *pf = iflib_get_softc(ctx); 833 struct ixl_vsi *vsi = &pf->vsi; 834 struct i40e_hw *hw = &pf->hw; 835 device_t dev = iflib_get_dev(ctx); 836 u8 tmpaddr[ETHER_ADDR_LEN]; 837 int ret; 838 839 /* 840 * If the aq is dead here, it probably means something outside of the driver 841 * did something to the adapter, like a PF reset. 842 * So rebuild the driver's state here if that occurs. 843 */ 844 if (!i40e_check_asq_alive(&pf->hw)) { 845 device_printf(dev, "Admin Queue is down; resetting...\n"); 846 ixl_teardown_hw_structs(pf); 847 ixl_reset(pf); 848 } 849 850 /* Get the latest mac address... User might use a LAA */ 851 bcopy(IF_LLADDR(vsi->ifp), tmpaddr, ETH_ALEN); 852 if (!cmp_etheraddr(hw->mac.addr, tmpaddr) && 853 (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) { 854 ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY); 855 bcopy(tmpaddr, hw->mac.addr, ETH_ALEN); 856 ret = i40e_aq_mac_address_write(hw, 857 I40E_AQC_WRITE_TYPE_LAA_ONLY, 858 hw->mac.addr, NULL); 859 if (ret) { 860 device_printf(dev, "LLA address change failed!!\n"); 861 return; 862 } 863 ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY); 864 } 865 866 iflib_set_mac(ctx, hw->mac.addr); 867 868 /* Prepare the VSI: rings, hmc contexts, etc... */ 869 if (ixl_initialize_vsi(vsi)) { 870 device_printf(dev, "initialize vsi failed!!\n"); 871 return; 872 } 873 874 // TODO: Call iflib setup multicast filters here? 875 // It's called in ixgbe in D5213 876 ixl_if_multi_set(ctx); 877 878 /* Set up RSS */ 879 ixl_config_rss(pf); 880 881 /* Set up MSI/X routing and the ITR settings */ 882 if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) { 883 ixl_configure_queue_intr_msix(pf); 884 ixl_configure_itr(pf); 885 } else 886 ixl_configure_legacy(pf); 887 888 if (vsi->enable_head_writeback) 889 ixl_init_tx_cidx(vsi); 890 else 891 ixl_init_tx_rsqs(vsi); 892 893 ixl_enable_rings(vsi); 894 895 i40e_aq_set_default_vsi(hw, vsi->seid, NULL); 896 897 ixl_reconfigure_filters(vsi); 898 899 #ifdef IXL_IW 900 if (ixl_enable_iwarp && pf->iw_enabled) { 901 ret = ixl_iw_pf_init(pf); 902 if (ret) 903 device_printf(dev, 904 "initialize iwarp failed, code %d\n", ret); 905 } 906 #endif 907 } 908 909 void 910 ixl_if_stop(if_ctx_t ctx) 911 { 912 struct ixl_pf *pf = iflib_get_softc(ctx); 913 struct ixl_vsi *vsi = &pf->vsi; 914 915 INIT_DEBUGOUT("ixl_if_stop: begin\n"); 916 917 // TODO: This may need to be reworked 918 #ifdef IXL_IW 919 /* Stop iWARP device */ 920 if (ixl_enable_iwarp && pf->iw_enabled) 921 ixl_iw_pf_stop(pf); 922 #endif 923 924 ixl_disable_rings_intr(vsi); 925 ixl_disable_rings(vsi); 926 } 927 928 static int 929 ixl_if_msix_intr_assign(if_ctx_t ctx, int msix) 930 { 931 struct ixl_pf *pf = iflib_get_softc(ctx); 932 struct ixl_vsi *vsi = &pf->vsi; 933 struct ixl_rx_queue *rx_que = vsi->rx_queues; 934 struct ixl_tx_queue *tx_que = vsi->tx_queues; 935 int err, i, rid, vector = 0; 936 char buf[16]; 937 938 /* Admin Que must use vector 0*/ 939 rid = vector + 1; 940 err = iflib_irq_alloc_generic(ctx, &vsi->irq, rid, IFLIB_INTR_ADMIN, 941 ixl_msix_adminq, pf, 0, "aq"); 942 if (err) { 943 iflib_irq_free(ctx, &vsi->irq); 944 device_printf(iflib_get_dev(ctx), 945 "Failed to register Admin que handler"); 946 return (err); 947 } 948 // TODO: Re-enable this at some point 949 // iflib_softirq_alloc_generic(ctx, rid, IFLIB_INTR_IOV, pf, 0, "ixl_iov"); 950 951 /* Now set up the stations */ 952 for (i = 0, vector = 1; i < vsi->num_rx_queues; i++, vector++, rx_que++) { 953 rid = vector + 1; 954 955 snprintf(buf, sizeof(buf), "rxq%d", i); 956 err = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid, 957 IFLIB_INTR_RX, ixl_msix_que, rx_que, rx_que->rxr.me, buf); 958 /* XXX: Does the driver work as expected if there are fewer num_rx_queues than 959 * what's expected in the iflib context? */ 960 if (err) { 961 device_printf(iflib_get_dev(ctx), 962 "Failed to allocate q int %d err: %d", i, err); 963 vsi->num_rx_queues = i + 1; 964 goto fail; 965 } 966 rx_que->msix = vector; 967 } 968 969 bzero(buf, sizeof(buf)); 970 971 for (i = 0; i < vsi->num_tx_queues; i++, tx_que++) { 972 snprintf(buf, sizeof(buf), "txq%d", i); 973 iflib_softirq_alloc_generic(ctx, 974 &vsi->rx_queues[i % vsi->num_rx_queues].que_irq, 975 IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf); 976 977 /* TODO: Maybe call a strategy function for this to figure out which 978 * interrupts to map Tx queues to. I don't know if there's an immediately 979 * better way than this other than a user-supplied map, though. */ 980 tx_que->msix = (i % vsi->num_rx_queues) + 1; 981 } 982 983 return (0); 984 fail: 985 iflib_irq_free(ctx, &vsi->irq); 986 rx_que = vsi->rx_queues; 987 for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) 988 iflib_irq_free(ctx, &rx_que->que_irq); 989 return (err); 990 } 991 992 /* 993 * Enable all interrupts 994 * 995 * Called in: 996 * iflib_init_locked, after ixl_if_init() 997 */ 998 static void 999 ixl_if_enable_intr(if_ctx_t ctx) 1000 { 1001 struct ixl_pf *pf = iflib_get_softc(ctx); 1002 struct ixl_vsi *vsi = &pf->vsi; 1003 struct i40e_hw *hw = vsi->hw; 1004 struct ixl_rx_queue *que = vsi->rx_queues; 1005 1006 ixl_enable_intr0(hw); 1007 /* Enable queue interrupts */ 1008 for (int i = 0; i < vsi->num_rx_queues; i++, que++) 1009 /* TODO: Queue index parameter is probably wrong */ 1010 ixl_enable_queue(hw, que->rxr.me); 1011 } 1012 1013 /* 1014 * Disable queue interrupts 1015 * 1016 * Other interrupt causes need to remain active. 1017 */ 1018 static void 1019 ixl_if_disable_intr(if_ctx_t ctx) 1020 { 1021 struct ixl_pf *pf = iflib_get_softc(ctx); 1022 struct ixl_vsi *vsi = &pf->vsi; 1023 struct i40e_hw *hw = vsi->hw; 1024 struct ixl_rx_queue *rx_que = vsi->rx_queues; 1025 1026 if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) { 1027 for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) 1028 ixl_disable_queue(hw, rx_que->msix - 1); 1029 } else { 1030 // Set PFINT_LNKLST0 FIRSTQ_INDX to 0x7FF 1031 // stops queues from triggering interrupts 1032 wr32(hw, I40E_PFINT_LNKLST0, 0x7FF); 1033 } 1034 } 1035 1036 static int 1037 ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid) 1038 { 1039 struct ixl_pf *pf = iflib_get_softc(ctx); 1040 struct ixl_vsi *vsi = &pf->vsi; 1041 struct i40e_hw *hw = vsi->hw; 1042 struct ixl_rx_queue *rx_que = &vsi->rx_queues[rxqid]; 1043 1044 ixl_enable_queue(hw, rx_que->msix - 1); 1045 return (0); 1046 } 1047 1048 static int 1049 ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid) 1050 { 1051 struct ixl_pf *pf = iflib_get_softc(ctx); 1052 struct ixl_vsi *vsi = &pf->vsi; 1053 struct i40e_hw *hw = vsi->hw; 1054 struct ixl_tx_queue *tx_que = &vsi->tx_queues[txqid]; 1055 1056 ixl_enable_queue(hw, tx_que->msix - 1); 1057 1058 return (0); 1059 } 1060 1061 static int 1062 ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets) 1063 { 1064 struct ixl_pf *pf = iflib_get_softc(ctx); 1065 struct ixl_vsi *vsi = &pf->vsi; 1066 if_softc_ctx_t scctx = vsi->shared; 1067 struct ixl_tx_queue *que; 1068 // int i; 1069 int i, j, error = 0; 1070 1071 MPASS(vsi->num_tx_queues > 0); 1072 MPASS(ntxqs == 1); 1073 MPASS(vsi->num_tx_queues == ntxqsets); 1074 1075 /* Allocate queue structure memory */ 1076 if (!(vsi->tx_queues = 1077 (struct ixl_tx_queue *) malloc(sizeof(struct ixl_tx_queue) *ntxqsets, M_IXL, M_NOWAIT | M_ZERO))) { 1078 device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n"); 1079 return (ENOMEM); 1080 } 1081 1082 for (i = 0, que = vsi->tx_queues; i < ntxqsets; i++, que++) { 1083 struct tx_ring *txr = &que->txr; 1084 1085 txr->me = i; 1086 que->vsi = vsi; 1087 1088 if (!vsi->enable_head_writeback) { 1089 /* Allocate report status array */ 1090 if (!(txr->tx_rsq = malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXL, M_NOWAIT))) { 1091 device_printf(iflib_get_dev(ctx), "failed to allocate tx_rsq memory\n"); 1092 error = ENOMEM; 1093 goto fail; 1094 } 1095 /* Init report status array */ 1096 for (j = 0; j < scctx->isc_ntxd[0]; j++) 1097 txr->tx_rsq[j] = QIDX_INVALID; 1098 } 1099 /* get the virtual and physical address of the hardware queues */ 1100 txr->tail = I40E_QTX_TAIL(txr->me); 1101 txr->tx_base = (struct i40e_tx_desc *)vaddrs[i * ntxqs]; 1102 txr->tx_paddr = paddrs[i * ntxqs]; 1103 txr->que = que; 1104 } 1105 1106 return (0); 1107 fail: 1108 ixl_if_queues_free(ctx); 1109 return (error); 1110 } 1111 1112 static int 1113 ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets) 1114 { 1115 struct ixl_pf *pf = iflib_get_softc(ctx); 1116 struct ixl_vsi *vsi = &pf->vsi; 1117 struct ixl_rx_queue *que; 1118 int i, error = 0; 1119 1120 MPASS(vsi->num_rx_queues > 0); 1121 MPASS(nrxqs == 1); 1122 MPASS(vsi->num_rx_queues == nrxqsets); 1123 1124 /* Allocate queue structure memory */ 1125 if (!(vsi->rx_queues = 1126 (struct ixl_rx_queue *) malloc(sizeof(struct ixl_rx_queue) * 1127 nrxqsets, M_IXL, M_NOWAIT | M_ZERO))) { 1128 device_printf(iflib_get_dev(ctx), "Unable to allocate RX ring memory\n"); 1129 error = ENOMEM; 1130 goto fail; 1131 } 1132 1133 for (i = 0, que = vsi->rx_queues; i < nrxqsets; i++, que++) { 1134 struct rx_ring *rxr = &que->rxr; 1135 1136 rxr->me = i; 1137 que->vsi = vsi; 1138 1139 /* get the virtual and physical address of the hardware queues */ 1140 rxr->tail = I40E_QRX_TAIL(rxr->me); 1141 rxr->rx_base = (union i40e_rx_desc *)vaddrs[i * nrxqs]; 1142 rxr->rx_paddr = paddrs[i * nrxqs]; 1143 rxr->que = que; 1144 } 1145 1146 return (0); 1147 fail: 1148 ixl_if_queues_free(ctx); 1149 return (error); 1150 } 1151 1152 static void 1153 ixl_if_queues_free(if_ctx_t ctx) 1154 { 1155 struct ixl_pf *pf = iflib_get_softc(ctx); 1156 struct ixl_vsi *vsi = &pf->vsi; 1157 1158 if (vsi->enable_head_writeback) { 1159 struct ixl_tx_queue *que; 1160 int i = 0; 1161 1162 for (i = 0, que = vsi->tx_queues; i < vsi->num_tx_queues; i++, que++) { 1163 struct tx_ring *txr = &que->txr; 1164 if (txr->tx_rsq != NULL) { 1165 free(txr->tx_rsq, M_IXL); 1166 txr->tx_rsq = NULL; 1167 } 1168 } 1169 } 1170 1171 if (vsi->tx_queues != NULL) { 1172 free(vsi->tx_queues, M_IXL); 1173 vsi->tx_queues = NULL; 1174 } 1175 if (vsi->rx_queues != NULL) { 1176 free(vsi->rx_queues, M_IXL); 1177 vsi->rx_queues = NULL; 1178 } 1179 } 1180 1181 void 1182 ixl_update_link_status(struct ixl_pf *pf) 1183 { 1184 struct ixl_vsi *vsi = &pf->vsi; 1185 struct i40e_hw *hw = &pf->hw; 1186 u64 baudrate; 1187 1188 if (pf->link_up) { 1189 if (vsi->link_active == FALSE) { 1190 vsi->link_active = TRUE; 1191 baudrate = ixl_max_aq_speed_to_value(hw->phy.link_info.link_speed); 1192 iflib_link_state_change(vsi->ctx, LINK_STATE_UP, baudrate); 1193 ixl_link_up_msg(pf); 1194 #ifdef PCI_IOV 1195 ixl_broadcast_link_state(pf); 1196 #endif 1197 1198 } 1199 } else { /* Link down */ 1200 if (vsi->link_active == TRUE) { 1201 vsi->link_active = FALSE; 1202 iflib_link_state_change(vsi->ctx, LINK_STATE_DOWN, 0); 1203 #ifdef PCI_IOV 1204 ixl_broadcast_link_state(pf); 1205 #endif 1206 } 1207 } 1208 } 1209 1210 static int 1211 ixl_process_adminq(struct ixl_pf *pf, u16 *pending) 1212 { 1213 enum i40e_status_code status = I40E_SUCCESS; 1214 struct i40e_arq_event_info event; 1215 struct i40e_hw *hw = &pf->hw; 1216 device_t dev = pf->dev; 1217 u16 opcode; 1218 u32 loop = 0, reg; 1219 1220 event.buf_len = IXL_AQ_BUF_SZ; 1221 event.msg_buf = malloc(event.buf_len, M_IXL, M_NOWAIT | M_ZERO); 1222 if (!event.msg_buf) { 1223 device_printf(dev, "%s: Unable to allocate memory for Admin" 1224 " Queue event!\n", __func__); 1225 return (ENOMEM); 1226 } 1227 1228 /* clean and process any events */ 1229 do { 1230 status = i40e_clean_arq_element(hw, &event, pending); 1231 if (status) 1232 break; 1233 opcode = LE16_TO_CPU(event.desc.opcode); 1234 ixl_dbg(pf, IXL_DBG_AQ, 1235 "Admin Queue event: %#06x\n", opcode); 1236 switch (opcode) { 1237 case i40e_aqc_opc_get_link_status: 1238 ixl_link_event(pf, &event); 1239 break; 1240 case i40e_aqc_opc_send_msg_to_pf: 1241 #ifdef PCI_IOV 1242 ixl_handle_vf_msg(pf, &event); 1243 #endif 1244 break; 1245 /* 1246 * This should only occur on no-drop queues, which 1247 * aren't currently configured. 1248 */ 1249 case i40e_aqc_opc_event_lan_overflow: 1250 device_printf(dev, "LAN overflow event\n"); 1251 break; 1252 default: 1253 break; 1254 } 1255 } while (*pending && (loop++ < IXL_ADM_LIMIT)); 1256 1257 free(event.msg_buf, M_IXL); 1258 1259 /* Re-enable admin queue interrupt cause */ 1260 reg = rd32(hw, I40E_PFINT_ICR0_ENA); 1261 reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK; 1262 wr32(hw, I40E_PFINT_ICR0_ENA, reg); 1263 1264 return (status); 1265 } 1266 1267 static void 1268 ixl_if_update_admin_status(if_ctx_t ctx) 1269 { 1270 struct ixl_pf *pf = iflib_get_softc(ctx); 1271 struct i40e_hw *hw = &pf->hw; 1272 u16 pending; 1273 1274 if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING) 1275 ixl_handle_empr_reset(pf); 1276 1277 if (pf->state & IXL_PF_STATE_MDD_PENDING) 1278 ixl_handle_mdd_event(pf); 1279 1280 #ifdef PCI_IOV 1281 if (pf->state & IXL_PF_STATE_VF_RESET_REQ) 1282 iflib_iov_intr_deferred(ctx); 1283 #endif 1284 1285 ixl_process_adminq(pf, &pending); 1286 ixl_update_link_status(pf); 1287 1288 /* 1289 * If there are still messages to process, reschedule ourselves. 1290 * Otherwise, re-enable our interrupt and go to sleep. 1291 */ 1292 if (pending > 0) 1293 iflib_admin_intr_deferred(ctx); 1294 else 1295 ixl_enable_intr0(hw); 1296 } 1297 1298 static void 1299 ixl_if_multi_set(if_ctx_t ctx) 1300 { 1301 struct ixl_pf *pf = iflib_get_softc(ctx); 1302 struct ixl_vsi *vsi = &pf->vsi; 1303 struct i40e_hw *hw = vsi->hw; 1304 int mcnt = 0, flags; 1305 1306 IOCTL_DEBUGOUT("ixl_if_multi_set: begin"); 1307 1308 mcnt = if_multiaddr_count(iflib_get_ifp(ctx), MAX_MULTICAST_ADDR); 1309 /* delete existing MC filters */ 1310 ixl_del_multi(vsi); 1311 1312 if (__predict_false(mcnt == MAX_MULTICAST_ADDR)) { 1313 i40e_aq_set_vsi_multicast_promiscuous(hw, 1314 vsi->seid, TRUE, NULL); 1315 return; 1316 } 1317 /* (re-)install filters for all mcast addresses */ 1318 mcnt = if_multi_apply(iflib_get_ifp(ctx), ixl_mc_filter_apply, vsi); 1319 1320 if (mcnt > 0) { 1321 flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC); 1322 ixl_add_hw_filters(vsi, flags, mcnt); 1323 } 1324 1325 IOCTL_DEBUGOUT("ixl_if_multi_set: end"); 1326 } 1327 1328 static int 1329 ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu) 1330 { 1331 struct ixl_pf *pf = iflib_get_softc(ctx); 1332 struct ixl_vsi *vsi = &pf->vsi; 1333 1334 IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)"); 1335 if (mtu > IXL_MAX_FRAME - ETHER_HDR_LEN - ETHER_CRC_LEN - 1336 ETHER_VLAN_ENCAP_LEN) 1337 return (EINVAL); 1338 1339 vsi->shared->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + 1340 ETHER_VLAN_ENCAP_LEN; 1341 1342 return (0); 1343 } 1344 1345 static void 1346 ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr) 1347 { 1348 struct ixl_pf *pf = iflib_get_softc(ctx); 1349 struct i40e_hw *hw = &pf->hw; 1350 1351 INIT_DEBUGOUT("ixl_media_status: begin"); 1352 1353 ifmr->ifm_status = IFM_AVALID; 1354 ifmr->ifm_active = IFM_ETHER; 1355 1356 if (!pf->link_up) { 1357 return; 1358 } 1359 1360 ifmr->ifm_status |= IFM_ACTIVE; 1361 /* Hardware is always full-duplex */ 1362 ifmr->ifm_active |= IFM_FDX; 1363 1364 switch (hw->phy.link_info.phy_type) { 1365 /* 100 M */ 1366 case I40E_PHY_TYPE_100BASE_TX: 1367 ifmr->ifm_active |= IFM_100_TX; 1368 break; 1369 /* 1 G */ 1370 case I40E_PHY_TYPE_1000BASE_T: 1371 ifmr->ifm_active |= IFM_1000_T; 1372 break; 1373 case I40E_PHY_TYPE_1000BASE_SX: 1374 ifmr->ifm_active |= IFM_1000_SX; 1375 break; 1376 case I40E_PHY_TYPE_1000BASE_LX: 1377 ifmr->ifm_active |= IFM_1000_LX; 1378 break; 1379 case I40E_PHY_TYPE_1000BASE_T_OPTICAL: 1380 ifmr->ifm_active |= IFM_1000_T; 1381 break; 1382 /* 10 G */ 1383 case I40E_PHY_TYPE_10GBASE_SFPP_CU: 1384 ifmr->ifm_active |= IFM_10G_TWINAX; 1385 break; 1386 case I40E_PHY_TYPE_10GBASE_SR: 1387 ifmr->ifm_active |= IFM_10G_SR; 1388 break; 1389 case I40E_PHY_TYPE_10GBASE_LR: 1390 ifmr->ifm_active |= IFM_10G_LR; 1391 break; 1392 case I40E_PHY_TYPE_10GBASE_T: 1393 ifmr->ifm_active |= IFM_10G_T; 1394 break; 1395 case I40E_PHY_TYPE_XAUI: 1396 case I40E_PHY_TYPE_XFI: 1397 ifmr->ifm_active |= IFM_10G_TWINAX; 1398 break; 1399 case I40E_PHY_TYPE_10GBASE_AOC: 1400 ifmr->ifm_active |= IFM_10G_AOC; 1401 break; 1402 /* 25 G */ 1403 case I40E_PHY_TYPE_25GBASE_KR: 1404 ifmr->ifm_active |= IFM_25G_KR; 1405 break; 1406 case I40E_PHY_TYPE_25GBASE_CR: 1407 ifmr->ifm_active |= IFM_25G_CR; 1408 break; 1409 case I40E_PHY_TYPE_25GBASE_SR: 1410 ifmr->ifm_active |= IFM_25G_SR; 1411 break; 1412 case I40E_PHY_TYPE_25GBASE_LR: 1413 ifmr->ifm_active |= IFM_25G_LR; 1414 break; 1415 case I40E_PHY_TYPE_25GBASE_AOC: 1416 ifmr->ifm_active |= IFM_25G_AOC; 1417 break; 1418 case I40E_PHY_TYPE_25GBASE_ACC: 1419 ifmr->ifm_active |= IFM_25G_ACC; 1420 break; 1421 /* 40 G */ 1422 case I40E_PHY_TYPE_40GBASE_CR4: 1423 case I40E_PHY_TYPE_40GBASE_CR4_CU: 1424 ifmr->ifm_active |= IFM_40G_CR4; 1425 break; 1426 case I40E_PHY_TYPE_40GBASE_SR4: 1427 ifmr->ifm_active |= IFM_40G_SR4; 1428 break; 1429 case I40E_PHY_TYPE_40GBASE_LR4: 1430 ifmr->ifm_active |= IFM_40G_LR4; 1431 break; 1432 case I40E_PHY_TYPE_XLAUI: 1433 ifmr->ifm_active |= IFM_OTHER; 1434 break; 1435 case I40E_PHY_TYPE_1000BASE_KX: 1436 ifmr->ifm_active |= IFM_1000_KX; 1437 break; 1438 case I40E_PHY_TYPE_SGMII: 1439 ifmr->ifm_active |= IFM_1000_SGMII; 1440 break; 1441 /* ERJ: What's the difference between these? */ 1442 case I40E_PHY_TYPE_10GBASE_CR1_CU: 1443 case I40E_PHY_TYPE_10GBASE_CR1: 1444 ifmr->ifm_active |= IFM_10G_CR1; 1445 break; 1446 case I40E_PHY_TYPE_10GBASE_KX4: 1447 ifmr->ifm_active |= IFM_10G_KX4; 1448 break; 1449 case I40E_PHY_TYPE_10GBASE_KR: 1450 ifmr->ifm_active |= IFM_10G_KR; 1451 break; 1452 case I40E_PHY_TYPE_SFI: 1453 ifmr->ifm_active |= IFM_10G_SFI; 1454 break; 1455 /* Our single 20G media type */ 1456 case I40E_PHY_TYPE_20GBASE_KR2: 1457 ifmr->ifm_active |= IFM_20G_KR2; 1458 break; 1459 case I40E_PHY_TYPE_40GBASE_KR4: 1460 ifmr->ifm_active |= IFM_40G_KR4; 1461 break; 1462 case I40E_PHY_TYPE_XLPPI: 1463 case I40E_PHY_TYPE_40GBASE_AOC: 1464 ifmr->ifm_active |= IFM_40G_XLPPI; 1465 break; 1466 /* Unknown to driver */ 1467 default: 1468 ifmr->ifm_active |= IFM_UNKNOWN; 1469 break; 1470 } 1471 /* Report flow control status as well */ 1472 if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) 1473 ifmr->ifm_active |= IFM_ETH_TXPAUSE; 1474 if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) 1475 ifmr->ifm_active |= IFM_ETH_RXPAUSE; 1476 } 1477 1478 static int 1479 ixl_if_media_change(if_ctx_t ctx) 1480 { 1481 struct ifmedia *ifm = iflib_get_media(ctx); 1482 1483 INIT_DEBUGOUT("ixl_media_change: begin"); 1484 1485 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 1486 return (EINVAL); 1487 1488 if_printf(iflib_get_ifp(ctx), "Media change is not supported.\n"); 1489 return (ENODEV); 1490 } 1491 1492 static int 1493 ixl_if_promisc_set(if_ctx_t ctx, int flags) 1494 { 1495 struct ixl_pf *pf = iflib_get_softc(ctx); 1496 struct ixl_vsi *vsi = &pf->vsi; 1497 struct ifnet *ifp = iflib_get_ifp(ctx); 1498 struct i40e_hw *hw = vsi->hw; 1499 int err; 1500 bool uni = FALSE, multi = FALSE; 1501 1502 if (flags & IFF_PROMISC) 1503 uni = multi = TRUE; 1504 else if (flags & IFF_ALLMULTI || 1505 if_multiaddr_count(ifp, MAX_MULTICAST_ADDR) == MAX_MULTICAST_ADDR) 1506 multi = TRUE; 1507 1508 err = i40e_aq_set_vsi_unicast_promiscuous(hw, 1509 vsi->seid, uni, NULL, true); 1510 if (err) 1511 return (err); 1512 err = i40e_aq_set_vsi_multicast_promiscuous(hw, 1513 vsi->seid, multi, NULL); 1514 return (err); 1515 } 1516 1517 static void 1518 ixl_if_timer(if_ctx_t ctx, uint16_t qid) 1519 { 1520 struct ixl_pf *pf = iflib_get_softc(ctx); 1521 //struct i40e_hw *hw = &pf->hw; 1522 //struct ixl_tx_queue *que = &vsi->tx_queues[qid]; 1523 #if 0 1524 u32 mask; 1525 1526 /* 1527 ** Check status of the queues 1528 */ 1529 mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK | 1530 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK); 1531 1532 /* If queue param has outstanding work, trigger sw irq */ 1533 // TODO: TX queues in iflib don't use HW interrupts; does this do anything? 1534 if (que->busy) 1535 wr32(hw, I40E_PFINT_DYN_CTLN(que->txr.me), mask); 1536 #endif 1537 1538 if (qid != 0) 1539 return; 1540 1541 /* Fire off the adminq task */ 1542 iflib_admin_intr_deferred(ctx); 1543 1544 /* Update stats */ 1545 ixl_update_stats_counters(pf); 1546 } 1547 1548 static void 1549 ixl_if_vlan_register(if_ctx_t ctx, u16 vtag) 1550 { 1551 struct ixl_pf *pf = iflib_get_softc(ctx); 1552 struct ixl_vsi *vsi = &pf->vsi; 1553 struct i40e_hw *hw = vsi->hw; 1554 1555 if ((vtag == 0) || (vtag > 4095)) /* Invalid */ 1556 return; 1557 1558 ++vsi->num_vlans; 1559 ixl_add_filter(vsi, hw->mac.addr, vtag); 1560 } 1561 1562 static void 1563 ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag) 1564 { 1565 struct ixl_pf *pf = iflib_get_softc(ctx); 1566 struct ixl_vsi *vsi = &pf->vsi; 1567 struct i40e_hw *hw = vsi->hw; 1568 1569 if ((vtag == 0) || (vtag > 4095)) /* Invalid */ 1570 return; 1571 1572 --vsi->num_vlans; 1573 ixl_del_filter(vsi, hw->mac.addr, vtag); 1574 } 1575 1576 static uint64_t 1577 ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt) 1578 { 1579 struct ixl_pf *pf = iflib_get_softc(ctx); 1580 struct ixl_vsi *vsi = &pf->vsi; 1581 if_t ifp = iflib_get_ifp(ctx); 1582 1583 switch (cnt) { 1584 case IFCOUNTER_IPACKETS: 1585 return (vsi->ipackets); 1586 case IFCOUNTER_IERRORS: 1587 return (vsi->ierrors); 1588 case IFCOUNTER_OPACKETS: 1589 return (vsi->opackets); 1590 case IFCOUNTER_OERRORS: 1591 return (vsi->oerrors); 1592 case IFCOUNTER_COLLISIONS: 1593 /* Collisions are by standard impossible in 40G/10G Ethernet */ 1594 return (0); 1595 case IFCOUNTER_IBYTES: 1596 return (vsi->ibytes); 1597 case IFCOUNTER_OBYTES: 1598 return (vsi->obytes); 1599 case IFCOUNTER_IMCASTS: 1600 return (vsi->imcasts); 1601 case IFCOUNTER_OMCASTS: 1602 return (vsi->omcasts); 1603 case IFCOUNTER_IQDROPS: 1604 return (vsi->iqdrops); 1605 case IFCOUNTER_OQDROPS: 1606 return (vsi->oqdrops); 1607 case IFCOUNTER_NOPROTO: 1608 return (vsi->noproto); 1609 default: 1610 return (if_get_counter_default(ifp, cnt)); 1611 } 1612 } 1613 1614 static void 1615 ixl_if_vflr_handle(if_ctx_t ctx) 1616 { 1617 IXL_DEV_ERR(iflib_get_dev(ctx), ""); 1618 1619 // TODO: call ixl_handle_vflr() 1620 } 1621 1622 static int 1623 ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req) 1624 { 1625 struct ixl_pf *pf = iflib_get_softc(ctx); 1626 1627 if (pf->read_i2c_byte == NULL) 1628 return (EINVAL); 1629 1630 for (int i = 0; i < req->len; i++) 1631 if (pf->read_i2c_byte(pf, req->offset + i, 1632 req->dev_addr, &req->data[i])) 1633 return (EIO); 1634 return (0); 1635 } 1636 1637 static int 1638 ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data) 1639 { 1640 struct ixl_pf *pf = iflib_get_softc(ctx); 1641 struct ifdrv *ifd = (struct ifdrv *)data; 1642 int error = 0; 1643 1644 /* NVM update command */ 1645 if (ifd->ifd_cmd == I40E_NVM_ACCESS) 1646 error = ixl_handle_nvmupd_cmd(pf, ifd); 1647 else 1648 error = EINVAL; 1649 1650 return (error); 1651 } 1652 1653 static int 1654 ixl_mc_filter_apply(void *arg, struct ifmultiaddr *ifma, int count __unused) 1655 { 1656 struct ixl_vsi *vsi = arg; 1657 1658 if (ifma->ifma_addr->sa_family != AF_LINK) 1659 return (0); 1660 ixl_add_mc_filter(vsi, 1661 (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr)); 1662 return (1); 1663 } 1664 1665 /* 1666 * Sanity check and save off tunable values. 1667 */ 1668 static void 1669 ixl_save_pf_tunables(struct ixl_pf *pf) 1670 { 1671 device_t dev = pf->dev; 1672 1673 /* Save tunable information */ 1674 pf->enable_tx_fc_filter = ixl_enable_tx_fc_filter; 1675 pf->dbg_mask = ixl_core_debug_mask; 1676 pf->hw.debug_mask = ixl_shared_debug_mask; 1677 pf->vsi.enable_head_writeback = !!(ixl_enable_head_writeback); 1678 #if 0 1679 pf->dynamic_rx_itr = ixl_dynamic_rx_itr; 1680 pf->dynamic_tx_itr = ixl_dynamic_tx_itr; 1681 #endif 1682 1683 if (ixl_i2c_access_method > 3 || ixl_i2c_access_method < 0) 1684 pf->i2c_access_method = 0; 1685 else 1686 pf->i2c_access_method = ixl_i2c_access_method; 1687 1688 if (ixl_tx_itr < 0 || ixl_tx_itr > IXL_MAX_ITR) { 1689 device_printf(dev, "Invalid tx_itr value of %d set!\n", 1690 ixl_tx_itr); 1691 device_printf(dev, "tx_itr must be between %d and %d, " 1692 "inclusive\n", 1693 0, IXL_MAX_ITR); 1694 device_printf(dev, "Using default value of %d instead\n", 1695 IXL_ITR_4K); 1696 pf->tx_itr = IXL_ITR_4K; 1697 } else 1698 pf->tx_itr = ixl_tx_itr; 1699 1700 if (ixl_rx_itr < 0 || ixl_rx_itr > IXL_MAX_ITR) { 1701 device_printf(dev, "Invalid rx_itr value of %d set!\n", 1702 ixl_rx_itr); 1703 device_printf(dev, "rx_itr must be between %d and %d, " 1704 "inclusive\n", 1705 0, IXL_MAX_ITR); 1706 device_printf(dev, "Using default value of %d instead\n", 1707 IXL_ITR_8K); 1708 pf->rx_itr = IXL_ITR_8K; 1709 } else 1710 pf->rx_itr = ixl_rx_itr; 1711 } 1712 1713