/******************************************************************************

  Copyright (c) 2013-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "ixl.h"
#include "ixl_pf.h"

#ifdef IXL_IW
#include "ixl_iw.h"
#include "ixl_iw_int.h"
#endif

#ifdef PCI_IOV
#include "ixl_pf_iov.h"
#endif

/*********************************************************************
 *  Driver version
 *********************************************************************/
#define IXL_DRIVER_VERSION_MAJOR	1
#define IXL_DRIVER_VERSION_MINOR	9
#define IXL_DRIVER_VERSION_BUILD	9

char ixl_driver_version[] = __XSTRING(IXL_DRIVER_VERSION_MAJOR) "."
			    __XSTRING(IXL_DRIVER_VERSION_MINOR) "."
			    __XSTRING(IXL_DRIVER_VERSION_BUILD) "-k";
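/* With the version numbers defined above, __XSTRING() stringifies the
 * macros, so ixl_driver_version resolves to "1.9.9-k". */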
/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixl_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/

static ixl_vendor_info_t ixl_vendor_info_array[] =
{
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B, 0, 0, 0},
	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};

/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static char *ixl_strings[] = {
	"Intel(R) Ethernet Connection 700 Series PF Driver"
};

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int	ixl_probe(device_t);
static int	ixl_attach(device_t);
static int	ixl_detach(device_t);
static int	ixl_shutdown(device_t);

static int	ixl_save_pf_tunables(struct ixl_pf *);

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixl_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixl_probe),
	DEVMETHOD(device_attach, ixl_attach),
	DEVMETHOD(device_detach, ixl_detach),
	DEVMETHOD(device_shutdown, ixl_shutdown),
#ifdef PCI_IOV
	DEVMETHOD(pci_iov_init, ixl_iov_init),
	DEVMETHOD(pci_iov_uninit, ixl_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, ixl_add_vf),
#endif
	{0, 0}
};

static driver_t ixl_driver = {
	"ixl", ixl_methods, sizeof(struct ixl_pf),
};

devclass_t ixl_devclass;
DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0);

MODULE_VERSION(ixl, 1);

MODULE_DEPEND(ixl, pci, 1, 1, 1);
MODULE_DEPEND(ixl, ether, 1, 1, 1);
#if defined(DEV_NETMAP) && __FreeBSD_version >= 1100000
MODULE_DEPEND(ixl, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */

/*
** TUNEABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD, 0,
    "IXL driver parameters");
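/*
 * The tunables below are declared with CTLFLAG_RDTUN: they are read from the
 * kernel environment when the driver loads and are read-only via sysctl(8)
 * afterwards.  To change them, set the corresponding hw.ixl.* variables in
 * /boot/loader.conf before boot, for example (illustrative values only):
 *
 *	hw.ixl.enable_msix="1"
 *	hw.ixl.tx_ring_size="1024"
 *	hw.ixl.rx_ring_size="1024"
 *	hw.ixl.max_queues="0"
 */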
/*
 * MSIX should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixl_enable_msix = 1;
TUNABLE_INT("hw.ixl.enable_msix", &ixl_enable_msix);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixl_enable_msix, 0,
    "Enable MSI-X interrupts");

/*
** Number of descriptors per ring
**   - TX and RX sizes are independently configurable
*/
static int ixl_tx_ring_size = IXL_DEFAULT_RING;
TUNABLE_INT("hw.ixl.tx_ring_size", &ixl_tx_ring_size);
SYSCTL_INT(_hw_ixl, OID_AUTO, tx_ring_size, CTLFLAG_RDTUN,
    &ixl_tx_ring_size, 0, "TX Descriptor Ring Size");

static int ixl_rx_ring_size = IXL_DEFAULT_RING;
TUNABLE_INT("hw.ixl.rx_ring_size", &ixl_rx_ring_size);
SYSCTL_INT(_hw_ixl, OID_AUTO, rx_ring_size, CTLFLAG_RDTUN,
    &ixl_rx_ring_size, 0, "RX Descriptor Ring Size");

/*
** This can be set manually, if left as 0 the
** number of queues will be calculated based
** on cpus and msix vectors available.
*/
static int ixl_max_queues = 0;
TUNABLE_INT("hw.ixl.max_queues", &ixl_max_queues);
SYSCTL_INT(_hw_ixl, OID_AUTO, max_queues, CTLFLAG_RDTUN,
    &ixl_max_queues, 0, "Number of Queues");

/*
 * Leave this on unless you need to send flow control
 * frames (or other control frames) from software
 */
static int ixl_enable_tx_fc_filter = 1;
TUNABLE_INT("hw.ixl.enable_tx_fc_filter",
    &ixl_enable_tx_fc_filter);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_tx_fc_filter, CTLFLAG_RDTUN,
    &ixl_enable_tx_fc_filter, 0,
    "Filter out packets with Ethertype 0x8808 from being sent out by non-HW sources");

/*
 * Different method for processing TX descriptor
 * completion.
 */
static int ixl_enable_head_writeback = 1;
TUNABLE_INT("hw.ixl.enable_head_writeback",
    &ixl_enable_head_writeback);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN,
    &ixl_enable_head_writeback, 0,
    "For detecting last completed TX descriptor by hardware, use value written by HW instead of checking descriptors");

static int ixl_core_debug_mask = 0;
TUNABLE_INT("hw.ixl.core_debug_mask",
    &ixl_core_debug_mask);
SYSCTL_INT(_hw_ixl, OID_AUTO, core_debug_mask, CTLFLAG_RDTUN,
    &ixl_core_debug_mask, 0,
    "Display debug statements that are printed in non-shared code");

static int ixl_shared_debug_mask = 0;
TUNABLE_INT("hw.ixl.shared_debug_mask",
    &ixl_shared_debug_mask);
SYSCTL_INT(_hw_ixl, OID_AUTO, shared_debug_mask, CTLFLAG_RDTUN,
    &ixl_shared_debug_mask, 0,
    "Display debug statements that are printed in shared code");

/*
** Controls for Interrupt Throttling
**	- true/false for dynamic adjustment
**	- default values for static ITR
*/
static int ixl_dynamic_rx_itr = 1;
TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");

static int ixl_dynamic_tx_itr = 1;
TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");

static int ixl_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
    &ixl_rx_itr, 0, "RX Interrupt Rate");

static int ixl_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
    &ixl_tx_itr, 0, "TX Interrupt Rate");
#ifdef IXL_IW
int ixl_enable_iwarp = 0;
TUNABLE_INT("hw.ixl.enable_iwarp", &ixl_enable_iwarp);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_iwarp, CTLFLAG_RDTUN,
    &ixl_enable_iwarp, 0, "iWARP enabled");

#if __FreeBSD_version < 1100000
int ixl_limit_iwarp_msix = 1;
#else
int ixl_limit_iwarp_msix = IXL_IW_MAX_MSIX;
#endif
TUNABLE_INT("hw.ixl.limit_iwarp_msix", &ixl_limit_iwarp_msix);
SYSCTL_INT(_hw_ixl, OID_AUTO, limit_iwarp_msix, CTLFLAG_RDTUN,
    &ixl_limit_iwarp_msix, 0, "Limit MSIX vectors assigned to iWARP");
#endif

#ifdef DEV_NETMAP
#define NETMAP_IXL_MAIN /* only bring in one part of the netmap code */
#include <dev/netmap/if_ixl_netmap.h>
#endif /* DEV_NETMAP */

/*********************************************************************
 *  Device identification routine
 *
 *  ixl_probe determines if the driver should be loaded on
 *  the hardware based on PCI vendor/device id of the device.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/

static int
ixl_probe(device_t dev)
{
	ixl_vendor_info_t *ent;

	u16	pci_vendor_id, pci_device_id;
	u16	pci_subvendor_id, pci_subdevice_id;
	char	device_name[256];

#if 0
	INIT_DEBUGOUT("ixl_probe: begin");
#endif
	pci_vendor_id = pci_get_vendor(dev);
	if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
		return (ENXIO);

	pci_device_id = pci_get_device(dev);
	pci_subvendor_id = pci_get_subvendor(dev);
	pci_subdevice_id = pci_get_subdevice(dev);

	ent = ixl_vendor_info_array;
	while (ent->vendor_id != 0) {
		if ((pci_vendor_id == ent->vendor_id) &&
		    (pci_device_id == ent->device_id) &&

		    ((pci_subvendor_id == ent->subvendor_id) ||
		     (ent->subvendor_id == 0)) &&

		    ((pci_subdevice_id == ent->subdevice_id) ||
		     (ent->subdevice_id == 0))) {
			sprintf(device_name, "%s, Version - %s",
				ixl_strings[ent->index],
				ixl_driver_version);
			device_set_desc_copy(dev, device_name);
			return (BUS_PROBE_DEFAULT);
		}
		ent++;
	}
	return (ENXIO);
}
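/*
 * With the single entry in ixl_strings[] and the version defined above, the
 * description set by ixl_probe() appears in the attach message in dmesg(8)
 * along the lines of:
 *
 *	ixl0: <Intel(R) Ethernet Connection 700 Series PF Driver, Version - 1.9.9-k>
 */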
/*
 * Sanity check and save off tunable values.
 */
static int
ixl_save_pf_tunables(struct ixl_pf *pf)
{
	device_t dev = pf->dev;

	/* Save tunable information */
	pf->enable_msix = ixl_enable_msix;
	pf->max_queues = ixl_max_queues;
	pf->enable_tx_fc_filter = ixl_enable_tx_fc_filter;
	pf->dynamic_rx_itr = ixl_dynamic_rx_itr;
	pf->dynamic_tx_itr = ixl_dynamic_tx_itr;
	pf->dbg_mask = ixl_core_debug_mask;
	pf->hw.debug_mask = ixl_shared_debug_mask;
#ifdef DEV_NETMAP
	if (ixl_enable_head_writeback == 0)
		device_printf(dev, "Head writeback mode cannot be disabled "
		    "when netmap is enabled\n");
	pf->vsi.enable_head_writeback = 1;
#else
	pf->vsi.enable_head_writeback = !!(ixl_enable_head_writeback);
#endif

	ixl_vsi_setup_rings_size(&pf->vsi, ixl_tx_ring_size, ixl_rx_ring_size);

	if (ixl_tx_itr < 0 || ixl_tx_itr > IXL_MAX_ITR) {
		device_printf(dev, "Invalid tx_itr value of %d set!\n",
		    ixl_tx_itr);
		device_printf(dev, "tx_itr must be between %d and %d, "
		    "inclusive\n",
		    0, IXL_MAX_ITR);
		device_printf(dev, "Using default value of %d instead\n",
		    IXL_ITR_4K);
		pf->tx_itr = IXL_ITR_4K;
	} else
		pf->tx_itr = ixl_tx_itr;

	if (ixl_rx_itr < 0 || ixl_rx_itr > IXL_MAX_ITR) {
		device_printf(dev, "Invalid rx_itr value of %d set!\n",
		    ixl_rx_itr);
		device_printf(dev, "rx_itr must be between %d and %d, "
		    "inclusive\n",
		    0, IXL_MAX_ITR);
		device_printf(dev, "Using default value of %d instead\n",
		    IXL_ITR_8K);
		pf->rx_itr = IXL_ITR_8K;
	} else
		pf->rx_itr = ixl_rx_itr;

	return (0);
}
/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixl_attach(device_t dev)
{
	struct ixl_pf	*pf;
	struct i40e_hw	*hw;
	struct ixl_vsi	*vsi;
	enum i40e_status_code status;
	int		error = 0;

	INIT_DEBUGOUT("ixl_attach: begin");

	/* Allocate, clear, and link in our primary soft structure */
	pf = device_get_softc(dev);
	pf->dev = pf->osdep.dev = dev;
	hw = &pf->hw;

	/*
	** Note this assumes we have a single embedded VSI,
	** this could be enhanced later to allocate multiple
	*/
	vsi = &pf->vsi;
	vsi->dev = pf->dev;
	vsi->back = pf;

	/* Save tunable values */
	error = ixl_save_pf_tunables(pf);
	if (error)
		return (error);

	/* Core Lock Init */
	IXL_PF_LOCK_INIT(pf, device_get_nameunit(dev));

	/* Set up the timer callout */
	callout_init_mtx(&pf->timer, &pf->pf_mtx, 0);

	/* Do PCI setup - map BAR0, etc */
	if (ixl_allocate_pci_resources(pf)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_out;
	}

	/* Establish a clean starting point */
	i40e_clear_hw(hw);
	status = i40e_pf_reset(hw);
	if (status) {
		device_printf(dev, "PF reset failure %s\n",
		    i40e_stat_str(hw, status));
		error = EIO;
		goto err_out;
	}

	/* Initialize the shared code */
	status = i40e_init_shared_code(hw);
	if (status) {
		device_printf(dev, "Unable to initialize shared code, error %s\n",
		    i40e_stat_str(hw, status));
		error = EIO;
		goto err_out;
	}

	/* Set up the admin queue */
	hw->aq.num_arq_entries = IXL_AQ_LEN;
	hw->aq.num_asq_entries = IXL_AQ_LEN;
	hw->aq.arq_buf_size = IXL_AQ_BUF_SZ;
	hw->aq.asq_buf_size = IXL_AQ_BUF_SZ;

	status = i40e_init_adminq(hw);
	if (status != 0 && status != I40E_ERR_FIRMWARE_API_VERSION) {
		device_printf(dev, "Unable to initialize Admin Queue, error %s\n",
		    i40e_stat_str(hw, status));
		error = EIO;
		goto err_out;
	}
	ixl_print_nvm_version(pf);

	if (status == I40E_ERR_FIRMWARE_API_VERSION) {
		device_printf(dev, "The driver for the device stopped "
		    "because the NVM image is newer than expected.\n");
		device_printf(dev, "You must install the most recent version of "
		    "the network driver.\n");
		error = EIO;
		goto err_out;
	}

	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw)) {
		device_printf(dev, "The driver for the device detected "
		    "a newer version of the NVM image than expected.\n");
		device_printf(dev, "Please install the most recent version "
		    "of the network driver.\n");
	} else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4) {
		device_printf(dev, "The driver for the device detected "
		    "an older version of the NVM image than expected.\n");
		device_printf(dev, "Please update the NVM image.\n");
	}

	/* Clear PXE mode */
	i40e_clear_pxe_mode(hw);

	/* Get capabilities from the device */
	error = ixl_get_hw_capabilities(pf);
	if (error) {
		device_printf(dev, "HW capabilities failure!\n");
		goto err_get_cap;
	}

	/*
	 * Allocate interrupts and figure out number of queues to use
	 * for PF interface
	 */
	pf->msix = ixl_init_msix(pf);
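	/*
	 * Note: the LAN HMC ("Host Memory Cache") set up below is backing
	 * store in host memory that the hardware uses for its per-queue
	 * TX/RX contexts; it must be initialized and configured before any
	 * queue contexts can be programmed.
	 */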
	/* Set up host memory cache */
	status = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp, 0, 0);
	if (status) {
		device_printf(dev, "init_lan_hmc failed: %s\n",
		    i40e_stat_str(hw, status));
		goto err_get_cap;
	}

	status = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (status) {
		device_printf(dev, "configure_lan_hmc failed: %s\n",
		    i40e_stat_str(hw, status));
		goto err_mac_hmc;
	}

	/* Init queue allocation manager */
	error = ixl_pf_qmgr_init(&pf->qmgr, hw->func_caps.num_tx_qp);
	if (error) {
		device_printf(dev, "Failed to init queue manager for PF queues, error %d\n",
		    error);
		goto err_mac_hmc;
	}
	/* reserve a contiguous allocation for the PF's VSI */
	error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr, vsi->num_queues, &pf->qtag);
	if (error) {
		device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
		    error);
		goto err_mac_hmc;
	}
	device_printf(dev, "Allocating %d queues for PF LAN VSI; %d queues active\n",
	    pf->qtag.num_allocated, pf->qtag.num_active);

	/* Disable LLDP from the firmware for certain NVM versions */
	if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
	    (pf->hw.aq.fw_maj_ver < 4)) {
		i40e_aq_stop_lldp(hw, TRUE, NULL);
		pf->state |= IXL_PF_STATE_FW_LLDP_DISABLED;
	}

	/* Get MAC addresses from hardware */
	i40e_get_mac_addr(hw, hw->mac.addr);
	error = i40e_validate_mac_addr(hw->mac.addr);
	if (error) {
		device_printf(dev, "validate_mac_addr failed: %d\n", error);
		goto err_mac_hmc;
	}
	bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
	i40e_get_port_mac_addr(hw, hw->mac.port_addr);

	/* Query device FW LLDP status */
	ixl_get_fw_lldp_status(pf);
	/* Tell FW to apply DCB config on link up */
	if ((hw->mac.type != I40E_MAC_X722)
	    && ((pf->hw.aq.api_maj_ver > 1)
	    || (pf->hw.aq.api_maj_ver == 1 && pf->hw.aq.api_min_ver >= 7)))
		i40e_aq_set_dcb_parameters(hw, true, NULL);

	/* Initialize mac filter list for VSI */
	SLIST_INIT(&vsi->ftl);

	/* Set up SW VSI and allocate queue memory and rings */
	if (ixl_setup_stations(pf)) {
		device_printf(dev, "setup stations failed!\n");
		error = ENOMEM;
		goto err_mac_hmc;
	}

	/* Setup OS network interface / ifnet */
	if (ixl_setup_interface(dev, vsi)) {
		device_printf(dev, "interface setup failed!\n");
		error = EIO;
		goto err_late;
	}

	/* Determine link state */
	if (ixl_attach_get_link_status(pf)) {
		error = EINVAL;
		goto err_late;
	}

	error = ixl_switch_config(pf);
	if (error) {
		device_printf(dev, "Initial ixl_switch_config() failed: %d\n",
		    error);
		goto err_late;
	}

	/* Limit PHY interrupts to link, autoneg, and modules failure */
	status = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
	    NULL);
	if (status) {
		device_printf(dev, "i40e_aq_set_phy_mask() failed: err %s,"
		    " aq_err %s\n", i40e_stat_str(hw, status),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		goto err_late;
	}

	/* Get the bus configuration and set the shared code's config */
	ixl_get_bus_info(pf);
	/*
	 * In MSI-X mode, initialize the Admin Queue interrupt,
	 * so userland tools can communicate with the adapter regardless of
	 * the ifnet interface's status.
	 */
	if (pf->msix > 1) {
		error = ixl_setup_adminq_msix(pf);
		if (error) {
			device_printf(dev, "ixl_setup_adminq_msix() error: %d\n",
			    error);
			goto err_late;
		}
		error = ixl_setup_adminq_tq(pf);
		if (error) {
			device_printf(dev, "ixl_setup_adminq_tq() error: %d\n",
			    error);
			goto err_late;
		}
		ixl_configure_intr0_msix(pf);
		ixl_enable_intr0(hw);

		error = ixl_setup_queue_msix(vsi);
		if (error)
			device_printf(dev, "ixl_setup_queue_msix() error: %d\n",
			    error);
		error = ixl_setup_queue_tqs(vsi);
		if (error)
			device_printf(dev, "ixl_setup_queue_tqs() error: %d\n",
			    error);
	} else {
		error = ixl_setup_legacy(pf);

		error = ixl_setup_adminq_tq(pf);
		if (error) {
			device_printf(dev, "ixl_setup_adminq_tq() error: %d\n",
			    error);
			goto err_late;
		}

		error = ixl_setup_queue_tqs(vsi);
		if (error)
			device_printf(dev, "ixl_setup_queue_tqs() error: %d\n",
			    error);
	}

	if (error) {
		device_printf(dev, "interrupt setup error: %d\n", error);
	}

	/* Set initial advertised speed sysctl value */
	ixl_set_initial_advertised_speeds(pf);

	/* Initialize statistics & add sysctls */
	ixl_add_device_sysctls(pf);

	ixl_pf_reset_stats(pf);
	ixl_update_stats_counters(pf);
	ixl_add_hw_stats(pf);

	/* Register for VLAN events */
	vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixl_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
	vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixl_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);

#ifdef PCI_IOV
	ixl_initialize_sriov(pf);
#endif

#ifdef DEV_NETMAP
	if (vsi->num_rx_desc == vsi->num_tx_desc) {
		vsi->queues[0].num_desc = vsi->num_rx_desc;
		ixl_netmap_attach(vsi);
	} else
		device_printf(dev,
		    "Netmap is not supported when RX and TX descriptor ring sizes differ\n");

#endif /* DEV_NETMAP */

#ifdef IXL_IW
	if (hw->func_caps.iwarp && ixl_enable_iwarp) {
		pf->iw_enabled = (pf->iw_msix > 0) ? true : false;
		if (pf->iw_enabled) {
			error = ixl_iw_pf_attach(pf);
			if (error) {
				device_printf(dev,
				    "interfacing to iwarp driver failed: %d\n",
				    error);
				goto err_late;
			} else
				device_printf(dev, "iWARP ready\n");
		} else
			device_printf(dev,
			    "iwarp disabled on this device (no msix vectors)\n");
	} else {
		pf->iw_enabled = false;
		device_printf(dev, "The device is not iWARP enabled\n");
	}
#endif

	INIT_DEBUGOUT("ixl_attach: end");
	return (0);

err_late:
	if (vsi->ifp != NULL) {
		ether_ifdetach(vsi->ifp);
		if_free(vsi->ifp);
	}
err_mac_hmc:
	i40e_shutdown_lan_hmc(hw);
err_get_cap:
	i40e_shutdown_adminq(hw);
err_out:
	ixl_free_pci_resources(pf);
	ixl_free_vsi(vsi);
	IXL_PF_LOCK_DESTROY(pf);
	return (error);
}
/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixl_detach(device_t dev)
{
	struct ixl_pf		*pf = device_get_softc(dev);
	struct i40e_hw		*hw = &pf->hw;
	struct ixl_vsi		*vsi = &pf->vsi;
	enum i40e_status_code	status;
#if defined(PCI_IOV) || defined(IXL_IW)
	int			error;
#endif

	INIT_DEBUGOUT("ixl_detach: begin");

	/* Make sure VLANs are not using driver */
	if (vsi->ifp->if_vlantrunk != NULL) {
		device_printf(dev, "Vlan in use, detach first\n");
		return (EBUSY);
	}

#ifdef PCI_IOV
	error = pci_iov_detach(dev);
	if (error != 0) {
		device_printf(dev, "SR-IOV in use; detach first.\n");
		return (error);
	}
#endif

	/* Remove all previously allocated media types */
	ifmedia_removeall(&vsi->media);

	ether_ifdetach(vsi->ifp);
	if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)
		ixl_stop(pf);

	/* Shutdown LAN HMC */
	status = i40e_shutdown_lan_hmc(hw);
	if (status)
		device_printf(dev,
		    "Shutdown LAN HMC failed with code %d\n", status);

	/* Teardown LAN queue resources */
	ixl_teardown_queue_msix(vsi);
	ixl_free_queue_tqs(vsi);
	/* Shutdown admin queue */
	ixl_disable_intr0(hw);
	ixl_teardown_adminq_msix(pf);
	ixl_free_adminq_tq(pf);
	status = i40e_shutdown_adminq(hw);
	if (status)
		device_printf(dev,
		    "Shutdown Admin queue failed with code %d\n", status);

	/* Unregister VLAN events */
	if (vsi->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
	if (vsi->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);

	callout_drain(&pf->timer);

#ifdef IXL_IW
	if (ixl_enable_iwarp && pf->iw_enabled) {
		error = ixl_iw_pf_detach(pf);
		if (error == EBUSY) {
			device_printf(dev, "iwarp in use; stop it first.\n");
			return (error);
		}
	}
#endif

#ifdef DEV_NETMAP
	netmap_detach(vsi->ifp);
#endif /* DEV_NETMAP */
	ixl_pf_qmgr_destroy(&pf->qmgr);
	ixl_free_pci_resources(pf);
	bus_generic_detach(dev);
	if_free(vsi->ifp);
	ixl_free_vsi(vsi);
	IXL_PF_LOCK_DESTROY(pf);
	return (0);
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

static int
ixl_shutdown(device_t dev)
{
	struct ixl_pf *pf = device_get_softc(dev);
	ixl_stop(pf);
	return (0);
}
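/*
 * Usage note: this PF driver can be compiled into the kernel with
 * "device ixl", or loaded as a module at boot by adding the following to
 * /boot/loader.conf:
 *
 *	if_ixl_load="YES"
 */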