1 /* 2 * This file and its contents are supplied under the terms of the 3 * Common Development and Distribution License ("CDDL"), version 1.0. 4 * You may only use this file in accordance with the terms of version 5 * 1.0 of the CDDL. 6 * 7 * A full copy of the text of the CDDL should have accompanied this 8 * source. A copy of the CDDL is also available via the Internet at 9 * http://www.illumos.org/license/CDDL. 10 */ 11 12 /* 13 * Copyright 2015 OmniTI Computer Consulting, Inc. All rights reserved. 14 * Copyright 2016 Joyent, Inc. 15 * Copyright 2017 Tegile Systems, Inc. All rights reserved. 16 */ 17 18 /* 19 * i40e - Intel 10/40 Gb Ethernet driver 20 * 21 * The i40e driver is the main software device driver for the Intel 40 Gb family 22 * of devices. Note that these devices come in many flavors with both 40 GbE 23 * ports and 10 GbE ports. This device is the successor to the 82599 family of 24 * devices (ixgbe). 25 * 26 * Unlike previous generations of Intel 1 GbE and 10 GbE devices, the 40 GbE 27 * devices defined in the XL710 controller (previously known as Fortville) are a 28 * rather different beast and have a small switch embedded inside of them. In 29 * addition, the way that most of the programming is done has been overhauled. 30 * As opposed to just using PCIe memory mapped registers, it also has an 31 * administrative queue which is used to communicate with firmware running on 32 * the chip. 33 * 34 * Each physical function in the hardware shows up as a device that this driver 35 * will bind to. The hardware splits many resources evenly across all of the 36 * physical functions present on the device, while other resources are instead 37 * shared across the entire card and its up to the device driver to 38 * intelligently partition them. 39 * 40 * ------------ 41 * Organization 42 * ------------ 43 * 44 * This driver is made up of several files which have their own theory 45 * statements spread across them. 
We'll touch on the high level purpose of each 46 * file here, and then we'll get into more discussion on how the device is 47 * generally modelled with respect to the interfaces in illumos. 48 * 49 * i40e_gld.c: This file contains all of the bindings to MAC and the networking 50 * stack. 51 * 52 * i40e_intr.c: This file contains all of the interrupt service routines and 53 * contains logic to enable and disable interrupts on the hardware. 54 * It also contains the logic to map hardware resources such as the 55 * rings to and from interrupts and controls their ability to fire. 56 * 57 * There is a big theory statement on interrupts present there. 58 * 59 * i40e_main.c: The file that you're currently in. It interfaces with the 60 * traditional OS DDI interfaces and is in charge of configuring 61 * the device. 62 * 63 * i40e_osdep.[ch]: These files contain interfaces and definitions needed to 64 * work with Intel's common code for the device. 65 * 66 * i40e_stats.c: This file contains the general work and logic around our 67 * kstats. A theory statement on their organization and use of the 68 * hardware exists there. 69 * 70 * i40e_sw.h: This header file contains all of the primary structure definitions 71 * and constants that are used across the entire driver. 72 * 73 * i40e_transceiver.c: This file contains all of the logic for sending and 74 * receiving data. It contains all of the ring and DMA 75 * allocation logic, as well as, the actual interfaces to 76 * send and receive data. 77 * 78 * A big theory statement on ring management, descriptors, 79 * and how it ties into the OS is present there. 80 * 81 * -------------- 82 * General Design 83 * -------------- 84 * 85 * Before we go too far into the general way we've laid out data structures and 86 * the like, it's worth taking some time to explain how the hardware is 87 * organized. This organization informs a lot of how we do things at this time 88 * in the driver. 
89 * 90 * Each physical device consists of a number of one or more ports, which are 91 * considered physical functions in the PCI sense and thus each get enumerated 92 * by the system, resulting in an instance being created and attached to. While 93 * there are many resources that are unique to each physical function eg. 94 * instance of the device, there are many that are shared across all of them. 95 * Several resources have an amount reserved for each Virtual Station Interface 96 * (VSI) and then a static pool of resources, available for all functions on the 97 * card. 98 * 99 * The most important resource in hardware are its transmit and receive queue 100 * pairs (i40e_trqpair_t). These should be thought of as rings in GLDv3 101 * parlance. There are a set number of these on each device; however, they are 102 * statically partitioned among all of the different physical functions. 103 * 104 * 'Fortville' (the code name for this device family) is basically a switch. To 105 * map MAC addresses and other things to queues, we end up having to create 106 * Virtual Station Interfaces (VSIs) and establish forwarding rules that direct 107 * traffic to a queue. A VSI owns a collection of queues and has a series of 108 * forwarding rules that point to it. One way to think of this is to treat it 109 * like MAC does a VNIC. When MAC refers to a group, a collection of rings and 110 * classification resources, that is a VSI in i40e. 111 * 112 * The sets of VSIs is shared across the entire device, though there may be some 113 * amount that are reserved to each PF. Because the GLDv3 does not let us change 114 * the number of groups dynamically, we instead statically divide this amount 115 * evenly between all the functions that exist. In addition, we have the same 116 * problem with the mac address forwarding rules. There are a static number that 117 * exist shared across all the functions. 
 *
 * To handle both of these resources, what we end up doing is going through and
 * determining which functions belong to the same device. Nominally one might do
 * this by having a nexus driver; however, a prime requirement for a nexus
 * driver is identifying the various children and activating them. While it is
 * possible to get this information from NVRAM, we would end up duplicating a
 * lot of the PCI enumeration logic. Really, at the end of the day, the device
 * doesn't give us the traditional identification properties we want from a
 * nexus driver.
 *
 * Instead, we rely on some properties that are guaranteed to be unique. While
 * it might be tempting to leverage the PBA or serial number of the device from
 * NVRAM, there is nothing that says that two devices can't be mis-programmed to
 * have the same values in NVRAM. Instead, we uniquely identify a group of
 * functions based on their parent in the /devices tree, their PCI bus and PCI
 * function identifiers. Using either on their own may not be sufficient.
 *
 * For each unique PCI device that we encounter, we'll create an i40e_device_t.
 * From there, because we don't have a good way to tell the GLDv3 about sharing
 * resources between everything, we'll end up just dividing the resources
 * evenly between all of the functions. Longer term, if we don't have to declare
 * to the GLDv3 that these resources are shared, then we'll maintain a pool and
 * have each PF allocate from the pool in the device, thus if only two of four
 * ports are being used, for example, then all of the resources can still be
 * used.
 *
 * -------------------------------------------
 * Transmit and Receive Queue Pair Allocations
 * -------------------------------------------
 *
 * NVRAM ends up assigning each PF its own share of the transmit and receive LAN
 * queue pairs, we have no way of modifying it, only observing it.
From there,
 * it's up to us to map these queues to VSIs and VFs. Since we don't support any
 * VFs at this time, we only focus on assignments to VSIs.
 *
 * At the moment, we use a static mapping of transmit/receive queue pairs to a
 * given VSI (eg. rings to a group). Though in the fullness of time, we want to
 * make this something which is fully dynamic and take advantage of documented,
 * but not yet available functionality for adding filters based on VXLAN and
 * other encapsulation technologies.
 *
 * -------------------------------------
 * Broadcast, Multicast, and Promiscuous
 * -------------------------------------
 *
 * As part of the GLDv3, we need to make sure that we can handle receiving
 * broadcast and multicast traffic. As well as enabling promiscuous mode when
 * requested. GLDv3 requires that all broadcast and multicast traffic be
 * retrieved by the default group, eg. the first one. This is the same thing as
 * the default VSI.
 *
 * To receive broadcast traffic, we enable it through the admin queue, rather
 * than use one of our filters for it. For multicast traffic, we reserve a
 * certain number of the hash filters and assign them to a given PF. When we
 * exceed those, we then switch to using promiscuous mode for multicast traffic.
 *
 * More specifically, once we exceed the number of filters (indicated because
 * the i40e_t`i40e_resources.ifr_nmcastfilt ==
 * i40e_t`i40e_resources.ifr_nmcastfilt_used), we then instead need to toggle
 * promiscuous mode. If promiscuous mode is toggled then we keep track of the
 * number of MACs added to it by incrementing i40e_t`i40e_mcast_promisc_count.
 * That will stay enabled until that count reaches zero indicating that we have
 * only added multicast addresses that we have a corresponding entry for.
181 * 182 * Because MAC itself wants to toggle promiscuous mode, which includes both 183 * unicast and multicast traffic, we go through and keep track of that 184 * ourselves. That is maintained through the use of the i40e_t`i40e_promisc_on 185 * member. 186 * 187 * -------------- 188 * VSI Management 189 * -------------- 190 * 191 * At this time, we currently only support a single MAC group, and thus a single 192 * VSI. This VSI is considered the default VSI and should be the only one that 193 * exists after a reset. Currently it is stored as the member 194 * i40e_t`i40e_vsi_id. While this works for the moment and for an initial 195 * driver, it's not sufficient for the longer-term path of the driver. Instead, 196 * we'll want to actually have a unique i40e_vsi_t structure which is used 197 * everywhere. Note that this means that every place that uses the 198 * i40e_t`i40e_vsi_id will need to be refactored. 199 * 200 * ---------------- 201 * Structure Layout 202 * ---------------- 203 * 204 * The following images relates the core data structures together. The primary 205 * structure in the system is the i40e_t. It itself contains multiple rings, 206 * i40e_trqpair_t's which contain the various transmit and receive data. The 207 * receive data is stored outside of the i40e_trqpair_t and instead in the 208 * i40e_rx_data_t. The i40e_t has a corresponding i40e_device_t which keeps 209 * track of per-physical device state. Finally, for every active descriptor, 210 * there is a corresponding control block, which is where the 211 * i40e_rx_control_block_t and the i40e_tx_control_block_t come from. 212 * 213 * +-----------------------+ +-----------------------+ 214 * | Global i40e_t list | | Global Device list | 215 * | | +--| | 216 * | i40e_glist | | | i40e_dlist | 217 * +-----------------------+ | +-----------------------+ 218 * | v 219 * | +------------------------+ +-----------------------+ 220 * | | Device-wide Structure |----->| Device-wide Structure |--> ... 
221 * | | i40e_device_t | | i40e_device_t | 222 * | | | +-----------------------+ 223 * | | dev_info_t * ------+--> Parent in devices tree. 224 * | | uint_t ------+--> PCI bus number 225 * | | uint_t ------+--> PCI device number 226 * | | uint_t ------+--> Number of functions 227 * | | i40e_switch_rsrcs_t ---+--> Captured total switch resources 228 * | | list_t ------+-------------+ 229 * | +------------------------+ | 230 * | ^ | 231 * | +--------+ | 232 * | | v 233 * | +---------------------------+ | +-------------------+ 234 * +->| GLDv3 Device, per PF |-----|-->| GLDv3 Device (PF) |--> ... 235 * | i40e_t | | | i40e_t | 236 * | **Primary Structure** | | +-------------------+ 237 * | | | 238 * | i40e_device_t * --+-----+ 239 * | i40e_state_t --+---> Device State 240 * | i40e_hw_t --+---> Intel common code structure 241 * | mac_handle_t --+---> GLDv3 handle to MAC 242 * | ddi_periodic_t --+---> Link activity timer 243 * | int (vsi_id) --+---> VSI ID, main identifier 244 * | i40e_func_rsrc_t --+---> Available hardware resources 245 * | i40e_switch_rsrc_t * --+---> Switch resource snapshot 246 * | i40e_sdu --+---> Current MTU 247 * | i40e_frame_max --+---> Current HW frame size 248 * | i40e_uaddr_t * --+---> Array of assigned unicast MACs 249 * | i40e_maddr_t * --+---> Array of assigned multicast MACs 250 * | i40e_mcast_promisccount --+---> Active multicast state 251 * | i40e_promisc_on --+---> Current promiscuous mode state 252 * | int --+---> Number of transmit/receive pairs 253 * | kstat_t * --+---> PF kstats 254 * | kstat_t * --+---> VSI kstats 255 * | i40e_pf_stats_t --+---> PF kstat backing data 256 * | i40e_vsi_stats_t --+---> VSI kstat backing data 257 * | i40e_trqpair_t * --+---------+ 258 * +---------------------------+ | 259 * | 260 * v 261 * +-------------------------------+ +-----------------------------+ 262 * | Transmit/Receive Queue Pair |-------| Transmit/Receive Queue Pair |->... 
263 * | i40e_trqpair_t | | i40e_trqpair_t | 264 * + Ring Data Structure | +-----------------------------+ 265 * | | 266 * | mac_ring_handle_t +--> MAC RX ring handle 267 * | mac_ring_handle_t +--> MAC TX ring handle 268 * | i40e_rxq_stat_t --+--> RX Queue stats 269 * | i40e_txq_stat_t --+--> TX Queue stats 270 * | uint32_t (tx ring size) +--> TX Ring Size 271 * | uint32_t (tx free list size) +--> TX Free List Size 272 * | i40e_dma_buffer_t --------+--> TX Descriptor ring DMA 273 * | i40e_tx_desc_t * --------+--> TX descriptor ring 274 * | volatile unt32_t * +--> TX Write back head 275 * | uint32_t -------+--> TX ring head 276 * | uint32_t -------+--> TX ring tail 277 * | uint32_t -------+--> Num TX desc free 278 * | i40e_tx_control_block_t * --+--> TX control block array ---+ 279 * | i40e_tx_control_block_t ** --+--> TCB work list ----+ 280 * | i40e_tx_control_block_t ** --+--> TCB free list ---+ 281 * | uint32_t -------+--> Free TCB count | 282 * | i40e_rx_data_t * -------+--+ v 283 * +-------------------------------+ | +---------------------------+ 284 * | | Per-TX Frame Metadata | 285 * | | i40e_tx_control_block_t | 286 * +--------------------+ | | 287 * | mblk to transmit <--+--- mblk_t * | 288 * | type of transmit <--+--- i40e_tx_type_t | 289 * | TX DMA handle <--+--- ddi_dma_handle_t | 290 * v TX DMA buffer <--+--- i40e_dma_buffer_t | 291 * +------------------------------+ +---------------------------+ 292 * | Core Receive Data | 293 * | i40e_rx_data_t | 294 * | | 295 * | i40e_dma_buffer_t --+--> RX descriptor DMA Data 296 * | i40e_rx_desc_t --+--> RX descriptor ring 297 * | uint32_t --+--> Next free desc. 
298 * | i40e_rx_control_block_t * --+--> RX Control Block Array ---+ 299 * | i40e_rx_control_block_t ** --+--> RCB work list ---+ 300 * | i40e_rx_control_block_t ** --+--> RCB free list ---+ 301 * +------------------------------+ | 302 * ^ | 303 * | +---------------------------+ | 304 * | | Per-RX Frame Metadata |<---------------+ 305 * | | i40e_rx_control_block_t | 306 * | | | 307 * | | mblk_t * ----+--> Received mblk_t data 308 * | | uint32_t ----+--> Reference count 309 * | | i40e_dma_buffer_t ----+--> Receive data DMA info 310 * | | frtn_t ----+--> mblk free function info 311 * +-----+-- i40e_rx_data_t * | 312 * +---------------------------+ 313 * 314 * ------------- 315 * Lock Ordering 316 * ------------- 317 * 318 * In order to ensure that we don't deadlock, the following represents the 319 * lock order being used. When grabbing locks, follow the following order. Lower 320 * numbers are more important. Thus, the i40e_glock which is number 0, must be 321 * taken before any other locks in the driver. On the other hand, the 322 * i40e_t`i40e_stat_lock, has the highest number because it's the least 323 * important lock. Note, that just because one lock is higher than another does 324 * not mean that all intermediary locks are required. 325 * 326 * 0) i40e_glock 327 * 1) i40e_t`i40e_general_lock 328 * 329 * 2) i40e_trqpair_t`itrq_rx_lock 330 * 3) i40e_trqpair_t`itrq_tx_lock 331 * 4) i40e_t`i40e_rx_pending_lock 332 * 5) i40e_trqpair_t`itrq_tcb_lock 333 * 334 * 6) i40e_t`i40e_stat_lock 335 * 336 * Rules and expectations: 337 * 338 * 1) A thread holding locks belong to one PF should not hold locks belonging to 339 * a second. If for some reason this becomes necessary, locks should be grabbed 340 * based on the list order in the i40e_device_t, which implies that the 341 * i40e_glock is held. 342 * 343 * 2) When grabbing locks between multiple transmit and receive queues, the 344 * locks for the lowest number transmit/receive queue should be grabbed first. 
 *
 * 3) When grabbing both the transmit and receive lock for a given queue, always
 * grab i40e_trqpair_t`itrq_rx_lock before the i40e_trqpair_t`itrq_tx_lock.
 *
 * 4) The following pairs of locks are not expected to be held at the same time:
 *
 * o i40e_t`i40e_rx_pending_lock and i40e_trqpair_t`itrq_tcb_lock
 *
 * -----------
 * Future Work
 * -----------
 *
 * At the moment the i40e_t driver is rather bare bones, allowing us to start
 * getting data flowing and folks using it while we develop additional features.
 * While bugs have been filed to cover this future work, the following gives an
 * overview of expected work:
 *
 * o TSO support
 * o Multiple group support
 * o DMA binding and breaking up the locking in ring recycling.
 * o Enhanced detection of device errors
 * o Participation in IRM
 * o FMA device reset
 * o Stall detection, temperature error detection, etc.
 * o More dynamic resource pools
 */

#include "i40e_sw.h"

static char i40e_ident[] = "Intel 10/40Gb Ethernet v1.0.1";

/*
 * The i40e_glock primarily protects the lists below and the i40e_device_t
 * structures.
 */
static kmutex_t i40e_glock;
static list_t i40e_glist;
static list_t i40e_dlist;

/*
 * Access attributes for register mapping.
 */
static ddi_device_acc_attr_t i40e_regs_acc_attr = {
	DDI_DEVICE_ATTR_V1,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};

/*
 * Logging function for this driver.
 *
 * Formats fmt/ap into a local buffer and emits it with cmn_err() when no
 * i40e_t is available (e.g. very early in attach), or with dev_err() so the
 * message is tagged with the instance. 'console' selects whether the message
 * also reaches the system console; the leading '!' in the format string
 * restricts a message to the system log only.
 */
static void
i40e_dev_err(i40e_t *i40e, int level, boolean_t console, const char *fmt,
    va_list ap)
{
	char buf[1024];

	(void) vsnprintf(buf, sizeof (buf), fmt, ap);

	if (i40e == NULL) {
		cmn_err(level, (console) ? "%s: %s" : "!%s: %s",
		    I40E_MODULE_NAME, buf);
	} else {
		dev_err(i40e->i40e_dip, level, (console) ? "%s" : "!%s",
		    buf);
	}
}

/*
 * Because there's the stupid trailing-comma problem with the C preprocessor
 * and variable arguments, I need to instantiate these. Pardon the redundant
 * code.
 */
/*PRINTFLIKE2*/
void
i40e_error(i40e_t *i40e, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	i40e_dev_err(i40e, CE_WARN, B_FALSE, fmt, ap);
	va_end(ap);
}

/*PRINTFLIKE2*/
void
i40e_log(i40e_t *i40e, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	i40e_dev_err(i40e, CE_NOTE, B_FALSE, fmt, ap);
	va_end(ap);
}

/*PRINTFLIKE2*/
void
i40e_notice(i40e_t *i40e, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	i40e_dev_err(i40e, CE_NOTE, B_TRUE, fmt, ap);
	va_end(ap);
}

/*
 * Drop this instance's hold on its i40e_device_t (taken in
 * i40e_device_find()). When the last registered function goes away, the
 * device-wide structure and its switch resource snapshot are freed and the
 * structure is unlinked from i40e_dlist. All of this happens under
 * i40e_glock.
 */
static void
i40e_device_rele(i40e_t *i40e)
{
	i40e_device_t *idp = i40e->i40e_device;

	if (idp == NULL)
		return;

	mutex_enter(&i40e_glock);
	VERIFY(idp->id_nreg > 0);
	list_remove(&idp->id_i40e_list, i40e);
	idp->id_nreg--;
	if (idp->id_nreg == 0) {
		list_remove(&i40e_dlist, idp);
		list_destroy(&idp->id_i40e_list);
		kmem_free(idp->id_rsrcs, sizeof (i40e_switch_rsrc_t) *
		    idp->id_rsrcs_alloc);
		kmem_free(idp, sizeof (i40e_device_t));
	}
	i40e->i40e_device = NULL;
	mutex_exit(&i40e_glock);
}

/*
 * Find, or create on first sight, the i40e_device_t representing the
 * physical card this PF belongs to, keyed on the parent devinfo node plus
 * the PCI bus and device numbers (see the theory statement above for why
 * these are used). A hold is taken on the returned structure; it must be
 * released via i40e_device_rele(). When creating the structure, a snapshot
 * of this PF's switch resources is copied in, so this is expected to run
 * before anything has been allocated from the switch.
 */
static i40e_device_t *
i40e_device_find(i40e_t *i40e, dev_info_t *parent, uint_t bus, uint_t device)
{
	i40e_device_t *idp;
	mutex_enter(&i40e_glock);
	for (idp = list_head(&i40e_dlist); idp != NULL;
	    idp = list_next(&i40e_dlist, idp)) {
		if (idp->id_parent == parent && idp->id_pci_bus == bus &&
		    idp->id_pci_device == device) {
			break;
		}
	}

	if (idp != NULL) {
		VERIFY(idp->id_nreg < idp->id_nfuncs);
		idp->id_nreg++;
	} else {
		i40e_hw_t *hw = &i40e->i40e_hw_space;
		ASSERT(hw->num_ports > 0);
		ASSERT(hw->num_partitions > 0);

		/*
		 * The Intel common code doesn't exactly keep the number of PCI
		 * functions. But it calculates it during discovery of
		 * partitions and ports. So what we do is undo the calculation
		 * that it does originally, as functions are evenly spread
		 * across ports in the rare case of partitions.
		 */
		idp = kmem_alloc(sizeof (i40e_device_t), KM_SLEEP);
		idp->id_parent = parent;
		idp->id_pci_bus = bus;
		idp->id_pci_device = device;
		idp->id_nfuncs = hw->num_ports * hw->num_partitions;
		idp->id_nreg = 1;
		idp->id_rsrcs_alloc = i40e->i40e_switch_rsrc_alloc;
		idp->id_rsrcs_act = i40e->i40e_switch_rsrc_actual;
		idp->id_rsrcs = kmem_alloc(sizeof (i40e_switch_rsrc_t) *
		    idp->id_rsrcs_alloc, KM_SLEEP);
		bcopy(i40e->i40e_switch_rsrcs, idp->id_rsrcs,
		    sizeof (i40e_switch_rsrc_t) * idp->id_rsrcs_alloc);
		list_create(&idp->id_i40e_list, sizeof (i40e_t),
		    offsetof(i40e_t, i40e_dlink));

		list_insert_tail(&i40e_dlist, idp);
	}

	list_insert_tail(&idp->id_i40e_list, i40e);
	mutex_exit(&i40e_glock);

	return (idp);
}

/*
 * Update the cached link state and, only when it actually changes, notify
 * MAC so the event propagates up the stack.
 */
static void
i40e_link_state_set(i40e_t *i40e, link_state_t state)
{
	if (i40e->i40e_link_state == state)
		return;

	i40e->i40e_link_state = state;
	mac_link_update(i40e->i40e_mac_hdl, i40e->i40e_link_state);
}

/*
 * This is a basic link check routine. Mostly we're using this just to see
 * if we can get any accurate information about the state of the link being
 * up or down, as well as updating the link state, speed, etc. information.
541 */ 542 void 543 i40e_link_check(i40e_t *i40e) 544 { 545 i40e_hw_t *hw = &i40e->i40e_hw_space; 546 boolean_t ls; 547 int ret; 548 549 ASSERT(MUTEX_HELD(&i40e->i40e_general_lock)); 550 551 hw->phy.get_link_info = B_TRUE; 552 if ((ret = i40e_get_link_status(hw, &ls)) != I40E_SUCCESS) { 553 i40e->i40e_s_link_status_errs++; 554 i40e->i40e_s_link_status_lasterr = ret; 555 return; 556 } 557 558 /* 559 * Firmware abstracts all of the mac and phy information for us, so we 560 * can use i40e_get_link_status to determine the current state. 561 */ 562 if (ls == B_TRUE) { 563 enum i40e_aq_link_speed speed; 564 565 speed = i40e_get_link_speed(hw); 566 567 /* 568 * Translate from an i40e value to a value in Mbits/s. 569 */ 570 switch (speed) { 571 case I40E_LINK_SPEED_100MB: 572 i40e->i40e_link_speed = 100; 573 break; 574 case I40E_LINK_SPEED_1GB: 575 i40e->i40e_link_speed = 1000; 576 break; 577 case I40E_LINK_SPEED_10GB: 578 i40e->i40e_link_speed = 10000; 579 break; 580 case I40E_LINK_SPEED_20GB: 581 i40e->i40e_link_speed = 20000; 582 break; 583 case I40E_LINK_SPEED_40GB: 584 i40e->i40e_link_speed = 40000; 585 break; 586 case I40E_LINK_SPEED_25GB: 587 i40e->i40e_link_speed = 25000; 588 break; 589 default: 590 i40e->i40e_link_speed = 0; 591 break; 592 } 593 594 /* 595 * At this time, hardware does not support half-duplex 596 * operation, hence why we don't ask the hardware about our 597 * current speed. 
598 */ 599 i40e->i40e_link_duplex = LINK_DUPLEX_FULL; 600 i40e_link_state_set(i40e, LINK_STATE_UP); 601 } else { 602 i40e->i40e_link_speed = 0; 603 i40e->i40e_link_duplex = 0; 604 i40e_link_state_set(i40e, LINK_STATE_DOWN); 605 } 606 } 607 608 static void 609 i40e_rem_intrs(i40e_t *i40e) 610 { 611 int i, rc; 612 613 for (i = 0; i < i40e->i40e_intr_count; i++) { 614 rc = ddi_intr_free(i40e->i40e_intr_handles[i]); 615 if (rc != DDI_SUCCESS) { 616 i40e_log(i40e, "failed to free interrupt %d: %d", 617 i, rc); 618 } 619 } 620 621 kmem_free(i40e->i40e_intr_handles, i40e->i40e_intr_size); 622 i40e->i40e_intr_handles = NULL; 623 } 624 625 static void 626 i40e_rem_intr_handlers(i40e_t *i40e) 627 { 628 int i, rc; 629 630 for (i = 0; i < i40e->i40e_intr_count; i++) { 631 rc = ddi_intr_remove_handler(i40e->i40e_intr_handles[i]); 632 if (rc != DDI_SUCCESS) { 633 i40e_log(i40e, "failed to remove interrupt %d: %d", 634 i, rc); 635 } 636 } 637 } 638 639 /* 640 * illumos Fault Management Architecture (FMA) support. 641 */ 642 643 int 644 i40e_check_acc_handle(ddi_acc_handle_t handle) 645 { 646 ddi_fm_error_t de; 647 648 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION); 649 ddi_fm_acc_err_clear(handle, DDI_FME_VERSION); 650 return (de.fme_status); 651 } 652 653 int 654 i40e_check_dma_handle(ddi_dma_handle_t handle) 655 { 656 ddi_fm_error_t de; 657 658 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION); 659 return (de.fme_status); 660 } 661 662 /* 663 * Fault service error handling callback function. 
664 */ 665 /* ARGSUSED */ 666 static int 667 i40e_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data) 668 { 669 pci_ereport_post(dip, err, NULL); 670 return (err->fme_status); 671 } 672 673 static void 674 i40e_fm_init(i40e_t *i40e) 675 { 676 ddi_iblock_cookie_t iblk; 677 678 i40e->i40e_fm_capabilities = ddi_prop_get_int(DDI_DEV_T_ANY, 679 i40e->i40e_dip, DDI_PROP_DONTPASS, "fm_capable", 680 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE | 681 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE); 682 683 if (i40e->i40e_fm_capabilities < 0) { 684 i40e->i40e_fm_capabilities = 0; 685 } else if (i40e->i40e_fm_capabilities > 0xf) { 686 i40e->i40e_fm_capabilities = DDI_FM_EREPORT_CAPABLE | 687 DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE | 688 DDI_FM_ERRCB_CAPABLE; 689 } 690 691 /* 692 * Only register with IO Fault Services if we have some capability 693 */ 694 if (i40e->i40e_fm_capabilities & DDI_FM_ACCCHK_CAPABLE) { 695 i40e_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC; 696 } else { 697 i40e_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC; 698 } 699 700 if (i40e->i40e_fm_capabilities) { 701 ddi_fm_init(i40e->i40e_dip, &i40e->i40e_fm_capabilities, &iblk); 702 703 if (DDI_FM_EREPORT_CAP(i40e->i40e_fm_capabilities) || 704 DDI_FM_ERRCB_CAP(i40e->i40e_fm_capabilities)) { 705 pci_ereport_setup(i40e->i40e_dip); 706 } 707 708 if (DDI_FM_ERRCB_CAP(i40e->i40e_fm_capabilities)) { 709 ddi_fm_handler_register(i40e->i40e_dip, 710 i40e_fm_error_cb, (void*)i40e); 711 } 712 } 713 714 if (i40e->i40e_fm_capabilities & DDI_FM_DMACHK_CAPABLE) { 715 i40e_init_dma_attrs(i40e, B_TRUE); 716 } else { 717 i40e_init_dma_attrs(i40e, B_FALSE); 718 } 719 } 720 721 static void 722 i40e_fm_fini(i40e_t *i40e) 723 { 724 if (i40e->i40e_fm_capabilities) { 725 726 if (DDI_FM_EREPORT_CAP(i40e->i40e_fm_capabilities) || 727 DDI_FM_ERRCB_CAP(i40e->i40e_fm_capabilities)) 728 pci_ereport_teardown(i40e->i40e_dip); 729 730 if (DDI_FM_ERRCB_CAP(i40e->i40e_fm_capabilities)) 731 
ddi_fm_handler_unregister(i40e->i40e_dip); 732 733 ddi_fm_fini(i40e->i40e_dip); 734 } 735 } 736 737 void 738 i40e_fm_ereport(i40e_t *i40e, char *detail) 739 { 740 uint64_t ena; 741 char buf[FM_MAX_CLASS]; 742 743 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail); 744 ena = fm_ena_generate(0, FM_ENA_FMT1); 745 if (DDI_FM_EREPORT_CAP(i40e->i40e_fm_capabilities)) { 746 ddi_fm_ereport_post(i40e->i40e_dip, buf, ena, DDI_NOSLEEP, 747 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL); 748 } 749 } 750 751 /* 752 * Here we're trying to get the ID of the default VSI. In general, when we come 753 * through and look at this shortly after attach, we expect there to only be a 754 * single element present, which is the default VSI. Importantly, each PF seems 755 * to not see any other devices, in part because of the simple switch mode that 756 * we're using. If for some reason, we see more artifact, we'll need to revisit 757 * what we're doing here. 758 */ 759 static int 760 i40e_get_vsi_id(i40e_t *i40e) 761 { 762 i40e_hw_t *hw = &i40e->i40e_hw_space; 763 struct i40e_aqc_get_switch_config_resp *sw_config; 764 uint8_t aq_buf[I40E_AQ_LARGE_BUF]; 765 uint16_t next = 0; 766 int rc; 767 768 /* LINTED: E_BAD_PTR_CAST_ALIGN */ 769 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf; 770 rc = i40e_aq_get_switch_config(hw, sw_config, sizeof (aq_buf), &next, 771 NULL); 772 if (rc != I40E_SUCCESS) { 773 i40e_error(i40e, "i40e_aq_get_switch_config() failed %d: %d", 774 rc, hw->aq.asq_last_status); 775 return (-1); 776 } 777 778 if (LE_16(sw_config->header.num_reported) != 1) { 779 i40e_error(i40e, "encountered multiple (%d) switching units " 780 "during attach, not proceeding", 781 LE_16(sw_config->header.num_reported)); 782 return (-1); 783 } 784 785 return (sw_config->element[0].seid); 786 } 787 788 /* 789 * We need to fill the i40e_hw_t structure with the capabilities of this PF. 
We
 * must also provide the memory for it; however, we don't need to keep it around
 * to the call to the common code. It takes it and parses it into an internal
 * structure.
 */
static boolean_t
i40e_get_hw_capabilities(i40e_t *i40e, i40e_hw_t *hw)
{
	struct i40e_aqc_list_capabilities_element_resp *buf;
	int rc;
	size_t len;
	/*
	 * On I40E_AQ_RC_ENOMEM the common code passes the required buffer
	 * size back through 'needed' -- TODO confirm its units (bytes vs.
	 * elements) against the common code.
	 */
	uint16_t needed;
	int nelems = I40E_HW_CAP_DEFAULT;

	len = nelems * sizeof (*buf);

	for (;;) {
		ASSERT(len > 0);
		buf = kmem_alloc(len, KM_SLEEP);
		rc = i40e_aq_discover_capabilities(hw, buf, len,
		    &needed, i40e_aqc_opc_list_func_capabilities, NULL);
		kmem_free(buf, len);

		/*
		 * If firmware says the buffer was too small, retry with the
		 * size it asked for. NOTE(review): 'nelems' is never updated,
		 * so the nelems == I40E_HW_CAP_DEFAULT guard remains true on
		 * later passes, and 'nelems' (an element count) is compared
		 * against 'needed'; presumably a single retry with
		 * len = needed is expected to always suffice -- confirm
		 * against the common code.
		 */
		if (hw->aq.asq_last_status == I40E_AQ_RC_ENOMEM &&
		    nelems == I40E_HW_CAP_DEFAULT) {
			if (nelems == needed) {
				i40e_error(i40e, "Capability discovery failed "
				    "due to byzantine common code");
				return (B_FALSE);
			}
			len = needed;
			continue;
		} else if (rc != I40E_SUCCESS ||
		    hw->aq.asq_last_status != I40E_AQ_RC_OK) {
			i40e_error(i40e, "Capability discovery failed: %d", rc);
			return (B_FALSE);
		}

		break;
	}

	return (B_TRUE);
}

/*
 * Obtain the switch's capabilities as seen by this PF and keep it around for
 * our later use.
 */
static boolean_t
i40e_get_switch_resources(i40e_t *i40e)
{
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	uint8_t cnt = 2;	/* initial guess at the entry count */
	uint8_t act;		/* actual entry count from firmware */
	size_t size;
	i40e_switch_rsrc_t *buf;

	/*
	 * We don't know up front how many resource entries firmware will
	 * report, so grow the buffer until the request stops failing with
	 * EINVAL. NOTE(review): 'cnt' is a uint8_t, so repeated growth by
	 * I40E_SWITCH_CAP_DEFAULT could eventually wrap -- presumably the
	 * firmware's entry count keeps that unreachable; confirm.
	 */
	for (;;) {
		enum i40e_status_code ret;
		size = cnt * sizeof (i40e_switch_rsrc_t);
		ASSERT(size > 0);
		if (size > UINT16_MAX)
			return (B_FALSE);
		buf = kmem_alloc(size, KM_SLEEP);

		ret = i40e_aq_get_switch_resource_alloc(hw, &act, buf,
		    cnt, NULL);
		if (ret == I40E_ERR_ADMIN_QUEUE_ERROR &&
		    hw->aq.asq_last_status == I40E_AQ_RC_EINVAL) {
			kmem_free(buf, size);
			cnt += I40E_SWITCH_CAP_DEFAULT;
			continue;
		} else if (ret != I40E_SUCCESS) {
			kmem_free(buf, size);
			i40e_error(i40e,
			    "failed to retrieve switch statistics: %d", ret);
			return (B_FALSE);
		}

		break;
	}

	/* Ownership of 'buf' passes to the i40e_t; freed in cleanup. */
	i40e->i40e_switch_rsrc_alloc = cnt;
	i40e->i40e_switch_rsrc_actual = act;
	i40e->i40e_switch_rsrcs = buf;

	return (B_TRUE);
}

/*
 * Free the unicast/multicast filter tables, the switch resource snapshot,
 * and our hold on the device-wide structure. Safe to call with any subset
 * of them allocated; each pointer is reset to NULL after freeing.
 */
static void
i40e_cleanup_resources(i40e_t *i40e)
{
	if (i40e->i40e_uaddrs != NULL) {
		kmem_free(i40e->i40e_uaddrs, sizeof (i40e_uaddr_t) *
		    i40e->i40e_resources.ifr_nmacfilt);
		i40e->i40e_uaddrs = NULL;
	}

	if (i40e->i40e_maddrs != NULL) {
		kmem_free(i40e->i40e_maddrs, sizeof (i40e_maddr_t) *
		    i40e->i40e_resources.ifr_nmcastfilt);
		i40e->i40e_maddrs = NULL;
	}

	if (i40e->i40e_switch_rsrcs != NULL) {
		size_t sz = sizeof (i40e_switch_rsrc_t) *
		    i40e->i40e_switch_rsrc_alloc;
		ASSERT(sz > 0);
		kmem_free(i40e->i40e_switch_rsrcs, sz);
		i40e->i40e_switch_rsrcs = NULL;
	}

	if (i40e->i40e_device != NULL)
		i40e_device_rele(i40e);
}

/*
 * Work out how many VSIs, MAC filters, and multicast hash filters this PF
 * may use, based on its guaranteed amounts plus an even share of the
 * device-wide unallocated pool, along with the queue pair counts firmware
 * reports.
 */
static boolean_t
i40e_get_available_resources(i40e_t *i40e)
{
	dev_info_t *parent;
	uint16_t bus, device, func;
	uint_t nregs;
	int *regs, i;
	i40e_device_t *idp;
	i40e_hw_t *hw = &i40e->i40e_hw_space;

	parent = ddi_get_parent(i40e->i40e_dip);

	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, i40e->i40e_dip, 0, "reg",
	    &regs, &nregs) != DDI_PROP_SUCCESS) {
		return (B_FALSE);
	}

	if (nregs < 1) {
		ddi_prop_free(regs);
		return (B_FALSE);
	}

	/* The first "reg" entry carries our PCI bus/device/function. */
	bus = PCI_REG_BUS_G(regs[0]);
	device = PCI_REG_DEV_G(regs[0]);
	func = PCI_REG_FUNC_G(regs[0]);
	ddi_prop_free(regs);

	i40e->i40e_hw_space.bus.func = func;
	i40e->i40e_hw_space.bus.device = device;

	if (i40e_get_switch_resources(i40e) == B_FALSE) {
		return (B_FALSE);
	}

	/*
	 * To calculate the total amount of a resource we have available, we
	 * need to add how many our i40e_t thinks it has guaranteed, if any, and
	 * then we need to go through and divide the number of available on the
	 * device, which was snapshotted before anyone should have allocated
	 * anything, and use that to derive how many are available from the
	 * pool. Longer term, we may want to turn this into something that's
	 * more of a pool-like resource that everything can share (though that
	 * may require some more assistance from MAC).
	 *
	 * Though for transmit and receive queue pairs, we just have to ask
	 * firmware instead.
	 */
	idp = i40e_device_find(i40e, parent, bus, device);
	i40e->i40e_device = idp;
	i40e->i40e_resources.ifr_nvsis = 0;
	i40e->i40e_resources.ifr_nvsis_used = 0;
	i40e->i40e_resources.ifr_nmacfilt = 0;
	i40e->i40e_resources.ifr_nmacfilt_used = 0;
	i40e->i40e_resources.ifr_nmcastfilt = 0;
	i40e->i40e_resources.ifr_nmcastfilt_used = 0;

	/* First pass: sum this PF's guaranteed amounts and current usage. */
	for (i = 0; i < i40e->i40e_switch_rsrc_actual; i++) {
		i40e_switch_rsrc_t *srp = &i40e->i40e_switch_rsrcs[i];

		switch (srp->resource_type) {
		case I40E_AQ_RESOURCE_TYPE_VSI:
			i40e->i40e_resources.ifr_nvsis +=
			    LE_16(srp->guaranteed);
			i40e->i40e_resources.ifr_nvsis_used = LE_16(srp->used);
			break;
		case I40E_AQ_RESOURCE_TYPE_MACADDR:
			i40e->i40e_resources.ifr_nmacfilt +=
			    LE_16(srp->guaranteed);
			i40e->i40e_resources.ifr_nmacfilt_used =
			    LE_16(srp->used);
			break;
		case I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH:
			i40e->i40e_resources.ifr_nmcastfilt +=
			    LE_16(srp->guaranteed);
			i40e->i40e_resources.ifr_nmcastfilt_used =
			    LE_16(srp->used);
			break;
		default:
			break;
		}
	}

	/*
	 * Second pass: add an even share of the shared, unallocated pool.
	 * NOTE(review): this loop is bounded by the device-wide snapshot
	 * count (idp->id_rsrcs_act) yet reads this PF's live
	 * i40e_switch_rsrcs array rather than the idp->id_rsrcs snapshot
	 * that the comment above says should be used -- confirm which array
	 * is intended.
	 */
	for (i = 0; i < idp->id_rsrcs_act; i++) {
		i40e_switch_rsrc_t *srp = &i40e->i40e_switch_rsrcs[i];
		switch (srp->resource_type) {
		case I40E_AQ_RESOURCE_TYPE_VSI:
			i40e->i40e_resources.ifr_nvsis +=
			    LE_16(srp->total_unalloced) / idp->id_nfuncs;
			break;
		case I40E_AQ_RESOURCE_TYPE_MACADDR:
			i40e->i40e_resources.ifr_nmacfilt +=
			    LE_16(srp->total_unalloced) / idp->id_nfuncs;
			break;
		case I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH:
			i40e->i40e_resources.ifr_nmcastfilt +=
			    LE_16(srp->total_unalloced) / idp->id_nfuncs;
			/* FALLTHROUGH (into the empty default) */
		default:
			break;
		}
	}

	i40e->i40e_resources.ifr_nrx_queue = hw->func_caps.num_rx_qp;
	i40e->i40e_resources.ifr_ntx_queue = hw->func_caps.num_tx_qp;

	i40e->i40e_uaddrs = kmem_zalloc(sizeof (i40e_uaddr_t) *
	    i40e->i40e_resources.ifr_nmacfilt, KM_SLEEP);
	i40e->i40e_maddrs =
kmem_zalloc(sizeof (i40e_maddr_t) * 1012 i40e->i40e_resources.ifr_nmcastfilt, KM_SLEEP); 1013 1014 /* 1015 * Initialize these as multicast addresses to indicate it's invalid for 1016 * sanity purposes. Think of it like 0xdeadbeef. 1017 */ 1018 for (i = 0; i < i40e->i40e_resources.ifr_nmacfilt; i++) 1019 i40e->i40e_uaddrs[i].iua_mac[0] = 0x01; 1020 1021 return (B_TRUE); 1022 } 1023 1024 static boolean_t 1025 i40e_enable_interrupts(i40e_t *i40e) 1026 { 1027 int i, rc; 1028 1029 if (i40e->i40e_intr_cap & DDI_INTR_FLAG_BLOCK) { 1030 rc = ddi_intr_block_enable(i40e->i40e_intr_handles, 1031 i40e->i40e_intr_count); 1032 if (rc != DDI_SUCCESS) { 1033 i40e_error(i40e, "Interrupt block-enable failed: %d", 1034 rc); 1035 return (B_FALSE); 1036 } 1037 } else { 1038 for (i = 0; i < i40e->i40e_intr_count; i++) { 1039 rc = ddi_intr_enable(i40e->i40e_intr_handles[i]); 1040 if (rc != DDI_SUCCESS) { 1041 i40e_error(i40e, 1042 "Failed to enable interrupt %d: %d", i, rc); 1043 while (--i >= 0) { 1044 (void) ddi_intr_disable( 1045 i40e->i40e_intr_handles[i]); 1046 } 1047 return (B_FALSE); 1048 } 1049 } 1050 } 1051 1052 return (B_TRUE); 1053 } 1054 1055 static boolean_t 1056 i40e_disable_interrupts(i40e_t *i40e) 1057 { 1058 int i, rc; 1059 1060 if (i40e->i40e_intr_cap & DDI_INTR_FLAG_BLOCK) { 1061 rc = ddi_intr_block_disable(i40e->i40e_intr_handles, 1062 i40e->i40e_intr_count); 1063 if (rc != DDI_SUCCESS) { 1064 i40e_error(i40e, 1065 "Interrupt block-disabled failed: %d", rc); 1066 return (B_FALSE); 1067 } 1068 } else { 1069 for (i = 0; i < i40e->i40e_intr_count; i++) { 1070 rc = ddi_intr_disable(i40e->i40e_intr_handles[i]); 1071 if (rc != DDI_SUCCESS) { 1072 i40e_error(i40e, 1073 "Failed to disable interrupt %d: %d", 1074 i, rc); 1075 return (B_FALSE); 1076 } 1077 } 1078 } 1079 1080 return (B_TRUE); 1081 } 1082 1083 /* 1084 * Free receive & transmit rings. 
 */
static void
i40e_free_trqpairs(i40e_t *i40e)
{
	int i;
	i40e_trqpair_t *itrq;

	if (i40e->i40e_trqpairs != NULL) {
		/* Tear down each pair's locks before freeing the array. */
		for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
			itrq = &i40e->i40e_trqpairs[i];
			mutex_destroy(&itrq->itrq_rx_lock);
			mutex_destroy(&itrq->itrq_tx_lock);
			mutex_destroy(&itrq->itrq_tcb_lock);

			/*
			 * Should have already been cleaned up by start/stop,
			 * etc.
			 */
			ASSERT(itrq->itrq_txkstat == NULL);
			ASSERT(itrq->itrq_rxkstat == NULL);
		}

		kmem_free(i40e->i40e_trqpairs,
		    sizeof (i40e_trqpair_t) * i40e->i40e_num_trqpairs);
		i40e->i40e_trqpairs = NULL;
	}

	/* These are initialized in i40e_alloc_trqpairs(). */
	cv_destroy(&i40e->i40e_rx_pending_cv);
	mutex_destroy(&i40e->i40e_rx_pending_lock);
	mutex_destroy(&i40e->i40e_general_lock);
}

/*
 * Allocate transmit and receive rings, as well as other data structures that we
 * need.
 */
static boolean_t
i40e_alloc_trqpairs(i40e_t *i40e)
{
	int i;
	/* All locks share the interrupt priority recorded at alloc time. */
	void *mutexpri = DDI_INTR_PRI(i40e->i40e_intr_pri);

	/*
	 * Now that we have the priority for the interrupts, initialize
	 * all relevant locks.
	 */
	mutex_init(&i40e->i40e_general_lock, NULL, MUTEX_DRIVER, mutexpri);
	mutex_init(&i40e->i40e_rx_pending_lock, NULL, MUTEX_DRIVER, mutexpri);
	cv_init(&i40e->i40e_rx_pending_cv, NULL, CV_DRIVER, NULL);

	i40e->i40e_trqpairs = kmem_zalloc(sizeof (i40e_trqpair_t) *
	    i40e->i40e_num_trqpairs, KM_SLEEP);
	for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
		i40e_trqpair_t *itrq = &i40e->i40e_trqpairs[i];

		itrq->itrq_i40e = i40e;
		mutex_init(&itrq->itrq_rx_lock, NULL, MUTEX_DRIVER, mutexpri);
		mutex_init(&itrq->itrq_tx_lock, NULL, MUTEX_DRIVER, mutexpri);
		mutex_init(&itrq->itrq_tcb_lock, NULL, MUTEX_DRIVER, mutexpri);
		itrq->itrq_index = i;
	}

	return (B_TRUE);
}



/*
 * Unless a .conf file already overrode i40e_t structure values, they will
 * be 0, and need to be set in conjunction with the now-available HW report.
 *
 * However, at the moment, we cap all of these resources as we only support a
 * single receive ring and a single group.
 */
/* ARGSUSED */
static void
i40e_hw_to_instance(i40e_t *i40e, i40e_hw_t *hw)
{
	if (i40e->i40e_num_trqpairs == 0) {
		i40e->i40e_num_trqpairs = I40E_TRQPAIR_MAX;
	}

	if (i40e->i40e_num_rx_groups == 0) {
		i40e->i40e_num_rx_groups = I40E_GROUP_MAX;
	}
}

/*
 * Free any resources required by, or setup by, the Intel common code.
 */
static void
i40e_common_code_fini(i40e_t *i40e)
{
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	int rc;

	/*
	 * Tear down in reverse order of i40e_common_code_init(). Failures
	 * are logged but not fatal, as we're on the teardown path.
	 */
	rc = i40e_shutdown_lan_hmc(hw);
	if (rc != I40E_SUCCESS)
		i40e_error(i40e, "failed to shutdown LAN hmc: %d", rc);

	rc = i40e_shutdown_adminq(hw);
	if (rc != I40E_SUCCESS)
		i40e_error(i40e, "failed to shutdown admin queue: %d", rc);
}

/*
 * Initialize and call Intel common-code routines, includes some setup
 * the common code expects from the driver. Also prints on failure, so
 * the caller doesn't have to.
 */
static boolean_t
i40e_common_code_init(i40e_t *i40e, i40e_hw_t *hw)
{
	int rc;

	/* Reset the PF to a known-clean state before anything else. */
	i40e_clear_hw(hw);
	rc = i40e_pf_reset(hw);
	if (rc != 0) {
		i40e_error(i40e, "failed to reset hardware: %d", rc);
		i40e_fm_ereport(i40e, DDI_FM_DEVICE_NO_RESPONSE);
		return (B_FALSE);
	}

	rc = i40e_init_shared_code(hw);
	if (rc != 0) {
		i40e_error(i40e, "failed to initialize i40e core: %d", rc);
		return (B_FALSE);
	}

	/* Admin queue geometry must be filled in before i40e_init_adminq(). */
	hw->aq.num_arq_entries = I40E_DEF_ADMINQ_SIZE;
	hw->aq.num_asq_entries = I40E_DEF_ADMINQ_SIZE;
	hw->aq.arq_buf_size = I40E_ADMINQ_BUFSZ;
	hw->aq.asq_buf_size = I40E_ADMINQ_BUFSZ;

	rc = i40e_init_adminq(hw);
	if (rc != 0) {
		i40e_error(i40e, "failed to initialize firmware admin queue: "
		    "%d, potential firmware version mismatch", rc);
		i40e_fm_ereport(i40e, DDI_FM_DEVICE_INVAL_STATE);
		return (B_FALSE);
	}

	/*
	 * Warn (but proceed) when the firmware API version differs from what
	 * this driver was built against.
	 * NOTE(review): the "older NVM" arm compares api_min_ver regardless
	 * of which major version firmware reports — confirm this matches the
	 * intended compatibility policy.
	 */
	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR) {
		i40e_log(i40e, "The driver for the device detected a newer "
		    "version of the NVM image (%d.%d) than expected (%d.%d).\n"
		    "Please install the most recent version of the network "
		    "driver.\n", hw->aq.api_maj_ver, hw->aq.api_min_ver,
		    I40E_FW_API_VERSION_MAJOR, I40E_FW_API_VERSION_MINOR);
	} else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
	    hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1)) {
		i40e_log(i40e, "The driver for the device detected an older"
		    " version of the NVM image (%d.%d) than expected (%d.%d)."
		    "\nPlease update the NVM image.\n",
		    hw->aq.api_maj_ver, hw->aq.api_min_ver,
		    I40E_FW_API_VERSION_MAJOR, I40E_FW_API_VERSION_MINOR - 1);
	}

	i40e_clear_pxe_mode(hw);

	/*
	 * We need to call this so that the common code can discover
	 * capabilities of the hardware, which it uses throughout the rest.
	 */
	if (!i40e_get_hw_capabilities(i40e, hw)) {
		i40e_error(i40e, "failed to obtain hardware capabilities");
		return (B_FALSE);
	}

	if (i40e_get_available_resources(i40e) == B_FALSE) {
		i40e_error(i40e, "failed to obtain hardware resources");
		return (B_FALSE);
	}

	i40e_hw_to_instance(i40e, hw);

	rc = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp, 0, 0);
	if (rc != 0) {
		i40e_error(i40e, "failed to initialize hardware memory cache: "
		    "%d", rc);
		return (B_FALSE);
	}

	rc = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (rc != 0) {
		i40e_error(i40e, "failed to configure hardware memory cache: "
		    "%d", rc);
		return (B_FALSE);
	}

	/*
	 * Best effort; presumably stops the firmware LLDP agent from
	 * processing frames on our behalf — confirm. Failure is ignored.
	 */
	(void) i40e_aq_stop_lldp(hw, TRUE, NULL);

	rc = i40e_get_mac_addr(hw, hw->mac.addr);
	if (rc != I40E_SUCCESS) {
		i40e_error(i40e, "failed to retrieve hardware mac address: %d",
		    rc);
		return (B_FALSE);
	}

	rc = i40e_validate_mac_addr(hw->mac.addr);
	if (rc != 0) {
		i40e_error(i40e, "failed to validate internal mac address: "
		    "%d", rc);
		return (B_FALSE);
	}
	bcopy(hw->mac.addr, hw->mac.perm_addr, ETHERADDRL);
	if ((rc = i40e_get_port_mac_addr(hw, hw->mac.port_addr)) !=
	    I40E_SUCCESS) {
		i40e_error(i40e, "failed to retrieve port mac address: %d",
		    rc);
		return (B_FALSE);
	}

	/*
	 * We need to obtain the Virtual Station ID (VSI) before we can
	 * perform other operations on the device.
	 */
	i40e->i40e_vsi_id = i40e_get_vsi_id(i40e);
	if (i40e->i40e_vsi_id == -1) {
		i40e_error(i40e, "failed to obtain VSI ID");
		return (B_FALSE);
	}

	return (B_TRUE);
}

/*
 * Unwind attach-time work. Keyed off the i40e_attach_progress bits so it is
 * safe from both a partially-failed attach and a full detach.
 */
static void
i40e_unconfigure(dev_info_t *devinfo, i40e_t *i40e)
{
	int rc;

	if (i40e->i40e_attach_progress & I40E_ATTACH_ENABLE_INTR)
		(void) i40e_disable_interrupts(i40e);

	if ((i40e->i40e_attach_progress & I40E_ATTACH_LINK_TIMER) &&
	    i40e->i40e_periodic_id != 0) {
		ddi_periodic_delete(i40e->i40e_periodic_id);
		i40e->i40e_periodic_id = 0;
	}

	if (i40e->i40e_attach_progress & I40E_ATTACH_MAC) {
		rc = mac_unregister(i40e->i40e_mac_hdl);
		if (rc != 0) {
			i40e_error(i40e, "failed to unregister from mac: %d",
			    rc);
		}
	}

	if (i40e->i40e_attach_progress & I40E_ATTACH_STATS) {
		i40e_stats_fini(i40e);
	}

	if (i40e->i40e_attach_progress & I40E_ATTACH_ADD_INTR)
		i40e_rem_intr_handlers(i40e);

	if (i40e->i40e_attach_progress & I40E_ATTACH_ALLOC_RINGSLOCKS)
		i40e_free_trqpairs(i40e);

	if (i40e->i40e_attach_progress & I40E_ATTACH_ALLOC_INTR)
		i40e_rem_intrs(i40e);

	if (i40e->i40e_attach_progress & I40E_ATTACH_COMMON_CODE)
		i40e_common_code_fini(i40e);

	i40e_cleanup_resources(i40e);

	if (i40e->i40e_attach_progress & I40E_ATTACH_PROPS)
		(void) ddi_prop_remove_all(devinfo);

	if (i40e->i40e_attach_progress & I40E_ATTACH_REGS_MAP &&
	    i40e->i40e_osdep_space.ios_reg_handle != NULL) {
		ddi_regs_map_free(&i40e->i40e_osdep_space.ios_reg_handle);
		i40e->i40e_osdep_space.ios_reg_handle = NULL;
	}

	if ((i40e->i40e_attach_progress & I40E_ATTACH_PCI_CONFIG) &&
	    i40e->i40e_osdep_space.ios_cfg_handle != NULL) {
		pci_config_teardown(&i40e->i40e_osdep_space.ios_cfg_handle);
		i40e->i40e_osdep_space.ios_cfg_handle = NULL;
	}

	if (i40e->i40e_attach_progress & I40E_ATTACH_FM_INIT)
		i40e_fm_fini(i40e);

	/*
	 * NOTE(review): i40e_aqbuf is freed unconditionally — presumably it
	 * is always allocated before any path can reach here; confirm against
	 * the attach entry point.
	 */
	kmem_free(i40e->i40e_aqbuf, I40E_ADMINQ_BUFSZ);
	kmem_free(i40e, sizeof (i40e_t));

	ddi_set_driver_private(devinfo, NULL);
}

/*
 * Publish identifying device properties (PBA, firmware/API versions) and
 * verify register access is still healthy before attach completes.
 */
static boolean_t
i40e_final_init(i40e_t *i40e)
{
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	struct i40e_osdep *osdep = OS_DEP(hw);
	uint8_t pbanum[I40E_PBANUM_STRLEN];
	enum i40e_status_code irc;
	char buf[I40E_DDI_PROP_LEN];

	pbanum[0] = '\0';
	irc = i40e_read_pba_string(hw, pbanum, sizeof (pbanum));
	if (irc != I40E_SUCCESS) {
		/* Non-fatal: just skip publishing the property. */
		i40e_log(i40e, "failed to read PBA string: %d", irc);
	} else {
		(void) ddi_prop_update_string(DDI_DEV_T_NONE, i40e->i40e_dip,
		    "printed-board-assembly", (char *)pbanum);
	}

#ifdef DEBUG
	/* Demonstrate on DEBUG builds that no format below can truncate. */
	ASSERT(snprintf(NULL, 0, "%d.%d", hw->aq.fw_maj_ver,
	    hw->aq.fw_min_ver) < sizeof (buf));
	ASSERT(snprintf(NULL, 0, "%x", hw->aq.fw_build) < sizeof (buf));
	ASSERT(snprintf(NULL, 0, "%d.%d", hw->aq.api_maj_ver,
	    hw->aq.api_min_ver) < sizeof (buf));
#endif

	(void) snprintf(buf, sizeof (buf), "%d.%d", hw->aq.fw_maj_ver,
	    hw->aq.fw_min_ver);
	(void) ddi_prop_update_string(DDI_DEV_T_NONE, i40e->i40e_dip,
	    "firmware-version", buf);
	(void) snprintf(buf, sizeof (buf), "%x", hw->aq.fw_build);
	(void) ddi_prop_update_string(DDI_DEV_T_NONE, i40e->i40e_dip,
	    "firmware-build", buf);
	(void) snprintf(buf, sizeof (buf), "%d.%d", hw->aq.api_maj_ver,
	    hw->aq.api_min_ver);
	(void) ddi_prop_update_string(DDI_DEV_T_NONE, i40e->i40e_dip,
	    "api-version", buf);

	if (!i40e_set_hw_bus_info(hw))
		return (B_FALSE);

	if (i40e_check_acc_handle(osdep->ios_reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_LOST);
		return (B_FALSE);
	}

	return (B_TRUE);
}

/*
 * Cache the device's PCI identity (vendor/device/revision/subsystem IDs)
 * from config space into the common-code hw structure.
 */
static void
i40e_identify_hardware(i40e_t *i40e)
{
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	struct i40e_osdep *osdep = &i40e->i40e_osdep_space;

	hw->vendor_id = pci_config_get16(osdep->ios_cfg_handle, PCI_CONF_VENID);
	hw->device_id = pci_config_get16(osdep->ios_cfg_handle, PCI_CONF_DEVID);
	hw->revision_id = pci_config_get8(osdep->ios_cfg_handle,
	    PCI_CONF_REVID);
	hw->subsystem_device_id =
	    pci_config_get16(osdep->ios_cfg_handle, PCI_CONF_SUBSYSID);
	hw->subsystem_vendor_id =
	    pci_config_get16(osdep->ios_cfg_handle, PCI_CONF_SUBVENID);

	/*
	 * Note that we set the hardware's bus information later on, in
	 * i40e_get_available_resources(). The common code doesn't seem to
	 * require that it be set in any ways, it seems to be mostly for
	 * book-keeping.
	 */
}

/*
 * Map the adapter's register BAR (I40E_ADAPTER_REGSET) so the common code
 * can reach the hardware through hw->hw_addr.
 */
static boolean_t
i40e_regs_map(i40e_t *i40e)
{
	dev_info_t *devinfo = i40e->i40e_dip;
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	struct i40e_osdep *osdep = &i40e->i40e_osdep_space;
	off_t memsize;
	int ret;

	if (ddi_dev_regsize(devinfo, I40E_ADAPTER_REGSET, &memsize) !=
	    DDI_SUCCESS) {
		i40e_error(i40e, "Used invalid register set to map PCIe regs");
		return (B_FALSE);
	}

	if ((ret = ddi_regs_map_setup(devinfo, I40E_ADAPTER_REGSET,
	    (caddr_t *)&hw->hw_addr, 0, memsize, &i40e_regs_acc_attr,
	    &osdep->ios_reg_handle)) != DDI_SUCCESS) {
		i40e_error(i40e, "failed to map device registers: %d", ret);
		return (B_FALSE);
	}

	osdep->ios_reg_size = memsize;
	return (B_TRUE);
}

/*
 * Update parameters required when a new MTU has been configured. Calculate the
 * maximum frame size, as well as, size our DMA buffers which we size in
 * increments of 1K.
1478 */ 1479 void 1480 i40e_update_mtu(i40e_t *i40e) 1481 { 1482 uint32_t rx, tx; 1483 1484 i40e->i40e_frame_max = i40e->i40e_sdu + 1485 sizeof (struct ether_vlan_header) + ETHERFCSL; 1486 1487 rx = i40e->i40e_frame_max + I40E_BUF_IPHDR_ALIGNMENT; 1488 i40e->i40e_rx_buf_size = ((rx >> 10) + 1489 ((rx & (((uint32_t)1 << 10) -1)) > 0 ? 1 : 0)) << 10; 1490 1491 tx = i40e->i40e_frame_max; 1492 i40e->i40e_tx_buf_size = ((tx >> 10) + 1493 ((tx & (((uint32_t)1 << 10) -1)) > 0 ? 1 : 0)) << 10; 1494 } 1495 1496 static int 1497 i40e_get_prop(i40e_t *i40e, char *prop, int min, int max, int def) 1498 { 1499 int val; 1500 1501 val = ddi_prop_get_int(DDI_DEV_T_ANY, i40e->i40e_dip, DDI_PROP_DONTPASS, 1502 prop, def); 1503 if (val > max) 1504 val = max; 1505 if (val < min) 1506 val = min; 1507 return (val); 1508 } 1509 1510 static void 1511 i40e_init_properties(i40e_t *i40e) 1512 { 1513 i40e->i40e_sdu = i40e_get_prop(i40e, "default_mtu", 1514 I40E_MIN_MTU, I40E_MAX_MTU, I40E_DEF_MTU); 1515 1516 i40e->i40e_intr_force = i40e_get_prop(i40e, "intr_force", 1517 I40E_INTR_NONE, I40E_INTR_LEGACY, I40E_INTR_NONE); 1518 1519 i40e->i40e_mr_enable = i40e_get_prop(i40e, "mr_enable", 1520 B_FALSE, B_TRUE, B_TRUE); 1521 1522 i40e->i40e_tx_ring_size = i40e_get_prop(i40e, "tx_ring_size", 1523 I40E_MIN_TX_RING_SIZE, I40E_MAX_TX_RING_SIZE, 1524 I40E_DEF_TX_RING_SIZE); 1525 if ((i40e->i40e_tx_ring_size % I40E_DESC_ALIGN) != 0) { 1526 i40e->i40e_tx_ring_size = P2ROUNDUP(i40e->i40e_tx_ring_size, 1527 I40E_DESC_ALIGN); 1528 } 1529 1530 i40e->i40e_tx_block_thresh = i40e_get_prop(i40e, "tx_resched_threshold", 1531 I40E_MIN_TX_BLOCK_THRESH, 1532 i40e->i40e_tx_ring_size - I40E_TX_MAX_COOKIE, 1533 I40E_DEF_TX_BLOCK_THRESH); 1534 1535 i40e->i40e_rx_ring_size = i40e_get_prop(i40e, "rx_ring_size", 1536 I40E_MIN_RX_RING_SIZE, I40E_MAX_RX_RING_SIZE, 1537 I40E_DEF_RX_RING_SIZE); 1538 if ((i40e->i40e_rx_ring_size % I40E_DESC_ALIGN) != 0) { 1539 i40e->i40e_rx_ring_size = P2ROUNDUP(i40e->i40e_rx_ring_size, 1540 
I40E_DESC_ALIGN); 1541 } 1542 1543 i40e->i40e_rx_limit_per_intr = i40e_get_prop(i40e, "rx_limit_per_intr", 1544 I40E_MIN_RX_LIMIT_PER_INTR, I40E_MAX_RX_LIMIT_PER_INTR, 1545 I40E_DEF_RX_LIMIT_PER_INTR); 1546 1547 i40e->i40e_tx_hcksum_enable = i40e_get_prop(i40e, "tx_hcksum_enable", 1548 B_FALSE, B_TRUE, B_TRUE); 1549 1550 i40e->i40e_rx_hcksum_enable = i40e_get_prop(i40e, "rx_hcksum_enable", 1551 B_FALSE, B_TRUE, B_TRUE); 1552 1553 i40e->i40e_rx_dma_min = i40e_get_prop(i40e, "rx_dma_threshold", 1554 I40E_MIN_RX_DMA_THRESH, I40E_MAX_RX_DMA_THRESH, 1555 I40E_DEF_RX_DMA_THRESH); 1556 1557 i40e->i40e_tx_dma_min = i40e_get_prop(i40e, "tx_dma_threshold", 1558 I40E_MIN_TX_DMA_THRESH, I40E_MAX_TX_DMA_THRESH, 1559 I40E_DEF_TX_DMA_THRESH); 1560 1561 i40e->i40e_tx_itr = i40e_get_prop(i40e, "tx_intr_throttle", 1562 I40E_MIN_ITR, I40E_MAX_ITR, I40E_DEF_TX_ITR); 1563 1564 i40e->i40e_rx_itr = i40e_get_prop(i40e, "rx_intr_throttle", 1565 I40E_MIN_ITR, I40E_MAX_ITR, I40E_DEF_RX_ITR); 1566 1567 i40e->i40e_other_itr = i40e_get_prop(i40e, "other_intr_throttle", 1568 I40E_MIN_ITR, I40E_MAX_ITR, I40E_DEF_OTHER_ITR); 1569 1570 if (!i40e->i40e_mr_enable) { 1571 i40e->i40e_num_trqpairs = I40E_TRQPAIR_NOMSIX; 1572 i40e->i40e_num_rx_groups = I40E_GROUP_NOMSIX; 1573 } 1574 1575 i40e_update_mtu(i40e); 1576 } 1577 1578 /* 1579 * There are a few constraints on interrupts that we're currently imposing, some 1580 * of which are restrictions from hardware. For a fuller treatment, see 1581 * i40e_intr.c. 1582 * 1583 * Currently, to use MSI-X we require two interrupts be available though in 1584 * theory we should participate in IRM and happily use more interrupts. 1585 * 1586 * Hardware only supports a single MSI being programmed and therefore if we 1587 * don't have MSI-X interrupts available at this time, then we ratchet down the 1588 * number of rings and groups available. Obviously, we only bother with a single 1589 * fixed interrupt. 
 */
static boolean_t
i40e_alloc_intr_handles(i40e_t *i40e, dev_info_t *devinfo, int intr_type)
{
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	ddi_acc_handle_t rh = i40e->i40e_osdep_space.ios_reg_handle;
	int request, count, actual, rc, min;
	uint32_t reg;

	/* Decide how many vectors to request and the minimum we'll accept. */
	switch (intr_type) {
	case DDI_INTR_TYPE_FIXED:
	case DDI_INTR_TYPE_MSI:
		request = 1;
		min = 1;
		break;
	case DDI_INTR_TYPE_MSIX:
		min = 2;
		if (!i40e->i40e_mr_enable) {
			request = 2;
			break;
		}
		/* Ask hardware how many MSI-X vectors this PF owns. */
		reg = I40E_READ_REG(hw, I40E_GLPCI_CNF2);
		/*
		 * Should this read fail, we will drop back to using
		 * MSI or fixed interrupts.
		 */
		if (i40e_check_acc_handle(rh) != DDI_FM_OK) {
			ddi_fm_service_impact(i40e->i40e_dip,
			    DDI_SERVICE_DEGRADED);
			return (B_FALSE);
		}
		request = (reg & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
		    I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
		request++;	/* the register value is n - 1 */
		break;
	default:
		panic("bad interrupt type passed to i40e_alloc_intr_handles: "
		    "%d", intr_type);
		return (B_FALSE);
	}

	rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
	if (rc != DDI_SUCCESS || count < min) {
		i40e_log(i40e, "Get interrupt number failed, "
		    "returned %d, count %d", rc, count);
		return (B_FALSE);
	}

	rc = ddi_intr_get_navail(devinfo, intr_type, &count);
	if (rc != DDI_SUCCESS || count < min) {
		i40e_log(i40e, "Get AVAILABLE interrupt number failed, "
		    "returned %d, count %d", rc, count);
		return (B_FALSE);
	}

	actual = 0;
	i40e->i40e_intr_count = 0;
	i40e->i40e_intr_count_max = 0;
	i40e->i40e_intr_count_min = 0;

	i40e->i40e_intr_size = request * sizeof (ddi_intr_handle_t);
	ASSERT(i40e->i40e_intr_size != 0);
	i40e->i40e_intr_handles = kmem_alloc(i40e->i40e_intr_size, KM_SLEEP);

	/*
	 * NOTE(review): the local variable 'min' shadows the min() used just
	 * below; this only compiles because min() is a function-like macro
	 * here rather than a function — confirm against the included headers.
	 */
	rc = ddi_intr_alloc(devinfo, i40e->i40e_intr_handles, intr_type, 0,
	    min(request, count), &actual, DDI_INTR_ALLOC_NORMAL);
	if (rc != DDI_SUCCESS) {
		i40e_log(i40e, "Interrupt allocation failed with %d.", rc);
		goto alloc_handle_fail;
	}

	i40e->i40e_intr_count = actual;
	i40e->i40e_intr_count_max = request;
	i40e->i40e_intr_count_min = min;

	if (actual < min) {
		i40e_log(i40e, "actual (%d) is less than minimum (%d).",
		    actual, min);
		goto alloc_handle_fail;
	}

	/*
	 * Record the priority and capabilities for our first vector. Once
	 * we have it, that's our priority until detach time. Even if we
	 * eventually participate in IRM, our priority shouldn't change.
	 */
	rc = ddi_intr_get_pri(i40e->i40e_intr_handles[0], &i40e->i40e_intr_pri);
	if (rc != DDI_SUCCESS) {
		i40e_log(i40e,
		    "Getting interrupt priority failed with %d.", rc);
		goto alloc_handle_fail;
	}

	rc = ddi_intr_get_cap(i40e->i40e_intr_handles[0], &i40e->i40e_intr_cap);
	if (rc != DDI_SUCCESS) {
		i40e_log(i40e,
		    "Getting interrupt capabilities failed with %d.", rc);
		goto alloc_handle_fail;
	}

	i40e->i40e_intr_type = intr_type;
	return (B_TRUE);

alloc_handle_fail:

	i40e_rem_intrs(i40e);
	return (B_FALSE);
}

/*
 * Try interrupt types from most to least capable (MSI-X, MSI, fixed);
 * i40e_intr_force from driver.conf can cap the starting point.
 */
static boolean_t
i40e_alloc_intrs(i40e_t *i40e, dev_info_t *devinfo)
{
	int intr_types, rc;

	rc = ddi_intr_get_supported_types(devinfo, &intr_types);
	if (rc != DDI_SUCCESS) {
		i40e_error(i40e, "failed to get supported interrupt types: %d",
		    rc);
		return (B_FALSE);
	}

	i40e->i40e_intr_type = 0;

	if ((intr_types & DDI_INTR_TYPE_MSIX) &&
	    i40e->i40e_intr_force <= I40E_INTR_MSIX) {
		if (i40e_alloc_intr_handles(i40e, devinfo,
		    DDI_INTR_TYPE_MSIX)) {
			/* One vector is reserved for the admin queue. */
			i40e->i40e_num_trqpairs =
			    MIN(i40e->i40e_intr_count - 1,
			    I40E_AQ_VSI_TC_QUE_SIZE_MAX);
			return (B_TRUE);
		}
	}

	/*
	 * We only use multiple transmit/receive pairs when MSI-X interrupts are
	 * available due to the fact that the device basically only supports a
	 * single MSI interrupt.
	 */
	i40e->i40e_num_trqpairs = I40E_TRQPAIR_NOMSIX;
	i40e->i40e_num_rx_groups = I40E_GROUP_NOMSIX;

	if ((intr_types & DDI_INTR_TYPE_MSI) &&
	    (i40e->i40e_intr_force <= I40E_INTR_MSI)) {
		if (i40e_alloc_intr_handles(i40e, devinfo, DDI_INTR_TYPE_MSI))
			return (B_TRUE);
	}

	if (intr_types & DDI_INTR_TYPE_FIXED) {
		if (i40e_alloc_intr_handles(i40e, devinfo, DDI_INTR_TYPE_FIXED))
			return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Map different interrupts to MSI-X vectors.
 */
static boolean_t
i40e_map_intrs_to_vectors(i40e_t *i40e)
{
	int i;

	/* Only MSI-X has per-queue vectors; nothing to do otherwise. */
	if (i40e->i40e_intr_type != DDI_INTR_TYPE_MSIX) {
		return (B_TRUE);
	}

	/*
	 * Each queue pair is mapped to a single interrupt, so transmit
	 * and receive interrupts for a given queue share the same vector.
	 * The number of queue pairs is one less than the number of interrupt
	 * vectors and is assigned the vector one higher than its index.
	 * Vector zero is reserved for the admin queue.
	 */
	ASSERT(i40e->i40e_intr_count == i40e->i40e_num_trqpairs + 1);

	for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
		i40e->i40e_trqpairs[i].itrq_rx_intrvec = i + 1;
		i40e->i40e_trqpairs[i].itrq_tx_intrvec = i + 1;
	}

	return (B_TRUE);
}

/*
 * Attach the appropriate interrupt service routine to each allocated vector,
 * unwinding previously-added MSI-X handlers on partial failure.
 */
static boolean_t
i40e_add_intr_handlers(i40e_t *i40e)
{
	int rc, vector;

	switch (i40e->i40e_intr_type) {
	case DDI_INTR_TYPE_MSIX:
		for (vector = 0; vector < i40e->i40e_intr_count; vector++) {
			rc = ddi_intr_add_handler(
			    i40e->i40e_intr_handles[vector],
			    (ddi_intr_handler_t *)i40e_intr_msix, i40e,
			    (void *)(uintptr_t)vector);
			if (rc != DDI_SUCCESS) {
				i40e_log(i40e, "Add interrupt handler (MSI-X) "
				    "failed: return %d, vector %d", rc, vector);
				/* Remove the handlers already installed. */
				for (vector--; vector >= 0; vector--) {
					(void) ddi_intr_remove_handler(
					    i40e->i40e_intr_handles[vector]);
				}
				return (B_FALSE);
			}
		}
		break;
	case DDI_INTR_TYPE_MSI:
		rc = ddi_intr_add_handler(i40e->i40e_intr_handles[0],
		    (ddi_intr_handler_t *)i40e_intr_msi, i40e, NULL);
		if (rc != DDI_SUCCESS) {
			i40e_log(i40e, "Add interrupt handler (MSI) failed: "
			    "return %d", rc);
			return (B_FALSE);
		}
		break;
	case DDI_INTR_TYPE_FIXED:
		rc = ddi_intr_add_handler(i40e->i40e_intr_handles[0],
		    (ddi_intr_handler_t *)i40e_intr_legacy, i40e, NULL);
		if (rc != DDI_SUCCESS) {
			i40e_log(i40e, "Add interrupt handler (legacy) failed:"
			    " return %d", rc);
			return (B_FALSE);
		}
		break;
	default:
		/* Cast to pacify lint */
		panic("i40e_intr_type %p contains an unknown type: %d",
		    (void *)i40e, i40e->i40e_intr_type);
	}

	return (B_TRUE);
}

/*
 * Perform periodic checks.
Longer term, we should be thinking about additional
 * things here:
 *
 * o Stall Detection
 * o Temperature sensor detection
 * o Device resetting
 * o Statistics updating to avoid wraparound
 */
static void
i40e_timer(void *arg)
{
	i40e_t *i40e = arg;

	/* Today the only periodic work is a link-state check. */
	mutex_enter(&i40e->i40e_general_lock);
	i40e_link_check(i40e);
	mutex_exit(&i40e->i40e_general_lock);
}

/*
 * Get the hardware state, and scribble away anything that needs scribbling.
 */
static void
i40e_get_hw_state(i40e_t *i40e, i40e_hw_t *hw)
{
	int rc;

	ASSERT(MUTEX_HELD(&i40e->i40e_general_lock));

	/* Refresh firmware's view of the link, then our cached state. */
	(void) i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
	i40e_link_check(i40e);

	/*
	 * Try and determine our PHY. Note that we may have to retry to and
	 * delay to detect fiber correctly.
	 */
	rc = i40e_aq_get_phy_capabilities(hw, B_FALSE, B_TRUE, &i40e->i40e_phy,
	    NULL);
	if (rc == I40E_ERR_UNKNOWN_PHY) {
		i40e_msec_delay(200);
		rc = i40e_aq_get_phy_capabilities(hw, B_FALSE, B_TRUE,
		    &i40e->i40e_phy, NULL);
	}

	/*
	 * NOTE(review): PHY discovery failures are logged here but not
	 * propagated to the caller — confirm that proceeding is intended.
	 */
	if (rc != I40E_SUCCESS) {
		if (rc == I40E_ERR_UNKNOWN_PHY) {
			i40e_error(i40e, "encountered unknown PHY type, "
			    "not attaching.");
		} else {
			i40e_error(i40e, "error getting physical capabilities: "
			    "%d, %d", rc, hw->aq.asq_last_status);
		}
	}

	rc = i40e_update_link_info(hw);
	if (rc != I40E_SUCCESS) {
		i40e_error(i40e, "failed to update link information: %d", rc);
	}

	/*
	 * In general, we don't want to mask off (as in stop from being a cause)
	 * any of the interrupts that the phy might be able to generate.
	 */
	rc = i40e_aq_set_phy_int_mask(hw, 0, NULL);
	if (rc != I40E_SUCCESS) {
		i40e_error(i40e, "failed to update phy link mask: %d", rc);
	}
}

/*
 * Go through and re-initialize any existing filters that we may have set up for
 * this device. Note that we would only expect them to exist if hardware had
 * already been initialized and we had just reset it. While we're not
 * implementing this yet, we're keeping this around for when we add reset
 * capabilities, so this isn't forgotten.
 */
/* ARGSUSED */
static void
i40e_init_macaddrs(i40e_t *i40e, i40e_hw_t *hw)
{
}

/*
 * Configure the hardware for the Virtual Station Interface (VSI). Currently
 * we only support one, but in the future we could instantiate more than one
 * per attach-point.
 */
static boolean_t
i40e_config_vsi(i40e_t *i40e, i40e_hw_t *hw)
{
	struct i40e_vsi_context context;
	int err, tc_queues;

	/* Fetch the current VSI parameters before modifying them. */
	bzero(&context, sizeof (struct i40e_vsi_context));
	context.seid = i40e->i40e_vsi_id;
	context.pf_num = hw->pf_id;
	err = i40e_aq_get_vsi_params(hw, &context, NULL);
	if (err != I40E_SUCCESS) {
		i40e_error(i40e, "get VSI params failed with %d", err);
		return (B_FALSE);
	}

	i40e->i40e_vsi_num = context.vsi_number;

	/*
	 * Set the queue and traffic class bits. Keep it simple for now.
	 */
	context.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	context.info.mapping_flags = I40E_AQ_VSI_QUE_MAP_CONTIG;
	context.info.queue_mapping[0] = I40E_ASSIGN_ALL_QUEUES;

	/*
	 * tc_queues determines the size of the traffic class, where the
	 * size is 2^^tc_queues to a maximum of 64.
	 * Some examples:
	 * i40e_num_trqpairs == 1 => tc_queues = 0, 2^^0 = 1.
	 * i40e_num_trqpairs == 7 => tc_queues = 3, 2^^3 = 8.
	 * i40e_num_trqpairs == 8 => tc_queues = 3, 2^^3 = 8.
	 * i40e_num_trqpairs == 9 => tc_queues = 4, 2^^4 = 16.
	 * i40e_num_trqpairs == 17 => tc_queues = 5, 2^^5 = 32.
	 * i40e_num_trqpairs == 64 => tc_queues = 6, 2^^6 = 64.
	 */
	tc_queues = ddi_fls(i40e->i40e_num_trqpairs - 1);

	context.info.tc_mapping[0] = ((0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) &
	    I40E_AQ_VSI_TC_QUE_OFFSET_MASK) |
	    ((tc_queues << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) &
	    I40E_AQ_VSI_TC_QUE_NUMBER_MASK);

	context.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
	context.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
	    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;

	/*
	 * NOTE(review): LE16_TO_CPU applied to a host-order constant reads as
	 * if it should be CPU_TO_LE16 (a no-op on little-endian hosts) —
	 * confirm the intended direction.
	 */
	context.flags = LE16_TO_CPU(I40E_AQ_VSI_TYPE_PF);

	i40e->i40e_vsi_stat_id = LE16_TO_CPU(context.info.stat_counter_idx);
	if (i40e_stat_vsi_init(i40e) == B_FALSE)
		return (B_FALSE);

	err = i40e_aq_update_vsi_params(hw, &context, NULL);
	if (err != I40E_SUCCESS) {
		i40e_error(i40e, "Update VSI params failed with %d", err);
		return (B_FALSE);
	}


	return (B_TRUE);
}

/*
 * Set up RSS.
 * 1. Seed the hash key.
 * 2. Enable PCTYPEs for the hash filter.
 * 3. Populate the LUT.
 *
 * Note: When/if X722 support is added the hash key is seeded via a call
 * to i40e_aq_set_rss_key(), and the LUT is populated using
 * i40e_aq_set_rss_lut().
 */
static boolean_t
i40e_config_rss(i40e_t *i40e, i40e_hw_t *hw)
{
	int i;
	uint8_t lut_mask;
	uint32_t *hlut;
	uint64_t hena;
	boolean_t rv = B_TRUE;
	uint32_t seed[I40E_PFQF_HKEY_MAX_INDEX + 1];

	/*
	 * 1. Seed the hash key
	 */
	(void) random_get_pseudo_bytes((uint8_t *)seed, sizeof (seed));

	for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
		i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), seed[i]);

	/*
	 * 2. Configure PCTYPES
	 */
	hena = (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
	    (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
	    (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
	    (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4) |
	    (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
	    (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
	    (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
	    (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6) |
	    (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD);

	/* The 64-bit enable mask spans two 32-bit registers. */
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));

	/*
	 * 3. Populate LUT
	 *
	 * Each entry in the LUT is 8 bits and is used to index
	 * the rx queue. Populate the LUT in a round robin fashion
	 * with rx queue indices from 0 to i40e_num_trqpairs - 1.
	 */
	hlut = kmem_alloc(hw->func_caps.rss_table_size, KM_NOSLEEP);
	if (hlut == NULL) {
		i40e_error(i40e, "i40e_config_rss() buffer allocation failed");
		return (B_FALSE);
	}

	lut_mask = (1 << hw->func_caps.rss_table_entry_width) - 1;

	for (i = 0; i < hw->func_caps.rss_table_size; i++)
		((uint8_t *)hlut)[i] = (i % i40e->i40e_num_trqpairs) & lut_mask;

	/*
	 * Write the table out four bytes per register.
	 * NOTE(review): assumes rss_table_size is a multiple of 4 — confirm
	 * against the capability report.
	 */
	for (i = 0; i < hw->func_caps.rss_table_size >> 2; i++)
		I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i), hlut[i]);

	kmem_free(hlut, hw->func_caps.rss_table_size);

	return (rv);
}

/*
 * Wrapper to kick the chipset on.
 * Restarts link autonegotiation on old firmware, synchronizes hardware state,
 * programs filter control, interrupts, the VSI, and RSS. Returns B_FALSE on
 * any failure.
 */
static boolean_t
i40e_chip_start(i40e_t *i40e)
{
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	struct i40e_filter_control_settings filter;
	int rc;

	/*
	 * Firmware older than 4.33 needs an explicit link restart (with a
	 * settle delay beforehand).
	 */
	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
	    (hw->aq.fw_maj_ver < 4)) {
		i40e_msec_delay(75);
		if (i40e_aq_set_link_restart_an(hw, TRUE, NULL) !=
		    I40E_SUCCESS) {
			i40e_error(i40e, "failed to restart link: admin queue "
			    "error: %d", hw->aq.asq_last_status);
			return (B_FALSE);
		}
	}

	/* Determine hardware state */
	i40e_get_hw_state(i40e, hw);

	/* Initialize mac addresses. */
	i40e_init_macaddrs(i40e, hw);

	/*
	 * Set up the filter control.
	 */
	bzero(&filter, sizeof (filter));
	filter.enable_ethtype = TRUE;
	filter.enable_macvlan = TRUE;
	filter.hash_lut_size = I40E_HASH_LUT_SIZE_512;

	rc = i40e_set_filter_control(hw, &filter);
	if (rc != I40E_SUCCESS) {
		i40e_error(i40e, "i40e_set_filter_control() returned %d", rc);
		return (B_FALSE);
	}

	i40e_intr_chip_init(i40e);

	if (!i40e_config_vsi(i40e, hw))
		return (B_FALSE);

	if (!i40e_config_rss(i40e, hw))
		return (B_FALSE);

	i40e_flush(hw);

	return (B_TRUE);
}

/*
 * Take care of tearing down the rx ring. See 8.3.3.1.2 for more information.
 */
static void
i40e_shutdown_rx_rings(i40e_t *i40e)
{
	int i;
	uint32_t reg;

	i40e_hw_t *hw = &i40e->i40e_hw_space;

	/*
	 * Precondition: the interrupt linked list (see i40e_intr.c for more
	 * information) should have already been cleared before calling this
	 * function. On DEBUG bits we verify that every list head is at EOL.
	 */
#ifdef DEBUG
	if (i40e->i40e_intr_type == DDI_INTR_TYPE_MSIX) {
		for (i = 1; i < i40e->i40e_intr_count; i++) {
			reg = I40E_READ_REG(hw, I40E_PFINT_LNKLSTN(i - 1));
			VERIFY3U(reg, ==, I40E_QUEUE_TYPE_EOL);
		}
	} else {
		reg = I40E_READ_REG(hw, I40E_PFINT_LNKLST0);
		VERIFY3U(reg, ==, I40E_QUEUE_TYPE_EOL);
	}

#endif	/* DEBUG */

	for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
		/*
		 * Step 1. Request the queue by clearing QENA_REQ. It may not be
		 * set due to unwinding from failures and a partially enabled
		 * ring set.
		 */
		reg = I40E_READ_REG(hw, I40E_QRX_ENA(i));
		if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK))
			continue;
		VERIFY((reg & I40E_QRX_ENA_QENA_REQ_MASK) ==
		    I40E_QRX_ENA_QENA_REQ_MASK);
		reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
		I40E_WRITE_REG(hw, I40E_QRX_ENA(i), reg);
	}

	/*
	 * Step 2. Wait for the disable to take, by having QENA_STAT in the FPM
	 * be cleared. Note that we could still receive data in the queue during
	 * this time. We don't actually wait for this now and instead defer this
	 * to i40e_shutdown_rings_wait(), after we've interleaved disabling the
	 * TX queues as well.
	 */
}

/*
 * Take care of tearing down the tx rings; the mirror image of
 * i40e_shutdown_rx_rings(). The actual wait for the queues to quiesce is
 * likewise deferred to i40e_shutdown_rings_wait().
 */
static void
i40e_shutdown_tx_rings(i40e_t *i40e)
{
	int i;
	uint32_t reg;

	i40e_hw_t *hw = &i40e->i40e_hw_space;

	/*
	 * Step 1. The interrupt linked list should already have been cleared.
	 */
#ifdef DEBUG
	if (i40e->i40e_intr_type == DDI_INTR_TYPE_MSIX) {
		for (i = 1; i < i40e->i40e_intr_count; i++) {
			reg = I40E_READ_REG(hw, I40E_PFINT_LNKLSTN(i - 1));
			VERIFY3U(reg, ==, I40E_QUEUE_TYPE_EOL);
		}
	} else {
		reg = I40E_READ_REG(hw, I40E_PFINT_LNKLST0);
		VERIFY3U(reg, ==, I40E_QUEUE_TYPE_EOL);

	}
#endif	/* DEBUG */

	for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
		/*
		 * Step 2. Set the SET_QDIS flag for every queue.
		 */
		i40e_pre_tx_queue_cfg(hw, i, B_FALSE);
	}

	/*
	 * Step 3. Wait at least 400 usec (can be done once for all queues).
	 */
	drv_usecwait(500);

	for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
		/*
		 * Step 4. Clear the QENA_REQ flag which tells hardware to
		 * quiesce. If QENA_REQ is not already set then that means that
		 * we likely already tried to disable this queue.
		 */
		reg = I40E_READ_REG(hw, I40E_QTX_ENA(i));
		if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK))
			continue;
		reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
		I40E_WRITE_REG(hw, I40E_QTX_ENA(i), reg);
	}

	/*
	 * Step 5. Wait for all drains to finish. This will be done by the
	 * hardware removing the QENA_STAT flag from the queue. Rather than
	 * waiting here, we interleave it with all the others in
	 * i40e_shutdown_rings_wait().
	 */
}

/*
 * Wait for all the rings to be shut down. e.g. Steps 2 and 5 from the above
 * functions.
 * Returns B_FALSE if any queue fails to quiesce within
 * I40E_RING_WAIT_NTRIES polls.
 */
static boolean_t
i40e_shutdown_rings_wait(i40e_t *i40e)
{
	int i, try;
	i40e_hw_t *hw = &i40e->i40e_hw_space;

	for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
		uint32_t reg;

		/* Poll for the rx queue's QENA_STAT bit to clear. */
		for (try = 0; try < I40E_RING_WAIT_NTRIES; try++) {
			reg = I40E_READ_REG(hw, I40E_QRX_ENA(i));
			if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0)
				break;
			i40e_msec_delay(I40E_RING_WAIT_PAUSE);
		}

		if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) != 0) {
			i40e_error(i40e, "timed out disabling rx queue %d",
			    i);
			return (B_FALSE);
		}

		/* Likewise for the paired tx queue. */
		for (try = 0; try < I40E_RING_WAIT_NTRIES; try++) {
			reg = I40E_READ_REG(hw, I40E_QTX_ENA(i));
			if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0)
				break;
			i40e_msec_delay(I40E_RING_WAIT_PAUSE);
		}

		if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) != 0) {
			i40e_error(i40e, "timed out disabling tx queue %d",
			    i);
			return (B_FALSE);
		}
	}

	return (B_TRUE);
}

/*
 * Shut down all rx and tx rings and then wait for every queue to report that
 * it has quiesced. Returns B_FALSE on timeout.
 */
static boolean_t
i40e_shutdown_rings(i40e_t *i40e)
{
	i40e_shutdown_rx_rings(i40e);
	i40e_shutdown_tx_rings(i40e);
	return (i40e_shutdown_rings_wait(i40e));
}

/*
 * Program every rx descriptor in the ring with the DMA address of its
 * receive control block's buffer. Header split is not used, so hdr_addr is
 * always zero.
 */
static void
i40e_setup_rx_descs(i40e_trqpair_t *itrq)
{
	int i;
	i40e_rx_data_t *rxd = itrq->itrq_rxdata;

	for (i = 0; i < rxd->rxd_ring_size; i++) {
		i40e_rx_control_block_t *rcb;
		i40e_rx_desc_t *rdesc;

		rcb = rxd->rxd_work_list[i];
		rdesc = &rxd->rxd_desc_ring[i];

		rdesc->read.pkt_addr =
		    CPU_TO_LE64((uintptr_t)rcb->rcb_dma.dmab_dma_address);
		rdesc->read.hdr_addr = 0;
	}
}

/*
 * Program the rx queue's FPM/HMC context: descriptor ring base and length,
 * buffer sizing, and the various feature enables (no header split, no TPH,
 * CRC strip on, 32-byte descriptors).
 */
static boolean_t
i40e_setup_rx_hmc(i40e_trqpair_t *itrq)
{
	i40e_rx_data_t *rxd = itrq->itrq_rxdata;
	i40e_t *i40e = itrq->itrq_i40e;
	i40e_hw_t *hw = &i40e->i40e_hw_space;

	struct i40e_hmc_obj_rxq rctx;
	int err;

	bzero(&rctx, sizeof (struct i40e_hmc_obj_rxq));
	/* The context base is expressed in I40E_HMC_RX_CTX_UNIT units. */
	rctx.base = rxd->rxd_desc_area.dmab_dma_address /
	    I40E_HMC_RX_CTX_UNIT;
	rctx.qlen = rxd->rxd_ring_size;
	VERIFY(i40e->i40e_rx_buf_size >= I40E_HMC_RX_DBUFF_MIN);
	VERIFY(i40e->i40e_rx_buf_size <= I40E_HMC_RX_DBUFF_MAX);
	rctx.dbuff = i40e->i40e_rx_buf_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
	rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
	rctx.dtype = I40E_HMC_RX_DTYPE_NOSPLIT;
	rctx.dsize = I40E_HMC_RX_DSIZE_32BYTE;
	rctx.crcstrip = I40E_HMC_RX_CRCSTRIP_ENABLE;
	rctx.fc_ena = I40E_HMC_RX_FC_DISABLE;
	rctx.l2tsel = I40E_HMC_RX_L2TAGORDER;
	rctx.hsplit_0 = I40E_HMC_RX_HDRSPLIT_DISABLE;
	rctx.hsplit_1 = I40E_HMC_RX_HDRSPLIT_DISABLE;
	rctx.showiv = I40E_HMC_RX_INVLAN_DONTSTRIP;
	rctx.rxmax = i40e->i40e_frame_max;
	rctx.tphrdesc_ena = I40E_HMC_RX_TPH_DISABLE;
	rctx.tphwdesc_ena = I40E_HMC_RX_TPH_DISABLE;
	rctx.tphdata_ena = I40E_HMC_RX_TPH_DISABLE;
	rctx.tphhead_ena = I40E_HMC_RX_TPH_DISABLE;
	rctx.lrxqthresh = I40E_HMC_RX_LOWRXQ_NOINTR;

	/*
	 * This must be set to 0x1, see Table 8-12 in section 8.3.3.2.2.
	 */
	rctx.prefena = I40E_HMC_RX_PREFENA;

	/* Clear any stale context before writing the new one. */
	err = i40e_clear_lan_rx_queue_context(hw, itrq->itrq_index);
	if (err != I40E_SUCCESS) {
		i40e_error(i40e, "failed to clear rx queue %d context: %d",
		    itrq->itrq_index, err);
		return (B_FALSE);
	}

	err = i40e_set_lan_rx_queue_context(hw, itrq->itrq_index, &rctx);
	if (err != I40E_SUCCESS) {
		i40e_error(i40e, "failed to set rx queue %d context: %d",
		    itrq->itrq_index, err);
		return (B_FALSE);
	}

	return (B_TRUE);
}

/*
 * Take care of setting up the descriptor rings and actually programming the
 * device. See 8.3.3.1.1 for the full list of steps we need to do to enable the
 * rx rings.
 */
static boolean_t
i40e_setup_rx_rings(i40e_t *i40e)
{
	int i;
	i40e_hw_t *hw = &i40e->i40e_hw_space;

	for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
		i40e_trqpair_t *itrq = &i40e->i40e_trqpairs[i];
		i40e_rx_data_t *rxd = itrq->itrq_rxdata;
		uint32_t reg;

		/*
		 * Step 1. Program all receive ring descriptors.
		 */
		i40e_setup_rx_descs(itrq);

		/*
		 * Step 2. Program the queue's FPM/HMC context.
		 */
		if (i40e_setup_rx_hmc(itrq) == B_FALSE)
			return (B_FALSE);

		/*
		 * Step 3. Clear the queue's tail pointer and set it to the end
		 * of the space.
		 */
		I40E_WRITE_REG(hw, I40E_QRX_TAIL(i), 0);
		I40E_WRITE_REG(hw, I40E_QRX_TAIL(i), rxd->rxd_ring_size - 1);

		/*
		 * Step 4. Enable the queue via the QENA_REQ.
		 */
		reg = I40E_READ_REG(hw, I40E_QRX_ENA(i));
		VERIFY0(reg & (I40E_QRX_ENA_QENA_REQ_MASK |
		    I40E_QRX_ENA_QENA_STAT_MASK));
		reg |= I40E_QRX_ENA_QENA_REQ_MASK;
		I40E_WRITE_REG(hw, I40E_QRX_ENA(i), reg);
	}

	/*
	 * Note, we wait for every queue to be enabled before we start checking.
	 * This will hopefully cause most queues to be enabled at this point.
	 */
	for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
		uint32_t j, reg;

		/*
		 * Step 5. Verify that QENA_STAT has been set. It's promised
		 * that this should occur within about 10 us, but like other
		 * systems, we give the card a bit more time.
		 */
		for (j = 0; j < I40E_RING_WAIT_NTRIES; j++) {
			reg = I40E_READ_REG(hw, I40E_QRX_ENA(i));

			if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
				break;
			i40e_msec_delay(I40E_RING_WAIT_PAUSE);
		}

		if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
			i40e_error(i40e, "failed to enable rx queue %d, timed "
			    "out.", i);
			return (B_FALSE);
		}
	}

	return (B_TRUE);
}

/*
 * Program the tx queue's FPM/HMC context: descriptor ring base and length,
 * head write-back location, and feature enables. The queue is also bound to
 * the VSI's first traffic-class ready list.
 */
static boolean_t
i40e_setup_tx_hmc(i40e_trqpair_t *itrq)
{
	i40e_t *i40e = itrq->itrq_i40e;
	i40e_hw_t *hw = &i40e->i40e_hw_space;

	struct i40e_hmc_obj_txq tctx;
	struct i40e_vsi_context context;
	int err;

	bzero(&tctx, sizeof (struct i40e_hmc_obj_txq));
	tctx.new_context = I40E_HMC_TX_NEW_CONTEXT;
	tctx.base = itrq->itrq_desc_area.dmab_dma_address /
	    I40E_HMC_TX_CTX_UNIT;
	tctx.fc_ena = I40E_HMC_TX_FC_DISABLE;
	tctx.timesync_ena = I40E_HMC_TX_TS_DISABLE;
	tctx.fd_ena = I40E_HMC_TX_FD_DISABLE;
	tctx.alt_vlan_ena = I40E_HMC_TX_ALT_VLAN_DISABLE;
	tctx.head_wb_ena = I40E_HMC_TX_WB_ENABLE;
	tctx.qlen = itrq->itrq_tx_ring_size;
	tctx.tphrdesc_ena = I40E_HMC_TX_TPH_DISABLE;
	tctx.tphrpacket_ena = I40E_HMC_TX_TPH_DISABLE;
	tctx.tphwdesc_ena = I40E_HMC_TX_TPH_DISABLE;
	/* Head write-back lands immediately after the descriptor ring. */
	tctx.head_wb_addr = itrq->itrq_desc_area.dmab_dma_address +
	    sizeof (i40e_tx_desc_t) * itrq->itrq_tx_ring_size;

	/*
	 * This field isn't actually documented, like crc, but it suggests that
	 * it should be zeroed. We leave both of these here because of that for
	 * now. We should check with Intel on why these are here even.
	 */
	tctx.crc = 0;
	tctx.rdylist_act = 0;

	/*
	 * We're supposed to assign the rdylist field with the value of the
	 * traffic class index for the first device. We query the VSI parameters
	 * again to get what the handle is. Note that every queue is always
	 * assigned to traffic class zero, because we don't actually use them.
	 */
	bzero(&context, sizeof (struct i40e_vsi_context));
	context.seid = i40e->i40e_vsi_id;
	context.pf_num = hw->pf_id;
	err = i40e_aq_get_vsi_params(hw, &context, NULL);
	if (err != I40E_SUCCESS) {
		i40e_error(i40e, "get VSI params failed with %d", err);
		return (B_FALSE);
	}
	tctx.rdylist = LE_16(context.info.qs_handle[0]);

	err = i40e_clear_lan_tx_queue_context(hw, itrq->itrq_index);
	if (err != I40E_SUCCESS) {
		i40e_error(i40e, "failed to clear tx queue %d context: %d",
		    itrq->itrq_index, err);
		return (B_FALSE);
	}

	err = i40e_set_lan_tx_queue_context(hw, itrq->itrq_index, &tctx);
	if (err != I40E_SUCCESS) {
		i40e_error(i40e, "failed to set tx queue %d context: %d",
		    itrq->itrq_index, err);
		return (B_FALSE);
	}

	return (B_TRUE);
}

/*
 * Take care of setting up the descriptor rings and actually programming the
 * device. See 8.4.3.1.1 for what we need to do here.
 */
static boolean_t
i40e_setup_tx_rings(i40e_t *i40e)
{
	int i;
	i40e_hw_t *hw = &i40e->i40e_hw_space;

	for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
		i40e_trqpair_t *itrq = &i40e->i40e_trqpairs[i];
		uint32_t reg;

		/*
		 * Step 1. Clear the queue disable flag and verify that the
		 * index is set correctly.
		 */
		i40e_pre_tx_queue_cfg(hw, i, B_TRUE);

		/*
		 * Step 2. Prepare the queue's FPM/HMC context.
		 */
		if (i40e_setup_tx_hmc(itrq) == B_FALSE)
			return (B_FALSE);

		/*
		 * Step 3. Verify that it's clear that this PF owns this queue.
		 */
		reg = I40E_QTX_CTL_PF_QUEUE;
		reg |= (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
		    I40E_QTX_CTL_PF_INDX_MASK;
		I40E_WRITE_REG(hw, I40E_QTX_CTL(itrq->itrq_index), reg);
		i40e_flush(hw);

		/*
		 * Step 4. Set the QENA_REQ flag.
		 */
		reg = I40E_READ_REG(hw, I40E_QTX_ENA(i));
		VERIFY0(reg & (I40E_QTX_ENA_QENA_REQ_MASK |
		    I40E_QTX_ENA_QENA_STAT_MASK));
		reg |= I40E_QTX_ENA_QENA_REQ_MASK;
		I40E_WRITE_REG(hw, I40E_QTX_ENA(i), reg);
	}

	/*
	 * Note, we wait for every queue to be enabled before we start checking.
	 * This will hopefully cause most queues to be enabled at this point.
	 */
	for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
		uint32_t j, reg;

		/*
		 * Step 5. Verify that QENA_STAT has been set. It's promised
		 * that this should occur within about 10 us, but like BSD,
		 * we'll try for up to 100 ms for this queue.
		 */
		for (j = 0; j < I40E_RING_WAIT_NTRIES; j++) {
			reg = I40E_READ_REG(hw, I40E_QTX_ENA(i));

			if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
				break;
			i40e_msec_delay(I40E_RING_WAIT_PAUSE);
		}

		if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
			i40e_error(i40e, "failed to enable tx queue %d, timed "
			    "out", i);
			return (B_FALSE);
		}
	}

	return (B_TRUE);
}

/*
 * Stop the device: quiesce interrupts and rings, tear down per-ring kstats
 * and the VSI kstats, and reset the cached link state. If free_allocations
 * is set, the ring memory is released as well. The caller must hold the
 * general lock.
 */
void
i40e_stop(i40e_t *i40e, boolean_t free_allocations)
{
	int i;

	ASSERT(MUTEX_HELD(&i40e->i40e_general_lock));

	/*
	 * Shutdown and drain the tx and rx pipeline. We do this using the
	 * following steps.
	 *
	 * 1) Shutdown interrupts to all the queues (trying to keep the admin
	 *    queue alive).
	 *
	 * 2) Remove all of the interrupt tx and rx causes by setting the
	 *    interrupt linked lists to zero.
	 *
	 * 3) Shutdown the tx and rx rings. Because i40e_shutdown_rings()
	 *    should wait for all the queues to be disabled, once we reach that
	 *    point it should be safe to free associated data.
	 *
	 * 4) Wait 50ms after all that is done. This ensures that the rings are
	 *    ready for programming again and we don't have to think about this
	 *    in other parts of the driver.
	 *
	 * 5) Disable remaining chip interrupts, (admin queue, etc.)
	 *
	 * 6) Verify that FM is happy with all the register accesses we
	 *    performed.
	 */
	i40e_intr_io_disable_all(i40e);
	i40e_intr_io_clear_cause(i40e);

	if (i40e_shutdown_rings(i40e) == B_FALSE) {
		ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_LOST);
	}

	delay(50 * drv_usectohz(1000));

	i40e_intr_chip_fini(i40e);

	/* Hold every ring's locks while we tear down its state. */
	for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
		mutex_enter(&i40e->i40e_trqpairs[i].itrq_rx_lock);
		mutex_enter(&i40e->i40e_trqpairs[i].itrq_tx_lock);
	}

	/*
	 * We should consider refactoring this to be part of the ring start /
	 * stop routines at some point.
	 */
	for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
		i40e_stats_trqpair_fini(&i40e->i40e_trqpairs[i]);
	}

	if (i40e_check_acc_handle(i40e->i40e_osdep_space.ios_cfg_handle) !=
	    DDI_FM_OK) {
		ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_LOST);
	}

	for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
		i40e_tx_cleanup_ring(&i40e->i40e_trqpairs[i]);
	}

	for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
		mutex_exit(&i40e->i40e_trqpairs[i].itrq_rx_lock);
		mutex_exit(&i40e->i40e_trqpairs[i].itrq_tx_lock);
	}

	i40e_stat_vsi_fini(i40e);

	i40e->i40e_link_speed = 0;
	i40e->i40e_link_duplex = 0;
	i40e_link_state_set(i40e, LINK_STATE_UNKNOWN);

	if (free_allocations) {
		i40e_free_ring_mem(i40e, B_FALSE);
	}
}

/*
 * Start the device: optionally allocate ring memory, bring up the chip, the
 * rx/tx rings, broadcast filtering and MAC config, then enable I/O
 * interrupts. On any failure the device is stopped again and B_FALSE is
 * returned. The caller must hold the general lock.
 */
boolean_t
i40e_start(i40e_t *i40e, boolean_t alloc)
{
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	boolean_t rc = B_TRUE;
	int i, err;

	ASSERT(MUTEX_HELD(&i40e->i40e_general_lock));

	if (alloc) {
		if (i40e_alloc_ring_mem(i40e) == B_FALSE) {
			i40e_error(i40e,
			    "Failed to allocate ring memory");
			return (B_FALSE);
		}
	}

	/*
	 * This should get refactored to be part of ring start and stop at
	 * some point, along with most of the logic here.
	 */
	for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
		if (i40e_stats_trqpair_init(&i40e->i40e_trqpairs[i]) ==
		    B_FALSE) {
			int j;

			/* Unwind the kstats we already created. */
			for (j = 0; j < i; j++) {
				i40e_trqpair_t *itrq = &i40e->i40e_trqpairs[j];
				i40e_stats_trqpair_fini(itrq);
			}
			return (B_FALSE);
		}
	}

	if (!i40e_chip_start(i40e)) {
		i40e_fm_ereport(i40e, DDI_FM_DEVICE_INVAL_STATE);
		rc = B_FALSE;
		goto done;
	}

	if (i40e_setup_rx_rings(i40e) == B_FALSE) {
		rc = B_FALSE;
		goto done;
	}

	if (i40e_setup_tx_rings(i40e) == B_FALSE) {
		rc = B_FALSE;
		goto done;
	}

	/*
	 * Enable broadcast traffic; however, do not enable multicast traffic.
	 * That's handled exclusively through MAC's mc_multicst routines.
	 */
	err = i40e_aq_set_vsi_broadcast(hw, i40e->i40e_vsi_id, B_TRUE, NULL);
	if (err != I40E_SUCCESS) {
		i40e_error(i40e, "failed to set default VSI: %d", err);
		rc = B_FALSE;
		goto done;
	}

	err = i40e_aq_set_mac_config(hw, i40e->i40e_frame_max, B_TRUE, 0, NULL);
	if (err != I40E_SUCCESS) {
		i40e_error(i40e, "failed to set MAC config: %d", err);
		rc = B_FALSE;
		goto done;
	}

	/*
	 * Finally, make sure that we're happy from an FM perspective.
	 */
	if (i40e_check_acc_handle(i40e->i40e_osdep_space.ios_reg_handle) !=
	    DDI_FM_OK) {
		rc = B_FALSE;
		goto done;
	}

	/* Clear state bits prior to final interrupt enabling. */
	atomic_and_32(&i40e->i40e_state,
	    ~(I40E_ERROR | I40E_STALL | I40E_OVERTEMP));

	i40e_intr_io_enable_all(i40e);

done:
	if (rc == B_FALSE) {
		i40e_stop(i40e, B_FALSE);
		if (alloc == B_TRUE) {
			i40e_free_ring_mem(i40e, B_TRUE);
		}
		ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_LOST);
	}

	return (rc);
}

/*
 * We may have loaned up descriptors to the stack.
 * As such, if we still have
 * them outstanding, then we will not continue with detach.
 *
 * Returns B_FALSE if a loaned rx buffer is still outstanding after the
 * bounded cv wait, B_TRUE once i40e_rx_pending drains to zero.
 */
static boolean_t
i40e_drain_rx(i40e_t *i40e)
{
	mutex_enter(&i40e->i40e_rx_pending_lock);
	while (i40e->i40e_rx_pending > 0) {
		/* A -1 return means the timed wait expired. */
		if (cv_reltimedwait(&i40e->i40e_rx_pending_cv,
		    &i40e->i40e_rx_pending_lock,
		    drv_usectohz(I40E_DRAIN_RX_WAIT), TR_CLOCK_TICK) == -1) {
			mutex_exit(&i40e->i40e_rx_pending_lock);
			return (B_FALSE);
		}
	}
	mutex_exit(&i40e->i40e_rx_pending_lock);

	return (B_TRUE);
}

/*
 * Standard DDI attach entry point. Initialization is staged; each completed
 * stage is recorded in i40e_attach_progress so that i40e_unconfigure() can
 * unwind exactly what was set up if a later stage fails.
 */
static int
i40e_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
	i40e_t *i40e;
	struct i40e_osdep *osdep;
	i40e_hw_t *hw;
	int instance;

	if (cmd != DDI_ATTACH)
		return (DDI_FAILURE);

	instance = ddi_get_instance(devinfo);
	i40e = kmem_zalloc(sizeof (i40e_t), KM_SLEEP);

	i40e->i40e_aqbuf = kmem_zalloc(I40E_ADMINQ_BUFSZ, KM_SLEEP);
	i40e->i40e_instance = instance;
	i40e->i40e_dip = devinfo;

	/* Wire the Intel common code's handle back to our soft state. */
	hw = &i40e->i40e_hw_space;
	osdep = &i40e->i40e_osdep_space;
	hw->back = osdep;
	osdep->ios_i40e = i40e;

	ddi_set_driver_private(devinfo, i40e);

	i40e_fm_init(i40e);
	i40e->i40e_attach_progress |= I40E_ATTACH_FM_INIT;

	if (pci_config_setup(devinfo, &osdep->ios_cfg_handle) != DDI_SUCCESS) {
		i40e_error(i40e, "Failed to map PCI configurations.");
		goto attach_fail;
	}
	i40e->i40e_attach_progress |= I40E_ATTACH_PCI_CONFIG;

	i40e_identify_hardware(i40e);

	if (!i40e_regs_map(i40e)) {
		i40e_error(i40e, "Failed to map device registers.");
		goto attach_fail;
	}
	i40e->i40e_attach_progress |= I40E_ATTACH_REGS_MAP;

	i40e_init_properties(i40e);
	i40e->i40e_attach_progress |= I40E_ATTACH_PROPS;

	if (!i40e_common_code_init(i40e, hw))
		goto attach_fail;
	i40e->i40e_attach_progress |= I40E_ATTACH_COMMON_CODE;

	/*
	 * When we participate in IRM, we should make sure that we register
	 * ourselves with it before callbacks.
	 */
	if (!i40e_alloc_intrs(i40e, devinfo)) {
		i40e_error(i40e, "Failed to allocate interrupts.");
		goto attach_fail;
	}
	i40e->i40e_attach_progress |= I40E_ATTACH_ALLOC_INTR;

	if (!i40e_alloc_trqpairs(i40e)) {
		i40e_error(i40e,
		    "Failed to allocate receive & transmit rings.");
		goto attach_fail;
	}
	i40e->i40e_attach_progress |= I40E_ATTACH_ALLOC_RINGSLOCKS;

	if (!i40e_map_intrs_to_vectors(i40e)) {
		i40e_error(i40e, "Failed to map interrupts to vectors.");
		goto attach_fail;
	}

	if (!i40e_add_intr_handlers(i40e)) {
		i40e_error(i40e, "Failed to add the interrupt handlers.");
		goto attach_fail;
	}
	i40e->i40e_attach_progress |= I40E_ATTACH_ADD_INTR;

	if (!i40e_final_init(i40e)) {
		i40e_error(i40e, "Final initialization failed.");
		goto attach_fail;
	}
	i40e->i40e_attach_progress |= I40E_ATTACH_INIT;

	if (i40e_check_acc_handle(i40e->i40e_osdep_space.ios_cfg_handle) !=
	    DDI_FM_OK) {
		ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_LOST);
		goto attach_fail;
	}

	if (!i40e_stats_init(i40e)) {
		i40e_error(i40e, "Stats initialization failed.");
		goto attach_fail;
	}
	i40e->i40e_attach_progress |= I40E_ATTACH_STATS;

	if (!i40e_register_mac(i40e)) {
		i40e_error(i40e, "Failed to register to MAC/GLDv3");
		goto attach_fail;
	}
	i40e->i40e_attach_progress |= I40E_ATTACH_MAC;

	/* Kick off the periodic link-check timer (see i40e_timer()). */
	i40e->i40e_periodic_id = ddi_periodic_add(i40e_timer, i40e,
	    I40E_CYCLIC_PERIOD, DDI_IPL_0);
	if (i40e->i40e_periodic_id == 0) {
		i40e_error(i40e, "Failed to add the link-check timer");
		goto attach_fail;
	}
	i40e->i40e_attach_progress |= I40E_ATTACH_LINK_TIMER;

	if (!i40e_enable_interrupts(i40e)) {
		i40e_error(i40e, "Failed to enable DDI interrupts");
		goto attach_fail;
	}
	i40e->i40e_attach_progress |= I40E_ATTACH_ENABLE_INTR;

	atomic_or_32(&i40e->i40e_state, I40E_INITIALIZED);

	/* Make the instance visible on the global list. */
	mutex_enter(&i40e_glock);
	list_insert_tail(&i40e_glist, i40e);
	mutex_exit(&i40e_glock);

	return (DDI_SUCCESS);

attach_fail:
	i40e_unconfigure(devinfo, i40e);
	return (DDI_FAILURE);
}

/*
 * Standard DDI detach entry point. Detach is refused while any rx buffers
 * loaned to the networking stack remain outstanding.
 */
static int
i40e_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	i40e_t *i40e;

	if (cmd != DDI_DETACH)
		return (DDI_FAILURE);

	i40e = (i40e_t *)ddi_get_driver_private(devinfo);
	if (i40e == NULL) {
		i40e_log(NULL, "i40e_detach() called with no i40e pointer!");
		return (DDI_FAILURE);
	}

	if (i40e_drain_rx(i40e) == B_FALSE) {
		i40e_log(i40e, "timed out draining DMA resources, %d buffers "
		    "remain", i40e->i40e_rx_pending);
		return (DDI_FAILURE);
	}

	mutex_enter(&i40e_glock);
	list_remove(&i40e_glist, i40e);
	mutex_exit(&i40e_glock);

	i40e_unconfigure(devinfo, i40e);

	return (DDI_SUCCESS);
}

/*
 * Character/block entry points; all stubbed out, as all real entry points
 * flow through MAC/GLDv3.
 */
static struct cb_ops i40e_cb_ops = {
	nulldev,		/* cb_open */
	nulldev,		/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	nodev,			/* cb_read */
	nodev,			/* cb_write */
	nodev,			/* cb_ioctl */
	nodev,			/* cb_devmap */
	nodev,			/* cb_mmap */
	nodev,			/* cb_segmap */
	nochpoll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	NULL,			/* cb_stream */
	D_MP | D_HOTPLUG,	/* cb_flag */
	CB_REV,			/* cb_rev */
	nodev,			/* cb_aread */
	nodev			/* cb_awrite */
};

/*
 * Device operations; mac_init_ops() in _init() fills in the MAC-specific
 * pieces before mod_install().
 */
static struct dev_ops i40e_dev_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	NULL,			/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	i40e_attach,		/* devo_attach */
	i40e_detach,		/* devo_detach */
	nodev,			/* devo_reset */
	&i40e_cb_ops,		/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	ddi_power,		/* devo_power */
	ddi_quiesce_not_supported /* devo_quiesce */
};

static struct modldrv i40e_modldrv = {
	&mod_driverops,
	i40e_ident,
	&i40e_dev_ops
};

static struct modlinkage i40e_modlinkage = {
	MODREV_1,
	&i40e_modldrv,
	NULL
};

/*
 * Module Initialization Functions.
 */
int
_init(void)
{
	int status;

	/* Global instance and device lists, shared across all attaches. */
	list_create(&i40e_glist, sizeof (i40e_t), offsetof(i40e_t, i40e_glink));
	list_create(&i40e_dlist, sizeof (i40e_device_t),
	    offsetof(i40e_device_t, id_link));
	mutex_init(&i40e_glock, NULL, MUTEX_DRIVER, NULL);
	mac_init_ops(&i40e_dev_ops, I40E_MODULE_NAME);

	status = mod_install(&i40e_modlinkage);
	if (status != DDI_SUCCESS) {
		/* Unwind everything above on failed install. */
		mac_fini_ops(&i40e_dev_ops);
		mutex_destroy(&i40e_glock);
		list_destroy(&i40e_dlist);
		list_destroy(&i40e_glist);
	}

	return (status);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&i40e_modlinkage, modinfop));
}

int
_fini(void)
{
	int status;

	status = mod_remove(&i40e_modlinkage);
	if (status == DDI_SUCCESS) {
		mac_fini_ops(&i40e_dev_ops);
		mutex_destroy(&i40e_glock);
		list_destroy(&i40e_dlist);
		list_destroy(&i40e_glist);
	}

	return (status);
}