xref: /illumos-gate/usr/src/uts/common/io/i40e/i40e_main.c (revision 20a7641f9918de8574b8b3b47dbe35c4bfc78df1)
1 /*
2  * This file and its contents are supplied under the terms of the
3  * Common Development and Distribution License ("CDDL"), version 1.0.
4  * You may only use this file in accordance with the terms of version
5  * 1.0 of the CDDL.
6  *
7  * A full copy of the text of the CDDL should have accompanied this
8  * source.  A copy of the CDDL is also available via the Internet at
9  * http://www.illumos.org/license/CDDL.
10  */
11 
12 /*
13  * Copyright 2015 OmniTI Computer Consulting, Inc. All rights reserved.
14  * Copyright 2019 Joyent, Inc.
15  * Copyright 2017 Tegile Systems, Inc.  All rights reserved.
16  * Copyright 2020 RackTop Systems, Inc.
17  * Copyright 2020 Ryan Zezeski
18  * Copyright 2021 Oxide Computer Company
19  */
20 
21 /*
22  * i40e - Intel 10/40 Gb Ethernet driver
23  *
24  * The i40e driver is the main software device driver for the Intel 40 Gb family
25  * of devices. Note that these devices come in many flavors with both 40 GbE
26  * ports and 10 GbE ports. This device is the successor to the 82599 family of
27  * devices (ixgbe).
28  *
29  * Unlike previous generations of Intel 1 GbE and 10 GbE devices, the 40 GbE
30  * devices defined in the XL710 controller (previously known as Fortville) are a
31  * rather different beast and have a small switch embedded inside of them. In
32  * addition, the way that most of the programming is done has been overhauled.
33  * As opposed to just using PCIe memory mapped registers, it also has an
34  * administrative queue which is used to communicate with firmware running on
35  * the chip.
36  *
37  * Each physical function in the hardware shows up as a device that this driver
38  * will bind to. The hardware splits many resources evenly across all of the
39  * physical functions present on the device, while other resources are instead
 * shared across the entire card and it's up to the device driver to
41  * intelligently partition them.
42  *
43  * ------------
44  * Organization
45  * ------------
46  *
47  * This driver is made up of several files which have their own theory
48  * statements spread across them. We'll touch on the high level purpose of each
49  * file here, and then we'll get into more discussion on how the device is
50  * generally modelled with respect to the interfaces in illumos.
51  *
52  * i40e_gld.c: This file contains all of the bindings to MAC and the networking
53  *             stack.
54  *
55  * i40e_intr.c: This file contains all of the interrupt service routines and
56  *              contains logic to enable and disable interrupts on the hardware.
57  *              It also contains the logic to map hardware resources such as the
58  *              rings to and from interrupts and controls their ability to fire.
59  *
60  *              There is a big theory statement on interrupts present there.
61  *
62  * i40e_main.c: The file that you're currently in. It interfaces with the
63  *              traditional OS DDI interfaces and is in charge of configuring
64  *              the device.
65  *
66  * i40e_osdep.[ch]: These files contain interfaces and definitions needed to
67  *                  work with Intel's common code for the device.
68  *
69  * i40e_stats.c: This file contains the general work and logic around our
70  *               kstats. A theory statement on their organization and use of the
71  *               hardware exists there.
72  *
73  * i40e_sw.h: This header file contains all of the primary structure definitions
74  *            and constants that are used across the entire driver.
75  *
76  * i40e_transceiver.c: This file contains all of the logic for sending and
77  *                     receiving data. It contains all of the ring and DMA
78  *                     allocation logic, as well as, the actual interfaces to
79  *                     send and receive data.
80  *
81  *                     A big theory statement on ring management, descriptors,
82  *                     and how it ties into the OS is present there.
83  *
84  * --------------
85  * General Design
86  * --------------
87  *
88  * Before we go too far into the general way we've laid out data structures and
89  * the like, it's worth taking some time to explain how the hardware is
90  * organized. This organization informs a lot of how we do things at this time
91  * in the driver.
92  *
 * Each physical device consists of one or more ports, which are
94  * considered physical functions in the PCI sense and thus each get enumerated
95  * by the system, resulting in an instance being created and attached to. While
96  * there are many resources that are unique to each physical function eg.
97  * instance of the device, there are many that are shared across all of them.
98  * Several resources have an amount reserved for each Virtual Station Interface
99  * (VSI) and then a static pool of resources, available for all functions on the
100  * card.
101  *
102  * The most important resource in hardware are its transmit and receive queue
103  * pairs (i40e_trqpair_t). These should be thought of as rings in GLDv3
104  * parlance. There are a set number of these on each device; however, they are
105  * statically partitioned among all of the different physical functions.
106  *
107  * 'Fortville' (the code name for this device family) is basically a switch. To
108  * map MAC addresses and other things to queues, we end up having to create
109  * Virtual Station Interfaces (VSIs) and establish forwarding rules that direct
110  * traffic to a queue. A VSI owns a collection of queues and has a series of
111  * forwarding rules that point to it. One way to think of this is to treat it
112  * like MAC does a VNIC. When MAC refers to a group, a collection of rings and
113  * classification resources, that is a VSI in i40e.
114  *
 * The set of VSIs is shared across the entire device, though there may be some
116  * amount that are reserved to each PF. Because the GLDv3 does not let us change
117  * the number of groups dynamically, we instead statically divide this amount
118  * evenly between all the functions that exist. In addition, we have the same
119  * problem with the mac address forwarding rules. There are a static number that
120  * exist shared across all the functions.
121  *
122  * To handle both of these resources, what we end up doing is going through and
123  * determining which functions belong to the same device. Nominally one might do
124  * this by having a nexus driver; however, a prime requirement for a nexus
125  * driver is identifying the various children and activating them. While it is
126  * possible to get this information from NVRAM, we would end up duplicating a
127  * lot of the PCI enumeration logic. Really, at the end of the day, the device
128  * doesn't give us the traditional identification properties we want from a
129  * nexus driver.
130  *
131  * Instead, we rely on some properties that are guaranteed to be unique. While
132  * it might be tempting to leverage the PBA or serial number of the device from
133  * NVRAM, there is nothing that says that two devices can't be mis-programmed to
134  * have the same values in NVRAM. Instead, we uniquely identify a group of
135  * functions based on their parent in the /devices tree, their PCI bus and PCI
136  * function identifiers. Using either on their own may not be sufficient.
137  *
138  * For each unique PCI device that we encounter, we'll create a i40e_device_t.
139  * From there, because we don't have a good way to tell the GLDv3 about sharing
140  * resources between everything, we'll end up just dividing the resources
141  * evenly between all of the functions. Longer term, if we don't have to declare
142  * to the GLDv3 that these resources are shared, then we'll maintain a pool and
143  * have each PF allocate from the pool in the device, thus if only two of four
144  * ports are being used, for example, then all of the resources can still be
145  * used.
146  *
147  * -------------------------------------------
148  * Transmit and Receive Queue Pair Allocations
149  * -------------------------------------------
150  *
151  * NVRAM ends up assigning each PF its own share of the transmit and receive LAN
152  * queue pairs, we have no way of modifying it, only observing it. From there,
153  * it's up to us to map these queues to VSIs and VFs. Since we don't support any
154  * VFs at this time, we only focus on assignments to VSIs.
155  *
 * At the moment, we use a static mapping of transmit/receive queue pairs to a
157  * given VSI (eg. rings to a group). Though in the fullness of time, we want to
158  * make this something which is fully dynamic and take advantage of documented,
159  * but not yet available functionality for adding filters based on VXLAN and
160  * other encapsulation technologies.
161  *
162  * -------------------------------------
163  * Broadcast, Multicast, and Promiscuous
164  * -------------------------------------
165  *
166  * As part of the GLDv3, we need to make sure that we can handle receiving
167  * broadcast and multicast traffic. As well as enabling promiscuous mode when
168  * requested. GLDv3 requires that all broadcast and multicast traffic be
169  * retrieved by the default group, eg. the first one. This is the same thing as
170  * the default VSI.
171  *
 * To receive broadcast traffic, we enable it through the admin queue, rather
173  * than use one of our filters for it. For multicast traffic, we reserve a
174  * certain number of the hash filters and assign them to a given PF. When we
175  * exceed those, we then switch to using promiscuous mode for multicast traffic.
176  *
177  * More specifically, once we exceed the number of filters (indicated because
178  * the i40e_t`i40e_resources.ifr_nmcastfilt ==
179  * i40e_t`i40e_resources.ifr_nmcastfilt_used), we then instead need to toggle
180  * promiscuous mode. If promiscuous mode is toggled then we keep track of the
181  * number of MACs added to it by incrementing i40e_t`i40e_mcast_promisc_count.
182  * That will stay enabled until that count reaches zero indicating that we have
183  * only added multicast addresses that we have a corresponding entry for.
184  *
185  * Because MAC itself wants to toggle promiscuous mode, which includes both
186  * unicast and multicast traffic, we go through and keep track of that
187  * ourselves. That is maintained through the use of the i40e_t`i40e_promisc_on
188  * member.
189  *
190  * --------------
191  * VSI Management
192  * --------------
193  *
194  * The PFs share 384 VSIs. The firmware creates one VSI per PF by default.
195  * During chip start we retrieve the SEID of this VSI and assign it as the
196  * default VSI for our VEB (one VEB per PF). We then add additional VSIs to
197  * the VEB up to the determined number of rx groups: i40e_t`i40e_num_rx_groups.
198  * We currently cap this number to I40E_GROUP_MAX to a) make sure all PFs can
199  * allocate the same number of VSIs, and b) to keep the interrupt multiplexing
200  * under control. In the future, when we improve the interrupt allocation, we
201  * may want to revisit this cap to make better use of the available VSIs. The
202  * VSI allocation and configuration can be found in i40e_chip_start().
203  *
204  * ----------------
205  * Structure Layout
206  * ----------------
207  *
 * The following image relates the core data structures together. The primary
209  * structure in the system is the i40e_t. It itself contains multiple rings,
210  * i40e_trqpair_t's which contain the various transmit and receive data. The
211  * receive data is stored outside of the i40e_trqpair_t and instead in the
212  * i40e_rx_data_t. The i40e_t has a corresponding i40e_device_t which keeps
213  * track of per-physical device state. Finally, for every active descriptor,
214  * there is a corresponding control block, which is where the
215  * i40e_rx_control_block_t and the i40e_tx_control_block_t come from.
216  *
217  *   +-----------------------+       +-----------------------+
218  *   | Global i40e_t list    |       | Global Device list    |
219  *   |                       |    +--|                       |
220  *   | i40e_glist            |    |  | i40e_dlist            |
221  *   +-----------------------+    |  +-----------------------+
222  *       |                        v
223  *       |      +------------------------+      +-----------------------+
224  *       |      | Device-wide Structure  |----->| Device-wide Structure |--> ...
225  *       |      | i40e_device_t          |      | i40e_device_t         |
226  *       |      |                        |      +-----------------------+
227  *       |      | dev_info_t *     ------+--> Parent in devices tree.
228  *       |      | uint_t           ------+--> PCI bus number
229  *       |      | uint_t           ------+--> PCI device number
230  *       |      | uint_t           ------+--> Number of functions
231  *       |      | i40e_switch_rsrcs_t ---+--> Captured total switch resources
232  *       |      | list_t           ------+-------------+
233  *       |      +------------------------+             |
234  *       |                           ^                 |
235  *       |                           +--------+        |
236  *       |                                    |        v
237  *       |  +---------------------------+     |   +-------------------+
238  *       +->| GLDv3 Device, per PF      |-----|-->| GLDv3 Device (PF) |--> ...
239  *          | i40e_t                    |     |   | i40e_t            |
240  *          | **Primary Structure**     |     |   +-------------------+
241  *          |                           |     |
242  *          | i40e_device_t *         --+-----+
243  *          | i40e_state_t            --+---> Device State
244  *          | i40e_hw_t               --+---> Intel common code structure
245  *          | mac_handle_t            --+---> GLDv3 handle to MAC
246  *          | ddi_periodic_t          --+---> Link activity timer
247  *          | i40e_vsi_t *            --+---> Array of VSIs
248  *          | i40e_func_rsrc_t        --+---> Available hardware resources
249  *          | i40e_switch_rsrc_t *    --+---> Switch resource snapshot
250  *          | i40e_sdu                --+---> Current MTU
251  *          | i40e_frame_max          --+---> Current HW frame size
252  *          | i40e_uaddr_t *          --+---> Array of assigned unicast MACs
253  *          | i40e_maddr_t *          --+---> Array of assigned multicast MACs
254  *          | i40e_mcast_promisccount --+---> Active multicast state
255  *          | i40e_promisc_on         --+---> Current promiscuous mode state
256  *          | uint_t                  --+---> Number of transmit/receive pairs
257  *          | i40e_rx_group_t *       --+---> Array of Rx groups
258  *          | kstat_t *               --+---> PF kstats
259  *          | i40e_pf_stats_t         --+---> PF kstat backing data
260  *          | i40e_trqpair_t *        --+---------+
261  *          +---------------------------+         |
262  *                                                |
263  *                                                v
264  *  +-------------------------------+       +-----------------------------+
265  *  | Transmit/Receive Queue Pair   |-------| Transmit/Receive Queue Pair |->...
266  *  | i40e_trqpair_t                |       | i40e_trqpair_t              |
267  *  + Ring Data Structure           |       +-----------------------------+
268  *  |                               |
269  *  | mac_ring_handle_t             +--> MAC RX ring handle
270  *  | mac_ring_handle_t             +--> MAC TX ring handle
271  *  | i40e_rxq_stat_t             --+--> RX Queue stats
272  *  | i40e_txq_stat_t             --+--> TX Queue stats
273  *  | uint32_t (tx ring size)       +--> TX Ring Size
274  *  | uint32_t (tx free list size)  +--> TX Free List Size
275  *  | i40e_dma_buffer_t     --------+--> TX Descriptor ring DMA
276  *  | i40e_tx_desc_t *      --------+--> TX descriptor ring
 *  | volatile uint32_t *           +--> TX Write back head
278  *  | uint32_t               -------+--> TX ring head
279  *  | uint32_t               -------+--> TX ring tail
280  *  | uint32_t               -------+--> Num TX desc free
281  *  | i40e_tx_control_block_t *   --+--> TX control block array  ---+
282  *  | i40e_tx_control_block_t **  --+--> TCB work list          ----+
283  *  | i40e_tx_control_block_t **  --+--> TCB free list           ---+
284  *  | uint32_t               -------+--> Free TCB count             |
285  *  | i40e_rx_data_t *       -------+--+                            v
286  *  +-------------------------------+  |          +---------------------------+
287  *                                     |          | Per-TX Frame Metadata     |
288  *                                     |          | i40e_tx_control_block_t   |
289  *                +--------------------+          |                           |
290  *                |           mblk to transmit <--+---      mblk_t *          |
291  *                |           type of transmit <--+---      i40e_tx_type_t    |
292  *                |              TX DMA handle <--+---      ddi_dma_handle_t  |
293  *                v              TX DMA buffer <--+---      i40e_dma_buffer_t |
294  *    +------------------------------+            +---------------------------+
295  *    | Core Receive Data            |
296  *    | i40e_rx_data_t               |
297  *    |                              |
298  *    | i40e_dma_buffer_t          --+--> RX descriptor DMA Data
299  *    | i40e_rx_desc_t             --+--> RX descriptor ring
300  *    | uint32_t                   --+--> Next free desc.
301  *    | i40e_rx_control_block_t *  --+--> RX Control Block Array  ---+
302  *    | i40e_rx_control_block_t ** --+--> RCB work list           ---+
303  *    | i40e_rx_control_block_t ** --+--> RCB free list           ---+
304  *    +------------------------------+                               |
305  *                ^                                                  |
306  *                |     +---------------------------+                |
307  *                |     | Per-RX Frame Metadata     |<---------------+
308  *                |     | i40e_rx_control_block_t   |
309  *                |     |                           |
310  *                |     | mblk_t *              ----+--> Received mblk_t data
311  *                |     | uint32_t              ----+--> Reference count
312  *                |     | i40e_dma_buffer_t     ----+--> Receive data DMA info
313  *                |     | frtn_t                ----+--> mblk free function info
314  *                +-----+-- i40e_rx_data_t *        |
315  *                      +---------------------------+
316  *
317  * -------------
318  * Lock Ordering
319  * -------------
320  *
321  * In order to ensure that we don't deadlock, the following represents the
322  * lock order being used. When grabbing locks, follow the following order. Lower
323  * numbers are more important. Thus, the i40e_glock which is number 0, must be
324  * taken before any other locks in the driver. On the other hand, the
325  * i40e_t`i40e_stat_lock, has the highest number because it's the least
326  * important lock. Note, that just because one lock is higher than another does
327  * not mean that all intermediary locks are required.
328  *
329  * 0) i40e_glock
330  * 1) i40e_t`i40e_general_lock
331  *
332  * 2) i40e_trqpair_t`itrq_rx_lock
333  * 3) i40e_trqpair_t`itrq_tx_lock
334  * 4) i40e_trqpair_t`itrq_intr_lock
335  * 5) i40e_t`i40e_rx_pending_lock
336  * 6) i40e_trqpair_t`itrq_tcb_lock
337  *
338  * 7) i40e_t`i40e_stat_lock
339  *
340  * Rules and expectations:
341  *
 * 1) A thread holding locks belonging to one PF should not hold locks belonging
343  * a second. If for some reason this becomes necessary, locks should be grabbed
344  * based on the list order in the i40e_device_t, which implies that the
345  * i40e_glock is held.
346  *
347  * 2) When grabbing locks between multiple transmit and receive queues, the
348  * locks for the lowest number transmit/receive queue should be grabbed first.
349  *
350  * 3) When grabbing both the transmit and receive lock for a given queue, always
351  * grab i40e_trqpair_t`itrq_rx_lock before the i40e_trqpair_t`itrq_tx_lock.
352  *
353  * 4) The following pairs of locks are not expected to be held at the same time:
354  *
355  * o i40e_t`i40e_rx_pending_lock and i40e_trqpair_t`itrq_tcb_lock
356  * o i40e_trqpair_t`itrq_intr_lock is not expected to be held with any
357  *   other lock except i40e_t`i40e_general_lock in mc_start(9E) and
 *   mc_stop(9E).
359  *
360  * -----------
361  * Future Work
362  * -----------
363  *
364  * At the moment the i40e_t driver is rather bare bones, allowing us to start
365  * getting data flowing and folks using it while we develop additional features.
366  * While bugs have been filed to cover this future work, the following gives an
367  * overview of expected work:
368  *
369  *  o DMA binding and breaking up the locking in ring recycling.
370  *  o Enhanced detection of device errors
371  *  o Participation in IRM
372  *  o FMA device reset
373  *  o Stall detection, temperature error detection, etc.
374  *  o More dynamic resource pools
375  */
376 
377 #include "i40e_sw.h"
378 
379 static char i40e_ident[] = "Intel 10/40Gb Ethernet v1.0.3";
380 
381 /*
382  * The i40e_glock primarily protects the lists below and the i40e_device_t
383  * structures.
384  */
385 static kmutex_t i40e_glock;
386 static list_t i40e_glist;
387 static list_t i40e_dlist;
388 
389 /*
390  * Access attributes for register mapping.
391  */
392 static ddi_device_acc_attr_t i40e_regs_acc_attr = {
393 	DDI_DEVICE_ATTR_V1,
394 	DDI_STRUCTURE_LE_ACC,
395 	DDI_STRICTORDER_ACC,
396 	DDI_FLAGERR_ACC
397 };
398 
399 /*
400  * Logging function for this driver.
401  */
402 static void
403 i40e_dev_err(i40e_t *i40e, int level, boolean_t console, const char *fmt,
404     va_list ap)
405 {
406 	char buf[1024];
407 
408 	(void) vsnprintf(buf, sizeof (buf), fmt, ap);
409 
410 	if (i40e == NULL) {
411 		cmn_err(level, (console) ? "%s: %s" : "!%s: %s",
412 		    I40E_MODULE_NAME, buf);
413 	} else {
414 		dev_err(i40e->i40e_dip, level, (console) ? "%s" : "!%s",
415 		    buf);
416 	}
417 }
418 
419 /*
420  * Because there's the stupid trailing-comma problem with the C preprocessor
421  * and variable arguments, I need to instantiate these.	 Pardon the redundant
422  * code.
423  */
424 /*PRINTFLIKE2*/
/*
 * Log a warning (CE_WARN) for this instance; i40e may be NULL early in
 * attach. The message is not directed to the system console.
 */
void
i40e_error(i40e_t *i40e, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	i40e_dev_err(i40e, CE_WARN, B_FALSE, fmt, ap);
	va_end(ap);
}
434 
435 /*PRINTFLIKE2*/
/*
 * Log an informational note (CE_NOTE) for this instance; not directed to
 * the system console.
 */
void
i40e_log(i40e_t *i40e, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	i40e_dev_err(i40e, CE_NOTE, B_FALSE, fmt, ap);
	va_end(ap);
}
445 
446 /*PRINTFLIKE2*/
/*
 * Log a note (CE_NOTE) for this instance that is also sent to the system
 * console (console == B_TRUE in i40e_dev_err()).
 */
void
i40e_notice(i40e_t *i40e, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	i40e_dev_err(i40e, CE_NOTE, B_TRUE, fmt, ap);
	va_end(ap);
}
456 
457 /*
458  * Various parts of the driver need to know if the controller is from the X722
459  * family, which has a few additional capabilities and different programming
460  * means. We don't consider virtual functions as part of this as they are quite
461  * different and will require substantially more work.
462  */
463 static boolean_t
464 i40e_is_x722(i40e_t *i40e)
465 {
466 	return (i40e->i40e_hw_space.mac.type == I40E_MAC_X722);
467 }
468 
469 static void
470 i40e_device_rele(i40e_t *i40e)
471 {
472 	i40e_device_t *idp = i40e->i40e_device;
473 
474 	if (idp == NULL)
475 		return;
476 
477 	mutex_enter(&i40e_glock);
478 	VERIFY(idp->id_nreg > 0);
479 	list_remove(&idp->id_i40e_list, i40e);
480 	idp->id_nreg--;
481 	if (idp->id_nreg == 0) {
482 		list_remove(&i40e_dlist, idp);
483 		list_destroy(&idp->id_i40e_list);
484 		kmem_free(idp->id_rsrcs, sizeof (i40e_switch_rsrc_t) *
485 		    idp->id_rsrcs_alloc);
486 		kmem_free(idp, sizeof (i40e_device_t));
487 	}
488 	i40e->i40e_device = NULL;
489 	mutex_exit(&i40e_glock);
490 }
491 
492 static i40e_device_t *
493 i40e_device_find(i40e_t *i40e, dev_info_t *parent, uint_t bus, uint_t device)
494 {
495 	i40e_device_t *idp;
496 	mutex_enter(&i40e_glock);
497 	for (idp = list_head(&i40e_dlist); idp != NULL;
498 	    idp = list_next(&i40e_dlist, idp)) {
499 		if (idp->id_parent == parent && idp->id_pci_bus == bus &&
500 		    idp->id_pci_device == device) {
501 			break;
502 		}
503 	}
504 
505 	if (idp != NULL) {
506 		VERIFY(idp->id_nreg < idp->id_nfuncs);
507 		idp->id_nreg++;
508 	} else {
509 		i40e_hw_t *hw = &i40e->i40e_hw_space;
510 		ASSERT(hw->num_ports > 0);
511 		ASSERT(hw->num_partitions > 0);
512 
513 		/*
514 		 * The Intel common code doesn't exactly keep the number of PCI
515 		 * functions. But it calculates it during discovery of
516 		 * partitions and ports. So what we do is undo the calculation
517 		 * that it does originally, as functions are evenly spread
518 		 * across ports in the rare case of partitions.
519 		 */
520 		idp = kmem_alloc(sizeof (i40e_device_t), KM_SLEEP);
521 		idp->id_parent = parent;
522 		idp->id_pci_bus = bus;
523 		idp->id_pci_device = device;
524 		idp->id_nfuncs = hw->num_ports * hw->num_partitions;
525 		idp->id_nreg = 1;
526 		idp->id_rsrcs_alloc = i40e->i40e_switch_rsrc_alloc;
527 		idp->id_rsrcs_act = i40e->i40e_switch_rsrc_actual;
528 		idp->id_rsrcs = kmem_alloc(sizeof (i40e_switch_rsrc_t) *
529 		    idp->id_rsrcs_alloc, KM_SLEEP);
530 		bcopy(i40e->i40e_switch_rsrcs, idp->id_rsrcs,
531 		    sizeof (i40e_switch_rsrc_t) * idp->id_rsrcs_alloc);
532 		list_create(&idp->id_i40e_list, sizeof (i40e_t),
533 		    offsetof(i40e_t, i40e_dlink));
534 
535 		list_insert_tail(&i40e_dlist, idp);
536 	}
537 
538 	list_insert_tail(&idp->id_i40e_list, i40e);
539 	mutex_exit(&i40e_glock);
540 
541 	return (idp);
542 }
543 
544 static void
545 i40e_link_state_set(i40e_t *i40e, link_state_t state)
546 {
547 	if (i40e->i40e_link_state == state)
548 		return;
549 
550 	i40e->i40e_link_state = state;
551 	mac_link_update(i40e->i40e_mac_hdl, i40e->i40e_link_state);
552 }
553 
554 /*
555  * This is a basic link check routine. Mostly we're using this just to see
556  * if we can get any accurate information about the state of the link being
557  * up or down, as well as updating the link state, speed, etc. information.
558  */
559 void
560 i40e_link_check(i40e_t *i40e)
561 {
562 	i40e_hw_t *hw = &i40e->i40e_hw_space;
563 	boolean_t ls;
564 	int ret;
565 
566 	ASSERT(MUTEX_HELD(&i40e->i40e_general_lock));
567 
568 	hw->phy.get_link_info = B_TRUE;
569 	if ((ret = i40e_get_link_status(hw, &ls)) != I40E_SUCCESS) {
570 		i40e->i40e_s_link_status_errs++;
571 		i40e->i40e_s_link_status_lasterr = ret;
572 		return;
573 	}
574 
575 	/*
576 	 * Firmware abstracts all of the mac and phy information for us, so we
577 	 * can use i40e_get_link_status to determine the current state.
578 	 */
579 	if (ls == B_TRUE) {
580 		enum i40e_aq_link_speed speed;
581 
582 		speed = i40e_get_link_speed(hw);
583 
584 		/*
585 		 * Translate from an i40e value to a value in Mbits/s.
586 		 */
587 		switch (speed) {
588 		case I40E_LINK_SPEED_100MB:
589 			i40e->i40e_link_speed = 100;
590 			break;
591 		case I40E_LINK_SPEED_1GB:
592 			i40e->i40e_link_speed = 1000;
593 			break;
594 		case I40E_LINK_SPEED_2_5GB:
595 			i40e->i40e_link_speed = 2500;
596 			break;
597 		case I40E_LINK_SPEED_5GB:
598 			i40e->i40e_link_speed = 5000;
599 			break;
600 		case I40E_LINK_SPEED_10GB:
601 			i40e->i40e_link_speed = 10000;
602 			break;
603 		case I40E_LINK_SPEED_20GB:
604 			i40e->i40e_link_speed = 20000;
605 			break;
606 		case I40E_LINK_SPEED_40GB:
607 			i40e->i40e_link_speed = 40000;
608 			break;
609 		case I40E_LINK_SPEED_25GB:
610 			i40e->i40e_link_speed = 25000;
611 			break;
612 		default:
613 			i40e->i40e_link_speed = 0;
614 			break;
615 		}
616 
617 		/*
618 		 * At this time, hardware does not support half-duplex
619 		 * operation, hence why we don't ask the hardware about our
620 		 * current speed.
621 		 */
622 		i40e->i40e_link_duplex = LINK_DUPLEX_FULL;
623 		i40e_link_state_set(i40e, LINK_STATE_UP);
624 	} else {
625 		i40e->i40e_link_speed = 0;
626 		i40e->i40e_link_duplex = 0;
627 		i40e_link_state_set(i40e, LINK_STATE_DOWN);
628 	}
629 }
630 
631 static void
632 i40e_rem_intrs(i40e_t *i40e)
633 {
634 	int i, rc;
635 
636 	for (i = 0; i < i40e->i40e_intr_count; i++) {
637 		rc = ddi_intr_free(i40e->i40e_intr_handles[i]);
638 		if (rc != DDI_SUCCESS) {
639 			i40e_log(i40e, "failed to free interrupt %d: %d",
640 			    i, rc);
641 		}
642 	}
643 
644 	kmem_free(i40e->i40e_intr_handles, i40e->i40e_intr_size);
645 	i40e->i40e_intr_handles = NULL;
646 }
647 
648 static void
649 i40e_rem_intr_handlers(i40e_t *i40e)
650 {
651 	int i, rc;
652 
653 	for (i = 0; i < i40e->i40e_intr_count; i++) {
654 		rc = ddi_intr_remove_handler(i40e->i40e_intr_handles[i]);
655 		if (rc != DDI_SUCCESS) {
656 			i40e_log(i40e, "failed to remove interrupt %d: %d",
657 			    i, rc);
658 		}
659 	}
660 }
661 
662 /*
663  * illumos Fault Management Architecture (FMA) support.
664  */
665 
666 int
667 i40e_check_acc_handle(ddi_acc_handle_t handle)
668 {
669 	ddi_fm_error_t de;
670 
671 	ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
672 	ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
673 	return (de.fme_status);
674 }
675 
676 int
677 i40e_check_dma_handle(ddi_dma_handle_t handle)
678 {
679 	ddi_fm_error_t de;
680 
681 	ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
682 	return (de.fme_status);
683 }
684 
685 /*
686  * Fault service error handling callback function.
687  */
688 /* ARGSUSED */
static int
i40e_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
{
	/* Post the PCI ereport on behalf of the FMA framework. */
	pci_ereport_post(dip, err, NULL);
	return (err->fme_status);
}
695 
696 static void
697 i40e_fm_init(i40e_t *i40e)
698 {
699 	ddi_iblock_cookie_t iblk;
700 
701 	i40e->i40e_fm_capabilities = ddi_prop_get_int(DDI_DEV_T_ANY,
702 	    i40e->i40e_dip, DDI_PROP_DONTPASS, "fm_capable",
703 	    DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
704 	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
705 
706 	if (i40e->i40e_fm_capabilities < 0) {
707 		i40e->i40e_fm_capabilities = 0;
708 	} else if (i40e->i40e_fm_capabilities > 0xf) {
709 		i40e->i40e_fm_capabilities = DDI_FM_EREPORT_CAPABLE |
710 		    DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE |
711 		    DDI_FM_ERRCB_CAPABLE;
712 	}
713 
714 	/*
715 	 * Only register with IO Fault Services if we have some capability
716 	 */
717 	if (i40e->i40e_fm_capabilities & DDI_FM_ACCCHK_CAPABLE) {
718 		i40e_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
719 	} else {
720 		i40e_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
721 	}
722 
723 	if (i40e->i40e_fm_capabilities) {
724 		ddi_fm_init(i40e->i40e_dip, &i40e->i40e_fm_capabilities, &iblk);
725 
726 		if (DDI_FM_EREPORT_CAP(i40e->i40e_fm_capabilities) ||
727 		    DDI_FM_ERRCB_CAP(i40e->i40e_fm_capabilities)) {
728 			pci_ereport_setup(i40e->i40e_dip);
729 		}
730 
731 		if (DDI_FM_ERRCB_CAP(i40e->i40e_fm_capabilities)) {
732 			ddi_fm_handler_register(i40e->i40e_dip,
733 			    i40e_fm_error_cb, (void*)i40e);
734 		}
735 	}
736 
737 	if (i40e->i40e_fm_capabilities & DDI_FM_DMACHK_CAPABLE) {
738 		i40e_init_dma_attrs(i40e, B_TRUE);
739 	} else {
740 		i40e_init_dma_attrs(i40e, B_FALSE);
741 	}
742 }
743 
744 static void
745 i40e_fm_fini(i40e_t *i40e)
746 {
747 	if (i40e->i40e_fm_capabilities) {
748 
749 		if (DDI_FM_EREPORT_CAP(i40e->i40e_fm_capabilities) ||
750 		    DDI_FM_ERRCB_CAP(i40e->i40e_fm_capabilities))
751 			pci_ereport_teardown(i40e->i40e_dip);
752 
753 		if (DDI_FM_ERRCB_CAP(i40e->i40e_fm_capabilities))
754 			ddi_fm_handler_unregister(i40e->i40e_dip);
755 
756 		ddi_fm_fini(i40e->i40e_dip);
757 	}
758 }
759 
760 void
761 i40e_fm_ereport(i40e_t *i40e, char *detail)
762 {
763 	uint64_t ena;
764 	char buf[FM_MAX_CLASS];
765 
766 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
767 	ena = fm_ena_generate(0, FM_ENA_FMT1);
768 	if (DDI_FM_EREPORT_CAP(i40e->i40e_fm_capabilities)) {
769 		ddi_fm_ereport_post(i40e->i40e_dip, buf, ena, DDI_NOSLEEP,
770 		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
771 	}
772 }
773 
774 /*
775  * Here we're trying to set the SEID of the default VSI. In general,
776  * when we come through and look at this shortly after attach, we
777  * expect there to only be a single element present, which is the
778  * default VSI. Importantly, each PF seems to not see any other
779  * devices, in part because of the simple switch mode that we're
780  * using. If for some reason, we see more artifacts, we'll need to
781  * revisit what we're doing here.
782  */
static boolean_t
i40e_set_def_vsi_seid(i40e_t *i40e)
{
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	struct i40e_aqc_get_switch_config_resp *sw_config;
	uint8_t aq_buf[I40E_AQ_LARGE_BUF];
	uint16_t next = 0;
	int rc;

	/* LINTED: E_BAD_PTR_CAST_ALIGN */
	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
	rc = i40e_aq_get_switch_config(hw, sw_config, sizeof (aq_buf), &next,
	    NULL);
	if (rc != I40E_SUCCESS) {
		i40e_error(i40e, "i40e_aq_get_switch_config() failed %d: %d",
		    rc, hw->aq.asq_last_status);
		return (B_FALSE);
	}

	/*
	 * As described in the block comment above, we expect exactly one
	 * switching element; more than that means a configuration this
	 * driver does not understand, so fail the attach rather than guess.
	 */
	if (LE_16(sw_config->header.num_reported) != 1) {
		i40e_error(i40e, "encountered multiple (%d) switching units "
		    "during attach, not proceeding",
		    LE_16(sw_config->header.num_reported));
		return (B_FALSE);
	}

	/* The lone reported element is our default VSI. */
	I40E_DEF_VSI_SEID(i40e) = sw_config->element[0].seid;
	return (B_TRUE);
}
812 
813 /*
814  * Get the SEID of the uplink MAC.
815  */
816 static int
817 i40e_get_mac_seid(i40e_t *i40e)
818 {
819 	i40e_hw_t *hw = &i40e->i40e_hw_space;
820 	struct i40e_aqc_get_switch_config_resp *sw_config;
821 	uint8_t aq_buf[I40E_AQ_LARGE_BUF];
822 	uint16_t next = 0;
823 	int rc;
824 
825 	/* LINTED: E_BAD_PTR_CAST_ALIGN */
826 	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
827 	rc = i40e_aq_get_switch_config(hw, sw_config, sizeof (aq_buf), &next,
828 	    NULL);
829 	if (rc != I40E_SUCCESS) {
830 		i40e_error(i40e, "i40e_aq_get_switch_config() failed %d: %d",
831 		    rc, hw->aq.asq_last_status);
832 		return (-1);
833 	}
834 
835 	return (LE_16(sw_config->element[0].uplink_seid));
836 }
837 
838 /*
839  * We need to fill the i40e_hw_t structure with the capabilities of this PF. We
840  * must also provide the memory for it; however, we don't need to keep it around
841  * to the call to the common code. It takes it and parses it into an internal
842  * structure.
843  */
static boolean_t
i40e_get_hw_capabilities(i40e_t *i40e, i40e_hw_t *hw)
{
	struct i40e_aqc_list_capabilities_element_resp *buf;
	int rc;
	size_t len;
	uint16_t needed;
	int nelems = I40E_HW_CAP_DEFAULT;

	/* Start with a default-sized buffer; firmware tells us if short. */
	len = nelems * sizeof (*buf);

	for (;;) {
		ASSERT(len > 0);
		buf = kmem_alloc(len, KM_SLEEP);
		rc = i40e_aq_discover_capabilities(hw, buf, len,
		    &needed, i40e_aqc_opc_list_func_capabilities, NULL);
		kmem_free(buf, len);

		/*
		 * On ENOMEM the firmware reports via 'needed' the buffer
		 * size it actually requires, and we retry with that.
		 * NOTE(review): 'nelems' is an element count while 'needed'
		 * is a byte count; the inner equality test guards the
		 * pathological case where the common code asks for the
		 * same value we started with, which would retry forever.
		 */
		if (hw->aq.asq_last_status == I40E_AQ_RC_ENOMEM &&
		    nelems == I40E_HW_CAP_DEFAULT) {
			if (nelems == needed) {
				i40e_error(i40e, "Capability discovery failed "
				    "due to byzantine common code");
				return (B_FALSE);
			}
			len = needed;
			continue;
		} else if (rc != I40E_SUCCESS ||
		    hw->aq.asq_last_status != I40E_AQ_RC_OK) {
			i40e_error(i40e, "Capability discovery failed: %d", rc);
			return (B_FALSE);
		}

		break;
	}

	return (B_TRUE);
}
882 
883 /*
884  * Obtain the switch's capabilities as seen by this PF and keep it around for
885  * our later use.
886  */
static boolean_t
i40e_get_switch_resources(i40e_t *i40e)
{
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	uint8_t cnt = 2;
	uint8_t act;
	size_t size;
	i40e_switch_rsrc_t *buf;

	/*
	 * The required element count isn't known up front: start small and
	 * grow the buffer each time firmware rejects it (admin queue error
	 * with EINVAL).  On success the buffer is kept -- ownership passes
	 * to the i40e_t and it is freed by i40e_cleanup_resources().
	 */
	for (;;) {
		enum i40e_status_code ret;
		size = cnt * sizeof (i40e_switch_rsrc_t);
		ASSERT(size > 0);
		if (size > UINT16_MAX)
			return (B_FALSE);
		buf = kmem_alloc(size, KM_SLEEP);

		ret = i40e_aq_get_switch_resource_alloc(hw, &act, buf,
		    cnt, NULL);
		if (ret == I40E_ERR_ADMIN_QUEUE_ERROR &&
		    hw->aq.asq_last_status == I40E_AQ_RC_EINVAL) {
			/* Buffer too small: free, enlarge, retry. */
			kmem_free(buf, size);
			cnt += I40E_SWITCH_CAP_DEFAULT;
			continue;
		} else if (ret != I40E_SUCCESS) {
			kmem_free(buf, size);
			i40e_error(i40e,
			    "failed to retrieve switch statistics: %d", ret);
			return (B_FALSE);
		}

		break;
	}

	/* 'cnt' is what we allocated; 'act' is what firmware filled in. */
	i40e->i40e_switch_rsrc_alloc = cnt;
	i40e->i40e_switch_rsrc_actual = act;
	i40e->i40e_switch_rsrcs = buf;

	return (B_TRUE);
}
927 
928 static void
929 i40e_cleanup_resources(i40e_t *i40e)
930 {
931 	if (i40e->i40e_uaddrs != NULL) {
932 		kmem_free(i40e->i40e_uaddrs, sizeof (i40e_uaddr_t) *
933 		    i40e->i40e_resources.ifr_nmacfilt);
934 		i40e->i40e_uaddrs = NULL;
935 	}
936 
937 	if (i40e->i40e_maddrs != NULL) {
938 		kmem_free(i40e->i40e_maddrs, sizeof (i40e_maddr_t) *
939 		    i40e->i40e_resources.ifr_nmcastfilt);
940 		i40e->i40e_maddrs = NULL;
941 	}
942 
943 	if (i40e->i40e_switch_rsrcs != NULL) {
944 		size_t sz = sizeof (i40e_switch_rsrc_t) *
945 		    i40e->i40e_switch_rsrc_alloc;
946 		ASSERT(sz > 0);
947 		kmem_free(i40e->i40e_switch_rsrcs, sz);
948 		i40e->i40e_switch_rsrcs = NULL;
949 	}
950 
951 	if (i40e->i40e_device != NULL)
952 		i40e_device_rele(i40e);
953 }
954 
955 static boolean_t
956 i40e_get_available_resources(i40e_t *i40e)
957 {
958 	dev_info_t *parent;
959 	uint16_t bus, device, func;
960 	uint_t nregs;
961 	int *regs, i;
962 	i40e_device_t *idp;
963 	i40e_hw_t *hw = &i40e->i40e_hw_space;
964 
965 	parent = ddi_get_parent(i40e->i40e_dip);
966 
967 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, i40e->i40e_dip, 0, "reg",
968 	    &regs, &nregs) != DDI_PROP_SUCCESS) {
969 		return (B_FALSE);
970 	}
971 
972 	if (nregs < 1) {
973 		ddi_prop_free(regs);
974 		return (B_FALSE);
975 	}
976 
977 	bus = PCI_REG_BUS_G(regs[0]);
978 	device = PCI_REG_DEV_G(regs[0]);
979 	func = PCI_REG_FUNC_G(regs[0]);
980 	ddi_prop_free(regs);
981 
982 	i40e->i40e_hw_space.bus.func = func;
983 	i40e->i40e_hw_space.bus.device = device;
984 
985 	if (i40e_get_switch_resources(i40e) == B_FALSE) {
986 		return (B_FALSE);
987 	}
988 
989 	/*
990 	 * To calculate the total amount of a resource we have available, we
991 	 * need to add how many our i40e_t thinks it has guaranteed, if any, and
992 	 * then we need to go through and divide the number of available on the
993 	 * device, which was snapshotted before anyone should have allocated
994 	 * anything, and use that to derive how many are available from the
995 	 * pool. Longer term, we may want to turn this into something that's
996 	 * more of a pool-like resource that everything can share (though that
997 	 * may require some more assistance from MAC).
998 	 *
999 	 * Though for transmit and receive queue pairs, we just have to ask
1000 	 * firmware instead.
1001 	 */
1002 	idp = i40e_device_find(i40e, parent, bus, device);
1003 	i40e->i40e_device = idp;
1004 	i40e->i40e_resources.ifr_nvsis = 0;
1005 	i40e->i40e_resources.ifr_nvsis_used = 0;
1006 	i40e->i40e_resources.ifr_nmacfilt = 0;
1007 	i40e->i40e_resources.ifr_nmacfilt_used = 0;
1008 	i40e->i40e_resources.ifr_nmcastfilt = 0;
1009 	i40e->i40e_resources.ifr_nmcastfilt_used = 0;
1010 
1011 	for (i = 0; i < i40e->i40e_switch_rsrc_actual; i++) {
1012 		i40e_switch_rsrc_t *srp = &i40e->i40e_switch_rsrcs[i];
1013 
1014 		switch (srp->resource_type) {
1015 		case I40E_AQ_RESOURCE_TYPE_VSI:
1016 			i40e->i40e_resources.ifr_nvsis +=
1017 			    LE_16(srp->guaranteed);
1018 			i40e->i40e_resources.ifr_nvsis_used = LE_16(srp->used);
1019 			break;
1020 		case I40E_AQ_RESOURCE_TYPE_MACADDR:
1021 			i40e->i40e_resources.ifr_nmacfilt +=
1022 			    LE_16(srp->guaranteed);
1023 			i40e->i40e_resources.ifr_nmacfilt_used =
1024 			    LE_16(srp->used);
1025 			break;
1026 		case I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH:
1027 			i40e->i40e_resources.ifr_nmcastfilt +=
1028 			    LE_16(srp->guaranteed);
1029 			i40e->i40e_resources.ifr_nmcastfilt_used =
1030 			    LE_16(srp->used);
1031 			break;
1032 		default:
1033 			break;
1034 		}
1035 	}
1036 
1037 	for (i = 0; i < idp->id_rsrcs_act; i++) {
1038 		i40e_switch_rsrc_t *srp = &i40e->i40e_switch_rsrcs[i];
1039 		switch (srp->resource_type) {
1040 		case I40E_AQ_RESOURCE_TYPE_VSI:
1041 			i40e->i40e_resources.ifr_nvsis +=
1042 			    LE_16(srp->total_unalloced) / idp->id_nfuncs;
1043 			break;
1044 		case I40E_AQ_RESOURCE_TYPE_MACADDR:
1045 			i40e->i40e_resources.ifr_nmacfilt +=
1046 			    LE_16(srp->total_unalloced) / idp->id_nfuncs;
1047 			break;
1048 		case I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH:
1049 			i40e->i40e_resources.ifr_nmcastfilt +=
1050 			    LE_16(srp->total_unalloced) / idp->id_nfuncs;
1051 		default:
1052 			break;
1053 		}
1054 	}
1055 
1056 	i40e->i40e_resources.ifr_nrx_queue = hw->func_caps.num_rx_qp;
1057 	i40e->i40e_resources.ifr_ntx_queue = hw->func_caps.num_tx_qp;
1058 
1059 	i40e->i40e_uaddrs = kmem_zalloc(sizeof (i40e_uaddr_t) *
1060 	    i40e->i40e_resources.ifr_nmacfilt, KM_SLEEP);
1061 	i40e->i40e_maddrs = kmem_zalloc(sizeof (i40e_maddr_t) *
1062 	    i40e->i40e_resources.ifr_nmcastfilt, KM_SLEEP);
1063 
1064 	/*
1065 	 * Initialize these as multicast addresses to indicate it's invalid for
1066 	 * sanity purposes. Think of it like 0xdeadbeef.
1067 	 */
1068 	for (i = 0; i < i40e->i40e_resources.ifr_nmacfilt; i++)
1069 		i40e->i40e_uaddrs[i].iua_mac[0] = 0x01;
1070 
1071 	return (B_TRUE);
1072 }
1073 
1074 static boolean_t
1075 i40e_enable_interrupts(i40e_t *i40e)
1076 {
1077 	int i, rc;
1078 
1079 	if (i40e->i40e_intr_cap & DDI_INTR_FLAG_BLOCK) {
1080 		rc = ddi_intr_block_enable(i40e->i40e_intr_handles,
1081 		    i40e->i40e_intr_count);
1082 		if (rc != DDI_SUCCESS) {
1083 			i40e_error(i40e, "Interrupt block-enable failed: %d",
1084 			    rc);
1085 			return (B_FALSE);
1086 		}
1087 	} else {
1088 		for (i = 0; i < i40e->i40e_intr_count; i++) {
1089 			rc = ddi_intr_enable(i40e->i40e_intr_handles[i]);
1090 			if (rc != DDI_SUCCESS) {
1091 				i40e_error(i40e,
1092 				    "Failed to enable interrupt %d: %d", i, rc);
1093 				while (--i >= 0) {
1094 					(void) ddi_intr_disable(
1095 					    i40e->i40e_intr_handles[i]);
1096 				}
1097 				return (B_FALSE);
1098 			}
1099 		}
1100 	}
1101 
1102 	return (B_TRUE);
1103 }
1104 
1105 static boolean_t
1106 i40e_disable_interrupts(i40e_t *i40e)
1107 {
1108 	int i, rc;
1109 
1110 	if (i40e->i40e_intr_cap & DDI_INTR_FLAG_BLOCK) {
1111 		rc = ddi_intr_block_disable(i40e->i40e_intr_handles,
1112 		    i40e->i40e_intr_count);
1113 		if (rc != DDI_SUCCESS) {
1114 			i40e_error(i40e,
1115 			    "Interrupt block-disabled failed: %d", rc);
1116 			return (B_FALSE);
1117 		}
1118 	} else {
1119 		for (i = 0; i < i40e->i40e_intr_count; i++) {
1120 			rc = ddi_intr_disable(i40e->i40e_intr_handles[i]);
1121 			if (rc != DDI_SUCCESS) {
1122 				i40e_error(i40e,
1123 				    "Failed to disable interrupt %d: %d",
1124 				    i, rc);
1125 				return (B_FALSE);
1126 			}
1127 		}
1128 	}
1129 
1130 	return (B_TRUE);
1131 }
1132 
1133 /*
1134  * Free receive & transmit rings.
1135  */
static void
i40e_free_trqpairs(i40e_t *i40e)
{
	i40e_trqpair_t *itrq;

	/* Release the RX group array first. */
	if (i40e->i40e_rx_groups != NULL) {
		kmem_free(i40e->i40e_rx_groups,
		    sizeof (i40e_rx_group_t) * i40e->i40e_num_rx_groups);
		i40e->i40e_rx_groups = NULL;
	}

	if (i40e->i40e_trqpairs != NULL) {
		for (uint_t i = 0; i < i40e->i40e_num_trqpairs; i++) {
			itrq = &i40e->i40e_trqpairs[i];
			/* Undo each pair's locks, CVs, and kstats. */
			mutex_destroy(&itrq->itrq_intr_lock);
			mutex_destroy(&itrq->itrq_rx_lock);
			mutex_destroy(&itrq->itrq_tx_lock);
			mutex_destroy(&itrq->itrq_tcb_lock);
			cv_destroy(&itrq->itrq_intr_cv);
			cv_destroy(&itrq->itrq_tx_cv);

			i40e_stats_trqpair_fini(itrq);
		}

		kmem_free(i40e->i40e_trqpairs,
		    sizeof (i40e_trqpair_t) * i40e->i40e_num_trqpairs);
		i40e->i40e_trqpairs = NULL;
	}

	/* Finally, tear down the instance-wide synchronization objects. */
	cv_destroy(&i40e->i40e_rx_pending_cv);
	mutex_destroy(&i40e->i40e_rx_pending_lock);
	mutex_destroy(&i40e->i40e_general_lock);
}
1169 
1170 /*
1171  * Allocate transmit and receive rings, as well as other data structures that we
1172  * need.
1173  */
static boolean_t
i40e_alloc_trqpairs(i40e_t *i40e)
{
	void *mutexpri = DDI_INTR_PRI(i40e->i40e_intr_pri);

	/*
	 * Now that we have the priority for the interrupts, initialize
	 * all relevant locks.
	 */
	mutex_init(&i40e->i40e_general_lock, NULL, MUTEX_DRIVER, mutexpri);
	mutex_init(&i40e->i40e_rx_pending_lock, NULL, MUTEX_DRIVER, mutexpri);
	cv_init(&i40e->i40e_rx_pending_cv, NULL, CV_DRIVER, NULL);

	/* One trqpair structure per TX/RX queue pair. */
	i40e->i40e_trqpairs = kmem_zalloc(sizeof (i40e_trqpair_t) *
	    i40e->i40e_num_trqpairs, KM_SLEEP);
	for (uint_t i = 0; i < i40e->i40e_num_trqpairs; i++) {
		i40e_trqpair_t *itrq = &i40e->i40e_trqpairs[i];

		itrq->itrq_i40e = i40e;
		mutex_init(&itrq->itrq_intr_lock, NULL, MUTEX_DRIVER, mutexpri);
		mutex_init(&itrq->itrq_rx_lock, NULL, MUTEX_DRIVER, mutexpri);
		mutex_init(&itrq->itrq_tx_lock, NULL, MUTEX_DRIVER, mutexpri);
		mutex_init(&itrq->itrq_tcb_lock, NULL, MUTEX_DRIVER, mutexpri);
		cv_init(&itrq->itrq_intr_cv, NULL, CV_DRIVER, NULL);
		cv_init(&itrq->itrq_tx_cv, NULL, CV_DRIVER, NULL);
		itrq->itrq_index = i;
		/* Pairs start out quiesced until their rings come up. */
		itrq->itrq_intr_quiesce = B_TRUE;
		itrq->itrq_tx_quiesce = B_TRUE;
	}

	for (uint_t i = 0; i < i40e->i40e_num_trqpairs; i++) {
		/*
		 * Keeping this in a separate iteration makes the
		 * clean up path safe.
		 */
		if (!i40e_stats_trqpair_init(&i40e->i40e_trqpairs[i])) {
			i40e_free_trqpairs(i40e);
			return (B_FALSE);
		}
	}

	i40e->i40e_rx_groups = kmem_zalloc(sizeof (i40e_rx_group_t) *
	    i40e->i40e_num_rx_groups, KM_SLEEP);

	for (uint_t i = 0; i < i40e->i40e_num_rx_groups; i++) {
		i40e_rx_group_t *rxg = &i40e->i40e_rx_groups[i];

		rxg->irg_index = i;
		rxg->irg_i40e = i40e;
	}

	return (B_TRUE);
}
1227 
1228 
1229 
1230 /*
1231  * Unless a .conf file already overrode i40e_t structure values, they will
1232  * be 0, and need to be set in conjunction with the now-available HW report.
1233  */
1234 /* ARGSUSED */
1235 static void
1236 i40e_hw_to_instance(i40e_t *i40e, i40e_hw_t *hw)
1237 {
1238 	if (i40e->i40e_num_trqpairs_per_vsi == 0) {
1239 		if (i40e_is_x722(i40e)) {
1240 			i40e->i40e_num_trqpairs_per_vsi =
1241 			    I40E_722_MAX_TC_QUEUES;
1242 		} else {
1243 			i40e->i40e_num_trqpairs_per_vsi =
1244 			    I40E_710_MAX_TC_QUEUES;
1245 		}
1246 	}
1247 
1248 	if (i40e->i40e_num_rx_groups == 0) {
1249 		i40e->i40e_num_rx_groups = I40E_DEF_NUM_RX_GROUPS;
1250 	}
1251 }
1252 
1253 /*
1254  * Free any resources required by, or setup by, the Intel common code.
1255  */
1256 static void
1257 i40e_common_code_fini(i40e_t *i40e)
1258 {
1259 	i40e_hw_t *hw = &i40e->i40e_hw_space;
1260 	int rc;
1261 
1262 	rc = i40e_shutdown_lan_hmc(hw);
1263 	if (rc != I40E_SUCCESS)
1264 		i40e_error(i40e, "failed to shutdown LAN hmc: %d", rc);
1265 
1266 	rc = i40e_shutdown_adminq(hw);
1267 	if (rc != I40E_SUCCESS)
1268 		i40e_error(i40e, "failed to shutdown admin queue: %d", rc);
1269 }
1270 
1271 /*
1272  * Initialize and call Intel common-code routines, includes some setup
1273  * the common code expects from the driver.  Also prints on failure, so
1274  * the caller doesn't have to.
1275  */
static boolean_t
i40e_common_code_init(i40e_t *i40e, i40e_hw_t *hw)
{
	int rc;

	/* A PF reset gives us a clean slate before any other programming. */
	i40e_clear_hw(hw);
	rc = i40e_pf_reset(hw);
	if (rc != 0) {
		i40e_error(i40e, "failed to reset hardware: %d", rc);
		i40e_fm_ereport(i40e, DDI_FM_DEVICE_NO_RESPONSE);
		return (B_FALSE);
	}

	rc = i40e_init_shared_code(hw);
	if (rc != 0) {
		i40e_error(i40e, "failed to initialize i40e core: %d", rc);
		return (B_FALSE);
	}

	/* Size the admin queues before the common code constructs them. */
	hw->aq.num_arq_entries = I40E_DEF_ADMINQ_SIZE;
	hw->aq.num_asq_entries =  I40E_DEF_ADMINQ_SIZE;
	hw->aq.arq_buf_size = I40E_ADMINQ_BUFSZ;
	hw->aq.asq_buf_size = I40E_ADMINQ_BUFSZ;

	rc = i40e_init_adminq(hw);
	if (rc != 0) {
		i40e_error(i40e, "failed to initialize firmware admin queue: "
		    "%d, potential firmware version mismatch", rc);
		i40e_fm_ereport(i40e, DDI_FM_DEVICE_INVAL_STATE);
		return (B_FALSE);
	}

	/*
	 * Warn (but continue) when the firmware API version is newer or
	 * older than what this driver was built against.
	 */
	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw)) {
		i40e_log(i40e, "The driver for the device detected a newer "
		    "version of the NVM image (%d.%d) than expected (%d.%d).\n"
		    "Please install the most recent version of the network "
		    "driver.\n", hw->aq.api_maj_ver, hw->aq.api_min_ver,
		    I40E_FW_API_VERSION_MAJOR, I40E_FW_MINOR_VERSION(hw));
	} else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
	    hw->aq.api_min_ver < (I40E_FW_MINOR_VERSION(hw) - 1)) {
		i40e_log(i40e, "The driver for the device detected an older"
		    " version of the NVM image (%d.%d) than expected (%d.%d)."
		    "\nPlease update the NVM image.\n",
		    hw->aq.api_maj_ver, hw->aq.api_min_ver,
		    I40E_FW_API_VERSION_MAJOR, I40E_FW_MINOR_VERSION(hw) - 1);
	}

	i40e_clear_pxe_mode(hw);

	/*
	 * We need to call this so that the common code can discover
	 * capabilities of the hardware, which it uses throughout the rest.
	 */
	if (!i40e_get_hw_capabilities(i40e, hw)) {
		i40e_error(i40e, "failed to obtain hardware capabilities");
		return (B_FALSE);
	}

	if (i40e_get_available_resources(i40e) == B_FALSE) {
		i40e_error(i40e, "failed to obtain hardware resources");
		return (B_FALSE);
	}

	/* Fill in instance defaults that depend on the identified HW. */
	i40e_hw_to_instance(i40e, hw);

	rc = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp, 0, 0);
	if (rc != 0) {
		i40e_error(i40e, "failed to initialize hardware memory cache: "
		    "%d", rc);
		return (B_FALSE);
	}

	rc = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (rc != 0) {
		i40e_error(i40e, "failed to configure hardware memory cache: "
		    "%d", rc);
		return (B_FALSE);
	}

	/* Stop the LLDP agent; the return is deliberately ignored. */
	(void) i40e_aq_stop_lldp(hw, TRUE, FALSE, NULL);

	rc = i40e_get_mac_addr(hw, hw->mac.addr);
	if (rc != I40E_SUCCESS) {
		i40e_error(i40e, "failed to retrieve hardware mac address: %d",
		    rc);
		return (B_FALSE);
	}

	rc = i40e_validate_mac_addr(hw->mac.addr);
	if (rc != 0) {
		i40e_error(i40e, "failed to validate internal mac address: "
		    "%d", rc);
		return (B_FALSE);
	}
	/* The validated address also becomes the permanent address. */
	bcopy(hw->mac.addr, hw->mac.perm_addr, ETHERADDRL);
	if ((rc = i40e_get_port_mac_addr(hw, hw->mac.port_addr)) !=
	    I40E_SUCCESS) {
		i40e_error(i40e, "failed to retrieve port mac address: %d",
		    rc);
		return (B_FALSE);
	}

	/*
	 * We need to obtain the Default Virtual Station SEID (VSI)
	 * before we can perform other operations on the device.
	 */
	if (!i40e_set_def_vsi_seid(i40e)) {
		i40e_error(i40e, "failed to obtain Default VSI SEID");
		return (B_FALSE);
	}

	return (B_TRUE);
}
1391 
static void
i40e_unconfigure(dev_info_t *devinfo, i40e_t *i40e)
{
	int rc;

	/*
	 * Unwind attach in reverse order, consulting the
	 * i40e_attach_progress bitmask so this is safe to call from any
	 * partially-completed attach as well as from detach.
	 */
	if (i40e->i40e_attach_progress & I40E_ATTACH_ENABLE_INTR)
		(void) i40e_disable_interrupts(i40e);

	if ((i40e->i40e_attach_progress & I40E_ATTACH_LINK_TIMER) &&
	    i40e->i40e_periodic_id != 0) {
		ddi_periodic_delete(i40e->i40e_periodic_id);
		i40e->i40e_periodic_id = 0;
	}

	if (i40e->i40e_attach_progress & I40E_ATTACH_UFM_INIT)
		ddi_ufm_fini(i40e->i40e_ufmh);

	if (i40e->i40e_attach_progress & I40E_ATTACH_MAC) {
		rc = mac_unregister(i40e->i40e_mac_hdl);
		if (rc != 0) {
			i40e_error(i40e, "failed to unregister from mac: %d",
			    rc);
		}
	}

	if (i40e->i40e_attach_progress & I40E_ATTACH_STATS) {
		i40e_stats_fini(i40e);
	}

	if (i40e->i40e_attach_progress & I40E_ATTACH_ADD_INTR)
		i40e_rem_intr_handlers(i40e);

	if (i40e->i40e_attach_progress & I40E_ATTACH_ALLOC_RINGSLOCKS)
		i40e_free_trqpairs(i40e);

	if (i40e->i40e_attach_progress & I40E_ATTACH_ALLOC_INTR)
		i40e_rem_intrs(i40e);

	if (i40e->i40e_attach_progress & I40E_ATTACH_COMMON_CODE)
		i40e_common_code_fini(i40e);

	/* Always attempted; it checks each resource pointer itself. */
	i40e_cleanup_resources(i40e);

	if (i40e->i40e_attach_progress & I40E_ATTACH_PROPS)
		(void) ddi_prop_remove_all(devinfo);

	if (i40e->i40e_attach_progress & I40E_ATTACH_REGS_MAP &&
	    i40e->i40e_osdep_space.ios_reg_handle != NULL) {
		ddi_regs_map_free(&i40e->i40e_osdep_space.ios_reg_handle);
		i40e->i40e_osdep_space.ios_reg_handle = NULL;
	}

	if ((i40e->i40e_attach_progress & I40E_ATTACH_PCI_CONFIG) &&
	    i40e->i40e_osdep_space.ios_cfg_handle != NULL) {
		pci_config_teardown(&i40e->i40e_osdep_space.ios_cfg_handle);
		i40e->i40e_osdep_space.ios_cfg_handle = NULL;
	}

	if (i40e->i40e_attach_progress & I40E_ATTACH_FM_INIT)
		i40e_fm_fini(i40e);

	/* Finally, release the admin queue buffer and the soft state. */
	kmem_free(i40e->i40e_aqbuf, I40E_ADMINQ_BUFSZ);
	kmem_free(i40e, sizeof (i40e_t));

	ddi_set_driver_private(devinfo, NULL);
}
1458 
static boolean_t
i40e_final_init(i40e_t *i40e)
{
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	struct i40e_osdep *osdep = OS_DEP(hw);
	uint8_t pbanum[I40E_PBANUM_STRLEN];
	enum i40e_status_code irc;
	char buf[I40E_DDI_PROP_LEN];

	/*
	 * Publish identifying device properties (PBA, firmware, and API
	 * versions) and verify that register access is still healthy.
	 * A PBA read failure is logged but is not fatal.
	 */
	pbanum[0] = '\0';
	irc = i40e_read_pba_string(hw, pbanum, sizeof (pbanum));
	if (irc != I40E_SUCCESS) {
		i40e_log(i40e, "failed to read PBA string: %d", irc);
	} else {
		(void) ddi_prop_update_string(DDI_DEV_T_NONE, i40e->i40e_dip,
		    "printed-board-assembly", (char *)pbanum);
	}

	/* On DEBUG bits, prove 'buf' is large enough for every format. */
#ifdef	DEBUG
	ASSERT(snprintf(NULL, 0, "%d.%d", hw->aq.fw_maj_ver,
	    hw->aq.fw_min_ver) < sizeof (buf));
	ASSERT(snprintf(NULL, 0, "%x", hw->aq.fw_build) < sizeof (buf));
	ASSERT(snprintf(NULL, 0, "%d.%d", hw->aq.api_maj_ver,
	    hw->aq.api_min_ver) < sizeof (buf));
#endif

	(void) snprintf(buf, sizeof (buf), "%d.%d", hw->aq.fw_maj_ver,
	    hw->aq.fw_min_ver);
	(void) ddi_prop_update_string(DDI_DEV_T_NONE, i40e->i40e_dip,
	    "firmware-version", buf);
	(void) snprintf(buf, sizeof (buf), "%x", hw->aq.fw_build);
	(void) ddi_prop_update_string(DDI_DEV_T_NONE, i40e->i40e_dip,
	    "firmware-build", buf);
	(void) snprintf(buf, sizeof (buf), "%d.%d", hw->aq.api_maj_ver,
	    hw->aq.api_min_ver);
	(void) ddi_prop_update_string(DDI_DEV_T_NONE, i40e->i40e_dip,
	    "api-version", buf);

	if (!i40e_set_hw_bus_info(hw))
		return (B_FALSE);

	/* Any flagged register access fault at this point is fatal. */
	if (i40e_check_acc_handle(osdep->ios_reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_LOST);
		return (B_FALSE);
	}

	return (B_TRUE);
}
1507 
1508 static void
1509 i40e_identify_hardware(i40e_t *i40e)
1510 {
1511 	i40e_hw_t *hw = &i40e->i40e_hw_space;
1512 	struct i40e_osdep *osdep = &i40e->i40e_osdep_space;
1513 
1514 	hw->vendor_id = pci_config_get16(osdep->ios_cfg_handle, PCI_CONF_VENID);
1515 	hw->device_id = pci_config_get16(osdep->ios_cfg_handle, PCI_CONF_DEVID);
1516 	hw->revision_id = pci_config_get8(osdep->ios_cfg_handle,
1517 	    PCI_CONF_REVID);
1518 	hw->subsystem_device_id =
1519 	    pci_config_get16(osdep->ios_cfg_handle, PCI_CONF_SUBSYSID);
1520 	hw->subsystem_vendor_id =
1521 	    pci_config_get16(osdep->ios_cfg_handle, PCI_CONF_SUBVENID);
1522 
1523 	/*
1524 	 * Note that we set the hardware's bus information later on, in
1525 	 * i40e_get_available_resources(). The common code doesn't seem to
1526 	 * require that it be set in any ways, it seems to be mostly for
1527 	 * book-keeping.
1528 	 */
1529 }
1530 
1531 static boolean_t
1532 i40e_regs_map(i40e_t *i40e)
1533 {
1534 	dev_info_t *devinfo = i40e->i40e_dip;
1535 	i40e_hw_t *hw = &i40e->i40e_hw_space;
1536 	struct i40e_osdep *osdep = &i40e->i40e_osdep_space;
1537 	off_t memsize;
1538 	int ret;
1539 
1540 	if (ddi_dev_regsize(devinfo, I40E_ADAPTER_REGSET, &memsize) !=
1541 	    DDI_SUCCESS) {
1542 		i40e_error(i40e, "Used invalid register set to map PCIe regs");
1543 		return (B_FALSE);
1544 	}
1545 
1546 	if ((ret = ddi_regs_map_setup(devinfo, I40E_ADAPTER_REGSET,
1547 	    (caddr_t *)&hw->hw_addr, 0, memsize, &i40e_regs_acc_attr,
1548 	    &osdep->ios_reg_handle)) != DDI_SUCCESS) {
1549 		i40e_error(i40e, "failed to map device registers: %d", ret);
1550 		return (B_FALSE);
1551 	}
1552 
1553 	osdep->ios_reg_size = memsize;
1554 	return (B_TRUE);
1555 }
1556 
1557 /*
1558  * Update parameters required when a new MTU has been configured.  Calculate the
1559  * maximum frame size, as well as, size our DMA buffers which we size in
1560  * increments of 1K.
1561  */
1562 void
1563 i40e_update_mtu(i40e_t *i40e)
1564 {
1565 	uint32_t rx, tx;
1566 
1567 	i40e->i40e_frame_max = i40e->i40e_sdu +
1568 	    sizeof (struct ether_vlan_header) + ETHERFCSL;
1569 
1570 	rx = i40e->i40e_frame_max + I40E_BUF_IPHDR_ALIGNMENT;
1571 	i40e->i40e_rx_buf_size = ((rx >> 10) +
1572 	    ((rx & (((uint32_t)1 << 10) -1)) > 0 ? 1 : 0)) << 10;
1573 
1574 	tx = i40e->i40e_frame_max;
1575 	i40e->i40e_tx_buf_size = ((tx >> 10) +
1576 	    ((tx & (((uint32_t)1 << 10) -1)) > 0 ? 1 : 0)) << 10;
1577 }
1578 
1579 static int
1580 i40e_get_prop(i40e_t *i40e, char *prop, int min, int max, int def)
1581 {
1582 	int val;
1583 
1584 	val = ddi_prop_get_int(DDI_DEV_T_ANY, i40e->i40e_dip, DDI_PROP_DONTPASS,
1585 	    prop, def);
1586 	if (val > max)
1587 		val = max;
1588 	if (val < min)
1589 		val = min;
1590 	return (val);
1591 }
1592 
static void
i40e_init_properties(i40e_t *i40e)
{
	/*
	 * Pull all driver.conf tunables into the instance, each clamped
	 * to its supported range by i40e_get_prop().
	 */
	i40e->i40e_sdu = i40e_get_prop(i40e, "default_mtu",
	    I40E_MIN_MTU, I40E_MAX_MTU, I40E_DEF_MTU);

	i40e->i40e_intr_force = i40e_get_prop(i40e, "intr_force",
	    I40E_INTR_NONE, I40E_INTR_LEGACY, I40E_INTR_NONE);

	i40e->i40e_mr_enable = i40e_get_prop(i40e, "mr_enable",
	    B_FALSE, B_TRUE, B_TRUE);

	/* Ring sizes must be multiples of I40E_DESC_ALIGN; round up. */
	i40e->i40e_tx_ring_size = i40e_get_prop(i40e, "tx_ring_size",
	    I40E_MIN_TX_RING_SIZE, I40E_MAX_TX_RING_SIZE,
	    I40E_DEF_TX_RING_SIZE);
	if ((i40e->i40e_tx_ring_size % I40E_DESC_ALIGN) != 0) {
		i40e->i40e_tx_ring_size = P2ROUNDUP(i40e->i40e_tx_ring_size,
		    I40E_DESC_ALIGN);
	}

	/* The TX block threshold's ceiling depends on the TX ring size. */
	i40e->i40e_tx_block_thresh = i40e_get_prop(i40e, "tx_resched_threshold",
	    I40E_MIN_TX_BLOCK_THRESH,
	    i40e->i40e_tx_ring_size - I40E_TX_MAX_COOKIE,
	    I40E_DEF_TX_BLOCK_THRESH);

	i40e->i40e_num_rx_groups = i40e_get_prop(i40e, "rx_num_groups",
	    I40E_MIN_NUM_RX_GROUPS, I40E_MAX_NUM_RX_GROUPS,
	    I40E_DEF_NUM_RX_GROUPS);

	i40e->i40e_rx_ring_size = i40e_get_prop(i40e, "rx_ring_size",
	    I40E_MIN_RX_RING_SIZE, I40E_MAX_RX_RING_SIZE,
	    I40E_DEF_RX_RING_SIZE);
	if ((i40e->i40e_rx_ring_size % I40E_DESC_ALIGN) != 0) {
		i40e->i40e_rx_ring_size = P2ROUNDUP(i40e->i40e_rx_ring_size,
		    I40E_DESC_ALIGN);
	}

	i40e->i40e_rx_limit_per_intr = i40e_get_prop(i40e, "rx_limit_per_intr",
	    I40E_MIN_RX_LIMIT_PER_INTR,	I40E_MAX_RX_LIMIT_PER_INTR,
	    I40E_DEF_RX_LIMIT_PER_INTR);

	i40e->i40e_tx_hcksum_enable = i40e_get_prop(i40e, "tx_hcksum_enable",
	    B_FALSE, B_TRUE, B_TRUE);

	i40e->i40e_tx_lso_enable = i40e_get_prop(i40e, "tx_lso_enable",
	    B_FALSE, B_TRUE, B_TRUE);

	i40e->i40e_rx_hcksum_enable = i40e_get_prop(i40e, "rx_hcksum_enable",
	    B_FALSE, B_TRUE, B_TRUE);

	i40e->i40e_rx_dma_min = i40e_get_prop(i40e, "rx_dma_threshold",
	    I40E_MIN_RX_DMA_THRESH, I40E_MAX_RX_DMA_THRESH,
	    I40E_DEF_RX_DMA_THRESH);

	i40e->i40e_tx_dma_min = i40e_get_prop(i40e, "tx_dma_threshold",
	    I40E_MIN_TX_DMA_THRESH, I40E_MAX_TX_DMA_THRESH,
	    I40E_DEF_TX_DMA_THRESH);

	i40e->i40e_tx_itr = i40e_get_prop(i40e, "tx_intr_throttle",
	    I40E_MIN_ITR, I40E_MAX_ITR, I40E_DEF_TX_ITR);

	i40e->i40e_rx_itr = i40e_get_prop(i40e, "rx_intr_throttle",
	    I40E_MIN_ITR, I40E_MAX_ITR, I40E_DEF_RX_ITR);

	i40e->i40e_other_itr = i40e_get_prop(i40e, "other_intr_throttle",
	    I40E_MIN_ITR, I40E_MAX_ITR, I40E_DEF_OTHER_ITR);

	/*
	 * Without multiple rings/groups enabled, fall back to the
	 * single-queue-pair, single-group configuration.
	 */
	if (!i40e->i40e_mr_enable) {
		i40e->i40e_num_trqpairs = I40E_TRQPAIR_NOMSIX;
		i40e->i40e_num_rx_groups = I40E_GROUP_NOMSIX;
	}

	/* Derive frame and DMA buffer sizes from the configured MTU. */
	i40e_update_mtu(i40e);
}
1667 
1668 /*
1669  * There are a few constraints on interrupts that we're currently imposing, some
1670  * of which are restrictions from hardware. For a fuller treatment, see
1671  * i40e_intr.c.
1672  *
1673  * Currently, to use MSI-X we require two interrupts be available though in
1674  * theory we should participate in IRM and happily use more interrupts.
1675  *
1676  * Hardware only supports a single MSI being programmed and therefore if we
1677  * don't have MSI-X interrupts available at this time, then we ratchet down the
1678  * number of rings and groups available. Obviously, we only bother with a single
1679  * fixed interrupt.
1680  */
static boolean_t
i40e_alloc_intr_handles(i40e_t *i40e, dev_info_t *devinfo, int intr_type)
{
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	ddi_acc_handle_t rh = i40e->i40e_osdep_space.ios_reg_handle;
	int request, count, actual, rc, min;
	uint32_t reg;

	/*
	 * Determine how many interrupts to request (and the minimum we
	 * can function with) for this interrupt type. MSI and fixed
	 * interrupts are limited to a single vector. For MSI-X we
	 * either request the bare minimum of two when multiple rings
	 * are disabled, or read from hardware how many vectors this PF
	 * has been granted.
	 */
	switch (intr_type) {
	case DDI_INTR_TYPE_FIXED:
	case DDI_INTR_TYPE_MSI:
		request = 1;
		min = 1;
		break;
	case DDI_INTR_TYPE_MSIX:
		min = 2;
		if (!i40e->i40e_mr_enable) {
			request = 2;
			break;
		}
		reg = I40E_READ_REG(hw, I40E_GLPCI_CNF2);
		/*
		 * Should this read fail, we will drop back to using
		 * MSI or fixed interrupts.
		 */
		if (i40e_check_acc_handle(rh) != DDI_FM_OK) {
			ddi_fm_service_impact(i40e->i40e_dip,
			    DDI_SERVICE_DEGRADED);
			return (B_FALSE);
		}
		request = (reg & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
		    I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
		request++;	/* the register value is n - 1 */
		break;
	default:
		panic("bad interrupt type passed to i40e_alloc_intr_handles: "
		    "%d", intr_type);
	}

	rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
	if (rc != DDI_SUCCESS || count < min) {
		i40e_log(i40e, "Get interrupt number failed, "
		    "returned %d, count %d", rc, count);
		return (B_FALSE);
	}

	rc = ddi_intr_get_navail(devinfo, intr_type, &count);
	if (rc != DDI_SUCCESS || count < min) {
		i40e_log(i40e, "Get AVAILABLE interrupt number failed, "
		    "returned %d, count %d", rc, count);
		return (B_FALSE);
	}

	actual = 0;
	i40e->i40e_intr_count = 0;
	i40e->i40e_intr_count_max = 0;
	i40e->i40e_intr_count_min = 0;

	/*
	 * The handle array is sized for the full request even if fewer
	 * vectors end up being allocated; it is sized by i40e_intr_size
	 * so it can be freed correctly at detach time.
	 */
	i40e->i40e_intr_size = request * sizeof (ddi_intr_handle_t);
	ASSERT(i40e->i40e_intr_size != 0);
	i40e->i40e_intr_handles = kmem_alloc(i40e->i40e_intr_size, KM_SLEEP);

	rc = ddi_intr_alloc(devinfo, i40e->i40e_intr_handles, intr_type, 0,
	    min(request, count), &actual, DDI_INTR_ALLOC_NORMAL);
	if (rc != DDI_SUCCESS) {
		i40e_log(i40e, "Interrupt allocation failed with %d.", rc);
		goto alloc_handle_fail;
	}

	i40e->i40e_intr_count = actual;
	i40e->i40e_intr_count_max = request;
	i40e->i40e_intr_count_min = min;

	/*
	 * The DDI may hand back fewer vectors than we requested; if we
	 * got less than our minimum the allocation is unusable.
	 */
	if (actual < min) {
		i40e_log(i40e, "actual (%d) is less than minimum (%d).",
		    actual, min);
		goto alloc_handle_fail;
	}

	/*
	 * Record the priority and capabilities for our first vector.  Once
	 * we have it, that's our priority until detach time.  Even if we
	 * eventually participate in IRM, our priority shouldn't change.
	 */
	rc = ddi_intr_get_pri(i40e->i40e_intr_handles[0], &i40e->i40e_intr_pri);
	if (rc != DDI_SUCCESS) {
		i40e_log(i40e,
		    "Getting interrupt priority failed with %d.", rc);
		goto alloc_handle_fail;
	}

	rc = ddi_intr_get_cap(i40e->i40e_intr_handles[0], &i40e->i40e_intr_cap);
	if (rc != DDI_SUCCESS) {
		i40e_log(i40e,
		    "Getting interrupt capabilities failed with %d.", rc);
		goto alloc_handle_fail;
	}

	i40e->i40e_intr_type = intr_type;
	return (B_TRUE);

alloc_handle_fail:

	/* Unwind any interrupt state and handles allocated above. */
	i40e_rem_intrs(i40e);
	return (B_FALSE);
}
1787 
/*
 * Allocate interrupts for this instance, preferring MSI-X, then MSI,
 * and finally fixed interrupts, and derive the ring and group counts
 * from what was actually obtained.
 */
static boolean_t
i40e_alloc_intrs(i40e_t *i40e, dev_info_t *devinfo)
{
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	int intr_types, rc;
	uint_t max_trqpairs;

	/*
	 * The maximum number of queues per traffic class differs
	 * between the X710 and X722 controller families.
	 */
	if (i40e_is_x722(i40e)) {
		max_trqpairs = I40E_722_MAX_TC_QUEUES;
	} else {
		max_trqpairs = I40E_710_MAX_TC_QUEUES;
	}

	rc = ddi_intr_get_supported_types(devinfo, &intr_types);
	if (rc != DDI_SUCCESS) {
		i40e_error(i40e, "failed to get supported interrupt types: %d",
		    rc);
		return (B_FALSE);
	}

	i40e->i40e_intr_type = 0;

	/*
	 * We need to determine the number of queue pairs per traffic
	 * class. We only have one traffic class (TC0), so we'll base
	 * this off the number of interrupts provided. Furthermore,
	 * since we only use one traffic class, the number of queues
	 * per traffic class and per VSI are the same.
	 */
	if ((intr_types & DDI_INTR_TYPE_MSIX) &&
	    (i40e->i40e_intr_force <= I40E_INTR_MSIX) &&
	    (i40e_alloc_intr_handles(i40e, devinfo, DDI_INTR_TYPE_MSIX))) {
		uint32_t n, qp_cap, num_trqpairs;

		/*
		 * While we want the number of queue pairs to match
		 * the number of interrupts, we must keep stay in
		 * bounds of the maximum number of queues per traffic
		 * class. We subtract one from i40e_intr_count to
		 * account for interrupt zero; which is currently
		 * restricted to admin queue commands and other
		 * interrupt causes.
		 */
		n = MIN(i40e->i40e_intr_count - 1, max_trqpairs);
		ASSERT3U(n, >, 0);

		/*
		 * Round up to the nearest power of two to ensure that
		 * the QBASE aligns with the TC size which must be
		 * programmed as a power of two. See the queue mapping
		 * description in section 7.4.9.5.5.1.
		 *
		 * If i40e_intr_count - 1 is not a power of two then
		 * some queue pairs on the same VSI will have to share
		 * an interrupt.
		 *
		 * We may want to revisit this logic in a future where
		 * we have more interrupts and more VSIs. Otherwise,
		 * each VSI will use as many interrupts as possible.
		 * Using more QPs per VSI means better RSS for each
		 * group, but at the same time may require more
		 * sharing of interrupts across VSIs. This may be a
		 * good candidate for a .conf tunable.
		 */
		n = 0x1 << ddi_fls(n);
		i40e->i40e_num_trqpairs_per_vsi = n;

		/*
		 * Make sure the number of tx/rx qpairs does not exceed
		 * the device's capabilities.
		 */
		ASSERT3U(i40e->i40e_num_rx_groups, >, 0);
		qp_cap = MIN(hw->func_caps.num_rx_qp, hw->func_caps.num_tx_qp);
		num_trqpairs = i40e->i40e_num_trqpairs_per_vsi *
		    i40e->i40e_num_rx_groups;
		if (num_trqpairs > qp_cap) {
			/*
			 * Shed rx groups (always keeping at least one)
			 * until the total fits under the device cap.
			 */
			i40e->i40e_num_rx_groups = MAX(1, qp_cap /
			    i40e->i40e_num_trqpairs_per_vsi);
			num_trqpairs = i40e->i40e_num_trqpairs_per_vsi *
			    i40e->i40e_num_rx_groups;
			i40e_log(i40e, "Rx groups restricted to %u",
			    i40e->i40e_num_rx_groups);
		}
		ASSERT3U(num_trqpairs, >, 0);
		i40e->i40e_num_trqpairs = num_trqpairs;
		return (B_TRUE);
	}

	/*
	 * We only use multiple transmit/receive pairs when MSI-X interrupts are
	 * available due to the fact that the device basically only supports a
	 * single MSI interrupt.
	 */
	i40e->i40e_num_trqpairs = I40E_TRQPAIR_NOMSIX;
	i40e->i40e_num_trqpairs_per_vsi = i40e->i40e_num_trqpairs;
	i40e->i40e_num_rx_groups = I40E_GROUP_NOMSIX;

	if ((intr_types & DDI_INTR_TYPE_MSI) &&
	    (i40e->i40e_intr_force <= I40E_INTR_MSI)) {
		if (i40e_alloc_intr_handles(i40e, devinfo, DDI_INTR_TYPE_MSI))
			return (B_TRUE);
	}

	if (intr_types & DDI_INTR_TYPE_FIXED) {
		if (i40e_alloc_intr_handles(i40e, devinfo, DDI_INTR_TYPE_FIXED))
			return (B_TRUE);
	}

	return (B_FALSE);
}
1898 
1899 /*
1900  * Map different interrupts to MSI-X vectors.
1901  */
1902 static boolean_t
1903 i40e_map_intrs_to_vectors(i40e_t *i40e)
1904 {
1905 	if (i40e->i40e_intr_type != DDI_INTR_TYPE_MSIX) {
1906 		return (B_TRUE);
1907 	}
1908 
1909 	/*
1910 	 * Each queue pair is mapped to a single interrupt, so
1911 	 * transmit and receive interrupts for a given queue share the
1912 	 * same vector. Vector zero is reserved for the admin queue.
1913 	 */
1914 	for (uint_t i = 0; i < i40e->i40e_num_trqpairs; i++) {
1915 		uint_t vector = i % (i40e->i40e_intr_count - 1);
1916 
1917 		i40e->i40e_trqpairs[i].itrq_rx_intrvec = vector + 1;
1918 		i40e->i40e_trqpairs[i].itrq_tx_intrvec = vector + 1;
1919 	}
1920 
1921 	return (B_TRUE);
1922 }
1923 
/*
 * Register an interrupt handler for each allocated vector. For MSI-X,
 * each vector gets its own handler with the vector number passed as the
 * second handler argument; MSI and fixed interrupts use a single
 * handler.
 */
static boolean_t
i40e_add_intr_handlers(i40e_t *i40e)
{
	int rc, vector;

	switch (i40e->i40e_intr_type) {
	case DDI_INTR_TYPE_MSIX:
		for (vector = 0; vector < i40e->i40e_intr_count; vector++) {
			rc = ddi_intr_add_handler(
			    i40e->i40e_intr_handles[vector],
			    (ddi_intr_handler_t *)i40e_intr_msix, i40e,
			    (void *)(uintptr_t)vector);
			if (rc != DDI_SUCCESS) {
				i40e_log(i40e, "Add interrupt handler (MSI-X) "
				    "failed: return %d, vector %d", rc, vector);
				/*
				 * Unwind the handlers we registered so
				 * far before failing.
				 */
				for (vector--; vector >= 0; vector--) {
					(void) ddi_intr_remove_handler(
					    i40e->i40e_intr_handles[vector]);
				}
				return (B_FALSE);
			}
		}
		break;
	case DDI_INTR_TYPE_MSI:
		rc = ddi_intr_add_handler(i40e->i40e_intr_handles[0],
		    (ddi_intr_handler_t *)i40e_intr_msi, i40e, NULL);
		if (rc != DDI_SUCCESS) {
			i40e_log(i40e, "Add interrupt handler (MSI) failed: "
			    "return %d", rc);
			return (B_FALSE);
		}
		break;
	case DDI_INTR_TYPE_FIXED:
		rc = ddi_intr_add_handler(i40e->i40e_intr_handles[0],
		    (ddi_intr_handler_t *)i40e_intr_legacy, i40e, NULL);
		if (rc != DDI_SUCCESS) {
			i40e_log(i40e, "Add interrupt handler (legacy) failed:"
			    " return %d", rc);
			return (B_FALSE);
		}
		break;
	default:
		/* Cast to pacify lint */
		panic("i40e_intr_type %p contains an unknown type: %d",
		    (void *)i40e, i40e->i40e_intr_type);
	}

	return (B_TRUE);
}
1973 
1974 /*
1975  * Perform periodic checks. Longer term, we should be thinking about additional
1976  * things here:
1977  *
1978  * o Stall Detection
1979  * o Temperature sensor detection
1980  * o Device resetting
1981  * o Statistics updating to avoid wraparound
1982  */
static void
i40e_timer(void *arg)
{
	i40e_t *i40e = arg;

	/*
	 * For now the only periodic work is a link state check, done
	 * under the general lock.
	 */
	mutex_enter(&i40e->i40e_general_lock);
	i40e_link_check(i40e);
	mutex_exit(&i40e->i40e_general_lock);
}
1992 
1993 /*
1994  * Get the hardware state, and scribble away anything that needs scribbling.
1995  */
static void
i40e_get_hw_state(i40e_t *i40e, i40e_hw_t *hw)
{
	int rc;

	ASSERT(MUTEX_HELD(&i40e->i40e_general_lock));

	/*
	 * Refresh link information from firmware and propagate it into
	 * our soft state.
	 */
	(void) i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
	i40e_link_check(i40e);

	/*
	 * Try and determine our PHY. Note that we may have to retry to and
	 * delay to detect fiber correctly.
	 */
	rc = i40e_aq_get_phy_capabilities(hw, B_FALSE, B_TRUE, &i40e->i40e_phy,
	    NULL);
	if (rc == I40E_ERR_UNKNOWN_PHY) {
		i40e_msec_delay(200);
		rc = i40e_aq_get_phy_capabilities(hw, B_FALSE, B_TRUE,
		    &i40e->i40e_phy, NULL);
	}

	/*
	 * A failure here is only logged; we press on and gather the
	 * remaining state regardless.
	 */
	if (rc != I40E_SUCCESS) {
		if (rc == I40E_ERR_UNKNOWN_PHY) {
			i40e_error(i40e, "encountered unknown PHY type, "
			    "not attaching.");
		} else {
			i40e_error(i40e, "error getting physical capabilities: "
			    "%d, %d", rc, hw->aq.asq_last_status);
		}
	}

	rc = i40e_update_link_info(hw);
	if (rc != I40E_SUCCESS) {
		i40e_error(i40e, "failed to update link information: %d", rc);
	}

	/*
	 * In general, we don't want to mask off (as in stop from being a cause)
	 * any of the interrupts that the phy might be able to generate.
	 */
	rc = i40e_aq_set_phy_int_mask(hw, 0, NULL);
	if (rc != I40E_SUCCESS) {
		i40e_error(i40e, "failed to update phy link mask: %d", rc);
	}
}
2042 
2043 /*
2044  * Go through and re-initialize any existing filters that we may have set up for
2045  * this device. Note that we would only expect them to exist if hardware had
2046  * already been initialized and we had just reset it. While we're not
2047  * implementing this yet, we're keeping this around for when we add reset
2048  * capabilities, so this isn't forgotten.
2049  */
/* ARGSUSED */
static void
i40e_init_macaddrs(i40e_t *i40e, i40e_hw_t *hw)
{
	/*
	 * Intentionally empty: restoring MAC filters after a device
	 * reset is not yet implemented.
	 */
}
2055 
2056 /*
2057  * Set the properties which have common values across all the VSIs.
2058  * Consult the "Add VSI" command section (7.4.9.5.5.1) for a
2059  * complete description of these properties.
2060  */
static void
i40e_set_shared_vsi_props(i40e_t *i40e,
    struct i40e_aqc_vsi_properties_data *info, uint_t vsi_idx)
{
	uint_t tc_queues;
	uint16_t vsi_qp_base;

	/*
	 * It's important that we use bitwise-OR here; callers to this
	 * function might enable other sections before calling this
	 * function.
	 */
	info->valid_sections |= LE_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID |
	    I40E_AQ_VSI_PROP_VLAN_VALID);

	/*
	 * Calculate the starting QP index for this VSI. This base is
	 * relative to the PF queue space; so a value of 0 for PF#1
	 * represents the absolute index PFLAN_QALLOC_FIRSTQ for PF#1.
	 */
	vsi_qp_base = vsi_idx * i40e->i40e_num_trqpairs_per_vsi;
	info->mapping_flags = LE_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
	info->queue_mapping[0] =
	    LE_16((vsi_qp_base << I40E_AQ_VSI_QUEUE_SHIFT) &
	    I40E_AQ_VSI_QUEUE_MASK);

	/*
	 * tc_queues determines the size of the traffic class, where
	 * the size is 2^^tc_queues to a maximum of 64 for the X710
	 * and 128 for the X722.
	 *
	 * Some examples:
	 *	i40e_num_trqpairs_per_vsi == 1 =>  tc_queues = 0, 2^^0 = 1.
	 *	i40e_num_trqpairs_per_vsi == 7 =>  tc_queues = 3, 2^^3 = 8.
	 *	i40e_num_trqpairs_per_vsi == 8 =>  tc_queues = 3, 2^^3 = 8.
	 *	i40e_num_trqpairs_per_vsi == 9 =>  tc_queues = 4, 2^^4 = 16.
	 *	i40e_num_trqpairs_per_vsi == 17 => tc_queues = 5, 2^^5 = 32.
	 *	i40e_num_trqpairs_per_vsi == 64 => tc_queues = 6, 2^^6 = 64.
	 */
	/* ddi_fls(n - 1) yields ceil(log2(n)), per the table above. */
	tc_queues = ddi_fls(i40e->i40e_num_trqpairs_per_vsi - 1);

	/*
	 * The TC queue mapping is in relation to the VSI queue space.
	 * Since we are only using one traffic class (TC0) we always
	 * start at queue offset 0.
	 */
	info->tc_mapping[0] =
	    LE_16(((0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) &
	    I40E_AQ_VSI_TC_QUE_OFFSET_MASK) |
	    ((tc_queues << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) &
	    I40E_AQ_VSI_TC_QUE_NUMBER_MASK));

	/*
	 * I40E_AQ_VSI_PVLAN_MODE_ALL ("VLAN driver insertion mode")
	 *
	 *	Allow tagged and untagged packets to be sent to this
	 *	VSI from the host.
	 *
	 * I40E_AQ_VSI_PVLAN_EMOD_NOTHING ("VLAN and UP expose mode")
	 *
	 *	Leave the tag on the frame and place no VLAN
	 *	information in the descriptor. We want this mode
	 *	because our MAC layer will take care of the VLAN tag,
	 *	if there is one.
	 */
	info->port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
	    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
}
2129 
2130 /*
2131  * Delete the VSI at this index, if one exists. We assume there is no
2132  * action we can take if this command fails but to log the failure.
2133  */
2134 static void
2135 i40e_delete_vsi(i40e_t *i40e, uint_t idx)
2136 {
2137 	i40e_hw_t	*hw = &i40e->i40e_hw_space;
2138 	uint16_t	seid = i40e->i40e_vsis[idx].iv_seid;
2139 
2140 	if (seid != 0) {
2141 		int rc;
2142 
2143 		rc = i40e_aq_delete_element(hw, seid, NULL);
2144 
2145 		if (rc != I40E_SUCCESS) {
2146 			i40e_error(i40e, "Failed to delete VSI %d: %d",
2147 			    rc, hw->aq.asq_last_status);
2148 		}
2149 
2150 		i40e->i40e_vsis[idx].iv_seid = 0;
2151 	}
2152 }
2153 
2154 /*
2155  * Add a new VSI.
2156  */
static boolean_t
i40e_add_vsi(i40e_t *i40e, i40e_hw_t *hw, uint_t idx)
{
	struct i40e_vsi_context	ctx;
	i40e_rx_group_t		*rxg;
	int			rc;

	/*
	 * The default VSI is created by the controller. This function
	 * creates new, non-default VSIs only.
	 */
	ASSERT3U(idx, !=, 0);

	/*
	 * Attach the new VSI to our VEB as a PF-type VSI and apply the
	 * properties shared by all of our VSIs.
	 */
	bzero(&ctx, sizeof (struct i40e_vsi_context));
	ctx.uplink_seid = i40e->i40e_veb_seid;
	ctx.pf_num = hw->pf_id;
	ctx.flags = I40E_AQ_VSI_TYPE_PF;
	ctx.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
	i40e_set_shared_vsi_props(i40e, &ctx.info, idx);

	rc = i40e_aq_add_vsi(hw, &ctx, NULL);
	if (rc != I40E_SUCCESS) {
		i40e_error(i40e, "i40e_aq_add_vsi() failed %d: %d", rc,
		    hw->aq.asq_last_status);
		return (B_FALSE);
	}

	/*
	 * Record the identifiers firmware assigned to the new VSI.
	 */
	rxg = &i40e->i40e_rx_groups[idx];
	rxg->irg_vsi_seid = ctx.seid;
	i40e->i40e_vsis[idx].iv_number = ctx.vsi_number;
	i40e->i40e_vsis[idx].iv_seid = ctx.seid;
	i40e->i40e_vsis[idx].iv_stats_id = LE_16(ctx.info.stat_counter_idx);

	if (i40e_stat_vsi_init(i40e, idx) == B_FALSE)
		return (B_FALSE);

	return (B_TRUE);
}
2195 
2196 /*
2197  * Configure the hardware for the Default Virtual Station Interface (VSI).
2198  */
static boolean_t
i40e_config_def_vsi(i40e_t *i40e, i40e_hw_t *hw)
{
	struct i40e_vsi_context	ctx;
	i40e_rx_group_t *def_rxg;
	int err;
	struct i40e_aqc_remove_macvlan_element_data filt;

	/*
	 * Fetch the controller-created default VSI's parameters so we
	 * can update them in place.
	 */
	bzero(&ctx, sizeof (struct i40e_vsi_context));
	ctx.seid = I40E_DEF_VSI_SEID(i40e);
	ctx.pf_num = hw->pf_id;
	err = i40e_aq_get_vsi_params(hw, &ctx, NULL);
	if (err != I40E_SUCCESS) {
		i40e_error(i40e, "get VSI params failed with %d", err);
		return (B_FALSE);
	}

	ctx.info.valid_sections = 0;
	i40e->i40e_vsis[0].iv_number = ctx.vsi_number;
	i40e->i40e_vsis[0].iv_stats_id = LE_16(ctx.info.stat_counter_idx);
	if (i40e_stat_vsi_init(i40e, 0) == B_FALSE)
		return (B_FALSE);

	i40e_set_shared_vsi_props(i40e, &ctx.info, I40E_DEF_VSI_IDX);

	err = i40e_aq_update_vsi_params(hw, &ctx, NULL);
	if (err != I40E_SUCCESS) {
		i40e_error(i40e, "Update VSI params failed with %d", err);
		return (B_FALSE);
	}

	def_rxg = &i40e->i40e_rx_groups[0];
	def_rxg->irg_vsi_seid = I40E_DEF_VSI_SEID(i40e);

	/*
	 * We have seen three different behaviors in regards to the
	 * Default VSI and its implicit L2 MAC+VLAN filter.
	 *
	 * 1. It has an implicit filter for the factory MAC address
	 *    and this filter counts against 'ifr_nmacfilt_used'.
	 *
	 * 2. It has an implicit filter for the factory MAC address
	 *    and this filter DOES NOT count against 'ifr_nmacfilt_used'.
	 *
	 * 3. It DOES NOT have an implicit filter.
	 *
	 * All three of these cases are accounted for below. If we
	 * fail to remove the L2 filter (ENOENT) then we assume there
	 * wasn't one. Otherwise, if we successfully remove the
	 * filter, we make sure to update the 'ifr_nmacfilt_used'
	 * count accordingly.
	 *
	 * We remove this filter to prevent duplicate delivery of
	 * packets destined for the primary MAC address as DLS will
	 * create the same filter on a non-default VSI for the primary
	 * MAC client.
	 *
	 * If you change the following code please test it across as
	 * many X700 series controllers and firmware revisions as you
	 * can.
	 */
	bzero(&filt, sizeof (filt));
	bcopy(hw->mac.port_addr, filt.mac_addr, ETHERADDRL);
	filt.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
	filt.vlan_tag = 0;

	ASSERT3U(i40e->i40e_resources.ifr_nmacfilt_used, <=, 1);
	i40e_log(i40e, "Num L2 filters: %u",
	    i40e->i40e_resources.ifr_nmacfilt_used);

	err = i40e_aq_remove_macvlan(hw, I40E_DEF_VSI_SEID(i40e), &filt, 1,
	    NULL);
	if (err == I40E_SUCCESS) {
		i40e_log(i40e,
		    "Removed L2 filter from Default VSI with SEID %u",
		    I40E_DEF_VSI_SEID(i40e));
	} else if (hw->aq.asq_last_status == ENOENT) {
		/*
		 * NOTE(review): this compares an admin queue status
		 * against errno's ENOENT; presumably I40E_AQ_RC_ENOENT
		 * (same numeric value) was intended -- confirm.
		 */
		i40e_log(i40e,
		    "No L2 filter for Default VSI with SEID %u",
		    I40E_DEF_VSI_SEID(i40e));
	} else {
		i40e_error(i40e, "Failed to remove L2 filter from"
		    " Default VSI with SEID %u: %d (%d)",
		    I40E_DEF_VSI_SEID(i40e), err, hw->aq.asq_last_status);

		return (B_FALSE);
	}

	/*
	 *  As mentioned above, the controller created an implicit L2
	 *  filter for the primary MAC. We want to remove both the
	 *  filter and decrement the filter count. However, not all
	 *  controllers count this implicit filter against the total
	 *  MAC filter count. So here we are making sure it is either
	 *  one or zero. If it is one, then we know it is for the
	 *  implicit filter and we should decrement since we just
	 *  removed the filter above. If it is zero then we know the
	 *  controller that does not count the implicit filter, and it
	 *  was enough to just remove it; we leave the count alone.
	 *  But if it is neither, then we have never seen a controller
	 *  like this before and we should fail to attach.
	 *
	 *  It is unfortunate that this code must exist but the
	 *  behavior of this implicit L2 filter and its corresponding
	 *  count were discovered through empirical testing. The
	 *  programming manuals hint at this filter but do not
	 *  explicitly call out the exact behavior.
	 */
	if (i40e->i40e_resources.ifr_nmacfilt_used == 1) {
		i40e->i40e_resources.ifr_nmacfilt_used--;
	} else {
		if (i40e->i40e_resources.ifr_nmacfilt_used != 0) {
			i40e_error(i40e, "Unexpected L2 filter count: %u"
			    " (expected 0)",
			    i40e->i40e_resources.ifr_nmacfilt_used);
			return (B_FALSE);
		}
	}

	return (B_TRUE);
}
2320 
/*
 * Seed each VSI with a random RSS hash key via the admin queue. On the
 * X722 the key consists of a standard portion plus an extended hash
 * key, both filled from the same random seed.
 */
static boolean_t
i40e_config_rss_key_x722(i40e_t *i40e, i40e_hw_t *hw)
{
	for (uint_t i = 0; i < i40e->i40e_num_rx_groups; i++) {
		uint32_t seed[I40E_PFQF_HKEY_MAX_INDEX + 1];
		struct i40e_aqc_get_set_rss_key_data key;
		const char *u8seed;
		enum i40e_status_code status;
		uint16_t vsi_number = i40e->i40e_vsis[i].iv_number;

		(void) random_get_pseudo_bytes((uint8_t *)seed, sizeof (seed));
		u8seed = (char *)seed;

		/*
		 * The key structure must be large enough to hold both
		 * of its parts, which we fill separately below.
		 */
		CTASSERT(sizeof (key) >= (sizeof (key.standard_rss_key) +
		    sizeof (key.extended_hash_key)));

		bcopy(u8seed, key.standard_rss_key,
		    sizeof (key.standard_rss_key));
		bcopy(&u8seed[sizeof (key.standard_rss_key)],
		    key.extended_hash_key, sizeof (key.extended_hash_key));

		ASSERT3U(vsi_number, !=, 0);
		status = i40e_aq_set_rss_key(hw, vsi_number, &key);

		if (status != I40E_SUCCESS) {
			i40e_error(i40e, "failed to set RSS key for VSI %u: %d",
			    vsi_number, status);
			return (B_FALSE);
		}
	}

	return (B_TRUE);
}
2354 
2355 /*
2356  * Configure the RSS key. For the X710 controller family, this is set on a
2357  * per-PF basis via registers. For the X722, this is done on a per-VSI basis
2358  * through the admin queue.
2359  */
2360 static boolean_t
2361 i40e_config_rss_key(i40e_t *i40e, i40e_hw_t *hw)
2362 {
2363 	if (i40e_is_x722(i40e)) {
2364 		if (!i40e_config_rss_key_x722(i40e, hw))
2365 			return (B_FALSE);
2366 	} else {
2367 		uint32_t seed[I40E_PFQF_HKEY_MAX_INDEX + 1];
2368 
2369 		(void) random_get_pseudo_bytes((uint8_t *)seed, sizeof (seed));
2370 		for (uint_t i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
2371 			i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), seed[i]);
2372 	}
2373 
2374 	return (B_TRUE);
2375 }
2376 
2377 /*
2378  * Populate the LUT. The size of each entry in the LUT depends on the controller
2379  * family, with the X722 using a known 7-bit width. On the X710 controller, this
2380  * is programmed through its control registers where as on the X722 this is
2381  * configured through the admin queue. Also of note, the X722 allows the LUT to
2382  * be set on a per-PF or VSI basis. At this time we use the PF setting. If we
2383  * decide to use the per-VSI LUT in the future, then we will need to modify the
2384  * i40e_add_vsi() function to set the RSS LUT bits in the queueing section.
2385  *
2386  * We populate the LUT in a round robin fashion with the rx queue indices from 0
2387  * to i40e_num_trqpairs_per_vsi - 1.
2388  */
static boolean_t
i40e_config_rss_hlut(i40e_t *i40e, i40e_hw_t *hw)
{
	uint32_t *hlut;
	uint8_t lut_mask;
	uint_t i;
	boolean_t ret = B_FALSE;

	/*
	 * We always configure the PF with a table size of 512 bytes in
	 * i40e_chip_start().
	 */
	hlut = kmem_alloc(I40E_HLUT_TABLE_SIZE, KM_NOSLEEP);
	if (hlut == NULL) {
		i40e_error(i40e, "i40e_config_rss() buffer allocation failed");
		return (B_FALSE);
	}

	/*
	 * The width of the X722 is apparently defined to be 7 bits, regardless
	 * of the capability.
	 */
	if (i40e_is_x722(i40e)) {
		lut_mask = (1 << 7) - 1;
	} else {
		lut_mask = (1 << hw->func_caps.rss_table_entry_width) - 1;
	}

	/*
	 * Each byte of the LUT selects an rx queue index; spread the
	 * queue indices round robin, clamped to the LUT entry width.
	 */
	for (i = 0; i < I40E_HLUT_TABLE_SIZE; i++) {
		((uint8_t *)hlut)[i] =
		    (i % i40e->i40e_num_trqpairs_per_vsi) & lut_mask;
	}

	if (i40e_is_x722(i40e)) {
		enum i40e_status_code status;

		/* On the X722 the LUT is set through the admin queue. */
		status = i40e_aq_set_rss_lut(hw, 0, B_TRUE, (uint8_t *)hlut,
		    I40E_HLUT_TABLE_SIZE);

		if (status != I40E_SUCCESS) {
			i40e_error(i40e, "failed to set RSS LUT %d: %d",
			    status, hw->aq.asq_last_status);
			goto out;
		}
	} else {
		/*
		 * On the X710 the LUT is written 32 bits at a time into
		 * the HLUT registers.
		 */
		for (i = 0; i < I40E_HLUT_TABLE_SIZE >> 2; i++) {
			I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i), hlut[i]);
		}
	}
	ret = B_TRUE;
out:
	kmem_free(hlut, I40E_HLUT_TABLE_SIZE);
	return (ret);
}
2443 
2444 /*
2445  * Set up RSS.
2446  *	1. Seed the hash key.
2447  *	2. Enable PCTYPEs for the hash filter.
2448  *	3. Populate the LUT.
2449  */
static boolean_t
i40e_config_rss(i40e_t *i40e, i40e_hw_t *hw)
{
	uint64_t hena;

	/*
	 * 1. Seed the hash key
	 */
	if (!i40e_config_rss_key(i40e, hw))
		return (B_FALSE);

	/*
	 * 2. Configure PCTYPES
	 *
	 * Build the bitmask of packet classifier types that should
	 * participate in RSS hashing.
	 */
	hena = (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
	    (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
	    (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
	    (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
	    (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4) |
	    (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
	    (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
	    (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
	    (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
	    (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6) |
	    (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD);

	/*
	 * Add additional types supported by the X722 controller.
	 */
	if (i40e_is_x722(i40e)) {
		hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
		    (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
		    (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) |
		    (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
		    (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
		    (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK);
	}

	/* The 64-bit mask is split across two 32-bit HENA registers. */
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));

	/*
	 * 3. Populate LUT
	 */
	return (i40e_config_rss_hlut(i40e, hw));
}
2496 
2497 /*
2498  * Wrapper to kick the chipset on.
2499  */
static boolean_t
i40e_chip_start(i40e_t *i40e)
{
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	struct i40e_filter_control_settings filter;
	int rc;
	uint8_t err;

	/*
	 * On firmware older than 4.33, explicitly restart link
	 * auto-negotiation after a short delay -- presumably a
	 * firmware workaround; TODO confirm against Intel shared-code
	 * notes.
	 */
	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
	    (hw->aq.fw_maj_ver < 4)) {
		i40e_msec_delay(75);
		if (i40e_aq_set_link_restart_an(hw, TRUE, NULL) !=
		    I40E_SUCCESS) {
			i40e_error(i40e, "failed to restart link: admin queue "
			    "error: %d", hw->aq.asq_last_status);
			return (B_FALSE);
		}
	}

	/* Determine hardware state */
	i40e_get_hw_state(i40e, hw);

	/* For now, we always disable Ethernet Flow Control. */
	hw->fc.requested_mode = I40E_FC_NONE;
	rc = i40e_set_fc(hw, &err, B_TRUE);
	if (rc != I40E_SUCCESS) {
		i40e_error(i40e, "Setting flow control failed, returned %d"
		    " with error: 0x%x", rc, err);
		return (B_FALSE);
	}

	/* Initialize mac addresses. */
	i40e_init_macaddrs(i40e, hw);

	/*
	 * Set up the filter control. If the hash lut size is changed from
	 * I40E_HASH_LUT_SIZE_512 then I40E_HLUT_TABLE_SIZE and
	 * i40e_config_rss_hlut() will need to be updated.
	 */
	bzero(&filter, sizeof (filter));
	filter.enable_ethtype = TRUE;
	filter.enable_macvlan = TRUE;
	filter.hash_lut_size = I40E_HASH_LUT_SIZE_512;

	rc = i40e_set_filter_control(hw, &filter);
	if (rc != I40E_SUCCESS) {
		i40e_error(i40e, "i40e_set_filter_control() returned %d", rc);
		return (B_FALSE);
	}

	i40e_intr_chip_init(i40e);

	rc = i40e_get_mac_seid(i40e);
	if (rc == -1) {
		i40e_error(i40e, "failed to obtain MAC Uplink SEID");
		return (B_FALSE);
	}
	i40e->i40e_mac_seid = (uint16_t)rc;

	/*
	 * Create a VEB in order to support multiple VSIs. Each VSI
	 * functions as a MAC group. This call sets the PF's MAC as
	 * the uplink port and the PF's default VSI as the default
	 * downlink port.
	 *
	 * NOTE(review): the 0x1 argument appears to be the enabled TC
	 * bitmap (TC0 only) -- confirm against i40e_aq_add_veb().
	 */
	rc = i40e_aq_add_veb(hw, i40e->i40e_mac_seid, I40E_DEF_VSI_SEID(i40e),
	    0x1, B_TRUE, &i40e->i40e_veb_seid, B_FALSE, NULL);
	if (rc != I40E_SUCCESS) {
		i40e_error(i40e, "i40e_aq_add_veb() failed %d: %d", rc,
		    hw->aq.asq_last_status);
		return (B_FALSE);
	}

	if (!i40e_config_def_vsi(i40e, hw))
		return (B_FALSE);

	/* VSI 0 is the default; create the remaining VSIs. */
	for (uint_t i = 1; i < i40e->i40e_num_rx_groups; i++) {
		if (!i40e_add_vsi(i40e, hw, i))
			return (B_FALSE);
	}

	if (!i40e_config_rss(i40e, hw))
		return (B_FALSE);

	i40e_flush(hw);

	return (B_TRUE);
}
2588 
2589 /*
2590  * Take care of tearing down the rx ring. See 8.3.3.1.2 for more information.
2591  */
static void
i40e_shutdown_rx_ring(i40e_trqpair_t *itrq)
{
	i40e_t *i40e = itrq->itrq_i40e;
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	uint32_t reg;

	/*
	 * Step 1. 8.3.3.1.2 suggests the interrupt is removed from the
	 * hardware interrupt linked list (see i40e_intr.c) but for
	 * simplicity we keep this list immutable until the device
	 * (distinct from an individual ring) is stopped.
	 */

	/*
	 * Step 2. Request that the queue be disabled by clearing
	 * QENA_REQ. It may not be set due to unwinding from failures
	 * and a partially enabled ring set; in that case there is
	 * nothing to do.
	 */
	reg = I40E_READ_REG(hw, I40E_QRX_ENA(itrq->itrq_index));
	if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK))
		return;
	VERIFY((reg & I40E_QRX_ENA_QENA_REQ_MASK) ==
	    I40E_QRX_ENA_QENA_REQ_MASK);
	reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
	I40E_WRITE_REG(hw, I40E_QRX_ENA(itrq->itrq_index), reg);

	/*
	 * Step 3. Wait for the disable to take, by having QENA_STAT in the FPM
	 * be cleared. Note that we could still receive data in the queue during
	 * this time. We don't actually wait for this now and instead defer this
	 * to i40e_shutdown_ring_wait(), after we've interleaved disabling the
	 * TX queue as well.
	 */
}
2627 
/*
 * Take care of tearing down the tx ring; the steps mirror the rx case
 * above.
 */
static void
i40e_shutdown_tx_ring(i40e_trqpair_t *itrq)
{
	i40e_t *i40e = itrq->itrq_i40e;
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	uint32_t reg;

	/*
	 * Step 2. Set the SET_QDIS flag for the queue.
	 */
	i40e_pre_tx_queue_cfg(hw, itrq->itrq_index, B_FALSE);

	/*
	 * Step 3. Wait at least 400 usec.
	 */
	drv_usecwait(500);	/* 500 gives margin over the 400 usec minimum */

	/*
	 * Step 4. Clear the QENA_REQ flag which tells hardware to
	 * quiesce. If QENA_REQ is not already set then that means that
	 * we likely already tried to disable this queue.
	 */
	reg = I40E_READ_REG(hw, I40E_QTX_ENA(itrq->itrq_index));
	if ((reg & I40E_QTX_ENA_QENA_REQ_MASK) != 0) {
		reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
		I40E_WRITE_REG(hw, I40E_QTX_ENA(itrq->itrq_index), reg);
	}

	/*
	 * Step 5. Wait for the drain to finish. This will be done by the
	 * hardware removing the QENA_STAT flag from the queue. Rather than
	 * waiting here, we interleave it with the receive shutdown in
	 * i40e_shutdown_ring_wait().
	 */
}
2663 
2664 /*
2665  * Wait for a ring to be shut down. e.g. Steps 2 and 5 from the above
2666  * functions.
2667  */
2668 static boolean_t
2669 i40e_shutdown_ring_wait(i40e_trqpair_t *itrq)
2670 {
2671 	i40e_t *i40e = itrq->itrq_i40e;
2672 	i40e_hw_t *hw = &i40e->i40e_hw_space;
2673 	uint32_t reg;
2674 	int try;
2675 
2676 	for (try = 0; try < I40E_RING_WAIT_NTRIES; try++) {
2677 		reg = I40E_READ_REG(hw, I40E_QRX_ENA(itrq->itrq_index));
2678 		if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0)
2679 			break;
2680 		i40e_msec_delay(I40E_RING_WAIT_PAUSE);
2681 	}
2682 
2683 	if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) != 0) {
2684 		i40e_error(i40e, "timed out disabling rx queue %d",
2685 		    itrq->itrq_index);
2686 		return (B_FALSE);
2687 	}
2688 
2689 	for (try = 0; try < I40E_RING_WAIT_NTRIES; try++) {
2690 		reg = I40E_READ_REG(hw, I40E_QTX_ENA(itrq->itrq_index));
2691 		if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0)
2692 			break;
2693 		i40e_msec_delay(I40E_RING_WAIT_PAUSE);
2694 	}
2695 
2696 	if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) != 0) {
2697 		i40e_error(i40e, "timed out disabling tx queue %d",
2698 		    itrq->itrq_index);
2699 		return (B_FALSE);
2700 	}
2701 
2702 	return (B_TRUE);
2703 }
2704 
2705 
2706 /*
2707  * Shutdown an individual ring and release any memory.
2708  */
2709 boolean_t
2710 i40e_shutdown_ring(i40e_trqpair_t *itrq)
2711 {
2712 	boolean_t rv = B_TRUE;
2713 
2714 	/*
2715 	 * Tell transmit path to quiesce, and wait until done.
2716 	 */
2717 	if (i40e_ring_tx_quiesce(itrq)) {
2718 		/* Already quiesced. */
2719 		return (B_TRUE);
2720 	}
2721 
2722 	i40e_shutdown_rx_ring(itrq);
2723 	i40e_shutdown_tx_ring(itrq);
2724 	if (!i40e_shutdown_ring_wait(itrq))
2725 		rv = B_FALSE;
2726 
2727 	/*
2728 	 * After the ring has stopped, we need to wait 50ms before
2729 	 * programming it again. Rather than wait here, we'll record
2730 	 * the time the ring was stopped. When the ring is started, we'll
2731 	 * check if enough time has expired and then wait if necessary.
2732 	 */
2733 	itrq->irtq_time_stopped = gethrtime();
2734 
2735 	/*
2736 	 * The rings have been stopped in the hardware, now wait for
2737 	 * a possibly active interrupt thread.
2738 	 */
2739 	i40e_intr_quiesce(itrq);
2740 
2741 	mutex_enter(&itrq->itrq_tx_lock);
2742 	i40e_tx_cleanup_ring(itrq);
2743 	mutex_exit(&itrq->itrq_tx_lock);
2744 
2745 	i40e_free_ring_mem(itrq, B_FALSE);
2746 
2747 	return (rv);
2748 }
2749 
2750 /*
2751  * Shutdown all the rings.
2752  * Called from i40e_stop(), and hopefully the mac layer has already
2753  * called ring stop for each ring, which would make this almost a no-op.
2754  */
2755 static boolean_t
2756 i40e_shutdown_rings(i40e_t *i40e)
2757 {
2758 	boolean_t rv = B_TRUE;
2759 	int i;
2760 
2761 	for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
2762 		if (!i40e_shutdown_ring(&i40e->i40e_trqpairs[i]))
2763 			rv = B_FALSE;
2764 	}
2765 
2766 	return (rv);
2767 }
2768 
2769 static void
2770 i40e_setup_rx_descs(i40e_trqpair_t *itrq)
2771 {
2772 	int i;
2773 	i40e_rx_data_t *rxd = itrq->itrq_rxdata;
2774 
2775 	for (i = 0; i < rxd->rxd_ring_size; i++) {
2776 		i40e_rx_control_block_t *rcb;
2777 		i40e_rx_desc_t *rdesc;
2778 
2779 		rcb = rxd->rxd_work_list[i];
2780 		rdesc = &rxd->rxd_desc_ring[i];
2781 
2782 		rdesc->read.pkt_addr =
2783 		    CPU_TO_LE64((uintptr_t)rcb->rcb_dma.dmab_dma_address);
2784 		rdesc->read.hdr_addr = 0;
2785 	}
2786 }
2787 
/*
 * Program the hardware FPM/HMC context for a receive queue: the descriptor
 * ring base and size, the data buffer sizing, and the various feature
 * knobs (most of which -- header split, TPH, flow-control-based drops --
 * are disabled). The context is cleared and then written through the
 * shared Intel common code.
 */
static boolean_t
i40e_setup_rx_hmc(i40e_trqpair_t *itrq)
{
	i40e_rx_data_t *rxd = itrq->itrq_rxdata;
	i40e_t *i40e = itrq->itrq_i40e;
	i40e_hw_t *hw = &i40e->i40e_hw_space;

	struct i40e_hmc_obj_rxq rctx;
	int err;

	bzero(&rctx, sizeof (struct i40e_hmc_obj_rxq));
	/* The ring base is expressed in I40E_HMC_RX_CTX_UNIT-sized units. */
	rctx.base = rxd->rxd_desc_area.dmab_dma_address /
	    I40E_HMC_RX_CTX_UNIT;
	rctx.qlen = rxd->rxd_ring_size;
	/* The buffer size must sit inside the hardware's supported range. */
	VERIFY(i40e->i40e_rx_buf_size >= I40E_HMC_RX_DBUFF_MIN);
	VERIFY(i40e->i40e_rx_buf_size <= I40E_HMC_RX_DBUFF_MAX);
	rctx.dbuff = i40e->i40e_rx_buf_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
	/* No header buffers are used -- header split is disabled below. */
	rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
	rctx.dtype = I40E_HMC_RX_DTYPE_NOSPLIT;
	rctx.dsize = I40E_HMC_RX_DSIZE_32BYTE;
	rctx.crcstrip = I40E_HMC_RX_CRCSTRIP_ENABLE;
	rctx.fc_ena = I40E_HMC_RX_FC_DISABLE;
	rctx.l2tsel = I40E_HMC_RX_L2TAGORDER;
	rctx.hsplit_0 = I40E_HMC_RX_HDRSPLIT_DISABLE;
	rctx.hsplit_1 = I40E_HMC_RX_HDRSPLIT_DISABLE;
	rctx.showiv = I40E_HMC_RX_INVLAN_DONTSTRIP;
	rctx.rxmax = i40e->i40e_frame_max;
	rctx.tphrdesc_ena = I40E_HMC_RX_TPH_DISABLE;
	rctx.tphwdesc_ena = I40E_HMC_RX_TPH_DISABLE;
	rctx.tphdata_ena = I40E_HMC_RX_TPH_DISABLE;
	rctx.tphhead_ena = I40E_HMC_RX_TPH_DISABLE;
	rctx.lrxqthresh = I40E_HMC_RX_LOWRXQ_NOINTR;

	/*
	 * This must be set to 0x1, see Table 8-12 in section 8.3.3.2.2.
	 */
	rctx.prefena = I40E_HMC_RX_PREFENA;

	err = i40e_clear_lan_rx_queue_context(hw, itrq->itrq_index);
	if (err != I40E_SUCCESS) {
		i40e_error(i40e, "failed to clear rx queue %d context: %d",
		    itrq->itrq_index, err);
		return (B_FALSE);
	}

	err = i40e_set_lan_rx_queue_context(hw, itrq->itrq_index, &rctx);
	if (err != I40E_SUCCESS) {
		i40e_error(i40e, "failed to set rx queue %d context: %d",
		    itrq->itrq_index, err);
		return (B_FALSE);
	}

	return (B_TRUE);
}
2842 
2843 /*
2844  * Take care of setting up the descriptor ring and actually programming the
2845  * device. See 8.3.3.1.1 for the full list of steps we need to do to enable the
2846  * rx rings.
2847  */
2848 static boolean_t
2849 i40e_setup_rx_ring(i40e_trqpair_t *itrq)
2850 {
2851 	i40e_t *i40e = itrq->itrq_i40e;
2852 	i40e_hw_t *hw = &i40e->i40e_hw_space;
2853 	i40e_rx_data_t *rxd = itrq->itrq_rxdata;
2854 	uint32_t reg;
2855 	int i;
2856 
2857 	/*
2858 	 * Step 1. Program all receive ring descriptors.
2859 	 */
2860 	i40e_setup_rx_descs(itrq);
2861 
2862 	/*
2863 	 * Step 2. Program the queue's FPM/HMC context.
2864 	 */
2865 	if (!i40e_setup_rx_hmc(itrq))
2866 		return (B_FALSE);
2867 
2868 	/*
2869 	 * Step 3. Clear the queue's tail pointer and set it to the end
2870 	 * of the space.
2871 	 */
2872 	I40E_WRITE_REG(hw, I40E_QRX_TAIL(itrq->itrq_index), 0);
2873 	I40E_WRITE_REG(hw, I40E_QRX_TAIL(itrq->itrq_index),
2874 	    rxd->rxd_ring_size - 1);
2875 
2876 	/*
2877 	 * Step 4. Enable the queue via the QENA_REQ.
2878 	 */
2879 	reg = I40E_READ_REG(hw, I40E_QRX_ENA(itrq->itrq_index));
2880 	VERIFY0(reg & (I40E_QRX_ENA_QENA_REQ_MASK |
2881 	    I40E_QRX_ENA_QENA_STAT_MASK));
2882 	reg |= I40E_QRX_ENA_QENA_REQ_MASK;
2883 	I40E_WRITE_REG(hw, I40E_QRX_ENA(itrq->itrq_index), reg);
2884 
2885 	/*
2886 	 * Step 5. Verify that QENA_STAT has been set. It's promised
2887 	 * that this should occur within about 10 us, but like other
2888 	 * systems, we give the card a bit more time.
2889 	 */
2890 	for (i = 0; i < I40E_RING_WAIT_NTRIES; i++) {
2891 		reg = I40E_READ_REG(hw, I40E_QRX_ENA(itrq->itrq_index));
2892 
2893 		if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
2894 			break;
2895 		i40e_msec_delay(I40E_RING_WAIT_PAUSE);
2896 	}
2897 
2898 	if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
2899 		i40e_error(i40e, "failed to enable rx queue %d, timed "
2900 		    "out.", itrq->itrq_index);
2901 		return (B_FALSE);
2902 	}
2903 
2904 	return (B_TRUE);
2905 }
2906 
/*
 * Program the hardware FPM/HMC context for a transmit queue: descriptor
 * ring base/size, head write-back location, and the feature knobs (flow
 * control, timesync, flow director, TPH -- all disabled). The ready-list
 * is looked up from the default VSI's queue-set handle via the admin
 * queue before the context is cleared and written.
 */
static boolean_t
i40e_setup_tx_hmc(i40e_trqpair_t *itrq)
{
	i40e_t *i40e = itrq->itrq_i40e;
	i40e_hw_t *hw = &i40e->i40e_hw_space;

	struct i40e_hmc_obj_txq tctx;
	struct i40e_vsi_context	context;
	int err;

	bzero(&tctx, sizeof (struct i40e_hmc_obj_txq));
	tctx.new_context = I40E_HMC_TX_NEW_CONTEXT;
	/* The ring base is expressed in I40E_HMC_TX_CTX_UNIT-sized units. */
	tctx.base = itrq->itrq_desc_area.dmab_dma_address /
	    I40E_HMC_TX_CTX_UNIT;
	tctx.fc_ena = I40E_HMC_TX_FC_DISABLE;
	tctx.timesync_ena = I40E_HMC_TX_TS_DISABLE;
	tctx.fd_ena = I40E_HMC_TX_FD_DISABLE;
	tctx.alt_vlan_ena = I40E_HMC_TX_ALT_VLAN_DISABLE;
	tctx.head_wb_ena = I40E_HMC_TX_WB_ENABLE;
	tctx.qlen = itrq->itrq_tx_ring_size;
	tctx.tphrdesc_ena = I40E_HMC_TX_TPH_DISABLE;
	tctx.tphrpacket_ena = I40E_HMC_TX_TPH_DISABLE;
	tctx.tphwdesc_ena = I40E_HMC_TX_TPH_DISABLE;
	/* Head write-back lands just past the last tx descriptor. */
	tctx.head_wb_addr = itrq->itrq_desc_area.dmab_dma_address +
	    sizeof (i40e_tx_desc_t) * itrq->itrq_tx_ring_size;

	/*
	 * This field isn't actually documented, like crc, but it suggests that
	 * it should be zeroed. We leave both of these here because of that for
	 * now. We should check with Intel on why these are here even.
	 */
	tctx.crc = 0;
	tctx.rdylist_act = 0;

	/*
	 * We're supposed to assign the rdylist field with the value of the
	 * traffic class index for the first device. We query the VSI parameters
	 * again to get what the handle is. Note that every queue is always
	 * assigned to traffic class zero, because we don't actually use them.
	 */
	bzero(&context, sizeof (struct i40e_vsi_context));
	context.seid = I40E_DEF_VSI_SEID(i40e);
	context.pf_num = hw->pf_id;
	err = i40e_aq_get_vsi_params(hw, &context, NULL);
	if (err != I40E_SUCCESS) {
		i40e_error(i40e, "get VSI params failed with %d", err);
		return (B_FALSE);
	}
	tctx.rdylist = LE_16(context.info.qs_handle[0]);

	err = i40e_clear_lan_tx_queue_context(hw, itrq->itrq_index);
	if (err != I40E_SUCCESS) {
		i40e_error(i40e, "failed to clear tx queue %d context: %d",
		    itrq->itrq_index, err);
		return (B_FALSE);
	}

	err = i40e_set_lan_tx_queue_context(hw, itrq->itrq_index, &tctx);
	if (err != I40E_SUCCESS) {
		i40e_error(i40e, "failed to set tx queue %d context: %d",
		    itrq->itrq_index, err);
		return (B_FALSE);
	}

	return (B_TRUE);
}
2973 
2974 /*
2975  * Take care of setting up the descriptor ring and actually programming the
2976  * device. See 8.4.3.1.1 for what we need to do here.
2977  */
2978 static boolean_t
2979 i40e_setup_tx_ring(i40e_trqpair_t *itrq)
2980 {
2981 	i40e_t *i40e = itrq->itrq_i40e;
2982 	i40e_hw_t *hw = &i40e->i40e_hw_space;
2983 	uint32_t reg;
2984 	int i;
2985 
2986 	/*
2987 	 * Step 1. Clear the queue disable flag and verify that the
2988 	 * index is set correctly.
2989 	 */
2990 	i40e_pre_tx_queue_cfg(hw, itrq->itrq_index, B_TRUE);
2991 
2992 	/*
2993 	 * Step 2. Prepare the queue's FPM/HMC context.
2994 	 */
2995 	if (!i40e_setup_tx_hmc(itrq))
2996 		return (B_FALSE);
2997 
2998 	/*
2999 	 * Step 3. Verify that it's clear that this PF owns this queue.
3000 	 */
3001 	reg = I40E_QTX_CTL_PF_QUEUE;
3002 	reg |= (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
3003 	    I40E_QTX_CTL_PF_INDX_MASK;
3004 	I40E_WRITE_REG(hw, I40E_QTX_CTL(itrq->itrq_index), reg);
3005 	i40e_flush(hw);
3006 
3007 	/*
3008 	 * Step 4. Set the QENA_REQ flag.
3009 	 */
3010 	reg = I40E_READ_REG(hw, I40E_QTX_ENA(itrq->itrq_index));
3011 	VERIFY0(reg & (I40E_QTX_ENA_QENA_REQ_MASK |
3012 	    I40E_QTX_ENA_QENA_STAT_MASK));
3013 	reg |= I40E_QTX_ENA_QENA_REQ_MASK;
3014 	I40E_WRITE_REG(hw, I40E_QTX_ENA(itrq->itrq_index), reg);
3015 
3016 	/*
3017 	 * Step 5. Verify that QENA_STAT has been set. It's promised
3018 	 * that this should occur within about 10 us, but like BSD,
3019 	 * we'll try for up to 100 ms for this queue.
3020 	 */
3021 	for (i = 0; i < I40E_RING_WAIT_NTRIES; i++) {
3022 		reg = I40E_READ_REG(hw, I40E_QTX_ENA(itrq->itrq_index));
3023 
3024 		if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
3025 			break;
3026 		i40e_msec_delay(I40E_RING_WAIT_PAUSE);
3027 	}
3028 
3029 	if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
3030 		i40e_error(i40e, "failed to enable tx queue %d, timed "
3031 		    "out", itrq->itrq_index);
3032 		return (B_FALSE);
3033 	}
3034 
3035 	return (B_TRUE);
3036 }
3037 
3038 int
3039 i40e_setup_ring(i40e_trqpair_t *itrq)
3040 {
3041 	i40e_t *i40e = itrq->itrq_i40e;
3042 	hrtime_t now, gap;
3043 
3044 	if (!i40e_alloc_ring_mem(itrq)) {
3045 		i40e_error(i40e, "Failed to allocate ring memory");
3046 		return (ENOMEM);
3047 	}
3048 
3049 	/*
3050 	 * 8.3.3.1.1 Receive Queue Enable Flow states software should
3051 	 * wait at least 50ms between ring disable and enable. See how
3052 	 * long we need to wait, and wait only if required.
3053 	 */
3054 	now = gethrtime();
3055 	gap = NSEC2MSEC(now - itrq->irtq_time_stopped);
3056 	if (gap < I40E_RING_ENABLE_GAP && gap != 0)
3057 		delay(drv_usectohz(gap * 1000));
3058 
3059 	mutex_enter(&itrq->itrq_intr_lock);
3060 	if (!i40e_setup_rx_ring(itrq))
3061 		goto failed;
3062 
3063 	if (!i40e_setup_tx_ring(itrq))
3064 		goto failed;
3065 
3066 	if (i40e_check_acc_handle(i40e->i40e_osdep_space.ios_reg_handle) !=
3067 	    DDI_FM_OK)
3068 		goto failed;
3069 
3070 	itrq->itrq_intr_quiesce = B_FALSE;
3071 	mutex_exit(&itrq->itrq_intr_lock);
3072 
3073 	mutex_enter(&itrq->itrq_tx_lock);
3074 	itrq->itrq_tx_quiesce = B_FALSE;
3075 	mutex_exit(&itrq->itrq_tx_lock);
3076 
3077 	return (0);
3078 
3079 failed:
3080 	mutex_exit(&itrq->itrq_intr_lock);
3081 	i40e_free_ring_mem(itrq, B_TRUE);
3082 	ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_LOST);
3083 
3084 	return (EIO);
3085 }
3086 
void
i40e_stop(i40e_t *i40e)
{
	uint_t i;
	i40e_hw_t *hw = &i40e->i40e_hw_space;

	ASSERT(MUTEX_HELD(&i40e->i40e_general_lock));

	/*
	 * Shutdown and drain the tx and rx pipeline. We do this using the
	 * following steps.
	 *
	 * 1) Shutdown interrupts to all the queues (trying to keep the admin
	 *    queue alive).
	 *
	 * 2) Remove all of the interrupt tx and rx causes by setting the
	 *    interrupt linked lists to zero.
	 *
	 * 3) Shutdown the tx and rx rings. Because i40e_shutdown_rings() should
	 *    wait for all the queues to be disabled, once we reach that point
	 *    it should be safe to free associated data.
	 *
	 * 4) Wait 50ms after all that is done. This ensures that the rings are
	 *    ready for programming again and we don't have to think about this
	 *    in other parts of the driver.
	 *
	 * 5) Disable remaining chip interrupts, (admin queue, etc.)
	 *
	 * 6) Verify that FM is happy with all the register accesses we
	 *    performed.
	 */
	i40e_intr_io_disable_all(i40e);
	i40e_intr_io_clear_cause(i40e);

	if (!i40e_shutdown_rings(i40e))
		ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_LOST);

	/*
	 * We don't delete the default VSI because it replaces the VEB
	 * after VEB deletion (see the "Delete Element" section).
	 * Furthermore, since the default VSI is provided by the
	 * firmware, we never attempt to delete it.
	 */
	for (i = 1; i < i40e->i40e_num_rx_groups; i++) {
		i40e_delete_vsi(i40e, i);
	}

	if (i40e->i40e_veb_seid != 0) {
		int rc = i40e_aq_delete_element(hw, i40e->i40e_veb_seid, NULL);

		if (rc != I40E_SUCCESS) {
			/*
			 * NOTE(review): the "%d" after "VEB" prints the AQ
			 * return code, not the VEB SEID -- possibly
			 * misleading wording, but left as-is.
			 */
			i40e_error(i40e, "Failed to delete VEB %d: %d", rc,
			    hw->aq.asq_last_status);
		}

		i40e->i40e_veb_seid = 0;
	}

	i40e_intr_chip_fini(i40e);

	if (i40e_check_acc_handle(i40e->i40e_osdep_space.ios_cfg_handle) !=
	    DDI_FM_OK) {
		ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_LOST);
	}

	/* Tear down the per-VSI kstats for every rx group. */
	for (i = 0; i < i40e->i40e_num_rx_groups; i++) {
		i40e_stat_vsi_fini(i40e, i);
	}

	/* Report the link as down/unknown now that the device is stopped. */
	i40e->i40e_link_speed = 0;
	i40e->i40e_link_duplex = 0;
	i40e_link_state_set(i40e, LINK_STATE_UNKNOWN);
}
3160 
/*
 * Start the device: bring up the chip, enable broadcast, program the MAC
 * configuration, and finally enable I/O interrupts. On any failure the
 * device is stopped again and an FM service impact is reported. Callers
 * must hold i40e_general_lock. Returns B_TRUE on success.
 */
boolean_t
i40e_start(i40e_t *i40e)
{
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	boolean_t rc = B_TRUE;
	int err;

	ASSERT(MUTEX_HELD(&i40e->i40e_general_lock));

	if (!i40e_chip_start(i40e)) {
		i40e_fm_ereport(i40e, DDI_FM_DEVICE_INVAL_STATE);
		rc = B_FALSE;
		goto done;
	}

	/*
	 * Enable broadcast traffic; however, do not enable multicast traffic.
	 * That's handled exclusively through MAC's mc_multicst routines.
	 */
	err = i40e_aq_set_vsi_broadcast(hw, I40E_DEF_VSI_SEID(i40e), B_TRUE,
	    NULL);
	if (err != I40E_SUCCESS) {
		i40e_error(i40e, "failed to set default VSI: %d", err);
		rc = B_FALSE;
		goto done;
	}

	err = i40e_aq_set_mac_config(hw, i40e->i40e_frame_max, B_TRUE, 0,
	    B_FALSE, NULL);
	if (err != I40E_SUCCESS) {
		i40e_error(i40e, "failed to set MAC config: %d", err);
		rc = B_FALSE;
		goto done;
	}

	/*
	 * Finally, make sure that we're happy from an FM perspective.
	 */
	if (i40e_check_acc_handle(i40e->i40e_osdep_space.ios_reg_handle) !=
	    DDI_FM_OK) {
		rc = B_FALSE;
		goto done;
	}

	/* Clear state bits prior to final interrupt enabling. */
	atomic_and_32(&i40e->i40e_state,
	    ~(I40E_ERROR | I40E_STALL | I40E_OVERTEMP));

	i40e_intr_io_enable_all(i40e);

done:
	if (rc == B_FALSE) {
		i40e_stop(i40e);
		ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_LOST);
	}

	return (rc);
}
3219 
3220 /*
3221  * We may have loaned up descriptors to the stack. As such, if we still have
3222  * them outstanding, then we will not continue with detach.
3223  */
3224 static boolean_t
3225 i40e_drain_rx(i40e_t *i40e)
3226 {
3227 	mutex_enter(&i40e->i40e_rx_pending_lock);
3228 	while (i40e->i40e_rx_pending > 0) {
3229 		if (cv_reltimedwait(&i40e->i40e_rx_pending_cv,
3230 		    &i40e->i40e_rx_pending_lock,
3231 		    drv_usectohz(I40E_DRAIN_RX_WAIT), TR_CLOCK_TICK) == -1) {
3232 			mutex_exit(&i40e->i40e_rx_pending_lock);
3233 			return (B_FALSE);
3234 		}
3235 	}
3236 	mutex_exit(&i40e->i40e_rx_pending_lock);
3237 
3238 	return (B_TRUE);
3239 }
3240 
3241 /*
3242  * DDI UFM Callbacks
3243  */
3244 static int
3245 i40e_ufm_fill_image(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno,
3246     ddi_ufm_image_t *img)
3247 {
3248 	if (imgno != 0)
3249 		return (EINVAL);
3250 
3251 	ddi_ufm_image_set_desc(img, "Firmware");
3252 	ddi_ufm_image_set_nslots(img, 1);
3253 
3254 	return (0);
3255 }
3256 
3257 static int
3258 i40e_ufm_fill_slot(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno,
3259     uint_t slotno, ddi_ufm_slot_t *slot)
3260 {
3261 	i40e_t *i40e = (i40e_t *)arg;
3262 	char *fw_ver = NULL, *fw_bld = NULL, *api_ver = NULL;
3263 	nvlist_t *misc = NULL;
3264 	uint_t flags = DDI_PROP_DONTPASS;
3265 	int err;
3266 
3267 	if (imgno != 0 || slotno != 0 ||
3268 	    ddi_prop_lookup_string(DDI_DEV_T_ANY, i40e->i40e_dip, flags,
3269 	    "firmware-version", &fw_ver) != DDI_PROP_SUCCESS ||
3270 	    ddi_prop_lookup_string(DDI_DEV_T_ANY, i40e->i40e_dip, flags,
3271 	    "firmware-build", &fw_bld) != DDI_PROP_SUCCESS ||
3272 	    ddi_prop_lookup_string(DDI_DEV_T_ANY, i40e->i40e_dip, flags,
3273 	    "api-version", &api_ver) != DDI_PROP_SUCCESS) {
3274 		err = EINVAL;
3275 		goto err;
3276 	}
3277 
3278 	ddi_ufm_slot_set_attrs(slot, DDI_UFM_ATTR_ACTIVE);
3279 	ddi_ufm_slot_set_version(slot, fw_ver);
3280 
3281 	(void) nvlist_alloc(&misc, NV_UNIQUE_NAME, KM_SLEEP);
3282 	if ((err = nvlist_add_string(misc, "firmware-build", fw_bld)) != 0 ||
3283 	    (err = nvlist_add_string(misc, "api-version", api_ver)) != 0) {
3284 		goto err;
3285 	}
3286 	ddi_ufm_slot_set_misc(slot, misc);
3287 
3288 	ddi_prop_free(fw_ver);
3289 	ddi_prop_free(fw_bld);
3290 	ddi_prop_free(api_ver);
3291 
3292 	return (0);
3293 err:
3294 	nvlist_free(misc);
3295 	if (fw_ver != NULL)
3296 		ddi_prop_free(fw_ver);
3297 	if (fw_bld != NULL)
3298 		ddi_prop_free(fw_bld);
3299 	if (api_ver != NULL)
3300 		ddi_prop_free(api_ver);
3301 
3302 	return (err);
3303 }
3304 
/*
 * UFM callback: report capabilities. Only reporting (not firmware
 * update or image read-back) is supported.
 */
static int
i40e_ufm_getcaps(ddi_ufm_handle_t *ufmh, void *arg, ddi_ufm_cap_t *caps)
{
	*caps = DDI_UFM_CAP_REPORT;

	return (0);
}
3312 
/* UFM entry points; no nimages callback is needed for a single image. */
static ddi_ufm_ops_t i40e_ufm_ops = {
	NULL,			/* nimages */
	i40e_ufm_fill_image,	/* fill_image */
	i40e_ufm_fill_slot,	/* fill_slot */
	i40e_ufm_getcaps	/* getcaps */
};
3319 
/*
 * Attach a single i40e instance. Each step records a bit in
 * i40e_attach_progress so that i40e_unconfigure() can tear down exactly
 * what was set up if any later step fails. On success the instance is
 * marked initialized and placed on the global instance list.
 */
static int
i40e_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
	i40e_t *i40e;
	struct i40e_osdep *osdep;
	i40e_hw_t *hw;
	int instance;

	/* Only cold attach is supported (no DDI_RESUME handling). */
	if (cmd != DDI_ATTACH)
		return (DDI_FAILURE);

	instance = ddi_get_instance(devinfo);
	i40e = kmem_zalloc(sizeof (i40e_t), KM_SLEEP);

	i40e->i40e_aqbuf = kmem_zalloc(I40E_ADMINQ_BUFSZ, KM_SLEEP);
	i40e->i40e_instance = instance;
	i40e->i40e_dip = devinfo;

	/* Link the soft state and the Intel common-code hw structure. */
	hw = &i40e->i40e_hw_space;
	osdep = &i40e->i40e_osdep_space;
	hw->back = osdep;
	osdep->ios_i40e = i40e;

	ddi_set_driver_private(devinfo, i40e);

	i40e_fm_init(i40e);
	i40e->i40e_attach_progress |= I40E_ATTACH_FM_INIT;

	if (pci_config_setup(devinfo, &osdep->ios_cfg_handle) != DDI_SUCCESS) {
		i40e_error(i40e, "Failed to map PCI configurations.");
		goto attach_fail;
	}
	i40e->i40e_attach_progress |= I40E_ATTACH_PCI_CONFIG;

	i40e_identify_hardware(i40e);

	if (!i40e_regs_map(i40e)) {
		i40e_error(i40e, "Failed to map device registers.");
		goto attach_fail;
	}
	i40e->i40e_attach_progress |= I40E_ATTACH_REGS_MAP;

	i40e_init_properties(i40e);
	i40e->i40e_attach_progress |= I40E_ATTACH_PROPS;

	if (!i40e_common_code_init(i40e, hw))
		goto attach_fail;
	i40e->i40e_attach_progress |= I40E_ATTACH_COMMON_CODE;

	/*
	 * When we participate in IRM, we should make sure that we register
	 * ourselves with it before callbacks.
	 */
	if (!i40e_alloc_intrs(i40e, devinfo)) {
		i40e_error(i40e, "Failed to allocate interrupts.");
		goto attach_fail;
	}
	i40e->i40e_attach_progress |= I40E_ATTACH_ALLOC_INTR;

	if (!i40e_alloc_trqpairs(i40e)) {
		i40e_error(i40e,
		    "Failed to allocate receive & transmit rings.");
		goto attach_fail;
	}
	i40e->i40e_attach_progress |= I40E_ATTACH_ALLOC_RINGSLOCKS;

	if (!i40e_map_intrs_to_vectors(i40e)) {
		i40e_error(i40e, "Failed to map interrupts to vectors.");
		goto attach_fail;
	}

	if (!i40e_add_intr_handlers(i40e)) {
		i40e_error(i40e, "Failed to add the interrupt handlers.");
		goto attach_fail;
	}
	i40e->i40e_attach_progress |= I40E_ATTACH_ADD_INTR;

	if (!i40e_final_init(i40e)) {
		i40e_error(i40e, "Final initialization failed.");
		goto attach_fail;
	}
	i40e->i40e_attach_progress |= I40E_ATTACH_INIT;

	/* Verify FM is satisfied with the config-space accesses so far. */
	if (i40e_check_acc_handle(i40e->i40e_osdep_space.ios_cfg_handle) !=
	    DDI_FM_OK) {
		ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_LOST);
		goto attach_fail;
	}

	if (!i40e_stats_init(i40e)) {
		i40e_error(i40e, "Stats initialization failed.");
		goto attach_fail;
	}
	i40e->i40e_attach_progress |= I40E_ATTACH_STATS;

	if (!i40e_register_mac(i40e)) {
		i40e_error(i40e, "Failed to register to MAC/GLDv3");
		goto attach_fail;
	}
	i40e->i40e_attach_progress |= I40E_ATTACH_MAC;

	/* Periodic link-check timer. */
	i40e->i40e_periodic_id = ddi_periodic_add(i40e_timer, i40e,
	    I40E_CYCLIC_PERIOD, DDI_IPL_0);
	if (i40e->i40e_periodic_id == 0) {
		i40e_error(i40e, "Failed to add the link-check timer");
		goto attach_fail;
	}
	i40e->i40e_attach_progress |= I40E_ATTACH_LINK_TIMER;

	if (!i40e_enable_interrupts(i40e)) {
		i40e_error(i40e, "Failed to enable DDI interrupts");
		goto attach_fail;
	}
	i40e->i40e_attach_progress |= I40E_ATTACH_ENABLE_INTR;

	/* Only PF function 0 registers with the UFM subsystem. */
	if (i40e->i40e_hw_space.bus.func == 0) {
		if (ddi_ufm_init(i40e->i40e_dip, DDI_UFM_CURRENT_VERSION,
		    &i40e_ufm_ops, &i40e->i40e_ufmh, i40e) != 0) {
			i40e_error(i40e, "failed to initialize UFM subsystem");
			goto attach_fail;
		}
		ddi_ufm_update(i40e->i40e_ufmh);
		i40e->i40e_attach_progress |= I40E_ATTACH_UFM_INIT;
	}

	atomic_or_32(&i40e->i40e_state, I40E_INITIALIZED);

	/* Publish this instance on the global list. */
	mutex_enter(&i40e_glock);
	list_insert_tail(&i40e_glist, i40e);
	mutex_exit(&i40e_glock);

	return (DDI_SUCCESS);

attach_fail:
	i40e_unconfigure(devinfo, i40e);
	return (DDI_FAILURE);
}
3457 
3458 static int
3459 i40e_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
3460 {
3461 	i40e_t *i40e;
3462 
3463 	if (cmd != DDI_DETACH)
3464 		return (DDI_FAILURE);
3465 
3466 	i40e = (i40e_t *)ddi_get_driver_private(devinfo);
3467 	if (i40e == NULL) {
3468 		i40e_log(NULL, "i40e_detach() called with no i40e pointer!");
3469 		return (DDI_FAILURE);
3470 	}
3471 
3472 	if (i40e_drain_rx(i40e) == B_FALSE) {
3473 		i40e_log(i40e, "timed out draining DMA resources, %d buffers "
3474 		    "remain", i40e->i40e_rx_pending);
3475 		return (DDI_FAILURE);
3476 	}
3477 
3478 	mutex_enter(&i40e_glock);
3479 	list_remove(&i40e_glist, i40e);
3480 	mutex_exit(&i40e_glock);
3481 
3482 	i40e_unconfigure(devinfo, i40e);
3483 
3484 	return (DDI_SUCCESS);
3485 }
3486 
/*
 * Character/block device entry points. The driver exposes no direct
 * device access of its own -- every entry is a stub and all real work
 * flows through the GLDv3/mac framework.
 */
static struct cb_ops i40e_cb_ops = {
	nulldev,		/* cb_open */
	nulldev,		/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	nodev,			/* cb_read */
	nodev,			/* cb_write */
	nodev,			/* cb_ioctl */
	nodev,			/* cb_devmap */
	nodev,			/* cb_mmap */
	nodev,			/* cb_segmap */
	nochpoll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	NULL,			/* cb_stream */
	D_MP | D_HOTPLUG,	/* cb_flag */
	CB_REV,			/* cb_rev */
	nodev,			/* cb_aread */
	nodev			/* cb_awrite */
};
3507 
/*
 * Device operations. mac_init_ops() (called from _init()) further
 * adjusts this structure for GLDv3 use before mod_install().
 */
static struct dev_ops i40e_dev_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	NULL,			/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	i40e_attach,		/* devo_attach */
	i40e_detach,		/* devo_detach */
	nodev,			/* devo_reset */
	&i40e_cb_ops,		/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	nulldev,		/* devo_power */
	ddi_quiesce_not_supported /* devo_quiesce */
};
3522 
/* Loadable-module (driver) linkage. */
static struct modldrv i40e_modldrv = {
	&mod_driverops,		/* drv_modops */
	i40e_ident,		/* drv_linkinfo */
	&i40e_dev_ops		/* drv_dev_ops */
};

static struct modlinkage i40e_modlinkage = {
	MODREV_1,
	&i40e_modldrv,
	NULL
};
3534 
3535 /*
3536  * Module Initialization Functions.
3537  */
3538 int
3539 _init(void)
3540 {
3541 	int status;
3542 
3543 	list_create(&i40e_glist, sizeof (i40e_t), offsetof(i40e_t, i40e_glink));
3544 	list_create(&i40e_dlist, sizeof (i40e_device_t),
3545 	    offsetof(i40e_device_t, id_link));
3546 	mutex_init(&i40e_glock, NULL, MUTEX_DRIVER, NULL);
3547 	mac_init_ops(&i40e_dev_ops, I40E_MODULE_NAME);
3548 
3549 	status = mod_install(&i40e_modlinkage);
3550 	if (status != DDI_SUCCESS) {
3551 		mac_fini_ops(&i40e_dev_ops);
3552 		mutex_destroy(&i40e_glock);
3553 		list_destroy(&i40e_dlist);
3554 		list_destroy(&i40e_glist);
3555 	}
3556 
3557 	return (status);
3558 }
3559 
/* Report module information to the kernel module framework. */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&i40e_modlinkage, modinfop));
}
3565 
3566 int
3567 _fini(void)
3568 {
3569 	int status;
3570 
3571 	status = mod_remove(&i40e_modlinkage);
3572 	if (status == DDI_SUCCESS) {
3573 		mac_fini_ops(&i40e_dev_ops);
3574 		mutex_destroy(&i40e_glock);
3575 		list_destroy(&i40e_dlist);
3576 		list_destroy(&i40e_glist);
3577 	}
3578 
3579 	return (status);
3580 }
3581