xref: /freebsd/sys/dev/ixl/if_ixl.c (revision b8f51b8c5423af0795429836a00f2a968e791f6e)
1 /******************************************************************************
2 
3   Copyright (c) 2013-2018, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 #include "ixl.h"
36 #include "ixl_pf.h"
37 
38 #ifdef IXL_IW
39 #include "ixl_iw.h"
40 #include "ixl_iw_int.h"
41 #endif
42 
43 #ifdef PCI_IOV
44 #include "ixl_pf_iov.h"
45 #endif
46 
47 /*********************************************************************
48  *  Driver version
49  *********************************************************************/
50 #define IXL_DRIVER_VERSION_MAJOR	2
51 #define IXL_DRIVER_VERSION_MINOR	3
52 #define IXL_DRIVER_VERSION_BUILD	3
53 
54 #define IXL_DRIVER_VERSION_STRING			\
55     __XSTRING(IXL_DRIVER_VERSION_MAJOR) "."		\
56     __XSTRING(IXL_DRIVER_VERSION_MINOR) "."		\
57     __XSTRING(IXL_DRIVER_VERSION_BUILD) "-k"
58 
59 /*********************************************************************
60  *  PCI Device ID Table
61  *
62  *  Used by probe to select devices to load on
63  *
64  *  ( Vendor ID, Device ID, Branding String )
65  *********************************************************************/
66 
/* Device ID table scanned by iflib_device_probe(); the branding string
 * is what the user sees in dmesg/pciconf when the device matches. */
static pci_vendor_info_t ixl_vendor_info_array[] =
{
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, "Intel(R) Ethernet Controller X710 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, "Intel(R) Ethernet Controller XL710 for 40GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, "Intel(R) Ethernet Controller X710 for 10GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, "Intel(R) Ethernet Controller X710 for 10GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, "Intel(R) Ethernet Controller X710 for 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, "Intel(R) Ethernet Controller X710/X557-AT 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722, "Intel(R) Ethernet Connection X722 for 10GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 1GbE"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B, "Intel(R) Ethernet Controller XXV710 for 25GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28, "Intel(R) Ethernet Controller XXV710 for 25GbE SFP28"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_BC, "Intel(R) Ethernet Controller X710 for 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_SFP, "Intel(R) Ethernet Controller X710 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_B, "Intel(R) Ethernet Controller X710 for 10GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_5G_BASE_T_BC, "Intel(R) Ethernet Controller V710 for 5GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_BC, "Intel(R) Ethernet Controller I710 for 1GBASE-T"),
	/* required last entry (zero terminator for the probe scan) */
	PVID_END
};
93 
94 /*********************************************************************
95  *  Function prototypes
96  *********************************************************************/
97 /*** IFLIB interface ***/
98 static void	*ixl_register(device_t dev);
99 static int	 ixl_if_attach_pre(if_ctx_t ctx);
100 static int	 ixl_if_attach_post(if_ctx_t ctx);
101 static int	 ixl_if_detach(if_ctx_t ctx);
102 static int	 ixl_if_shutdown(if_ctx_t ctx);
103 static int	 ixl_if_suspend(if_ctx_t ctx);
104 static int	 ixl_if_resume(if_ctx_t ctx);
105 static int	 ixl_if_msix_intr_assign(if_ctx_t ctx, int msix);
106 static void	 ixl_if_enable_intr(if_ctx_t ctx);
107 static void	 ixl_if_disable_intr(if_ctx_t ctx);
108 static int	 ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid);
109 static int	 ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid);
110 static int	 ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets);
111 static int	 ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
112 static void	 ixl_if_queues_free(if_ctx_t ctx);
113 static void	 ixl_if_update_admin_status(if_ctx_t ctx);
114 static void	 ixl_if_multi_set(if_ctx_t ctx);
115 static int	 ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
116 static void	 ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr);
117 static int	 ixl_if_media_change(if_ctx_t ctx);
118 static int	 ixl_if_promisc_set(if_ctx_t ctx, int flags);
119 static void	 ixl_if_timer(if_ctx_t ctx, uint16_t qid);
120 static void	 ixl_if_vlan_register(if_ctx_t ctx, u16 vtag);
121 static void	 ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
122 static uint64_t	 ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt);
123 static int	 ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
124 static int	 ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data);
125 static bool	 ixl_if_needs_restart(if_ctx_t ctx, enum iflib_restart_event event);
126 #ifdef PCI_IOV
127 static void	 ixl_if_vflr_handle(if_ctx_t ctx);
128 #endif
129 
130 /*** Other ***/
131 static void	 ixl_save_pf_tunables(struct ixl_pf *);
132 static int	 ixl_allocate_pci_resources(struct ixl_pf *);
133 static void	 ixl_setup_ssctx(struct ixl_pf *pf);
134 static void	 ixl_admin_timer(void *arg);
135 
136 /*********************************************************************
137  *  FreeBSD Device Interface Entry Points
138  *********************************************************************/
139 
140 static device_method_t ixl_methods[] = {
141 	/* Device interface */
142 	DEVMETHOD(device_register, ixl_register),
143 	DEVMETHOD(device_probe, iflib_device_probe),
144 	DEVMETHOD(device_attach, iflib_device_attach),
145 	DEVMETHOD(device_detach, iflib_device_detach),
146 	DEVMETHOD(device_shutdown, iflib_device_shutdown),
147 #ifdef PCI_IOV
148 	DEVMETHOD(pci_iov_init, iflib_device_iov_init),
149 	DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
150 	DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
151 #endif
152 	DEVMETHOD_END
153 };
154 
155 static driver_t ixl_driver = {
156 	"ixl", ixl_methods, sizeof(struct ixl_pf),
157 };
158 
159 DRIVER_MODULE(ixl, pci, ixl_driver, 0, 0);
160 IFLIB_PNP_INFO(pci, ixl, ixl_vendor_info_array);
161 MODULE_VERSION(ixl, 3);
162 
163 MODULE_DEPEND(ixl, pci, 1, 1, 1);
164 MODULE_DEPEND(ixl, ether, 1, 1, 1);
165 MODULE_DEPEND(ixl, iflib, 1, 1, 1);
166 
167 static device_method_t ixl_if_methods[] = {
168 	DEVMETHOD(ifdi_attach_pre, ixl_if_attach_pre),
169 	DEVMETHOD(ifdi_attach_post, ixl_if_attach_post),
170 	DEVMETHOD(ifdi_detach, ixl_if_detach),
171 	DEVMETHOD(ifdi_shutdown, ixl_if_shutdown),
172 	DEVMETHOD(ifdi_suspend, ixl_if_suspend),
173 	DEVMETHOD(ifdi_resume, ixl_if_resume),
174 	DEVMETHOD(ifdi_init, ixl_if_init),
175 	DEVMETHOD(ifdi_stop, ixl_if_stop),
176 	DEVMETHOD(ifdi_msix_intr_assign, ixl_if_msix_intr_assign),
177 	DEVMETHOD(ifdi_intr_enable, ixl_if_enable_intr),
178 	DEVMETHOD(ifdi_intr_disable, ixl_if_disable_intr),
179 	DEVMETHOD(ifdi_rx_queue_intr_enable, ixl_if_rx_queue_intr_enable),
180 	DEVMETHOD(ifdi_tx_queue_intr_enable, ixl_if_tx_queue_intr_enable),
181 	DEVMETHOD(ifdi_tx_queues_alloc, ixl_if_tx_queues_alloc),
182 	DEVMETHOD(ifdi_rx_queues_alloc, ixl_if_rx_queues_alloc),
183 	DEVMETHOD(ifdi_queues_free, ixl_if_queues_free),
184 	DEVMETHOD(ifdi_update_admin_status, ixl_if_update_admin_status),
185 	DEVMETHOD(ifdi_multi_set, ixl_if_multi_set),
186 	DEVMETHOD(ifdi_mtu_set, ixl_if_mtu_set),
187 	DEVMETHOD(ifdi_media_status, ixl_if_media_status),
188 	DEVMETHOD(ifdi_media_change, ixl_if_media_change),
189 	DEVMETHOD(ifdi_promisc_set, ixl_if_promisc_set),
190 	DEVMETHOD(ifdi_timer, ixl_if_timer),
191 	DEVMETHOD(ifdi_vlan_register, ixl_if_vlan_register),
192 	DEVMETHOD(ifdi_vlan_unregister, ixl_if_vlan_unregister),
193 	DEVMETHOD(ifdi_get_counter, ixl_if_get_counter),
194 	DEVMETHOD(ifdi_i2c_req, ixl_if_i2c_req),
195 	DEVMETHOD(ifdi_priv_ioctl, ixl_if_priv_ioctl),
196 	DEVMETHOD(ifdi_needs_restart, ixl_if_needs_restart),
197 #ifdef PCI_IOV
198 	DEVMETHOD(ifdi_iov_init, ixl_if_iov_init),
199 	DEVMETHOD(ifdi_iov_uninit, ixl_if_iov_uninit),
200 	DEVMETHOD(ifdi_iov_vf_add, ixl_if_iov_vf_add),
201 	DEVMETHOD(ifdi_vflr_handle, ixl_if_vflr_handle),
202 #endif
203 	// ifdi_led_func
204 	// ifdi_debug
205 	DEVMETHOD_END
206 };
207 
208 static driver_t ixl_if_driver = {
209 	"ixl_if", ixl_if_methods, sizeof(struct ixl_pf)
210 };
211 
212 /*
213 ** TUNEABLE PARAMETERS:
214 */
215 
216 static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
217     "ixl driver parameters");
218 
219 #ifdef IXL_DEBUG_FC
220 /*
221  * Leave this on unless you need to send flow control
222  * frames (or other control frames) from software
223  */
224 static int ixl_enable_tx_fc_filter = 1;
225 TUNABLE_INT("hw.ixl.enable_tx_fc_filter",
226     &ixl_enable_tx_fc_filter);
227 SYSCTL_INT(_hw_ixl, OID_AUTO, enable_tx_fc_filter, CTLFLAG_RDTUN,
228     &ixl_enable_tx_fc_filter, 0,
229     "Filter out packets with Ethertype 0x8808 from being sent out by non-HW sources");
230 #endif
231 
232 #ifdef IXL_DEBUG
233 static int ixl_debug_recovery_mode = 0;
234 TUNABLE_INT("hw.ixl.debug_recovery_mode",
235     &ixl_debug_recovery_mode);
236 SYSCTL_INT(_hw_ixl, OID_AUTO, debug_recovery_mode, CTLFLAG_RDTUN,
237     &ixl_debug_recovery_mode, 0,
238     "Act like when FW entered recovery mode (for debugging)");
239 #endif
240 
241 static int ixl_i2c_access_method = 0;
242 TUNABLE_INT("hw.ixl.i2c_access_method",
243     &ixl_i2c_access_method);
244 SYSCTL_INT(_hw_ixl, OID_AUTO, i2c_access_method, CTLFLAG_RDTUN,
245     &ixl_i2c_access_method, 0,
246     IXL_SYSCTL_HELP_I2C_METHOD);
247 
248 static int ixl_enable_vf_loopback = 1;
249 TUNABLE_INT("hw.ixl.enable_vf_loopback",
250     &ixl_enable_vf_loopback);
251 SYSCTL_INT(_hw_ixl, OID_AUTO, enable_vf_loopback, CTLFLAG_RDTUN,
252     &ixl_enable_vf_loopback, 0,
253     IXL_SYSCTL_HELP_VF_LOOPBACK);
254 
255 /*
256  * Different method for processing TX descriptor
257  * completion.
258  */
259 static int ixl_enable_head_writeback = 1;
260 TUNABLE_INT("hw.ixl.enable_head_writeback",
261     &ixl_enable_head_writeback);
262 SYSCTL_INT(_hw_ixl, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN,
263     &ixl_enable_head_writeback, 0,
264     "For detecting last completed TX descriptor by hardware, use value written by HW instead of checking descriptors");
265 
266 static int ixl_core_debug_mask = 0;
267 TUNABLE_INT("hw.ixl.core_debug_mask",
268     &ixl_core_debug_mask);
269 SYSCTL_INT(_hw_ixl, OID_AUTO, core_debug_mask, CTLFLAG_RDTUN,
270     &ixl_core_debug_mask, 0,
271     "Display debug statements that are printed in non-shared code");
272 
273 static int ixl_shared_debug_mask = 0;
274 TUNABLE_INT("hw.ixl.shared_debug_mask",
275     &ixl_shared_debug_mask);
276 SYSCTL_INT(_hw_ixl, OID_AUTO, shared_debug_mask, CTLFLAG_RDTUN,
277     &ixl_shared_debug_mask, 0,
278     "Display debug statements that are printed in shared code");
279 
280 #if 0
281 /*
282 ** Controls for Interrupt Throttling
283 **	- true/false for dynamic adjustment
284 ** 	- default values for static ITR
285 */
286 static int ixl_dynamic_rx_itr = 0;
287 TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
288 SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
289     &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");
290 
291 static int ixl_dynamic_tx_itr = 0;
292 TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
293 SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
294     &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
295 #endif
296 
297 static int ixl_rx_itr = IXL_ITR_8K;
298 TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
299 SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
300     &ixl_rx_itr, 0, "RX Interrupt Rate");
301 
302 static int ixl_tx_itr = IXL_ITR_4K;
303 TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
304 SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
305     &ixl_tx_itr, 0, "TX Interrupt Rate");
306 
307 static int ixl_flow_control = -1;
308 SYSCTL_INT(_hw_ixl, OID_AUTO, flow_control, CTLFLAG_RDTUN,
309     &ixl_flow_control, 0, "Initial Flow Control setting");
310 
311 #ifdef IXL_IW
312 int ixl_enable_iwarp = 0;
313 TUNABLE_INT("hw.ixl.enable_iwarp", &ixl_enable_iwarp);
314 SYSCTL_INT(_hw_ixl, OID_AUTO, enable_iwarp, CTLFLAG_RDTUN,
315     &ixl_enable_iwarp, 0, "iWARP enabled");
316 
317 int ixl_limit_iwarp_msix = IXL_IW_MAX_MSIX;
318 TUNABLE_INT("hw.ixl.limit_iwarp_msix", &ixl_limit_iwarp_msix);
319 SYSCTL_INT(_hw_ixl, OID_AUTO, limit_iwarp_msix, CTLFLAG_RDTUN,
320     &ixl_limit_iwarp_msix, 0, "Limit MSI-X vectors assigned to iWARP");
321 #endif
322 
323 extern struct if_txrx ixl_txrx_hwb;
324 extern struct if_txrx ixl_txrx_dwb;
325 
326 static struct if_shared_ctx ixl_sctx_init = {
327 	.isc_magic = IFLIB_MAGIC,
328 	.isc_q_align = PAGE_SIZE,
329 	.isc_tx_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
330 	.isc_tx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
331 	.isc_tso_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
332 	.isc_tso_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
333 	.isc_rx_maxsize = 16384,
334 	.isc_rx_nsegments = IXL_MAX_RX_SEGS,
335 	.isc_rx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
336 	.isc_nfl = 1,
337 	.isc_ntxqs = 1,
338 	.isc_nrxqs = 1,
339 
340 	.isc_admin_intrcnt = 1,
341 	.isc_vendor_info = ixl_vendor_info_array,
342 	.isc_driver_version = IXL_DRIVER_VERSION_STRING,
343 	.isc_driver = &ixl_if_driver,
344 	.isc_flags = IFLIB_NEED_SCRATCH | IFLIB_NEED_ZERO_CSUM | IFLIB_TSO_INIT_IP | IFLIB_ADMIN_ALWAYS_RUN,
345 
346 	.isc_nrxd_min = {IXL_MIN_RING},
347 	.isc_ntxd_min = {IXL_MIN_RING},
348 	.isc_nrxd_max = {IXL_MAX_RING},
349 	.isc_ntxd_max = {IXL_MAX_RING},
350 	.isc_nrxd_default = {IXL_DEFAULT_RING},
351 	.isc_ntxd_default = {IXL_DEFAULT_RING},
352 };
353 
354 /*** Functions ***/
/*
 * ixl_register - device_register method.
 *
 * Hands iflib the driver's shared context template so it can size
 * queues and DMA areas before attach.  The device argument is unused.
 */
static void *
ixl_register(device_t dev)
{
	return (&ixl_sctx_init);
}
360 
361 static int
362 ixl_allocate_pci_resources(struct ixl_pf *pf)
363 {
364 	device_t dev = iflib_get_dev(pf->vsi.ctx);
365 	struct i40e_hw *hw = &pf->hw;
366 	int             rid;
367 
368 	/* Map BAR0 */
369 	rid = PCIR_BAR(0);
370 	pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
371 	    &rid, RF_ACTIVE);
372 
373 	if (!(pf->pci_mem)) {
374 		device_printf(dev, "Unable to allocate bus resource: PCI memory\n");
375 		return (ENXIO);
376 	}
377 
378 	/* Save off the PCI information */
379 	hw->vendor_id = pci_get_vendor(dev);
380 	hw->device_id = pci_get_device(dev);
381 	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
382 	hw->subsystem_vendor_id =
383 	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
384 	hw->subsystem_device_id =
385 	    pci_read_config(dev, PCIR_SUBDEV_0, 2);
386 
387 	hw->bus.device = pci_get_slot(dev);
388 	hw->bus.func = pci_get_function(dev);
389 
390 	/* Save off register access information */
391 	pf->osdep.mem_bus_space_tag =
392 		rman_get_bustag(pf->pci_mem);
393 	pf->osdep.mem_bus_space_handle =
394 		rman_get_bushandle(pf->pci_mem);
395 	pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
396 	pf->osdep.flush_reg = I40E_GLGEN_STAT;
397 	pf->osdep.dev = dev;
398 
399 	pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
400 	pf->hw.back = &pf->osdep;
401 
402  	return (0);
403 }
404 
405 static void
406 ixl_setup_ssctx(struct ixl_pf *pf)
407 {
408 	if_softc_ctx_t scctx = pf->vsi.shared;
409 	struct i40e_hw *hw = &pf->hw;
410 
411 	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
412 		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 1;
413 		scctx->isc_ntxqsets = scctx->isc_nrxqsets = 1;
414 	} else if (hw->mac.type == I40E_MAC_X722)
415 		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 128;
416 	else
417 		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
418 
419 	if (pf->vsi.enable_head_writeback) {
420 		scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
421 		    * sizeof(struct i40e_tx_desc) + sizeof(u32), DBA_ALIGN);
422 		scctx->isc_txrx = &ixl_txrx_hwb;
423 	} else {
424 		scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
425 		    * sizeof(struct i40e_tx_desc), DBA_ALIGN);
426 		scctx->isc_txrx = &ixl_txrx_dwb;
427 	}
428 
429 	scctx->isc_txrx->ift_legacy_intr = ixl_intr;
430 	scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0]
431 	    * sizeof(union i40e_32byte_rx_desc), DBA_ALIGN);
432 	scctx->isc_msix_bar = PCIR_BAR(IXL_MSIX_BAR);
433 	scctx->isc_tx_nsegments = IXL_MAX_TX_SEGS;
434 	scctx->isc_tx_tso_segments_max = IXL_MAX_TSO_SEGS;
435 	scctx->isc_tx_tso_size_max = IXL_TSO_SIZE;
436 	scctx->isc_tx_tso_segsize_max = IXL_MAX_DMA_SEG_SIZE;
437 	scctx->isc_rss_table_size = pf->hw.func_caps.rss_table_size;
438 	scctx->isc_tx_csum_flags = CSUM_OFFLOAD;
439 	scctx->isc_capabilities = scctx->isc_capenable = IXL_CAPS;
440 }
441 
/*
 * ixl_admin_timer - periodic (hz/2) callout driving admin-queue work.
 *
 * Defers processing to the iflib admin task and re-arms itself.
 * Executes with pf->admin_mtx held (see callout_init_mtx in attach_pre),
 * so callout_stop under that mutex in detach synchronizes cleanly.
 */
static void
ixl_admin_timer(void *arg)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg;

	/* Fire off the admin task */
	iflib_admin_intr_deferred(pf->vsi.ctx);

	/* Reschedule the admin timer */
	callout_schedule(&pf->admin_timer, hz/2);
}
453 
454 static int
455 ixl_attach_pre_recovery_mode(struct ixl_pf *pf)
456 {
457 	struct ixl_vsi *vsi = &pf->vsi;
458 	struct i40e_hw *hw = &pf->hw;
459 	device_t dev = pf->dev;
460 
461 	device_printf(dev, "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
462 
463 	i40e_get_mac_addr(hw, hw->mac.addr);
464 
465 	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
466 		ixl_configure_intr0_msix(pf);
467 		ixl_enable_intr0(hw);
468 	}
469 
470 	ixl_setup_ssctx(pf);
471 
472 	return (0);
473 }
474 
/*
 * ixl_if_attach_pre - first phase of iflib attach.
 *
 * Performs everything that must happen before queue allocation: PCI
 * BAR mapping, PF reset, shared-code and admin-queue initialization,
 * firmware/NVM API compatibility checks, MAC address retrieval, filter
 * control setup, and population of the iflib shared context.  If the
 * firmware is in recovery mode, a reduced path is taken instead.
 *
 * Returns 0 on success or a standard errno; on failure, resources
 * acquired so far are released through the goto cleanup chain below.
 */
static int
ixl_if_attach_pre(if_ctx_t ctx)
{
	device_t dev;
	struct ixl_pf *pf;
	struct i40e_hw *hw;
	struct ixl_vsi *vsi;
	enum i40e_get_fw_lldp_status_resp lldp_status;
	struct i40e_filter_control_settings filter;
	enum i40e_status_code status;
	int error = 0;

	dev = iflib_get_dev(ctx);
	pf = iflib_get_softc(ctx);

	INIT_DBG_DEV(dev, "begin");

	/* Wire up back-pointers between the PF and its main VSI */
	vsi = &pf->vsi;
	vsi->back = pf;
	pf->dev = dev;
	hw = &pf->hw;

	vsi->dev = dev;
	vsi->hw = &pf->hw;
	vsi->id = 0;
	vsi->num_vlans = 0;
	vsi->ctx = ctx;
	vsi->media = iflib_get_media(ctx);
	vsi->shared = iflib_get_softc_ctx(ctx);

	/* Set up the admin timer lock/callout; it is started in attach_post */
	snprintf(pf->admin_mtx_name, sizeof(pf->admin_mtx_name),
	    "%s:admin", device_get_nameunit(dev));
	mtx_init(&pf->admin_mtx, pf->admin_mtx_name, NULL, MTX_DEF);
	callout_init_mtx(&pf->admin_timer, &pf->admin_mtx, 0);

	/* Save tunable values */
	ixl_save_pf_tunables(pf);

	/* Do PCI setup - map BAR0, etc */
	if (ixl_allocate_pci_resources(pf)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_pci_res;
	}

	/* Establish a clean starting point */
	i40e_clear_hw(hw);
	i40e_set_mac_type(hw);

	error = ixl_pf_reset(pf);
	if (error)
		goto err_out;

	/* Initialize the shared code */
	status = i40e_init_shared_code(hw);
	if (status) {
		device_printf(dev, "Unable to initialize shared code, error %s\n",
		    i40e_stat_str(hw, status));
		error = EIO;
		goto err_out;
	}

	/* Set up the admin queue */
	hw->aq.num_arq_entries = IXL_AQ_LEN;
	hw->aq.num_asq_entries = IXL_AQ_LEN;
	hw->aq.arq_buf_size = IXL_AQ_BUF_SZ;
	hw->aq.asq_buf_size = IXL_AQ_BUF_SZ;

	/*
	 * i40e_init_adminq() can bring the AQ up but still report an API
	 * version mismatch; that case is handled separately below so the
	 * NVM version can be printed first.
	 */
	status = i40e_init_adminq(hw);
	if (status != 0 && status != I40E_ERR_FIRMWARE_API_VERSION) {
		device_printf(dev, "Unable to initialize Admin Queue, error %s\n",
		    i40e_stat_str(hw, status));
		error = EIO;
		goto err_out;
	}
	ixl_print_nvm_version(pf);

	if (status == I40E_ERR_FIRMWARE_API_VERSION) {
		device_printf(dev, "The driver for the device stopped "
		    "because the NVM image is newer than expected.\n");
		device_printf(dev, "You must install the most recent version of "
		    "the network driver.\n");
		error = EIO;
		goto err_out;
	}

	/* Warn (but continue) on minor-version skew between FW and driver */
        if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw)) {
		device_printf(dev, "The driver for the device detected "
		    "a newer version of the NVM image than expected.\n");
		device_printf(dev, "Please install the most recent version "
		    "of the network driver.\n");
	} else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4) {
		device_printf(dev, "The driver for the device detected "
		    "an older version of the NVM image than expected.\n");
		device_printf(dev, "Please update the NVM image.\n");
	}

	/* Recovery mode: take the reduced bring-up path and return early */
	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
		error = ixl_attach_pre_recovery_mode(pf);
		if (error)
			goto err_out;
		return (error);
	}

	/* Clear PXE mode */
	i40e_clear_pxe_mode(hw);

	/* Get capabilities from the device */
	error = ixl_get_hw_capabilities(pf);
	if (error) {
		device_printf(dev, "get_hw_capabilities failed: %d\n",
		    error);
		goto err_get_cap;
	}

	/* Set up host memory cache */
	error = ixl_setup_hmc(pf);
	if (error)
		goto err_mac_hmc;

	/* Disable LLDP from the firmware for certain NVM versions */
	if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
	    (pf->hw.aq.fw_maj_ver < 4)) {
		i40e_aq_stop_lldp(hw, true, false, NULL);
		ixl_set_state(&pf->state, IXL_STATE_FW_LLDP_DISABLED);
	}

	/* Try enabling Energy Efficient Ethernet (EEE) mode */
	if (i40e_enable_eee(hw, true) == I40E_SUCCESS)
		ixl_set_state(&pf->state, IXL_STATE_EEE_ENABLED);
	else
		ixl_clear_state(&pf->state, IXL_STATE_EEE_ENABLED);

	/* Get MAC addresses from hardware */
	i40e_get_mac_addr(hw, hw->mac.addr);
	error = i40e_validate_mac_addr(hw->mac.addr);
	if (error) {
		device_printf(dev, "validate_mac_addr failed: %d\n", error);
		goto err_mac_hmc;
	}
	bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
	iflib_set_mac(ctx, hw->mac.addr);
	i40e_get_port_mac_addr(hw, hw->mac.port_addr);

	/* Set up the device filtering */
	bzero(&filter, sizeof(filter));
	filter.enable_ethtype = TRUE;
	filter.enable_macvlan = TRUE;
	filter.enable_fdir = FALSE;
	filter.hash_lut_size = I40E_HASH_LUT_SIZE_512;
	if (i40e_set_filter_control(hw, &filter))
		device_printf(dev, "i40e_set_filter_control() failed\n");

	/* Query device FW LLDP status */
	if (i40e_get_fw_lldp_status(hw, &lldp_status) == I40E_SUCCESS) {
		if (lldp_status == I40E_GET_FW_LLDP_STATUS_DISABLED) {
			ixl_set_state(&pf->state,
			    IXL_STATE_FW_LLDP_DISABLED);
		} else {
			ixl_clear_state(&pf->state,
			    IXL_STATE_FW_LLDP_DISABLED);
		}
	}

	/* Tell FW to apply DCB config on link up */
	i40e_aq_set_dcb_parameters(hw, true, NULL);

	/* Fill out iflib parameters */
	ixl_setup_ssctx(pf);

	INIT_DBG_DEV(dev, "end");
	return (0);

/* Unwind in reverse order of acquisition */
err_mac_hmc:
	ixl_shutdown_hmc(pf);
err_get_cap:
	i40e_shutdown_adminq(hw);
err_out:
	ixl_free_pci_resources(pf);
err_pci_res:
	mtx_lock(&pf->admin_mtx);
	callout_stop(&pf->admin_timer);
	mtx_unlock(&pf->admin_mtx);
	mtx_destroy(&pf->admin_mtx);
	return (error);
}
662 
/*
 * ixl_if_attach_post - second phase of iflib attach.
 *
 * Runs after iflib has allocated queues and interrupts: creates the
 * ifnet, configures the switch, reserves PF queues, sets up sysctls
 * and statistics, brings link state into line with settings, and
 * starts the admin timer.  In firmware recovery mode only a minimal
 * subset (interrupt 0, recovery sysctls, admin timer) is performed.
 *
 * Returns 0 on success or a standard errno; iflib calls
 * ixl_if_detach() on error, so no local unwinding is done here.
 */
static int
ixl_if_attach_post(if_ctx_t ctx)
{
	device_t dev;
	struct ixl_pf *pf;
	struct i40e_hw *hw;
	struct ixl_vsi *vsi;
	int error = 0;
	enum i40e_status_code status;

	dev = iflib_get_dev(ctx);
	pf = iflib_get_softc(ctx);

	INIT_DBG_DEV(dev, "begin");

	vsi = &pf->vsi;
	vsi->ifp = iflib_get_ifp(ctx);
	hw = &pf->hw;

	/* Save off determined number of queues for interface */
	vsi->num_rx_queues = vsi->shared->isc_nrxqsets;
	vsi->num_tx_queues = vsi->shared->isc_ntxqsets;

	/* Setup OS network interface / ifnet */
	if (ixl_setup_interface(dev, pf)) {
		device_printf(dev, "interface setup failed!\n");
		error = EIO;
		goto err;
	}

	/* Recovery mode: skip switch/queue/stats setup entirely */
	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
		/* Keep admin queue interrupts active while driver is loaded */
		if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
			ixl_configure_intr0_msix(pf);
			ixl_enable_intr0(hw);
		}

		ixl_add_sysctls_recovery_mode(pf);

		/* Start the admin timer */
		mtx_lock(&pf->admin_mtx);
		callout_reset(&pf->admin_timer, hz/2, ixl_admin_timer, pf);
		mtx_unlock(&pf->admin_mtx);
		return (0);
	}

	/* Determine link state */
	if (ixl_attach_get_link_status(pf)) {
		error = EINVAL;
		goto err;
	}

	error = ixl_switch_config(pf);
	if (error) {
		device_printf(dev, "Initial ixl_switch_config() failed: %d\n",
		     error);
		goto err;
	}

	/* Add protocol filters to list */
	ixl_init_filters(vsi);

	/* Init queue allocation manager */
	error = ixl_pf_qmgr_init(&pf->qmgr, hw->func_caps.num_tx_qp);
	if (error) {
		device_printf(dev, "Failed to init queue manager for PF queues, error %d\n",
		    error);
		goto err;
	}
	/* reserve a contiguous allocation for the PF's VSI */
	error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr,
	    max(vsi->num_rx_queues, vsi->num_tx_queues), &pf->qtag);
	if (error) {
		device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
		    error);
		goto err;
	}
	device_printf(dev, "Allocating %d queues for PF LAN VSI; %d queues active\n",
	    pf->qtag.num_allocated, pf->qtag.num_active);

	/* Limit PHY interrupts to link, autoneg, and modules failure */
	status = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
	    NULL);
        if (status) {
		device_printf(dev, "i40e_aq_set_phy_mask() failed: err %s,"
		    " aq_err %s\n", i40e_stat_str(hw, status),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		goto err;
	}

	/* Get the bus configuration and set the shared code */
	ixl_get_bus_info(pf);

	/* Keep admin queue interrupts active while driver is loaded */
	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
 		ixl_configure_intr0_msix(pf);
 		ixl_enable_intr0(hw);
	}

	/* Set initial advertised speed sysctl value */
	ixl_set_initial_advertised_speeds(pf);

	/* Initialize statistics & add sysctls */
	ixl_add_device_sysctls(pf);
	ixl_pf_reset_stats(pf);
	ixl_update_stats_counters(pf);
	ixl_add_hw_stats(pf);

	/*
	 * Driver may have been reloaded. Ensure that the link state
	 * is consistent with current settings.
	 */
	ixl_set_link(pf, ixl_test_state(&pf->state, IXL_STATE_LINK_ACTIVE_ON_DOWN));

	hw->phy.get_link_info = true;
	i40e_get_link_status(hw, &pf->link_up);
	ixl_update_link_status(pf);

#ifdef PCI_IOV
	ixl_initialize_sriov(pf);
#endif

#ifdef IXL_IW
	/* Attach the iWARP client only if FW reports the capability and
	 * MSI-X vectors were actually reserved for it. */
	if (hw->func_caps.iwarp && ixl_enable_iwarp) {
		pf->iw_enabled = (pf->iw_msix > 0) ? true : false;
		if (pf->iw_enabled) {
			error = ixl_iw_pf_attach(pf);
			if (error) {
				device_printf(dev,
				    "interfacing to iWARP driver failed: %d\n",
				    error);
				goto err;
			} else
				device_printf(dev, "iWARP ready\n");
		} else
			device_printf(dev, "iWARP disabled on this device "
			    "(no MSI-X vectors)\n");
	} else {
		pf->iw_enabled = false;
		device_printf(dev, "The device is not iWARP enabled\n");
	}
#endif
	/* Start the admin timer */
	mtx_lock(&pf->admin_mtx);
	callout_reset(&pf->admin_timer, hz/2, ixl_admin_timer, pf);
	mtx_unlock(&pf->admin_mtx);

	INIT_DBG_DEV(dev, "end");
	return (0);

err:
	INIT_DEBUGOUT("end: error %d", error);
	/* ixl_if_detach() is called on error from this */
	return (error);
}
818 
819 /**
820  * XXX: iflib always ignores the return value of detach()
821  * -> This means that this isn't allowed to fail
822  */
static int
ixl_if_detach(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	enum i40e_status_code	status;
#ifdef IXL_IW
	int			error;
#endif

	INIT_DBG_DEV(dev, "begin");

	/*
	 * Stop the admin timer first so no further admin tasks are queued
	 * while teardown proceeds; the callout runs under admin_mtx, so
	 * callout_stop under the lock is sufficient to synchronize.
	 */
	mtx_lock(&pf->admin_mtx);
	callout_stop(&pf->admin_timer);
	mtx_unlock(&pf->admin_mtx);
	mtx_destroy(&pf->admin_mtx);

#ifdef IXL_IW
	if (ixl_enable_iwarp && pf->iw_enabled) {
		error = ixl_iw_pf_detach(pf);
		if (error == EBUSY) {
			/* Detach cannot fail (iflib ignores the return),
			 * so log and press on even if iWARP is busy. */
			device_printf(dev, "iwarp in use; stop it first.\n");
			//return (error);
		}
	}
#endif
	/* Remove all previously allocated media types */
	ifmedia_removeall(vsi->media);

	/* Shutdown LAN HMC */
	ixl_shutdown_hmc(pf);

	/* Shutdown admin queue */
	ixl_disable_intr0(hw);
	status = i40e_shutdown_adminq(hw);
	if (status)
		device_printf(dev,
		    "i40e_shutdown_adminq() failed with status %s\n",
		    i40e_stat_str(hw, status));

	/* Release queue manager, BAR mapping, and filter list last */
	ixl_pf_qmgr_destroy(&pf->qmgr);
	ixl_free_pci_resources(pf);
	ixl_free_filters(&vsi->ftl);
	INIT_DBG_DEV(dev, "end");
	return (0);
}
872 
873 static int
874 ixl_if_shutdown(if_ctx_t ctx)
875 {
876 	int error = 0;
877 
878 	INIT_DEBUGOUT("ixl_if_shutdown: begin");
879 
880 	/* TODO: Call ixl_if_stop()? */
881 
882 	/* TODO: Then setup low power mode */
883 
884 	return (error);
885 }
886 
887 static int
888 ixl_if_suspend(if_ctx_t ctx)
889 {
890 	int error = 0;
891 
892 	INIT_DEBUGOUT("ixl_if_suspend: begin");
893 
894 	/* TODO: Call ixl_if_stop()? */
895 
896 	/* TODO: Then setup low power mode */
897 
898 	return (error);
899 }
900 
901 static int
902 ixl_if_resume(if_ctx_t ctx)
903 {
904 	if_t ifp = iflib_get_ifp(ctx);
905 
906 	INIT_DEBUGOUT("ixl_if_resume: begin");
907 
908 	/* Read & clear wake-up registers */
909 
910 	/* Required after D3->D0 transition */
911 	if (if_getflags(ifp) & IFF_UP)
912 		ixl_if_init(ctx);
913 
914 	return (0);
915 }
916 
/*
 * Bring the interface up: recover the admin queue if it died, program
 * the (possibly user-set) MAC address, set up the VSI rings and HMC
 * contexts, then restore multicast, RSS, interrupt, and filter state
 * in hardware.  Returns early, leaving the interface down, on any
 * fatal setup failure.
 */
void
ixl_if_init(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw	*hw = &pf->hw;
	if_t ifp = iflib_get_ifp(ctx);
	device_t 	dev = iflib_get_dev(ctx);
	u8		tmpaddr[ETHER_ADDR_LEN];
	int		ret;

	/* Nothing can be brought up while the FW is in recovery mode */
	if (IXL_PF_IN_RECOVERY_MODE(pf))
		return;
	/*
	 * If the aq is dead here, it probably means something outside of the driver
	 * did something to the adapter, like a PF reset.
	 * So, rebuild the driver's state here if that occurs.
	 */
	if (!i40e_check_asq_alive(&pf->hw)) {
		device_printf(dev, "Admin Queue is down; resetting...\n");
		ixl_teardown_hw_structs(pf);
		ixl_rebuild_hw_structs_after_reset(pf, false);
	}

	/* Get the latest mac address... User might use a LAA */
	bcopy(if_getlladdr(vsi->ifp), tmpaddr, ETH_ALEN);
	if (!ixl_ether_is_equal(hw->mac.addr, tmpaddr) &&
	    (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
		/* Address changed: drop old VLAN filters, then tell FW */
		ixl_del_all_vlan_filters(vsi, hw->mac.addr);
		bcopy(tmpaddr, hw->mac.addr, ETH_ALEN);
		ret = i40e_aq_mac_address_write(hw,
		    I40E_AQC_WRITE_TYPE_LAA_ONLY,
		    hw->mac.addr, NULL);
		if (ret) {
			device_printf(dev, "LLA address change failed!!\n");
			return;
		}
		/*
		 * New filters are configured by ixl_reconfigure_filters
		 * at the end of ixl_init_locked.
		 */
	}

	iflib_set_mac(ctx, hw->mac.addr);

	/* Prepare the VSI: rings, hmc contexts, etc... */
	if (ixl_initialize_vsi(vsi)) {
		device_printf(dev, "initialize vsi failed!!\n");
		return;
	}

	ixl_set_link(pf, true);

	/* Reconfigure multicast filters in HW */
	ixl_if_multi_set(ctx);

	/* Set up RSS */
	ixl_config_rss(pf);

	/* Set up MSI-X routing and the ITR settings */
	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
		ixl_configure_queue_intr_msix(pf);
		ixl_configure_itr(pf);
	} else
		ixl_configure_legacy(pf);

	/* TX completion tracking: head writeback vs. report-status array */
	if (vsi->enable_head_writeback)
		ixl_init_tx_cidx(vsi);
	else
		ixl_init_tx_rsqs(vsi);

	ixl_enable_rings(vsi);

	/* Make this VSI the default destination for unmatched traffic */
	i40e_aq_set_default_vsi(hw, vsi->seid, NULL);

	/* Re-add configure filters to HW */
	ixl_reconfigure_filters(vsi);

	/* Configure promiscuous mode */
	ixl_if_promisc_set(ctx, if_getflags(ifp));

#ifdef IXL_IW
	if (ixl_enable_iwarp && pf->iw_enabled) {
		ret = ixl_iw_pf_init(pf);
		if (ret)
			device_printf(dev,
			    "initialize iwarp failed, code %d\n", ret);
	}
#endif
}
1007 
/*
 * Bring the interface down: stop iWARP (if running), mask the queue
 * interrupts, disable the rings, and — unless the interface is only
 * being reconfigured — force the link down.
 */
void
ixl_if_stop(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	if_t ifp = iflib_get_ifp(ctx);
	struct ixl_vsi *vsi = &pf->vsi;

	INIT_DEBUGOUT("ixl_if_stop: begin");

	/* In recovery mode the rings were never brought up */
	if (IXL_PF_IN_RECOVERY_MODE(pf))
		return;

	// TODO: This may need to be reworked
#ifdef IXL_IW
	/* Stop iWARP device */
	if (ixl_enable_iwarp && pf->iw_enabled)
		ixl_iw_pf_stop(pf);
#endif

	ixl_disable_rings_intr(vsi);
	ixl_disable_rings(pf, vsi, &pf->qtag);

	/*
	 * Don't set link state if only reconfiguring
	 * e.g. on MTU change.
	 */
	if ((if_getflags(ifp) & IFF_UP) == 0 &&
	    !ixl_test_state(&pf->state, IXL_STATE_LINK_ACTIVE_ON_DOWN))
		ixl_set_link(pf, false);
}
1038 
/*
 * Assign MSI-X vectors: vector 0 is reserved for the admin queue,
 * vectors 1..n are bound to the RX queues, and each TX queue is
 * mapped (softirq only, no hardware vector of its own) onto the RX
 * vector with the same index modulo the number of RX queues.
 */
static int
ixl_if_msix_intr_assign(if_ctx_t ctx, int msix)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_rx_queue *rx_que = vsi->rx_queues;
	struct ixl_tx_queue *tx_que = vsi->tx_queues;
	int err, i, rid, vector = 0;
	char buf[16];

	MPASS(vsi->shared->isc_nrxqsets > 0);
	MPASS(vsi->shared->isc_ntxqsets > 0);

	/* Admin Que must use vector 0*/
	rid = vector + 1;
	err = iflib_irq_alloc_generic(ctx, &vsi->irq, rid, IFLIB_INTR_ADMIN,
	    ixl_msix_adminq, pf, 0, "aq");
	if (err) {
		iflib_irq_free(ctx, &vsi->irq);
		device_printf(iflib_get_dev(ctx),
		    "Failed to register Admin Que handler");
		return (err);
	}

#ifdef PCI_IOV
	/* Create soft IRQ for handling VFLRs */
	iflib_softirq_alloc_generic(ctx, NULL, IFLIB_INTR_IOV, pf, 0, "iov");
#endif

	/* Now set up the stations */
	for (i = 0, vector = 1; i < vsi->shared->isc_nrxqsets; i++, vector++, rx_que++) {
		rid = vector + 1;

		snprintf(buf, sizeof(buf), "rxq%d", i);
		err = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
		    IFLIB_INTR_RXTX, ixl_msix_que, rx_que, rx_que->rxr.me, buf);
		/* XXX: Does the driver work as expected if there are fewer num_rx_queues than
		 * what's expected in the iflib context? */
		if (err) {
			device_printf(iflib_get_dev(ctx),
			    "Failed to allocate queue RX int vector %d, err: %d\n", i, err);
			/*
			 * NOTE(review): this count includes queue i, whose irq
			 * allocation just failed, so the fail path below also
			 * calls iflib_irq_free() on it — presumably harmless
			 * for an unallocated irq; verify against iflib.
			 */
			vsi->num_rx_queues = i + 1;
			goto fail;
		}
		rx_que->msix = vector;
	}

	bzero(buf, sizeof(buf));

	for (i = 0; i < vsi->shared->isc_ntxqsets; i++, tx_que++) {
		snprintf(buf, sizeof(buf), "txq%d", i);
		/* TX work is run from its paired RX queue's interrupt */
		iflib_softirq_alloc_generic(ctx,
		    &vsi->rx_queues[i % vsi->shared->isc_nrxqsets].que_irq,
		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);

		/* TODO: Maybe call a strategy function for this to figure out which
		* interrupts to map Tx queues to. I don't know if there's an immediately
		* better way than this other than a user-supplied map, though. */
		tx_que->msix = (i % vsi->shared->isc_nrxqsets) + 1;
	}

	return (0);
fail:
	/* Release the admin irq and every RX irq recorded so far */
	iflib_irq_free(ctx, &vsi->irq);
	rx_que = vsi->rx_queues;
	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
		iflib_irq_free(ctx, &rx_que->que_irq);
	return (err);
}
1108 
1109 /*
1110  * Enable all interrupts
1111  *
1112  * Called in:
1113  * iflib_init_locked, after ixl_if_init()
1114  */
1115 static void
1116 ixl_if_enable_intr(if_ctx_t ctx)
1117 {
1118 	struct ixl_pf *pf = iflib_get_softc(ctx);
1119 	struct ixl_vsi *vsi = &pf->vsi;
1120 	struct i40e_hw		*hw = vsi->hw;
1121 	struct ixl_rx_queue	*que = vsi->rx_queues;
1122 
1123 	ixl_enable_intr0(hw);
1124 	/* Enable queue interrupts */
1125 	for (int i = 0; i < vsi->num_rx_queues; i++, que++)
1126 		/* TODO: Queue index parameter is probably wrong */
1127 		ixl_enable_queue(hw, que->rxr.me);
1128 }
1129 
1130 /*
1131  * Disable queue interrupts
1132  *
1133  * Other interrupt causes need to remain active.
1134  */
1135 static void
1136 ixl_if_disable_intr(if_ctx_t ctx)
1137 {
1138 	struct ixl_pf *pf = iflib_get_softc(ctx);
1139 	struct ixl_vsi *vsi = &pf->vsi;
1140 	struct i40e_hw		*hw = vsi->hw;
1141 	struct ixl_rx_queue	*rx_que = vsi->rx_queues;
1142 
1143 	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
1144 		for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
1145 			ixl_disable_queue(hw, rx_que->msix - 1);
1146 	} else {
1147 		// Set PFINT_LNKLST0 FIRSTQ_INDX to 0x7FF
1148 		// stops queues from triggering interrupts
1149 		wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
1150 	}
1151 }
1152 
1153 static int
1154 ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
1155 {
1156 	struct ixl_pf *pf = iflib_get_softc(ctx);
1157 	struct ixl_vsi *vsi = &pf->vsi;
1158 	struct i40e_hw		*hw = vsi->hw;
1159 	struct ixl_rx_queue	*rx_que = &vsi->rx_queues[rxqid];
1160 
1161 	ixl_enable_queue(hw, rx_que->msix - 1);
1162 	return (0);
1163 }
1164 
1165 static int
1166 ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
1167 {
1168 	struct ixl_pf *pf = iflib_get_softc(ctx);
1169 	struct ixl_vsi *vsi = &pf->vsi;
1170 	struct i40e_hw *hw = vsi->hw;
1171 	struct ixl_tx_queue *tx_que = &vsi->tx_queues[txqid];
1172 
1173 	ixl_enable_queue(hw, tx_que->msix - 1);
1174 	return (0);
1175 }
1176 
/*
 * Allocate software state for the TX queues: the ixl_tx_queue array
 * and, when head writeback is disabled, the per-queue report-status
 * (tx_rsq) arrays.  The descriptor rings themselves are provided by
 * iflib via vaddrs/paddrs.  Returns 0 or ENOMEM.
 */
static int
ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	if_softc_ctx_t scctx = vsi->shared;
	struct ixl_tx_queue *que;
	int i, j, error = 0;

	MPASS(scctx->isc_ntxqsets > 0);
	MPASS(ntxqs == 1);
	MPASS(scctx->isc_ntxqsets == ntxqsets);

	/* Allocate queue structure memory */
	if (!(vsi->tx_queues =
	    (struct ixl_tx_queue *) malloc(sizeof(struct ixl_tx_queue) *ntxqsets, M_IXL, M_NOWAIT | M_ZERO))) {
		device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n");
		return (ENOMEM);
	}

	for (i = 0, que = vsi->tx_queues; i < ntxqsets; i++, que++) {
		struct tx_ring *txr = &que->txr;

		txr->me = i;
		que->vsi = vsi;

		if (!vsi->enable_head_writeback) {
			/* Allocate report status array */
			if (!(txr->tx_rsq = malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXL, M_NOWAIT))) {
				device_printf(iflib_get_dev(ctx), "failed to allocate tx_rsq memory\n");
				error = ENOMEM;
				goto fail;
			}
			/* Init report status array */
			for (j = 0; j < scctx->isc_ntxd[0]; j++)
				txr->tx_rsq[j] = QIDX_INVALID;
		}
		/* get the virtual and physical address of the hardware queues */
		txr->tail = I40E_QTX_TAIL(txr->me);
		txr->tx_base = (struct i40e_tx_desc *)vaddrs[i * ntxqs];
		txr->tx_paddr = paddrs[i * ntxqs];
		txr->que = que;
	}

	return (0);
fail:
	/*
	 * Frees the queue array and any tx_rsq allocated so far; the
	 * M_ZERO allocation above leaves unvisited tx_rsq pointers NULL.
	 */
	ixl_if_queues_free(ctx);
	return (error);
}
1226 
/*
 * Allocate software state for the RX queues (the ixl_rx_queue array)
 * and bind each queue to its iflib-provided descriptor ring and tail
 * register.  Returns 0 or ENOMEM.
 */
static int
ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_rx_queue *que;
	int i, error = 0;

#ifdef INVARIANTS
	if_softc_ctx_t scctx = vsi->shared;
	MPASS(scctx->isc_nrxqsets > 0);
	MPASS(nrxqs == 1);
	MPASS(scctx->isc_nrxqsets == nrxqsets);
#endif

	/* Allocate queue structure memory */
	if (!(vsi->rx_queues =
	    (struct ixl_rx_queue *) malloc(sizeof(struct ixl_rx_queue) *
	    nrxqsets, M_IXL, M_NOWAIT | M_ZERO))) {
		device_printf(iflib_get_dev(ctx), "Unable to allocate RX ring memory\n");
		error = ENOMEM;
		goto fail;
	}

	for (i = 0, que = vsi->rx_queues; i < nrxqsets; i++, que++) {
		struct rx_ring *rxr = &que->rxr;

		rxr->me = i;
		que->vsi = vsi;

		/* get the virtual and physical address of the hardware queues */
		rxr->tail = I40E_QRX_TAIL(rxr->me);
		rxr->rx_base = (union i40e_rx_desc *)vaddrs[i * nrxqs];
		rxr->rx_paddr = paddrs[i * nrxqs];
		rxr->que = que;
	}

	return (0);
fail:
	/* Also releases any TX queue state allocated earlier */
	ixl_if_queues_free(ctx);
	return (error);
}
1269 
1270 static void
1271 ixl_if_queues_free(if_ctx_t ctx)
1272 {
1273 	struct ixl_pf *pf = iflib_get_softc(ctx);
1274 	struct ixl_vsi *vsi = &pf->vsi;
1275 
1276 	if (vsi->tx_queues != NULL && !vsi->enable_head_writeback) {
1277 		struct ixl_tx_queue *que;
1278 		int i = 0;
1279 
1280 		for (i = 0, que = vsi->tx_queues; i < vsi->num_tx_queues; i++, que++) {
1281 			struct tx_ring *txr = &que->txr;
1282 			if (txr->tx_rsq != NULL) {
1283 				free(txr->tx_rsq, M_IXL);
1284 				txr->tx_rsq = NULL;
1285 			}
1286 		}
1287 	}
1288 
1289 	if (vsi->tx_queues != NULL) {
1290 		free(vsi->tx_queues, M_IXL);
1291 		vsi->tx_queues = NULL;
1292 	}
1293 	if (vsi->rx_queues != NULL) {
1294 		free(vsi->rx_queues, M_IXL);
1295 		vsi->rx_queues = NULL;
1296 	}
1297 
1298 	if (!IXL_PF_IN_RECOVERY_MODE(pf))
1299 		sysctl_ctx_free(&vsi->sysctl_ctx);
1300 }
1301 
1302 void
1303 ixl_update_link_status(struct ixl_pf *pf)
1304 {
1305 	struct ixl_vsi *vsi = &pf->vsi;
1306 	struct i40e_hw *hw = &pf->hw;
1307 	u64 baudrate;
1308 
1309 	if (pf->link_up) {
1310 		if (vsi->link_active == FALSE) {
1311 			vsi->link_active = TRUE;
1312 			baudrate = ixl_max_aq_speed_to_value(hw->phy.link_info.link_speed);
1313 			iflib_link_state_change(vsi->ctx, LINK_STATE_UP, baudrate);
1314 			ixl_link_up_msg(pf);
1315 #ifdef PCI_IOV
1316 			ixl_broadcast_link_state(pf);
1317 #endif
1318 		}
1319 	} else { /* Link down */
1320 		if (vsi->link_active == TRUE) {
1321 			vsi->link_active = FALSE;
1322 			iflib_link_state_change(vsi->ctx, LINK_STATE_DOWN, 0);
1323 #ifdef PCI_IOV
1324 			ixl_broadcast_link_state(pf);
1325 #endif
1326 		}
1327 	}
1328 }
1329 
1330 static void
1331 ixl_handle_lan_overflow_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
1332 {
1333 	device_t dev = pf->dev;
1334 	u32 rxq_idx, qtx_ctl;
1335 
1336 	rxq_idx = (e->desc.params.external.param0 & I40E_PRTDCB_RUPTQ_RXQNUM_MASK) >>
1337 	    I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT;
1338 	qtx_ctl = e->desc.params.external.param1;
1339 
1340 	device_printf(dev, "LAN overflow event: global rxq_idx %d\n", rxq_idx);
1341 	device_printf(dev, "LAN overflow event: QTX_CTL 0x%08x\n", qtx_ctl);
1342 }
1343 
/*
 * Drain and dispatch pending Admin Queue events (link status, VF
 * mailbox messages, LAN overflow), then re-arm the AQ interrupt
 * cause.  On return *pending holds the number of events still
 * queued.  Returns 0 on success, ENOMEM if the event buffer could
 * not be allocated, or the status of the last
 * i40e_clean_arq_element() call.
 */
static int
ixl_process_adminq(struct ixl_pf *pf, u16 *pending)
{
	enum i40e_status_code status = I40E_SUCCESS;
	struct i40e_arq_event_info event;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	u16 opcode;
	u32 loop = 0, reg;

	event.buf_len = IXL_AQ_BUF_SZ;
	event.msg_buf = malloc(event.buf_len, M_IXL, M_NOWAIT | M_ZERO);
	if (!event.msg_buf) {
		device_printf(dev, "%s: Unable to allocate memory for Admin"
		    " Queue event!\n", __func__);
		return (ENOMEM);
	}

	/* clean and process any events; IXL_ADM_LIMIT bounds the work done */
	do {
		status = i40e_clean_arq_element(hw, &event, pending);
		if (status)
			break;
		opcode = LE16_TO_CPU(event.desc.opcode);
		ixl_dbg(pf, IXL_DBG_AQ,
		    "Admin Queue event: %#06x\n", opcode);
		switch (opcode) {
		case i40e_aqc_opc_get_link_status:
			ixl_link_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_pf:
#ifdef PCI_IOV
			ixl_handle_vf_msg(pf, &event);
#endif
			break;
		/*
		 * This should only occur on no-drop queues, which
		 * aren't currently configured.
		 */
		case i40e_aqc_opc_event_lan_overflow:
			ixl_handle_lan_overflow_event(pf, &event);
			break;
		default:
			break;
		}
	} while (*pending && (loop++ < IXL_ADM_LIMIT));

	free(event.msg_buf, M_IXL);

	/* Re-enable admin queue interrupt cause */
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	return (status);
}
1400 
/*
 * iflib admin task: finish any pending EMP reset, then handle MDD
 * events, process the Admin Queue, and refresh the link state.
 * Reschedules itself while events remain pending; otherwise
 * re-enables the admin interrupt.
 */
static void
ixl_if_update_admin_status(if_ctx_t ctx)
{
	struct ixl_pf	*pf = iflib_get_softc(ctx);
	struct i40e_hw	*hw = &pf->hw;
	u16		pending;

	if (IXL_PF_IS_RESETTING(pf))
		ixl_handle_empr_reset(pf);

	/*
	 * Admin Queue is shut down while handling reset.
	 * Don't proceed if it hasn't been re-initialized
	 * e.g due to an issue with new FW.
	 */
	if (!i40e_check_asq_alive(&pf->hw))
		return;

	if (ixl_test_state(&pf->state, IXL_STATE_MDD_PENDING))
		ixl_handle_mdd_event(pf);

	ixl_process_adminq(pf, &pending);
	ixl_update_link_status(pf);

	/*
	 * If there are still messages to process, reschedule ourselves.
	 * Otherwise, re-enable our interrupt and go to sleep.
	 */
	if (pending > 0)
		iflib_admin_intr_deferred(ctx);
	else
		ixl_enable_intr0(hw);
}
1434 
1435 static void
1436 ixl_if_multi_set(if_ctx_t ctx)
1437 {
1438 	struct ixl_pf *pf = iflib_get_softc(ctx);
1439 	struct ixl_vsi *vsi = &pf->vsi;
1440 	struct i40e_hw *hw = vsi->hw;
1441 	int mcnt;
1442 
1443 	IOCTL_DEBUGOUT("ixl_if_multi_set: begin");
1444 
1445 	/* Delete filters for removed multicast addresses */
1446 	ixl_del_multi(vsi, false);
1447 
1448 	mcnt = min(if_llmaddr_count(iflib_get_ifp(ctx)), MAX_MULTICAST_ADDR);
1449 	if (__predict_false(mcnt == MAX_MULTICAST_ADDR)) {
1450 		i40e_aq_set_vsi_multicast_promiscuous(hw,
1451 		    vsi->seid, TRUE, NULL);
1452 		ixl_del_multi(vsi, true);
1453 		return;
1454 	}
1455 
1456 	ixl_add_multi(vsi);
1457 	IOCTL_DEBUGOUT("ixl_if_multi_set: end");
1458 }
1459 
1460 static int
1461 ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
1462 {
1463 	struct ixl_pf *pf = iflib_get_softc(ctx);
1464 	struct ixl_vsi *vsi = &pf->vsi;
1465 
1466 	IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
1467 	if (mtu > IXL_MAX_FRAME - ETHER_HDR_LEN - ETHER_CRC_LEN -
1468 		ETHER_VLAN_ENCAP_LEN)
1469 		return (EINVAL);
1470 
1471 	vsi->shared->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
1472 		ETHER_VLAN_ENCAP_LEN;
1473 
1474 	return (0);
1475 }
1476 
/*
 * Report current media status to userland: link validity and
 * activity, duplex (always full), the active PHY type mapped onto an
 * IFM_* media word, and the negotiated flow-control pause settings.
 */
static void
ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct i40e_hw  *hw = &pf->hw;

	INIT_DEBUGOUT("ixl_media_status: begin");

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* With the link down no active media type can be reported */
	if (!pf->link_up) {
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	/* Hardware is always full-duplex */
	ifmr->ifm_active |= IFM_FDX;

	/* Translate the firmware's PHY type into an ifmedia word */
	switch (hw->phy.link_info.phy_type) {
		/* 100 M */
		case I40E_PHY_TYPE_100BASE_TX:
			ifmr->ifm_active |= IFM_100_TX;
			break;
		/* 1 G */
		case I40E_PHY_TYPE_1000BASE_T:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		case I40E_PHY_TYPE_1000BASE_SX:
			ifmr->ifm_active |= IFM_1000_SX;
			break;
		case I40E_PHY_TYPE_1000BASE_LX:
			ifmr->ifm_active |= IFM_1000_LX;
			break;
		case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		/* 2.5 G */
		case I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS:
			ifmr->ifm_active |= IFM_2500_T;
			break;
		/* 5 G */
		case I40E_PHY_TYPE_5GBASE_T_LINK_STATUS:
			ifmr->ifm_active |= IFM_5000_T;
			break;
		/* 10 G */
		case I40E_PHY_TYPE_10GBASE_SFPP_CU:
			ifmr->ifm_active |= IFM_10G_TWINAX;
			break;
		case I40E_PHY_TYPE_10GBASE_SR:
			ifmr->ifm_active |= IFM_10G_SR;
			break;
		case I40E_PHY_TYPE_10GBASE_LR:
			ifmr->ifm_active |= IFM_10G_LR;
			break;
		case I40E_PHY_TYPE_10GBASE_T:
			ifmr->ifm_active |= IFM_10G_T;
			break;
		case I40E_PHY_TYPE_XAUI:
		case I40E_PHY_TYPE_XFI:
			ifmr->ifm_active |= IFM_10G_TWINAX;
			break;
		case I40E_PHY_TYPE_10GBASE_AOC:
			ifmr->ifm_active |= IFM_10G_AOC;
			break;
		/* 25 G */
		case I40E_PHY_TYPE_25GBASE_KR:
			ifmr->ifm_active |= IFM_25G_KR;
			break;
		case I40E_PHY_TYPE_25GBASE_CR:
			ifmr->ifm_active |= IFM_25G_CR;
			break;
		case I40E_PHY_TYPE_25GBASE_SR:
			ifmr->ifm_active |= IFM_25G_SR;
			break;
		case I40E_PHY_TYPE_25GBASE_LR:
			ifmr->ifm_active |= IFM_25G_LR;
			break;
		case I40E_PHY_TYPE_25GBASE_AOC:
			ifmr->ifm_active |= IFM_25G_AOC;
			break;
		case I40E_PHY_TYPE_25GBASE_ACC:
			ifmr->ifm_active |= IFM_25G_ACC;
			break;
		/* 40 G */
		case I40E_PHY_TYPE_40GBASE_CR4:
		case I40E_PHY_TYPE_40GBASE_CR4_CU:
			ifmr->ifm_active |= IFM_40G_CR4;
			break;
		case I40E_PHY_TYPE_40GBASE_SR4:
			ifmr->ifm_active |= IFM_40G_SR4;
			break;
		case I40E_PHY_TYPE_40GBASE_LR4:
			ifmr->ifm_active |= IFM_40G_LR4;
			break;
		case I40E_PHY_TYPE_XLAUI:
			ifmr->ifm_active |= IFM_OTHER;
			break;
		case I40E_PHY_TYPE_1000BASE_KX:
			ifmr->ifm_active |= IFM_1000_KX;
			break;
		case I40E_PHY_TYPE_SGMII:
			ifmr->ifm_active |= IFM_1000_SGMII;
			break;
		/* ERJ: What's the difference between these? */
		case I40E_PHY_TYPE_10GBASE_CR1_CU:
		case I40E_PHY_TYPE_10GBASE_CR1:
			ifmr->ifm_active |= IFM_10G_CR1;
			break;
		case I40E_PHY_TYPE_10GBASE_KX4:
			ifmr->ifm_active |= IFM_10G_KX4;
			break;
		case I40E_PHY_TYPE_10GBASE_KR:
			ifmr->ifm_active |= IFM_10G_KR;
			break;
		case I40E_PHY_TYPE_SFI:
			ifmr->ifm_active |= IFM_10G_SFI;
			break;
		/* Our single 20G media type */
		case I40E_PHY_TYPE_20GBASE_KR2:
			ifmr->ifm_active |= IFM_20G_KR2;
			break;
		case I40E_PHY_TYPE_40GBASE_KR4:
			ifmr->ifm_active |= IFM_40G_KR4;
			break;
		case I40E_PHY_TYPE_XLPPI:
		case I40E_PHY_TYPE_40GBASE_AOC:
			ifmr->ifm_active |= IFM_40G_XLPPI;
			break;
		/* Unknown to driver */
		default:
			ifmr->ifm_active |= IFM_UNKNOWN;
			break;
	}
	/* Report flow control status as well */
	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
}
1617 
1618 static int
1619 ixl_if_media_change(if_ctx_t ctx)
1620 {
1621 	struct ifmedia *ifm = iflib_get_media(ctx);
1622 
1623 	INIT_DEBUGOUT("ixl_media_change: begin");
1624 
1625 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1626 		return (EINVAL);
1627 
1628 	if_printf(iflib_get_ifp(ctx), "Media change is not supported.\n");
1629 	return (ENODEV);
1630 }
1631 
1632 static int
1633 ixl_if_promisc_set(if_ctx_t ctx, int flags)
1634 {
1635 	struct ixl_pf *pf = iflib_get_softc(ctx);
1636 	struct ixl_vsi *vsi = &pf->vsi;
1637 	if_t ifp = iflib_get_ifp(ctx);
1638 	struct i40e_hw	*hw = vsi->hw;
1639 	int		err;
1640 	bool		uni = FALSE, multi = FALSE;
1641 
1642 	if (flags & IFF_PROMISC)
1643 		uni = multi = TRUE;
1644 	else if (flags & IFF_ALLMULTI || if_llmaddr_count(ifp) >=
1645 	    MAX_MULTICAST_ADDR)
1646 		multi = TRUE;
1647 
1648 	err = i40e_aq_set_vsi_unicast_promiscuous(hw,
1649 	    vsi->seid, uni, NULL, true);
1650 	if (err)
1651 		return (err);
1652 	err = i40e_aq_set_vsi_multicast_promiscuous(hw,
1653 	    vsi->seid, multi, NULL);
1654 	return (err);
1655 }
1656 
1657 static void
1658 ixl_if_timer(if_ctx_t ctx, uint16_t qid)
1659 {
1660 	struct ixl_pf *pf = iflib_get_softc(ctx);
1661 
1662 	if (qid != 0)
1663 		return;
1664 
1665 	ixl_update_stats_counters(pf);
1666 }
1667 
/*
 * VLAN registration hook.  Records the VLAN in vlans_map and, when
 * VLAN_HWFILTER is enabled, installs a matching HW MAC/VLAN filter —
 * falling back to a single "any VLAN" filter once the HW filter
 * table is exhausted.
 */
static void
ixl_if_vlan_register(if_ctx_t ctx, u16 vtag)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw	*hw = vsi->hw;
	if_t ifp = iflib_get_ifp(ctx);

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	/*
	 * Keep track of registered VLANS to know what
	 * filters have to be configured when VLAN_HWFILTER
	 * capability is enabled.
	 */
	++vsi->num_vlans;
	bit_set(vsi->vlans_map, vtag);

	/* Without HW filtering there is nothing to program */
	if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0)
		return;

	if (vsi->num_vlans < IXL_MAX_VLAN_FILTERS)
		ixl_add_filter(vsi, hw->mac.addr, vtag);
	else if (vsi->num_vlans == IXL_MAX_VLAN_FILTERS) {
		/*
		 * There is not enough HW resources to add filters
		 * for all registered VLANs. Re-configure filtering
		 * to allow reception of all expected traffic.
		 */
		device_printf(vsi->dev,
		    "Not enough HW filters for all VLANs. VLAN HW filtering disabled");
		ixl_del_all_vlan_filters(vsi, hw->mac.addr);
		ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
	}
}
1704 
/*
 * VLAN deregistration hook.  Clears the VLAN from vlans_map and,
 * when VLAN_HWFILTER is enabled, removes the HW filter — switching
 * back from the "any VLAN" fallback to per-VLAN filters once enough
 * filter slots are free again.
 */
static void
ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw	*hw = vsi->hw;
	if_t ifp = iflib_get_ifp(ctx);

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	--vsi->num_vlans;
	bit_clear(vsi->vlans_map, vtag);

	/* Without HW filtering there is nothing to unprogram */
	if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0)
		return;

	/* One filter is used for untagged frames */
	if (vsi->num_vlans < IXL_MAX_VLAN_FILTERS - 1)
		ixl_del_filter(vsi, hw->mac.addr, vtag);
	else if (vsi->num_vlans == IXL_MAX_VLAN_FILTERS - 1) {
		ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
		ixl_add_vlan_filters(vsi, hw->mac.addr);
	}
}
1730 
1731 static uint64_t
1732 ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1733 {
1734 	struct ixl_pf *pf = iflib_get_softc(ctx);
1735 	struct ixl_vsi *vsi = &pf->vsi;
1736 	if_t ifp = iflib_get_ifp(ctx);
1737 
1738 	switch (cnt) {
1739 	case IFCOUNTER_IPACKETS:
1740 		return (vsi->ipackets);
1741 	case IFCOUNTER_IERRORS:
1742 		return (vsi->ierrors);
1743 	case IFCOUNTER_OPACKETS:
1744 		return (vsi->opackets);
1745 	case IFCOUNTER_OERRORS:
1746 		return (vsi->oerrors);
1747 	case IFCOUNTER_COLLISIONS:
1748 		/* Collisions are by standard impossible in 40G/10G Ethernet */
1749 		return (0);
1750 	case IFCOUNTER_IBYTES:
1751 		return (vsi->ibytes);
1752 	case IFCOUNTER_OBYTES:
1753 		return (vsi->obytes);
1754 	case IFCOUNTER_IMCASTS:
1755 		return (vsi->imcasts);
1756 	case IFCOUNTER_OMCASTS:
1757 		return (vsi->omcasts);
1758 	case IFCOUNTER_IQDROPS:
1759 		return (vsi->iqdrops);
1760 	case IFCOUNTER_OQDROPS:
1761 		return (vsi->oqdrops);
1762 	case IFCOUNTER_NOPROTO:
1763 		return (vsi->noproto);
1764 	default:
1765 		return (if_get_counter_default(ifp, cnt));
1766 	}
1767 }
1768 
1769 #ifdef PCI_IOV
1770 static void
1771 ixl_if_vflr_handle(if_ctx_t ctx)
1772 {
1773 	struct ixl_pf *pf = iflib_get_softc(ctx);
1774 
1775 	ixl_handle_vflr(pf);
1776 }
1777 #endif
1778 
1779 static int
1780 ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
1781 {
1782 	struct ixl_pf		*pf = iflib_get_softc(ctx);
1783 
1784 	if (pf->read_i2c_byte == NULL)
1785 		return (EINVAL);
1786 
1787 	for (int i = 0; i < req->len; i++)
1788 		if (pf->read_i2c_byte(pf, req->offset + i,
1789 		    req->dev_addr, &req->data[i]))
1790 			return (EIO);
1791 	return (0);
1792 }
1793 
1794 static int
1795 ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data)
1796 {
1797 	struct ixl_pf *pf = iflib_get_softc(ctx);
1798 	struct ifdrv *ifd = (struct ifdrv *)data;
1799 	int error = 0;
1800 
1801 	/*
1802 	 * The iflib_if_ioctl forwards SIOCxDRVSPEC and SIOGPRIVATE_0 without
1803 	 * performing privilege checks. It is important that this function
1804 	 * perform the necessary checks for commands which should only be
1805 	 * executed by privileged threads.
1806 	 */
1807 
1808 	switch(command) {
1809 	case SIOCGDRVSPEC:
1810 	case SIOCSDRVSPEC:
1811 		/* NVM update command */
1812 		if (ifd->ifd_cmd == I40E_NVM_ACCESS) {
1813 			error = priv_check(curthread, PRIV_DRIVER);
1814 			if (error)
1815 				break;
1816 			error = ixl_handle_nvmupd_cmd(pf, ifd);
1817 		} else {
1818 			error = EINVAL;
1819 		}
1820 		break;
1821 	default:
1822 		error = EOPNOTSUPP;
1823 	}
1824 
1825 	return (error);
1826 }
1827 
1828 /* ixl_if_needs_restart - Tell iflib when the driver needs to be reinitialized
1829  * @ctx: iflib context
1830  * @event: event code to check
1831  *
1832  * Defaults to returning false for every event.
1833  *
1834  * @returns true if iflib needs to reinit the interface, false otherwise
1835  */
1836 static bool
1837 ixl_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
1838 {
1839 	switch (event) {
1840 	case IFLIB_RESTART_VLAN_CONFIG:
1841 	default:
1842 		return (false);
1843 	}
1844 }
1845 
1846 /*
1847  * Sanity check and save off tunable values.
1848  */
1849 static void
1850 ixl_save_pf_tunables(struct ixl_pf *pf)
1851 {
1852 	device_t dev = pf->dev;
1853 
1854 	/* Save tunable information */
1855 #ifdef IXL_DEBUG_FC
1856 	pf->enable_tx_fc_filter = ixl_enable_tx_fc_filter;
1857 #endif
1858 #ifdef IXL_DEBUG
1859 	pf->recovery_mode = ixl_debug_recovery_mode;
1860 #endif
1861 	pf->dbg_mask = ixl_core_debug_mask;
1862 	pf->hw.debug_mask = ixl_shared_debug_mask;
1863 	pf->vsi.enable_head_writeback = !!(ixl_enable_head_writeback);
1864 	pf->enable_vf_loopback = !!(ixl_enable_vf_loopback);
1865 #if 0
1866 	pf->dynamic_rx_itr = ixl_dynamic_rx_itr;
1867 	pf->dynamic_tx_itr = ixl_dynamic_tx_itr;
1868 #endif
1869 
1870 	if (ixl_i2c_access_method > 3 || ixl_i2c_access_method < 0)
1871 		pf->i2c_access_method = 0;
1872 	else
1873 		pf->i2c_access_method = ixl_i2c_access_method;
1874 
1875 	if (ixl_tx_itr < 0 || ixl_tx_itr > IXL_MAX_ITR) {
1876 		device_printf(dev, "Invalid tx_itr value of %d set!\n",
1877 		    ixl_tx_itr);
1878 		device_printf(dev, "tx_itr must be between %d and %d, "
1879 		    "inclusive\n",
1880 		    0, IXL_MAX_ITR);
1881 		device_printf(dev, "Using default value of %d instead\n",
1882 		    IXL_ITR_4K);
1883 		pf->tx_itr = IXL_ITR_4K;
1884 	} else
1885 		pf->tx_itr = ixl_tx_itr;
1886 
1887 	if (ixl_rx_itr < 0 || ixl_rx_itr > IXL_MAX_ITR) {
1888 		device_printf(dev, "Invalid rx_itr value of %d set!\n",
1889 		    ixl_rx_itr);
1890 		device_printf(dev, "rx_itr must be between %d and %d, "
1891 		    "inclusive\n",
1892 		    0, IXL_MAX_ITR);
1893 		device_printf(dev, "Using default value of %d instead\n",
1894 		    IXL_ITR_8K);
1895 		pf->rx_itr = IXL_ITR_8K;
1896 	} else
1897 		pf->rx_itr = ixl_rx_itr;
1898 
1899 	pf->fc = -1;
1900 	if (ixl_flow_control != -1) {
1901 		if (ixl_flow_control < 0 || ixl_flow_control > 3) {
1902 			device_printf(dev,
1903 			    "Invalid flow_control value of %d set!\n",
1904 			    ixl_flow_control);
1905 			device_printf(dev,
1906 			    "flow_control must be between %d and %d, "
1907 			    "inclusive\n", 0, 3);
1908 			device_printf(dev,
1909 			    "Using default configuration instead\n");
1910 		} else
1911 			pf->fc = ixl_flow_control;
1912 	}
1913 }
1914 
1915