/******************************************************************************

  Copyright (c) 2013-2018, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "ixl.h"
#include "ixl_pf.h"

#ifdef IXL_IW
#include "ixl_iw.h"
#include "ixl_iw_int.h"
#endif

#ifdef PCI_IOV
#include "ixl_pf_iov.h"
#endif

/*********************************************************************
 *  Driver version
 *********************************************************************/
#define IXL_DRIVER_VERSION_MAJOR	2
#define IXL_DRIVER_VERSION_MINOR	3
#define IXL_DRIVER_VERSION_BUILD	2

#define IXL_DRIVER_VERSION_STRING			\
    __XSTRING(IXL_DRIVER_VERSION_MAJOR) "."		\
    __XSTRING(IXL_DRIVER_VERSION_MINOR) "."		\
    __XSTRING(IXL_DRIVER_VERSION_BUILD) "-k"

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *
 *  ( Vendor ID, Device ID, Branding String )
 *********************************************************************/

static pci_vendor_info_t ixl_vendor_info_array[] =
{
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, "Intel(R) Ethernet Controller X710 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, "Intel(R) Ethernet Controller XL710 for 40GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, "Intel(R) Ethernet Controller X710 for 10GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, "Intel(R) Ethernet Controller X710 for 10GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, "Intel(R) Ethernet Controller X710 for 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, "Intel(R) Ethernet Controller X710/X557-AT 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722, "Intel(R) Ethernet Connection X722 for 10GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 1GbE"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B, "Intel(R) Ethernet Controller XXV710 for 25GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28, "Intel(R) Ethernet Controller XXV710 for 25GbE SFP28"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_BC, "Intel(R) Ethernet Controller X710 for 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_SFP, "Intel(R) Ethernet Controller X710 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_B, "Intel(R) Ethernet Controller X710 for 10GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_5G_BASE_T_BC, "Intel(R) Ethernet Controller V710 for 5GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_BC, "Intel(R) Ethernet Controller I710 for 1GBASE-T"),
	/* required last entry */
	PVID_END
};

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
/*** IFLIB interface ***/
static void	*ixl_register(device_t dev);
static int	 ixl_if_attach_pre(if_ctx_t ctx);
static int	 ixl_if_attach_post(if_ctx_t ctx);
static int	 ixl_if_detach(if_ctx_t ctx);
static int	 ixl_if_shutdown(if_ctx_t ctx);
static int	 ixl_if_suspend(if_ctx_t ctx);
static int	 ixl_if_resume(if_ctx_t ctx);
static int	 ixl_if_msix_intr_assign(if_ctx_t ctx, int msix);
static void	 ixl_if_enable_intr(if_ctx_t ctx);
static void	 ixl_if_disable_intr(if_ctx_t ctx);
static int	 ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid);
static int	 ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid);
static int	 ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets);
static int	 ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
static void	 ixl_if_queues_free(if_ctx_t ctx);
static void	 ixl_if_update_admin_status(if_ctx_t ctx);
static void	 ixl_if_multi_set(if_ctx_t ctx);
static int	 ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
static void	 ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr);
static int	 ixl_if_media_change(if_ctx_t ctx);
static int	 ixl_if_promisc_set(if_ctx_t ctx, int flags);
static void	 ixl_if_timer(if_ctx_t ctx, uint16_t qid);
static void	 ixl_if_vlan_register(if_ctx_t ctx, u16 vtag);
static void	 ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
static uint64_t	 ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt);
static int	 ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
static int	 ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data);
static bool	 ixl_if_needs_restart(if_ctx_t ctx, enum iflib_restart_event event);
#ifdef PCI_IOV
static void	 ixl_if_vflr_handle(if_ctx_t ctx);
#endif

/*** Other ***/
static void	 ixl_save_pf_tunables(struct ixl_pf *);
static int	 ixl_allocate_pci_resources(struct ixl_pf *);
static void	 ixl_setup_ssctx(struct ixl_pf *pf);
static void	 ixl_admin_timer(void *arg);

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixl_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, ixl_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
#ifdef PCI_IOV
	DEVMETHOD(pci_iov_init, iflib_device_iov_init),
	DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
#endif
	DEVMETHOD_END
};

static driver_t ixl_driver = {
	"ixl", ixl_methods, sizeof(struct ixl_pf),
};

DRIVER_MODULE(ixl, pci, ixl_driver, 0, 0);
IFLIB_PNP_INFO(pci, ixl, ixl_vendor_info_array);
MODULE_VERSION(ixl, 3);

MODULE_DEPEND(ixl, pci, 1, 1, 1);
MODULE_DEPEND(ixl, ether, 1, 1, 1);
MODULE_DEPEND(ixl, iflib, 1, 1, 1);

static device_method_t ixl_if_methods[] = {
	DEVMETHOD(ifdi_attach_pre, ixl_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, ixl_if_attach_post),
	DEVMETHOD(ifdi_detach, ixl_if_detach),
	DEVMETHOD(ifdi_shutdown, ixl_if_shutdown),
	DEVMETHOD(ifdi_suspend, ixl_if_suspend),
	DEVMETHOD(ifdi_resume, ixl_if_resume),
	DEVMETHOD(ifdi_init, ixl_if_init),
	DEVMETHOD(ifdi_stop, ixl_if_stop),
	DEVMETHOD(ifdi_msix_intr_assign, ixl_if_msix_intr_assign),
	DEVMETHOD(ifdi_intr_enable, ixl_if_enable_intr),
	DEVMETHOD(ifdi_intr_disable, ixl_if_disable_intr),
	DEVMETHOD(ifdi_rx_queue_intr_enable, ixl_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queue_intr_enable, ixl_if_tx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queues_alloc, ixl_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, ixl_if_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, ixl_if_queues_free),
	DEVMETHOD(ifdi_update_admin_status, ixl_if_update_admin_status),
	DEVMETHOD(ifdi_multi_set, ixl_if_multi_set),
	DEVMETHOD(ifdi_mtu_set, ixl_if_mtu_set),
	DEVMETHOD(ifdi_media_status, ixl_if_media_status),
	DEVMETHOD(ifdi_media_change, ixl_if_media_change),
	DEVMETHOD(ifdi_promisc_set, ixl_if_promisc_set),
	DEVMETHOD(ifdi_timer, ixl_if_timer),
	DEVMETHOD(ifdi_vlan_register, ixl_if_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister, ixl_if_vlan_unregister),
	DEVMETHOD(ifdi_get_counter, ixl_if_get_counter),
	DEVMETHOD(ifdi_i2c_req, ixl_if_i2c_req),
	DEVMETHOD(ifdi_priv_ioctl, ixl_if_priv_ioctl),
	DEVMETHOD(ifdi_needs_restart, ixl_if_needs_restart),
#ifdef PCI_IOV
	DEVMETHOD(ifdi_iov_init, ixl_if_iov_init),
	DEVMETHOD(ifdi_iov_uninit, ixl_if_iov_uninit),
	DEVMETHOD(ifdi_iov_vf_add, ixl_if_iov_vf_add),
	DEVMETHOD(ifdi_vflr_handle, ixl_if_vflr_handle),
#endif
	// ifdi_led_func
	// ifdi_debug
	DEVMETHOD_END
};

static driver_t ixl_if_driver = {
	"ixl_if", ixl_if_methods, sizeof(struct ixl_pf)
};

/*
** TUNEABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "ixl driver parameters");

#ifdef IXL_DEBUG_FC
/*
 * Leave this on unless you need to send flow control
 * frames (or other control frames) from software
 */
static int ixl_enable_tx_fc_filter = 1;
TUNABLE_INT("hw.ixl.enable_tx_fc_filter",
    &ixl_enable_tx_fc_filter);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_tx_fc_filter, CTLFLAG_RDTUN,
    &ixl_enable_tx_fc_filter, 0,
    "Filter out packets with Ethertype 0x8808 from being sent out by non-HW sources");
#endif

#ifdef IXL_DEBUG
static int ixl_debug_recovery_mode = 0;
TUNABLE_INT("hw.ixl.debug_recovery_mode",
    &ixl_debug_recovery_mode);
SYSCTL_INT(_hw_ixl, OID_AUTO, debug_recovery_mode, CTLFLAG_RDTUN,
    &ixl_debug_recovery_mode, 0,
    "Act as if FW had entered recovery mode (for debugging)");
#endif

static int ixl_i2c_access_method = 0;
TUNABLE_INT("hw.ixl.i2c_access_method",
    &ixl_i2c_access_method);
SYSCTL_INT(_hw_ixl, OID_AUTO, i2c_access_method, CTLFLAG_RDTUN,
    &ixl_i2c_access_method, 0,
    IXL_SYSCTL_HELP_I2C_METHOD);

static int ixl_enable_vf_loopback = 1;
TUNABLE_INT("hw.ixl.enable_vf_loopback",
    &ixl_enable_vf_loopback);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_vf_loopback, CTLFLAG_RDTUN,
    &ixl_enable_vf_loopback, 0,
    IXL_SYSCTL_HELP_VF_LOOPBACK);

/*
 * Different method for processing TX descriptor
 * completion.
 */
static int ixl_enable_head_writeback = 1;
TUNABLE_INT("hw.ixl.enable_head_writeback",
    &ixl_enable_head_writeback);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN,
    &ixl_enable_head_writeback, 0,
    "Detect the last completed TX descriptor from the head value written back by HW instead of scanning the descriptors");

static int ixl_core_debug_mask = 0;
TUNABLE_INT("hw.ixl.core_debug_mask",
    &ixl_core_debug_mask);
SYSCTL_INT(_hw_ixl, OID_AUTO, core_debug_mask, CTLFLAG_RDTUN,
    &ixl_core_debug_mask, 0,
    "Display debug statements that are printed in non-shared code");

static int ixl_shared_debug_mask = 0;
TUNABLE_INT("hw.ixl.shared_debug_mask",
    &ixl_shared_debug_mask);
SYSCTL_INT(_hw_ixl, OID_AUTO, shared_debug_mask, CTLFLAG_RDTUN,
    &ixl_shared_debug_mask, 0,
    "Display debug statements that are printed in shared code");

#if 0
/*
** Controls for Interrupt Throttling
**	- true/false for dynamic adjustment
** 	- default values for static ITR
*/
static int ixl_dynamic_rx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");

static int ixl_dynamic_tx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
#endif

static int ixl_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
    &ixl_rx_itr, 0, "RX Interrupt Rate");

static int ixl_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
    &ixl_tx_itr, 0, "TX Interrupt Rate");

static int ixl_flow_control = -1;
SYSCTL_INT(_hw_ixl, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    &ixl_flow_control, 0, "Initial Flow Control setting");

#ifdef IXL_IW
int ixl_enable_iwarp = 0;
TUNABLE_INT("hw.ixl.enable_iwarp", &ixl_enable_iwarp);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_iwarp, CTLFLAG_RDTUN,
    &ixl_enable_iwarp, 0, "iWARP enabled");

#if __FreeBSD_version < 1100000
int ixl_limit_iwarp_msix = 1;
#else
int ixl_limit_iwarp_msix = IXL_IW_MAX_MSIX;
#endif
TUNABLE_INT("hw.ixl.limit_iwarp_msix", &ixl_limit_iwarp_msix);
SYSCTL_INT(_hw_ixl, OID_AUTO, limit_iwarp_msix, CTLFLAG_RDTUN,
    &ixl_limit_iwarp_msix, 0, "Limit MSI-X vectors assigned to iWARP");
#endif

extern struct if_txrx ixl_txrx_hwb;
extern struct if_txrx ixl_txrx_dwb;

static struct if_shared_ctx ixl_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_q_align = PAGE_SIZE,
	.isc_tx_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
	.isc_tso_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
	.isc_rx_maxsize = 16384,
	.isc_rx_nsegments = IXL_MAX_RX_SEGS,
	.isc_rx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
	.isc_nfl = 1,
	.isc_ntxqs = 1,
	.isc_nrxqs = 1,

	.isc_admin_intrcnt = 1,
	.isc_vendor_info = ixl_vendor_info_array,
	.isc_driver_version = IXL_DRIVER_VERSION_STRING,
	.isc_driver = &ixl_if_driver,
	.isc_flags = IFLIB_NEED_SCRATCH | IFLIB_NEED_ZERO_CSUM | IFLIB_TSO_INIT_IP | IFLIB_ADMIN_ALWAYS_RUN,

	.isc_nrxd_min = {IXL_MIN_RING},
	.isc_ntxd_min = {IXL_MIN_RING},
	.isc_nrxd_max = {IXL_MAX_RING},
	.isc_ntxd_max = {IXL_MAX_RING},
	.isc_nrxd_default = {IXL_DEFAULT_RING},
	.isc_ntxd_default = {IXL_DEFAULT_RING},
};

/*** Functions ***/
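/*
 * ixl_register - Hand the iflib shared context template to iflib
 *
 * Called through the device_register method; the returned structure
 * describes this driver's queue counts, DMA limits, and ifdi methods.
 */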
static void *
ixl_register(device_t dev)
{
	return (&ixl_sctx_init);
}

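/*
 * ixl_allocate_pci_resources - Map BAR0 and save off PCI information
 *
 * Caches the PCI IDs and the bus-space tag/handle that the shared
 * code uses for register access through pf->osdep.
 */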
static int
ixl_allocate_pci_resources(struct ixl_pf *pf)
{
	device_t dev = iflib_get_dev(pf->vsi.ctx);
	struct i40e_hw *hw = &pf->hw;
	int             rid;

	/* Map BAR0 */
	rid = PCIR_BAR(0);
	pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);

	if (!(pf->pci_mem)) {
		device_printf(dev, "Unable to allocate bus resource: PCI memory\n");
		return (ENXIO);
	}

	/* Save off the PCI information */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
	hw->subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	hw->subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	hw->bus.device = pci_get_slot(dev);
	hw->bus.func = pci_get_function(dev);

	/* Save off register access information */
	pf->osdep.mem_bus_space_tag =
		rman_get_bustag(pf->pci_mem);
	pf->osdep.mem_bus_space_handle =
		rman_get_bushandle(pf->pci_mem);
	pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
	pf->osdep.flush_reg = I40E_GLGEN_STAT;
	pf->osdep.dev = dev;

	pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
	pf->hw.back = &pf->osdep;

	return (0);
}

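/*
 * ixl_setup_ssctx - Fill in the iflib softc context
 *
 * Publishes the queue-set limits (one set in FW recovery mode, 128 on
 * X722, 64 otherwise), the ring sizes for the selected TX completion
 * scheme (head writeback vs. descriptor writeback), and the offload
 * capabilities iflib should advertise.
 */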
static void
ixl_setup_ssctx(struct ixl_pf *pf)
{
	if_softc_ctx_t scctx = pf->vsi.shared;
	struct i40e_hw *hw = &pf->hw;

	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 1;
		scctx->isc_ntxqsets = scctx->isc_nrxqsets = 1;
	} else if (hw->mac.type == I40E_MAC_X722)
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 128;
	else
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;

	if (pf->vsi.enable_head_writeback) {
		scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
		    * sizeof(struct i40e_tx_desc) + sizeof(u32), DBA_ALIGN);
		scctx->isc_txrx = &ixl_txrx_hwb;
	} else {
		scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
		    * sizeof(struct i40e_tx_desc), DBA_ALIGN);
		scctx->isc_txrx = &ixl_txrx_dwb;
	}

	scctx->isc_txrx->ift_legacy_intr = ixl_intr;
	scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0]
	    * sizeof(union i40e_32byte_rx_desc), DBA_ALIGN);
	scctx->isc_msix_bar = PCIR_BAR(IXL_MSIX_BAR);
	scctx->isc_tx_nsegments = IXL_MAX_TX_SEGS;
	scctx->isc_tx_tso_segments_max = IXL_MAX_TSO_SEGS;
	scctx->isc_tx_tso_size_max = IXL_TSO_SIZE;
	scctx->isc_tx_tso_segsize_max = IXL_MAX_DMA_SEG_SIZE;
	scctx->isc_rss_table_size = pf->hw.func_caps.rss_table_size;
	scctx->isc_tx_csum_flags = CSUM_OFFLOAD;
	scctx->isc_capabilities = scctx->isc_capenable = IXL_CAPS;
}

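/*
 * ixl_admin_timer - Periodic admin work
 *
 * Callout run under pf->admin_mtx; kicks the iflib admin task and
 * rearms itself to fire every half second.
 */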
static void
ixl_admin_timer(void *arg)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg;

	/* Fire off the admin task */
	iflib_admin_intr_deferred(pf->vsi.ctx);

	/* Reschedule the admin timer */
	callout_schedule(&pf->admin_timer, hz/2);
}

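/*
 * ixl_attach_pre_recovery_mode - Minimal attach path for FW recovery mode
 *
 * Fetches the MAC address, keeps the admin queue interrupt alive under
 * MSI-X, and fills in the iflib context; everything else is skipped
 * while the firmware is being recovered.
 */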
static int
ixl_attach_pre_recovery_mode(struct ixl_pf *pf)
{
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;

	device_printf(dev, "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");

	i40e_get_mac_addr(hw, hw->mac.addr);

	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
		ixl_configure_intr0_msix(pf);
		ixl_enable_intr0(hw);
	}

	ixl_setup_ssctx(pf);

	return (0);
}

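/*
 * ixl_if_attach_pre - First phase of device attach
 *
 * Maps PCI resources, resets the PF, brings up the shared code and the
 * admin queue, sanity-checks the FW/NVM API version, sets up the HMC
 * and filter control, and fills in the iflib context. Queue and ifnet
 * setup happens later, in ixl_if_attach_post().
 */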
static int
ixl_if_attach_pre(if_ctx_t ctx)
{
	device_t dev;
	struct ixl_pf *pf;
	struct i40e_hw *hw;
	struct ixl_vsi *vsi;
	enum i40e_get_fw_lldp_status_resp lldp_status;
	struct i40e_filter_control_settings filter;
	enum i40e_status_code status;
	int error = 0;

	dev = iflib_get_dev(ctx);
	pf = iflib_get_softc(ctx);

	INIT_DBG_DEV(dev, "begin");

	vsi = &pf->vsi;
	vsi->back = pf;
	pf->dev = dev;
	hw = &pf->hw;

	vsi->dev = dev;
	vsi->hw = &pf->hw;
	vsi->id = 0;
	vsi->num_vlans = 0;
	vsi->ctx = ctx;
	vsi->media = iflib_get_media(ctx);
	vsi->shared = iflib_get_softc_ctx(ctx);

	snprintf(pf->admin_mtx_name, sizeof(pf->admin_mtx_name),
	    "%s:admin", device_get_nameunit(dev));
	mtx_init(&pf->admin_mtx, pf->admin_mtx_name, NULL, MTX_DEF);
	callout_init_mtx(&pf->admin_timer, &pf->admin_mtx, 0);

	/* Save tunable values */
	ixl_save_pf_tunables(pf);

	/* Do PCI setup - map BAR0, etc */
	if (ixl_allocate_pci_resources(pf)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_pci_res;
	}

	/* Establish a clean starting point */
	i40e_clear_hw(hw);
	i40e_set_mac_type(hw);

	error = ixl_pf_reset(pf);
	if (error)
		goto err_out;

	/* Initialize the shared code */
	status = i40e_init_shared_code(hw);
	if (status) {
		device_printf(dev, "Unable to initialize shared code, error %s\n",
		    i40e_stat_str(hw, status));
		error = EIO;
		goto err_out;
	}

	/* Set up the admin queue */
	hw->aq.num_arq_entries = IXL_AQ_LEN;
	hw->aq.num_asq_entries = IXL_AQ_LEN;
	hw->aq.arq_buf_size = IXL_AQ_BUF_SZ;
	hw->aq.asq_buf_size = IXL_AQ_BUF_SZ;

	status = i40e_init_adminq(hw);
	if (status != 0 && status != I40E_ERR_FIRMWARE_API_VERSION) {
		device_printf(dev, "Unable to initialize Admin Queue, error %s\n",
		    i40e_stat_str(hw, status));
		error = EIO;
		goto err_out;
	}
	ixl_print_nvm_version(pf);

	if (status == I40E_ERR_FIRMWARE_API_VERSION) {
		device_printf(dev, "The driver for the device stopped "
		    "because the NVM image is newer than expected.\n");
		device_printf(dev, "You must install the most recent version of "
		    "the network driver.\n");
		error = EIO;
		goto err_out;
	}

	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw)) {
		device_printf(dev, "The driver for the device detected "
		    "a newer version of the NVM image than expected.\n");
		device_printf(dev, "Please install the most recent version "
		    "of the network driver.\n");
	} else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4) {
		device_printf(dev, "The driver for the device detected "
		    "an older version of the NVM image than expected.\n");
		device_printf(dev, "Please update the NVM image.\n");
	}

	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
		error = ixl_attach_pre_recovery_mode(pf);
		if (error)
			goto err_out;
		return (error);
	}

	/* Clear PXE mode */
	i40e_clear_pxe_mode(hw);

	/* Get capabilities from the device */
	error = ixl_get_hw_capabilities(pf);
	if (error) {
		device_printf(dev, "get_hw_capabilities failed: %d\n",
		    error);
		goto err_get_cap;
	}

	/* Set up host memory cache */
	error = ixl_setup_hmc(pf);
	if (error)
		goto err_mac_hmc;

	/* Disable LLDP from the firmware for certain NVM versions */
	if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
	    (pf->hw.aq.fw_maj_ver < 4)) {
		i40e_aq_stop_lldp(hw, true, false, NULL);
		pf->state |= IXL_PF_STATE_FW_LLDP_DISABLED;
	}

	/* Try enabling Energy Efficient Ethernet (EEE) mode */
	if (i40e_enable_eee(hw, true) == I40E_SUCCESS)
		atomic_set_32(&pf->state, IXL_PF_STATE_EEE_ENABLED);
	else
		atomic_clear_32(&pf->state, IXL_PF_STATE_EEE_ENABLED);

	/* Get MAC addresses from hardware */
	i40e_get_mac_addr(hw, hw->mac.addr);
	error = i40e_validate_mac_addr(hw->mac.addr);
	if (error) {
		device_printf(dev, "validate_mac_addr failed: %d\n", error);
		goto err_mac_hmc;
	}
	bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
	iflib_set_mac(ctx, hw->mac.addr);
	i40e_get_port_mac_addr(hw, hw->mac.port_addr);

	/* Set up the device filtering */
	bzero(&filter, sizeof(filter));
	filter.enable_ethtype = TRUE;
	filter.enable_macvlan = TRUE;
	filter.enable_fdir = FALSE;
	filter.hash_lut_size = I40E_HASH_LUT_SIZE_512;
	if (i40e_set_filter_control(hw, &filter))
		device_printf(dev, "i40e_set_filter_control() failed\n");

	/* Query device FW LLDP status */
	if (i40e_get_fw_lldp_status(hw, &lldp_status) == I40E_SUCCESS) {
		if (lldp_status == I40E_GET_FW_LLDP_STATUS_DISABLED) {
			atomic_set_32(&pf->state,
			    IXL_PF_STATE_FW_LLDP_DISABLED);
		} else {
			atomic_clear_32(&pf->state,
			    IXL_PF_STATE_FW_LLDP_DISABLED);
		}
	}

	/* Tell FW to apply DCB config on link up */
	i40e_aq_set_dcb_parameters(hw, true, NULL);

	/* Fill out iflib parameters */
	ixl_setup_ssctx(pf);

	INIT_DBG_DEV(dev, "end");
	return (0);

err_mac_hmc:
	ixl_shutdown_hmc(pf);
err_get_cap:
	i40e_shutdown_adminq(hw);
err_out:
	ixl_free_pci_resources(pf);
err_pci_res:
	mtx_lock(&pf->admin_mtx);
	callout_stop(&pf->admin_timer);
	mtx_unlock(&pf->admin_mtx);
	mtx_destroy(&pf->admin_mtx);
	return (error);
}

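/*
 * ixl_if_attach_post - Second phase of device attach
 *
 * Runs after iflib has allocated queues and interrupts: sets up the
 * ifnet, switch configuration, filter lists, and the queue manager,
 * keeps the admin queue interrupt enabled, registers sysctls and
 * statistics, and starts the admin timer.
 */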
static int
ixl_if_attach_post(if_ctx_t ctx)
{
	device_t dev;
	struct ixl_pf *pf;
	struct i40e_hw *hw;
	struct ixl_vsi *vsi;
	int error = 0;
	enum i40e_status_code status;

	dev = iflib_get_dev(ctx);
	pf = iflib_get_softc(ctx);

	INIT_DBG_DEV(dev, "begin");

	vsi = &pf->vsi;
	vsi->ifp = iflib_get_ifp(ctx);
	hw = &pf->hw;

	/* Save off determined number of queues for interface */
	vsi->num_rx_queues = vsi->shared->isc_nrxqsets;
	vsi->num_tx_queues = vsi->shared->isc_ntxqsets;

	/* Setup OS network interface / ifnet */
	if (ixl_setup_interface(dev, pf)) {
		device_printf(dev, "interface setup failed!\n");
		error = EIO;
		goto err;
	}

	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
		/* Keep admin queue interrupts active while driver is loaded */
		if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
			ixl_configure_intr0_msix(pf);
			ixl_enable_intr0(hw);
		}

		ixl_add_sysctls_recovery_mode(pf);

		/* Start the admin timer */
		mtx_lock(&pf->admin_mtx);
		callout_reset(&pf->admin_timer, hz/2, ixl_admin_timer, pf);
		mtx_unlock(&pf->admin_mtx);
		return (0);
	}

	/* Determine link state */
	if (ixl_attach_get_link_status(pf)) {
		error = EINVAL;
		goto err;
	}

	error = ixl_switch_config(pf);
	if (error) {
		device_printf(dev, "Initial ixl_switch_config() failed: %d\n",
		     error);
		goto err;
	}

	/* Add protocol filters to list */
	ixl_init_filters(vsi);

	/* Init queue allocation manager */
	error = ixl_pf_qmgr_init(&pf->qmgr, hw->func_caps.num_tx_qp);
	if (error) {
		device_printf(dev, "Failed to init queue manager for PF queues, error %d\n",
		    error);
		goto err;
	}
	/* reserve a contiguous allocation for the PF's VSI */
	error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr,
	    max(vsi->num_rx_queues, vsi->num_tx_queues), &pf->qtag);
	if (error) {
		device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
		    error);
		goto err;
	}
	device_printf(dev, "Allocating %d queues for PF LAN VSI; %d queues active\n",
	    pf->qtag.num_allocated, pf->qtag.num_active);

	/* Limit PHY interrupts to link, autoneg, and modules failure */
	status = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
	    NULL);
	if (status) {
		device_printf(dev, "i40e_aq_set_phy_int_mask() failed: err %s,"
		    " aq_err %s\n", i40e_stat_str(hw, status),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		goto err;
	}

	/* Get the bus configuration and set the shared code */
	ixl_get_bus_info(pf);

	/* Keep admin queue interrupts active while driver is loaded */
	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
		ixl_configure_intr0_msix(pf);
		ixl_enable_intr0(hw);
	}

	/* Set initial advertised speed sysctl value */
	ixl_set_initial_advertised_speeds(pf);

	/* Initialize statistics & add sysctls */
	ixl_add_device_sysctls(pf);
	ixl_pf_reset_stats(pf);
	ixl_update_stats_counters(pf);
	ixl_add_hw_stats(pf);

	/*
	 * Driver may have been reloaded. Ensure that the link state
	 * is consistent with current settings.
	 */
	ixl_set_link(pf, (pf->state & IXL_PF_STATE_LINK_ACTIVE_ON_DOWN) != 0);

	hw->phy.get_link_info = true;
	i40e_get_link_status(hw, &pf->link_up);
	ixl_update_link_status(pf);

#ifdef PCI_IOV
	ixl_initialize_sriov(pf);
#endif

#ifdef IXL_IW
	if (hw->func_caps.iwarp && ixl_enable_iwarp) {
		pf->iw_enabled = (pf->iw_msix > 0) ? true : false;
		if (pf->iw_enabled) {
			error = ixl_iw_pf_attach(pf);
			if (error) {
				device_printf(dev,
				    "interfacing to iWARP driver failed: %d\n",
				    error);
				goto err;
			} else
				device_printf(dev, "iWARP ready\n");
		} else
			device_printf(dev, "iWARP disabled on this device "
			    "(no MSI-X vectors)\n");
	} else {
		pf->iw_enabled = false;
		device_printf(dev, "The device is not iWARP enabled\n");
	}
#endif
	/* Start the admin timer */
	mtx_lock(&pf->admin_mtx);
	callout_reset(&pf->admin_timer, hz/2, ixl_admin_timer, pf);
	mtx_unlock(&pf->admin_mtx);

	INIT_DBG_DEV(dev, "end");
	return (0);

err:
	INIT_DEBUGOUT("end: error %d", error);
	/* ixl_if_detach() is called on error from this */
	return (error);
}

/**
 * XXX: iflib always ignores the return value of detach(),
 * so this function is not allowed to fail.
 */
static int
ixl_if_detach(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	enum i40e_status_code	status;
#ifdef IXL_IW
	int			error;
#endif

	INIT_DBG_DEV(dev, "begin");

	/* Stop the admin timer */
	mtx_lock(&pf->admin_mtx);
	callout_stop(&pf->admin_timer);
	mtx_unlock(&pf->admin_mtx);
	mtx_destroy(&pf->admin_mtx);

#ifdef IXL_IW
	if (ixl_enable_iwarp && pf->iw_enabled) {
		error = ixl_iw_pf_detach(pf);
		if (error == EBUSY) {
			device_printf(dev, "iwarp in use; stop it first.\n");
			//return (error);
		}
	}
#endif
	/* Remove all previously allocated media types */
	ifmedia_removeall(vsi->media);

	/* Shutdown LAN HMC */
	ixl_shutdown_hmc(pf);

	/* Shutdown admin queue */
	ixl_disable_intr0(hw);
	status = i40e_shutdown_adminq(hw);
	if (status)
		device_printf(dev,
		    "i40e_shutdown_adminq() failed with status %s\n",
		    i40e_stat_str(hw, status));

	ixl_pf_qmgr_destroy(&pf->qmgr);
	ixl_free_pci_resources(pf);
	ixl_free_filters(&vsi->ftl);
	INIT_DBG_DEV(dev, "end");
	return (0);
}

static int
ixl_if_shutdown(if_ctx_t ctx)
{
	int error = 0;

	INIT_DEBUGOUT("ixl_if_shutdown: begin");

	/* TODO: Call ixl_if_stop()? */

	/* TODO: Then setup low power mode */

	return (error);
}

static int
ixl_if_suspend(if_ctx_t ctx)
{
	int error = 0;

	INIT_DEBUGOUT("ixl_if_suspend: begin");

	/* TODO: Call ixl_if_stop()? */

	/* TODO: Then setup low power mode */

	return (error);
}

static int
ixl_if_resume(if_ctx_t ctx)
{
	struct ifnet *ifp = iflib_get_ifp(ctx);

	INIT_DEBUGOUT("ixl_if_resume: begin");

	/* Read & clear wake-up registers */

	/* Required after D3->D0 transition */
	if (ifp->if_flags & IFF_UP)
		ixl_if_init(ctx);

	return (0);
}

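/*
 * ixl_if_init - Bring the interface up
 *
 * Called by iflib. Rebuilds HW state if the admin queue has died,
 * programs the (possibly locally administered) MAC address, prepares
 * the VSI rings, RSS, and interrupt routing, and finally enables the
 * rings and the filter/promiscuous settings.
 */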
void
ixl_if_init(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw	*hw = &pf->hw;
	struct ifnet *ifp = iflib_get_ifp(ctx);
	device_t 	dev = iflib_get_dev(ctx);
	u8		tmpaddr[ETHER_ADDR_LEN];
	int		ret;

	if (IXL_PF_IN_RECOVERY_MODE(pf))
		return;
	/*
	 * If the aq is dead here, it probably means something outside of the
	 * driver did something to the adapter, like a PF reset.
	 * So, rebuild the driver's state here if that occurs.
	 */
	if (!i40e_check_asq_alive(&pf->hw)) {
		device_printf(dev, "Admin Queue is down; resetting...\n");
		ixl_teardown_hw_structs(pf);
		ixl_rebuild_hw_structs_after_reset(pf, false);
	}

	/* Get the latest MAC address... the user might have set a LAA */
	bcopy(IF_LLADDR(vsi->ifp), tmpaddr, ETH_ALEN);
	if (!ixl_ether_is_equal(hw->mac.addr, tmpaddr) &&
	    (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
		ixl_del_all_vlan_filters(vsi, hw->mac.addr);
		bcopy(tmpaddr, hw->mac.addr, ETH_ALEN);
		ret = i40e_aq_mac_address_write(hw,
		    I40E_AQC_WRITE_TYPE_LAA_ONLY,
		    hw->mac.addr, NULL);
		if (ret) {
			device_printf(dev, "LLA address change failed!!\n");
			return;
		}
		/*
		 * New filters are configured by ixl_reconfigure_filters
		 * at the end of this function.
		 */
	}

	iflib_set_mac(ctx, hw->mac.addr);

	/* Prepare the VSI: rings, hmc contexts, etc... */
	if (ixl_initialize_vsi(vsi)) {
		device_printf(dev, "initialize vsi failed!!\n");
		return;
	}

	ixl_set_link(pf, true);

	/* Reconfigure multicast filters in HW */
	ixl_if_multi_set(ctx);

	/* Set up RSS */
	ixl_config_rss(pf);

	/* Set up MSI-X routing and the ITR settings */
	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
		ixl_configure_queue_intr_msix(pf);
		ixl_configure_itr(pf);
	} else
		ixl_configure_legacy(pf);

	if (vsi->enable_head_writeback)
		ixl_init_tx_cidx(vsi);
	else
		ixl_init_tx_rsqs(vsi);

	ixl_enable_rings(vsi);

	i40e_aq_set_default_vsi(hw, vsi->seid, NULL);

	/* Re-add configure filters to HW */
	ixl_reconfigure_filters(vsi);

	/* Configure promiscuous mode */
	ixl_if_promisc_set(ctx, if_getflags(ifp));

#ifdef IXL_IW
	if (ixl_enable_iwarp && pf->iw_enabled) {
		ret = ixl_iw_pf_init(pf);
		if (ret)
			device_printf(dev,
			    "initialize iwarp failed, code %d\n", ret);
	}
#endif
}

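/*
 * ixl_if_stop - Stop the interface
 *
 * Disables the ring interrupts and the rings themselves; the link is
 * forced down only when the interface is administratively down and the
 * LINK_ACTIVE_ON_DOWN option is not set.
 */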
void
ixl_if_stop(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ifnet *ifp = iflib_get_ifp(ctx);
	struct ixl_vsi *vsi = &pf->vsi;

	INIT_DEBUGOUT("ixl_if_stop: begin\n");

	if (IXL_PF_IN_RECOVERY_MODE(pf))
		return;

	// TODO: This may need to be reworked
#ifdef IXL_IW
	/* Stop iWARP device */
	if (ixl_enable_iwarp && pf->iw_enabled)
		ixl_iw_pf_stop(pf);
#endif

	ixl_disable_rings_intr(vsi);
	ixl_disable_rings(pf, vsi, &pf->qtag);

	/*
	 * Don't set link state if only reconfiguring
	 * e.g. on MTU change.
	 */
	if ((if_getflags(ifp) & IFF_UP) == 0 &&
	    (atomic_load_acq_32(&pf->state) &
	    IXL_PF_STATE_LINK_ACTIVE_ON_DOWN) == 0)
		ixl_set_link(pf, false);
}

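/*
 * ixl_if_msix_intr_assign - Wire up MSI-X vectors
 *
 * Vector 0 is reserved for the admin queue; each RX queue gets its own
 * vector, and TX queues are serviced as softirqs on the RX vectors
 * (round-robin when there are more TX queue sets than RX queue sets).
 */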
static int
ixl_if_msix_intr_assign(if_ctx_t ctx, int msix)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_rx_queue *rx_que = vsi->rx_queues;
	struct ixl_tx_queue *tx_que = vsi->tx_queues;
	int err, i, rid, vector = 0;
	char buf[16];

	MPASS(vsi->shared->isc_nrxqsets > 0);
	MPASS(vsi->shared->isc_ntxqsets > 0);

	/* The Admin Queue must use vector 0 */
	rid = vector + 1;
	err = iflib_irq_alloc_generic(ctx, &vsi->irq, rid, IFLIB_INTR_ADMIN,
	    ixl_msix_adminq, pf, 0, "aq");
	if (err) {
		iflib_irq_free(ctx, &vsi->irq);
		device_printf(iflib_get_dev(ctx),
		    "Failed to register Admin Queue handler\n");
		return (err);
	}

#ifdef PCI_IOV
	/* Create soft IRQ for handling VFLRs */
	iflib_softirq_alloc_generic(ctx, NULL, IFLIB_INTR_IOV, pf, 0, "iov");
#endif

	/* Now set up the stations */
	for (i = 0, vector = 1; i < vsi->shared->isc_nrxqsets; i++, vector++, rx_que++) {
		rid = vector + 1;

		snprintf(buf, sizeof(buf), "rxq%d", i);
		err = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
		    IFLIB_INTR_RXTX, ixl_msix_que, rx_que, rx_que->rxr.me, buf);
		/* XXX: Does the driver work as expected if there are fewer num_rx_queues than
		 * what's expected in the iflib context? */
		if (err) {
			device_printf(iflib_get_dev(ctx),
			    "Failed to allocate queue RX int vector %d, err: %d\n", i, err);
			vsi->num_rx_queues = i + 1;
			goto fail;
		}
		rx_que->msix = vector;
	}

	bzero(buf, sizeof(buf));

	for (i = 0; i < vsi->shared->isc_ntxqsets; i++, tx_que++) {
		snprintf(buf, sizeof(buf), "txq%d", i);
		iflib_softirq_alloc_generic(ctx,
		    &vsi->rx_queues[i % vsi->shared->isc_nrxqsets].que_irq,
		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);

		/* TODO: Maybe call a strategy function for this to figure out which
		 * interrupts to map Tx queues to. I don't know if there's an immediately
		 * better way than this other than a user-supplied map, though. */
		tx_que->msix = (i % vsi->shared->isc_nrxqsets) + 1;
	}

	return (0);
fail:
	iflib_irq_free(ctx, &vsi->irq);
	rx_que = vsi->rx_queues;
	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
		iflib_irq_free(ctx, &rx_que->que_irq);
	return (err);
}

1113 
1114 /*
1115  * Enable all interrupts
1116  *
1117  * Called in:
1118  * iflib_init_locked, after ixl_if_init()
1119  */
1120 static void
1121 ixl_if_enable_intr(if_ctx_t ctx)
1122 {
1123 	struct ixl_pf *pf = iflib_get_softc(ctx);
1124 	struct ixl_vsi *vsi = &pf->vsi;
1125 	struct i40e_hw		*hw = vsi->hw;
1126 	struct ixl_rx_queue	*que = vsi->rx_queues;
1127 
1128 	ixl_enable_intr0(hw);
1129 	/* Enable queue interrupts */
1130 	for (int i = 0; i < vsi->num_rx_queues; i++, que++)
1131 		/* TODO: Queue index parameter is probably wrong */
1132 		ixl_enable_queue(hw, que->rxr.me);
1133 }
1134 
1135 /*
1136  * Disable queue interrupts
1137  *
1138  * Other interrupt causes need to remain active.
1139  */
1140 static void
1141 ixl_if_disable_intr(if_ctx_t ctx)
1142 {
1143 	struct ixl_pf *pf = iflib_get_softc(ctx);
1144 	struct ixl_vsi *vsi = &pf->vsi;
1145 	struct i40e_hw		*hw = vsi->hw;
1146 	struct ixl_rx_queue	*rx_que = vsi->rx_queues;
1147 
1148 	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
1149 		for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
1150 			ixl_disable_queue(hw, rx_que->msix - 1);
1151 	} else {
1152 		// Set PFINT_LNKLST0 FIRSTQ_INDX to 0x7FF
1153 		// stops queues from triggering interrupts
1154 		wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
1155 	}
1156 }
1157 
static int
ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw		*hw = vsi->hw;
	struct ixl_rx_queue	*rx_que = &vsi->rx_queues[rxqid];

	ixl_enable_queue(hw, rx_que->msix - 1);
	return (0);
}

static int
ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = vsi->hw;
	struct ixl_tx_queue *tx_que = &vsi->tx_queues[txqid];

	ixl_enable_queue(hw, tx_que->msix - 1);
	return (0);
}

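/*
 * ixl_if_tx_queues_alloc - Allocate TX queue software structures
 *
 * Attaches per-queue state to the descriptor rings iflib has already
 * DMA-allocated (vaddrs/paddrs); with descriptor writeback, also
 * allocates and initializes the report-status (tx_rsq) array.
 */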
static int
ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	if_softc_ctx_t scctx = vsi->shared;
	struct ixl_tx_queue *que;
	int i, j, error = 0;

	MPASS(scctx->isc_ntxqsets > 0);
	MPASS(ntxqs == 1);
	MPASS(scctx->isc_ntxqsets == ntxqsets);

	/* Allocate queue structure memory */
	if (!(vsi->tx_queues =
	    (struct ixl_tx_queue *) malloc(sizeof(struct ixl_tx_queue) *ntxqsets, M_IXL, M_NOWAIT | M_ZERO))) {
		device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n");
		return (ENOMEM);
	}

	for (i = 0, que = vsi->tx_queues; i < ntxqsets; i++, que++) {
		struct tx_ring *txr = &que->txr;

		txr->me = i;
		que->vsi = vsi;

		if (!vsi->enable_head_writeback) {
			/* Allocate report status array */
			if (!(txr->tx_rsq = malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXL, M_NOWAIT))) {
				device_printf(iflib_get_dev(ctx), "failed to allocate tx_rsq memory\n");
				error = ENOMEM;
				goto fail;
			}
			/* Init report status array */
			for (j = 0; j < scctx->isc_ntxd[0]; j++)
				txr->tx_rsq[j] = QIDX_INVALID;
		}
		/* get the virtual and physical address of the hardware queues */
		txr->tail = I40E_QTX_TAIL(txr->me);
		txr->tx_base = (struct i40e_tx_desc *)vaddrs[i * ntxqs];
		txr->tx_paddr = paddrs[i * ntxqs];
		txr->que = que;
	}

	return (0);
fail:
	ixl_if_queues_free(ctx);
	return (error);
}

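/*
 * ixl_if_rx_queues_alloc - Allocate RX queue software structures
 *
 * Mirrors ixl_if_tx_queues_alloc(): attaches per-queue state to the
 * RX descriptor rings allocated by iflib.
 */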
static int
ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_rx_queue *que;
	int i, error = 0;

#ifdef INVARIANTS
	if_softc_ctx_t scctx = vsi->shared;
	MPASS(scctx->isc_nrxqsets > 0);
	MPASS(nrxqs == 1);
	MPASS(scctx->isc_nrxqsets == nrxqsets);
#endif

	/* Allocate queue structure memory */
	if (!(vsi->rx_queues =
	    (struct ixl_rx_queue *) malloc(sizeof(struct ixl_rx_queue) *
	    nrxqsets, M_IXL, M_NOWAIT | M_ZERO))) {
		device_printf(iflib_get_dev(ctx), "Unable to allocate RX ring memory\n");
		error = ENOMEM;
		goto fail;
	}

	for (i = 0, que = vsi->rx_queues; i < nrxqsets; i++, que++) {
		struct rx_ring *rxr = &que->rxr;

		rxr->me = i;
		que->vsi = vsi;

		/* get the virtual and physical address of the hardware queues */
		rxr->tail = I40E_QRX_TAIL(rxr->me);
		rxr->rx_base = (union i40e_rx_desc *)vaddrs[i * nrxqs];
		rxr->rx_paddr = paddrs[i * nrxqs];
		rxr->que = que;
	}

	return (0);
fail:
	ixl_if_queues_free(ctx);
	return (error);
}

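/*
 * ixl_if_queues_free - Free the queue software structures
 *
 * Releases everything allocated by the two functions above, plus the
 * VSI sysctl context when not in FW recovery mode.
 */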
static void
ixl_if_queues_free(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;

	if (vsi->tx_queues != NULL && !vsi->enable_head_writeback) {
		struct ixl_tx_queue *que;
		int i = 0;

		for (i = 0, que = vsi->tx_queues; i < vsi->num_tx_queues; i++, que++) {
			struct tx_ring *txr = &que->txr;
			if (txr->tx_rsq != NULL) {
				free(txr->tx_rsq, M_IXL);
				txr->tx_rsq = NULL;
			}
		}
	}

	if (vsi->tx_queues != NULL) {
		free(vsi->tx_queues, M_IXL);
		vsi->tx_queues = NULL;
	}
	if (vsi->rx_queues != NULL) {
		free(vsi->rx_queues, M_IXL);
		vsi->rx_queues = NULL;
	}

	if (!IXL_PF_IN_RECOVERY_MODE(pf))
		sysctl_ctx_free(&vsi->sysctl_ctx);
}

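/*
 * ixl_update_link_status - Push the current link state into iflib
 *
 * On a transition, reports the new state (and baudrate on link up) to
 * iflib and, with SR-IOV enabled, broadcasts it to the VFs.
 */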
void
ixl_update_link_status(struct ixl_pf *pf)
{
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = &pf->hw;
	u64 baudrate;

	if (pf->link_up) {
		if (vsi->link_active == FALSE) {
			vsi->link_active = TRUE;
			baudrate = ixl_max_aq_speed_to_value(hw->phy.link_info.link_speed);
			iflib_link_state_change(vsi->ctx, LINK_STATE_UP, baudrate);
			ixl_link_up_msg(pf);
#ifdef PCI_IOV
			ixl_broadcast_link_state(pf);
#endif
		}
	} else { /* Link down */
		if (vsi->link_active == TRUE) {
			vsi->link_active = FALSE;
			iflib_link_state_change(vsi->ctx, LINK_STATE_DOWN, 0);
#ifdef PCI_IOV
			ixl_broadcast_link_state(pf);
#endif
		}
	}
}

static void
ixl_handle_lan_overflow_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
{
	device_t dev = pf->dev;
	u32 rxq_idx, qtx_ctl;

	rxq_idx = (e->desc.params.external.param0 & I40E_PRTDCB_RUPTQ_RXQNUM_MASK) >>
	    I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT;
	qtx_ctl = e->desc.params.external.param1;

	device_printf(dev, "LAN overflow event: global rxq_idx %d\n", rxq_idx);
	device_printf(dev, "LAN overflow event: QTX_CTL 0x%08x\n", qtx_ctl);
}

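/*
 * ixl_process_adminq - Drain the admin receive queue
 *
 * Processes up to IXL_ADM_LIMIT pending events (link changes, VF
 * messages, LAN queue overflows), then re-enables the admin queue
 * interrupt cause. Returns the status of the last ARQ read and the
 * count of still-pending events through *pending.
 */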
static int
ixl_process_adminq(struct ixl_pf *pf, u16 *pending)
{
	enum i40e_status_code status = I40E_SUCCESS;
	struct i40e_arq_event_info event;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	u16 opcode;
	u32 loop = 0, reg;

	event.buf_len = IXL_AQ_BUF_SZ;
	event.msg_buf = malloc(event.buf_len, M_IXL, M_NOWAIT | M_ZERO);
	if (!event.msg_buf) {
		device_printf(dev, "%s: Unable to allocate memory for Admin"
		    " Queue event!\n", __func__);
		return (ENOMEM);
	}

	/* clean and process any events */
	do {
		status = i40e_clean_arq_element(hw, &event, pending);
		if (status)
			break;
		opcode = LE16_TO_CPU(event.desc.opcode);
		ixl_dbg(pf, IXL_DBG_AQ,
		    "Admin Queue event: %#06x\n", opcode);
		switch (opcode) {
		case i40e_aqc_opc_get_link_status:
			ixl_link_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_pf:
#ifdef PCI_IOV
			ixl_handle_vf_msg(pf, &event);
#endif
			break;
		/*
		 * This should only occur on no-drop queues, which
		 * aren't currently configured.
		 */
		case i40e_aqc_opc_event_lan_overflow:
			ixl_handle_lan_overflow_event(pf, &event);
			break;
		default:
			break;
		}
	} while (*pending && (loop++ < IXL_ADM_LIMIT));

	free(event.msg_buf, M_IXL);

	/* Re-enable admin queue interrupt cause */
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	return (status);
}

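/*
 * ixl_if_update_admin_status - iflib admin task body
 *
 * Finishes any pending EMP reset, handles MDD events, processes admin
 * queue events and the link state, and then either reschedules itself
 * (if events remain) or re-enables interrupt cause 0.
 */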
static void
ixl_if_update_admin_status(if_ctx_t ctx)
{
	struct ixl_pf	*pf = iflib_get_softc(ctx);
	struct i40e_hw	*hw = &pf->hw;
	u16		pending;

	if (IXL_PF_IS_RESETTING(pf))
		ixl_handle_empr_reset(pf);

	/*
	 * Admin Queue is shut down while handling reset.
	 * Don't proceed if it hasn't been re-initialized,
	 * e.g. due to an issue with new FW.
	 */
	if (!i40e_check_asq_alive(&pf->hw))
		return;

	if (pf->state & IXL_PF_STATE_MDD_PENDING)
		ixl_handle_mdd_event(pf);

	ixl_process_adminq(pf, &pending);
	ixl_update_link_status(pf);

	/*
	 * If there are still messages to process, reschedule ourselves.
	 * Otherwise, re-enable our interrupt and go to sleep.
	 */
	if (pending > 0)
		iflib_admin_intr_deferred(ctx);
	else
		ixl_enable_intr0(hw);
}

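/*
 * ixl_if_multi_set - Sync the HW multicast filter list
 *
 * Deletes stale filters first, then falls back to multicast-promiscuous
 * mode when the ifnet carries MAX_MULTICAST_ADDR or more addresses.
 */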
static void
ixl_if_multi_set(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = vsi->hw;
	int mcnt;

	IOCTL_DEBUGOUT("ixl_if_multi_set: begin");

	/* Delete filters for removed multicast addresses */
	ixl_del_multi(vsi, false);

	mcnt = min(if_llmaddr_count(iflib_get_ifp(ctx)), MAX_MULTICAST_ADDR);
	if (__predict_false(mcnt == MAX_MULTICAST_ADDR)) {
		i40e_aq_set_vsi_multicast_promiscuous(hw,
		    vsi->seid, TRUE, NULL);
		ixl_del_multi(vsi, true);
		return;
	}

	ixl_add_multi(vsi);
	IOCTL_DEBUGOUT("ixl_if_multi_set: end");
}

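/*
 * ixl_if_mtu_set - Validate and record a new MTU
 *
 * Only updates the shared context's max frame size; iflib then
 * reinitializes the interface to apply it.
 */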
static int
ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;

	IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
	if (mtu > IXL_MAX_FRAME - ETHER_HDR_LEN - ETHER_CRC_LEN -
		ETHER_VLAN_ENCAP_LEN)
		return (EINVAL);

	vsi->shared->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
		ETHER_VLAN_ENCAP_LEN;

	return (0);
}

static void
ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct i40e_hw  *hw = &pf->hw;

	INIT_DEBUGOUT("ixl_media_status: begin");

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!pf->link_up) {
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	/* Hardware is always full-duplex */
	ifmr->ifm_active |= IFM_FDX;

	switch (hw->phy.link_info.phy_type) {
		/* 100 M */
		case I40E_PHY_TYPE_100BASE_TX:
			ifmr->ifm_active |= IFM_100_TX;
			break;
		/* 1 G */
		case I40E_PHY_TYPE_1000BASE_T:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		case I40E_PHY_TYPE_1000BASE_SX:
			ifmr->ifm_active |= IFM_1000_SX;
			break;
		case I40E_PHY_TYPE_1000BASE_LX:
			ifmr->ifm_active |= IFM_1000_LX;
			break;
		case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		/* 2.5 G */
		case I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS:
			ifmr->ifm_active |= IFM_2500_T;
			break;
		/* 5 G */
		case I40E_PHY_TYPE_5GBASE_T_LINK_STATUS:
			ifmr->ifm_active |= IFM_5000_T;
			break;
		/* 10 G */
		case I40E_PHY_TYPE_10GBASE_SFPP_CU:
			ifmr->ifm_active |= IFM_10G_TWINAX;
			break;
		case I40E_PHY_TYPE_10GBASE_SR:
			ifmr->ifm_active |= IFM_10G_SR;
			break;
		case I40E_PHY_TYPE_10GBASE_LR:
			ifmr->ifm_active |= IFM_10G_LR;
			break;
		case I40E_PHY_TYPE_10GBASE_T:
			ifmr->ifm_active |= IFM_10G_T;
			break;
		case I40E_PHY_TYPE_XAUI:
		case I40E_PHY_TYPE_XFI:
			ifmr->ifm_active |= IFM_10G_TWINAX;
			break;
		case I40E_PHY_TYPE_10GBASE_AOC:
			ifmr->ifm_active |= IFM_10G_AOC;
			break;
		/* 25 G */
		case I40E_PHY_TYPE_25GBASE_KR:
			ifmr->ifm_active |= IFM_25G_KR;
			break;
		case I40E_PHY_TYPE_25GBASE_CR:
			ifmr->ifm_active |= IFM_25G_CR;
			break;
		case I40E_PHY_TYPE_25GBASE_SR:
			ifmr->ifm_active |= IFM_25G_SR;
			break;
		case I40E_PHY_TYPE_25GBASE_LR:
			ifmr->ifm_active |= IFM_25G_LR;
			break;
		case I40E_PHY_TYPE_25GBASE_AOC:
			ifmr->ifm_active |= IFM_25G_AOC;
			break;
		case I40E_PHY_TYPE_25GBASE_ACC:
			ifmr->ifm_active |= IFM_25G_ACC;
			break;
		/* 40 G */
		case I40E_PHY_TYPE_40GBASE_CR4:
		case I40E_PHY_TYPE_40GBASE_CR4_CU:
			ifmr->ifm_active |= IFM_40G_CR4;
			break;
		case I40E_PHY_TYPE_40GBASE_SR4:
			ifmr->ifm_active |= IFM_40G_SR4;
			break;
		case I40E_PHY_TYPE_40GBASE_LR4:
			ifmr->ifm_active |= IFM_40G_LR4;
			break;
		case I40E_PHY_TYPE_XLAUI:
			ifmr->ifm_active |= IFM_OTHER;
			break;
		case I40E_PHY_TYPE_1000BASE_KX:
			ifmr->ifm_active |= IFM_1000_KX;
			break;
		case I40E_PHY_TYPE_SGMII:
			ifmr->ifm_active |= IFM_1000_SGMII;
			break;
		/* ERJ: What's the difference between these? */
		case I40E_PHY_TYPE_10GBASE_CR1_CU:
		case I40E_PHY_TYPE_10GBASE_CR1:
			ifmr->ifm_active |= IFM_10G_CR1;
			break;
		case I40E_PHY_TYPE_10GBASE_KX4:
			ifmr->ifm_active |= IFM_10G_KX4;
			break;
		case I40E_PHY_TYPE_10GBASE_KR:
			ifmr->ifm_active |= IFM_10G_KR;
			break;
		case I40E_PHY_TYPE_SFI:
			ifmr->ifm_active |= IFM_10G_SFI;
			break;
		/* Our single 20G media type */
		case I40E_PHY_TYPE_20GBASE_KR2:
			ifmr->ifm_active |= IFM_20G_KR2;
			break;
		case I40E_PHY_TYPE_40GBASE_KR4:
			ifmr->ifm_active |= IFM_40G_KR4;
			break;
		case I40E_PHY_TYPE_XLPPI:
		case I40E_PHY_TYPE_40GBASE_AOC:
			ifmr->ifm_active |= IFM_40G_XLPPI;
			break;
		/* Unknown to driver */
		default:
			ifmr->ifm_active |= IFM_UNKNOWN;
			break;
	}
	/* Report flow control status as well */
	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
}

static int
ixl_if_media_change(if_ctx_t ctx)
{
	struct ifmedia *ifm = iflib_get_media(ctx);

	INIT_DEBUGOUT("ixl_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	if_printf(iflib_get_ifp(ctx), "Media change is not supported.\n");
	return (ENODEV);
}

static int
ixl_if_promisc_set(if_ctx_t ctx, int flags)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct ifnet	*ifp = iflib_get_ifp(ctx);
	struct i40e_hw	*hw = vsi->hw;
	int		err;
	bool		uni = FALSE, multi = FALSE;

	if (flags & IFF_PROMISC)
		uni = multi = TRUE;
	else if (flags & IFF_ALLMULTI || if_llmaddr_count(ifp) >=
	    MAX_MULTICAST_ADDR)
		multi = TRUE;

	err = i40e_aq_set_vsi_unicast_promiscuous(hw,
	    vsi->seid, uni, NULL, true);
	if (err)
		return (err);
	err = i40e_aq_set_vsi_multicast_promiscuous(hw,
	    vsi->seid, multi, NULL);
	return (err);
}

static void
ixl_if_timer(if_ctx_t ctx, uint16_t qid)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);

	if (qid != 0)
		return;

	ixl_update_stats_counters(pf);
}

static void
ixl_if_vlan_register(if_ctx_t ctx, u16 vtag)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw	*hw = vsi->hw;
	if_t ifp = iflib_get_ifp(ctx);

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	/*
	 * Keep track of registered VLANS to know what
	 * filters have to be configured when VLAN_HWFILTER
	 * capability is enabled.
	 */
	++vsi->num_vlans;
	bit_set(vsi->vlans_map, vtag);

	if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0)
		return;

	if (vsi->num_vlans < IXL_MAX_VLAN_FILTERS)
		ixl_add_filter(vsi, hw->mac.addr, vtag);
	else if (vsi->num_vlans == IXL_MAX_VLAN_FILTERS) {
		/*
		 * There are not enough HW resources to add filters
		 * for all registered VLANs. Re-configure filtering
		 * to allow reception of all expected traffic.
		 */
		device_printf(vsi->dev,
		    "Not enough HW filters for all VLANs. VLAN HW filtering disabled\n");
		ixl_del_all_vlan_filters(vsi, hw->mac.addr);
		ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
	}
}

static void
ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw	*hw = vsi->hw;
	if_t ifp = iflib_get_ifp(ctx);

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	--vsi->num_vlans;
	bit_clear(vsi->vlans_map, vtag);

	if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0)
		return;

	if (vsi->num_vlans < IXL_MAX_VLAN_FILTERS)
		ixl_del_filter(vsi, hw->mac.addr, vtag);
	else if (vsi->num_vlans == IXL_MAX_VLAN_FILTERS) {
		ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
		ixl_add_vlan_filters(vsi, hw->mac.addr);
	}
}

static uint64_t
ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	if_t ifp = iflib_get_ifp(ctx);

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (vsi->ipackets);
	case IFCOUNTER_IERRORS:
		return (vsi->ierrors);
	case IFCOUNTER_OPACKETS:
		return (vsi->opackets);
	case IFCOUNTER_OERRORS:
		return (vsi->oerrors);
	case IFCOUNTER_COLLISIONS:
		/* Collisions are impossible in full-duplex 10G/40G Ethernet */
		return (0);
	case IFCOUNTER_IBYTES:
		return (vsi->ibytes);
	case IFCOUNTER_OBYTES:
		return (vsi->obytes);
	case IFCOUNTER_IMCASTS:
		return (vsi->imcasts);
	case IFCOUNTER_OMCASTS:
		return (vsi->omcasts);
	case IFCOUNTER_IQDROPS:
		return (vsi->iqdrops);
	case IFCOUNTER_OQDROPS:
		return (vsi->oqdrops);
	case IFCOUNTER_NOPROTO:
		return (vsi->noproto);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}

#ifdef PCI_IOV
static void
ixl_if_vflr_handle(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);

	ixl_handle_vflr(pf);
}
#endif

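/*
 * ixl_if_i2c_req - Read SFP module EEPROM bytes over I2C
 *
 * Uses whichever access routine was selected through the
 * hw.ixl.i2c_access_method tunable; fails if none was set up.
 */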
static int
ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
{
	struct ixl_pf		*pf = iflib_get_softc(ctx);

	if (pf->read_i2c_byte == NULL)
		return (EINVAL);

	for (int i = 0; i < req->len; i++)
		if (pf->read_i2c_byte(pf, req->offset + i,
		    req->dev_addr, &req->data[i]))
			return (EIO);
	return (0);
}

static int
ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ifdrv *ifd = (struct ifdrv *)data;
	int error = 0;

	/*
	 * The iflib_if_ioctl forwards SIOCxDRVSPEC and SIOCGPRIVATE_0 without
	 * performing privilege checks. It is important that this function
	 * perform the necessary checks for commands which should only be
	 * executed by privileged threads.
	 */

	switch(command) {
	case SIOCGDRVSPEC:
	case SIOCSDRVSPEC:
		/* NVM update command */
		if (ifd->ifd_cmd == I40E_NVM_ACCESS) {
			error = priv_check(curthread, PRIV_DRIVER);
			if (error)
				break;
			error = ixl_handle_nvmupd_cmd(pf, ifd);
		} else {
			error = EINVAL;
		}
		break;
	default:
		error = EOPNOTSUPP;
	}

	return (error);
}

/* ixl_if_needs_restart - Tell iflib when the driver needs to be reinitialized
 * @ctx: iflib context
 * @event: event code to check
 *
 * Defaults to returning false for every event.
 *
 * @returns true if iflib needs to reinit the interface, false otherwise
 */
static bool
ixl_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
{
	switch (event) {
	case IFLIB_RESTART_VLAN_CONFIG:
	default:
		return (false);
	}
}

/*
 * Sanity check and save off tunable values.
 */
static void
ixl_save_pf_tunables(struct ixl_pf *pf)
{
	device_t dev = pf->dev;

	/* Save tunable information */
#ifdef IXL_DEBUG_FC
	pf->enable_tx_fc_filter = ixl_enable_tx_fc_filter;
#endif
#ifdef IXL_DEBUG
	pf->recovery_mode = ixl_debug_recovery_mode;
#endif
	pf->dbg_mask = ixl_core_debug_mask;
	pf->hw.debug_mask = ixl_shared_debug_mask;
	pf->vsi.enable_head_writeback = !!(ixl_enable_head_writeback);
	pf->enable_vf_loopback = !!(ixl_enable_vf_loopback);
#if 0
	pf->dynamic_rx_itr = ixl_dynamic_rx_itr;
	pf->dynamic_tx_itr = ixl_dynamic_tx_itr;
#endif

	if (ixl_i2c_access_method > 3 || ixl_i2c_access_method < 0)
		pf->i2c_access_method = 0;
	else
		pf->i2c_access_method = ixl_i2c_access_method;

	if (ixl_tx_itr < 0 || ixl_tx_itr > IXL_MAX_ITR) {
		device_printf(dev, "Invalid tx_itr value of %d set!\n",
		    ixl_tx_itr);
		device_printf(dev, "tx_itr must be between %d and %d, "
		    "inclusive\n",
		    0, IXL_MAX_ITR);
		device_printf(dev, "Using default value of %d instead\n",
		    IXL_ITR_4K);
		pf->tx_itr = IXL_ITR_4K;
	} else
		pf->tx_itr = ixl_tx_itr;

	if (ixl_rx_itr < 0 || ixl_rx_itr > IXL_MAX_ITR) {
		device_printf(dev, "Invalid rx_itr value of %d set!\n",
		    ixl_rx_itr);
		device_printf(dev, "rx_itr must be between %d and %d, "
		    "inclusive\n",
		    0, IXL_MAX_ITR);
		device_printf(dev, "Using default value of %d instead\n",
		    IXL_ITR_8K);
		pf->rx_itr = IXL_ITR_8K;
	} else
		pf->rx_itr = ixl_rx_itr;

	pf->fc = -1;
	if (ixl_flow_control != -1) {
		if (ixl_flow_control < 0 || ixl_flow_control > 3) {
			device_printf(dev,
			    "Invalid flow_control value of %d set!\n",
			    ixl_flow_control);
			device_printf(dev,
			    "flow_control must be between %d and %d, "
			    "inclusive\n", 0, 3);
			device_printf(dev,
			    "Using default configuration instead\n");
		} else
			pf->fc = ixl_flow_control;
	}
}