xref: /freebsd/sys/dev/ixl/if_ixl.c (revision 9d54812421274e490dc5f0fe4722ab8d35d9b258)
1 /******************************************************************************
2 
3   Copyright (c) 2013-2018, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 #include "ixl.h"
36 #include "ixl_pf.h"
37 
38 #ifdef IXL_IW
39 #include "ixl_iw.h"
40 #include "ixl_iw_int.h"
41 #endif
42 
43 #ifdef PCI_IOV
44 #include "ixl_pf_iov.h"
45 #endif
46 
47 /*********************************************************************
48  *  Driver version
49  *********************************************************************/
50 #define IXL_DRIVER_VERSION_MAJOR	2
51 #define IXL_DRIVER_VERSION_MINOR	3
52 #define IXL_DRIVER_VERSION_BUILD	2
53 
54 #define IXL_DRIVER_VERSION_STRING			\
55     __XSTRING(IXL_DRIVER_VERSION_MAJOR) "."		\
56     __XSTRING(IXL_DRIVER_VERSION_MINOR) "."		\
57     __XSTRING(IXL_DRIVER_VERSION_BUILD) "-k"
58 
59 /*********************************************************************
60  *  PCI Device ID Table
61  *
62  *  Used by probe to select devices to load on
63  *
64  *  ( Vendor ID, Device ID, Branding String )
65  *********************************************************************/
66 
/*
 * PCI IDs claimed by this driver; iflib matches device_probe against this
 * table and uses the branding string as the device description.
 */
static pci_vendor_info_t ixl_vendor_info_array[] =
{
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, "Intel(R) Ethernet Controller X710 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, "Intel(R) Ethernet Controller XL710 for 40GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, "Intel(R) Ethernet Controller X710 for 10GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, "Intel(R) Ethernet Controller X710 for 10GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, "Intel(R) Ethernet Controller X710 for 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, "Intel(R) Ethernet Controller X710/X557-AT 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722, "Intel(R) Ethernet Connection X722 for 10GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 1GbE"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B, "Intel(R) Ethernet Controller XXV710 for 25GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28, "Intel(R) Ethernet Controller XXV710 for 25GbE SFP28"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_BC, "Intel(R) Ethernet Controller X710 for 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_SFP, "Intel(R) Ethernet Controller X710 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_B, "Intel(R) Ethernet Controller X710 for 10GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_5G_BASE_T_BC, "Intel(R) Ethernet Controller V710 for 5GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_BC, "Intel(R) Ethernet Controller I710 for 1GBASE-T"),
	/* required last entry */
	PVID_END
};
93 
94 /*********************************************************************
95  *  Function prototypes
96  *********************************************************************/
97 /*** IFLIB interface ***/
98 static void	*ixl_register(device_t dev);
99 static int	 ixl_if_attach_pre(if_ctx_t ctx);
100 static int	 ixl_if_attach_post(if_ctx_t ctx);
101 static int	 ixl_if_detach(if_ctx_t ctx);
102 static int	 ixl_if_shutdown(if_ctx_t ctx);
103 static int	 ixl_if_suspend(if_ctx_t ctx);
104 static int	 ixl_if_resume(if_ctx_t ctx);
105 static int	 ixl_if_msix_intr_assign(if_ctx_t ctx, int msix);
106 static void	 ixl_if_enable_intr(if_ctx_t ctx);
107 static void	 ixl_if_disable_intr(if_ctx_t ctx);
108 static int	 ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid);
109 static int	 ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid);
110 static int	 ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets);
111 static int	 ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
112 static void	 ixl_if_queues_free(if_ctx_t ctx);
113 static void	 ixl_if_update_admin_status(if_ctx_t ctx);
114 static void	 ixl_if_multi_set(if_ctx_t ctx);
115 static int	 ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
116 static void	 ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr);
117 static int	 ixl_if_media_change(if_ctx_t ctx);
118 static int	 ixl_if_promisc_set(if_ctx_t ctx, int flags);
119 static void	 ixl_if_timer(if_ctx_t ctx, uint16_t qid);
120 static void	 ixl_if_vlan_register(if_ctx_t ctx, u16 vtag);
121 static void	 ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
122 static uint64_t	 ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt);
123 static int	 ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
124 static int	 ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data);
125 static bool	 ixl_if_needs_restart(if_ctx_t ctx, enum iflib_restart_event event);
126 #ifdef PCI_IOV
127 static void	 ixl_if_vflr_handle(if_ctx_t ctx);
128 #endif
129 
130 /*** Other ***/
131 static void	 ixl_save_pf_tunables(struct ixl_pf *);
132 static int	 ixl_allocate_pci_resources(struct ixl_pf *);
133 static void	 ixl_setup_ssctx(struct ixl_pf *pf);
134 static void	 ixl_admin_timer(void *arg);
135 
136 /*********************************************************************
137  *  FreeBSD Device Interface Entry Points
138  *********************************************************************/
139 
/*
 * newbus method table: everything except device_register is delegated to the
 * generic iflib handlers, which in turn call back into the ifdi_* methods
 * declared in ixl_if_methods below.
 */
static device_method_t ixl_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, ixl_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
#ifdef PCI_IOV
	/* SR-IOV requests are likewise routed through iflib */
	DEVMETHOD(pci_iov_init, iflib_device_iov_init),
	DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
#endif
	DEVMETHOD_END
};
154 
/* Driver glue: softc is the whole PF state (struct ixl_pf). */
static driver_t ixl_driver = {
	"ixl", ixl_methods, sizeof(struct ixl_pf),
};

DRIVER_MODULE(ixl, pci, ixl_driver, 0, 0);
/* Export the PCI ID table for devmatch(8)/PNP autoloading */
IFLIB_PNP_INFO(pci, ixl, ixl_vendor_info_array);
MODULE_VERSION(ixl, 3);

MODULE_DEPEND(ixl, pci, 1, 1, 1);
MODULE_DEPEND(ixl, ether, 1, 1, 1);
MODULE_DEPEND(ixl, iflib, 1, 1, 1);
166 
/*
 * iflib ifdi_* method table: these are the driver entry points iflib invokes
 * for interface lifecycle, queue setup, interrupts, and ioctl-style requests.
 */
static device_method_t ixl_if_methods[] = {
	DEVMETHOD(ifdi_attach_pre, ixl_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, ixl_if_attach_post),
	DEVMETHOD(ifdi_detach, ixl_if_detach),
	DEVMETHOD(ifdi_shutdown, ixl_if_shutdown),
	DEVMETHOD(ifdi_suspend, ixl_if_suspend),
	DEVMETHOD(ifdi_resume, ixl_if_resume),
	DEVMETHOD(ifdi_init, ixl_if_init),
	DEVMETHOD(ifdi_stop, ixl_if_stop),
	DEVMETHOD(ifdi_msix_intr_assign, ixl_if_msix_intr_assign),
	DEVMETHOD(ifdi_intr_enable, ixl_if_enable_intr),
	DEVMETHOD(ifdi_intr_disable, ixl_if_disable_intr),
	DEVMETHOD(ifdi_rx_queue_intr_enable, ixl_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queue_intr_enable, ixl_if_tx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queues_alloc, ixl_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, ixl_if_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, ixl_if_queues_free),
	DEVMETHOD(ifdi_update_admin_status, ixl_if_update_admin_status),
	DEVMETHOD(ifdi_multi_set, ixl_if_multi_set),
	DEVMETHOD(ifdi_mtu_set, ixl_if_mtu_set),
	DEVMETHOD(ifdi_media_status, ixl_if_media_status),
	DEVMETHOD(ifdi_media_change, ixl_if_media_change),
	DEVMETHOD(ifdi_promisc_set, ixl_if_promisc_set),
	DEVMETHOD(ifdi_timer, ixl_if_timer),
	DEVMETHOD(ifdi_vlan_register, ixl_if_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister, ixl_if_vlan_unregister),
	DEVMETHOD(ifdi_get_counter, ixl_if_get_counter),
	DEVMETHOD(ifdi_i2c_req, ixl_if_i2c_req),
	DEVMETHOD(ifdi_priv_ioctl, ixl_if_priv_ioctl),
	DEVMETHOD(ifdi_needs_restart, ixl_if_needs_restart),
#ifdef PCI_IOV
	DEVMETHOD(ifdi_iov_init, ixl_if_iov_init),
	DEVMETHOD(ifdi_iov_uninit, ixl_if_iov_uninit),
	DEVMETHOD(ifdi_iov_vf_add, ixl_if_iov_vf_add),
	DEVMETHOD(ifdi_vflr_handle, ixl_if_vflr_handle),
#endif
	/* Not implemented: */
	// ifdi_led_func
	// ifdi_debug
	DEVMETHOD_END
};

/* Referenced by ixl_sctx_init.isc_driver so iflib can dispatch ifdi methods */
static driver_t ixl_if_driver = {
	"ixl_if", ixl_if_methods, sizeof(struct ixl_pf)
};
211 
212 /*
213 ** TUNEABLE PARAMETERS:
214 */
215 
/*
 * All knobs below are CTLFLAG_RDTUN: settable only as loader tunables
 * (hw.ixl.*) at boot, read-only via sysctl afterwards.  The values are
 * copied into each PF instance by ixl_save_pf_tunables().
 */
static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "ixl driver parameters");

#ifdef IXL_DEBUG_FC
/*
 * Leave this on unless you need to send flow control
 * frames (or other control frames) from software
 */
static int ixl_enable_tx_fc_filter = 1;
TUNABLE_INT("hw.ixl.enable_tx_fc_filter",
    &ixl_enable_tx_fc_filter);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_tx_fc_filter, CTLFLAG_RDTUN,
    &ixl_enable_tx_fc_filter, 0,
    "Filter out packets with Ethertype 0x8808 from being sent out by non-HW sources");
#endif

#ifdef IXL_DEBUG
static int ixl_debug_recovery_mode = 0;
TUNABLE_INT("hw.ixl.debug_recovery_mode",
    &ixl_debug_recovery_mode);
SYSCTL_INT(_hw_ixl, OID_AUTO, debug_recovery_mode, CTLFLAG_RDTUN,
    &ixl_debug_recovery_mode, 0,
    "Act like when FW entered recovery mode (for debugging)");
#endif

static int ixl_i2c_access_method = 0;
TUNABLE_INT("hw.ixl.i2c_access_method",
    &ixl_i2c_access_method);
SYSCTL_INT(_hw_ixl, OID_AUTO, i2c_access_method, CTLFLAG_RDTUN,
    &ixl_i2c_access_method, 0,
    IXL_SYSCTL_HELP_I2C_METHOD);

static int ixl_enable_vf_loopback = 1;
TUNABLE_INT("hw.ixl.enable_vf_loopback",
    &ixl_enable_vf_loopback);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_vf_loopback, CTLFLAG_RDTUN,
    &ixl_enable_vf_loopback, 0,
    IXL_SYSCTL_HELP_VF_LOOPBACK);

/*
 * Different method for processing TX descriptor
 * completion.  Selects between the ixl_txrx_hwb (head writeback) and
 * ixl_txrx_dwb (descriptor writeback) if_txrx tables in ixl_setup_ssctx().
 */
static int ixl_enable_head_writeback = 1;
TUNABLE_INT("hw.ixl.enable_head_writeback",
    &ixl_enable_head_writeback);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN,
    &ixl_enable_head_writeback, 0,
    "For detecting last completed TX descriptor by hardware, use value written by HW instead of checking descriptors");

static int ixl_core_debug_mask = 0;
TUNABLE_INT("hw.ixl.core_debug_mask",
    &ixl_core_debug_mask);
SYSCTL_INT(_hw_ixl, OID_AUTO, core_debug_mask, CTLFLAG_RDTUN,
    &ixl_core_debug_mask, 0,
    "Display debug statements that are printed in non-shared code");

static int ixl_shared_debug_mask = 0;
TUNABLE_INT("hw.ixl.shared_debug_mask",
    &ixl_shared_debug_mask);
SYSCTL_INT(_hw_ixl, OID_AUTO, shared_debug_mask, CTLFLAG_RDTUN,
    &ixl_shared_debug_mask, 0,
    "Display debug statements that are printed in shared code");

#if 0
/*
** Controls for Interrupt Throttling
**	- true/false for dynamic adjustment
** 	- default values for static ITR
*/
static int ixl_dynamic_rx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");

static int ixl_dynamic_tx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
#endif

static int ixl_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
    &ixl_rx_itr, 0, "RX Interrupt Rate");

static int ixl_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
    &ixl_tx_itr, 0, "TX Interrupt Rate");

/* -1 means "leave at hardware/firmware default"; see flow control sysctls */
static int ixl_flow_control = -1;
SYSCTL_INT(_hw_ixl, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    &ixl_flow_control, 0, "Initial Flow Control setting");

#ifdef IXL_IW
/* non-static: shared with the iWARP interface code (ixl_iw.c) */
int ixl_enable_iwarp = 0;
TUNABLE_INT("hw.ixl.enable_iwarp", &ixl_enable_iwarp);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_iwarp, CTLFLAG_RDTUN,
    &ixl_enable_iwarp, 0, "iWARP enabled");

#if __FreeBSD_version < 1100000
int ixl_limit_iwarp_msix = 1;
#else
int ixl_limit_iwarp_msix = IXL_IW_MAX_MSIX;
#endif
TUNABLE_INT("hw.ixl.limit_iwarp_msix", &ixl_limit_iwarp_msix);
SYSCTL_INT(_hw_ixl, OID_AUTO, limit_iwarp_msix, CTLFLAG_RDTUN,
    &ixl_limit_iwarp_msix, 0, "Limit MSI-X vectors assigned to iWARP");
#endif
326 
/* TX/RX fast-path method tables, defined in the txrx source file:
 * hwb = head writeback, dwb = descriptor writeback (see
 * hw.ixl.enable_head_writeback). */
extern struct if_txrx ixl_txrx_hwb;
extern struct if_txrx ixl_txrx_dwb;

/*
 * Static iflib shared context template returned from ixl_register().
 * Describes DMA limits, queue geometry bounds, and driver identity;
 * per-instance values are filled in later by ixl_setup_ssctx().
 */
static struct if_shared_ctx ixl_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_q_align = PAGE_SIZE,
	.isc_tx_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
	.isc_tso_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
	.isc_rx_maxsize = 16384,
	.isc_rx_nsegments = IXL_MAX_RX_SEGS,
	.isc_rx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
	.isc_nfl = 1,
	.isc_ntxqs = 1,
	.isc_nrxqs = 1,

	/* one vector reserved for the admin queue interrupt */
	.isc_admin_intrcnt = 1,
	.isc_vendor_info = ixl_vendor_info_array,
	.isc_driver_version = IXL_DRIVER_VERSION_STRING,
	.isc_driver = &ixl_if_driver,
	.isc_flags = IFLIB_NEED_SCRATCH | IFLIB_NEED_ZERO_CSUM | IFLIB_TSO_INIT_IP | IFLIB_ADMIN_ALWAYS_RUN,

	.isc_nrxd_min = {IXL_MIN_RING},
	.isc_ntxd_min = {IXL_MIN_RING},
	.isc_nrxd_max = {IXL_MAX_RING},
	.isc_ntxd_max = {IXL_MAX_RING},
	.isc_nrxd_default = {IXL_DEFAULT_RING},
	.isc_ntxd_default = {IXL_DEFAULT_RING},
};
357 
358 /*** Functions ***/
/*
 * device_register method: hand iflib the shared context template so it can
 * drive probe/attach for this driver.  Called once per device, before attach.
 */
static void *
ixl_register(device_t dev)
{
	return (&ixl_sctx_init);
}
364 
/*
 * Map BAR0 (the device register space) and record the PCI identity and
 * bus-space access information needed by the Intel shared code.
 *
 * Returns 0 on success, ENXIO if BAR0 could not be allocated.  The mapping
 * is released by ixl_free_pci_resources() on the error/detach paths.
 */
static int
ixl_allocate_pci_resources(struct ixl_pf *pf)
{
	device_t dev = iflib_get_dev(pf->vsi.ctx);
	struct i40e_hw *hw = &pf->hw;
	int             rid;

	/* Map BAR0 */
	rid = PCIR_BAR(0);
	pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);

	if (!(pf->pci_mem)) {
		device_printf(dev, "Unable to allocate bus resource: PCI memory\n");
		return (ENXIO);
	}

	/* Save off the PCI information */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
	hw->subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	hw->subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	hw->bus.device = pci_get_slot(dev);
	hw->bus.func = pci_get_function(dev);

	/* Save off register access information */
	pf->osdep.mem_bus_space_tag =
		rman_get_bustag(pf->pci_mem);
	pf->osdep.mem_bus_space_handle =
		rman_get_bushandle(pf->pci_mem);
	pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
	pf->osdep.flush_reg = I40E_GLGEN_STAT;
	pf->osdep.dev = dev;

	/*
	 * NOTE: hw_addr deliberately points at the saved bus-space handle,
	 * not at a direct register mapping — presumably the osdep register
	 * read/write wrappers dereference it to recover the handle.
	 */
	pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
	pf->hw.back = &pf->osdep;

	return (0);
}
408 
/*
 * Fill in the per-instance iflib softc context: queue-set limits, ring byte
 * sizes, TX/RX method table, MSI-X BAR, segment limits and capabilities.
 * Called at the end of attach_pre (and from the recovery-mode attach path).
 */
static void
ixl_setup_ssctx(struct ixl_pf *pf)
{
	if_softc_ctx_t scctx = pf->vsi.shared;
	struct i40e_hw *hw = &pf->hw;

	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
		/* Recovery mode: single queue pair, no scaling */
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 1;
		scctx->isc_ntxqsets = scctx->isc_nrxqsets = 1;
	} else if (hw->mac.type == I40E_MAC_X722)
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 128;
	else
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;

	if (pf->vsi.enable_head_writeback) {
		/* Extra u32 at the end of the ring holds the HW-written head index */
		scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
		    * sizeof(struct i40e_tx_desc) + sizeof(u32), DBA_ALIGN);
		scctx->isc_txrx = &ixl_txrx_hwb;
	} else {
		scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
		    * sizeof(struct i40e_tx_desc), DBA_ALIGN);
		scctx->isc_txrx = &ixl_txrx_dwb;
	}

	/* Both txrx tables share the same legacy (INTx/MSI) handler */
	scctx->isc_txrx->ift_legacy_intr = ixl_intr;
	scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0]
	    * sizeof(union i40e_32byte_rx_desc), DBA_ALIGN);
	scctx->isc_msix_bar = PCIR_BAR(IXL_MSIX_BAR);
	scctx->isc_tx_nsegments = IXL_MAX_TX_SEGS;
	scctx->isc_tx_tso_segments_max = IXL_MAX_TSO_SEGS;
	scctx->isc_tx_tso_size_max = IXL_TSO_SIZE;
	scctx->isc_tx_tso_segsize_max = IXL_MAX_DMA_SEG_SIZE;
	scctx->isc_rss_table_size = pf->hw.func_caps.rss_table_size;
	scctx->isc_tx_csum_flags = CSUM_OFFLOAD;
	scctx->isc_capabilities = scctx->isc_capenable = IXL_CAPS;
}
445 
446 static void
447 ixl_admin_timer(void *arg)
448 {
449 	struct ixl_pf *pf = (struct ixl_pf *)arg;
450 
451 	/* Fire off the admin task */
452 	iflib_admin_intr_deferred(pf->vsi.ctx);
453 
454 	/* Reschedule the admin timer */
455 	callout_schedule(&pf->admin_timer, hz/2);
456 }
457 
/*
 * Minimal attach path used when the firmware reports recovery mode: skip
 * normal capability/HMC/filter setup, fetch the MAC address, keep the admin
 * queue interrupt alive (MSI-X only), and fill in the iflib context.
 *
 * Always returns 0; callers in ixl_if_attach_pre() return immediately after.
 */
static int
ixl_attach_pre_recovery_mode(struct ixl_pf *pf)
{
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;

	device_printf(dev, "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");

	i40e_get_mac_addr(hw, hw->mac.addr);

	/* Only interrupt vector 0 (admin queue) is used in recovery mode */
	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
		ixl_configure_intr0_msix(pf);
		ixl_enable_intr0(hw);
	}

	ixl_setup_ssctx(pf);

	return (0);
}
478 
/*
 * First-stage iflib attach: map PCI resources, reset the PF, bring up the
 * Intel shared code and admin queue, validate NVM/firmware API versions,
 * read hardware capabilities and MAC addresses, set up the HMC and filter
 * control, and finally publish queue/DMA parameters via ixl_setup_ssctx().
 *
 * On firmware recovery mode this short-circuits into
 * ixl_attach_pre_recovery_mode().  Error paths unwind in reverse order of
 * setup via the err_* labels at the bottom; the admin mutex/callout are
 * initialized before the first failure point so the common unwind at
 * err_pci_res can always tear them down safely.
 *
 * Returns 0 on success or a errno value (ENXIO/EIO/...) on failure.
 */
static int
ixl_if_attach_pre(if_ctx_t ctx)
{
	device_t dev;
	struct ixl_pf *pf;
	struct i40e_hw *hw;
	struct ixl_vsi *vsi;
	enum i40e_get_fw_lldp_status_resp lldp_status;
	struct i40e_filter_control_settings filter;
	enum i40e_status_code status;
	int error = 0;

	dev = iflib_get_dev(ctx);
	pf = iflib_get_softc(ctx);

	INIT_DBG_DEV(dev, "begin");

	/* Wire up the PF <-> VSI back-pointers and iflib handles */
	vsi = &pf->vsi;
	vsi->back = pf;
	pf->dev = dev;
	hw = &pf->hw;

	vsi->dev = dev;
	vsi->hw = &pf->hw;
	vsi->id = 0;
	vsi->num_vlans = 0;
	vsi->ctx = ctx;
	vsi->media = iflib_get_media(ctx);
	vsi->shared = iflib_get_softc_ctx(ctx);

	/* Admin mutex/callout must exist before any goto err_pci_res below */
	snprintf(pf->admin_mtx_name, sizeof(pf->admin_mtx_name),
	    "%s:admin", device_get_nameunit(dev));
	mtx_init(&pf->admin_mtx, pf->admin_mtx_name, NULL, MTX_DEF);
	callout_init_mtx(&pf->admin_timer, &pf->admin_mtx, 0);

	/* Save tunable values */
	ixl_save_pf_tunables(pf);

	/* Do PCI setup - map BAR0, etc */
	if (ixl_allocate_pci_resources(pf)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_pci_res;
	}

	/* Establish a clean starting point */
	i40e_clear_hw(hw);
	i40e_set_mac_type(hw);

	error = ixl_pf_reset(pf);
	if (error)
		goto err_out;

	/* Initialize the shared code */
	status = i40e_init_shared_code(hw);
	if (status) {
		device_printf(dev, "Unable to initialize shared code, error %s\n",
		    i40e_stat_str(hw, status));
		error = EIO;
		goto err_out;
	}

	/* Set up the admin queue */
	hw->aq.num_arq_entries = IXL_AQ_LEN;
	hw->aq.num_asq_entries = IXL_AQ_LEN;
	hw->aq.arq_buf_size = IXL_AQ_BUF_SZ;
	hw->aq.asq_buf_size = IXL_AQ_BUF_SZ;

	/*
	 * I40E_ERR_FIRMWARE_API_VERSION is tolerated here only so the NVM
	 * version can be printed before bailing out below.
	 */
	status = i40e_init_adminq(hw);
	if (status != 0 && status != I40E_ERR_FIRMWARE_API_VERSION) {
		device_printf(dev, "Unable to initialize Admin Queue, error %s\n",
		    i40e_stat_str(hw, status));
		error = EIO;
		goto err_out;
	}
	ixl_print_nvm_version(pf);

	if (status == I40E_ERR_FIRMWARE_API_VERSION) {
		device_printf(dev, "The driver for the device stopped "
		    "because the NVM image is newer than expected.\n");
		device_printf(dev, "You must install the most recent version of "
		    "the network driver.\n");
		error = EIO;
		goto err_out;
	}

	/* Warn (but continue) on minor-version skew in either direction */
	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw)) {
		device_printf(dev, "The driver for the device detected "
		    "a newer version of the NVM image than expected.\n");
		device_printf(dev, "Please install the most recent version "
		    "of the network driver.\n");
	} else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4) {
		device_printf(dev, "The driver for the device detected "
		    "an older version of the NVM image than expected.\n");
		device_printf(dev, "Please update the NVM image.\n");
	}

	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
		error = ixl_attach_pre_recovery_mode(pf);
		if (error)
			goto err_out;
		return (error);
	}

	/* Clear PXE mode */
	i40e_clear_pxe_mode(hw);

	/* Get capabilities from the device */
	error = ixl_get_hw_capabilities(pf);
	if (error) {
		device_printf(dev, "get_hw_capabilities failed: %d\n",
		    error);
		goto err_get_cap;
	}

	/* Set up host memory cache */
	error = ixl_setup_hmc(pf);
	if (error)
		goto err_mac_hmc;

	/* Disable LLDP from the firmware for certain NVM versions */
	if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
	    (pf->hw.aq.fw_maj_ver < 4)) {
		i40e_aq_stop_lldp(hw, true, false, NULL);
		/* NOTE(review): plain |= here vs atomic_set_32 elsewhere;
		 * presumably safe because attach is single-threaded — confirm */
		pf->state |= IXL_PF_STATE_FW_LLDP_DISABLED;
	}

	/* Try enabling Energy Efficient Ethernet (EEE) mode */
	if (i40e_enable_eee(hw, true) == I40E_SUCCESS)
		atomic_set_32(&pf->state, IXL_PF_STATE_EEE_ENABLED);
	else
		atomic_clear_32(&pf->state, IXL_PF_STATE_EEE_ENABLED);

	/* Get MAC addresses from hardware */
	i40e_get_mac_addr(hw, hw->mac.addr);
	error = i40e_validate_mac_addr(hw->mac.addr);
	if (error) {
		device_printf(dev, "validate_mac_addr failed: %d\n", error);
		goto err_mac_hmc;
	}
	bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
	iflib_set_mac(ctx, hw->mac.addr);
	i40e_get_port_mac_addr(hw, hw->mac.port_addr);

	/* Set up the device filtering */
	bzero(&filter, sizeof(filter));
	filter.enable_ethtype = TRUE;
	filter.enable_macvlan = TRUE;
	filter.enable_fdir = FALSE;
	filter.hash_lut_size = I40E_HASH_LUT_SIZE_512;
	if (i40e_set_filter_control(hw, &filter))
		device_printf(dev, "i40e_set_filter_control() failed\n");

	/* Query device FW LLDP status */
	if (i40e_get_fw_lldp_status(hw, &lldp_status) == I40E_SUCCESS) {
		if (lldp_status == I40E_GET_FW_LLDP_STATUS_DISABLED) {
			atomic_set_32(&pf->state,
			    IXL_PF_STATE_FW_LLDP_DISABLED);
		} else {
			atomic_clear_32(&pf->state,
			    IXL_PF_STATE_FW_LLDP_DISABLED);
		}
	}

	/* Tell FW to apply DCB config on link up */
	i40e_aq_set_dcb_parameters(hw, true, NULL);

	/* Fill out iflib parameters */
	ixl_setup_ssctx(pf);

	INIT_DBG_DEV(dev, "end");
	return (0);

/* Unwind in reverse order of setup */
err_mac_hmc:
	ixl_shutdown_hmc(pf);
err_get_cap:
	i40e_shutdown_adminq(hw);
err_out:
	ixl_free_pci_resources(pf);
err_pci_res:
	mtx_lock(&pf->admin_mtx);
	callout_stop(&pf->admin_timer);
	mtx_unlock(&pf->admin_mtx);
	mtx_destroy(&pf->admin_mtx);
	return (error);
}
666 
667 static int
668 ixl_if_attach_post(if_ctx_t ctx)
669 {
670 	device_t dev;
671 	struct ixl_pf *pf;
672 	struct i40e_hw *hw;
673 	struct ixl_vsi *vsi;
674 	int error = 0;
675 	enum i40e_status_code status;
676 
677 	dev = iflib_get_dev(ctx);
678 	pf = iflib_get_softc(ctx);
679 
680 	INIT_DBG_DEV(dev, "begin");
681 
682 	vsi = &pf->vsi;
683 	vsi->ifp = iflib_get_ifp(ctx);
684 	hw = &pf->hw;
685 
686 	/* Save off determined number of queues for interface */
687 	vsi->num_rx_queues = vsi->shared->isc_nrxqsets;
688 	vsi->num_tx_queues = vsi->shared->isc_ntxqsets;
689 
690 	/* Setup OS network interface / ifnet */
691 	if (ixl_setup_interface(dev, pf)) {
692 		device_printf(dev, "interface setup failed!\n");
693 		error = EIO;
694 		goto err;
695 	}
696 
697 	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
698 		/* Keep admin queue interrupts active while driver is loaded */
699 		if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
700 			ixl_configure_intr0_msix(pf);
701 			ixl_enable_intr0(hw);
702 		}
703 
704 		ixl_add_sysctls_recovery_mode(pf);
705 
706 		/* Start the admin timer */
707 		mtx_lock(&pf->admin_mtx);
708 		callout_reset(&pf->admin_timer, hz/2, ixl_admin_timer, pf);
709 		mtx_unlock(&pf->admin_mtx);
710 		return (0);
711 	}
712 
713 	/* Determine link state */
714 	if (ixl_attach_get_link_status(pf)) {
715 		error = EINVAL;
716 		goto err;
717 	}
718 
719 	error = ixl_switch_config(pf);
720 	if (error) {
721 		device_printf(dev, "Initial ixl_switch_config() failed: %d\n",
722 		     error);
723 		goto err;
724 	}
725 
726 	/* Add protocol filters to list */
727 	ixl_init_filters(vsi);
728 
729 	/* Init queue allocation manager */
730 	error = ixl_pf_qmgr_init(&pf->qmgr, hw->func_caps.num_tx_qp);
731 	if (error) {
732 		device_printf(dev, "Failed to init queue manager for PF queues, error %d\n",
733 		    error);
734 		goto err;
735 	}
736 	/* reserve a contiguous allocation for the PF's VSI */
737 	error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr,
738 	    max(vsi->num_rx_queues, vsi->num_tx_queues), &pf->qtag);
739 	if (error) {
740 		device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
741 		    error);
742 		goto err;
743 	}
744 	device_printf(dev, "Allocating %d queues for PF LAN VSI; %d queues active\n",
745 	    pf->qtag.num_allocated, pf->qtag.num_active);
746 
747 	/* Limit PHY interrupts to link, autoneg, and modules failure */
748 	status = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
749 	    NULL);
750         if (status) {
751 		device_printf(dev, "i40e_aq_set_phy_mask() failed: err %s,"
752 		    " aq_err %s\n", i40e_stat_str(hw, status),
753 		    i40e_aq_str(hw, hw->aq.asq_last_status));
754 		goto err;
755 	}
756 
757 	/* Get the bus configuration and set the shared code */
758 	ixl_get_bus_info(pf);
759 
760 	/* Keep admin queue interrupts active while driver is loaded */
761 	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
762  		ixl_configure_intr0_msix(pf);
763  		ixl_enable_intr0(hw);
764 	}
765 
766 	/* Set initial advertised speed sysctl value */
767 	ixl_set_initial_advertised_speeds(pf);
768 
769 	/* Initialize statistics & add sysctls */
770 	ixl_add_device_sysctls(pf);
771 	ixl_pf_reset_stats(pf);
772 	ixl_update_stats_counters(pf);
773 	ixl_add_hw_stats(pf);
774 
775 	/*
776 	 * Driver may have been reloaded. Ensure that the link state
777 	 * is consistent with current settings.
778 	 */
779 	ixl_set_link(pf, (pf->state & IXL_PF_STATE_LINK_ACTIVE_ON_DOWN) != 0);
780 
781 	hw->phy.get_link_info = true;
782 	i40e_get_link_status(hw, &pf->link_up);
783 	ixl_update_link_status(pf);
784 
785 #ifdef PCI_IOV
786 	ixl_initialize_sriov(pf);
787 #endif
788 
789 #ifdef IXL_IW
790 	if (hw->func_caps.iwarp && ixl_enable_iwarp) {
791 		pf->iw_enabled = (pf->iw_msix > 0) ? true : false;
792 		if (pf->iw_enabled) {
793 			error = ixl_iw_pf_attach(pf);
794 			if (error) {
795 				device_printf(dev,
796 				    "interfacing to iWARP driver failed: %d\n",
797 				    error);
798 				goto err;
799 			} else
800 				device_printf(dev, "iWARP ready\n");
801 		} else
802 			device_printf(dev, "iWARP disabled on this device "
803 			    "(no MSI-X vectors)\n");
804 	} else {
805 		pf->iw_enabled = false;
806 		device_printf(dev, "The device is not iWARP enabled\n");
807 	}
808 #endif
809 	/* Start the admin timer */
810 	mtx_lock(&pf->admin_mtx);
811 	callout_reset(&pf->admin_timer, hz/2, ixl_admin_timer, pf);
812 	mtx_unlock(&pf->admin_mtx);
813 
814 	INIT_DBG_DEV(dev, "end");
815 	return (0);
816 
817 err:
818 	INIT_DEBUGOUT("end: error %d", error);
819 	/* ixl_if_detach() is called on error from this */
820 	return (error);
821 }
822 
823 /**
824  * XXX: iflib always ignores the return value of detach()
825  * -> This means that this isn't allowed to fail
826  */
/*
 * iflib detach: stop and destroy the admin timer, detach iWARP (if built),
 * remove media entries, shut down the HMC and admin queue, and release the
 * queue manager, PCI resources, and filter list.
 *
 * Always returns 0 — iflib ignores the return value of detach, so this
 * function must not fail (see the XXX note above).
 */
static int
ixl_if_detach(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	enum i40e_status_code	status;
#ifdef IXL_IW
	int			error;
#endif

	INIT_DBG_DEV(dev, "begin");

	/* Stop the admin timer */
	mtx_lock(&pf->admin_mtx);
	callout_stop(&pf->admin_timer);
	mtx_unlock(&pf->admin_mtx);
	mtx_destroy(&pf->admin_mtx);

#ifdef IXL_IW
	if (ixl_enable_iwarp && pf->iw_enabled) {
		error = ixl_iw_pf_detach(pf);
		if (error == EBUSY) {
			device_printf(dev, "iwarp in use; stop it first.\n");
			/* Deliberately not returning: detach cannot fail */
			//return (error);
		}
	}
#endif
	/* Remove all previously allocated media types */
	ifmedia_removeall(vsi->media);

	/* Shutdown LAN HMC */
	ixl_shutdown_hmc(pf);

	/* Shutdown admin queue */
	ixl_disable_intr0(hw);
	status = i40e_shutdown_adminq(hw);
	if (status)
		device_printf(dev,
		    "i40e_shutdown_adminq() failed with status %s\n",
		    i40e_stat_str(hw, status));

	ixl_pf_qmgr_destroy(&pf->qmgr);
	ixl_free_pci_resources(pf);
	ixl_free_filters(&vsi->ftl);
	INIT_DBG_DEV(dev, "end");
	return (0);
}
876 
877 static int
878 ixl_if_shutdown(if_ctx_t ctx)
879 {
880 	int error = 0;
881 
882 	INIT_DEBUGOUT("ixl_if_shutdown: begin");
883 
884 	/* TODO: Call ixl_if_stop()? */
885 
886 	/* TODO: Then setup low power mode */
887 
888 	return (error);
889 }
890 
891 static int
892 ixl_if_suspend(if_ctx_t ctx)
893 {
894 	int error = 0;
895 
896 	INIT_DEBUGOUT("ixl_if_suspend: begin");
897 
898 	/* TODO: Call ixl_if_stop()? */
899 
900 	/* TODO: Then setup low power mode */
901 
902 	return (error);
903 }
904 
905 static int
906 ixl_if_resume(if_ctx_t ctx)
907 {
908 	struct ifnet *ifp = iflib_get_ifp(ctx);
909 
910 	INIT_DEBUGOUT("ixl_if_resume: begin");
911 
912 	/* Read & clear wake-up registers */
913 
914 	/* Required after D3->D0 transition */
915 	if (ifp->if_flags & IFF_UP)
916 		ixl_if_init(ctx);
917 
918 	return (0);
919 }
920 
/*
 * ixl_if_init - Bring the interface up.
 *
 * Rebuilds driver/HW state if the admin queue died, refreshes the MAC
 * address (a user may have configured an LAA), programs the VSI, RSS,
 * interrupt routing and filters, then enables the rings.
 */
void
ixl_if_init(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw	*hw = &pf->hw;
	struct ifnet *ifp = iflib_get_ifp(ctx);
	device_t 	dev = iflib_get_dev(ctx);
	u8		tmpaddr[ETHER_ADDR_LEN];
	int		ret;

	/* Don't touch the hardware while firmware recovery is active */
	if (IXL_PF_IN_RECOVERY_MODE(pf))
		return;
	/*
	 * If the aq is dead here, it probably means something outside of the driver
	 * did something to the adapter, like a PF reset.
	 * So, rebuild the driver's state here if that occurs.
	 */
	if (!i40e_check_asq_alive(&pf->hw)) {
		device_printf(dev, "Admin Queue is down; resetting...\n");
		ixl_teardown_hw_structs(pf);
		ixl_rebuild_hw_structs_after_reset(pf, false);
	}

	/* Get the latest mac address... User might use a LAA */
	bcopy(IF_LLADDR(vsi->ifp), tmpaddr, ETH_ALEN);
	if (!ixl_ether_is_equal(hw->mac.addr, tmpaddr) &&
	    (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
		/* Address changed: drop VLAN filters tied to the old MAC */
		ixl_del_all_vlan_filters(vsi, hw->mac.addr);
		bcopy(tmpaddr, hw->mac.addr, ETH_ALEN);
		ret = i40e_aq_mac_address_write(hw,
		    I40E_AQC_WRITE_TYPE_LAA_ONLY,
		    hw->mac.addr, NULL);
		if (ret) {
			device_printf(dev, "LLA address change failed!!\n");
			return;
		}
		/*
		 * New filters are configured by ixl_reconfigure_filters
		 * at the end of ixl_init_locked.
		 */
	}

	iflib_set_mac(ctx, hw->mac.addr);

	/* Prepare the VSI: rings, hmc contexts, etc... */
	if (ixl_initialize_vsi(vsi)) {
		device_printf(dev, "initialize vsi failed!!\n");
		return;
	}

	ixl_set_link(pf, true);

	/* Reconfigure multicast filters in HW */
	ixl_if_multi_set(ctx);

	/* Set up RSS */
	ixl_config_rss(pf);

	/* Set up MSI-X routing and the ITR settings */
	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
		ixl_configure_queue_intr_msix(pf);
		ixl_configure_itr(pf);
	} else
		ixl_configure_legacy(pf);

	/* Pick TX completion tracking: head writeback vs. report status */
	if (vsi->enable_head_writeback)
		ixl_init_tx_cidx(vsi);
	else
		ixl_init_tx_rsqs(vsi);

	ixl_enable_rings(vsi);

	i40e_aq_set_default_vsi(hw, vsi->seid, NULL);

	/* Re-add configure filters to HW */
	ixl_reconfigure_filters(vsi);

	/* Configure promiscuous mode */
	ixl_if_promisc_set(ctx, if_getflags(ifp));

#ifdef IXL_IW
	/* Restart the iWARP client after the LAN side is up */
	if (ixl_enable_iwarp && pf->iw_enabled) {
		ret = ixl_iw_pf_init(pf);
		if (ret)
			device_printf(dev,
			    "initialize iwarp failed, code %d\n", ret);
	}
#endif
}
1011 
/*
 * ixl_if_stop - Stop packet processing on the interface.
 *
 * Disables queue interrupts and rings; link is only forced down when
 * the interface is actually going down (not on an MTU-style reinit).
 */
void
ixl_if_stop(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ifnet *ifp = iflib_get_ifp(ctx);
	struct ixl_vsi *vsi = &pf->vsi;

	INIT_DEBUGOUT("ixl_if_stop: begin");

	/* Hardware must not be touched in firmware recovery mode */
	if (IXL_PF_IN_RECOVERY_MODE(pf))
		return;

	// TODO: This may need to be reworked
#ifdef IXL_IW
	/* Stop iWARP device */
	if (ixl_enable_iwarp && pf->iw_enabled)
		ixl_iw_pf_stop(pf);
#endif

	ixl_disable_rings_intr(vsi);
	ixl_disable_rings(pf, vsi, &pf->qtag);

	/*
	 * Don't set link state if only reconfiguring
	 * e.g. on MTU change.
	 */
	if ((if_getflags(ifp) & IFF_UP) == 0 &&
	    (atomic_load_acq_32(&pf->state) &
	    IXL_PF_STATE_LINK_ACTIVE_ON_DOWN) == 0)
		ixl_set_link(pf, false);
}
1043 
/*
 * ixl_if_msix_intr_assign - Register MSI-X interrupt handlers.
 *
 * Vector 0 is reserved for the admin queue; RX queues are assigned
 * vectors 1..isc_nrxqsets. TX queues have no hardware vector of their
 * own and are software-mapped onto the RX vectors round-robin. On
 * partial failure, every vector allocated so far is released.
 */
static int
ixl_if_msix_intr_assign(if_ctx_t ctx, int msix)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_rx_queue *rx_que = vsi->rx_queues;
	struct ixl_tx_queue *tx_que = vsi->tx_queues;
	int err, i, rid, vector = 0;
	char buf[16];

	MPASS(vsi->shared->isc_nrxqsets > 0);
	MPASS(vsi->shared->isc_ntxqsets > 0);

	/* Admin Que must use vector 0*/
	rid = vector + 1;
	err = iflib_irq_alloc_generic(ctx, &vsi->irq, rid, IFLIB_INTR_ADMIN,
	    ixl_msix_adminq, pf, 0, "aq");
	if (err) {
		iflib_irq_free(ctx, &vsi->irq);
		device_printf(iflib_get_dev(ctx),
		    "Failed to register Admin Que handler");
		return (err);
	}
	/* Create soft IRQ for handling VFLRs */
	iflib_softirq_alloc_generic(ctx, NULL, IFLIB_INTR_IOV, pf, 0, "iov");

	/* Now set up the stations */
	for (i = 0, vector = 1; i < vsi->shared->isc_nrxqsets; i++, vector++, rx_que++) {
		rid = vector + 1;

		snprintf(buf, sizeof(buf), "rxq%d", i);
		err = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
		    IFLIB_INTR_RXTX, ixl_msix_que, rx_que, rx_que->rxr.me, buf);
		/* XXX: Does the driver work as expected if there are fewer num_rx_queues than
		 * what's expected in the iflib context? */
		if (err) {
			device_printf(iflib_get_dev(ctx),
			    "Failed to allocate queue RX int vector %d, err: %d\n", i, err);
			/* Remember how far we got so the unwind below frees them */
			vsi->num_rx_queues = i + 1;
			goto fail;
		}
		rx_que->msix = vector;
	}

	bzero(buf, sizeof(buf));

	for (i = 0; i < vsi->shared->isc_ntxqsets; i++, tx_que++) {
		snprintf(buf, sizeof(buf), "txq%d", i);
		/* Share the RX vector (round-robin) for this TX queue */
		iflib_softirq_alloc_generic(ctx,
		    &vsi->rx_queues[i % vsi->shared->isc_nrxqsets].que_irq,
		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);

		/* TODO: Maybe call a strategy function for this to figure out which
		* interrupts to map Tx queues to. I don't know if there's an immediately
		* better way than this other than a user-supplied map, though. */
		tx_que->msix = (i % vsi->shared->isc_nrxqsets) + 1;
	}

	return (0);
fail:
	/* Undo: release admin vector and any RX queue vectors allocated */
	iflib_irq_free(ctx, &vsi->irq);
	rx_que = vsi->rx_queues;
	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
		iflib_irq_free(ctx, &rx_que->que_irq);
	return (err);
}
1110 
1111 /*
1112  * Enable all interrupts
1113  *
1114  * Called in:
1115  * iflib_init_locked, after ixl_if_init()
1116  */
1117 static void
1118 ixl_if_enable_intr(if_ctx_t ctx)
1119 {
1120 	struct ixl_pf *pf = iflib_get_softc(ctx);
1121 	struct ixl_vsi *vsi = &pf->vsi;
1122 	struct i40e_hw		*hw = vsi->hw;
1123 	struct ixl_rx_queue	*que = vsi->rx_queues;
1124 
1125 	ixl_enable_intr0(hw);
1126 	/* Enable queue interrupts */
1127 	for (int i = 0; i < vsi->num_rx_queues; i++, que++)
1128 		/* TODO: Queue index parameter is probably wrong */
1129 		ixl_enable_queue(hw, que->rxr.me);
1130 }
1131 
1132 /*
1133  * Disable queue interrupts
1134  *
1135  * Other interrupt causes need to remain active.
1136  */
1137 static void
1138 ixl_if_disable_intr(if_ctx_t ctx)
1139 {
1140 	struct ixl_pf *pf = iflib_get_softc(ctx);
1141 	struct ixl_vsi *vsi = &pf->vsi;
1142 	struct i40e_hw		*hw = vsi->hw;
1143 	struct ixl_rx_queue	*rx_que = vsi->rx_queues;
1144 
1145 	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
1146 		for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
1147 			ixl_disable_queue(hw, rx_que->msix - 1);
1148 	} else {
1149 		// Set PFINT_LNKLST0 FIRSTQ_INDX to 0x7FF
1150 		// stops queues from triggering interrupts
1151 		wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
1152 	}
1153 }
1154 
1155 static int
1156 ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
1157 {
1158 	struct ixl_pf *pf = iflib_get_softc(ctx);
1159 	struct ixl_vsi *vsi = &pf->vsi;
1160 	struct i40e_hw		*hw = vsi->hw;
1161 	struct ixl_rx_queue	*rx_que = &vsi->rx_queues[rxqid];
1162 
1163 	ixl_enable_queue(hw, rx_que->msix - 1);
1164 	return (0);
1165 }
1166 
1167 static int
1168 ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
1169 {
1170 	struct ixl_pf *pf = iflib_get_softc(ctx);
1171 	struct ixl_vsi *vsi = &pf->vsi;
1172 	struct i40e_hw *hw = vsi->hw;
1173 	struct ixl_tx_queue *tx_que = &vsi->tx_queues[txqid];
1174 
1175 	ixl_enable_queue(hw, tx_que->msix - 1);
1176 	return (0);
1177 }
1178 
1179 static int
1180 ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets)
1181 {
1182 	struct ixl_pf *pf = iflib_get_softc(ctx);
1183 	struct ixl_vsi *vsi = &pf->vsi;
1184 	if_softc_ctx_t scctx = vsi->shared;
1185 	struct ixl_tx_queue *que;
1186 	int i, j, error = 0;
1187 
1188 	MPASS(scctx->isc_ntxqsets > 0);
1189 	MPASS(ntxqs == 1);
1190 	MPASS(scctx->isc_ntxqsets == ntxqsets);
1191 
1192 	/* Allocate queue structure memory */
1193 	if (!(vsi->tx_queues =
1194 	    (struct ixl_tx_queue *) malloc(sizeof(struct ixl_tx_queue) *ntxqsets, M_IXL, M_NOWAIT | M_ZERO))) {
1195 		device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n");
1196 		return (ENOMEM);
1197 	}
1198 
1199 	for (i = 0, que = vsi->tx_queues; i < ntxqsets; i++, que++) {
1200 		struct tx_ring *txr = &que->txr;
1201 
1202 		txr->me = i;
1203 		que->vsi = vsi;
1204 
1205 		if (!vsi->enable_head_writeback) {
1206 			/* Allocate report status array */
1207 			if (!(txr->tx_rsq = malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXL, M_NOWAIT))) {
1208 				device_printf(iflib_get_dev(ctx), "failed to allocate tx_rsq memory\n");
1209 				error = ENOMEM;
1210 				goto fail;
1211 			}
1212 			/* Init report status array */
1213 			for (j = 0; j < scctx->isc_ntxd[0]; j++)
1214 				txr->tx_rsq[j] = QIDX_INVALID;
1215 		}
1216 		/* get the virtual and physical address of the hardware queues */
1217 		txr->tail = I40E_QTX_TAIL(txr->me);
1218 		txr->tx_base = (struct i40e_tx_desc *)vaddrs[i * ntxqs];
1219 		txr->tx_paddr = paddrs[i * ntxqs];
1220 		txr->que = que;
1221 	}
1222 
1223 	return (0);
1224 fail:
1225 	ixl_if_queues_free(ctx);
1226 	return (error);
1227 }
1228 
1229 static int
1230 ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets)
1231 {
1232 	struct ixl_pf *pf = iflib_get_softc(ctx);
1233 	struct ixl_vsi *vsi = &pf->vsi;
1234 	struct ixl_rx_queue *que;
1235 	int i, error = 0;
1236 
1237 #ifdef INVARIANTS
1238 	if_softc_ctx_t scctx = vsi->shared;
1239 	MPASS(scctx->isc_nrxqsets > 0);
1240 	MPASS(nrxqs == 1);
1241 	MPASS(scctx->isc_nrxqsets == nrxqsets);
1242 #endif
1243 
1244 	/* Allocate queue structure memory */
1245 	if (!(vsi->rx_queues =
1246 	    (struct ixl_rx_queue *) malloc(sizeof(struct ixl_rx_queue) *
1247 	    nrxqsets, M_IXL, M_NOWAIT | M_ZERO))) {
1248 		device_printf(iflib_get_dev(ctx), "Unable to allocate RX ring memory\n");
1249 		error = ENOMEM;
1250 		goto fail;
1251 	}
1252 
1253 	for (i = 0, que = vsi->rx_queues; i < nrxqsets; i++, que++) {
1254 		struct rx_ring *rxr = &que->rxr;
1255 
1256 		rxr->me = i;
1257 		que->vsi = vsi;
1258 
1259 		/* get the virtual and physical address of the hardware queues */
1260 		rxr->tail = I40E_QRX_TAIL(rxr->me);
1261 		rxr->rx_base = (union i40e_rx_desc *)vaddrs[i * nrxqs];
1262 		rxr->rx_paddr = paddrs[i * nrxqs];
1263 		rxr->que = que;
1264 	}
1265 
1266 	return (0);
1267 fail:
1268 	ixl_if_queues_free(ctx);
1269 	return (error);
1270 }
1271 
1272 static void
1273 ixl_if_queues_free(if_ctx_t ctx)
1274 {
1275 	struct ixl_pf *pf = iflib_get_softc(ctx);
1276 	struct ixl_vsi *vsi = &pf->vsi;
1277 
1278 	if (vsi->tx_queues != NULL && !vsi->enable_head_writeback) {
1279 		struct ixl_tx_queue *que;
1280 		int i = 0;
1281 
1282 		for (i = 0, que = vsi->tx_queues; i < vsi->num_tx_queues; i++, que++) {
1283 			struct tx_ring *txr = &que->txr;
1284 			if (txr->tx_rsq != NULL) {
1285 				free(txr->tx_rsq, M_IXL);
1286 				txr->tx_rsq = NULL;
1287 			}
1288 		}
1289 	}
1290 
1291 	if (vsi->tx_queues != NULL) {
1292 		free(vsi->tx_queues, M_IXL);
1293 		vsi->tx_queues = NULL;
1294 	}
1295 	if (vsi->rx_queues != NULL) {
1296 		free(vsi->rx_queues, M_IXL);
1297 		vsi->rx_queues = NULL;
1298 	}
1299 
1300 	if (!IXL_PF_IN_RECOVERY_MODE(pf))
1301 		sysctl_ctx_free(&vsi->sysctl_ctx);
1302 }
1303 
1304 void
1305 ixl_update_link_status(struct ixl_pf *pf)
1306 {
1307 	struct ixl_vsi *vsi = &pf->vsi;
1308 	struct i40e_hw *hw = &pf->hw;
1309 	u64 baudrate;
1310 
1311 	if (pf->link_up) {
1312 		if (vsi->link_active == FALSE) {
1313 			vsi->link_active = TRUE;
1314 			baudrate = ixl_max_aq_speed_to_value(hw->phy.link_info.link_speed);
1315 			iflib_link_state_change(vsi->ctx, LINK_STATE_UP, baudrate);
1316 			ixl_link_up_msg(pf);
1317 #ifdef PCI_IOV
1318 			ixl_broadcast_link_state(pf);
1319 #endif
1320 		}
1321 	} else { /* Link down */
1322 		if (vsi->link_active == TRUE) {
1323 			vsi->link_active = FALSE;
1324 			iflib_link_state_change(vsi->ctx, LINK_STATE_DOWN, 0);
1325 #ifdef PCI_IOV
1326 			ixl_broadcast_link_state(pf);
1327 #endif
1328 		}
1329 	}
1330 }
1331 
1332 static void
1333 ixl_handle_lan_overflow_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
1334 {
1335 	device_t dev = pf->dev;
1336 	u32 rxq_idx, qtx_ctl;
1337 
1338 	rxq_idx = (e->desc.params.external.param0 & I40E_PRTDCB_RUPTQ_RXQNUM_MASK) >>
1339 	    I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT;
1340 	qtx_ctl = e->desc.params.external.param1;
1341 
1342 	device_printf(dev, "LAN overflow event: global rxq_idx %d\n", rxq_idx);
1343 	device_printf(dev, "LAN overflow event: QTX_CTL 0x%08x\n", qtx_ctl);
1344 }
1345 
/*
 * ixl_process_adminq - Drain and dispatch pending Admin Queue events.
 *
 * Processes up to IXL_ADM_LIMIT events (link status, VF mailbox
 * messages, LAN overflow), then re-enables the AQ interrupt cause.
 * *pending is left nonzero if events remain. Returns 0 on success,
 * ENOMEM if the event buffer could not be allocated, or the last
 * i40e_clean_arq_element() status.
 */
static int
ixl_process_adminq(struct ixl_pf *pf, u16 *pending)
{
	enum i40e_status_code status = I40E_SUCCESS;
	struct i40e_arq_event_info event;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	u16 opcode;
	u32 loop = 0, reg;

	event.buf_len = IXL_AQ_BUF_SZ;
	event.msg_buf = malloc(event.buf_len, M_IXL, M_NOWAIT | M_ZERO);
	if (!event.msg_buf) {
		device_printf(dev, "%s: Unable to allocate memory for Admin"
		    " Queue event!\n", __func__);
		return (ENOMEM);
	}

	/* clean and process any events */
	do {
		status = i40e_clean_arq_element(hw, &event, pending);
		if (status)
			break;
		opcode = LE16_TO_CPU(event.desc.opcode);
		ixl_dbg(pf, IXL_DBG_AQ,
		    "Admin Queue event: %#06x\n", opcode);
		switch (opcode) {
		case i40e_aqc_opc_get_link_status:
			ixl_link_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_pf:
#ifdef PCI_IOV
			ixl_handle_vf_msg(pf, &event);
#endif
			break;
		/*
		 * This should only occur on no-drop queues, which
		 * aren't currently configured.
		 */
		case i40e_aqc_opc_event_lan_overflow:
			ixl_handle_lan_overflow_event(pf, &event);
			break;
		default:
			break;
		}
	} while (*pending && (loop++ < IXL_ADM_LIMIT));

	free(event.msg_buf, M_IXL);

	/* Re-enable admin queue interrupt cause */
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	return (status);
}
1402 
/*
 * ixl_if_update_admin_status - iflib admin task handler.
 *
 * Handles a pending EMP reset and MDD events before draining the Admin
 * Queue and refreshing link state. Reschedules itself while events
 * remain; otherwise re-arms the admin interrupt.
 */
static void
ixl_if_update_admin_status(if_ctx_t ctx)
{
	struct ixl_pf	*pf = iflib_get_softc(ctx);
	struct i40e_hw	*hw = &pf->hw;
	u16		pending;

	if (IXL_PF_IS_RESETTING(pf))
		ixl_handle_empr_reset(pf);

	/*
	 * Admin Queue is shut down while handling reset.
	 * Don't proceed if it hasn't been re-initialized
	 * e.g due to an issue with new FW.
	 */
	if (!i40e_check_asq_alive(&pf->hw))
		return;

	if (pf->state & IXL_PF_STATE_MDD_PENDING)
		ixl_handle_mdd_event(pf);

	ixl_process_adminq(pf, &pending);
	ixl_update_link_status(pf);

	/*
	 * If there are still messages to process, reschedule ourselves.
	 * Otherwise, re-enable our interrupt and go to sleep.
	 */
	if (pending > 0)
		iflib_admin_intr_deferred(ctx);
	else
		ixl_enable_intr0(hw);
}
1436 
1437 static void
1438 ixl_if_multi_set(if_ctx_t ctx)
1439 {
1440 	struct ixl_pf *pf = iflib_get_softc(ctx);
1441 	struct ixl_vsi *vsi = &pf->vsi;
1442 	struct i40e_hw *hw = vsi->hw;
1443 	int mcnt;
1444 
1445 	IOCTL_DEBUGOUT("ixl_if_multi_set: begin");
1446 
1447 	/* Delete filters for removed multicast addresses */
1448 	ixl_del_multi(vsi, false);
1449 
1450 	mcnt = min(if_llmaddr_count(iflib_get_ifp(ctx)), MAX_MULTICAST_ADDR);
1451 	if (__predict_false(mcnt == MAX_MULTICAST_ADDR)) {
1452 		i40e_aq_set_vsi_multicast_promiscuous(hw,
1453 		    vsi->seid, TRUE, NULL);
1454 		ixl_del_multi(vsi, true);
1455 		return;
1456 	}
1457 
1458 	ixl_add_multi(vsi);
1459 	IOCTL_DEBUGOUT("ixl_if_multi_set: end");
1460 }
1461 
1462 static int
1463 ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
1464 {
1465 	struct ixl_pf *pf = iflib_get_softc(ctx);
1466 	struct ixl_vsi *vsi = &pf->vsi;
1467 
1468 	IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
1469 	if (mtu > IXL_MAX_FRAME - ETHER_HDR_LEN - ETHER_CRC_LEN -
1470 		ETHER_VLAN_ENCAP_LEN)
1471 		return (EINVAL);
1472 
1473 	vsi->shared->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
1474 		ETHER_VLAN_ENCAP_LEN;
1475 
1476 	return (0);
1477 }
1478 
/*
 * ixl_if_media_status - Report current media to an SIOCGIFMEDIA request.
 *
 * Maps the firmware-reported PHY type onto the corresponding ifmedia
 * word and appends duplex and flow-control status. Only validity is
 * reported while the link is down.
 */
static void
ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct i40e_hw  *hw = &pf->hw;

	INIT_DEBUGOUT("ixl_media_status: begin");

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!pf->link_up) {
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	/* Hardware is always full-duplex */
	ifmr->ifm_active |= IFM_FDX;

	switch (hw->phy.link_info.phy_type) {
		/* 100 M */
		case I40E_PHY_TYPE_100BASE_TX:
			ifmr->ifm_active |= IFM_100_TX;
			break;
		/* 1 G */
		case I40E_PHY_TYPE_1000BASE_T:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		case I40E_PHY_TYPE_1000BASE_SX:
			ifmr->ifm_active |= IFM_1000_SX;
			break;
		case I40E_PHY_TYPE_1000BASE_LX:
			ifmr->ifm_active |= IFM_1000_LX;
			break;
		case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		/* 2.5 G */
		case I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS:
			ifmr->ifm_active |= IFM_2500_T;
			break;
		/* 5 G */
		case I40E_PHY_TYPE_5GBASE_T_LINK_STATUS:
			ifmr->ifm_active |= IFM_5000_T;
			break;
		/* 10 G */
		case I40E_PHY_TYPE_10GBASE_SFPP_CU:
			ifmr->ifm_active |= IFM_10G_TWINAX;
			break;
		case I40E_PHY_TYPE_10GBASE_SR:
			ifmr->ifm_active |= IFM_10G_SR;
			break;
		case I40E_PHY_TYPE_10GBASE_LR:
			ifmr->ifm_active |= IFM_10G_LR;
			break;
		case I40E_PHY_TYPE_10GBASE_T:
			ifmr->ifm_active |= IFM_10G_T;
			break;
		case I40E_PHY_TYPE_XAUI:
		case I40E_PHY_TYPE_XFI:
			ifmr->ifm_active |= IFM_10G_TWINAX;
			break;
		case I40E_PHY_TYPE_10GBASE_AOC:
			ifmr->ifm_active |= IFM_10G_AOC;
			break;
		/* 25 G */
		case I40E_PHY_TYPE_25GBASE_KR:
			ifmr->ifm_active |= IFM_25G_KR;
			break;
		case I40E_PHY_TYPE_25GBASE_CR:
			ifmr->ifm_active |= IFM_25G_CR;
			break;
		case I40E_PHY_TYPE_25GBASE_SR:
			ifmr->ifm_active |= IFM_25G_SR;
			break;
		case I40E_PHY_TYPE_25GBASE_LR:
			ifmr->ifm_active |= IFM_25G_LR;
			break;
		case I40E_PHY_TYPE_25GBASE_AOC:
			ifmr->ifm_active |= IFM_25G_AOC;
			break;
		case I40E_PHY_TYPE_25GBASE_ACC:
			ifmr->ifm_active |= IFM_25G_ACC;
			break;
		/* 40 G */
		case I40E_PHY_TYPE_40GBASE_CR4:
		case I40E_PHY_TYPE_40GBASE_CR4_CU:
			ifmr->ifm_active |= IFM_40G_CR4;
			break;
		case I40E_PHY_TYPE_40GBASE_SR4:
			ifmr->ifm_active |= IFM_40G_SR4;
			break;
		case I40E_PHY_TYPE_40GBASE_LR4:
			ifmr->ifm_active |= IFM_40G_LR4;
			break;
		case I40E_PHY_TYPE_XLAUI:
			ifmr->ifm_active |= IFM_OTHER;
			break;
		case I40E_PHY_TYPE_1000BASE_KX:
			ifmr->ifm_active |= IFM_1000_KX;
			break;
		case I40E_PHY_TYPE_SGMII:
			ifmr->ifm_active |= IFM_1000_SGMII;
			break;
		/* ERJ: What's the difference between these? */
		case I40E_PHY_TYPE_10GBASE_CR1_CU:
		case I40E_PHY_TYPE_10GBASE_CR1:
			ifmr->ifm_active |= IFM_10G_CR1;
			break;
		case I40E_PHY_TYPE_10GBASE_KX4:
			ifmr->ifm_active |= IFM_10G_KX4;
			break;
		case I40E_PHY_TYPE_10GBASE_KR:
			ifmr->ifm_active |= IFM_10G_KR;
			break;
		case I40E_PHY_TYPE_SFI:
			ifmr->ifm_active |= IFM_10G_SFI;
			break;
		/* Our single 20G media type */
		case I40E_PHY_TYPE_20GBASE_KR2:
			ifmr->ifm_active |= IFM_20G_KR2;
			break;
		case I40E_PHY_TYPE_40GBASE_KR4:
			ifmr->ifm_active |= IFM_40G_KR4;
			break;
		case I40E_PHY_TYPE_XLPPI:
		case I40E_PHY_TYPE_40GBASE_AOC:
			ifmr->ifm_active |= IFM_40G_XLPPI;
			break;
		/* Unknown to driver */
		default:
			ifmr->ifm_active |= IFM_UNKNOWN;
			break;
	}
	/* Report flow control status as well */
	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
}
1619 
1620 static int
1621 ixl_if_media_change(if_ctx_t ctx)
1622 {
1623 	struct ifmedia *ifm = iflib_get_media(ctx);
1624 
1625 	INIT_DEBUGOUT("ixl_media_change: begin");
1626 
1627 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1628 		return (EINVAL);
1629 
1630 	if_printf(iflib_get_ifp(ctx), "Media change is not supported.\n");
1631 	return (ENODEV);
1632 }
1633 
1634 static int
1635 ixl_if_promisc_set(if_ctx_t ctx, int flags)
1636 {
1637 	struct ixl_pf *pf = iflib_get_softc(ctx);
1638 	struct ixl_vsi *vsi = &pf->vsi;
1639 	struct ifnet	*ifp = iflib_get_ifp(ctx);
1640 	struct i40e_hw	*hw = vsi->hw;
1641 	int		err;
1642 	bool		uni = FALSE, multi = FALSE;
1643 
1644 	if (flags & IFF_PROMISC)
1645 		uni = multi = TRUE;
1646 	else if (flags & IFF_ALLMULTI || if_llmaddr_count(ifp) >=
1647 	    MAX_MULTICAST_ADDR)
1648 		multi = TRUE;
1649 
1650 	err = i40e_aq_set_vsi_unicast_promiscuous(hw,
1651 	    vsi->seid, uni, NULL, true);
1652 	if (err)
1653 		return (err);
1654 	err = i40e_aq_set_vsi_multicast_promiscuous(hw,
1655 	    vsi->seid, multi, NULL);
1656 	return (err);
1657 }
1658 
1659 static void
1660 ixl_if_timer(if_ctx_t ctx, uint16_t qid)
1661 {
1662 	struct ixl_pf *pf = iflib_get_softc(ctx);
1663 
1664 	if (qid != 0)
1665 		return;
1666 
1667 	ixl_update_stats_counters(pf);
1668 }
1669 
1670 static void
1671 ixl_if_vlan_register(if_ctx_t ctx, u16 vtag)
1672 {
1673 	struct ixl_pf *pf = iflib_get_softc(ctx);
1674 	struct ixl_vsi *vsi = &pf->vsi;
1675 	struct i40e_hw	*hw = vsi->hw;
1676 	if_t ifp = iflib_get_ifp(ctx);
1677 
1678 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
1679 		return;
1680 
1681 	/*
1682 	 * Keep track of registered VLANS to know what
1683 	 * filters have to be configured when VLAN_HWFILTER
1684 	 * capability is enabled.
1685 	 */
1686 	++vsi->num_vlans;
1687 	bit_set(vsi->vlans_map, vtag);
1688 
1689 	if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0)
1690 		return;
1691 
1692 	if (vsi->num_vlans < IXL_MAX_VLAN_FILTERS)
1693 		ixl_add_filter(vsi, hw->mac.addr, vtag);
1694 	else if (vsi->num_vlans == IXL_MAX_VLAN_FILTERS) {
1695 		/*
1696 		 * There is not enough HW resources to add filters
1697 		 * for all registered VLANs. Re-configure filtering
1698 		 * to allow reception of all expected traffic.
1699 		 */
1700 		device_printf(vsi->dev,
1701 		    "Not enough HW filters for all VLANs. VLAN HW filtering disabled");
1702 		ixl_del_all_vlan_filters(vsi, hw->mac.addr);
1703 		ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
1704 	}
1705 }
1706 
1707 static void
1708 ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
1709 {
1710 	struct ixl_pf *pf = iflib_get_softc(ctx);
1711 	struct ixl_vsi *vsi = &pf->vsi;
1712 	struct i40e_hw	*hw = vsi->hw;
1713 	if_t ifp = iflib_get_ifp(ctx);
1714 
1715 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
1716 		return;
1717 
1718 	--vsi->num_vlans;
1719 	bit_clear(vsi->vlans_map, vtag);
1720 
1721 	if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0)
1722 		return;
1723 
1724 	if (vsi->num_vlans < IXL_MAX_VLAN_FILTERS)
1725 		ixl_del_filter(vsi, hw->mac.addr, vtag);
1726 	else if (vsi->num_vlans == IXL_MAX_VLAN_FILTERS) {
1727 		ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
1728 		ixl_add_vlan_filters(vsi, hw->mac.addr);
1729 	}
1730 }
1731 
/*
 * ixl_if_get_counter - Return a cached VSI statistic for if_get_counter().
 *
 * Values come from the VSI counters maintained by the stats update
 * path; anything not tracked there falls back to the iflib default.
 */
static uint64_t
ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	if_t ifp = iflib_get_ifp(ctx);

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (vsi->ipackets);
	case IFCOUNTER_IERRORS:
		return (vsi->ierrors);
	case IFCOUNTER_OPACKETS:
		return (vsi->opackets);
	case IFCOUNTER_OERRORS:
		return (vsi->oerrors);
	case IFCOUNTER_COLLISIONS:
		/* Collisions are by standard impossible in 40G/10G Ethernet */
		return (0);
	case IFCOUNTER_IBYTES:
		return (vsi->ibytes);
	case IFCOUNTER_OBYTES:
		return (vsi->obytes);
	case IFCOUNTER_IMCASTS:
		return (vsi->imcasts);
	case IFCOUNTER_OMCASTS:
		return (vsi->omcasts);
	case IFCOUNTER_IQDROPS:
		return (vsi->iqdrops);
	case IFCOUNTER_OQDROPS:
		return (vsi->oqdrops);
	case IFCOUNTER_NOPROTO:
		return (vsi->noproto);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}
1769 
1770 #ifdef PCI_IOV
1771 static void
1772 ixl_if_vflr_handle(if_ctx_t ctx)
1773 {
1774 	struct ixl_pf *pf = iflib_get_softc(ctx);
1775 
1776 	ixl_handle_vflr(pf);
1777 }
1778 #endif
1779 
1780 static int
1781 ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
1782 {
1783 	struct ixl_pf		*pf = iflib_get_softc(ctx);
1784 
1785 	if (pf->read_i2c_byte == NULL)
1786 		return (EINVAL);
1787 
1788 	for (int i = 0; i < req->len; i++)
1789 		if (pf->read_i2c_byte(pf, req->offset + i,
1790 		    req->dev_addr, &req->data[i]))
1791 			return (EIO);
1792 	return (0);
1793 }
1794 
1795 static int
1796 ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data)
1797 {
1798 	struct ixl_pf *pf = iflib_get_softc(ctx);
1799 	struct ifdrv *ifd = (struct ifdrv *)data;
1800 	int error = 0;
1801 
1802 	/*
1803 	 * The iflib_if_ioctl forwards SIOCxDRVSPEC and SIOGPRIVATE_0 without
1804 	 * performing privilege checks. It is important that this function
1805 	 * perform the necessary checks for commands which should only be
1806 	 * executed by privileged threads.
1807 	 */
1808 
1809 	switch(command) {
1810 	case SIOCGDRVSPEC:
1811 	case SIOCSDRVSPEC:
1812 		/* NVM update command */
1813 		if (ifd->ifd_cmd == I40E_NVM_ACCESS) {
1814 			error = priv_check(curthread, PRIV_DRIVER);
1815 			if (error)
1816 				break;
1817 			error = ixl_handle_nvmupd_cmd(pf, ifd);
1818 		} else {
1819 			error = EINVAL;
1820 		}
1821 		break;
1822 	default:
1823 		error = EOPNOTSUPP;
1824 	}
1825 
1826 	return (error);
1827 }
1828 
1829 /* ixl_if_needs_restart - Tell iflib when the driver needs to be reinitialized
1830  * @ctx: iflib context
1831  * @event: event code to check
1832  *
1833  * Defaults to returning false for every event.
1834  *
1835  * @returns true if iflib needs to reinit the interface, false otherwise
1836  */
1837 static bool
1838 ixl_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
1839 {
1840 	switch (event) {
1841 	case IFLIB_RESTART_VLAN_CONFIG:
1842 	default:
1843 		return (false);
1844 	}
1845 }
1846 
1847 /*
1848  * Sanity check and save off tunable values.
1849  */
1850 static void
1851 ixl_save_pf_tunables(struct ixl_pf *pf)
1852 {
1853 	device_t dev = pf->dev;
1854 
1855 	/* Save tunable information */
1856 #ifdef IXL_DEBUG_FC
1857 	pf->enable_tx_fc_filter = ixl_enable_tx_fc_filter;
1858 #endif
1859 #ifdef IXL_DEBUG
1860 	pf->recovery_mode = ixl_debug_recovery_mode;
1861 #endif
1862 	pf->dbg_mask = ixl_core_debug_mask;
1863 	pf->hw.debug_mask = ixl_shared_debug_mask;
1864 	pf->vsi.enable_head_writeback = !!(ixl_enable_head_writeback);
1865 	pf->enable_vf_loopback = !!(ixl_enable_vf_loopback);
1866 #if 0
1867 	pf->dynamic_rx_itr = ixl_dynamic_rx_itr;
1868 	pf->dynamic_tx_itr = ixl_dynamic_tx_itr;
1869 #endif
1870 
1871 	if (ixl_i2c_access_method > 3 || ixl_i2c_access_method < 0)
1872 		pf->i2c_access_method = 0;
1873 	else
1874 		pf->i2c_access_method = ixl_i2c_access_method;
1875 
1876 	if (ixl_tx_itr < 0 || ixl_tx_itr > IXL_MAX_ITR) {
1877 		device_printf(dev, "Invalid tx_itr value of %d set!\n",
1878 		    ixl_tx_itr);
1879 		device_printf(dev, "tx_itr must be between %d and %d, "
1880 		    "inclusive\n",
1881 		    0, IXL_MAX_ITR);
1882 		device_printf(dev, "Using default value of %d instead\n",
1883 		    IXL_ITR_4K);
1884 		pf->tx_itr = IXL_ITR_4K;
1885 	} else
1886 		pf->tx_itr = ixl_tx_itr;
1887 
1888 	if (ixl_rx_itr < 0 || ixl_rx_itr > IXL_MAX_ITR) {
1889 		device_printf(dev, "Invalid rx_itr value of %d set!\n",
1890 		    ixl_rx_itr);
1891 		device_printf(dev, "rx_itr must be between %d and %d, "
1892 		    "inclusive\n",
1893 		    0, IXL_MAX_ITR);
1894 		device_printf(dev, "Using default value of %d instead\n",
1895 		    IXL_ITR_8K);
1896 		pf->rx_itr = IXL_ITR_8K;
1897 	} else
1898 		pf->rx_itr = ixl_rx_itr;
1899 
1900 	pf->fc = -1;
1901 	if (ixl_flow_control != -1) {
1902 		if (ixl_flow_control < 0 || ixl_flow_control > 3) {
1903 			device_printf(dev,
1904 			    "Invalid flow_control value of %d set!\n",
1905 			    ixl_flow_control);
1906 			device_printf(dev,
1907 			    "flow_control must be between %d and %d, "
1908 			    "inclusive\n", 0, 3);
1909 			device_printf(dev,
1910 			    "Using default configuration instead\n");
1911 		} else
1912 			pf->fc = ixl_flow_control;
1913 	}
1914 }
1915 
1916