/******************************************************************************

  Copyright (c) 2013-2018, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "ixl.h"
#include "ixl_pf.h"

#ifdef IXL_IW
#include "ixl_iw.h"
#include "ixl_iw_int.h"
#endif

#ifdef PCI_IOV
#include "ixl_pf_iov.h"
#endif

/*********************************************************************
 *  Driver version
 *********************************************************************/
#define IXL_DRIVER_VERSION_MAJOR	2
#define IXL_DRIVER_VERSION_MINOR	3
#define IXL_DRIVER_VERSION_BUILD	3

#define IXL_DRIVER_VERSION_STRING			\
    __XSTRING(IXL_DRIVER_VERSION_MAJOR) "."		\
    __XSTRING(IXL_DRIVER_VERSION_MINOR) "."		\
    __XSTRING(IXL_DRIVER_VERSION_BUILD) "-k"
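/* __XSTRING() stringifies the version macros, yielding "2.3.3-k" here. */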

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *
 *  ( Vendor ID, Device ID, Branding String )
 *********************************************************************/

static pci_vendor_info_t ixl_vendor_info_array[] =
{
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, "Intel(R) Ethernet Controller X710 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, "Intel(R) Ethernet Controller XL710 for 40GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, "Intel(R) Ethernet Controller X710 for 10GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, "Intel(R) Ethernet Controller X710 for 10GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, "Intel(R) Ethernet Controller X710 for 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, "Intel(R) Ethernet Controller X710/X557-AT 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722, "Intel(R) Ethernet Connection X722 for 10GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 1GbE"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B, "Intel(R) Ethernet Controller XXV710 for 25GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28, "Intel(R) Ethernet Controller XXV710 for 25GbE SFP28"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_BC, "Intel(R) Ethernet Controller X710 for 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_SFP, "Intel(R) Ethernet Controller X710 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_B, "Intel(R) Ethernet Controller X710 for 10GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_5G_BASE_T_BC, "Intel(R) Ethernet Controller V710 for 5GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_BC, "Intel(R) Ethernet Controller I710 for 1GBASE-T"),
	/* required last entry */
	PVID_END
};

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
/*** IFLIB interface ***/
static void	*ixl_register(device_t dev);
static int	 ixl_if_attach_pre(if_ctx_t ctx);
static int	 ixl_if_attach_post(if_ctx_t ctx);
static int	 ixl_if_detach(if_ctx_t ctx);
static int	 ixl_if_shutdown(if_ctx_t ctx);
static int	 ixl_if_suspend(if_ctx_t ctx);
static int	 ixl_if_resume(if_ctx_t ctx);
static int	 ixl_if_msix_intr_assign(if_ctx_t ctx, int msix);
static void	 ixl_if_enable_intr(if_ctx_t ctx);
static void	 ixl_if_disable_intr(if_ctx_t ctx);
static int	 ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid);
static int	 ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid);
static int	 ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets);
static int	 ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
static void	 ixl_if_queues_free(if_ctx_t ctx);
static void	 ixl_if_update_admin_status(if_ctx_t ctx);
static void	 ixl_if_multi_set(if_ctx_t ctx);
static int	 ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
static void	 ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr);
static int	 ixl_if_media_change(if_ctx_t ctx);
static int	 ixl_if_promisc_set(if_ctx_t ctx, int flags);
static void	 ixl_if_timer(if_ctx_t ctx, uint16_t qid);
static void	 ixl_if_vlan_register(if_ctx_t ctx, u16 vtag);
static void	 ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
static uint64_t	 ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt);
static int	 ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
static int	 ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data);
static bool	 ixl_if_needs_restart(if_ctx_t ctx, enum iflib_restart_event event);
#ifdef PCI_IOV
static void	 ixl_if_vflr_handle(if_ctx_t ctx);
#endif

/*** Other ***/
static void	 ixl_save_pf_tunables(struct ixl_pf *);
static int	 ixl_allocate_pci_resources(struct ixl_pf *);
static void	 ixl_setup_ssctx(struct ixl_pf *pf);
static void	 ixl_admin_timer(void *arg);

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixl_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, ixl_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
#ifdef PCI_IOV
	DEVMETHOD(pci_iov_init, iflib_device_iov_init),
	DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
#endif
	DEVMETHOD_END
};

static driver_t ixl_driver = {
	"ixl", ixl_methods, sizeof(struct ixl_pf),
};

DRIVER_MODULE(ixl, pci, ixl_driver, 0, 0);
IFLIB_PNP_INFO(pci, ixl, ixl_vendor_info_array);
MODULE_VERSION(ixl, 3);

MODULE_DEPEND(ixl, pci, 1, 1, 1);
MODULE_DEPEND(ixl, ether, 1, 1, 1);
MODULE_DEPEND(ixl, iflib, 1, 1, 1);

static device_method_t ixl_if_methods[] = {
	DEVMETHOD(ifdi_attach_pre, ixl_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, ixl_if_attach_post),
	DEVMETHOD(ifdi_detach, ixl_if_detach),
	DEVMETHOD(ifdi_shutdown, ixl_if_shutdown),
	DEVMETHOD(ifdi_suspend, ixl_if_suspend),
	DEVMETHOD(ifdi_resume, ixl_if_resume),
	DEVMETHOD(ifdi_init, ixl_if_init),
	DEVMETHOD(ifdi_stop, ixl_if_stop),
	DEVMETHOD(ifdi_msix_intr_assign, ixl_if_msix_intr_assign),
	DEVMETHOD(ifdi_intr_enable, ixl_if_enable_intr),
	DEVMETHOD(ifdi_intr_disable, ixl_if_disable_intr),
	DEVMETHOD(ifdi_rx_queue_intr_enable, ixl_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queue_intr_enable, ixl_if_tx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queues_alloc, ixl_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, ixl_if_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, ixl_if_queues_free),
	DEVMETHOD(ifdi_update_admin_status, ixl_if_update_admin_status),
	DEVMETHOD(ifdi_multi_set, ixl_if_multi_set),
	DEVMETHOD(ifdi_mtu_set, ixl_if_mtu_set),
	DEVMETHOD(ifdi_media_status, ixl_if_media_status),
	DEVMETHOD(ifdi_media_change, ixl_if_media_change),
	DEVMETHOD(ifdi_promisc_set, ixl_if_promisc_set),
	DEVMETHOD(ifdi_timer, ixl_if_timer),
	DEVMETHOD(ifdi_vlan_register, ixl_if_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister, ixl_if_vlan_unregister),
	DEVMETHOD(ifdi_get_counter, ixl_if_get_counter),
	DEVMETHOD(ifdi_i2c_req, ixl_if_i2c_req),
	DEVMETHOD(ifdi_priv_ioctl, ixl_if_priv_ioctl),
	DEVMETHOD(ifdi_needs_restart, ixl_if_needs_restart),
#ifdef PCI_IOV
	DEVMETHOD(ifdi_iov_init, ixl_if_iov_init),
	DEVMETHOD(ifdi_iov_uninit, ixl_if_iov_uninit),
	DEVMETHOD(ifdi_iov_vf_add, ixl_if_iov_vf_add),
	DEVMETHOD(ifdi_vflr_handle, ixl_if_vflr_handle),
#endif
	/* ifdi_led_func */
	/* ifdi_debug */
	DEVMETHOD_END
};

static driver_t ixl_if_driver = {
	"ixl_if", ixl_if_methods, sizeof(struct ixl_pf)
};

/*
** TUNEABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "ixl driver parameters");
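
/*
 * All of the tunables below are exposed as CTLFLAG_RDTUN sysctls, which
 * are read once when the module loads; set them from loader.conf(5)
 * rather than at runtime, e.g.:
 *
 *	hw.ixl.enable_head_writeback="0"
 *	hw.ixl.core_debug_mask="1"
 */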

#ifdef IXL_DEBUG_FC
/*
 * Leave this on unless you need to send flow control
 * frames (or other control frames) from software
 */
static int ixl_enable_tx_fc_filter = 1;
TUNABLE_INT("hw.ixl.enable_tx_fc_filter",
    &ixl_enable_tx_fc_filter);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_tx_fc_filter, CTLFLAG_RDTUN,
    &ixl_enable_tx_fc_filter, 0,
    "Filter out packets with Ethertype 0x8808 from being sent out by non-HW sources");
#endif

#ifdef IXL_DEBUG
static int ixl_debug_recovery_mode = 0;
TUNABLE_INT("hw.ixl.debug_recovery_mode",
    &ixl_debug_recovery_mode);
SYSCTL_INT(_hw_ixl, OID_AUTO, debug_recovery_mode, CTLFLAG_RDTUN,
    &ixl_debug_recovery_mode, 0,
    "Act as if the FW had entered recovery mode (for debugging)");
#endif

static int ixl_i2c_access_method = 0;
TUNABLE_INT("hw.ixl.i2c_access_method",
    &ixl_i2c_access_method);
SYSCTL_INT(_hw_ixl, OID_AUTO, i2c_access_method, CTLFLAG_RDTUN,
    &ixl_i2c_access_method, 0,
    IXL_SYSCTL_HELP_I2C_METHOD);

static int ixl_enable_vf_loopback = 1;
TUNABLE_INT("hw.ixl.enable_vf_loopback",
    &ixl_enable_vf_loopback);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_vf_loopback, CTLFLAG_RDTUN,
    &ixl_enable_vf_loopback, 0,
    IXL_SYSCTL_HELP_VF_LOOPBACK);

/*
 * Different method for processing TX descriptor
 * completion.
 */
static int ixl_enable_head_writeback = 1;
TUNABLE_INT("hw.ixl.enable_head_writeback",
    &ixl_enable_head_writeback);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN,
    &ixl_enable_head_writeback, 0,
    "Detect the last completed TX descriptor from a value written back by HW instead of scanning the descriptors");

static int ixl_core_debug_mask = 0;
TUNABLE_INT("hw.ixl.core_debug_mask",
    &ixl_core_debug_mask);
SYSCTL_INT(_hw_ixl, OID_AUTO, core_debug_mask, CTLFLAG_RDTUN,
    &ixl_core_debug_mask, 0,
    "Display debug statements that are printed in non-shared code");

static int ixl_shared_debug_mask = 0;
TUNABLE_INT("hw.ixl.shared_debug_mask",
    &ixl_shared_debug_mask);
SYSCTL_INT(_hw_ixl, OID_AUTO, shared_debug_mask, CTLFLAG_RDTUN,
    &ixl_shared_debug_mask, 0,
    "Display debug statements that are printed in shared code");

#if 0
/*
** Controls for Interrupt Throttling
**	- true/false for dynamic adjustment
** 	- default values for static ITR
*/
static int ixl_dynamic_rx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");

static int ixl_dynamic_tx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
#endif

static int ixl_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
    &ixl_rx_itr, 0, "RX Interrupt Rate");

static int ixl_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
    &ixl_tx_itr, 0, "TX Interrupt Rate");

static int ixl_flow_control = -1;
SYSCTL_INT(_hw_ixl, OID_AUTO, flow_control, CTLFLAG_RDTUN,
    &ixl_flow_control, 0, "Initial Flow Control setting");

#ifdef IXL_IW
int ixl_enable_iwarp = 0;
TUNABLE_INT("hw.ixl.enable_iwarp", &ixl_enable_iwarp);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_iwarp, CTLFLAG_RDTUN,
    &ixl_enable_iwarp, 0, "iWARP enabled");

int ixl_limit_iwarp_msix = IXL_IW_MAX_MSIX;
TUNABLE_INT("hw.ixl.limit_iwarp_msix", &ixl_limit_iwarp_msix);
SYSCTL_INT(_hw_ixl, OID_AUTO, limit_iwarp_msix, CTLFLAG_RDTUN,
    &ixl_limit_iwarp_msix, 0, "Limit MSI-X vectors assigned to iWARP");
#endif

extern struct if_txrx ixl_txrx_hwb;
extern struct if_txrx ixl_txrx_dwb;

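/*
 * Shared context template handed to iflib via ixl_register().  Note that
 * isc_ntxqs/isc_nrxqs count descriptor rings per queue set (one of each
 * here), which is why the isc_n?xd_* arrays have a single entry.
 */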
static struct if_shared_ctx ixl_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_q_align = PAGE_SIZE,
	.isc_tx_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
	.isc_tso_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
	.isc_rx_maxsize = 16384,
	.isc_rx_nsegments = IXL_MAX_RX_SEGS,
	.isc_rx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
	.isc_nfl = 1,
	.isc_ntxqs = 1,
	.isc_nrxqs = 1,

	.isc_admin_intrcnt = 1,
	.isc_vendor_info = ixl_vendor_info_array,
	.isc_driver_version = IXL_DRIVER_VERSION_STRING,
	.isc_driver = &ixl_if_driver,
	.isc_flags = IFLIB_NEED_SCRATCH | IFLIB_NEED_ZERO_CSUM | IFLIB_TSO_INIT_IP | IFLIB_ADMIN_ALWAYS_RUN,

	.isc_nrxd_min = {IXL_MIN_RING},
	.isc_ntxd_min = {IXL_MIN_RING},
	.isc_nrxd_max = {IXL_MAX_RING},
	.isc_ntxd_max = {IXL_MAX_RING},
	.isc_nrxd_default = {IXL_DEFAULT_RING},
	.isc_ntxd_default = {IXL_DEFAULT_RING},
};

/*** Functions ***/
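/*
 * Called by iflib through the device_register method; returns the shared
 * context template above so iflib can size queue state before attach.
 */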
static void *
ixl_register(device_t dev)
{
	return (&ixl_sctx_init);
}

static int
ixl_allocate_pci_resources(struct ixl_pf *pf)
{
	device_t dev = iflib_get_dev(pf->vsi.ctx);
	struct i40e_hw *hw = &pf->hw;
	int             rid;

	/* Map BAR0 */
	rid = PCIR_BAR(0);
	pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);

	if (!(pf->pci_mem)) {
		device_printf(dev, "Unable to allocate bus resource: PCI memory\n");
		return (ENXIO);
	}

	/* Save off the PCI information */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
	hw->subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	hw->subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	hw->bus.device = pci_get_slot(dev);
	hw->bus.func = pci_get_function(dev);

	/* Save off register access information */
	pf->osdep.mem_bus_space_tag =
		rman_get_bustag(pf->pci_mem);
	pf->osdep.mem_bus_space_handle =
		rman_get_bushandle(pf->pci_mem);
	pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
	pf->osdep.flush_reg = I40E_GLGEN_STAT;
	pf->osdep.dev = dev;

	pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
	pf->hw.back = &pf->osdep;

	return (0);
}

static void
ixl_setup_ssctx(struct ixl_pf *pf)
{
	if_softc_ctx_t scctx = pf->vsi.shared;
	struct i40e_hw *hw = &pf->hw;

	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 1;
		scctx->isc_ntxqsets = scctx->isc_nrxqsets = 1;
	} else if (hw->mac.type == I40E_MAC_X722)
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 128;
	else
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;

	if (pf->vsi.enable_head_writeback) {
		scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
		    * sizeof(struct i40e_tx_desc) + sizeof(u32), DBA_ALIGN);
		scctx->isc_txrx = &ixl_txrx_hwb;
	} else {
		scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
		    * sizeof(struct i40e_tx_desc), DBA_ALIGN);
		scctx->isc_txrx = &ixl_txrx_dwb;
	}

	scctx->isc_txrx->ift_legacy_intr = ixl_intr;
	scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0]
	    * sizeof(union i40e_32byte_rx_desc), DBA_ALIGN);
	scctx->isc_msix_bar = PCIR_BAR(IXL_MSIX_BAR);
	scctx->isc_tx_nsegments = IXL_MAX_TX_SEGS;
	scctx->isc_tx_tso_segments_max = IXL_MAX_TSO_SEGS;
	scctx->isc_tx_tso_size_max = IXL_TSO_SIZE;
	scctx->isc_tx_tso_segsize_max = IXL_MAX_DMA_SEG_SIZE;
	scctx->isc_rss_table_size = pf->hw.func_caps.rss_table_size;
	scctx->isc_tx_csum_flags = CSUM_OFFLOAD;
	scctx->isc_capabilities = scctx->isc_capenable = IXL_CAPS;
}

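/*
 * Periodic admin callout.  Runs every half second under pf->admin_mtx
 * (see callout_init_mtx() in attach_pre) and kicks the iflib admin task,
 * which lands in ixl_if_update_admin_status().
 */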
static void
ixl_admin_timer(void *arg)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg;

	/* Fire off the admin task */
	iflib_admin_intr_deferred(pf->vsi.ctx);

	/* Reschedule the admin timer */
	callout_schedule(&pf->admin_timer, hz/2);
}

static int
ixl_attach_pre_recovery_mode(struct ixl_pf *pf)
{
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;

	device_printf(dev, "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");

	i40e_get_mac_addr(hw, hw->mac.addr);

	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
		ixl_configure_intr0_msix(pf);
		ixl_enable_intr0(hw);
	}

	ixl_setup_ssctx(pf);

	return (0);
}

static int
ixl_if_attach_pre(if_ctx_t ctx)
{
	device_t dev;
	struct ixl_pf *pf;
	struct i40e_hw *hw;
	struct ixl_vsi *vsi;
	enum i40e_get_fw_lldp_status_resp lldp_status;
	struct i40e_filter_control_settings filter;
	enum i40e_status_code status;
	int error = 0;

	dev = iflib_get_dev(ctx);
	pf = iflib_get_softc(ctx);

	INIT_DBG_DEV(dev, "begin");

	vsi = &pf->vsi;
	vsi->back = pf;
	pf->dev = dev;
	hw = &pf->hw;

	vsi->dev = dev;
	vsi->hw = &pf->hw;
	vsi->id = 0;
	vsi->num_vlans = 0;
	vsi->ctx = ctx;
	vsi->media = iflib_get_media(ctx);
	vsi->shared = iflib_get_softc_ctx(ctx);

	snprintf(pf->admin_mtx_name, sizeof(pf->admin_mtx_name),
	    "%s:admin", device_get_nameunit(dev));
	mtx_init(&pf->admin_mtx, pf->admin_mtx_name, NULL, MTX_DEF);
	callout_init_mtx(&pf->admin_timer, &pf->admin_mtx, 0);

	/* Save tunable values */
	ixl_save_pf_tunables(pf);

	/* Do PCI setup - map BAR0, etc */
	if (ixl_allocate_pci_resources(pf)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_pci_res;
	}

	/* Establish a clean starting point */
	i40e_clear_hw(hw);
	i40e_set_mac_type(hw);

	error = ixl_pf_reset(pf);
	if (error)
		goto err_out;

	/* Initialize the shared code */
	status = i40e_init_shared_code(hw);
	if (status) {
		device_printf(dev, "Unable to initialize shared code, error %s\n",
		    i40e_stat_str(hw, status));
		error = EIO;
		goto err_out;
	}

	/* Set up the admin queue */
	hw->aq.num_arq_entries = IXL_AQ_LEN;
	hw->aq.num_asq_entries = IXL_AQ_LEN;
	hw->aq.arq_buf_size = IXL_AQ_BUF_SZ;
	hw->aq.asq_buf_size = IXL_AQ_BUF_SZ;

	status = i40e_init_adminq(hw);
	if (status != 0 && status != I40E_ERR_FIRMWARE_API_VERSION) {
		device_printf(dev, "Unable to initialize Admin Queue, error %s\n",
		    i40e_stat_str(hw, status));
		error = EIO;
		goto err_out;
	}
	ixl_print_nvm_version(pf);

	if (status == I40E_ERR_FIRMWARE_API_VERSION) {
		device_printf(dev, "The driver for the device stopped "
		    "because the NVM image is newer than expected.\n");
		device_printf(dev, "You must install the most recent version of "
		    "the network driver.\n");
		error = EIO;
		goto err_out;
	}

	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw)) {
		device_printf(dev, "The driver for the device detected "
		    "a newer version of the NVM image than expected.\n");
		device_printf(dev, "Please install the most recent version "
		    "of the network driver.\n");
	} else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4) {
		device_printf(dev, "The driver for the device detected "
		    "an older version of the NVM image than expected.\n");
		device_printf(dev, "Please update the NVM image.\n");
	}

	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
		error = ixl_attach_pre_recovery_mode(pf);
		if (error)
			goto err_out;
		return (error);
	}

	/* Clear PXE mode */
	i40e_clear_pxe_mode(hw);

	/* Get capabilities from the device */
	error = ixl_get_hw_capabilities(pf);
	if (error) {
		device_printf(dev, "get_hw_capabilities failed: %d\n",
		    error);
		goto err_get_cap;
	}

	/* Set up host memory cache */
	error = ixl_setup_hmc(pf);
	if (error)
		goto err_mac_hmc;

	/* Disable LLDP from the firmware for certain NVM versions */
	if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
	    (pf->hw.aq.fw_maj_ver < 4)) {
		i40e_aq_stop_lldp(hw, true, false, NULL);
		pf->state |= IXL_PF_STATE_FW_LLDP_DISABLED;
	}

	/* Try enabling Energy Efficient Ethernet (EEE) mode */
	if (i40e_enable_eee(hw, true) == I40E_SUCCESS)
		atomic_set_32(&pf->state, IXL_PF_STATE_EEE_ENABLED);
	else
		atomic_clear_32(&pf->state, IXL_PF_STATE_EEE_ENABLED);

	/* Get MAC addresses from hardware */
	i40e_get_mac_addr(hw, hw->mac.addr);
	error = i40e_validate_mac_addr(hw->mac.addr);
	if (error) {
		device_printf(dev, "validate_mac_addr failed: %d\n", error);
		goto err_mac_hmc;
	}
	bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
	iflib_set_mac(ctx, hw->mac.addr);
	i40e_get_port_mac_addr(hw, hw->mac.port_addr);

	/* Set up the device filtering */
	bzero(&filter, sizeof(filter));
	filter.enable_ethtype = TRUE;
	filter.enable_macvlan = TRUE;
	filter.enable_fdir = FALSE;
	filter.hash_lut_size = I40E_HASH_LUT_SIZE_512;
	if (i40e_set_filter_control(hw, &filter))
		device_printf(dev, "i40e_set_filter_control() failed\n");

	/* Query device FW LLDP status */
	if (i40e_get_fw_lldp_status(hw, &lldp_status) == I40E_SUCCESS) {
		if (lldp_status == I40E_GET_FW_LLDP_STATUS_DISABLED) {
			atomic_set_32(&pf->state,
			    IXL_PF_STATE_FW_LLDP_DISABLED);
		} else {
			atomic_clear_32(&pf->state,
			    IXL_PF_STATE_FW_LLDP_DISABLED);
		}
	}

	/* Tell FW to apply DCB config on link up */
	i40e_aq_set_dcb_parameters(hw, true, NULL);

	/* Fill out iflib parameters */
	ixl_setup_ssctx(pf);

	INIT_DBG_DEV(dev, "end");
	return (0);

err_mac_hmc:
	ixl_shutdown_hmc(pf);
err_get_cap:
	i40e_shutdown_adminq(hw);
err_out:
	ixl_free_pci_resources(pf);
err_pci_res:
	mtx_lock(&pf->admin_mtx);
	callout_stop(&pf->admin_timer);
	mtx_unlock(&pf->admin_mtx);
	mtx_destroy(&pf->admin_mtx);
	return (error);
}

static int
ixl_if_attach_post(if_ctx_t ctx)
{
	device_t dev;
	struct ixl_pf *pf;
	struct i40e_hw *hw;
	struct ixl_vsi *vsi;
	int error = 0;
	enum i40e_status_code status;

	dev = iflib_get_dev(ctx);
	pf = iflib_get_softc(ctx);

	INIT_DBG_DEV(dev, "begin");

	vsi = &pf->vsi;
	vsi->ifp = iflib_get_ifp(ctx);
	hw = &pf->hw;

	/* Save off determined number of queues for interface */
	vsi->num_rx_queues = vsi->shared->isc_nrxqsets;
	vsi->num_tx_queues = vsi->shared->isc_ntxqsets;

	/* Setup OS network interface / ifnet */
	if (ixl_setup_interface(dev, pf)) {
		device_printf(dev, "interface setup failed!\n");
		error = EIO;
		goto err;
	}

	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
		/* Keep admin queue interrupts active while driver is loaded */
		if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
			ixl_configure_intr0_msix(pf);
			ixl_enable_intr0(hw);
		}

		ixl_add_sysctls_recovery_mode(pf);

		/* Start the admin timer */
		mtx_lock(&pf->admin_mtx);
		callout_reset(&pf->admin_timer, hz/2, ixl_admin_timer, pf);
		mtx_unlock(&pf->admin_mtx);
		return (0);
	}

	/* Determine link state */
	if (ixl_attach_get_link_status(pf)) {
		error = EINVAL;
		goto err;
	}

	error = ixl_switch_config(pf);
	if (error) {
		device_printf(dev, "Initial ixl_switch_config() failed: %d\n",
		     error);
		goto err;
	}

	/* Add protocol filters to list */
	ixl_init_filters(vsi);

	/* Init queue allocation manager */
	error = ixl_pf_qmgr_init(&pf->qmgr, hw->func_caps.num_tx_qp);
	if (error) {
		device_printf(dev, "Failed to init queue manager for PF queues, error %d\n",
		    error);
		goto err;
	}
	/* reserve a contiguous allocation for the PF's VSI */
	error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr,
	    max(vsi->num_rx_queues, vsi->num_tx_queues), &pf->qtag);
	if (error) {
		device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
		    error);
		goto err;
	}
	device_printf(dev, "Allocating %d queues for PF LAN VSI; %d queues active\n",
	    pf->qtag.num_allocated, pf->qtag.num_active);

	/* Limit PHY interrupts to link, autoneg, and modules failure */
	status = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
	    NULL);
	if (status) {
		device_printf(dev, "i40e_aq_set_phy_mask() failed: err %s,"
		    " aq_err %s\n", i40e_stat_str(hw, status),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		goto err;
	}

	/* Get the bus configuration and set the shared code */
	ixl_get_bus_info(pf);

	/* Keep admin queue interrupts active while driver is loaded */
	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
		ixl_configure_intr0_msix(pf);
		ixl_enable_intr0(hw);
	}

	/* Set initial advertised speed sysctl value */
	ixl_set_initial_advertised_speeds(pf);

	/* Initialize statistics & add sysctls */
	ixl_add_device_sysctls(pf);
	ixl_pf_reset_stats(pf);
	ixl_update_stats_counters(pf);
	ixl_add_hw_stats(pf);

	/*
	 * Driver may have been reloaded. Ensure that the link state
	 * is consistent with current settings.
	 */
	ixl_set_link(pf, (pf->state & IXL_PF_STATE_LINK_ACTIVE_ON_DOWN) != 0);

	hw->phy.get_link_info = true;
	i40e_get_link_status(hw, &pf->link_up);
	ixl_update_link_status(pf);

#ifdef PCI_IOV
	ixl_initialize_sriov(pf);
#endif

#ifdef IXL_IW
	if (hw->func_caps.iwarp && ixl_enable_iwarp) {
		pf->iw_enabled = (pf->iw_msix > 0) ? true : false;
		if (pf->iw_enabled) {
			error = ixl_iw_pf_attach(pf);
			if (error) {
				device_printf(dev,
				    "interfacing to iWARP driver failed: %d\n",
				    error);
				goto err;
			} else
				device_printf(dev, "iWARP ready\n");
		} else
			device_printf(dev, "iWARP disabled on this device "
			    "(no MSI-X vectors)\n");
	} else {
		pf->iw_enabled = false;
		device_printf(dev, "The device is not iWARP enabled\n");
	}
#endif
	/* Start the admin timer */
	mtx_lock(&pf->admin_mtx);
	callout_reset(&pf->admin_timer, hz/2, ixl_admin_timer, pf);
	mtx_unlock(&pf->admin_mtx);

	INIT_DBG_DEV(dev, "end");
	return (0);

err:
	INIT_DEBUGOUT("end: error %d", error);
	/* ixl_if_detach() is called on error from this */
	return (error);
}

/**
 * XXX: iflib always ignores the return value of detach(), so this
 * function is not allowed to fail.
 */
static int
ixl_if_detach(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	enum i40e_status_code	status;
#ifdef IXL_IW
	int			error;
#endif

	INIT_DBG_DEV(dev, "begin");

	/* Stop the admin timer */
	mtx_lock(&pf->admin_mtx);
	callout_stop(&pf->admin_timer);
	mtx_unlock(&pf->admin_mtx);
	mtx_destroy(&pf->admin_mtx);

#ifdef IXL_IW
	if (ixl_enable_iwarp && pf->iw_enabled) {
		error = ixl_iw_pf_detach(pf);
		if (error == EBUSY) {
			device_printf(dev, "iwarp in use; stop it first.\n");
			/* return (error); */
		}
	}
#endif
	/* Remove all previously allocated media types */
	ifmedia_removeall(vsi->media);

	/* Shutdown LAN HMC */
	ixl_shutdown_hmc(pf);

	/* Shutdown admin queue */
	ixl_disable_intr0(hw);
	status = i40e_shutdown_adminq(hw);
	if (status)
		device_printf(dev,
		    "i40e_shutdown_adminq() failed with status %s\n",
		    i40e_stat_str(hw, status));

	ixl_pf_qmgr_destroy(&pf->qmgr);
	ixl_free_pci_resources(pf);
	ixl_free_filters(&vsi->ftl);
	INIT_DBG_DEV(dev, "end");
	return (0);
}

static int
ixl_if_shutdown(if_ctx_t ctx)
{
	int error = 0;

	INIT_DEBUGOUT("ixl_if_shutdown: begin");

	/* TODO: Call ixl_if_stop()? */

	/* TODO: Then setup low power mode */

	return (error);
}

static int
ixl_if_suspend(if_ctx_t ctx)
{
	int error = 0;

	INIT_DEBUGOUT("ixl_if_suspend: begin");

	/* TODO: Call ixl_if_stop()? */

	/* TODO: Then setup low power mode */

	return (error);
}

static int
ixl_if_resume(if_ctx_t ctx)
{
	if_t ifp = iflib_get_ifp(ctx);

	INIT_DEBUGOUT("ixl_if_resume: begin");

	/* Read & clear wake-up registers */

	/* Required after D3->D0 transition */
	if (if_getflags(ifp) & IFF_UP)
		ixl_if_init(ctx);

	return (0);
}

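/*
 * ixl_if_init - bring the interface up.
 *
 * Kept non-static so other parts of the driver (e.g. the reset-recovery
 * path) can call it directly; iflib invokes it via the ifdi_init method.
 */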
void
ixl_if_init(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw	*hw = &pf->hw;
	if_t ifp = iflib_get_ifp(ctx);
	device_t 	dev = iflib_get_dev(ctx);
	u8		tmpaddr[ETHER_ADDR_LEN];
	int		ret;

	if (IXL_PF_IN_RECOVERY_MODE(pf))
		return;
	/*
	 * If the admin queue is dead here, something outside of the driver
	 * has likely reset the adapter (e.g. a PF reset), so rebuild the
	 * driver's state to match.
	 */
	if (!i40e_check_asq_alive(&pf->hw)) {
		device_printf(dev, "Admin Queue is down; resetting...\n");
		ixl_teardown_hw_structs(pf);
		ixl_rebuild_hw_structs_after_reset(pf, false);
	}

	/* Get the latest MAC address... user might use a LAA */
	bcopy(if_getlladdr(vsi->ifp), tmpaddr, ETH_ALEN);
	if (!ixl_ether_is_equal(hw->mac.addr, tmpaddr) &&
	    (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
		ixl_del_all_vlan_filters(vsi, hw->mac.addr);
		bcopy(tmpaddr, hw->mac.addr, ETH_ALEN);
		ret = i40e_aq_mac_address_write(hw,
		    I40E_AQC_WRITE_TYPE_LAA_ONLY,
		    hw->mac.addr, NULL);
		if (ret) {
			device_printf(dev, "LAA address change failed!\n");
			return;
		}
		/*
		 * New filters are configured by ixl_reconfigure_filters
		 * at the end of ixl_init_locked.
		 */
	}

	iflib_set_mac(ctx, hw->mac.addr);

	/* Prepare the VSI: rings, hmc contexts, etc... */
	if (ixl_initialize_vsi(vsi)) {
		device_printf(dev, "initialize vsi failed!\n");
		return;
	}

	ixl_set_link(pf, true);

	/* Reconfigure multicast filters in HW */
	ixl_if_multi_set(ctx);

	/* Set up RSS */
	ixl_config_rss(pf);

	/* Set up MSI-X routing and the ITR settings */
	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
		ixl_configure_queue_intr_msix(pf);
		ixl_configure_itr(pf);
	} else
		ixl_configure_legacy(pf);

	if (vsi->enable_head_writeback)
		ixl_init_tx_cidx(vsi);
	else
		ixl_init_tx_rsqs(vsi);

	ixl_enable_rings(vsi);

	i40e_aq_set_default_vsi(hw, vsi->seid, NULL);

	/* Re-add configured filters to HW */
	ixl_reconfigure_filters(vsi);

	/* Configure promiscuous mode */
	ixl_if_promisc_set(ctx, if_getflags(ifp));

#ifdef IXL_IW
	if (ixl_enable_iwarp && pf->iw_enabled) {
		ret = ixl_iw_pf_init(pf);
		if (ret)
			device_printf(dev,
			    "initialize iwarp failed, code %d\n", ret);
	}
#endif
}

void
ixl_if_stop(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	if_t ifp = iflib_get_ifp(ctx);
	struct ixl_vsi *vsi = &pf->vsi;

	INIT_DEBUGOUT("ixl_if_stop: begin\n");

	if (IXL_PF_IN_RECOVERY_MODE(pf))
		return;

	/* TODO: This may need to be reworked */
#ifdef IXL_IW
	/* Stop iWARP device */
	if (ixl_enable_iwarp && pf->iw_enabled)
		ixl_iw_pf_stop(pf);
#endif

	ixl_disable_rings_intr(vsi);
	ixl_disable_rings(pf, vsi, &pf->qtag);

	/*
	 * Don't set link state if only reconfiguring
	 * e.g. on MTU change.
	 */
	if ((if_getflags(ifp) & IFF_UP) == 0 &&
	    (atomic_load_acq_32(&pf->state) &
	    IXL_PF_STATE_LINK_ACTIVE_ON_DOWN) == 0)
		ixl_set_link(pf, false);
}

static int
ixl_if_msix_intr_assign(if_ctx_t ctx, int msix)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_rx_queue *rx_que = vsi->rx_queues;
	struct ixl_tx_queue *tx_que = vsi->tx_queues;
	int err, i, rid, vector = 0;
	char buf[16];

	MPASS(vsi->shared->isc_nrxqsets > 0);
	MPASS(vsi->shared->isc_ntxqsets > 0);

	/* Admin Queue must use vector 0 */
	rid = vector + 1;
	err = iflib_irq_alloc_generic(ctx, &vsi->irq, rid, IFLIB_INTR_ADMIN,
	    ixl_msix_adminq, pf, 0, "aq");
	if (err) {
		iflib_irq_free(ctx, &vsi->irq);
		device_printf(iflib_get_dev(ctx),
		    "Failed to register Admin Queue handler\n");
		return (err);
	}

#ifdef PCI_IOV
	/* Create soft IRQ for handling VFLRs */
	iflib_softirq_alloc_generic(ctx, NULL, IFLIB_INTR_IOV, pf, 0, "iov");
#endif

	/* Now set up the stations */
	for (i = 0, vector = 1; i < vsi->shared->isc_nrxqsets; i++, vector++, rx_que++) {
		rid = vector + 1;

		snprintf(buf, sizeof(buf), "rxq%d", i);
		err = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
		    IFLIB_INTR_RXTX, ixl_msix_que, rx_que, rx_que->rxr.me, buf);
		/* XXX: Does the driver work as expected if there are fewer num_rx_queues than
		 * what's expected in the iflib context? */
		if (err) {
			device_printf(iflib_get_dev(ctx),
			    "Failed to allocate queue RX int vector %d, err: %d\n", i, err);
			vsi->num_rx_queues = i + 1;
			goto fail;
		}
		rx_que->msix = vector;
	}

	bzero(buf, sizeof(buf));

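	/*
	 * TX queues share the RX queue MSI-X vectors round-robin below; e.g.
	 * with 4 RX vectors (1-4), txq5 is mapped to vector (5 % 4) + 1 = 2.
	 */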
	for (i = 0; i < vsi->shared->isc_ntxqsets; i++, tx_que++) {
		snprintf(buf, sizeof(buf), "txq%d", i);
		iflib_softirq_alloc_generic(ctx,
		    &vsi->rx_queues[i % vsi->shared->isc_nrxqsets].que_irq,
		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);

		/* TODO: Maybe call a strategy function for this to figure out which
		 * interrupts to map Tx queues to. I don't know if there's an immediately
		 * better way than this other than a user-supplied map, though. */
		tx_que->msix = (i % vsi->shared->isc_nrxqsets) + 1;
	}

	return (0);
fail:
	iflib_irq_free(ctx, &vsi->irq);
	rx_que = vsi->rx_queues;
	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
		iflib_irq_free(ctx, &rx_que->que_irq);
	return (err);
}

/*
 * Enable all interrupts
 *
 * Called in:
 * iflib_init_locked, after ixl_if_init()
 */
static void
ixl_if_enable_intr(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw		*hw = vsi->hw;
	struct ixl_rx_queue	*que = vsi->rx_queues;

	ixl_enable_intr0(hw);
	/* Enable queue interrupts */
	for (int i = 0; i < vsi->num_rx_queues; i++, que++)
		/* TODO: Queue index parameter is probably wrong */
		ixl_enable_queue(hw, que->rxr.me);
}

/*
 * Disable queue interrupts
 *
 * Other interrupt causes need to remain active.
 */
static void
ixl_if_disable_intr(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw		*hw = vsi->hw;
	struct ixl_rx_queue	*rx_que = vsi->rx_queues;

	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
		for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
			ixl_disable_queue(hw, rx_que->msix - 1);
	} else {
		/*
		 * Setting PFINT_LNKLST0 FIRSTQ_INDX to 0x7FF
		 * stops the queues from triggering interrupts.
		 */
		wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
	}
}

static int
ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw		*hw = vsi->hw;
	struct ixl_rx_queue	*rx_que = &vsi->rx_queues[rxqid];

	ixl_enable_queue(hw, rx_que->msix - 1);
	return (0);
}

static int
ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = vsi->hw;
	struct ixl_tx_queue *tx_que = &vsi->tx_queues[txqid];

	ixl_enable_queue(hw, tx_que->msix - 1);
	return (0);
}

static int
ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	if_softc_ctx_t scctx = vsi->shared;
	struct ixl_tx_queue *que;
	int i, j, error = 0;

	MPASS(scctx->isc_ntxqsets > 0);
	MPASS(ntxqs == 1);
	MPASS(scctx->isc_ntxqsets == ntxqsets);

	/* Allocate queue structure memory */
	if (!(vsi->tx_queues =
	    (struct ixl_tx_queue *) malloc(sizeof(struct ixl_tx_queue) *ntxqsets, M_IXL, M_NOWAIT | M_ZERO))) {
		device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n");
		return (ENOMEM);
	}

	for (i = 0, que = vsi->tx_queues; i < ntxqsets; i++, que++) {
		struct tx_ring *txr = &que->txr;

		txr->me = i;
		que->vsi = vsi;

		if (!vsi->enable_head_writeback) {
			/* Allocate report status array */
			if (!(txr->tx_rsq = malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXL, M_NOWAIT))) {
				device_printf(iflib_get_dev(ctx), "failed to allocate tx_rsq memory\n");
				error = ENOMEM;
				goto fail;
			}
			/* Init report status array */
			for (j = 0; j < scctx->isc_ntxd[0]; j++)
				txr->tx_rsq[j] = QIDX_INVALID;
		}
		/* get the virtual and physical address of the hardware queues */
		txr->tail = I40E_QTX_TAIL(txr->me);
		txr->tx_base = (struct i40e_tx_desc *)vaddrs[i * ntxqs];
		txr->tx_paddr = paddrs[i * ntxqs];
		txr->que = que;
	}

	return (0);
fail:
	ixl_if_queues_free(ctx);
	return (error);
}

static int
ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_rx_queue *que;
	int i, error = 0;

#ifdef INVARIANTS
	if_softc_ctx_t scctx = vsi->shared;
	MPASS(scctx->isc_nrxqsets > 0);
	MPASS(nrxqs == 1);
	MPASS(scctx->isc_nrxqsets == nrxqsets);
#endif

	/* Allocate queue structure memory */
	if (!(vsi->rx_queues =
	    (struct ixl_rx_queue *) malloc(sizeof(struct ixl_rx_queue) *
	    nrxqsets, M_IXL, M_NOWAIT | M_ZERO))) {
		device_printf(iflib_get_dev(ctx), "Unable to allocate RX ring memory\n");
		error = ENOMEM;
		goto fail;
	}

	for (i = 0, que = vsi->rx_queues; i < nrxqsets; i++, que++) {
		struct rx_ring *rxr = &que->rxr;

		rxr->me = i;
		que->vsi = vsi;

		/* get the virtual and physical address of the hardware queues */
		rxr->tail = I40E_QRX_TAIL(rxr->me);
		rxr->rx_base = (union i40e_rx_desc *)vaddrs[i * nrxqs];
		rxr->rx_paddr = paddrs[i * nrxqs];
		rxr->que = que;
	}

	return (0);
fail:
	ixl_if_queues_free(ctx);
	return (error);
}

static void
ixl_if_queues_free(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;

	if (vsi->tx_queues != NULL && !vsi->enable_head_writeback) {
		struct ixl_tx_queue *que;
		int i = 0;

		for (i = 0, que = vsi->tx_queues; i < vsi->num_tx_queues; i++, que++) {
			struct tx_ring *txr = &que->txr;
			if (txr->tx_rsq != NULL) {
				free(txr->tx_rsq, M_IXL);
				txr->tx_rsq = NULL;
			}
		}
	}

	if (vsi->tx_queues != NULL) {
		free(vsi->tx_queues, M_IXL);
		vsi->tx_queues = NULL;
	}
	if (vsi->rx_queues != NULL) {
		free(vsi->rx_queues, M_IXL);
		vsi->rx_queues = NULL;
	}

	if (!IXL_PF_IN_RECOVERY_MODE(pf))
		sysctl_ctx_free(&vsi->sysctl_ctx);
}

void
ixl_update_link_status(struct ixl_pf *pf)
{
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = &pf->hw;
	u64 baudrate;

	if (pf->link_up) {
		if (vsi->link_active == FALSE) {
			vsi->link_active = TRUE;
			baudrate = ixl_max_aq_speed_to_value(hw->phy.link_info.link_speed);
			iflib_link_state_change(vsi->ctx, LINK_STATE_UP, baudrate);
			ixl_link_up_msg(pf);
#ifdef PCI_IOV
			ixl_broadcast_link_state(pf);
#endif
		}
	} else { /* Link down */
		if (vsi->link_active == TRUE) {
			vsi->link_active = FALSE;
			iflib_link_state_change(vsi->ctx, LINK_STATE_DOWN, 0);
#ifdef PCI_IOV
			ixl_broadcast_link_state(pf);
#endif
		}
	}
}

static void
ixl_handle_lan_overflow_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
{
	device_t dev = pf->dev;
	u32 rxq_idx, qtx_ctl;

	rxq_idx = (e->desc.params.external.param0 & I40E_PRTDCB_RUPTQ_RXQNUM_MASK) >>
	    I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT;
	qtx_ctl = e->desc.params.external.param1;

	device_printf(dev, "LAN overflow event: global rxq_idx %d\n", rxq_idx);
	device_printf(dev, "LAN overflow event: QTX_CTL 0x%08x\n", qtx_ctl);
}

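/*
 * Drain the admin receive queue (ARQ) and dispatch events.  The loop is
 * bounded by IXL_ADM_LIMIT so a flood of events cannot monopolize the
 * admin task; *pending tells the caller whether more work remains.
 */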
static int
ixl_process_adminq(struct ixl_pf *pf, u16 *pending)
{
	enum i40e_status_code status = I40E_SUCCESS;
	struct i40e_arq_event_info event;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	u16 opcode;
	u32 loop = 0, reg;

	event.buf_len = IXL_AQ_BUF_SZ;
	event.msg_buf = malloc(event.buf_len, M_IXL, M_NOWAIT | M_ZERO);
	if (!event.msg_buf) {
		device_printf(dev, "%s: Unable to allocate memory for Admin"
		    " Queue event!\n", __func__);
		return (ENOMEM);
	}

	/* clean and process any events */
	do {
		status = i40e_clean_arq_element(hw, &event, pending);
		if (status)
			break;
		opcode = LE16_TO_CPU(event.desc.opcode);
		ixl_dbg(pf, IXL_DBG_AQ,
		    "Admin Queue event: %#06x\n", opcode);
		switch (opcode) {
		case i40e_aqc_opc_get_link_status:
			ixl_link_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_pf:
#ifdef PCI_IOV
			ixl_handle_vf_msg(pf, &event);
#endif
			break;
		/*
		 * This should only occur on no-drop queues, which
		 * aren't currently configured.
		 */
		case i40e_aqc_opc_event_lan_overflow:
			ixl_handle_lan_overflow_event(pf, &event);
			break;
		default:
			break;
		}
	} while (*pending && (loop++ < IXL_ADM_LIMIT));

	free(event.msg_buf, M_IXL);

	/* Re-enable admin queue interrupt cause */
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	return (status);
}

static void
ixl_if_update_admin_status(if_ctx_t ctx)
{
	struct ixl_pf	*pf = iflib_get_softc(ctx);
	struct i40e_hw	*hw = &pf->hw;
	u16		pending;

	if (IXL_PF_IS_RESETTING(pf))
		ixl_handle_empr_reset(pf);

	/*
	 * The Admin Queue is shut down while handling a reset.
	 * Don't proceed if it hasn't been re-initialized,
	 * e.g. due to an issue with new FW.
	 */
	if (!i40e_check_asq_alive(&pf->hw))
		return;

	if (pf->state & IXL_PF_STATE_MDD_PENDING)
		ixl_handle_mdd_event(pf);

	ixl_process_adminq(pf, &pending);
	ixl_update_link_status(pf);

	/*
	 * If there are still messages to process, reschedule ourselves.
	 * Otherwise, re-enable our interrupt and go to sleep.
	 */
	if (pending > 0)
		iflib_admin_intr_deferred(ctx);
	else
		ixl_enable_intr0(hw);
}

static void
ixl_if_multi_set(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = vsi->hw;
	int mcnt;

	IOCTL_DEBUGOUT("ixl_if_multi_set: begin");

	/* Delete filters for removed multicast addresses */
	ixl_del_multi(vsi, false);

	mcnt = min(if_llmaddr_count(iflib_get_ifp(ctx)), MAX_MULTICAST_ADDR);
	if (__predict_false(mcnt == MAX_MULTICAST_ADDR)) {
		i40e_aq_set_vsi_multicast_promiscuous(hw,
		    vsi->seid, TRUE, NULL);
		ixl_del_multi(vsi, true);
		return;
	}

	ixl_add_multi(vsi);
	IOCTL_DEBUGOUT("ixl_if_multi_set: end");
}

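/*
 * Reject any MTU that would push the frame past the largest size the MAC
 * can handle (IXL_MAX_FRAME, 9728 bytes on this hardware) once the
 * Ethernet header, CRC and a VLAN tag are accounted for.
 */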
static int
ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;

	IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
	if (mtu > IXL_MAX_FRAME - ETHER_HDR_LEN - ETHER_CRC_LEN -
		ETHER_VLAN_ENCAP_LEN)
		return (EINVAL);

	vsi->shared->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
		ETHER_VLAN_ENCAP_LEN;

	return (0);
}

static void
ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct i40e_hw  *hw = &pf->hw;

	INIT_DEBUGOUT("ixl_media_status: begin");

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!pf->link_up) {
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	/* Hardware is always full-duplex */
	ifmr->ifm_active |= IFM_FDX;

	switch (hw->phy.link_info.phy_type) {
		/* 100 M */
		case I40E_PHY_TYPE_100BASE_TX:
			ifmr->ifm_active |= IFM_100_TX;
			break;
		/* 1 G */
		case I40E_PHY_TYPE_1000BASE_T:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		case I40E_PHY_TYPE_1000BASE_SX:
			ifmr->ifm_active |= IFM_1000_SX;
			break;
		case I40E_PHY_TYPE_1000BASE_LX:
			ifmr->ifm_active |= IFM_1000_LX;
			break;
		case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		/* 2.5 G */
		case I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS:
			ifmr->ifm_active |= IFM_2500_T;
			break;
		/* 5 G */
		case I40E_PHY_TYPE_5GBASE_T_LINK_STATUS:
			ifmr->ifm_active |= IFM_5000_T;
			break;
		/* 10 G */
		case I40E_PHY_TYPE_10GBASE_SFPP_CU:
			ifmr->ifm_active |= IFM_10G_TWINAX;
			break;
		case I40E_PHY_TYPE_10GBASE_SR:
			ifmr->ifm_active |= IFM_10G_SR;
			break;
		case I40E_PHY_TYPE_10GBASE_LR:
			ifmr->ifm_active |= IFM_10G_LR;
			break;
		case I40E_PHY_TYPE_10GBASE_T:
			ifmr->ifm_active |= IFM_10G_T;
			break;
		case I40E_PHY_TYPE_XAUI:
		case I40E_PHY_TYPE_XFI:
			ifmr->ifm_active |= IFM_10G_TWINAX;
			break;
		case I40E_PHY_TYPE_10GBASE_AOC:
			ifmr->ifm_active |= IFM_10G_AOC;
			break;
		/* 25 G */
		case I40E_PHY_TYPE_25GBASE_KR:
			ifmr->ifm_active |= IFM_25G_KR;
			break;
		case I40E_PHY_TYPE_25GBASE_CR:
			ifmr->ifm_active |= IFM_25G_CR;
			break;
		case I40E_PHY_TYPE_25GBASE_SR:
			ifmr->ifm_active |= IFM_25G_SR;
			break;
		case I40E_PHY_TYPE_25GBASE_LR:
			ifmr->ifm_active |= IFM_25G_LR;
			break;
		case I40E_PHY_TYPE_25GBASE_AOC:
			ifmr->ifm_active |= IFM_25G_AOC;
			break;
		case I40E_PHY_TYPE_25GBASE_ACC:
			ifmr->ifm_active |= IFM_25G_ACC;
			break;
		/* 40 G */
		case I40E_PHY_TYPE_40GBASE_CR4:
		case I40E_PHY_TYPE_40GBASE_CR4_CU:
			ifmr->ifm_active |= IFM_40G_CR4;
			break;
		case I40E_PHY_TYPE_40GBASE_SR4:
			ifmr->ifm_active |= IFM_40G_SR4;
			break;
		case I40E_PHY_TYPE_40GBASE_LR4:
			ifmr->ifm_active |= IFM_40G_LR4;
			break;
		case I40E_PHY_TYPE_XLAUI:
			ifmr->ifm_active |= IFM_OTHER;
			break;
		case I40E_PHY_TYPE_1000BASE_KX:
			ifmr->ifm_active |= IFM_1000_KX;
			break;
		case I40E_PHY_TYPE_SGMII:
			ifmr->ifm_active |= IFM_1000_SGMII;
			break;
		/* ERJ: What's the difference between these? */
		case I40E_PHY_TYPE_10GBASE_CR1_CU:
		case I40E_PHY_TYPE_10GBASE_CR1:
			ifmr->ifm_active |= IFM_10G_CR1;
			break;
		case I40E_PHY_TYPE_10GBASE_KX4:
			ifmr->ifm_active |= IFM_10G_KX4;
			break;
		case I40E_PHY_TYPE_10GBASE_KR:
			ifmr->ifm_active |= IFM_10G_KR;
			break;
		case I40E_PHY_TYPE_SFI:
			ifmr->ifm_active |= IFM_10G_SFI;
			break;
		/* Our single 20G media type */
		case I40E_PHY_TYPE_20GBASE_KR2:
			ifmr->ifm_active |= IFM_20G_KR2;
			break;
		case I40E_PHY_TYPE_40GBASE_KR4:
			ifmr->ifm_active |= IFM_40G_KR4;
			break;
		case I40E_PHY_TYPE_XLPPI:
		case I40E_PHY_TYPE_40GBASE_AOC:
			ifmr->ifm_active |= IFM_40G_XLPPI;
			break;
		/* Unknown to driver */
		default:
			ifmr->ifm_active |= IFM_UNKNOWN;
			break;
	}
	/* Report flow control status as well */
	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
}

static int
ixl_if_media_change(if_ctx_t ctx)
{
	struct ifmedia *ifm = iflib_get_media(ctx);

	INIT_DEBUGOUT("ixl_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	if_printf(iflib_get_ifp(ctx), "Media change is not supported.\n");
	return (ENODEV);
}

static int
ixl_if_promisc_set(if_ctx_t ctx, int flags)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	if_t ifp = iflib_get_ifp(ctx);
	struct i40e_hw	*hw = vsi->hw;
	int		err;
	bool		uni = FALSE, multi = FALSE;

	if (flags & IFF_PROMISC)
		uni = multi = TRUE;
	else if (flags & IFF_ALLMULTI || if_llmaddr_count(ifp) >=
	    MAX_MULTICAST_ADDR)
		multi = TRUE;

	err = i40e_aq_set_vsi_unicast_promiscuous(hw,
	    vsi->seid, uni, NULL, true);
	if (err)
		return (err);
	err = i40e_aq_set_vsi_multicast_promiscuous(hw,
	    vsi->seid, multi, NULL);
	return (err);
}

static void
ixl_if_timer(if_ctx_t ctx, uint16_t qid)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);

	if (qid != 0)
		return;

	ixl_update_stats_counters(pf);
}

static void
ixl_if_vlan_register(if_ctx_t ctx, u16 vtag)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw	*hw = vsi->hw;
	if_t ifp = iflib_get_ifp(ctx);

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	/*
	 * Keep track of registered VLANs to know what
	 * filters have to be configured when VLAN_HWFILTER
	 * capability is enabled.
	 */
	++vsi->num_vlans;
	bit_set(vsi->vlans_map, vtag);

	if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0)
		return;

	if (vsi->num_vlans < IXL_MAX_VLAN_FILTERS)
		ixl_add_filter(vsi, hw->mac.addr, vtag);
	else if (vsi->num_vlans == IXL_MAX_VLAN_FILTERS) {
		/*
		 * There are not enough HW resources to add filters
		 * for all registered VLANs. Re-configure filtering
		 * to allow reception of all expected traffic.
		 */
		device_printf(vsi->dev,
		    "Not enough HW filters for all VLANs. VLAN HW filtering disabled\n");
		ixl_del_all_vlan_filters(vsi, hw->mac.addr);
		ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
	}
}

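/*
 * Mirror of ixl_if_vlan_register().  One HW filter slot stays reserved
 * for untagged frames, so once the VLAN count drops back below
 * IXL_MAX_VLAN_FILTERS - 1 the per-VLAN filters are restored.
 */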
static void
ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw	*hw = vsi->hw;
	if_t ifp = iflib_get_ifp(ctx);

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	--vsi->num_vlans;
	bit_clear(vsi->vlans_map, vtag);

	if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0)
		return;

	/* One filter is used for untagged frames */
	if (vsi->num_vlans < IXL_MAX_VLAN_FILTERS - 1)
		ixl_del_filter(vsi, hw->mac.addr, vtag);
	else if (vsi->num_vlans == IXL_MAX_VLAN_FILTERS - 1) {
		ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
		ixl_add_vlan_filters(vsi, hw->mac.addr);
	}
}

static uint64_t
ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	if_t ifp = iflib_get_ifp(ctx);

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (vsi->ipackets);
	case IFCOUNTER_IERRORS:
		return (vsi->ierrors);
	case IFCOUNTER_OPACKETS:
		return (vsi->opackets);
	case IFCOUNTER_OERRORS:
		return (vsi->oerrors);
	case IFCOUNTER_COLLISIONS:
		/* Collisions are by standard impossible in 40G/10G Ethernet */
		return (0);
	case IFCOUNTER_IBYTES:
		return (vsi->ibytes);
	case IFCOUNTER_OBYTES:
		return (vsi->obytes);
	case IFCOUNTER_IMCASTS:
		return (vsi->imcasts);
	case IFCOUNTER_OMCASTS:
		return (vsi->omcasts);
	case IFCOUNTER_IQDROPS:
		return (vsi->iqdrops);
	case IFCOUNTER_OQDROPS:
		return (vsi->oqdrops);
	case IFCOUNTER_NOPROTO:
		return (vsi->noproto);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}

#ifdef PCI_IOV
static void
ixl_if_vflr_handle(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);

	ixl_handle_vflr(pf);
}
#endif

static int
ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
{
	struct ixl_pf		*pf = iflib_get_softc(ctx);

	if (pf->read_i2c_byte == NULL)
		return (EINVAL);

	for (int i = 0; i < req->len; i++)
		if (pf->read_i2c_byte(pf, req->offset + i,
		    req->dev_addr, &req->data[i]))
			return (EIO);
	return (0);
}

static int
ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ifdrv *ifd = (struct ifdrv *)data;
	int error = 0;

	/*
	 * The iflib_if_ioctl forwards SIOCxDRVSPEC and SIOCGPRIVATE_0 without
	 * performing privilege checks. It is important that this function
	 * perform the necessary checks for commands which should only be
	 * executed by privileged threads.
	 */

	switch(command) {
	case SIOCGDRVSPEC:
	case SIOCSDRVSPEC:
		/* NVM update command */
		if (ifd->ifd_cmd == I40E_NVM_ACCESS) {
			error = priv_check(curthread, PRIV_DRIVER);
			if (error)
				break;
			error = ixl_handle_nvmupd_cmd(pf, ifd);
		} else {
			error = EINVAL;
		}
		break;
	default:
		error = EOPNOTSUPP;
	}

	return (error);
}

/* ixl_if_needs_restart - Tell iflib when the driver needs to be reinitialized
 * @ctx: iflib context
 * @event: event code to check
 *
 * Defaults to returning false for every event.
 *
 * @returns true if iflib needs to reinit the interface, false otherwise
 */
static bool
ixl_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
{
	switch (event) {
	case IFLIB_RESTART_VLAN_CONFIG:
	default:
		return (false);
	}
}

/*
 * Sanity check and save off tunable values.
 */
static void
ixl_save_pf_tunables(struct ixl_pf *pf)
{
	device_t dev = pf->dev;

	/* Save tunable information */
#ifdef IXL_DEBUG_FC
	pf->enable_tx_fc_filter = ixl_enable_tx_fc_filter;
#endif
#ifdef IXL_DEBUG
	pf->recovery_mode = ixl_debug_recovery_mode;
#endif
	pf->dbg_mask = ixl_core_debug_mask;
	pf->hw.debug_mask = ixl_shared_debug_mask;
	pf->vsi.enable_head_writeback = !!(ixl_enable_head_writeback);
	pf->enable_vf_loopback = !!(ixl_enable_vf_loopback);
#if 0
	pf->dynamic_rx_itr = ixl_dynamic_rx_itr;
	pf->dynamic_tx_itr = ixl_dynamic_tx_itr;
#endif

	if (ixl_i2c_access_method > 3 || ixl_i2c_access_method < 0)
		pf->i2c_access_method = 0;
	else
		pf->i2c_access_method = ixl_i2c_access_method;

	if (ixl_tx_itr < 0 || ixl_tx_itr > IXL_MAX_ITR) {
		device_printf(dev, "Invalid tx_itr value of %d set!\n",
		    ixl_tx_itr);
		device_printf(dev, "tx_itr must be between %d and %d, "
		    "inclusive\n",
		    0, IXL_MAX_ITR);
		device_printf(dev, "Using default value of %d instead\n",
		    IXL_ITR_4K);
		pf->tx_itr = IXL_ITR_4K;
	} else
		pf->tx_itr = ixl_tx_itr;

	if (ixl_rx_itr < 0 || ixl_rx_itr > IXL_MAX_ITR) {
		device_printf(dev, "Invalid rx_itr value of %d set!\n",
		    ixl_rx_itr);
		device_printf(dev, "rx_itr must be between %d and %d, "
		    "inclusive\n",
		    0, IXL_MAX_ITR);
		device_printf(dev, "Using default value of %d instead\n",
		    IXL_ITR_8K);
		pf->rx_itr = IXL_ITR_8K;
	} else
		pf->rx_itr = ixl_rx_itr;

	pf->fc = -1;
	if (ixl_flow_control != -1) {
		if (ixl_flow_control < 0 || ixl_flow_control > 3) {
			device_printf(dev,
			    "Invalid flow_control value of %d set!\n",
			    ixl_flow_control);
			device_printf(dev,
			    "flow_control must be between %d and %d, "
			    "inclusive\n", 0, 3);
			device_printf(dev,
			    "Using default configuration instead\n");
		} else
			pf->fc = ixl_flow_control;
	}
}