xref: /freebsd/sys/dev/iavf/if_iavf_iflib.c (revision 734e82fe33aa764367791a7d603b383996c6b40b)
1 /* SPDX-License-Identifier: BSD-3-Clause */
2 /*  Copyright (c) 2021, Intel Corporation
3  *  All rights reserved.
4  *
5  *  Redistribution and use in source and binary forms, with or without
6  *  modification, are permitted provided that the following conditions are met:
7  *
8  *   1. Redistributions of source code must retain the above copyright notice,
9  *      this list of conditions and the following disclaimer.
10  *
11  *   2. Redistributions in binary form must reproduce the above copyright
12  *      notice, this list of conditions and the following disclaimer in the
13  *      documentation and/or other materials provided with the distribution.
14  *
15  *   3. Neither the name of the Intel Corporation nor the names of its
16  *      contributors may be used to endorse or promote products derived from
17  *      this software without specific prior written permission.
18  *
19  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  *  POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /**
33  * @file if_iavf_iflib.c
34  * @brief iflib driver implementation
35  *
36  * Contains the main entry point for the iflib driver implementation. It
37  * implements the various ifdi driver methods, and sets up the module and
38  * driver values to load an iflib driver.
39  */
40 
41 #include "iavf_iflib.h"
42 #include "iavf_vc_common.h"
43 
44 #include "iavf_drv_info.h"
45 #include "iavf_sysctls_iflib.h"
46 
47 /*********************************************************************
48  *  Function prototypes
49  *********************************************************************/
/*********************************************************************
 *  Function prototypes
 *********************************************************************/
/* iflib ifdi_* driver method implementations */
static void	 *iavf_register(device_t dev);
static int	 iavf_if_attach_pre(if_ctx_t ctx);
static int	 iavf_if_attach_post(if_ctx_t ctx);
static int	 iavf_if_detach(if_ctx_t ctx);
static int	 iavf_if_shutdown(if_ctx_t ctx);
static int	 iavf_if_suspend(if_ctx_t ctx);
static int	 iavf_if_resume(if_ctx_t ctx);
static int	 iavf_if_msix_intr_assign(if_ctx_t ctx, int msix);
static void	 iavf_if_enable_intr(if_ctx_t ctx);
static void	 iavf_if_disable_intr(if_ctx_t ctx);
static int	 iavf_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid);
static int	 iavf_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid);
static int	 iavf_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets);
static int	 iavf_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
static void	 iavf_if_queues_free(if_ctx_t ctx);
static void	 iavf_if_update_admin_status(if_ctx_t ctx);
static void	 iavf_if_multi_set(if_ctx_t ctx);
static int	 iavf_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
static void	 iavf_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr);
static int	 iavf_if_media_change(if_ctx_t ctx);
static int	 iavf_if_promisc_set(if_ctx_t ctx, int flags);
static void	 iavf_if_timer(if_ctx_t ctx, uint16_t qid);
static void	 iavf_if_vlan_register(if_ctx_t ctx, u16 vtag);
static void	 iavf_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
static uint64_t	 iavf_if_get_counter(if_ctx_t ctx, ift_counter cnt);
static void	 iavf_if_init(if_ctx_t ctx);
static void	 iavf_if_stop(if_ctx_t ctx);

/* Driver-local setup/teardown helpers */
static int	iavf_allocate_pci_resources(struct iavf_sc *);
static void	iavf_free_pci_resources(struct iavf_sc *);
static void	iavf_setup_interface(struct iavf_sc *);
static void	iavf_add_device_sysctls(struct iavf_sc *);
static void	iavf_enable_queue_irq(struct iavf_hw *, int);
static void	iavf_disable_queue_irq(struct iavf_hw *, int);
static void	iavf_stop(struct iavf_sc *);

/* MAC filter and interrupt handlers */
static int	iavf_del_mac_filter(struct iavf_sc *sc, u8 *macaddr);
static int	iavf_msix_que(void *);
static int	iavf_msix_adminq(void *);
static void	iavf_configure_itr(struct iavf_sc *sc);

/* sysctl handlers */
static int	iavf_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS);
#ifdef IAVF_DEBUG
static int	iavf_sysctl_vf_reset(SYSCTL_HANDLER_ARGS);
static int	iavf_sysctl_vflr_reset(SYSCTL_HANDLER_ARGS);
#endif

/* Virtual channel (PF<->VF) message processing */
static enum iavf_status iavf_process_adminq(struct iavf_sc *, u16 *);
static void	iavf_vc_task(void *arg, int pending __unused);
static int	iavf_setup_vc_tq(struct iavf_sc *sc);
static int	iavf_vc_sleep_wait(struct iavf_sc *sc, u32 op);
101 
102 /*********************************************************************
103  *  FreeBSD Device Interface Entry Points
104  *********************************************************************/
105 
/**
 * @var iavf_methods
 * @brief device methods for the iavf driver
 *
 * Device method callbacks used to interact with the driver. For iflib this
 * primarily resolves to the default iflib implementations.
 */
static device_method_t iavf_methods[] = {
	/* Device interface */
	/* device_register is the only driver-specific method; it hands the
	 * shared context (iavf_sctx) to iflib. All others defer to iflib. */
	DEVMETHOD(device_register, iavf_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
	DEVMETHOD_END
};
122 
/* newbus driver definition; softc is the full iavf_sc private state */
static driver_t iavf_driver = {
	"iavf", iavf_methods, sizeof(struct iavf_sc),
};

/* Register the driver on the PCI bus and declare module metadata */
DRIVER_MODULE(iavf, pci, iavf_driver, 0, 0);
MODULE_VERSION(iavf, 1);

MODULE_DEPEND(iavf, pci, 1, 1, 1);
MODULE_DEPEND(iavf, ether, 1, 1, 1);
MODULE_DEPEND(iavf, iflib, 1, 1, 1);

/* Export PNP match data so devmatch(8) can autoload the module */
IFLIB_PNP_INFO(pci, iavf, iavf_vendor_info_array);

/**
 * @var M_IAVF
 * @brief main iavf driver allocation type
 *
 * malloc(9) allocation type used by the majority of memory allocations in the
 * iavf iflib driver.
 */
MALLOC_DEFINE(M_IAVF, "iavf", "iavf driver allocations");
144 
/*
 * iflib ifdi_* method table: maps each iflib driver callback to its
 * implementation in this file.
 */
static device_method_t iavf_if_methods[] = {
	DEVMETHOD(ifdi_attach_pre, iavf_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, iavf_if_attach_post),
	DEVMETHOD(ifdi_detach, iavf_if_detach),
	DEVMETHOD(ifdi_shutdown, iavf_if_shutdown),
	DEVMETHOD(ifdi_suspend, iavf_if_suspend),
	DEVMETHOD(ifdi_resume, iavf_if_resume),
	DEVMETHOD(ifdi_init, iavf_if_init),
	DEVMETHOD(ifdi_stop, iavf_if_stop),
	DEVMETHOD(ifdi_msix_intr_assign, iavf_if_msix_intr_assign),
	DEVMETHOD(ifdi_intr_enable, iavf_if_enable_intr),
	DEVMETHOD(ifdi_intr_disable, iavf_if_disable_intr),
	DEVMETHOD(ifdi_rx_queue_intr_enable, iavf_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queue_intr_enable, iavf_if_tx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queues_alloc, iavf_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, iavf_if_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, iavf_if_queues_free),
	DEVMETHOD(ifdi_update_admin_status, iavf_if_update_admin_status),
	DEVMETHOD(ifdi_multi_set, iavf_if_multi_set),
	DEVMETHOD(ifdi_mtu_set, iavf_if_mtu_set),
	DEVMETHOD(ifdi_media_status, iavf_if_media_status),
	DEVMETHOD(ifdi_media_change, iavf_if_media_change),
	DEVMETHOD(ifdi_promisc_set, iavf_if_promisc_set),
	DEVMETHOD(ifdi_timer, iavf_if_timer),
	DEVMETHOD(ifdi_vlan_register, iavf_if_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister, iavf_if_vlan_unregister),
	DEVMETHOD(ifdi_get_counter, iavf_if_get_counter),
	DEVMETHOD_END
};

/* iflib-facing driver definition wrapping the ifdi method table above */
static driver_t iavf_if_driver = {
	"iavf_if", iavf_if_methods, sizeof(struct iavf_sc)
};
178 
/*
 * Tx/Rx fast-path method tables; iavf_if_attach_pre() selects _hwb (head
 * writeback) or _dwb (descriptor writeback) based on
 * vsi->enable_head_writeback.
 */
extern struct if_txrx iavf_txrx_hwb;
extern struct if_txrx iavf_txrx_dwb;

/*
 * Shared context describing device limits and defaults to iflib; returned
 * to iflib from iavf_register().
 */
static struct if_shared_ctx iavf_sctx = {
	.isc_magic = IFLIB_MAGIC,
	.isc_q_align = PAGE_SIZE,
	.isc_tx_maxsize = IAVF_MAX_FRAME,
	.isc_tx_maxsegsize = IAVF_MAX_FRAME,
	.isc_tso_maxsize = IAVF_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = IAVF_MAX_DMA_SEG_SIZE,
	.isc_rx_maxsize = IAVF_MAX_FRAME,
	.isc_rx_nsegments = IAVF_MAX_RX_SEGS,
	.isc_rx_maxsegsize = IAVF_MAX_FRAME,
	.isc_nfl = 1,
	.isc_ntxqs = 1,
	.isc_nrxqs = 1,

	/* One extra vector for the admin queue interrupt */
	.isc_admin_intrcnt = 1,
	.isc_vendor_info = iavf_vendor_info_array,
	.isc_driver_version = __DECONST(char *, iavf_driver_version),
	.isc_driver = &iavf_if_driver,
	.isc_flags = IFLIB_NEED_SCRATCH | IFLIB_NEED_ZERO_CSUM | IFLIB_TSO_INIT_IP | IFLIB_IS_VF,

	.isc_nrxd_min = {IAVF_MIN_RING},
	.isc_ntxd_min = {IAVF_MIN_RING},
	.isc_nrxd_max = {IAVF_MAX_RING},
	.isc_ntxd_max = {IAVF_MAX_RING},
	.isc_nrxd_default = {IAVF_DEFAULT_RING},
	.isc_ntxd_default = {IAVF_DEFAULT_RING},
};
209 
210 /*** Functions ***/
211 
/**
 * iavf_register - iflib callback to obtain the shared context pointer
 * @dev: the device being registered
 *
 * Called when the driver is first being attached to the driver. This function
 * is used by iflib to obtain a pointer to the shared context structure which
 * describes the device features.
 *
 * @returns a pointer to the iavf shared context structure.
 */
static void *
iavf_register(device_t dev __unused)
{
	/* Same static context for every device instance */
	return (&iavf_sctx);
}
227 
/**
 * iavf_allocate_pci_resources - Allocate PCI resources
 * @sc: the device private softc
 *
 * Allocate PCI resources used by the iflib driver. Thin wrapper around the
 * implementation shared with the non-iflib driver variant.
 *
 * @returns zero or a non-zero error code on failure
 */
static int
iavf_allocate_pci_resources(struct iavf_sc *sc)
{
	/* Parenthesized return for consistency with style(9) and the rest
	 * of this file. */
	return (iavf_allocate_pci_resources_common(sc));
}
241 
242 /**
243  * iavf_if_attach_pre - Begin attaching the device to the driver
244  * @ctx: the iflib context pointer
245  *
246  * Called by iflib to begin the attach process. Allocates resources and
247  * initializes the hardware for operation.
248  *
249  * @returns zero or a non-zero error code on failure.
250  */
251 static int
252 iavf_if_attach_pre(if_ctx_t ctx)
253 {
254 	device_t dev;
255 	struct iavf_sc *sc;
256 	struct iavf_hw *hw;
257 	struct iavf_vsi *vsi;
258 	if_softc_ctx_t scctx;
259 	int error = 0;
260 
261 	/* Setup pointers */
262 	dev = iflib_get_dev(ctx);
263 	sc = iavf_sc_from_ctx(ctx);
264 
265 	vsi = &sc->vsi;
266 	vsi->back = sc;
267 	sc->dev = sc->osdep.dev = dev;
268 	hw = &sc->hw;
269 
270 	vsi->dev = dev;
271 	vsi->hw = &sc->hw;
272 	vsi->num_vlans = 0;
273 	vsi->ctx = ctx;
274 	sc->media = iflib_get_media(ctx);
275 	vsi->ifp = iflib_get_ifp(ctx);
276 	vsi->shared = scctx = iflib_get_softc_ctx(ctx);
277 
278 	iavf_save_tunables(sc);
279 
280 	/* Setup VC mutex */
281 	snprintf(sc->vc_mtx_name, sizeof(sc->vc_mtx_name),
282 		 "%s:vc", device_get_nameunit(dev));
283 	mtx_init(&sc->vc_mtx, sc->vc_mtx_name, NULL, MTX_DEF);
284 
285 	/* Do PCI setup - map BAR0, etc */
286 	error = iavf_allocate_pci_resources(sc);
287 	if (error) {
288 		device_printf(dev, "%s: Allocation of PCI resources failed\n",
289 		    __func__);
290 		goto err_early;
291 	}
292 
293 	iavf_dbg_init(sc, "Allocated PCI resources and MSI-X vectors\n");
294 
295 	error = iavf_set_mac_type(hw);
296 	if (error) {
297 		device_printf(dev, "%s: set_mac_type failed: %d\n",
298 		    __func__, error);
299 		goto err_pci_res;
300 	}
301 
302 	error = iavf_reset_complete(hw);
303 	if (error) {
304 		device_printf(dev, "%s: Device is still being reset\n",
305 		    __func__);
306 		goto err_pci_res;
307 	}
308 
309 	iavf_dbg_init(sc, "VF Device is ready for configuration\n");
310 
311 	/* Sets up Admin Queue */
312 	error = iavf_setup_vc(sc);
313 	if (error) {
314 		device_printf(dev, "%s: Error setting up PF comms, %d\n",
315 		    __func__, error);
316 		goto err_pci_res;
317 	}
318 
319 	iavf_dbg_init(sc, "PF API version verified\n");
320 
321 	/* Need API version before sending reset message */
322 	error = iavf_reset(sc);
323 	if (error) {
324 		device_printf(dev, "VF reset failed; reload the driver\n");
325 		goto err_aq;
326 	}
327 
328 	iavf_dbg_init(sc, "VF reset complete\n");
329 
330 	/* Ask for VF config from PF */
331 	error = iavf_vf_config(sc);
332 	if (error) {
333 		device_printf(dev, "Error getting configuration from PF: %d\n",
334 		    error);
335 		goto err_aq;
336 	}
337 
338 	iavf_print_device_info(sc);
339 
340 	error = iavf_get_vsi_res_from_vf_res(sc);
341 	if (error)
342 		goto err_res_buf;
343 
344 	iavf_dbg_init(sc, "Resource Acquisition complete\n");
345 
346 	/* Setup taskqueue to service VC messages */
347 	error = iavf_setup_vc_tq(sc);
348 	if (error)
349 		goto err_vc_tq;
350 
351 	iavf_set_mac_addresses(sc);
352 	iflib_set_mac(ctx, hw->mac.addr);
353 
354 	/* Allocate filter lists */
355 	iavf_init_filters(sc);
356 
357 	/* Fill out more iflib parameters */
358 	scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max =
359 	    sc->vsi_res->num_queue_pairs;
360 	if (vsi->enable_head_writeback) {
361 		scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
362 		    * sizeof(struct iavf_tx_desc) + sizeof(u32), DBA_ALIGN);
363 		scctx->isc_txrx = &iavf_txrx_hwb;
364 	} else {
365 		scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
366 		    * sizeof(struct iavf_tx_desc), DBA_ALIGN);
367 		scctx->isc_txrx = &iavf_txrx_dwb;
368 	}
369 	scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0]
370 	    * sizeof(union iavf_32byte_rx_desc), DBA_ALIGN);
371 	scctx->isc_msix_bar = PCIR_BAR(IAVF_MSIX_BAR);
372 	scctx->isc_tx_nsegments = IAVF_MAX_TX_SEGS;
373 	scctx->isc_tx_tso_segments_max = IAVF_MAX_TSO_SEGS;
374 	scctx->isc_tx_tso_size_max = IAVF_TSO_SIZE;
375 	scctx->isc_tx_tso_segsize_max = IAVF_MAX_DMA_SEG_SIZE;
376 	scctx->isc_rss_table_size = IAVF_RSS_VSI_LUT_SIZE;
377 	scctx->isc_capabilities = scctx->isc_capenable = IAVF_CAPS;
378 	scctx->isc_tx_csum_flags = CSUM_OFFLOAD;
379 
380 	/* Update OS cache of MSIX control register values */
381 	iavf_update_msix_devinfo(dev);
382 
383 	return (0);
384 
385 err_vc_tq:
386 	taskqueue_free(sc->vc_tq);
387 err_res_buf:
388 	free(sc->vf_res, M_IAVF);
389 err_aq:
390 	iavf_shutdown_adminq(hw);
391 err_pci_res:
392 	iavf_free_pci_resources(sc);
393 err_early:
394 	IAVF_VC_LOCK_DESTROY(sc);
395 	return (error);
396 }
397 
398 /**
399  * iavf_vc_task - task used to process VC messages
400  * @arg: device softc
401  * @pending: unused
402  *
403  * Processes the admin queue, in order to process the virtual
404  * channel messages received from the PF.
405  */
406 static void
407 iavf_vc_task(void *arg, int pending __unused)
408 {
409 	struct iavf_sc *sc = (struct iavf_sc *)arg;
410 	u16 var;
411 
412 	iavf_process_adminq(sc, &var);
413 }
414 
415 /**
416  * iavf_setup_vc_tq - Setup task queues
417  * @sc: device softc
418  *
419  * Create taskqueue and tasklet for processing virtual channel messages. This
420  * is done in a separate non-iflib taskqueue so that the iflib context lock
421  * does not need to be held for VC messages to be processed.
422  *
423  * @returns zero on success, or an error code on failure.
424  */
425 static int
426 iavf_setup_vc_tq(struct iavf_sc *sc)
427 {
428 	device_t dev = sc->dev;
429 	int error = 0;
430 
431 	TASK_INIT(&sc->vc_task, 0, iavf_vc_task, sc);
432 
433 	sc->vc_tq = taskqueue_create_fast("iavf_vc", M_NOWAIT,
434 	    taskqueue_thread_enqueue, &sc->vc_tq);
435 	if (!sc->vc_tq) {
436 		device_printf(dev, "taskqueue_create_fast (for VC task) returned NULL!\n");
437 		return (ENOMEM);
438 	}
439 	error = taskqueue_start_threads(&sc->vc_tq, 1, PI_NET, "%s vc",
440 	    device_get_nameunit(dev));
441 	if (error) {
442 		device_printf(dev, "taskqueue_start_threads (for VC task) error: %d\n",
443 		    error);
444 		taskqueue_free(sc->vc_tq);
445 		return (error);
446 	}
447 
448 	return (error);
449 }
450 
/**
 * iavf_if_attach_post - Finish attaching the device to the driver
 * @ctx: the iflib context pointer
 *
 * Called by iflib after it has setup queues and interrupts. Used to finish up
 * the attach process for a device. Attach logic which must occur after Tx and
 * Rx queues are setup belongs here.
 *
 * @returns zero or a non-zero error code on failure
 */
static int
iavf_if_attach_post(if_ctx_t ctx)
{
/* NOTE(review): guard uses IXL_DEBUG (inherited from the ixl PF driver)
 * while other debug code in this file is under IAVF_DEBUG — confirm which
 * is intended. INIT_DBG_DEV presumably expands to nothing unless IXL_DEBUG
 * is defined, since 'dev' only exists under that guard. */
#ifdef IXL_DEBUG
	device_t dev = iflib_get_dev(ctx);
#endif
	struct iavf_sc	*sc;
	struct iavf_hw	*hw;
	struct iavf_vsi *vsi;
	int error = 0;

	INIT_DBG_DEV(dev, "begin");

	sc = iavf_sc_from_ctx(ctx);
	vsi = &sc->vsi;
	hw = &sc->hw;

	/* Save off determined number of queues for interface */
	vsi->num_rx_queues = vsi->shared->isc_nrxqsets;
	vsi->num_tx_queues = vsi->shared->isc_ntxqsets;

	/* Setup the stack interface */
	iavf_setup_interface(sc);

	iavf_dbg_init(sc, "Interface setup complete\n");

	/* Initialize statistics & add sysctls */
	bzero(&sc->vsi.eth_stats, sizeof(struct iavf_eth_stats));
	iavf_add_device_sysctls(sc);

	atomic_store_rel_32(&sc->queues_enabled, 0);
	iavf_set_state(&sc->state, IAVF_STATE_INITIALIZED);

	/* We want AQ enabled early for init */
	iavf_enable_adminq_irq(hw);

	INIT_DBG_DEV(dev, "end");

	/* error is never set above; always returns zero */
	return (error);
}
501 
/**
 * iavf_if_detach - Detach a device from the driver
 * @ctx: the iflib context of the device to detach
 *
 * Called by iflib to detach a given device from the driver. Clean up any
 * resources associated with the driver and shut the device down.
 *
 * @remark iflib always ignores the return value of IFDI_DETACH, so this
 * function is effectively not allowed to fail. Instead, it should clean up
 * and release as much as possible even if something goes wrong.
 *
 * @returns zero
 */
static int
iavf_if_detach(if_ctx_t ctx)
{
	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
	struct iavf_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	enum iavf_status status;

	INIT_DBG_DEV(dev, "begin");

	/* Mark the device as no longer initialized before teardown begins */
	iavf_clear_state(&sc->state, IAVF_STATE_INITIALIZED);

	/* Drain admin queue taskqueue */
	taskqueue_free(sc->vc_tq);
	IAVF_VC_LOCK_DESTROY(sc);

	/* Remove all the media and link information */
	ifmedia_removeall(sc->media);

	/* Quiesce the admin queue interrupt before shutting the AQ down */
	iavf_disable_adminq_irq(hw);
	status = iavf_shutdown_adminq(&sc->hw);
	if (status != IAVF_SUCCESS) {
		/* Log and continue: detach must release everything anyway */
		device_printf(dev,
		    "iavf_shutdown_adminq() failed with status %s\n",
		    iavf_stat_str(hw, status));
	}

	/* Release VF resource buffer, PCI resources and filter lists */
	free(sc->vf_res, M_IAVF);
	sc->vf_res = NULL;
	iavf_free_pci_resources(sc);
	iavf_free_filters(sc);

	INIT_DBG_DEV(dev, "end");
	return (0);
}
550 
/**
 * iavf_if_shutdown - called by iflib to handle shutdown
 * @ctx: the iflib context pointer
 *
 * Callback for the IFDI_SHUTDOWN iflib function.
 *
 * @returns zero or an error code on failure
 */
static int
iavf_if_shutdown(if_ctx_t ctx __unused)
{
	/* No device-specific shutdown work; always report success */
	return (0);
}
564 
/**
 * iavf_if_suspend - called by iflib to handle suspend
 * @ctx: the iflib context pointer
 *
 * Callback for the IFDI_SUSPEND iflib function.
 *
 * @returns zero or an error code on failure
 */
static int
iavf_if_suspend(if_ctx_t ctx __unused)
{
	/* No device-specific suspend work; always report success */
	return (0);
}
578 
/**
 * iavf_if_resume - called by iflib to handle resume
 * @ctx: the iflib context pointer
 *
 * Callback for the IFDI_RESUME iflib function.
 *
 * @returns zero or an error code on failure
 */
static int
iavf_if_resume(if_ctx_t ctx __unused)
{
	/* No device-specific resume work; always report success */
	return (0);
}
592 
593 /**
594  * iavf_vc_sleep_wait - Sleep for a response from a VC message
595  * @sc: device softc
596  * @op: the op code to sleep on
597  *
598  * Sleep until a response from the PF for the VC message sent by the
599  * given op.
600  *
601  * @returns zero on success, or EWOULDBLOCK if the sleep times out.
602  */
603 static int
604 iavf_vc_sleep_wait(struct iavf_sc *sc, u32 op)
605 {
606 	int error = 0;
607 
608 	IAVF_VC_LOCK_ASSERT(sc);
609 
610 	iavf_dbg_vc(sc, "Sleeping for op %b\n", op, IAVF_FLAGS);
611 
612 	error = mtx_sleep(iavf_vc_get_op_chan(sc, op),
613 	    &sc->vc_mtx, PRI_MAX, "iavf_vc", IAVF_AQ_TIMEOUT);
614 
615 	return (error);
616 }
617 
618 /**
619  * iavf_send_vc_msg_sleep - Send a virtchnl message and wait for a response
620  * @sc: device softc
621  * @op: the op code to send
622  *
623  * Send a virtchnl message to the PF, and sleep or busy wait for a response
624  * from the PF, depending on iflib context lock type.
625  *
626  * @remark this function does not wait if the device is detaching, on kernels
627  * that support indicating to the driver that the device is detaching
628  *
629  * @returns zero or an error code on failure.
630  */
631 int
632 iavf_send_vc_msg_sleep(struct iavf_sc *sc, u32 op)
633 {
634 	if_ctx_t ctx = sc->vsi.ctx;
635 	int error = 0;
636 
637 	IAVF_VC_LOCK(sc);
638 	error = iavf_vc_send_cmd(sc, op);
639 	if (error != 0) {
640 		iavf_dbg_vc(sc, "Error sending %b: %d\n", op, IAVF_FLAGS, error);
641 		goto release_lock;
642 	}
643 
644 	/* Don't wait for a response if the device is being detached. */
645 	if (!iflib_in_detach(ctx)) {
646 		error = iavf_vc_sleep_wait(sc, op);
647 		IAVF_VC_LOCK_ASSERT(sc);
648 
649 		if (error == EWOULDBLOCK)
650 			device_printf(sc->dev, "%b timed out\n", op, IAVF_FLAGS);
651 	}
652 release_lock:
653 	IAVF_VC_UNLOCK(sc);
654 	return (error);
655 }
656 
657 /**
658  * iavf_send_vc_msg - Send a virtchnl message to the PF
659  * @sc: device softc
660  * @op: the op code to send
661  *
662  * Send a virtchnl message to the PF and do not wait for a response.
663  *
664  * @returns zero on success, or an error code on failure.
665  */
666 int
667 iavf_send_vc_msg(struct iavf_sc *sc, u32 op)
668 {
669 	int error = 0;
670 
671 	error = iavf_vc_send_cmd(sc, op);
672 	if (error != 0)
673 		iavf_dbg_vc(sc, "Error sending %b: %d\n", op, IAVF_FLAGS, error);
674 
675 	return (error);
676 }
677 
678 /**
679  * iavf_init_queues - initialize Tx and Rx queues
680  * @vsi: the VSI to initialize
681  *
682  * Refresh the Tx and Rx ring contents and update the tail pointers for each
683  * queue.
684  */
685 static void
686 iavf_init_queues(struct iavf_vsi *vsi)
687 {
688 	struct iavf_tx_queue *tx_que = vsi->tx_queues;
689 	struct iavf_rx_queue *rx_que = vsi->rx_queues;
690 	struct rx_ring *rxr;
691 	uint32_t mbuf_sz;
692 
693 	mbuf_sz = iflib_get_rx_mbuf_sz(vsi->ctx);
694 	MPASS(mbuf_sz <= UINT16_MAX);
695 
696 	for (int i = 0; i < vsi->num_tx_queues; i++, tx_que++)
697 		iavf_init_tx_ring(vsi, tx_que);
698 
699 	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) {
700 		rxr = &rx_que->rxr;
701 
702 		rxr->mbuf_sz = mbuf_sz;
703 		wr32(vsi->hw, rxr->tail, 0);
704 	}
705 }
706 
/**
 * iavf_if_init - Initialize device for operation
 * @ctx: the iflib context pointer
 *
 * Initializes a device for operation. Called by iflib in response to an
 * interface up event from the stack.
 *
 * The ordering here matters: the admin queue must be alive before any VC
 * message is sent, queues must be disabled before reconfiguration, and the
 * enable-queues message is sent last.
 *
 * @remark this function does not return a value and thus cannot indicate
 * failure to initialize.
 */
static void
iavf_if_init(if_ctx_t ctx)
{
	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
	struct iavf_vsi *vsi = &sc->vsi;
	struct iavf_hw *hw = &sc->hw;
	if_t ifp = iflib_get_ifp(ctx);
	u8 tmpaddr[ETHER_ADDR_LEN];
	enum iavf_status status;
	device_t dev = sc->dev;
	int error = 0;

	INIT_DBG_IF(ifp, "begin");

	IFLIB_CTX_ASSERT(ctx);

	/* Reset failure is logged but init continues regardless */
	error = iavf_reset_complete(hw);
	if (error) {
		device_printf(sc->dev, "%s: VF reset failed\n",
		    __func__);
	}

	/* Re-initialize the admin queue if the send queue is dead */
	if (!iavf_check_asq_alive(hw)) {
		iavf_dbg_info(sc, "ASQ is not alive, re-initializing AQ\n");
		pci_enable_busmaster(dev);

		status = iavf_shutdown_adminq(hw);
		if (status != IAVF_SUCCESS) {
			device_printf(dev,
			    "%s: iavf_shutdown_adminq failed: %s\n",
			    __func__, iavf_stat_str(hw, status));
			return;
		}

		status = iavf_init_adminq(hw);
		if (status != IAVF_SUCCESS) {
			device_printf(dev,
			"%s: iavf_init_adminq failed: %s\n",
			    __func__, iavf_stat_str(hw, status));
			return;
		}
	}

	/* Make sure queues are disabled */
	iavf_disable_queues_with_retries(sc);

	/*
	 * If the stack assigned a new, valid MAC address, swap the filter:
	 * remove the old hardware MAC filter and adopt the new address.
	 */
	bcopy(if_getlladdr(ifp), tmpaddr, ETHER_ADDR_LEN);
	if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
	    (iavf_validate_mac_addr(tmpaddr) == IAVF_SUCCESS)) {
		error = iavf_del_mac_filter(sc, hw->mac.addr);
		if (error == 0)
			iavf_send_vc_msg(sc, IAVF_FLAG_AQ_DEL_MAC_FILTER);

		bcopy(tmpaddr, hw->mac.addr, ETH_ALEN);
	}

	/* (Re-)add the current MAC filter; EEXIST means it is already set */
	error = iavf_add_mac_filter(sc, hw->mac.addr, 0);
	if (!error || error == EEXIST)
		iavf_send_vc_msg(sc, IAVF_FLAG_AQ_ADD_MAC_FILTER);
	iflib_set_mac(ctx, hw->mac.addr);

	/* Prepare the queues for operation */
	iavf_init_queues(vsi);

	/* Set initial ITR values */
	iavf_configure_itr(sc);

	iavf_send_vc_msg(sc, IAVF_FLAG_AQ_CONFIGURE_QUEUES);

	/* Set up RSS */
	iavf_config_rss(sc);

	/* Map vectors */
	iavf_send_vc_msg(sc, IAVF_FLAG_AQ_MAP_VECTORS);

	/* Init SW TX ring indices */
	if (vsi->enable_head_writeback)
		iavf_init_tx_cidx(vsi);
	else
		iavf_init_tx_rsqs(vsi);

	/* Configure promiscuous mode */
	iavf_config_promisc(sc, if_getflags(ifp));

	/* Enable queues; sleep for the PF's response before declaring the
	 * device running */
	iavf_send_vc_msg_sleep(sc, IAVF_FLAG_AQ_ENABLE_QUEUES);

	iavf_set_state(&sc->state, IAVF_STATE_RUNNING);
}
806 
/**
 * iavf_if_msix_intr_assign - Assign MSI-X interrupts
 * @ctx: the iflib context pointer
 * @msix: the number of MSI-X vectors available
 *
 * Called by iflib to assign MSI-X interrupt vectors to queues. Assigns and
 * sets up vectors for each Tx and Rx queue, as well as the administrative
 * control interrupt.
 *
 * @returns zero or an error code on failure
 */
static int
iavf_if_msix_intr_assign(if_ctx_t ctx, int msix __unused)
{
	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
	struct iavf_vsi *vsi = &sc->vsi;
	struct iavf_rx_queue *rx_que = vsi->rx_queues;
	struct iavf_tx_queue *tx_que = vsi->tx_queues;
	int err, i, rid, vector = 0;
	char buf[16];

	MPASS(vsi->shared->isc_nrxqsets > 0);
	MPASS(vsi->shared->isc_ntxqsets > 0);

	/* Admin Que is vector 0*/
	rid = vector + 1;
	err = iflib_irq_alloc_generic(ctx, &vsi->irq, rid, IFLIB_INTR_ADMIN,
	    iavf_msix_adminq, sc, 0, "aq");
	if (err) {
		iflib_irq_free(ctx, &vsi->irq);
		device_printf(iflib_get_dev(ctx),
		    "Failed to register Admin Que handler");
		return (err);
	}

	/* Now set up the stations (Rx queue interrupts, vectors 1..n) */
	for (i = 0, vector = 1; i < vsi->shared->isc_nrxqsets; i++, vector++, rx_que++) {
		rid = vector + 1;

		snprintf(buf, sizeof(buf), "rxq%d", i);
		err = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
		    IFLIB_INTR_RXTX, iavf_msix_que, rx_que, rx_que->rxr.me, buf);
		if (err) {
			device_printf(iflib_get_dev(ctx),
			    "Failed to allocate queue RX int vector %d, err: %d\n", i, err);
			/* NOTE(review): this count includes queue i, whose
			 * irq allocation just failed, so the fail path below
			 * also calls iflib_irq_free() on it — presumably
			 * iflib tolerates freeing an unallocated irq;
			 * confirm. */
			vsi->num_rx_queues = i + 1;
			goto fail;
		}
		rx_que->msix = vector;
	}

	bzero(buf, sizeof(buf));

	/* Tx queues share the Rx queue vectors (softirq only, no new rid) */
	for (i = 0; i < vsi->shared->isc_ntxqsets; i++, tx_que++) {
		snprintf(buf, sizeof(buf), "txq%d", i);
		iflib_softirq_alloc_generic(ctx,
		    &vsi->rx_queues[i % vsi->shared->isc_nrxqsets].que_irq,
		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);

		/* Tx queue i pairs with Rx vector (i mod nrxqsets) + 1 */
		tx_que->msix = (i % vsi->shared->isc_nrxqsets) + 1;
	}

	return (0);
fail:
	iflib_irq_free(ctx, &vsi->irq);
	rx_que = vsi->rx_queues;
	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
		iflib_irq_free(ctx, &rx_que->que_irq);
	return (err);
}
877 
878 /**
879  * iavf_if_enable_intr - Enable all interrupts for a device
880  * @ctx: the iflib context pointer
881  *
882  * Called by iflib to request enabling all interrupts.
883  */
884 static void
885 iavf_if_enable_intr(if_ctx_t ctx)
886 {
887 	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
888 	struct iavf_vsi *vsi = &sc->vsi;
889 
890 	iavf_enable_intr(vsi);
891 }
892 
893 /**
894  * iavf_if_disable_intr - Disable all interrupts for a device
895  * @ctx: the iflib context pointer
896  *
897  * Called by iflib to request disabling all interrupts.
898  */
899 static void
900 iavf_if_disable_intr(if_ctx_t ctx)
901 {
902 	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
903 	struct iavf_vsi *vsi = &sc->vsi;
904 
905 	iavf_disable_intr(vsi);
906 }
907 
908 /**
909  * iavf_if_rx_queue_intr_enable - Enable one Rx queue interrupt
910  * @ctx: the iflib context pointer
911  * @rxqid: Rx queue index
912  *
913  * Enables the interrupt associated with a specified Rx queue.
914  *
915  * @returns zero
916  */
917 static int
918 iavf_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
919 {
920 	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
921 	struct iavf_vsi *vsi = &sc->vsi;
922 	struct iavf_hw *hw = vsi->hw;
923 	struct iavf_rx_queue *rx_que = &vsi->rx_queues[rxqid];
924 
925 	iavf_enable_queue_irq(hw, rx_que->msix - 1);
926 	return (0);
927 }
928 
929 /**
930  * iavf_if_tx_queue_intr_enable - Enable one Tx queue interrupt
931  * @ctx: the iflib context pointer
932  * @txqid: Tx queue index
933  *
934  * Enables the interrupt associated with a specified Tx queue.
935  *
936  * @returns zero
937  */
938 static int
939 iavf_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
940 {
941 	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
942 	struct iavf_vsi *vsi = &sc->vsi;
943 	struct iavf_hw *hw = vsi->hw;
944 	struct iavf_tx_queue *tx_que = &vsi->tx_queues[txqid];
945 
946 	iavf_enable_queue_irq(hw, tx_que->msix - 1);
947 	return (0);
948 }
949 
950 /**
951  * iavf_if_tx_queues_alloc - Allocate Tx queue memory
952  * @ctx: the iflib context pointer
953  * @vaddrs: Array of virtual addresses
954  * @paddrs: Array of physical addresses
955  * @ntxqs: the number of Tx queues per group (should always be 1)
956  * @ntxqsets: the number of Tx queues
957  *
958  * Allocates memory for the specified number of Tx queues. This includes
959  * memory for the queue structures and the report status array for the queues.
960  * The virtual and physical addresses are saved for later use during
961  * initialization.
962  *
963  * @returns zero or a non-zero error code on failure
964  */
965 static int
966 iavf_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets)
967 {
968 	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
969 	struct iavf_vsi *vsi = &sc->vsi;
970 	if_softc_ctx_t scctx = vsi->shared;
971 	struct iavf_tx_queue *que;
972 	int i, j, error = 0;
973 
974 	MPASS(scctx->isc_ntxqsets > 0);
975 	MPASS(ntxqs == 1);
976 	MPASS(scctx->isc_ntxqsets == ntxqsets);
977 
978 	/* Allocate queue structure memory */
979 	if (!(vsi->tx_queues =
980 	    (struct iavf_tx_queue *)malloc(sizeof(struct iavf_tx_queue) *ntxqsets, M_IAVF, M_NOWAIT | M_ZERO))) {
981 		device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n");
982 		return (ENOMEM);
983 	}
984 
985 	for (i = 0, que = vsi->tx_queues; i < ntxqsets; i++, que++) {
986 		struct tx_ring *txr = &que->txr;
987 
988 		txr->me = i;
989 		que->vsi = vsi;
990 
991 		if (!vsi->enable_head_writeback) {
992 			/* Allocate report status array */
993 			if (!(txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IAVF, M_NOWAIT))) {
994 				device_printf(iflib_get_dev(ctx), "failed to allocate tx_rsq memory\n");
995 				error = ENOMEM;
996 				goto fail;
997 			}
998 			/* Init report status array */
999 			for (j = 0; j < scctx->isc_ntxd[0]; j++)
1000 				txr->tx_rsq[j] = QIDX_INVALID;
1001 		}
1002 		/* get the virtual and physical address of the hardware queues */
1003 		txr->tail = IAVF_QTX_TAIL1(txr->me);
1004 		txr->tx_base = (struct iavf_tx_desc *)vaddrs[i * ntxqs];
1005 		txr->tx_paddr = paddrs[i * ntxqs];
1006 		txr->que = que;
1007 	}
1008 
1009 	return (0);
1010 fail:
1011 	iavf_if_queues_free(ctx);
1012 	return (error);
1013 }
1014 
1015 /**
1016  * iavf_if_rx_queues_alloc - Allocate Rx queue memory
1017  * @ctx: the iflib context pointer
1018  * @vaddrs: Array of virtual addresses
1019  * @paddrs: Array of physical addresses
1020  * @nrxqs: number of Rx queues per group (should always be 1)
1021  * @nrxqsets: the number of Rx queues to allocate
1022  *
1023  * Called by iflib to allocate driver memory for a number of Rx queues.
1024  * Allocates memory for the drivers private Rx queue data structure, and saves
1025  * the physical and virtual addresses for later use.
1026  *
1027  * @returns zero or a non-zero error code on failure
1028  */
1029 static int
1030 iavf_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets)
1031 {
1032 	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
1033 	struct iavf_vsi *vsi = &sc->vsi;
1034 	struct iavf_rx_queue *que;
1035 	int i, error = 0;
1036 
1037 #ifdef INVARIANTS
1038 	if_softc_ctx_t scctx = vsi->shared;
1039 	MPASS(scctx->isc_nrxqsets > 0);
1040 	MPASS(nrxqs == 1);
1041 	MPASS(scctx->isc_nrxqsets == nrxqsets);
1042 #endif
1043 
1044 	/* Allocate queue structure memory */
1045 	if (!(vsi->rx_queues =
1046 	    (struct iavf_rx_queue *) malloc(sizeof(struct iavf_rx_queue) *
1047 	    nrxqsets, M_IAVF, M_NOWAIT | M_ZERO))) {
1048 		device_printf(iflib_get_dev(ctx), "Unable to allocate RX ring memory\n");
1049 		error = ENOMEM;
1050 		goto fail;
1051 	}
1052 
1053 	for (i = 0, que = vsi->rx_queues; i < nrxqsets; i++, que++) {
1054 		struct rx_ring *rxr = &que->rxr;
1055 
1056 		rxr->me = i;
1057 		que->vsi = vsi;
1058 
1059 		/* get the virtual and physical address of the hardware queues */
1060 		rxr->tail = IAVF_QRX_TAIL1(rxr->me);
1061 		rxr->rx_base = (union iavf_rx_desc *)vaddrs[i * nrxqs];
1062 		rxr->rx_paddr = paddrs[i * nrxqs];
1063 		rxr->que = que;
1064 	}
1065 
1066 	return (0);
1067 fail:
1068 	iavf_if_queues_free(ctx);
1069 	return (error);
1070 }
1071 
1072 /**
1073  * iavf_if_queues_free - Free driver queue memory
1074  * @ctx: the iflib context pointer
1075  *
1076  * Called by iflib to release memory allocated by the driver when setting up
1077  * Tx and Rx queues.
1078  *
1079  * @remark The ordering of this function and iavf_if_detach is not guaranteed.
1080  * It is possible for this function to be called either before or after the
1081  * iavf_if_detach. Thus, care must be taken to ensure that either ordering of
1082  * iavf_if_detach and iavf_if_queues_free is safe.
1083  */
1084 static void
1085 iavf_if_queues_free(if_ctx_t ctx)
1086 {
1087 	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
1088 	struct iavf_vsi *vsi = &sc->vsi;
1089 
1090 	if (!vsi->enable_head_writeback) {
1091 		struct iavf_tx_queue *que;
1092 		int i = 0;
1093 
1094 		for (i = 0, que = vsi->tx_queues; i < vsi->shared->isc_ntxqsets; i++, que++) {
1095 			struct tx_ring *txr = &que->txr;
1096 			if (txr->tx_rsq != NULL) {
1097 				free(txr->tx_rsq, M_IAVF);
1098 				txr->tx_rsq = NULL;
1099 			}
1100 		}
1101 	}
1102 
1103 	if (vsi->tx_queues != NULL) {
1104 		free(vsi->tx_queues, M_IAVF);
1105 		vsi->tx_queues = NULL;
1106 	}
1107 	if (vsi->rx_queues != NULL) {
1108 		free(vsi->rx_queues, M_IAVF);
1109 		vsi->rx_queues = NULL;
1110 	}
1111 }
1112 
1113 /**
1114  * iavf_check_aq_errors - Check for AdminQ errors
1115  * @sc: device softc
1116  *
1117  * Check the AdminQ registers for errors, and determine whether or not a reset
1118  * may be required to resolve them.
1119  *
1120  * @post if there are errors, the VF device will be stopped and a reset will
1121  * be requested.
1122  *
1123  * @returns zero if there are no issues, EBUSY if the device is resetting,
1124  * or EIO if there are any AQ errors.
1125  */
1126 static int
1127 iavf_check_aq_errors(struct iavf_sc *sc)
1128 {
1129 	struct iavf_hw *hw = &sc->hw;
1130 	device_t dev = sc->dev;
1131 	u32 reg, oldreg;
1132 	u8 aq_error = false;
1133 
1134 	oldreg = reg = rd32(hw, hw->aq.arq.len);
1135 
1136 	/* Check if device is in reset */
1137 	if (reg == 0xdeadbeef || reg == 0xffffffff) {
1138 		device_printf(dev, "VF in reset\n");
1139 		return (EBUSY);
1140 	}
1141 
1142 	/* Check for Admin queue errors */
1143 	if (reg & IAVF_VF_ARQLEN1_ARQVFE_MASK) {
1144 		device_printf(dev, "ARQ VF Error detected\n");
1145 		reg &= ~IAVF_VF_ARQLEN1_ARQVFE_MASK;
1146 		aq_error = true;
1147 	}
1148 	if (reg & IAVF_VF_ARQLEN1_ARQOVFL_MASK) {
1149 		device_printf(dev, "ARQ Overflow Error detected\n");
1150 		reg &= ~IAVF_VF_ARQLEN1_ARQOVFL_MASK;
1151 		aq_error = true;
1152 	}
1153 	if (reg & IAVF_VF_ARQLEN1_ARQCRIT_MASK) {
1154 		device_printf(dev, "ARQ Critical Error detected\n");
1155 		reg &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK;
1156 		aq_error = true;
1157 	}
1158 	if (oldreg != reg)
1159 		wr32(hw, hw->aq.arq.len, reg);
1160 
1161 	oldreg = reg = rd32(hw, hw->aq.asq.len);
1162 	if (reg & IAVF_VF_ATQLEN1_ATQVFE_MASK) {
1163 		device_printf(dev, "ASQ VF Error detected\n");
1164 		reg &= ~IAVF_VF_ATQLEN1_ATQVFE_MASK;
1165 		aq_error = true;
1166 	}
1167 	if (reg & IAVF_VF_ATQLEN1_ATQOVFL_MASK) {
1168 		device_printf(dev, "ASQ Overflow Error detected\n");
1169 		reg &= ~IAVF_VF_ATQLEN1_ATQOVFL_MASK;
1170 		aq_error = true;
1171 	}
1172 	if (reg & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
1173 		device_printf(dev, "ASQ Critical Error detected\n");
1174 		reg &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK;
1175 		aq_error = true;
1176 	}
1177 	if (oldreg != reg)
1178 		wr32(hw, hw->aq.asq.len, reg);
1179 
1180 	return (aq_error ? EIO : 0);
1181 }
1182 
1183 /**
1184  * iavf_process_adminq - Process adminq responses from the PF
1185  * @sc: device softc
1186  * @pending: output parameter indicating how many messages remain
1187  *
1188  * Process the adminq to handle replies from the PF over the virtchnl
1189  * connection.
1190  *
1191  * @returns zero or an iavf_status code on failure
1192  */
1193 static enum iavf_status
1194 iavf_process_adminq(struct iavf_sc *sc, u16 *pending)
1195 {
1196 	enum iavf_status status = IAVF_SUCCESS;
1197 	struct iavf_arq_event_info event;
1198 	struct iavf_hw *hw = &sc->hw;
1199 	struct virtchnl_msg *v_msg;
1200 	int error = 0, loop = 0;
1201 	u32 reg;
1202 
1203 	if (iavf_test_state(&sc->state, IAVF_STATE_RESET_PENDING)) {
1204 		status = IAVF_ERR_ADMIN_QUEUE_ERROR;
1205 		goto reenable_interrupt;
1206 	}
1207 
1208 	error = iavf_check_aq_errors(sc);
1209 	if (error) {
1210 		status = IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
1211 		goto reenable_interrupt;
1212 	}
1213 
1214 	event.buf_len = IAVF_AQ_BUF_SZ;
1215         event.msg_buf = sc->aq_buffer;
1216 	bzero(event.msg_buf, IAVF_AQ_BUF_SZ);
1217 	v_msg = (struct virtchnl_msg *)&event.desc;
1218 
1219 	IAVF_VC_LOCK(sc);
1220 	/* clean and process any events */
1221 	do {
1222 		status = iavf_clean_arq_element(hw, &event, pending);
1223 		/*
1224 		 * Also covers normal case when iavf_clean_arq_element()
1225 		 * returns "IAVF_ERR_ADMIN_QUEUE_NO_WORK"
1226 		 */
1227 		if (status)
1228 			break;
1229 		iavf_vc_completion(sc, v_msg->v_opcode,
1230 		    v_msg->v_retval, event.msg_buf, event.msg_len);
1231 		bzero(event.msg_buf, IAVF_AQ_BUF_SZ);
1232 	} while (*pending && (loop++ < IAVF_ADM_LIMIT));
1233 	IAVF_VC_UNLOCK(sc);
1234 
1235 reenable_interrupt:
1236 	/* Re-enable admin queue interrupt cause */
1237 	reg = rd32(hw, IAVF_VFINT_ICR0_ENA1);
1238 	reg |= IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK;
1239 	wr32(hw, IAVF_VFINT_ICR0_ENA1, reg);
1240 
1241 	return (status);
1242 }
1243 
1244 /**
1245  * iavf_if_update_admin_status - Administrative status task
1246  * @ctx: iflib context
1247  *
1248  * Called by iflib to handle administrative status events. The iavf driver
1249  * uses this to process the adminq virtchnl messages outside of interrupt
1250  * context.
1251  */
1252 static void
1253 iavf_if_update_admin_status(if_ctx_t ctx)
1254 {
1255 	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
1256 	struct iavf_hw *hw = &sc->hw;
1257 	u16 pending = 0;
1258 
1259 	iavf_process_adminq(sc, &pending);
1260 	iavf_update_link_status(sc);
1261 
1262 	/*
1263 	 * If there are still messages to process, reschedule.
1264 	 * Otherwise, re-enable the Admin Queue interrupt.
1265 	 */
1266 	if (pending > 0)
1267 		iflib_admin_intr_deferred(ctx);
1268 	else
1269 		iavf_enable_adminq_irq(hw);
1270 }
1271 
1272 /**
1273  * iavf_if_multi_set - Set multicast address filters
1274  * @ctx: iflib context
1275  *
1276  * Called by iflib to update the current list of multicast filters for the
1277  * device.
1278  */
1279 static void
1280 iavf_if_multi_set(if_ctx_t ctx)
1281 {
1282 	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
1283 
1284 	iavf_multi_set(sc);
1285 }
1286 
1287 /**
1288  * iavf_if_mtu_set - Set the device MTU
1289  * @ctx: iflib context
1290  * @mtu: MTU value to set
1291  *
1292  * Called by iflib to set the device MTU.
1293  *
1294  * @returns zero on success, or EINVAL if the MTU is invalid.
1295  */
1296 static int
1297 iavf_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
1298 {
1299 	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
1300 	struct iavf_vsi *vsi = &sc->vsi;
1301 
1302 	IOCTL_DEBUGOUT("ioctl: SiOCSIFMTU (Set Interface MTU)");
1303 	if (mtu < IAVF_MIN_MTU || mtu > IAVF_MAX_MTU) {
1304 		device_printf(sc->dev, "mtu %d is not in valid range [%d-%d]\n",
1305 		    mtu, IAVF_MIN_MTU, IAVF_MAX_MTU);
1306 		return (EINVAL);
1307 	}
1308 
1309 	vsi->shared->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
1310 		ETHER_VLAN_ENCAP_LEN;
1311 
1312 	return (0);
1313 }
1314 
1315 /**
1316  * iavf_if_media_status - Report current media status
1317  * @ctx: iflib context
1318  * @ifmr: ifmedia request structure
1319  *
1320  * Called by iflib to report the current media status in the ifmr.
1321  */
1322 static void
1323 iavf_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
1324 {
1325 	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
1326 
1327 	iavf_media_status_common(sc, ifmr);
1328 }
1329 
1330 /**
1331  * iavf_if_media_change - Change the current media settings
1332  * @ctx: iflib context
1333  *
1334  * Called by iflib to change the current media settings.
1335  *
1336  * @returns zero on success, or an error code on failure.
1337  */
1338 static int
1339 iavf_if_media_change(if_ctx_t ctx)
1340 {
1341 	return iavf_media_change_common(iflib_get_ifp(ctx));
1342 }
1343 
1344 /**
1345  * iavf_if_promisc_set - Set device promiscuous mode
1346  * @ctx: iflib context
1347  * @flags: promiscuous configuration
1348  *
1349  * Called by iflib to request that the device enter promiscuous mode.
1350  *
1351  * @returns zero on success, or an error code on failure.
1352  */
1353 static int
1354 iavf_if_promisc_set(if_ctx_t ctx, int flags)
1355 {
1356 	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
1357 
1358 	return iavf_config_promisc(sc, flags);
1359 }
1360 
1361 /**
1362  * iavf_if_timer - Periodic timer called by iflib
1363  * @ctx: iflib context
1364  * @qid: The queue being triggered
1365  *
1366  * Called by iflib periodically as a timer task, so that the driver can handle
1367  * periodic work.
1368  *
1369  * @remark this timer is only called while the interface is up, even if
1370  * IFLIB_ADMIN_ALWAYS_RUN is set.
1371  */
1372 static void
1373 iavf_if_timer(if_ctx_t ctx, uint16_t qid)
1374 {
1375 	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
1376 	struct iavf_hw *hw = &sc->hw;
1377 	u32 val;
1378 
1379 	if (qid != 0)
1380 		return;
1381 
1382 	/* Check for when PF triggers a VF reset */
1383 	val = rd32(hw, IAVF_VFGEN_RSTAT) &
1384 	    IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
1385 	if (val != VIRTCHNL_VFR_VFACTIVE
1386 	    && val != VIRTCHNL_VFR_COMPLETED) {
1387 		iavf_dbg_info(sc, "reset in progress! (%d)\n", val);
1388 		return;
1389 	}
1390 
1391 	/* Fire off the adminq task */
1392 	iflib_admin_intr_deferred(ctx);
1393 
1394 	/* Update stats */
1395 	iavf_request_stats(sc);
1396 }
1397 
1398 /**
1399  * iavf_if_vlan_register - Register a VLAN
1400  * @ctx: iflib context
1401  * @vtag: the VLAN to register
1402  *
1403  * Register a VLAN filter for a given vtag.
1404  */
1405 static void
1406 iavf_if_vlan_register(if_ctx_t ctx, u16 vtag)
1407 {
1408 	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
1409 	struct iavf_vsi *vsi = &sc->vsi;
1410 
1411 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
1412 		return;
1413 
1414 	/* Add VLAN 0 to list, for untagged traffic */
1415 	if (vsi->num_vlans == 0)
1416 		iavf_add_vlan_filter(sc, 0);
1417 
1418 	iavf_add_vlan_filter(sc, vtag);
1419 
1420 	++vsi->num_vlans;
1421 
1422 	iavf_send_vc_msg(sc, IAVF_FLAG_AQ_ADD_VLAN_FILTER);
1423 }
1424 
1425 /**
1426  * iavf_if_vlan_unregister - Unregister a VLAN
1427  * @ctx: iflib context
1428  * @vtag: the VLAN to remove
1429  *
1430  * Unregister (remove) a VLAN filter for the given vtag.
1431  */
1432 static void
1433 iavf_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
1434 {
1435 	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
1436 	struct iavf_vsi *vsi = &sc->vsi;
1437 	int i = 0;
1438 
1439 	if ((vtag == 0) || (vtag > 4095) || (vsi->num_vlans == 0))	/* Invalid */
1440 		return;
1441 
1442 	i = iavf_mark_del_vlan_filter(sc, vtag);
1443 	vsi->num_vlans -= i;
1444 
1445 	/* Remove VLAN filter 0 if the last VLAN is being removed */
1446 	if (vsi->num_vlans == 0)
1447 		i += iavf_mark_del_vlan_filter(sc, 0);
1448 
1449 	if (i > 0)
1450 		iavf_send_vc_msg(sc, IAVF_FLAG_AQ_DEL_VLAN_FILTER);
1451 }
1452 
1453 /**
1454  * iavf_if_get_counter - Get network statistic counters
1455  * @ctx: iflib context
1456  * @cnt: The counter to obtain
1457  *
1458  * Called by iflib to obtain the value of the specified counter.
1459  *
1460  * @returns the uint64_t counter value.
1461  */
1462 static uint64_t
1463 iavf_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1464 {
1465 	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
1466 	struct iavf_vsi *vsi = &sc->vsi;
1467 	if_t ifp = iflib_get_ifp(ctx);
1468 
1469 	switch (cnt) {
1470 	case IFCOUNTER_IPACKETS:
1471 		return (vsi->ipackets);
1472 	case IFCOUNTER_IERRORS:
1473 		return (vsi->ierrors);
1474 	case IFCOUNTER_OPACKETS:
1475 		return (vsi->opackets);
1476 	case IFCOUNTER_OERRORS:
1477 		return (vsi->oerrors);
1478 	case IFCOUNTER_COLLISIONS:
1479 		/* Collisions are by standard impossible in 40G/10G Ethernet */
1480 		return (0);
1481 	case IFCOUNTER_IBYTES:
1482 		return (vsi->ibytes);
1483 	case IFCOUNTER_OBYTES:
1484 		return (vsi->obytes);
1485 	case IFCOUNTER_IMCASTS:
1486 		return (vsi->imcasts);
1487 	case IFCOUNTER_OMCASTS:
1488 		return (vsi->omcasts);
1489 	case IFCOUNTER_IQDROPS:
1490 		return (vsi->iqdrops);
1491 	case IFCOUNTER_OQDROPS:
1492 		return (vsi->oqdrops);
1493 	case IFCOUNTER_NOPROTO:
1494 		return (vsi->noproto);
1495 	default:
1496 		return (if_get_counter_default(ifp, cnt));
1497 	}
1498 }
1499 
1500 /**
1501  * iavf_free_pci_resources - Free PCI resources
1502  * @sc: device softc
1503  *
1504  * Called to release the PCI resources allocated during attach. May be called
1505  * in the error flow of attach_pre, or during detach as part of cleanup.
1506  */
1507 static void
1508 iavf_free_pci_resources(struct iavf_sc *sc)
1509 {
1510 	struct iavf_vsi		*vsi = &sc->vsi;
1511 	struct iavf_rx_queue	*rx_que = vsi->rx_queues;
1512 	device_t                dev = sc->dev;
1513 
1514 	/* We may get here before stations are set up */
1515 	if (rx_que == NULL)
1516 		goto early;
1517 
1518 	/* Release all interrupts */
1519 	iflib_irq_free(vsi->ctx, &vsi->irq);
1520 
1521 	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
1522 		iflib_irq_free(vsi->ctx, &rx_que->que_irq);
1523 
1524 early:
1525 	if (sc->pci_mem != NULL)
1526 		bus_release_resource(dev, SYS_RES_MEMORY,
1527 		    rman_get_rid(sc->pci_mem), sc->pci_mem);
1528 }
1529 
1530 /**
1531  * iavf_setup_interface - Setup the device interface
1532  * @sc: device softc
1533  *
1534  * Called to setup some device interface settings, such as the ifmedia
1535  * structure.
1536  */
1537 static void
1538 iavf_setup_interface(struct iavf_sc *sc)
1539 {
1540 	struct iavf_vsi *vsi = &sc->vsi;
1541 	if_ctx_t ctx = vsi->ctx;
1542 	if_t ifp = iflib_get_ifp(ctx);
1543 
1544 	iavf_dbg_init(sc, "begin\n");
1545 
1546 	vsi->shared->isc_max_frame_size =
1547 	    if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN
1548 	    + ETHER_VLAN_ENCAP_LEN;
1549 
1550 	iavf_set_initial_baudrate(ifp);
1551 
1552 	ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1553 	ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);
1554 }
1555 
1556 /**
1557  * iavf_msix_adminq - Admin Queue interrupt handler
1558  * @arg: void pointer to the device softc
1559  *
1560  * Interrupt handler for the non-queue interrupt causes. Primarily this will
1561  * be the adminq interrupt, but also includes other miscellaneous causes.
1562  *
1563  * @returns FILTER_SCHEDULE_THREAD if the admin task needs to be run, otherwise
1564  * returns FITLER_HANDLED.
1565  */
1566 static int
1567 iavf_msix_adminq(void *arg)
1568 {
1569 	struct iavf_sc	*sc = (struct iavf_sc *)arg;
1570 	struct iavf_hw	*hw = &sc->hw;
1571 	u32		reg, mask;
1572 
1573 	++sc->admin_irq;
1574 
1575 	if (!iavf_test_state(&sc->state, IAVF_STATE_INITIALIZED))
1576 		return (FILTER_HANDLED);
1577 
1578         reg = rd32(hw, IAVF_VFINT_ICR01);
1579 	/*
1580 	 * For masking off interrupt causes that need to be handled before
1581 	 * they can be re-enabled
1582 	 */
1583         mask = rd32(hw, IAVF_VFINT_ICR0_ENA1);
1584 
1585 	/* Check on the cause */
1586 	if (reg & IAVF_VFINT_ICR01_ADMINQ_MASK) {
1587 		mask &= ~IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK;
1588 
1589 		/* Process messages outside of the iflib context lock */
1590 		taskqueue_enqueue(sc->vc_tq, &sc->vc_task);
1591 	}
1592 
1593 	wr32(hw, IAVF_VFINT_ICR0_ENA1, mask);
1594 	iavf_enable_adminq_irq(hw);
1595 
1596 	return (FILTER_HANDLED);
1597 }
1598 
1599 /**
1600  * iavf_enable_intr - Enable device interrupts
1601  * @vsi: the main VSI
1602  *
1603  * Called to enable all queue interrupts.
1604  */
1605 void
1606 iavf_enable_intr(struct iavf_vsi *vsi)
1607 {
1608 	struct iavf_hw *hw = vsi->hw;
1609 	struct iavf_rx_queue *que = vsi->rx_queues;
1610 
1611 	iavf_enable_adminq_irq(hw);
1612 	for (int i = 0; i < vsi->num_rx_queues; i++, que++)
1613 		iavf_enable_queue_irq(hw, que->rxr.me);
1614 }
1615 
1616 /**
1617  * iavf_disable_intr - Disable device interrupts
1618  * @vsi: the main VSI
1619  *
1620  * Called to disable all interrupts
1621  *
1622  * @remark we never disable the admin status interrupt.
1623  */
1624 void
1625 iavf_disable_intr(struct iavf_vsi *vsi)
1626 {
1627         struct iavf_hw *hw = vsi->hw;
1628         struct iavf_rx_queue *que = vsi->rx_queues;
1629 
1630 	for (int i = 0; i < vsi->num_rx_queues; i++, que++)
1631 		iavf_disable_queue_irq(hw, que->rxr.me);
1632 }
1633 
1634 /**
1635  * iavf_enable_queue_irq - Enable IRQ register for a queue interrupt
1636  * @hw: hardware structure
1637  * @id: IRQ vector to enable
1638  *
1639  * Writes the IAVF_VFINT_DYN_CTLN1 register to enable a given IRQ interrupt.
1640  */
1641 static void
1642 iavf_enable_queue_irq(struct iavf_hw *hw, int id)
1643 {
1644 	u32		reg;
1645 
1646 	reg = IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
1647 	    IAVF_VFINT_DYN_CTLN1_CLEARPBA_MASK |
1648 	    IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK;
1649 	wr32(hw, IAVF_VFINT_DYN_CTLN1(id), reg);
1650 }
1651 
1652 /**
1653  * iavf_disable_queue_irq - Disable IRQ register for a queue interrupt
1654  * @hw: hardware structure
1655  * @id: IRQ vector to disable
1656  *
1657  * Writes the IAVF_VFINT_DYN_CTLN1 register to disable a given IRQ interrupt.
1658  */
1659 static void
1660 iavf_disable_queue_irq(struct iavf_hw *hw, int id)
1661 {
1662 	wr32(hw, IAVF_VFINT_DYN_CTLN1(id),
1663 	    IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
1664 	rd32(hw, IAVF_VFGEN_RSTAT);
1665 }
1666 
1667 /**
1668  * iavf_configure_itr - Get initial ITR values from tunable values.
1669  * @sc: device softc
1670  *
1671  * Load the initial tunable values for the ITR configuration.
1672  */
1673 static void
1674 iavf_configure_itr(struct iavf_sc *sc)
1675 {
1676 	iavf_configure_tx_itr(sc);
1677 	iavf_configure_rx_itr(sc);
1678 }
1679 
1680 /**
1681  * iavf_set_queue_rx_itr - Update Rx ITR value
1682  * @que: Rx queue to update
1683  *
1684  * Provide a update to the queue RX interrupt moderation value.
1685  */
1686 static void
1687 iavf_set_queue_rx_itr(struct iavf_rx_queue *que)
1688 {
1689 	struct iavf_vsi	*vsi = que->vsi;
1690 	struct iavf_hw	*hw = vsi->hw;
1691 	struct rx_ring	*rxr = &que->rxr;
1692 
1693 	/* Idle, do nothing */
1694 	if (rxr->bytes == 0)
1695 		return;
1696 
1697 	/* Update the hardware if needed */
1698 	if (rxr->itr != vsi->rx_itr_setting) {
1699 		rxr->itr = vsi->rx_itr_setting;
1700 		wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR,
1701 		    que->rxr.me), rxr->itr);
1702 	}
1703 }
1704 
1705 /**
1706  * iavf_msix_que - Main Rx queue interrupt handler
1707  * @arg: void pointer to the Rx queue
1708  *
1709  * Main MSI-X interrupt handler for Rx queue interrupts
1710  *
1711  * @returns FILTER_SCHEDULE_THREAD if the main thread for Rx needs to run,
1712  * otherwise returns FILTER_HANDLED.
1713  */
1714 static int
1715 iavf_msix_que(void *arg)
1716 {
1717 	struct iavf_rx_queue *rx_que = (struct iavf_rx_queue *)arg;
1718 	struct iavf_sc *sc = rx_que->vsi->back;
1719 
1720 	++rx_que->irqs;
1721 
1722 	if (!iavf_test_state(&sc->state, IAVF_STATE_RUNNING))
1723 		return (FILTER_HANDLED);
1724 
1725 	iavf_set_queue_rx_itr(rx_que);
1726 
1727 	return (FILTER_SCHEDULE_THREAD);
1728 }
1729 
1730 /**
1731  * iavf_update_link_status - Update iflib Link status
1732  * @sc: device softc
1733  *
1734  * Notify the iflib stack of changes in link status. Called after the device
1735  * receives a virtchnl message indicating a change in link status.
1736  */
1737 void
1738 iavf_update_link_status(struct iavf_sc *sc)
1739 {
1740 	struct iavf_vsi *vsi = &sc->vsi;
1741 	u64 baudrate;
1742 
1743 	if (sc->link_up){
1744 		if (vsi->link_active == FALSE) {
1745 			vsi->link_active = TRUE;
1746 			baudrate = iavf_baudrate_from_link_speed(sc);
1747 			iavf_dbg_info(sc, "baudrate: %llu\n", (unsigned long long)baudrate);
1748 			iflib_link_state_change(vsi->ctx, LINK_STATE_UP, baudrate);
1749 		}
1750 	} else { /* Link down */
1751 		if (vsi->link_active == TRUE) {
1752 			vsi->link_active = FALSE;
1753 			iflib_link_state_change(vsi->ctx, LINK_STATE_DOWN, 0);
1754 		}
1755 	}
1756 }
1757 
1758 /**
1759  * iavf_stop - Stop the interface
1760  * @sc: device softc
1761  *
1762  * This routine disables all traffic on the adapter by disabling interrupts
1763  * and sending a message to the PF to tell it to stop the hardware
1764  * Tx/Rx LAN queues.
1765  */
1766 static void
1767 iavf_stop(struct iavf_sc *sc)
1768 {
1769 	iavf_clear_state(&sc->state, IAVF_STATE_RUNNING);
1770 
1771 	iavf_disable_intr(&sc->vsi);
1772 
1773 	iavf_disable_queues_with_retries(sc);
1774 }
1775 
1776 /**
1777  * iavf_if_stop - iflib stop handler
1778  * @ctx: iflib context
1779  *
1780  * Call iavf_stop to stop the interface.
1781  */
1782 static void
1783 iavf_if_stop(if_ctx_t ctx)
1784 {
1785 	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
1786 
1787 	iavf_stop(sc);
1788 }
1789 
1790 /**
1791  * iavf_del_mac_filter - Delete a MAC filter
1792  * @sc: device softc
1793  * @macaddr: MAC address to remove
1794  *
1795  * Marks a MAC filter for deletion.
1796  *
1797  * @returns zero if the filter existed, or ENOENT if it did not.
1798  */
1799 static int
1800 iavf_del_mac_filter(struct iavf_sc *sc, u8 *macaddr)
1801 {
1802 	struct iavf_mac_filter	*f;
1803 
1804 	f = iavf_find_mac_filter(sc, macaddr);
1805 	if (f == NULL)
1806 		return (ENOENT);
1807 
1808 	f->flags |= IAVF_FILTER_DEL;
1809 	return (0);
1810 }
1811 
1812 /**
1813  * iavf_init_tx_rsqs - Initialize Report Status array
1814  * @vsi: the main VSI
1815  *
1816  * Set the Report Status queue fields to zero in order to initialize the
1817  * queues for transmit.
1818  */
1819 void
1820 iavf_init_tx_rsqs(struct iavf_vsi *vsi)
1821 {
1822 	if_softc_ctx_t scctx = vsi->shared;
1823 	struct iavf_tx_queue *tx_que;
1824 	int i, j;
1825 
1826 	for (i = 0, tx_que = vsi->tx_queues; i < vsi->num_tx_queues; i++, tx_que++) {
1827 		struct tx_ring *txr = &tx_que->txr;
1828 
1829 		txr->tx_rs_cidx = txr->tx_rs_pidx;
1830 
1831 		/* Initialize the last processed descriptor to be the end of
1832 		 * the ring, rather than the start, so that we avoid an
1833 		 * off-by-one error when calculating how many descriptors are
1834 		 * done in the credits_update function.
1835 		 */
1836 		txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
1837 
1838 		for (j = 0; j < scctx->isc_ntxd[0]; j++)
1839 			txr->tx_rsq[j] = QIDX_INVALID;
1840 	}
1841 }
1842 
1843 /**
1844  * iavf_init_tx_cidx - Initialize Tx cidx values
1845  * @vsi: the main VSI
1846  *
1847  * Initialize the tx_cidx_processed values for Tx queues in order to
1848  * initialize the Tx queues for transmit.
1849  */
1850 void
1851 iavf_init_tx_cidx(struct iavf_vsi *vsi)
1852 {
1853 	if_softc_ctx_t scctx = vsi->shared;
1854 	struct iavf_tx_queue *tx_que;
1855 	int i;
1856 
1857 	for (i = 0, tx_que = vsi->tx_queues; i < vsi->num_tx_queues; i++, tx_que++) {
1858 		struct tx_ring *txr = &tx_que->txr;
1859 
1860 		txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
1861 	}
1862 }
1863 
1864 /**
1865  * iavf_add_device_sysctls - Add device sysctls for configuration
1866  * @sc: device softc
1867  *
1868  * Add the main sysctl nodes and sysctls for device configuration.
1869  */
1870 static void
1871 iavf_add_device_sysctls(struct iavf_sc *sc)
1872 {
1873 	struct iavf_vsi *vsi = &sc->vsi;
1874 	device_t dev = sc->dev;
1875 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1876 	struct sysctl_oid_list *debug_list;
1877 
1878 	iavf_add_device_sysctls_common(sc);
1879 
1880 	debug_list = iavf_create_debug_sysctl_tree(sc);
1881 
1882 	iavf_add_debug_sysctls_common(sc, debug_list);
1883 
1884 	SYSCTL_ADD_PROC(ctx, debug_list,
1885 	    OID_AUTO, "queue_interrupt_table", CTLTYPE_STRING | CTLFLAG_RD,
1886 	    sc, 0, iavf_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");
1887 
1888 #ifdef IAVF_DEBUG
1889 	SYSCTL_ADD_PROC(ctx, debug_list,
1890 	    OID_AUTO, "do_vf_reset", CTLTYPE_INT | CTLFLAG_WR,
1891 	    sc, 0, iavf_sysctl_vf_reset, "A", "Request a VF reset from PF");
1892 
1893 	SYSCTL_ADD_PROC(ctx, debug_list,
1894 	    OID_AUTO, "do_vflr_reset", CTLTYPE_INT | CTLFLAG_WR,
1895 	    sc, 0, iavf_sysctl_vflr_reset, "A", "Request a VFLR reset from HW");
1896 #endif
1897 
1898 	/* Add stats sysctls */
1899 	iavf_add_vsi_sysctls(dev, vsi, ctx, "vsi");
1900 
1901 	iavf_add_queues_sysctls(dev, vsi);
1902 }
1903 
1904 /**
1905  * iavf_add_queues_sysctls - Add per-queue sysctls
1906  * @dev: device pointer
1907  * @vsi: the main VSI
1908  *
1909  * Add sysctls for each Tx and Rx queue.
1910  */
1911 void
1912 iavf_add_queues_sysctls(device_t dev, struct iavf_vsi *vsi)
1913 {
1914 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1915 	struct sysctl_oid_list *vsi_list, *queue_list;
1916 	struct sysctl_oid *queue_node;
1917 	char queue_namebuf[32];
1918 
1919 	struct iavf_rx_queue *rx_que;
1920 	struct iavf_tx_queue *tx_que;
1921 	struct tx_ring *txr;
1922 	struct rx_ring *rxr;
1923 
1924 	vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
1925 
1926 	/* Queue statistics */
1927 	for (int q = 0; q < vsi->num_rx_queues; q++) {
1928 		bzero(queue_namebuf, sizeof(queue_namebuf));
1929 		snprintf(queue_namebuf, IAVF_QUEUE_NAME_LEN, "rxq%02d", q);
1930 		queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
1931 		    OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "RX Queue #");
1932 		queue_list = SYSCTL_CHILDREN(queue_node);
1933 
1934 		rx_que = &(vsi->rx_queues[q]);
1935 		rxr = &(rx_que->rxr);
1936 
1937 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1938 				CTLFLAG_RD, &(rx_que->irqs),
1939 				"irqs on this queue (both Tx and Rx)");
1940 
1941 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "packets",
1942 				CTLFLAG_RD, &(rxr->rx_packets),
1943 				"Queue Packets Received");
1944 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "bytes",
1945 				CTLFLAG_RD, &(rxr->rx_bytes),
1946 				"Queue Bytes Received");
1947 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "desc_err",
1948 				CTLFLAG_RD, &(rxr->desc_errs),
1949 				"Queue Rx Descriptor Errors");
1950 		SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "itr",
1951 				CTLFLAG_RD, &(rxr->itr), 0,
1952 				"Queue Rx ITR Interval");
1953 	}
1954 	for (int q = 0; q < vsi->num_tx_queues; q++) {
1955 		bzero(queue_namebuf, sizeof(queue_namebuf));
1956 		snprintf(queue_namebuf, IAVF_QUEUE_NAME_LEN, "txq%02d", q);
1957 		queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
1958 		    OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "TX Queue #");
1959 		queue_list = SYSCTL_CHILDREN(queue_node);
1960 
1961 		tx_que = &(vsi->tx_queues[q]);
1962 		txr = &(tx_que->txr);
1963 
1964 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso",
1965 				CTLFLAG_RD, &(tx_que->tso),
1966 				"TSO");
1967 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mss_too_small",
1968 				CTLFLAG_RD, &(txr->mss_too_small),
1969 				"TSO sends with an MSS less than 64");
1970 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "packets",
1971 				CTLFLAG_RD, &(txr->tx_packets),
1972 				"Queue Packets Transmitted");
1973 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "bytes",
1974 				CTLFLAG_RD, &(txr->tx_bytes),
1975 				"Queue Bytes Transmitted");
1976 		SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "itr",
1977 				CTLFLAG_RD, &(txr->itr), 0,
1978 				"Queue Tx ITR Interval");
1979 	}
1980 }
1981 
1982 /**
1983  * iavf_driver_is_detaching - Check if the driver is detaching/unloading
1984  * @sc: device private softc
1985  *
1986  * @returns true if the driver is detaching, false otherwise.
1987  *
1988  * @remark on newer kernels, take advantage of iflib_in_detach in order to
1989  * report detachment correctly as early as possible.
1990  *
1991  * @remark this function is used by various code paths that want to avoid
1992  * running if the driver is about to be removed. This includes sysctls and
1993  * other driver access points. Note that it does not fully resolve
1994  * detach-based race conditions as it is possible for a thread to race with
1995  * iflib_in_detach.
1996  */
1997 bool
1998 iavf_driver_is_detaching(struct iavf_sc *sc)
1999 {
2000 	return (!iavf_test_state(&sc->state, IAVF_STATE_INITIALIZED) ||
2001 		iflib_in_detach(sc->vsi.ctx));
2002 }
2003 
2004 /**
2005  * iavf_sysctl_queue_interrupt_table - Sysctl for displaying Tx queue mapping
2006  * @oidp: sysctl oid structure
2007  * @arg1: void pointer to device softc
2008  * @arg2: unused
2009  * @req: sysctl request pointer
2010  *
2011  * Print out mapping of TX queue indexes and Rx queue indexes to MSI-X vectors.
2012  *
2013  * @returns zero on success, or an error code on failure.
2014  */
2015 static int
2016 iavf_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS)
2017 {
2018 	struct iavf_sc *sc = (struct iavf_sc *)arg1;
2019 	struct iavf_vsi *vsi = &sc->vsi;
2020 	device_t dev = sc->dev;
2021 	struct sbuf *buf;
2022 	int error = 0;
2023 
2024 	struct iavf_rx_queue *rx_que;
2025 	struct iavf_tx_queue *tx_que;
2026 
2027 	UNREFERENCED_2PARAMETER(arg2, oidp);
2028 
2029 	if (iavf_driver_is_detaching(sc))
2030 		return (ESHUTDOWN);
2031 
2032 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
2033 	if (!buf) {
2034 		device_printf(dev, "Could not allocate sbuf for output.\n");
2035 		return (ENOMEM);
2036 	}
2037 
2038 	sbuf_cat(buf, "\n");
2039 	for (int i = 0; i < vsi->num_rx_queues; i++) {
2040 		rx_que = &vsi->rx_queues[i];
2041 		sbuf_printf(buf, "(rxq %3d): %d\n", i, rx_que->msix);
2042 	}
2043 	for (int i = 0; i < vsi->num_tx_queues; i++) {
2044 		tx_que = &vsi->tx_queues[i];
2045 		sbuf_printf(buf, "(txq %3d): %d\n", i, tx_que->msix);
2046 	}
2047 
2048 	error = sbuf_finish(buf);
2049 	if (error)
2050 		device_printf(dev, "Error finishing sbuf: %d\n", error);
2051 	sbuf_delete(buf);
2052 
2053 	return (error);
2054 }
2055 
2056 #ifdef IAVF_DEBUG
2057 #define CTX_ACTIVE(ctx) ((if_getdrvflags(iflib_get_ifp(ctx)) & IFF_DRV_RUNNING))
2058 
2059 /**
2060  * iavf_sysctl_vf_reset - Request a VF reset
2061  * @oidp: sysctl oid pointer
2062  * @arg1: void pointer to device softc
2063  * @arg2: unused
2064  * @req: sysctl request pointer
2065  *
2066  * Request a VF reset for the device.
2067  *
2068  * @returns zero on success, or an error code on failure.
2069  */
2070 static int
2071 iavf_sysctl_vf_reset(SYSCTL_HANDLER_ARGS)
2072 {
2073 	struct iavf_sc *sc = (struct iavf_sc *)arg1;
2074 	int do_reset = 0, error = 0;
2075 
2076 	UNREFERENCED_PARAMETER(arg2);
2077 
2078 	if (iavf_driver_is_detaching(sc))
2079 		return (ESHUTDOWN);
2080 
2081 	error = sysctl_handle_int(oidp, &do_reset, 0, req);
2082 	if ((error) || (req->newptr == NULL))
2083 		return (error);
2084 
2085 	if (do_reset == 1) {
2086 		iavf_reset(sc);
2087 		if (CTX_ACTIVE(sc->vsi.ctx))
2088 			iflib_request_reset(sc->vsi.ctx);
2089 	}
2090 
2091 	return (error);
2092 }
2093 
2094 /**
2095  * iavf_sysctl_vflr_reset - Trigger a PCIe FLR for the device
2096  * @oidp: sysctl oid pointer
2097  * @arg1: void pointer to device softc
2098  * @arg2: unused
2099  * @req: sysctl request pointer
2100  *
2101  * Sysctl callback to trigger a PCIe FLR.
2102  *
2103  * @returns zero on success, or an error code on failure.
2104  */
2105 static int
2106 iavf_sysctl_vflr_reset(SYSCTL_HANDLER_ARGS)
2107 {
2108 	struct iavf_sc *sc = (struct iavf_sc *)arg1;
2109 	device_t dev = sc->dev;
2110 	int do_reset = 0, error = 0;
2111 
2112 	UNREFERENCED_PARAMETER(arg2);
2113 
2114 	if (iavf_driver_is_detaching(sc))
2115 		return (ESHUTDOWN);
2116 
2117 	error = sysctl_handle_int(oidp, &do_reset, 0, req);
2118 	if ((error) || (req->newptr == NULL))
2119 		return (error);
2120 
2121 	if (do_reset == 1) {
2122 		if (!pcie_flr(dev, max(pcie_get_max_completion_timeout(dev) / 1000, 10), true)) {
2123 			device_printf(dev, "PCIE FLR failed\n");
2124 			error = EIO;
2125 		}
2126 		else if (CTX_ACTIVE(sc->vsi.ctx))
2127 			iflib_request_reset(sc->vsi.ctx);
2128 	}
2129 
2130 	return (error);
2131 }
2132 #undef CTX_ACTIVE
2133 #endif
2134