xref: /freebsd/sys/dev/iavf/if_iavf_iflib.c (revision a39a5a6905612447def27b66ffe73b9d11efd80c)
1 /* SPDX-License-Identifier: BSD-3-Clause */
2 /*  Copyright (c) 2021, Intel Corporation
3  *  All rights reserved.
4  *
5  *  Redistribution and use in source and binary forms, with or without
6  *  modification, are permitted provided that the following conditions are met:
7  *
8  *   1. Redistributions of source code must retain the above copyright notice,
9  *      this list of conditions and the following disclaimer.
10  *
11  *   2. Redistributions in binary form must reproduce the above copyright
12  *      notice, this list of conditions and the following disclaimer in the
13  *      documentation and/or other materials provided with the distribution.
14  *
15  *   3. Neither the name of the Intel Corporation nor the names of its
16  *      contributors may be used to endorse or promote products derived from
17  *      this software without specific prior written permission.
18  *
19  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  *  POSSIBILITY OF SUCH DAMAGE.
30  */
31 /*$FreeBSD$*/
32 
33 /**
34  * @file if_iavf_iflib.c
35  * @brief iflib driver implementation
36  *
37  * Contains the main entry point for the iflib driver implementation. It
38  * implements the various ifdi driver methods, and sets up the module and
39  * driver values to load an iflib driver.
40  */
41 
42 #include "iavf_iflib.h"
43 #include "iavf_vc_common.h"
44 
45 #include "iavf_drv_info.h"
46 #include "iavf_sysctls_iflib.h"
47 
48 /*********************************************************************
49  *  Function prototypes
50  *********************************************************************/
51 static void	 *iavf_register(device_t dev);
52 static int	 iavf_if_attach_pre(if_ctx_t ctx);
53 static int	 iavf_if_attach_post(if_ctx_t ctx);
54 static int	 iavf_if_detach(if_ctx_t ctx);
55 static int	 iavf_if_shutdown(if_ctx_t ctx);
56 static int	 iavf_if_suspend(if_ctx_t ctx);
57 static int	 iavf_if_resume(if_ctx_t ctx);
58 static int	 iavf_if_msix_intr_assign(if_ctx_t ctx, int msix);
59 static void	 iavf_if_enable_intr(if_ctx_t ctx);
60 static void	 iavf_if_disable_intr(if_ctx_t ctx);
61 static int	 iavf_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid);
62 static int	 iavf_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid);
63 static int	 iavf_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets);
64 static int	 iavf_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
65 static void	 iavf_if_queues_free(if_ctx_t ctx);
66 static void	 iavf_if_update_admin_status(if_ctx_t ctx);
67 static void	 iavf_if_multi_set(if_ctx_t ctx);
68 static int	 iavf_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
69 static void	 iavf_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr);
70 static int	 iavf_if_media_change(if_ctx_t ctx);
71 static int	 iavf_if_promisc_set(if_ctx_t ctx, int flags);
72 static void	 iavf_if_timer(if_ctx_t ctx, uint16_t qid);
73 static void	 iavf_if_vlan_register(if_ctx_t ctx, u16 vtag);
74 static void	 iavf_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
75 static uint64_t	 iavf_if_get_counter(if_ctx_t ctx, ift_counter cnt);
76 static void	 iavf_if_init(if_ctx_t ctx);
77 static void	 iavf_if_stop(if_ctx_t ctx);
78 
79 static int	iavf_allocate_pci_resources(struct iavf_sc *);
80 static void	iavf_free_pci_resources(struct iavf_sc *);
81 static void	iavf_setup_interface(struct iavf_sc *);
82 static void	iavf_add_device_sysctls(struct iavf_sc *);
83 static void	iavf_enable_queue_irq(struct iavf_hw *, int);
84 static void	iavf_disable_queue_irq(struct iavf_hw *, int);
85 static void	iavf_stop(struct iavf_sc *);
86 
87 static int	iavf_del_mac_filter(struct iavf_sc *sc, u8 *macaddr);
88 static int	iavf_msix_que(void *);
89 static int	iavf_msix_adminq(void *);
90 static void	iavf_configure_itr(struct iavf_sc *sc);
91 
92 static int	iavf_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS);
93 #ifdef IAVF_DEBUG
94 static int	iavf_sysctl_vf_reset(SYSCTL_HANDLER_ARGS);
95 static int	iavf_sysctl_vflr_reset(SYSCTL_HANDLER_ARGS);
96 #endif
97 
98 static enum iavf_status iavf_process_adminq(struct iavf_sc *, u16 *);
99 static void	iavf_vc_task(void *arg, int pending __unused);
100 static int	iavf_setup_vc_tq(struct iavf_sc *sc);
101 static int	iavf_vc_sleep_wait(struct iavf_sc *sc, u32 op);
102 
103 /*********************************************************************
104  *  FreeBSD Device Interface Entry Points
105  *********************************************************************/
106 
107 /**
108  * @var iavf_methods
109  * @brief device methods for the iavf driver
110  *
111  * Device method callbacks used to interact with the driver. For iflib this
112  * primarily resolves to the default iflib implementations.
113  */
static device_method_t iavf_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, iavf_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
	DEVMETHOD_END
};

/* newbus driver description for the "iavf" device class */
static driver_t iavf_driver = {
	"iavf", iavf_methods, sizeof(struct iavf_sc),
};

/* Attach the driver to the PCI bus and declare module dependencies. */
DRIVER_MODULE(iavf, pci, iavf_driver, 0, 0);
MODULE_VERSION(iavf, 1);

MODULE_DEPEND(iavf, pci, 1, 1, 1);
MODULE_DEPEND(iavf, ether, 1, 1, 1);
MODULE_DEPEND(iavf, iflib, 1, 1, 1);

/* Export the PCI ID table so the loader can auto-load this module. */
IFLIB_PNP_INFO(pci, iavf, iavf_vendor_info_array);

/**
 * @var M_IAVF
 * @brief main iavf driver allocation type
 *
 * malloc(9) allocation type used by the majority of memory allocations in the
 * iavf iflib driver.
 */
MALLOC_DEFINE(M_IAVF, "iavf", "iavf driver allocations");
145 
/* ifdi method table mapping iflib IFDI_* calls onto this driver. */
static device_method_t iavf_if_methods[] = {
	DEVMETHOD(ifdi_attach_pre, iavf_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, iavf_if_attach_post),
	DEVMETHOD(ifdi_detach, iavf_if_detach),
	DEVMETHOD(ifdi_shutdown, iavf_if_shutdown),
	DEVMETHOD(ifdi_suspend, iavf_if_suspend),
	DEVMETHOD(ifdi_resume, iavf_if_resume),
	DEVMETHOD(ifdi_init, iavf_if_init),
	DEVMETHOD(ifdi_stop, iavf_if_stop),
	DEVMETHOD(ifdi_msix_intr_assign, iavf_if_msix_intr_assign),
	DEVMETHOD(ifdi_intr_enable, iavf_if_enable_intr),
	DEVMETHOD(ifdi_intr_disable, iavf_if_disable_intr),
	DEVMETHOD(ifdi_rx_queue_intr_enable, iavf_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queue_intr_enable, iavf_if_tx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queues_alloc, iavf_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, iavf_if_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, iavf_if_queues_free),
	DEVMETHOD(ifdi_update_admin_status, iavf_if_update_admin_status),
	DEVMETHOD(ifdi_multi_set, iavf_if_multi_set),
	DEVMETHOD(ifdi_mtu_set, iavf_if_mtu_set),
	DEVMETHOD(ifdi_media_status, iavf_if_media_status),
	DEVMETHOD(ifdi_media_change, iavf_if_media_change),
	DEVMETHOD(ifdi_promisc_set, iavf_if_promisc_set),
	DEVMETHOD(ifdi_timer, iavf_if_timer),
	DEVMETHOD(ifdi_vlan_register, iavf_if_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister, iavf_if_vlan_unregister),
	DEVMETHOD(ifdi_get_counter, iavf_if_get_counter),
	DEVMETHOD_END
};

/* Inner driver object referenced from the shared context below. */
static driver_t iavf_if_driver = {
	"iavf_if", iavf_if_methods, sizeof(struct iavf_sc)
};
179 
/* Tx/Rx descriptor routines: head-writeback and descriptor-writeback modes */
extern struct if_txrx iavf_txrx_hwb;
extern struct if_txrx iavf_txrx_dwb;

/* Shared context template returned to iflib by iavf_register(). */
static struct if_shared_ctx iavf_sctx = {
	.isc_magic = IFLIB_MAGIC,
	.isc_q_align = PAGE_SIZE,
	.isc_tx_maxsize = IAVF_MAX_FRAME,
	.isc_tx_maxsegsize = IAVF_MAX_FRAME,
	.isc_tso_maxsize = IAVF_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = IAVF_MAX_DMA_SEG_SIZE,
	.isc_rx_maxsize = IAVF_MAX_FRAME,
	.isc_rx_nsegments = IAVF_MAX_RX_SEGS,
	.isc_rx_maxsegsize = IAVF_MAX_FRAME,
	.isc_nfl = 1,
	.isc_ntxqs = 1,
	.isc_nrxqs = 1,

	/* One MSI-X vector is reserved for the admin queue. */
	.isc_admin_intrcnt = 1,
	.isc_vendor_info = iavf_vendor_info_array,
	.isc_driver_version = __DECONST(char *, iavf_driver_version),
	.isc_driver = &iavf_if_driver,
	.isc_flags = IFLIB_NEED_SCRATCH | IFLIB_NEED_ZERO_CSUM | IFLIB_TSO_INIT_IP | IFLIB_IS_VF,

	.isc_nrxd_min = {IAVF_MIN_RING},
	.isc_ntxd_min = {IAVF_MIN_RING},
	.isc_nrxd_max = {IAVF_MAX_RING},
	.isc_ntxd_max = {IAVF_MAX_RING},
	.isc_nrxd_default = {IAVF_DEFAULT_RING},
	.isc_ntxd_default = {IAVF_DEFAULT_RING},
};
210 
211 /*** Functions ***/
212 
213 /**
214  * iavf_register - iflib callback to obtain the shared context pointer
215  * @dev: the device being registered
216  *
217  * Called when the driver is first being attached to the driver. This function
218  * is used by iflib to obtain a pointer to the shared context structure which
219  * describes the device features.
220  *
221  * @returns a pointer to the iavf shared context structure.
222  */
static void *
iavf_register(device_t dev __unused)
{
	/* A single static descriptor serves every iavf instance. */
	return (&iavf_sctx);
}
228 
229 /**
230  * iavf_allocate_pci_resources - Allocate PCI resources
231  * @sc: the device private softc
232  *
233  * Allocate PCI resources used by the iflib driver.
234  *
235  * @returns zero or a non-zero error code on failure
236  */
static int
iavf_allocate_pci_resources(struct iavf_sc *sc)
{
	/*
	 * All of the actual work is shared with the non-iflib driver.
	 * Parenthesize the return value for style(9) consistency with the
	 * rest of this file.
	 */
	return (iavf_allocate_pci_resources_common(sc));
}
242 
243 /**
244  * iavf_if_attach_pre - Begin attaching the device to the driver
245  * @ctx: the iflib context pointer
246  *
247  * Called by iflib to begin the attach process. Allocates resources and
248  * initializes the hardware for operation.
249  *
250  * @returns zero or a non-zero error code on failure.
251  */
252 static int
253 iavf_if_attach_pre(if_ctx_t ctx)
254 {
255 	device_t dev;
256 	struct iavf_sc *sc;
257 	struct iavf_hw *hw;
258 	struct iavf_vsi *vsi;
259 	if_softc_ctx_t scctx;
260 	int error = 0;
261 
262 	/* Setup pointers */
263 	dev = iflib_get_dev(ctx);
264 	sc = iavf_sc_from_ctx(ctx);
265 
266 	vsi = &sc->vsi;
267 	vsi->back = sc;
268 	sc->dev = sc->osdep.dev = dev;
269 	hw = &sc->hw;
270 
271 	vsi->dev = dev;
272 	vsi->hw = &sc->hw;
273 	vsi->num_vlans = 0;
274 	vsi->ctx = ctx;
275 	sc->media = iflib_get_media(ctx);
276 	vsi->ifp = iflib_get_ifp(ctx);
277 	vsi->shared = scctx = iflib_get_softc_ctx(ctx);
278 
279 	iavf_save_tunables(sc);
280 
281 	/* Setup VC mutex */
282 	snprintf(sc->vc_mtx_name, sizeof(sc->vc_mtx_name),
283 		 "%s:vc", device_get_nameunit(dev));
284 	mtx_init(&sc->vc_mtx, sc->vc_mtx_name, NULL, MTX_DEF);
285 
286 	/* Do PCI setup - map BAR0, etc */
287 	error = iavf_allocate_pci_resources(sc);
288 	if (error) {
289 		device_printf(dev, "%s: Allocation of PCI resources failed\n",
290 		    __func__);
291 		goto err_early;
292 	}
293 
294 	iavf_dbg_init(sc, "Allocated PCI resources and MSI-X vectors\n");
295 
296 	error = iavf_set_mac_type(hw);
297 	if (error) {
298 		device_printf(dev, "%s: set_mac_type failed: %d\n",
299 		    __func__, error);
300 		goto err_pci_res;
301 	}
302 
303 	error = iavf_reset_complete(hw);
304 	if (error) {
305 		device_printf(dev, "%s: Device is still being reset\n",
306 		    __func__);
307 		goto err_pci_res;
308 	}
309 
310 	iavf_dbg_init(sc, "VF Device is ready for configuration\n");
311 
312 	/* Sets up Admin Queue */
313 	error = iavf_setup_vc(sc);
314 	if (error) {
315 		device_printf(dev, "%s: Error setting up PF comms, %d\n",
316 		    __func__, error);
317 		goto err_pci_res;
318 	}
319 
320 	iavf_dbg_init(sc, "PF API version verified\n");
321 
322 	/* Need API version before sending reset message */
323 	error = iavf_reset(sc);
324 	if (error) {
325 		device_printf(dev, "VF reset failed; reload the driver\n");
326 		goto err_aq;
327 	}
328 
329 	iavf_dbg_init(sc, "VF reset complete\n");
330 
331 	/* Ask for VF config from PF */
332 	error = iavf_vf_config(sc);
333 	if (error) {
334 		device_printf(dev, "Error getting configuration from PF: %d\n",
335 		    error);
336 		goto err_aq;
337 	}
338 
339 	iavf_print_device_info(sc);
340 
341 	error = iavf_get_vsi_res_from_vf_res(sc);
342 	if (error)
343 		goto err_res_buf;
344 
345 	iavf_dbg_init(sc, "Resource Acquisition complete\n");
346 
347 	/* Setup taskqueue to service VC messages */
348 	error = iavf_setup_vc_tq(sc);
349 	if (error)
350 		goto err_vc_tq;
351 
352 	iavf_set_mac_addresses(sc);
353 	iflib_set_mac(ctx, hw->mac.addr);
354 
355 	/* Allocate filter lists */
356 	iavf_init_filters(sc);
357 
358 	/* Fill out more iflib parameters */
359 	scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max =
360 	    sc->vsi_res->num_queue_pairs;
361 	if (vsi->enable_head_writeback) {
362 		scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
363 		    * sizeof(struct iavf_tx_desc) + sizeof(u32), DBA_ALIGN);
364 		scctx->isc_txrx = &iavf_txrx_hwb;
365 	} else {
366 		scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
367 		    * sizeof(struct iavf_tx_desc), DBA_ALIGN);
368 		scctx->isc_txrx = &iavf_txrx_dwb;
369 	}
370 	scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0]
371 	    * sizeof(union iavf_32byte_rx_desc), DBA_ALIGN);
372 	scctx->isc_msix_bar = PCIR_BAR(IAVF_MSIX_BAR);
373 	scctx->isc_tx_nsegments = IAVF_MAX_TX_SEGS;
374 	scctx->isc_tx_tso_segments_max = IAVF_MAX_TSO_SEGS;
375 	scctx->isc_tx_tso_size_max = IAVF_TSO_SIZE;
376 	scctx->isc_tx_tso_segsize_max = IAVF_MAX_DMA_SEG_SIZE;
377 	scctx->isc_rss_table_size = IAVF_RSS_VSI_LUT_SIZE;
378 	scctx->isc_capabilities = scctx->isc_capenable = IAVF_CAPS;
379 	scctx->isc_tx_csum_flags = CSUM_OFFLOAD;
380 
381 	/* Update OS cache of MSIX control register values */
382 	iavf_update_msix_devinfo(dev);
383 
384 	return (0);
385 
386 err_vc_tq:
387 	taskqueue_free(sc->vc_tq);
388 err_res_buf:
389 	free(sc->vf_res, M_IAVF);
390 err_aq:
391 	iavf_shutdown_adminq(hw);
392 err_pci_res:
393 	iavf_free_pci_resources(sc);
394 err_early:
395 	IAVF_VC_LOCK_DESTROY(sc);
396 	return (error);
397 }
398 
399 /**
400  * iavf_vc_task - task used to process VC messages
401  * @arg: device softc
402  * @pending: unused
403  *
404  * Processes the admin queue, in order to process the virtual
405  * channel messages received from the PF.
406  */
407 static void
408 iavf_vc_task(void *arg, int pending __unused)
409 {
410 	struct iavf_sc *sc = (struct iavf_sc *)arg;
411 	u16 var;
412 
413 	iavf_process_adminq(sc, &var);
414 }
415 
416 /**
417  * iavf_setup_vc_tq - Setup task queues
418  * @sc: device softc
419  *
420  * Create taskqueue and tasklet for processing virtual channel messages. This
421  * is done in a separate non-iflib taskqueue so that the iflib context lock
422  * does not need to be held for VC messages to be processed.
423  *
424  * @returns zero on success, or an error code on failure.
425  */
426 static int
427 iavf_setup_vc_tq(struct iavf_sc *sc)
428 {
429 	device_t dev = sc->dev;
430 	int error = 0;
431 
432 	TASK_INIT(&sc->vc_task, 0, iavf_vc_task, sc);
433 
434 	sc->vc_tq = taskqueue_create_fast("iavf_vc", M_NOWAIT,
435 	    taskqueue_thread_enqueue, &sc->vc_tq);
436 	if (!sc->vc_tq) {
437 		device_printf(dev, "taskqueue_create_fast (for VC task) returned NULL!\n");
438 		return (ENOMEM);
439 	}
440 	error = taskqueue_start_threads(&sc->vc_tq, 1, PI_NET, "%s vc",
441 	    device_get_nameunit(dev));
442 	if (error) {
443 		device_printf(dev, "taskqueue_start_threads (for VC task) error: %d\n",
444 		    error);
445 		taskqueue_free(sc->vc_tq);
446 		return (error);
447 	}
448 
449 	return (error);
450 }
451 
452 /**
453  * iavf_if_attach_post - Finish attaching the device to the driver
454  * @ctx: the iflib context pointer
455  *
456  * Called by iflib after it has setup queues and interrupts. Used to finish up
457  * the attach process for a device. Attach logic which must occur after Tx and
458  * Rx queues are setup belongs here.
459  *
460  * @returns zero or a non-zero error code on failure
461  */
static int
iavf_if_attach_post(if_ctx_t ctx)
{
/* NOTE(review): this guard uses IXL_DEBUG while the rest of this file is
 * conditional on IAVF_DEBUG -- confirm which macro is intended. */
#ifdef IXL_DEBUG
	device_t dev = iflib_get_dev(ctx);
#endif
	struct iavf_sc	*sc;
	struct iavf_hw	*hw;
	struct iavf_vsi *vsi;
	int error = 0;

	INIT_DBG_DEV(dev, "begin");

	sc = iavf_sc_from_ctx(ctx);
	vsi = &sc->vsi;
	hw = &sc->hw;

	/* Save off determined number of queues for interface */
	vsi->num_rx_queues = vsi->shared->isc_nrxqsets;
	vsi->num_tx_queues = vsi->shared->isc_ntxqsets;

	/* Setup the stack interface */
	iavf_setup_interface(sc);

	iavf_dbg_init(sc, "Interface setup complete\n");

	/* Initialize statistics & add sysctls */
	bzero(&sc->vsi.eth_stats, sizeof(struct iavf_eth_stats));
	iavf_add_device_sysctls(sc);

	/* Queues stay disabled until iavf_if_init() requests them. */
	atomic_store_rel_32(&sc->queues_enabled, 0);
	iavf_set_state(&sc->state, IAVF_STATE_INITIALIZED);

	/* We want AQ enabled early for init */
	iavf_enable_adminq_irq(hw);

	INIT_DBG_DEV(dev, "end");

	return (error);
}
502 
503 /**
504  * iavf_if_detach - Detach a device from the driver
505  * @ctx: the iflib context of the device to detach
506  *
507  * Called by iflib to detach a given device from the driver. Clean up any
508  * resources associated with the driver and shut the device down.
509  *
510  * @remark iflib always ignores the return value of IFDI_DETACH, so this
511  * function is effectively not allowed to fail. Instead, it should clean up
512  * and release as much as possible even if something goes wrong.
513  *
514  * @returns zero
515  */
static int
iavf_if_detach(if_ctx_t ctx)
{
	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
	struct iavf_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	enum iavf_status status;

	INIT_DBG_DEV(dev, "begin");

	iavf_clear_state(&sc->state, IAVF_STATE_INITIALIZED);

	/* Drain admin queue taskqueue */
	taskqueue_free(sc->vc_tq);
	IAVF_VC_LOCK_DESTROY(sc);

	/* Remove all the media and link information */
	ifmedia_removeall(sc->media);

	iavf_disable_adminq_irq(hw);
	status = iavf_shutdown_adminq(&sc->hw);
	if (status != IAVF_SUCCESS) {
		/* Detach cannot fail; report the problem and keep going. */
		device_printf(dev,
		    "iavf_shutdown_adminq() failed with status %s\n",
		    iavf_stat_str(hw, status));
	}

	/* Release VF resource buffer, PCI resources, and filter lists. */
	free(sc->vf_res, M_IAVF);
	sc->vf_res = NULL;
	iavf_free_pci_resources(sc);
	iavf_free_filters(sc);

	INIT_DBG_DEV(dev, "end");
	return (0);
}
551 
552 /**
553  * iavf_if_shutdown - called by iflib to handle shutdown
554  * @ctx: the iflib context pointer
555  *
556  * Callback for the IFDI_SHUTDOWN iflib function.
557  *
558  * @returns zero or an error code on failure
559  */
static int
iavf_if_shutdown(if_ctx_t ctx __unused)
{
	/* No device-specific shutdown work is required. */
	return (0);
}
565 
566 /**
567  * iavf_if_suspend - called by iflib to handle suspend
568  * @ctx: the iflib context pointer
569  *
570  * Callback for the IFDI_SUSPEND iflib function.
571  *
572  * @returns zero or an error code on failure
573  */
static int
iavf_if_suspend(if_ctx_t ctx __unused)
{
	/* No device-specific suspend work is required. */
	return (0);
}
579 
580 /**
581  * iavf_if_resume - called by iflib to handle resume
582  * @ctx: the iflib context pointer
583  *
584  * Callback for the IFDI_RESUME iflib function.
585  *
586  * @returns zero or an error code on failure
587  */
static int
iavf_if_resume(if_ctx_t ctx __unused)
{
	/* No device-specific resume work is required. */
	return (0);
}
593 
594 /**
595  * iavf_vc_sleep_wait - Sleep for a response from a VC message
596  * @sc: device softc
597  * @op: the op code to sleep on
598  *
599  * Sleep until a response from the PF for the VC message sent by the
600  * given op.
601  *
602  * @returns zero on success, or EWOULDBLOCK if the sleep times out.
603  */
604 static int
605 iavf_vc_sleep_wait(struct iavf_sc *sc, u32 op)
606 {
607 	int error = 0;
608 
609 	IAVF_VC_LOCK_ASSERT(sc);
610 
611 	iavf_dbg_vc(sc, "Sleeping for op %b\n", op, IAVF_FLAGS);
612 
613 	error = mtx_sleep(iavf_vc_get_op_chan(sc, op),
614 	    &sc->vc_mtx, PRI_MAX, "iavf_vc", IAVF_AQ_TIMEOUT);
615 
616 	return (error);
617 }
618 
619 /**
620  * iavf_send_vc_msg_sleep - Send a virtchnl message and wait for a response
621  * @sc: device softc
622  * @op: the op code to send
623  *
624  * Send a virtchnl message to the PF, and sleep or busy wait for a response
625  * from the PF, depending on iflib context lock type.
626  *
627  * @remark this function does not wait if the device is detaching, on kernels
628  * that support indicating to the driver that the device is detaching
629  *
630  * @returns zero or an error code on failure.
631  */
632 int
633 iavf_send_vc_msg_sleep(struct iavf_sc *sc, u32 op)
634 {
635 	if_ctx_t ctx = sc->vsi.ctx;
636 	int error = 0;
637 
638 	IAVF_VC_LOCK(sc);
639 	error = iavf_vc_send_cmd(sc, op);
640 	if (error != 0) {
641 		iavf_dbg_vc(sc, "Error sending %b: %d\n", op, IAVF_FLAGS, error);
642 		goto release_lock;
643 	}
644 
645 	/* Don't wait for a response if the device is being detached. */
646 	if (!iflib_in_detach(ctx)) {
647 		error = iavf_vc_sleep_wait(sc, op);
648 		IAVF_VC_LOCK_ASSERT(sc);
649 
650 		if (error == EWOULDBLOCK)
651 			device_printf(sc->dev, "%b timed out\n", op, IAVF_FLAGS);
652 	}
653 release_lock:
654 	IAVF_VC_UNLOCK(sc);
655 	return (error);
656 }
657 
658 /**
659  * iavf_send_vc_msg - Send a virtchnl message to the PF
660  * @sc: device softc
661  * @op: the op code to send
662  *
663  * Send a virtchnl message to the PF and do not wait for a response.
664  *
665  * @returns zero on success, or an error code on failure.
666  */
667 int
668 iavf_send_vc_msg(struct iavf_sc *sc, u32 op)
669 {
670 	int error = 0;
671 
672 	error = iavf_vc_send_cmd(sc, op);
673 	if (error != 0)
674 		iavf_dbg_vc(sc, "Error sending %b: %d\n", op, IAVF_FLAGS, error);
675 
676 	return (error);
677 }
678 
679 /**
680  * iavf_init_queues - initialize Tx and Rx queues
681  * @vsi: the VSI to initialize
682  *
683  * Refresh the Tx and Rx ring contents and update the tail pointers for each
684  * queue.
685  */
686 static void
687 iavf_init_queues(struct iavf_vsi *vsi)
688 {
689 	struct iavf_tx_queue *tx_que = vsi->tx_queues;
690 	struct iavf_rx_queue *rx_que = vsi->rx_queues;
691 	struct rx_ring *rxr;
692 	uint32_t mbuf_sz;
693 
694 	mbuf_sz = iflib_get_rx_mbuf_sz(vsi->ctx);
695 	MPASS(mbuf_sz <= UINT16_MAX);
696 
697 	for (int i = 0; i < vsi->num_tx_queues; i++, tx_que++)
698 		iavf_init_tx_ring(vsi, tx_que);
699 
700 	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) {
701 		rxr = &rx_que->rxr;
702 
703 		rxr->mbuf_sz = mbuf_sz;
704 		wr32(vsi->hw, rxr->tail, 0);
705 	}
706 }
707 
708 /**
709  * iavf_if_init - Initialize device for operation
710  * @ctx: the iflib context pointer
711  *
712  * Initializes a device for operation. Called by iflib in response to an
713  * interface up event from the stack.
714  *
715  * @remark this function does not return a value and thus cannot indicate
716  * failure to initialize.
717  */
static void
iavf_if_init(if_ctx_t ctx)
{
	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
	struct iavf_vsi *vsi = &sc->vsi;
	struct iavf_hw *hw = &sc->hw;
	if_t ifp = iflib_get_ifp(ctx);
	u8 tmpaddr[ETHER_ADDR_LEN];
	enum iavf_status status;
	device_t dev = sc->dev;
	int error = 0;

	INIT_DBG_IF(ifp, "begin");

	IFLIB_CTX_ASSERT(ctx);

	/* This callback cannot fail, so only report reset trouble. */
	error = iavf_reset_complete(hw);
	if (error) {
		device_printf(sc->dev, "%s: VF reset failed\n",
		    __func__);
	}

	/* Re-initialize the admin queue if its send side has died. */
	if (!iavf_check_asq_alive(hw)) {
		iavf_dbg_info(sc, "ASQ is not alive, re-initializing AQ\n");
		pci_enable_busmaster(dev);

		status = iavf_shutdown_adminq(hw);
		if (status != IAVF_SUCCESS) {
			device_printf(dev,
			    "%s: iavf_shutdown_adminq failed: %s\n",
			    __func__, iavf_stat_str(hw, status));
			return;
		}

		status = iavf_init_adminq(hw);
		if (status != IAVF_SUCCESS) {
			device_printf(dev,
			"%s: iavf_init_adminq failed: %s\n",
			    __func__, iavf_stat_str(hw, status));
			return;
		}
	}

	/* Make sure queues are disabled */
	iavf_disable_queues_with_retries(sc);

	/*
	 * If the stack's LL address differs from the address the hardware
	 * currently uses and is valid, move the MAC filter to the new one.
	 */
	bcopy(IF_LLADDR(ifp), tmpaddr, ETHER_ADDR_LEN);
	if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
	    (iavf_validate_mac_addr(tmpaddr) == IAVF_SUCCESS)) {
		error = iavf_del_mac_filter(sc, hw->mac.addr);
		if (error == 0)
			iavf_send_vc_msg(sc, IAVF_FLAG_AQ_DEL_MAC_FILTER);

		/* NOTE(review): ETH_ALEN here vs ETHER_ADDR_LEN above --
		 * presumably the same value; confirm and unify. */
		bcopy(tmpaddr, hw->mac.addr, ETH_ALEN);
	}

	/* EEXIST is fine here: the filter is already in place. */
	error = iavf_add_mac_filter(sc, hw->mac.addr, 0);
	if (!error || error == EEXIST)
		iavf_send_vc_msg(sc, IAVF_FLAG_AQ_ADD_MAC_FILTER);
	iflib_set_mac(ctx, hw->mac.addr);

	/* Prepare the queues for operation */
	iavf_init_queues(vsi);

	/* Set initial ITR values */
	iavf_configure_itr(sc);

	iavf_send_vc_msg(sc, IAVF_FLAG_AQ_CONFIGURE_QUEUES);

	/* Set up RSS */
	iavf_config_rss(sc);

	/* Map vectors */
	iavf_send_vc_msg(sc, IAVF_FLAG_AQ_MAP_VECTORS);

	/* Init SW TX ring indices */
	if (vsi->enable_head_writeback)
		iavf_init_tx_cidx(vsi);
	else
		iavf_init_tx_rsqs(vsi);

	/* Configure promiscuous mode */
	iavf_config_promisc(sc, if_getflags(ifp));

	/* Enable queues */
	iavf_send_vc_msg_sleep(sc, IAVF_FLAG_AQ_ENABLE_QUEUES);

	iavf_set_state(&sc->state, IAVF_STATE_RUNNING);
}
807 
808 /**
809  * iavf_if_msix_intr_assign - Assign MSI-X interrupts
810  * @ctx: the iflib context pointer
811  * @msix: the number of MSI-X vectors available
812  *
813  * Called by iflib to assign MSI-X interrupt vectors to queues. Assigns and
814  * sets up vectors for each Tx and Rx queue, as well as the administrative
815  * control interrupt.
816  *
817  * @returns zero or an error code on failure
818  */
static int
iavf_if_msix_intr_assign(if_ctx_t ctx, int msix __unused)
{
	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
	struct iavf_vsi *vsi = &sc->vsi;
	struct iavf_rx_queue *rx_que = vsi->rx_queues;
	struct iavf_tx_queue *tx_que = vsi->tx_queues;
	int err, i, rid, vector = 0;
	char buf[16];

	MPASS(vsi->shared->isc_nrxqsets > 0);
	MPASS(vsi->shared->isc_ntxqsets > 0);

	/* Admin Que is vector 0*/
	rid = vector + 1;
	err = iflib_irq_alloc_generic(ctx, &vsi->irq, rid, IFLIB_INTR_ADMIN,
	    iavf_msix_adminq, sc, 0, "aq");
	if (err) {
		iflib_irq_free(ctx, &vsi->irq);
		device_printf(iflib_get_dev(ctx),
		    "Failed to register Admin Que handler");
		return (err);
	}

	/* Now set up the stations */
	for (i = 0, vector = 1; i < vsi->shared->isc_nrxqsets; i++, vector++, rx_que++) {
		rid = vector + 1;

		snprintf(buf, sizeof(buf), "rxq%d", i);
		err = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
		    IFLIB_INTR_RXTX, iavf_msix_que, rx_que, rx_que->rxr.me, buf);
		if (err) {
			device_printf(iflib_get_dev(ctx),
			    "Failed to allocate queue RX int vector %d, err: %d\n", i, err);
			/*
			 * NOTE(review): this count includes the queue whose
			 * allocation just failed, so the fail path below
			 * also frees its que_irq -- confirm iflib_irq_free()
			 * is safe on that partially set up entry.
			 */
			vsi->num_rx_queues = i + 1;
			goto fail;
		}
		rx_que->msix = vector;
	}

	bzero(buf, sizeof(buf));

	/*
	 * Tx queues piggyback on the Rx interrupts: each txq is serviced as
	 * a softirq on the irq of the rxq with the same index (modulo the
	 * number of rxq sets).
	 */
	for (i = 0; i < vsi->shared->isc_ntxqsets; i++, tx_que++) {
		snprintf(buf, sizeof(buf), "txq%d", i);
		iflib_softirq_alloc_generic(ctx,
		    &vsi->rx_queues[i % vsi->shared->isc_nrxqsets].que_irq,
		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);

		tx_que->msix = (i % vsi->shared->isc_nrxqsets) + 1;
	}

	return (0);
fail:
	/* Unwind the admin irq and every rx queue irq assigned so far. */
	iflib_irq_free(ctx, &vsi->irq);
	rx_que = vsi->rx_queues;
	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
		iflib_irq_free(ctx, &rx_que->que_irq);
	return (err);
}
878 
879 /**
880  * iavf_if_enable_intr - Enable all interrupts for a device
881  * @ctx: the iflib context pointer
882  *
883  * Called by iflib to request enabling all interrupts.
884  */
885 static void
886 iavf_if_enable_intr(if_ctx_t ctx)
887 {
888 	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
889 	struct iavf_vsi *vsi = &sc->vsi;
890 
891 	iavf_enable_intr(vsi);
892 }
893 
894 /**
895  * iavf_if_disable_intr - Disable all interrupts for a device
896  * @ctx: the iflib context pointer
897  *
898  * Called by iflib to request disabling all interrupts.
899  */
900 static void
901 iavf_if_disable_intr(if_ctx_t ctx)
902 {
903 	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
904 	struct iavf_vsi *vsi = &sc->vsi;
905 
906 	iavf_disable_intr(vsi);
907 }
908 
909 /**
910  * iavf_if_rx_queue_intr_enable - Enable one Rx queue interrupt
911  * @ctx: the iflib context pointer
912  * @rxqid: Rx queue index
913  *
914  * Enables the interrupt associated with a specified Rx queue.
915  *
916  * @returns zero
917  */
918 static int
919 iavf_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
920 {
921 	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
922 	struct iavf_vsi *vsi = &sc->vsi;
923 	struct iavf_hw *hw = vsi->hw;
924 	struct iavf_rx_queue *rx_que = &vsi->rx_queues[rxqid];
925 
926 	iavf_enable_queue_irq(hw, rx_que->msix - 1);
927 	return (0);
928 }
929 
930 /**
931  * iavf_if_tx_queue_intr_enable - Enable one Tx queue interrupt
932  * @ctx: the iflib context pointer
933  * @txqid: Tx queue index
934  *
935  * Enables the interrupt associated with a specified Tx queue.
936  *
937  * @returns zero
938  */
939 static int
940 iavf_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
941 {
942 	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
943 	struct iavf_vsi *vsi = &sc->vsi;
944 	struct iavf_hw *hw = vsi->hw;
945 	struct iavf_tx_queue *tx_que = &vsi->tx_queues[txqid];
946 
947 	iavf_enable_queue_irq(hw, tx_que->msix - 1);
948 	return (0);
949 }
950 
951 /**
952  * iavf_if_tx_queues_alloc - Allocate Tx queue memory
953  * @ctx: the iflib context pointer
954  * @vaddrs: Array of virtual addresses
955  * @paddrs: Array of physical addresses
956  * @ntxqs: the number of Tx queues per group (should always be 1)
957  * @ntxqsets: the number of Tx queues
958  *
959  * Allocates memory for the specified number of Tx queues. This includes
960  * memory for the queue structures and the report status array for the queues.
961  * The virtual and physical addresses are saved for later use during
962  * initialization.
963  *
964  * @returns zero or a non-zero error code on failure
965  */
static int
iavf_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets)
{
	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
	struct iavf_vsi *vsi = &sc->vsi;
	if_softc_ctx_t scctx = vsi->shared;
	struct iavf_tx_queue *que;
	int i, j, error = 0;

	/* iflib is expected to request exactly one Tx queue per set */
	MPASS(scctx->isc_ntxqsets > 0);
	MPASS(ntxqs == 1);
	MPASS(scctx->isc_ntxqsets == ntxqsets);

	/* Allocate queue structure memory */
	if (!(vsi->tx_queues =
	    (struct iavf_tx_queue *)malloc(sizeof(struct iavf_tx_queue) *ntxqsets, M_IAVF, M_NOWAIT | M_ZERO))) {
		device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n");
		return (ENOMEM);
	}

	for (i = 0, que = vsi->tx_queues; i < ntxqsets; i++, que++) {
		struct tx_ring *txr = &que->txr;

		txr->me = i;
		que->vsi = vsi;

		/* tx_rsq is only needed when head write-back is not enabled */
		if (!vsi->enable_head_writeback) {
			/* Allocate report status array */
			if (!(txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IAVF, M_NOWAIT))) {
				device_printf(iflib_get_dev(ctx), "failed to allocate tx_rsq memory\n");
				error = ENOMEM;
				goto fail;
			}
			/* Init report status array */
			for (j = 0; j < scctx->isc_ntxd[0]; j++)
				txr->tx_rsq[j] = QIDX_INVALID;
		}
		/* get the virtual and physical address of the hardware queues */
		txr->tail = IAVF_QTX_TAIL1(txr->me);
		txr->tx_base = (struct iavf_tx_desc *)vaddrs[i * ntxqs];
		txr->tx_paddr = paddrs[i * ntxqs];
		txr->que = que;
	}

	return (0);
fail:
	/* Release anything that was allocated before the failure */
	iavf_if_queues_free(ctx);
	return (error);
}
1015 
1016 /**
1017  * iavf_if_rx_queues_alloc - Allocate Rx queue memory
1018  * @ctx: the iflib context pointer
1019  * @vaddrs: Array of virtual addresses
1020  * @paddrs: Array of physical addresses
1021  * @nrxqs: number of Rx queues per group (should always be 1)
1022  * @nrxqsets: the number of Rx queues to allocate
1023  *
1024  * Called by iflib to allocate driver memory for a number of Rx queues.
1025  * Allocates memory for the driver's private Rx queue data structure, and saves
1026  * the physical and virtual addresses for later use.
1027  *
1028  * @returns zero or a non-zero error code on failure
1029  */
static int
iavf_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets)
{
	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
	struct iavf_vsi *vsi = &sc->vsi;
	struct iavf_rx_queue *que;
	int i, error = 0;

#ifdef INVARIANTS
	/* iflib is expected to request exactly one Rx queue per set */
	if_softc_ctx_t scctx = vsi->shared;
	MPASS(scctx->isc_nrxqsets > 0);
	MPASS(nrxqs == 1);
	MPASS(scctx->isc_nrxqsets == nrxqsets);
#endif

	/* Allocate queue structure memory */
	if (!(vsi->rx_queues =
	    (struct iavf_rx_queue *) malloc(sizeof(struct iavf_rx_queue) *
	    nrxqsets, M_IAVF, M_NOWAIT | M_ZERO))) {
		device_printf(iflib_get_dev(ctx), "Unable to allocate RX ring memory\n");
		error = ENOMEM;
		goto fail;
	}

	for (i = 0, que = vsi->rx_queues; i < nrxqsets; i++, que++) {
		struct rx_ring *rxr = &que->rxr;

		rxr->me = i;
		que->vsi = vsi;

		/* get the virtual and physical address of the hardware queues */
		rxr->tail = IAVF_QRX_TAIL1(rxr->me);
		rxr->rx_base = (union iavf_rx_desc *)vaddrs[i * nrxqs];
		rxr->rx_paddr = paddrs[i * nrxqs];
		rxr->que = que;
	}

	return (0);
fail:
	/* Release anything that was allocated before the failure */
	iavf_if_queues_free(ctx);
	return (error);
}
1072 
1073 /**
1074  * iavf_if_queues_free - Free driver queue memory
1075  * @ctx: the iflib context pointer
1076  *
1077  * Called by iflib to release memory allocated by the driver when setting up
1078  * Tx and Rx queues.
1079  *
1080  * @remark The ordering of this function and iavf_if_detach is not guaranteed.
1081  * It is possible for this function to be called either before or after the
1082  * iavf_if_detach. Thus, care must be taken to ensure that either ordering of
1083  * iavf_if_detach and iavf_if_queues_free is safe.
1084  */
1085 static void
1086 iavf_if_queues_free(if_ctx_t ctx)
1087 {
1088 	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
1089 	struct iavf_vsi *vsi = &sc->vsi;
1090 
1091 	if (!vsi->enable_head_writeback) {
1092 		struct iavf_tx_queue *que;
1093 		int i = 0;
1094 
1095 		for (i = 0, que = vsi->tx_queues; i < vsi->shared->isc_ntxqsets; i++, que++) {
1096 			struct tx_ring *txr = &que->txr;
1097 			if (txr->tx_rsq != NULL) {
1098 				free(txr->tx_rsq, M_IAVF);
1099 				txr->tx_rsq = NULL;
1100 			}
1101 		}
1102 	}
1103 
1104 	if (vsi->tx_queues != NULL) {
1105 		free(vsi->tx_queues, M_IAVF);
1106 		vsi->tx_queues = NULL;
1107 	}
1108 	if (vsi->rx_queues != NULL) {
1109 		free(vsi->rx_queues, M_IAVF);
1110 		vsi->rx_queues = NULL;
1111 	}
1112 }
1113 
1114 /**
1115  * iavf_check_aq_errors - Check for AdminQ errors
1116  * @sc: device softc
1117  *
1118  * Check the AdminQ registers for errors, and determine whether or not a reset
1119  * may be required to resolve them.
1120  *
1121  * @post if there are errors, the VF device will be stopped and a reset will
1122  * be requested.
1123  *
1124  * @returns zero if there are no issues, EBUSY if the device is resetting,
1125  * or EIO if there are any AQ errors.
1126  */
static int
iavf_check_aq_errors(struct iavf_sc *sc)
{
	struct iavf_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	u32 reg, oldreg;
	u8 aq_error = false;

	/* Inspect the receive (ARQ) length register first */
	oldreg = reg = rd32(hw, hw->aq.arq.len);

	/* Check if device is in reset */
	if (reg == 0xdeadbeef || reg == 0xffffffff) {
		device_printf(dev, "VF in reset\n");
		return (EBUSY);
	}

	/* Check for Admin queue errors */
	if (reg & IAVF_VF_ARQLEN1_ARQVFE_MASK) {
		device_printf(dev, "ARQ VF Error detected\n");
		reg &= ~IAVF_VF_ARQLEN1_ARQVFE_MASK;
		aq_error = true;
	}
	if (reg & IAVF_VF_ARQLEN1_ARQOVFL_MASK) {
		device_printf(dev, "ARQ Overflow Error detected\n");
		reg &= ~IAVF_VF_ARQLEN1_ARQOVFL_MASK;
		aq_error = true;
	}
	if (reg & IAVF_VF_ARQLEN1_ARQCRIT_MASK) {
		device_printf(dev, "ARQ Critical Error detected\n");
		reg &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK;
		aq_error = true;
	}
	/* Write back only when an error bit was cleared above */
	if (oldreg != reg)
		wr32(hw, hw->aq.arq.len, reg);

	/* Repeat the same checks for the transmit (ASQ) side */
	oldreg = reg = rd32(hw, hw->aq.asq.len);
	if (reg & IAVF_VF_ATQLEN1_ATQVFE_MASK) {
		device_printf(dev, "ASQ VF Error detected\n");
		reg &= ~IAVF_VF_ATQLEN1_ATQVFE_MASK;
		aq_error = true;
	}
	if (reg & IAVF_VF_ATQLEN1_ATQOVFL_MASK) {
		device_printf(dev, "ASQ Overflow Error detected\n");
		reg &= ~IAVF_VF_ATQLEN1_ATQOVFL_MASK;
		aq_error = true;
	}
	if (reg & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
		device_printf(dev, "ASQ Critical Error detected\n");
		reg &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK;
		aq_error = true;
	}
	if (oldreg != reg)
		wr32(hw, hw->aq.asq.len, reg);

	return (aq_error ? EIO : 0);
}
1183 
1184 /**
1185  * iavf_process_adminq - Process adminq responses from the PF
1186  * @sc: device softc
1187  * @pending: output parameter indicating how many messages remain
1188  *
1189  * Process the adminq to handle replies from the PF over the virtchnl
1190  * connection.
1191  *
1192  * @returns zero or an iavf_status code on failure
1193  */
static enum iavf_status
iavf_process_adminq(struct iavf_sc *sc, u16 *pending)
{
	enum iavf_status status = IAVF_SUCCESS;
	struct iavf_arq_event_info event;
	struct iavf_hw *hw = &sc->hw;
	struct virtchnl_msg *v_msg;
	int error = 0, loop = 0;
	u32 reg;

	/* Skip processing while a reset is pending for this VF */
	if (iavf_test_state(&sc->state, IAVF_STATE_RESET_PENDING)) {
		status = IAVF_ERR_ADMIN_QUEUE_ERROR;
		goto reenable_interrupt;
	}

	/* Bail out early on AQ register errors or an in-progress reset */
	error = iavf_check_aq_errors(sc);
	if (error) {
		status = IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
		goto reenable_interrupt;
	}

	/* Receive events into the softc's pre-allocated adminq buffer */
	event.buf_len = IAVF_AQ_BUF_SZ;
        event.msg_buf = sc->aq_buffer;
	bzero(event.msg_buf, IAVF_AQ_BUF_SZ);
	v_msg = (struct virtchnl_msg *)&event.desc;

	IAVF_VC_LOCK(sc);
	/* clean and process any events */
	do {
		status = iavf_clean_arq_element(hw, &event, pending);
		/*
		 * Also covers normal case when iavf_clean_arq_element()
		 * returns "IAVF_ERR_ADMIN_QUEUE_NO_WORK"
		 */
		if (status)
			break;
		iavf_vc_completion(sc, v_msg->v_opcode,
		    v_msg->v_retval, event.msg_buf, event.msg_len);
		/* Clear the buffer before reusing it for the next event */
		bzero(event.msg_buf, IAVF_AQ_BUF_SZ);
	} while (*pending && (loop++ < IAVF_ADM_LIMIT));
	IAVF_VC_UNLOCK(sc);

reenable_interrupt:
	/* Re-enable admin queue interrupt cause */
	reg = rd32(hw, IAVF_VFINT_ICR0_ENA1);
	reg |= IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK;
	wr32(hw, IAVF_VFINT_ICR0_ENA1, reg);

	return (status);
}
1244 
1245 /**
1246  * iavf_if_update_admin_status - Administrative status task
1247  * @ctx: iflib context
1248  *
1249  * Called by iflib to handle administrative status events. The iavf driver
1250  * uses this to process the adminq virtchnl messages outside of interrupt
1251  * context.
1252  */
1253 static void
1254 iavf_if_update_admin_status(if_ctx_t ctx)
1255 {
1256 	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
1257 	struct iavf_hw *hw = &sc->hw;
1258 	u16 pending = 0;
1259 
1260 	iavf_process_adminq(sc, &pending);
1261 	iavf_update_link_status(sc);
1262 
1263 	/*
1264 	 * If there are still messages to process, reschedule.
1265 	 * Otherwise, re-enable the Admin Queue interrupt.
1266 	 */
1267 	if (pending > 0)
1268 		iflib_admin_intr_deferred(ctx);
1269 	else
1270 		iavf_enable_adminq_irq(hw);
1271 }
1272 
1273 /**
1274  * iavf_if_multi_set - Set multicast address filters
1275  * @ctx: iflib context
1276  *
1277  * Called by iflib to update the current list of multicast filters for the
1278  * device.
1279  */
1280 static void
1281 iavf_if_multi_set(if_ctx_t ctx)
1282 {
1283 	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
1284 
1285 	iavf_multi_set(sc);
1286 }
1287 
1288 /**
1289  * iavf_if_mtu_set - Set the device MTU
1290  * @ctx: iflib context
1291  * @mtu: MTU value to set
1292  *
1293  * Called by iflib to set the device MTU.
1294  *
1295  * @returns zero on success, or EINVAL if the MTU is invalid.
1296  */
1297 static int
1298 iavf_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
1299 {
1300 	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
1301 	struct iavf_vsi *vsi = &sc->vsi;
1302 
1303 	IOCTL_DEBUGOUT("ioctl: SiOCSIFMTU (Set Interface MTU)");
1304 	if (mtu < IAVF_MIN_MTU || mtu > IAVF_MAX_MTU) {
1305 		device_printf(sc->dev, "mtu %d is not in valid range [%d-%d]\n",
1306 		    mtu, IAVF_MIN_MTU, IAVF_MAX_MTU);
1307 		return (EINVAL);
1308 	}
1309 
1310 	vsi->shared->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
1311 		ETHER_VLAN_ENCAP_LEN;
1312 
1313 	return (0);
1314 }
1315 
1316 /**
1317  * iavf_if_media_status - Report current media status
1318  * @ctx: iflib context
1319  * @ifmr: ifmedia request structure
1320  *
1321  * Called by iflib to report the current media status in the ifmr.
1322  */
1323 static void
1324 iavf_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
1325 {
1326 	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
1327 
1328 	iavf_media_status_common(sc, ifmr);
1329 }
1330 
1331 /**
1332  * iavf_if_media_change - Change the current media settings
1333  * @ctx: iflib context
1334  *
1335  * Called by iflib to change the current media settings.
1336  *
1337  * @returns zero on success, or an error code on failure.
1338  */
1339 static int
1340 iavf_if_media_change(if_ctx_t ctx)
1341 {
1342 	return iavf_media_change_common(iflib_get_ifp(ctx));
1343 }
1344 
1345 /**
1346  * iavf_if_promisc_set - Set device promiscuous mode
1347  * @ctx: iflib context
1348  * @flags: promiscuous configuration
1349  *
1350  * Called by iflib to request that the device enter promiscuous mode.
1351  *
1352  * @returns zero on success, or an error code on failure.
1353  */
1354 static int
1355 iavf_if_promisc_set(if_ctx_t ctx, int flags)
1356 {
1357 	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
1358 
1359 	return iavf_config_promisc(sc, flags);
1360 }
1361 
1362 /**
1363  * iavf_if_timer - Periodic timer called by iflib
1364  * @ctx: iflib context
1365  * @qid: The queue being triggered
1366  *
1367  * Called by iflib periodically as a timer task, so that the driver can handle
1368  * periodic work.
1369  *
1370  * @remark this timer is only called while the interface is up, even if
1371  * IFLIB_ADMIN_ALWAYS_RUN is set.
1372  */
1373 static void
1374 iavf_if_timer(if_ctx_t ctx, uint16_t qid)
1375 {
1376 	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
1377 	struct iavf_hw *hw = &sc->hw;
1378 	u32 val;
1379 
1380 	if (qid != 0)
1381 		return;
1382 
1383 	/* Check for when PF triggers a VF reset */
1384 	val = rd32(hw, IAVF_VFGEN_RSTAT) &
1385 	    IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
1386 	if (val != VIRTCHNL_VFR_VFACTIVE
1387 	    && val != VIRTCHNL_VFR_COMPLETED) {
1388 		iavf_dbg_info(sc, "reset in progress! (%d)\n", val);
1389 		return;
1390 	}
1391 
1392 	/* Fire off the adminq task */
1393 	iflib_admin_intr_deferred(ctx);
1394 
1395 	/* Update stats */
1396 	iavf_request_stats(sc);
1397 }
1398 
1399 /**
1400  * iavf_if_vlan_register - Register a VLAN
1401  * @ctx: iflib context
1402  * @vtag: the VLAN to register
1403  *
1404  * Register a VLAN filter for a given vtag.
1405  */
1406 static void
1407 iavf_if_vlan_register(if_ctx_t ctx, u16 vtag)
1408 {
1409 	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
1410 	struct iavf_vsi *vsi = &sc->vsi;
1411 
1412 	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
1413 		return;
1414 
1415 	/* Add VLAN 0 to list, for untagged traffic */
1416 	if (vsi->num_vlans == 0)
1417 		iavf_add_vlan_filter(sc, 0);
1418 
1419 	iavf_add_vlan_filter(sc, vtag);
1420 
1421 	++vsi->num_vlans;
1422 
1423 	iavf_send_vc_msg(sc, IAVF_FLAG_AQ_ADD_VLAN_FILTER);
1424 }
1425 
1426 /**
1427  * iavf_if_vlan_unregister - Unregister a VLAN
1428  * @ctx: iflib context
1429  * @vtag: the VLAN to remove
1430  *
1431  * Unregister (remove) a VLAN filter for the given vtag.
1432  */
1433 static void
1434 iavf_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
1435 {
1436 	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
1437 	struct iavf_vsi *vsi = &sc->vsi;
1438 	int i = 0;
1439 
1440 	if ((vtag == 0) || (vtag > 4095) || (vsi->num_vlans == 0))	/* Invalid */
1441 		return;
1442 
1443 	i = iavf_mark_del_vlan_filter(sc, vtag);
1444 	vsi->num_vlans -= i;
1445 
1446 	/* Remove VLAN filter 0 if the last VLAN is being removed */
1447 	if (vsi->num_vlans == 0)
1448 		i += iavf_mark_del_vlan_filter(sc, 0);
1449 
1450 	if (i > 0)
1451 		iavf_send_vc_msg(sc, IAVF_FLAG_AQ_DEL_VLAN_FILTER);
1452 }
1453 
1454 /**
1455  * iavf_if_get_counter - Get network statistic counters
1456  * @ctx: iflib context
1457  * @cnt: The counter to obtain
1458  *
1459  * Called by iflib to obtain the value of the specified counter.
1460  *
1461  * @returns the uint64_t counter value.
1462  */
1463 static uint64_t
1464 iavf_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1465 {
1466 	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
1467 	struct iavf_vsi *vsi = &sc->vsi;
1468 	if_t ifp = iflib_get_ifp(ctx);
1469 
1470 	switch (cnt) {
1471 	case IFCOUNTER_IPACKETS:
1472 		return (vsi->ipackets);
1473 	case IFCOUNTER_IERRORS:
1474 		return (vsi->ierrors);
1475 	case IFCOUNTER_OPACKETS:
1476 		return (vsi->opackets);
1477 	case IFCOUNTER_OERRORS:
1478 		return (vsi->oerrors);
1479 	case IFCOUNTER_COLLISIONS:
1480 		/* Collisions are by standard impossible in 40G/10G Ethernet */
1481 		return (0);
1482 	case IFCOUNTER_IBYTES:
1483 		return (vsi->ibytes);
1484 	case IFCOUNTER_OBYTES:
1485 		return (vsi->obytes);
1486 	case IFCOUNTER_IMCASTS:
1487 		return (vsi->imcasts);
1488 	case IFCOUNTER_OMCASTS:
1489 		return (vsi->omcasts);
1490 	case IFCOUNTER_IQDROPS:
1491 		return (vsi->iqdrops);
1492 	case IFCOUNTER_OQDROPS:
1493 		return (vsi->oqdrops);
1494 	case IFCOUNTER_NOPROTO:
1495 		return (vsi->noproto);
1496 	default:
1497 		return (if_get_counter_default(ifp, cnt));
1498 	}
1499 }
1500 
1501 /**
1502  * iavf_free_pci_resources - Free PCI resources
1503  * @sc: device softc
1504  *
1505  * Called to release the PCI resources allocated during attach. May be called
1506  * in the error flow of attach_pre, or during detach as part of cleanup.
1507  */
1508 static void
1509 iavf_free_pci_resources(struct iavf_sc *sc)
1510 {
1511 	struct iavf_vsi		*vsi = &sc->vsi;
1512 	struct iavf_rx_queue	*rx_que = vsi->rx_queues;
1513 	device_t                dev = sc->dev;
1514 
1515 	/* We may get here before stations are set up */
1516 	if (rx_que == NULL)
1517 		goto early;
1518 
1519 	/* Release all interrupts */
1520 	iflib_irq_free(vsi->ctx, &vsi->irq);
1521 
1522 	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
1523 		iflib_irq_free(vsi->ctx, &rx_que->que_irq);
1524 
1525 early:
1526 	if (sc->pci_mem != NULL)
1527 		bus_release_resource(dev, SYS_RES_MEMORY,
1528 		    rman_get_rid(sc->pci_mem), sc->pci_mem);
1529 }
1530 
1531 /**
1532  * iavf_setup_interface - Setup the device interface
1533  * @sc: device softc
1534  *
1535  * Called to setup some device interface settings, such as the ifmedia
1536  * structure.
1537  */
1538 static void
1539 iavf_setup_interface(struct iavf_sc *sc)
1540 {
1541 	struct iavf_vsi *vsi = &sc->vsi;
1542 	if_ctx_t ctx = vsi->ctx;
1543 	struct ifnet *ifp = iflib_get_ifp(ctx);
1544 
1545 	iavf_dbg_init(sc, "begin\n");
1546 
1547 	vsi->shared->isc_max_frame_size =
1548 	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
1549 	    + ETHER_VLAN_ENCAP_LEN;
1550 
1551 	iavf_set_initial_baudrate(ifp);
1552 
1553 	ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1554 	ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);
1555 }
1556 
1557 /**
1558  * iavf_msix_adminq - Admin Queue interrupt handler
1559  * @arg: void pointer to the device softc
1560  *
1561  * Interrupt handler for the non-queue interrupt causes. Primarily this will
1562  * be the adminq interrupt, but also includes other miscellaneous causes.
1563  *
1564  * @returns FILTER_SCHEDULE_THREAD if the admin task needs to be run, otherwise
1565  * returns FILTER_HANDLED.
1566  */
static int
iavf_msix_adminq(void *arg)
{
	struct iavf_sc	*sc = (struct iavf_sc *)arg;
	struct iavf_hw	*hw = &sc->hw;
	u32		reg, mask;

	/* Count the interrupt for the admin_irq statistic */
	++sc->admin_irq;

	/* Ignore spurious interrupts before the driver is initialized */
	if (!iavf_test_state(&sc->state, IAVF_STATE_INITIALIZED))
		return (FILTER_HANDLED);

        reg = rd32(hw, IAVF_VFINT_ICR01);
	/*
	 * For masking off interrupt causes that need to be handled before
	 * they can be re-enabled
	 */
        mask = rd32(hw, IAVF_VFINT_ICR0_ENA1);

	/* Check on the cause */
	if (reg & IAVF_VFINT_ICR01_ADMINQ_MASK) {
		/* Leave the adminq cause masked until the task handles it */
		mask &= ~IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK;

		/* Process messages outside of the iflib context lock */
		taskqueue_enqueue(sc->vc_tq, &sc->vc_task);
	}

	wr32(hw, IAVF_VFINT_ICR0_ENA1, mask);
	iavf_enable_adminq_irq(hw);

	return (FILTER_HANDLED);
}
1599 
1600 /**
1601  * iavf_enable_intr - Enable device interrupts
1602  * @vsi: the main VSI
1603  *
1604  * Called to enable all queue interrupts.
1605  */
1606 void
1607 iavf_enable_intr(struct iavf_vsi *vsi)
1608 {
1609 	struct iavf_hw *hw = vsi->hw;
1610 	struct iavf_rx_queue *que = vsi->rx_queues;
1611 
1612 	iavf_enable_adminq_irq(hw);
1613 	for (int i = 0; i < vsi->num_rx_queues; i++, que++)
1614 		iavf_enable_queue_irq(hw, que->rxr.me);
1615 }
1616 
1617 /**
1618  * iavf_disable_intr - Disable device interrupts
1619  * @vsi: the main VSI
1620  *
1621  * Called to disable all interrupts
1622  *
1623  * @remark we never disable the admin status interrupt.
1624  */
1625 void
1626 iavf_disable_intr(struct iavf_vsi *vsi)
1627 {
1628         struct iavf_hw *hw = vsi->hw;
1629         struct iavf_rx_queue *que = vsi->rx_queues;
1630 
1631 	for (int i = 0; i < vsi->num_rx_queues; i++, que++)
1632 		iavf_disable_queue_irq(hw, que->rxr.me);
1633 }
1634 
1635 /**
1636  * iavf_enable_queue_irq - Enable IRQ register for a queue interrupt
1637  * @hw: hardware structure
1638  * @id: IRQ vector to enable
1639  *
1640  * Writes the IAVF_VFINT_DYN_CTLN1 register to enable a given IRQ interrupt.
1641  */
1642 static void
1643 iavf_enable_queue_irq(struct iavf_hw *hw, int id)
1644 {
1645 	u32		reg;
1646 
1647 	reg = IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
1648 	    IAVF_VFINT_DYN_CTLN1_CLEARPBA_MASK |
1649 	    IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK;
1650 	wr32(hw, IAVF_VFINT_DYN_CTLN1(id), reg);
1651 }
1652 
1653 /**
1654  * iavf_disable_queue_irq - Disable IRQ register for a queue interrupt
1655  * @hw: hardware structure
1656  * @id: IRQ vector to disable
1657  *
1658  * Writes the IAVF_VFINT_DYN_CTLN1 register to disable a given IRQ interrupt.
1659  */
static void
iavf_disable_queue_irq(struct iavf_hw *hw, int id)
{
	/* Write with INTENA clear, keeping only the ITR index bits set */
	wr32(hw, IAVF_VFINT_DYN_CTLN1(id),
	    IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
	/* NOTE(review): read-back presumably flushes the write — confirm */
	rd32(hw, IAVF_VFGEN_RSTAT);
}
1667 
1668 /**
1669  * iavf_configure_itr - Get initial ITR values from tunable values.
1670  * @sc: device softc
1671  *
1672  * Load the initial tunable values for the ITR configuration.
1673  */
static void
iavf_configure_itr(struct iavf_sc *sc)
{
	/* Apply the tunable Tx and Rx interrupt moderation settings */
	iavf_configure_tx_itr(sc);
	iavf_configure_rx_itr(sc);
}
1680 
1681 /**
1682  * iavf_set_queue_rx_itr - Update Rx ITR value
1683  * @que: Rx queue to update
1684  *
1685  * Provide an update to the queue RX interrupt moderation value.
1686  */
static void
iavf_set_queue_rx_itr(struct iavf_rx_queue *que)
{
	struct iavf_vsi	*vsi = que->vsi;
	struct iavf_hw	*hw = vsi->hw;
	struct rx_ring	*rxr = &que->rxr;

	/* Idle, do nothing */
	if (rxr->bytes == 0)
		return;

	/* Update the hardware if needed */
	if (rxr->itr != vsi->rx_itr_setting) {
		rxr->itr = vsi->rx_itr_setting;
		/* Program the Rx ITR register for this queue's vector */
		wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR,
		    que->rxr.me), rxr->itr);
	}
}
1705 
1706 /**
1707  * iavf_msix_que - Main Rx queue interrupt handler
1708  * @arg: void pointer to the Rx queue
1709  *
1710  * Main MSI-X interrupt handler for Rx queue interrupts
1711  *
1712  * @returns FILTER_SCHEDULE_THREAD if the main thread for Rx needs to run,
1713  * otherwise returns FILTER_HANDLED.
1714  */
1715 static int
1716 iavf_msix_que(void *arg)
1717 {
1718 	struct iavf_rx_queue *rx_que = (struct iavf_rx_queue *)arg;
1719 	struct iavf_sc *sc = rx_que->vsi->back;
1720 
1721 	++rx_que->irqs;
1722 
1723 	if (!iavf_test_state(&sc->state, IAVF_STATE_RUNNING))
1724 		return (FILTER_HANDLED);
1725 
1726 	iavf_set_queue_rx_itr(rx_que);
1727 
1728 	return (FILTER_SCHEDULE_THREAD);
1729 }
1730 
1731 /**
1732  * iavf_update_link_status - Update iflib Link status
1733  * @sc: device softc
1734  *
1735  * Notify the iflib stack of changes in link status. Called after the device
1736  * receives a virtchnl message indicating a change in link status.
1737  */
1738 void
1739 iavf_update_link_status(struct iavf_sc *sc)
1740 {
1741 	struct iavf_vsi *vsi = &sc->vsi;
1742 	u64 baudrate;
1743 
1744 	if (sc->link_up){
1745 		if (vsi->link_active == FALSE) {
1746 			vsi->link_active = TRUE;
1747 			baudrate = iavf_baudrate_from_link_speed(sc);
1748 			iavf_dbg_info(sc, "baudrate: %llu\n", (unsigned long long)baudrate);
1749 			iflib_link_state_change(vsi->ctx, LINK_STATE_UP, baudrate);
1750 		}
1751 	} else { /* Link down */
1752 		if (vsi->link_active == TRUE) {
1753 			vsi->link_active = FALSE;
1754 			iflib_link_state_change(vsi->ctx, LINK_STATE_DOWN, 0);
1755 		}
1756 	}
1757 }
1758 
1759 /**
1760  * iavf_stop - Stop the interface
1761  * @sc: device softc
1762  *
1763  * This routine disables all traffic on the adapter by disabling interrupts
1764  * and sending a message to the PF to tell it to stop the hardware
1765  * Tx/Rx LAN queues.
1766  */
static void
iavf_stop(struct iavf_sc *sc)
{
	/* Mark the interface as no longer running */
	iavf_clear_state(&sc->state, IAVF_STATE_RUNNING);

	/* Mask the queue interrupts */
	iavf_disable_intr(&sc->vsi);

	/* Ask the PF to stop the Tx/Rx hardware queues */
	iavf_disable_queues_with_retries(sc);
}
1776 
1777 /**
1778  * iavf_if_stop - iflib stop handler
1779  * @ctx: iflib context
1780  *
1781  * Call iavf_stop to stop the interface.
1782  */
1783 static void
1784 iavf_if_stop(if_ctx_t ctx)
1785 {
1786 	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
1787 
1788 	iavf_stop(sc);
1789 }
1790 
1791 /**
1792  * iavf_del_mac_filter - Delete a MAC filter
1793  * @sc: device softc
1794  * @macaddr: MAC address to remove
1795  *
1796  * Marks a MAC filter for deletion.
1797  *
1798  * @returns zero if the filter existed, or ENOENT if it did not.
1799  */
1800 static int
1801 iavf_del_mac_filter(struct iavf_sc *sc, u8 *macaddr)
1802 {
1803 	struct iavf_mac_filter	*f;
1804 
1805 	f = iavf_find_mac_filter(sc, macaddr);
1806 	if (f == NULL)
1807 		return (ENOENT);
1808 
1809 	f->flags |= IAVF_FILTER_DEL;
1810 	return (0);
1811 }
1812 
1813 /**
1814  * iavf_init_tx_rsqs - Initialize Report Status array
1815  * @vsi: the main VSI
1816  *
1817  * Set the Report Status queue fields to zero in order to initialize the
1818  * queues for transmit.
1819  */
1820 void
1821 iavf_init_tx_rsqs(struct iavf_vsi *vsi)
1822 {
1823 	if_softc_ctx_t scctx = vsi->shared;
1824 	struct iavf_tx_queue *tx_que;
1825 	int i, j;
1826 
1827 	for (i = 0, tx_que = vsi->tx_queues; i < vsi->num_tx_queues; i++, tx_que++) {
1828 		struct tx_ring *txr = &tx_que->txr;
1829 
1830 		txr->tx_rs_cidx = txr->tx_rs_pidx;
1831 
1832 		/* Initialize the last processed descriptor to be the end of
1833 		 * the ring, rather than the start, so that we avoid an
1834 		 * off-by-one error when calculating how many descriptors are
1835 		 * done in the credits_update function.
1836 		 */
1837 		txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
1838 
1839 		for (j = 0; j < scctx->isc_ntxd[0]; j++)
1840 			txr->tx_rsq[j] = QIDX_INVALID;
1841 	}
1842 }
1843 
1844 /**
1845  * iavf_init_tx_cidx - Initialize Tx cidx values
1846  * @vsi: the main VSI
1847  *
1848  * Initialize the tx_cidx_processed values for Tx queues in order to
1849  * initialize the Tx queues for transmit.
1850  */
1851 void
1852 iavf_init_tx_cidx(struct iavf_vsi *vsi)
1853 {
1854 	if_softc_ctx_t scctx = vsi->shared;
1855 	struct iavf_tx_queue *tx_que;
1856 	int i;
1857 
1858 	for (i = 0, tx_que = vsi->tx_queues; i < vsi->num_tx_queues; i++, tx_que++) {
1859 		struct tx_ring *txr = &tx_que->txr;
1860 
1861 		txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
1862 	}
1863 }
1864 
1865 /**
1866  * iavf_add_device_sysctls - Add device sysctls for configuration
1867  * @sc: device softc
1868  *
1869  * Add the main sysctl nodes and sysctls for device configuration.
1870  */
static void
iavf_add_device_sysctls(struct iavf_sc *sc)
{
	struct iavf_vsi *vsi = &sc->vsi;
	device_t dev = sc->dev;
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid_list *debug_list;

	/* Sysctls shared with the other iavf driver variants */
	iavf_add_device_sysctls_common(sc);

	debug_list = iavf_create_debug_sysctl_tree(sc);

	iavf_add_debug_sysctls_common(sc, debug_list);

	/* Read-only view of the queue-to-MSI-X-vector assignments */
	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "queue_interrupt_table", CTLTYPE_STRING | CTLFLAG_RD,
	    sc, 0, iavf_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");

#ifdef IAVF_DEBUG
	/* Debug-only knobs for manually requesting device resets */
	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "do_vf_reset", CTLTYPE_INT | CTLFLAG_WR,
	    sc, 0, iavf_sysctl_vf_reset, "A", "Request a VF reset from PF");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "do_vflr_reset", CTLTYPE_INT | CTLFLAG_WR,
	    sc, 0, iavf_sysctl_vflr_reset, "A", "Request a VFLR reset from HW");
#endif

	/* Add stats sysctls */
	iavf_add_vsi_sysctls(dev, vsi, ctx, "vsi");

	iavf_add_queues_sysctls(dev, vsi);
}
1904 
1905 /**
1906  * iavf_add_queues_sysctls - Add per-queue sysctls
1907  * @dev: device pointer
1908  * @vsi: the main VSI
1909  *
1910  * Add sysctls for each Tx and Rx queue.
1911  */
void
iavf_add_queues_sysctls(device_t dev, struct iavf_vsi *vsi)
{
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid_list *vsi_list, *queue_list;
	struct sysctl_oid *queue_node;
	char queue_namebuf[32];

	struct iavf_rx_queue *rx_que;
	struct iavf_tx_queue *tx_que;
	struct tx_ring *txr;
	struct rx_ring *rxr;

	/* All queue nodes hang off the VSI sysctl node */
	vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);

	/* Queue statistics */
	for (int q = 0; q < vsi->num_rx_queues; q++) {
		bzero(queue_namebuf, sizeof(queue_namebuf));
		/* One "rxqNN" node per Rx queue */
		snprintf(queue_namebuf, IAVF_QUEUE_NAME_LEN, "rxq%02d", q);
		queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
		    OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "RX Queue #");
		queue_list = SYSCTL_CHILDREN(queue_node);

		rx_que = &(vsi->rx_queues[q]);
		rxr = &(rx_que->rxr);

		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
				CTLFLAG_RD, &(rx_que->irqs),
				"irqs on this queue (both Tx and Rx)");

		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "packets",
				CTLFLAG_RD, &(rxr->rx_packets),
				"Queue Packets Received");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "bytes",
				CTLFLAG_RD, &(rxr->rx_bytes),
				"Queue Bytes Received");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "desc_err",
				CTLFLAG_RD, &(rxr->desc_errs),
				"Queue Rx Descriptor Errors");
		SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "itr",
				CTLFLAG_RD, &(rxr->itr), 0,
				"Queue Rx ITR Interval");
	}
	for (int q = 0; q < vsi->num_tx_queues; q++) {
		bzero(queue_namebuf, sizeof(queue_namebuf));
		/* One "txqNN" node per Tx queue */
		snprintf(queue_namebuf, IAVF_QUEUE_NAME_LEN, "txq%02d", q);
		queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
		    OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "TX Queue #");
		queue_list = SYSCTL_CHILDREN(queue_node);

		tx_que = &(vsi->tx_queues[q]);
		txr = &(tx_que->txr);

		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso",
				CTLFLAG_RD, &(tx_que->tso),
				"TSO");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mss_too_small",
				CTLFLAG_RD, &(txr->mss_too_small),
				"TSO sends with an MSS less than 64");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "packets",
				CTLFLAG_RD, &(txr->tx_packets),
				"Queue Packets Transmitted");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "bytes",
				CTLFLAG_RD, &(txr->tx_bytes),
				"Queue Bytes Transmitted");
		SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "itr",
				CTLFLAG_RD, &(txr->itr), 0,
				"Queue Tx ITR Interval");
	}
}
1982 
1983 /**
1984  * iavf_driver_is_detaching - Check if the driver is detaching/unloading
1985  * @sc: device private softc
1986  *
1987  * @returns true if the driver is detaching, false otherwise.
1988  *
1989  * @remark on newer kernels, take advantage of iflib_in_detach in order to
1990  * report detachment correctly as early as possible.
1991  *
1992  * @remark this function is used by various code paths that want to avoid
1993  * running if the driver is about to be removed. This includes sysctls and
1994  * other driver access points. Note that it does not fully resolve
1995  * detach-based race conditions as it is possible for a thread to race with
1996  * iflib_in_detach.
1997  */
1998 bool
1999 iavf_driver_is_detaching(struct iavf_sc *sc)
2000 {
2001 	return (!iavf_test_state(&sc->state, IAVF_STATE_INITIALIZED) ||
2002 		iflib_in_detach(sc->vsi.ctx));
2003 }
2004 
2005 /**
2006  * iavf_sysctl_queue_interrupt_table - Sysctl for displaying Tx queue mapping
2007  * @oidp: sysctl oid structure
2008  * @arg1: void pointer to device softc
2009  * @arg2: unused
2010  * @req: sysctl request pointer
2011  *
2012  * Print out mapping of TX queue indexes and Rx queue indexes to MSI-X vectors.
2013  *
2014  * @returns zero on success, or an error code on failure.
2015  */
2016 static int
2017 iavf_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS)
2018 {
2019 	struct iavf_sc *sc = (struct iavf_sc *)arg1;
2020 	struct iavf_vsi *vsi = &sc->vsi;
2021 	device_t dev = sc->dev;
2022 	struct sbuf *buf;
2023 	int error = 0;
2024 
2025 	struct iavf_rx_queue *rx_que;
2026 	struct iavf_tx_queue *tx_que;
2027 
2028 	UNREFERENCED_2PARAMETER(arg2, oidp);
2029 
2030 	if (iavf_driver_is_detaching(sc))
2031 		return (ESHUTDOWN);
2032 
2033 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
2034 	if (!buf) {
2035 		device_printf(dev, "Could not allocate sbuf for output.\n");
2036 		return (ENOMEM);
2037 	}
2038 
2039 	sbuf_cat(buf, "\n");
2040 	for (int i = 0; i < vsi->num_rx_queues; i++) {
2041 		rx_que = &vsi->rx_queues[i];
2042 		sbuf_printf(buf, "(rxq %3d): %d\n", i, rx_que->msix);
2043 	}
2044 	for (int i = 0; i < vsi->num_tx_queues; i++) {
2045 		tx_que = &vsi->tx_queues[i];
2046 		sbuf_printf(buf, "(txq %3d): %d\n", i, tx_que->msix);
2047 	}
2048 
2049 	error = sbuf_finish(buf);
2050 	if (error)
2051 		device_printf(dev, "Error finishing sbuf: %d\n", error);
2052 	sbuf_delete(buf);
2053 
2054 	return (error);
2055 }
2056 
2057 #ifdef IAVF_DEBUG
2058 #define CTX_ACTIVE(ctx) ((if_getdrvflags(iflib_get_ifp(ctx)) & IFF_DRV_RUNNING))
2059 
2060 /**
2061  * iavf_sysctl_vf_reset - Request a VF reset
2062  * @oidp: sysctl oid pointer
2063  * @arg1: void pointer to device softc
2064  * @arg2: unused
2065  * @req: sysctl request pointer
2066  *
2067  * Request a VF reset for the device.
2068  *
2069  * @returns zero on success, or an error code on failure.
2070  */
2071 static int
2072 iavf_sysctl_vf_reset(SYSCTL_HANDLER_ARGS)
2073 {
2074 	struct iavf_sc *sc = (struct iavf_sc *)arg1;
2075 	int do_reset = 0, error = 0;
2076 
2077 	UNREFERENCED_PARAMETER(arg2);
2078 
2079 	if (iavf_driver_is_detaching(sc))
2080 		return (ESHUTDOWN);
2081 
2082 	error = sysctl_handle_int(oidp, &do_reset, 0, req);
2083 	if ((error) || (req->newptr == NULL))
2084 		return (error);
2085 
2086 	if (do_reset == 1) {
2087 		iavf_reset(sc);
2088 		if (CTX_ACTIVE(sc->vsi.ctx))
2089 			iflib_request_reset(sc->vsi.ctx);
2090 	}
2091 
2092 	return (error);
2093 }
2094 
2095 /**
2096  * iavf_sysctl_vflr_reset - Trigger a PCIe FLR for the device
2097  * @oidp: sysctl oid pointer
2098  * @arg1: void pointer to device softc
2099  * @arg2: unused
2100  * @req: sysctl request pointer
2101  *
2102  * Sysctl callback to trigger a PCIe FLR.
2103  *
2104  * @returns zero on success, or an error code on failure.
2105  */
2106 static int
2107 iavf_sysctl_vflr_reset(SYSCTL_HANDLER_ARGS)
2108 {
2109 	struct iavf_sc *sc = (struct iavf_sc *)arg1;
2110 	device_t dev = sc->dev;
2111 	int do_reset = 0, error = 0;
2112 
2113 	UNREFERENCED_PARAMETER(arg2);
2114 
2115 	if (iavf_driver_is_detaching(sc))
2116 		return (ESHUTDOWN);
2117 
2118 	error = sysctl_handle_int(oidp, &do_reset, 0, req);
2119 	if ((error) || (req->newptr == NULL))
2120 		return (error);
2121 
2122 	if (do_reset == 1) {
2123 		if (!pcie_flr(dev, max(pcie_get_max_completion_timeout(dev) / 1000, 10), true)) {
2124 			device_printf(dev, "PCIE FLR failed\n");
2125 			error = EIO;
2126 		}
2127 		else if (CTX_ACTIVE(sc->vsi.ctx))
2128 			iflib_request_reset(sc->vsi.ctx);
2129 	}
2130 
2131 	return (error);
2132 }
2133 #undef CTX_ACTIVE
2134 #endif
2135