/* SPDX-License-Identifier: BSD-3-Clause */
/*  Copyright (c) 2024, Intel Corporation
 *  All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions are met:
 *
 *   1. Redistributions of source code must retain the above copyright notice,
 *      this list of conditions and the following disclaimer.
 *
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 *   3. Neither the name of the Intel Corporation nor the names of its
 *      contributors may be used to endorse or promote products derived from
 *      this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file if_iavf_iflib.c
 * @brief iflib driver implementation
 *
 * Contains the main entry point for the iflib driver implementation. It
 * implements the various ifdi driver methods, and sets up the module and
 * driver values to load an iflib driver.
 */
40ca853deeSEric Joyner
41ca853deeSEric Joyner #include "iavf_iflib.h"
42ca853deeSEric Joyner #include "iavf_vc_common.h"
43ca853deeSEric Joyner
44ca853deeSEric Joyner #include "iavf_drv_info.h"
45ca853deeSEric Joyner #include "iavf_sysctls_iflib.h"
46ca853deeSEric Joyner
/*********************************************************************
 *  Function prototypes
 *********************************************************************/

/* iflib device-interface (ifdi) method implementations */
static void	 *iavf_register(device_t dev);
static int	 iavf_if_attach_pre(if_ctx_t ctx);
static int	 iavf_if_attach_post(if_ctx_t ctx);
static int	 iavf_if_detach(if_ctx_t ctx);
static int	 iavf_if_shutdown(if_ctx_t ctx);
static int	 iavf_if_suspend(if_ctx_t ctx);
static int	 iavf_if_resume(if_ctx_t ctx);
static int	 iavf_if_msix_intr_assign(if_ctx_t ctx, int msix);
static void	 iavf_if_enable_intr(if_ctx_t ctx);
static void	 iavf_if_disable_intr(if_ctx_t ctx);
static int	 iavf_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid);
static int	 iavf_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid);
static int	 iavf_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets);
static int	 iavf_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
static void	 iavf_if_queues_free(if_ctx_t ctx);
static void	 iavf_if_update_admin_status(if_ctx_t ctx);
static void	 iavf_if_multi_set(if_ctx_t ctx);
static int	 iavf_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
static void	 iavf_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr);
static int	 iavf_if_media_change(if_ctx_t ctx);
static int	 iavf_if_promisc_set(if_ctx_t ctx, int flags);
static void	 iavf_if_timer(if_ctx_t ctx, uint16_t qid);
static void	 iavf_if_vlan_register(if_ctx_t ctx, u16 vtag);
static void	 iavf_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
static uint64_t	 iavf_if_get_counter(if_ctx_t ctx, ift_counter cnt);
static void	 iavf_if_init(if_ctx_t ctx);
static void	 iavf_if_stop(if_ctx_t ctx);
static bool	 iavf_if_needs_restart(if_ctx_t, enum iflib_restart_event);

/* Internal device setup/teardown helpers */
static int	 iavf_allocate_pci_resources(struct iavf_sc *);
static void	 iavf_free_pci_resources(struct iavf_sc *);
static void	 iavf_setup_interface(struct iavf_sc *);
static void	 iavf_add_device_sysctls(struct iavf_sc *);
static void	 iavf_enable_queue_irq(struct iavf_hw *, int);
static void	 iavf_disable_queue_irq(struct iavf_hw *, int);
static void	 iavf_stop(struct iavf_sc *);

/* Filter management and interrupt handlers */
static int	 iavf_del_mac_filter(struct iavf_sc *sc, u8 *macaddr);
static int	 iavf_msix_que(void *);
static int	 iavf_msix_adminq(void *);
static void	 iavf_configure_itr(struct iavf_sc *sc);

/* sysctl handlers */
static int	 iavf_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS);
#ifdef IAVF_DEBUG
static int	 iavf_sysctl_vf_reset(SYSCTL_HANDLER_ARGS);
static int	 iavf_sysctl_vflr_reset(SYSCTL_HANDLER_ARGS);
#endif

/* Virtual channel (PF<->VF mailbox) processing */
static enum iavf_status iavf_process_adminq(struct iavf_sc *, u16 *);
static void	 iavf_vc_task(void *arg, int pending __unused);
static int	 iavf_setup_vc_tq(struct iavf_sc *sc);
static int	 iavf_vc_sleep_wait(struct iavf_sc *sc, u32 op);
102ca853deeSEric Joyner
/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

/**
 * @var iavf_methods
 * @brief device methods for the iavf driver
 *
 * Device method callbacks used to interact with the driver. For iflib this
 * primarily resolves to the default iflib implementations; only
 * device_register is driver-specific, returning the shared context.
 */
static device_method_t iavf_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, iavf_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
	DEVMETHOD_END
};
123ca853deeSEric Joyner
/* newbus driver definition; softc is the iavf device private structure */
static driver_t iavf_driver = {
	"iavf", iavf_methods, sizeof(struct iavf_sc),
};
127ca853deeSEric Joyner
/* Register the driver on the PCI bus and declare module metadata */
DRIVER_MODULE(iavf, pci, iavf_driver, 0, 0);
MODULE_VERSION(iavf, 1);

MODULE_DEPEND(iavf, pci, 1, 1, 1);
MODULE_DEPEND(iavf, ether, 1, 1, 1);
MODULE_DEPEND(iavf, iflib, 1, 1, 1);

/* Export PNP device IDs so devmatch(8) can auto-load the module */
IFLIB_PNP_INFO(pci, iavf, iavf_vendor_info_array);

/**
 * @var M_IAVF
 * @brief main iavf driver allocation type
 *
 * malloc(9) allocation type used by the majority of memory allocations in the
 * iavf iflib driver.
 */
MALLOC_DEFINE(M_IAVF, "iavf", "iavf driver allocations");
145ca853deeSEric Joyner
/* iflib ifdi method table mapping each IFDI_* call to its implementation */
static device_method_t iavf_if_methods[] = {
	DEVMETHOD(ifdi_attach_pre, iavf_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, iavf_if_attach_post),
	DEVMETHOD(ifdi_detach, iavf_if_detach),
	DEVMETHOD(ifdi_shutdown, iavf_if_shutdown),
	DEVMETHOD(ifdi_suspend, iavf_if_suspend),
	DEVMETHOD(ifdi_resume, iavf_if_resume),
	DEVMETHOD(ifdi_init, iavf_if_init),
	DEVMETHOD(ifdi_stop, iavf_if_stop),
	DEVMETHOD(ifdi_msix_intr_assign, iavf_if_msix_intr_assign),
	DEVMETHOD(ifdi_intr_enable, iavf_if_enable_intr),
	DEVMETHOD(ifdi_intr_disable, iavf_if_disable_intr),
	DEVMETHOD(ifdi_rx_queue_intr_enable, iavf_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queue_intr_enable, iavf_if_tx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queues_alloc, iavf_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, iavf_if_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, iavf_if_queues_free),
	DEVMETHOD(ifdi_update_admin_status, iavf_if_update_admin_status),
	DEVMETHOD(ifdi_multi_set, iavf_if_multi_set),
	DEVMETHOD(ifdi_mtu_set, iavf_if_mtu_set),
	DEVMETHOD(ifdi_media_status, iavf_if_media_status),
	DEVMETHOD(ifdi_media_change, iavf_if_media_change),
	DEVMETHOD(ifdi_promisc_set, iavf_if_promisc_set),
	DEVMETHOD(ifdi_timer, iavf_if_timer),
	DEVMETHOD(ifdi_vlan_register, iavf_if_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister, iavf_if_vlan_unregister),
	DEVMETHOD(ifdi_get_counter, iavf_if_get_counter),
	DEVMETHOD(ifdi_needs_restart, iavf_if_needs_restart),
	DEVMETHOD_END
};
176ca853deeSEric Joyner
/* Driver object handed to iflib via the shared context (isc_driver) */
static driver_t iavf_if_driver = {
	"iavf_if", iavf_if_methods, sizeof(struct iavf_sc)
};
180ca853deeSEric Joyner
/*
 * Tx/Rx descriptor handling implementations, selected at attach time based
 * on whether head write-back (hwb) or descriptor write-back (dwb) is used.
 */
extern struct if_txrx iavf_txrx_hwb;
extern struct if_txrx iavf_txrx_dwb;

/*
 * Shared context template describing device limits and capabilities to
 * iflib: one free list, one Tx and one Rx queue per qset, one admin
 * interrupt, plus descriptor-count bounds and defaults.
 */
static struct if_shared_ctx iavf_sctx = {
	.isc_magic = IFLIB_MAGIC,
	.isc_q_align = PAGE_SIZE,
	.isc_tx_maxsize = IAVF_MAX_FRAME,
	.isc_tx_maxsegsize = IAVF_MAX_FRAME,
	.isc_tso_maxsize = IAVF_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = IAVF_MAX_DMA_SEG_SIZE,
	.isc_rx_maxsize = IAVF_MAX_FRAME,
	.isc_rx_nsegments = IAVF_MAX_RX_SEGS,
	.isc_rx_maxsegsize = IAVF_MAX_FRAME,
	.isc_nfl = 1,
	.isc_ntxqs = 1,
	.isc_nrxqs = 1,

	.isc_admin_intrcnt = 1,
	.isc_vendor_info = iavf_vendor_info_array,
	.isc_driver_version = __DECONST(char *, iavf_driver_version),
	.isc_driver = &iavf_if_driver,
	.isc_flags = IFLIB_NEED_SCRATCH | IFLIB_NEED_ZERO_CSUM | IFLIB_TSO_INIT_IP | IFLIB_IS_VF,

	.isc_nrxd_min = {IAVF_MIN_RING},
	.isc_ntxd_min = {IAVF_MIN_RING},
	.isc_nrxd_max = {IAVF_MAX_RING},
	.isc_ntxd_max = {IAVF_MAX_RING},
	.isc_nrxd_default = {IAVF_DEFAULT_RING},
	.isc_ntxd_default = {IAVF_DEFAULT_RING},
};
211ca853deeSEric Joyner
212ca853deeSEric Joyner /*** Functions ***/
213ca853deeSEric Joyner
/**
 * iavf_register - iflib callback to obtain the shared context pointer
 * @dev: the device being registered
 *
 * Called when the driver is first being attached to the driver. This function
 * is used by iflib to obtain a pointer to the shared context structure which
 * describes the device features.
 *
 * @returns a pointer to the iavf shared context structure.
 */
static void *
iavf_register(device_t dev __unused)
{
	/* The same template is shared by every iavf instance. */
	return (&iavf_sctx);
}
229ca853deeSEric Joyner
/**
 * iavf_allocate_pci_resources - Allocate PCI resources
 * @sc: the device private softc
 *
 * Allocate PCI resources used by the iflib driver.
 *
 * @remark thin wrapper around the logic shared with the legacy driver.
 *
 * @returns zero or a non-zero error code on failure
 */
static int
iavf_allocate_pci_resources(struct iavf_sc *sc)
{
	return iavf_allocate_pci_resources_common(sc);
}
243ca853deeSEric Joyner
244ca853deeSEric Joyner /**
245ca853deeSEric Joyner * iavf_if_attach_pre - Begin attaching the device to the driver
246ca853deeSEric Joyner * @ctx: the iflib context pointer
247ca853deeSEric Joyner *
248ca853deeSEric Joyner * Called by iflib to begin the attach process. Allocates resources and
249ca853deeSEric Joyner * initializes the hardware for operation.
250ca853deeSEric Joyner *
251ca853deeSEric Joyner * @returns zero or a non-zero error code on failure.
252ca853deeSEric Joyner */
253ca853deeSEric Joyner static int
iavf_if_attach_pre(if_ctx_t ctx)254ca853deeSEric Joyner iavf_if_attach_pre(if_ctx_t ctx)
255ca853deeSEric Joyner {
256ca853deeSEric Joyner device_t dev;
257ca853deeSEric Joyner struct iavf_sc *sc;
258ca853deeSEric Joyner struct iavf_hw *hw;
259ca853deeSEric Joyner struct iavf_vsi *vsi;
260ca853deeSEric Joyner if_softc_ctx_t scctx;
261ca853deeSEric Joyner int error = 0;
262ca853deeSEric Joyner
263ca853deeSEric Joyner /* Setup pointers */
264ca853deeSEric Joyner dev = iflib_get_dev(ctx);
265ca853deeSEric Joyner sc = iavf_sc_from_ctx(ctx);
266ca853deeSEric Joyner
267ca853deeSEric Joyner vsi = &sc->vsi;
268ca853deeSEric Joyner vsi->back = sc;
269ca853deeSEric Joyner sc->dev = sc->osdep.dev = dev;
270ca853deeSEric Joyner hw = &sc->hw;
271ca853deeSEric Joyner
272ca853deeSEric Joyner vsi->dev = dev;
273ca853deeSEric Joyner vsi->hw = &sc->hw;
274ca853deeSEric Joyner vsi->num_vlans = 0;
275ca853deeSEric Joyner vsi->ctx = ctx;
276ca853deeSEric Joyner sc->media = iflib_get_media(ctx);
277ca853deeSEric Joyner vsi->ifp = iflib_get_ifp(ctx);
278ca853deeSEric Joyner vsi->shared = scctx = iflib_get_softc_ctx(ctx);
279ca853deeSEric Joyner
280ca853deeSEric Joyner iavf_save_tunables(sc);
281ca853deeSEric Joyner
282ca853deeSEric Joyner /* Setup VC mutex */
283ca853deeSEric Joyner snprintf(sc->vc_mtx_name, sizeof(sc->vc_mtx_name),
284ca853deeSEric Joyner "%s:vc", device_get_nameunit(dev));
285ca853deeSEric Joyner mtx_init(&sc->vc_mtx, sc->vc_mtx_name, NULL, MTX_DEF);
286ca853deeSEric Joyner
287ca853deeSEric Joyner /* Do PCI setup - map BAR0, etc */
288ca853deeSEric Joyner error = iavf_allocate_pci_resources(sc);
289ca853deeSEric Joyner if (error) {
290ca853deeSEric Joyner device_printf(dev, "%s: Allocation of PCI resources failed\n",
291ca853deeSEric Joyner __func__);
292ca853deeSEric Joyner goto err_early;
293ca853deeSEric Joyner }
294ca853deeSEric Joyner
295ca853deeSEric Joyner iavf_dbg_init(sc, "Allocated PCI resources and MSI-X vectors\n");
296ca853deeSEric Joyner
297ca853deeSEric Joyner error = iavf_set_mac_type(hw);
298ca853deeSEric Joyner if (error) {
299ca853deeSEric Joyner device_printf(dev, "%s: set_mac_type failed: %d\n",
300ca853deeSEric Joyner __func__, error);
301ca853deeSEric Joyner goto err_pci_res;
302ca853deeSEric Joyner }
303ca853deeSEric Joyner
304ca853deeSEric Joyner error = iavf_reset_complete(hw);
305ca853deeSEric Joyner if (error) {
306ca853deeSEric Joyner device_printf(dev, "%s: Device is still being reset\n",
307ca853deeSEric Joyner __func__);
308ca853deeSEric Joyner goto err_pci_res;
309ca853deeSEric Joyner }
310ca853deeSEric Joyner
311ca853deeSEric Joyner iavf_dbg_init(sc, "VF Device is ready for configuration\n");
312ca853deeSEric Joyner
313ca853deeSEric Joyner /* Sets up Admin Queue */
314ca853deeSEric Joyner error = iavf_setup_vc(sc);
315ca853deeSEric Joyner if (error) {
316ca853deeSEric Joyner device_printf(dev, "%s: Error setting up PF comms, %d\n",
317ca853deeSEric Joyner __func__, error);
318ca853deeSEric Joyner goto err_pci_res;
319ca853deeSEric Joyner }
320ca853deeSEric Joyner
321ca853deeSEric Joyner iavf_dbg_init(sc, "PF API version verified\n");
322ca853deeSEric Joyner
323ca853deeSEric Joyner /* Need API version before sending reset message */
324ca853deeSEric Joyner error = iavf_reset(sc);
325ca853deeSEric Joyner if (error) {
326ca853deeSEric Joyner device_printf(dev, "VF reset failed; reload the driver\n");
327ca853deeSEric Joyner goto err_aq;
328ca853deeSEric Joyner }
329ca853deeSEric Joyner
330ca853deeSEric Joyner iavf_dbg_init(sc, "VF reset complete\n");
331ca853deeSEric Joyner
332ca853deeSEric Joyner /* Ask for VF config from PF */
333ca853deeSEric Joyner error = iavf_vf_config(sc);
334ca853deeSEric Joyner if (error) {
335ca853deeSEric Joyner device_printf(dev, "Error getting configuration from PF: %d\n",
336ca853deeSEric Joyner error);
337ca853deeSEric Joyner goto err_aq;
338ca853deeSEric Joyner }
339ca853deeSEric Joyner
340ca853deeSEric Joyner iavf_print_device_info(sc);
341ca853deeSEric Joyner
342ca853deeSEric Joyner error = iavf_get_vsi_res_from_vf_res(sc);
343ca853deeSEric Joyner if (error)
344ca853deeSEric Joyner goto err_res_buf;
345ca853deeSEric Joyner
346ca853deeSEric Joyner iavf_dbg_init(sc, "Resource Acquisition complete\n");
347ca853deeSEric Joyner
348ca853deeSEric Joyner /* Setup taskqueue to service VC messages */
349ca853deeSEric Joyner error = iavf_setup_vc_tq(sc);
350ca853deeSEric Joyner if (error)
351ca853deeSEric Joyner goto err_vc_tq;
352ca853deeSEric Joyner
353ca853deeSEric Joyner iavf_set_mac_addresses(sc);
354ca853deeSEric Joyner iflib_set_mac(ctx, hw->mac.addr);
355ca853deeSEric Joyner
356ca853deeSEric Joyner /* Allocate filter lists */
357ca853deeSEric Joyner iavf_init_filters(sc);
358ca853deeSEric Joyner
359ca853deeSEric Joyner /* Fill out more iflib parameters */
360ca853deeSEric Joyner scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max =
361ca853deeSEric Joyner sc->vsi_res->num_queue_pairs;
362ca853deeSEric Joyner if (vsi->enable_head_writeback) {
363ca853deeSEric Joyner scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
364ca853deeSEric Joyner * sizeof(struct iavf_tx_desc) + sizeof(u32), DBA_ALIGN);
365ca853deeSEric Joyner scctx->isc_txrx = &iavf_txrx_hwb;
366ca853deeSEric Joyner } else {
367ca853deeSEric Joyner scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
368ca853deeSEric Joyner * sizeof(struct iavf_tx_desc), DBA_ALIGN);
369ca853deeSEric Joyner scctx->isc_txrx = &iavf_txrx_dwb;
370ca853deeSEric Joyner }
371ca853deeSEric Joyner scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0]
372ca853deeSEric Joyner * sizeof(union iavf_32byte_rx_desc), DBA_ALIGN);
373*e53a21abSEric Joyner scctx->isc_msix_bar = pci_msix_table_bar(dev);
374ca853deeSEric Joyner scctx->isc_tx_nsegments = IAVF_MAX_TX_SEGS;
375ca853deeSEric Joyner scctx->isc_tx_tso_segments_max = IAVF_MAX_TSO_SEGS;
376ca853deeSEric Joyner scctx->isc_tx_tso_size_max = IAVF_TSO_SIZE;
377ca853deeSEric Joyner scctx->isc_tx_tso_segsize_max = IAVF_MAX_DMA_SEG_SIZE;
378ca853deeSEric Joyner scctx->isc_rss_table_size = IAVF_RSS_VSI_LUT_SIZE;
379ca853deeSEric Joyner scctx->isc_capabilities = scctx->isc_capenable = IAVF_CAPS;
380ca853deeSEric Joyner scctx->isc_tx_csum_flags = CSUM_OFFLOAD;
381ca853deeSEric Joyner
382ca853deeSEric Joyner return (0);
383ca853deeSEric Joyner
384ca853deeSEric Joyner err_vc_tq:
385ca853deeSEric Joyner taskqueue_free(sc->vc_tq);
386ca853deeSEric Joyner err_res_buf:
387ca853deeSEric Joyner free(sc->vf_res, M_IAVF);
388ca853deeSEric Joyner err_aq:
389ca853deeSEric Joyner iavf_shutdown_adminq(hw);
390ca853deeSEric Joyner err_pci_res:
391ca853deeSEric Joyner iavf_free_pci_resources(sc);
392ca853deeSEric Joyner err_early:
393ca853deeSEric Joyner IAVF_VC_LOCK_DESTROY(sc);
394ca853deeSEric Joyner return (error);
395ca853deeSEric Joyner }
396ca853deeSEric Joyner
397ca853deeSEric Joyner /**
398ca853deeSEric Joyner * iavf_vc_task - task used to process VC messages
399ca853deeSEric Joyner * @arg: device softc
400ca853deeSEric Joyner * @pending: unused
401ca853deeSEric Joyner *
402ca853deeSEric Joyner * Processes the admin queue, in order to process the virtual
403ca853deeSEric Joyner * channel messages received from the PF.
404ca853deeSEric Joyner */
405ca853deeSEric Joyner static void
iavf_vc_task(void * arg,int pending __unused)406ca853deeSEric Joyner iavf_vc_task(void *arg, int pending __unused)
407ca853deeSEric Joyner {
408ca853deeSEric Joyner struct iavf_sc *sc = (struct iavf_sc *)arg;
409ca853deeSEric Joyner u16 var;
410ca853deeSEric Joyner
411ca853deeSEric Joyner iavf_process_adminq(sc, &var);
412ca853deeSEric Joyner }
413ca853deeSEric Joyner
414ca853deeSEric Joyner /**
415ca853deeSEric Joyner * iavf_setup_vc_tq - Setup task queues
416ca853deeSEric Joyner * @sc: device softc
417ca853deeSEric Joyner *
418ca853deeSEric Joyner * Create taskqueue and tasklet for processing virtual channel messages. This
419ca853deeSEric Joyner * is done in a separate non-iflib taskqueue so that the iflib context lock
420ca853deeSEric Joyner * does not need to be held for VC messages to be processed.
421ca853deeSEric Joyner *
422ca853deeSEric Joyner * @returns zero on success, or an error code on failure.
423ca853deeSEric Joyner */
424ca853deeSEric Joyner static int
iavf_setup_vc_tq(struct iavf_sc * sc)425ca853deeSEric Joyner iavf_setup_vc_tq(struct iavf_sc *sc)
426ca853deeSEric Joyner {
427ca853deeSEric Joyner device_t dev = sc->dev;
428ca853deeSEric Joyner int error = 0;
429ca853deeSEric Joyner
430ca853deeSEric Joyner TASK_INIT(&sc->vc_task, 0, iavf_vc_task, sc);
431ca853deeSEric Joyner
432ca853deeSEric Joyner sc->vc_tq = taskqueue_create_fast("iavf_vc", M_NOWAIT,
433ca853deeSEric Joyner taskqueue_thread_enqueue, &sc->vc_tq);
434ca853deeSEric Joyner if (!sc->vc_tq) {
435ca853deeSEric Joyner device_printf(dev, "taskqueue_create_fast (for VC task) returned NULL!\n");
436ca853deeSEric Joyner return (ENOMEM);
437ca853deeSEric Joyner }
438ca853deeSEric Joyner error = taskqueue_start_threads(&sc->vc_tq, 1, PI_NET, "%s vc",
439ca853deeSEric Joyner device_get_nameunit(dev));
440ca853deeSEric Joyner if (error) {
441ca853deeSEric Joyner device_printf(dev, "taskqueue_start_threads (for VC task) error: %d\n",
442ca853deeSEric Joyner error);
443ca853deeSEric Joyner taskqueue_free(sc->vc_tq);
444ca853deeSEric Joyner return (error);
445ca853deeSEric Joyner }
446ca853deeSEric Joyner
447ca853deeSEric Joyner return (error);
448ca853deeSEric Joyner }
449ca853deeSEric Joyner
/**
 * iavf_if_attach_post - Finish attaching the device to the driver
 * @ctx: the iflib context pointer
 *
 * Called by iflib after it has setup queues and interrupts. Used to finish up
 * the attach process for a device. Attach logic which must occur after Tx and
 * Rx queues are setup belongs here.
 *
 * @returns zero or a non-zero error code on failure
 */
static int
iavf_if_attach_post(if_ctx_t ctx)
{
#ifdef IXL_DEBUG
	/* Only needed for the INIT_DBG_DEV debug macros below */
	device_t dev = iflib_get_dev(ctx);
#endif
	struct iavf_sc *sc;
	struct iavf_hw *hw;
	struct iavf_vsi *vsi;
	int error = 0;

	INIT_DBG_DEV(dev, "begin");

	sc = iavf_sc_from_ctx(ctx);
	vsi = &sc->vsi;
	hw = &sc->hw;

	/* Save off determined number of queues for interface */
	vsi->num_rx_queues = vsi->shared->isc_nrxqsets;
	vsi->num_tx_queues = vsi->shared->isc_ntxqsets;

	/* Setup the stack interface */
	iavf_setup_interface(sc);

	iavf_dbg_init(sc, "Interface setup complete\n");

	/* Initialize statistics & add sysctls */
	bzero(&sc->vsi.eth_stats, sizeof(struct iavf_eth_stats));
	iavf_add_device_sysctls(sc);

	atomic_store_rel_32(&sc->queues_enabled, 0);
	iavf_set_state(&sc->state, IAVF_STATE_INITIALIZED);

	/* We want AQ enabled early for init */
	iavf_enable_adminq_irq(hw);

	INIT_DBG_DEV(dev, "end");

	return (error);
}
500ca853deeSEric Joyner
/**
 * iavf_if_detach - Detach a device from the driver
 * @ctx: the iflib context of the device to detach
 *
 * Called by iflib to detach a given device from the driver. Clean up any
 * resources associated with the driver and shut the device down.
 *
 * @remark iflib always ignores the return value of IFDI_DETACH, so this
 * function is effectively not allowed to fail. Instead, it should clean up
 * and release as much as possible even if something goes wrong.
 *
 * @returns zero
 */
static int
iavf_if_detach(if_ctx_t ctx)
{
	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
	struct iavf_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	enum iavf_status status;

	INIT_DBG_DEV(dev, "begin");

	/* Mark uninitialized first so nothing else races with teardown */
	iavf_clear_state(&sc->state, IAVF_STATE_INITIALIZED);

	/* Drain admin queue taskqueue */
	taskqueue_free(sc->vc_tq);
	IAVF_VC_LOCK_DESTROY(sc);

	/* Remove all the media and link information */
	ifmedia_removeall(sc->media);

	iavf_disable_adminq_irq(hw);
	status = iavf_shutdown_adminq(&sc->hw);
	if (status != IAVF_SUCCESS) {
		/* Log but continue; detach must release everything anyway */
		device_printf(dev,
		    "iavf_shutdown_adminq() failed with status %s\n",
		    iavf_stat_str(hw, status));
	}

	free(sc->vf_res, M_IAVF);
	sc->vf_res = NULL;
	iavf_free_pci_resources(sc);
	iavf_free_filters(sc);

	INIT_DBG_DEV(dev, "end");
	return (0);
}
549ca853deeSEric Joyner
/**
 * iavf_if_shutdown - called by iflib to handle shutdown
 * @ctx: the iflib context pointer
 *
 * Callback for the IFDI_SHUTDOWN iflib function.
 *
 * @remark no device-specific work is required for a VF at shutdown.
 *
 * @returns zero or an error code on failure
 */
static int
iavf_if_shutdown(if_ctx_t ctx __unused)
{
	return (0);
}
563ca853deeSEric Joyner
/**
 * iavf_if_suspend - called by iflib to handle suspend
 * @ctx: the iflib context pointer
 *
 * Callback for the IFDI_SUSPEND iflib function.
 *
 * @remark no device-specific work is required for a VF at suspend.
 *
 * @returns zero or an error code on failure
 */
static int
iavf_if_suspend(if_ctx_t ctx __unused)
{
	return (0);
}
577ca853deeSEric Joyner
/**
 * iavf_if_resume - called by iflib to handle resume
 * @ctx: the iflib context pointer
 *
 * Callback for the IFDI_RESUME iflib function.
 *
 * @remark no device-specific work is required for a VF at resume.
 *
 * @returns zero or an error code on failure
 */
static int
iavf_if_resume(if_ctx_t ctx __unused)
{
	return (0);
}
591ca853deeSEric Joyner
/**
 * iavf_vc_sleep_wait - Sleep for a response from a VC message
 * @sc: device softc
 * @op: the op code to sleep on
 *
 * Sleep until a response from the PF for the VC message sent by the
 * given op.
 *
 * @remark the VC mutex must be held; mtx_sleep() drops it while sleeping
 * and reacquires it before returning.
 *
 * @returns zero on success, or EWOULDBLOCK if the sleep times out.
 */
static int
iavf_vc_sleep_wait(struct iavf_sc *sc, u32 op)
{
	int error = 0;

	IAVF_VC_LOCK_ASSERT(sc);

	iavf_dbg_vc(sc, "Sleeping for op %b\n", op, IAVF_FLAGS);

	/* Wait on the per-op wait channel, bounded by the AQ timeout */
	error = mtx_sleep(iavf_vc_get_op_chan(sc, op),
	    &sc->vc_mtx, PRI_MAX, "iavf_vc", IAVF_AQ_TIMEOUT);

	return (error);
}
616ca853deeSEric Joyner
617ca853deeSEric Joyner /**
618ca853deeSEric Joyner * iavf_send_vc_msg_sleep - Send a virtchnl message and wait for a response
619ca853deeSEric Joyner * @sc: device softc
620ca853deeSEric Joyner * @op: the op code to send
621ca853deeSEric Joyner *
622ca853deeSEric Joyner * Send a virtchnl message to the PF, and sleep or busy wait for a response
623ca853deeSEric Joyner * from the PF, depending on iflib context lock type.
624ca853deeSEric Joyner *
625ca853deeSEric Joyner * @remark this function does not wait if the device is detaching, on kernels
626ca853deeSEric Joyner * that support indicating to the driver that the device is detaching
627ca853deeSEric Joyner *
628ca853deeSEric Joyner * @returns zero or an error code on failure.
629ca853deeSEric Joyner */
630ca853deeSEric Joyner int
iavf_send_vc_msg_sleep(struct iavf_sc * sc,u32 op)631ca853deeSEric Joyner iavf_send_vc_msg_sleep(struct iavf_sc *sc, u32 op)
632ca853deeSEric Joyner {
633ca853deeSEric Joyner if_ctx_t ctx = sc->vsi.ctx;
634ca853deeSEric Joyner int error = 0;
635ca853deeSEric Joyner
636ca853deeSEric Joyner IAVF_VC_LOCK(sc);
637ca853deeSEric Joyner error = iavf_vc_send_cmd(sc, op);
638ca853deeSEric Joyner if (error != 0) {
639ca853deeSEric Joyner iavf_dbg_vc(sc, "Error sending %b: %d\n", op, IAVF_FLAGS, error);
640ca853deeSEric Joyner goto release_lock;
641ca853deeSEric Joyner }
642ca853deeSEric Joyner
643ca853deeSEric Joyner /* Don't wait for a response if the device is being detached. */
644ca853deeSEric Joyner if (!iflib_in_detach(ctx)) {
645ca853deeSEric Joyner error = iavf_vc_sleep_wait(sc, op);
646ca853deeSEric Joyner IAVF_VC_LOCK_ASSERT(sc);
647ca853deeSEric Joyner
648ca853deeSEric Joyner if (error == EWOULDBLOCK)
649ca853deeSEric Joyner device_printf(sc->dev, "%b timed out\n", op, IAVF_FLAGS);
650ca853deeSEric Joyner }
651ca853deeSEric Joyner release_lock:
652ca853deeSEric Joyner IAVF_VC_UNLOCK(sc);
653ca853deeSEric Joyner return (error);
654ca853deeSEric Joyner }
655ca853deeSEric Joyner
656ca853deeSEric Joyner /**
657ca853deeSEric Joyner * iavf_send_vc_msg - Send a virtchnl message to the PF
658ca853deeSEric Joyner * @sc: device softc
659ca853deeSEric Joyner * @op: the op code to send
660ca853deeSEric Joyner *
661ca853deeSEric Joyner * Send a virtchnl message to the PF and do not wait for a response.
662ca853deeSEric Joyner *
663ca853deeSEric Joyner * @returns zero on success, or an error code on failure.
664ca853deeSEric Joyner */
665ca853deeSEric Joyner int
iavf_send_vc_msg(struct iavf_sc * sc,u32 op)666ca853deeSEric Joyner iavf_send_vc_msg(struct iavf_sc *sc, u32 op)
667ca853deeSEric Joyner {
668ca853deeSEric Joyner int error = 0;
669ca853deeSEric Joyner
670ca853deeSEric Joyner error = iavf_vc_send_cmd(sc, op);
671ca853deeSEric Joyner if (error != 0)
672ca853deeSEric Joyner iavf_dbg_vc(sc, "Error sending %b: %d\n", op, IAVF_FLAGS, error);
673ca853deeSEric Joyner
674ca853deeSEric Joyner return (error);
675ca853deeSEric Joyner }
676ca853deeSEric Joyner
677ca853deeSEric Joyner /**
678ca853deeSEric Joyner * iavf_init_queues - initialize Tx and Rx queues
679ca853deeSEric Joyner * @vsi: the VSI to initialize
680ca853deeSEric Joyner *
681ca853deeSEric Joyner * Refresh the Tx and Rx ring contents and update the tail pointers for each
682ca853deeSEric Joyner * queue.
683ca853deeSEric Joyner */
684ca853deeSEric Joyner static void
iavf_init_queues(struct iavf_vsi * vsi)685ca853deeSEric Joyner iavf_init_queues(struct iavf_vsi *vsi)
686ca853deeSEric Joyner {
687ca853deeSEric Joyner struct iavf_tx_queue *tx_que = vsi->tx_queues;
688ca853deeSEric Joyner struct iavf_rx_queue *rx_que = vsi->rx_queues;
689ca853deeSEric Joyner struct rx_ring *rxr;
690ca853deeSEric Joyner uint32_t mbuf_sz;
691ca853deeSEric Joyner
692ca853deeSEric Joyner mbuf_sz = iflib_get_rx_mbuf_sz(vsi->ctx);
693ca853deeSEric Joyner MPASS(mbuf_sz <= UINT16_MAX);
694ca853deeSEric Joyner
695ca853deeSEric Joyner for (int i = 0; i < vsi->num_tx_queues; i++, tx_que++)
696ca853deeSEric Joyner iavf_init_tx_ring(vsi, tx_que);
697ca853deeSEric Joyner
698ca853deeSEric Joyner for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) {
699ca853deeSEric Joyner rxr = &rx_que->rxr;
700ca853deeSEric Joyner
701ca853deeSEric Joyner rxr->mbuf_sz = mbuf_sz;
702ca853deeSEric Joyner wr32(vsi->hw, rxr->tail, 0);
703ca853deeSEric Joyner }
704ca853deeSEric Joyner }
705ca853deeSEric Joyner
/**
 * iavf_if_init - Initialize device for operation
 * @ctx: the iflib context pointer
 *
 * Initializes a device for operation. Called by iflib in response to an
 * interface up event from the stack.
 *
 * @remark this function does not return a value and thus cannot indicate
 * failure to initialize.
 */
static void
iavf_if_init(if_ctx_t ctx)
{
	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
	struct iavf_vsi *vsi = &sc->vsi;
	struct iavf_hw *hw = &sc->hw;
	if_t ifp = iflib_get_ifp(ctx);
	u8 tmpaddr[ETHER_ADDR_LEN];
	enum iavf_status status;
	device_t dev = sc->dev;
	int error = 0;

	INIT_DBG_IF(ifp, "begin");

	/* iflib must hold the context lock exclusively around init */
	sx_assert(iflib_ctx_lock_get(ctx), SA_XLOCKED);

	/* A reset failure is only reported; initialization continues */
	error = iavf_reset_complete(hw);
	if (error) {
		device_printf(sc->dev, "%s: VF reset failed\n",
		    __func__);
	}

	/*
	 * If the send queue is not responsive, shut down and re-create the
	 * AdminQ.  A failure in either step aborts initialization entirely
	 * (there is no way to report it to iflib -- see @remark above).
	 */
	if (!iavf_check_asq_alive(hw)) {
		iavf_dbg_info(sc, "ASQ is not alive, re-initializing AQ\n");
		pci_enable_busmaster(dev);

		status = iavf_shutdown_adminq(hw);
		if (status != IAVF_SUCCESS) {
			device_printf(dev,
			    "%s: iavf_shutdown_adminq failed: %s\n",
			    __func__, iavf_stat_str(hw, status));
			return;
		}

		status = iavf_init_adminq(hw);
		if (status != IAVF_SUCCESS) {
			device_printf(dev,
			    "%s: iavf_init_adminq failed: %s\n",
			    __func__, iavf_stat_str(hw, status));
			return;
		}
	}

	/* Make sure queues are disabled */
	iavf_disable_queues_with_retries(sc);

	/*
	 * If the interface's link-level address differs from the MAC the
	 * hardware state holds and validates, drop the filter for the old
	 * address and adopt the new one.
	 * NOTE(review): ETH_ALEN is used for this copy while the buffer was
	 * sized with ETHER_ADDR_LEN above -- presumably both are 6; confirm.
	 */
	bcopy(if_getlladdr(ifp), tmpaddr, ETHER_ADDR_LEN);
	if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
	    (iavf_validate_mac_addr(tmpaddr) == IAVF_SUCCESS)) {
		error = iavf_del_mac_filter(sc, hw->mac.addr);
		if (error == 0)
			iavf_send_vc_msg(sc, IAVF_FLAG_AQ_DEL_MAC_FILTER);

		bcopy(tmpaddr, hw->mac.addr, ETH_ALEN);
	}

	/* (Re-)add the filter for the current MAC; EEXIST is acceptable */
	error = iavf_add_mac_filter(sc, hw->mac.addr, 0);
	if (!error || error == EEXIST)
		iavf_send_vc_msg(sc, IAVF_FLAG_AQ_ADD_MAC_FILTER);
	iflib_set_mac(ctx, hw->mac.addr);

	/* Prepare the queues for operation */
	iavf_init_queues(vsi);

	/* Set initial ITR values */
	iavf_configure_itr(sc);

	iavf_send_vc_msg(sc, IAVF_FLAG_AQ_CONFIGURE_QUEUES);

	/* Set up RSS */
	iavf_config_rss(sc);

	/* Map vectors */
	iavf_send_vc_msg(sc, IAVF_FLAG_AQ_MAP_VECTORS);

	/* Init SW TX ring indices */
	if (vsi->enable_head_writeback)
		iavf_init_tx_cidx(vsi);
	else
		iavf_init_tx_rsqs(vsi);

	/* Configure promiscuous mode */
	iavf_config_promisc(sc, if_getflags(ifp));

	/* Enable queues, waiting for the PF's reply before going RUNNING */
	iavf_send_vc_msg_sleep(sc, IAVF_FLAG_AQ_ENABLE_QUEUES);

	iavf_set_state(&sc->state, IAVF_STATE_RUNNING);
}
805ca853deeSEric Joyner
/**
 * iavf_if_msix_intr_assign - Assign MSI-X interrupts
 * @ctx: the iflib context pointer
 * @msix: the number of MSI-X vectors available
 *
 * Called by iflib to assign MSI-X interrupt vectors to queues. Assigns and
 * sets up vectors for each Tx and Rx queue, as well as the administrative
 * control interrupt.
 *
 * @returns zero or an error code on failure
 */
static int
iavf_if_msix_intr_assign(if_ctx_t ctx, int msix __unused)
{
	struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
	struct iavf_vsi *vsi = &sc->vsi;
	struct iavf_rx_queue *rx_que = vsi->rx_queues;
	struct iavf_tx_queue *tx_que = vsi->tx_queues;
	int err, i, rid, vector = 0;
	char buf[16];

	MPASS(vsi->shared->isc_nrxqsets > 0);
	MPASS(vsi->shared->isc_ntxqsets > 0);

	/* Admin queue uses vector 0; interrupt rids are vector + 1 */
	rid = vector + 1;
	err = iflib_irq_alloc_generic(ctx, &vsi->irq, rid, IFLIB_INTR_ADMIN,
	    iavf_msix_adminq, sc, 0, "aq");
	if (err) {
		iflib_irq_free(ctx, &vsi->irq);
		device_printf(iflib_get_dev(ctx),
		    "Failed to register Admin Que handler");
		return (err);
	}

	/* Now set up the stations: one MSI-X vector per Rx queue */
	for (i = 0, vector = 1; i < vsi->shared->isc_nrxqsets; i++, vector++, rx_que++) {
		rid = vector + 1;

		snprintf(buf, sizeof(buf), "rxq%d", i);
		err = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
		    IFLIB_INTR_RXTX, iavf_msix_que, rx_que, rx_que->rxr.me, buf);
		if (err) {
			device_printf(iflib_get_dev(ctx),
			    "Failed to allocate queue RX int vector %d, err: %d\n", i, err);
			/*
			 * NOTE(review): this count includes the queue whose
			 * allocation just failed, so the fail path below also
			 * calls iflib_irq_free() on its irq -- presumably
			 * iflib tolerates freeing an unallocated irq; confirm.
			 */
			vsi->num_rx_queues = i + 1;
			goto fail;
		}
		rx_que->msix = vector;
	}

	bzero(buf, sizeof(buf));

	/*
	 * Tx queues have no vector of their own; each shares the irq of an
	 * Rx queue, assigned round-robin when ntxqsets > nrxqsets.
	 */
	for (i = 0; i < vsi->shared->isc_ntxqsets; i++, tx_que++) {
		snprintf(buf, sizeof(buf), "txq%d", i);
		iflib_softirq_alloc_generic(ctx,
		    &vsi->rx_queues[i % vsi->shared->isc_nrxqsets].que_irq,
		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);

		tx_que->msix = (i % vsi->shared->isc_nrxqsets) + 1;
	}

	return (0);
fail:
	/* Release the admin irq and every Rx queue irq counted above */
	iflib_irq_free(ctx, &vsi->irq);
	rx_que = vsi->rx_queues;
	for (i = 0; i < vsi->num_rx_queues; i++, rx_que++)
		iflib_irq_free(ctx, &rx_que->que_irq);
	return (err);
}
876ca853deeSEric Joyner
877ca853deeSEric Joyner /**
878ca853deeSEric Joyner * iavf_if_enable_intr - Enable all interrupts for a device
879ca853deeSEric Joyner * @ctx: the iflib context pointer
880ca853deeSEric Joyner *
881ca853deeSEric Joyner * Called by iflib to request enabling all interrupts.
882ca853deeSEric Joyner */
883ca853deeSEric Joyner static void
iavf_if_enable_intr(if_ctx_t ctx)884ca853deeSEric Joyner iavf_if_enable_intr(if_ctx_t ctx)
885ca853deeSEric Joyner {
886ca853deeSEric Joyner struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
887ca853deeSEric Joyner struct iavf_vsi *vsi = &sc->vsi;
888ca853deeSEric Joyner
889ca853deeSEric Joyner iavf_enable_intr(vsi);
890ca853deeSEric Joyner }
891ca853deeSEric Joyner
892ca853deeSEric Joyner /**
893ca853deeSEric Joyner * iavf_if_disable_intr - Disable all interrupts for a device
894ca853deeSEric Joyner * @ctx: the iflib context pointer
895ca853deeSEric Joyner *
896ca853deeSEric Joyner * Called by iflib to request disabling all interrupts.
897ca853deeSEric Joyner */
898ca853deeSEric Joyner static void
iavf_if_disable_intr(if_ctx_t ctx)899ca853deeSEric Joyner iavf_if_disable_intr(if_ctx_t ctx)
900ca853deeSEric Joyner {
901ca853deeSEric Joyner struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
902ca853deeSEric Joyner struct iavf_vsi *vsi = &sc->vsi;
903ca853deeSEric Joyner
904ca853deeSEric Joyner iavf_disable_intr(vsi);
905ca853deeSEric Joyner }
906ca853deeSEric Joyner
907ca853deeSEric Joyner /**
908ca853deeSEric Joyner * iavf_if_rx_queue_intr_enable - Enable one Rx queue interrupt
909ca853deeSEric Joyner * @ctx: the iflib context pointer
910ca853deeSEric Joyner * @rxqid: Rx queue index
911ca853deeSEric Joyner *
912ca853deeSEric Joyner * Enables the interrupt associated with a specified Rx queue.
913ca853deeSEric Joyner *
914ca853deeSEric Joyner * @returns zero
915ca853deeSEric Joyner */
916ca853deeSEric Joyner static int
iavf_if_rx_queue_intr_enable(if_ctx_t ctx,uint16_t rxqid)917ca853deeSEric Joyner iavf_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
918ca853deeSEric Joyner {
919ca853deeSEric Joyner struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
920ca853deeSEric Joyner struct iavf_vsi *vsi = &sc->vsi;
921ca853deeSEric Joyner struct iavf_hw *hw = vsi->hw;
922ca853deeSEric Joyner struct iavf_rx_queue *rx_que = &vsi->rx_queues[rxqid];
923ca853deeSEric Joyner
924ca853deeSEric Joyner iavf_enable_queue_irq(hw, rx_que->msix - 1);
925ca853deeSEric Joyner return (0);
926ca853deeSEric Joyner }
927ca853deeSEric Joyner
928ca853deeSEric Joyner /**
929ca853deeSEric Joyner * iavf_if_tx_queue_intr_enable - Enable one Tx queue interrupt
930ca853deeSEric Joyner * @ctx: the iflib context pointer
931ca853deeSEric Joyner * @txqid: Tx queue index
932ca853deeSEric Joyner *
933ca853deeSEric Joyner * Enables the interrupt associated with a specified Tx queue.
934ca853deeSEric Joyner *
935ca853deeSEric Joyner * @returns zero
936ca853deeSEric Joyner */
937ca853deeSEric Joyner static int
iavf_if_tx_queue_intr_enable(if_ctx_t ctx,uint16_t txqid)938ca853deeSEric Joyner iavf_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
939ca853deeSEric Joyner {
940ca853deeSEric Joyner struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
941ca853deeSEric Joyner struct iavf_vsi *vsi = &sc->vsi;
942ca853deeSEric Joyner struct iavf_hw *hw = vsi->hw;
943ca853deeSEric Joyner struct iavf_tx_queue *tx_que = &vsi->tx_queues[txqid];
944ca853deeSEric Joyner
945ca853deeSEric Joyner iavf_enable_queue_irq(hw, tx_que->msix - 1);
946ca853deeSEric Joyner return (0);
947ca853deeSEric Joyner }
948ca853deeSEric Joyner
949ca853deeSEric Joyner /**
950ca853deeSEric Joyner * iavf_if_tx_queues_alloc - Allocate Tx queue memory
951ca853deeSEric Joyner * @ctx: the iflib context pointer
952ca853deeSEric Joyner * @vaddrs: Array of virtual addresses
953ca853deeSEric Joyner * @paddrs: Array of physical addresses
954ca853deeSEric Joyner * @ntxqs: the number of Tx queues per group (should always be 1)
955ca853deeSEric Joyner * @ntxqsets: the number of Tx queues
956ca853deeSEric Joyner *
957ca853deeSEric Joyner * Allocates memory for the specified number of Tx queues. This includes
958ca853deeSEric Joyner * memory for the queue structures and the report status array for the queues.
959ca853deeSEric Joyner * The virtual and physical addresses are saved for later use during
960ca853deeSEric Joyner * initialization.
961ca853deeSEric Joyner *
962ca853deeSEric Joyner * @returns zero or a non-zero error code on failure
963ca853deeSEric Joyner */
964ca853deeSEric Joyner static int
iavf_if_tx_queues_alloc(if_ctx_t ctx,caddr_t * vaddrs,uint64_t * paddrs,int ntxqs,int ntxqsets)965ca853deeSEric Joyner iavf_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets)
966ca853deeSEric Joyner {
967ca853deeSEric Joyner struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
968ca853deeSEric Joyner struct iavf_vsi *vsi = &sc->vsi;
969ca853deeSEric Joyner if_softc_ctx_t scctx = vsi->shared;
970ca853deeSEric Joyner struct iavf_tx_queue *que;
971ca853deeSEric Joyner int i, j, error = 0;
972ca853deeSEric Joyner
973ca853deeSEric Joyner MPASS(scctx->isc_ntxqsets > 0);
974ca853deeSEric Joyner MPASS(ntxqs == 1);
975ca853deeSEric Joyner MPASS(scctx->isc_ntxqsets == ntxqsets);
976ca853deeSEric Joyner
977ca853deeSEric Joyner /* Allocate queue structure memory */
978ca853deeSEric Joyner if (!(vsi->tx_queues =
979ca853deeSEric Joyner (struct iavf_tx_queue *)malloc(sizeof(struct iavf_tx_queue) *ntxqsets, M_IAVF, M_NOWAIT | M_ZERO))) {
980ca853deeSEric Joyner device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n");
981ca853deeSEric Joyner return (ENOMEM);
982ca853deeSEric Joyner }
983ca853deeSEric Joyner
984ca853deeSEric Joyner for (i = 0, que = vsi->tx_queues; i < ntxqsets; i++, que++) {
985ca853deeSEric Joyner struct tx_ring *txr = &que->txr;
986ca853deeSEric Joyner
987ca853deeSEric Joyner txr->me = i;
988ca853deeSEric Joyner que->vsi = vsi;
989ca853deeSEric Joyner
990ca853deeSEric Joyner if (!vsi->enable_head_writeback) {
991ca853deeSEric Joyner /* Allocate report status array */
992ca853deeSEric Joyner if (!(txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IAVF, M_NOWAIT))) {
993ca853deeSEric Joyner device_printf(iflib_get_dev(ctx), "failed to allocate tx_rsq memory\n");
994ca853deeSEric Joyner error = ENOMEM;
995ca853deeSEric Joyner goto fail;
996ca853deeSEric Joyner }
997ca853deeSEric Joyner /* Init report status array */
998ca853deeSEric Joyner for (j = 0; j < scctx->isc_ntxd[0]; j++)
999ca853deeSEric Joyner txr->tx_rsq[j] = QIDX_INVALID;
1000ca853deeSEric Joyner }
1001ca853deeSEric Joyner /* get the virtual and physical address of the hardware queues */
1002ca853deeSEric Joyner txr->tail = IAVF_QTX_TAIL1(txr->me);
1003ca853deeSEric Joyner txr->tx_base = (struct iavf_tx_desc *)vaddrs[i * ntxqs];
1004ca853deeSEric Joyner txr->tx_paddr = paddrs[i * ntxqs];
1005ca853deeSEric Joyner txr->que = que;
1006ca853deeSEric Joyner }
1007ca853deeSEric Joyner
1008ca853deeSEric Joyner return (0);
1009ca853deeSEric Joyner fail:
1010ca853deeSEric Joyner iavf_if_queues_free(ctx);
1011ca853deeSEric Joyner return (error);
1012ca853deeSEric Joyner }
1013ca853deeSEric Joyner
1014ca853deeSEric Joyner /**
1015ca853deeSEric Joyner * iavf_if_rx_queues_alloc - Allocate Rx queue memory
1016ca853deeSEric Joyner * @ctx: the iflib context pointer
1017ca853deeSEric Joyner * @vaddrs: Array of virtual addresses
1018ca853deeSEric Joyner * @paddrs: Array of physical addresses
1019ca853deeSEric Joyner * @nrxqs: number of Rx queues per group (should always be 1)
1020ca853deeSEric Joyner * @nrxqsets: the number of Rx queues to allocate
1021ca853deeSEric Joyner *
1022ca853deeSEric Joyner * Called by iflib to allocate driver memory for a number of Rx queues.
1023ca853deeSEric Joyner * Allocates memory for the drivers private Rx queue data structure, and saves
1024ca853deeSEric Joyner * the physical and virtual addresses for later use.
1025ca853deeSEric Joyner *
1026ca853deeSEric Joyner * @returns zero or a non-zero error code on failure
1027ca853deeSEric Joyner */
1028ca853deeSEric Joyner static int
iavf_if_rx_queues_alloc(if_ctx_t ctx,caddr_t * vaddrs,uint64_t * paddrs,int nrxqs,int nrxqsets)1029ca853deeSEric Joyner iavf_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets)
1030ca853deeSEric Joyner {
1031ca853deeSEric Joyner struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
1032ca853deeSEric Joyner struct iavf_vsi *vsi = &sc->vsi;
1033ca853deeSEric Joyner struct iavf_rx_queue *que;
1034ca853deeSEric Joyner int i, error = 0;
1035ca853deeSEric Joyner
1036ca853deeSEric Joyner #ifdef INVARIANTS
1037ca853deeSEric Joyner if_softc_ctx_t scctx = vsi->shared;
1038ca853deeSEric Joyner MPASS(scctx->isc_nrxqsets > 0);
1039ca853deeSEric Joyner MPASS(nrxqs == 1);
1040ca853deeSEric Joyner MPASS(scctx->isc_nrxqsets == nrxqsets);
1041ca853deeSEric Joyner #endif
1042ca853deeSEric Joyner
1043ca853deeSEric Joyner /* Allocate queue structure memory */
1044ca853deeSEric Joyner if (!(vsi->rx_queues =
1045ca853deeSEric Joyner (struct iavf_rx_queue *) malloc(sizeof(struct iavf_rx_queue) *
1046ca853deeSEric Joyner nrxqsets, M_IAVF, M_NOWAIT | M_ZERO))) {
1047ca853deeSEric Joyner device_printf(iflib_get_dev(ctx), "Unable to allocate RX ring memory\n");
1048ca853deeSEric Joyner error = ENOMEM;
1049ca853deeSEric Joyner goto fail;
1050ca853deeSEric Joyner }
1051ca853deeSEric Joyner
1052ca853deeSEric Joyner for (i = 0, que = vsi->rx_queues; i < nrxqsets; i++, que++) {
1053ca853deeSEric Joyner struct rx_ring *rxr = &que->rxr;
1054ca853deeSEric Joyner
1055ca853deeSEric Joyner rxr->me = i;
1056ca853deeSEric Joyner que->vsi = vsi;
1057ca853deeSEric Joyner
1058ca853deeSEric Joyner /* get the virtual and physical address of the hardware queues */
1059ca853deeSEric Joyner rxr->tail = IAVF_QRX_TAIL1(rxr->me);
1060ca853deeSEric Joyner rxr->rx_base = (union iavf_rx_desc *)vaddrs[i * nrxqs];
1061ca853deeSEric Joyner rxr->rx_paddr = paddrs[i * nrxqs];
1062ca853deeSEric Joyner rxr->que = que;
1063ca853deeSEric Joyner }
1064ca853deeSEric Joyner
1065ca853deeSEric Joyner return (0);
1066ca853deeSEric Joyner fail:
1067ca853deeSEric Joyner iavf_if_queues_free(ctx);
1068ca853deeSEric Joyner return (error);
1069ca853deeSEric Joyner }
1070ca853deeSEric Joyner
1071ca853deeSEric Joyner /**
1072ca853deeSEric Joyner * iavf_if_queues_free - Free driver queue memory
1073ca853deeSEric Joyner * @ctx: the iflib context pointer
1074ca853deeSEric Joyner *
1075ca853deeSEric Joyner * Called by iflib to release memory allocated by the driver when setting up
1076ca853deeSEric Joyner * Tx and Rx queues.
1077ca853deeSEric Joyner *
1078ca853deeSEric Joyner * @remark The ordering of this function and iavf_if_detach is not guaranteed.
1079ca853deeSEric Joyner * It is possible for this function to be called either before or after the
1080ca853deeSEric Joyner * iavf_if_detach. Thus, care must be taken to ensure that either ordering of
1081ca853deeSEric Joyner * iavf_if_detach and iavf_if_queues_free is safe.
1082ca853deeSEric Joyner */
1083ca853deeSEric Joyner static void
iavf_if_queues_free(if_ctx_t ctx)1084ca853deeSEric Joyner iavf_if_queues_free(if_ctx_t ctx)
1085ca853deeSEric Joyner {
1086ca853deeSEric Joyner struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
1087ca853deeSEric Joyner struct iavf_vsi *vsi = &sc->vsi;
1088ca853deeSEric Joyner
1089ca853deeSEric Joyner if (!vsi->enable_head_writeback) {
1090ca853deeSEric Joyner struct iavf_tx_queue *que;
1091ca853deeSEric Joyner int i = 0;
1092ca853deeSEric Joyner
1093ca853deeSEric Joyner for (i = 0, que = vsi->tx_queues; i < vsi->shared->isc_ntxqsets; i++, que++) {
1094ca853deeSEric Joyner struct tx_ring *txr = &que->txr;
1095ca853deeSEric Joyner if (txr->tx_rsq != NULL) {
1096ca853deeSEric Joyner free(txr->tx_rsq, M_IAVF);
1097ca853deeSEric Joyner txr->tx_rsq = NULL;
1098ca853deeSEric Joyner }
1099ca853deeSEric Joyner }
1100ca853deeSEric Joyner }
1101ca853deeSEric Joyner
1102ca853deeSEric Joyner if (vsi->tx_queues != NULL) {
1103ca853deeSEric Joyner free(vsi->tx_queues, M_IAVF);
1104ca853deeSEric Joyner vsi->tx_queues = NULL;
1105ca853deeSEric Joyner }
1106ca853deeSEric Joyner if (vsi->rx_queues != NULL) {
1107ca853deeSEric Joyner free(vsi->rx_queues, M_IAVF);
1108ca853deeSEric Joyner vsi->rx_queues = NULL;
1109ca853deeSEric Joyner }
1110ca853deeSEric Joyner }
1111ca853deeSEric Joyner
1112ca853deeSEric Joyner /**
1113ca853deeSEric Joyner * iavf_check_aq_errors - Check for AdminQ errors
1114ca853deeSEric Joyner * @sc: device softc
1115ca853deeSEric Joyner *
1116ca853deeSEric Joyner * Check the AdminQ registers for errors, and determine whether or not a reset
1117ca853deeSEric Joyner * may be required to resolve them.
1118ca853deeSEric Joyner *
1119ca853deeSEric Joyner * @post if there are errors, the VF device will be stopped and a reset will
1120ca853deeSEric Joyner * be requested.
1121ca853deeSEric Joyner *
1122ca853deeSEric Joyner * @returns zero if there are no issues, EBUSY if the device is resetting,
1123ca853deeSEric Joyner * or EIO if there are any AQ errors.
1124ca853deeSEric Joyner */
1125ca853deeSEric Joyner static int
iavf_check_aq_errors(struct iavf_sc * sc)1126ca853deeSEric Joyner iavf_check_aq_errors(struct iavf_sc *sc)
1127ca853deeSEric Joyner {
1128ca853deeSEric Joyner struct iavf_hw *hw = &sc->hw;
1129ca853deeSEric Joyner device_t dev = sc->dev;
1130ca853deeSEric Joyner u32 reg, oldreg;
1131ca853deeSEric Joyner u8 aq_error = false;
1132ca853deeSEric Joyner
1133ca853deeSEric Joyner oldreg = reg = rd32(hw, hw->aq.arq.len);
1134ca853deeSEric Joyner
1135ca853deeSEric Joyner /* Check if device is in reset */
1136ca853deeSEric Joyner if (reg == 0xdeadbeef || reg == 0xffffffff) {
1137ca853deeSEric Joyner device_printf(dev, "VF in reset\n");
1138ca853deeSEric Joyner return (EBUSY);
1139ca853deeSEric Joyner }
1140ca853deeSEric Joyner
1141ca853deeSEric Joyner /* Check for Admin queue errors */
1142ca853deeSEric Joyner if (reg & IAVF_VF_ARQLEN1_ARQVFE_MASK) {
1143ca853deeSEric Joyner device_printf(dev, "ARQ VF Error detected\n");
1144ca853deeSEric Joyner reg &= ~IAVF_VF_ARQLEN1_ARQVFE_MASK;
1145ca853deeSEric Joyner aq_error = true;
1146ca853deeSEric Joyner }
1147ca853deeSEric Joyner if (reg & IAVF_VF_ARQLEN1_ARQOVFL_MASK) {
1148ca853deeSEric Joyner device_printf(dev, "ARQ Overflow Error detected\n");
1149ca853deeSEric Joyner reg &= ~IAVF_VF_ARQLEN1_ARQOVFL_MASK;
1150ca853deeSEric Joyner aq_error = true;
1151ca853deeSEric Joyner }
1152ca853deeSEric Joyner if (reg & IAVF_VF_ARQLEN1_ARQCRIT_MASK) {
1153ca853deeSEric Joyner device_printf(dev, "ARQ Critical Error detected\n");
1154ca853deeSEric Joyner reg &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK;
1155ca853deeSEric Joyner aq_error = true;
1156ca853deeSEric Joyner }
1157ca853deeSEric Joyner if (oldreg != reg)
1158ca853deeSEric Joyner wr32(hw, hw->aq.arq.len, reg);
1159ca853deeSEric Joyner
1160ca853deeSEric Joyner oldreg = reg = rd32(hw, hw->aq.asq.len);
1161ca853deeSEric Joyner if (reg & IAVF_VF_ATQLEN1_ATQVFE_MASK) {
1162ca853deeSEric Joyner device_printf(dev, "ASQ VF Error detected\n");
1163ca853deeSEric Joyner reg &= ~IAVF_VF_ATQLEN1_ATQVFE_MASK;
1164ca853deeSEric Joyner aq_error = true;
1165ca853deeSEric Joyner }
1166ca853deeSEric Joyner if (reg & IAVF_VF_ATQLEN1_ATQOVFL_MASK) {
1167ca853deeSEric Joyner device_printf(dev, "ASQ Overflow Error detected\n");
1168ca853deeSEric Joyner reg &= ~IAVF_VF_ATQLEN1_ATQOVFL_MASK;
1169ca853deeSEric Joyner aq_error = true;
1170ca853deeSEric Joyner }
1171ca853deeSEric Joyner if (reg & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
1172ca853deeSEric Joyner device_printf(dev, "ASQ Critical Error detected\n");
1173ca853deeSEric Joyner reg &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK;
1174ca853deeSEric Joyner aq_error = true;
1175ca853deeSEric Joyner }
1176ca853deeSEric Joyner if (oldreg != reg)
1177ca853deeSEric Joyner wr32(hw, hw->aq.asq.len, reg);
1178ca853deeSEric Joyner
1179ca853deeSEric Joyner return (aq_error ? EIO : 0);
1180ca853deeSEric Joyner }
1181ca853deeSEric Joyner
/**
 * iavf_process_adminq - Process adminq responses from the PF
 * @sc: device softc
 * @pending: output parameter indicating how many messages remain
 *
 * Process the adminq to handle replies from the PF over the virtchnl
 * connection.
 *
 * @returns zero or an iavf_status code on failure
 */
static enum iavf_status
iavf_process_adminq(struct iavf_sc *sc, u16 *pending)
{
	enum iavf_status status = IAVF_SUCCESS;
	struct iavf_arq_event_info event;
	struct iavf_hw *hw = &sc->hw;
	struct virtchnl_msg *v_msg;
	int error = 0, loop = 0;
	u32 reg;

	/* A pending reset means the AQ contents cannot be trusted */
	if (iavf_test_state(&sc->state, IAVF_STATE_RESET_PENDING)) {
		status = IAVF_ERR_ADMIN_QUEUE_ERROR;
		goto reenable_interrupt;
	}

	error = iavf_check_aq_errors(sc);
	if (error) {
		status = IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
		goto reenable_interrupt;
	}

	/* Receive events into the preallocated softc AQ buffer */
	event.buf_len = IAVF_AQ_BUF_SZ;
	event.msg_buf = sc->aq_buffer;
	bzero(event.msg_buf, IAVF_AQ_BUF_SZ);
	v_msg = (struct virtchnl_msg *)&event.desc;

	IAVF_VC_LOCK(sc);
	/* clean and process any events, at most IAVF_ADM_LIMIT per call */
	do {
		status = iavf_clean_arq_element(hw, &event, pending);
		/*
		 * Also covers normal case when iavf_clean_arq_element()
		 * returns "IAVF_ERR_ADMIN_QUEUE_NO_WORK"
		 */
		if (status)
			break;
		iavf_vc_completion(sc, v_msg->v_opcode,
		    v_msg->v_retval, event.msg_buf, event.msg_len);
		/* scrub the buffer before reusing it for the next event */
		bzero(event.msg_buf, IAVF_AQ_BUF_SZ);
	} while (*pending && (loop++ < IAVF_ADM_LIMIT));
	IAVF_VC_UNLOCK(sc);

reenable_interrupt:
	/* Re-enable admin queue interrupt cause -- on error paths too */
	reg = rd32(hw, IAVF_VFINT_ICR0_ENA1);
	reg |= IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK;
	wr32(hw, IAVF_VFINT_ICR0_ENA1, reg);

	return (status);
}
1242ca853deeSEric Joyner
1243ca853deeSEric Joyner /**
1244ca853deeSEric Joyner * iavf_if_update_admin_status - Administrative status task
1245ca853deeSEric Joyner * @ctx: iflib context
1246ca853deeSEric Joyner *
1247ca853deeSEric Joyner * Called by iflib to handle administrative status events. The iavf driver
1248ca853deeSEric Joyner * uses this to process the adminq virtchnl messages outside of interrupt
1249ca853deeSEric Joyner * context.
1250ca853deeSEric Joyner */
1251ca853deeSEric Joyner static void
iavf_if_update_admin_status(if_ctx_t ctx)1252ca853deeSEric Joyner iavf_if_update_admin_status(if_ctx_t ctx)
1253ca853deeSEric Joyner {
1254ca853deeSEric Joyner struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
1255ca853deeSEric Joyner struct iavf_hw *hw = &sc->hw;
1256ca853deeSEric Joyner u16 pending = 0;
1257ca853deeSEric Joyner
1258ca853deeSEric Joyner iavf_process_adminq(sc, &pending);
1259ca853deeSEric Joyner iavf_update_link_status(sc);
1260ca853deeSEric Joyner
1261ca853deeSEric Joyner /*
1262ca853deeSEric Joyner * If there are still messages to process, reschedule.
1263ca853deeSEric Joyner * Otherwise, re-enable the Admin Queue interrupt.
1264ca853deeSEric Joyner */
1265ca853deeSEric Joyner if (pending > 0)
1266ca853deeSEric Joyner iflib_admin_intr_deferred(ctx);
1267ca853deeSEric Joyner else
1268ca853deeSEric Joyner iavf_enable_adminq_irq(hw);
1269ca853deeSEric Joyner }
1270ca853deeSEric Joyner
1271ca853deeSEric Joyner /**
1272ca853deeSEric Joyner * iavf_if_multi_set - Set multicast address filters
1273ca853deeSEric Joyner * @ctx: iflib context
1274ca853deeSEric Joyner *
1275ca853deeSEric Joyner * Called by iflib to update the current list of multicast filters for the
1276ca853deeSEric Joyner * device.
1277ca853deeSEric Joyner */
1278ca853deeSEric Joyner static void
iavf_if_multi_set(if_ctx_t ctx)1279ca853deeSEric Joyner iavf_if_multi_set(if_ctx_t ctx)
1280ca853deeSEric Joyner {
1281ca853deeSEric Joyner struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
1282ca853deeSEric Joyner
1283ca853deeSEric Joyner iavf_multi_set(sc);
1284ca853deeSEric Joyner }
1285ca853deeSEric Joyner
1286ca853deeSEric Joyner /**
1287ca853deeSEric Joyner * iavf_if_mtu_set - Set the device MTU
1288ca853deeSEric Joyner * @ctx: iflib context
1289ca853deeSEric Joyner * @mtu: MTU value to set
1290ca853deeSEric Joyner *
1291ca853deeSEric Joyner * Called by iflib to set the device MTU.
1292ca853deeSEric Joyner *
1293ca853deeSEric Joyner * @returns zero on success, or EINVAL if the MTU is invalid.
1294ca853deeSEric Joyner */
1295ca853deeSEric Joyner static int
iavf_if_mtu_set(if_ctx_t ctx,uint32_t mtu)1296ca853deeSEric Joyner iavf_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
1297ca853deeSEric Joyner {
1298ca853deeSEric Joyner struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
1299ca853deeSEric Joyner struct iavf_vsi *vsi = &sc->vsi;
1300ca853deeSEric Joyner
1301ca853deeSEric Joyner IOCTL_DEBUGOUT("ioctl: SiOCSIFMTU (Set Interface MTU)");
1302ca853deeSEric Joyner if (mtu < IAVF_MIN_MTU || mtu > IAVF_MAX_MTU) {
1303ca853deeSEric Joyner device_printf(sc->dev, "mtu %d is not in valid range [%d-%d]\n",
1304ca853deeSEric Joyner mtu, IAVF_MIN_MTU, IAVF_MAX_MTU);
1305ca853deeSEric Joyner return (EINVAL);
1306ca853deeSEric Joyner }
1307ca853deeSEric Joyner
1308ca853deeSEric Joyner vsi->shared->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
1309ca853deeSEric Joyner ETHER_VLAN_ENCAP_LEN;
1310ca853deeSEric Joyner
1311ca853deeSEric Joyner return (0);
1312ca853deeSEric Joyner }
1313ca853deeSEric Joyner
1314ca853deeSEric Joyner /**
1315ca853deeSEric Joyner * iavf_if_media_status - Report current media status
1316ca853deeSEric Joyner * @ctx: iflib context
1317ca853deeSEric Joyner * @ifmr: ifmedia request structure
1318ca853deeSEric Joyner *
1319ca853deeSEric Joyner * Called by iflib to report the current media status in the ifmr.
1320ca853deeSEric Joyner */
1321ca853deeSEric Joyner static void
iavf_if_media_status(if_ctx_t ctx,struct ifmediareq * ifmr)1322ca853deeSEric Joyner iavf_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
1323ca853deeSEric Joyner {
1324ca853deeSEric Joyner struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
1325ca853deeSEric Joyner
1326ca853deeSEric Joyner iavf_media_status_common(sc, ifmr);
1327ca853deeSEric Joyner }
1328ca853deeSEric Joyner
1329ca853deeSEric Joyner /**
1330ca853deeSEric Joyner * iavf_if_media_change - Change the current media settings
1331ca853deeSEric Joyner * @ctx: iflib context
1332ca853deeSEric Joyner *
1333ca853deeSEric Joyner * Called by iflib to change the current media settings.
1334ca853deeSEric Joyner *
1335ca853deeSEric Joyner * @returns zero on success, or an error code on failure.
1336ca853deeSEric Joyner */
1337ca853deeSEric Joyner static int
iavf_if_media_change(if_ctx_t ctx)1338ca853deeSEric Joyner iavf_if_media_change(if_ctx_t ctx)
1339ca853deeSEric Joyner {
1340ca853deeSEric Joyner return iavf_media_change_common(iflib_get_ifp(ctx));
1341ca853deeSEric Joyner }
1342ca853deeSEric Joyner
1343ca853deeSEric Joyner /**
1344ca853deeSEric Joyner * iavf_if_promisc_set - Set device promiscuous mode
1345ca853deeSEric Joyner * @ctx: iflib context
1346ca853deeSEric Joyner * @flags: promiscuous configuration
1347ca853deeSEric Joyner *
1348ca853deeSEric Joyner * Called by iflib to request that the device enter promiscuous mode.
1349ca853deeSEric Joyner *
1350ca853deeSEric Joyner * @returns zero on success, or an error code on failure.
1351ca853deeSEric Joyner */
1352ca853deeSEric Joyner static int
iavf_if_promisc_set(if_ctx_t ctx,int flags)1353ca853deeSEric Joyner iavf_if_promisc_set(if_ctx_t ctx, int flags)
1354ca853deeSEric Joyner {
1355ca853deeSEric Joyner struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
1356ca853deeSEric Joyner
1357ca853deeSEric Joyner return iavf_config_promisc(sc, flags);
1358ca853deeSEric Joyner }
1359ca853deeSEric Joyner
1360ca853deeSEric Joyner /**
1361ca853deeSEric Joyner * iavf_if_timer - Periodic timer called by iflib
1362ca853deeSEric Joyner * @ctx: iflib context
1363ca853deeSEric Joyner * @qid: The queue being triggered
1364ca853deeSEric Joyner *
1365ca853deeSEric Joyner * Called by iflib periodically as a timer task, so that the driver can handle
1366ca853deeSEric Joyner * periodic work.
1367ca853deeSEric Joyner *
1368ca853deeSEric Joyner * @remark this timer is only called while the interface is up, even if
1369ca853deeSEric Joyner * IFLIB_ADMIN_ALWAYS_RUN is set.
1370ca853deeSEric Joyner */
1371ca853deeSEric Joyner static void
iavf_if_timer(if_ctx_t ctx,uint16_t qid)1372ca853deeSEric Joyner iavf_if_timer(if_ctx_t ctx, uint16_t qid)
1373ca853deeSEric Joyner {
1374ca853deeSEric Joyner struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
1375ca853deeSEric Joyner struct iavf_hw *hw = &sc->hw;
1376ca853deeSEric Joyner u32 val;
1377ca853deeSEric Joyner
1378ca853deeSEric Joyner if (qid != 0)
1379ca853deeSEric Joyner return;
1380ca853deeSEric Joyner
1381ca853deeSEric Joyner /* Check for when PF triggers a VF reset */
1382ca853deeSEric Joyner val = rd32(hw, IAVF_VFGEN_RSTAT) &
1383ca853deeSEric Joyner IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
1384ca853deeSEric Joyner if (val != VIRTCHNL_VFR_VFACTIVE
1385ca853deeSEric Joyner && val != VIRTCHNL_VFR_COMPLETED) {
1386ca853deeSEric Joyner iavf_dbg_info(sc, "reset in progress! (%d)\n", val);
1387ca853deeSEric Joyner return;
1388ca853deeSEric Joyner }
1389ca853deeSEric Joyner
1390ca853deeSEric Joyner /* Fire off the adminq task */
1391ca853deeSEric Joyner iflib_admin_intr_deferred(ctx);
1392ca853deeSEric Joyner
1393ca853deeSEric Joyner /* Update stats */
1394ca853deeSEric Joyner iavf_request_stats(sc);
1395ca853deeSEric Joyner }
1396ca853deeSEric Joyner
1397ca853deeSEric Joyner /**
1398ca853deeSEric Joyner * iavf_if_vlan_register - Register a VLAN
1399ca853deeSEric Joyner * @ctx: iflib context
1400ca853deeSEric Joyner * @vtag: the VLAN to register
1401ca853deeSEric Joyner *
1402ca853deeSEric Joyner * Register a VLAN filter for a given vtag.
1403ca853deeSEric Joyner */
1404ca853deeSEric Joyner static void
iavf_if_vlan_register(if_ctx_t ctx,u16 vtag)1405ca853deeSEric Joyner iavf_if_vlan_register(if_ctx_t ctx, u16 vtag)
1406ca853deeSEric Joyner {
1407ca853deeSEric Joyner struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
1408ca853deeSEric Joyner struct iavf_vsi *vsi = &sc->vsi;
1409ca853deeSEric Joyner
1410ca853deeSEric Joyner if ((vtag == 0) || (vtag > 4095)) /* Invalid */
1411ca853deeSEric Joyner return;
1412ca853deeSEric Joyner
1413ca853deeSEric Joyner /* Add VLAN 0 to list, for untagged traffic */
1414ca853deeSEric Joyner if (vsi->num_vlans == 0)
1415ca853deeSEric Joyner iavf_add_vlan_filter(sc, 0);
1416ca853deeSEric Joyner
1417ca853deeSEric Joyner iavf_add_vlan_filter(sc, vtag);
1418ca853deeSEric Joyner
1419ca853deeSEric Joyner ++vsi->num_vlans;
1420ca853deeSEric Joyner
1421ca853deeSEric Joyner iavf_send_vc_msg(sc, IAVF_FLAG_AQ_ADD_VLAN_FILTER);
1422ca853deeSEric Joyner }
1423ca853deeSEric Joyner
1424ca853deeSEric Joyner /**
1425ca853deeSEric Joyner * iavf_if_vlan_unregister - Unregister a VLAN
1426ca853deeSEric Joyner * @ctx: iflib context
1427ca853deeSEric Joyner * @vtag: the VLAN to remove
1428ca853deeSEric Joyner *
1429ca853deeSEric Joyner * Unregister (remove) a VLAN filter for the given vtag.
1430ca853deeSEric Joyner */
1431ca853deeSEric Joyner static void
iavf_if_vlan_unregister(if_ctx_t ctx,u16 vtag)1432ca853deeSEric Joyner iavf_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
1433ca853deeSEric Joyner {
1434ca853deeSEric Joyner struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
1435ca853deeSEric Joyner struct iavf_vsi *vsi = &sc->vsi;
1436ca853deeSEric Joyner int i = 0;
1437ca853deeSEric Joyner
1438ca853deeSEric Joyner if ((vtag == 0) || (vtag > 4095) || (vsi->num_vlans == 0)) /* Invalid */
1439ca853deeSEric Joyner return;
1440ca853deeSEric Joyner
1441ca853deeSEric Joyner i = iavf_mark_del_vlan_filter(sc, vtag);
1442ca853deeSEric Joyner vsi->num_vlans -= i;
1443ca853deeSEric Joyner
1444ca853deeSEric Joyner /* Remove VLAN filter 0 if the last VLAN is being removed */
1445ca853deeSEric Joyner if (vsi->num_vlans == 0)
1446ca853deeSEric Joyner i += iavf_mark_del_vlan_filter(sc, 0);
1447ca853deeSEric Joyner
1448ca853deeSEric Joyner if (i > 0)
1449ca853deeSEric Joyner iavf_send_vc_msg(sc, IAVF_FLAG_AQ_DEL_VLAN_FILTER);
1450ca853deeSEric Joyner }
1451ca853deeSEric Joyner
1452ca853deeSEric Joyner /**
1453ca853deeSEric Joyner * iavf_if_get_counter - Get network statistic counters
1454ca853deeSEric Joyner * @ctx: iflib context
1455ca853deeSEric Joyner * @cnt: The counter to obtain
1456ca853deeSEric Joyner *
1457ca853deeSEric Joyner * Called by iflib to obtain the value of the specified counter.
1458ca853deeSEric Joyner *
1459ca853deeSEric Joyner * @returns the uint64_t counter value.
1460ca853deeSEric Joyner */
1461ca853deeSEric Joyner static uint64_t
iavf_if_get_counter(if_ctx_t ctx,ift_counter cnt)1462ca853deeSEric Joyner iavf_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1463ca853deeSEric Joyner {
1464ca853deeSEric Joyner struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
1465ca853deeSEric Joyner struct iavf_vsi *vsi = &sc->vsi;
1466ca853deeSEric Joyner if_t ifp = iflib_get_ifp(ctx);
1467ca853deeSEric Joyner
1468ca853deeSEric Joyner switch (cnt) {
1469ca853deeSEric Joyner case IFCOUNTER_IPACKETS:
1470ca853deeSEric Joyner return (vsi->ipackets);
1471ca853deeSEric Joyner case IFCOUNTER_IERRORS:
1472ca853deeSEric Joyner return (vsi->ierrors);
1473ca853deeSEric Joyner case IFCOUNTER_OPACKETS:
1474ca853deeSEric Joyner return (vsi->opackets);
1475ca853deeSEric Joyner case IFCOUNTER_OERRORS:
1476ca853deeSEric Joyner return (vsi->oerrors);
1477ca853deeSEric Joyner case IFCOUNTER_COLLISIONS:
1478ca853deeSEric Joyner /* Collisions are by standard impossible in 40G/10G Ethernet */
1479ca853deeSEric Joyner return (0);
1480ca853deeSEric Joyner case IFCOUNTER_IBYTES:
1481ca853deeSEric Joyner return (vsi->ibytes);
1482ca853deeSEric Joyner case IFCOUNTER_OBYTES:
1483ca853deeSEric Joyner return (vsi->obytes);
1484ca853deeSEric Joyner case IFCOUNTER_IMCASTS:
1485ca853deeSEric Joyner return (vsi->imcasts);
1486ca853deeSEric Joyner case IFCOUNTER_OMCASTS:
1487ca853deeSEric Joyner return (vsi->omcasts);
1488ca853deeSEric Joyner case IFCOUNTER_IQDROPS:
1489ca853deeSEric Joyner return (vsi->iqdrops);
1490ca853deeSEric Joyner case IFCOUNTER_OQDROPS:
1491ca853deeSEric Joyner return (vsi->oqdrops);
1492ca853deeSEric Joyner case IFCOUNTER_NOPROTO:
1493ca853deeSEric Joyner return (vsi->noproto);
1494ca853deeSEric Joyner default:
1495ca853deeSEric Joyner return (if_get_counter_default(ifp, cnt));
1496ca853deeSEric Joyner }
1497ca853deeSEric Joyner }
1498ca853deeSEric Joyner
14991d6c12c5SKevin Bowling /* iavf_if_needs_restart - Tell iflib when the driver needs to be reinitialized
15001d6c12c5SKevin Bowling * @ctx: iflib context
15011d6c12c5SKevin Bowling * @event: event code to check
15021d6c12c5SKevin Bowling *
15031d6c12c5SKevin Bowling * Defaults to returning false for unknown events.
15041d6c12c5SKevin Bowling *
15051d6c12c5SKevin Bowling * @returns true if iflib needs to reinit the interface
15061d6c12c5SKevin Bowling */
15071d6c12c5SKevin Bowling static bool
iavf_if_needs_restart(if_ctx_t ctx __unused,enum iflib_restart_event event)15081d6c12c5SKevin Bowling iavf_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
15091d6c12c5SKevin Bowling {
15101d6c12c5SKevin Bowling switch (event) {
15111d6c12c5SKevin Bowling case IFLIB_RESTART_VLAN_CONFIG:
15121d6c12c5SKevin Bowling return (true);
15131d6c12c5SKevin Bowling default:
15141d6c12c5SKevin Bowling return (false);
15151d6c12c5SKevin Bowling }
15161d6c12c5SKevin Bowling }
15171d6c12c5SKevin Bowling
1518ca853deeSEric Joyner /**
1519ca853deeSEric Joyner * iavf_free_pci_resources - Free PCI resources
1520ca853deeSEric Joyner * @sc: device softc
1521ca853deeSEric Joyner *
1522ca853deeSEric Joyner * Called to release the PCI resources allocated during attach. May be called
1523ca853deeSEric Joyner * in the error flow of attach_pre, or during detach as part of cleanup.
1524ca853deeSEric Joyner */
1525ca853deeSEric Joyner static void
iavf_free_pci_resources(struct iavf_sc * sc)1526ca853deeSEric Joyner iavf_free_pci_resources(struct iavf_sc *sc)
1527ca853deeSEric Joyner {
1528ca853deeSEric Joyner struct iavf_vsi *vsi = &sc->vsi;
1529ca853deeSEric Joyner struct iavf_rx_queue *rx_que = vsi->rx_queues;
1530ca853deeSEric Joyner device_t dev = sc->dev;
1531ca853deeSEric Joyner
1532ca853deeSEric Joyner /* We may get here before stations are set up */
1533ca853deeSEric Joyner if (rx_que == NULL)
1534ca853deeSEric Joyner goto early;
1535ca853deeSEric Joyner
1536ca853deeSEric Joyner /* Release all interrupts */
1537ca853deeSEric Joyner iflib_irq_free(vsi->ctx, &vsi->irq);
1538ca853deeSEric Joyner
1539ca853deeSEric Joyner for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
1540ca853deeSEric Joyner iflib_irq_free(vsi->ctx, &rx_que->que_irq);
1541ca853deeSEric Joyner
1542ca853deeSEric Joyner early:
1543ca853deeSEric Joyner if (sc->pci_mem != NULL)
1544ca853deeSEric Joyner bus_release_resource(dev, SYS_RES_MEMORY,
1545ca853deeSEric Joyner rman_get_rid(sc->pci_mem), sc->pci_mem);
1546ca853deeSEric Joyner }
1547ca853deeSEric Joyner
1548ca853deeSEric Joyner /**
1549ca853deeSEric Joyner * iavf_setup_interface - Setup the device interface
1550ca853deeSEric Joyner * @sc: device softc
1551ca853deeSEric Joyner *
1552ca853deeSEric Joyner * Called to setup some device interface settings, such as the ifmedia
1553ca853deeSEric Joyner * structure.
1554ca853deeSEric Joyner */
1555ca853deeSEric Joyner static void
iavf_setup_interface(struct iavf_sc * sc)1556ca853deeSEric Joyner iavf_setup_interface(struct iavf_sc *sc)
1557ca853deeSEric Joyner {
1558ca853deeSEric Joyner struct iavf_vsi *vsi = &sc->vsi;
1559ca853deeSEric Joyner if_ctx_t ctx = vsi->ctx;
1560d8096b2dSJustin Hibbits if_t ifp = iflib_get_ifp(ctx);
1561ca853deeSEric Joyner
1562ca853deeSEric Joyner iavf_dbg_init(sc, "begin\n");
1563ca853deeSEric Joyner
1564ca853deeSEric Joyner vsi->shared->isc_max_frame_size =
1565d8096b2dSJustin Hibbits if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN
1566ca853deeSEric Joyner + ETHER_VLAN_ENCAP_LEN;
1567ca853deeSEric Joyner
1568ca853deeSEric Joyner iavf_set_initial_baudrate(ifp);
1569ca853deeSEric Joyner
1570ca853deeSEric Joyner ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1571ca853deeSEric Joyner ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);
1572ca853deeSEric Joyner }
1573ca853deeSEric Joyner
/**
 * iavf_msix_adminq - Admin Queue interrupt handler
 * @arg: void pointer to the device softc
 *
 * Interrupt handler for the non-queue interrupt causes. Primarily this will
 * be the adminq interrupt, but also includes other miscellaneous causes.
 *
 * @returns FILTER_HANDLED in all cases; adminq messages are not processed
 * here but are deferred to the vc_task taskqueue instead.
 */
static int
iavf_msix_adminq(void *arg)
{
	struct iavf_sc *sc = (struct iavf_sc *)arg;
	struct iavf_hw *hw = &sc->hw;
	u32 reg, mask;

	++sc->admin_irq;

	/* Ignore interrupts that arrive before initialization completes. */
	if (!iavf_test_state(&sc->state, IAVF_STATE_INITIALIZED))
		return (FILTER_HANDLED);

	reg = rd32(hw, IAVF_VFINT_ICR01);
	/*
	 * For masking off interrupt causes that need to be handled before
	 * they can be re-enabled
	 */
	mask = rd32(hw, IAVF_VFINT_ICR0_ENA1);

	/* Check on the cause */
	if (reg & IAVF_VFINT_ICR01_ADMINQ_MASK) {
		/* Keep the adminq cause masked until the task has run. */
		mask &= ~IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK;

		/* Process messages outside of the iflib context lock */
		taskqueue_enqueue(sc->vc_tq, &sc->vc_task);
	}

	wr32(hw, IAVF_VFINT_ICR0_ENA1, mask);
	iavf_enable_adminq_irq(hw);

	return (FILTER_HANDLED);
}
1616ca853deeSEric Joyner
1617ca853deeSEric Joyner /**
1618ca853deeSEric Joyner * iavf_enable_intr - Enable device interrupts
1619ca853deeSEric Joyner * @vsi: the main VSI
1620ca853deeSEric Joyner *
1621ca853deeSEric Joyner * Called to enable all queue interrupts.
1622ca853deeSEric Joyner */
1623ca853deeSEric Joyner void
iavf_enable_intr(struct iavf_vsi * vsi)1624ca853deeSEric Joyner iavf_enable_intr(struct iavf_vsi *vsi)
1625ca853deeSEric Joyner {
1626ca853deeSEric Joyner struct iavf_hw *hw = vsi->hw;
1627ca853deeSEric Joyner struct iavf_rx_queue *que = vsi->rx_queues;
1628ca853deeSEric Joyner
1629ca853deeSEric Joyner iavf_enable_adminq_irq(hw);
1630ca853deeSEric Joyner for (int i = 0; i < vsi->num_rx_queues; i++, que++)
1631ca853deeSEric Joyner iavf_enable_queue_irq(hw, que->rxr.me);
1632ca853deeSEric Joyner }
1633ca853deeSEric Joyner
1634ca853deeSEric Joyner /**
1635ca853deeSEric Joyner * iavf_disable_intr - Disable device interrupts
1636ca853deeSEric Joyner * @vsi: the main VSI
1637ca853deeSEric Joyner *
1638ca853deeSEric Joyner * Called to disable all interrupts
1639ca853deeSEric Joyner *
1640ca853deeSEric Joyner * @remark we never disable the admin status interrupt.
1641ca853deeSEric Joyner */
1642ca853deeSEric Joyner void
iavf_disable_intr(struct iavf_vsi * vsi)1643ca853deeSEric Joyner iavf_disable_intr(struct iavf_vsi *vsi)
1644ca853deeSEric Joyner {
1645ca853deeSEric Joyner struct iavf_hw *hw = vsi->hw;
1646ca853deeSEric Joyner struct iavf_rx_queue *que = vsi->rx_queues;
1647ca853deeSEric Joyner
1648ca853deeSEric Joyner for (int i = 0; i < vsi->num_rx_queues; i++, que++)
1649ca853deeSEric Joyner iavf_disable_queue_irq(hw, que->rxr.me);
1650ca853deeSEric Joyner }
1651ca853deeSEric Joyner
1652ca853deeSEric Joyner /**
1653ca853deeSEric Joyner * iavf_enable_queue_irq - Enable IRQ register for a queue interrupt
1654ca853deeSEric Joyner * @hw: hardware structure
1655ca853deeSEric Joyner * @id: IRQ vector to enable
1656ca853deeSEric Joyner *
1657ca853deeSEric Joyner * Writes the IAVF_VFINT_DYN_CTLN1 register to enable a given IRQ interrupt.
1658ca853deeSEric Joyner */
1659ca853deeSEric Joyner static void
iavf_enable_queue_irq(struct iavf_hw * hw,int id)1660ca853deeSEric Joyner iavf_enable_queue_irq(struct iavf_hw *hw, int id)
1661ca853deeSEric Joyner {
1662ca853deeSEric Joyner u32 reg;
1663ca853deeSEric Joyner
1664ca853deeSEric Joyner reg = IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
1665ca853deeSEric Joyner IAVF_VFINT_DYN_CTLN1_CLEARPBA_MASK |
1666ca853deeSEric Joyner IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK;
1667ca853deeSEric Joyner wr32(hw, IAVF_VFINT_DYN_CTLN1(id), reg);
1668ca853deeSEric Joyner }
1669ca853deeSEric Joyner
/**
 * iavf_disable_queue_irq - Disable IRQ register for a queue interrupt
 * @hw: hardware structure
 * @id: IRQ vector to disable
 *
 * Writes the IAVF_VFINT_DYN_CTLN1 register to disable a given IRQ interrupt.
 */
static void
iavf_disable_queue_irq(struct iavf_hw *hw, int id)
{
	wr32(hw, IAVF_VFINT_DYN_CTLN1(id),
	    IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
	/* NOTE(review): read-back presumably flushes the write — confirm */
	rd32(hw, IAVF_VFGEN_RSTAT);
}
1684ca853deeSEric Joyner
/**
 * iavf_configure_itr - Get initial ITR values from tunable values.
 * @sc: device softc
 *
 * Load the initial tunable ITR configuration for both the Tx and Rx
 * directions.
 */
static void
iavf_configure_itr(struct iavf_sc *sc)
{
	iavf_configure_tx_itr(sc);
	iavf_configure_rx_itr(sc);
}
1697ca853deeSEric Joyner
1698ca853deeSEric Joyner /**
1699ca853deeSEric Joyner * iavf_set_queue_rx_itr - Update Rx ITR value
1700ca853deeSEric Joyner * @que: Rx queue to update
1701ca853deeSEric Joyner *
1702ca853deeSEric Joyner * Provide a update to the queue RX interrupt moderation value.
1703ca853deeSEric Joyner */
1704ca853deeSEric Joyner static void
iavf_set_queue_rx_itr(struct iavf_rx_queue * que)1705ca853deeSEric Joyner iavf_set_queue_rx_itr(struct iavf_rx_queue *que)
1706ca853deeSEric Joyner {
1707ca853deeSEric Joyner struct iavf_vsi *vsi = que->vsi;
1708ca853deeSEric Joyner struct iavf_hw *hw = vsi->hw;
1709ca853deeSEric Joyner struct rx_ring *rxr = &que->rxr;
1710ca853deeSEric Joyner
1711ca853deeSEric Joyner /* Idle, do nothing */
1712ca853deeSEric Joyner if (rxr->bytes == 0)
1713ca853deeSEric Joyner return;
1714ca853deeSEric Joyner
1715ca853deeSEric Joyner /* Update the hardware if needed */
1716ca853deeSEric Joyner if (rxr->itr != vsi->rx_itr_setting) {
1717ca853deeSEric Joyner rxr->itr = vsi->rx_itr_setting;
1718ca853deeSEric Joyner wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR,
1719ca853deeSEric Joyner que->rxr.me), rxr->itr);
1720ca853deeSEric Joyner }
1721ca853deeSEric Joyner }
1722ca853deeSEric Joyner
1723ca853deeSEric Joyner /**
1724ca853deeSEric Joyner * iavf_msix_que - Main Rx queue interrupt handler
1725ca853deeSEric Joyner * @arg: void pointer to the Rx queue
1726ca853deeSEric Joyner *
1727ca853deeSEric Joyner * Main MSI-X interrupt handler for Rx queue interrupts
1728ca853deeSEric Joyner *
1729ca853deeSEric Joyner * @returns FILTER_SCHEDULE_THREAD if the main thread for Rx needs to run,
1730ca853deeSEric Joyner * otherwise returns FILTER_HANDLED.
1731ca853deeSEric Joyner */
1732ca853deeSEric Joyner static int
iavf_msix_que(void * arg)1733ca853deeSEric Joyner iavf_msix_que(void *arg)
1734ca853deeSEric Joyner {
1735ca853deeSEric Joyner struct iavf_rx_queue *rx_que = (struct iavf_rx_queue *)arg;
1736ca853deeSEric Joyner struct iavf_sc *sc = rx_que->vsi->back;
1737ca853deeSEric Joyner
1738ca853deeSEric Joyner ++rx_que->irqs;
1739ca853deeSEric Joyner
1740ca853deeSEric Joyner if (!iavf_test_state(&sc->state, IAVF_STATE_RUNNING))
1741ca853deeSEric Joyner return (FILTER_HANDLED);
1742ca853deeSEric Joyner
1743ca853deeSEric Joyner iavf_set_queue_rx_itr(rx_que);
1744ca853deeSEric Joyner
1745ca853deeSEric Joyner return (FILTER_SCHEDULE_THREAD);
1746ca853deeSEric Joyner }
1747ca853deeSEric Joyner
1748ca853deeSEric Joyner /**
1749ca853deeSEric Joyner * iavf_update_link_status - Update iflib Link status
1750ca853deeSEric Joyner * @sc: device softc
1751ca853deeSEric Joyner *
1752ca853deeSEric Joyner * Notify the iflib stack of changes in link status. Called after the device
1753ca853deeSEric Joyner * receives a virtchnl message indicating a change in link status.
1754ca853deeSEric Joyner */
1755ca853deeSEric Joyner void
iavf_update_link_status(struct iavf_sc * sc)1756ca853deeSEric Joyner iavf_update_link_status(struct iavf_sc *sc)
1757ca853deeSEric Joyner {
1758ca853deeSEric Joyner struct iavf_vsi *vsi = &sc->vsi;
1759ca853deeSEric Joyner u64 baudrate;
1760ca853deeSEric Joyner
1761ca853deeSEric Joyner if (sc->link_up){
1762ca853deeSEric Joyner if (vsi->link_active == FALSE) {
1763ca853deeSEric Joyner vsi->link_active = TRUE;
1764ca853deeSEric Joyner baudrate = iavf_baudrate_from_link_speed(sc);
1765ca853deeSEric Joyner iavf_dbg_info(sc, "baudrate: %llu\n", (unsigned long long)baudrate);
1766ca853deeSEric Joyner iflib_link_state_change(vsi->ctx, LINK_STATE_UP, baudrate);
1767ca853deeSEric Joyner }
1768ca853deeSEric Joyner } else { /* Link down */
1769ca853deeSEric Joyner if (vsi->link_active == TRUE) {
1770ca853deeSEric Joyner vsi->link_active = FALSE;
1771ca853deeSEric Joyner iflib_link_state_change(vsi->ctx, LINK_STATE_DOWN, 0);
1772ca853deeSEric Joyner }
1773ca853deeSEric Joyner }
1774ca853deeSEric Joyner }
1775ca853deeSEric Joyner
1776ca853deeSEric Joyner /**
1777ca853deeSEric Joyner * iavf_stop - Stop the interface
1778ca853deeSEric Joyner * @sc: device softc
1779ca853deeSEric Joyner *
1780ca853deeSEric Joyner * This routine disables all traffic on the adapter by disabling interrupts
1781ca853deeSEric Joyner * and sending a message to the PF to tell it to stop the hardware
1782ca853deeSEric Joyner * Tx/Rx LAN queues.
1783ca853deeSEric Joyner */
1784ca853deeSEric Joyner static void
iavf_stop(struct iavf_sc * sc)1785ca853deeSEric Joyner iavf_stop(struct iavf_sc *sc)
1786ca853deeSEric Joyner {
1787ca853deeSEric Joyner iavf_clear_state(&sc->state, IAVF_STATE_RUNNING);
1788ca853deeSEric Joyner
1789ca853deeSEric Joyner iavf_disable_intr(&sc->vsi);
1790ca853deeSEric Joyner
1791ca853deeSEric Joyner iavf_disable_queues_with_retries(sc);
1792ca853deeSEric Joyner }
1793ca853deeSEric Joyner
1794ca853deeSEric Joyner /**
1795ca853deeSEric Joyner * iavf_if_stop - iflib stop handler
1796ca853deeSEric Joyner * @ctx: iflib context
1797ca853deeSEric Joyner *
1798ca853deeSEric Joyner * Call iavf_stop to stop the interface.
1799ca853deeSEric Joyner */
1800ca853deeSEric Joyner static void
iavf_if_stop(if_ctx_t ctx)1801ca853deeSEric Joyner iavf_if_stop(if_ctx_t ctx)
1802ca853deeSEric Joyner {
1803ca853deeSEric Joyner struct iavf_sc *sc = iavf_sc_from_ctx(ctx);
1804ca853deeSEric Joyner
1805ca853deeSEric Joyner iavf_stop(sc);
1806ca853deeSEric Joyner }
1807ca853deeSEric Joyner
1808ca853deeSEric Joyner /**
1809ca853deeSEric Joyner * iavf_del_mac_filter - Delete a MAC filter
1810ca853deeSEric Joyner * @sc: device softc
1811ca853deeSEric Joyner * @macaddr: MAC address to remove
1812ca853deeSEric Joyner *
1813ca853deeSEric Joyner * Marks a MAC filter for deletion.
1814ca853deeSEric Joyner *
1815ca853deeSEric Joyner * @returns zero if the filter existed, or ENOENT if it did not.
1816ca853deeSEric Joyner */
1817ca853deeSEric Joyner static int
iavf_del_mac_filter(struct iavf_sc * sc,u8 * macaddr)1818ca853deeSEric Joyner iavf_del_mac_filter(struct iavf_sc *sc, u8 *macaddr)
1819ca853deeSEric Joyner {
1820ca853deeSEric Joyner struct iavf_mac_filter *f;
1821ca853deeSEric Joyner
1822ca853deeSEric Joyner f = iavf_find_mac_filter(sc, macaddr);
1823ca853deeSEric Joyner if (f == NULL)
1824ca853deeSEric Joyner return (ENOENT);
1825ca853deeSEric Joyner
1826ca853deeSEric Joyner f->flags |= IAVF_FILTER_DEL;
1827ca853deeSEric Joyner return (0);
1828ca853deeSEric Joyner }
1829ca853deeSEric Joyner
1830ca853deeSEric Joyner /**
1831ca853deeSEric Joyner * iavf_init_tx_rsqs - Initialize Report Status array
1832ca853deeSEric Joyner * @vsi: the main VSI
1833ca853deeSEric Joyner *
1834ca853deeSEric Joyner * Set the Report Status queue fields to zero in order to initialize the
1835ca853deeSEric Joyner * queues for transmit.
1836ca853deeSEric Joyner */
1837ca853deeSEric Joyner void
iavf_init_tx_rsqs(struct iavf_vsi * vsi)1838ca853deeSEric Joyner iavf_init_tx_rsqs(struct iavf_vsi *vsi)
1839ca853deeSEric Joyner {
1840ca853deeSEric Joyner if_softc_ctx_t scctx = vsi->shared;
1841ca853deeSEric Joyner struct iavf_tx_queue *tx_que;
1842ca853deeSEric Joyner int i, j;
1843ca853deeSEric Joyner
1844ca853deeSEric Joyner for (i = 0, tx_que = vsi->tx_queues; i < vsi->num_tx_queues; i++, tx_que++) {
1845ca853deeSEric Joyner struct tx_ring *txr = &tx_que->txr;
1846ca853deeSEric Joyner
1847ca853deeSEric Joyner txr->tx_rs_cidx = txr->tx_rs_pidx;
1848ca853deeSEric Joyner
1849ca853deeSEric Joyner /* Initialize the last processed descriptor to be the end of
1850ca853deeSEric Joyner * the ring, rather than the start, so that we avoid an
1851ca853deeSEric Joyner * off-by-one error when calculating how many descriptors are
1852ca853deeSEric Joyner * done in the credits_update function.
1853ca853deeSEric Joyner */
1854ca853deeSEric Joyner txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
1855ca853deeSEric Joyner
1856ca853deeSEric Joyner for (j = 0; j < scctx->isc_ntxd[0]; j++)
1857ca853deeSEric Joyner txr->tx_rsq[j] = QIDX_INVALID;
1858ca853deeSEric Joyner }
1859ca853deeSEric Joyner }
1860ca853deeSEric Joyner
1861ca853deeSEric Joyner /**
1862ca853deeSEric Joyner * iavf_init_tx_cidx - Initialize Tx cidx values
1863ca853deeSEric Joyner * @vsi: the main VSI
1864ca853deeSEric Joyner *
1865ca853deeSEric Joyner * Initialize the tx_cidx_processed values for Tx queues in order to
1866ca853deeSEric Joyner * initialize the Tx queues for transmit.
1867ca853deeSEric Joyner */
1868ca853deeSEric Joyner void
iavf_init_tx_cidx(struct iavf_vsi * vsi)1869ca853deeSEric Joyner iavf_init_tx_cidx(struct iavf_vsi *vsi)
1870ca853deeSEric Joyner {
1871ca853deeSEric Joyner if_softc_ctx_t scctx = vsi->shared;
1872ca853deeSEric Joyner struct iavf_tx_queue *tx_que;
1873ca853deeSEric Joyner int i;
1874ca853deeSEric Joyner
1875ca853deeSEric Joyner for (i = 0, tx_que = vsi->tx_queues; i < vsi->num_tx_queues; i++, tx_que++) {
1876ca853deeSEric Joyner struct tx_ring *txr = &tx_que->txr;
1877ca853deeSEric Joyner
1878ca853deeSEric Joyner txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
1879ca853deeSEric Joyner }
1880ca853deeSEric Joyner }
1881ca853deeSEric Joyner
1882ca853deeSEric Joyner /**
1883ca853deeSEric Joyner * iavf_add_device_sysctls - Add device sysctls for configuration
1884ca853deeSEric Joyner * @sc: device softc
1885ca853deeSEric Joyner *
1886ca853deeSEric Joyner * Add the main sysctl nodes and sysctls for device configuration.
1887ca853deeSEric Joyner */
1888ca853deeSEric Joyner static void
iavf_add_device_sysctls(struct iavf_sc * sc)1889ca853deeSEric Joyner iavf_add_device_sysctls(struct iavf_sc *sc)
1890ca853deeSEric Joyner {
1891ca853deeSEric Joyner struct iavf_vsi *vsi = &sc->vsi;
1892ca853deeSEric Joyner device_t dev = sc->dev;
1893ca853deeSEric Joyner struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1894ca853deeSEric Joyner struct sysctl_oid_list *debug_list;
1895ca853deeSEric Joyner
1896ca853deeSEric Joyner iavf_add_device_sysctls_common(sc);
1897ca853deeSEric Joyner
1898ca853deeSEric Joyner debug_list = iavf_create_debug_sysctl_tree(sc);
1899ca853deeSEric Joyner
1900ca853deeSEric Joyner iavf_add_debug_sysctls_common(sc, debug_list);
1901ca853deeSEric Joyner
1902ca853deeSEric Joyner SYSCTL_ADD_PROC(ctx, debug_list,
1903ca853deeSEric Joyner OID_AUTO, "queue_interrupt_table", CTLTYPE_STRING | CTLFLAG_RD,
1904ca853deeSEric Joyner sc, 0, iavf_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");
1905ca853deeSEric Joyner
1906ca853deeSEric Joyner #ifdef IAVF_DEBUG
1907ca853deeSEric Joyner SYSCTL_ADD_PROC(ctx, debug_list,
1908ca853deeSEric Joyner OID_AUTO, "do_vf_reset", CTLTYPE_INT | CTLFLAG_WR,
1909ca853deeSEric Joyner sc, 0, iavf_sysctl_vf_reset, "A", "Request a VF reset from PF");
1910ca853deeSEric Joyner
1911ca853deeSEric Joyner SYSCTL_ADD_PROC(ctx, debug_list,
1912ca853deeSEric Joyner OID_AUTO, "do_vflr_reset", CTLTYPE_INT | CTLFLAG_WR,
1913ca853deeSEric Joyner sc, 0, iavf_sysctl_vflr_reset, "A", "Request a VFLR reset from HW");
1914ca853deeSEric Joyner #endif
1915ca853deeSEric Joyner
1916ca853deeSEric Joyner /* Add stats sysctls */
1917ca853deeSEric Joyner iavf_add_vsi_sysctls(dev, vsi, ctx, "vsi");
1918ca853deeSEric Joyner
1919ca853deeSEric Joyner iavf_add_queues_sysctls(dev, vsi);
1920ca853deeSEric Joyner }
1921ca853deeSEric Joyner
1922ca853deeSEric Joyner /**
1923ca853deeSEric Joyner * iavf_add_queues_sysctls - Add per-queue sysctls
1924ca853deeSEric Joyner * @dev: device pointer
1925ca853deeSEric Joyner * @vsi: the main VSI
1926ca853deeSEric Joyner *
1927ca853deeSEric Joyner * Add sysctls for each Tx and Rx queue.
1928ca853deeSEric Joyner */
1929ca853deeSEric Joyner void
iavf_add_queues_sysctls(device_t dev,struct iavf_vsi * vsi)1930ca853deeSEric Joyner iavf_add_queues_sysctls(device_t dev, struct iavf_vsi *vsi)
1931ca853deeSEric Joyner {
1932ca853deeSEric Joyner struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1933ca853deeSEric Joyner struct sysctl_oid_list *vsi_list, *queue_list;
1934ca853deeSEric Joyner struct sysctl_oid *queue_node;
1935ca853deeSEric Joyner char queue_namebuf[32];
1936ca853deeSEric Joyner
1937ca853deeSEric Joyner struct iavf_rx_queue *rx_que;
1938ca853deeSEric Joyner struct iavf_tx_queue *tx_que;
1939ca853deeSEric Joyner struct tx_ring *txr;
1940ca853deeSEric Joyner struct rx_ring *rxr;
1941ca853deeSEric Joyner
1942ca853deeSEric Joyner vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
1943ca853deeSEric Joyner
1944ca853deeSEric Joyner /* Queue statistics */
1945ca853deeSEric Joyner for (int q = 0; q < vsi->num_rx_queues; q++) {
1946ca853deeSEric Joyner bzero(queue_namebuf, sizeof(queue_namebuf));
1947ca853deeSEric Joyner snprintf(queue_namebuf, IAVF_QUEUE_NAME_LEN, "rxq%02d", q);
1948ca853deeSEric Joyner queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
1949ca853deeSEric Joyner OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "RX Queue #");
1950ca853deeSEric Joyner queue_list = SYSCTL_CHILDREN(queue_node);
1951ca853deeSEric Joyner
1952ca853deeSEric Joyner rx_que = &(vsi->rx_queues[q]);
1953ca853deeSEric Joyner rxr = &(rx_que->rxr);
1954ca853deeSEric Joyner
1955ca853deeSEric Joyner SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1956ca853deeSEric Joyner CTLFLAG_RD, &(rx_que->irqs),
1957ca853deeSEric Joyner "irqs on this queue (both Tx and Rx)");
1958ca853deeSEric Joyner
1959ca853deeSEric Joyner SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "packets",
1960ca853deeSEric Joyner CTLFLAG_RD, &(rxr->rx_packets),
1961ca853deeSEric Joyner "Queue Packets Received");
1962ca853deeSEric Joyner SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "bytes",
1963ca853deeSEric Joyner CTLFLAG_RD, &(rxr->rx_bytes),
1964ca853deeSEric Joyner "Queue Bytes Received");
1965ca853deeSEric Joyner SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "desc_err",
1966ca853deeSEric Joyner CTLFLAG_RD, &(rxr->desc_errs),
1967ca853deeSEric Joyner "Queue Rx Descriptor Errors");
1968ca853deeSEric Joyner SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "itr",
1969ca853deeSEric Joyner CTLFLAG_RD, &(rxr->itr), 0,
1970ca853deeSEric Joyner "Queue Rx ITR Interval");
1971ca853deeSEric Joyner }
1972ca853deeSEric Joyner for (int q = 0; q < vsi->num_tx_queues; q++) {
1973ca853deeSEric Joyner bzero(queue_namebuf, sizeof(queue_namebuf));
1974ca853deeSEric Joyner snprintf(queue_namebuf, IAVF_QUEUE_NAME_LEN, "txq%02d", q);
1975ca853deeSEric Joyner queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
1976ca853deeSEric Joyner OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "TX Queue #");
1977ca853deeSEric Joyner queue_list = SYSCTL_CHILDREN(queue_node);
1978ca853deeSEric Joyner
1979ca853deeSEric Joyner tx_que = &(vsi->tx_queues[q]);
1980ca853deeSEric Joyner txr = &(tx_que->txr);
1981ca853deeSEric Joyner
1982ca853deeSEric Joyner SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso",
1983ca853deeSEric Joyner CTLFLAG_RD, &(tx_que->tso),
1984ca853deeSEric Joyner "TSO");
1985ca853deeSEric Joyner SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mss_too_small",
1986ca853deeSEric Joyner CTLFLAG_RD, &(txr->mss_too_small),
1987ca853deeSEric Joyner "TSO sends with an MSS less than 64");
1988ca853deeSEric Joyner SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "packets",
1989ca853deeSEric Joyner CTLFLAG_RD, &(txr->tx_packets),
1990ca853deeSEric Joyner "Queue Packets Transmitted");
1991ca853deeSEric Joyner SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "bytes",
1992ca853deeSEric Joyner CTLFLAG_RD, &(txr->tx_bytes),
1993ca853deeSEric Joyner "Queue Bytes Transmitted");
1994ca853deeSEric Joyner SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "itr",
1995ca853deeSEric Joyner CTLFLAG_RD, &(txr->itr), 0,
1996ca853deeSEric Joyner "Queue Tx ITR Interval");
1997ca853deeSEric Joyner }
1998ca853deeSEric Joyner }
1999ca853deeSEric Joyner
2000ca853deeSEric Joyner /**
2001ca853deeSEric Joyner * iavf_driver_is_detaching - Check if the driver is detaching/unloading
2002ca853deeSEric Joyner * @sc: device private softc
2003ca853deeSEric Joyner *
2004ca853deeSEric Joyner * @returns true if the driver is detaching, false otherwise.
2005ca853deeSEric Joyner *
2006ca853deeSEric Joyner * @remark on newer kernels, take advantage of iflib_in_detach in order to
2007ca853deeSEric Joyner * report detachment correctly as early as possible.
2008ca853deeSEric Joyner *
2009ca853deeSEric Joyner * @remark this function is used by various code paths that want to avoid
2010ca853deeSEric Joyner * running if the driver is about to be removed. This includes sysctls and
2011ca853deeSEric Joyner * other driver access points. Note that it does not fully resolve
2012ca853deeSEric Joyner * detach-based race conditions as it is possible for a thread to race with
2013ca853deeSEric Joyner * iflib_in_detach.
2014ca853deeSEric Joyner */
2015ca853deeSEric Joyner bool
iavf_driver_is_detaching(struct iavf_sc * sc)2016ca853deeSEric Joyner iavf_driver_is_detaching(struct iavf_sc *sc)
2017ca853deeSEric Joyner {
2018ca853deeSEric Joyner return (!iavf_test_state(&sc->state, IAVF_STATE_INITIALIZED) ||
2019ca853deeSEric Joyner iflib_in_detach(sc->vsi.ctx));
2020ca853deeSEric Joyner }
2021ca853deeSEric Joyner
2022ca853deeSEric Joyner /**
2023ca853deeSEric Joyner * iavf_sysctl_queue_interrupt_table - Sysctl for displaying Tx queue mapping
2024ca853deeSEric Joyner * @oidp: sysctl oid structure
2025ca853deeSEric Joyner * @arg1: void pointer to device softc
2026ca853deeSEric Joyner * @arg2: unused
2027ca853deeSEric Joyner * @req: sysctl request pointer
2028ca853deeSEric Joyner *
2029ca853deeSEric Joyner * Print out mapping of TX queue indexes and Rx queue indexes to MSI-X vectors.
2030ca853deeSEric Joyner *
2031ca853deeSEric Joyner * @returns zero on success, or an error code on failure.
2032ca853deeSEric Joyner */
2033ca853deeSEric Joyner static int
iavf_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS)2034ca853deeSEric Joyner iavf_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS)
2035ca853deeSEric Joyner {
2036ca853deeSEric Joyner struct iavf_sc *sc = (struct iavf_sc *)arg1;
2037ca853deeSEric Joyner struct iavf_vsi *vsi = &sc->vsi;
2038ca853deeSEric Joyner device_t dev = sc->dev;
2039ca853deeSEric Joyner struct sbuf *buf;
2040ca853deeSEric Joyner int error = 0;
2041ca853deeSEric Joyner
2042ca853deeSEric Joyner struct iavf_rx_queue *rx_que;
2043ca853deeSEric Joyner struct iavf_tx_queue *tx_que;
2044ca853deeSEric Joyner
2045ca853deeSEric Joyner UNREFERENCED_2PARAMETER(arg2, oidp);
2046ca853deeSEric Joyner
2047ca853deeSEric Joyner if (iavf_driver_is_detaching(sc))
2048ca853deeSEric Joyner return (ESHUTDOWN);
2049ca853deeSEric Joyner
2050ca853deeSEric Joyner buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
2051ca853deeSEric Joyner if (!buf) {
2052ca853deeSEric Joyner device_printf(dev, "Could not allocate sbuf for output.\n");
2053ca853deeSEric Joyner return (ENOMEM);
2054ca853deeSEric Joyner }
2055ca853deeSEric Joyner
2056ca853deeSEric Joyner sbuf_cat(buf, "\n");
2057ca853deeSEric Joyner for (int i = 0; i < vsi->num_rx_queues; i++) {
2058ca853deeSEric Joyner rx_que = &vsi->rx_queues[i];
2059ca853deeSEric Joyner sbuf_printf(buf, "(rxq %3d): %d\n", i, rx_que->msix);
2060ca853deeSEric Joyner }
2061ca853deeSEric Joyner for (int i = 0; i < vsi->num_tx_queues; i++) {
2062ca853deeSEric Joyner tx_que = &vsi->tx_queues[i];
2063ca853deeSEric Joyner sbuf_printf(buf, "(txq %3d): %d\n", i, tx_que->msix);
2064ca853deeSEric Joyner }
2065ca853deeSEric Joyner
2066ca853deeSEric Joyner error = sbuf_finish(buf);
2067ca853deeSEric Joyner if (error)
2068ca853deeSEric Joyner device_printf(dev, "Error finishing sbuf: %d\n", error);
2069ca853deeSEric Joyner sbuf_delete(buf);
2070ca853deeSEric Joyner
2071ca853deeSEric Joyner return (error);
2072ca853deeSEric Joyner }
2073ca853deeSEric Joyner
2074ca853deeSEric Joyner #ifdef IAVF_DEBUG
2075ca853deeSEric Joyner #define CTX_ACTIVE(ctx) ((if_getdrvflags(iflib_get_ifp(ctx)) & IFF_DRV_RUNNING))
2076ca853deeSEric Joyner
2077ca853deeSEric Joyner /**
2078ca853deeSEric Joyner * iavf_sysctl_vf_reset - Request a VF reset
2079ca853deeSEric Joyner * @oidp: sysctl oid pointer
2080ca853deeSEric Joyner * @arg1: void pointer to device softc
2081ca853deeSEric Joyner * @arg2: unused
2082ca853deeSEric Joyner * @req: sysctl request pointer
2083ca853deeSEric Joyner *
2084ca853deeSEric Joyner * Request a VF reset for the device.
2085ca853deeSEric Joyner *
2086ca853deeSEric Joyner * @returns zero on success, or an error code on failure.
2087ca853deeSEric Joyner */
2088ca853deeSEric Joyner static int
iavf_sysctl_vf_reset(SYSCTL_HANDLER_ARGS)2089ca853deeSEric Joyner iavf_sysctl_vf_reset(SYSCTL_HANDLER_ARGS)
2090ca853deeSEric Joyner {
2091ca853deeSEric Joyner struct iavf_sc *sc = (struct iavf_sc *)arg1;
2092ca853deeSEric Joyner int do_reset = 0, error = 0;
2093ca853deeSEric Joyner
2094ca853deeSEric Joyner UNREFERENCED_PARAMETER(arg2);
2095ca853deeSEric Joyner
2096ca853deeSEric Joyner if (iavf_driver_is_detaching(sc))
2097ca853deeSEric Joyner return (ESHUTDOWN);
2098ca853deeSEric Joyner
2099ca853deeSEric Joyner error = sysctl_handle_int(oidp, &do_reset, 0, req);
2100ca853deeSEric Joyner if ((error) || (req->newptr == NULL))
2101ca853deeSEric Joyner return (error);
2102ca853deeSEric Joyner
2103ca853deeSEric Joyner if (do_reset == 1) {
2104ca853deeSEric Joyner iavf_reset(sc);
2105ca853deeSEric Joyner if (CTX_ACTIVE(sc->vsi.ctx))
2106ca853deeSEric Joyner iflib_request_reset(sc->vsi.ctx);
2107ca853deeSEric Joyner }
2108ca853deeSEric Joyner
2109ca853deeSEric Joyner return (error);
2110ca853deeSEric Joyner }
2111ca853deeSEric Joyner
2112ca853deeSEric Joyner /**
2113ca853deeSEric Joyner * iavf_sysctl_vflr_reset - Trigger a PCIe FLR for the device
2114ca853deeSEric Joyner * @oidp: sysctl oid pointer
2115ca853deeSEric Joyner * @arg1: void pointer to device softc
2116ca853deeSEric Joyner * @arg2: unused
2117ca853deeSEric Joyner * @req: sysctl request pointer
2118ca853deeSEric Joyner *
2119ca853deeSEric Joyner * Sysctl callback to trigger a PCIe FLR.
2120ca853deeSEric Joyner *
2121ca853deeSEric Joyner * @returns zero on success, or an error code on failure.
2122ca853deeSEric Joyner */
2123ca853deeSEric Joyner static int
iavf_sysctl_vflr_reset(SYSCTL_HANDLER_ARGS)2124ca853deeSEric Joyner iavf_sysctl_vflr_reset(SYSCTL_HANDLER_ARGS)
2125ca853deeSEric Joyner {
2126ca853deeSEric Joyner struct iavf_sc *sc = (struct iavf_sc *)arg1;
2127ca853deeSEric Joyner device_t dev = sc->dev;
2128ca853deeSEric Joyner int do_reset = 0, error = 0;
2129ca853deeSEric Joyner
2130ca853deeSEric Joyner UNREFERENCED_PARAMETER(arg2);
2131ca853deeSEric Joyner
2132ca853deeSEric Joyner if (iavf_driver_is_detaching(sc))
2133ca853deeSEric Joyner return (ESHUTDOWN);
2134ca853deeSEric Joyner
2135ca853deeSEric Joyner error = sysctl_handle_int(oidp, &do_reset, 0, req);
2136ca853deeSEric Joyner if ((error) || (req->newptr == NULL))
2137ca853deeSEric Joyner return (error);
2138ca853deeSEric Joyner
2139ca853deeSEric Joyner if (do_reset == 1) {
2140ca853deeSEric Joyner if (!pcie_flr(dev, max(pcie_get_max_completion_timeout(dev) / 1000, 10), true)) {
2141ca853deeSEric Joyner device_printf(dev, "PCIE FLR failed\n");
2142ca853deeSEric Joyner error = EIO;
2143ca853deeSEric Joyner }
2144ca853deeSEric Joyner else if (CTX_ACTIVE(sc->vsi.ctx))
2145ca853deeSEric Joyner iflib_request_reset(sc->vsi.ctx);
2146ca853deeSEric Joyner }
2147ca853deeSEric Joyner
2148ca853deeSEric Joyner return (error);
2149ca853deeSEric Joyner }
2150ca853deeSEric Joyner #undef CTX_ACTIVE
2151ca853deeSEric Joyner #endif
2152