/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/


#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"

#include "ixgbe.h"
#include "ifdi_if.h"

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>

/************************************************************************
 * Driver version
 ************************************************************************/
char ixv_driver_version[] = "2.0.1-k";

/************************************************************************
 * PCI Device ID Table
 *
 *   Used by iflib_device_probe to select devices to load on.
 *   Last entry must be PVID_END.
 *
 *   { Vendor ID, Device ID, Description String }
 ************************************************************************/
static pci_vendor_info_t ixv_vendor_info_array[] =
{
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, "Intel(R) X520 82599 Virtual Function"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, "Intel(R) X540 Virtual Function"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, "Intel(R) X550 Virtual Function"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, "Intel(R) X552 Virtual Function"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, "Intel(R) X553 Virtual Function"),
	/* required last entry */
	PVID_END
};

/************************************************************************
 * Function prototypes
 ************************************************************************/
static void     *ixv_register(device_t);
static int      ixv_if_attach_pre(if_ctx_t);
static int      ixv_if_attach_post(if_ctx_t);
static int      ixv_if_detach(if_ctx_t);

static int      ixv_if_rx_queue_intr_enable(if_ctx_t, uint16_t);
static int      ixv_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
static int      ixv_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
static void     ixv_if_queues_free(if_ctx_t);
static void     ixv_identify_hardware(if_ctx_t);
static void     ixv_init_device_features(struct ixgbe_softc *);
static int      ixv_allocate_pci_resources(if_ctx_t);
static void     ixv_free_pci_resources(if_ctx_t);
static int      ixv_setup_interface(if_ctx_t);
static void     ixv_if_media_status(if_ctx_t, struct ifmediareq *);
static int      ixv_if_media_change(if_ctx_t);
static void     ixv_if_update_admin_status(if_ctx_t);
static int      ixv_if_msix_intr_assign(if_ctx_t, int);

static int      ixv_if_mtu_set(if_ctx_t, uint32_t);
static void     ixv_if_init(if_ctx_t);
static void     ixv_if_local_timer(if_ctx_t, uint16_t);
static void     ixv_if_stop(if_ctx_t);
static int      ixv_negotiate_api(struct ixgbe_softc *);

static void     ixv_initialize_transmit_units(if_ctx_t);
static void     ixv_initialize_receive_units(if_ctx_t);
static void     ixv_initialize_rss_mapping(struct ixgbe_softc *);

static void     ixv_setup_vlan_support(if_ctx_t);
static void     ixv_configure_ivars(struct ixgbe_softc *);
static void     ixv_if_enable_intr(if_ctx_t);
static void     ixv_if_disable_intr(if_ctx_t);
static void     ixv_if_multi_set(if_ctx_t);

static void     ixv_if_register_vlan(if_ctx_t, u16);
static void     ixv_if_unregister_vlan(if_ctx_t, u16);

static uint64_t ixv_if_get_counter(if_ctx_t, ift_counter);
static bool	ixv_if_needs_restart(if_ctx_t, enum iflib_restart_event);

static void     ixv_save_stats(struct ixgbe_softc *);
static void     ixv_init_stats(struct ixgbe_softc *);
static void     ixv_update_stats(struct ixgbe_softc *);
static void     ixv_add_stats_sysctls(struct ixgbe_softc *);

static int      ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
static void     ixv_set_ivar(struct ixgbe_softc *, u8, u8, s8);

static u8       *ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);

/* The MSI-X Interrupt handlers */
static int      ixv_msix_que(void *);
static int      ixv_msix_mbx(void *);

/************************************************************************
 * FreeBSD Device Interface Entry Points
 ************************************************************************/
static device_method_t ixv_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, ixv_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
	DEVMETHOD_END
};

static driver_t ixv_driver = {
	"ixv", ixv_methods, sizeof(struct ixgbe_softc),
};

devclass_t ixv_devclass;
DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
IFLIB_PNP_INFO(pci, ixv_driver, ixv_vendor_info_array);
MODULE_DEPEND(ixv, iflib, 1, 1, 1);
MODULE_DEPEND(ixv, pci, 1, 1, 1);
MODULE_DEPEND(ixv, ether, 1, 1, 1);

static device_method_t ixv_if_methods[] = {
	DEVMETHOD(ifdi_attach_pre, ixv_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, ixv_if_attach_post),
	DEVMETHOD(ifdi_detach, ixv_if_detach),
	DEVMETHOD(ifdi_init, ixv_if_init),
	DEVMETHOD(ifdi_stop, ixv_if_stop),
	DEVMETHOD(ifdi_msix_intr_assign, ixv_if_msix_intr_assign),
	DEVMETHOD(ifdi_intr_enable, ixv_if_enable_intr),
	DEVMETHOD(ifdi_intr_disable, ixv_if_disable_intr),
	DEVMETHOD(ifdi_tx_queue_intr_enable, ixv_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_rx_queue_intr_enable, ixv_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queues_alloc, ixv_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, ixv_if_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, ixv_if_queues_free),
	DEVMETHOD(ifdi_update_admin_status, ixv_if_update_admin_status),
	DEVMETHOD(ifdi_multi_set, ixv_if_multi_set),
	DEVMETHOD(ifdi_mtu_set, ixv_if_mtu_set),
	DEVMETHOD(ifdi_media_status, ixv_if_media_status),
	DEVMETHOD(ifdi_media_change, ixv_if_media_change),
	DEVMETHOD(ifdi_timer, ixv_if_local_timer),
	DEVMETHOD(ifdi_vlan_register, ixv_if_register_vlan),
	DEVMETHOD(ifdi_vlan_unregister, ixv_if_unregister_vlan),
	DEVMETHOD(ifdi_get_counter, ixv_if_get_counter),
	DEVMETHOD(ifdi_needs_restart, ixv_if_needs_restart),
	DEVMETHOD_END
};

static driver_t ixv_if_driver = {
	"ixv_if", ixv_if_methods, sizeof(struct ixgbe_softc)
};

/*
 * TUNEABLE PARAMETERS:
 */

/* Flow control setting, default to full */
static int ixv_flow_control = ixgbe_fc_full;
TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);
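
/*
 * Illustrative usage (an assumed example, not from this file): loader
 * tunables like this can be set from /boot/loader.conf, e.g.
 *
 *   hw.ixv.flow_control="3"
 *
 * where the value is one of the enum ixgbe_fc_* constants
 * (ixgbe_fc_full is assumed to be 3; see ixgbe_type.h for the
 * authoritative values).
 */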

/*
 * Header split: this causes the hardware to DMA
 * the header into a separate mbuf from the payload.
 * It can be a performance win in some workloads, but
 * in others it actually hurts; it's off by default.
 */
static int ixv_header_split = false;
TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);

/*
 * Shadow VFTA table: this is needed because
 * the real filter table gets cleared during
 * a soft reset and we need to repopulate it.
 */
static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];
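
/*
 * The table above has IXGBE_VFTA_SIZE entries (128 in ixgbe_type.h),
 * each holding 32 valid bits, one per VLAN ID, which covers the full
 * 0..4095 VLAN ID space.
 */
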
extern struct if_txrx ixgbe_txrx;

static struct if_shared_ctx ixv_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_q_align = PAGE_SIZE, /* max(DBA_ALIGN, PAGE_SIZE) */
	.isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tx_maxsegsize = PAGE_SIZE,
	.isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = PAGE_SIZE,
	.isc_rx_maxsize = MJUM16BYTES,
	.isc_rx_nsegments = 1,
	.isc_rx_maxsegsize = MJUM16BYTES,
	.isc_nfl = 1,
	.isc_ntxqs = 1,
	.isc_nrxqs = 1,
	.isc_admin_intrcnt = 1,
	.isc_vendor_info = ixv_vendor_info_array,
	.isc_driver_version = ixv_driver_version,
	.isc_driver = &ixv_if_driver,
	.isc_flags = IFLIB_IS_VF | IFLIB_TSO_INIT_IP,

	.isc_nrxd_min = {MIN_RXD},
	.isc_ntxd_min = {MIN_TXD},
	.isc_nrxd_max = {MAX_RXD},
	.isc_ntxd_max = {MAX_TXD},
	.isc_nrxd_default = {DEFAULT_RXD},
	.isc_ntxd_default = {DEFAULT_TXD},
};
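
/*
 * iflib invokes the DEVMETHOD(device_register) hook below before
 * attach; returning this template tells iflib how many queues,
 * descriptors, and DMA segment sizes the driver supports.
 */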

static void *
ixv_register(device_t dev)
{
	return (&ixv_sctx_init);
}

/************************************************************************
 * ixv_if_tx_queues_alloc
 ************************************************************************/
static int
ixv_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int ntxqs, int ntxqsets)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_softc_ctx_t     scctx = sc->shared;
	struct ix_tx_queue *que;
	int                i, j, error;

	MPASS(sc->num_tx_queues == ntxqsets);
	MPASS(ntxqs == 1);

	/* Allocate queue structure memory */
	sc->tx_queues =
	    (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
	                                 M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!sc->tx_queues) {
		device_printf(iflib_get_dev(ctx),
		    "Unable to allocate TX ring memory\n");
		return (ENOMEM);
	}

	for (i = 0, que = sc->tx_queues; i < ntxqsets; i++, que++) {
		struct tx_ring *txr = &que->txr;

		txr->me = i;
		txr->sc = que->sc = sc;

		/* Allocate report status array */
		if (!(txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) *
		    scctx->isc_ntxd[0], M_DEVBUF, M_NOWAIT | M_ZERO))) {
			error = ENOMEM;
			goto fail;
		}
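		/*
		 * Note (assumed behavior, see ixgbe_txrx for details):
		 * tx_rsq records the descriptor indices at which the RS
		 * (report status) bit was requested, so completion
		 * processing only has to inspect those descriptors.
		 */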
		for (j = 0; j < scctx->isc_ntxd[0]; j++)
			txr->tx_rsq[j] = QIDX_INVALID;
		/* get the virtual and physical address of the hardware queues */
		txr->tail = IXGBE_VFTDT(txr->me);
		txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i*ntxqs];
		txr->tx_paddr = paddrs[i*ntxqs];

		txr->bytes = 0;
		txr->total_packets = 0;
	}

	device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
	    sc->num_tx_queues);

	return (0);

 fail:
	ixv_if_queues_free(ctx);

	return (error);
} /* ixv_if_tx_queues_alloc */

/************************************************************************
 * ixv_if_rx_queues_alloc
 ************************************************************************/
static int
ixv_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int nrxqs, int nrxqsets)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ix_rx_queue *que;
	int                i, error;

	MPASS(sc->num_rx_queues == nrxqsets);
	MPASS(nrxqs == 1);

	/* Allocate queue structure memory */
	sc->rx_queues =
	    (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue) * nrxqsets,
	                                 M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!sc->rx_queues) {
		device_printf(iflib_get_dev(ctx),
		    "Unable to allocate RX ring memory\n");
		error = ENOMEM;
		goto fail;
	}

	for (i = 0, que = sc->rx_queues; i < nrxqsets; i++, que++) {
		struct rx_ring *rxr = &que->rxr;

		rxr->me = i;
		rxr->sc = que->sc = sc;

		/* get the virtual and physical address of the hw queues */
		rxr->tail = IXGBE_VFRDT(rxr->me);
		rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
		rxr->rx_paddr = paddrs[i*nrxqs];
		rxr->bytes = 0;
		rxr->que = que;
	}

	device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
	    sc->num_rx_queues);

	return (0);

fail:
	ixv_if_queues_free(ctx);

	return (error);
} /* ixv_if_rx_queues_alloc */

/************************************************************************
 * ixv_if_queues_free
 ************************************************************************/
static void
ixv_if_queues_free(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ix_tx_queue *que = sc->tx_queues;
	int                i;

	if (que == NULL)
		goto free;

	for (i = 0; i < sc->num_tx_queues; i++, que++) {
		struct tx_ring *txr = &que->txr;

		if (txr->tx_rsq == NULL)
			break;

		free(txr->tx_rsq, M_DEVBUF);
		txr->tx_rsq = NULL;
	}
	if (sc->tx_queues != NULL)
		free(sc->tx_queues, M_DEVBUF);
free:
	if (sc->rx_queues != NULL)
		free(sc->rx_queues, M_DEVBUF);
	sc->tx_queues = NULL;
	sc->rx_queues = NULL;
} /* ixv_if_queues_free */

/************************************************************************
 * ixv_if_attach_pre - Device initialization routine
 *
 *   Called when the driver is being loaded.
 *   Identifies the type of hardware, allocates all resources
 *   and initializes the hardware.
 *
 *   return 0 on success, positive on failure
 ************************************************************************/
static int
ixv_if_attach_pre(if_ctx_t ctx)
{
	struct ixgbe_softc  *sc;
	device_t        dev;
	if_softc_ctx_t  scctx;
	struct ixgbe_hw *hw;
	int             error = 0;

	INIT_DEBUGOUT("ixv_attach: begin");

	/* Allocate, clear, and link in our sc structure */
	dev = iflib_get_dev(ctx);
	sc = iflib_get_softc(ctx);
	sc->dev = dev;
	sc->ctx = ctx;
	sc->hw.back = sc;
	scctx = sc->shared = iflib_get_softc_ctx(ctx);
	sc->media = iflib_get_media(ctx);
	hw = &sc->hw;

	/* Do base PCI setup - map BAR0 */
	if (ixv_allocate_pci_resources(ctx)) {
		device_printf(dev, "ixv_allocate_pci_resources() failed!\n");
		error = ENXIO;
		goto err_out;
	}

	/* SYSCTL APIs */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    sc, 0, ixv_sysctl_debug, "I", "Debug Info");

	/* Determine hardware revision */
	ixv_identify_hardware(ctx);
	ixv_init_device_features(sc);

	/* Initialize the shared code */
	error = ixgbe_init_ops_vf(hw);
	if (error) {
		device_printf(dev, "ixgbe_init_ops_vf() failed!\n");
		error = EIO;
		goto err_out;
	}

	/* Setup the mailbox */
	ixgbe_init_mbx_params_vf(hw);

	error = hw->mac.ops.reset_hw(hw);
	if (error == IXGBE_ERR_RESET_FAILED)
		device_printf(dev, "...reset_hw() failure: Reset Failed!\n");
	else if (error)
		device_printf(dev, "...reset_hw() failed with error %d\n",
		    error);
	if (error) {
		error = EIO;
		goto err_out;
	}

	error = hw->mac.ops.init_hw(hw);
	if (error) {
		device_printf(dev, "...init_hw() failed with error %d\n",
		    error);
		error = EIO;
		goto err_out;
	}

	/* Negotiate mailbox API version */
	error = ixv_negotiate_api(sc);
	if (error) {
		device_printf(dev,
		    "Mailbox API negotiation failed during attach!\n");
		goto err_out;
	}

	/* If no mac address was assigned, make a random one */
	if (!ixv_check_ether_addr(hw->mac.addr)) {
		ether_gen_addr(iflib_get_ifp(ctx),
		    (struct ether_addr *)hw->mac.addr);
		bcopy(hw->mac.addr, hw->mac.perm_addr,
		    sizeof(hw->mac.perm_addr));
	}

	/* Most of the iflib initialization... */

	iflib_set_mac(ctx, hw->mac.addr);
	switch (sc->hw.mac.type) {
	case ixgbe_mac_X550_vf:
	case ixgbe_mac_X550EM_x_vf:
	case ixgbe_mac_X550EM_a_vf:
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 2;
		break;
	default:
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 1;
	}
	scctx->isc_txqsizes[0] =
	    roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
	    sizeof(u32), DBA_ALIGN);
	scctx->isc_rxqsizes[0] =
	    roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
	    DBA_ALIGN);
	/* XXX */
	scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
	    CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
	scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
	scctx->isc_msix_bar = pci_msix_table_bar(dev);
	scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
	scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
	scctx->isc_tx_tso_segsize_max = PAGE_SIZE;

	scctx->isc_txrx = &ixgbe_txrx;

	/*
	 * Tell the upper layer(s) we support everything the PF
	 * driver does except...
	 *   Wake-on-LAN
	 */
	scctx->isc_capabilities = IXGBE_CAPS;
	scctx->isc_capabilities ^= IFCAP_WOL;
	scctx->isc_capenable = scctx->isc_capabilities;

	INIT_DEBUGOUT("ixv_if_attach_pre: end");

	return (0);

err_out:
	ixv_free_pci_resources(ctx);

	return (error);
} /* ixv_if_attach_pre */

static int
ixv_if_attach_post(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	device_t       dev = iflib_get_dev(ctx);
	int            error = 0;

	/* Setup OS specific network interface */
	error = ixv_setup_interface(ctx);
	if (error) {
		device_printf(dev, "Interface setup failed: %d\n", error);
		goto end;
	}

	/* Do the stats setup */
	ixv_save_stats(sc);
	ixv_init_stats(sc);
	ixv_add_stats_sysctls(sc);

end:
	return error;
} /* ixv_if_attach_post */

/************************************************************************
 * ixv_if_detach - Device removal routine
 *
 *   Called when the driver is being removed.
 *   Stops the adapter and deallocates all the resources
 *   that were allocated for driver operation.
 *
 *   return 0 on success, positive on failure
 ************************************************************************/
static int
ixv_if_detach(if_ctx_t ctx)
{
	INIT_DEBUGOUT("ixv_detach: begin");

	ixv_free_pci_resources(ctx);

	return (0);
} /* ixv_if_detach */

/************************************************************************
 * ixv_if_mtu_set
 ************************************************************************/
static int
ixv_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ifnet   *ifp = iflib_get_ifp(ctx);
	int            error = 0;

	IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
	if (mtu > IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR) {
		error = EINVAL;
	} else {
		ifp->if_mtu = mtu;
		sc->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
	}

	return error;
} /* ixv_if_mtu_set */

/************************************************************************
 * ixv_if_init - Init entry point
 *
 *   Used in two ways: it is the init entry point in the network
 *   interface structure used by the stack, and the driver also
 *   calls it as a hw/sw initialization routine to get back to a
 *   consistent state.
 ************************************************************************/
static void
ixv_if_init(if_ctx_t ctx)
{
	struct ixgbe_softc  *sc = iflib_get_softc(ctx);
	struct ifnet    *ifp = iflib_get_ifp(ctx);
	device_t        dev = iflib_get_dev(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	int             error = 0;

	INIT_DEBUGOUT("ixv_if_init: begin");
	hw->adapter_stopped = false;
	hw->mac.ops.stop_adapter(hw);

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);

	/* Reset VF and renegotiate mailbox API version */
	hw->mac.ops.reset_hw(hw);
	hw->mac.ops.start_hw(hw);
	error = ixv_negotiate_api(sc);
	if (error) {
		device_printf(dev,
		    "Mailbox API negotiation failed in if_init!\n");
		return;
	}

	ixv_initialize_transmit_units(ctx);

	/* Setup Multicast table */
	ixv_if_multi_set(ctx);

	sc->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);

	/* Configure RX settings */
	ixv_initialize_receive_units(ctx);

	/* Set up VLAN offload and filter */
	ixv_setup_vlan_support(ctx);

	/* Set up MSI-X routing */
	ixv_configure_ivars(sc);

	/* Set up auto-mask */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(sc->vector), IXGBE_LINK_ITR);

	/* Stats init */
	ixv_init_stats(sc);

	/* Config/Enable Link */
	hw->mac.ops.check_link(hw, &sc->link_speed, &sc->link_up,
	    false);

	/* And now turn on interrupts */
	ixv_if_enable_intr(ctx);

	return;
} /* ixv_if_init */

/************************************************************************
 * ixv_enable_queue
 ************************************************************************/
static inline void
ixv_enable_queue(struct ixgbe_softc *sc, u32 vector)
{
	struct ixgbe_hw *hw = &sc->hw;
	u32             queue = 1 << vector;
	u32             mask;

	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
} /* ixv_enable_queue */

/************************************************************************
 * ixv_disable_queue
 ************************************************************************/
static inline void
ixv_disable_queue(struct ixgbe_softc *sc, u32 vector)
{
	struct ixgbe_hw *hw = &sc->hw;
	u64             queue = (u64)1 << vector;
	u32             mask;

	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
} /* ixv_disable_queue */
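
/*
 * VTEIMS is a write-1-to-set interrupt mask register and VTEIMC is its
 * write-1-to-clear counterpart, so the enable/disable helpers above
 * touch only the bit for the given vector and leave the rest alone.
 */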

/************************************************************************
 * ixv_msix_que - MSI-X Queue Interrupt Service routine
 ************************************************************************/
static int
ixv_msix_que(void *arg)
{
	struct ix_rx_queue *que = arg;
	struct ixgbe_softc *sc = que->sc;

	ixv_disable_queue(sc, que->msix);
	++que->irqs;

	return (FILTER_SCHEDULE_THREAD);
} /* ixv_msix_que */

/************************************************************************
 * ixv_msix_mbx
 ************************************************************************/
static int
ixv_msix_mbx(void *arg)
{
	struct ixgbe_softc  *sc = arg;
	struct ixgbe_hw *hw = &sc->hw;
	u32             reg;

	++sc->link_irq;

	/* First get the cause */
	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);

	/* Link status change */
	if (reg & IXGBE_EICR_LSC)
		iflib_admin_intr_deferred(sc->ctx);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);

	return (FILTER_HANDLED);
} /* ixv_msix_mbx */

/************************************************************************
 * ixv_if_media_status - Media Ioctl callback
 *
 *   Called whenever the user queries the status of
 *   the interface using ifconfig.
 ************************************************************************/
static void
ixv_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);

	INIT_DEBUGOUT("ixv_media_status: begin");

	iflib_admin_intr_deferred(ctx);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!sc->link_active)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	switch (sc->link_speed) {
	case IXGBE_LINK_SPEED_1GB_FULL:
		ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_10GB_FULL:
		ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_100_FULL:
		ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_10_FULL:
		ifmr->ifm_active |= IFM_10_T | IFM_FDX;
		break;
	}
} /* ixv_if_media_status */

/************************************************************************
 * ixv_if_media_change - Media Ioctl callback
 *
 *   Called when the user changes speed/duplex using
 *   the media/mediaopt options with ifconfig.
 ************************************************************************/
static int
ixv_if_media_change(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ifmedia *ifm = iflib_get_media(ctx);

	INIT_DEBUGOUT("ixv_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		break;
	default:
		device_printf(sc->dev, "Only auto media type\n");
		return (EINVAL);
	}

	return (0);
} /* ixv_if_media_change */

/************************************************************************
 * ixv_negotiate_api
 *
 *   Negotiate the Mailbox API with the PF;
 *   start with the most featured API first.
 ************************************************************************/
static int
ixv_negotiate_api(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	int             mbx_api[] = { ixgbe_mbox_api_11,
	                              ixgbe_mbox_api_10,
	                              ixgbe_mbox_api_unknown };
	int             i = 0;

	while (mbx_api[i] != ixgbe_mbox_api_unknown) {
		if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
			return (0);
		i++;
	}

	return (EINVAL);
} /* ixv_negotiate_api */
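
/*
 * The mbx_api list above is ordered newest-first, so the VF settles on
 * the most capable mailbox API version that the PF also understands.
 */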

/************************************************************************
 * ixv_if_multi_set - Multicast Update
 *
 *   Called whenever multicast address list is updated.
 ************************************************************************/
static void
ixv_if_multi_set(if_ctx_t ctx)
{
	u8       mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	u8                 *update_ptr;
	struct ifmultiaddr *ifma;
	if_t               ifp = iflib_get_ifp(ctx);
	int                mcnt = 0;

	IOCTL_DEBUGOUT("ixv_if_multi_set: begin");

	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
		    IXGBE_ETH_LENGTH_OF_ADDRESS);
		mcnt++;
	}

	update_ptr = mta;

	sc->hw.mac.ops.update_mc_addr_list(&sc->hw, update_ptr, mcnt,
	    ixv_mc_array_itr, true);
} /* ixv_if_multi_set */

/************************************************************************
 * ixv_mc_array_itr
 *
 *   An iterator function needed by the multicast shared code.
 *   It feeds the shared code routine the addresses in the
 *   mta array of ixv_if_multi_set() one by one.
 ************************************************************************/
static u8 *
ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
{
	u8 *addr = *update_ptr;
	u8 *newptr;

	*vmdq = 0;

	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
	*update_ptr = newptr;

	return addr;
} /* ixv_mc_array_itr */

/************************************************************************
 * ixv_if_local_timer - Timer routine
 *
 *   Checks for link status, updates statistics,
 *   and runs the watchdog check.
 ************************************************************************/
static void
ixv_if_local_timer(if_ctx_t ctx, uint16_t qid)
{
	if (qid != 0)
		return;

	/* Fire off the adminq task */
	iflib_admin_intr_deferred(ctx);
} /* ixv_if_local_timer */

/************************************************************************
 * ixv_if_update_admin_status - Update OS on link state
 *
 * Note: Only updates the OS on the cached link state.
 *       The real check of the hardware only happens with
 *       a link interrupt.
 ************************************************************************/
static void
ixv_if_update_admin_status(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	device_t       dev = iflib_get_dev(ctx);
	s32            status;

	sc->hw.mac.get_link_status = true;

	status = ixgbe_check_link(&sc->hw, &sc->link_speed,
	    &sc->link_up, false);

	if (status != IXGBE_SUCCESS && sc->hw.adapter_stopped == false) {
		/*
		 * Mailbox's Clear To Send status is lost or timeout occurred.
		 * We need reinitialization.
		 */
		iflib_get_ifp(ctx)->if_init(ctx);
	}

	if (sc->link_up) {
		if (sc->link_active == false) {
			if (bootverbose)
				device_printf(dev, "Link is up %d Gbps %s\n",
				    ((sc->link_speed == 128) ? 10 : 1),
				    "Full Duplex");
			sc->link_active = true;
			iflib_link_state_change(ctx, LINK_STATE_UP,
			    IF_Gbps(10));
		}
	} else { /* Link down */
		if (sc->link_active == true) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
			sc->link_active = false;
		}
	}

	/* Stats Update */
	ixv_update_stats(sc);
} /* ixv_if_update_admin_status */

/************************************************************************
 * ixv_if_stop - Stop the hardware
 *
 *   Disables all traffic on the adapter by issuing a
 *   global reset on the MAC and deallocates TX/RX buffers.
 ************************************************************************/
static void
ixv_if_stop(if_ctx_t ctx)
{
	struct ixgbe_softc  *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;

	INIT_DEBUGOUT("ixv_stop: begin\n");

	ixv_if_disable_intr(ctx);

	hw->mac.ops.reset_hw(hw);
	sc->hw.adapter_stopped = false;
	hw->mac.ops.stop_adapter(hw);

	/* Update the stack */
	sc->link_up = false;
	ixv_if_update_admin_status(ctx);

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
} /* ixv_if_stop */

/************************************************************************
 * ixv_identify_hardware - Determine hardware revision.
 ************************************************************************/
static void
ixv_identify_hardware(if_ctx_t ctx)
{
	struct ixgbe_softc  *sc = iflib_get_softc(ctx);
	device_t        dev = iflib_get_dev(ctx);
	struct ixgbe_hw *hw = &sc->hw;

	/* Save off the information about this board */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_get_revid(dev);
	hw->subsystem_vendor_id = pci_get_subvendor(dev);
	hw->subsystem_device_id = pci_get_subdevice(dev);

	/* A subset of set_mac_type */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_VF:
		hw->mac.type = ixgbe_mac_82599_vf;
		break;
	case IXGBE_DEV_ID_X540_VF:
		hw->mac.type = ixgbe_mac_X540_vf;
		break;
	case IXGBE_DEV_ID_X550_VF:
		hw->mac.type = ixgbe_mac_X550_vf;
		break;
	case IXGBE_DEV_ID_X550EM_X_VF:
		hw->mac.type = ixgbe_mac_X550EM_x_vf;
		break;
	case IXGBE_DEV_ID_X550EM_A_VF:
		hw->mac.type = ixgbe_mac_X550EM_a_vf;
		break;
	default:
		device_printf(dev, "unknown mac type\n");
		hw->mac.type = ixgbe_mac_unknown;
		break;
	}
} /* ixv_identify_hardware */

/************************************************************************
 * ixv_if_msix_intr_assign - Setup MSI-X Interrupt resources and handlers
 ************************************************************************/
static int
ixv_if_msix_intr_assign(if_ctx_t ctx, int msix)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	device_t           dev = iflib_get_dev(ctx);
	struct ix_rx_queue *rx_que = sc->rx_queues;
	struct ix_tx_queue *tx_que;
	int                error, rid, vector = 0;
	char               buf[16];

	for (int i = 0; i < sc->num_rx_queues; i++, vector++, rx_que++) {
		rid = vector + 1;

		snprintf(buf, sizeof(buf), "rxq%d", i);
		error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
		    IFLIB_INTR_RXTX, ixv_msix_que, rx_que, rx_que->rxr.me, buf);

		if (error) {
			device_printf(iflib_get_dev(ctx),
			    "Failed to allocate que int %d err: %d\n",
			    i, error);
			sc->num_rx_queues = i + 1;
			goto fail;
		}

		rx_que->msix = vector;
	}

	for (int i = 0; i < sc->num_tx_queues; i++) {
		snprintf(buf, sizeof(buf), "txq%d", i);
		tx_que = &sc->tx_queues[i];
		tx_que->msix = i % sc->num_rx_queues;
		iflib_softirq_alloc_generic(ctx,
		    &sc->rx_queues[tx_que->msix].que_irq,
		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
	}
	rid = vector + 1;
	error = iflib_irq_alloc_generic(ctx, &sc->irq, rid,
	    IFLIB_INTR_ADMIN, ixv_msix_mbx, sc, 0, "aq");
	if (error) {
		device_printf(iflib_get_dev(ctx),
		    "Failed to register admin handler\n");
		return (error);
	}

	sc->vector = vector;
	/*
	 * Due to a broken design QEMU will fail to properly
	 * enable the guest for MSI-X unless the vectors in
	 * the table are all set up, so we must rewrite the
	 * ENABLE in the MSI-X control register again at this
	 * point to cause it to successfully initialize us.
	 */
	if (sc->hw.mac.type == ixgbe_mac_82599_vf) {
		int msix_ctrl;

		pci_find_cap(dev, PCIY_MSIX, &rid);
		rid += PCIR_MSIX_CTRL;
		msix_ctrl = pci_read_config(dev, rid, 2);
		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
		pci_write_config(dev, rid, msix_ctrl, 2);
	}

	return (0);

fail:
	iflib_irq_free(ctx, &sc->irq);
	rx_que = sc->rx_queues;
	for (int i = 0; i < sc->num_rx_queues; i++, rx_que++)
		iflib_irq_free(ctx, &rx_que->que_irq);

	return (error);
} /* ixv_if_msix_intr_assign */
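
/*
 * Resulting vector layout: vectors 0..num_rx_queues-1 service the
 * RX/TX queue pairs (TX completions piggyback on the paired RX
 * vector), and the next vector, saved in sc->vector, handles the
 * admin/mailbox interrupt.
 */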

/************************************************************************
 * ixv_allocate_pci_resources
 ************************************************************************/
static int
ixv_allocate_pci_resources(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	device_t       dev = iflib_get_dev(ctx);
	int            rid;

	rid = PCIR_BAR(0);
	sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);

	if (!(sc->pci_mem)) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		return (ENXIO);
	}

	sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->pci_mem);
	sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->pci_mem);
	sc->hw.hw_addr = (u8 *)&sc->osdep.mem_bus_space_handle;

	return (0);
} /* ixv_allocate_pci_resources */

/************************************************************************
 * ixv_free_pci_resources
 ************************************************************************/
static void
ixv_free_pci_resources(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ix_rx_queue *que = sc->rx_queues;
	device_t           dev = iflib_get_dev(ctx);

	/* Release all MSI-X queue resources */
	if (sc->intr_type == IFLIB_INTR_MSIX)
		iflib_irq_free(ctx, &sc->irq);

	if (que != NULL) {
		for (int i = 0; i < sc->num_rx_queues; i++, que++) {
			iflib_irq_free(ctx, &que->que_irq);
		}
	}

	if (sc->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->pci_mem), sc->pci_mem);
} /* ixv_free_pci_resources */

/************************************************************************
 * ixv_setup_interface
 *
 *   Setup networking device structure and register an interface.
 ************************************************************************/
static int
ixv_setup_interface(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_softc_ctx_t scctx = sc->shared;
	struct ifnet   *ifp = iflib_get_ifp(ctx);

	INIT_DEBUGOUT("ixv_setup_interface: begin");

	if_setbaudrate(ifp, IF_Gbps(10));
	ifp->if_snd.ifq_maxlen = scctx->isc_ntxd[0] - 2;

	sc->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
	ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);

	return 0;
} /* ixv_setup_interface */

/************************************************************************
 * ixv_if_get_counter
 ************************************************************************/
static uint64_t
ixv_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_t           ifp = iflib_get_ifp(ctx);

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (sc->ipackets);
	case IFCOUNTER_OPACKETS:
		return (sc->opackets);
	case IFCOUNTER_IBYTES:
		return (sc->ibytes);
	case IFCOUNTER_OBYTES:
		return (sc->obytes);
	case IFCOUNTER_IMCASTS:
		return (sc->imcasts);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
} /* ixv_if_get_counter */

/* ixv_if_needs_restart - Tell iflib when the driver needs to be reinitialized
 * @ctx: iflib context
 * @event: event code to check
 *
 * Defaults to returning true for every event.
 *
 * @returns true if iflib needs to reinit the interface
 */
static bool
ixv_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
{
	switch (event) {
	case IFLIB_RESTART_VLAN_CONFIG:
		/* XXX: This may not need to return true */
	default:
		return (true);
	}
}

/************************************************************************
 * ixv_initialize_transmit_units - Enable transmit unit.
 ************************************************************************/
static void
ixv_initialize_transmit_units(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw    *hw = &sc->hw;
	if_softc_ctx_t     scctx = sc->shared;
	struct ix_tx_queue *que = sc->tx_queues;
	int                i;

	for (i = 0; i < sc->num_tx_queues; i++, que++) {
		struct tx_ring *txr = &que->txr;
		u64            tdba = txr->tx_paddr;
		u32            txctrl, txdctl;
		int            j = txr->me;

		/* Set WTHRESH to 8, burst writeback */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);

		/* Set the HW Tx Head and Tail indices */
		IXGBE_WRITE_REG(&sc->hw, IXGBE_VFTDH(j), 0);
		IXGBE_WRITE_REG(&sc->hw, IXGBE_VFTDT(j), 0);

		/* Set Tx Tail register */
		txr->tail = IXGBE_VFTDT(j);

		txr->tx_rs_cidx = txr->tx_rs_pidx;
		/*
		 * Initialize the last processed descriptor to be the end of
		 * the ring, rather than the start, so that we avoid an
		 * off-by-one error when calculating how many descriptors are
		 * done in the credits_update function.
		 */
		txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
		for (int k = 0; k < scctx->isc_ntxd[0]; k++)
			txr->tx_rsq[k] = QIDX_INVALID;

		/* Set Ring parameters */
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j),
		    scctx->isc_ntxd[0] * sizeof(struct ixgbe_legacy_tx_desc));
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);

		/* Now enable */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	return;
} /* ixv_initialize_transmit_units */

/************************************************************************
 * ixv_initialize_rss_mapping
 ************************************************************************/
static void
ixv_initialize_rss_mapping(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	u32             reta = 0, mrqc, rss_key[10];
	int             queue_id;
	int             i, j;
	u32             rss_hash_config;

	if (sc->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *)&rss_key);
	} else {
		/* set up random bits */
		arc4rand(&rss_key, sizeof(rss_key), 0);
	}

	/* Now fill out hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);

	/* Set up the redirection table */
	for (i = 0, j = 0; i < 64; i++, j++) {
		if (j == sc->num_rx_queues)
			j = 0;

		if (sc->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_rx_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % sc->num_rx_queues;
		} else
			queue_id = j;

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
		reta >>= 8;
		reta |= ((uint32_t)queue_id) << 24;
		if ((i & 3) == 3) {
			IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
			reta = 0;
		}
	}
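
	/*
	 * Example: with two RX queues the first write (i == 3) packs
	 * entries 0..3 as
	 *
	 *   VFRETA(0) = (q3 << 24) | (q2 << 16) | (q1 << 8) | q0
	 *             = 0x01000100 for the default 0,1,0,1 pattern,
	 *
	 * because each ">>= 8" shifts the older entries toward the
	 * low byte before the newest entry lands in bits 31:24.
	 */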

	/* Perform hash on these packet types */
	if (sc->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
		                | RSS_HASHTYPE_RSS_TCP_IPV4
		                | RSS_HASHTYPE_RSS_IPV6
		                | RSS_HASHTYPE_RSS_TCP_IPV6;
	}

	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		device_printf(sc->dev,
		    "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		device_printf(sc->dev,
		    "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		device_printf(sc->dev,
		    "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
		    __func__);
	IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
} /* ixv_initialize_rss_mapping */

/************************************************************************
 * ixv_initialize_receive_units - Setup receive registers and features.
 ************************************************************************/
static void
ixv_initialize_receive_units(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_softc_ctx_t     scctx;
	struct ixgbe_hw    *hw = &sc->hw;
	struct ifnet       *ifp = iflib_get_ifp(ctx);
	struct ix_rx_queue *que = sc->rx_queues;
	u32                bufsz, psrtype;

	if (ifp->if_mtu > ETHERMTU)
		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	else
		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	psrtype = IXGBE_PSRTYPE_TCPHDR
	        | IXGBE_PSRTYPE_UDPHDR
	        | IXGBE_PSRTYPE_IPV4HDR
	        | IXGBE_PSRTYPE_IPV6HDR
	        | IXGBE_PSRTYPE_L2HDR;

	if (sc->num_rx_queues > 1)
		psrtype |= 1 << 29;

	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);

	/* Tell PF our max_frame size */
	if (ixgbevf_rlpml_set_vf(hw, sc->max_frame_size) != 0) {
		device_printf(sc->dev,
		    "There is a problem with the PF setup.  It is likely the"
		    " receive unit for this VF will not function correctly.\n");
	}
	scctx = sc->shared;

	for (int i = 0; i < sc->num_rx_queues; i++, que++) {
		struct rx_ring *rxr = &que->rxr;
		u64            rdba = rxr->rx_paddr;
		u32            reg, rxdctl;
		int            j = rxr->me;

		/* Disable the queue */
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
		for (int k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
			    IXGBE_RXDCTL_ENABLE)
				msec_delay(1);
			else
				break;
		}
		wmb();
		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j),
		    scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));

		/* Reset the ring indices */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);

		/* Set up the SRRCTL register */
		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(j));
		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		reg |= bufsz;
		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(j), reg);

		/* Capture Rx Tail index */
		rxr->tail = IXGBE_VFRDT(rxr->me);

		/* Do the queue enabling last */
		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
		for (int l = 0; l < 10; l++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			msec_delay(1);
		}
		wmb();

		/* Set the Tail Pointer */
#ifdef DEV_NETMAP
		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
		if (ifp->if_capenable & IFCAP_NETMAP) {
			struct netmap_adapter *na = NA(ifp);
			struct netmap_kring *kring = na->rx_rings[j];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
			    scctx->isc_nrxd[0] - 1);
	}

	/*
	 * Do not touch RSS and RETA settings for older hardware
	 * as those are shared among PF and all VF.
	 */
	if (sc->hw.mac.type >= ixgbe_mac_X550_vf)
		ixv_initialize_rss_mapping(sc);
} /* ixv_initialize_receive_units */

/************************************************************************
 * ixv_setup_vlan_support
 ************************************************************************/
static void
ixv_setup_vlan_support(if_ctx_t ctx)
{
	struct ifnet	*ifp = iflib_get_ifp(ctx);
	struct ixgbe_softc  *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	u32             ctrl, vid, vfta, retry;

	/*
	 * We get here through if_init, meaning
	 * a soft reset, which has already cleared
	 * the VFTA and other state, so if no
	 * VLANs have been registered do nothing.
	 */
	if (sc->num_vlans == 0)
		return;

	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
		/* Enable the queues */
		for (int i = 0; i < sc->num_rx_queues; i++) {
			ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
			ctrl |= IXGBE_RXDCTL_VME;
			IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
			/*
			 * Let Rx path know that it needs to store VLAN tag
			 * as part of extra mbuf info.
			 */
			sc->rx_queues[i].rxr.vtag_strip = true;
		}
	}

	/*
	 * If filtering VLAN tags is disabled,
	 * there is no need to fill the VLAN Filter Table Array (VFTA).
	 */
	if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
		return;

	/*
	 * A soft reset zeroes out the VFTA, so
	 * we need to repopulate it now.
	 */
	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
		if (ixv_shadow_vfta[i] == 0)
			continue;
		vfta = ixv_shadow_vfta[i];
		/*
		 * Reconstruct the VLAN IDs
		 * based on the bits set in each
		 * of the array ints.
		 */
		for (int j = 0; j < 32; j++) {
			retry = 0;
			if ((vfta & (1 << j)) == 0)
				continue;
			vid = (i * 32) + j;
			/* Call the shared code mailbox routine */
			while (hw->mac.ops.set_vfta(hw, vid, 0, true, false)) {
				if (++retry > 5)
					break;
			}
		}
	}
} /* ixv_setup_vlan_support */

/************************************************************************
 * ixv_if_register_vlan
 *
 *   Run via a vlan config EVENT; it enables us to use the
 *   HW Filter table since we can get the vlan id. This just
 *   creates the entry in the soft version of the VFTA; init
 *   will repopulate the real table.
 ************************************************************************/
static void
ixv_if_register_vlan(if_ctx_t ctx, u16 vtag)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	u16            index, bit;

	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	ixv_shadow_vfta[index] |= (1 << bit);
	++sc->num_vlans;
} /* ixv_if_register_vlan */
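
/*
 * Example: for vtag 100, index = (100 >> 5) & 0x7F = 3 and
 * bit = 100 & 0x1F = 4, so bit 4 of ixv_shadow_vfta[3] is set.
 */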

/************************************************************************
 * ixv_if_unregister_vlan
 *
 *   Run via a vlan unconfig EVENT; removes our entry
 *   in the soft vfta.
 ************************************************************************/
static void
ixv_if_unregister_vlan(if_ctx_t ctx, u16 vtag)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	u16            index, bit;

	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	ixv_shadow_vfta[index] &= ~(1 << bit);
	--sc->num_vlans;
} /* ixv_if_unregister_vlan */

/************************************************************************
 * ixv_if_enable_intr
 ************************************************************************/
static void
ixv_if_enable_intr(if_ctx_t ctx)
{
	struct ixgbe_softc  *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	struct ix_rx_queue *que = sc->rx_queues;
	u32             mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);

	mask = IXGBE_EIMS_ENABLE_MASK;
	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);

	for (int i = 0; i < sc->num_rx_queues; i++, que++)
		ixv_enable_queue(sc, que->msix);

	IXGBE_WRITE_FLUSH(hw);
} /* ixv_if_enable_intr */

/************************************************************************
 * ixv_if_disable_intr
 ************************************************************************/
static void
ixv_if_disable_intr(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);

	IXGBE_WRITE_REG(&sc->hw, IXGBE_VTEIAC, 0);
	IXGBE_WRITE_REG(&sc->hw, IXGBE_VTEIMC, ~0);
	IXGBE_WRITE_FLUSH(&sc->hw);
} /* ixv_if_disable_intr */

/************************************************************************
 * ixv_if_rx_queue_intr_enable
 ************************************************************************/
static int
ixv_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ix_rx_queue *que = &sc->rx_queues[rxqid];

	ixv_enable_queue(sc, que->rxr.me);

	return (0);
} /* ixv_if_rx_queue_intr_enable */
1637 
/************************************************************************
 * ixv_set_ivar
 *
 *   Setup the correct IVAR register for a particular MSI-X interrupt
 *    - entry is the register array entry
 *    - vector is the MSI-X vector for this queue
 *    - type is RX/TX/MISC
 ************************************************************************/
static void
ixv_set_ivar(struct ixgbe_softc *sc, u8 entry, u8 vector, s8 type)
{
	struct ixgbe_hw *hw = &sc->hw;
	u32             ivar, index;

	vector |= IXGBE_IVAR_ALLOC_VAL;

	if (type == -1) { /* MISC IVAR */
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {          /* RX/TX IVARS */
		index = (16 * (entry & 1)) + (8 * type);
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
	}
} /* ixv_set_ivar */

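/*
 * Illustrative example (not driver code): each VTIVAR register packs
 * the RX and TX entries for two queues into four 8-bit fields. For
 * entry 3, type 1 (TX), index = 16 * (3 & 1) + 8 * 1 = 24, so the
 * vector is written to bits 31:24 of VTIVAR(3 >> 1) = VTIVAR(1).
 */
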
/************************************************************************
 * ixv_configure_ivars
 ************************************************************************/
static void
ixv_configure_ivars(struct ixgbe_softc *sc)
{
	struct ix_rx_queue *que = sc->rx_queues;

	MPASS(sc->num_rx_queues == sc->num_tx_queues);

	for (int i = 0; i < sc->num_rx_queues; i++, que++) {
		/* First the RX queue entry */
		ixv_set_ivar(sc, i, que->msix, 0);
		/* ... and the TX */
		ixv_set_ivar(sc, i, que->msix, 1);
		/* Set an initial value in EITR */
		IXGBE_WRITE_REG(&sc->hw, IXGBE_VTEITR(que->msix),
		    IXGBE_EITR_DEFAULT);
	}

	/* For the mailbox interrupt (entry is unused for the MISC IVAR) */
	ixv_set_ivar(sc, 1, sc->vector, -1);
} /* ixv_configure_ivars */

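/*
 * Illustrative note (not driver code): RX and TX of queue i share one
 * MSI-X vector (que->msix), so a single interrupt services both rings,
 * while the mailbox/link events get their own vector via the MISC IVAR.
 */
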
/************************************************************************
 * ixv_save_stats
 *
 *   The VF stats registers never have a truly virgin
 *   starting point, so this routine tries to make an
 *   artificial one, marking ground zero on attach as
 *   it were.
 ************************************************************************/
static void
ixv_save_stats(struct ixgbe_softc *sc)
{
	if (sc->stats.vf.vfgprc || sc->stats.vf.vfgptc) {
		sc->stats.vf.saved_reset_vfgprc +=
		    sc->stats.vf.vfgprc - sc->stats.vf.base_vfgprc;
		sc->stats.vf.saved_reset_vfgptc +=
		    sc->stats.vf.vfgptc - sc->stats.vf.base_vfgptc;
		sc->stats.vf.saved_reset_vfgorc +=
		    sc->stats.vf.vfgorc - sc->stats.vf.base_vfgorc;
		sc->stats.vf.saved_reset_vfgotc +=
		    sc->stats.vf.vfgotc - sc->stats.vf.base_vfgotc;
		sc->stats.vf.saved_reset_vfmprc +=
		    sc->stats.vf.vfmprc - sc->stats.vf.base_vfmprc;
	}
} /* ixv_save_stats */

/************************************************************************
 * ixv_init_stats
 ************************************************************************/
static void
ixv_init_stats(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;

	sc->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
	/* The octet counters are 36 bits wide, split across two registers */
	sc->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
	sc->stats.vf.last_vfgorc |=
	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);

	sc->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
	sc->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
	sc->stats.vf.last_vfgotc |=
	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);

	sc->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

	sc->stats.vf.base_vfgprc = sc->stats.vf.last_vfgprc;
	sc->stats.vf.base_vfgorc = sc->stats.vf.last_vfgorc;
	sc->stats.vf.base_vfgptc = sc->stats.vf.last_vfgptc;
	sc->stats.vf.base_vfgotc = sc->stats.vf.last_vfgotc;
	sc->stats.vf.base_vfmprc = sc->stats.vf.last_vfmprc;
} /* ixv_init_stats */

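/*
 * Illustrative note (not driver code): the last_* snapshots seed the
 * rollover detection in the UPDATE_STAT_* macros below, while base_*
 * marks the attach-time origin that ixv_save_stats() subtracts out
 * when accumulating across resets.
 */
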
/*
 * Handle rollover of a 32-bit hardware counter: if the current reading
 * is below the previous one the register wrapped, so credit another
 * 2^32 before merging in the new low 32 bits.
 */
#define UPDATE_STAT_32(reg, last, count)                \
{                                                       \
	u32 current = IXGBE_READ_REG(hw, reg);          \
	if (current < last)                             \
		count += 0x100000000LL;                 \
	last = current;                                 \
	count &= 0xFFFFFFFF00000000LL;                  \
	count |= current;                               \
}

/*
 * Same idea for the 36-bit octet counters (a 32-bit LSB register plus
 * an MSB register holding bits 35:32): a wrap credits 2^36.
 */
#define UPDATE_STAT_36(lsb, msb, last, count)           \
{                                                       \
	u64 cur_lsb = IXGBE_READ_REG(hw, lsb);          \
	u64 cur_msb = IXGBE_READ_REG(hw, msb);          \
	u64 current = ((cur_msb << 32) | cur_lsb);      \
	if (current < last)                             \
		count += 0x1000000000LL;                \
	last = current;                                 \
	count &= 0xFFFFFFF000000000LL;                  \
	count |= current;                               \
}

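/*
 * Illustrative example (not driver code): if last == 0xFFFFFFF0 and
 * the register now reads 0x00000010, the 32-bit counter wrapped, so
 * UPDATE_STAT_32 adds 0x100000000 to the running 64-bit total before
 * substituting the fresh low 32 bits.
 */
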
/************************************************************************
 * ixv_update_stats - Update the board statistics counters.
 ************************************************************************/
void
ixv_update_stats(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	struct ixgbevf_hw_stats *stats = &sc->stats.vf;

	UPDATE_STAT_32(IXGBE_VFGPRC, sc->stats.vf.last_vfgprc,
	    sc->stats.vf.vfgprc);
	UPDATE_STAT_32(IXGBE_VFGPTC, sc->stats.vf.last_vfgptc,
	    sc->stats.vf.vfgptc);
	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
	    sc->stats.vf.last_vfgorc, sc->stats.vf.vfgorc);
	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
	    sc->stats.vf.last_vfgotc, sc->stats.vf.vfgotc);
	UPDATE_STAT_32(IXGBE_VFMPRC, sc->stats.vf.last_vfmprc,
	    sc->stats.vf.vfmprc);

	/* Fill out the OS statistics structure */
	IXGBE_SET_IPACKETS(sc, stats->vfgprc);
	IXGBE_SET_OPACKETS(sc, stats->vfgptc);
	IXGBE_SET_IBYTES(sc, stats->vfgorc);
	IXGBE_SET_OBYTES(sc, stats->vfgotc);
	IXGBE_SET_IMCASTS(sc, stats->vfmprc);
} /* ixv_update_stats */

/************************************************************************
 * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
 ************************************************************************/
static void
ixv_add_stats_sysctls(struct ixgbe_softc *sc)
{
	device_t                dev = sc->dev;
	struct ix_tx_queue      *tx_que = sc->tx_queues;
	struct ix_rx_queue      *rx_que = sc->rx_queues;
	struct sysctl_ctx_list  *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid       *tree = device_get_sysctl_tree(dev);
	struct sysctl_oid_list  *child = SYSCTL_CHILDREN(tree);
	struct ixgbevf_hw_stats *stats = &sc->stats.vf;
	struct sysctl_oid       *stat_node, *queue_node;
	struct sysctl_oid_list  *stat_list, *queue_list;

#define QUEUE_NAME_LEN 32
	char                    namebuf[QUEUE_NAME_LEN];

	/* Driver Statistics */
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
	    CTLFLAG_RD, &sc->watchdog_events, "Watchdog timeouts");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
	    CTLFLAG_RD, &sc->link_irq, "Link MSI-X IRQ Handled");

	for (int i = 0; i < sc->num_tx_queues; i++, tx_que++) {
		struct tx_ring *txr = &tx_que->txr;
		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
		queue_list = SYSCTL_CHILDREN(queue_node);

		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
		    CTLFLAG_RD, &(txr->tso_tx), "TSO Packets");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
		    CTLFLAG_RD, &(txr->total_packets), "TX Packets");
	}

	for (int i = 0; i < sc->num_rx_queues; i++, rx_que++) {
		struct rx_ring *rxr = &rx_que->rxr;
		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
		queue_list = SYSCTL_CHILDREN(queue_node);

		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
		    CTLFLAG_RD, &(rx_que->irqs), "IRQs on queue");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
		    CTLFLAG_RD, &(rxr->rx_packets), "RX packets");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
		    CTLFLAG_RD, &(rxr->rx_bytes), "RX bytes");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
		    CTLFLAG_RD, &(rxr->rx_discarded), "Discarded RX packets");
	}

	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
	    "VF Statistics (read from HW registers)");
	stat_list = SYSCTL_CHILDREN(stat_node);

	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
	    CTLFLAG_RD, &stats->vfgprc, "Good Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
	    CTLFLAG_RD, &stats->vfgorc, "Good Octets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
	    CTLFLAG_RD, &stats->vfmprc, "Multicast Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
	    CTLFLAG_RD, &stats->vfgptc, "Good Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
	    CTLFLAG_RD, &stats->vfgotc, "Good Octets Transmitted");
} /* ixv_add_stats_sysctls */

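/*
 * Usage example (unit number assumed): the nodes registered above show
 * up under the device's sysctl tree, e.g.
 *
 *	sysctl dev.ixv.0.mac.good_pkts_rcvd
 *	sysctl dev.ixv.0.queue0.rx_packets
 */
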
/************************************************************************
 * ixv_print_debug_info
 *
 *   Called only when the debug sysctl is set.
 *   Provides a way to take a look at important statistics
 *   maintained by the driver and hardware.
 ************************************************************************/
static void
ixv_print_debug_info(struct ixgbe_softc *sc)
{
	device_t dev = sc->dev;
	struct ixgbe_hw *hw = &sc->hw;

	device_printf(dev, "Error Byte Count = %u\n",
	    IXGBE_READ_REG(hw, IXGBE_ERRBC));

	device_printf(dev, "MBX IRQ Handled: %lu\n",
	    (unsigned long)sc->link_irq);
} /* ixv_print_debug_info */

/************************************************************************
 * ixv_sysctl_debug
 ************************************************************************/
static int
ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc;
	int error, result;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		sc = (struct ixgbe_softc *)arg1;
		ixv_print_debug_info(sc);
	}

	return (error);
} /* ixv_sysctl_debug */

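/*
 * Usage example (sysctl name assumed from the handler registration
 * elsewhere in this file; unit number assumed): writing 1 runs
 * ixv_print_debug_info() once:
 *
 *	sysctl dev.ixv.0.debug=1
 */
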
/************************************************************************
 * ixv_init_device_features
 ************************************************************************/
static void
ixv_init_device_features(struct ixgbe_softc *sc)
{
	/* feat_cap is what the part can do, feat_en what is turned on */
	sc->feat_cap = IXGBE_FEATURE_NETMAP |
	    IXGBE_FEATURE_VF |
	    IXGBE_FEATURE_LEGACY_TX;

	/* A tad short on feature flags for VFs, atm. */
	switch (sc->hw.mac.type) {
	case ixgbe_mac_82599_vf:
		break;
	case ixgbe_mac_X540_vf:
		break;
	case ixgbe_mac_X550_vf:
	case ixgbe_mac_X550EM_x_vf:
	case ixgbe_mac_X550EM_a_vf:
		sc->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
		sc->feat_cap |= IXGBE_FEATURE_RSS;
		break;
	default:
		break;
	}

	/* Enabled by default... */
	/* Is a virtual function (VF) */
	if (sc->feat_cap & IXGBE_FEATURE_VF)
		sc->feat_en |= IXGBE_FEATURE_VF;
	/* Netmap */
	if (sc->feat_cap & IXGBE_FEATURE_NETMAP)
		sc->feat_en |= IXGBE_FEATURE_NETMAP;
	/* Receive-Side Scaling (RSS) */
	if (sc->feat_cap & IXGBE_FEATURE_RSS)
		sc->feat_en |= IXGBE_FEATURE_RSS;
	/* Needs advanced context descriptor regardless of offloads req'd */
	if (sc->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
		sc->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
} /* ixv_init_device_features */
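
/*
 * Illustrative example (not driver code): consumers gate behavior on
 * the enabled set rather than the capability set, e.g.
 *
 *	if (sc->feat_en & IXGBE_FEATURE_RSS)
 *		... program the RSS redirection table ...
 */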