/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"

#include "ixgbe.h"
#include "ifdi_if.h"

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>

/************************************************************************
 * Driver version
 ************************************************************************/
char ixv_driver_version[] = "2.0.1-k";

/************************************************************************
 * PCI Device ID Table
 *
 *   Used by probe to select the devices to load on.
 *   The last field of each entry is the device description string.
 *   The last entry must be all 0s.
 *
 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String }
 ************************************************************************/
static pci_vendor_info_t ixv_vendor_info_array[] =
{
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, "Intel(R) X520 82599 Virtual Function"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, "Intel(R) X540 Virtual Function"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, "Intel(R) X550 Virtual Function"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, "Intel(R) X552 Virtual Function"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, "Intel(R) X553 Virtual Function"),
	/* required last entry */
	PVID_END
};

/************************************************************************
 * Function prototypes
 ************************************************************************/
static void     *ixv_register(device_t);
static int      ixv_if_attach_pre(if_ctx_t);
static int      ixv_if_attach_post(if_ctx_t);
static int      ixv_if_detach(if_ctx_t);

static int      ixv_if_rx_queue_intr_enable(if_ctx_t, uint16_t);
static int      ixv_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
static int      ixv_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
static void     ixv_if_queues_free(if_ctx_t);
static void     ixv_identify_hardware(if_ctx_t);
static void     ixv_init_device_features(struct ixgbe_softc *);
static int      ixv_allocate_pci_resources(if_ctx_t);
static void     ixv_free_pci_resources(if_ctx_t);
static int      ixv_setup_interface(if_ctx_t);
static void     ixv_if_media_status(if_ctx_t, struct ifmediareq *);
static int      ixv_if_media_change(if_ctx_t);
static void     ixv_if_update_admin_status(if_ctx_t);
static int      ixv_if_msix_intr_assign(if_ctx_t, int);

static int      ixv_if_mtu_set(if_ctx_t, uint32_t);
static void     ixv_if_init(if_ctx_t);
static void     ixv_if_local_timer(if_ctx_t, uint16_t);
static void     ixv_if_stop(if_ctx_t);
static int      ixv_negotiate_api(struct ixgbe_softc *);

static void     ixv_initialize_transmit_units(if_ctx_t);
static void     ixv_initialize_receive_units(if_ctx_t);
static void     ixv_initialize_rss_mapping(struct ixgbe_softc *);

static void     ixv_setup_vlan_support(if_ctx_t);
static void     ixv_configure_ivars(struct ixgbe_softc *);
static void     ixv_if_enable_intr(if_ctx_t);
static void     ixv_if_disable_intr(if_ctx_t);
static void     ixv_if_multi_set(if_ctx_t);

static void     ixv_if_register_vlan(if_ctx_t, u16);
static void     ixv_if_unregister_vlan(if_ctx_t, u16);

static uint64_t ixv_if_get_counter(if_ctx_t, ift_counter);
static bool	ixv_if_needs_restart(if_ctx_t, enum iflib_restart_event);

static void     ixv_save_stats(struct ixgbe_softc *);
static void     ixv_init_stats(struct ixgbe_softc *);
static void     ixv_update_stats(struct ixgbe_softc *);
static void     ixv_add_stats_sysctls(struct ixgbe_softc *);

static int      ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
static void     ixv_set_ivar(struct ixgbe_softc *, u8, u8, s8);

static u8       *ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);

/* The MSI-X Interrupt handlers */
static int      ixv_msix_que(void *);
static int      ixv_msix_mbx(void *);

/************************************************************************
 * FreeBSD Device Interface Entry Points
 ************************************************************************/
static device_method_t ixv_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, ixv_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
	DEVMETHOD_END
};

static driver_t ixv_driver = {
	"ixv", ixv_methods, sizeof(struct ixgbe_softc),
};

devclass_t ixv_devclass;
DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
IFLIB_PNP_INFO(pci, ixv_driver, ixv_vendor_info_array);
MODULE_DEPEND(ixv, iflib, 1, 1, 1);
MODULE_DEPEND(ixv, pci, 1, 1, 1);
MODULE_DEPEND(ixv, ether, 1, 1, 1);

static device_method_t ixv_if_methods[] = {
	DEVMETHOD(ifdi_attach_pre, ixv_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, ixv_if_attach_post),
	DEVMETHOD(ifdi_detach, ixv_if_detach),
	DEVMETHOD(ifdi_init, ixv_if_init),
	DEVMETHOD(ifdi_stop, ixv_if_stop),
	DEVMETHOD(ifdi_msix_intr_assign, ixv_if_msix_intr_assign),
	DEVMETHOD(ifdi_intr_enable, ixv_if_enable_intr),
	DEVMETHOD(ifdi_intr_disable, ixv_if_disable_intr),
	DEVMETHOD(ifdi_tx_queue_intr_enable, ixv_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_rx_queue_intr_enable, ixv_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queues_alloc, ixv_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, ixv_if_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, ixv_if_queues_free),
	DEVMETHOD(ifdi_update_admin_status, ixv_if_update_admin_status),
	DEVMETHOD(ifdi_multi_set, ixv_if_multi_set),
	DEVMETHOD(ifdi_mtu_set, ixv_if_mtu_set),
	DEVMETHOD(ifdi_media_status, ixv_if_media_status),
	DEVMETHOD(ifdi_media_change, ixv_if_media_change),
	DEVMETHOD(ifdi_timer, ixv_if_local_timer),
	DEVMETHOD(ifdi_vlan_register, ixv_if_register_vlan),
	DEVMETHOD(ifdi_vlan_unregister, ixv_if_unregister_vlan),
	DEVMETHOD(ifdi_get_counter, ixv_if_get_counter),
	DEVMETHOD(ifdi_needs_restart, ixv_if_needs_restart),
	DEVMETHOD_END
};

static driver_t ixv_if_driver = {
	"ixv_if", ixv_if_methods, sizeof(struct ixgbe_softc)
};

/*
 * TUNEABLE PARAMETERS:
 */

/* Flow control setting, default to full */
static int ixv_flow_control = ixgbe_fc_full;
TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);

/*
 * Header split: this causes the hardware to DMA
 * the header into a separate mbuf from the payload;
 * it can be a performance win in some workloads, but
 * in others it actually hurts. It's off by default.
 */
static int ixv_header_split = false;
TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);

/*
 * Shadow VFTA table; this is needed because
 * the real filter table gets cleared during
 * a soft reset and we need to repopulate it.
 */
static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];
extern struct if_txrx ixgbe_txrx;

static struct if_shared_ctx ixv_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_q_align = PAGE_SIZE, /* max(DBA_ALIGN, PAGE_SIZE) */
	.isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tx_maxsegsize = PAGE_SIZE,
	.isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = PAGE_SIZE,
	.isc_rx_maxsize = MJUM16BYTES,
	.isc_rx_nsegments = 1,
	.isc_rx_maxsegsize = MJUM16BYTES,
	.isc_nfl = 1,
	.isc_ntxqs = 1,
	.isc_nrxqs = 1,
	.isc_admin_intrcnt = 1,
	.isc_vendor_info = ixv_vendor_info_array,
	.isc_driver_version = ixv_driver_version,
	.isc_driver = &ixv_if_driver,
	.isc_flags = IFLIB_IS_VF | IFLIB_TSO_INIT_IP,

	.isc_nrxd_min = {MIN_RXD},
	.isc_ntxd_min = {MIN_TXD},
	.isc_nrxd_max = {MAX_RXD},
	.isc_ntxd_max = {MAX_TXD},
	.isc_nrxd_default = {DEFAULT_RXD},
	.isc_ntxd_default = {DEFAULT_TXD},
};

static void *
ixv_register(device_t dev)
{
	return (&ixv_sctx_init);
}

/************************************************************************
 * ixv_if_tx_queues_alloc
 ************************************************************************/
static int
ixv_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int ntxqs, int ntxqsets)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_softc_ctx_t     scctx = sc->shared;
	struct ix_tx_queue *que;
	int                i, j, error;

	MPASS(sc->num_tx_queues == ntxqsets);
	MPASS(ntxqs == 1);

	/* Allocate queue structure memory */
	sc->tx_queues =
	    (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
	                                 M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!sc->tx_queues) {
		device_printf(iflib_get_dev(ctx),
		    "Unable to allocate TX ring memory\n");
		return (ENOMEM);
	}

	for (i = 0, que = sc->tx_queues; i < ntxqsets; i++, que++) {
		struct tx_ring *txr = &que->txr;

		txr->me = i;
		txr->sc = que->sc = sc;

		/* Allocate report status array */
		if (!(txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_DEVBUF, M_NOWAIT | M_ZERO))) {
			error = ENOMEM;
			goto fail;
		}
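		/* No report-status (RS) descriptors are outstanding yet */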
		for (j = 0; j < scctx->isc_ntxd[0]; j++)
			txr->tx_rsq[j] = QIDX_INVALID;
		/* get the virtual and physical address of the hardware queues */
		txr->tail = IXGBE_VFTDT(txr->me);
		txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i*ntxqs];
		txr->tx_paddr = paddrs[i*ntxqs];

		txr->bytes = 0;
		txr->total_packets = 0;
	}

	device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
	    sc->num_tx_queues);

	return (0);

fail:
	ixv_if_queues_free(ctx);

	return (error);
} /* ixv_if_tx_queues_alloc */

/************************************************************************
 * ixv_if_rx_queues_alloc
 ************************************************************************/
static int
ixv_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int nrxqs, int nrxqsets)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ix_rx_queue *que;
	int                i, error;

	MPASS(sc->num_rx_queues == nrxqsets);
	MPASS(nrxqs == 1);

	/* Allocate queue structure memory */
	sc->rx_queues =
	    (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue) * nrxqsets,
	                                 M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!sc->rx_queues) {
		device_printf(iflib_get_dev(ctx),
		    "Unable to allocate RX ring memory\n");
		error = ENOMEM;
		goto fail;
	}

	for (i = 0, que = sc->rx_queues; i < nrxqsets; i++, que++) {
		struct rx_ring *rxr = &que->rxr;

		rxr->me = i;
		rxr->sc = que->sc = sc;

		/* get the virtual and physical address of the hw queues */
		rxr->tail = IXGBE_VFRDT(rxr->me);
		rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
		rxr->rx_paddr = paddrs[i*nrxqs];
		rxr->bytes = 0;
		rxr->que = que;
	}

	device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
	    sc->num_rx_queues);

	return (0);

fail:
	ixv_if_queues_free(ctx);

	return (error);
} /* ixv_if_rx_queues_alloc */

/************************************************************************
 * ixv_if_queues_free
 ************************************************************************/
static void
ixv_if_queues_free(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ix_tx_queue *que = sc->tx_queues;
	int                i;

	if (que == NULL)
		goto free;

	for (i = 0; i < sc->num_tx_queues; i++, que++) {
		struct tx_ring *txr = &que->txr;

		if (txr->tx_rsq == NULL)
			break;

		free(txr->tx_rsq, M_DEVBUF);
		txr->tx_rsq = NULL;
	}
	if (sc->tx_queues != NULL)
		free(sc->tx_queues, M_DEVBUF);
free:
	if (sc->rx_queues != NULL)
		free(sc->rx_queues, M_DEVBUF);
	sc->tx_queues = NULL;
	sc->rx_queues = NULL;
} /* ixv_if_queues_free */

/************************************************************************
 * ixv_if_attach_pre - Device initialization routine
 *
 *   Called when the driver is being loaded.
 *   Identifies the type of hardware, allocates all resources
 *   and initializes the hardware.
 *
 *   return 0 on success, positive on failure
 ************************************************************************/
static int
ixv_if_attach_pre(if_ctx_t ctx)
{
	struct ixgbe_softc  *sc;
	device_t        dev;
	if_softc_ctx_t  scctx;
	struct ixgbe_hw *hw;
	int             error = 0;

	INIT_DEBUGOUT("ixv_attach: begin");

	/* Allocate, clear, and link in our sc structure */
	dev = iflib_get_dev(ctx);
	sc = iflib_get_softc(ctx);
	sc->dev = dev;
	sc->ctx = ctx;
	sc->hw.back = sc;
	scctx = sc->shared = iflib_get_softc_ctx(ctx);
	sc->media = iflib_get_media(ctx);
	hw = &sc->hw;

	/* Do base PCI setup - map BAR0 */
	if (ixv_allocate_pci_resources(ctx)) {
		device_printf(dev, "ixv_allocate_pci_resources() failed!\n");
		error = ENXIO;
		goto err_out;
	}

	/* SYSCTL APIs */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    sc, 0, ixv_sysctl_debug, "I", "Debug Info");

	/* Determine hardware revision */
	ixv_identify_hardware(ctx);
	ixv_init_device_features(sc);

	/* Initialize the shared code */
	error = ixgbe_init_ops_vf(hw);
	if (error) {
		device_printf(dev, "ixgbe_init_ops_vf() failed!\n");
		error = EIO;
		goto err_out;
	}

	/* Setup the mailbox */
	ixgbe_init_mbx_params_vf(hw);

	error = hw->mac.ops.reset_hw(hw);
	if (error == IXGBE_ERR_RESET_FAILED)
		device_printf(dev, "...reset_hw() failure: Reset Failed!\n");
	else if (error)
		device_printf(dev, "...reset_hw() failed with error %d\n",
		    error);
	if (error) {
		error = EIO;
		goto err_out;
	}

	error = hw->mac.ops.init_hw(hw);
	if (error) {
		device_printf(dev, "...init_hw() failed with error %d\n",
		    error);
		error = EIO;
		goto err_out;
	}

	/* Negotiate mailbox API version */
	error = ixv_negotiate_api(sc);
	if (error) {
		device_printf(dev,
		    "Mailbox API negotiation failed during attach!\n");
		goto err_out;
	}

	/* Check if VF was disabled by PF */
	error = hw->mac.ops.get_link_state(hw, &sc->link_enabled);
	if (error) {
		/* PF is not capable of controlling VF state. Enable the link. */
		sc->link_enabled = true;
	}

	/* If no mac address was assigned, make a random one */
	if (!ixv_check_ether_addr(hw->mac.addr)) {
		ether_gen_addr(iflib_get_ifp(ctx),
		    (struct ether_addr *)hw->mac.addr);
		bcopy(hw->mac.addr, hw->mac.perm_addr,
		    sizeof(hw->mac.perm_addr));
	}

	/* Most of the iflib initialization... */

	iflib_set_mac(ctx, hw->mac.addr);
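	/* X550-class VFs support two queue pairs; 82599 and X540 VFs one. */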
	switch (sc->hw.mac.type) {
	case ixgbe_mac_X550_vf:
	case ixgbe_mac_X550EM_x_vf:
	case ixgbe_mac_X550EM_a_vf:
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 2;
		break;
	default:
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 1;
	}
	scctx->isc_txqsizes[0] =
	    roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
	    sizeof(u32), DBA_ALIGN);
	scctx->isc_rxqsizes[0] =
	    roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
	    DBA_ALIGN);
	/* XXX */
	scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
	    CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
	scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
	scctx->isc_msix_bar = pci_msix_table_bar(dev);
	scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
	scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
	scctx->isc_tx_tso_segsize_max = PAGE_SIZE;

	scctx->isc_txrx = &ixgbe_txrx;

	/*
	 * Tell the upper layer(s) we support everything the PF
	 * driver does except...
	 *   Wake-on-LAN
	 */
	scctx->isc_capabilities = IXGBE_CAPS;
	scctx->isc_capabilities ^= IFCAP_WOL;
	scctx->isc_capenable = scctx->isc_capabilities;

	INIT_DEBUGOUT("ixv_if_attach_pre: end");

	return (0);

err_out:
	ixv_free_pci_resources(ctx);

	return (error);
} /* ixv_if_attach_pre */

static int
ixv_if_attach_post(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	device_t       dev = iflib_get_dev(ctx);
	int            error = 0;

	/* Setup OS specific network interface */
	error = ixv_setup_interface(ctx);
	if (error) {
		device_printf(dev, "Interface setup failed: %d\n", error);
		goto end;
	}

	/* Do the stats setup */
	ixv_save_stats(sc);
	ixv_init_stats(sc);
	ixv_add_stats_sysctls(sc);

end:
	return error;
} /* ixv_if_attach_post */

/************************************************************************
 * ixv_if_detach - Device removal routine
 *
 *   Called when the driver is being removed.
 *   Stops the adapter and deallocates all the resources
 *   that were allocated for driver operation.
 *
 *   return 0 on success, positive on failure
 ************************************************************************/
static int
ixv_if_detach(if_ctx_t ctx)
{
	INIT_DEBUGOUT("ixv_detach: begin");

	ixv_free_pci_resources(ctx);

	return (0);
} /* ixv_if_detach */

/************************************************************************
 * ixv_if_mtu_set
 ************************************************************************/
static int
ixv_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ifnet   *ifp = iflib_get_ifp(ctx);
	int            error = 0;

	IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
	if (mtu > IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR) {
		error = EINVAL;
	} else {
		ifp->if_mtu = mtu;
		sc->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
	}

	return error;
} /* ixv_if_mtu_set */

/************************************************************************
 * ixv_if_init - Init entry point
 *
 *   Used in two ways: It is used by the stack as an init entry
 *   point in the network interface structure. It is also used
 *   by the driver as a hw/sw initialization routine to get
 *   to a consistent state.
 ************************************************************************/
static void
ixv_if_init(if_ctx_t ctx)
{
	struct ixgbe_softc  *sc = iflib_get_softc(ctx);
	struct ifnet    *ifp = iflib_get_ifp(ctx);
	device_t        dev = iflib_get_dev(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	int             error = 0;

	INIT_DEBUGOUT("ixv_if_init: begin");
	hw->adapter_stopped = false;
	hw->mac.ops.stop_adapter(hw);

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest MAC address; the user can use a LAA */
	bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);

	/* Reset VF and renegotiate mailbox API version */
	hw->mac.ops.reset_hw(hw);
	hw->mac.ops.start_hw(hw);
	error = ixv_negotiate_api(sc);
	if (error) {
		device_printf(dev,
		    "Mailbox API negotiation failed in if_init!\n");
		return;
	}

	ixv_initialize_transmit_units(ctx);

	/* Setup Multicast table */
	ixv_if_multi_set(ctx);

	sc->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);

	/* Configure RX settings */
	ixv_initialize_receive_units(ctx);

	/* Set up VLAN offload and filter */
	ixv_setup_vlan_support(ctx);

	/* Set up MSI-X routing */
	ixv_configure_ivars(sc);

	/* Set up auto-mask */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(sc->vector), IXGBE_LINK_ITR);

	/* Stats init */
	ixv_init_stats(sc);

	/* Config/Enable Link */
	error = hw->mac.ops.get_link_state(hw, &sc->link_enabled);
	if (error) {
		/* PF is not capable of controlling VF state. Enable the link. */
		sc->link_enabled = true;
	} else if (sc->link_enabled == false)
		device_printf(dev, "VF is disabled by PF\n");

	hw->mac.ops.check_link(hw, &sc->link_speed, &sc->link_up,
	    false);

	/* And now turn on interrupts */
	ixv_if_enable_intr(ctx);

	return;
} /* ixv_if_init */

/************************************************************************
 * ixv_enable_queue
 ************************************************************************/
static inline void
ixv_enable_queue(struct ixgbe_softc *sc, u32 vector)
{
	struct ixgbe_hw *hw = &sc->hw;
	u32             queue = 1 << vector;
	u32             mask;

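	/* Unmask just this queue's bit in the VF interrupt mask set register */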
	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
} /* ixv_enable_queue */

/************************************************************************
 * ixv_disable_queue
 ************************************************************************/
static inline void
ixv_disable_queue(struct ixgbe_softc *sc, u32 vector)
{
	struct ixgbe_hw *hw = &sc->hw;
	u64             queue = (u64)(1 << vector);
	u32             mask;

	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
} /* ixv_disable_queue */

/************************************************************************
 * ixv_msix_que - MSI-X Queue Interrupt Service routine
 ************************************************************************/
static int
ixv_msix_que(void *arg)
{
	struct ix_rx_queue *que = arg;
	struct ixgbe_softc *sc = que->sc;

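	/*
	 * Auto-mask this queue's interrupt; iflib will run the queue task
	 * and re-enable it via the ifdi_rx_queue_intr_enable method.
	 */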
	ixv_disable_queue(sc, que->msix);
	++que->irqs;

	return (FILTER_SCHEDULE_THREAD);
} /* ixv_msix_que */

/************************************************************************
 * ixv_msix_mbx
 ************************************************************************/
static int
ixv_msix_mbx(void *arg)
{
	struct ixgbe_softc *sc = arg;
	struct ixgbe_hw *hw = &sc->hw;
	u32             reg;

	++sc->link_irq;

	/* First get the cause */
	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);

	/* Link status change */
	if (reg & IXGBE_EICR_LSC)
		iflib_admin_intr_deferred(sc->ctx);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);

	return (FILTER_HANDLED);
} /* ixv_msix_mbx */

/************************************************************************
 * ixv_if_media_status - Media Ioctl callback
 *
 *   Called whenever the user queries the status of
 *   the interface using ifconfig.
 ************************************************************************/
static void
ixv_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);

	INIT_DEBUGOUT("ixv_media_status: begin");

	iflib_admin_intr_deferred(ctx);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!sc->link_active)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	switch (sc->link_speed) {
	case IXGBE_LINK_SPEED_1GB_FULL:
		ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_10GB_FULL:
		ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_100_FULL:
		ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_10_FULL:
		ifmr->ifm_active |= IFM_10_T | IFM_FDX;
		break;
	}
} /* ixv_if_media_status */

/************************************************************************
 * ixv_if_media_change - Media Ioctl callback
 *
 *   Called when the user changes speed/duplex using
 *   the media/mediaopt option with ifconfig.
 ************************************************************************/
static int
ixv_if_media_change(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ifmedia *ifm = iflib_get_media(ctx);

	INIT_DEBUGOUT("ixv_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		break;
	default:
		device_printf(sc->dev, "Only auto media type\n");
		return (EINVAL);
	}

	return (0);
} /* ixv_if_media_change */

/************************************************************************
 * ixv_negotiate_api
 *
 *   Negotiate the Mailbox API with the PF;
 *   start with the most featured API first.
 ************************************************************************/
static int
ixv_negotiate_api(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	int             mbx_api[] = { ixgbe_mbox_api_12,
	                              ixgbe_mbox_api_11,
	                              ixgbe_mbox_api_10,
	                              ixgbe_mbox_api_unknown };
	int             i = 0;

	while (mbx_api[i] != ixgbe_mbox_api_unknown) {
		if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
			return (0);
		i++;
	}

	return (EINVAL);
} /* ixv_negotiate_api */

/************************************************************************
 * ixv_if_multi_set - Multicast Update
 *
 *   Called whenever the multicast address list is updated.
 ************************************************************************/
static void
ixv_if_multi_set(if_ctx_t ctx)
{
	u8       mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	u8                 *update_ptr;
	struct ifmultiaddr *ifma;
	if_t               ifp = iflib_get_ifp(ctx);
	int                mcnt = 0;

	IOCTL_DEBUGOUT("ixv_if_multi_set: begin");

	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		/* mta[] holds at most MAX_NUM_MULTICAST_ADDRESSES entries */
		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
			break;
		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
		    IXGBE_ETH_LENGTH_OF_ADDRESS);
		mcnt++;
	}

	update_ptr = mta;

	sc->hw.mac.ops.update_mc_addr_list(&sc->hw, update_ptr, mcnt,
	    ixv_mc_array_itr, true);
} /* ixv_if_multi_set */

/************************************************************************
 * ixv_mc_array_itr
 *
 *   An iterator function needed by the multicast shared code.
 *   It feeds the shared code routine the addresses in the
 *   mta array of ixv_if_multi_set() one by one.
 ************************************************************************/
static u8 *
ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
{
	u8 *addr = *update_ptr;
	u8 *newptr;

	*vmdq = 0;

	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
	*update_ptr = newptr;

	return addr;
} /* ixv_mc_array_itr */

/************************************************************************
 * ixv_if_local_timer - Timer routine
 *
 *   Checks for link status, updates statistics,
 *   and runs the watchdog check.
 ************************************************************************/
static void
ixv_if_local_timer(if_ctx_t ctx, uint16_t qid)
{
	if (qid != 0)
		return;

	/* Fire off the adminq task */
	iflib_admin_intr_deferred(ctx);
} /* ixv_if_local_timer */

/************************************************************************
 * ixv_if_update_admin_status - Update OS on link state
 *
 * Note: Only updates the OS on the cached link state.
 *       The real check of the hardware only happens with
 *       a link interrupt.
 ************************************************************************/
static void
ixv_if_update_admin_status(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	device_t       dev = iflib_get_dev(ctx);
	s32            status;

	sc->hw.mac.get_link_status = true;

	status = ixgbe_check_link(&sc->hw, &sc->link_speed,
	    &sc->link_up, false);

	if (status != IXGBE_SUCCESS && sc->hw.adapter_stopped == false) {
		/* The mailbox's Clear To Send status was lost or a timeout
		 * occurred. We need reinitialization. */
		iflib_get_ifp(ctx)->if_init(ctx);
	}

	if (sc->link_up && sc->link_enabled) {
		if (sc->link_active == false) {
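			/*
			 * link_speed 128 is IXGBE_LINK_SPEED_10GB_FULL
			 * (0x80); anything else is reported as 1 Gbps.
			 */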
			if (bootverbose)
				device_printf(dev, "Link is up %d Gbps %s \n",
				    ((sc->link_speed == 128) ? 10 : 1),
				    "Full Duplex");
			sc->link_active = true;
			iflib_link_state_change(ctx, LINK_STATE_UP,
			    IF_Gbps(10));
		}
	} else { /* Link down */
		if (sc->link_active == true) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
			sc->link_active = false;
		}
	}

	/* Stats Update */
	ixv_update_stats(sc);
} /* ixv_if_update_admin_status */

/************************************************************************
 * ixv_if_stop - Stop the hardware
 *
 *   Disables all traffic on the adapter by issuing a
 *   global reset on the MAC.
 ************************************************************************/
static void
ixv_if_stop(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;

	INIT_DEBUGOUT("ixv_stop: begin\n");

	ixv_if_disable_intr(ctx);

	hw->mac.ops.reset_hw(hw);
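	/*
	 * reset_hw() leaves the adapter marked stopped; clear the flag
	 * and issue an explicit stop after the reset.
	 */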
	sc->hw.adapter_stopped = false;
	hw->mac.ops.stop_adapter(hw);

	/* Update the stack */
	sc->link_up = false;
	ixv_if_update_admin_status(ctx);

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
} /* ixv_if_stop */

/************************************************************************
 * ixv_identify_hardware - Determine hardware revision.
 ************************************************************************/
static void
ixv_identify_hardware(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	device_t        dev = iflib_get_dev(ctx);
	struct ixgbe_hw *hw = &sc->hw;

	/* Save off the information about this board */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_get_revid(dev);
	hw->subsystem_vendor_id = pci_get_subvendor(dev);
	hw->subsystem_device_id = pci_get_subdevice(dev);

	/* A subset of set_mac_type */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_VF:
		hw->mac.type = ixgbe_mac_82599_vf;
		break;
	case IXGBE_DEV_ID_X540_VF:
		hw->mac.type = ixgbe_mac_X540_vf;
		break;
	case IXGBE_DEV_ID_X550_VF:
		hw->mac.type = ixgbe_mac_X550_vf;
		break;
	case IXGBE_DEV_ID_X550EM_X_VF:
		hw->mac.type = ixgbe_mac_X550EM_x_vf;
		break;
	case IXGBE_DEV_ID_X550EM_A_VF:
		hw->mac.type = ixgbe_mac_X550EM_a_vf;
		break;
	default:
		device_printf(dev, "unknown mac type\n");
		hw->mac.type = ixgbe_mac_unknown;
		break;
	}
} /* ixv_identify_hardware */

/************************************************************************
 * ixv_if_msix_intr_assign - Setup MSI-X Interrupt resources and handlers
 ************************************************************************/
static int
ixv_if_msix_intr_assign(if_ctx_t ctx, int msix)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	device_t           dev = iflib_get_dev(ctx);
	struct ix_rx_queue *rx_que = sc->rx_queues;
	struct ix_tx_queue *tx_que;
	int                error, rid, vector = 0;
	char               buf[16];

	for (int i = 0; i < sc->num_rx_queues; i++, vector++, rx_que++) {
		rid = vector + 1;

		snprintf(buf, sizeof(buf), "rxq%d", i);
		error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
		    IFLIB_INTR_RXTX, ixv_msix_que, rx_que, rx_que->rxr.me, buf);

		if (error) {
			device_printf(iflib_get_dev(ctx),
			    "Failed to allocate que int %d err: %d\n",
			    i, error);
			sc->num_rx_queues = i + 1;
			goto fail;
		}

		rx_que->msix = vector;
	}

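	/*
	 * TX queues get no MSI-X vector of their own; each one shares an
	 * RX queue's interrupt and is serviced as a softirq.
	 */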
	for (int i = 0; i < sc->num_tx_queues; i++) {
		snprintf(buf, sizeof(buf), "txq%d", i);
		tx_que = &sc->tx_queues[i];
		tx_que->msix = i % sc->num_rx_queues;
		iflib_softirq_alloc_generic(ctx,
		    &sc->rx_queues[tx_que->msix].que_irq,
		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
	}
	rid = vector + 1;
	error = iflib_irq_alloc_generic(ctx, &sc->irq, rid,
	    IFLIB_INTR_ADMIN, ixv_msix_mbx, sc, 0, "aq");
	if (error) {
		device_printf(iflib_get_dev(ctx),
		    "Failed to register admin handler\n");
		return (error);
	}

	sc->vector = vector;
	/*
	 * Due to a broken design QEMU will fail to properly
	 * enable the guest for MSI-X unless the vectors in
	 * the table are all set up, so we must rewrite the
	 * ENABLE bit in the MSI-X control register again at this
	 * point to cause it to successfully initialize us.
	 */
	if (sc->hw.mac.type == ixgbe_mac_82599_vf) {
		int msix_ctrl;

		pci_find_cap(dev, PCIY_MSIX, &rid);
		rid += PCIR_MSIX_CTRL;
		msix_ctrl = pci_read_config(dev, rid, 2);
		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
		pci_write_config(dev, rid, msix_ctrl, 2);
	}

	return (0);

fail:
	iflib_irq_free(ctx, &sc->irq);
	rx_que = sc->rx_queues;
	for (int i = 0; i < sc->num_rx_queues; i++, rx_que++)
		iflib_irq_free(ctx, &rx_que->que_irq);

	return (error);
} /* ixv_if_msix_intr_assign */

/************************************************************************
 * ixv_allocate_pci_resources
 ************************************************************************/
static int
ixv_allocate_pci_resources(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	device_t       dev = iflib_get_dev(ctx);
	int            rid;

	rid = PCIR_BAR(0);
	sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);

	if (!(sc->pci_mem)) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		return (ENXIO);
	}

	sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->pci_mem);
	sc->osdep.mem_bus_space_handle =
	    rman_get_bushandle(sc->pci_mem);
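	/*
	 * The shared code accesses registers through hw_addr; point it at
	 * the bus space handle so the osdep read/write macros work.
	 */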
	sc->hw.hw_addr = (u8 *)&sc->osdep.mem_bus_space_handle;

	return (0);
} /* ixv_allocate_pci_resources */

/************************************************************************
 * ixv_free_pci_resources
 ************************************************************************/
static void
ixv_free_pci_resources(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ix_rx_queue *que = sc->rx_queues;
	device_t           dev = iflib_get_dev(ctx);

	/* Release all MSI-X queue resources */
	if (sc->intr_type == IFLIB_INTR_MSIX)
		iflib_irq_free(ctx, &sc->irq);

	if (que != NULL) {
		for (int i = 0; i < sc->num_rx_queues; i++, que++) {
			iflib_irq_free(ctx, &que->que_irq);
		}
	}

	if (sc->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->pci_mem), sc->pci_mem);
} /* ixv_free_pci_resources */

/************************************************************************
 * ixv_setup_interface
 *
 *   Setup networking device structure and register an interface.
 ************************************************************************/
static int
ixv_setup_interface(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_softc_ctx_t scctx = sc->shared;
	struct ifnet   *ifp = iflib_get_ifp(ctx);

	INIT_DEBUGOUT("ixv_setup_interface: begin");

	if_setbaudrate(ifp, IF_Gbps(10));
	ifp->if_snd.ifq_maxlen = scctx->isc_ntxd[0] - 2;

	sc->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
	ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);

	return 0;
} /* ixv_setup_interface */

/************************************************************************
 * ixv_if_get_counter
 ************************************************************************/
static uint64_t
ixv_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_t           ifp = iflib_get_ifp(ctx);

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (sc->ipackets);
	case IFCOUNTER_OPACKETS:
		return (sc->opackets);
	case IFCOUNTER_IBYTES:
		return (sc->ibytes);
	case IFCOUNTER_OBYTES:
		return (sc->obytes);
	case IFCOUNTER_IMCASTS:
		return (sc->imcasts);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
} /* ixv_if_get_counter */

/* ixv_if_needs_restart - Tell iflib when the driver needs to be reinitialized
 * @ctx: iflib context
 * @event: event code to check
 *
 * Defaults to returning true for every event.
 *
 * @returns true if iflib needs to reinit the interface
 */
static bool
ixv_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
{
	switch (event) {
	case IFLIB_RESTART_VLAN_CONFIG:
		/* XXX: This may not need to return true */
	default:
		return (true);
	}
}

/************************************************************************
 * ixv_initialize_transmit_units - Enable transmit unit.
 ************************************************************************/
static void
ixv_initialize_transmit_units(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw    *hw = &sc->hw;
	if_softc_ctx_t     scctx = sc->shared;
	struct ix_tx_queue *que = sc->tx_queues;
	int                i;

	for (i = 0; i < sc->num_tx_queues; i++, que++) {
		struct tx_ring *txr = &que->txr;
		u64            tdba = txr->tx_paddr;
		u32            txctrl, txdctl;
		int            j = txr->me;

		/* Set WTHRESH to 8, burst writeback */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);

		/* Set the HW Tx Head and Tail indices */
		IXGBE_WRITE_REG(&sc->hw, IXGBE_VFTDH(j), 0);
		IXGBE_WRITE_REG(&sc->hw, IXGBE_VFTDT(j), 0);

		/* Set Tx Tail register */
		txr->tail = IXGBE_VFTDT(j);

		txr->tx_rs_cidx = txr->tx_rs_pidx;
		/* Initialize the last processed descriptor to be the end of
		 * the ring, rather than the start, so that we avoid an
		 * off-by-one error when calculating how many descriptors are
		 * done in the credits_update function.
		 */
		txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
		for (int k = 0; k < scctx->isc_ntxd[0]; k++)
			txr->tx_rsq[k] = QIDX_INVALID;

		/* Set Ring parameters */
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
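		/*
		 * The ring length is in bytes; legacy and advanced TX
		 * descriptors are both 16 bytes.
		 */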
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j),
		    scctx->isc_ntxd[0] * sizeof(struct ixgbe_legacy_tx_desc));
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);

		/* Now enable */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	return;
} /* ixv_initialize_transmit_units */

/************************************************************************
 * ixv_initialize_rss_mapping
 ************************************************************************/
static void
ixv_initialize_rss_mapping(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	u32             reta = 0, mrqc, rss_key[10];
	int             queue_id;
	int             i, j;
	u32             rss_hash_config;

	if (sc->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *)&rss_key);
	} else {
		/* set up random bits */
		arc4rand(&rss_key, sizeof(rss_key), 0);
	}

	/* Now fill out hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);

	/* Set up the redirection table */
	for (i = 0, j = 0; i < 64; i++, j++) {
		if (j == sc->num_rx_queues)
			j = 0;

		if (sc->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_rx_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % sc->num_rx_queues;
		} else
			queue_id = j;

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
		reta >>= 8;
		reta |= ((uint32_t)queue_id) << 24;
		if ((i & 3) == 3) {
			IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
			reta = 0;
		}
	}

	/* Perform hash on these packet types */
	if (sc->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
		                | RSS_HASHTYPE_RSS_TCP_IPV4
		                | RSS_HASHTYPE_RSS_IPV6
		                | RSS_HASHTYPE_RSS_TCP_IPV6;
	}

	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		device_printf(sc->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		device_printf(sc->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		device_printf(sc->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
		    __func__);
	IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
} /* ixv_initialize_rss_mapping */

/************************************************************************
 * ixv_initialize_receive_units - Setup receive registers and features.
 ************************************************************************/
static void
ixv_initialize_receive_units(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_softc_ctx_t     scctx;
	struct ixgbe_hw    *hw = &sc->hw;
	struct ifnet       *ifp = iflib_get_ifp(ctx);
	struct ix_rx_queue *que = sc->rx_queues;
	u32                bufsz, psrtype;

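	/*
	 * SRRCTL.BSIZEPKT is in 1 KB units: use 4 KB RX buffers for a
	 * jumbo MTU, 2 KB otherwise.
	 */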
	if (ifp->if_mtu > ETHERMTU)
		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	else
		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	psrtype = IXGBE_PSRTYPE_TCPHDR
	        | IXGBE_PSRTYPE_UDPHDR
	        | IXGBE_PSRTYPE_IPV4HDR
	        | IXGBE_PSRTYPE_IPV6HDR
	        | IXGBE_PSRTYPE_L2HDR;

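	/*
	 * PSRTYPE bit 29 is the low bit of the RQPL field; setting it
	 * tells the hardware that two RSS queues are in use.
	 */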
	if (sc->num_rx_queues > 1)
		psrtype |= 1 << 29;

	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);

	/* Tell the PF our max_frame size */
	if (ixgbevf_rlpml_set_vf(hw, sc->max_frame_size) != 0) {
		device_printf(sc->dev,
		    "There is a problem with the PF setup.  It is likely the"
		    " receive unit for this VF will not function correctly.\n");
	}
	scctx = sc->shared;

	for (int i = 0; i < sc->num_rx_queues; i++, que++) {
		struct rx_ring *rxr = &que->rxr;
		u64            rdba = rxr->rx_paddr;
		u32            reg, rxdctl;
		int            j = rxr->me;

		/* Disable the queue */
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
		for (int k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
			    IXGBE_RXDCTL_ENABLE)
				msec_delay(1);
			else
				break;
		}
		wmb();
		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j),
		    scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));

		/* Reset the ring indices */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);

		/* Set up the SRRCTL register */
		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(j));
		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		reg |= bufsz;
		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(j), reg);

		/* Capture Rx Tail index */
		rxr->tail = IXGBE_VFRDT(rxr->me);

		/* Do the queue enabling last */
		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
		for (int l = 0; l < 10; l++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			msec_delay(1);
		}
		wmb();

		/* Set the Tail Pointer */
#ifdef DEV_NETMAP
		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
		if (ifp->if_capenable & IFCAP_NETMAP) {
			struct netmap_adapter *na = NA(ifp);
			struct netmap_kring *kring = na->rx_rings[j];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
			    scctx->isc_nrxd[0] - 1);
	}

	/*
	 * Do not touch RSS and RETA settings for older hardware
	 * as those are shared between the PF and all VFs.
	 */
	if (sc->hw.mac.type >= ixgbe_mac_X550_vf)
		ixv_initialize_rss_mapping(sc);
} /* ixv_initialize_receive_units */

/************************************************************************
 * ixv_setup_vlan_support
 ************************************************************************/
static void
ixv_setup_vlan_support(if_ctx_t ctx)
{
	struct ifnet	*ifp = iflib_get_ifp(ctx);
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	u32             ctrl, vid, vfta, retry;

	/*
	 * We get here through if_init, meaning a soft reset;
	 * the reset has already cleared the VFTA and other state,
	 * so if no VLANs have been registered there is nothing to do.
	 */
	if (sc->num_vlans == 0)
		return;

	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
		/* Enable the queues */
		for (int i = 0; i < sc->num_rx_queues; i++) {
			ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
			ctrl |= IXGBE_RXDCTL_VME;
			IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
			/*
			 * Let the Rx path know that it needs to store the
			 * VLAN tag as part of the extra mbuf info.
			 */
			sc->rx_queues[i].rxr.vtag_strip = true;
		}
	}

	/*
	 * If filtering VLAN tags is disabled,
	 * there is no need to fill the VLAN Filter Table Array (VFTA).
	 */
	if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
		return;

	/*
	 * A soft reset zeroes out the VFTA, so
	 * we need to repopulate it now.
	 */
	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
		if (ixv_shadow_vfta[i] == 0)
			continue;
		vfta = ixv_shadow_vfta[i];
		/*
		 * Reconstruct the VLAN IDs from the bits
		 * set in each word of the shadow array.
		 */
		for (int j = 0; j < 32; j++) {
			retry = 0;
			if ((vfta & (1 << j)) == 0)
				continue;
			vid = (i * 32) + j;
			/* Call the shared code mailbox routine */
			while (hw->mac.ops.set_vfta(hw, vid, 0, true, false)) {
				if (++retry > 5)
					break;
			}
		}
	}
} /* ixv_setup_vlan_support */

/************************************************************************
 * ixv_if_register_vlan
 *
 *   Run via a vlan config EVENT, it enables us to use the
 *   HW Filter table since we can get the vlan id. This just
 *   creates the entry in the soft version of the VFTA; init
 *   will repopulate the real table.
 ************************************************************************/
static void
ixv_if_register_vlan(if_ctx_t ctx, u16 vtag)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	u16            index, bit;

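	/*
	 * The 12-bit vtag selects a 32-bit word of the shadow VFTA
	 * (upper 7 bits) and a bit within that word (lower 5 bits).
	 */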
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	ixv_shadow_vfta[index] |= (1 << bit);
	++sc->num_vlans;
} /* ixv_if_register_vlan */

/************************************************************************
 * ixv_if_unregister_vlan
 *
 *   Run via a vlan unconfig EVENT, remove our entry
 *   in the soft vfta.
 ************************************************************************/
static void
ixv_if_unregister_vlan(if_ctx_t ctx, u16 vtag)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	u16            index, bit;

	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	ixv_shadow_vfta[index] &= ~(1 << bit);
	--sc->num_vlans;
} /* ixv_if_unregister_vlan */

/************************************************************************
 * ixv_if_enable_intr
 ************************************************************************/
static void
ixv_if_enable_intr(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	struct ix_rx_queue *que = sc->rx_queues;
	u32             mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);

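	/*
	 * Auto-clear everything except the mailbox/link (OTHER and LSC)
	 * interrupts, which are re-armed explicitly.
	 */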
	mask = IXGBE_EIMS_ENABLE_MASK;
	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);

	for (int i = 0; i < sc->num_rx_queues; i++, que++)
		ixv_enable_queue(sc, que->msix);

	IXGBE_WRITE_FLUSH(hw);
} /* ixv_if_enable_intr */

/************************************************************************
 * ixv_if_disable_intr
 ************************************************************************/
static void
ixv_if_disable_intr(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);

	IXGBE_WRITE_REG(&sc->hw, IXGBE_VTEIAC, 0);
	IXGBE_WRITE_REG(&sc->hw, IXGBE_VTEIMC, ~0);
	IXGBE_WRITE_FLUSH(&sc->hw);
} /* ixv_if_disable_intr */

1639 /************************************************************************
1640  * ixv_if_rx_queue_intr_enable
1641  ************************************************************************/
1642 static int
1643 ixv_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
1644 {
1645 	struct ixgbe_softc	*sc = iflib_get_softc(ctx);
1646 	struct ix_rx_queue *que = &sc->rx_queues[rxqid];
1647 
1648 	ixv_enable_queue(sc, que->rxr.me);
1649 
1650 	return (0);
1651 } /* ixv_if_rx_queue_intr_enable */
1652 
1653 /************************************************************************
1654  * ixv_set_ivar
1655  *
1656  *   Setup the correct IVAR register for a particular MSI-X interrupt
1657  *    - entry is the register array entry
1658  *    - vector is the MSI-X vector for this queue
1659  *    - type is RX/TX/MISC
1660  ************************************************************************/
1661 static void
1662 ixv_set_ivar(struct ixgbe_softc *sc, u8 entry, u8 vector, s8 type)
1663 {
1664 	struct ixgbe_hw *hw = &sc->hw;
1665 	u32             ivar, index;
1666 
1667 	vector |= IXGBE_IVAR_ALLOC_VAL;
1668 
1669 	if (type == -1) { /* MISC IVAR */
1670 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
1671 		ivar &= ~0xFF;
1672 		ivar |= vector;
1673 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
1674 	} else {          /* RX/TX IVARS */
1675 		index = (16 * (entry & 1)) + (8 * type);
1676 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
1677 		ivar &= ~(0xFF << index);
1678 		ivar |= (vector << index);
1679 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
1680 	}
1681 } /* ixv_set_ivar */
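
/*
 * Worked example of the RX/TX IVAR math above (illustrative only):
 * for entry 3 (queue 3) and type 0 (RX), the allocation lands in
 * VTIVAR(3 >> 1) == VTIVAR(1) at bit offset 16 * (3 & 1) + 8 * 0 == 16,
 * i.e. bits 23:16 of the register.  The matching TX entry (type 1)
 * uses offset 16 + 8 == 24, so each VTIVAR register packs the RX and
 * TX vectors of two queues into its four bytes.
 */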
1682 
1683 /************************************************************************
1684  * ixv_configure_ivars
1685  ************************************************************************/
1686 static void
1687 ixv_configure_ivars(struct ixgbe_softc *sc)
1688 {
1689 	struct ix_rx_queue *que = sc->rx_queues;
1690 
1691 	MPASS(sc->num_rx_queues == sc->num_tx_queues);
1692 
1693 	for (int i = 0; i < sc->num_rx_queues; i++, que++) {
1694 		/* First the RX queue entry */
1695 		ixv_set_ivar(sc, i, que->msix, 0);
1696 		/* ... and the TX */
1697 		ixv_set_ivar(sc, i, que->msix, 1);
1698 		/* Set an initial value in EITR */
1699 		IXGBE_WRITE_REG(&sc->hw, IXGBE_VTEITR(que->msix),
1700 		    IXGBE_EITR_DEFAULT);
1701 	}
1702 
1703 	/* For the mailbox interrupt (entry is ignored when type == -1) */
1704 	ixv_set_ivar(sc, 1, sc->vector, -1);
1705 } /* ixv_configure_ivars */
1706 
1707 /************************************************************************
1708  * ixv_save_stats
1709  *
1710  *   The VF stats registers never have a truly virgin
1711  *   The VF stats registers never have a truly virgin
1712  *   starting point, so this routine creates an
1713  *   artificial one, marking ground zero at attach
1714  *   time, as it were.
1715 static void
1716 ixv_save_stats(struct ixgbe_softc *sc)
1717 {
1718 	if (sc->stats.vf.vfgprc || sc->stats.vf.vfgptc) {
1719 		sc->stats.vf.saved_reset_vfgprc +=
1720 		    sc->stats.vf.vfgprc - sc->stats.vf.base_vfgprc;
1721 		sc->stats.vf.saved_reset_vfgptc +=
1722 		    sc->stats.vf.vfgptc - sc->stats.vf.base_vfgptc;
1723 		sc->stats.vf.saved_reset_vfgorc +=
1724 		    sc->stats.vf.vfgorc - sc->stats.vf.base_vfgorc;
1725 		sc->stats.vf.saved_reset_vfgotc +=
1726 		    sc->stats.vf.vfgotc - sc->stats.vf.base_vfgotc;
1727 		sc->stats.vf.saved_reset_vfmprc +=
1728 		    sc->stats.vf.vfmprc - sc->stats.vf.base_vfmprc;
1729 	}
1730 } /* ixv_save_stats */
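
/*
 * Example of the accounting above (illustrative numbers): if
 * base_vfgprc was 1000 at the last baseline and vfgprc has since
 * reached 1500, a reset folds the delta of 500 into
 * saved_reset_vfgprc, so the running totals survive the counters
 * being rebased by ixv_init_stats() below.
 */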
1731 
1732 /************************************************************************
1733  * ixv_init_stats
1734  ************************************************************************/
1735 static void
1736 ixv_init_stats(struct ixgbe_softc *sc)
1737 {
1738 	struct ixgbe_hw *hw = &sc->hw;
1739 
1740 	sc->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
1741 	sc->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
1742 	sc->stats.vf.last_vfgorc |=
1743 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
1744 
1745 	sc->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
1746 	sc->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
1747 	sc->stats.vf.last_vfgotc |=
1748 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
1749 
1750 	sc->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
1751 
1752 	sc->stats.vf.base_vfgprc = sc->stats.vf.last_vfgprc;
1753 	sc->stats.vf.base_vfgorc = sc->stats.vf.last_vfgorc;
1754 	sc->stats.vf.base_vfgptc = sc->stats.vf.last_vfgptc;
1755 	sc->stats.vf.base_vfgotc = sc->stats.vf.last_vfgotc;
1756 	sc->stats.vf.base_vfmprc = sc->stats.vf.last_vfmprc;
1757 } /* ixv_init_stats */
1758 
1759 #define UPDATE_STAT_32(reg, last, count)                \
1760 {                                                       \
1761 	u32 current = IXGBE_READ_REG(hw, reg);          \
1762 	if (current < last)                             \
1763 		count += 0x100000000LL;                 \
1764 	last = current;                                 \
1765 	count &= 0xFFFFFFFF00000000LL;                  \
1766 	count |= current;                               \
1767 }
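
/*
 * Rollover example for UPDATE_STAT_32 (illustrative only): with
 * last == 0xFFFFFFF0 and a new hardware reading of 0x00000010, the
 * 32-bit register has wrapped, so 0x100000000 is added to the 64-bit
 * count before its low 32 bits are replaced with the new reading:
 *
 *   count  = 0x00000000FFFFFFF0           (before)
 *   count += 0x0000000100000000        -> 0x00000001FFFFFFF0
 *   count &= 0xFFFFFFFF00000000        -> 0x0000000100000000
 *   count |= 0x0000000000000010        -> 0x0000000100000010
 */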
1768 
1769 #define UPDATE_STAT_36(lsb, msb, last, count)           \
1770 {                                                       \
1771 	u64 cur_lsb = IXGBE_READ_REG(hw, lsb);          \
1772 	u64 cur_msb = IXGBE_READ_REG(hw, msb);          \
1773 	u64 current = ((cur_msb << 32) | cur_lsb);      \
1774 	if (current < last)                             \
1775 		count += 0x1000000000LL;                \
1776 	last = current;                                 \
1777 	count &= 0xFFFFFFF000000000LL;                  \
1778 	count |= current;                               \
1779 }
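
/*
 * UPDATE_STAT_36 applies the same scheme to the 36-bit octet
 * counters, which are split across LSB/MSB register pairs: a wrap
 * adds 2^36 (0x1000000000), and the mask 0xFFFFFFF000000000 preserves
 * only the software-maintained overflow bits above bit 35.
 */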
1780 
1781 /************************************************************************
1782  * ixv_update_stats - Update the board statistics counters.
1783  ************************************************************************/
1784 void
1785 ixv_update_stats(struct ixgbe_softc *sc)
1786 {
1787 	struct ixgbe_hw *hw = &sc->hw;
1788 	struct ixgbevf_hw_stats *stats = &sc->stats.vf;
1789 
1790 	UPDATE_STAT_32(IXGBE_VFGPRC, sc->stats.vf.last_vfgprc,
1791 	    sc->stats.vf.vfgprc);
1792 	UPDATE_STAT_32(IXGBE_VFGPTC, sc->stats.vf.last_vfgptc,
1793 	    sc->stats.vf.vfgptc);
1794 	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
1795 	    sc->stats.vf.last_vfgorc, sc->stats.vf.vfgorc);
1796 	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
1797 	    sc->stats.vf.last_vfgotc, sc->stats.vf.vfgotc);
1798 	UPDATE_STAT_32(IXGBE_VFMPRC, sc->stats.vf.last_vfmprc,
1799 	    sc->stats.vf.vfmprc);
1800 
1801 	/* Fill out the OS statistics structure */
1802 	IXGBE_SET_IPACKETS(sc, stats->vfgprc);
1803 	IXGBE_SET_OPACKETS(sc, stats->vfgptc);
1804 	IXGBE_SET_IBYTES(sc, stats->vfgorc);
1805 	IXGBE_SET_OBYTES(sc, stats->vfgotc);
1806 	IXGBE_SET_IMCASTS(sc, stats->vfmprc);
1807 } /* ixv_update_stats */
1808 
1809 /************************************************************************
1810  * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
1811  ************************************************************************/
1812 static void
1813 ixv_add_stats_sysctls(struct ixgbe_softc *sc)
1814 {
1815 	device_t                dev = sc->dev;
1816 	struct ix_tx_queue      *tx_que = sc->tx_queues;
1817 	struct ix_rx_queue      *rx_que = sc->rx_queues;
1818 	struct sysctl_ctx_list  *ctx = device_get_sysctl_ctx(dev);
1819 	struct sysctl_oid       *tree = device_get_sysctl_tree(dev);
1820 	struct sysctl_oid_list  *child = SYSCTL_CHILDREN(tree);
1821 	struct ixgbevf_hw_stats *stats = &sc->stats.vf;
1822 	struct sysctl_oid       *stat_node, *queue_node;
1823 	struct sysctl_oid_list  *stat_list, *queue_list;
1824 
1825 #define QUEUE_NAME_LEN 32
1826 	char                    namebuf[QUEUE_NAME_LEN];
1827 
1828 	/* Driver Statistics */
1829 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1830 	    CTLFLAG_RD, &sc->watchdog_events, "Watchdog timeouts");
1831 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1832 	    CTLFLAG_RD, &sc->link_irq, "Link MSI-X IRQ Handled");
1833 
1834 	for (int i = 0; i < sc->num_tx_queues; i++, tx_que++) {
1835 		struct tx_ring *txr = &tx_que->txr;
1836 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1837 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1838 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
1839 		queue_list = SYSCTL_CHILDREN(queue_node);
1840 
1841 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
1842 		    CTLFLAG_RD, &(txr->tso_tx), "TSO Packets");
1843 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1844 		    CTLFLAG_RD, &(txr->total_packets), "TX Packets");
1845 	}
1846 
1847 	for (int i = 0; i < sc->num_rx_queues; i++, rx_que++) {
1848 		struct rx_ring *rxr = &rx_que->rxr;
1849 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1850 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1851 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
1852 		queue_list = SYSCTL_CHILDREN(queue_node);
1853 
1854 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1855 		    CTLFLAG_RD, &(rx_que->irqs), "IRQs on queue");
1856 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1857 		    CTLFLAG_RD, &(rxr->rx_packets), "RX packets");
1858 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1859 		    CTLFLAG_RD, &(rxr->rx_bytes), "RX bytes");
1860 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1861 		    CTLFLAG_RD, &(rxr->rx_discarded), "Discarded RX packets");
1862 	}
1863 
1864 	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
1865 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
1866 	    "VF Statistics (read from HW registers)");
1867 	stat_list = SYSCTL_CHILDREN(stat_node);
1868 
1869 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1870 	    CTLFLAG_RD, &stats->vfgprc, "Good Packets Received");
1871 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1872 	    CTLFLAG_RD, &stats->vfgorc, "Good Octets Received");
1873 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1874 	    CTLFLAG_RD, &stats->vfmprc, "Multicast Packets Received");
1875 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1876 	    CTLFLAG_RD, &stats->vfgptc, "Good Packets Transmitted");
1877 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1878 	    CTLFLAG_RD, &stats->vfgotc, "Good Octets Transmitted");
1879 } /* ixv_add_stats_sysctls */
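
/*
 * The nodes registered above land under the device's sysctl tree
 * (root assumed from device_get_sysctl_tree()); assuming unit 0,
 * they can be read with sysctl(8), e.g.:
 *
 *   # sysctl dev.ixv.0.queue0.rx_packets
 *   # sysctl dev.ixv.0.mac.good_pkts_rcvd
 */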
1880 
1881 /************************************************************************
1882  * ixv_print_debug_info
1883  *
1884  *   Called from ixv_sysctl_debug() when the debug sysctl is set.
1885  *   Provides a way to inspect important statistics
1886  *   maintained by the driver and hardware.
1887  ************************************************************************/
1888 static void
1889 ixv_print_debug_info(struct ixgbe_softc *sc)
1890 {
1891 	device_t dev = sc->dev;
1892 	struct ixgbe_hw *hw = &sc->hw;
1893 
1894 	device_printf(dev, "Error Byte Count = %u\n",
1895 	    IXGBE_READ_REG(hw, IXGBE_ERRBC));
1896 
1897 	device_printf(dev, "MBX IRQ Handled: %lu\n", (unsigned long)sc->link_irq);
1898 } /* ixv_print_debug_info */
1899 
1900 /************************************************************************
1901  * ixv_sysctl_debug
1902  ************************************************************************/
1903 static int
1904 ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
1905 {
1906 	struct ixgbe_softc *sc;
1907 	int error, result;
1908 
1909 	result = -1;
1910 	error = sysctl_handle_int(oidp, &result, 0, req);
1911 
1912 	if (error || !req->newptr)
1913 		return (error);
1914 
1915 	if (result == 1) {
1916 		sc = (struct ixgbe_softc *)arg1;
1917 		ixv_print_debug_info(sc);
1918 	}
1919 
1920 	return (error);
1921 } /* ixv_sysctl_debug */
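
/*
 * Writing 1 to the sysctl attached to this handler dumps the debug
 * info above, e.g. (node name assumed; the OID is registered
 * elsewhere in the driver):
 *
 *   # sysctl dev.ixv.0.debug=1
 */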
1922 
1923 /************************************************************************
1924  * ixv_init_device_features
1925  ************************************************************************/
1926 static void
1927 ixv_init_device_features(struct ixgbe_softc *sc)
1928 {
1929 	sc->feat_cap = IXGBE_FEATURE_NETMAP |
1930 	    IXGBE_FEATURE_VF |
1931 	    IXGBE_FEATURE_LEGACY_TX;
1932 
1933 	/* Feature flags are still a bit sparse for VFs at the moment. */
1934 	switch (sc->hw.mac.type) {
1935 	case ixgbe_mac_82599_vf:
1936 		break;
1937 	case ixgbe_mac_X540_vf:
1938 		break;
1939 	case ixgbe_mac_X550_vf:
1940 	case ixgbe_mac_X550EM_x_vf:
1941 	case ixgbe_mac_X550EM_a_vf:
1942 		sc->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
1943 		sc->feat_cap |= IXGBE_FEATURE_RSS;
1944 		break;
1945 	default:
1946 		break;
1947 	}
1948 
1949 	/* Enabled by default... */
1950 	/* Is a virtual function (VF) */
1951 	if (sc->feat_cap & IXGBE_FEATURE_VF)
1952 		sc->feat_en |= IXGBE_FEATURE_VF;
1953 	/* Netmap */
1954 	if (sc->feat_cap & IXGBE_FEATURE_NETMAP)
1955 		sc->feat_en |= IXGBE_FEATURE_NETMAP;
1956 	/* Receive-Side Scaling (RSS) */
1957 	if (sc->feat_cap & IXGBE_FEATURE_RSS)
1958 		sc->feat_en |= IXGBE_FEATURE_RSS;
1959 	/* Needs an advanced context descriptor regardless of requested offloads */
1960 	if (sc->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
1961 		sc->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
1962 } /* ixv_init_device_features */
1963 
1964