xref: /freebsd/sys/dev/ixgbe/if_ixv.c (revision 6813d08ff55ae587abd7e2297e051d491c218de0)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2017, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 
36 #include "opt_inet.h"
37 #include "opt_inet6.h"
38 
39 #include "ixgbe.h"
40 #include "ifdi_if.h"
41 
42 #include <net/netmap.h>
43 #include <dev/netmap/netmap_kern.h>
44 
/************************************************************************
 * Driver version
 ************************************************************************/
char ixv_driver_version[] = "2.0.1-k";

/************************************************************************
 * PCI Device ID Table
 *
 *   Used by probe to select devices to load on
 *   Last field stores an index into ixv_strings
 *   Last entry must be all 0s
 *
 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 ************************************************************************/
static pci_vendor_info_t ixv_vendor_info_array[] =
{
	/* All supported VF device IDs share one description string */
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
	/* required last entry */
PVID_END
};
69 
/************************************************************************
 * Function prototypes
 ************************************************************************/
/* iflib device lifecycle (ifdi) entry points */
static void     *ixv_register(device_t dev);
static int      ixv_if_attach_pre(if_ctx_t ctx);
static int      ixv_if_attach_post(if_ctx_t ctx);
static int      ixv_if_detach(if_ctx_t ctx);

/* Queue allocation/teardown and interrupt plumbing */
static int      ixv_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
static int      ixv_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
static int      ixv_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
static void     ixv_if_queues_free(if_ctx_t ctx);
/* Hardware identification and PCI resource management */
static void     ixv_identify_hardware(if_ctx_t ctx);
static void     ixv_init_device_features(struct adapter *);
static int      ixv_allocate_pci_resources(if_ctx_t ctx);
static void     ixv_free_pci_resources(if_ctx_t ctx);
static int      ixv_setup_interface(if_ctx_t ctx);
static void     ixv_if_media_status(if_ctx_t , struct ifmediareq *);
static int      ixv_if_media_change(if_ctx_t ctx);
static void     ixv_if_update_admin_status(if_ctx_t ctx);
static int      ixv_if_msix_intr_assign(if_ctx_t ctx, int msix);

static int      ixv_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
static void     ixv_if_init(if_ctx_t ctx);
static void     ixv_if_local_timer(if_ctx_t ctx, uint16_t qid);
static void     ixv_if_stop(if_ctx_t ctx);
static int      ixv_negotiate_api(struct adapter *);

/* Hardware (TX/RX/RSS) initialization helpers */
static void     ixv_initialize_transmit_units(if_ctx_t ctx);
static void     ixv_initialize_receive_units(if_ctx_t ctx);
static void     ixv_initialize_rss_mapping(struct adapter *);

static void     ixv_setup_vlan_support(if_ctx_t ctx);
static void     ixv_configure_ivars(struct adapter *);
static void     ixv_if_enable_intr(if_ctx_t ctx);
static void     ixv_if_disable_intr(if_ctx_t ctx);
static void     ixv_if_multi_set(if_ctx_t ctx);

static void     ixv_if_register_vlan(if_ctx_t, u16);
static void     ixv_if_unregister_vlan(if_ctx_t, u16);

static uint64_t ixv_if_get_counter(if_ctx_t, ift_counter);

/* Statistics bookkeeping and sysctl plumbing */
static void     ixv_save_stats(struct adapter *);
static void     ixv_init_stats(struct adapter *);
static void     ixv_update_stats(struct adapter *);
static void     ixv_add_stats_sysctls(struct adapter *adapter);

static int      ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
static void     ixv_set_ivar(struct adapter *, u8, u8, s8);

static u8       *ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);

/* The MSI-X Interrupt handlers */
static int      ixv_msix_que(void *);
static int      ixv_msix_mbx(void *);
126 
/************************************************************************
 * FreeBSD Device Interface Entry Points
 ************************************************************************/
static device_method_t ixv_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, ixv_register),
	/* probe/attach/detach/shutdown are delegated to iflib */
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
	DEVMETHOD_END
};

static driver_t ixv_driver = {
	"ixv", ixv_methods, sizeof(struct adapter),
};

devclass_t ixv_devclass;
DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
/* Export the PNP table so devmatch(8) can autoload the module */
MODULE_PNP_INFO("U16:vendor;U16:device", pci, ixv, ixv_vendor_info_array,
    sizeof(ixv_vendor_info_array[0]), nitems(ixv_vendor_info_array) - 1);
MODULE_DEPEND(ixv, pci, 1, 1, 1);
MODULE_DEPEND(ixv, ether, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(ixv, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */
153 
/* iflib device-independent (ifdi) method table: the entry points
 * iflib invokes on this driver. */
static device_method_t ixv_if_methods[] = {
	DEVMETHOD(ifdi_attach_pre, ixv_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, ixv_if_attach_post),
	DEVMETHOD(ifdi_detach, ixv_if_detach),
	DEVMETHOD(ifdi_init, ixv_if_init),
	DEVMETHOD(ifdi_stop, ixv_if_stop),
	DEVMETHOD(ifdi_msix_intr_assign, ixv_if_msix_intr_assign),
	DEVMETHOD(ifdi_intr_enable, ixv_if_enable_intr),
	DEVMETHOD(ifdi_intr_disable, ixv_if_disable_intr),
	/* NOTE(review): both TX and RX re-enable map to the same handler;
	 * presumably intentional because TX/RX share a queue vector --
	 * verify against ixv_if_msix_intr_assign */
	DEVMETHOD(ifdi_tx_queue_intr_enable, ixv_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_rx_queue_intr_enable, ixv_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queues_alloc, ixv_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, ixv_if_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, ixv_if_queues_free),
	DEVMETHOD(ifdi_update_admin_status, ixv_if_update_admin_status),
	DEVMETHOD(ifdi_multi_set, ixv_if_multi_set),
	DEVMETHOD(ifdi_mtu_set, ixv_if_mtu_set),
	DEVMETHOD(ifdi_media_status, ixv_if_media_status),
	DEVMETHOD(ifdi_media_change, ixv_if_media_change),
	DEVMETHOD(ifdi_timer, ixv_if_local_timer),
	DEVMETHOD(ifdi_vlan_register, ixv_if_register_vlan),
	DEVMETHOD(ifdi_vlan_unregister, ixv_if_unregister_vlan),
	DEVMETHOD(ifdi_get_counter, ixv_if_get_counter),
	DEVMETHOD_END
};

static driver_t ixv_if_driver = {
  "ixv_if", ixv_if_methods, sizeof(struct adapter)
};
183 
/*
 * TUNEABLE PARAMETERS:
 */

/* Flow control setting, default to full */
/* NOTE(review): neither of these two tunables is referenced in this
 * portion of the file -- confirm they are still consumed elsewhere
 * before relying on them. */
static int ixv_flow_control = ixgbe_fc_full;
TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);

/*
 * Header split: this causes the hardware to DMA
 * the header into a separate mbuf from the payload,
 * it can be a performance win in some workloads, but
 * in others it actually hurts, its off by default.
 */
static int ixv_header_split = FALSE;
TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);

/*
 * Shadow VFTA table, this is needed because
 * the real filter table gets cleared during
 * a soft reset and we need to repopulate it.
 */
static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];

/* TX/RX descriptor processing routines, shared with the PF driver */
extern struct if_txrx ixgbe_txrx;
208 
/* Shared context handed to iflib via ixv_register(): describes queue
 * topology, DMA constraints and descriptor limits for this driver. */
static struct if_shared_ctx ixv_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_q_align = PAGE_SIZE,/* max(DBA_ALIGN, PAGE_SIZE) */
	.isc_tx_maxsize = IXGBE_TSO_SIZE,

	.isc_tx_maxsegsize = PAGE_SIZE,

	/* Largest single RX buffer ever handed to the hardware */
	.isc_rx_maxsize = MJUM16BYTES,
	.isc_rx_nsegments = 1,
	.isc_rx_maxsegsize = MJUM16BYTES,
	.isc_nfl = 1,
	.isc_ntxqs = 1,		/* one TX ring per queue set */
	.isc_nrxqs = 1,		/* one RX ring per queue set */
	.isc_admin_intrcnt = 1,	/* one vector for the mailbox/link IRQ */
	.isc_vendor_info = ixv_vendor_info_array,
	.isc_driver_version = ixv_driver_version,
	.isc_driver = &ixv_if_driver,

	/* Descriptor count limits and defaults (single ring each way) */
	.isc_nrxd_min = {MIN_RXD},
	.isc_ntxd_min = {MIN_TXD},
	.isc_nrxd_max = {MAX_RXD},
	.isc_ntxd_max = {MAX_TXD},
	.isc_nrxd_default = {DEFAULT_RXD},
	.isc_ntxd_default = {DEFAULT_TXD},
};

if_shared_ctx_t ixv_sctx = &ixv_sctx_init;
236 
/* Return the iflib shared context describing this driver; invoked by
 * iflib through the device_register method (dev is unused). */
static void *
ixv_register(device_t dev)
{
	return (ixv_sctx);
}
242 
243 /************************************************************************
244  * ixv_if_tx_queues_alloc
245  ************************************************************************/
246 static int
247 ixv_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
248                        int ntxqs, int ntxqsets)
249 {
250 	struct adapter     *adapter = iflib_get_softc(ctx);
251 	if_softc_ctx_t     scctx = adapter->shared;
252 	struct ix_tx_queue *que;
253 	int                i, j, error;
254 
255 	MPASS(adapter->num_tx_queues == ntxqsets);
256 	MPASS(ntxqs == 1);
257 
258 	/* Allocate queue structure memory */
259 	adapter->tx_queues =
260 	    (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
261 	                                 M_DEVBUF, M_NOWAIT | M_ZERO);
262 	if (!adapter->tx_queues) {
263 		device_printf(iflib_get_dev(ctx),
264 		    "Unable to allocate TX ring memory\n");
265 		return (ENOMEM);
266 	}
267 
268 	for (i = 0, que = adapter->tx_queues; i < ntxqsets; i++, que++) {
269 		struct tx_ring *txr = &que->txr;
270 
271 		txr->me = i;
272 		txr->adapter =  que->adapter = adapter;
273 		adapter->active_queues |= (u64)1 << txr->me;
274 
275 		/* Allocate report status array */
276 		if (!(txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_DEVBUF, M_NOWAIT | M_ZERO))) {
277 			error = ENOMEM;
278 			goto fail;
279 		}
280 		for (j = 0; j < scctx->isc_ntxd[0]; j++)
281 			txr->tx_rsq[j] = QIDX_INVALID;
282 		/* get the virtual and physical address of the hardware queues */
283 		txr->tail = IXGBE_VFTDT(txr->me);
284 		txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i*ntxqs];
285 		txr->tx_paddr = paddrs[i*ntxqs];
286 
287 		txr->bytes = 0;
288 		txr->total_packets = 0;
289 
290 	}
291 
292 	device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
293 	    adapter->num_tx_queues);
294 
295 	return (0);
296 
297  fail:
298 	ixv_if_queues_free(ctx);
299 
300 	return (error);
301 } /* ixv_if_tx_queues_alloc */
302 
303 /************************************************************************
304  * ixv_if_rx_queues_alloc
305  ************************************************************************/
306 static int
307 ixv_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
308                        int nrxqs, int nrxqsets)
309 {
310 	struct adapter     *adapter = iflib_get_softc(ctx);
311 	struct ix_rx_queue *que;
312 	int                i, error;
313 
314 	MPASS(adapter->num_rx_queues == nrxqsets);
315 	MPASS(nrxqs == 1);
316 
317 	/* Allocate queue structure memory */
318 	adapter->rx_queues =
319 	    (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue) * nrxqsets,
320 	                                 M_DEVBUF, M_NOWAIT | M_ZERO);
321 	if (!adapter->rx_queues) {
322 		device_printf(iflib_get_dev(ctx),
323 		    "Unable to allocate TX ring memory\n");
324 		error = ENOMEM;
325 		goto fail;
326 	}
327 
328 	for (i = 0, que = adapter->rx_queues; i < nrxqsets; i++, que++) {
329 		struct rx_ring *rxr = &que->rxr;
330 		rxr->me = i;
331 		rxr->adapter = que->adapter = adapter;
332 
333 
334 		/* get the virtual and physical address of the hw queues */
335 		rxr->tail = IXGBE_VFRDT(rxr->me);
336 		rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
337 		rxr->rx_paddr = paddrs[i*nrxqs];
338 		rxr->bytes = 0;
339 		rxr->que = que;
340 	}
341 
342 	device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
343 	    adapter->num_rx_queues);
344 
345 	return (0);
346 
347 fail:
348 	ixv_if_queues_free(ctx);
349 
350 	return (error);
351 } /* ixv_if_rx_queues_alloc */
352 
353 /************************************************************************
354  * ixv_if_queues_free
355  ************************************************************************/
356 static void
357 ixv_if_queues_free(if_ctx_t ctx)
358 {
359 	struct adapter     *adapter = iflib_get_softc(ctx);
360 	struct ix_tx_queue *que = adapter->tx_queues;
361 	int                i;
362 
363 	if (que == NULL)
364 		goto free;
365 
366 	for (i = 0; i < adapter->num_tx_queues; i++, que++) {
367 		struct tx_ring *txr = &que->txr;
368 		if (txr->tx_rsq == NULL)
369 			break;
370 
371 		free(txr->tx_rsq, M_DEVBUF);
372 		txr->tx_rsq = NULL;
373 	}
374 	if (adapter->tx_queues != NULL)
375 		free(adapter->tx_queues, M_DEVBUF);
376 free:
377 	if (adapter->rx_queues != NULL)
378 		free(adapter->rx_queues, M_DEVBUF);
379 	adapter->tx_queues = NULL;
380 	adapter->rx_queues = NULL;
381 } /* ixv_if_queues_free */
382 
383 /************************************************************************
384  * ixv_if_attach_pre - Device initialization routine
385  *
386  *   Called when the driver is being loaded.
387  *   Identifies the type of hardware, allocates all resources
388  *   and initializes the hardware.
389  *
390  *   return 0 on success, positive on failure
391  ************************************************************************/
static int
ixv_if_attach_pre(if_ctx_t ctx)
{
	struct adapter  *adapter;
	device_t        dev;
	if_softc_ctx_t  scctx;
	struct ixgbe_hw *hw;
	int             error = 0;

	INIT_DEBUGOUT("ixv_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	dev = iflib_get_dev(ctx);
	adapter = iflib_get_softc(ctx);
	adapter->dev = dev;
	adapter->ctx = ctx;
	adapter->hw.back = adapter;
	scctx = adapter->shared = iflib_get_softc_ctx(ctx);
	adapter->media = iflib_get_media(ctx);
	hw = &adapter->hw;

	/* Do base PCI setup - map BAR0 */
	if (ixv_allocate_pci_resources(ctx)) {
		device_printf(dev, "ixv_allocate_pci_resources() failed!\n");
		error = ENXIO;
		goto err_out;
	}

	/* SYSCTL APIs */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixv_sysctl_debug, "I",
	    "Debug Info");

	/* Determine hardware revision */
	ixv_identify_hardware(ctx);
	ixv_init_device_features(adapter);

	/* Initialize the shared code */
	error = ixgbe_init_ops_vf(hw);
	if (error) {
		device_printf(dev, "ixgbe_init_ops_vf() failed!\n");
		error = EIO;
		goto err_out;
	}

	/* Setup the mailbox */
	ixgbe_init_mbx_params_vf(hw);

	/* Reset the VF; any reset failure is fatal for attach */
	error = hw->mac.ops.reset_hw(hw);
	if (error == IXGBE_ERR_RESET_FAILED)
		device_printf(dev, "...reset_hw() failure: Reset Failed!\n");
	else if (error)
		device_printf(dev, "...reset_hw() failed with error %d\n",
		    error);
	if (error) {
		error = EIO;
		goto err_out;
	}

	error = hw->mac.ops.init_hw(hw);
	if (error) {
		device_printf(dev, "...init_hw() failed with error %d\n",
		    error);
		error = EIO;
		goto err_out;
	}

	/* Negotiate mailbox API version */
	error = ixv_negotiate_api(adapter);
	if (error) {
		device_printf(dev,
		    "Mailbox API negotiation failed during attach!\n");
		goto err_out;
	}

	/* If no mac address was assigned, make a random one */
	if (!ixv_check_ether_addr(hw->mac.addr)) {
		u8 addr[ETHER_ADDR_LEN];
		arc4rand(&addr, sizeof(addr), 0);
		addr[0] &= 0xFE;	/* clear the multicast/group bit */
		addr[0] |= 0x02;	/* set the locally-administered bit */
		bcopy(addr, hw->mac.addr, sizeof(addr));
		bcopy(addr, hw->mac.perm_addr, sizeof(addr));
	}

	/* Most of the iflib initialization... */

	iflib_set_mac(ctx, hw->mac.addr);
	/* Allow two queue sets on X550-class VFs, one on older parts */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550_vf:
	case ixgbe_mac_X550EM_x_vf:
	case ixgbe_mac_X550EM_a_vf:
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 2;
		break;
	default:
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 1;
	}
	/* Descriptor ring sizes in bytes; the extra u32 on the TX ring is
	 * presumably for a status/head write-back word -- TODO confirm
	 * against ixgbe_txrx */
	scctx->isc_txqsizes[0] =
	    roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
	    sizeof(u32), DBA_ALIGN);
	scctx->isc_rxqsizes[0] =
	    roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
	    DBA_ALIGN);
	/* XXX */
	scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
	    CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
	scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
	scctx->isc_msix_bar = PCIR_BAR(MSIX_82598_BAR);
	scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
	scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
	scctx->isc_tx_tso_segsize_max = PAGE_SIZE;

	scctx->isc_txrx = &ixgbe_txrx;

	/*
	 * Tell the upper layer(s) we support everything the PF
	 * driver does except...
	 *   hardware stats
	 *   Wake-on-LAN
	 */
	/* NOTE(review): the XOR assumes IXGBE_CAPS has both bits set;
	 * if it ever did not, this would turn them *on* instead -- verify */
	scctx->isc_capenable = IXGBE_CAPS;
	scctx->isc_capenable ^= IFCAP_HWSTATS | IFCAP_WOL;

	INIT_DEBUGOUT("ixv_if_attach_pre: end");

	return (0);

err_out:
	ixv_free_pci_resources(ctx);

	return (error);
} /* ixv_if_attach_pre */
525 
526 static int
527 ixv_if_attach_post(if_ctx_t ctx)
528 {
529 	struct adapter *adapter = iflib_get_softc(ctx);
530 	device_t       dev = iflib_get_dev(ctx);
531 	int            error = 0;
532 
533 	/* Setup OS specific network interface */
534 	error = ixv_setup_interface(ctx);
535 	if (error) {
536 		device_printf(dev, "Interface setup failed: %d\n", error);
537 		goto end;
538 	}
539 
540 	/* Do the stats setup */
541 	ixv_save_stats(adapter);
542 	ixv_init_stats(adapter);
543 	ixv_add_stats_sysctls(adapter);
544 
545 end:
546 	return error;
547 } /* ixv_if_attach_post */
548 
549 /************************************************************************
550  * ixv_detach - Device removal routine
551  *
552  *   Called when the driver is being removed.
553  *   Stops the adapter and deallocates all the resources
554  *   that were allocated for driver operation.
555  *
556  *   return 0 on success, positive on failure
557  ************************************************************************/
static int
ixv_if_detach(if_ctx_t ctx)
{
	INIT_DEBUGOUT("ixv_detach: begin");

	/* Only the PCI/BAR resources are released here; queue and
	 * interrupt teardown happens via the other ifdi methods
	 * (ifdi_stop, ifdi_queues_free). */
	ixv_free_pci_resources(ctx);

	return (0);
} /* ixv_if_detach */
567 
568 /************************************************************************
569  * ixv_if_mtu_set
570  ************************************************************************/
571 static int
572 ixv_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
573 {
574 	struct adapter *adapter = iflib_get_softc(ctx);
575 	struct ifnet   *ifp = iflib_get_ifp(ctx);
576 	int            error = 0;
577 
578 	IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
579 	if (mtu > IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR) {
580 		error = EINVAL;
581 	} else {
582 		ifp->if_mtu = mtu;
583 		adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
584 	}
585 
586 	return error;
587 } /* ixv_if_mtu_set */
588 
589 /************************************************************************
590  * ixv_if_init - Init entry point
591  *
592  *   Used in two ways: It is used by the stack as an init entry
593  *   point in network interface structure. It is also used
594  *   by the driver as a hw/sw initialization routine to get
595  *   to a consistent state.
596  *
 *   Note: returns no status; failures are reported via device_printf.
598  ************************************************************************/
static void
ixv_if_init(if_ctx_t ctx)
{
	struct adapter  *adapter = iflib_get_softc(ctx);
	struct ifnet    *ifp = iflib_get_ifp(ctx);
	device_t        dev = iflib_get_dev(ctx);
	struct ixgbe_hw *hw = &adapter->hw;
	int             error = 0;

	INIT_DEBUGOUT("ixv_if_init: begin");
	/* Clear the stopped flag first; presumably stop_adapter()
	 * short-circuits when it is already set -- verify in shared code */
	hw->adapter_stopped = FALSE;
	hw->mac.ops.stop_adapter(hw);

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);

	/* Reset VF and renegotiate mailbox API version */
	hw->mac.ops.reset_hw(hw);
	hw->mac.ops.start_hw(hw);
	error = ixv_negotiate_api(adapter);
	if (error) {
		/* NOTE(review): returns with interrupts still disabled;
		 * the interface is left non-functional but no error is
		 * propagated (ifdi_init is void) */
		device_printf(dev,
		    "Mailbox API negotiation failed in if_init!\n");
		return;
	}

	ixv_initialize_transmit_units(ctx);

	/* Setup Multicast table */
	ixv_if_multi_set(ctx);

	/*
	 * Determine the correct mbuf pool
	 * for doing jumbo/headersplit
	 */
	if (ifp->if_mtu > ETHERMTU)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else
		adapter->rx_mbuf_sz = MCLBYTES;

	/* Configure RX settings */
	ixv_initialize_receive_units(ctx);

	/* Set up VLAN offload and filter */
	ixv_setup_vlan_support(ctx);

	/* Set up MSI-X routing */
	ixv_configure_ivars(adapter);

	/* Set up auto-mask */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);

	/* Stats init */
	ixv_init_stats(adapter);

	/* Config/Enable Link */
	hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
	    FALSE);

	/* And now turn on interrupts */
	ixv_if_enable_intr(ctx);

	return;
} /* ixv_if_init */
670 
671 /************************************************************************
672  * ixv_enable_queue
673  ************************************************************************/
674 static inline void
675 ixv_enable_queue(struct adapter *adapter, u32 vector)
676 {
677 	struct ixgbe_hw *hw = &adapter->hw;
678 	u32             queue = 1 << vector;
679 	u32             mask;
680 
681 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
682 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
683 } /* ixv_enable_queue */
684 
685 /************************************************************************
686  * ixv_disable_queue
687  ************************************************************************/
688 static inline void
689 ixv_disable_queue(struct adapter *adapter, u32 vector)
690 {
691 	struct ixgbe_hw *hw = &adapter->hw;
692 	u64             queue = (u64)(1 << vector);
693 	u32             mask;
694 
695 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
696 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
697 } /* ixv_disable_queue */
698 
699 
700 /************************************************************************
701  * ixv_msix_que - MSI-X Queue Interrupt Service routine
702  ************************************************************************/
static int
ixv_msix_que(void *arg)
{
	struct ix_rx_queue *que = arg;	/* the queue bound to this vector */
	struct adapter     *adapter = que->adapter;

	/* Mask this queue's vector; presumably re-enabled later through
	 * ixv_if_rx_queue_intr_enable once deferred processing finishes */
	ixv_disable_queue(adapter, que->msix);
	++que->irqs;	/* interrupt statistics only */

	/* Hand the actual TX/RX work off to the iflib queue task */
	return (FILTER_SCHEDULE_THREAD);
} /* ixv_msix_que */
714 
715 /************************************************************************
716  * ixv_msix_mbx
717  ************************************************************************/
718 static int
719 ixv_msix_mbx(void *arg)
720 {
721 	struct adapter  *adapter = arg;
722 	struct ixgbe_hw *hw = &adapter->hw;
723 	u32             reg;
724 
725 	++adapter->link_irq;
726 
727 	/* First get the cause */
728 	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
729 	/* Clear interrupt with write */
730 	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
731 
732 	/* Link status change */
733 	if (reg & IXGBE_EICR_LSC)
734 		iflib_admin_intr_deferred(adapter->ctx);
735 
736 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
737 
738 	return (FILTER_HANDLED);
739 } /* ixv_msix_mbx */
740 
741 /************************************************************************
742  * ixv_media_status - Media Ioctl callback
743  *
744  *   Called whenever the user queries the status of
745  *   the interface using ifconfig.
746  ************************************************************************/
747 static void
748 ixv_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
749 {
750 	struct adapter *adapter = iflib_get_softc(ctx);
751 
752 	INIT_DEBUGOUT("ixv_media_status: begin");
753 
754 	iflib_admin_intr_deferred(ctx);
755 
756 	ifmr->ifm_status = IFM_AVALID;
757 	ifmr->ifm_active = IFM_ETHER;
758 
759 	if (!adapter->link_active)
760 		return;
761 
762 	ifmr->ifm_status |= IFM_ACTIVE;
763 
764 	switch (adapter->link_speed) {
765 		case IXGBE_LINK_SPEED_1GB_FULL:
766 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
767 			break;
768 		case IXGBE_LINK_SPEED_10GB_FULL:
769 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
770 			break;
771 		case IXGBE_LINK_SPEED_100_FULL:
772 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
773 			break;
774 		case IXGBE_LINK_SPEED_10_FULL:
775 			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
776 			break;
777 	}
778 } /* ixv_if_media_status */
779 
780 /************************************************************************
781  * ixv_if_media_change - Media Ioctl callback
782  *
783  *   Called when the user changes speed/duplex using
784  *   media/mediopt option with ifconfig.
785  ************************************************************************/
786 static int
787 ixv_if_media_change(if_ctx_t ctx)
788 {
789 	struct adapter *adapter = iflib_get_softc(ctx);
790 	struct ifmedia *ifm = iflib_get_media(ctx);
791 
792 	INIT_DEBUGOUT("ixv_media_change: begin");
793 
794 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
795 		return (EINVAL);
796 
797 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
798 	case IFM_AUTO:
799 		break;
800 	default:
801 		device_printf(adapter->dev, "Only auto media type\n");
802 		return (EINVAL);
803 	}
804 
805 	return (0);
806 } /* ixv_if_media_change */
807 
808 
809 /************************************************************************
810  * ixv_negotiate_api
811  *
812  *   Negotiate the Mailbox API with the PF;
813  *   start with the most featured API first.
814  ************************************************************************/
815 static int
816 ixv_negotiate_api(struct adapter *adapter)
817 {
818 	struct ixgbe_hw *hw = &adapter->hw;
819 	int             mbx_api[] = { ixgbe_mbox_api_11,
820 	                              ixgbe_mbox_api_10,
821 	                              ixgbe_mbox_api_unknown };
822 	int             i = 0;
823 
824 	while (mbx_api[i] != ixgbe_mbox_api_unknown) {
825 		if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
826 			return (0);
827 		i++;
828 	}
829 
830 	return (EINVAL);
831 } /* ixv_negotiate_api */
832 
833 
834 /************************************************************************
835  * ixv_if_multi_set - Multicast Update
836  *
837  *   Called whenever multicast address list is updated.
838  ************************************************************************/
839 static void
840 ixv_if_multi_set(if_ctx_t ctx)
841 {
842 	u8       mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
843 	struct adapter     *adapter = iflib_get_softc(ctx);
844 	u8                 *update_ptr;
845 	struct ifmultiaddr *ifma;
846 	if_t               ifp = iflib_get_ifp(ctx);
847 	int                mcnt = 0;
848 
849 	IOCTL_DEBUGOUT("ixv_if_multi_set: begin");
850 
851 	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
852 		if (ifma->ifma_addr->sa_family != AF_LINK)
853 			continue;
854 		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
855 		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
856 		    IXGBE_ETH_LENGTH_OF_ADDRESS);
857 		mcnt++;
858 	}
859 
860 	update_ptr = mta;
861 
862 	adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
863 	    ixv_mc_array_itr, TRUE);
864 } /* ixv_if_multi_set */
865 
866 /************************************************************************
867  * ixv_mc_array_itr
868  *
869  *   An iterator function needed by the multicast shared code.
870  *   It feeds the shared code routine the addresses in the
871  *   array of ixv_set_multi() one by one.
872  ************************************************************************/
873 static u8 *
874 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
875 {
876 	u8 *addr = *update_ptr;
877 	u8 *newptr;
878 
879 	*vmdq = 0;
880 
881 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
882 	*update_ptr = newptr;
883 
884 	return addr;
885 } /* ixv_mc_array_itr */
886 
887 /************************************************************************
888  * ixv_if_local_timer - Timer routine
889  *
890  *   Checks for link status, updates statistics,
891  *   and runs the watchdog check.
892  ************************************************************************/
893 static void
894 ixv_if_local_timer(if_ctx_t ctx, uint16_t qid)
895 {
896 	if (qid != 0)
897 		return;
898 
899 	/* Fire off the adminq task */
900 	iflib_admin_intr_deferred(ctx);
901 } /* ixv_if_local_timer */
902 
903 /************************************************************************
904  * ixv_if_update_admin_status - Update OS on link state
905  *
906  * Note: Only updates the OS on the cached link state.
907  *       The real check of the hardware only happens with
908  *       a link interrupt.
909  ************************************************************************/
static void
ixv_if_update_admin_status(if_ctx_t ctx)
{
	struct adapter *adapter = iflib_get_softc(ctx);
	device_t       dev = iflib_get_dev(ctx);
	s32            status;

	/* Force a fresh mailbox query of the PF for link state */
	adapter->hw.mac.get_link_status = TRUE;

	status = ixgbe_check_link(&adapter->hw, &adapter->link_speed,
	    &adapter->link_up, FALSE);

	if (status != IXGBE_SUCCESS && adapter->hw.adapter_stopped == FALSE) {
		/* Mailbox's Clear To Send status is lost or timeout occurred.
		 * We need reinitialization. */
		iflib_get_ifp(ctx)->if_init(ctx);
	}

	if (adapter->link_up) {
		if (adapter->link_active == FALSE) {
			if (bootverbose)
				/* 128 is presumably the value of
				 * IXGBE_LINK_SPEED_10GB_FULL -- TODO confirm
				 * against the shared code */
				device_printf(dev, "Link is up %d Gbps %s \n",
				    ((adapter->link_speed == 128) ? 10 : 1),
				    "Full Duplex");
			adapter->link_active = TRUE;
			/* NOTE(review): always reports 10 Gb/s to the stack
			 * even when the negotiated speed is lower */
			iflib_link_state_change(ctx, LINK_STATE_UP,
			    IF_Gbps(10));
		}
	} else { /* Link down */
		if (adapter->link_active == TRUE) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			iflib_link_state_change(ctx, LINK_STATE_DOWN,  0);
			adapter->link_active = FALSE;
		}
	}

	/* Stats Update */
	ixv_update_stats(adapter);
} /* ixv_if_update_admin_status */
950 
951 
952 /************************************************************************
953  * ixv_if_stop - Stop the hardware
954  *
955  *   Disables all traffic on the adapter by issuing a
956  *   global reset on the MAC and deallocates TX/RX buffers.
957  ************************************************************************/
958 static void
959 ixv_if_stop(if_ctx_t ctx)
960 {
961 	struct adapter  *adapter = iflib_get_softc(ctx);
962 	struct ixgbe_hw *hw = &adapter->hw;
963 
964 	INIT_DEBUGOUT("ixv_stop: begin\n");
965 
966 	ixv_if_disable_intr(ctx);
967 
968 	hw->mac.ops.reset_hw(hw);
969 	adapter->hw.adapter_stopped = FALSE;
970 	hw->mac.ops.stop_adapter(hw);
971 
972 	/* Update the stack */
973 	adapter->link_up = FALSE;
974 	ixv_if_update_admin_status(ctx);
975 
976 	/* reprogram the RAR[0] in case user changed it. */
977 	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
978 } /* ixv_if_stop */
979 
980 
981 /************************************************************************
982  * ixv_identify_hardware - Determine hardware revision.
983  ************************************************************************/
984 static void
985 ixv_identify_hardware(if_ctx_t ctx)
986 {
987 	struct adapter  *adapter = iflib_get_softc(ctx);
988 	device_t        dev = iflib_get_dev(ctx);
989 	struct ixgbe_hw *hw = &adapter->hw;
990 
991 	/* Save off the information about this board */
992 	hw->vendor_id = pci_get_vendor(dev);
993 	hw->device_id = pci_get_device(dev);
994 	hw->revision_id = pci_get_revid(dev);
995 	hw->subsystem_vendor_id = pci_get_subvendor(dev);
996 	hw->subsystem_device_id = pci_get_subdevice(dev);
997 
998 	/* A subset of set_mac_type */
999 	switch (hw->device_id) {
1000 	case IXGBE_DEV_ID_82599_VF:
1001 		hw->mac.type = ixgbe_mac_82599_vf;
1002 		break;
1003 	case IXGBE_DEV_ID_X540_VF:
1004 		hw->mac.type = ixgbe_mac_X540_vf;
1005 		break;
1006 	case IXGBE_DEV_ID_X550_VF:
1007 		hw->mac.type = ixgbe_mac_X550_vf;
1008 		break;
1009 	case IXGBE_DEV_ID_X550EM_X_VF:
1010 		hw->mac.type = ixgbe_mac_X550EM_x_vf;
1011 		break;
1012 	case IXGBE_DEV_ID_X550EM_A_VF:
1013 		hw->mac.type = ixgbe_mac_X550EM_a_vf;
1014 		break;
1015 	default:
1016 		device_printf(dev, "unknown mac type\n");
1017 		hw->mac.type = ixgbe_mac_unknown;
1018 		break;
1019 	}
1020 } /* ixv_identify_hardware */
1021 
1022 /************************************************************************
1023  * ixv_if_msix_intr_assign - Setup MSI-X Interrupt resources and handlers
1024  ************************************************************************/
1025 static int
1026 ixv_if_msix_intr_assign(if_ctx_t ctx, int msix)
1027 {
1028 	struct adapter     *adapter = iflib_get_softc(ctx);
1029 	device_t           dev = iflib_get_dev(ctx);
1030 	struct ix_rx_queue *rx_que = adapter->rx_queues;
1031 	struct ix_tx_queue *tx_que;
1032 	int                error, rid, vector = 0;
1033 	char               buf[16];
1034 
1035 	for (int i = 0; i < adapter->num_rx_queues; i++, vector++, rx_que++) {
1036 		rid = vector + 1;
1037 
1038 		snprintf(buf, sizeof(buf), "rxq%d", i);
1039 		error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
1040 		    IFLIB_INTR_RX, ixv_msix_que, rx_que, rx_que->rxr.me, buf);
1041 
1042 		if (error) {
1043 			device_printf(iflib_get_dev(ctx),
1044 			    "Failed to allocate que int %d err: %d", i, error);
1045 			adapter->num_rx_queues = i + 1;
1046 			goto fail;
1047 		}
1048 
1049 		rx_que->msix = vector;
1050 		adapter->active_queues |= (u64)(1 << rx_que->msix);
1051 
1052 	}
1053 
1054 	for (int i = 0; i < adapter->num_tx_queues; i++) {
1055 		snprintf(buf, sizeof(buf), "txq%d", i);
1056 		tx_que = &adapter->tx_queues[i];
1057 		tx_que->msix = i % adapter->num_rx_queues;
1058 		iflib_softirq_alloc_generic(ctx,
1059 		    &adapter->rx_queues[tx_que->msix].que_irq,
1060 		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
1061 	}
1062 	rid = vector + 1;
1063 	error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid,
1064 	    IFLIB_INTR_ADMIN, ixv_msix_mbx, adapter, 0, "aq");
1065 	if (error) {
1066 		device_printf(iflib_get_dev(ctx),
1067 		    "Failed to register admin handler");
1068 		return (error);
1069 	}
1070 
1071 	adapter->vector = vector;
1072 	/*
1073 	 * Due to a broken design QEMU will fail to properly
1074 	 * enable the guest for MSIX unless the vectors in
1075 	 * the table are all set up, so we must rewrite the
1076 	 * ENABLE in the MSIX control register again at this
1077 	 * point to cause it to successfully initialize us.
1078 	 */
1079 	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
1080 		int msix_ctrl;
1081 		pci_find_cap(dev, PCIY_MSIX, &rid);
1082 		rid += PCIR_MSIX_CTRL;
1083 		msix_ctrl = pci_read_config(dev, rid, 2);
1084 		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1085 		pci_write_config(dev, rid, msix_ctrl, 2);
1086 	}
1087 
1088 	return (0);
1089 
1090 fail:
1091 	iflib_irq_free(ctx, &adapter->irq);
1092 	rx_que = adapter->rx_queues;
1093 	for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++)
1094 		iflib_irq_free(ctx, &rx_que->que_irq);
1095 
1096 	return (error);
1097 } /* ixv_if_msix_intr_assign */
1098 
1099 /************************************************************************
1100  * ixv_allocate_pci_resources
1101  ************************************************************************/
1102 static int
1103 ixv_allocate_pci_resources(if_ctx_t ctx)
1104 {
1105 	struct adapter *adapter = iflib_get_softc(ctx);
1106 	device_t       dev = iflib_get_dev(ctx);
1107 	int            rid;
1108 
1109 	rid = PCIR_BAR(0);
1110 	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1111 	    RF_ACTIVE);
1112 
1113 	if (!(adapter->pci_mem)) {
1114 		device_printf(dev, "Unable to allocate bus resource: memory\n");
1115 		return (ENXIO);
1116 	}
1117 
1118 	adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
1119 	adapter->osdep.mem_bus_space_handle =
1120 	    rman_get_bushandle(adapter->pci_mem);
1121 	adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
1122 
1123 	return (0);
1124 } /* ixv_allocate_pci_resources */
1125 
1126 /************************************************************************
1127  * ixv_free_pci_resources
1128  ************************************************************************/
1129 static void
1130 ixv_free_pci_resources(if_ctx_t ctx)
1131 {
1132 	struct adapter     *adapter = iflib_get_softc(ctx);
1133 	struct ix_rx_queue *que = adapter->rx_queues;
1134 	device_t           dev = iflib_get_dev(ctx);
1135 
1136 	/* Release all msix queue resources */
1137 	if (adapter->intr_type == IFLIB_INTR_MSIX)
1138 		iflib_irq_free(ctx, &adapter->irq);
1139 
1140 	if (que != NULL) {
1141 		for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
1142 			iflib_irq_free(ctx, &que->que_irq);
1143 		}
1144 	}
1145 
1146 	/* Clean the Legacy or Link interrupt last */
1147 	if (adapter->pci_mem != NULL)
1148 		bus_release_resource(dev, SYS_RES_MEMORY,
1149 				     PCIR_BAR(0), adapter->pci_mem);
1150 } /* ixv_free_pci_resources */
1151 
1152 /************************************************************************
1153  * ixv_setup_interface
1154  *
1155  *   Setup networking device structure and register an interface.
1156  ************************************************************************/
1157 static int
1158 ixv_setup_interface(if_ctx_t ctx)
1159 {
1160 	struct adapter *adapter = iflib_get_softc(ctx);
1161 	if_softc_ctx_t scctx = adapter->shared;
1162 	struct ifnet   *ifp = iflib_get_ifp(ctx);
1163 
1164 	INIT_DEBUGOUT("ixv_setup_interface: begin");
1165 
1166 	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
1167 	if_setbaudrate(ifp, IF_Gbps(10));
1168 	ifp->if_snd.ifq_maxlen = scctx->isc_ntxd[0] - 2;
1169 
1170 
1171 	adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
1172 	ifmedia_add(adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1173 	ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
1174 
1175 	return 0;
1176 } /* ixv_setup_interface */
1177 
1178 /************************************************************************
1179  * ixv_if_get_counter
1180  ************************************************************************/
1181 static uint64_t
1182 ixv_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1183 {
1184 	struct adapter *adapter = iflib_get_softc(ctx);
1185 	if_t           ifp = iflib_get_ifp(ctx);
1186 
1187 	switch (cnt) {
1188 	case IFCOUNTER_IPACKETS:
1189 		return (adapter->ipackets);
1190 	case IFCOUNTER_OPACKETS:
1191 		return (adapter->opackets);
1192 	case IFCOUNTER_IBYTES:
1193 		return (adapter->ibytes);
1194 	case IFCOUNTER_OBYTES:
1195 		return (adapter->obytes);
1196 	case IFCOUNTER_IMCASTS:
1197 		return (adapter->imcasts);
1198 	default:
1199 		return (if_get_counter_default(ifp, cnt));
1200 	}
1201 } /* ixv_if_get_counter */
1202 
1203 /************************************************************************
1204  * ixv_initialize_transmit_units - Enable transmit unit.
1205  ************************************************************************/
1206 static void
1207 ixv_initialize_transmit_units(if_ctx_t ctx)
1208 {
1209 	struct adapter     *adapter = iflib_get_softc(ctx);
1210 	struct ixgbe_hw    *hw = &adapter->hw;
1211 	if_softc_ctx_t     scctx = adapter->shared;
1212 	struct ix_tx_queue *que = adapter->tx_queues;
1213 	int                i;
1214 
1215 	for (i = 0; i < adapter->num_tx_queues; i++, que++) {
1216 		struct tx_ring *txr = &que->txr;
1217 		u64            tdba = txr->tx_paddr;
1218 		u32            txctrl, txdctl;
1219 		int            j = txr->me;
1220 
1221 		/* Set WTHRESH to 8, burst writeback */
1222 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1223 		txdctl |= (8 << 16);
1224 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1225 
1226 		/* Set the HW Tx Head and Tail indices */
1227 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(j), 0);
1228 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(j), 0);
1229 
1230 		/* Set Tx Tail register */
1231 		txr->tail = IXGBE_VFTDT(j);
1232 
1233 		txr->tx_rs_cidx = txr->tx_rs_pidx = txr->tx_cidx_processed = 0;
1234 		for (int k = 0; k < scctx->isc_ntxd[0]; k++)
1235 			txr->tx_rsq[k] = QIDX_INVALID;
1236 
1237 		/* Set Ring parameters */
1238 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
1239 		    (tdba & 0x00000000ffffffffULL));
1240 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
1241 		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j),
1242 		    scctx->isc_ntxd[0] * sizeof(struct ixgbe_legacy_tx_desc));
1243 		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
1244 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
1245 		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
1246 
1247 		/* Now enable */
1248 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1249 		txdctl |= IXGBE_TXDCTL_ENABLE;
1250 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1251 	}
1252 
1253 	return;
1254 } /* ixv_initialize_transmit_units */
1255 
1256 /************************************************************************
1257  * ixv_initialize_rss_mapping
1258  ************************************************************************/
1259 static void
1260 ixv_initialize_rss_mapping(struct adapter *adapter)
1261 {
1262 	struct ixgbe_hw *hw = &adapter->hw;
1263 	u32             reta = 0, mrqc, rss_key[10];
1264 	int             queue_id;
1265 	int             i, j;
1266 	u32             rss_hash_config;
1267 
1268 	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
1269 		/* Fetch the configured RSS key */
1270 		rss_getkey((uint8_t *)&rss_key);
1271 	} else {
1272 		/* set up random bits */
1273 		arc4rand(&rss_key, sizeof(rss_key), 0);
1274 	}
1275 
1276 	/* Now fill out hash function seeds */
1277 	for (i = 0; i < 10; i++)
1278 		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);
1279 
1280 	/* Set up the redirection table */
1281 	for (i = 0, j = 0; i < 64; i++, j++) {
1282 		if (j == adapter->num_rx_queues)
1283 			j = 0;
1284 
1285 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
1286 			/*
1287 			 * Fetch the RSS bucket id for the given indirection
1288 			 * entry. Cap it at the number of configured buckets
1289 			 * (which is num_rx_queues.)
1290 			 */
1291 			queue_id = rss_get_indirection_to_bucket(i);
1292 			queue_id = queue_id % adapter->num_rx_queues;
1293 		} else
1294 			queue_id = j;
1295 
1296 		/*
1297 		 * The low 8 bits are for hash value (n+0);
1298 		 * The next 8 bits are for hash value (n+1), etc.
1299 		 */
1300 		reta >>= 8;
1301 		reta |= ((uint32_t)queue_id) << 24;
1302 		if ((i & 3) == 3) {
1303 			IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
1304 			reta = 0;
1305 		}
1306 	}
1307 
1308 	/* Perform hash on these packet types */
1309 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
1310 		rss_hash_config = rss_gethashconfig();
1311 	else {
1312 		/*
1313 		 * Disable UDP - IP fragments aren't currently being handled
1314 		 * and so we end up with a mix of 2-tuple and 4-tuple
1315 		 * traffic.
1316 		 */
1317 		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
1318 		                | RSS_HASHTYPE_RSS_TCP_IPV4
1319 		                | RSS_HASHTYPE_RSS_IPV6
1320 		                | RSS_HASHTYPE_RSS_TCP_IPV6;
1321 	}
1322 
1323 	mrqc = IXGBE_MRQC_RSSEN;
1324 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
1325 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
1326 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
1327 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
1328 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
1329 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
1330 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
1331 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
1332 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
1333 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
1334 		    __func__);
1335 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
1336 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
1337 		    __func__);
1338 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
1339 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
1340 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
1341 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
1342 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
1343 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
1344 		    __func__);
1345 	IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
1346 } /* ixv_initialize_rss_mapping */
1347 
1348 
1349 /************************************************************************
1350  * ixv_initialize_receive_units - Setup receive registers and features.
1351  ************************************************************************/
1352 static void
1353 ixv_initialize_receive_units(if_ctx_t ctx)
1354 {
1355 	struct adapter     *adapter = iflib_get_softc(ctx);
1356 	if_softc_ctx_t     scctx;
1357 	struct ixgbe_hw    *hw = &adapter->hw;
1358 	struct ifnet       *ifp = iflib_get_ifp(ctx);
1359 	struct ix_rx_queue *que = adapter->rx_queues;
1360 	u32                bufsz, psrtype;
1361 
1362 	if (ifp->if_mtu > ETHERMTU)
1363 		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1364 	else
1365 		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1366 
1367 	psrtype = IXGBE_PSRTYPE_TCPHDR
1368 	        | IXGBE_PSRTYPE_UDPHDR
1369 	        | IXGBE_PSRTYPE_IPV4HDR
1370 	        | IXGBE_PSRTYPE_IPV6HDR
1371 	        | IXGBE_PSRTYPE_L2HDR;
1372 
1373 	if (adapter->num_rx_queues > 1)
1374 		psrtype |= 1 << 29;
1375 
1376 	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1377 
1378 	/* Tell PF our max_frame size */
1379 	if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) {
1380 		device_printf(adapter->dev, "There is a problem with the PF setup.  It is likely the receive unit for this VF will not function correctly.\n");
1381 	}
1382 	scctx = adapter->shared;
1383 
1384 	for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
1385 		struct rx_ring *rxr = &que->rxr;
1386 		u64            rdba = rxr->rx_paddr;
1387 		u32            reg, rxdctl;
1388 		int            j = rxr->me;
1389 
1390 		/* Disable the queue */
1391 		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
1392 		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
1393 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
1394 		for (int k = 0; k < 10; k++) {
1395 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
1396 			    IXGBE_RXDCTL_ENABLE)
1397 				msec_delay(1);
1398 			else
1399 				break;
1400 		}
1401 		wmb();
1402 		/* Setup the Base and Length of the Rx Descriptor Ring */
1403 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
1404 		    (rdba & 0x00000000ffffffffULL));
1405 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
1406 		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j),
1407 		    scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));
1408 
1409 		/* Reset the ring indices */
1410 		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
1411 		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);
1412 
1413 		/* Set up the SRRCTL register */
1414 		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(j));
1415 		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
1416 		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
1417 		reg |= bufsz;
1418 		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1419 		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(j), reg);
1420 
1421 		/* Capture Rx Tail index */
1422 		rxr->tail = IXGBE_VFRDT(rxr->me);
1423 
1424 		/* Do the queue enabling last */
1425 		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
1426 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
1427 		for (int l = 0; l < 10; l++) {
1428 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
1429 			    IXGBE_RXDCTL_ENABLE)
1430 				break;
1431 			msec_delay(1);
1432 		}
1433 		wmb();
1434 
1435 		/* Set the Tail Pointer */
1436 #ifdef DEV_NETMAP
1437 		/*
1438 		 * In netmap mode, we must preserve the buffers made
1439 		 * available to userspace before the if_init()
1440 		 * (this is true by default on the TX side, because
1441 		 * init makes all buffers available to userspace).
1442 		 *
1443 		 * netmap_reset() and the device specific routines
1444 		 * (e.g. ixgbe_setup_receive_rings()) map these
1445 		 * buffers at the end of the NIC ring, so here we
1446 		 * must set the RDT (tail) register to make sure
1447 		 * they are not overwritten.
1448 		 *
1449 		 * In this driver the NIC ring starts at RDH = 0,
1450 		 * RDT points to the last slot available for reception (?),
1451 		 * so RDT = num_rx_desc - 1 means the whole ring is available.
1452 		 */
1453 		if (ifp->if_capenable & IFCAP_NETMAP) {
1454 			struct netmap_adapter *na = NA(ifp);
1455 			struct netmap_kring *kring = na->rx_rings[j];
1456 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
1457 
1458 			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
1459 		} else
1460 #endif /* DEV_NETMAP */
1461 			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
1462 			    scctx->isc_nrxd[0] - 1);
1463 	}
1464 
1465 	ixv_initialize_rss_mapping(adapter);
1466 } /* ixv_initialize_receive_units */
1467 
1468 /************************************************************************
1469  * ixv_setup_vlan_support
1470  ************************************************************************/
1471 static void
1472 ixv_setup_vlan_support(if_ctx_t ctx)
1473 {
1474 	struct adapter  *adapter = iflib_get_softc(ctx);
1475 	struct ixgbe_hw *hw = &adapter->hw;
1476 	u32             ctrl, vid, vfta, retry;
1477 
1478 	/*
1479 	 * We get here thru if_init, meaning
1480 	 * a soft reset, this has already cleared
1481 	 * the VFTA and other state, so if there
1482 	 * have been no vlan's registered do nothing.
1483 	 */
1484 	if (adapter->num_vlans == 0)
1485 		return;
1486 
1487 	/* Enable the queues */
1488 	for (int i = 0; i < adapter->num_rx_queues; i++) {
1489 		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
1490 		ctrl |= IXGBE_RXDCTL_VME;
1491 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
1492 		/*
1493 		 * Let Rx path know that it needs to store VLAN tag
1494 		 * as part of extra mbuf info.
1495 		 */
1496 		adapter->rx_queues[i].rxr.vtag_strip = TRUE;
1497 	}
1498 
1499 	/*
1500 	 * A soft reset zero's out the VFTA, so
1501 	 * we need to repopulate it now.
1502 	 */
1503 	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
1504 		if (ixv_shadow_vfta[i] == 0)
1505 			continue;
1506 		vfta = ixv_shadow_vfta[i];
1507 		/*
1508 		 * Reconstruct the vlan id's
1509 		 * based on the bits set in each
1510 		 * of the array ints.
1511 		 */
1512 		for (int j = 0; j < 32; j++) {
1513 			retry = 0;
1514 			if ((vfta & (1 << j)) == 0)
1515 				continue;
1516 			vid = (i * 32) + j;
1517 			/* Call the shared code mailbox routine */
1518 			while (hw->mac.ops.set_vfta(hw, vid, 0, TRUE, FALSE)) {
1519 				if (++retry > 5)
1520 					break;
1521 			}
1522 		}
1523 	}
1524 } /* ixv_setup_vlan_support */
1525 
1526 /************************************************************************
1527  * ixv_if_register_vlan
1528  *
1529  *   Run via a vlan config EVENT, it enables us to use the
1530  *   HW Filter table since we can get the vlan id. This just
1531  *   creates the entry in the soft version of the VFTA, init
1532  *   will repopulate the real table.
1533  ************************************************************************/
1534 static void
1535 ixv_if_register_vlan(if_ctx_t ctx, u16 vtag)
1536 {
1537 	struct adapter *adapter = iflib_get_softc(ctx);
1538 	u16            index, bit;
1539 
1540 	index = (vtag >> 5) & 0x7F;
1541 	bit = vtag & 0x1F;
1542 	ixv_shadow_vfta[index] |= (1 << bit);
1543 	++adapter->num_vlans;
1544 } /* ixv_if_register_vlan */
1545 
1546 /************************************************************************
1547  * ixv_if_unregister_vlan
1548  *
1549  *   Run via a vlan unconfig EVENT, remove our entry
1550  *   in the soft vfta.
1551  ************************************************************************/
1552 static void
1553 ixv_if_unregister_vlan(if_ctx_t ctx, u16 vtag)
1554 {
1555 	struct adapter *adapter = iflib_get_softc(ctx);
1556 	u16            index, bit;
1557 
1558 	index = (vtag >> 5) & 0x7F;
1559 	bit = vtag & 0x1F;
1560 	ixv_shadow_vfta[index] &= ~(1 << bit);
1561 	--adapter->num_vlans;
1562 } /* ixv_if_unregister_vlan */
1563 
1564 /************************************************************************
1565  * ixv_if_enable_intr
1566  ************************************************************************/
1567 static void
1568 ixv_if_enable_intr(if_ctx_t ctx)
1569 {
1570 	struct adapter  *adapter = iflib_get_softc(ctx);
1571 	struct ixgbe_hw *hw = &adapter->hw;
1572 	struct ix_rx_queue *que = adapter->rx_queues;
1573 	u32             mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
1574 
1575 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
1576 
1577 	mask = IXGBE_EIMS_ENABLE_MASK;
1578 	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
1579 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
1580 
1581 	for (int i = 0; i < adapter->num_rx_queues; i++, que++)
1582 		ixv_enable_queue(adapter, que->msix);
1583 
1584 	IXGBE_WRITE_FLUSH(hw);
1585 } /* ixv_if_enable_intr */
1586 
1587 /************************************************************************
1588  * ixv_if_disable_intr
1589  ************************************************************************/
1590 static void
1591 ixv_if_disable_intr(if_ctx_t ctx)
1592 {
1593 	struct adapter *adapter = iflib_get_softc(ctx);
1594 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
1595 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
1596 	IXGBE_WRITE_FLUSH(&adapter->hw);
1597 } /* ixv_if_disable_intr */
1598 
1599 /************************************************************************
1600  * ixv_if_rx_queue_intr_enable
1601  ************************************************************************/
1602 static int
1603 ixv_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
1604 {
1605 	struct adapter	*adapter = iflib_get_softc(ctx);
1606 	struct ix_rx_queue *que = &adapter->rx_queues[rxqid];
1607 
1608 	ixv_enable_queue(adapter, que->rxr.me);
1609 
1610 	return (0);
1611 } /* ixv_if_rx_queue_intr_enable */
1612 
1613 /************************************************************************
1614  * ixv_set_ivar
1615  *
1616  *   Setup the correct IVAR register for a particular MSI-X interrupt
1617  *    - entry is the register array entry
1618  *    - vector is the MSI-X vector for this queue
1619  *    - type is RX/TX/MISC
1620  ************************************************************************/
1621 static void
1622 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
1623 {
1624 	struct ixgbe_hw *hw = &adapter->hw;
1625 	u32             ivar, index;
1626 
1627 	vector |= IXGBE_IVAR_ALLOC_VAL;
1628 
1629 	if (type == -1) { /* MISC IVAR */
1630 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
1631 		ivar &= ~0xFF;
1632 		ivar |= vector;
1633 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
1634 	} else {          /* RX/TX IVARS */
1635 		index = (16 * (entry & 1)) + (8 * type);
1636 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
1637 		ivar &= ~(0xFF << index);
1638 		ivar |= (vector << index);
1639 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
1640 	}
1641 } /* ixv_set_ivar */
1642 
1643 /************************************************************************
1644  * ixv_configure_ivars
1645  ************************************************************************/
1646 static void
1647 ixv_configure_ivars(struct adapter *adapter)
1648 {
1649 	struct ix_rx_queue *que = adapter->rx_queues;
1650 
1651 	MPASS(adapter->num_rx_queues == adapter->num_tx_queues);
1652 
1653 	for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
1654 		/* First the RX queue entry */
1655 		ixv_set_ivar(adapter, i, que->msix, 0);
1656 		/* ... and the TX */
1657 		ixv_set_ivar(adapter, i, que->msix, 1);
1658 		/* Set an initial value in EITR */
1659 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
1660 		    IXGBE_EITR_DEFAULT);
1661 	}
1662 
1663 	/* For the mailbox interrupt */
1664 	ixv_set_ivar(adapter, 1, adapter->vector, -1);
1665 } /* ixv_configure_ivars */
1666 
1667 /************************************************************************
1668  * ixv_save_stats
1669  *
1670  *   The VF stats registers never have a truly virgin
1671  *   starting point, so this routine tries to make an
1672  *   artificial one, marking ground zero on attach as
1673  *   it were.
1674  ************************************************************************/
1675 static void
1676 ixv_save_stats(struct adapter *adapter)
1677 {
1678 	if (adapter->stats.vf.vfgprc || adapter->stats.vf.vfgptc) {
1679 		adapter->stats.vf.saved_reset_vfgprc +=
1680 		    adapter->stats.vf.vfgprc - adapter->stats.vf.base_vfgprc;
1681 		adapter->stats.vf.saved_reset_vfgptc +=
1682 		    adapter->stats.vf.vfgptc - adapter->stats.vf.base_vfgptc;
1683 		adapter->stats.vf.saved_reset_vfgorc +=
1684 		    adapter->stats.vf.vfgorc - adapter->stats.vf.base_vfgorc;
1685 		adapter->stats.vf.saved_reset_vfgotc +=
1686 		    adapter->stats.vf.vfgotc - adapter->stats.vf.base_vfgotc;
1687 		adapter->stats.vf.saved_reset_vfmprc +=
1688 		    adapter->stats.vf.vfmprc - adapter->stats.vf.base_vfmprc;
1689 	}
1690 } /* ixv_save_stats */
1691 
1692 /************************************************************************
1693  * ixv_init_stats
1694  ************************************************************************/
1695 static void
1696 ixv_init_stats(struct adapter *adapter)
1697 {
1698 	struct ixgbe_hw *hw = &adapter->hw;
1699 
1700 	adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
1701 	adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
1702 	adapter->stats.vf.last_vfgorc |=
1703 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
1704 
1705 	adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
1706 	adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
1707 	adapter->stats.vf.last_vfgotc |=
1708 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
1709 
1710 	adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
1711 
1712 	adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
1713 	adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
1714 	adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
1715 	adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
1716 	adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
1717 } /* ixv_init_stats */
1718 
/*
 * Fold a 32-bit hardware counter into a 64-bit running total.
 * 'last' caches the previous raw register value; a new reading
 * smaller than 'last' means the counter wrapped, so carry 2^32
 * into the total before splicing in the new low word.
 * Wrapped in do/while(0) so the macro is a single statement and
 * safe inside an unbraced if/else.
 */
#define UPDATE_STAT_32(reg, last, count)                \
do {                                                    \
	u32 current = IXGBE_READ_REG(hw, reg);          \
	if (current < last)                             \
		count += 0x100000000LL;                 \
	last = current;                                 \
	count &= 0xFFFFFFFF00000000LL;                  \
	count |= current;                               \
} while (0)
1728 
/*
 * Fold a 36-bit hardware counter (split across LSB/MSB registers)
 * into a 64-bit running total, carrying 2^36 on wrap-around.
 * Wrapped in do/while(0) so the macro is a single statement and
 * safe inside an unbraced if/else.
 */
#define UPDATE_STAT_36(lsb, msb, last, count)           \
do {                                                    \
	u64 cur_lsb = IXGBE_READ_REG(hw, lsb);          \
	u64 cur_msb = IXGBE_READ_REG(hw, msb);          \
	u64 current = ((cur_msb << 32) | cur_lsb);      \
	if (current < last)                             \
		count += 0x1000000000LL;                \
	last = current;                                 \
	count &= 0xFFFFFFF000000000LL;                  \
	count |= current;                               \
} while (0)
1740 
1741 /************************************************************************
1742  * ixv_update_stats - Update the board statistics counters.
1743  ************************************************************************/
1744 void
1745 ixv_update_stats(struct adapter *adapter)
1746 {
1747 	struct ixgbe_hw *hw = &adapter->hw;
1748 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
1749 
1750 	UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc,
1751 	    adapter->stats.vf.vfgprc);
1752 	UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc,
1753 	    adapter->stats.vf.vfgptc);
1754 	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
1755 	    adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc);
1756 	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
1757 	    adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc);
1758 	UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc,
1759 	    adapter->stats.vf.vfmprc);
1760 
1761 	/* Fill out the OS statistics structure */
1762 	IXGBE_SET_IPACKETS(adapter, stats->vfgprc);
1763 	IXGBE_SET_OPACKETS(adapter, stats->vfgptc);
1764 	IXGBE_SET_IBYTES(adapter, stats->vfgorc);
1765 	IXGBE_SET_OBYTES(adapter, stats->vfgotc);
1766 	IXGBE_SET_IMCASTS(adapter, stats->vfmprc);
1767 } /* ixv_update_stats */
1768 
1769 /************************************************************************
1770  * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
1771  ************************************************************************/
1772 static void
1773 ixv_add_stats_sysctls(struct adapter *adapter)
1774 {
1775 	device_t                dev = adapter->dev;
1776 	struct ix_tx_queue      *tx_que = adapter->tx_queues;
1777 	struct ix_rx_queue      *rx_que = adapter->rx_queues;
1778 	struct sysctl_ctx_list  *ctx = device_get_sysctl_ctx(dev);
1779 	struct sysctl_oid       *tree = device_get_sysctl_tree(dev);
1780 	struct sysctl_oid_list  *child = SYSCTL_CHILDREN(tree);
1781 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
1782 	struct sysctl_oid       *stat_node, *queue_node;
1783 	struct sysctl_oid_list  *stat_list, *queue_list;
1784 
1785 #define QUEUE_NAME_LEN 32
1786 	char                    namebuf[QUEUE_NAME_LEN];
1787 
1788 	/* Driver Statistics */
1789 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1790 	    CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
1791 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1792 	    CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");
1793 
1794 	for (int i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
1795 		struct tx_ring *txr = &tx_que->txr;
1796 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1797 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1798 		    CTLFLAG_RD, NULL, "Queue Name");
1799 		queue_list = SYSCTL_CHILDREN(queue_node);
1800 
1801 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
1802 		    CTLFLAG_RD, &(txr->tso_tx), "TSO Packets");
1803 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1804 		    CTLFLAG_RD, &(txr->total_packets), "TX Packets");
1805 	}
1806 
1807 	for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++) {
1808 		struct rx_ring *rxr = &rx_que->rxr;
1809 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1810 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1811 		    CTLFLAG_RD, NULL, "Queue Name");
1812 		queue_list = SYSCTL_CHILDREN(queue_node);
1813 
1814 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1815 		    CTLFLAG_RD, &(rx_que->irqs), "IRQs on queue");
1816 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1817 		    CTLFLAG_RD, &(rxr->rx_packets), "RX packets");
1818 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1819 		    CTLFLAG_RD, &(rxr->rx_bytes), "RX bytes");
1820 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1821 		    CTLFLAG_RD, &(rxr->rx_discarded), "Discarded RX packets");
1822 	}
1823 
1824 	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
1825 	    CTLFLAG_RD, NULL, "VF Statistics (read from HW registers)");
1826 	stat_list = SYSCTL_CHILDREN(stat_node);
1827 
1828 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1829 	    CTLFLAG_RD, &stats->vfgprc, "Good Packets Received");
1830 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1831 	    CTLFLAG_RD, &stats->vfgorc, "Good Octets Received");
1832 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1833 	    CTLFLAG_RD, &stats->vfmprc, "Multicast Packets Received");
1834 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1835 	    CTLFLAG_RD, &stats->vfgptc, "Good Packets Transmitted");
1836 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1837 	    CTLFLAG_RD, &stats->vfgotc, "Good Octets Transmitted");
1838 } /* ixv_add_stats_sysctls */
1839 
1840 /************************************************************************
1841  * ixv_print_debug_info
1842  *
 *   Called from the "debug" sysctl handler (ixv_sysctl_debug).
1844  *   Provides a way to take a look at important statistics
1845  *   maintained by the driver and hardware.
1846  ************************************************************************/
1847 static void
1848 ixv_print_debug_info(struct adapter *adapter)
1849 {
1850 	device_t        dev = adapter->dev;
1851 	struct ixgbe_hw *hw = &adapter->hw;
1852 
1853 	device_printf(dev, "Error Byte Count = %u \n",
1854 	    IXGBE_READ_REG(hw, IXGBE_ERRBC));
1855 
1856 	device_printf(dev, "MBX IRQ Handled: %lu\n", (long)adapter->link_irq);
1857 } /* ixv_print_debug_info */
1858 
1859 /************************************************************************
1860  * ixv_sysctl_debug
1861  ************************************************************************/
1862 static int
1863 ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
1864 {
1865 	struct adapter *adapter;
1866 	int            error, result;
1867 
1868 	result = -1;
1869 	error = sysctl_handle_int(oidp, &result, 0, req);
1870 
1871 	if (error || !req->newptr)
1872 		return (error);
1873 
1874 	if (result == 1) {
1875 		adapter = (struct adapter *)arg1;
1876 		ixv_print_debug_info(adapter);
1877 	}
1878 
1879 	return error;
1880 } /* ixv_sysctl_debug */
1881 
1882 /************************************************************************
1883  * ixv_init_device_features
1884  ************************************************************************/
1885 static void
1886 ixv_init_device_features(struct adapter *adapter)
1887 {
1888 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
1889 	                  | IXGBE_FEATURE_VF
1890 	                  | IXGBE_FEATURE_RSS
1891 	                  | IXGBE_FEATURE_LEGACY_TX;
1892 
1893 	/* A tad short on feature flags for VFs, atm. */
1894 	switch (adapter->hw.mac.type) {
1895 	case ixgbe_mac_82599_vf:
1896 		break;
1897 	case ixgbe_mac_X540_vf:
1898 		break;
1899 	case ixgbe_mac_X550_vf:
1900 	case ixgbe_mac_X550EM_x_vf:
1901 	case ixgbe_mac_X550EM_a_vf:
1902 		adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
1903 		break;
1904 	default:
1905 		break;
1906 	}
1907 
1908 	/* Enabled by default... */
1909 	/* Is a virtual function (VF) */
1910 	if (adapter->feat_cap & IXGBE_FEATURE_VF)
1911 		adapter->feat_en |= IXGBE_FEATURE_VF;
1912 	/* Netmap */
1913 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
1914 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
1915 	/* Receive-Side Scaling (RSS) */
1916 	if (adapter->feat_cap & IXGBE_FEATURE_RSS)
1917 		adapter->feat_en |= IXGBE_FEATURE_RSS;
1918 	/* Needs advanced context descriptor regardless of offloads req'd */
1919 	if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
1920 		adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
1921 } /* ixv_init_device_features */
1922 
1923