xref: /freebsd/sys/dev/ixgbe/if_ixv.c (revision eb69d1f144a6fcc765d1b9d44a5ae8082353e70b)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2017, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 
36 #include "opt_inet.h"
37 #include "opt_inet6.h"
38 
39 #include "ixgbe.h"
40 #include "ifdi_if.h"
41 
42 #include <net/netmap.h>
43 #include <dev/netmap/netmap_kern.h>
44 
/************************************************************************
 * Driver version
 ************************************************************************/
char ixv_driver_version[] = "2.0.0-k";

/************************************************************************
 * PCI Device ID Table
 *
 *   Used by probe to select devices to load on
 *   Last field stores an index into ixv_strings
 *   Last entry must be all 0s
 *
 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 ************************************************************************/
static pci_vendor_info_t ixv_vendor_info_array[] =
{
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
	/* required last entry */
PVID_END
};
69 
70 /************************************************************************
71  * Function prototypes
72  ************************************************************************/
73 static void     *ixv_register(device_t dev);
74 static int      ixv_if_attach_pre(if_ctx_t ctx);
75 static int      ixv_if_attach_post(if_ctx_t ctx);
76 static int      ixv_if_detach(if_ctx_t ctx);
77 
78 static int      ixv_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
79 static int      ixv_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
80 static int      ixv_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
81 static void     ixv_if_queues_free(if_ctx_t ctx);
82 static void     ixv_identify_hardware(if_ctx_t ctx);
83 static void     ixv_init_device_features(struct adapter *);
84 static int      ixv_allocate_pci_resources(if_ctx_t ctx);
85 static void     ixv_free_pci_resources(if_ctx_t ctx);
86 static int      ixv_setup_interface(if_ctx_t ctx);
87 static void     ixv_if_media_status(if_ctx_t , struct ifmediareq *);
88 static int      ixv_if_media_change(if_ctx_t ctx);
89 static void     ixv_if_update_admin_status(if_ctx_t ctx);
90 static int      ixv_if_msix_intr_assign(if_ctx_t ctx, int msix);
91 
92 static int      ixv_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
93 static void     ixv_if_init(if_ctx_t ctx);
94 static void     ixv_if_local_timer(if_ctx_t ctx, uint16_t qid);
95 static void     ixv_if_stop(if_ctx_t ctx);
96 static int      ixv_negotiate_api(struct adapter *);
97 
98 static void     ixv_initialize_transmit_units(if_ctx_t ctx);
99 static void     ixv_initialize_receive_units(if_ctx_t ctx);
100 static void     ixv_initialize_rss_mapping(struct adapter *);
101 
102 static void     ixv_setup_vlan_support(if_ctx_t ctx);
103 static void     ixv_configure_ivars(struct adapter *);
104 static void     ixv_if_enable_intr(if_ctx_t ctx);
105 static void     ixv_if_disable_intr(if_ctx_t ctx);
106 static void     ixv_if_multi_set(if_ctx_t ctx);
107 
108 static void     ixv_if_register_vlan(if_ctx_t, u16);
109 static void     ixv_if_unregister_vlan(if_ctx_t, u16);
110 
111 static uint64_t ixv_if_get_counter(if_ctx_t, ift_counter);
112 
113 static void     ixv_save_stats(struct adapter *);
114 static void     ixv_init_stats(struct adapter *);
115 static void     ixv_update_stats(struct adapter *);
116 static void     ixv_add_stats_sysctls(struct adapter *adapter);
117 
118 static int      ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
119 static void     ixv_set_ivar(struct adapter *, u8, u8, s8);
120 
121 static u8       *ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
122 
123 /* The MSI-X Interrupt handlers */
124 static int      ixv_msix_que(void *);
125 static int      ixv_msix_mbx(void *);
126 
/************************************************************************
 * FreeBSD Device Interface Entry Points
 *
 *   All newbus entry points except register are forwarded to the
 *   generic iflib handlers; iflib calls back into this driver via
 *   the ifdi_* methods declared in ixv_if_methods below.
 ************************************************************************/
static device_method_t ixv_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, ixv_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
	DEVMETHOD_END
};
139 
/* Newbus driver glue: softc is the shared struct adapter. */
static driver_t ixv_driver = {
	"ixv", ixv_methods, sizeof(struct adapter),
};

devclass_t ixv_devclass;
DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
MODULE_DEPEND(ixv, pci, 1, 1, 1);
MODULE_DEPEND(ixv, ether, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(ixv, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */
151 
/* iflib callback table: maps each ifdi_* method to its handler. */
static device_method_t ixv_if_methods[] = {
	DEVMETHOD(ifdi_attach_pre, ixv_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, ixv_if_attach_post),
	DEVMETHOD(ifdi_detach, ixv_if_detach),
	DEVMETHOD(ifdi_init, ixv_if_init),
	DEVMETHOD(ifdi_stop, ixv_if_stop),
	DEVMETHOD(ifdi_msix_intr_assign, ixv_if_msix_intr_assign),
	DEVMETHOD(ifdi_intr_enable, ixv_if_enable_intr),
	DEVMETHOD(ifdi_intr_disable, ixv_if_disable_intr),
	/* NOTE(review): tx_queue_intr_enable points at the RX handler —
	 * presumably intentional since TX and RX share vectors on the VF;
	 * confirm against ixv_if_msix_intr_assign. */
	DEVMETHOD(ifdi_tx_queue_intr_enable, ixv_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_rx_queue_intr_enable, ixv_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queues_alloc, ixv_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, ixv_if_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, ixv_if_queues_free),
	DEVMETHOD(ifdi_update_admin_status, ixv_if_update_admin_status),
	DEVMETHOD(ifdi_multi_set, ixv_if_multi_set),
	DEVMETHOD(ifdi_mtu_set, ixv_if_mtu_set),
	DEVMETHOD(ifdi_media_status, ixv_if_media_status),
	DEVMETHOD(ifdi_media_change, ixv_if_media_change),
	DEVMETHOD(ifdi_timer, ixv_if_local_timer),
	DEVMETHOD(ifdi_vlan_register, ixv_if_register_vlan),
	DEVMETHOD(ifdi_vlan_unregister, ixv_if_unregister_vlan),
	DEVMETHOD(ifdi_get_counter, ixv_if_get_counter),
	DEVMETHOD_END
};

static driver_t ixv_if_driver = {
  "ixv_if", ixv_if_methods, sizeof(struct adapter)
};
181 
/*
 * TUNEABLE PARAMETERS:
 */

/* Flow control setting, default to full */
static int ixv_flow_control = ixgbe_fc_full;
TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);

/*
 * Header split: this causes the hardware to DMA
 * the header into a separate mbuf from the payload,
 * it can be a performance win in some workloads, but
 * in others it actually hurts, its off by default.
 */
static int ixv_header_split = FALSE;
TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);

/*
 * Shadow VFTA table, this is needed because
 * the real filter table gets cleared during
 * a soft reset and we need to repopulate it.
 */
static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];

/* TX/RX descriptor handlers shared with the PF driver (ix_txrx.c). */
extern struct if_txrx ixgbe_txrx;
206 
/*
 * Static iflib device description: DMA alignment/size limits, queue
 * counts, and descriptor-ring bounds handed to iflib before attach.
 */
static struct if_shared_ctx ixv_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_q_align = PAGE_SIZE,/* max(DBA_ALIGN, PAGE_SIZE) */
	.isc_tx_maxsize = IXGBE_TSO_SIZE,

	.isc_tx_maxsegsize = PAGE_SIZE,

	.isc_rx_maxsize = MJUM16BYTES,
	.isc_rx_nsegments = 1,
	.isc_rx_maxsegsize = MJUM16BYTES,
	.isc_nfl = 1,
	.isc_ntxqs = 1,
	.isc_nrxqs = 1,
	.isc_admin_intrcnt = 1,
	.isc_vendor_info = ixv_vendor_info_array,
	.isc_driver_version = ixv_driver_version,
	.isc_driver = &ixv_if_driver,

	.isc_nrxd_min = {MIN_RXD},
	.isc_ntxd_min = {MIN_TXD},
	.isc_nrxd_max = {MAX_RXD},
	.isc_ntxd_max = {MAX_TXD},
	.isc_nrxd_default = {DEFAULT_RXD},
	.isc_ntxd_default = {DEFAULT_TXD},
};

if_shared_ctx_t ixv_sctx = &ixv_sctx_init;

/*
 * device_register entry point: hand iflib our shared context.
 */
static void *
ixv_register(device_t dev)
{
	return (ixv_sctx);
}
240 
/************************************************************************
 * ixv_if_tx_queues_alloc
 *
 *   iflib callback: allocate per-queue TX software state (one
 *   struct ix_tx_queue per queue set) and bind each ring to the
 *   descriptor memory (vaddrs/paddrs) that iflib already DMA-mapped.
 *
 *   return 0 on success, ENOMEM on allocation failure
 ************************************************************************/
static int
ixv_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
                       int ntxqs, int ntxqsets)
{
	struct adapter     *adapter = iflib_get_softc(ctx);
	if_softc_ctx_t     scctx = adapter->shared;
	struct ix_tx_queue *que;
	int                i, j, error;

	MPASS(adapter->num_tx_queues == ntxqsets);
	MPASS(ntxqs == 1);

	/* Allocate queue structure memory */
	adapter->tx_queues =
	    (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
	                                 M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!adapter->tx_queues) {
		device_printf(iflib_get_dev(ctx),
		    "Unable to allocate TX ring memory\n");
		return (ENOMEM);
	}

	for (i = 0, que = adapter->tx_queues; i < ntxqsets; i++, que++) {
		struct tx_ring *txr = &que->txr;

		txr->me = i;
		txr->adapter =  que->adapter = adapter;
		/* mark this queue live in the active-queue bitmap */
		adapter->active_queues |= (u64)1 << txr->me;

		/* Allocate report status array */
		if (!(txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_DEVBUF, M_NOWAIT | M_ZERO))) {
			error = ENOMEM;
			goto fail;
		}
		/* no descriptor has a pending status report yet */
		for (j = 0; j < scctx->isc_ntxd[0]; j++)
			txr->tx_rsq[j] = QIDX_INVALID;
		/* get the virtual and physical address of the hardware queues */
		txr->tail = IXGBE_VFTDT(txr->me);
		txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i*ntxqs];
		txr->tx_paddr = paddrs[i*ntxqs];

		txr->bytes = 0;
		txr->total_packets = 0;

	}

	device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
	    adapter->num_tx_queues);

	return (0);

 fail:
	/* unwinds partial allocations; tolerates NULL members */
	ixv_if_queues_free(ctx);

	return (error);
} /* ixv_if_tx_queues_alloc */
300 
301 /************************************************************************
302  * ixv_if_rx_queues_alloc
303  ************************************************************************/
304 static int
305 ixv_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
306                        int nrxqs, int nrxqsets)
307 {
308 	struct adapter     *adapter = iflib_get_softc(ctx);
309 	struct ix_rx_queue *que;
310 	int                i, error;
311 
312 	MPASS(adapter->num_rx_queues == nrxqsets);
313 	MPASS(nrxqs == 1);
314 
315 	/* Allocate queue structure memory */
316 	adapter->rx_queues =
317 	    (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue) * nrxqsets,
318 	                                 M_DEVBUF, M_NOWAIT | M_ZERO);
319 	if (!adapter->rx_queues) {
320 		device_printf(iflib_get_dev(ctx),
321 		    "Unable to allocate TX ring memory\n");
322 		error = ENOMEM;
323 		goto fail;
324 	}
325 
326 	for (i = 0, que = adapter->rx_queues; i < nrxqsets; i++, que++) {
327 		struct rx_ring *rxr = &que->rxr;
328 		rxr->me = i;
329 		rxr->adapter = que->adapter = adapter;
330 
331 
332 		/* get the virtual and physical address of the hw queues */
333 		rxr->tail = IXGBE_VFRDT(rxr->me);
334 		rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
335 		rxr->rx_paddr = paddrs[i*nrxqs];
336 		rxr->bytes = 0;
337 		rxr->que = que;
338 	}
339 
340 	device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
341 	    adapter->num_rx_queues);
342 
343 	return (0);
344 
345 fail:
346 	ixv_if_queues_free(ctx);
347 
348 	return (error);
349 } /* ixv_if_rx_queues_alloc */
350 
/************************************************************************
 * ixv_if_queues_free
 *
 *   Release all software queue state created by the *_queues_alloc
 *   callbacks.  Also serves as the error-unwind path for both, so it
 *   must tolerate partially-initialized (or NULL) state.
 ************************************************************************/
static void
ixv_if_queues_free(if_ctx_t ctx)
{
	struct adapter     *adapter = iflib_get_softc(ctx);
	struct ix_tx_queue *que = adapter->tx_queues;
	int                i;

	if (que == NULL)
		goto free;

	for (i = 0; i < adapter->num_tx_queues; i++, que++) {
		struct tx_ring *txr = &que->txr;
		/* tx_rsq arrays are allocated in order, so the first NULL
		 * means the remaining queues were never populated */
		if (txr->tx_rsq == NULL)
			break;

		free(txr->tx_rsq, M_DEVBUF);
		txr->tx_rsq = NULL;
	}
	if (adapter->tx_queues != NULL)
		free(adapter->tx_queues, M_DEVBUF);
free:
	if (adapter->rx_queues != NULL)
		free(adapter->rx_queues, M_DEVBUF);
	/* clear both pointers so a second call is harmless */
	adapter->tx_queues = NULL;
	adapter->rx_queues = NULL;
} /* ixv_if_queues_free */
380 
/************************************************************************
 * ixv_if_attach_pre - Device initialization routine
 *
 *   Called when the driver is being loaded.
 *   Identifies the type of hardware, allocates all resources
 *   and initializes the hardware.
 *
 *   return 0 on success, positive on failure
 ************************************************************************/
static int
ixv_if_attach_pre(if_ctx_t ctx)
{
	struct adapter  *adapter;
	device_t        dev;
	if_softc_ctx_t  scctx;
	struct ixgbe_hw *hw;
	int             error = 0;

	INIT_DEBUGOUT("ixv_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	dev = iflib_get_dev(ctx);
	adapter = iflib_get_softc(ctx);
	adapter->dev = dev;
	adapter->ctx = ctx;
	adapter->hw.back = adapter;
	scctx = adapter->shared = iflib_get_softc_ctx(ctx);
	adapter->media = iflib_get_media(ctx);
	hw = &adapter->hw;

	/* Do base PCI setup - map BAR0 */
	if (ixv_allocate_pci_resources(ctx)) {
		device_printf(dev, "ixv_allocate_pci_resources() failed!\n");
		error = ENXIO;
		goto err_out;
	}

	/* SYSCTL APIs */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixv_sysctl_debug, "I",
	    "Debug Info");

	/* Determine hardware revision */
	ixv_identify_hardware(ctx);
	ixv_init_device_features(adapter);

	/* Initialize the shared code */
	error = ixgbe_init_ops_vf(hw);
	if (error) {
		device_printf(dev, "ixgbe_init_ops_vf() failed!\n");
		error = EIO;
		goto err_out;
	}

	/* Setup the mailbox */
	ixgbe_init_mbx_params_vf(hw);

	/* Reset the VF; IXGBE_ERR_RESET_FAILED gets a specific message but
	 * any nonzero status aborts the attach. */
	error = hw->mac.ops.reset_hw(hw);
	if (error == IXGBE_ERR_RESET_FAILED)
		device_printf(dev, "...reset_hw() failure: Reset Failed!\n");
	else if (error)
		device_printf(dev, "...reset_hw() failed with error %d\n",
		    error);
	if (error) {
		error = EIO;
		goto err_out;
	}

	error = hw->mac.ops.init_hw(hw);
	if (error) {
		device_printf(dev, "...init_hw() failed with error %d\n",
		    error);
		error = EIO;
		goto err_out;
	}

	/* Negotiate mailbox API version */
	error = ixv_negotiate_api(adapter);
	if (error) {
		device_printf(dev,
		    "Mailbox API negotiation failed during attach!\n");
		goto err_out;
	}

	/* If no mac address was assigned, make a random one */
	if (!ixv_check_ether_addr(hw->mac.addr)) {
		u8 addr[ETHER_ADDR_LEN];
		arc4rand(&addr, sizeof(addr), 0);
		addr[0] &= 0xFE;	/* clear the multicast bit */
		addr[0] |= 0x02;	/* set the locally-administered bit */
		bcopy(addr, hw->mac.addr, sizeof(addr));
		bcopy(addr, hw->mac.perm_addr, sizeof(addr));
	}

	/* Most of the iflib initialization... */

	iflib_set_mac(ctx, hw->mac.addr);
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550_vf:
	case ixgbe_mac_X550EM_x_vf:
	case ixgbe_mac_X550EM_a_vf:
		/* X550-family VFs get two queue pairs, everything else one */
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 2;
		break;
	default:
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 1;
	}
	scctx->isc_txqsizes[0] =
	    roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
	    sizeof(u32), DBA_ALIGN);
	scctx->isc_rxqsizes[0] =
	    roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
	    DBA_ALIGN);
	/* XXX */
	scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
	    CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
	scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
	scctx->isc_msix_bar = PCIR_BAR(MSIX_82598_BAR);
	scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
	scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
	scctx->isc_tx_tso_segsize_max = PAGE_SIZE;

	scctx->isc_txrx = &ixgbe_txrx;

	/*
	 * Tell the upper layer(s) we support everything the PF
	 * driver does except...
	 *   hardware stats
	 *   Wake-on-LAN
	 */
	scctx->isc_capenable = IXGBE_CAPS;
	scctx->isc_capenable ^= IFCAP_HWSTATS | IFCAP_WOL;

	INIT_DEBUGOUT("ixv_if_attach_pre: end");

	return (0);

err_out:
	ixv_free_pci_resources(ctx);

	return (error);
} /* ixv_if_attach_pre */
523 
/************************************************************************
 * ixv_if_attach_post - second-stage attach
 *
 *   Runs after iflib has created the ifnet: set up the OS-visible
 *   interface, then seed and publish the statistics sysctls.
 *
 *   return 0 on success, nonzero error from interface setup
 ************************************************************************/
static int
ixv_if_attach_post(if_ctx_t ctx)
{
	struct adapter *adapter = iflib_get_softc(ctx);
	device_t       dev = iflib_get_dev(ctx);
	int            error = 0;

	/* Setup OS specific network interface */
	error = ixv_setup_interface(ctx);
	if (error) {
		device_printf(dev, "Interface setup failed: %d\n", error);
		goto end;
	}

	/* Do the stats setup */
	ixv_save_stats(adapter);
	ixv_init_stats(adapter);
	ixv_add_stats_sysctls(adapter);

end:
	return error;
} /* ixv_if_attach_post */
546 
/************************************************************************
 * ixv_detach - Device removal routine
 *
 *   Called when the driver is being removed.
 *   Stops the adapter and deallocates all the resources
 *   that were allocated for driver operation.
 *
 *   Queue memory is released separately via ifdi_queues_free; only
 *   the PCI resources are torn down here.
 *
 *   return 0 on success, positive on failure
 ************************************************************************/
static int
ixv_if_detach(if_ctx_t ctx)
{
	INIT_DEBUGOUT("ixv_detach: begin");

	ixv_free_pci_resources(ctx);

	return (0);
} /* ixv_if_detach */
565 
566 /************************************************************************
567  * ixv_if_mtu_set
568  ************************************************************************/
569 static int
570 ixv_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
571 {
572 	struct adapter *adapter = iflib_get_softc(ctx);
573 	struct ifnet   *ifp = iflib_get_ifp(ctx);
574 	int            error = 0;
575 
576 	IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
577 	if (mtu > IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR) {
578 		error = EINVAL;
579 	} else {
580 		ifp->if_mtu = mtu;
581 		adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
582 	}
583 
584 	return error;
585 } /* ixv_if_mtu_set */
586 
/************************************************************************
 * ixv_if_init - Init entry point
 *
 *   Used in two ways: It is used by the stack as an init entry
 *   point in network interface structure. It is also used
 *   by the driver as a hw/sw initialization routine to get
 *   to a consistent state.
 *
 *   Ordering below matters: the adapter is stopped, the MAC address
 *   programmed, the VF reset, and only then are queues, VLANs, and
 *   interrupts configured.
 *
 *   return 0 on success, positive on failure
 ************************************************************************/
static void
ixv_if_init(if_ctx_t ctx)
{
	struct adapter  *adapter = iflib_get_softc(ctx);
	struct ifnet    *ifp = iflib_get_ifp(ctx);
	device_t        dev = iflib_get_dev(ctx);
	struct ixgbe_hw *hw = &adapter->hw;
	int             error = 0;

	INIT_DEBUGOUT("ixv_if_init: begin");
	/* clear the stopped flag first so stop_adapter actually runs */
	hw->adapter_stopped = FALSE;
	hw->mac.ops.stop_adapter(hw);

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);

	/* Reset VF and renegotiate mailbox API version */
	hw->mac.ops.reset_hw(hw);
	error = ixv_negotiate_api(adapter);
	if (error) {
		device_printf(dev,
		    "Mailbox API negotiation failed in if_init!\n");
		return;
	}

	ixv_initialize_transmit_units(ctx);

	/* Setup Multicast table */
	ixv_if_multi_set(ctx);

	/*
	 * Determine the correct mbuf pool
	 * for doing jumbo/headersplit
	 */
	if (ifp->if_mtu > ETHERMTU)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else
		adapter->rx_mbuf_sz = MCLBYTES;

	/* Configure RX settings */
	ixv_initialize_receive_units(ctx);

	/* Set up VLAN offload and filter */
	ixv_setup_vlan_support(ctx);

	/* Set up MSI-X routing */
	ixv_configure_ivars(adapter);

	/* Set up auto-mask */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);

	/* Stats init */
	ixv_init_stats(adapter);

	/* Config/Enable Link */
	hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
	    FALSE);

	/* And now turn on interrupts */
	ixv_if_enable_intr(ctx);

	return;
} /* ixv_if_init */
667 
668 /************************************************************************
669  * ixv_enable_queue
670  ************************************************************************/
671 static inline void
672 ixv_enable_queue(struct adapter *adapter, u32 vector)
673 {
674 	struct ixgbe_hw *hw = &adapter->hw;
675 	u32             queue = 1 << vector;
676 	u32             mask;
677 
678 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
679 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
680 } /* ixv_enable_queue */
681 
682 /************************************************************************
683  * ixv_disable_queue
684  ************************************************************************/
685 static inline void
686 ixv_disable_queue(struct adapter *adapter, u32 vector)
687 {
688 	struct ixgbe_hw *hw = &adapter->hw;
689 	u64             queue = (u64)(1 << vector);
690 	u32             mask;
691 
692 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
693 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
694 } /* ixv_disable_queue */
695 
696 
/************************************************************************
 * ixv_msix_que - MSI-X Queue Interrupt Service routine
 *
 *   Filter (top-half) handler: mask this queue's interrupt, bump the
 *   counter, and ask iflib to run the threaded handler.  The queue is
 *   re-enabled later via ifdi_rx_queue_intr_enable.
 ************************************************************************/
static int
ixv_msix_que(void *arg)
{
	struct ix_rx_queue *que = arg;
	struct adapter     *adapter = que->adapter;

	ixv_disable_queue(adapter, que->msix);
	++que->irqs;

	return (FILTER_SCHEDULE_THREAD);
} /* ixv_msix_que */
711 
/************************************************************************
 * ixv_msix_mbx - mailbox/link MSI-X handler
 *
 *   Reads and acks the interrupt cause; on a link-status change,
 *   defers to the iflib admin task.  The OTHER mask bit is re-armed
 *   before returning.
 ************************************************************************/
static int
ixv_msix_mbx(void *arg)
{
	struct adapter  *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	u32             reg;

	++adapter->link_irq;

	/* First get the cause */
	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);

	/* Link status change */
	if (reg & IXGBE_EICR_LSC)
		iflib_admin_intr_deferred(adapter->ctx);

	/* re-arm the "other" (mailbox/link) interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);

	return (FILTER_HANDLED);
} /* ixv_msix_mbx */
737 
738 /************************************************************************
739  * ixv_media_status - Media Ioctl callback
740  *
741  *   Called whenever the user queries the status of
742  *   the interface using ifconfig.
743  ************************************************************************/
744 static void
745 ixv_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
746 {
747 	struct adapter *adapter = iflib_get_softc(ctx);
748 
749 	INIT_DEBUGOUT("ixv_media_status: begin");
750 
751 	iflib_admin_intr_deferred(ctx);
752 
753 	ifmr->ifm_status = IFM_AVALID;
754 	ifmr->ifm_active = IFM_ETHER;
755 
756 	if (!adapter->link_active)
757 		return;
758 
759 	ifmr->ifm_status |= IFM_ACTIVE;
760 
761 	switch (adapter->link_speed) {
762 		case IXGBE_LINK_SPEED_1GB_FULL:
763 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
764 			break;
765 		case IXGBE_LINK_SPEED_10GB_FULL:
766 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
767 			break;
768 		case IXGBE_LINK_SPEED_100_FULL:
769 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
770 			break;
771 		case IXGBE_LINK_SPEED_10_FULL:
772 			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
773 			break;
774 	}
775 } /* ixv_if_media_status */
776 
777 /************************************************************************
778  * ixv_if_media_change - Media Ioctl callback
779  *
780  *   Called when the user changes speed/duplex using
781  *   media/mediopt option with ifconfig.
782  ************************************************************************/
783 static int
784 ixv_if_media_change(if_ctx_t ctx)
785 {
786 	struct adapter *adapter = iflib_get_softc(ctx);
787 	struct ifmedia *ifm = iflib_get_media(ctx);
788 
789 	INIT_DEBUGOUT("ixv_media_change: begin");
790 
791 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
792 		return (EINVAL);
793 
794 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
795 	case IFM_AUTO:
796 		break;
797 	default:
798 		device_printf(adapter->dev, "Only auto media type\n");
799 		return (EINVAL);
800 	}
801 
802 	return (0);
803 } /* ixv_if_media_change */
804 
805 
806 /************************************************************************
807  * ixv_negotiate_api
808  *
809  *   Negotiate the Mailbox API with the PF;
810  *   start with the most featured API first.
811  ************************************************************************/
812 static int
813 ixv_negotiate_api(struct adapter *adapter)
814 {
815 	struct ixgbe_hw *hw = &adapter->hw;
816 	int             mbx_api[] = { ixgbe_mbox_api_11,
817 	                              ixgbe_mbox_api_10,
818 	                              ixgbe_mbox_api_unknown };
819 	int             i = 0;
820 
821 	while (mbx_api[i] != ixgbe_mbox_api_unknown) {
822 		if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
823 			return (0);
824 		i++;
825 	}
826 
827 	return (EINVAL);
828 } /* ixv_negotiate_api */
829 
830 
831 /************************************************************************
832  * ixv_if_multi_set - Multicast Update
833  *
834  *   Called whenever multicast address list is updated.
835  ************************************************************************/
836 static void
837 ixv_if_multi_set(if_ctx_t ctx)
838 {
839 	u8       mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
840 	struct adapter     *adapter = iflib_get_softc(ctx);
841 	u8                 *update_ptr;
842 	struct ifmultiaddr *ifma;
843 	if_t               ifp = iflib_get_ifp(ctx);
844 	int                mcnt = 0;
845 
846 	IOCTL_DEBUGOUT("ixv_if_multi_set: begin");
847 
848 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
849 		if (ifma->ifma_addr->sa_family != AF_LINK)
850 			continue;
851 		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
852 		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
853 		    IXGBE_ETH_LENGTH_OF_ADDRESS);
854 		mcnt++;
855 	}
856 
857 	update_ptr = mta;
858 
859 	adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
860 	    ixv_mc_array_itr, TRUE);
861 } /* ixv_if_multi_set */
862 
863 /************************************************************************
864  * ixv_mc_array_itr
865  *
866  *   An iterator function needed by the multicast shared code.
867  *   It feeds the shared code routine the addresses in the
868  *   array of ixv_set_multi() one by one.
869  ************************************************************************/
870 static u8 *
871 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
872 {
873 	u8 *addr = *update_ptr;
874 	u8 *newptr;
875 
876 	*vmdq = 0;
877 
878 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
879 	*update_ptr = newptr;
880 
881 	return addr;
882 } /* ixv_mc_array_itr */
883 
884 /************************************************************************
885  * ixv_if_local_timer - Timer routine
886  *
887  *   Checks for link status, updates statistics,
888  *   and runs the watchdog check.
889  ************************************************************************/
890 static void
891 ixv_if_local_timer(if_ctx_t ctx, uint16_t qid)
892 {
893 	if (qid != 0)
894 		return;
895 
896 	/* Fire off the adminq task */
897 	iflib_admin_intr_deferred(ctx);
898 } /* ixv_if_local_timer */
899 
/************************************************************************
 * ixv_if_update_admin_status - Update OS on link state
 *
 * Note: Only updates the OS on the cached link state.
 *       The real check of the hardware only happens with
 *       a link interrupt.
 ************************************************************************/
static void
ixv_if_update_admin_status(if_ctx_t ctx)
{
	struct adapter *adapter = iflib_get_softc(ctx);
	device_t       dev = iflib_get_dev(ctx);

	adapter->hw.mac.get_link_status = TRUE;
	ixgbe_check_link(&adapter->hw, &adapter->link_speed, &adapter->link_up,
	    FALSE);

	if (adapter->link_up) {
		/* only report a transition, not every poll */
		if (adapter->link_active == FALSE) {
			if (bootverbose)
				device_printf(dev, "Link is up %d Gbps %s \n",
				    ((adapter->link_speed == 128) ? 10 : 1),
				    "Full Duplex");
			adapter->link_active = TRUE;
			/* NOTE(review): always reports IF_Gbps(10) to the
			 * stack even on slower links — confirm intended */
			iflib_link_state_change(ctx, LINK_STATE_UP,
			    IF_Gbps(10));
		}
	} else { /* Link down */
		if (adapter->link_active == TRUE) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			iflib_link_state_change(ctx, LINK_STATE_DOWN,  0);
			adapter->link_active = FALSE;
		}
	}

	/* Stats Update */
	ixv_update_stats(adapter);
} /* ixv_if_update_admin_status */
939 
940 
941 /************************************************************************
942  * ixv_if_stop - Stop the hardware
943  *
944  *   Disables all traffic on the adapter by issuing a
945  *   global reset on the MAC and deallocates TX/RX buffers.
946  ************************************************************************/
947 static void
948 ixv_if_stop(if_ctx_t ctx)
949 {
950 	struct adapter  *adapter = iflib_get_softc(ctx);
951 	struct ixgbe_hw *hw = &adapter->hw;
952 
953 	INIT_DEBUGOUT("ixv_stop: begin\n");
954 
955 	ixv_if_disable_intr(ctx);
956 
957 	hw->mac.ops.reset_hw(hw);
958 	adapter->hw.adapter_stopped = FALSE;
959 	hw->mac.ops.stop_adapter(hw);
960 
961 	/* Update the stack */
962 	adapter->link_up = FALSE;
963 	ixv_if_update_admin_status(ctx);
964 
965 	/* reprogram the RAR[0] in case user changed it. */
966 	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
967 } /* ixv_if_stop */
968 
969 
970 /************************************************************************
971  * ixv_identify_hardware - Determine hardware revision.
972  ************************************************************************/
973 static void
974 ixv_identify_hardware(if_ctx_t ctx)
975 {
976 	struct adapter  *adapter = iflib_get_softc(ctx);
977 	device_t        dev = iflib_get_dev(ctx);
978 	struct ixgbe_hw *hw = &adapter->hw;
979 
980 	/* Save off the information about this board */
981 	hw->vendor_id = pci_get_vendor(dev);
982 	hw->device_id = pci_get_device(dev);
983 	hw->revision_id = pci_get_revid(dev);
984 	hw->subsystem_vendor_id = pci_get_subvendor(dev);
985 	hw->subsystem_device_id = pci_get_subdevice(dev);
986 
987 	/* A subset of set_mac_type */
988 	switch (hw->device_id) {
989 	case IXGBE_DEV_ID_82599_VF:
990 		hw->mac.type = ixgbe_mac_82599_vf;
991 		break;
992 	case IXGBE_DEV_ID_X540_VF:
993 		hw->mac.type = ixgbe_mac_X540_vf;
994 		break;
995 	case IXGBE_DEV_ID_X550_VF:
996 		hw->mac.type = ixgbe_mac_X550_vf;
997 		break;
998 	case IXGBE_DEV_ID_X550EM_X_VF:
999 		hw->mac.type = ixgbe_mac_X550EM_x_vf;
1000 		break;
1001 	case IXGBE_DEV_ID_X550EM_A_VF:
1002 		hw->mac.type = ixgbe_mac_X550EM_a_vf;
1003 		break;
1004 	default:
1005 		device_printf(dev, "unknown mac type\n");
1006 		hw->mac.type = ixgbe_mac_unknown;
1007 		break;
1008 	}
1009 } /* ixv_identify_hardware */
1010 
1011 /************************************************************************
1012  * ixv_if_msix_intr_assign - Setup MSI-X Interrupt resources and handlers
1013  ************************************************************************/
1014 static int
1015 ixv_if_msix_intr_assign(if_ctx_t ctx, int msix)
1016 {
1017 	struct adapter     *adapter = iflib_get_softc(ctx);
1018 	device_t           dev = iflib_get_dev(ctx);
1019 	struct ix_rx_queue *rx_que = adapter->rx_queues;
1020 	struct ix_tx_queue *tx_que;
1021 	int                error, rid, vector = 0;
1022 	char               buf[16];
1023 
1024 	for (int i = 0; i < adapter->num_rx_queues; i++, vector++, rx_que++) {
1025 		rid = vector + 1;
1026 
1027 		snprintf(buf, sizeof(buf), "rxq%d", i);
1028 		error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
1029 		    IFLIB_INTR_RX, ixv_msix_que, rx_que, rx_que->rxr.me, buf);
1030 
1031 		if (error) {
1032 			device_printf(iflib_get_dev(ctx),
1033 			    "Failed to allocate que int %d err: %d", i, error);
1034 			adapter->num_rx_queues = i + 1;
1035 			goto fail;
1036 		}
1037 
1038 		rx_que->msix = vector;
1039 		adapter->active_queues |= (u64)(1 << rx_que->msix);
1040 
1041 	}
1042 
1043 	for (int i = 0; i < adapter->num_tx_queues; i++) {
1044 		snprintf(buf, sizeof(buf), "txq%d", i);
1045 		tx_que = &adapter->tx_queues[i];
1046 		tx_que->msix = i % adapter->num_rx_queues;
1047 		iflib_softirq_alloc_generic(ctx,
1048 		    &adapter->rx_queues[tx_que->msix].que_irq,
1049 		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
1050 	}
1051 	rid = vector + 1;
1052 	error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid,
1053 	    IFLIB_INTR_ADMIN, ixv_msix_mbx, adapter, 0, "aq");
1054 	if (error) {
1055 		device_printf(iflib_get_dev(ctx),
1056 		    "Failed to register admin handler");
1057 		return (error);
1058 	}
1059 
1060 	adapter->vector = vector;
1061 	/*
1062 	 * Due to a broken design QEMU will fail to properly
1063 	 * enable the guest for MSIX unless the vectors in
1064 	 * the table are all set up, so we must rewrite the
1065 	 * ENABLE in the MSIX control register again at this
1066 	 * point to cause it to successfully initialize us.
1067 	 */
1068 	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
1069 		int msix_ctrl;
1070 		pci_find_cap(dev, PCIY_MSIX, &rid);
1071 		rid += PCIR_MSIX_CTRL;
1072 		msix_ctrl = pci_read_config(dev, rid, 2);
1073 		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1074 		pci_write_config(dev, rid, msix_ctrl, 2);
1075 	}
1076 
1077 	return (0);
1078 
1079 fail:
1080 	iflib_irq_free(ctx, &adapter->irq);
1081 	rx_que = adapter->rx_queues;
1082 	for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++)
1083 		iflib_irq_free(ctx, &rx_que->que_irq);
1084 
1085 	return (error);
1086 } /* ixv_if_msix_intr_assign */
1087 
1088 /************************************************************************
1089  * ixv_allocate_pci_resources
1090  ************************************************************************/
1091 static int
1092 ixv_allocate_pci_resources(if_ctx_t ctx)
1093 {
1094 	struct adapter *adapter = iflib_get_softc(ctx);
1095 	device_t       dev = iflib_get_dev(ctx);
1096 	int            rid;
1097 
1098 	rid = PCIR_BAR(0);
1099 	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1100 	    RF_ACTIVE);
1101 
1102 	if (!(adapter->pci_mem)) {
1103 		device_printf(dev, "Unable to allocate bus resource: memory\n");
1104 		return (ENXIO);
1105 	}
1106 
1107 	adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
1108 	adapter->osdep.mem_bus_space_handle =
1109 	    rman_get_bushandle(adapter->pci_mem);
1110 	adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
1111 
1112 	return (0);
1113 } /* ixv_allocate_pci_resources */
1114 
1115 /************************************************************************
1116  * ixv_free_pci_resources
1117  ************************************************************************/
1118 static void
1119 ixv_free_pci_resources(if_ctx_t ctx)
1120 {
1121 	struct adapter     *adapter = iflib_get_softc(ctx);
1122 	struct ix_rx_queue *que = adapter->rx_queues;
1123 	device_t           dev = iflib_get_dev(ctx);
1124 
1125 	/* Release all msix queue resources */
1126 	if (adapter->intr_type == IFLIB_INTR_MSIX)
1127 		iflib_irq_free(ctx, &adapter->irq);
1128 
1129 	if (que != NULL) {
1130 		for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
1131 			iflib_irq_free(ctx, &que->que_irq);
1132 		}
1133 	}
1134 
1135 	/* Clean the Legacy or Link interrupt last */
1136 	if (adapter->pci_mem != NULL)
1137 		bus_release_resource(dev, SYS_RES_MEMORY,
1138 				     PCIR_BAR(0), adapter->pci_mem);
1139 } /* ixv_free_pci_resources */
1140 
1141 /************************************************************************
1142  * ixv_setup_interface
1143  *
1144  *   Setup networking device structure and register an interface.
1145  ************************************************************************/
1146 static int
1147 ixv_setup_interface(if_ctx_t ctx)
1148 {
1149 	struct adapter *adapter = iflib_get_softc(ctx);
1150 	if_softc_ctx_t scctx = adapter->shared;
1151 	struct ifnet   *ifp = iflib_get_ifp(ctx);
1152 
1153 	INIT_DEBUGOUT("ixv_setup_interface: begin");
1154 
1155 	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
1156 	if_setbaudrate(ifp, IF_Gbps(10));
1157 	ifp->if_snd.ifq_maxlen = scctx->isc_ntxd[0] - 2;
1158 
1159 
1160 	adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
1161 	ifmedia_add(adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1162 	ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
1163 
1164 	return 0;
1165 } /* ixv_setup_interface */
1166 
1167 /************************************************************************
1168  * ixv_if_get_counter
1169  ************************************************************************/
1170 static uint64_t
1171 ixv_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1172 {
1173 	struct adapter *adapter = iflib_get_softc(ctx);
1174 	if_t           ifp = iflib_get_ifp(ctx);
1175 
1176 	switch (cnt) {
1177 	case IFCOUNTER_IPACKETS:
1178 		return (adapter->ipackets);
1179 	case IFCOUNTER_OPACKETS:
1180 		return (adapter->opackets);
1181 	case IFCOUNTER_IBYTES:
1182 		return (adapter->ibytes);
1183 	case IFCOUNTER_OBYTES:
1184 		return (adapter->obytes);
1185 	case IFCOUNTER_IMCASTS:
1186 		return (adapter->imcasts);
1187 	default:
1188 		return (if_get_counter_default(ifp, cnt));
1189 	}
1190 } /* ixv_if_get_counter */
1191 
1192 /************************************************************************
1193  * ixv_initialize_transmit_units - Enable transmit unit.
1194  ************************************************************************/
1195 static void
1196 ixv_initialize_transmit_units(if_ctx_t ctx)
1197 {
1198 	struct adapter     *adapter = iflib_get_softc(ctx);
1199 	struct ixgbe_hw    *hw = &adapter->hw;
1200 	if_softc_ctx_t     scctx = adapter->shared;
1201 	struct ix_tx_queue *que = adapter->tx_queues;
1202 	int                i;
1203 
1204 	for (i = 0; i < adapter->num_tx_queues; i++, que++) {
1205 		struct tx_ring *txr = &que->txr;
1206 		u64            tdba = txr->tx_paddr;
1207 		u32            txctrl, txdctl;
1208 		int            j = txr->me;
1209 
1210 		/* Set WTHRESH to 8, burst writeback */
1211 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1212 		txdctl |= (8 << 16);
1213 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1214 
1215 		/* Set the HW Tx Head and Tail indices */
1216 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(j), 0);
1217 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(j), 0);
1218 
1219 		/* Set Tx Tail register */
1220 		txr->tail = IXGBE_VFTDT(j);
1221 
1222 		txr->tx_rs_cidx = txr->tx_rs_pidx = txr->tx_cidx_processed = 0;
1223 		for (int k = 0; k < scctx->isc_ntxd[0]; k++)
1224 			txr->tx_rsq[k] = QIDX_INVALID;
1225 
1226 		/* Set Ring parameters */
1227 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
1228 		    (tdba & 0x00000000ffffffffULL));
1229 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
1230 		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j),
1231 		    scctx->isc_ntxd[0] * sizeof(struct ixgbe_legacy_tx_desc));
1232 		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
1233 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
1234 		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
1235 
1236 		/* Now enable */
1237 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1238 		txdctl |= IXGBE_TXDCTL_ENABLE;
1239 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1240 	}
1241 
1242 	return;
1243 } /* ixv_initialize_transmit_units */
1244 
1245 /************************************************************************
1246  * ixv_initialize_rss_mapping
1247  ************************************************************************/
1248 static void
1249 ixv_initialize_rss_mapping(struct adapter *adapter)
1250 {
1251 	struct ixgbe_hw *hw = &adapter->hw;
1252 	u32             reta = 0, mrqc, rss_key[10];
1253 	int             queue_id;
1254 	int             i, j;
1255 	u32             rss_hash_config;
1256 
1257 	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
1258 		/* Fetch the configured RSS key */
1259 		rss_getkey((uint8_t *)&rss_key);
1260 	} else {
1261 		/* set up random bits */
1262 		arc4rand(&rss_key, sizeof(rss_key), 0);
1263 	}
1264 
1265 	/* Now fill out hash function seeds */
1266 	for (i = 0; i < 10; i++)
1267 		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);
1268 
1269 	/* Set up the redirection table */
1270 	for (i = 0, j = 0; i < 64; i++, j++) {
1271 		if (j == adapter->num_rx_queues)
1272 			j = 0;
1273 
1274 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
1275 			/*
1276 			 * Fetch the RSS bucket id for the given indirection
1277 			 * entry. Cap it at the number of configured buckets
1278 			 * (which is num_rx_queues.)
1279 			 */
1280 			queue_id = rss_get_indirection_to_bucket(i);
1281 			queue_id = queue_id % adapter->num_rx_queues;
1282 		} else
1283 			queue_id = j;
1284 
1285 		/*
1286 		 * The low 8 bits are for hash value (n+0);
1287 		 * The next 8 bits are for hash value (n+1), etc.
1288 		 */
1289 		reta >>= 8;
1290 		reta |= ((uint32_t)queue_id) << 24;
1291 		if ((i & 3) == 3) {
1292 			IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
1293 			reta = 0;
1294 		}
1295 	}
1296 
1297 	/* Perform hash on these packet types */
1298 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
1299 		rss_hash_config = rss_gethashconfig();
1300 	else {
1301 		/*
1302 		 * Disable UDP - IP fragments aren't currently being handled
1303 		 * and so we end up with a mix of 2-tuple and 4-tuple
1304 		 * traffic.
1305 		 */
1306 		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
1307 		                | RSS_HASHTYPE_RSS_TCP_IPV4
1308 		                | RSS_HASHTYPE_RSS_IPV6
1309 		                | RSS_HASHTYPE_RSS_TCP_IPV6;
1310 	}
1311 
1312 	mrqc = IXGBE_MRQC_RSSEN;
1313 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
1314 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
1315 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
1316 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
1317 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
1318 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
1319 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
1320 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
1321 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
1322 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
1323 		    __func__);
1324 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
1325 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
1326 		    __func__);
1327 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
1328 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
1329 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
1330 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
1331 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
1332 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
1333 		    __func__);
1334 	IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
1335 } /* ixv_initialize_rss_mapping */
1336 
1337 
1338 /************************************************************************
1339  * ixv_initialize_receive_units - Setup receive registers and features.
1340  ************************************************************************/
1341 static void
1342 ixv_initialize_receive_units(if_ctx_t ctx)
1343 {
1344 	struct adapter     *adapter = iflib_get_softc(ctx);
1345 	if_softc_ctx_t     scctx;
1346 	struct ixgbe_hw    *hw = &adapter->hw;
1347 	struct ifnet       *ifp = iflib_get_ifp(ctx);
1348 	struct ix_rx_queue *que = adapter->rx_queues;
1349 	u32                bufsz, psrtype;
1350 
1351 	if (ifp->if_mtu > ETHERMTU)
1352 		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1353 	else
1354 		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1355 
1356 	psrtype = IXGBE_PSRTYPE_TCPHDR
1357 	        | IXGBE_PSRTYPE_UDPHDR
1358 	        | IXGBE_PSRTYPE_IPV4HDR
1359 	        | IXGBE_PSRTYPE_IPV6HDR
1360 	        | IXGBE_PSRTYPE_L2HDR;
1361 
1362 	if (adapter->num_rx_queues > 1)
1363 		psrtype |= 1 << 29;
1364 
1365 	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1366 
1367 	/* Tell PF our max_frame size */
1368 	if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) {
1369 		device_printf(adapter->dev, "There is a problem with the PF setup.  It is likely the receive unit for this VF will not function correctly.\n");
1370 	}
1371 	scctx = adapter->shared;
1372 
1373 	for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
1374 		struct rx_ring *rxr = &que->rxr;
1375 		u64            rdba = rxr->rx_paddr;
1376 		u32            reg, rxdctl;
1377 		int            j = rxr->me;
1378 
1379 		/* Disable the queue */
1380 		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
1381 		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
1382 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
1383 		for (int k = 0; k < 10; k++) {
1384 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
1385 			    IXGBE_RXDCTL_ENABLE)
1386 				msec_delay(1);
1387 			else
1388 				break;
1389 		}
1390 		wmb();
1391 		/* Setup the Base and Length of the Rx Descriptor Ring */
1392 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
1393 		    (rdba & 0x00000000ffffffffULL));
1394 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
1395 		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j),
1396 		    scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));
1397 
1398 		/* Reset the ring indices */
1399 		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
1400 		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);
1401 
1402 		/* Set up the SRRCTL register */
1403 		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(j));
1404 		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
1405 		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
1406 		reg |= bufsz;
1407 		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1408 		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(j), reg);
1409 
1410 		/* Capture Rx Tail index */
1411 		rxr->tail = IXGBE_VFRDT(rxr->me);
1412 
1413 		/* Do the queue enabling last */
1414 		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
1415 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
1416 		for (int l = 0; l < 10; l++) {
1417 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
1418 			    IXGBE_RXDCTL_ENABLE)
1419 				break;
1420 			msec_delay(1);
1421 		}
1422 		wmb();
1423 
1424 		/* Set the Tail Pointer */
1425 #ifdef DEV_NETMAP
1426 		/*
1427 		 * In netmap mode, we must preserve the buffers made
1428 		 * available to userspace before the if_init()
1429 		 * (this is true by default on the TX side, because
1430 		 * init makes all buffers available to userspace).
1431 		 *
1432 		 * netmap_reset() and the device specific routines
1433 		 * (e.g. ixgbe_setup_receive_rings()) map these
1434 		 * buffers at the end of the NIC ring, so here we
1435 		 * must set the RDT (tail) register to make sure
1436 		 * they are not overwritten.
1437 		 *
1438 		 * In this driver the NIC ring starts at RDH = 0,
1439 		 * RDT points to the last slot available for reception (?),
1440 		 * so RDT = num_rx_desc - 1 means the whole ring is available.
1441 		 */
1442 		if (ifp->if_capenable & IFCAP_NETMAP) {
1443 			struct netmap_adapter *na = NA(ifp);
1444 			struct netmap_kring *kring = &na->rx_rings[j];
1445 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
1446 
1447 			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
1448 		} else
1449 #endif /* DEV_NETMAP */
1450 			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
1451 			    scctx->isc_nrxd[0] - 1);
1452 	}
1453 
1454 	ixv_initialize_rss_mapping(adapter);
1455 } /* ixv_initialize_receive_units */
1456 
1457 /************************************************************************
1458  * ixv_setup_vlan_support
1459  ************************************************************************/
1460 static void
1461 ixv_setup_vlan_support(if_ctx_t ctx)
1462 {
1463 	struct adapter  *adapter = iflib_get_softc(ctx);
1464 	struct ixgbe_hw *hw = &adapter->hw;
1465 	u32             ctrl, vid, vfta, retry;
1466 
1467 	/*
1468 	 * We get here thru if_init, meaning
1469 	 * a soft reset, this has already cleared
1470 	 * the VFTA and other state, so if there
1471 	 * have been no vlan's registered do nothing.
1472 	 */
1473 	if (adapter->num_vlans == 0)
1474 		return;
1475 
1476 	/* Enable the queues */
1477 	for (int i = 0; i < adapter->num_rx_queues; i++) {
1478 		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
1479 		ctrl |= IXGBE_RXDCTL_VME;
1480 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
1481 		/*
1482 		 * Let Rx path know that it needs to store VLAN tag
1483 		 * as part of extra mbuf info.
1484 		 */
1485 		adapter->rx_queues[i].rxr.vtag_strip = TRUE;
1486 	}
1487 
1488 	/*
1489 	 * A soft reset zero's out the VFTA, so
1490 	 * we need to repopulate it now.
1491 	 */
1492 	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
1493 		if (ixv_shadow_vfta[i] == 0)
1494 			continue;
1495 		vfta = ixv_shadow_vfta[i];
1496 		/*
1497 		 * Reconstruct the vlan id's
1498 		 * based on the bits set in each
1499 		 * of the array ints.
1500 		 */
1501 		for (int j = 0; j < 32; j++) {
1502 			retry = 0;
1503 			if ((vfta & (1 << j)) == 0)
1504 				continue;
1505 			vid = (i * 32) + j;
1506 			/* Call the shared code mailbox routine */
1507 			while (hw->mac.ops.set_vfta(hw, vid, 0, TRUE, FALSE)) {
1508 				if (++retry > 5)
1509 					break;
1510 			}
1511 		}
1512 	}
1513 } /* ixv_setup_vlan_support */
1514 
1515 /************************************************************************
1516  * ixv_if_register_vlan
1517  *
1518  *   Run via a vlan config EVENT, it enables us to use the
1519  *   HW Filter table since we can get the vlan id. This just
1520  *   creates the entry in the soft version of the VFTA, init
1521  *   will repopulate the real table.
1522  ************************************************************************/
1523 static void
1524 ixv_if_register_vlan(if_ctx_t ctx, u16 vtag)
1525 {
1526 	struct adapter *adapter = iflib_get_softc(ctx);
1527 	u16            index, bit;
1528 
1529 	index = (vtag >> 5) & 0x7F;
1530 	bit = vtag & 0x1F;
1531 	ixv_shadow_vfta[index] |= (1 << bit);
1532 	++adapter->num_vlans;
1533 } /* ixv_if_register_vlan */
1534 
1535 /************************************************************************
1536  * ixv_if_unregister_vlan
1537  *
1538  *   Run via a vlan unconfig EVENT, remove our entry
1539  *   in the soft vfta.
1540  ************************************************************************/
1541 static void
1542 ixv_if_unregister_vlan(if_ctx_t ctx, u16 vtag)
1543 {
1544 	struct adapter *adapter = iflib_get_softc(ctx);
1545 	u16            index, bit;
1546 
1547 	index = (vtag >> 5) & 0x7F;
1548 	bit = vtag & 0x1F;
1549 	ixv_shadow_vfta[index] &= ~(1 << bit);
1550 	--adapter->num_vlans;
1551 } /* ixv_if_unregister_vlan */
1552 
1553 /************************************************************************
1554  * ixv_if_enable_intr
1555  ************************************************************************/
1556 static void
1557 ixv_if_enable_intr(if_ctx_t ctx)
1558 {
1559 	struct adapter  *adapter = iflib_get_softc(ctx);
1560 	struct ixgbe_hw *hw = &adapter->hw;
1561 	struct ix_rx_queue *que = adapter->rx_queues;
1562 	u32             mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
1563 
1564 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
1565 
1566 	mask = IXGBE_EIMS_ENABLE_MASK;
1567 	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
1568 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
1569 
1570 	for (int i = 0; i < adapter->num_rx_queues; i++, que++)
1571 		ixv_enable_queue(adapter, que->msix);
1572 
1573 	IXGBE_WRITE_FLUSH(hw);
1574 } /* ixv_if_enable_intr */
1575 
1576 /************************************************************************
1577  * ixv_if_disable_intr
1578  ************************************************************************/
1579 static void
1580 ixv_if_disable_intr(if_ctx_t ctx)
1581 {
1582 	struct adapter *adapter = iflib_get_softc(ctx);
1583 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
1584 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
1585 	IXGBE_WRITE_FLUSH(&adapter->hw);
1586 } /* ixv_if_disable_intr */
1587 
1588 /************************************************************************
1589  * ixv_if_rx_queue_intr_enable
1590  ************************************************************************/
1591 static int
1592 ixv_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
1593 {
1594 	struct adapter	*adapter = iflib_get_softc(ctx);
1595 	struct ix_rx_queue *que = &adapter->rx_queues[rxqid];
1596 
1597 	ixv_enable_queue(adapter, que->rxr.me);
1598 
1599 	return (0);
1600 } /* ixv_if_rx_queue_intr_enable */
1601 
1602 /************************************************************************
1603  * ixv_set_ivar
1604  *
1605  *   Setup the correct IVAR register for a particular MSI-X interrupt
1606  *    - entry is the register array entry
1607  *    - vector is the MSI-X vector for this queue
1608  *    - type is RX/TX/MISC
1609  ************************************************************************/
1610 static void
1611 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
1612 {
1613 	struct ixgbe_hw *hw = &adapter->hw;
1614 	u32             ivar, index;
1615 
1616 	vector |= IXGBE_IVAR_ALLOC_VAL;
1617 
1618 	if (type == -1) { /* MISC IVAR */
1619 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
1620 		ivar &= ~0xFF;
1621 		ivar |= vector;
1622 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
1623 	} else {          /* RX/TX IVARS */
1624 		index = (16 * (entry & 1)) + (8 * type);
1625 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
1626 		ivar &= ~(0xFF << index);
1627 		ivar |= (vector << index);
1628 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
1629 	}
1630 } /* ixv_set_ivar */
1631 
1632 /************************************************************************
1633  * ixv_configure_ivars
1634  ************************************************************************/
1635 static void
1636 ixv_configure_ivars(struct adapter *adapter)
1637 {
1638 	struct ix_rx_queue *que = adapter->rx_queues;
1639 
1640 	MPASS(adapter->num_rx_queues == adapter->num_tx_queues);
1641 
1642 	for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
1643 		/* First the RX queue entry */
1644 		ixv_set_ivar(adapter, i, que->msix, 0);
1645 		/* ... and the TX */
1646 		ixv_set_ivar(adapter, i, que->msix, 1);
1647 		/* Set an initial value in EITR */
1648 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
1649 		    IXGBE_EITR_DEFAULT);
1650 	}
1651 
1652 	/* For the mailbox interrupt */
1653 	ixv_set_ivar(adapter, 1, adapter->vector, -1);
1654 } /* ixv_configure_ivars */
1655 
1656 /************************************************************************
1657  * ixv_save_stats
1658  *
1659  *   The VF stats registers never have a truly virgin
1660  *   starting point, so this routine tries to make an
1661  *   artificial one, marking ground zero on attach as
1662  *   it were.
1663  ************************************************************************/
1664 static void
1665 ixv_save_stats(struct adapter *adapter)
1666 {
1667 	if (adapter->stats.vf.vfgprc || adapter->stats.vf.vfgptc) {
1668 		adapter->stats.vf.saved_reset_vfgprc +=
1669 		    adapter->stats.vf.vfgprc - adapter->stats.vf.base_vfgprc;
1670 		adapter->stats.vf.saved_reset_vfgptc +=
1671 		    adapter->stats.vf.vfgptc - adapter->stats.vf.base_vfgptc;
1672 		adapter->stats.vf.saved_reset_vfgorc +=
1673 		    adapter->stats.vf.vfgorc - adapter->stats.vf.base_vfgorc;
1674 		adapter->stats.vf.saved_reset_vfgotc +=
1675 		    adapter->stats.vf.vfgotc - adapter->stats.vf.base_vfgotc;
1676 		adapter->stats.vf.saved_reset_vfmprc +=
1677 		    adapter->stats.vf.vfmprc - adapter->stats.vf.base_vfmprc;
1678 	}
1679 } /* ixv_save_stats */
1680 
1681 /************************************************************************
1682  * ixv_init_stats
1683  ************************************************************************/
1684 static void
1685 ixv_init_stats(struct adapter *adapter)
1686 {
1687 	struct ixgbe_hw *hw = &adapter->hw;
1688 
1689 	adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
1690 	adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
1691 	adapter->stats.vf.last_vfgorc |=
1692 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
1693 
1694 	adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
1695 	adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
1696 	adapter->stats.vf.last_vfgotc |=
1697 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
1698 
1699 	adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
1700 
1701 	adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
1702 	adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
1703 	adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
1704 	adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
1705 	adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
1706 } /* ixv_init_stats */
1707 
/*
 * UPDATE_STAT_32 - fold a 32-bit rolling hardware counter into a
 * 64-bit software accumulator.  If the current reading is below the
 * previous one the register wrapped, so carry 2^32 into the count;
 * the low 32 bits of the count always mirror the register.
 */
#define UPDATE_STAT_32(reg, last, count)                \
{                                                       \
	u32 current = IXGBE_READ_REG(hw, reg);          \
	if (current < last)                             \
		count += 0x100000000LL;                 \
	last = current;                                 \
	count &= 0xFFFFFFFF00000000LL;                  \
	count |= current;                               \
}

/*
 * UPDATE_STAT_36 - as UPDATE_STAT_32, but for the 36-bit octet
 * counters split across an LSB/MSB register pair; carries 2^36 on
 * wraparound.
 */
#define UPDATE_STAT_36(lsb, msb, last, count)           \
{                                                       \
	u64 cur_lsb = IXGBE_READ_REG(hw, lsb);          \
	u64 cur_msb = IXGBE_READ_REG(hw, msb);          \
	u64 current = ((cur_msb << 32) | cur_lsb);      \
	if (current < last)                             \
		count += 0x1000000000LL;                \
	last = current;                                 \
	count &= 0xFFFFFFF000000000LL;                  \
	count |= current;                               \
}
1729 
1730 /************************************************************************
1731  * ixv_update_stats - Update the board statistics counters.
1732  ************************************************************************/
1733 void
1734 ixv_update_stats(struct adapter *adapter)
1735 {
1736 	struct ixgbe_hw *hw = &adapter->hw;
1737 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
1738 
1739 	UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc,
1740 	    adapter->stats.vf.vfgprc);
1741 	UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc,
1742 	    adapter->stats.vf.vfgptc);
1743 	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
1744 	    adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc);
1745 	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
1746 	    adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc);
1747 	UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc,
1748 	    adapter->stats.vf.vfmprc);
1749 
1750 	/* Fill out the OS statistics structure */
1751 	IXGBE_SET_IPACKETS(adapter, stats->vfgprc);
1752 	IXGBE_SET_OPACKETS(adapter, stats->vfgptc);
1753 	IXGBE_SET_IBYTES(adapter, stats->vfgorc);
1754 	IXGBE_SET_OBYTES(adapter, stats->vfgotc);
1755 	IXGBE_SET_IMCASTS(adapter, stats->vfmprc);
1756 } /* ixv_update_stats */
1757 
1758 /************************************************************************
1759  * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
1760  ************************************************************************/
1761 static void
1762 ixv_add_stats_sysctls(struct adapter *adapter)
1763 {
1764 	device_t                dev = adapter->dev;
1765 	struct ix_tx_queue      *tx_que = adapter->tx_queues;
1766 	struct ix_rx_queue      *rx_que = adapter->rx_queues;
1767 	struct sysctl_ctx_list  *ctx = device_get_sysctl_ctx(dev);
1768 	struct sysctl_oid       *tree = device_get_sysctl_tree(dev);
1769 	struct sysctl_oid_list  *child = SYSCTL_CHILDREN(tree);
1770 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
1771 	struct sysctl_oid       *stat_node, *queue_node;
1772 	struct sysctl_oid_list  *stat_list, *queue_list;
1773 
1774 #define QUEUE_NAME_LEN 32
1775 	char                    namebuf[QUEUE_NAME_LEN];
1776 
1777 	/* Driver Statistics */
1778 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1779 	    CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
1780 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1781 	    CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");
1782 
1783 	for (int i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
1784 		struct tx_ring *txr = &tx_que->txr;
1785 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1786 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1787 		    CTLFLAG_RD, NULL, "Queue Name");
1788 		queue_list = SYSCTL_CHILDREN(queue_node);
1789 
1790 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
1791 		    CTLFLAG_RD, &(txr->tso_tx), "TSO Packets");
1792 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1793 		    CTLFLAG_RD, &(txr->total_packets), "TX Packets");
1794 	}
1795 
1796 	for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++) {
1797 		struct rx_ring *rxr = &rx_que->rxr;
1798 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1799 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1800 		    CTLFLAG_RD, NULL, "Queue Name");
1801 		queue_list = SYSCTL_CHILDREN(queue_node);
1802 
1803 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1804 		    CTLFLAG_RD, &(rx_que->irqs), "IRQs on queue");
1805 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1806 		    CTLFLAG_RD, &(rxr->rx_packets), "RX packets");
1807 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1808 		    CTLFLAG_RD, &(rxr->rx_bytes), "RX bytes");
1809 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1810 		    CTLFLAG_RD, &(rxr->rx_discarded), "Discarded RX packets");
1811 	}
1812 
1813 	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
1814 	    CTLFLAG_RD, NULL, "VF Statistics (read from HW registers)");
1815 	stat_list = SYSCTL_CHILDREN(stat_node);
1816 
1817 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1818 	    CTLFLAG_RD, &stats->vfgprc, "Good Packets Received");
1819 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1820 	    CTLFLAG_RD, &stats->vfgorc, "Good Octets Received");
1821 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1822 	    CTLFLAG_RD, &stats->vfmprc, "Multicast Packets Received");
1823 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1824 	    CTLFLAG_RD, &stats->vfgptc, "Good Packets Transmitted");
1825 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1826 	    CTLFLAG_RD, &stats->vfgotc, "Good Octets Transmitted");
1827 } /* ixv_add_stats_sysctls */
1828 
1829 /************************************************************************
1830  * ixv_print_debug_info
1831  *
1832  *   Called only when em_display_debug_stats is enabled.
1833  *   Provides a way to take a look at important statistics
1834  *   maintained by the driver and hardware.
1835  ************************************************************************/
1836 static void
1837 ixv_print_debug_info(struct adapter *adapter)
1838 {
1839 	device_t        dev = adapter->dev;
1840 	struct ixgbe_hw *hw = &adapter->hw;
1841 
1842 	device_printf(dev, "Error Byte Count = %u \n",
1843 	    IXGBE_READ_REG(hw, IXGBE_ERRBC));
1844 
1845 	device_printf(dev, "MBX IRQ Handled: %lu\n", (long)adapter->link_irq);
1846 } /* ixv_print_debug_info */
1847 
1848 /************************************************************************
1849  * ixv_sysctl_debug
1850  ************************************************************************/
1851 static int
1852 ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
1853 {
1854 	struct adapter *adapter;
1855 	int            error, result;
1856 
1857 	result = -1;
1858 	error = sysctl_handle_int(oidp, &result, 0, req);
1859 
1860 	if (error || !req->newptr)
1861 		return (error);
1862 
1863 	if (result == 1) {
1864 		adapter = (struct adapter *)arg1;
1865 		ixv_print_debug_info(adapter);
1866 	}
1867 
1868 	return error;
1869 } /* ixv_sysctl_debug */
1870 
1871 /************************************************************************
1872  * ixv_init_device_features
1873  ************************************************************************/
1874 static void
1875 ixv_init_device_features(struct adapter *adapter)
1876 {
1877 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
1878 	                  | IXGBE_FEATURE_VF
1879 	                  | IXGBE_FEATURE_RSS
1880 	                  | IXGBE_FEATURE_LEGACY_TX;
1881 
1882 	/* A tad short on feature flags for VFs, atm. */
1883 	switch (adapter->hw.mac.type) {
1884 	case ixgbe_mac_82599_vf:
1885 		break;
1886 	case ixgbe_mac_X540_vf:
1887 		break;
1888 	case ixgbe_mac_X550_vf:
1889 	case ixgbe_mac_X550EM_x_vf:
1890 	case ixgbe_mac_X550EM_a_vf:
1891 		adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
1892 		break;
1893 	default:
1894 		break;
1895 	}
1896 
1897 	/* Enabled by default... */
1898 	/* Is a virtual function (VF) */
1899 	if (adapter->feat_cap & IXGBE_FEATURE_VF)
1900 		adapter->feat_en |= IXGBE_FEATURE_VF;
1901 	/* Netmap */
1902 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
1903 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
1904 	/* Receive-Side Scaling (RSS) */
1905 	if (adapter->feat_cap & IXGBE_FEATURE_RSS)
1906 		adapter->feat_en |= IXGBE_FEATURE_RSS;
1907 	/* Needs advanced context descriptor regardless of offloads req'd */
1908 	if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
1909 		adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
1910 } /* ixv_init_device_features */
1911 
1912