xref: /freebsd/sys/dev/ixgbe/if_ixv.c (revision f4f33ea0c752ff0f9bfad34991d5bbb54e71133d)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2017, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 
36 #include "opt_inet.h"
37 #include "opt_inet6.h"
38 
39 #include "ixgbe.h"
40 #include "ifdi_if.h"
41 
42 #include <net/netmap.h>
43 #include <dev/netmap/netmap_kern.h>
44 
45 /************************************************************************
46  * Driver version
47  ************************************************************************/
48 char ixv_driver_version[] = "2.0.1-k";
49 
50 /************************************************************************
51  * PCI Device ID Table
52  *
53  *   Used by probe to select devices to load on
54  *   Last field stores an index into ixv_strings
55  *   Last entry must be all 0s
56  *
57  *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
58  ************************************************************************/
59 static pci_vendor_info_t ixv_vendor_info_array[] =
60 {
61 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
62 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
63 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
64 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
65 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
66 	/* required last entry */
67 PVID_END
68 };
69 
70 /************************************************************************
71  * Function prototypes
72  ************************************************************************/
73 static void     *ixv_register(device_t dev);
74 static int      ixv_if_attach_pre(if_ctx_t ctx);
75 static int      ixv_if_attach_post(if_ctx_t ctx);
76 static int      ixv_if_detach(if_ctx_t ctx);
77 
78 static int      ixv_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
79 static int      ixv_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
80 static int      ixv_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
81 static void     ixv_if_queues_free(if_ctx_t ctx);
82 static void     ixv_identify_hardware(if_ctx_t ctx);
83 static void     ixv_init_device_features(struct adapter *);
84 static int      ixv_allocate_pci_resources(if_ctx_t ctx);
85 static void     ixv_free_pci_resources(if_ctx_t ctx);
86 static int      ixv_setup_interface(if_ctx_t ctx);
87 static void     ixv_if_media_status(if_ctx_t , struct ifmediareq *);
88 static int      ixv_if_media_change(if_ctx_t ctx);
89 static void     ixv_if_update_admin_status(if_ctx_t ctx);
90 static int      ixv_if_msix_intr_assign(if_ctx_t ctx, int msix);
91 
92 static int      ixv_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
93 static void     ixv_if_init(if_ctx_t ctx);
94 static void     ixv_if_local_timer(if_ctx_t ctx, uint16_t qid);
95 static void     ixv_if_stop(if_ctx_t ctx);
96 static int      ixv_negotiate_api(struct adapter *);
97 
98 static void     ixv_initialize_transmit_units(if_ctx_t ctx);
99 static void     ixv_initialize_receive_units(if_ctx_t ctx);
100 static void     ixv_initialize_rss_mapping(struct adapter *);
101 
102 static void     ixv_setup_vlan_support(if_ctx_t ctx);
103 static void     ixv_configure_ivars(struct adapter *);
104 static void     ixv_if_enable_intr(if_ctx_t ctx);
105 static void     ixv_if_disable_intr(if_ctx_t ctx);
106 static void     ixv_if_multi_set(if_ctx_t ctx);
107 
108 static void     ixv_if_register_vlan(if_ctx_t, u16);
109 static void     ixv_if_unregister_vlan(if_ctx_t, u16);
110 
111 static uint64_t ixv_if_get_counter(if_ctx_t, ift_counter);
112 
113 static void     ixv_save_stats(struct adapter *);
114 static void     ixv_init_stats(struct adapter *);
115 static void     ixv_update_stats(struct adapter *);
116 static void     ixv_add_stats_sysctls(struct adapter *adapter);
117 
118 static int      ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
119 static void     ixv_set_ivar(struct adapter *, u8, u8, s8);
120 
121 static u8       *ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
122 
123 /* The MSI-X Interrupt handlers */
124 static int      ixv_msix_que(void *);
125 static int      ixv_msix_mbx(void *);
126 
127 /************************************************************************
128  * FreeBSD Device Interface Entry Points
129  ************************************************************************/
130 static device_method_t ixv_methods[] = {
131 	/* Device interface */
132 	DEVMETHOD(device_register, ixv_register),
133 	DEVMETHOD(device_probe, iflib_device_probe),
134 	DEVMETHOD(device_attach, iflib_device_attach),
135 	DEVMETHOD(device_detach, iflib_device_detach),
136 	DEVMETHOD(device_shutdown, iflib_device_shutdown),
137 	DEVMETHOD_END
138 };
139 
140 static driver_t ixv_driver = {
141 	"ixv", ixv_methods, sizeof(struct adapter),
142 };
143 
144 devclass_t ixv_devclass;
145 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
146 IFLIB_PNP_INFO(pci, ixv_driver, ixv_vendor_info_array);
147 MODULE_DEPEND(ixv, pci, 1, 1, 1);
148 MODULE_DEPEND(ixv, ether, 1, 1, 1);
149 #ifdef DEV_NETMAP
150 MODULE_DEPEND(ixv, netmap, 1, 1, 1);
151 #endif /* DEV_NETMAP */
152 
153 static device_method_t ixv_if_methods[] = {
154 	DEVMETHOD(ifdi_attach_pre, ixv_if_attach_pre),
155 	DEVMETHOD(ifdi_attach_post, ixv_if_attach_post),
156 	DEVMETHOD(ifdi_detach, ixv_if_detach),
157 	DEVMETHOD(ifdi_init, ixv_if_init),
158 	DEVMETHOD(ifdi_stop, ixv_if_stop),
159 	DEVMETHOD(ifdi_msix_intr_assign, ixv_if_msix_intr_assign),
160 	DEVMETHOD(ifdi_intr_enable, ixv_if_enable_intr),
161 	DEVMETHOD(ifdi_intr_disable, ixv_if_disable_intr),
162 	DEVMETHOD(ifdi_tx_queue_intr_enable, ixv_if_rx_queue_intr_enable),
163 	DEVMETHOD(ifdi_rx_queue_intr_enable, ixv_if_rx_queue_intr_enable),
164 	DEVMETHOD(ifdi_tx_queues_alloc, ixv_if_tx_queues_alloc),
165 	DEVMETHOD(ifdi_rx_queues_alloc, ixv_if_rx_queues_alloc),
166 	DEVMETHOD(ifdi_queues_free, ixv_if_queues_free),
167 	DEVMETHOD(ifdi_update_admin_status, ixv_if_update_admin_status),
168 	DEVMETHOD(ifdi_multi_set, ixv_if_multi_set),
169 	DEVMETHOD(ifdi_mtu_set, ixv_if_mtu_set),
170 	DEVMETHOD(ifdi_media_status, ixv_if_media_status),
171 	DEVMETHOD(ifdi_media_change, ixv_if_media_change),
172 	DEVMETHOD(ifdi_timer, ixv_if_local_timer),
173 	DEVMETHOD(ifdi_vlan_register, ixv_if_register_vlan),
174 	DEVMETHOD(ifdi_vlan_unregister, ixv_if_unregister_vlan),
175 	DEVMETHOD(ifdi_get_counter, ixv_if_get_counter),
176 	DEVMETHOD_END
177 };
178 
179 static driver_t ixv_if_driver = {
180   "ixv_if", ixv_if_methods, sizeof(struct adapter)
181 };
182 
183 /*
184  * TUNEABLE PARAMETERS:
185  */
186 
187 /* Flow control setting, default to full */
188 static int ixv_flow_control = ixgbe_fc_full;
189 TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);
190 
191 /*
192  * Header split: this causes the hardware to DMA
193  * the header into a separate mbuf from the payload,
194  * it can be a performance win in some workloads, but
195  * in others it actually hurts, its off by default.
196  */
197 static int ixv_header_split = FALSE;
198 TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);
199 
200 /*
201  * Shadow VFTA table, this is needed because
202  * the real filter table gets cleared during
203  * a soft reset and we need to repopulate it.
204  */
205 static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];
206 extern struct if_txrx ixgbe_txrx;
207 
208 static struct if_shared_ctx ixv_sctx_init = {
209 	.isc_magic = IFLIB_MAGIC,
210 	.isc_q_align = PAGE_SIZE,/* max(DBA_ALIGN, PAGE_SIZE) */
211 	.isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
212 	.isc_tx_maxsegsize = PAGE_SIZE,
213 	.isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
214 	.isc_tso_maxsegsize = PAGE_SIZE,
215 	.isc_rx_maxsize = MJUM16BYTES,
216 	.isc_rx_nsegments = 1,
217 	.isc_rx_maxsegsize = MJUM16BYTES,
218 	.isc_nfl = 1,
219 	.isc_ntxqs = 1,
220 	.isc_nrxqs = 1,
221 	.isc_admin_intrcnt = 1,
222 	.isc_vendor_info = ixv_vendor_info_array,
223 	.isc_driver_version = ixv_driver_version,
224 	.isc_driver = &ixv_if_driver,
225 
226 	.isc_nrxd_min = {MIN_RXD},
227 	.isc_ntxd_min = {MIN_TXD},
228 	.isc_nrxd_max = {MAX_RXD},
229 	.isc_ntxd_max = {MAX_TXD},
230 	.isc_nrxd_default = {DEFAULT_RXD},
231 	.isc_ntxd_default = {DEFAULT_TXD},
232 };
233 
234 if_shared_ctx_t ixv_sctx = &ixv_sctx_init;
235 
236 static void *
237 ixv_register(device_t dev)
238 {
239 	return (ixv_sctx);
240 }
241 
/************************************************************************
 * ixv_if_tx_queues_alloc
 *
 *   iflib callback: allocate per-queue software state for the TX side.
 *   iflib has already DMA-allocated the descriptor rings; vaddrs/paddrs
 *   give the virtual/physical base of each ring.  ntxqs is the number
 *   of rings per queue set (always 1 for this driver, asserted below)
 *   and ntxqsets is the number of queue sets.
 *
 *   Returns 0 on success, ENOMEM on allocation failure (after freeing
 *   anything partially allocated via ixv_if_queues_free()).
 ************************************************************************/
static int
ixv_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
                       int ntxqs, int ntxqsets)
{
	struct adapter     *adapter = iflib_get_softc(ctx);
	if_softc_ctx_t     scctx = adapter->shared;
	struct ix_tx_queue *que;
	int                i, j, error;

	MPASS(adapter->num_tx_queues == ntxqsets);
	MPASS(ntxqs == 1);

	/* Allocate queue structure memory */
	adapter->tx_queues =
	    (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
	                                 M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!adapter->tx_queues) {
		device_printf(iflib_get_dev(ctx),
		    "Unable to allocate TX ring memory\n");
		return (ENOMEM);
	}

	for (i = 0, que = adapter->tx_queues; i < ntxqsets; i++, que++) {
		struct tx_ring *txr = &que->txr;

		txr->me = i;
		txr->adapter =  que->adapter = adapter;
		/* Mark this queue live for the interrupt-handling bitmap. */
		adapter->active_queues |= (u64)1 << txr->me;

		/* Allocate report status array (one qidx_t per descriptor) */
		if (!(txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_DEVBUF, M_NOWAIT | M_ZERO))) {
			error = ENOMEM;
			goto fail;
		}
		/* QIDX_INVALID marks slots with no completion to report yet. */
		for (j = 0; j < scctx->isc_ntxd[0]; j++)
			txr->tx_rsq[j] = QIDX_INVALID;
		/* get the virtual and physical address of the hardware queues */
		txr->tail = IXGBE_VFTDT(txr->me);
		txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i*ntxqs];
		txr->tx_paddr = paddrs[i*ntxqs];

		txr->bytes = 0;
		txr->total_packets = 0;

	}

	device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
	    adapter->num_tx_queues);

	return (0);

 fail:
	/* Frees both TX and RX queue state; safe on partial allocation. */
	ixv_if_queues_free(ctx);

	return (error);
} /* ixv_if_tx_queues_alloc */
301 
302 /************************************************************************
303  * ixv_if_rx_queues_alloc
304  ************************************************************************/
305 static int
306 ixv_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
307                        int nrxqs, int nrxqsets)
308 {
309 	struct adapter     *adapter = iflib_get_softc(ctx);
310 	struct ix_rx_queue *que;
311 	int                i, error;
312 
313 	MPASS(adapter->num_rx_queues == nrxqsets);
314 	MPASS(nrxqs == 1);
315 
316 	/* Allocate queue structure memory */
317 	adapter->rx_queues =
318 	    (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue) * nrxqsets,
319 	                                 M_DEVBUF, M_NOWAIT | M_ZERO);
320 	if (!adapter->rx_queues) {
321 		device_printf(iflib_get_dev(ctx),
322 		    "Unable to allocate TX ring memory\n");
323 		error = ENOMEM;
324 		goto fail;
325 	}
326 
327 	for (i = 0, que = adapter->rx_queues; i < nrxqsets; i++, que++) {
328 		struct rx_ring *rxr = &que->rxr;
329 		rxr->me = i;
330 		rxr->adapter = que->adapter = adapter;
331 
332 
333 		/* get the virtual and physical address of the hw queues */
334 		rxr->tail = IXGBE_VFRDT(rxr->me);
335 		rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
336 		rxr->rx_paddr = paddrs[i*nrxqs];
337 		rxr->bytes = 0;
338 		rxr->que = que;
339 	}
340 
341 	device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
342 	    adapter->num_rx_queues);
343 
344 	return (0);
345 
346 fail:
347 	ixv_if_queues_free(ctx);
348 
349 	return (error);
350 } /* ixv_if_rx_queues_alloc */
351 
352 /************************************************************************
353  * ixv_if_queues_free
354  ************************************************************************/
355 static void
356 ixv_if_queues_free(if_ctx_t ctx)
357 {
358 	struct adapter     *adapter = iflib_get_softc(ctx);
359 	struct ix_tx_queue *que = adapter->tx_queues;
360 	int                i;
361 
362 	if (que == NULL)
363 		goto free;
364 
365 	for (i = 0; i < adapter->num_tx_queues; i++, que++) {
366 		struct tx_ring *txr = &que->txr;
367 		if (txr->tx_rsq == NULL)
368 			break;
369 
370 		free(txr->tx_rsq, M_DEVBUF);
371 		txr->tx_rsq = NULL;
372 	}
373 	if (adapter->tx_queues != NULL)
374 		free(adapter->tx_queues, M_DEVBUF);
375 free:
376 	if (adapter->rx_queues != NULL)
377 		free(adapter->rx_queues, M_DEVBUF);
378 	adapter->tx_queues = NULL;
379 	adapter->rx_queues = NULL;
380 } /* ixv_if_queues_free */
381 
/************************************************************************
 * ixv_if_attach_pre - Device initialization routine
 *
 *   Called when the driver is being loaded, before iflib allocates
 *   queues.  Maps BAR0, initializes the VF shared code and mailbox,
 *   resets the hardware, negotiates the mailbox API with the PF, and
 *   fills in the iflib softc-ctx limits (queue counts, ring sizes,
 *   offload capabilities).
 *
 *   return 0 on success, positive errno on failure
 ************************************************************************/
static int
ixv_if_attach_pre(if_ctx_t ctx)
{
	struct adapter  *adapter;
	device_t        dev;
	if_softc_ctx_t  scctx;
	struct ixgbe_hw *hw;
	int             error = 0;

	INIT_DEBUGOUT("ixv_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	dev = iflib_get_dev(ctx);
	adapter = iflib_get_softc(ctx);
	adapter->dev = dev;
	adapter->ctx = ctx;
	adapter->hw.back = adapter;
	scctx = adapter->shared = iflib_get_softc_ctx(ctx);
	adapter->media = iflib_get_media(ctx);
	hw = &adapter->hw;

	/* Do base PCI setup - map BAR0 */
	if (ixv_allocate_pci_resources(ctx)) {
		device_printf(dev, "ixv_allocate_pci_resources() failed!\n");
		error = ENXIO;
		goto err_out;
	}

	/* SYSCTL APIs */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
	    CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixv_sysctl_debug, "I",
	    "Debug Info");

	/* Determine hardware revision */
	ixv_identify_hardware(ctx);
	ixv_init_device_features(adapter);

	/* Initialize the shared code */
	error = ixgbe_init_ops_vf(hw);
	if (error) {
		device_printf(dev, "ixgbe_init_ops_vf() failed!\n");
		error = EIO;
		goto err_out;
	}

	/* Setup the mailbox */
	ixgbe_init_mbx_params_vf(hw);

	/* Reset must succeed before the VF may talk to the PF. */
	error = hw->mac.ops.reset_hw(hw);
	if (error == IXGBE_ERR_RESET_FAILED)
		device_printf(dev, "...reset_hw() failure: Reset Failed!\n");
	else if (error)
		device_printf(dev, "...reset_hw() failed with error %d\n",
		    error);
	if (error) {
		error = EIO;
		goto err_out;
	}

	error = hw->mac.ops.init_hw(hw);
	if (error) {
		device_printf(dev, "...init_hw() failed with error %d\n",
		    error);
		error = EIO;
		goto err_out;
	}

	/* Negotiate mailbox API version */
	error = ixv_negotiate_api(adapter);
	if (error) {
		device_printf(dev,
		    "Mailbox API negotiation failed during attach!\n");
		goto err_out;
	}

	/*
	 * If no mac address was assigned by the PF, make a random,
	 * locally-administered (0x02) unicast (clear 0x01) one.
	 */
	if (!ixv_check_ether_addr(hw->mac.addr)) {
		u8 addr[ETHER_ADDR_LEN];
		arc4rand(&addr, sizeof(addr), 0);
		addr[0] &= 0xFE;
		addr[0] |= 0x02;
		bcopy(addr, hw->mac.addr, sizeof(addr));
		bcopy(addr, hw->mac.perm_addr, sizeof(addr));
	}

	/* Most of the iflib initialization... */

	iflib_set_mac(ctx, hw->mac.addr);
	/* X550-family VFs support two queue pairs; older VFs only one. */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550_vf:
	case ixgbe_mac_X550EM_x_vf:
	case ixgbe_mac_X550EM_a_vf:
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 2;
		break;
	default:
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 1;
	}
	/* TX ring carries an extra u32 (head writeback); pad to DBA_ALIGN. */
	scctx->isc_txqsizes[0] =
	    roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
	    sizeof(u32), DBA_ALIGN);
	scctx->isc_rxqsizes[0] =
	    roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
	    DBA_ALIGN);
	/* XXX */
	scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
	    CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
	scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
	scctx->isc_msix_bar = PCIR_BAR(MSIX_82598_BAR);
	scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
	scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
	scctx->isc_tx_tso_segsize_max = PAGE_SIZE;

	scctx->isc_txrx = &ixgbe_txrx;

	/*
	 * Tell the upper layer(s) we support everything the PF
	 * driver does except...
	 *   Wake-on-LAN
	 */
	scctx->isc_capabilities = IXGBE_CAPS;
	scctx->isc_capabilities ^= IFCAP_WOL;
	scctx->isc_capenable = scctx->isc_capabilities;

	INIT_DEBUGOUT("ixv_if_attach_pre: end");

	return (0);

err_out:
	ixv_free_pci_resources(ctx);

	return (error);
} /* ixv_if_attach_pre */
524 
525 static int
526 ixv_if_attach_post(if_ctx_t ctx)
527 {
528 	struct adapter *adapter = iflib_get_softc(ctx);
529 	device_t       dev = iflib_get_dev(ctx);
530 	int            error = 0;
531 
532 	/* Setup OS specific network interface */
533 	error = ixv_setup_interface(ctx);
534 	if (error) {
535 		device_printf(dev, "Interface setup failed: %d\n", error);
536 		goto end;
537 	}
538 
539 	/* Do the stats setup */
540 	ixv_save_stats(adapter);
541 	ixv_init_stats(adapter);
542 	ixv_add_stats_sysctls(adapter);
543 
544 end:
545 	return error;
546 } /* ixv_if_attach_post */
547 
548 /************************************************************************
549  * ixv_detach - Device removal routine
550  *
551  *   Called when the driver is being removed.
552  *   Stops the adapter and deallocates all the resources
553  *   that were allocated for driver operation.
554  *
555  *   return 0 on success, positive on failure
556  ************************************************************************/
557 static int
558 ixv_if_detach(if_ctx_t ctx)
559 {
560 	INIT_DEBUGOUT("ixv_detach: begin");
561 
562 	ixv_free_pci_resources(ctx);
563 
564 	return (0);
565 } /* ixv_if_detach */
566 
567 /************************************************************************
568  * ixv_if_mtu_set
569  ************************************************************************/
570 static int
571 ixv_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
572 {
573 	struct adapter *adapter = iflib_get_softc(ctx);
574 	struct ifnet   *ifp = iflib_get_ifp(ctx);
575 	int            error = 0;
576 
577 	IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
578 	if (mtu > IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR) {
579 		error = EINVAL;
580 	} else {
581 		ifp->if_mtu = mtu;
582 		adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
583 	}
584 
585 	return error;
586 } /* ixv_if_mtu_set */
587 
/************************************************************************
 * ixv_if_init - Init entry point
 *
 *   Used in two ways: It is used by the stack as an init entry
 *   point in network interface structure. It is also used
 *   by the driver as a hw/sw initialization routine to get
 *   to a consistent state.
 *
 *   Programs the MAC address, resets the VF and renegotiates the
 *   mailbox API, then configures TX/RX units, multicast, VLAN,
 *   MSI-X routing, and finally enables interrupts.  The order of
 *   these steps matters; do not reorder.
 ************************************************************************/
static void
ixv_if_init(if_ctx_t ctx)
{
	struct adapter  *adapter = iflib_get_softc(ctx);
	struct ifnet    *ifp = iflib_get_ifp(ctx);
	device_t        dev = iflib_get_dev(ctx);
	struct ixgbe_hw *hw = &adapter->hw;
	int             error = 0;

	INIT_DEBUGOUT("ixv_if_init: begin");
	/* Clear the stopped flag so stop_adapter() performs a full stop. */
	hw->adapter_stopped = FALSE;
	hw->mac.ops.stop_adapter(hw);

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);

	/* Reset VF and renegotiate mailbox API version */
	hw->mac.ops.reset_hw(hw);
	hw->mac.ops.start_hw(hw);
	error = ixv_negotiate_api(adapter);
	if (error) {
		device_printf(dev,
		    "Mailbox API negotiation failed in if_init!\n");
		return;
	}

	ixv_initialize_transmit_units(ctx);

	/* Setup Multicast table */
	ixv_if_multi_set(ctx);

	/*
	 * Determine the correct mbuf pool
	 * for doing jumbo/headersplit
	 */
	if (ifp->if_mtu > ETHERMTU)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else
		adapter->rx_mbuf_sz = MCLBYTES;

	/* Configure RX settings */
	ixv_initialize_receive_units(ctx);

	/* Set up VLAN offload and filter */
	ixv_setup_vlan_support(ctx);

	/* Set up MSI-X routing */
	ixv_configure_ivars(adapter);

	/* Set up auto-mask */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);

	/* Stats init */
	ixv_init_stats(adapter);

	/* Config/Enable Link */
	hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
	    FALSE);

	/* And now turn on interrupts */
	ixv_if_enable_intr(ctx);

	return;
} /* ixv_if_init */
669 
670 /************************************************************************
671  * ixv_enable_queue
672  ************************************************************************/
673 static inline void
674 ixv_enable_queue(struct adapter *adapter, u32 vector)
675 {
676 	struct ixgbe_hw *hw = &adapter->hw;
677 	u32             queue = 1 << vector;
678 	u32             mask;
679 
680 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
681 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
682 } /* ixv_enable_queue */
683 
684 /************************************************************************
685  * ixv_disable_queue
686  ************************************************************************/
687 static inline void
688 ixv_disable_queue(struct adapter *adapter, u32 vector)
689 {
690 	struct ixgbe_hw *hw = &adapter->hw;
691 	u64             queue = (u64)(1 << vector);
692 	u32             mask;
693 
694 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
695 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
696 } /* ixv_disable_queue */
697 
698 
699 /************************************************************************
700  * ixv_msix_que - MSI-X Queue Interrupt Service routine
701  ************************************************************************/
702 static int
703 ixv_msix_que(void *arg)
704 {
705 	struct ix_rx_queue *que = arg;
706 	struct adapter     *adapter = que->adapter;
707 
708 	ixv_disable_queue(adapter, que->msix);
709 	++que->irqs;
710 
711 	return (FILTER_SCHEDULE_THREAD);
712 } /* ixv_msix_que */
713 
714 /************************************************************************
715  * ixv_msix_mbx
716  ************************************************************************/
717 static int
718 ixv_msix_mbx(void *arg)
719 {
720 	struct adapter  *adapter = arg;
721 	struct ixgbe_hw *hw = &adapter->hw;
722 	u32             reg;
723 
724 	++adapter->link_irq;
725 
726 	/* First get the cause */
727 	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
728 	/* Clear interrupt with write */
729 	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
730 
731 	/* Link status change */
732 	if (reg & IXGBE_EICR_LSC)
733 		iflib_admin_intr_deferred(adapter->ctx);
734 
735 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
736 
737 	return (FILTER_HANDLED);
738 } /* ixv_msix_mbx */
739 
740 /************************************************************************
741  * ixv_media_status - Media Ioctl callback
742  *
743  *   Called whenever the user queries the status of
744  *   the interface using ifconfig.
745  ************************************************************************/
746 static void
747 ixv_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
748 {
749 	struct adapter *adapter = iflib_get_softc(ctx);
750 
751 	INIT_DEBUGOUT("ixv_media_status: begin");
752 
753 	iflib_admin_intr_deferred(ctx);
754 
755 	ifmr->ifm_status = IFM_AVALID;
756 	ifmr->ifm_active = IFM_ETHER;
757 
758 	if (!adapter->link_active)
759 		return;
760 
761 	ifmr->ifm_status |= IFM_ACTIVE;
762 
763 	switch (adapter->link_speed) {
764 		case IXGBE_LINK_SPEED_1GB_FULL:
765 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
766 			break;
767 		case IXGBE_LINK_SPEED_10GB_FULL:
768 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
769 			break;
770 		case IXGBE_LINK_SPEED_100_FULL:
771 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
772 			break;
773 		case IXGBE_LINK_SPEED_10_FULL:
774 			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
775 			break;
776 	}
777 } /* ixv_if_media_status */
778 
779 /************************************************************************
780  * ixv_if_media_change - Media Ioctl callback
781  *
782  *   Called when the user changes speed/duplex using
783  *   media/mediopt option with ifconfig.
784  ************************************************************************/
785 static int
786 ixv_if_media_change(if_ctx_t ctx)
787 {
788 	struct adapter *adapter = iflib_get_softc(ctx);
789 	struct ifmedia *ifm = iflib_get_media(ctx);
790 
791 	INIT_DEBUGOUT("ixv_media_change: begin");
792 
793 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
794 		return (EINVAL);
795 
796 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
797 	case IFM_AUTO:
798 		break;
799 	default:
800 		device_printf(adapter->dev, "Only auto media type\n");
801 		return (EINVAL);
802 	}
803 
804 	return (0);
805 } /* ixv_if_media_change */
806 
807 
808 /************************************************************************
809  * ixv_negotiate_api
810  *
811  *   Negotiate the Mailbox API with the PF;
812  *   start with the most featured API first.
813  ************************************************************************/
814 static int
815 ixv_negotiate_api(struct adapter *adapter)
816 {
817 	struct ixgbe_hw *hw = &adapter->hw;
818 	int             mbx_api[] = { ixgbe_mbox_api_11,
819 	                              ixgbe_mbox_api_10,
820 	                              ixgbe_mbox_api_unknown };
821 	int             i = 0;
822 
823 	while (mbx_api[i] != ixgbe_mbox_api_unknown) {
824 		if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
825 			return (0);
826 		i++;
827 	}
828 
829 	return (EINVAL);
830 } /* ixv_negotiate_api */
831 
832 
833 /************************************************************************
834  * ixv_if_multi_set - Multicast Update
835  *
836  *   Called whenever multicast address list is updated.
837  ************************************************************************/
838 static void
839 ixv_if_multi_set(if_ctx_t ctx)
840 {
841 	u8       mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
842 	struct adapter     *adapter = iflib_get_softc(ctx);
843 	u8                 *update_ptr;
844 	struct ifmultiaddr *ifma;
845 	if_t               ifp = iflib_get_ifp(ctx);
846 	int                mcnt = 0;
847 
848 	IOCTL_DEBUGOUT("ixv_if_multi_set: begin");
849 
850 	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
851 		if (ifma->ifma_addr->sa_family != AF_LINK)
852 			continue;
853 		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
854 		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
855 		    IXGBE_ETH_LENGTH_OF_ADDRESS);
856 		mcnt++;
857 	}
858 
859 	update_ptr = mta;
860 
861 	adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
862 	    ixv_mc_array_itr, TRUE);
863 } /* ixv_if_multi_set */
864 
865 /************************************************************************
866  * ixv_mc_array_itr
867  *
868  *   An iterator function needed by the multicast shared code.
869  *   It feeds the shared code routine the addresses in the
870  *   array of ixv_set_multi() one by one.
871  ************************************************************************/
872 static u8 *
873 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
874 {
875 	u8 *addr = *update_ptr;
876 	u8 *newptr;
877 
878 	*vmdq = 0;
879 
880 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
881 	*update_ptr = newptr;
882 
883 	return addr;
884 } /* ixv_mc_array_itr */
885 
886 /************************************************************************
887  * ixv_if_local_timer - Timer routine
888  *
889  *   Checks for link status, updates statistics,
890  *   and runs the watchdog check.
891  ************************************************************************/
892 static void
893 ixv_if_local_timer(if_ctx_t ctx, uint16_t qid)
894 {
895 	if (qid != 0)
896 		return;
897 
898 	/* Fire off the adminq task */
899 	iflib_admin_intr_deferred(ctx);
900 } /* ixv_if_local_timer */
901 
/************************************************************************
 * ixv_if_update_admin_status - Update OS on link state
 *
 * Note: Only updates the OS on the cached link state.
 *       The real check of the hardware only happens with
 *       a link interrupt.
 *
 * Runs in the iflib admin task.  Also reinitializes the interface if
 * the mailbox check fails, and refreshes the statistics counters.
 ************************************************************************/
static void
ixv_if_update_admin_status(if_ctx_t ctx)
{
	struct adapter *adapter = iflib_get_softc(ctx);
	device_t       dev = iflib_get_dev(ctx);
	s32            status;

	adapter->hw.mac.get_link_status = TRUE;

	status = ixgbe_check_link(&adapter->hw, &adapter->link_speed,
	    &adapter->link_up, FALSE);

	if (status != IXGBE_SUCCESS && adapter->hw.adapter_stopped == FALSE) {
		/* Mailbox's Clear To Send status is lost or timeout occurred.
		 * We need reinitialization. */
		iflib_get_ifp(ctx)->if_init(ctx);
	}

	if (adapter->link_up) {
		if (adapter->link_active == FALSE) {
			if (bootverbose)
				device_printf(dev, "Link is up %d Gbps %s \n",
				    ((adapter->link_speed == 128) ? 10 : 1),
				    "Full Duplex");
			adapter->link_active = TRUE;
			/*
			 * NOTE(review): baud is reported as 10 Gbps even
			 * when the printout above says 1 Gbps — confirm
			 * whether this should track link_speed.
			 */
			iflib_link_state_change(ctx, LINK_STATE_UP,
			    IF_Gbps(10));
		}
	} else { /* Link down */
		if (adapter->link_active == TRUE) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			iflib_link_state_change(ctx, LINK_STATE_DOWN,  0);
			adapter->link_active = FALSE;
		}
	}

	/* Stats Update */
	ixv_update_stats(adapter);
} /* ixv_if_update_admin_status */
949 
950 
951 /************************************************************************
952  * ixv_if_stop - Stop the hardware
953  *
954  *   Disables all traffic on the adapter by issuing a
955  *   global reset on the MAC and deallocates TX/RX buffers.
956  ************************************************************************/
static void
ixv_if_stop(if_ctx_t ctx)
{
	struct adapter  *adapter = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &adapter->hw;

	INIT_DEBUGOUT("ixv_stop: begin\n");

	/* Mask all interrupt causes before touching the MAC */
	ixv_if_disable_intr(ctx);

	/*
	 * Reset, then clear adapter_stopped before calling stop_adapter().
	 * NOTE(review): clearing adapter_stopped between the two shared-code
	 * calls appears deliberate — presumably so stop_adapter() runs its
	 * full disable sequence after the reset; confirm against the shared
	 * code before reordering these three statements.
	 */
	hw->mac.ops.reset_hw(hw);
	adapter->hw.adapter_stopped = FALSE;
	hw->mac.ops.stop_adapter(hw);

	/* Update the stack: mark link down and push cached state/stats up */
	adapter->link_up = FALSE;
	ixv_if_update_admin_status(ctx);

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
} /* ixv_if_stop */
978 
979 
980 /************************************************************************
981  * ixv_identify_hardware - Determine hardware revision.
982  ************************************************************************/
983 static void
984 ixv_identify_hardware(if_ctx_t ctx)
985 {
986 	struct adapter  *adapter = iflib_get_softc(ctx);
987 	device_t        dev = iflib_get_dev(ctx);
988 	struct ixgbe_hw *hw = &adapter->hw;
989 
990 	/* Save off the information about this board */
991 	hw->vendor_id = pci_get_vendor(dev);
992 	hw->device_id = pci_get_device(dev);
993 	hw->revision_id = pci_get_revid(dev);
994 	hw->subsystem_vendor_id = pci_get_subvendor(dev);
995 	hw->subsystem_device_id = pci_get_subdevice(dev);
996 
997 	/* A subset of set_mac_type */
998 	switch (hw->device_id) {
999 	case IXGBE_DEV_ID_82599_VF:
1000 		hw->mac.type = ixgbe_mac_82599_vf;
1001 		break;
1002 	case IXGBE_DEV_ID_X540_VF:
1003 		hw->mac.type = ixgbe_mac_X540_vf;
1004 		break;
1005 	case IXGBE_DEV_ID_X550_VF:
1006 		hw->mac.type = ixgbe_mac_X550_vf;
1007 		break;
1008 	case IXGBE_DEV_ID_X550EM_X_VF:
1009 		hw->mac.type = ixgbe_mac_X550EM_x_vf;
1010 		break;
1011 	case IXGBE_DEV_ID_X550EM_A_VF:
1012 		hw->mac.type = ixgbe_mac_X550EM_a_vf;
1013 		break;
1014 	default:
1015 		device_printf(dev, "unknown mac type\n");
1016 		hw->mac.type = ixgbe_mac_unknown;
1017 		break;
1018 	}
1019 } /* ixv_identify_hardware */
1020 
1021 /************************************************************************
1022  * ixv_if_msix_intr_assign - Setup MSI-X Interrupt resources and handlers
1023  ************************************************************************/
static int
ixv_if_msix_intr_assign(if_ctx_t ctx, int msix)
{
	struct adapter     *adapter = iflib_get_softc(ctx);
	device_t           dev = iflib_get_dev(ctx);
	struct ix_rx_queue *rx_que = adapter->rx_queues;
	struct ix_tx_queue *tx_que;
	int                error, rid, vector = 0;
	char               buf[16];

	/* One MSI-X vector per RX queue; resource ids are 1-based */
	for (int i = 0; i < adapter->num_rx_queues; i++, vector++, rx_que++) {
		rid = vector + 1;

		snprintf(buf, sizeof(buf), "rxq%d", i);
		error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
		    IFLIB_INTR_RX, ixv_msix_que, rx_que, rx_que->rxr.me, buf);

		if (error) {
			device_printf(iflib_get_dev(ctx),
			    "Failed to allocate que int %d err: %d", i, error);
			/*
			 * Shrink the queue count so the fail path below only
			 * walks queues that were touched.
			 * NOTE(review): i + 1 includes the queue whose
			 * allocation just failed; presumably freeing its
			 * never-allocated irq is a no-op — confirm against
			 * iflib_irq_free().
			 */
			adapter->num_rx_queues = i + 1;
			goto fail;
		}

		rx_que->msix = vector;
		adapter->active_queues |= (u64)(1 << rx_que->msix);

	}

	/*
	 * TX queues do not get their own vectors: each is serviced as a
	 * softirq off the RX vector it maps to (round-robin by index).
	 */
	for (int i = 0; i < adapter->num_tx_queues; i++) {
		snprintf(buf, sizeof(buf), "txq%d", i);
		tx_que = &adapter->tx_queues[i];
		tx_que->msix = i % adapter->num_rx_queues;
		iflib_softirq_alloc_generic(ctx,
		    &adapter->rx_queues[tx_que->msix].que_irq,
		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
	}
	/* The admin (mailbox) interrupt takes the vector after the queues */
	rid = vector + 1;
	error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid,
	    IFLIB_INTR_ADMIN, ixv_msix_mbx, adapter, 0, "aq");
	if (error) {
		device_printf(iflib_get_dev(ctx),
		    "Failed to register admin handler");
		return (error);
	}

	adapter->vector = vector;
	/*
	 * Due to a broken design QEMU will fail to properly
	 * enable the guest for MSIX unless the vectors in
	 * the table are all set up, so we must rewrite the
	 * ENABLE in the MSIX control register again at this
	 * point to cause it to successfully initialize us.
	 */
	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
		int msix_ctrl;
		pci_find_cap(dev, PCIY_MSIX, &rid);
		rid += PCIR_MSIX_CTRL;
		msix_ctrl = pci_read_config(dev, rid, 2);
		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
		pci_write_config(dev, rid, msix_ctrl, 2);
	}

	return (0);

fail:
	/* Release the admin irq (may be unallocated) and all queue irqs */
	iflib_irq_free(ctx, &adapter->irq);
	rx_que = adapter->rx_queues;
	for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++)
		iflib_irq_free(ctx, &rx_que->que_irq);

	return (error);
} /* ixv_if_msix_intr_assign */
1097 
1098 /************************************************************************
1099  * ixv_allocate_pci_resources
1100  ************************************************************************/
1101 static int
1102 ixv_allocate_pci_resources(if_ctx_t ctx)
1103 {
1104 	struct adapter *adapter = iflib_get_softc(ctx);
1105 	device_t       dev = iflib_get_dev(ctx);
1106 	int            rid;
1107 
1108 	rid = PCIR_BAR(0);
1109 	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1110 	    RF_ACTIVE);
1111 
1112 	if (!(adapter->pci_mem)) {
1113 		device_printf(dev, "Unable to allocate bus resource: memory\n");
1114 		return (ENXIO);
1115 	}
1116 
1117 	adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
1118 	adapter->osdep.mem_bus_space_handle =
1119 	    rman_get_bushandle(adapter->pci_mem);
1120 	adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
1121 
1122 	return (0);
1123 } /* ixv_allocate_pci_resources */
1124 
1125 /************************************************************************
1126  * ixv_free_pci_resources
1127  ************************************************************************/
1128 static void
1129 ixv_free_pci_resources(if_ctx_t ctx)
1130 {
1131 	struct adapter     *adapter = iflib_get_softc(ctx);
1132 	struct ix_rx_queue *que = adapter->rx_queues;
1133 	device_t           dev = iflib_get_dev(ctx);
1134 
1135 	/* Release all msix queue resources */
1136 	if (adapter->intr_type == IFLIB_INTR_MSIX)
1137 		iflib_irq_free(ctx, &adapter->irq);
1138 
1139 	if (que != NULL) {
1140 		for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
1141 			iflib_irq_free(ctx, &que->que_irq);
1142 		}
1143 	}
1144 
1145 	/* Clean the Legacy or Link interrupt last */
1146 	if (adapter->pci_mem != NULL)
1147 		bus_release_resource(dev, SYS_RES_MEMORY,
1148 				     PCIR_BAR(0), adapter->pci_mem);
1149 } /* ixv_free_pci_resources */
1150 
1151 /************************************************************************
1152  * ixv_setup_interface
1153  *
1154  *   Setup networking device structure and register an interface.
1155  ************************************************************************/
1156 static int
1157 ixv_setup_interface(if_ctx_t ctx)
1158 {
1159 	struct adapter *adapter = iflib_get_softc(ctx);
1160 	if_softc_ctx_t scctx = adapter->shared;
1161 	struct ifnet   *ifp = iflib_get_ifp(ctx);
1162 
1163 	INIT_DEBUGOUT("ixv_setup_interface: begin");
1164 
1165 	if_setbaudrate(ifp, IF_Gbps(10));
1166 	ifp->if_snd.ifq_maxlen = scctx->isc_ntxd[0] - 2;
1167 
1168 
1169 	adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
1170 	ifmedia_add(adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1171 	ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
1172 
1173 	return 0;
1174 } /* ixv_setup_interface */
1175 
1176 /************************************************************************
1177  * ixv_if_get_counter
1178  ************************************************************************/
1179 static uint64_t
1180 ixv_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1181 {
1182 	struct adapter *adapter = iflib_get_softc(ctx);
1183 	if_t           ifp = iflib_get_ifp(ctx);
1184 
1185 	switch (cnt) {
1186 	case IFCOUNTER_IPACKETS:
1187 		return (adapter->ipackets);
1188 	case IFCOUNTER_OPACKETS:
1189 		return (adapter->opackets);
1190 	case IFCOUNTER_IBYTES:
1191 		return (adapter->ibytes);
1192 	case IFCOUNTER_OBYTES:
1193 		return (adapter->obytes);
1194 	case IFCOUNTER_IMCASTS:
1195 		return (adapter->imcasts);
1196 	default:
1197 		return (if_get_counter_default(ifp, cnt));
1198 	}
1199 } /* ixv_if_get_counter */
1200 
1201 /************************************************************************
1202  * ixv_initialize_transmit_units - Enable transmit unit.
1203  ************************************************************************/
1204 static void
1205 ixv_initialize_transmit_units(if_ctx_t ctx)
1206 {
1207 	struct adapter     *adapter = iflib_get_softc(ctx);
1208 	struct ixgbe_hw    *hw = &adapter->hw;
1209 	if_softc_ctx_t     scctx = adapter->shared;
1210 	struct ix_tx_queue *que = adapter->tx_queues;
1211 	int                i;
1212 
1213 	for (i = 0; i < adapter->num_tx_queues; i++, que++) {
1214 		struct tx_ring *txr = &que->txr;
1215 		u64            tdba = txr->tx_paddr;
1216 		u32            txctrl, txdctl;
1217 		int            j = txr->me;
1218 
1219 		/* Set WTHRESH to 8, burst writeback */
1220 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1221 		txdctl |= (8 << 16);
1222 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1223 
1224 		/* Set the HW Tx Head and Tail indices */
1225 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(j), 0);
1226 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(j), 0);
1227 
1228 		/* Set Tx Tail register */
1229 		txr->tail = IXGBE_VFTDT(j);
1230 
1231 		txr->tx_rs_cidx = txr->tx_rs_pidx = txr->tx_cidx_processed = 0;
1232 		for (int k = 0; k < scctx->isc_ntxd[0]; k++)
1233 			txr->tx_rsq[k] = QIDX_INVALID;
1234 
1235 		/* Set Ring parameters */
1236 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
1237 		    (tdba & 0x00000000ffffffffULL));
1238 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
1239 		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j),
1240 		    scctx->isc_ntxd[0] * sizeof(struct ixgbe_legacy_tx_desc));
1241 		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
1242 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
1243 		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
1244 
1245 		/* Now enable */
1246 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1247 		txdctl |= IXGBE_TXDCTL_ENABLE;
1248 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1249 	}
1250 
1251 	return;
1252 } /* ixv_initialize_transmit_units */
1253 
1254 /************************************************************************
1255  * ixv_initialize_rss_mapping
1256  ************************************************************************/
static void
ixv_initialize_rss_mapping(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32             reta = 0, mrqc, rss_key[10];
	int             queue_id;
	int             i, j;
	u32             rss_hash_config;

	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *)&rss_key);
	} else {
		/* set up random bits */
		arc4rand(&rss_key, sizeof(rss_key), 0);
	}

	/* Now fill out hash function seeds (10 x 32-bit = 40-byte key) */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);

	/* Set up the redirection table (64 entries, round-robin default) */
	for (i = 0, j = 0; i < 64; i++, j++) {
		if (j == adapter->num_rx_queues)
			j = 0;

		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_rx_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % adapter->num_rx_queues;
		} else
			queue_id = j;

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
		reta >>= 8;
		reta |= ((uint32_t)queue_id) << 24;
		/* Flush one 32-bit VFRETA register per four entries */
		if ((i & 3) == 3) {
			IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
			reta = 0;
		}
	}

	/* Perform hash on these packet types */
	if (adapter->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
		                | RSS_HASHTYPE_RSS_TCP_IPV4
		                | RSS_HASHTYPE_RSS_IPV6
		                | RSS_HASHTYPE_RSS_TCP_IPV6;
	}

	/* Translate the stack's hash config into MRQC field-enable bits;
	 * the *_EX variants have no VF register support, so just warn. */
	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
		    __func__);
	IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
} /* ixv_initialize_rss_mapping */
1345 
1346 
1347 /************************************************************************
1348  * ixv_initialize_receive_units - Setup receive registers and features.
1349  ************************************************************************/
static void
ixv_initialize_receive_units(if_ctx_t ctx)
{
	struct adapter     *adapter = iflib_get_softc(ctx);
	if_softc_ctx_t     scctx;
	struct ixgbe_hw    *hw = &adapter->hw;
	struct ifnet       *ifp = iflib_get_ifp(ctx);
	struct ix_rx_queue *que = adapter->rx_queues;
	u32                bufsz, psrtype;

	/* Pick the RX buffer size (SRRCTL encodes it in 1KB units) */
	if (ifp->if_mtu > ETHERMTU)
		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	else
		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	psrtype = IXGBE_PSRTYPE_TCPHDR
	        | IXGBE_PSRTYPE_UDPHDR
	        | IXGBE_PSRTYPE_IPV4HDR
	        | IXGBE_PSRTYPE_IPV6HDR
	        | IXGBE_PSRTYPE_L2HDR;

	/* NOTE(review): bit 29 is set when multiple queues are in use —
	 * presumably the RSS queues-per-pool field; confirm in datasheet */
	if (adapter->num_rx_queues > 1)
		psrtype |= 1 << 29;

	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);

	/* Tell PF our max_frame size */
	if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) {
		device_printf(adapter->dev, "There is a problem with the PF setup.  It is likely the receive unit for this VF will not function correctly.\n");
	}
	scctx = adapter->shared;

	for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
		struct rx_ring *rxr = &que->rxr;
		u64            rdba = rxr->rx_paddr;
		u32            reg, rxdctl;
		int            j = rxr->me;

		/* Disable the queue, polling up to 10ms for it to quiesce */
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
		for (int k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
			    IXGBE_RXDCTL_ENABLE)
				msec_delay(1);
			else
				break;
		}
		wmb();
		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j),
		    scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));

		/* Reset the ring indices */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);

		/* Set up the SRRCTL register: buffer size + advanced
		 * one-buffer descriptor format */
		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(j));
		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		reg |= bufsz;
		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(j), reg);

		/* Capture Rx Tail index */
		rxr->tail = IXGBE_VFRDT(rxr->me);

		/* Do the queue enabling last, polling until it sticks */
		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
		for (int l = 0; l < 10; l++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			msec_delay(1);
		}
		wmb();

		/* Set the Tail Pointer */
#ifdef DEV_NETMAP
		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
		if (ifp->if_capenable & IFCAP_NETMAP) {
			struct netmap_adapter *na = NA(ifp);
			struct netmap_kring *kring = na->rx_rings[j];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
			    scctx->isc_nrxd[0] - 1);
	}

	/* Program the RSS key, redirection table and hash-type config */
	ixv_initialize_rss_mapping(adapter);
} /* ixv_initialize_receive_units */
1465 
1466 /************************************************************************
1467  * ixv_setup_vlan_support
1468  ************************************************************************/
static void
ixv_setup_vlan_support(if_ctx_t ctx)
{
	struct ifnet	*ifp = iflib_get_ifp(ctx);
	struct adapter  *adapter = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &adapter->hw;
	u32             ctrl, vid, vfta, retry;

	/*
	 * We get here thru if_init, meaning
	 * a soft reset, this has already cleared
	 * the VFTA and other state, so if there
	 * have been no vlan's registered do nothing.
	 */
	if (adapter->num_vlans == 0)
		return;

	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
		/* Enable VLAN tag stripping on every RX queue */
		for (int i = 0; i < adapter->num_rx_queues; i++) {
			ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
			ctrl |= IXGBE_RXDCTL_VME;
			IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
			/*
			 * Let Rx path know that it needs to store VLAN tag
			 * as part of extra mbuf info.
			 */
			adapter->rx_queues[i].rxr.vtag_strip = TRUE;
		}
	}

	/*
	 * If filtering VLAN tags is disabled,
	 * there is no need to fill VLAN Filter Table Array (VFTA).
	 */
	if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
		return;

	/*
	 * A soft reset zero's out the VFTA, so
	 * we need to repopulate it now.
	 */
	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
		if (ixv_shadow_vfta[i] == 0)
			continue;
		vfta = ixv_shadow_vfta[i];
		/*
		 * Reconstruct the vlan id's
		 * based on the bits set in each
		 * of the array ints.
		 */
		for (int j = 0; j < 32; j++) {
			retry = 0;
			if ((vfta & (1 << j)) == 0)
				continue;
			vid = (i * 32) + j;
			/*
			 * Call the shared code mailbox routine; mailbox
			 * calls can fail transiently, so retry up to 5
			 * times before giving up on this vid.
			 */
			while (hw->mac.ops.set_vfta(hw, vid, 0, TRUE, FALSE)) {
				if (++retry > 5)
					break;
			}
		}
	}
} /* ixv_setup_vlan_support */
1533 
1534 /************************************************************************
1535  * ixv_if_register_vlan
1536  *
1537  *   Run via a vlan config EVENT, it enables us to use the
1538  *   HW Filter table since we can get the vlan id. This just
1539  *   creates the entry in the soft version of the VFTA, init
1540  *   will repopulate the real table.
1541  ************************************************************************/
1542 static void
1543 ixv_if_register_vlan(if_ctx_t ctx, u16 vtag)
1544 {
1545 	struct adapter *adapter = iflib_get_softc(ctx);
1546 	u16            index, bit;
1547 
1548 	index = (vtag >> 5) & 0x7F;
1549 	bit = vtag & 0x1F;
1550 	ixv_shadow_vfta[index] |= (1 << bit);
1551 	++adapter->num_vlans;
1552 } /* ixv_if_register_vlan */
1553 
1554 /************************************************************************
1555  * ixv_if_unregister_vlan
1556  *
1557  *   Run via a vlan unconfig EVENT, remove our entry
1558  *   in the soft vfta.
1559  ************************************************************************/
1560 static void
1561 ixv_if_unregister_vlan(if_ctx_t ctx, u16 vtag)
1562 {
1563 	struct adapter *adapter = iflib_get_softc(ctx);
1564 	u16            index, bit;
1565 
1566 	index = (vtag >> 5) & 0x7F;
1567 	bit = vtag & 0x1F;
1568 	ixv_shadow_vfta[index] &= ~(1 << bit);
1569 	--adapter->num_vlans;
1570 } /* ixv_if_unregister_vlan */
1571 
1572 /************************************************************************
1573  * ixv_if_enable_intr
1574  ************************************************************************/
1575 static void
1576 ixv_if_enable_intr(if_ctx_t ctx)
1577 {
1578 	struct adapter  *adapter = iflib_get_softc(ctx);
1579 	struct ixgbe_hw *hw = &adapter->hw;
1580 	struct ix_rx_queue *que = adapter->rx_queues;
1581 	u32             mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
1582 
1583 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
1584 
1585 	mask = IXGBE_EIMS_ENABLE_MASK;
1586 	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
1587 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
1588 
1589 	for (int i = 0; i < adapter->num_rx_queues; i++, que++)
1590 		ixv_enable_queue(adapter, que->msix);
1591 
1592 	IXGBE_WRITE_FLUSH(hw);
1593 } /* ixv_if_enable_intr */
1594 
1595 /************************************************************************
1596  * ixv_if_disable_intr
1597  ************************************************************************/
1598 static void
1599 ixv_if_disable_intr(if_ctx_t ctx)
1600 {
1601 	struct adapter *adapter = iflib_get_softc(ctx);
1602 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
1603 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
1604 	IXGBE_WRITE_FLUSH(&adapter->hw);
1605 } /* ixv_if_disable_intr */
1606 
1607 /************************************************************************
1608  * ixv_if_rx_queue_intr_enable
1609  ************************************************************************/
1610 static int
1611 ixv_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
1612 {
1613 	struct adapter	*adapter = iflib_get_softc(ctx);
1614 	struct ix_rx_queue *que = &adapter->rx_queues[rxqid];
1615 
1616 	ixv_enable_queue(adapter, que->rxr.me);
1617 
1618 	return (0);
1619 } /* ixv_if_rx_queue_intr_enable */
1620 
1621 /************************************************************************
1622  * ixv_set_ivar
1623  *
1624  *   Setup the correct IVAR register for a particular MSI-X interrupt
1625  *    - entry is the register array entry
1626  *    - vector is the MSI-X vector for this queue
1627  *    - type is RX/TX/MISC
1628  ************************************************************************/
static void
ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32             ivar, index;

	/* Mark the IVAR entry as valid */
	vector |= IXGBE_IVAR_ALLOC_VAL;

	if (type == -1) { /* MISC IVAR */
		/* Mailbox/other cause lives in the low byte of VTIVAR_MISC */
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {          /* RX/TX IVARS */
		/*
		 * Each 32-bit VTIVAR register packs four 8-bit entries:
		 * two queues (entry & 1) x two causes (type 0 = RX,
		 * 1 = TX).  index is the bit offset of this entry's byte
		 * within the register selected by entry >> 1.
		 */
		index = (16 * (entry & 1)) + (8 * type);
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
	}
} /* ixv_set_ivar */
1650 
1651 /************************************************************************
1652  * ixv_configure_ivars
1653  ************************************************************************/
1654 static void
1655 ixv_configure_ivars(struct adapter *adapter)
1656 {
1657 	struct ix_rx_queue *que = adapter->rx_queues;
1658 
1659 	MPASS(adapter->num_rx_queues == adapter->num_tx_queues);
1660 
1661 	for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
1662 		/* First the RX queue entry */
1663 		ixv_set_ivar(adapter, i, que->msix, 0);
1664 		/* ... and the TX */
1665 		ixv_set_ivar(adapter, i, que->msix, 1);
1666 		/* Set an initial value in EITR */
1667 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
1668 		    IXGBE_EITR_DEFAULT);
1669 	}
1670 
1671 	/* For the mailbox interrupt */
1672 	ixv_set_ivar(adapter, 1, adapter->vector, -1);
1673 } /* ixv_configure_ivars */
1674 
1675 /************************************************************************
1676  * ixv_save_stats
1677  *
1678  *   The VF stats registers never have a truly virgin
1679  *   starting point, so this routine tries to make an
1680  *   artificial one, marking ground zero on attach as
1681  *   it were.
1682  ************************************************************************/
1683 static void
1684 ixv_save_stats(struct adapter *adapter)
1685 {
1686 	if (adapter->stats.vf.vfgprc || adapter->stats.vf.vfgptc) {
1687 		adapter->stats.vf.saved_reset_vfgprc +=
1688 		    adapter->stats.vf.vfgprc - adapter->stats.vf.base_vfgprc;
1689 		adapter->stats.vf.saved_reset_vfgptc +=
1690 		    adapter->stats.vf.vfgptc - adapter->stats.vf.base_vfgptc;
1691 		adapter->stats.vf.saved_reset_vfgorc +=
1692 		    adapter->stats.vf.vfgorc - adapter->stats.vf.base_vfgorc;
1693 		adapter->stats.vf.saved_reset_vfgotc +=
1694 		    adapter->stats.vf.vfgotc - adapter->stats.vf.base_vfgotc;
1695 		adapter->stats.vf.saved_reset_vfmprc +=
1696 		    adapter->stats.vf.vfmprc - adapter->stats.vf.base_vfmprc;
1697 	}
1698 } /* ixv_save_stats */
1699 
1700 /************************************************************************
1701  * ixv_init_stats
1702  ************************************************************************/
1703 static void
1704 ixv_init_stats(struct adapter *adapter)
1705 {
1706 	struct ixgbe_hw *hw = &adapter->hw;
1707 
1708 	adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
1709 	adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
1710 	adapter->stats.vf.last_vfgorc |=
1711 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
1712 
1713 	adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
1714 	adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
1715 	adapter->stats.vf.last_vfgotc |=
1716 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
1717 
1718 	adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
1719 
1720 	adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
1721 	adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
1722 	adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
1723 	adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
1724 	adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
1725 } /* ixv_init_stats */
1726 
/*
 * UPDATE_STAT_32 - fold a 32-bit hardware counter into a 64-bit software
 * total.  If the current reading is below the previous one the counter
 * wrapped, so 2^32 is added to the running count before the low 32 bits
 * are replaced with the current reading.  Expects 'hw' in scope.
 */
#define UPDATE_STAT_32(reg, last, count)                \
{                                                       \
	u32 current = IXGBE_READ_REG(hw, reg);          \
	if (current < last)                             \
		count += 0x100000000LL;                 \
	last = current;                                 \
	count &= 0xFFFFFFFF00000000LL;                  \
	count |= current;                               \
}

/*
 * UPDATE_STAT_36 - as UPDATE_STAT_32, but for a counter split across two
 * registers (lsb + msb, 36 bits total per the masks below); a wrap adds
 * 2^36.  Expects 'hw' in scope.
 */
#define UPDATE_STAT_36(lsb, msb, last, count)           \
{                                                       \
	u64 cur_lsb = IXGBE_READ_REG(hw, lsb);          \
	u64 cur_msb = IXGBE_READ_REG(hw, msb);          \
	u64 current = ((cur_msb << 32) | cur_lsb);      \
	if (current < last)                             \
		count += 0x1000000000LL;                \
	last = current;                                 \
	count &= 0xFFFFFFF000000000LL;                  \
	count |= current;                               \
}
1748 
1749 /************************************************************************
1750  * ixv_update_stats - Update the board statistics counters.
1751  ************************************************************************/
1752 void
1753 ixv_update_stats(struct adapter *adapter)
1754 {
1755 	struct ixgbe_hw *hw = &adapter->hw;
1756 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
1757 
1758 	UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc,
1759 	    adapter->stats.vf.vfgprc);
1760 	UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc,
1761 	    adapter->stats.vf.vfgptc);
1762 	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
1763 	    adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc);
1764 	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
1765 	    adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc);
1766 	UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc,
1767 	    adapter->stats.vf.vfmprc);
1768 
1769 	/* Fill out the OS statistics structure */
1770 	IXGBE_SET_IPACKETS(adapter, stats->vfgprc);
1771 	IXGBE_SET_OPACKETS(adapter, stats->vfgptc);
1772 	IXGBE_SET_IBYTES(adapter, stats->vfgorc);
1773 	IXGBE_SET_OBYTES(adapter, stats->vfgotc);
1774 	IXGBE_SET_IMCASTS(adapter, stats->vfmprc);
1775 } /* ixv_update_stats */
1776 
1777 /************************************************************************
1778  * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
1779  ************************************************************************/
1780 static void
1781 ixv_add_stats_sysctls(struct adapter *adapter)
1782 {
1783 	device_t                dev = adapter->dev;
1784 	struct ix_tx_queue      *tx_que = adapter->tx_queues;
1785 	struct ix_rx_queue      *rx_que = adapter->rx_queues;
1786 	struct sysctl_ctx_list  *ctx = device_get_sysctl_ctx(dev);
1787 	struct sysctl_oid       *tree = device_get_sysctl_tree(dev);
1788 	struct sysctl_oid_list  *child = SYSCTL_CHILDREN(tree);
1789 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
1790 	struct sysctl_oid       *stat_node, *queue_node;
1791 	struct sysctl_oid_list  *stat_list, *queue_list;
1792 
1793 #define QUEUE_NAME_LEN 32
1794 	char                    namebuf[QUEUE_NAME_LEN];
1795 
1796 	/* Driver Statistics */
1797 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1798 	    CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
1799 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1800 	    CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");
1801 
1802 	for (int i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
1803 		struct tx_ring *txr = &tx_que->txr;
1804 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1805 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1806 		    CTLFLAG_RD, NULL, "Queue Name");
1807 		queue_list = SYSCTL_CHILDREN(queue_node);
1808 
1809 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
1810 		    CTLFLAG_RD, &(txr->tso_tx), "TSO Packets");
1811 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1812 		    CTLFLAG_RD, &(txr->total_packets), "TX Packets");
1813 	}
1814 
1815 	for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++) {
1816 		struct rx_ring *rxr = &rx_que->rxr;
1817 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1818 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1819 		    CTLFLAG_RD, NULL, "Queue Name");
1820 		queue_list = SYSCTL_CHILDREN(queue_node);
1821 
1822 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1823 		    CTLFLAG_RD, &(rx_que->irqs), "IRQs on queue");
1824 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1825 		    CTLFLAG_RD, &(rxr->rx_packets), "RX packets");
1826 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1827 		    CTLFLAG_RD, &(rxr->rx_bytes), "RX bytes");
1828 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1829 		    CTLFLAG_RD, &(rxr->rx_discarded), "Discarded RX packets");
1830 	}
1831 
1832 	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
1833 	    CTLFLAG_RD, NULL, "VF Statistics (read from HW registers)");
1834 	stat_list = SYSCTL_CHILDREN(stat_node);
1835 
1836 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1837 	    CTLFLAG_RD, &stats->vfgprc, "Good Packets Received");
1838 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1839 	    CTLFLAG_RD, &stats->vfgorc, "Good Octets Received");
1840 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1841 	    CTLFLAG_RD, &stats->vfmprc, "Multicast Packets Received");
1842 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1843 	    CTLFLAG_RD, &stats->vfgptc, "Good Packets Transmitted");
1844 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1845 	    CTLFLAG_RD, &stats->vfgotc, "Good Octets Transmitted");
1846 } /* ixv_add_stats_sysctls */
1847 
1848 /************************************************************************
1849  * ixv_print_debug_info
1850  *
1851  *   Called only when em_display_debug_stats is enabled.
1852  *   Provides a way to take a look at important statistics
1853  *   maintained by the driver and hardware.
1854  ************************************************************************/
1855 static void
1856 ixv_print_debug_info(struct adapter *adapter)
1857 {
1858 	device_t        dev = adapter->dev;
1859 	struct ixgbe_hw *hw = &adapter->hw;
1860 
1861 	device_printf(dev, "Error Byte Count = %u \n",
1862 	    IXGBE_READ_REG(hw, IXGBE_ERRBC));
1863 
1864 	device_printf(dev, "MBX IRQ Handled: %lu\n", (long)adapter->link_irq);
1865 } /* ixv_print_debug_info */
1866 
1867 /************************************************************************
1868  * ixv_sysctl_debug
1869  ************************************************************************/
1870 static int
1871 ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
1872 {
1873 	struct adapter *adapter;
1874 	int            error, result;
1875 
1876 	result = -1;
1877 	error = sysctl_handle_int(oidp, &result, 0, req);
1878 
1879 	if (error || !req->newptr)
1880 		return (error);
1881 
1882 	if (result == 1) {
1883 		adapter = (struct adapter *)arg1;
1884 		ixv_print_debug_info(adapter);
1885 	}
1886 
1887 	return error;
1888 } /* ixv_sysctl_debug */
1889 
1890 /************************************************************************
1891  * ixv_init_device_features
1892  ************************************************************************/
1893 static void
1894 ixv_init_device_features(struct adapter *adapter)
1895 {
1896 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
1897 	                  | IXGBE_FEATURE_VF
1898 	                  | IXGBE_FEATURE_RSS
1899 	                  | IXGBE_FEATURE_LEGACY_TX;
1900 
1901 	/* A tad short on feature flags for VFs, atm. */
1902 	switch (adapter->hw.mac.type) {
1903 	case ixgbe_mac_82599_vf:
1904 		break;
1905 	case ixgbe_mac_X540_vf:
1906 		break;
1907 	case ixgbe_mac_X550_vf:
1908 	case ixgbe_mac_X550EM_x_vf:
1909 	case ixgbe_mac_X550EM_a_vf:
1910 		adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
1911 		break;
1912 	default:
1913 		break;
1914 	}
1915 
1916 	/* Enabled by default... */
1917 	/* Is a virtual function (VF) */
1918 	if (adapter->feat_cap & IXGBE_FEATURE_VF)
1919 		adapter->feat_en |= IXGBE_FEATURE_VF;
1920 	/* Netmap */
1921 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
1922 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
1923 	/* Receive-Side Scaling (RSS) */
1924 	if (adapter->feat_cap & IXGBE_FEATURE_RSS)
1925 		adapter->feat_en |= IXGBE_FEATURE_RSS;
1926 	/* Needs advanced context descriptor regardless of offloads req'd */
1927 	if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
1928 		adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
1929 } /* ixv_init_device_features */
1930 
1931