1 /******************************************************************************
2 
3   Copyright (c) 2001-2017, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 
36 #include "opt_inet.h"
37 #include "opt_inet6.h"
38 #include "opt_rss.h"
39 
40 #include "ixgbe.h"
41 #include "ifdi_if.h"
42 
43 #include <net/netmap.h>
44 #include <dev/netmap/netmap_kern.h>
45 
46 /************************************************************************
47  * Driver version
48  ************************************************************************/
49 char ixv_driver_version[] = "2.0.1-k";
50 
51 /************************************************************************
52  * PCI Device ID Table
53  *
54  *   Used by iflib_device_probe() to select the devices to attach to
55  *   Last field is the device description string
56  *   Last entry must be PVID_END
57  *
58  *   { Vendor ID, Device ID, Description String }
59  ************************************************************************/
60 static pci_vendor_info_t ixv_vendor_info_array[] =
61 {
62 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
63 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
64 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
65 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
66 	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, "Intel(R) PRO/10GbE Virtual Function Network Driver"),
67 	/* required last entry */
68 PVID_END
69 };
70 
71 /************************************************************************
72  * Function prototypes
73  ************************************************************************/
74 static void     *ixv_register(device_t dev);
75 static int      ixv_if_attach_pre(if_ctx_t ctx);
76 static int      ixv_if_attach_post(if_ctx_t ctx);
77 static int      ixv_if_detach(if_ctx_t ctx);
78 
79 static int      ixv_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
80 static int      ixv_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
81 static int      ixv_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
82 static void     ixv_if_queues_free(if_ctx_t ctx);
83 static void     ixv_identify_hardware(if_ctx_t ctx);
84 static void     ixv_init_device_features(struct adapter *);
85 static int      ixv_allocate_pci_resources(if_ctx_t ctx);
86 static void     ixv_free_pci_resources(if_ctx_t ctx);
87 static int      ixv_setup_interface(if_ctx_t ctx);
88 static void     ixv_if_media_status(if_ctx_t, struct ifmediareq *);
89 static int      ixv_if_media_change(if_ctx_t ctx);
90 static void     ixv_if_update_admin_status(if_ctx_t ctx);
91 static int      ixv_if_msix_intr_assign(if_ctx_t ctx, int msix);
92 
93 static int      ixv_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
94 static void     ixv_if_init(if_ctx_t ctx);
95 static void     ixv_if_local_timer(if_ctx_t ctx, uint16_t qid);
96 static void     ixv_if_stop(if_ctx_t ctx);
97 static int      ixv_negotiate_api(struct adapter *);
98 
99 static void     ixv_initialize_transmit_units(if_ctx_t ctx);
100 static void     ixv_initialize_receive_units(if_ctx_t ctx);
101 static void     ixv_initialize_rss_mapping(struct adapter *);
102 
103 static void     ixv_setup_vlan_support(if_ctx_t ctx);
104 static void     ixv_configure_ivars(struct adapter *);
105 static void     ixv_if_enable_intr(if_ctx_t ctx);
106 static void     ixv_if_disable_intr(if_ctx_t ctx);
107 static void     ixv_if_multi_set(if_ctx_t ctx);
108 
109 static void     ixv_if_register_vlan(if_ctx_t, u16);
110 static void     ixv_if_unregister_vlan(if_ctx_t, u16);
111 
112 static uint64_t ixv_if_get_counter(if_ctx_t, ift_counter);
113 static bool	ixv_if_needs_restart(if_ctx_t, enum iflib_restart_event);
114 
115 static void     ixv_save_stats(struct adapter *);
116 static void     ixv_init_stats(struct adapter *);
117 static void     ixv_update_stats(struct adapter *);
118 static void     ixv_add_stats_sysctls(struct adapter *adapter);
119 
120 static int      ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
121 static void     ixv_set_ivar(struct adapter *, u8, u8, s8);
122 
123 static u8       *ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
124 
125 /* The MSI-X Interrupt handlers */
126 static int      ixv_msix_que(void *);
127 static int      ixv_msix_mbx(void *);
128 
129 /************************************************************************
130  * FreeBSD Device Interface Entry Points
131  ************************************************************************/
132 static device_method_t ixv_methods[] = {
133 	/* Device interface */
134 	DEVMETHOD(device_register, ixv_register),
135 	DEVMETHOD(device_probe, iflib_device_probe),
136 	DEVMETHOD(device_attach, iflib_device_attach),
137 	DEVMETHOD(device_detach, iflib_device_detach),
138 	DEVMETHOD(device_shutdown, iflib_device_shutdown),
139 	DEVMETHOD_END
140 };
141 
142 static driver_t ixv_driver = {
143 	"ixv", ixv_methods, sizeof(struct adapter),
144 };
145 
146 devclass_t ixv_devclass;
147 DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0);
148 IFLIB_PNP_INFO(pci, ixv_driver, ixv_vendor_info_array);
149 MODULE_DEPEND(ixv, iflib, 1, 1, 1);
150 MODULE_DEPEND(ixv, pci, 1, 1, 1);
151 MODULE_DEPEND(ixv, ether, 1, 1, 1);
152 
153 static device_method_t ixv_if_methods[] = {
154 	DEVMETHOD(ifdi_attach_pre, ixv_if_attach_pre),
155 	DEVMETHOD(ifdi_attach_post, ixv_if_attach_post),
156 	DEVMETHOD(ifdi_detach, ixv_if_detach),
157 	DEVMETHOD(ifdi_init, ixv_if_init),
158 	DEVMETHOD(ifdi_stop, ixv_if_stop),
159 	DEVMETHOD(ifdi_msix_intr_assign, ixv_if_msix_intr_assign),
160 	DEVMETHOD(ifdi_intr_enable, ixv_if_enable_intr),
161 	DEVMETHOD(ifdi_intr_disable, ixv_if_disable_intr),
162 	DEVMETHOD(ifdi_tx_queue_intr_enable, ixv_if_rx_queue_intr_enable),
163 	DEVMETHOD(ifdi_rx_queue_intr_enable, ixv_if_rx_queue_intr_enable),
164 	DEVMETHOD(ifdi_tx_queues_alloc, ixv_if_tx_queues_alloc),
165 	DEVMETHOD(ifdi_rx_queues_alloc, ixv_if_rx_queues_alloc),
166 	DEVMETHOD(ifdi_queues_free, ixv_if_queues_free),
167 	DEVMETHOD(ifdi_update_admin_status, ixv_if_update_admin_status),
168 	DEVMETHOD(ifdi_multi_set, ixv_if_multi_set),
169 	DEVMETHOD(ifdi_mtu_set, ixv_if_mtu_set),
170 	DEVMETHOD(ifdi_media_status, ixv_if_media_status),
171 	DEVMETHOD(ifdi_media_change, ixv_if_media_change),
172 	DEVMETHOD(ifdi_timer, ixv_if_local_timer),
173 	DEVMETHOD(ifdi_vlan_register, ixv_if_register_vlan),
174 	DEVMETHOD(ifdi_vlan_unregister, ixv_if_unregister_vlan),
175 	DEVMETHOD(ifdi_get_counter, ixv_if_get_counter),
176 	DEVMETHOD(ifdi_needs_restart, ixv_if_needs_restart),
177 	DEVMETHOD_END
178 };
179 
180 static driver_t ixv_if_driver = {
181   "ixv_if", ixv_if_methods, sizeof(struct adapter)
182 };
183 
184 /*
185  * TUNEABLE PARAMETERS:
186  */
187 
188 /* Flow control setting, default to full */
189 static int ixv_flow_control = ixgbe_fc_full;
190 TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);
191 
192 /*
193  * Header split: this causes the hardware to DMA
194  * the header into a separate mbuf from the payload.
195  * It can be a performance win in some workloads, but
196  * in others it actually hurts; it is off by default.
197  */
198 static int ixv_header_split = FALSE;
199 TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);
200 
201 /*
202  * Shadow VFTA table: this is needed because
203  * the real filter table gets cleared during
204  * a soft reset and we need to repopulate it.
205  */
206 static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE];
207 extern struct if_txrx ixgbe_txrx;
208 
209 static struct if_shared_ctx ixv_sctx_init = {
210 	.isc_magic = IFLIB_MAGIC,
211 	.isc_q_align = PAGE_SIZE, /* max(DBA_ALIGN, PAGE_SIZE) */
212 	.isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
213 	.isc_tx_maxsegsize = PAGE_SIZE,
214 	.isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
215 	.isc_tso_maxsegsize = PAGE_SIZE,
216 	.isc_rx_maxsize = MJUM16BYTES,
217 	.isc_rx_nsegments = 1,
218 	.isc_rx_maxsegsize = MJUM16BYTES,
219 	.isc_nfl = 1,
220 	.isc_ntxqs = 1,
221 	.isc_nrxqs = 1,
222 	.isc_admin_intrcnt = 1,
223 	.isc_vendor_info = ixv_vendor_info_array,
224 	.isc_driver_version = ixv_driver_version,
225 	.isc_driver = &ixv_if_driver,
226 	.isc_flags = IFLIB_IS_VF | IFLIB_TSO_INIT_IP,
227 
228 	.isc_nrxd_min = {MIN_RXD},
229 	.isc_ntxd_min = {MIN_TXD},
230 	.isc_nrxd_max = {MAX_RXD},
231 	.isc_ntxd_max = {MAX_TXD},
232 	.isc_nrxd_default = {DEFAULT_RXD},
233 	.isc_ntxd_default = {DEFAULT_TXD},
234 };
235 
236 if_shared_ctx_t ixv_sctx = &ixv_sctx_init;
237 
238 static void *
239 ixv_register(device_t dev)
240 {
241 	return (ixv_sctx);
242 }
243 
244 /************************************************************************
245  * ixv_if_tx_queues_alloc
246  ************************************************************************/
247 static int
248 ixv_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
249                        int ntxqs, int ntxqsets)
250 {
251 	struct adapter     *adapter = iflib_get_softc(ctx);
252 	if_softc_ctx_t     scctx = adapter->shared;
253 	struct ix_tx_queue *que;
254 	int                i, j, error;
255 
256 	MPASS(adapter->num_tx_queues == ntxqsets);
257 	MPASS(ntxqs == 1);
258 
259 	/* Allocate queue structure memory */
260 	adapter->tx_queues =
261 	    (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
262 	                                 M_DEVBUF, M_NOWAIT | M_ZERO);
263 	if (!adapter->tx_queues) {
264 		device_printf(iflib_get_dev(ctx),
265 		    "Unable to allocate TX ring memory\n");
266 		return (ENOMEM);
267 	}
268 
269 	for (i = 0, que = adapter->tx_queues; i < ntxqsets; i++, que++) {
270 		struct tx_ring *txr = &que->txr;
271 
272 		txr->me = i;
273 		txr->adapter =  que->adapter = adapter;
274 
275 		/* Allocate report status array */
276 		if (!(txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_DEVBUF, M_NOWAIT | M_ZERO))) {
277 			error = ENOMEM;
278 			goto fail;
279 		}
280 		for (j = 0; j < scctx->isc_ntxd[0]; j++)
281 			txr->tx_rsq[j] = QIDX_INVALID;
282 		/* get the virtual and physical address of the hardware queues */
283 		txr->tail = IXGBE_VFTDT(txr->me);
284 		txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i*ntxqs];
285 		txr->tx_paddr = paddrs[i*ntxqs];
286 
287 		txr->bytes = 0;
288 		txr->total_packets = 0;
289 
290 	}
291 
292 	device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
293 	    adapter->num_tx_queues);
294 
295 	return (0);
296 
297  fail:
298 	ixv_if_queues_free(ctx);
299 
300 	return (error);
301 } /* ixv_if_tx_queues_alloc */
302 
303 /************************************************************************
304  * ixv_if_rx_queues_alloc
305  ************************************************************************/
306 static int
307 ixv_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
308                        int nrxqs, int nrxqsets)
309 {
310 	struct adapter     *adapter = iflib_get_softc(ctx);
311 	struct ix_rx_queue *que;
312 	int                i, error;
313 
314 	MPASS(adapter->num_rx_queues == nrxqsets);
315 	MPASS(nrxqs == 1);
316 
317 	/* Allocate queue structure memory */
318 	adapter->rx_queues =
319 	    (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue) * nrxqsets,
320 	                                 M_DEVBUF, M_NOWAIT | M_ZERO);
321 	if (!adapter->rx_queues) {
322 		device_printf(iflib_get_dev(ctx),
323 		    "Unable to allocate TX ring memory\n");
324 		error = ENOMEM;
325 		goto fail;
326 	}
327 
328 	for (i = 0, que = adapter->rx_queues; i < nrxqsets; i++, que++) {
329 		struct rx_ring *rxr = &que->rxr;
330 		rxr->me = i;
331 		rxr->adapter = que->adapter = adapter;
332 
333 
334 		/* get the virtual and physical address of the hw queues */
335 		rxr->tail = IXGBE_VFRDT(rxr->me);
336 		rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i * nrxqs];
337 		rxr->rx_paddr = paddrs[i*nrxqs];
338 		rxr->bytes = 0;
339 		rxr->que = que;
340 	}
341 
342 	device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
343 	    adapter->num_rx_queues);
344 
345 	return (0);
346 
347 fail:
348 	ixv_if_queues_free(ctx);
349 
350 	return (error);
351 } /* ixv_if_rx_queues_alloc */
352 
353 /************************************************************************
354  * ixv_if_queues_free
355  ************************************************************************/
356 static void
357 ixv_if_queues_free(if_ctx_t ctx)
358 {
359 	struct adapter     *adapter = iflib_get_softc(ctx);
360 	struct ix_tx_queue *que = adapter->tx_queues;
361 	int                i;
362 
363 	if (que == NULL)
364 		goto free;
365 
366 	for (i = 0; i < adapter->num_tx_queues; i++, que++) {
367 		struct tx_ring *txr = &que->txr;
368 		if (txr->tx_rsq == NULL)
369 			break;
370 
371 		free(txr->tx_rsq, M_DEVBUF);
372 		txr->tx_rsq = NULL;
373 	}
374 	if (adapter->tx_queues != NULL)
375 		free(adapter->tx_queues, M_DEVBUF);
376 free:
377 	if (adapter->rx_queues != NULL)
378 		free(adapter->rx_queues, M_DEVBUF);
379 	adapter->tx_queues = NULL;
380 	adapter->rx_queues = NULL;
381 } /* ixv_if_queues_free */
382 
383 /************************************************************************
384  * ixv_if_attach_pre - Device initialization routine
385  *
386  *   Called when the driver is being loaded.
387  *   Identifies the type of hardware, allocates all resources
388  *   and initializes the hardware.
389  *
390  *   return 0 on success, positive on failure
391  ************************************************************************/
392 static int
393 ixv_if_attach_pre(if_ctx_t ctx)
394 {
395 	struct adapter  *adapter;
396 	device_t        dev;
397 	if_softc_ctx_t  scctx;
398 	struct ixgbe_hw *hw;
399 	int             error = 0;
400 
401 	INIT_DEBUGOUT("ixv_attach: begin");
402 
403 	/* Allocate, clear, and link in our adapter structure */
404 	dev = iflib_get_dev(ctx);
405 	adapter = iflib_get_softc(ctx);
406 	adapter->dev = dev;
407 	adapter->ctx = ctx;
408 	adapter->hw.back = adapter;
409 	scctx = adapter->shared = iflib_get_softc_ctx(ctx);
410 	adapter->media = iflib_get_media(ctx);
411 	hw = &adapter->hw;
412 
413 	/* Do base PCI setup - map BAR0 */
414 	if (ixv_allocate_pci_resources(ctx)) {
415 		device_printf(dev, "ixv_allocate_pci_resources() failed!\n");
416 		error = ENXIO;
417 		goto err_out;
418 	}
419 
420 	/* SYSCTL APIs */
421 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
422 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
423 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
424 	    adapter, 0, ixv_sysctl_debug, "I", "Debug Info");
425 
426 	/* Determine hardware revision */
427 	ixv_identify_hardware(ctx);
428 	ixv_init_device_features(adapter);
429 
430 	/* Initialize the shared code */
431 	error = ixgbe_init_ops_vf(hw);
432 	if (error) {
433 		device_printf(dev, "ixgbe_init_ops_vf() failed!\n");
434 		error = EIO;
435 		goto err_out;
436 	}
437 
438 	/* Setup the mailbox */
439 	ixgbe_init_mbx_params_vf(hw);
440 
441 	error = hw->mac.ops.reset_hw(hw);
442 	if (error == IXGBE_ERR_RESET_FAILED)
443 		device_printf(dev, "...reset_hw() failure: Reset Failed!\n");
444 	else if (error)
445 		device_printf(dev, "...reset_hw() failed with error %d\n",
446 		    error);
447 	if (error) {
448 		error = EIO;
449 		goto err_out;
450 	}
451 
452 	error = hw->mac.ops.init_hw(hw);
453 	if (error) {
454 		device_printf(dev, "...init_hw() failed with error %d\n",
455 		    error);
456 		error = EIO;
457 		goto err_out;
458 	}
459 
460 	/* Negotiate mailbox API version */
461 	error = ixv_negotiate_api(adapter);
462 	if (error) {
463 		device_printf(dev,
464 		    "Mailbox API negotiation failed during attach!\n");
465 		goto err_out;
466 	}
467 
468 	/* If no mac address was assigned, make a random one */
469 	if (!ixv_check_ether_addr(hw->mac.addr)) {
470 		u8 addr[ETHER_ADDR_LEN];
471 		arc4rand(&addr, sizeof(addr), 0);
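		/*
		 * Clear the multicast bit and set the locally
		 * administered bit so the random address is a valid
		 * unicast LAA.
		 */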
472 		addr[0] &= 0xFE;
473 		addr[0] |= 0x02;
474 		bcopy(addr, hw->mac.addr, sizeof(addr));
475 		bcopy(addr, hw->mac.perm_addr, sizeof(addr));
476 	}
477 
478 	/* Most of the iflib initialization... */
479 
480 	iflib_set_mac(ctx, hw->mac.addr);
481 	switch (adapter->hw.mac.type) {
482 	case ixgbe_mac_X550_vf:
483 	case ixgbe_mac_X550EM_x_vf:
484 	case ixgbe_mac_X550EM_a_vf:
485 		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 2;
486 		break;
487 	default:
488 		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 1;
489 	}
490 	scctx->isc_txqsizes[0] =
491 	    roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
492 	    sizeof(u32), DBA_ALIGN);
493 	scctx->isc_rxqsizes[0] =
494 	    roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
495 	    DBA_ALIGN);
496 	/* XXX */
497 	scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
498 	    CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
499 	scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
500 	scctx->isc_msix_bar = pci_msix_table_bar(dev);
501 	scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
502 	scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
503 	scctx->isc_tx_tso_segsize_max = PAGE_SIZE;
504 
505 	scctx->isc_txrx = &ixgbe_txrx;
506 
507 	/*
508 	 * Tell the upper layer(s) we support everything the PF
509 	 * driver does except...
510 	 *   Wake-on-LAN
511 	 */
512 	scctx->isc_capabilities = IXGBE_CAPS;
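	/*
	 * Note: the XOR below strips WOL only because IXGBE_CAPS is
	 * expected to include IFCAP_WOL; a plain &= ~IFCAP_WOL would
	 * be equivalent without relying on that assumption.
	 */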
513 	scctx->isc_capabilities ^= IFCAP_WOL;
514 	scctx->isc_capenable = scctx->isc_capabilities;
515 
516 	INIT_DEBUGOUT("ixv_if_attach_pre: end");
517 
518 	return (0);
519 
520 err_out:
521 	ixv_free_pci_resources(ctx);
522 
523 	return (error);
524 } /* ixv_if_attach_pre */
525 
526 static int
527 ixv_if_attach_post(if_ctx_t ctx)
528 {
529 	struct adapter *adapter = iflib_get_softc(ctx);
530 	device_t       dev = iflib_get_dev(ctx);
531 	int            error = 0;
532 
533 	/* Setup OS specific network interface */
534 	error = ixv_setup_interface(ctx);
535 	if (error) {
536 		device_printf(dev, "Interface setup failed: %d\n", error);
537 		goto end;
538 	}
539 
540 	/* Do the stats setup */
541 	ixv_save_stats(adapter);
542 	ixv_init_stats(adapter);
543 	ixv_add_stats_sysctls(adapter);
544 
545 end:
546 	return error;
547 } /* ixv_if_attach_post */
548 
549 /************************************************************************
550  * ixv_if_detach - Device removal routine
551  *
552  *   Called when the driver is being removed.
553  *   Stops the adapter and deallocates all the resources
554  *   that were allocated for driver operation.
555  *
556  *   return 0 on success, positive on failure
557  ************************************************************************/
558 static int
559 ixv_if_detach(if_ctx_t ctx)
560 {
561 	INIT_DEBUGOUT("ixv_detach: begin");
562 
563 	ixv_free_pci_resources(ctx);
564 
565 	return (0);
566 } /* ixv_if_detach */
567 
568 /************************************************************************
569  * ixv_if_mtu_set
570  ************************************************************************/
571 static int
572 ixv_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
573 {
574 	struct adapter *adapter = iflib_get_softc(ctx);
575 	struct ifnet   *ifp = iflib_get_ifp(ctx);
576 	int            error = 0;
577 
578 	IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
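	/*
	 * IXGBE_MTU_HDR accounts for the Ethernet header and CRC
	 * overhead, so the largest MTU accepted here is the maximum
	 * frame size minus that overhead (on the order of 9.7 KB of
	 * jumbo-frame payload for this hardware family).
	 */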
579 	if (mtu > IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR) {
580 		error = EINVAL;
581 	} else {
582 		ifp->if_mtu = mtu;
583 		adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
584 	}
585 
586 	return error;
587 } /* ixv_if_mtu_set */
588 
589 /************************************************************************
590  * ixv_if_init - Init entry point
591  *
592  *   Used in two ways: It is used by the stack as an init entry
593  *   point in network interface structure. It is also used
594  *   by the driver as a hw/sw initialization routine to get
595  *   to a consistent state.
596  *
597  *   return 0 on success, positive on failure
598  ************************************************************************/
599 static void
600 ixv_if_init(if_ctx_t ctx)
601 {
602 	struct adapter  *adapter = iflib_get_softc(ctx);
603 	struct ifnet    *ifp = iflib_get_ifp(ctx);
604 	device_t        dev = iflib_get_dev(ctx);
605 	struct ixgbe_hw *hw = &adapter->hw;
606 	int             error = 0;
607 
608 	INIT_DEBUGOUT("ixv_if_init: begin");
609 	hw->adapter_stopped = FALSE;
610 	hw->mac.ops.stop_adapter(hw);
611 
612 	/* reprogram the RAR[0] in case user changed it. */
613 	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
614 
615 	/* Get the latest MAC address; the user may have set a LAA */
616 	bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
617 	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);
618 
619 	/* Reset VF and renegotiate mailbox API version */
620 	hw->mac.ops.reset_hw(hw);
621 	hw->mac.ops.start_hw(hw);
622 	error = ixv_negotiate_api(adapter);
623 	if (error) {
624 		device_printf(dev,
625 		    "Mailbox API negotiation failed in if_init!\n");
626 		return;
627 	}
628 
629 	ixv_initialize_transmit_units(ctx);
630 
631 	/* Setup Multicast table */
632 	ixv_if_multi_set(ctx);
633 
634 	adapter->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);
635 
636 	/* Configure RX settings */
637 	ixv_initialize_receive_units(ctx);
638 
639 	/* Set up VLAN offload and filter */
640 	ixv_setup_vlan_support(ctx);
641 
642 	/* Set up MSI-X routing */
643 	ixv_configure_ivars(adapter);
644 
645 	/* Set up auto-mask */
646 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
647 
648 	/* Set moderation on the Link interrupt */
649 	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR);
650 
651 	/* Stats init */
652 	ixv_init_stats(adapter);
653 
654 	/* Config/Enable Link */
655 	hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up,
656 	    FALSE);
657 
658 	/* And now turn on interrupts */
659 	ixv_if_enable_intr(ctx);
660 
661 	return;
662 } /* ixv_if_init */
663 
664 /************************************************************************
665  * ixv_enable_queue
666  ************************************************************************/
667 static inline void
668 ixv_enable_queue(struct adapter *adapter, u32 vector)
669 {
670 	struct ixgbe_hw *hw = &adapter->hw;
671 	u32             queue = 1 << vector;
672 	u32             mask;
673 
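	/*
	 * Writing a 1 to a bit in VTEIMS unmasks (enables) the
	 * corresponding vector; the AND below keeps only bits that
	 * belong to the RX/TX queue interrupt range.
	 */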
674 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
675 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
676 } /* ixv_enable_queue */
677 
678 /************************************************************************
679  * ixv_disable_queue
680  ************************************************************************/
681 static inline void
682 ixv_disable_queue(struct adapter *adapter, u32 vector)
683 {
684 	struct ixgbe_hw *hw = &adapter->hw;
685 	u64             queue = (u64)(1 << vector);
686 	u32             mask;
687 
688 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
689 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
690 } /* ixv_disable_queue */
691 
692 
693 /************************************************************************
694  * ixv_msix_que - MSI-X Queue Interrupt Service routine
695  ************************************************************************/
696 static int
697 ixv_msix_que(void *arg)
698 {
699 	struct ix_rx_queue *que = arg;
700 	struct adapter     *adapter = que->adapter;
701 
702 	ixv_disable_queue(adapter, que->msix);
703 	++que->irqs;
704 
705 	return (FILTER_SCHEDULE_THREAD);
706 } /* ixv_msix_que */
707 
708 /************************************************************************
709  * ixv_msix_mbx
710  ************************************************************************/
711 static int
712 ixv_msix_mbx(void *arg)
713 {
714 	struct adapter  *adapter = arg;
715 	struct ixgbe_hw *hw = &adapter->hw;
716 	u32             reg;
717 
718 	++adapter->link_irq;
719 
720 	/* First get the cause */
721 	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
722 	/* Clear interrupt with write */
723 	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
724 
725 	/* Link status change */
726 	if (reg & IXGBE_EICR_LSC)
727 		iflib_admin_intr_deferred(adapter->ctx);
728 
729 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
730 
731 	return (FILTER_HANDLED);
732 } /* ixv_msix_mbx */
733 
734 /************************************************************************
735  * ixv_if_media_status - Media Ioctl callback
736  *
737  *   Called whenever the user queries the status of
738  *   the interface using ifconfig.
739  ************************************************************************/
740 static void
741 ixv_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
742 {
743 	struct adapter *adapter = iflib_get_softc(ctx);
744 
745 	INIT_DEBUGOUT("ixv_media_status: begin");
746 
747 	iflib_admin_intr_deferred(ctx);
748 
749 	ifmr->ifm_status = IFM_AVALID;
750 	ifmr->ifm_active = IFM_ETHER;
751 
752 	if (!adapter->link_active)
753 		return;
754 
755 	ifmr->ifm_status |= IFM_ACTIVE;
756 
757 	switch (adapter->link_speed) {
758 		case IXGBE_LINK_SPEED_1GB_FULL:
759 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
760 			break;
761 		case IXGBE_LINK_SPEED_10GB_FULL:
762 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
763 			break;
764 		case IXGBE_LINK_SPEED_100_FULL:
765 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
766 			break;
767 		case IXGBE_LINK_SPEED_10_FULL:
768 			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
769 			break;
770 	}
771 } /* ixv_if_media_status */
772 
773 /************************************************************************
774  * ixv_if_media_change - Media Ioctl callback
775  *
776  *   Called when the user changes speed/duplex using
777  *   media/mediaopt option with ifconfig.
778  ************************************************************************/
779 static int
780 ixv_if_media_change(if_ctx_t ctx)
781 {
782 	struct adapter *adapter = iflib_get_softc(ctx);
783 	struct ifmedia *ifm = iflib_get_media(ctx);
784 
785 	INIT_DEBUGOUT("ixv_media_change: begin");
786 
787 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
788 		return (EINVAL);
789 
790 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
791 	case IFM_AUTO:
792 		break;
793 	default:
794 		device_printf(adapter->dev, "Only auto media type\n");
795 		return (EINVAL);
796 	}
797 
798 	return (0);
799 } /* ixv_if_media_change */
800 
801 
802 /************************************************************************
803  * ixv_negotiate_api
804  *
805  *   Negotiate the Mailbox API with the PF;
806  *   start with the most featured API first.
807  ************************************************************************/
808 static int
809 ixv_negotiate_api(struct adapter *adapter)
810 {
811 	struct ixgbe_hw *hw = &adapter->hw;
812 	int             mbx_api[] = { ixgbe_mbox_api_11,
813 	                              ixgbe_mbox_api_10,
814 	                              ixgbe_mbox_api_unknown };
815 	int             i = 0;
816 
817 	while (mbx_api[i] != ixgbe_mbox_api_unknown) {
818 		if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
819 			return (0);
820 		i++;
821 	}
822 
823 	return (EINVAL);
824 } /* ixv_negotiate_api */
825 
826 
827 /************************************************************************
828  * ixv_if_multi_set - Multicast Update
829  *
830  *   Called whenever multicast address list is updated.
831  ************************************************************************/
832 static void
833 ixv_if_multi_set(if_ctx_t ctx)
834 {
835 	u8       mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
836 	struct adapter     *adapter = iflib_get_softc(ctx);
837 	u8                 *update_ptr;
838 	struct ifmultiaddr *ifma;
839 	if_t               ifp = iflib_get_ifp(ctx);
840 	int                mcnt = 0;
841 
842 	IOCTL_DEBUGOUT("ixv_if_multi_set: begin");
843 
844 	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
845 		if (ifma->ifma_addr->sa_family != AF_LINK)
846 			continue;
		/* Don't overflow the on-stack mta[] array. */
		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
			break;
847 		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
848 		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
849 		    IXGBE_ETH_LENGTH_OF_ADDRESS);
850 		mcnt++;
851 	}
852 
853 	update_ptr = mta;
854 
855 	adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw, update_ptr, mcnt,
856 	    ixv_mc_array_itr, TRUE);
857 } /* ixv_if_multi_set */
858 
859 /************************************************************************
860  * ixv_mc_array_itr
861  *
862  *   An iterator function needed by the multicast shared code.
863  *   It feeds the shared code routine the addresses in the
864  *   array built by ixv_if_multi_set(), one by one.
865  ************************************************************************/
866 static u8 *
867 ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
868 {
869 	u8 *addr = *update_ptr;
870 	u8 *newptr;
871 
872 	*vmdq = 0;
873 
874 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
875 	*update_ptr = newptr;
876 
877 	return addr;
878 } /* ixv_mc_array_itr */
879 
880 /************************************************************************
881  * ixv_if_local_timer - Timer routine
882  *
883  *   Checks for link status, updates statistics,
884  *   and runs the watchdog check.
885  ************************************************************************/
886 static void
887 ixv_if_local_timer(if_ctx_t ctx, uint16_t qid)
888 {
889 	if (qid != 0)
890 		return;
891 
892 	/* Fire off the adminq task */
893 	iflib_admin_intr_deferred(ctx);
894 } /* ixv_if_local_timer */
895 
896 /************************************************************************
897  * ixv_if_update_admin_status - Update OS on link state
898  *
899  * Note: Only updates the OS on the cached link state.
900  *       The real check of the hardware only happens with
901  *       a link interrupt.
902  ************************************************************************/
903 static void
904 ixv_if_update_admin_status(if_ctx_t ctx)
905 {
906 	struct adapter *adapter = iflib_get_softc(ctx);
907 	device_t       dev = iflib_get_dev(ctx);
908 	s32            status;
909 
910 	adapter->hw.mac.get_link_status = TRUE;
911 
912 	status = ixgbe_check_link(&adapter->hw, &adapter->link_speed,
913 	    &adapter->link_up, FALSE);
914 
915 	if (status != IXGBE_SUCCESS && adapter->hw.adapter_stopped == FALSE) {
916 		/* The mailbox's Clear To Send status was lost or a timeout
917 		 * occurred; reinitialize the interface. */
918 		iflib_get_ifp(ctx)->if_init(ctx);
919 	}
920 
921 	if (adapter->link_up) {
922 		if (adapter->link_active == FALSE) {
923 			if (bootverbose)
924 				device_printf(dev, "Link is up %d Gbps %s \n",
925 				    ((adapter->link_speed == 128) ? 10 : 1),
926 				    "Full Duplex");
927 			adapter->link_active = TRUE;
928 			iflib_link_state_change(ctx, LINK_STATE_UP,
929 			    IF_Gbps(10));
930 		}
931 	} else { /* Link down */
932 		if (adapter->link_active == TRUE) {
933 			if (bootverbose)
934 				device_printf(dev, "Link is Down\n");
935 			iflib_link_state_change(ctx, LINK_STATE_DOWN,  0);
936 			adapter->link_active = FALSE;
937 		}
938 	}
939 
940 	/* Stats Update */
941 	ixv_update_stats(adapter);
942 } /* ixv_if_update_admin_status */
943 
944 
945 /************************************************************************
946  * ixv_if_stop - Stop the hardware
947  *
948  *   Disables all traffic on the adapter by issuing a
949  *   global reset on the MAC and deallocates TX/RX buffers.
950  ************************************************************************/
951 static void
952 ixv_if_stop(if_ctx_t ctx)
953 {
954 	struct adapter  *adapter = iflib_get_softc(ctx);
955 	struct ixgbe_hw *hw = &adapter->hw;
956 
957 	INIT_DEBUGOUT("ixv_stop: begin\n");
958 
959 	ixv_if_disable_intr(ctx);
960 
961 	hw->mac.ops.reset_hw(hw);
962 	adapter->hw.adapter_stopped = FALSE;
963 	hw->mac.ops.stop_adapter(hw);
964 
965 	/* Update the stack */
966 	adapter->link_up = FALSE;
967 	ixv_if_update_admin_status(ctx);
968 
969 	/* reprogram the RAR[0] in case user changed it. */
970 	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
971 } /* ixv_if_stop */
972 
973 
974 /************************************************************************
975  * ixv_identify_hardware - Determine hardware revision.
976  ************************************************************************/
977 static void
978 ixv_identify_hardware(if_ctx_t ctx)
979 {
980 	struct adapter  *adapter = iflib_get_softc(ctx);
981 	device_t        dev = iflib_get_dev(ctx);
982 	struct ixgbe_hw *hw = &adapter->hw;
983 
984 	/* Save off the information about this board */
985 	hw->vendor_id = pci_get_vendor(dev);
986 	hw->device_id = pci_get_device(dev);
987 	hw->revision_id = pci_get_revid(dev);
988 	hw->subsystem_vendor_id = pci_get_subvendor(dev);
989 	hw->subsystem_device_id = pci_get_subdevice(dev);
990 
991 	/* A subset of set_mac_type */
992 	switch (hw->device_id) {
993 	case IXGBE_DEV_ID_82599_VF:
994 		hw->mac.type = ixgbe_mac_82599_vf;
995 		break;
996 	case IXGBE_DEV_ID_X540_VF:
997 		hw->mac.type = ixgbe_mac_X540_vf;
998 		break;
999 	case IXGBE_DEV_ID_X550_VF:
1000 		hw->mac.type = ixgbe_mac_X550_vf;
1001 		break;
1002 	case IXGBE_DEV_ID_X550EM_X_VF:
1003 		hw->mac.type = ixgbe_mac_X550EM_x_vf;
1004 		break;
1005 	case IXGBE_DEV_ID_X550EM_A_VF:
1006 		hw->mac.type = ixgbe_mac_X550EM_a_vf;
1007 		break;
1008 	default:
1009 		device_printf(dev, "unknown mac type\n");
1010 		hw->mac.type = ixgbe_mac_unknown;
1011 		break;
1012 	}
1013 } /* ixv_identify_hardware */
1014 
1015 /************************************************************************
1016  * ixv_if_msix_intr_assign - Setup MSI-X Interrupt resources and handlers
1017  ************************************************************************/
1018 static int
1019 ixv_if_msix_intr_assign(if_ctx_t ctx, int msix)
1020 {
1021 	struct adapter     *adapter = iflib_get_softc(ctx);
1022 	device_t           dev = iflib_get_dev(ctx);
1023 	struct ix_rx_queue *rx_que = adapter->rx_queues;
1024 	struct ix_tx_queue *tx_que;
1025 	int                error, rid, vector = 0;
1026 	char               buf[16];
1027 
1028 	for (int i = 0; i < adapter->num_rx_queues; i++, vector++, rx_que++) {
1029 		rid = vector + 1;
1030 
1031 		snprintf(buf, sizeof(buf), "rxq%d", i);
1032 		error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
1033 		    IFLIB_INTR_RX, ixv_msix_que, rx_que, rx_que->rxr.me, buf);
1034 
1035 		if (error) {
1036 			device_printf(iflib_get_dev(ctx),
1037 			    "Failed to allocate que int %d err: %d", i, error);
1038 			adapter->num_rx_queues = i + 1;
1039 			goto fail;
1040 		}
1041 
1042 		rx_que->msix = vector;
1043 	}
1044 
1045 	for (int i = 0; i < adapter->num_tx_queues; i++) {
1046 		snprintf(buf, sizeof(buf), "txq%d", i);
1047 		tx_que = &adapter->tx_queues[i];
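		/*
		 * TX queues do not get dedicated MSI-X vectors; each TX
		 * queue shares the vector of an RX queue (round-robin
		 * when there are more TX queues than RX queues).
		 */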
1048 		tx_que->msix = i % adapter->num_rx_queues;
1049 		iflib_softirq_alloc_generic(ctx,
1050 		    &adapter->rx_queues[tx_que->msix].que_irq,
1051 		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
1052 	}
1053 	rid = vector + 1;
1054 	error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid,
1055 	    IFLIB_INTR_ADMIN, ixv_msix_mbx, adapter, 0, "aq");
1056 	if (error) {
1057 		device_printf(iflib_get_dev(ctx),
1058 		    "Failed to register admin handler");
1059 		return (error);
1060 	}
1061 
1062 	adapter->vector = vector;
1063 	/*
1064 	 * Due to a broken design QEMU will fail to properly
1065 	 * enable the guest for MSIX unless the vectors in
1066 	 * the table are all set up, so we must rewrite the
1067 	 * ENABLE in the MSIX control register again at this
1068 	 * point to cause it to successfully initialize us.
1069 	 */
1070 	if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
1071 		int msix_ctrl;
1072 		pci_find_cap(dev, PCIY_MSIX, &rid);
1073 		rid += PCIR_MSIX_CTRL;
1074 		msix_ctrl = pci_read_config(dev, rid, 2);
1075 		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1076 		pci_write_config(dev, rid, msix_ctrl, 2);
1077 	}
1078 
1079 	return (0);
1080 
1081 fail:
1082 	iflib_irq_free(ctx, &adapter->irq);
1083 	rx_que = adapter->rx_queues;
1084 	for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++)
1085 		iflib_irq_free(ctx, &rx_que->que_irq);
1086 
1087 	return (error);
1088 } /* ixv_if_msix_intr_assign */
1089 
1090 /************************************************************************
1091  * ixv_allocate_pci_resources
1092  ************************************************************************/
1093 static int
1094 ixv_allocate_pci_resources(if_ctx_t ctx)
1095 {
1096 	struct adapter *adapter = iflib_get_softc(ctx);
1097 	device_t       dev = iflib_get_dev(ctx);
1098 	int            rid;
1099 
1100 	rid = PCIR_BAR(0);
1101 	adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1102 	    RF_ACTIVE);
1103 
1104 	if (!(adapter->pci_mem)) {
1105 		device_printf(dev, "Unable to allocate bus resource: memory\n");
1106 		return (ENXIO);
1107 	}
1108 
1109 	adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem);
1110 	adapter->osdep.mem_bus_space_handle =
1111 	    rman_get_bushandle(adapter->pci_mem);
1112 	adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
1113 
1114 	return (0);
1115 } /* ixv_allocate_pci_resources */
1116 
1117 /************************************************************************
1118  * ixv_free_pci_resources
1119  ************************************************************************/
1120 static void
1121 ixv_free_pci_resources(if_ctx_t ctx)
1122 {
1123 	struct adapter     *adapter = iflib_get_softc(ctx);
1124 	struct ix_rx_queue *que = adapter->rx_queues;
1125 	device_t           dev = iflib_get_dev(ctx);
1126 
1127 	/* Release all MSI-X queue resources */
1128 	if (adapter->intr_type == IFLIB_INTR_MSIX)
1129 		iflib_irq_free(ctx, &adapter->irq);
1130 
1131 	if (que != NULL) {
1132 		for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
1133 			iflib_irq_free(ctx, &que->que_irq);
1134 		}
1135 	}
1136 
1137 	if (adapter->pci_mem != NULL)
1138 		bus_release_resource(dev, SYS_RES_MEMORY,
1139 		    rman_get_rid(adapter->pci_mem), adapter->pci_mem);
1140 } /* ixv_free_pci_resources */
1141 
1142 /************************************************************************
1143  * ixv_setup_interface
1144  *
1145  *   Setup networking device structure and register an interface.
1146  ************************************************************************/
1147 static int
1148 ixv_setup_interface(if_ctx_t ctx)
1149 {
1150 	struct adapter *adapter = iflib_get_softc(ctx);
1151 	if_softc_ctx_t scctx = adapter->shared;
1152 	struct ifnet   *ifp = iflib_get_ifp(ctx);
1153 
1154 	INIT_DEBUGOUT("ixv_setup_interface: begin");
1155 
1156 	if_setbaudrate(ifp, IF_Gbps(10));
1157 	ifp->if_snd.ifq_maxlen = scctx->isc_ntxd[0] - 2;
1158 
1159 
1160 	adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR;
1161 	ifmedia_add(adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1162 	ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
1163 
1164 	return 0;
1165 } /* ixv_setup_interface */
1166 
1167 /************************************************************************
1168  * ixv_if_get_counter
1169  ************************************************************************/
1170 static uint64_t
1171 ixv_if_get_counter(if_ctx_t ctx, ift_counter cnt)
1172 {
1173 	struct adapter *adapter = iflib_get_softc(ctx);
1174 	if_t           ifp = iflib_get_ifp(ctx);
1175 
1176 	switch (cnt) {
1177 	case IFCOUNTER_IPACKETS:
1178 		return (adapter->ipackets);
1179 	case IFCOUNTER_OPACKETS:
1180 		return (adapter->opackets);
1181 	case IFCOUNTER_IBYTES:
1182 		return (adapter->ibytes);
1183 	case IFCOUNTER_OBYTES:
1184 		return (adapter->obytes);
1185 	case IFCOUNTER_IMCASTS:
1186 		return (adapter->imcasts);
1187 	default:
1188 		return (if_get_counter_default(ifp, cnt));
1189 	}
1190 } /* ixv_if_get_counter */
1191 
1192 /* ixv_if_needs_restart - Tell iflib when the driver needs to be reinitialized
1193  * @ctx: iflib context
1194  * @event: event code to check
1195  *
1196  * Defaults to returning true for every event.
1197  *
1198  * @returns true if iflib needs to reinit the interface
1199  */
1200 static bool
1201 ixv_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
1202 {
1203 	switch (event) {
1204 	case IFLIB_RESTART_VLAN_CONFIG:
1205 		/* XXX: This may not need to return true */
1206 	default:
1207 		return (true);
1208 	}
1209 }
1210 
1211 /************************************************************************
1212  * ixv_initialize_transmit_units - Enable transmit unit.
1213  ************************************************************************/
1214 static void
1215 ixv_initialize_transmit_units(if_ctx_t ctx)
1216 {
1217 	struct adapter     *adapter = iflib_get_softc(ctx);
1218 	struct ixgbe_hw    *hw = &adapter->hw;
1219 	if_softc_ctx_t     scctx = adapter->shared;
1220 	struct ix_tx_queue *que = adapter->tx_queues;
1221 	int                i;
1222 
1223 	for (i = 0; i < adapter->num_tx_queues; i++, que++) {
1224 		struct tx_ring *txr = &que->txr;
1225 		u64            tdba = txr->tx_paddr;
1226 		u32            txctrl, txdctl;
1227 		int            j = txr->me;
1228 
1229 		/* Set WTHRESH to 8, burst writeback */
1230 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1231 		txdctl |= (8 << 16);
1232 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1233 
1234 		/* Set the HW Tx Head and Tail indices */
1235 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(j), 0);
1236 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(j), 0);
1237 
1238 		/* Set Tx Tail register */
1239 		txr->tail = IXGBE_VFTDT(j);
1240 
1241 		txr->tx_rs_cidx = txr->tx_rs_pidx;
1242 		/* Initialize the last processed descriptor to be the end of
1243 		 * the ring, rather than the start, so that we avoid an
1244 		 * off-by-one error when calculating how many descriptors are
1245 		 * done in the credits_update function.
1246 		 */
1247 		txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
1248 		for (int k = 0; k < scctx->isc_ntxd[0]; k++)
1249 			txr->tx_rsq[k] = QIDX_INVALID;
1250 
1251 		/* Set Ring parameters */
1252 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
1253 		    (tdba & 0x00000000ffffffffULL));
1254 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
1255 		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j),
1256 		    scctx->isc_ntxd[0] * sizeof(struct ixgbe_legacy_tx_desc));
1257 		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
1258 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
1259 		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
1260 
1261 		/* Now enable */
1262 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1263 		txdctl |= IXGBE_TXDCTL_ENABLE;
1264 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1265 	}
1266 
1267 	return;
1268 } /* ixv_initialize_transmit_units */
1269 
1270 /************************************************************************
1271  * ixv_initialize_rss_mapping
1272  ************************************************************************/
1273 static void
1274 ixv_initialize_rss_mapping(struct adapter *adapter)
1275 {
1276 	struct ixgbe_hw *hw = &adapter->hw;
1277 	u32             reta = 0, mrqc, rss_key[10];
1278 	int             queue_id;
1279 	int             i, j;
1280 	u32             rss_hash_config;
1281 
1282 	if (adapter->feat_en & IXGBE_FEATURE_RSS) {
1283 		/* Fetch the configured RSS key */
1284 		rss_getkey((uint8_t *)&rss_key);
1285 	} else {
1286 		/* set up random bits */
1287 		arc4rand(&rss_key, sizeof(rss_key), 0);
1288 	}
1289 
1290 	/* Now fill out hash function seeds */
1291 	for (i = 0; i < 10; i++)
1292 		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);
1293 
1294 	/* Set up the redirection table */
1295 	for (i = 0, j = 0; i < 64; i++, j++) {
1296 		if (j == adapter->num_rx_queues)
1297 			j = 0;
1298 
1299 		if (adapter->feat_en & IXGBE_FEATURE_RSS) {
1300 			/*
1301 			 * Fetch the RSS bucket id for the given indirection
1302 			 * entry. Cap it at the number of configured buckets
1303 			 * (which is num_rx_queues.)
1304 			 */
1305 			queue_id = rss_get_indirection_to_bucket(i);
1306 			queue_id = queue_id % adapter->num_rx_queues;
1307 		} else
1308 			queue_id = j;
1309 
1310 		/*
1311 		 * The low 8 bits are for hash value (n+0);
1312 		 * The next 8 bits are for hash value (n+1), etc.
1313 		 */
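		/*
		 * Illustrative example (not taken from the datasheet
		 * text above): with two RX queues, entries 0-3 yield
		 * queue IDs 0,1,0,1, so the first VFRETA write below
		 * is 0x01000100 (one queue ID per byte, entry 0 in the
		 * low byte).
		 */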
1314 		reta >>= 8;
1315 		reta |= ((uint32_t)queue_id) << 24;
1316 		if ((i & 3) == 3) {
1317 			IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
1318 			reta = 0;
1319 		}
1320 	}
1321 
1322 	/* Perform hash on these packet types */
1323 	if (adapter->feat_en & IXGBE_FEATURE_RSS)
1324 		rss_hash_config = rss_gethashconfig();
1325 	else {
1326 		/*
1327 		 * Disable UDP - IP fragments aren't currently being handled
1328 		 * and so we end up with a mix of 2-tuple and 4-tuple
1329 		 * traffic.
1330 		 */
1331 		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
1332 		                | RSS_HASHTYPE_RSS_TCP_IPV4
1333 		                | RSS_HASHTYPE_RSS_IPV6
1334 		                | RSS_HASHTYPE_RSS_TCP_IPV6;
1335 	}
1336 
1337 	mrqc = IXGBE_MRQC_RSSEN;
1338 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
1339 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
1340 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
1341 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
1342 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
1343 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
1344 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
1345 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
1346 	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
1347 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
1348 		    __func__);
1349 	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
1350 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
1351 		    __func__);
1352 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
1353 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
1354 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
1355 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
1356 	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
1357 		device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
1358 		    __func__);
1359 	IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
1360 } /* ixv_initialize_rss_mapping */
1361 
1362 
1363 /************************************************************************
1364  * ixv_initialize_receive_units - Setup receive registers and features.
1365  ************************************************************************/
1366 static void
1367 ixv_initialize_receive_units(if_ctx_t ctx)
1368 {
1369 	struct adapter     *adapter = iflib_get_softc(ctx);
1370 	if_softc_ctx_t     scctx;
1371 	struct ixgbe_hw    *hw = &adapter->hw;
1372 	struct ifnet       *ifp = iflib_get_ifp(ctx);
1373 	struct ix_rx_queue *que = adapter->rx_queues;
1374 	u32                bufsz, psrtype;
1375 
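	/*
	 * SRRCTL.BSIZEPKT is specified in 1 KB units, hence the
	 * shift: e.g. 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT (assumed to
	 * be 10) programs a 4 KB receive buffer.
	 */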
1376 	if (ifp->if_mtu > ETHERMTU)
1377 		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1378 	else
1379 		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1380 
1381 	psrtype = IXGBE_PSRTYPE_TCPHDR
1382 	        | IXGBE_PSRTYPE_UDPHDR
1383 	        | IXGBE_PSRTYPE_IPV4HDR
1384 	        | IXGBE_PSRTYPE_IPV6HDR
1385 	        | IXGBE_PSRTYPE_L2HDR;
1386 
1387 	if (adapter->num_rx_queues > 1)
1388 		psrtype |= 1 << 29;
1389 
1390 	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1391 
1392 	/* Tell PF our max_frame size */
1393 	if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) {
1394 		device_printf(adapter->dev, "There is a problem with the PF setup.  It is likely the receive unit for this VF will not function correctly.\n");
1395 	}
1396 	scctx = adapter->shared;
1397 
1398 	for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
1399 		struct rx_ring *rxr = &que->rxr;
1400 		u64            rdba = rxr->rx_paddr;
1401 		u32            reg, rxdctl;
1402 		int            j = rxr->me;
1403 
1404 		/* Disable the queue */
1405 		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
1406 		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
1407 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
1408 		for (int k = 0; k < 10; k++) {
1409 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
1410 			    IXGBE_RXDCTL_ENABLE)
1411 				msec_delay(1);
1412 			else
1413 				break;
1414 		}
1415 		wmb();
1416 		/* Setup the Base and Length of the Rx Descriptor Ring */
1417 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
1418 		    (rdba & 0x00000000ffffffffULL));
1419 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
1420 		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j),
1421 		    scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));
1422 
1423 		/* Reset the ring indices */
1424 		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
1425 		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);
1426 
1427 		/* Set up the SRRCTL register */
1428 		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(j));
1429 		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
1430 		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
1431 		reg |= bufsz;
1432 		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1433 		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(j), reg);
1434 
1435 		/* Capture Rx Tail index */
1436 		rxr->tail = IXGBE_VFRDT(rxr->me);
1437 
1438 		/* Do the queue enabling last */
1439 		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
1440 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
1441 		for (int l = 0; l < 10; l++) {
1442 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
1443 			    IXGBE_RXDCTL_ENABLE)
1444 				break;
1445 			msec_delay(1);
1446 		}
1447 		wmb();
1448 
1449 		/* Set the Tail Pointer */
1450 #ifdef DEV_NETMAP
1451 		/*
1452 		 * In netmap mode, we must preserve the buffers made
1453 		 * available to userspace before the if_init()
1454 		 * (this is true by default on the TX side, because
1455 		 * init makes all buffers available to userspace).
1456 		 *
1457 		 * netmap_reset() and the device specific routines
1458 		 * (e.g. ixgbe_setup_receive_rings()) map these
1459 		 * buffers at the end of the NIC ring, so here we
1460 		 * must set the RDT (tail) register to make sure
1461 		 * they are not overwritten.
1462 		 *
1463 		 * In this driver the NIC ring starts at RDH = 0,
1464 		 * RDT points to the last slot available for reception (?),
1465 		 * so RDT = num_rx_desc - 1 means the whole ring is available.
1466 		 */
1467 		if (ifp->if_capenable & IFCAP_NETMAP) {
1468 			struct netmap_adapter *na = NA(ifp);
1469 			struct netmap_kring *kring = na->rx_rings[j];
1470 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
1471 
1472 			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
1473 		} else
1474 #endif /* DEV_NETMAP */
1475 			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
1476 			    scctx->isc_nrxd[0] - 1);
1477 	}
1478 
1479 	/*
1480 	 * Do not touch RSS and RETA settings for older hardware
1481 	 * as those are shared among PF and all VF.
1482 	 */
1483 	if (adapter->hw.mac.type >= ixgbe_mac_X550_vf)
1484 		ixv_initialize_rss_mapping(adapter);
1485 } /* ixv_initialize_receive_units */
1486 
1487 /************************************************************************
1488  * ixv_setup_vlan_support
1489  ************************************************************************/
1490 static void
1491 ixv_setup_vlan_support(if_ctx_t ctx)
1492 {
1493 	struct ifnet	*ifp = iflib_get_ifp(ctx);
1494 	struct adapter  *adapter = iflib_get_softc(ctx);
1495 	struct ixgbe_hw *hw = &adapter->hw;
1496 	u32             ctrl, vid, vfta, retry;
1497 
1498 	/*
1499 	 * We get here through if_init, meaning a soft
1500 	 * reset has already cleared the VFTA and other
1501 	 * state, so if no VLANs have been registered
1502 	 * there is nothing to do.
1503 	 */
1504 	if (adapter->num_vlans == 0)
1505 		return;
1506 
1507 	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1508 		/* Enable the queues */
1509 		for (int i = 0; i < adapter->num_rx_queues; i++) {
1510 			ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
1511 			ctrl |= IXGBE_RXDCTL_VME;
1512 			IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
1513 			/*
1514 			 * Let Rx path know that it needs to store VLAN tag
1515 			 * as part of extra mbuf info.
1516 			 */
1517 			adapter->rx_queues[i].rxr.vtag_strip = TRUE;
1518 		}
1519 	}
1520 
1521 	/*
1522 	 * If filtering VLAN tags is disabled,
1523 	 * there is no need to fill VLAN Filter Table Array (VFTA).
1524 	 */
1525 	if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
1526 		return;
1527 
1528 	/*
1529 	 * A soft reset zeroes out the VFTA, so
1530 	 * we need to repopulate it now.
1531 	 */
1532 	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
1533 		if (ixv_shadow_vfta[i] == 0)
1534 			continue;
1535 		vfta = ixv_shadow_vfta[i];
1536 		/*
1537 		 * Reconstruct the VLAN IDs based
1538 		 * on the bits set in each of the
1539 		 * 32-bit array words.
1540 		 */
1541 		for (int j = 0; j < 32; j++) {
1542 			retry = 0;
1543 			if ((vfta & (1 << j)) == 0)
1544 				continue;
1545 			vid = (i * 32) + j;
1546 			/* Call the shared code mailbox routine */
1547 			while (hw->mac.ops.set_vfta(hw, vid, 0, TRUE, FALSE)) {
1548 				if (++retry > 5)
1549 					break;
1550 			}
1551 		}
1552 	}
1553 } /* ixv_setup_vlan_support */
1554 
1555 /************************************************************************
1556  * ixv_if_register_vlan
1557  *
1558  *   Run via a VLAN config EVENT; it enables us to use the
1559  *   HW filter table since we can get the VLAN ID. This just
1560  *   creates the entry in the soft version of the VFTA;
1561  *   init will repopulate the real table.
1562  ************************************************************************/
1563 static void
1564 ixv_if_register_vlan(if_ctx_t ctx, u16 vtag)
1565 {
1566 	struct adapter *adapter = iflib_get_softc(ctx);
1567 	u16            index, bit;
1568 
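	/*
	 * Each 32-bit word of the shadow VFTA covers 32 VLAN IDs;
	 * e.g. vtag 100 maps to index 3, bit 4 (100 = 3 * 32 + 4).
	 */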
1569 	index = (vtag >> 5) & 0x7F;
1570 	bit = vtag & 0x1F;
1571 	ixv_shadow_vfta[index] |= (1 << bit);
1572 	++adapter->num_vlans;
1573 } /* ixv_if_register_vlan */
1574 
1575 /************************************************************************
1576  * ixv_if_unregister_vlan
1577  *
1578  *   Run via a vlan unconfig EVENT, remove our entry
1579  *   in the soft vfta.
1580  ************************************************************************/
1581 static void
1582 ixv_if_unregister_vlan(if_ctx_t ctx, u16 vtag)
1583 {
1584 	struct adapter *adapter = iflib_get_softc(ctx);
1585 	u16            index, bit;
1586 
1587 	index = (vtag >> 5) & 0x7F;
1588 	bit = vtag & 0x1F;
1589 	ixv_shadow_vfta[index] &= ~(1 << bit);
1590 	--adapter->num_vlans;
1591 } /* ixv_if_unregister_vlan */
1592 
1593 /************************************************************************
1594  * ixv_if_enable_intr
1595  ************************************************************************/
1596 static void
1597 ixv_if_enable_intr(if_ctx_t ctx)
1598 {
1599 	struct adapter  *adapter = iflib_get_softc(ctx);
1600 	struct ixgbe_hw *hw = &adapter->hw;
1601 	struct ix_rx_queue *que = adapter->rx_queues;
1602 	u32             mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
1603 
1604 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
1605 
1606 	mask = IXGBE_EIMS_ENABLE_MASK;
1607 	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
1608 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
1609 
1610 	for (int i = 0; i < adapter->num_rx_queues; i++, que++)
1611 		ixv_enable_queue(adapter, que->msix);
1612 
1613 	IXGBE_WRITE_FLUSH(hw);
1614 } /* ixv_if_enable_intr */
1615 
1616 /************************************************************************
1617  * ixv_if_disable_intr
1618  ************************************************************************/
1619 static void
1620 ixv_if_disable_intr(if_ctx_t ctx)
1621 {
1622 	struct adapter *adapter = iflib_get_softc(ctx);
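
	/* Turn off auto-clear and mask every interrupt cause. */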
1623 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
1624 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
1625 	IXGBE_WRITE_FLUSH(&adapter->hw);
1626 } /* ixv_if_disable_intr */
1627 
1628 /************************************************************************
1629  * ixv_if_rx_queue_intr_enable
1630  ************************************************************************/
1631 static int
1632 ixv_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
1633 {
1634 	struct adapter	*adapter = iflib_get_softc(ctx);
1635 	struct ix_rx_queue *que = &adapter->rx_queues[rxqid];
1636 
1637 	ixv_enable_queue(adapter, que->rxr.me);
1638 
1639 	return (0);
1640 } /* ixv_if_rx_queue_intr_enable */
1641 
1642 /************************************************************************
1643  * ixv_set_ivar
1644  *
1645  *   Set up the correct IVAR register for a particular MSI-X interrupt
1646  *    - entry is the register array entry
1647  *    - vector is the MSI-X vector for this queue
1648  *    - type is RX/TX/MISC
1649  ************************************************************************/
1650 static void
1651 ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
1652 {
1653 	struct ixgbe_hw *hw = &adapter->hw;
1654 	u32             ivar, index;
1655 
1656 	vector |= IXGBE_IVAR_ALLOC_VAL;
1657 
1658 	if (type == -1) { /* MISC IVAR */
1659 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
1660 		ivar &= ~0xFF;
1661 		ivar |= vector;
1662 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
1663 	} else {          /* RX/TX IVARS */
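		/*
		 * Each VTIVAR register carries four 8-bit entries for a
		 * pair of queues: RX then TX for the even queue in the
		 * low 16 bits, RX then TX for the odd queue in the high
		 * 16 bits; entry >> 1 selects the register.
		 */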
1664 		index = (16 * (entry & 1)) + (8 * type);
1665 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
1666 		ivar &= ~(0xFF << index);
1667 		ivar |= (vector << index);
1668 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
1669 	}
1670 } /* ixv_set_ivar */
1671 
1672 /************************************************************************
1673  * ixv_configure_ivars
1674  ************************************************************************/
1675 static void
1676 ixv_configure_ivars(struct adapter *adapter)
1677 {
1678 	struct ix_rx_queue *que = adapter->rx_queues;
1679 
1680 	MPASS(adapter->num_rx_queues == adapter->num_tx_queues);
1681 
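	/* The RX and TX rings of queue i share a single MSI-X vector. */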
1682 	for (int i = 0; i < adapter->num_rx_queues; i++, que++) {
1683 		/* First the RX queue entry */
1684 		ixv_set_ivar(adapter, i, que->msix, 0);
1685 		/* ... and the TX */
1686 		ixv_set_ivar(adapter, i, que->msix, 1);
1687 		/* Set an initial value in EITR */
1688 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix),
1689 		    IXGBE_EITR_DEFAULT);
1690 	}
1691 
1692 	/* For the mailbox interrupt */
1693 	ixv_set_ivar(adapter, 1, adapter->vector, -1);
1694 } /* ixv_configure_ivars */
1695 
1696 /************************************************************************
1697  * ixv_save_stats
1698  *
1699  *   The VF stats registers never have a truly virgin
1700  *   starting point, so this routine tries to make an
1701  *   artificial one, marking ground zero at attach time,
1702  *   as it were.
1703  ************************************************************************/
1704 static void
1705 ixv_save_stats(struct adapter *adapter)
1706 {
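	/*
	 * Fold the deltas accumulated since the last baseline into the
	 * saved_reset_* counters so they are not lost across a VF reset.
	 */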
1707 	if (adapter->stats.vf.vfgprc || adapter->stats.vf.vfgptc) {
1708 		adapter->stats.vf.saved_reset_vfgprc +=
1709 		    adapter->stats.vf.vfgprc - adapter->stats.vf.base_vfgprc;
1710 		adapter->stats.vf.saved_reset_vfgptc +=
1711 		    adapter->stats.vf.vfgptc - adapter->stats.vf.base_vfgptc;
1712 		adapter->stats.vf.saved_reset_vfgorc +=
1713 		    adapter->stats.vf.vfgorc - adapter->stats.vf.base_vfgorc;
1714 		adapter->stats.vf.saved_reset_vfgotc +=
1715 		    adapter->stats.vf.vfgotc - adapter->stats.vf.base_vfgotc;
1716 		adapter->stats.vf.saved_reset_vfmprc +=
1717 		    adapter->stats.vf.vfmprc - adapter->stats.vf.base_vfmprc;
1718 	}
1719 } /* ixv_save_stats */
1720 
1721 /************************************************************************
1722  * ixv_init_stats
1723  ************************************************************************/
1724 static void
1725 ixv_init_stats(struct adapter *adapter)
1726 {
1727 	struct ixgbe_hw *hw = &adapter->hw;
1728 
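	/*
	 * Snapshot the current hardware counters; GORC/GOTC are wide
	 * counters assembled from their LSB/MSB register pairs.
	 */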
1729 	adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
1730 	adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
1731 	adapter->stats.vf.last_vfgorc |=
1732 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
1733 
1734 	adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
1735 	adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
1736 	adapter->stats.vf.last_vfgotc |=
1737 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
1738 
1739 	adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
1740 
1741 	adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc;
1742 	adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc;
1743 	adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc;
1744 	adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc;
1745 	adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc;
1746 } /* ixv_init_stats */
1747 
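/*
 * The VF statistics registers are 32 bits wide (36 bits for the
 * LSB/MSB octet counters) and wrap; these macros detect a wrap
 * (current < last) and carry it into the upper bits of the 64-bit
 * software counter.
 */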
1748 #define UPDATE_STAT_32(reg, last, count)                \
1749 {                                                       \
1750 	u32 current = IXGBE_READ_REG(hw, reg);          \
1751 	if (current < last)                             \
1752 		count += 0x100000000LL;                 \
1753 	last = current;                                 \
1754 	count &= 0xFFFFFFFF00000000LL;                  \
1755 	count |= current;                               \
1756 }
1757 
1758 #define UPDATE_STAT_36(lsb, msb, last, count)           \
1759 {                                                       \
1760 	u64 cur_lsb = IXGBE_READ_REG(hw, lsb);          \
1761 	u64 cur_msb = IXGBE_READ_REG(hw, msb);          \
1762 	u64 current = ((cur_msb << 32) | cur_lsb);      \
1763 	if (current < last)                             \
1764 		count += 0x1000000000LL;                \
1765 	last = current;                                 \
1766 	count &= 0xFFFFFFF000000000LL;                  \
1767 	count |= current;                               \
1768 }
1769 
1770 /************************************************************************
1771  * ixv_update_stats - Update the board statistics counters.
1772  ************************************************************************/
1773 void
1774 ixv_update_stats(struct adapter *adapter)
1775 {
1776 	struct ixgbe_hw *hw = &adapter->hw;
1777 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
1778 
1779 	UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc,
1780 	    adapter->stats.vf.vfgprc);
1781 	UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc,
1782 	    adapter->stats.vf.vfgptc);
1783 	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
1784 	    adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc);
1785 	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
1786 	    adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc);
1787 	UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc,
1788 	    adapter->stats.vf.vfmprc);
1789 
1790 	/* Fill out the OS statistics structure */
1791 	IXGBE_SET_IPACKETS(adapter, stats->vfgprc);
1792 	IXGBE_SET_OPACKETS(adapter, stats->vfgptc);
1793 	IXGBE_SET_IBYTES(adapter, stats->vfgorc);
1794 	IXGBE_SET_OBYTES(adapter, stats->vfgotc);
1795 	IXGBE_SET_IMCASTS(adapter, stats->vfmprc);
1796 } /* ixv_update_stats */
1797 
1798 /************************************************************************
1799  * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
1800  ************************************************************************/
1801 static void
1802 ixv_add_stats_sysctls(struct adapter *adapter)
1803 {
1804 	device_t                dev = adapter->dev;
1805 	struct ix_tx_queue      *tx_que = adapter->tx_queues;
1806 	struct ix_rx_queue      *rx_que = adapter->rx_queues;
1807 	struct sysctl_ctx_list  *ctx = device_get_sysctl_ctx(dev);
1808 	struct sysctl_oid       *tree = device_get_sysctl_tree(dev);
1809 	struct sysctl_oid_list  *child = SYSCTL_CHILDREN(tree);
1810 	struct ixgbevf_hw_stats *stats = &adapter->stats.vf;
1811 	struct sysctl_oid       *stat_node, *queue_node;
1812 	struct sysctl_oid_list  *stat_list, *queue_list;
1813 
1814 #define QUEUE_NAME_LEN 32
1815 	char                    namebuf[QUEUE_NAME_LEN];
1816 
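	/*
	 * Everything below hangs off the device's sysctl tree, so the
	 * per-queue counters appear as, e.g.,
	 * dev.<driver>.<unit>.queue0.rx_packets.
	 */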
1817 	/* Driver Statistics */
1818 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1819 	    CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts");
1820 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1821 	    CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled");
1822 
1823 	for (int i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
1824 		struct tx_ring *txr = &tx_que->txr;
1825 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1826 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1827 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
1828 		queue_list = SYSCTL_CHILDREN(queue_node);
1829 
1830 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
1831 		    CTLFLAG_RD, &(txr->tso_tx), "TSO Packets");
1832 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1833 		    CTLFLAG_RD, &(txr->total_packets), "TX Packets");
1834 	}
1835 
1836 	for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++) {
1837 		struct rx_ring *rxr = &rx_que->rxr;
1838 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1839 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1840 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
1841 		queue_list = SYSCTL_CHILDREN(queue_node);
1842 
1843 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1844 		    CTLFLAG_RD, &(rx_que->irqs), "IRQs on queue");
1845 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1846 		    CTLFLAG_RD, &(rxr->rx_packets), "RX packets");
1847 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1848 		    CTLFLAG_RD, &(rxr->rx_bytes), "RX bytes");
1849 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1850 		    CTLFLAG_RD, &(rxr->rx_discarded), "Discarded RX packets");
1851 	}
1852 
1853 	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
1854 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
1855 	    "VF Statistics (read from HW registers)");
1856 	stat_list = SYSCTL_CHILDREN(stat_node);
1857 
1858 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1859 	    CTLFLAG_RD, &stats->vfgprc, "Good Packets Received");
1860 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1861 	    CTLFLAG_RD, &stats->vfgorc, "Good Octets Received");
1862 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1863 	    CTLFLAG_RD, &stats->vfmprc, "Multicast Packets Received");
1864 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1865 	    CTLFLAG_RD, &stats->vfgptc, "Good Packets Transmitted");
1866 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1867 	    CTLFLAG_RD, &stats->vfgotc, "Good Octets Transmitted");
1868 } /* ixv_add_stats_sysctls */
1869 
1870 /************************************************************************
1871  * ixv_print_debug_info
1872  *
1873  *   Called only from the debug sysctl handler (ixv_sysctl_debug).
1874  *   Provides a way to take a look at important statistics
1875  *   maintained by the driver and hardware.
1876  ************************************************************************/
1877 static void
1878 ixv_print_debug_info(struct adapter *adapter)
1879 {
1880 	device_t        dev = adapter->dev;
1881 	struct ixgbe_hw *hw = &adapter->hw;
1882 
1883 	device_printf(dev, "Error Byte Count = %u \n",
1884 	    IXGBE_READ_REG(hw, IXGBE_ERRBC));
1885 
1886 	device_printf(dev, "MBX IRQ Handled: %lu\n", (long)adapter->link_irq);
1887 } /* ixv_print_debug_info */
1888 
1889 /************************************************************************
1890  * ixv_sysctl_debug
1891  ************************************************************************/
1892 static int
1893 ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
1894 {
1895 	struct adapter *adapter;
1896 	int            error, result;
1897 
1898 	result = -1;
1899 	error = sysctl_handle_int(oidp, &result, 0, req);
1900 
1901 	if (error || !req->newptr)
1902 		return (error);
1903 
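	/* Writing 1 to the sysctl triggers a debug dump. */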
1904 	if (result == 1) {
1905 		adapter = (struct adapter *)arg1;
1906 		ixv_print_debug_info(adapter);
1907 	}
1908 
1909 	return (error);
1910 } /* ixv_sysctl_debug */
1911 
1912 /************************************************************************
1913  * ixv_init_device_features
1914  ************************************************************************/
1915 static void
1916 ixv_init_device_features(struct adapter *adapter)
1917 {
1918 	adapter->feat_cap = IXGBE_FEATURE_NETMAP
1919 	                  | IXGBE_FEATURE_VF
1920 	                  | IXGBE_FEATURE_LEGACY_TX;
1921 
1922 	/* A tad short on feature flags for VFs at the moment. */
1923 	switch (adapter->hw.mac.type) {
1924 	case ixgbe_mac_82599_vf:
1925 		break;
1926 	case ixgbe_mac_X540_vf:
1927 		break;
1928 	case ixgbe_mac_X550_vf:
1929 	case ixgbe_mac_X550EM_x_vf:
1930 	case ixgbe_mac_X550EM_a_vf:
1931 		adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
1932 		adapter->feat_cap |= IXGBE_FEATURE_RSS;
1933 		break;
1934 	default:
1935 		break;
1936 	}
1937 
1938 	/* Enabled by default... */
1939 	/* Is a virtual function (VF) */
1940 	if (adapter->feat_cap & IXGBE_FEATURE_VF)
1941 		adapter->feat_en |= IXGBE_FEATURE_VF;
1942 	/* Netmap */
1943 	if (adapter->feat_cap & IXGBE_FEATURE_NETMAP)
1944 		adapter->feat_en |= IXGBE_FEATURE_NETMAP;
1945 	/* Receive-Side Scaling (RSS) */
1946 	if (adapter->feat_cap & IXGBE_FEATURE_RSS)
1947 		adapter->feat_en |= IXGBE_FEATURE_RSS;
1948 	/* Needs advanced context descriptor regardless of offloads req'd */
1949 	if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
1950 		adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
1951 } /* ixv_init_device_features */
1952 
1953