/*****************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*****************************************************************************/


#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"

#include "ixgbe.h"
#include "ifdi_if.h"

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>

/************************************************************************
 * Driver version
 ************************************************************************/
static const char ixv_driver_version[] = "2.0.1-k";

/************************************************************************
 * PCI Device ID Table
 *
 *   Used by probe to select devices to load on
 *   Last field stores an index into ixv_strings
 *   Last entry must be all 0s
 *
 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 ************************************************************************/
static const pci_vendor_info_t ixv_vendor_info_array[] =
{
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF,
	    "Intel(R) X520 82599 Virtual Function"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF,
	    "Intel(R) X540 Virtual Function"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF,
	    "Intel(R) X550 Virtual Function"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF,
	    "Intel(R) X552 Virtual Function"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF,
	    "Intel(R) X553 Virtual Function"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_VF,
	    "Intel(R) E610 Virtual Function"),
	/* required last entry */
	PVID_END
};

/************************************************************************
 * Function prototypes
 ************************************************************************/
static void     *ixv_register(device_t);
static int      ixv_if_attach_pre(if_ctx_t);
static int      ixv_if_attach_post(if_ctx_t);
static int      ixv_if_detach(if_ctx_t);

static int      ixv_if_rx_queue_intr_enable(if_ctx_t, uint16_t);
static int      ixv_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int,
    int);
static int      ixv_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int,
    int);
static void     ixv_if_queues_free(if_ctx_t);
static void     ixv_identify_hardware(if_ctx_t);
static void     ixv_init_device_features(struct ixgbe_softc *);
static int      ixv_allocate_pci_resources(if_ctx_t);
static void     ixv_free_pci_resources(if_ctx_t);
static int      ixv_setup_interface(if_ctx_t);
static void     ixv_if_media_status(if_ctx_t, struct ifmediareq *);
static int      ixv_if_media_change(if_ctx_t);
static void     ixv_if_update_admin_status(if_ctx_t);
static int      ixv_if_msix_intr_assign(if_ctx_t, int);

static int      ixv_if_mtu_set(if_ctx_t, uint32_t);
static void     ixv_if_init(if_ctx_t);
static void     ixv_if_local_timer(if_ctx_t, uint16_t);
static void     ixv_if_stop(if_ctx_t);
static int      ixv_negotiate_api(struct ixgbe_softc *);

static void     ixv_initialize_transmit_units(if_ctx_t);
static void     ixv_initialize_receive_units(if_ctx_t);
static void     ixv_initialize_rss_mapping(struct ixgbe_softc *);

static void     ixv_setup_vlan_support(if_ctx_t);
static void     ixv_configure_ivars(struct ixgbe_softc *);
static void     ixv_if_enable_intr(if_ctx_t);
static void     ixv_if_disable_intr(if_ctx_t);
static void     ixv_if_multi_set(if_ctx_t);

static void     ixv_if_register_vlan(if_ctx_t, u16);
static void     ixv_if_unregister_vlan(if_ctx_t, u16);

static uint64_t ixv_if_get_counter(if_ctx_t, ift_counter);
static bool	ixv_if_needs_restart(if_ctx_t, enum iflib_restart_event);

static void     ixv_save_stats(struct ixgbe_softc *);
static void     ixv_init_stats(struct ixgbe_softc *);
static void     ixv_update_stats(struct ixgbe_softc *);
static void     ixv_add_stats_sysctls(struct ixgbe_softc *);

static int      ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
static void     ixv_set_ivar(struct ixgbe_softc *, u8, u8, s8);

static u8       *ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);

/* The MSI-X Interrupt handlers */
static int      ixv_msix_que(void *);
static int      ixv_msix_mbx(void *);

/************************************************************************
 * FreeBSD Device Interface Entry Points
 ************************************************************************/
static device_method_t ixv_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, ixv_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
	DEVMETHOD_END
};

static driver_t ixv_driver = {
	"ixv", ixv_methods, sizeof(struct ixgbe_softc),
};

DRIVER_MODULE(ixv, pci, ixv_driver, 0, 0);
IFLIB_PNP_INFO(pci, ixv_driver, ixv_vendor_info_array);
MODULE_DEPEND(ixv, iflib, 1, 1, 1);
MODULE_DEPEND(ixv, pci, 1, 1, 1);
MODULE_DEPEND(ixv, ether, 1, 1, 1);

static device_method_t ixv_if_methods[] = {
	DEVMETHOD(ifdi_attach_pre, ixv_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, ixv_if_attach_post),
	DEVMETHOD(ifdi_detach, ixv_if_detach),
	DEVMETHOD(ifdi_init, ixv_if_init),
	DEVMETHOD(ifdi_stop, ixv_if_stop),
	DEVMETHOD(ifdi_msix_intr_assign, ixv_if_msix_intr_assign),
	DEVMETHOD(ifdi_intr_enable, ixv_if_enable_intr),
	DEVMETHOD(ifdi_intr_disable, ixv_if_disable_intr),
	DEVMETHOD(ifdi_tx_queue_intr_enable, ixv_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_rx_queue_intr_enable, ixv_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queues_alloc, ixv_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, ixv_if_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, ixv_if_queues_free),
	DEVMETHOD(ifdi_update_admin_status, ixv_if_update_admin_status),
	DEVMETHOD(ifdi_multi_set, ixv_if_multi_set),
	DEVMETHOD(ifdi_mtu_set, ixv_if_mtu_set),
	DEVMETHOD(ifdi_media_status, ixv_if_media_status),
	DEVMETHOD(ifdi_media_change, ixv_if_media_change),
	DEVMETHOD(ifdi_timer, ixv_if_local_timer),
	DEVMETHOD(ifdi_vlan_register, ixv_if_register_vlan),
	DEVMETHOD(ifdi_vlan_unregister, ixv_if_unregister_vlan),
	DEVMETHOD(ifdi_get_counter, ixv_if_get_counter),
	DEVMETHOD(ifdi_needs_restart, ixv_if_needs_restart),
	DEVMETHOD_END
};

static driver_t ixv_if_driver = {
	"ixv_if", ixv_if_methods, sizeof(struct ixgbe_softc)
};

/*
 * TUNEABLE PARAMETERS:
 */

/* Flow control setting, default to full */
static int ixv_flow_control = ixgbe_fc_full;
TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);

/*
 * Header split: this causes the hardware to DMA
 * the header into a separate mbuf from the payload.
 * It can be a performance win in some workloads, but
 * in others it actually hurts; it's off by default.
 */
static int ixv_header_split = false;
TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);
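
/*
 * Illustrative usage: both knobs above are loader tunables, so they can
 * be set from /boot/loader.conf before the module loads.  Assuming the
 * shared-code enum ixgbe_fc_mode encoding (where ixgbe_fc_full == 3):
 *
 *   hw.ixv.flow_control="3"
 *   hw.ixv.hdr_split="0"
 */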

extern struct if_txrx ixgbe_txrx;

static struct if_shared_ctx ixv_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_q_align = PAGE_SIZE, /* max(DBA_ALIGN, PAGE_SIZE) */
	.isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tx_maxsegsize = PAGE_SIZE,
	.isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = PAGE_SIZE,
	.isc_rx_maxsize = MJUM16BYTES,
	.isc_rx_nsegments = 1,
	.isc_rx_maxsegsize = MJUM16BYTES,
	.isc_nfl = 1,
	.isc_ntxqs = 1,
	.isc_nrxqs = 1,
	.isc_admin_intrcnt = 1,
	.isc_vendor_info = ixv_vendor_info_array,
	.isc_driver_version = ixv_driver_version,
	.isc_driver = &ixv_if_driver,
	.isc_flags = IFLIB_IS_VF | IFLIB_TSO_INIT_IP,

	.isc_nrxd_min = {MIN_RXD},
	.isc_ntxd_min = {MIN_TXD},
	.isc_nrxd_max = {MAX_RXD},
	.isc_ntxd_max = {MAX_TXD},
	.isc_nrxd_default = {DEFAULT_RXD},
	.isc_ntxd_default = {DEFAULT_TXD},
};

static void *
ixv_register(device_t dev)
{
	return (&ixv_sctx_init);
}

/************************************************************************
 * ixv_if_tx_queues_alloc
 ************************************************************************/
static int
ixv_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int ntxqs, int ntxqsets)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_softc_ctx_t scctx = sc->shared;
	struct ix_tx_queue *que;
	int i, j, error;

	MPASS(sc->num_tx_queues == ntxqsets);
	MPASS(ntxqs == 1);

	/* Allocate queue structure memory */
	sc->tx_queues =
	    (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) *
	    ntxqsets, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!sc->tx_queues) {
		device_printf(iflib_get_dev(ctx),
		    "Unable to allocate TX ring memory\n");
		return (ENOMEM);
	}

	for (i = 0, que = sc->tx_queues; i < ntxqsets; i++, que++) {
		struct tx_ring *txr = &que->txr;

		txr->me = i;
		txr->sc = que->sc = sc;

		/* Allocate report status array */
		if (!(txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) *
		    scctx->isc_ntxd[0], M_DEVBUF, M_NOWAIT | M_ZERO))) {
			error = ENOMEM;
			goto fail;
		}
		for (j = 0; j < scctx->isc_ntxd[0]; j++)
			txr->tx_rsq[j] = QIDX_INVALID;
		/* get virtual and physical address of the hardware queues */
		txr->tail = IXGBE_VFTDT(txr->me);
		txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i*ntxqs];
		txr->tx_paddr = paddrs[i*ntxqs];

		txr->bytes = 0;
		txr->total_packets = 0;
	}

	device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
	    sc->num_tx_queues);

	return (0);

 fail:
	ixv_if_queues_free(ctx);

	return (error);
} /* ixv_if_tx_queues_alloc */

/************************************************************************
 * ixv_if_rx_queues_alloc
 ************************************************************************/
static int
ixv_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int nrxqs, int nrxqsets)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ix_rx_queue *que;
	int i, error;

	MPASS(sc->num_rx_queues == nrxqsets);
	MPASS(nrxqs == 1);

	/* Allocate queue structure memory */
	sc->rx_queues =
	    (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue) *
	    nrxqsets, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!sc->rx_queues) {
		device_printf(iflib_get_dev(ctx),
		    "Unable to allocate RX ring memory\n");
		error = ENOMEM;
		goto fail;
	}

	for (i = 0, que = sc->rx_queues; i < nrxqsets; i++, que++) {
		struct rx_ring *rxr = &que->rxr;

		rxr->me = i;
		rxr->sc = que->sc = sc;

		/* get the virtual and physical address of the hw queues */
		rxr->tail = IXGBE_VFRDT(rxr->me);
		rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
		rxr->rx_paddr = paddrs[i*nrxqs];
		rxr->bytes = 0;
		rxr->que = que;
	}

	device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
	    sc->num_rx_queues);

	return (0);

fail:
	ixv_if_queues_free(ctx);

	return (error);
} /* ixv_if_rx_queues_alloc */

/************************************************************************
 * ixv_if_queues_free
 ************************************************************************/
static void
ixv_if_queues_free(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ix_tx_queue *que = sc->tx_queues;
	int i;

	if (que == NULL)
		goto free;

	for (i = 0; i < sc->num_tx_queues; i++, que++) {
		struct tx_ring *txr = &que->txr;

		if (txr->tx_rsq == NULL)
			break;

		free(txr->tx_rsq, M_DEVBUF);
		txr->tx_rsq = NULL;
	}
	if (sc->tx_queues != NULL)
		free(sc->tx_queues, M_DEVBUF);
free:
	if (sc->rx_queues != NULL)
		free(sc->rx_queues, M_DEVBUF);
	sc->tx_queues = NULL;
	sc->rx_queues = NULL;
} /* ixv_if_queues_free */

/************************************************************************
 * ixv_if_attach_pre - Device initialization routine
 *
 *   Called when the driver is being loaded.
 *   Identifies the type of hardware, allocates all resources
 *   and initializes the hardware.
 *
 *   return 0 on success, positive on failure
 ************************************************************************/
static int
ixv_if_attach_pre(if_ctx_t ctx)
{
	struct ixgbe_softc *sc;
	device_t dev;
	if_softc_ctx_t scctx;
	struct ixgbe_hw *hw;
	int error = 0;

	INIT_DEBUGOUT("ixv_attach: begin");

	/* Allocate, clear, and link in our sc structure */
	dev = iflib_get_dev(ctx);
	sc = iflib_get_softc(ctx);
	sc->dev = dev;
	sc->ctx = ctx;
	sc->hw.back = sc;
	scctx = sc->shared = iflib_get_softc_ctx(ctx);
	sc->media = iflib_get_media(ctx);
	hw = &sc->hw;

	/* Do base PCI setup - map BAR0 */
	if (ixv_allocate_pci_resources(ctx)) {
		device_printf(dev, "ixv_allocate_pci_resources() failed!\n");
		error = ENXIO;
		goto err_out;
	}

	/* SYSCTL APIs */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    sc, 0, ixv_sysctl_debug, "I", "Debug Info");

	/* Determine hardware revision */
	ixv_identify_hardware(ctx);
	ixv_init_device_features(sc);

	/* Initialize the shared code */
	error = ixgbe_init_ops_vf(hw);
	if (error) {
		device_printf(dev, "ixgbe_init_ops_vf() failed!\n");
		error = EIO;
		goto err_out;
	}

	/* Setup the mailbox */
	ixgbe_init_mbx_params_vf(hw);

	error = hw->mac.ops.reset_hw(hw);
	if (error == IXGBE_ERR_RESET_FAILED)
		device_printf(dev, "...reset_hw() failure: Reset Failed!\n");
	else if (error)
		device_printf(dev, "...reset_hw() failed with error %d\n",
		    error);
	if (error) {
		error = EIO;
		goto err_out;
	}

	error = hw->mac.ops.init_hw(hw);
	if (error) {
		device_printf(dev, "...init_hw() failed with error %d\n",
		    error);
		error = EIO;
		goto err_out;
	}

	/* Negotiate mailbox API version */
	error = ixv_negotiate_api(sc);
	if (error) {
		device_printf(dev,
		    "Mailbox API negotiation failed during attach!\n");
		goto err_out;
	}

	/* Check if VF was disabled by PF */
	error = hw->mac.ops.get_link_state(hw, &sc->link_enabled);
	if (error) {
		/* PF is not capable of controlling VF state. Enable link. */
		sc->link_enabled = true;
	}

	/* If no mac address was assigned, make a random one */
	if (!ixv_check_ether_addr(hw->mac.addr)) {
		ether_gen_addr(iflib_get_ifp(ctx),
		    (struct ether_addr *)hw->mac.addr);
		bcopy(hw->mac.addr, hw->mac.perm_addr,
		    sizeof(hw->mac.perm_addr));
	}

	/* Most of the iflib initialization... */

	iflib_set_mac(ctx, hw->mac.addr);
	switch (sc->hw.mac.type) {
	case ixgbe_mac_X550_vf:
	case ixgbe_mac_X550EM_x_vf:
	case ixgbe_mac_X550EM_a_vf:
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 2;
		break;
	default:
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 1;
	}
	scctx->isc_txqsizes[0] =
	    roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
	    sizeof(u32), DBA_ALIGN);
	scctx->isc_rxqsizes[0] =
	    roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
	    DBA_ALIGN);
	/* XXX */
	scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
	    CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
	scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
	scctx->isc_msix_bar = pci_msix_table_bar(dev);
	scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
	scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
	scctx->isc_tx_tso_segsize_max = PAGE_SIZE;

	scctx->isc_txrx = &ixgbe_txrx;

	/*
	 * Tell the upper layer(s) we support everything the PF
	 * driver does except...
	 *   Wake-on-LAN
	 */
	scctx->isc_capabilities = IXGBE_CAPS;
	scctx->isc_capabilities ^= IFCAP_WOL;
	scctx->isc_capenable = scctx->isc_capabilities;

	INIT_DEBUGOUT("ixv_if_attach_pre: end");

	return (0);

err_out:
	ixv_free_pci_resources(ctx);

	return (error);
} /* ixv_if_attach_pre */

static int
ixv_if_attach_post(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	device_t dev = iflib_get_dev(ctx);
	int error = 0;

	/* Setup OS specific network interface */
	error = ixv_setup_interface(ctx);
	if (error) {
		device_printf(dev, "Interface setup failed: %d\n", error);
		goto end;
	}

	/* Do the stats setup */
	ixv_save_stats(sc);
	ixv_init_stats(sc);
	ixv_add_stats_sysctls(sc);

end:
	return error;
} /* ixv_if_attach_post */

/************************************************************************
 * ixv_detach - Device removal routine
 *
 *   Called when the driver is being removed.
 *   Stops the adapter and deallocates all the resources
 *   that were allocated for driver operation.
 *
 *   return 0 on success, positive on failure
 ************************************************************************/
static int
ixv_if_detach(if_ctx_t ctx)
{
	INIT_DEBUGOUT("ixv_detach: begin");

	ixv_free_pci_resources(ctx);

	return (0);
} /* ixv_if_detach */

/************************************************************************
 * ixv_if_mtu_set
 ************************************************************************/
static int
ixv_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_t ifp = iflib_get_ifp(ctx);
	int error = 0;

	IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
	if (mtu > IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR) {
		error = EINVAL;
	} else {
		if_setmtu(ifp, mtu);
		sc->max_frame_size = if_getmtu(ifp) + IXGBE_MTU_HDR;
	}

	return error;
} /* ixv_if_mtu_set */
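
/*
 * Worked example for the MTU check above (assuming IXGBE_MTU_HDR is
 * ETHER_HDR_LEN + ETHER_CRC_LEN = 14 + 4 = 18 bytes, as in ixgbe.h):
 * an MTU of 9000 yields a max_frame_size of 9018, which must still fit
 * within IXGBE_MAX_FRAME_SIZE for the request to be accepted.
 */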

/************************************************************************
 * ixv_if_init - Init entry point
 *
 *   Used in two ways: It is used by the stack as an init entry
 *   point in network interface structure. It is also used
 *   by the driver as a hw/sw initialization routine to get
 *   to a consistent state.
 *
 *   return 0 on success, positive on failure
 ************************************************************************/
static void
ixv_if_init(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_t ifp = iflib_get_ifp(ctx);
	device_t dev = iflib_get_dev(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	int error = 0;

	INIT_DEBUGOUT("ixv_if_init: begin");
	hw->adapter_stopped = false;
	hw->mac.ops.stop_adapter(hw);

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	bcopy(if_getlladdr(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);

	/* Reset VF and renegotiate mailbox API version */
	hw->mac.ops.reset_hw(hw);
	hw->mac.ops.start_hw(hw);
	error = ixv_negotiate_api(sc);
	if (error) {
		device_printf(dev,
		    "Mailbox API negotiation failed in if_init!\n");
		return;
	}

	ixv_initialize_transmit_units(ctx);

	/* Setup Multicast table */
	ixv_if_multi_set(ctx);

	sc->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);

	/* Configure RX settings */
	ixv_initialize_receive_units(ctx);

	/* Set up VLAN offload and filter */
	ixv_setup_vlan_support(ctx);

	/* Set up MSI-X routing */
	ixv_configure_ivars(sc);

	/* Set up auto-mask */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(sc->vector), IXGBE_LINK_ITR);

	/* Stats init */
	ixv_init_stats(sc);

	/* Config/Enable Link */
	error = hw->mac.ops.get_link_state(hw, &sc->link_enabled);
	if (error) {
		/* PF is not capable of controlling VF state. Enable the link. */
		sc->link_enabled = true;
	} else if (sc->link_enabled == false)
		device_printf(dev, "VF is disabled by PF\n");

	hw->mac.ops.check_link(hw, &sc->link_speed, &sc->link_up,
	    false);

	/* And now turn on interrupts */
	ixv_if_enable_intr(ctx);

	return;
} /* ixv_if_init */

/************************************************************************
 * ixv_enable_queue
 ************************************************************************/
static inline void
ixv_enable_queue(struct ixgbe_softc *sc, u32 vector)
{
	struct ixgbe_hw *hw = &sc->hw;
	u32 queue = 1 << vector;
	u32 mask;

	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
} /* ixv_enable_queue */
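
/*
 * Example of the masking above: for MSI-X vector 1, queue = 1 << 1 = 0x2;
 * ANDing with IXGBE_EIMS_RTX_QUEUE keeps only queue-interrupt bits, and
 * since VTEIMS is a set-style register, writing 0x2 unmasks that single
 * queue without disturbing the mask state of any other vector.
 */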

/************************************************************************
 * ixv_disable_queue
 ************************************************************************/
static inline void
ixv_disable_queue(struct ixgbe_softc *sc, u32 vector)
{
	struct ixgbe_hw *hw = &sc->hw;
	u64 queue = (u64)(1 << vector);
	u32 mask;

	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
} /* ixv_disable_queue */


/************************************************************************
 * ixv_msix_que - MSI-X Queue Interrupt Service routine
 ************************************************************************/
static int
ixv_msix_que(void *arg)
{
	struct ix_rx_queue *que = arg;
	struct ixgbe_softc *sc = que->sc;

	ixv_disable_queue(sc, que->msix);
	++que->irqs;

	return (FILTER_SCHEDULE_THREAD);
} /* ixv_msix_que */

/************************************************************************
 * ixv_msix_mbx
 ************************************************************************/
static int
ixv_msix_mbx(void *arg)
{
	struct ixgbe_softc *sc = arg;
	struct ixgbe_hw *hw = &sc->hw;
	u32 reg;

	++sc->link_irq;

	/* First get the cause */
	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);

	/* Link status change */
	if (reg & IXGBE_EICR_LSC)
		iflib_admin_intr_deferred(sc->ctx);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);

	return (FILTER_HANDLED);
} /* ixv_msix_mbx */

/************************************************************************
 * ixv_media_status - Media Ioctl callback
 *
 *   Called whenever the user queries the status of
 *   the interface using ifconfig.
 ************************************************************************/
static void
ixv_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);

	INIT_DEBUGOUT("ixv_media_status: begin");

	iflib_admin_intr_deferred(ctx);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!sc->link_active)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	switch (sc->link_speed) {
	case IXGBE_LINK_SPEED_1GB_FULL:
		ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_10GB_FULL:
		ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_100_FULL:
		ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_10_FULL:
		ifmr->ifm_active |= IFM_10_T | IFM_FDX;
		break;
	}
} /* ixv_if_media_status */

/************************************************************************
 * ixv_if_media_change - Media Ioctl callback
 *
 *   Called when the user changes speed/duplex using
 *   the media/mediaopt option with ifconfig.
 ************************************************************************/
static int
ixv_if_media_change(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ifmedia *ifm = iflib_get_media(ctx);

	INIT_DEBUGOUT("ixv_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		break;
	default:
		device_printf(sc->dev, "Only auto media type\n");
		return (EINVAL);
	}

	return (0);
} /* ixv_if_media_change */


/************************************************************************
 * ixv_negotiate_api
 *
 *   Negotiate the Mailbox API with the PF;
 *   start with the most featured API first.
 ************************************************************************/
static int
ixv_negotiate_api(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	int mbx_api[] = {
		ixgbe_mbox_api_12,
		ixgbe_mbox_api_11,
		ixgbe_mbox_api_10,
		ixgbe_mbox_api_unknown
	};
	int i = 0;

	while (mbx_api[i] != ixgbe_mbox_api_unknown) {
		if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
			return (0);
		i++;
	}

	return (EINVAL);
} /* ixv_negotiate_api */


static u_int
ixv_if_multi_set_cb(void *cb_arg, struct sockaddr_dl *addr, u_int cnt)
{
	bcopy(LLADDR(addr),
	    &((u8 *)cb_arg)[cnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
	    IXGBE_ETH_LENGTH_OF_ADDRESS);

	return (++cnt);
}

/************************************************************************
 * ixv_if_multi_set - Multicast Update
 *
 *   Called whenever the multicast address list is updated.
 ************************************************************************/
static void
ixv_if_multi_set(if_ctx_t ctx)
{
	u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	u8 *update_ptr;
	if_t ifp = iflib_get_ifp(ctx);
	int mcnt = 0;

	IOCTL_DEBUGOUT("ixv_if_multi_set: begin");

	mcnt = if_foreach_llmaddr(ifp, ixv_if_multi_set_cb, mta);

	update_ptr = mta;

	sc->hw.mac.ops.update_mc_addr_list(&sc->hw, update_ptr, mcnt,
	    ixv_mc_array_itr, true);
} /* ixv_if_multi_set */

/************************************************************************
 * ixv_mc_array_itr
 *
 *   An iterator function needed by the multicast shared code.
 *   It feeds the shared code routine the addresses in the
 *   array built by ixv_if_multi_set() one by one.
 ************************************************************************/
static u8 *
ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
{
	u8 *addr = *update_ptr;
	u8 *newptr;

	*vmdq = 0;

	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
	*update_ptr = newptr;

	return addr;
} /* ixv_mc_array_itr */

/************************************************************************
 * ixv_if_local_timer - Timer routine
 *
 *   Checks for link status, updates statistics,
 *   and runs the watchdog check.
 ************************************************************************/
static void
ixv_if_local_timer(if_ctx_t ctx, uint16_t qid)
{
	if (qid != 0)
		return;

	/* Fire off the adminq task */
	iflib_admin_intr_deferred(ctx);
} /* ixv_if_local_timer */

/************************************************************************
 * ixv_if_update_admin_status - Update OS on link state
 *
 * Note: Only updates the OS on the cached link state.
 *       The real check of the hardware only happens with
 *       a link interrupt.
 ************************************************************************/
static void
ixv_if_update_admin_status(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	device_t dev = iflib_get_dev(ctx);
	s32 status;

	sc->hw.mac.get_link_status = true;

	status = ixgbe_check_link(&sc->hw, &sc->link_speed,
	    &sc->link_up, false);

	if (status != IXGBE_SUCCESS && sc->hw.adapter_stopped == false) {
		/* Mailbox's Clear To Send status is lost or timeout occurred.
		 * We need reinitialization. */
		if_init(iflib_get_ifp(ctx), ctx);
	}

	if (sc->link_up && sc->link_enabled) {
		if (sc->link_active == false) {
			if (bootverbose)
				device_printf(dev, "Link is up %d Gbps %s\n",
				    ((sc->link_speed == 128) ? 10 : 1),
				    "Full Duplex");
			sc->link_active = true;
			iflib_link_state_change(ctx, LINK_STATE_UP,
			    ixgbe_link_speed_to_baudrate(sc->link_speed));
		}
	} else { /* Link down */
		if (sc->link_active == true) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
			sc->link_active = false;
		}
	}

	/* Stats Update */
	ixv_update_stats(sc);
} /* ixv_if_update_admin_status */


/************************************************************************
 * ixv_if_stop - Stop the hardware
 *
 *   Disables all traffic on the adapter by issuing a
 *   global reset on the MAC and deallocates TX/RX buffers.
 ************************************************************************/
static void
ixv_if_stop(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;

	INIT_DEBUGOUT("ixv_stop: begin\n");

	ixv_if_disable_intr(ctx);

	hw->mac.ops.reset_hw(hw);
	sc->hw.adapter_stopped = false;
	hw->mac.ops.stop_adapter(hw);

	/* Update the stack */
	sc->link_up = false;
	ixv_if_update_admin_status(ctx);

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
} /* ixv_if_stop */


/************************************************************************
 * ixv_identify_hardware - Determine hardware revision.
 ************************************************************************/
static void
ixv_identify_hardware(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	device_t dev = iflib_get_dev(ctx);
	struct ixgbe_hw *hw = &sc->hw;

	/* Save off the information about this board */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_get_revid(dev);
	hw->subsystem_vendor_id = pci_get_subvendor(dev);
	hw->subsystem_device_id = pci_get_subdevice(dev);

	/* A subset of set_mac_type */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_VF:
		hw->mac.type = ixgbe_mac_82599_vf;
		break;
	case IXGBE_DEV_ID_X540_VF:
		hw->mac.type = ixgbe_mac_X540_vf;
		break;
	case IXGBE_DEV_ID_X550_VF:
		hw->mac.type = ixgbe_mac_X550_vf;
		break;
	case IXGBE_DEV_ID_X550EM_X_VF:
		hw->mac.type = ixgbe_mac_X550EM_x_vf;
		break;
	case IXGBE_DEV_ID_X550EM_A_VF:
		hw->mac.type = ixgbe_mac_X550EM_a_vf;
		break;
	case IXGBE_DEV_ID_E610_VF:
		hw->mac.type = ixgbe_mac_E610_vf;
		break;
	default:
		device_printf(dev, "unknown mac type\n");
		hw->mac.type = ixgbe_mac_unknown;
		break;
	}
} /* ixv_identify_hardware */

/************************************************************************
 * ixv_if_msix_intr_assign - Setup MSI-X Interrupt resources and handlers
 ************************************************************************/
static int
ixv_if_msix_intr_assign(if_ctx_t ctx, int msix)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	device_t dev = iflib_get_dev(ctx);
	struct ix_rx_queue *rx_que = sc->rx_queues;
	struct ix_tx_queue *tx_que;
	int error, rid, vector = 0;
	char buf[16];

	for (int i = 0; i < sc->num_rx_queues; i++, vector++, rx_que++) {
		rid = vector + 1;

		snprintf(buf, sizeof(buf), "rxq%d", i);
		error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
		    IFLIB_INTR_RXTX, ixv_msix_que, rx_que, rx_que->rxr.me,
		    buf);

		if (error) {
			device_printf(iflib_get_dev(ctx),
			    "Failed to allocate que int %d err: %d",
			    i, error);
			sc->num_rx_queues = i + 1;
			goto fail;
		}

		rx_que->msix = vector;
	}

	for (int i = 0; i < sc->num_tx_queues; i++) {
		snprintf(buf, sizeof(buf), "txq%d", i);
		tx_que = &sc->tx_queues[i];
		tx_que->msix = i % sc->num_rx_queues;
		iflib_softirq_alloc_generic(ctx,
		    &sc->rx_queues[tx_que->msix].que_irq,
		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
	}
	rid = vector + 1;
	error = iflib_irq_alloc_generic(ctx, &sc->irq, rid,
	    IFLIB_INTR_ADMIN, ixv_msix_mbx, sc, 0, "aq");
	if (error) {
		device_printf(iflib_get_dev(ctx),
		    "Failed to register admin handler");
		return (error);
	}

	sc->vector = vector;
	/*
	 * Due to a broken design, QEMU will fail to properly
	 * enable the guest for MSI-X unless the vectors in
	 * the table are all set up, so we must rewrite the
	 * ENABLE bit in the MSI-X control register again at this
	 * point to cause it to successfully initialize us.
	 */
	if (sc->hw.mac.type == ixgbe_mac_82599_vf) {
		int msix_ctrl;

		if (pci_find_cap(dev, PCIY_MSIX, &rid)) {
			device_printf(dev,
			    "Finding MSIX capability failed\n");
		} else {
			rid += PCIR_MSIX_CTRL;
			msix_ctrl = pci_read_config(dev, rid, 2);
			msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
			pci_write_config(dev, rid, msix_ctrl, 2);
		}
	}

	return (0);

fail:
	iflib_irq_free(ctx, &sc->irq);
	rx_que = sc->rx_queues;
	for (int i = 0; i < sc->num_rx_queues; i++, rx_que++)
		iflib_irq_free(ctx, &rx_que->que_irq);

	return (error);
} /* ixv_if_msix_intr_assign */

/************************************************************************
 * ixv_allocate_pci_resources
 ************************************************************************/
static int
ixv_allocate_pci_resources(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	device_t dev = iflib_get_dev(ctx);
	int rid;

	rid = PCIR_BAR(0);
	sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);

	if (!(sc->pci_mem)) {
		device_printf(dev,
		    "Unable to allocate bus resource: memory\n");
		return (ENXIO);
	}

	sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->pci_mem);
	sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->pci_mem);
	sc->hw.hw_addr = (u8 *)&sc->osdep.mem_bus_space_handle;

	return (0);
} /* ixv_allocate_pci_resources */

/************************************************************************
 * ixv_free_pci_resources
 ************************************************************************/
static void
ixv_free_pci_resources(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ix_rx_queue *que = sc->rx_queues;
	device_t dev = iflib_get_dev(ctx);

	/* Release all MSI-X queue resources */
	if (sc->intr_type == IFLIB_INTR_MSIX)
		iflib_irq_free(ctx, &sc->irq);

	if (que != NULL) {
		for (int i = 0; i < sc->num_rx_queues; i++, que++) {
			iflib_irq_free(ctx, &que->que_irq);
		}
	}

	if (sc->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->pci_mem), sc->pci_mem);
} /* ixv_free_pci_resources */

/************************************************************************
 * ixv_setup_interface
 *
 *   Setup networking device structure and register an interface.
 ************************************************************************/
static int
ixv_setup_interface(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_softc_ctx_t scctx = sc->shared;
	if_t ifp = iflib_get_ifp(ctx);

	INIT_DEBUGOUT("ixv_setup_interface: begin");

	if_setbaudrate(ifp, IF_Gbps(10));
	if_setsendqlen(ifp, scctx->isc_ntxd[0] - 2);

	sc->max_frame_size = if_getmtu(ifp) + IXGBE_MTU_HDR;
	ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);

	return 0;
} /* ixv_setup_interface */

/************************************************************************
 * ixv_if_get_counter
 ************************************************************************/
static uint64_t
ixv_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_t ifp = iflib_get_ifp(ctx);

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (sc->ipackets);
	case IFCOUNTER_OPACKETS:
		return (sc->opackets);
	case IFCOUNTER_IBYTES:
		return (sc->ibytes);
	case IFCOUNTER_OBYTES:
		return (sc->obytes);
	case IFCOUNTER_IMCASTS:
		return (sc->imcasts);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
} /* ixv_if_get_counter */

/* ixv_if_needs_restart - Tell iflib when the driver needs to be reinitialized
 * @ctx: iflib context
 * @event: event code to check
 *
 * Defaults to returning true for every event.
 *
 * @returns true if iflib needs to reinit the interface
 */
static bool
ixv_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
{
	switch (event) {
	case IFLIB_RESTART_VLAN_CONFIG:
		/* XXX: This may not need to return true */
	default:
		return (true);
	}
}

/************************************************************************
 * ixv_initialize_transmit_units - Enable transmit unit.
 ************************************************************************/
static void
ixv_initialize_transmit_units(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	if_softc_ctx_t scctx = sc->shared;
	struct ix_tx_queue *que = sc->tx_queues;
	int i;

	for (i = 0; i < sc->num_tx_queues; i++, que++) {
		struct tx_ring *txr = &que->txr;
		u64 tdba = txr->tx_paddr;
		u32 txctrl, txdctl;
		int j = txr->me;

		/* Set WTHRESH to 8, burst writeback */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);

		/* Set the HW Tx Head and Tail indices */
		IXGBE_WRITE_REG(&sc->hw, IXGBE_VFTDH(j), 0);
		IXGBE_WRITE_REG(&sc->hw, IXGBE_VFTDT(j), 0);

		/* Set Tx Tail register */
		txr->tail = IXGBE_VFTDT(j);

		txr->tx_rs_cidx = txr->tx_rs_pidx;
		/* Initialize the last processed descriptor to be the end of
		 * the ring, rather than the start, so that we avoid an
		 * off-by-one error when calculating how many descriptors are
		 * done in the credits_update function.
		 */
		txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
		for (int k = 0; k < scctx->isc_ntxd[0]; k++)
			txr->tx_rsq[k] = QIDX_INVALID;

		/* Set Ring parameters */
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j),
		    scctx->isc_ntxd[0] * sizeof(struct ixgbe_legacy_tx_desc));
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);

		/* Now enable */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	return;
} /* ixv_initialize_transmit_units */

/************************************************************************
 * ixv_initialize_rss_mapping
 ************************************************************************/
static void
ixv_initialize_rss_mapping(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	u32 reta = 0, mrqc, rss_key[10];
	int queue_id;
	int i, j;
	u32 rss_hash_config;

	if (sc->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *)&rss_key);
	} else {
		/* set up random bits */
		arc4rand(&rss_key, sizeof(rss_key), 0);
	}

	/* Now fill out hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);

	/* Set up the redirection table */
	for (i = 0, j = 0; i < 64; i++, j++) {
		if (j == sc->num_rx_queues)
			j = 0;

		if (sc->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_rx_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % sc->num_rx_queues;
		} else
			queue_id = j;

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
		reta >>= 8;
		reta |= ((uint32_t)queue_id) << 24;
		if ((i & 3) == 3) {
			IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
			reta = 0;
		}
	}
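
	/*
	 * Worked example of the packing above: with two RX queues and no
	 * kernel RSS support, entries 0..63 receive 0,1,0,1,...  A 32-bit
	 * register is written after every fourth entry, one queue id per
	 * byte with entry (n+0) in the low byte, so VFRETA(0) ends up as
	 * 0x01000100 for entries 0-3.
	 */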

	/* Perform hash on these packet types */
	if (sc->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
		                | RSS_HASHTYPE_RSS_TCP_IPV4
		                | RSS_HASHTYPE_RSS_IPV6
		                | RSS_HASHTYPE_RSS_TCP_IPV6;
	}

	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		device_printf(sc->dev,
		    "%s: RSS_HASHTYPE_RSS_IPV6_EX defined,"
		    " but not supported\n", __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		device_printf(sc->dev,
		    "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined,"
		    " but not supported\n", __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		device_printf(sc->dev,
		    "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined,"
		    " but not supported\n", __func__);
	IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
} /* ixv_initialize_rss_mapping */

#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
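
/*
 * Example: with IXGBE_SRRCTL_BSIZEPKT_SHIFT == 10 (1 KB units), the
 * round-up below turns a 2048-byte rx_mbuf_sz into
 * bufsz = (2048 + 1023) >> 10 = 2, i.e. a 2 KB packet buffer size in
 * the SRRCTL BSIZEPKT field.
 */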
/************************************************************************
 * ixv_initialize_receive_units - Setup receive registers and features.
 ************************************************************************/
static void
ixv_initialize_receive_units(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_softc_ctx_t scctx;
	struct ixgbe_hw *hw = &sc->hw;
#ifdef DEV_NETMAP
	if_t ifp = iflib_get_ifp(ctx);
#endif
	struct ix_rx_queue *que = sc->rx_queues;
	u32 bufsz, psrtype;

	bufsz = (sc->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	psrtype = IXGBE_PSRTYPE_TCPHDR |
	    IXGBE_PSRTYPE_UDPHDR |
	    IXGBE_PSRTYPE_IPV4HDR |
	    IXGBE_PSRTYPE_IPV6HDR |
	    IXGBE_PSRTYPE_L2HDR;

	if (sc->num_rx_queues > 1)
		psrtype |= 1 << 29;

	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);

	/* Tell PF our max_frame size */
	if (ixgbevf_rlpml_set_vf(hw, sc->max_frame_size) != 0) {
		device_printf(sc->dev,
		    "There is a problem with the PF setup.  It is likely the"
		    " receive unit for this VF will not function correctly."
		    "\n");
	}
	scctx = sc->shared;

	for (int i = 0; i < sc->num_rx_queues; i++, que++) {
		struct rx_ring *rxr = &que->rxr;
		u64 rdba = rxr->rx_paddr;
		u32 reg, rxdctl;
		int j = rxr->me;

		/* Disable the queue */
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
		for (int k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
			    IXGBE_RXDCTL_ENABLE)
				msec_delay(1);
			else
				break;
		}
		wmb();
		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j),
		    scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));

		/* Reset the ring indices */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);

		/* Set up the SRRCTL register */
		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(j));
		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		reg |= bufsz;
		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(j), reg);

		/* Capture Rx Tail index */
		rxr->tail = IXGBE_VFRDT(rxr->me);

		/* Do the queue enabling last */
		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
		for (int l = 0; l < 10; l++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			msec_delay(1);
		}
		wmb();

		/* Set the Tail Pointer */
#ifdef DEV_NETMAP
		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
		if (if_getcapenable(ifp) & IFCAP_NETMAP) {
			struct netmap_adapter *na = NA(ifp);
			struct netmap_kring *kring = na->rx_rings[j];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
			    scctx->isc_nrxd[0] - 1);
	}

	/*
	 * Do not touch RSS and RETA settings for older hardware
	 * as those are shared among PF and all VF.
	 */
	if (sc->hw.mac.type >= ixgbe_mac_X550_vf)
		ixv_initialize_rss_mapping(sc);
} /* ixv_initialize_receive_units */

/************************************************************************
 * ixv_setup_vlan_support
 ************************************************************************/
static void
ixv_setup_vlan_support(if_ctx_t ctx)
{
	if_t ifp = iflib_get_ifp(ctx);
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	u32 ctrl, vid, vfta, retry;

	/*
	 * We get here through if_init, meaning a soft reset,
	 * which has already cleared the VFTA and other state.
	 * So if no VLANs have been registered, do nothing.
	 */
	if (sc->num_vlans == 0)
		return;

	if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) {
		/* Enable the queues */
		for (int i = 0; i < sc->num_rx_queues; i++) {
			ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
			ctrl |= IXGBE_RXDCTL_VME;
			IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
			/*
			 * Let Rx path know that it needs to store VLAN tag
			 * as part of extra mbuf info.
			 */
			sc->rx_queues[i].rxr.vtag_strip = true;
		}
	}

	/*
	 * If filtering VLAN tags is disabled,
	 * there is no need to fill the VLAN Filter Table Array (VFTA).
	 */
	if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0)
		return;

	/*
	 * A soft reset zeroes out the VFTA, so
	 * we need to repopulate it now.
	 */
	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
		if (sc->shadow_vfta[i] == 0)
			continue;
		vfta = sc->shadow_vfta[i];
		/*
		 * Reconstruct the VLAN IDs from
		 * the bits set in each of the
		 * array words.
		 */
		for (int j = 0; j < 32; j++) {
			retry = 0;
			if ((vfta & (1 << j)) == 0)
				continue;
			vid = (i * 32) + j;
			/* Call the shared code mailbox routine */
			while (hw->mac.ops.set_vfta(hw, vid, 0, true, false)) {
				if (++retry > 5)
					break;
			}
		}
	}
} /* ixv_setup_vlan_support */

/************************************************************************
 * ixv_if_register_vlan
 *
 *   Run via a vlan config EVENT, it enables us to use the
 *   HW Filter table since we can get the vlan id. This just
 *   creates the entry in the soft version of the VFTA, init
 *   will repopulate the real table.
 ************************************************************************/
static void
ixv_if_register_vlan(if_ctx_t ctx, u16 vtag)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	u16 index, bit;

	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	sc->shadow_vfta[index] |= (1 << bit);
	++sc->num_vlans;
} /* ixv_if_register_vlan */
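
/*
 * Example of the bit math above: vtag 100 gives
 * index = (100 >> 5) & 0x7F = 3 and bit = 100 & 0x1F = 4, so bit 4 of
 * shadow_vfta[3] is set; the real VFTA entry is written later when
 * ixv_setup_vlan_support() repopulates the hardware table.
 */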

/************************************************************************
 * ixv_if_unregister_vlan
 *
 *   Run via a vlan unconfig EVENT, remove our entry
 *   in the soft vfta.
 ************************************************************************/
static void
ixv_if_unregister_vlan(if_ctx_t ctx, u16 vtag)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	u16 index, bit;

	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	sc->shadow_vfta[index] &= ~(1 << bit);
	--sc->num_vlans;
} /* ixv_if_unregister_vlan */
1624 
1625 /************************************************************************
1626  * ixv_if_enable_intr
1627  ************************************************************************/
1628 static void
1629 ixv_if_enable_intr(if_ctx_t ctx)
1630 {
1631 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
1632 	struct ixgbe_hw *hw = &sc->hw;
1633 	struct ix_rx_queue *que = sc->rx_queues;
1634 	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
1635 
1636 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
1637 
1638 	mask = IXGBE_EIMS_ENABLE_MASK;
1639 	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
1640 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
1641 
1642 	for (int i = 0; i < sc->num_rx_queues; i++, que++)
1643 		ixv_enable_queue(sc, que->msix);
1644 
1645 	IXGBE_WRITE_FLUSH(hw);
1646 } /* ixv_if_enable_intr */
1647 
1648 /************************************************************************
1649  * ixv_if_disable_intr
1650  ************************************************************************/
1651 static void
1652 ixv_if_disable_intr(if_ctx_t ctx)
1653 {
1654 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
1655 	IXGBE_WRITE_REG(&sc->hw, IXGBE_VTEIAC, 0);
1656 	IXGBE_WRITE_REG(&sc->hw, IXGBE_VTEIMC, ~0);
1657 	IXGBE_WRITE_FLUSH(&sc->hw);
1658 } /* ixv_if_disable_intr */
1659 
1660 /************************************************************************
1661  * ixv_if_rx_queue_intr_enable
1662  ************************************************************************/
1663 static int
1664 ixv_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
1665 {
1666 	struct ixgbe_softc *sc = iflib_get_softc(ctx);
1667 	struct ix_rx_queue *que = &sc->rx_queues[rxqid];
1668 
1669 	ixv_enable_queue(sc, que->rxr.me);
1670 
1671 	return (0);
1672 } /* ixv_if_rx_queue_intr_enable */
1673 
1674 /************************************************************************
1675  * ixv_set_ivar
1676  *
1677  *   Setup the correct IVAR register for a particular MSI-X interrupt
1678  *    - entry is the register array entry
1679  *    - vector is the MSI-X vector for this queue
1680  *    - type is RX/TX/MISC
1681  ************************************************************************/
1682 static void
1683 ixv_set_ivar(struct ixgbe_softc *sc, u8 entry, u8 vector, s8 type)
1684 {
1685 	struct ixgbe_hw *hw = &sc->hw;
1686 	u32 ivar, index;
1687 
1688 	vector |= IXGBE_IVAR_ALLOC_VAL;
1689 
1690 	if (type == -1) { /* MISC IVAR */
1691 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
1692 		ivar &= ~0xFF;
1693 		ivar |= vector;
1694 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
1695 	} else {          /* RX/TX IVARS */
1696 		index = (16 * (entry & 1)) + (8 * type);
1697 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
1698 		ivar &= ~(0xFF << index);
1699 		ivar |= (vector << index);
1700 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
1701 	}
1702 } /* ixv_set_ivar */
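
/*
 * Illustrative sketch, not driver code: each 32-bit VTIVAR register holds
 * four 8-bit vector fields for a pair of queues, RX at bit offsets 0/16
 * and TX at 8/24, which is what index = 16 * (entry & 1) + 8 * type
 * computes.  The helper below is hypothetical; 0x80 stands in for the
 * IXGBE_IVAR_ALLOC_VAL "vector valid" bit.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t
ivar_set_field(uint32_t ivar, uint8_t entry, uint8_t vector, uint8_t type)
{
	uint32_t index = (16 * (entry & 1)) + (8 * type);

	ivar &= ~((uint32_t)0xFF << index);		/* clear old vector */
	ivar |= ((uint32_t)(vector | 0x80) << index);	/* install + valid */
	return (ivar);
}

int
main(void)
{
	/* Queue 3, TX (type 1), vector 2 -> field at bit 24: 0x82000000. */
	printf("0x%08" PRIx32 "\n", ivar_set_field(0, 3, 2, 1));
	return (0);
}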
1703 
1704 /************************************************************************
1705  * ixv_configure_ivars
1706  ************************************************************************/
1707 static void
1708 ixv_configure_ivars(struct ixgbe_softc *sc)
1709 {
1710 	struct ix_rx_queue *que = sc->rx_queues;
1711 
1712 	MPASS(sc->num_rx_queues == sc->num_tx_queues);
1713 
1714 	for (int i = 0; i < sc->num_rx_queues; i++, que++) {
1715 		/* First the RX queue entry */
1716 		ixv_set_ivar(sc, i, que->msix, 0);
1717 		/* ... and the TX */
1718 		ixv_set_ivar(sc, i, que->msix, 1);
1719 		/* Set an initial value in EITR */
1720 		IXGBE_WRITE_REG(&sc->hw, IXGBE_VTEITR(que->msix),
1721 		    IXGBE_EITR_DEFAULT);
1722 	}
1723 
1724 	/* For the mailbox interrupt */
1725 	ixv_set_ivar(sc, 1, sc->vector, -1);
1726 } /* ixv_configure_ivars */
1727 
1728 /************************************************************************
1729  * ixv_save_stats
1730  *
1731  *   The VF stats registers never have a truly virgin
1732  *   starting point, so this routine tries to make an
1733  *   starting point, so this routine establishes an
1734  *   artificial one, marking ground zero at attach
1735  *   time.
1736 static void
1737 ixv_save_stats(struct ixgbe_softc *sc)
1738 {
1739 	if (sc->stats.vf.vfgprc || sc->stats.vf.vfgptc) {
1740 		sc->stats.vf.saved_reset_vfgprc +=
1741 		    sc->stats.vf.vfgprc - sc->stats.vf.base_vfgprc;
1742 		sc->stats.vf.saved_reset_vfgptc +=
1743 		    sc->stats.vf.vfgptc - sc->stats.vf.base_vfgptc;
1744 		sc->stats.vf.saved_reset_vfgorc +=
1745 		    sc->stats.vf.vfgorc - sc->stats.vf.base_vfgorc;
1746 		sc->stats.vf.saved_reset_vfgotc +=
1747 		    sc->stats.vf.vfgotc - sc->stats.vf.base_vfgotc;
1748 		sc->stats.vf.saved_reset_vfmprc +=
1749 		    sc->stats.vf.vfmprc - sc->stats.vf.base_vfmprc;
1750 	}
1751 } /* ixv_save_stats */
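
/*
 * Illustrative sketch, not driver code: the banking pattern above keeps a
 * lifetime total across soft resets as saved + (current - base).  Names
 * below are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

struct vstat {
	uint64_t saved;	/* accumulated over previous reset epochs */
	uint64_t base;	/* register reading at the last (re)init */
};

int
main(void)
{
	struct vstat s = { .saved = 0, .base = 100 };

	s.saved += 350 - s.base;	/* bank 250 before the reset */
	s.base = 0;			/* counters restart afterwards */
	printf("%llu\n", (unsigned long long)(s.saved + (400 - s.base)));
	/* prints 650: 250 banked + 400 counted in the new epoch */
	return (0);
}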
1752 
1753 /************************************************************************
1754  * ixv_init_stats
1755  ************************************************************************/
1756 static void
1757 ixv_init_stats(struct ixgbe_softc *sc)
1758 {
1759 	struct ixgbe_hw *hw = &sc->hw;
1760 
1761 	sc->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
1762 	sc->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
1763 	sc->stats.vf.last_vfgorc |=
1764 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
1765 
1766 	sc->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
1767 	sc->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
1768 	sc->stats.vf.last_vfgotc |=
1769 	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
1770 
1771 	sc->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
1772 
1773 	sc->stats.vf.base_vfgprc = sc->stats.vf.last_vfgprc;
1774 	sc->stats.vf.base_vfgorc = sc->stats.vf.last_vfgorc;
1775 	sc->stats.vf.base_vfgptc = sc->stats.vf.last_vfgptc;
1776 	sc->stats.vf.base_vfgotc = sc->stats.vf.last_vfgotc;
1777 	sc->stats.vf.base_vfmprc = sc->stats.vf.last_vfmprc;
1778 } /* ixv_init_stats */
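
/*
 * Illustrative sketch, not driver code: the octet counters are 36 bits
 * wide and are read as a 32-bit LSB register plus a 4-bit MSB register,
 * stitched together as above.  The helper is hypothetical.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t
read_36bit(uint32_t lsb, uint32_t msb)
{
	return (((uint64_t)msb << 32) | lsb);
}

int
main(void)
{
	/* MSB 0xA over LSB 0x12345678 yields 0xA12345678. */
	printf("0x%" PRIx64 "\n", read_36bit(0x12345678, 0xA));
	return (0);
}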
1779 
1780 #define UPDATE_STAT_32(reg, last, count)                \
1781 {                                                       \
1782 	u32 current = IXGBE_READ_REG(hw, reg);          \
1783 	if (current < last)                             \
1784 		count += 0x100000000LL;                 \
1785 	last = current;                                 \
1786 	count &= 0xFFFFFFFF00000000LL;                  \
1787 	count |= current;                               \
1788 }
1789 
1790 #define UPDATE_STAT_36(lsb, msb, last, count)           \
1791 {                                                       \
1792 	u64 cur_lsb = IXGBE_READ_REG(hw, lsb);          \
1793 	u64 cur_msb = IXGBE_READ_REG(hw, msb);          \
1794 	u64 current = ((cur_msb << 32) | cur_lsb);      \
1795 	if (current < last)                             \
1796 		count += 0x1000000000LL;                \
1797 	last = current;                                 \
1798 	count &= 0xFFFFFFF000000000LL;                  \
1799 	count |= current;                               \
1800 }
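
/*
 * Illustrative sketch, not driver code: UPDATE_STAT_32 detects a register
 * wrap (current < last), adds the 2^32 carry into the upper half of the
 * 64-bit running count, and mirrors the hardware value in the low word.
 * UPDATE_STAT_36 is the same idea with a 2^36 carry.  The function below
 * is a hypothetical standalone rendering of the 32-bit macro.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static void
update_stat_32(uint32_t current, uint32_t *last, uint64_t *count)
{
	if (current < *last)			/* the register wrapped */
		*count += 0x100000000ULL;
	*last = current;
	*count &= 0xFFFFFFFF00000000ULL;	/* keep accumulated carries */
	*count |= current;			/* low word tracks hardware */
}

int
main(void)
{
	uint32_t last = 0xFFFFFFF0;
	uint64_t count = 0xFFFFFFF0;

	update_stat_32(0x10, &last, &count);
	printf("0x%" PRIx64 "\n", count);	/* 0x100000010 */
	return (0);
}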
1801 
1802 /************************************************************************
1803  * ixv_update_stats - Update the board statistics counters.
1804  ************************************************************************/
1805 void
1806 ixv_update_stats(struct ixgbe_softc *sc)
1807 {
1808 	struct ixgbe_hw *hw = &sc->hw;
1809 	struct ixgbevf_hw_stats *stats = &sc->stats.vf;
1810 
1811 	UPDATE_STAT_32(IXGBE_VFGPRC, sc->stats.vf.last_vfgprc,
1812 	    sc->stats.vf.vfgprc);
1813 	UPDATE_STAT_32(IXGBE_VFGPTC, sc->stats.vf.last_vfgptc,
1814 	    sc->stats.vf.vfgptc);
1815 	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
1816 	    sc->stats.vf.last_vfgorc, sc->stats.vf.vfgorc);
1817 	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
1818 	    sc->stats.vf.last_vfgotc, sc->stats.vf.vfgotc);
1819 	UPDATE_STAT_32(IXGBE_VFMPRC, sc->stats.vf.last_vfmprc,
1820 	    sc->stats.vf.vfmprc);
1821 
1822 	/* Fill out the OS statistics structure */
1823 	IXGBE_SET_IPACKETS(sc, stats->vfgprc);
1824 	IXGBE_SET_OPACKETS(sc, stats->vfgptc);
1825 	IXGBE_SET_IBYTES(sc, stats->vfgorc);
1826 	IXGBE_SET_OBYTES(sc, stats->vfgotc);
1827 	IXGBE_SET_IMCASTS(sc, stats->vfmprc);
1828 } /* ixv_update_stats */
1829 
1830 /************************************************************************
1831  * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
1832  ************************************************************************/
1833 static void
1834 ixv_add_stats_sysctls(struct ixgbe_softc *sc)
1835 {
1836 	device_t dev = sc->dev;
1837 	struct ix_tx_queue *tx_que = sc->tx_queues;
1838 	struct ix_rx_queue *rx_que = sc->rx_queues;
1839 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
1840 	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
1841 	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
1842 	struct ixgbevf_hw_stats *stats = &sc->stats.vf;
1843 	struct sysctl_oid *stat_node, *queue_node;
1844 	struct sysctl_oid_list *stat_list, *queue_list;
1845 
1846 #define QUEUE_NAME_LEN 32
1847 	char namebuf[QUEUE_NAME_LEN];
1848 
1849 	/* Driver Statistics */
1850 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
1851 	    CTLFLAG_RD, &sc->watchdog_events, "Watchdog timeouts");
1852 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
1853 	    CTLFLAG_RD, &sc->link_irq, "Link MSI-X IRQ Handled");
1854 
1855 	for (int i = 0; i < sc->num_tx_queues; i++, tx_que++) {
1856 		struct tx_ring *txr = &tx_que->txr;
1857 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1858 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1859 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
1860 		queue_list = SYSCTL_CHILDREN(queue_node);
1861 
1862 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
1863 		    CTLFLAG_RD, &(txr->tso_tx), "TSO Packets");
1864 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
1865 		    CTLFLAG_RD, &(txr->total_packets), "TX Packets");
1866 	}
1867 
1868 	for (int i = 0; i < sc->num_rx_queues; i++, rx_que++) {
1869 		struct rx_ring *rxr = &rx_que->rxr;
1870 		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
1871 		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
1872 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
1873 		queue_list = SYSCTL_CHILDREN(queue_node);
1874 
1875 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
1876 		    CTLFLAG_RD, &(rx_que->irqs), "IRQs on queue");
1877 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
1878 		    CTLFLAG_RD, &(rxr->rx_packets), "RX packets");
1879 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
1880 		    CTLFLAG_RD, &(rxr->rx_bytes), "RX bytes");
1881 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
1882 		    CTLFLAG_RD, &(rxr->rx_discarded), "Discarded RX packets");
1883 	}
1884 
1885 	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
1886 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
1887 	    "VF Statistics (read from HW registers)");
1888 	stat_list = SYSCTL_CHILDREN(stat_node);
1889 
1890 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
1891 	    CTLFLAG_RD, &stats->vfgprc, "Good Packets Received");
1892 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
1893 	    CTLFLAG_RD, &stats->vfgorc, "Good Octets Received");
1894 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
1895 	    CTLFLAG_RD, &stats->vfmprc, "Multicast Packets Received");
1896 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
1897 	    CTLFLAG_RD, &stats->vfgptc, "Good Packets Transmitted");
1898 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
1899 	    CTLFLAG_RD, &stats->vfgotc, "Good Octets Transmitted");
1900 } /* ixv_add_stats_sysctls */
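
/*
 * Note: these nodes surface under the per-device sysctl tree, e.g.
 * (assuming unit 0) dev.ixv.0.mac.good_pkts_rcvd and
 * dev.ixv.0.queue0.rx_packets.
 */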
1901 
1902 /************************************************************************
1903  * ixv_print_debug_info
1904  *
1905  *   Called only when the debug sysctl below is set to 1.
1906  *   Provides a way to take a look at important statistics
1907  *   maintained by the driver and hardware.
1908  ************************************************************************/
1909 static void
1910 ixv_print_debug_info(struct ixgbe_softc *sc)
1911 {
1912 	device_t dev = sc->dev;
1913 	struct ixgbe_hw *hw = &sc->hw;
1914 
1915 	device_printf(dev, "Error Byte Count = %u\n",
1916 	    IXGBE_READ_REG(hw, IXGBE_ERRBC));
1917 
1918 	device_printf(dev, "MBX IRQ Handled: %lu\n", (unsigned long)sc->link_irq);
1919 } /* ixv_print_debug_info */
1920 
1921 /************************************************************************
1922  * ixv_sysctl_debug
1923  ************************************************************************/
1924 static int
1925 ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
1926 {
1927 	struct ixgbe_softc *sc;
1928 	int error, result;
1929 
1930 	result = -1;
1931 	error = sysctl_handle_int(oidp, &result, 0, req);
1932 
1933 	if (error || !req->newptr)
1934 		return (error);
1935 
1936 	if (result == 1) {
1937 		sc = (struct ixgbe_softc *)arg1;
1938 		ixv_print_debug_info(sc);
1939 	}
1940 
1941 	return (error);
1942 } /* ixv_sysctl_debug */
1943 
1944 /************************************************************************
1945  * ixv_init_device_features
1946  ************************************************************************/
1947 static void
1948 ixv_init_device_features(struct ixgbe_softc *sc)
1949 {
1950 	sc->feat_cap = IXGBE_FEATURE_NETMAP |
1951 	    IXGBE_FEATURE_VF |
1952 	    IXGBE_FEATURE_LEGACY_TX;
1953 
1954 	/* A tad short on feature flags for VFs, at the moment. */
1955 	switch (sc->hw.mac.type) {
1956 	case ixgbe_mac_82599_vf:
1957 		break;
1958 	case ixgbe_mac_X540_vf:
1959 		break;
1960 	case ixgbe_mac_X550_vf:
1961 	case ixgbe_mac_X550EM_x_vf:
1962 	case ixgbe_mac_X550EM_a_vf:
1963 	case ixgbe_mac_E610_vf:
1964 		sc->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
1965 		sc->feat_cap |= IXGBE_FEATURE_RSS;
1966 		break;
1967 	default:
1968 		break;
1969 	}
1970 
1971 	/* Enabled by default... */
1972 	/* Is a virtual function (VF) */
1973 	if (sc->feat_cap & IXGBE_FEATURE_VF)
1974 		sc->feat_en |= IXGBE_FEATURE_VF;
1975 	/* Netmap */
1976 	if (sc->feat_cap & IXGBE_FEATURE_NETMAP)
1977 		sc->feat_en |= IXGBE_FEATURE_NETMAP;
1978 	/* Receive-Side Scaling (RSS) */
1979 	if (sc->feat_cap & IXGBE_FEATURE_RSS)
1980 		sc->feat_en |= IXGBE_FEATURE_RSS;
1981 	/* Needs advanced context descriptor regardless of offloads req'd */
1982 	if (sc->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
1983 		sc->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
1984 } /* ixv_init_device_features */
1985 
1986