/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

  1. Redistributions of source code must retain the above copyright notice,
     this list of conditions and the following disclaimer.

  2. Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in the
     documentation and/or other materials provided with the distribution.

  3. Neither the name of the Intel Corporation nor the names of its
     contributors may be used to endorse or promote products derived from
     this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/


#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"

#include "ixgbe.h"
#include "ifdi_if.h"

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>

/************************************************************************
 * Driver version
 ************************************************************************/
static const char ixv_driver_version[] = "2.0.1-k";

/************************************************************************
 * PCI Device ID Table
 *
 *   Used by probe to select devices to load on
 *   Last field stores an index into ixv_strings
 *   Last entry must be all 0s
 *
 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 ************************************************************************/
static const pci_vendor_info_t ixv_vendor_info_array[] =
{
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, "Intel(R) X520 82599 Virtual Function"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, "Intel(R) X540 Virtual Function"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, "Intel(R) X550 Virtual Function"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, "Intel(R) X552 Virtual Function"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, "Intel(R) X553 Virtual Function"),
	/* required last entry */
	PVID_END
};

/************************************************************************
 * Function prototypes
 ************************************************************************/
static void *ixv_register(device_t);
static int ixv_if_attach_pre(if_ctx_t);
static int ixv_if_attach_post(if_ctx_t);
static int ixv_if_detach(if_ctx_t);

static int ixv_if_rx_queue_intr_enable(if_ctx_t, uint16_t);
static int ixv_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
static int ixv_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
static void ixv_if_queues_free(if_ctx_t);
static void ixv_identify_hardware(if_ctx_t);
static void ixv_init_device_features(struct ixgbe_softc *);
static int ixv_allocate_pci_resources(if_ctx_t);
static void ixv_free_pci_resources(if_ctx_t);
static int ixv_setup_interface(if_ctx_t);
static void ixv_if_media_status(if_ctx_t, struct ifmediareq *);
static int ixv_if_media_change(if_ctx_t);
static void ixv_if_update_admin_status(if_ctx_t);
static int ixv_if_msix_intr_assign(if_ctx_t, int);

static int ixv_if_mtu_set(if_ctx_t, uint32_t);
static void ixv_if_init(if_ctx_t);
static void ixv_if_local_timer(if_ctx_t, uint16_t);
static void ixv_if_stop(if_ctx_t);
static int ixv_negotiate_api(struct ixgbe_softc *);

static void ixv_initialize_transmit_units(if_ctx_t);
static void ixv_initialize_receive_units(if_ctx_t);
static void ixv_initialize_rss_mapping(struct ixgbe_softc *);

static void ixv_setup_vlan_support(if_ctx_t);
static void ixv_configure_ivars(struct ixgbe_softc *);
static void ixv_if_enable_intr(if_ctx_t);
static void ixv_if_disable_intr(if_ctx_t);
static void ixv_if_multi_set(if_ctx_t);

static void ixv_if_register_vlan(if_ctx_t, u16);
static void ixv_if_unregister_vlan(if_ctx_t, u16);

static uint64_t ixv_if_get_counter(if_ctx_t, ift_counter);
static bool ixv_if_needs_restart(if_ctx_t, enum iflib_restart_event);

static void ixv_save_stats(struct ixgbe_softc *);
static void ixv_init_stats(struct ixgbe_softc *);
static void ixv_update_stats(struct ixgbe_softc *);
static void ixv_add_stats_sysctls(struct ixgbe_softc *);

static int ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
static void ixv_set_ivar(struct ixgbe_softc *, u8, u8, s8);

static u8 *ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);

/* The MSI-X Interrupt handlers */
static int ixv_msix_que(void *);
static int ixv_msix_mbx(void *);

/************************************************************************
 * FreeBSD Device Interface Entry Points
 ************************************************************************/
static device_method_t ixv_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, ixv_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
	DEVMETHOD_END
};

static driver_t ixv_driver = {
	"ixv", ixv_methods, sizeof(struct ixgbe_softc),
};

DRIVER_MODULE(ixv, pci, ixv_driver, 0, 0);
IFLIB_PNP_INFO(pci, ixv_driver, ixv_vendor_info_array);
MODULE_DEPEND(ixv, iflib, 1, 1, 1);
MODULE_DEPEND(ixv, pci, 1, 1, 1);
MODULE_DEPEND(ixv, ether, 1, 1, 1);

static device_method_t ixv_if_methods[] = {
	DEVMETHOD(ifdi_attach_pre, ixv_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, ixv_if_attach_post),
	DEVMETHOD(ifdi_detach, ixv_if_detach),
	DEVMETHOD(ifdi_init, ixv_if_init),
	DEVMETHOD(ifdi_stop, ixv_if_stop),
	DEVMETHOD(ifdi_msix_intr_assign, ixv_if_msix_intr_assign),
	DEVMETHOD(ifdi_intr_enable, ixv_if_enable_intr),
	DEVMETHOD(ifdi_intr_disable, ixv_if_disable_intr),
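	/*
	 * TX and RX share MSI-X vectors in this driver, so the RX
	 * enable handler below also serves the TX re-enable path.
	 */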
	DEVMETHOD(ifdi_tx_queue_intr_enable, ixv_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_rx_queue_intr_enable, ixv_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queues_alloc, ixv_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, ixv_if_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, ixv_if_queues_free),
	DEVMETHOD(ifdi_update_admin_status, ixv_if_update_admin_status),
	DEVMETHOD(ifdi_multi_set, ixv_if_multi_set),
	DEVMETHOD(ifdi_mtu_set, ixv_if_mtu_set),
	DEVMETHOD(ifdi_media_status, ixv_if_media_status),
	DEVMETHOD(ifdi_media_change, ixv_if_media_change),
	DEVMETHOD(ifdi_timer, ixv_if_local_timer),
	DEVMETHOD(ifdi_vlan_register, ixv_if_register_vlan),
	DEVMETHOD(ifdi_vlan_unregister, ixv_if_unregister_vlan),
	DEVMETHOD(ifdi_get_counter, ixv_if_get_counter),
	DEVMETHOD(ifdi_needs_restart, ixv_if_needs_restart),
	DEVMETHOD_END
};

static driver_t ixv_if_driver = {
	"ixv_if", ixv_if_methods, sizeof(struct ixgbe_softc)
};

/*
 * TUNEABLE PARAMETERS:
 */

/* Flow control setting, default to full */
static int ixv_flow_control = ixgbe_fc_full;
TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);

/*
 * Header split: this causes the hardware to DMA
 * the header into a separate mbuf from the payload;
 * it can be a performance win in some workloads, but
 * in others it actually hurts. It's off by default.
 */
static int ixv_header_split = false;
TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);

extern struct if_txrx ixgbe_txrx;

static struct if_shared_ctx ixv_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_q_align = PAGE_SIZE,/* max(DBA_ALIGN, PAGE_SIZE) */
	.isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tx_maxsegsize = PAGE_SIZE,
	.isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = PAGE_SIZE,
	.isc_rx_maxsize = MJUM16BYTES,
	.isc_rx_nsegments = 1,
	.isc_rx_maxsegsize = MJUM16BYTES,
	.isc_nfl = 1,
	.isc_ntxqs = 1,
	.isc_nrxqs = 1,
	.isc_admin_intrcnt = 1,
	.isc_vendor_info = ixv_vendor_info_array,
	.isc_driver_version = ixv_driver_version,
	.isc_driver = &ixv_if_driver,
	.isc_flags = IFLIB_IS_VF | IFLIB_TSO_INIT_IP,

	.isc_nrxd_min = {MIN_RXD},
	.isc_ntxd_min = {MIN_TXD},
	.isc_nrxd_max = {MAX_RXD},
	.isc_ntxd_max = {MAX_TXD},
	.isc_nrxd_default = {DEFAULT_RXD},
	.isc_ntxd_default = {DEFAULT_TXD},
};

static void *
ixv_register(device_t dev)
{
	return (&ixv_sctx_init);
}

/************************************************************************
 * ixv_if_tx_queues_alloc
 ************************************************************************/
static int
ixv_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int ntxqs, int ntxqsets)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_softc_ctx_t scctx = sc->shared;
	struct ix_tx_queue *que;
	int i, j, error;

	MPASS(sc->num_tx_queues == ntxqsets);
	MPASS(ntxqs == 1);

	/* Allocate queue structure memory */
	sc->tx_queues =
	    (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!sc->tx_queues) {
		device_printf(iflib_get_dev(ctx),
		    "Unable to allocate TX ring memory\n");
		return (ENOMEM);
	}

	for (i = 0, que = sc->tx_queues; i < ntxqsets; i++, que++) {
		struct tx_ring *txr = &que->txr;

		txr->me = i;
		txr->sc = que->sc = sc;

		/* Allocate report status array */
		if (!(txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) *
		    scctx->isc_ntxd[0], M_DEVBUF, M_NOWAIT | M_ZERO))) {
			error = ENOMEM;
			goto fail;
		}
		for (j = 0; j < scctx->isc_ntxd[0]; j++)
			txr->tx_rsq[j] = QIDX_INVALID;
		/* get the virtual and physical address of the hardware queues */
		txr->tail = IXGBE_VFTDT(txr->me);
		txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i*ntxqs];
		txr->tx_paddr = paddrs[i*ntxqs];

		txr->bytes = 0;
		txr->total_packets = 0;

	}

	device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
	    sc->num_tx_queues);

	return (0);

fail:
	ixv_if_queues_free(ctx);

	return (error);
} /* ixv_if_tx_queues_alloc */

/************************************************************************
 * ixv_if_rx_queues_alloc
 ************************************************************************/
static int
ixv_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int nrxqs, int nrxqsets)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ix_rx_queue *que;
	int i, error;

	MPASS(sc->num_rx_queues == nrxqsets);
	MPASS(nrxqs == 1);

	/* Allocate queue structure memory */
	sc->rx_queues =
	    (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue) * nrxqsets,
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!sc->rx_queues) {
		device_printf(iflib_get_dev(ctx),
		    "Unable to allocate RX ring memory\n");
		error = ENOMEM;
		goto fail;
	}

	for (i = 0, que = sc->rx_queues; i < nrxqsets; i++, que++) {
		struct rx_ring *rxr = &que->rxr;
		rxr->me = i;
		rxr->sc = que->sc = sc;


		/* get the virtual and physical address of the hw queues */
		rxr->tail = IXGBE_VFRDT(rxr->me);
		rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
		rxr->rx_paddr = paddrs[i*nrxqs];
		rxr->bytes = 0;
		rxr->que = que;
	}

	device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
	    sc->num_rx_queues);

	return (0);

fail:
	ixv_if_queues_free(ctx);

	return (error);
} /* ixv_if_rx_queues_alloc */

/************************************************************************
 * ixv_if_queues_free
 ************************************************************************/
static void
ixv_if_queues_free(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ix_tx_queue *que = sc->tx_queues;
	int i;

	if (que == NULL)
		goto free;

	for (i = 0; i < sc->num_tx_queues; i++, que++) {
		struct tx_ring *txr = &que->txr;
		if (txr->tx_rsq == NULL)
			break;

		free(txr->tx_rsq, M_DEVBUF);
		txr->tx_rsq = NULL;
	}
	if (sc->tx_queues != NULL)
		free(sc->tx_queues, M_DEVBUF);
free:
	if (sc->rx_queues != NULL)
		free(sc->rx_queues, M_DEVBUF);
	sc->tx_queues = NULL;
	sc->rx_queues = NULL;
} /* ixv_if_queues_free */

/************************************************************************
 * ixv_if_attach_pre - Device initialization routine
 *
 *   Called when the driver is being loaded.
 *   Identifies the type of hardware, allocates all resources
 *   and initializes the hardware.
 *
 *   return 0 on success, positive on failure
 ************************************************************************/
static int
ixv_if_attach_pre(if_ctx_t ctx)
{
	struct ixgbe_softc *sc;
	device_t dev;
	if_softc_ctx_t scctx;
	struct ixgbe_hw *hw;
	int error = 0;

	INIT_DEBUGOUT("ixv_attach: begin");

	/* Allocate, clear, and link in our sc structure */
	dev = iflib_get_dev(ctx);
	sc = iflib_get_softc(ctx);
	sc->dev = dev;
	sc->ctx = ctx;
	sc->hw.back = sc;
	scctx = sc->shared = iflib_get_softc_ctx(ctx);
	sc->media = iflib_get_media(ctx);
	hw = &sc->hw;

	/* Do base PCI setup - map BAR0 */
	if (ixv_allocate_pci_resources(ctx)) {
		device_printf(dev, "ixv_allocate_pci_resources() failed!\n");
		error = ENXIO;
		goto err_out;
	}

	/* SYSCTL APIs */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    sc, 0, ixv_sysctl_debug, "I", "Debug Info");

	/* Determine hardware revision */
	ixv_identify_hardware(ctx);
	ixv_init_device_features(sc);

	/* Initialize the shared code */
	error = ixgbe_init_ops_vf(hw);
	if (error) {
		device_printf(dev, "ixgbe_init_ops_vf() failed!\n");
		error = EIO;
		goto err_out;
	}

	/* Setup the mailbox */
	ixgbe_init_mbx_params_vf(hw);

	error = hw->mac.ops.reset_hw(hw);
	if (error == IXGBE_ERR_RESET_FAILED)
		device_printf(dev, "...reset_hw() failure: Reset Failed!\n");
	else if (error)
		device_printf(dev, "...reset_hw() failed with error %d\n",
		    error);
	if (error) {
		error = EIO;
		goto err_out;
	}

	error = hw->mac.ops.init_hw(hw);
	if (error) {
		device_printf(dev, "...init_hw() failed with error %d\n",
		    error);
		error = EIO;
		goto err_out;
	}

	/* Negotiate mailbox API version */
	error = ixv_negotiate_api(sc);
	if (error) {
		device_printf(dev,
		    "Mailbox API negotiation failed during attach!\n");
		goto err_out;
	}

	/* Check if VF was disabled by PF */
	error = hw->mac.ops.get_link_state(hw, &sc->link_enabled);
	if (error) {
		/* PF is not capable of controlling VF state. Enable the link. */
		sc->link_enabled = true;
	}

	/* If no mac address was assigned, make a random one */
	if (!ixv_check_ether_addr(hw->mac.addr)) {
		ether_gen_addr(iflib_get_ifp(ctx),
		    (struct ether_addr *)hw->mac.addr);
		bcopy(hw->mac.addr, hw->mac.perm_addr,
		    sizeof(hw->mac.perm_addr));
	}

	/* Most of the iflib initialization... */

	iflib_set_mac(ctx, hw->mac.addr);
	switch (sc->hw.mac.type) {
	case ixgbe_mac_X550_vf:
	case ixgbe_mac_X550EM_x_vf:
	case ixgbe_mac_X550EM_a_vf:
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 2;
		break;
	default:
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 1;
	}
	scctx->isc_txqsizes[0] =
	    roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
	    sizeof(u32), DBA_ALIGN);
	scctx->isc_rxqsizes[0] =
	    roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
	    DBA_ALIGN);
	/* XXX */
	scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
	    CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
	scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
	scctx->isc_msix_bar = pci_msix_table_bar(dev);
	scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
	scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
	scctx->isc_tx_tso_segsize_max = PAGE_SIZE;

	scctx->isc_txrx = &ixgbe_txrx;

	/*
	 * Tell the upper layer(s) we support everything the PF
	 * driver does except...
	 *   Wake-on-LAN
	 */
	scctx->isc_capabilities = IXGBE_CAPS;
	scctx->isc_capabilities ^= IFCAP_WOL;
	scctx->isc_capenable = scctx->isc_capabilities;

	INIT_DEBUGOUT("ixv_if_attach_pre: end");

	return (0);

err_out:
	ixv_free_pci_resources(ctx);

	return (error);
} /* ixv_if_attach_pre */

static int
ixv_if_attach_post(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	device_t dev = iflib_get_dev(ctx);
	int error = 0;

	/* Setup OS specific network interface */
	error = ixv_setup_interface(ctx);
	if (error) {
		device_printf(dev, "Interface setup failed: %d\n", error);
		goto end;
	}

	/* Do the stats setup */
	ixv_save_stats(sc);
	ixv_init_stats(sc);
	ixv_add_stats_sysctls(sc);

end:
	return error;
} /* ixv_if_attach_post */

/************************************************************************
 * ixv_detach - Device removal routine
 *
 *   Called when the driver is being removed.
 *   Stops the adapter and deallocates all the resources
 *   that were allocated for driver operation.
 *
 *   return 0 on success, positive on failure
 ************************************************************************/
static int
ixv_if_detach(if_ctx_t ctx)
{
	INIT_DEBUGOUT("ixv_detach: begin");

	ixv_free_pci_resources(ctx);

	return (0);
} /* ixv_if_detach */

/************************************************************************
 * ixv_if_mtu_set
 ************************************************************************/
static int
ixv_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_t ifp = iflib_get_ifp(ctx);
	int error = 0;

	IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
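	/*
	 * The largest acceptable MTU leaves room for the L2 header
	 * within the hardware's IXGBE_MAX_FRAME_SIZE limit.
	 */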
	if (mtu > IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR) {
		error = EINVAL;
	} else {
		if_setmtu(ifp, mtu);
		sc->max_frame_size = if_getmtu(ifp) + IXGBE_MTU_HDR;
	}

	return error;
} /* ixv_if_mtu_set */

/************************************************************************
 * ixv_if_init - Init entry point
 *
 *   Used in two ways: It is used by the stack as an init entry
 *   point in network interface structure. It is also used
 *   by the driver as a hw/sw initialization routine to get
 *   to a consistent state.
 *
 *   return 0 on success, positive on failure
 ************************************************************************/
static void
ixv_if_init(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_t ifp = iflib_get_ifp(ctx);
	device_t dev = iflib_get_dev(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	int error = 0;

	INIT_DEBUGOUT("ixv_if_init: begin");
	hw->adapter_stopped = false;
	hw->mac.ops.stop_adapter(hw);

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	bcopy(if_getlladdr(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);

	/* Reset VF and renegotiate mailbox API version */
	hw->mac.ops.reset_hw(hw);
	hw->mac.ops.start_hw(hw);
	error = ixv_negotiate_api(sc);
	if (error) {
		device_printf(dev,
		    "Mailbox API negotiation failed in if_init!\n");
		return;
	}

	ixv_initialize_transmit_units(ctx);

	/* Setup Multicast table */
	ixv_if_multi_set(ctx);

	sc->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);

	/* Configure RX settings */
	ixv_initialize_receive_units(ctx);

	/* Set up VLAN offload and filter */
	ixv_setup_vlan_support(ctx);

	/* Set up MSI-X routing */
	ixv_configure_ivars(sc);

	/* Set up auto-mask */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(sc->vector), IXGBE_LINK_ITR);

	/* Stats init */
	ixv_init_stats(sc);

	/* Config/Enable Link */
	error = hw->mac.ops.get_link_state(hw, &sc->link_enabled);
	if (error) {
		/* PF is not capable of controlling VF state. Enable the link. */
		sc->link_enabled = true;
	} else if (sc->link_enabled == false)
		device_printf(dev, "VF is disabled by PF\n");

	hw->mac.ops.check_link(hw, &sc->link_speed, &sc->link_up,
	    false);

	/* And now turn on interrupts */
	ixv_if_enable_intr(ctx);

	return;
} /* ixv_if_init */

/************************************************************************
 * ixv_enable_queue
 ************************************************************************/
static inline void
ixv_enable_queue(struct ixgbe_softc *sc, u32 vector)
{
	struct ixgbe_hw *hw = &sc->hw;
	u32 queue = 1 << vector;
	u32 mask;

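	/*
	 * Each queue vector owns one bit of VTEIMS; masking with
	 * IXGBE_EIMS_RTX_QUEUE keeps the write within the valid
	 * per-queue interrupt bits.
	 */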
	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
} /* ixv_enable_queue */

/************************************************************************
 * ixv_disable_queue
 ************************************************************************/
static inline void
ixv_disable_queue(struct ixgbe_softc *sc, u32 vector)
{
	struct ixgbe_hw *hw = &sc->hw;
	u64 queue = (u64)(1 << vector);
	u32 mask;

	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
} /* ixv_disable_queue */


/************************************************************************
 * ixv_msix_que - MSI-X Queue Interrupt Service routine
 ************************************************************************/
static int
ixv_msix_que(void *arg)
{
	struct ix_rx_queue *que = arg;
	struct ixgbe_softc *sc = que->sc;

	ixv_disable_queue(sc, que->msix);
	++que->irqs;

	return (FILTER_SCHEDULE_THREAD);
} /* ixv_msix_que */

/************************************************************************
 * ixv_msix_mbx
 ************************************************************************/
static int
ixv_msix_mbx(void *arg)
{
	struct ixgbe_softc *sc = arg;
	struct ixgbe_hw *hw = &sc->hw;
	u32 reg;

	++sc->link_irq;

	/* First get the cause */
	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);

	/* Link status change */
	if (reg & IXGBE_EICR_LSC)
		iflib_admin_intr_deferred(sc->ctx);

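	/*
	 * Re-arm only the mailbox/other cause here; queue interrupts
	 * are re-enabled from their own handlers.
	 */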
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);

	return (FILTER_HANDLED);
} /* ixv_msix_mbx */

/************************************************************************
 * ixv_media_status - Media Ioctl callback
 *
 *   Called whenever the user queries the status of
 *   the interface using ifconfig.
 ************************************************************************/
static void
ixv_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);

	INIT_DEBUGOUT("ixv_media_status: begin");

	iflib_admin_intr_deferred(ctx);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!sc->link_active)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	switch (sc->link_speed) {
	case IXGBE_LINK_SPEED_1GB_FULL:
		ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_10GB_FULL:
		ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_100_FULL:
		ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_10_FULL:
		ifmr->ifm_active |= IFM_10_T | IFM_FDX;
		break;
	}
} /* ixv_if_media_status */

/************************************************************************
 * ixv_if_media_change - Media Ioctl callback
 *
 *   Called when the user changes speed/duplex using
 *   media/mediaopt options with ifconfig.
 ************************************************************************/
static int
ixv_if_media_change(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ifmedia *ifm = iflib_get_media(ctx);

	INIT_DEBUGOUT("ixv_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		break;
	default:
		device_printf(sc->dev, "Only auto media type\n");
		return (EINVAL);
	}

	return (0);
} /* ixv_if_media_change */


/************************************************************************
 * ixv_negotiate_api
 *
 *   Negotiate the Mailbox API with the PF;
 *   start with the most featured API first.
 ************************************************************************/
static int
ixv_negotiate_api(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	int mbx_api[] = { ixgbe_mbox_api_12,
	                  ixgbe_mbox_api_11,
	                  ixgbe_mbox_api_10,
	                  ixgbe_mbox_api_unknown };
	int i = 0;

	while (mbx_api[i] != ixgbe_mbox_api_unknown) {
		if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
			return (0);
		i++;
	}

	return (EINVAL);
} /* ixv_negotiate_api */


static u_int
ixv_if_multi_set_cb(void *cb_arg, struct sockaddr_dl *addr, u_int cnt)
{
	bcopy(LLADDR(addr),
	    &((u8 *)cb_arg)[cnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
	    IXGBE_ETH_LENGTH_OF_ADDRESS);

	return (++cnt);
}

/************************************************************************
 * ixv_if_multi_set - Multicast Update
 *
 *   Called whenever multicast address list is updated.
 ************************************************************************/
static void
ixv_if_multi_set(if_ctx_t ctx)
{
	u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	u8 *update_ptr;
	if_t ifp = iflib_get_ifp(ctx);
	int mcnt = 0;

	IOCTL_DEBUGOUT("ixv_if_multi_set: begin");

	mcnt = if_foreach_llmaddr(ifp, ixv_if_multi_set_cb, mta);

	update_ptr = mta;

	sc->hw.mac.ops.update_mc_addr_list(&sc->hw, update_ptr, mcnt,
	    ixv_mc_array_itr, true);
} /* ixv_if_multi_set */

/************************************************************************
 * ixv_mc_array_itr
 *
 *   An iterator function needed by the multicast shared code.
 *   It feeds the shared code routine the addresses in the
 *   array built by ixv_if_multi_set() one by one.
 ************************************************************************/
static u8 *
ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
{
	u8 *addr = *update_ptr;
	u8 *newptr;

	*vmdq = 0;

	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
	*update_ptr = newptr;

	return addr;
} /* ixv_mc_array_itr */

/************************************************************************
 * ixv_if_local_timer - Timer routine
 *
 *   Checks for link status, updates statistics,
 *   and runs the watchdog check.
 ************************************************************************/
static void
ixv_if_local_timer(if_ctx_t ctx, uint16_t qid)
{
	if (qid != 0)
		return;

	/* Fire off the adminq task */
	iflib_admin_intr_deferred(ctx);
} /* ixv_if_local_timer */

/************************************************************************
 * ixv_if_update_admin_status - Update OS on link state
 *
 *   Note: Only updates the OS on the cached link state.
 *         The real check of the hardware only happens with
 *         a link interrupt.
 ************************************************************************/
static void
ixv_if_update_admin_status(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	device_t dev = iflib_get_dev(ctx);
	s32 status;

	sc->hw.mac.get_link_status = true;

	status = ixgbe_check_link(&sc->hw, &sc->link_speed,
	    &sc->link_up, false);

	if (status != IXGBE_SUCCESS && sc->hw.adapter_stopped == false) {
		/* Mailbox's Clear To Send status is lost or timeout occurred.
		 * We need reinitialization. */
		if_init(iflib_get_ifp(ctx), ctx);
	}

	if (sc->link_up && sc->link_enabled) {
		if (sc->link_active == false) {
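			/* 128 is IXGBE_LINK_SPEED_10GB_FULL (0x80) */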
			if (bootverbose)
				device_printf(dev, "Link is up %d Gbps %s \n",
				    ((sc->link_speed == 128) ? 10 : 1),
				    "Full Duplex");
			sc->link_active = true;
			iflib_link_state_change(ctx, LINK_STATE_UP,
			    ixgbe_link_speed_to_baudrate(sc->link_speed));
		}
	} else { /* Link down */
		if (sc->link_active == true) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
			sc->link_active = false;
		}
	}

	/* Stats Update */
	ixv_update_stats(sc);
} /* ixv_if_update_admin_status */


/************************************************************************
 * ixv_if_stop - Stop the hardware
 *
 *   Disables all traffic on the adapter by issuing a
 *   global reset on the MAC and deallocates TX/RX buffers.
 ************************************************************************/
static void
ixv_if_stop(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;

	INIT_DEBUGOUT("ixv_stop: begin\n");

	ixv_if_disable_intr(ctx);

	hw->mac.ops.reset_hw(hw);
	sc->hw.adapter_stopped = false;
	hw->mac.ops.stop_adapter(hw);

	/* Update the stack */
	sc->link_up = false;
	ixv_if_update_admin_status(ctx);

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
} /* ixv_if_stop */


/************************************************************************
 * ixv_identify_hardware - Determine hardware revision.
 ************************************************************************/
static void
ixv_identify_hardware(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	device_t dev = iflib_get_dev(ctx);
	struct ixgbe_hw *hw = &sc->hw;

	/* Save off the information about this board */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_get_revid(dev);
	hw->subsystem_vendor_id = pci_get_subvendor(dev);
	hw->subsystem_device_id = pci_get_subdevice(dev);

	/* A subset of set_mac_type */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_VF:
		hw->mac.type = ixgbe_mac_82599_vf;
		break;
	case IXGBE_DEV_ID_X540_VF:
		hw->mac.type = ixgbe_mac_X540_vf;
		break;
	case IXGBE_DEV_ID_X550_VF:
		hw->mac.type = ixgbe_mac_X550_vf;
		break;
	case IXGBE_DEV_ID_X550EM_X_VF:
		hw->mac.type = ixgbe_mac_X550EM_x_vf;
		break;
	case IXGBE_DEV_ID_X550EM_A_VF:
		hw->mac.type = ixgbe_mac_X550EM_a_vf;
		break;
	default:
		device_printf(dev, "unknown mac type\n");
		hw->mac.type = ixgbe_mac_unknown;
		break;
	}
} /* ixv_identify_hardware */

/************************************************************************
 * ixv_if_msix_intr_assign - Setup MSI-X Interrupt resources and handlers
 ************************************************************************/
static int
ixv_if_msix_intr_assign(if_ctx_t ctx, int msix)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	device_t dev = iflib_get_dev(ctx);
	struct ix_rx_queue *rx_que = sc->rx_queues;
	struct ix_tx_queue *tx_que;
	int error, rid, vector = 0;
	char buf[16];

	for (int i = 0; i < sc->num_rx_queues; i++, vector++, rx_que++) {
		rid = vector + 1;

		snprintf(buf, sizeof(buf), "rxq%d", i);
		error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
		    IFLIB_INTR_RXTX, ixv_msix_que, rx_que, rx_que->rxr.me,
		    buf);

		if (error) {
			device_printf(iflib_get_dev(ctx),
			    "Failed to allocate que int %d err: %d",
			    i, error);
			sc->num_rx_queues = i + 1;
			goto fail;
		}

		rx_que->msix = vector;
	}

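	/*
	 * TX queues piggyback on their paired RX queue's vector;
	 * iflib drives TX cleanup as a softirq off that interrupt.
	 */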
	for (int i = 0; i < sc->num_tx_queues; i++) {
		snprintf(buf, sizeof(buf), "txq%d", i);
		tx_que = &sc->tx_queues[i];
		tx_que->msix = i % sc->num_rx_queues;
		iflib_softirq_alloc_generic(ctx,
		    &sc->rx_queues[tx_que->msix].que_irq,
		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
	}
	rid = vector + 1;
	error = iflib_irq_alloc_generic(ctx, &sc->irq, rid,
	    IFLIB_INTR_ADMIN, ixv_msix_mbx, sc, 0, "aq");
	if (error) {
		device_printf(iflib_get_dev(ctx),
		    "Failed to register admin handler");
		return (error);
	}

	sc->vector = vector;
	/*
	 * Due to a broken design QEMU will fail to properly
	 * enable the guest for MSIX unless the vectors in
	 * the table are all set up, so we must rewrite the
	 * ENABLE in the MSIX control register again at this
	 * point to cause it to successfully initialize us.
	 */
	if (sc->hw.mac.type == ixgbe_mac_82599_vf) {
		int msix_ctrl;
		if (pci_find_cap(dev, PCIY_MSIX, &rid)) {
			device_printf(dev, "Finding MSIX capability failed\n");
		} else {
			rid += PCIR_MSIX_CTRL;
			msix_ctrl = pci_read_config(dev, rid, 2);
			msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
			pci_write_config(dev, rid, msix_ctrl, 2);
		}
	}

	return (0);

fail:
	iflib_irq_free(ctx, &sc->irq);
	rx_que = sc->rx_queues;
	for (int i = 0; i < sc->num_rx_queues; i++, rx_que++)
		iflib_irq_free(ctx, &rx_que->que_irq);

	return (error);
} /* ixv_if_msix_intr_assign */

/************************************************************************
 * ixv_allocate_pci_resources
 ************************************************************************/
static int
ixv_allocate_pci_resources(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	device_t dev = iflib_get_dev(ctx);
	int rid;

	rid = PCIR_BAR(0);
	sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);

	if (!(sc->pci_mem)) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		return (ENXIO);
	}

	sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->pci_mem);
	sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->pci_mem);
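	/*
	 * The shared code's register macros go through the osdep
	 * bus-space handle, so hw_addr carries a pointer to the
	 * handle rather than a direct register mapping.
	 */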
	sc->hw.hw_addr = (u8 *)&sc->osdep.mem_bus_space_handle;

	return (0);
} /* ixv_allocate_pci_resources */

/************************************************************************
 * ixv_free_pci_resources
 ************************************************************************/
static void
ixv_free_pci_resources(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ix_rx_queue *que = sc->rx_queues;
	device_t dev = iflib_get_dev(ctx);

	/* Release all MSI-X queue resources */
	if (sc->intr_type == IFLIB_INTR_MSIX)
		iflib_irq_free(ctx, &sc->irq);

	if (que != NULL) {
		for (int i = 0; i < sc->num_rx_queues; i++, que++) {
			iflib_irq_free(ctx, &que->que_irq);
		}
	}

	if (sc->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->pci_mem), sc->pci_mem);
} /* ixv_free_pci_resources */

/************************************************************************
 * ixv_setup_interface
 *
 *   Setup networking device structure and register an interface.
 ************************************************************************/
static int
ixv_setup_interface(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_softc_ctx_t scctx = sc->shared;
	if_t ifp = iflib_get_ifp(ctx);

	INIT_DEBUGOUT("ixv_setup_interface: begin");

	if_setbaudrate(ifp, IF_Gbps(10));
	if_setsendqlen(ifp, scctx->isc_ntxd[0] - 2);


	sc->max_frame_size = if_getmtu(ifp) + IXGBE_MTU_HDR;
	ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);

	return 0;
} /* ixv_setup_interface */

/************************************************************************
 * ixv_if_get_counter
 ************************************************************************/
static uint64_t
ixv_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_t ifp = iflib_get_ifp(ctx);

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (sc->ipackets);
	case IFCOUNTER_OPACKETS:
		return (sc->opackets);
	case IFCOUNTER_IBYTES:
		return (sc->ibytes);
	case IFCOUNTER_OBYTES:
		return (sc->obytes);
	case IFCOUNTER_IMCASTS:
		return (sc->imcasts);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
} /* ixv_if_get_counter */

/* ixv_if_needs_restart - Tell iflib when the driver needs to be reinitialized
 * @ctx: iflib context
 * @event: event code to check
 *
 * Defaults to returning true for every event.
 *
 * @returns true if iflib needs to reinit the interface
 */
static bool
ixv_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
{
	switch (event) {
	case IFLIB_RESTART_VLAN_CONFIG:
		/* XXX: This may not need to return true */
	default:
		return (true);
	}
}

/************************************************************************
 * ixv_initialize_transmit_units - Enable transmit unit.
 ************************************************************************/
static void
ixv_initialize_transmit_units(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	if_softc_ctx_t scctx = sc->shared;
	struct ix_tx_queue *que = sc->tx_queues;
	int i;

	for (i = 0; i < sc->num_tx_queues; i++, que++) {
		struct tx_ring *txr = &que->txr;
		u64 tdba = txr->tx_paddr;
		u32 txctrl, txdctl;
		int j = txr->me;

		/* Set WTHRESH to 8, burst writeback */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);

		/* Set the HW Tx Head and Tail indices */
		IXGBE_WRITE_REG(&sc->hw, IXGBE_VFTDH(j), 0);
		IXGBE_WRITE_REG(&sc->hw, IXGBE_VFTDT(j), 0);

		/* Set Tx Tail register */
		txr->tail = IXGBE_VFTDT(j);

		txr->tx_rs_cidx = txr->tx_rs_pidx;
		/* Initialize the last processed descriptor to be the end of
		 * the ring, rather than the start, so that we avoid an
		 * off-by-one error when calculating how many descriptors are
		 * done in the credits_update function.
		 */
		txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
		for (int k = 0; k < scctx->isc_ntxd[0]; k++)
			txr->tx_rsq[k] = QIDX_INVALID;

		/* Set Ring parameters */
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j),
		    scctx->isc_ntxd[0] * sizeof(struct ixgbe_legacy_tx_desc));
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);

		/* Now enable */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	return;
} /* ixv_initialize_transmit_units */

/************************************************************************
 * ixv_initialize_rss_mapping
 ************************************************************************/
static void
ixv_initialize_rss_mapping(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	u32 reta = 0, mrqc, rss_key[10];
	int queue_id;
	int i, j;
	u32 rss_hash_config;

	if (sc->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *)&rss_key);
	} else {
		/* set up random bits */
		arc4rand(&rss_key, sizeof(rss_key), 0);
	}

	/* Now fill out hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);

	/* Set up the redirection table */
	for (i = 0, j = 0; i < 64; i++, j++) {
		if (j == sc->num_rx_queues)
			j = 0;

		if (sc->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_rx_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % sc->num_rx_queues;
		} else
			queue_id = j;

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
		reta >>= 8;
		reta |= ((uint32_t)queue_id) << 24;
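		/* Four 8-bit entries fill one 32-bit VFRETA register */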
		if ((i & 3) == 3) {
			IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
			reta = 0;
		}
	}

	/* Perform hash on these packet types */
	if (sc->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
		                | RSS_HASHTYPE_RSS_TCP_IPV4
		                | RSS_HASHTYPE_RSS_IPV6
		                | RSS_HASHTYPE_RSS_TCP_IPV6;
	}

	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		device_printf(sc->dev,
		    "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		device_printf(sc->dev,
		    "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		device_printf(sc->dev,
		    "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
		    __func__);
	IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
} /* ixv_initialize_rss_mapping */

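/*
 * SRRCTL's BSIZEPKT field is expressed in 1KB granules
 * (IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10), so receive buffer sizes are
 * rounded up to that granularity below.
 */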
#define BSIZEPKT_ROUNDUP ((1 << IXGBE_SRRCTL_BSIZEPKT_SHIFT) - 1)
/************************************************************************
 * ixv_initialize_receive_units - Setup receive registers and features.
 ************************************************************************/
static void
ixv_initialize_receive_units(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_softc_ctx_t scctx;
	struct ixgbe_hw *hw = &sc->hw;
#ifdef DEV_NETMAP
	if_t ifp = iflib_get_ifp(ctx);
#endif
	struct ix_rx_queue *que = sc->rx_queues;
	u32 bufsz, psrtype;

	bufsz = (sc->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
	    IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	psrtype = IXGBE_PSRTYPE_TCPHDR
	        | IXGBE_PSRTYPE_UDPHDR
	        | IXGBE_PSRTYPE_IPV4HDR
	        | IXGBE_PSRTYPE_IPV6HDR
	        | IXGBE_PSRTYPE_L2HDR;

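	/*
	 * Bit 29 starts PSRTYPE's queue-count field (RQPL in Intel's
	 * datasheets); RSS traffic is spread over 2^RQPL queues.
	 */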
	if (sc->num_rx_queues > 1)
		psrtype |= 1 << 29;

	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);

	/* Tell PF our max_frame size */
	if (ixgbevf_rlpml_set_vf(hw, sc->max_frame_size) != 0) {
		device_printf(sc->dev, "There is a problem with the PF setup. It is likely the receive unit for this VF will not function correctly.\n");
	}
	scctx = sc->shared;

	for (int i = 0; i < sc->num_rx_queues; i++, que++) {
		struct rx_ring *rxr = &que->rxr;
		u64 rdba = rxr->rx_paddr;
		u32 reg, rxdctl;
		int j = rxr->me;

		/* Disable the queue */
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
		for (int k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
			    IXGBE_RXDCTL_ENABLE)
				msec_delay(1);
			else
				break;
		}
		wmb();
		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j),
		    scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));

		/* Reset the ring indices */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);

		/* Set up the SRRCTL register */
		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(j));
		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		reg |= bufsz;
		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(j), reg);

		/* Capture Rx Tail index */
		rxr->tail = IXGBE_VFRDT(rxr->me);

		/* Do the queue enabling last */
		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
		for (int l = 0; l < 10; l++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			msec_delay(1);
		}
		wmb();

		/* Set the Tail Pointer */
#ifdef DEV_NETMAP
		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
		if (if_getcapenable(ifp) & IFCAP_NETMAP) {
			struct netmap_adapter *na = NA(ifp);
			struct netmap_kring *kring = na->rx_rings[j];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
			    scctx->isc_nrxd[0] - 1);
	}

	/*
	 * Do not touch RSS and RETA settings for older hardware
	 * as those are shared among PF and all VF.
	 */
	if (sc->hw.mac.type >= ixgbe_mac_X550_vf)
		ixv_initialize_rss_mapping(sc);
} /* ixv_initialize_receive_units */

/************************************************************************
 * ixv_setup_vlan_support
 ************************************************************************/
static void
ixv_setup_vlan_support(if_ctx_t ctx)
{
	if_t ifp = iflib_get_ifp(ctx);
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	u32 ctrl, vid, vfta, retry;

	/*
	 * We get here thru if_init, meaning
	 * a soft reset, this has already cleared
	 * the VFTA and other state, so if there
	 * have been no vlan's registered do nothing.
	 */
	if (sc->num_vlans == 0)
		return;

	if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) {
		/* Enable the queues */
		for (int i = 0; i < sc->num_rx_queues; i++) {
			ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
			ctrl |= IXGBE_RXDCTL_VME;
			IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
			/*
			 * Let Rx path know that it needs to store VLAN tag
			 * as part of extra mbuf info.
			 */
			sc->rx_queues[i].rxr.vtag_strip = true;
		}
	}

	/*
	 * If filtering VLAN tags is disabled,
	 * there is no need to fill VLAN Filter Table Array (VFTA).
	 */
	if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0)
		return;

	/*
	 * A soft reset zero's out the VFTA, so
	 * we need to repopulate it now.
	 */
	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
		if (sc->shadow_vfta[i] == 0)
			continue;
		vfta = sc->shadow_vfta[i];
		/*
		 * Reconstruct the vlan id's
		 * based on the bits set in each
		 * of the array ints.
		 */
		for (int j = 0; j < 32; j++) {
			retry = 0;
			if ((vfta & (1 << j)) == 0)
				continue;
			vid = (i * 32) + j;
			/* Call the shared code mailbox routine */
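			/*
			 * The mailbox may fail transiently while the PF
			 * is busy, so allow a handful of retries.
			 */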
			while (hw->mac.ops.set_vfta(hw, vid, 0, true, false)) {
				if (++retry > 5)
					break;
			}
		}
	}
} /* ixv_setup_vlan_support */

/************************************************************************
 * ixv_if_register_vlan
 *
 *   Run via a vlan config EVENT, it enables us to use the
 *   HW Filter table since we can get the vlan id. This just
 *   creates the entry in the soft version of the VFTA, init
 *   will repopulate the real table.
 ************************************************************************/
static void
ixv_if_register_vlan(if_ctx_t ctx, u16 vtag)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	u16 index, bit;

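	/*
	 * The VFTA is an array of 32-bit words: bits [11:5] of the
	 * VLAN id select the word, bits [4:0] the bit within it.
	 */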
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	sc->shadow_vfta[index] |= (1 << bit);
	++sc->num_vlans;
} /* ixv_if_register_vlan */

/************************************************************************
 * ixv_if_unregister_vlan
 *
 *   Run via a vlan unconfig EVENT, remove our entry
 *   in the soft vfta.
 ************************************************************************/
static void
ixv_if_unregister_vlan(if_ctx_t ctx, u16 vtag)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	u16 index, bit;

	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	sc->shadow_vfta[index] &= ~(1 << bit);
	--sc->num_vlans;
} /* ixv_if_unregister_vlan */

/************************************************************************
 * ixv_if_enable_intr
 ************************************************************************/
static void
ixv_if_enable_intr(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	struct ix_rx_queue *que = sc->rx_queues;
	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);

	mask = IXGBE_EIMS_ENABLE_MASK;
	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);

	for (int i = 0; i < sc->num_rx_queues; i++, que++)
		ixv_enable_queue(sc, que->msix);

	IXGBE_WRITE_FLUSH(hw);
} /* ixv_if_enable_intr */

/************************************************************************
 * ixv_if_disable_intr
 ************************************************************************/
static void
ixv_if_disable_intr(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	IXGBE_WRITE_REG(&sc->hw, IXGBE_VTEIAC, 0);
	IXGBE_WRITE_REG(&sc->hw, IXGBE_VTEIMC, ~0);
	IXGBE_WRITE_FLUSH(&sc->hw);
} /* ixv_if_disable_intr */

/************************************************************************
 * ixv_if_rx_queue_intr_enable
 ************************************************************************/
static int
ixv_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ix_rx_queue *que = &sc->rx_queues[rxqid];

	ixv_enable_queue(sc, que->rxr.me);

	return (0);
} /* ixv_if_rx_queue_intr_enable */

/************************************************************************
 * ixv_set_ivar
 *
 *   Setup the correct IVAR register for a particular MSI-X interrupt
 *     - entry is the register array entry
 *     - vector is the MSI-X vector for this queue
 *     - type is RX/TX/MISC
 ************************************************************************/
static void
ixv_set_ivar(struct ixgbe_softc *sc, u8 entry, u8 vector, s8 type)
{
	struct ixgbe_hw *hw = &sc->hw;
	u32 ivar, index;

	vector |= IXGBE_IVAR_ALLOC_VAL;

	if (type == -1) { /* MISC IVAR */
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else { /* RX/TX IVARS */
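		/*
		 * Each VTIVAR register holds four 8-bit entries: RX and
		 * TX allocations for an even/odd queue pair. (entry & 1)
		 * selects the half, type (0 = RX, 1 = TX) the byte.
		 */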
                index = (16 * (entry & 1)) + (8 * type);
                ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
                ivar &= ~(0xFF << index);
                ivar |= (vector << index);
                IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
        }
} /* ixv_set_ivar */

/************************************************************************
 * ixv_configure_ivars
 ************************************************************************/
static void
ixv_configure_ivars(struct ixgbe_softc *sc)
{
        struct ix_rx_queue *que = sc->rx_queues;

        MPASS(sc->num_rx_queues == sc->num_tx_queues);

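        /* One MSI-X vector serves both the RX and TX ring of each queue. */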
        for (int i = 0; i < sc->num_rx_queues; i++, que++) {
                /* First the RX queue entry */
                ixv_set_ivar(sc, i, que->msix, 0);
                /* ... and the TX */
                ixv_set_ivar(sc, i, que->msix, 1);
                /* Set an initial value in EITR */
                IXGBE_WRITE_REG(&sc->hw, IXGBE_VTEITR(que->msix),
                    IXGBE_EITR_DEFAULT);
        }

        /* For the mailbox interrupt */
        ixv_set_ivar(sc, 1, sc->vector, -1);
} /* ixv_configure_ivars */

/************************************************************************
 * ixv_save_stats
 *
 *   The VF stats registers never have a truly virgin
 *   starting point, so this routine tries to make an
 *   artificial one, marking ground zero on attach,
 *   as it were.
 ************************************************************************/
static void
ixv_save_stats(struct ixgbe_softc *sc)
{
        if (sc->stats.vf.vfgprc || sc->stats.vf.vfgptc) {
                sc->stats.vf.saved_reset_vfgprc +=
                    sc->stats.vf.vfgprc - sc->stats.vf.base_vfgprc;
                sc->stats.vf.saved_reset_vfgptc +=
                    sc->stats.vf.vfgptc - sc->stats.vf.base_vfgptc;
                sc->stats.vf.saved_reset_vfgorc +=
                    sc->stats.vf.vfgorc - sc->stats.vf.base_vfgorc;
                sc->stats.vf.saved_reset_vfgotc +=
                    sc->stats.vf.vfgotc - sc->stats.vf.base_vfgotc;
                sc->stats.vf.saved_reset_vfmprc +=
                    sc->stats.vf.vfmprc - sc->stats.vf.base_vfmprc;
        }
} /* ixv_save_stats */

/************************************************************************
 * ixv_init_stats
 ************************************************************************/
static void
ixv_init_stats(struct ixgbe_softc *sc)
{
        struct ixgbe_hw *hw = &sc->hw;

        sc->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
        sc->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
        sc->stats.vf.last_vfgorc |=
            (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);

        sc->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
        sc->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
        sc->stats.vf.last_vfgotc |=
            (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);

        sc->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

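        /* The attach-time readings become the baseline for later deltas. */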
        sc->stats.vf.base_vfgprc = sc->stats.vf.last_vfgprc;
        sc->stats.vf.base_vfgorc = sc->stats.vf.last_vfgorc;
        sc->stats.vf.base_vfgptc = sc->stats.vf.last_vfgptc;
        sc->stats.vf.base_vfgotc = sc->stats.vf.last_vfgotc;
        sc->stats.vf.base_vfmprc = sc->stats.vf.last_vfmprc;
} /* ixv_init_stats */

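/*
 * Fold a 32-bit hardware counter into a 64-bit soft count; a reading
 * below the previous one means the register wrapped, so advance the
 * high word before merging in the new low 32 bits.
 */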
#define UPDATE_STAT_32(reg, last, count)                \
{                                                       \
        u32 current = IXGBE_READ_REG(hw, reg);          \
        if (current < last)                             \
                count += 0x100000000LL;                 \
        last = current;                                 \
        count &= 0xFFFFFFFF00000000LL;                  \
        count |= current;                               \
}

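/* Same idea for the 36-bit octet counters split across LSB/MSB registers. */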
#define UPDATE_STAT_36(lsb, msb, last, count)           \
{                                                       \
        u64 cur_lsb = IXGBE_READ_REG(hw, lsb);          \
        u64 cur_msb = IXGBE_READ_REG(hw, msb);          \
        u64 current = ((cur_msb << 32) | cur_lsb);      \
        if (current < last)                             \
                count += 0x1000000000LL;                \
        last = current;                                 \
        count &= 0xFFFFFFF000000000LL;                  \
        count |= current;                               \
}

/************************************************************************
 * ixv_update_stats - Update the board statistics counters.
 ************************************************************************/
void
ixv_update_stats(struct ixgbe_softc *sc)
{
        struct ixgbe_hw *hw = &sc->hw;
        struct ixgbevf_hw_stats *stats = &sc->stats.vf;

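        /* Merge the latest register readings into the running 64-bit totals. */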
        UPDATE_STAT_32(IXGBE_VFGPRC, sc->stats.vf.last_vfgprc,
            sc->stats.vf.vfgprc);
        UPDATE_STAT_32(IXGBE_VFGPTC, sc->stats.vf.last_vfgptc,
            sc->stats.vf.vfgptc);
        UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
            sc->stats.vf.last_vfgorc, sc->stats.vf.vfgorc);
        UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
            sc->stats.vf.last_vfgotc, sc->stats.vf.vfgotc);
        UPDATE_STAT_32(IXGBE_VFMPRC, sc->stats.vf.last_vfmprc,
            sc->stats.vf.vfmprc);

        /* Fill out the OS statistics structure */
        IXGBE_SET_IPACKETS(sc, stats->vfgprc);
        IXGBE_SET_OPACKETS(sc, stats->vfgptc);
        IXGBE_SET_IBYTES(sc, stats->vfgorc);
        IXGBE_SET_OBYTES(sc, stats->vfgotc);
        IXGBE_SET_IMCASTS(sc, stats->vfmprc);
} /* ixv_update_stats */

/************************************************************************
 * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
 ************************************************************************/
static void
ixv_add_stats_sysctls(struct ixgbe_softc *sc)
{
        device_t dev = sc->dev;
        struct ix_tx_queue *tx_que = sc->tx_queues;
        struct ix_rx_queue *rx_que = sc->rx_queues;
        struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
        struct sysctl_oid *tree = device_get_sysctl_tree(dev);
        struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
        struct ixgbevf_hw_stats *stats = &sc->stats.vf;
        struct sysctl_oid *stat_node, *queue_node;
        struct sysctl_oid_list *stat_list, *queue_list;

#define QUEUE_NAME_LEN 32
        char namebuf[QUEUE_NAME_LEN];

        /* Driver Statistics */
        SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
            CTLFLAG_RD, &sc->watchdog_events, "Watchdog timeouts");
        SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
            CTLFLAG_RD, &sc->link_irq, "Link MSI-X IRQ Handled");

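        /* Per-ring counters are exposed under "queue0", "queue1", ... nodes. */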
        for (int i = 0; i < sc->num_tx_queues; i++, tx_que++) {
                struct tx_ring *txr = &tx_que->txr;
                snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
                queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
                    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
                queue_list = SYSCTL_CHILDREN(queue_node);

                SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
                    CTLFLAG_RD, &(txr->tso_tx), "TSO Packets");
                SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
                    CTLFLAG_RD, &(txr->total_packets), "TX Packets");
        }

        for (int i = 0; i < sc->num_rx_queues; i++, rx_que++) {
                struct rx_ring *rxr = &rx_que->rxr;
                snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
                queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
                    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
                queue_list = SYSCTL_CHILDREN(queue_node);

                SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
                    CTLFLAG_RD, &(rx_que->irqs), "IRQs on queue");
                SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
                    CTLFLAG_RD, &(rxr->rx_packets), "RX packets");
                SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
                    CTLFLAG_RD, &(rxr->rx_bytes), "RX bytes");
                SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
                    CTLFLAG_RD, &(rxr->rx_discarded), "Discarded RX packets");
        }

        stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
            CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
            "VF Statistics (read from HW registers)");
        stat_list = SYSCTL_CHILDREN(stat_node);

        SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
            CTLFLAG_RD, &stats->vfgprc, "Good Packets Received");
        SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
            CTLFLAG_RD, &stats->vfgorc, "Good Octets Received");
        SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
            CTLFLAG_RD, &stats->vfmprc, "Multicast Packets Received");
        SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
            CTLFLAG_RD, &stats->vfgptc, "Good Packets Transmitted");
        SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
            CTLFLAG_RD, &stats->vfgotc, "Good Octets Transmitted");
} /* ixv_add_stats_sysctls */

/************************************************************************
 * ixv_print_debug_info
 *
 *   Called only when the debug sysctl is written with a value of 1.
 *   Provides a way to take a look at important statistics
 *   maintained by the driver and hardware.
 ************************************************************************/
static void
ixv_print_debug_info(struct ixgbe_softc *sc)
{
        device_t dev = sc->dev;
        struct ixgbe_hw *hw = &sc->hw;

        device_printf(dev, "Error Byte Count = %u\n",
            IXGBE_READ_REG(hw, IXGBE_ERRBC));

        device_printf(dev, "MBX IRQ Handled: %lu\n",
            (unsigned long)sc->link_irq);
} /* ixv_print_debug_info */

/************************************************************************
 * ixv_sysctl_debug
 ************************************************************************/
static int
ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
{
        struct ixgbe_softc *sc;
        int error, result;

        result = -1;
        error = sysctl_handle_int(oidp, &result, 0, req);

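        /* Nothing to do unless this was a successful write of a new value. */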
        if (error || !req->newptr)
                return (error);

        if (result == 1) {
                sc = (struct ixgbe_softc *)arg1;
                ixv_print_debug_info(sc);
        }

        return (error);
} /* ixv_sysctl_debug */

/************************************************************************
 * ixv_init_device_features
 ************************************************************************/
static void
ixv_init_device_features(struct ixgbe_softc *sc)
{
        sc->feat_cap = IXGBE_FEATURE_NETMAP
                     | IXGBE_FEATURE_VF
                     | IXGBE_FEATURE_LEGACY_TX;

        /* A tad short on feature flags for VFs at the moment. */
        switch (sc->hw.mac.type) {
        case ixgbe_mac_82599_vf:
                break;
        case ixgbe_mac_X540_vf:
                break;
        case ixgbe_mac_X550_vf:
        case ixgbe_mac_X550EM_x_vf:
        case ixgbe_mac_X550EM_a_vf:
                sc->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
                sc->feat_cap |= IXGBE_FEATURE_RSS;
                break;
        default:
                break;
        }

        /* Enabled by default... */
        /* Is a virtual function (VF) */
        if (sc->feat_cap & IXGBE_FEATURE_VF)
                sc->feat_en |= IXGBE_FEATURE_VF;
        /* Netmap */
        if (sc->feat_cap & IXGBE_FEATURE_NETMAP)
                sc->feat_en |= IXGBE_FEATURE_NETMAP;
        /* Receive-Side Scaling (RSS) */
        if (sc->feat_cap & IXGBE_FEATURE_RSS)
                sc->feat_en |= IXGBE_FEATURE_RSS;
        /* Needs advanced context descriptor regardless of offloads req'd */
        if (sc->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
                sc->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
} /* ixv_init_device_features */
