/*****************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*****************************************************************************/


#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"

#include "ixgbe.h"
#include "ifdi_if.h"

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>

/************************************************************************
 * Driver version
 ************************************************************************/
static const char ixv_driver_version[] = "2.0.1-k";

/************************************************************************
 * PCI Device ID Table
 *
 *   Used by probe to select devices to load on
 *   Last field stores an index into ixv_strings
 *   Last entry must be all 0s
 *
 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 ************************************************************************/
static const pci_vendor_info_t ixv_vendor_info_array[] =
{
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF,
        "Intel(R) X520 82599 Virtual Function"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF,
        "Intel(R) X540 Virtual Function"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF,
        "Intel(R) X550 Virtual Function"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF,
        "Intel(R) X552 Virtual Function"),
    PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF,
        "Intel(R) X553 Virtual Function"),
    /* required last entry */
    PVID_END
};

/************************************************************************
 * Function prototypes
 ************************************************************************/
static void *ixv_register(device_t);
static int ixv_if_attach_pre(if_ctx_t);
static int ixv_if_attach_post(if_ctx_t);
static int ixv_if_detach(if_ctx_t);

static int ixv_if_rx_queue_intr_enable(if_ctx_t, uint16_t);
static int ixv_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int,
    int);
static int ixv_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int,
    int);
static void ixv_if_queues_free(if_ctx_t);
static void ixv_identify_hardware(if_ctx_t);
static void ixv_init_device_features(struct ixgbe_softc *);
static int ixv_allocate_pci_resources(if_ctx_t);
static void ixv_free_pci_resources(if_ctx_t);
static int ixv_setup_interface(if_ctx_t);
static void ixv_if_media_status(if_ctx_t, struct ifmediareq *);
static int ixv_if_media_change(if_ctx_t);
static void ixv_if_update_admin_status(if_ctx_t);
static int ixv_if_msix_intr_assign(if_ctx_t, int);

static int ixv_if_mtu_set(if_ctx_t, uint32_t);
static void ixv_if_init(if_ctx_t);
static void ixv_if_local_timer(if_ctx_t, uint16_t);
static void ixv_if_stop(if_ctx_t);
static int ixv_negotiate_api(struct ixgbe_softc *);

static void ixv_initialize_transmit_units(if_ctx_t);
static void ixv_initialize_receive_units(if_ctx_t);
static void ixv_initialize_rss_mapping(struct ixgbe_softc *);

static void ixv_setup_vlan_support(if_ctx_t);
static void ixv_configure_ivars(struct ixgbe_softc *);
static void ixv_if_enable_intr(if_ctx_t);
static void ixv_if_disable_intr(if_ctx_t);
static void ixv_if_multi_set(if_ctx_t);

static void ixv_if_register_vlan(if_ctx_t, u16);
static void ixv_if_unregister_vlan(if_ctx_t, u16);

static uint64_t ixv_if_get_counter(if_ctx_t, ift_counter);
static bool ixv_if_needs_restart(if_ctx_t, enum iflib_restart_event);

static void ixv_save_stats(struct ixgbe_softc *);
static void ixv_init_stats(struct ixgbe_softc *);
static void ixv_update_stats(struct ixgbe_softc *);
static void ixv_add_stats_sysctls(struct ixgbe_softc *);

static int ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
static void ixv_set_ivar(struct ixgbe_softc *, u8, u8, s8);

static u8 *ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);

/* The MSI-X Interrupt handlers */
static int ixv_msix_que(void *);
static int ixv_msix_mbx(void *);

/************************************************************************
 * FreeBSD Device Interface Entry Points
 ************************************************************************/
static device_method_t ixv_methods[] = {
    /* Device interface */
    DEVMETHOD(device_register, ixv_register),
    DEVMETHOD(device_probe, iflib_device_probe),
    DEVMETHOD(device_attach, iflib_device_attach),
    DEVMETHOD(device_detach, iflib_device_detach),
    DEVMETHOD(device_shutdown, iflib_device_shutdown),
    DEVMETHOD_END
};

static driver_t ixv_driver = {
    "ixv", ixv_methods, sizeof(struct ixgbe_softc),
};

DRIVER_MODULE(ixv, pci, ixv_driver, 0, 0);
IFLIB_PNP_INFO(pci, ixv_driver, ixv_vendor_info_array);
MODULE_DEPEND(ixv, iflib, 1, 1, 1);
MODULE_DEPEND(ixv, pci, 1, 1, 1);
MODULE_DEPEND(ixv, ether, 1, 1, 1);

static device_method_t ixv_if_methods[] = {
    DEVMETHOD(ifdi_attach_pre, ixv_if_attach_pre),
    DEVMETHOD(ifdi_attach_post, ixv_if_attach_post),
    DEVMETHOD(ifdi_detach, ixv_if_detach),
    DEVMETHOD(ifdi_init, ixv_if_init),
    DEVMETHOD(ifdi_stop, ixv_if_stop),
    DEVMETHOD(ifdi_msix_intr_assign, ixv_if_msix_intr_assign),
    DEVMETHOD(ifdi_intr_enable, ixv_if_enable_intr),
    DEVMETHOD(ifdi_intr_disable, ixv_if_disable_intr),
    DEVMETHOD(ifdi_tx_queue_intr_enable, ixv_if_rx_queue_intr_enable),
    DEVMETHOD(ifdi_rx_queue_intr_enable, ixv_if_rx_queue_intr_enable),
    DEVMETHOD(ifdi_tx_queues_alloc, ixv_if_tx_queues_alloc),
    DEVMETHOD(ifdi_rx_queues_alloc, ixv_if_rx_queues_alloc),
    DEVMETHOD(ifdi_queues_free, ixv_if_queues_free),
    DEVMETHOD(ifdi_update_admin_status, ixv_if_update_admin_status),
    DEVMETHOD(ifdi_multi_set, ixv_if_multi_set),
    DEVMETHOD(ifdi_mtu_set, ixv_if_mtu_set),
    DEVMETHOD(ifdi_media_status, ixv_if_media_status),
    DEVMETHOD(ifdi_media_change, ixv_if_media_change),
    DEVMETHOD(ifdi_timer, ixv_if_local_timer),
    DEVMETHOD(ifdi_vlan_register, ixv_if_register_vlan),
    DEVMETHOD(ifdi_vlan_unregister, ixv_if_unregister_vlan),
    DEVMETHOD(ifdi_get_counter, ixv_if_get_counter),
    DEVMETHOD(ifdi_needs_restart, ixv_if_needs_restart),
    DEVMETHOD_END
};

static driver_t ixv_if_driver = {
    "ixv_if", ixv_if_methods, sizeof(struct ixgbe_softc)
};

/*
 * TUNEABLE PARAMETERS:
 */

/* Flow control setting, default to full */
static int ixv_flow_control = ixgbe_fc_full;
TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);

/*
 * Header split: this causes the hardware to DMA
 * the header into a separate mbuf from the payload.
 * It can be a performance win in some workloads, but
 * in others it actually hurts; it is off by default.
 */
static int ixv_header_split = false;
TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);
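
/*
 * Both knobs above are boot-time tunables read via TUNABLE_INT(), so they
 * can be set from the loader, e.g. in /boot/loader.conf (illustrative
 * values; ixgbe_fc_full is 3 in enum ixgbe_fc_mode):
 *
 *   hw.ixv.flow_control="3"
 *   hw.ixv.hdr_split="1"
 */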

extern struct if_txrx ixgbe_txrx;

static struct if_shared_ctx ixv_sctx_init = {
    .isc_magic = IFLIB_MAGIC,
    .isc_q_align = PAGE_SIZE, /* max(DBA_ALIGN, PAGE_SIZE) */
    .isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
    .isc_tx_maxsegsize = PAGE_SIZE,
    .isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
    .isc_tso_maxsegsize = PAGE_SIZE,
    .isc_rx_maxsize = MJUM16BYTES,
    .isc_rx_nsegments = 1,
    .isc_rx_maxsegsize = MJUM16BYTES,
    .isc_nfl = 1,
    .isc_ntxqs = 1,
    .isc_nrxqs = 1,
    .isc_admin_intrcnt = 1,
    .isc_vendor_info = ixv_vendor_info_array,
    .isc_driver_version = ixv_driver_version,
    .isc_driver = &ixv_if_driver,
    .isc_flags = IFLIB_IS_VF | IFLIB_TSO_INIT_IP,

    .isc_nrxd_min = {MIN_RXD},
    .isc_ntxd_min = {MIN_TXD},
    .isc_nrxd_max = {MAX_RXD},
    .isc_ntxd_max = {MAX_TXD},
    .isc_nrxd_default = {DEFAULT_RXD},
    .isc_ntxd_default = {DEFAULT_TXD},
};

static void *
ixv_register(device_t dev)
{
    return (&ixv_sctx_init);
}

/************************************************************************
 * ixv_if_tx_queues_alloc
 ************************************************************************/
static int
ixv_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int ntxqs, int ntxqsets)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    if_softc_ctx_t scctx = sc->shared;
    struct ix_tx_queue *que;
    int i, j, error;

    MPASS(sc->num_tx_queues == ntxqsets);
    MPASS(ntxqs == 1);

    /* Allocate queue structure memory */
    sc->tx_queues =
        (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) *
        ntxqsets, M_DEVBUF, M_NOWAIT | M_ZERO);
    if (!sc->tx_queues) {
        device_printf(iflib_get_dev(ctx),
            "Unable to allocate TX ring memory\n");
        return (ENOMEM);
    }

    for (i = 0, que = sc->tx_queues; i < ntxqsets; i++, que++) {
        struct tx_ring *txr = &que->txr;

        txr->me = i;
        txr->sc = que->sc = sc;

        /* Allocate report status array */
        if (!(txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) *
            scctx->isc_ntxd[0], M_DEVBUF, M_NOWAIT | M_ZERO))) {
            error = ENOMEM;
            goto fail;
        }
        for (j = 0; j < scctx->isc_ntxd[0]; j++)
            txr->tx_rsq[j] = QIDX_INVALID;
        /* get virtual and physical address of the hardware queues */
        txr->tail = IXGBE_VFTDT(txr->me);
        txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i*ntxqs];
        txr->tx_paddr = paddrs[i*ntxqs];

        txr->bytes = 0;
        txr->total_packets = 0;
    }

    device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
        sc->num_tx_queues);

    return (0);

fail:
    ixv_if_queues_free(ctx);

    return (error);
} /* ixv_if_tx_queues_alloc */

/************************************************************************
 * ixv_if_rx_queues_alloc
 ************************************************************************/
static int
ixv_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int nrxqs, int nrxqsets)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ix_rx_queue *que;
    int i, error;

    MPASS(sc->num_rx_queues == nrxqsets);
    MPASS(nrxqs == 1);

    /* Allocate queue structure memory */
    sc->rx_queues =
        (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue) *
        nrxqsets, M_DEVBUF, M_NOWAIT | M_ZERO);
    if (!sc->rx_queues) {
        device_printf(iflib_get_dev(ctx),
            "Unable to allocate RX ring memory\n");
        error = ENOMEM;
        goto fail;
    }

    for (i = 0, que = sc->rx_queues; i < nrxqsets; i++, que++) {
        struct rx_ring *rxr = &que->rxr;
        rxr->me = i;
        rxr->sc = que->sc = sc;

        /* get the virtual and physical address of the hw queues */
        rxr->tail = IXGBE_VFRDT(rxr->me);
        rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
        rxr->rx_paddr = paddrs[i*nrxqs];
        rxr->bytes = 0;
        rxr->que = que;
    }

    device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
        sc->num_rx_queues);

    return (0);

fail:
    ixv_if_queues_free(ctx);

    return (error);
} /* ixv_if_rx_queues_alloc */

/************************************************************************
 * ixv_if_queues_free
 ************************************************************************/
static void
ixv_if_queues_free(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ix_tx_queue *que = sc->tx_queues;
    int i;

    if (que == NULL)
        goto free;

    for (i = 0; i < sc->num_tx_queues; i++, que++) {
        struct tx_ring *txr = &que->txr;
        if (txr->tx_rsq == NULL)
            break;

        free(txr->tx_rsq, M_DEVBUF);
        txr->tx_rsq = NULL;
    }
    if (sc->tx_queues != NULL)
        free(sc->tx_queues, M_DEVBUF);
free:
    if (sc->rx_queues != NULL)
        free(sc->rx_queues, M_DEVBUF);
    sc->tx_queues = NULL;
    sc->rx_queues = NULL;
} /* ixv_if_queues_free */

/************************************************************************
 * ixv_if_attach_pre - Device initialization routine
 *
 *   Called when the driver is being loaded.
 *   Identifies the type of hardware, allocates all resources
 *   and initializes the hardware.
 *
 *   return 0 on success, positive on failure
 ************************************************************************/
static int
ixv_if_attach_pre(if_ctx_t ctx)
{
    struct ixgbe_softc *sc;
    device_t dev;
    if_softc_ctx_t scctx;
    struct ixgbe_hw *hw;
    int error = 0;

    INIT_DEBUGOUT("ixv_attach: begin");

    /* Allocate, clear, and link in our sc structure */
    dev = iflib_get_dev(ctx);
    sc = iflib_get_softc(ctx);
    sc->dev = dev;
    sc->ctx = ctx;
    sc->hw.back = sc;
    scctx = sc->shared = iflib_get_softc_ctx(ctx);
    sc->media = iflib_get_media(ctx);
    hw = &sc->hw;

    /* Do base PCI setup - map BAR0 */
    if (ixv_allocate_pci_resources(ctx)) {
        device_printf(dev, "ixv_allocate_pci_resources() failed!\n");
        error = ENXIO;
        goto err_out;
    }

    /* SYSCTL APIs */
    SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
        CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
        sc, 0, ixv_sysctl_debug, "I", "Debug Info");

    /* Determine hardware revision */
    ixv_identify_hardware(ctx);
    ixv_init_device_features(sc);

    /* Initialize the shared code */
    error = ixgbe_init_ops_vf(hw);
    if (error) {
        device_printf(dev, "ixgbe_init_ops_vf() failed!\n");
        error = EIO;
        goto err_out;
    }

    /* Setup the mailbox */
    ixgbe_init_mbx_params_vf(hw);

    error = hw->mac.ops.reset_hw(hw);
    if (error == IXGBE_ERR_RESET_FAILED)
        device_printf(dev, "...reset_hw() failure: Reset Failed!\n");
    else if (error)
        device_printf(dev, "...reset_hw() failed with error %d\n",
            error);
    if (error) {
        error = EIO;
        goto err_out;
    }

    error = hw->mac.ops.init_hw(hw);
    if (error) {
        device_printf(dev, "...init_hw() failed with error %d\n",
            error);
        error = EIO;
        goto err_out;
    }

    /* Negotiate mailbox API version */
    error = ixv_negotiate_api(sc);
    if (error) {
        device_printf(dev,
            "Mailbox API negotiation failed during attach!\n");
        goto err_out;
    }

    /* Check if VF was disabled by PF */
    error = hw->mac.ops.get_link_state(hw, &sc->link_enabled);
    if (error) {
        /* PF is not capable of controlling VF state. Enable link. */
        sc->link_enabled = true;
    }

    /* If no mac address was assigned, make a random one */
    if (!ixv_check_ether_addr(hw->mac.addr)) {
        ether_gen_addr(iflib_get_ifp(ctx),
            (struct ether_addr *)hw->mac.addr);
        bcopy(hw->mac.addr, hw->mac.perm_addr,
            sizeof(hw->mac.perm_addr));
    }

    /* Most of the iflib initialization... */

    iflib_set_mac(ctx, hw->mac.addr);
    switch (sc->hw.mac.type) {
    case ixgbe_mac_X550_vf:
    case ixgbe_mac_X550EM_x_vf:
    case ixgbe_mac_X550EM_a_vf:
        scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 2;
        break;
    default:
        scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 1;
    }
    scctx->isc_txqsizes[0] =
        roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
        sizeof(u32), DBA_ALIGN);
    scctx->isc_rxqsizes[0] =
        roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
        DBA_ALIGN);
    /* XXX */
    scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
        CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
    scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
    scctx->isc_msix_bar = pci_msix_table_bar(dev);
    scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
    scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
    scctx->isc_tx_tso_segsize_max = PAGE_SIZE;

    scctx->isc_txrx = &ixgbe_txrx;

    /*
     * Tell the upper layer(s) we support everything the PF
     * driver does except...
     *   Wake-on-LAN
     */
    scctx->isc_capabilities = IXGBE_CAPS;
    scctx->isc_capabilities ^= IFCAP_WOL;
    scctx->isc_capenable = scctx->isc_capabilities;

    INIT_DEBUGOUT("ixv_if_attach_pre: end");

    return (0);

err_out:
    ixv_free_pci_resources(ctx);

    return (error);
} /* ixv_if_attach_pre */

static int
ixv_if_attach_post(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    device_t dev = iflib_get_dev(ctx);
    int error = 0;

    /* Setup OS specific network interface */
    error = ixv_setup_interface(ctx);
    if (error) {
        device_printf(dev, "Interface setup failed: %d\n", error);
        goto end;
    }

    /* Do the stats setup */
    ixv_save_stats(sc);
    ixv_init_stats(sc);
    ixv_add_stats_sysctls(sc);

end:
    return (error);
} /* ixv_if_attach_post */

/************************************************************************
 * ixv_detach - Device removal routine
 *
 *   Called when the driver is being removed.
 *   Stops the adapter and deallocates all the resources
 *   that were allocated for driver operation.
 *
 *   return 0 on success, positive on failure
 ************************************************************************/
static int
ixv_if_detach(if_ctx_t ctx)
{
    INIT_DEBUGOUT("ixv_detach: begin");

    ixv_free_pci_resources(ctx);

    return (0);
} /* ixv_if_detach */

/************************************************************************
 * ixv_if_mtu_set
 ************************************************************************/
static int
ixv_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    if_t ifp = iflib_get_ifp(ctx);
    int error = 0;

    IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
    if (mtu > IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR) {
        error = EINVAL;
    } else {
        if_setmtu(ifp, mtu);
        sc->max_frame_size = if_getmtu(ifp) + IXGBE_MTU_HDR;
    }

    return (error);
} /* ixv_if_mtu_set */

/************************************************************************
 * ixv_if_init - Init entry point
 *
 *   Used in two ways: It is used by the stack as an init entry
 *   point in network interface structure. It is also used
 *   by the driver as a hw/sw initialization routine to get
 *   to a consistent state.
 ************************************************************************/
static void
ixv_if_init(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    if_t ifp = iflib_get_ifp(ctx);
    device_t dev = iflib_get_dev(ctx);
    struct ixgbe_hw *hw = &sc->hw;
    int error = 0;

    INIT_DEBUGOUT("ixv_if_init: begin");
    hw->adapter_stopped = false;
    hw->mac.ops.stop_adapter(hw);

    /* reprogram the RAR[0] in case user changed it. */
    hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

    /* Get the latest mac address, User can use a LAA */
    bcopy(if_getlladdr(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
    hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);

    /* Reset VF and renegotiate mailbox API version */
    hw->mac.ops.reset_hw(hw);
    hw->mac.ops.start_hw(hw);
    error = ixv_negotiate_api(sc);
    if (error) {
        device_printf(dev,
            "Mailbox API negotiation failed in if_init!\n");
        return;
    }

    ixv_initialize_transmit_units(ctx);

    /* Setup Multicast table */
    ixv_if_multi_set(ctx);

    sc->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);

    /* Configure RX settings */
    ixv_initialize_receive_units(ctx);

    /* Set up VLAN offload and filter */
    ixv_setup_vlan_support(ctx);

    /* Set up MSI-X routing */
    ixv_configure_ivars(sc);

    /* Set up auto-mask */
    IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);

    /* Set moderation on the Link interrupt */
    IXGBE_WRITE_REG(hw, IXGBE_VTEITR(sc->vector), IXGBE_LINK_ITR);

    /* Stats init */
    ixv_init_stats(sc);

    /* Config/Enable Link */
    error = hw->mac.ops.get_link_state(hw, &sc->link_enabled);
    if (error) {
        /* PF is not capable of controlling VF state. Enable the link. */
        sc->link_enabled = true;
    } else if (sc->link_enabled == false)
        device_printf(dev, "VF is disabled by PF\n");

    hw->mac.ops.check_link(hw, &sc->link_speed, &sc->link_up,
        false);

    /* And now turn on interrupts */
    ixv_if_enable_intr(ctx);
} /* ixv_if_init */

/************************************************************************
 * ixv_enable_queue
 ************************************************************************/
static inline void
ixv_enable_queue(struct ixgbe_softc *sc, u32 vector)
{
    struct ixgbe_hw *hw = &sc->hw;
    u32 queue = 1 << vector;
    u32 mask;

    mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
} /* ixv_enable_queue */

/************************************************************************
 * ixv_disable_queue
 ************************************************************************/
static inline void
ixv_disable_queue(struct ixgbe_softc *sc, u32 vector)
{
    struct ixgbe_hw *hw = &sc->hw;
    u64 queue = (u64)(1 << vector);
    u32 mask;

    mask = (IXGBE_EIMS_RTX_QUEUE & queue);
    IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
} /* ixv_disable_queue */

/************************************************************************
 * ixv_msix_que - MSI-X Queue Interrupt Service routine
 ************************************************************************/
static int
ixv_msix_que(void *arg)
{
    struct ix_rx_queue *que = arg;
    struct ixgbe_softc *sc = que->sc;

    ixv_disable_queue(sc, que->msix);
    ++que->irqs;

    return (FILTER_SCHEDULE_THREAD);
} /* ixv_msix_que */

/************************************************************************
 * ixv_msix_mbx
 ************************************************************************/
static int
ixv_msix_mbx(void *arg)
{
    struct ixgbe_softc *sc = arg;
    struct ixgbe_hw *hw = &sc->hw;
    u32 reg;

    ++sc->link_irq;

    /* First get the cause */
    reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
    /* Clear interrupt with write */
    IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);

    /* Link status change */
    if (reg & IXGBE_EICR_LSC)
        iflib_admin_intr_deferred(sc->ctx);

    IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);

    return (FILTER_HANDLED);
} /* ixv_msix_mbx */

/************************************************************************
 * ixv_media_status - Media Ioctl callback
 *
 *   Called whenever the user queries the status of
 *   the interface using ifconfig.
 ************************************************************************/
static void
ixv_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);

    INIT_DEBUGOUT("ixv_media_status: begin");

    iflib_admin_intr_deferred(ctx);

    ifmr->ifm_status = IFM_AVALID;
    ifmr->ifm_active = IFM_ETHER;

    if (!sc->link_active)
        return;

    ifmr->ifm_status |= IFM_ACTIVE;

    switch (sc->link_speed) {
    case IXGBE_LINK_SPEED_1GB_FULL:
        ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
        break;
    case IXGBE_LINK_SPEED_10GB_FULL:
        ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
        break;
    case IXGBE_LINK_SPEED_100_FULL:
        ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
        break;
    case IXGBE_LINK_SPEED_10_FULL:
        ifmr->ifm_active |= IFM_10_T | IFM_FDX;
        break;
    }
} /* ixv_if_media_status */

/************************************************************************
 * ixv_if_media_change - Media Ioctl callback
 *
 *   Called when the user changes speed/duplex using
 *   the media/mediaopt options with ifconfig.
 ************************************************************************/
static int
ixv_if_media_change(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ifmedia *ifm = iflib_get_media(ctx);

    INIT_DEBUGOUT("ixv_media_change: begin");

    if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
        return (EINVAL);

    switch (IFM_SUBTYPE(ifm->ifm_media)) {
    case IFM_AUTO:
        break;
    default:
        device_printf(sc->dev, "Only auto media type\n");
        return (EINVAL);
    }

    return (0);
} /* ixv_if_media_change */

/************************************************************************
 * ixv_negotiate_api
 *
 *   Negotiate the Mailbox API with the PF;
 *   start with the most featured API first.
 ************************************************************************/
static int
ixv_negotiate_api(struct ixgbe_softc *sc)
{
    struct ixgbe_hw *hw = &sc->hw;
    int mbx_api[] = {
        ixgbe_mbox_api_12,
        ixgbe_mbox_api_11,
        ixgbe_mbox_api_10,
        ixgbe_mbox_api_unknown
    };
    int i = 0;

    while (mbx_api[i] != ixgbe_mbox_api_unknown) {
        if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
            return (0);
        i++;
    }

    return (EINVAL);
} /* ixv_negotiate_api */

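/*
 * if_foreach_llmaddr() callback: copies one link-level multicast address
 * into the flat mta[] array and returns the updated address count.
 */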
static u_int
ixv_if_multi_set_cb(void *cb_arg, struct sockaddr_dl *addr, u_int cnt)
{
    bcopy(LLADDR(addr),
        &((u8 *)cb_arg)[cnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
        IXGBE_ETH_LENGTH_OF_ADDRESS);

    return (++cnt);
}

/************************************************************************
 * ixv_if_multi_set - Multicast Update
 *
 *   Called whenever multicast address list is updated.
 ************************************************************************/
static void
ixv_if_multi_set(if_ctx_t ctx)
{
    u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    u8 *update_ptr;
    if_t ifp = iflib_get_ifp(ctx);
    int mcnt = 0;

    IOCTL_DEBUGOUT("ixv_if_multi_set: begin");

    mcnt = if_foreach_llmaddr(ifp, ixv_if_multi_set_cb, mta);

    update_ptr = mta;

    sc->hw.mac.ops.update_mc_addr_list(&sc->hw, update_ptr, mcnt,
        ixv_mc_array_itr, true);
} /* ixv_if_multi_set */

/************************************************************************
 * ixv_mc_array_itr
 *
 *   An iterator function needed by the multicast shared code.
 *   It feeds the shared code routine the addresses in the
 *   array of ixv_set_multi() one by one.
 ************************************************************************/
static u8 *
ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
{
    u8 *addr = *update_ptr;
    u8 *newptr;

    *vmdq = 0;

    newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
    *update_ptr = newptr;

    return addr;
} /* ixv_mc_array_itr */

/************************************************************************
 * ixv_if_local_timer - Timer routine
 *
 *   Checks for link status, updates statistics,
 *   and runs the watchdog check.
 ************************************************************************/
static void
ixv_if_local_timer(if_ctx_t ctx, uint16_t qid)
{
    if (qid != 0)
        return;

    /* Fire off the adminq task */
    iflib_admin_intr_deferred(ctx);
} /* ixv_if_local_timer */

/************************************************************************
 * ixv_if_update_admin_status - Update OS on link state
 *
 * Note: Only updates the OS on the cached link state.
 *       The real check of the hardware only happens with
 *       a link interrupt.
 ************************************************************************/
static void
ixv_if_update_admin_status(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    device_t dev = iflib_get_dev(ctx);
    s32 status;

    sc->hw.mac.get_link_status = true;

    status = ixgbe_check_link(&sc->hw, &sc->link_speed,
        &sc->link_up, false);

    if (status != IXGBE_SUCCESS && sc->hw.adapter_stopped == false) {
        /* Mailbox's Clear To Send status is lost or timeout occurred.
         * We need reinitialization. */
        if_init(iflib_get_ifp(ctx), ctx);
    }

    if (sc->link_up && sc->link_enabled) {
        if (sc->link_active == false) {
            if (bootverbose)
                device_printf(dev,
                    "Link is up %d Gbps Full Duplex\n",
                    (sc->link_speed ==
                    IXGBE_LINK_SPEED_10GB_FULL) ? 10 : 1);
            sc->link_active = true;
            iflib_link_state_change(ctx, LINK_STATE_UP,
                ixgbe_link_speed_to_baudrate(sc->link_speed));
        }
    } else { /* Link down */
        if (sc->link_active == true) {
            if (bootverbose)
                device_printf(dev, "Link is Down\n");
            iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
            sc->link_active = false;
        }
    }

    /* Stats Update */
    ixv_update_stats(sc);
} /* ixv_if_update_admin_status */

/************************************************************************
 * ixv_if_stop - Stop the hardware
 *
 *   Disables all traffic on the adapter by issuing a
 *   global reset on the MAC and deallocates TX/RX buffers.
 ************************************************************************/
static void
ixv_if_stop(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ixgbe_hw *hw = &sc->hw;

    INIT_DEBUGOUT("ixv_stop: begin\n");

    ixv_if_disable_intr(ctx);

    hw->mac.ops.reset_hw(hw);
    sc->hw.adapter_stopped = false;
    hw->mac.ops.stop_adapter(hw);

    /* Update the stack */
    sc->link_up = false;
    ixv_if_update_admin_status(ctx);

    /* reprogram the RAR[0] in case user changed it. */
    hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
} /* ixv_if_stop */

/************************************************************************
 * ixv_identify_hardware - Determine hardware revision.
 ************************************************************************/
static void
ixv_identify_hardware(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    device_t dev = iflib_get_dev(ctx);
    struct ixgbe_hw *hw = &sc->hw;

    /* Save off the information about this board */
    hw->vendor_id = pci_get_vendor(dev);
    hw->device_id = pci_get_device(dev);
    hw->revision_id = pci_get_revid(dev);
    hw->subsystem_vendor_id = pci_get_subvendor(dev);
    hw->subsystem_device_id = pci_get_subdevice(dev);

    /* A subset of set_mac_type */
    switch (hw->device_id) {
    case IXGBE_DEV_ID_82599_VF:
        hw->mac.type = ixgbe_mac_82599_vf;
        break;
    case IXGBE_DEV_ID_X540_VF:
        hw->mac.type = ixgbe_mac_X540_vf;
        break;
    case IXGBE_DEV_ID_X550_VF:
        hw->mac.type = ixgbe_mac_X550_vf;
        break;
    case IXGBE_DEV_ID_X550EM_X_VF:
        hw->mac.type = ixgbe_mac_X550EM_x_vf;
        break;
    case IXGBE_DEV_ID_X550EM_A_VF:
        hw->mac.type = ixgbe_mac_X550EM_a_vf;
        break;
    default:
        device_printf(dev, "unknown mac type\n");
        hw->mac.type = ixgbe_mac_unknown;
        break;
    }
} /* ixv_identify_hardware */

/************************************************************************
 * ixv_if_msix_intr_assign - Setup MSI-X Interrupt resources and handlers
 ************************************************************************/
static int
ixv_if_msix_intr_assign(if_ctx_t ctx, int msix)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    device_t dev = iflib_get_dev(ctx);
    struct ix_rx_queue *rx_que = sc->rx_queues;
    struct ix_tx_queue *tx_que;
    int error, rid, vector = 0;
    char buf[16];

    for (int i = 0; i < sc->num_rx_queues; i++, vector++, rx_que++) {
        rid = vector + 1;

        snprintf(buf, sizeof(buf), "rxq%d", i);
        error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
            IFLIB_INTR_RXTX, ixv_msix_que, rx_que, rx_que->rxr.me,
            buf);

        if (error) {
            device_printf(iflib_get_dev(ctx),
                "Failed to allocate que int %d err: %d\n",
                i, error);
            sc->num_rx_queues = i + 1;
            goto fail;
        }

        rx_que->msix = vector;
    }

    for (int i = 0; i < sc->num_tx_queues; i++) {
        snprintf(buf, sizeof(buf), "txq%d", i);
        tx_que = &sc->tx_queues[i];
        tx_que->msix = i % sc->num_rx_queues;
        iflib_softirq_alloc_generic(ctx,
            &sc->rx_queues[tx_que->msix].que_irq,
            IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
    }
    rid = vector + 1;
    error = iflib_irq_alloc_generic(ctx, &sc->irq, rid,
        IFLIB_INTR_ADMIN, ixv_msix_mbx, sc, 0, "aq");
    if (error) {
        device_printf(iflib_get_dev(ctx),
            "Failed to register admin handler\n");
        return (error);
    }

    sc->vector = vector;
    /*
     * Due to a broken design QEMU will fail to properly
     * enable the guest for MSIX unless the vectors in
     * the table are all set up, so we must rewrite the
     * ENABLE in the MSIX control register again at this
     * point to cause it to successfully initialize us.
     */
    if (sc->hw.mac.type == ixgbe_mac_82599_vf) {
        int msix_ctrl;
        if (pci_find_cap(dev, PCIY_MSIX, &rid)) {
            device_printf(dev,
                "Finding MSIX capability failed\n");
        } else {
            rid += PCIR_MSIX_CTRL;
            msix_ctrl = pci_read_config(dev, rid, 2);
            msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
            pci_write_config(dev, rid, msix_ctrl, 2);
        }
    }

    return (0);

fail:
    iflib_irq_free(ctx, &sc->irq);
    rx_que = sc->rx_queues;
    for (int i = 0; i < sc->num_rx_queues; i++, rx_que++)
        iflib_irq_free(ctx, &rx_que->que_irq);

    return (error);
} /* ixv_if_msix_intr_assign */

/************************************************************************
 * ixv_allocate_pci_resources
 ************************************************************************/
static int
ixv_allocate_pci_resources(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    device_t dev = iflib_get_dev(ctx);
    int rid;

    rid = PCIR_BAR(0);
    sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
        RF_ACTIVE);

    if (!(sc->pci_mem)) {
        device_printf(dev,
            "Unable to allocate bus resource: memory\n");
        return (ENXIO);
    }

    sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->pci_mem);
    sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->pci_mem);
    sc->hw.hw_addr = (u8 *)&sc->osdep.mem_bus_space_handle;

    return (0);
} /* ixv_allocate_pci_resources */

/************************************************************************
 * ixv_free_pci_resources
 ************************************************************************/
static void
ixv_free_pci_resources(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ix_rx_queue *que = sc->rx_queues;
    device_t dev = iflib_get_dev(ctx);

    /* Release all MSI-X queue resources */
    if (sc->intr_type == IFLIB_INTR_MSIX)
        iflib_irq_free(ctx, &sc->irq);

    if (que != NULL) {
        for (int i = 0; i < sc->num_rx_queues; i++, que++) {
            iflib_irq_free(ctx, &que->que_irq);
        }
    }

    if (sc->pci_mem != NULL)
        bus_release_resource(dev, SYS_RES_MEMORY,
            rman_get_rid(sc->pci_mem), sc->pci_mem);
} /* ixv_free_pci_resources */

/************************************************************************
 * ixv_setup_interface
 *
 *   Setup networking device structure and register an interface.
 ************************************************************************/
static int
ixv_setup_interface(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    if_softc_ctx_t scctx = sc->shared;
    if_t ifp = iflib_get_ifp(ctx);

    INIT_DEBUGOUT("ixv_setup_interface: begin");

    if_setbaudrate(ifp, IF_Gbps(10));
    if_setsendqlen(ifp, scctx->isc_ntxd[0] - 2);

    sc->max_frame_size = if_getmtu(ifp) + IXGBE_MTU_HDR;
    ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
    ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);

    return (0);
} /* ixv_setup_interface */

/************************************************************************
 * ixv_if_get_counter
 ************************************************************************/
static uint64_t
ixv_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    if_t ifp = iflib_get_ifp(ctx);

    switch (cnt) {
    case IFCOUNTER_IPACKETS:
        return (sc->ipackets);
    case IFCOUNTER_OPACKETS:
        return (sc->opackets);
    case IFCOUNTER_IBYTES:
        return (sc->ibytes);
    case IFCOUNTER_OBYTES:
        return (sc->obytes);
    case IFCOUNTER_IMCASTS:
        return (sc->imcasts);
    default:
        return (if_get_counter_default(ifp, cnt));
    }
} /* ixv_if_get_counter */

/* ixv_if_needs_restart - Tell iflib when the driver needs to be reinitialized
 * @ctx: iflib context
 * @event: event code to check
 *
 * Defaults to returning true for every event.
 *
 * @returns true if iflib needs to reinit the interface
 */
static bool
ixv_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
{
    switch (event) {
    case IFLIB_RESTART_VLAN_CONFIG:
        /* XXX: This may not need to return true */
    default:
        return (true);
    }
}

/************************************************************************
 * ixv_initialize_transmit_units - Enable transmit unit.
 ************************************************************************/
static void
ixv_initialize_transmit_units(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ixgbe_hw *hw = &sc->hw;
    if_softc_ctx_t scctx = sc->shared;
    struct ix_tx_queue *que = sc->tx_queues;
    int i;

    for (i = 0; i < sc->num_tx_queues; i++, que++) {
        struct tx_ring *txr = &que->txr;
        u64 tdba = txr->tx_paddr;
        u32 txctrl, txdctl;
        int j = txr->me;

        /* Set WTHRESH to 8, burst writeback */
        txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
        txdctl |= (8 << 16);
        IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);

        /* Set the HW Tx Head and Tail indices */
        IXGBE_WRITE_REG(&sc->hw, IXGBE_VFTDH(j), 0);
        IXGBE_WRITE_REG(&sc->hw, IXGBE_VFTDT(j), 0);

        /* Set Tx Tail register */
        txr->tail = IXGBE_VFTDT(j);

        txr->tx_rs_cidx = txr->tx_rs_pidx;
        /*
         * Initialize the last processed descriptor to be the end of
         * the ring, rather than the start, so that we avoid an
         * off-by-one error when calculating how many descriptors are
         * done in the credits_update function.
         */
        txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
        for (int k = 0; k < scctx->isc_ntxd[0]; k++)
            txr->tx_rsq[k] = QIDX_INVALID;

        /* Set Ring parameters */
        IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
            (tdba & 0x00000000ffffffffULL));
        IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
        IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j),
            scctx->isc_ntxd[0] * sizeof(struct ixgbe_legacy_tx_desc));
        txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
        txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
        IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);

        /* Now enable */
        txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
        txdctl |= IXGBE_TXDCTL_ENABLE;
        IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
    }
} /* ixv_initialize_transmit_units */

/************************************************************************
 * ixv_initialize_rss_mapping
 ************************************************************************/
static void
ixv_initialize_rss_mapping(struct ixgbe_softc *sc)
{
    struct ixgbe_hw *hw = &sc->hw;
    u32 reta = 0, mrqc, rss_key[10];
    int queue_id;
    int i, j;
    u32 rss_hash_config;

    if (sc->feat_en & IXGBE_FEATURE_RSS) {
        /* Fetch the configured RSS key */
        rss_getkey((uint8_t *)&rss_key);
    } else {
        /* set up random bits */
        arc4rand(&rss_key, sizeof(rss_key), 0);
    }

    /* Now fill out hash function seeds */
    for (i = 0; i < 10; i++)
        IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);

    /* Set up the redirection table */
    for (i = 0, j = 0; i < 64; i++, j++) {
        if (j == sc->num_rx_queues)
            j = 0;

        if (sc->feat_en & IXGBE_FEATURE_RSS) {
            /*
             * Fetch the RSS bucket id for the given indirection
             * entry. Cap it at the number of configured buckets
             * (which is num_rx_queues.)
             */
            queue_id = rss_get_indirection_to_bucket(i);
            queue_id = queue_id % sc->num_rx_queues;
        } else
            queue_id = j;

        /*
         * The low 8 bits are for hash value (n+0);
         * The next 8 bits are for hash value (n+1), etc.
         */
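        /*
         * For example, with two RX queues the bucket ids cycle
         * 0,1,0,1,..., so each completed group of four entries
         * writes 0x01000100 into its VFRETA register (one queue
         * id per byte).
         */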
        reta >>= 8;
        reta |= ((uint32_t)queue_id) << 24;
        if ((i & 3) == 3) {
            IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
            reta = 0;
        }
    }

    /* Perform hash on these packet types */
    if (sc->feat_en & IXGBE_FEATURE_RSS)
        rss_hash_config = rss_gethashconfig();
    else {
        /*
         * Disable UDP - IP fragments aren't currently being handled
         * and so we end up with a mix of 2-tuple and 4-tuple
         * traffic.
         */
        rss_hash_config = RSS_HASHTYPE_RSS_IPV4
            | RSS_HASHTYPE_RSS_TCP_IPV4
            | RSS_HASHTYPE_RSS_IPV6
            | RSS_HASHTYPE_RSS_TCP_IPV6;
    }

    mrqc = IXGBE_MRQC_RSSEN;
    if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
    if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
    if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
    if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
    if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
        device_printf(sc->dev,
            "%s: RSS_HASHTYPE_RSS_IPV6_EX defined,"
            " but not supported\n", __func__);
    if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
        device_printf(sc->dev,
            "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined,"
            " but not supported\n", __func__);
    if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
    if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
    if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
        device_printf(sc->dev,
            "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined,"
            " but not supported\n", __func__);
    IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
} /* ixv_initialize_rss_mapping */

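/*
 * SRRCTL.BSIZEPKT is programmed in units of (1 << IXGBE_SRRCTL_BSIZEPKT_SHIFT)
 * bytes, so the receive buffer size is rounded up to the next unit boundary
 * before being shifted into place below.
 */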
#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
/************************************************************************
 * ixv_initialize_receive_units - Setup receive registers and features.
 ************************************************************************/
static void
ixv_initialize_receive_units(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    if_softc_ctx_t scctx;
    struct ixgbe_hw *hw = &sc->hw;
#ifdef DEV_NETMAP
    if_t ifp = iflib_get_ifp(ctx);
#endif
    struct ix_rx_queue *que = sc->rx_queues;
    u32 bufsz, psrtype;

    bufsz = (sc->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
        IXGBE_SRRCTL_BSIZEPKT_SHIFT;

    psrtype = IXGBE_PSRTYPE_TCPHDR |
        IXGBE_PSRTYPE_UDPHDR |
        IXGBE_PSRTYPE_IPV4HDR |
        IXGBE_PSRTYPE_IPV6HDR |
        IXGBE_PSRTYPE_L2HDR;

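    /*
     * Bit 29 is the low bit of PSRTYPE's queues-per-pool (RQPL) field;
     * setting it advertises that two RX queues are in use for RSS.
     */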
    if (sc->num_rx_queues > 1)
        psrtype |= 1 << 29;

    IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);

    /* Tell PF our max_frame size */
    if (ixgbevf_rlpml_set_vf(hw, sc->max_frame_size) != 0) {
        device_printf(sc->dev,
            "There is a problem with the PF setup.  It is likely the"
            " receive unit for this VF will not function correctly.\n");
    }
    scctx = sc->shared;

    for (int i = 0; i < sc->num_rx_queues; i++, que++) {
        struct rx_ring *rxr = &que->rxr;
        u64 rdba = rxr->rx_paddr;
        u32 reg, rxdctl;
        int j = rxr->me;

        /* Disable the queue */
        rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
        rxdctl &= ~IXGBE_RXDCTL_ENABLE;
        IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
        for (int k = 0; k < 10; k++) {
            if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
                IXGBE_RXDCTL_ENABLE)
                msec_delay(1);
            else
                break;
        }
        wmb();
        /* Setup the Base and Length of the Rx Descriptor Ring */
        IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
            (rdba & 0x00000000ffffffffULL));
        IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
        IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j),
            scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));

        /* Reset the ring indices */
        IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
        IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);

        /* Set up the SRRCTL register */
        reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(j));
        reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
        reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
        reg |= bufsz;
        reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
        IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(j), reg);

        /* Capture Rx Tail index */
        rxr->tail = IXGBE_VFRDT(rxr->me);

        /* Do the queue enabling last */
        rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
        IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
        for (int l = 0; l < 10; l++) {
            if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
                IXGBE_RXDCTL_ENABLE)
                break;
            msec_delay(1);
        }
        wmb();

        /* Set the Tail Pointer */
#ifdef DEV_NETMAP
        /*
         * In netmap mode, we must preserve the buffers made
         * available to userspace before the if_init()
         * (this is true by default on the TX side, because
         * init makes all buffers available to userspace).
         *
         * netmap_reset() and the device specific routines
         * (e.g. ixgbe_setup_receive_rings()) map these
         * buffers at the end of the NIC ring, so here we
         * must set the RDT (tail) register to make sure
         * they are not overwritten.
         *
         * In this driver the NIC ring starts at RDH = 0,
         * RDT points to the last slot available for reception (?),
         * so RDT = num_rx_desc - 1 means the whole ring is available.
         */
        if (if_getcapenable(ifp) & IFCAP_NETMAP) {
            struct netmap_adapter *na = NA(ifp);
            struct netmap_kring *kring = na->rx_rings[j];
            int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

            IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
        } else
#endif /* DEV_NETMAP */
            IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
                scctx->isc_nrxd[0] - 1);
    }

    /*
     * Do not touch RSS and RETA settings for older hardware
     * as those are shared among PF and all VF.
     */
    if (sc->hw.mac.type >= ixgbe_mac_X550_vf)
        ixv_initialize_rss_mapping(sc);
} /* ixv_initialize_receive_units */

/************************************************************************
 * ixv_setup_vlan_support
 ************************************************************************/
static void
ixv_setup_vlan_support(if_ctx_t ctx)
{
    if_t ifp = iflib_get_ifp(ctx);
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ixgbe_hw *hw = &sc->hw;
    u32 ctrl, vid, vfta, retry;

    /*
     * We get here via if_init, meaning a soft reset;
     * that has already cleared the VFTA and other state,
     * so if no VLANs have been registered there is
     * nothing to do.
     */
    if (sc->num_vlans == 0)
        return;

    if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) {
        /* Enable the queues */
        for (int i = 0; i < sc->num_rx_queues; i++) {
            ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
            ctrl |= IXGBE_RXDCTL_VME;
            IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
            /*
             * Let Rx path know that it needs to store VLAN tag
             * as part of extra mbuf info.
             */
            sc->rx_queues[i].rxr.vtag_strip = true;
        }
    }

    /*
     * If filtering VLAN tags is disabled,
     * there is no need to fill VLAN Filter Table Array (VFTA).
     */
    if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0)
        return;

    /*
     * A soft reset zeroes out the VFTA, so
     * we need to repopulate it now.
     */
    for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
        if (sc->shadow_vfta[i] == 0)
            continue;
        vfta = sc->shadow_vfta[i];
        /*
         * Reconstruct the vlan ids
         * based on the bits set in each
         * of the array ints.
         */
        for (int j = 0; j < 32; j++) {
            retry = 0;
            if ((vfta & (1 << j)) == 0)
                continue;
            vid = (i * 32) + j;
            /* Call the shared code mailbox routine */
            while (hw->mac.ops.set_vfta(hw, vid, 0, true, false)) {
                if (++retry > 5)
                    break;
            }
        }
    }
} /* ixv_setup_vlan_support */

/************************************************************************
 * ixv_if_register_vlan
 *
 *   Run via a vlan config EVENT, it enables us to use the
 *   HW Filter table since we can get the vlan id. This just
 *   creates the entry in the soft version of the VFTA, init
 *   will repopulate the real table.
 ************************************************************************/
static void
ixv_if_register_vlan(if_ctx_t ctx, u16 vtag)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    u16 index, bit;

    index = (vtag >> 5) & 0x7F;
    bit = vtag & 0x1F;
    sc->shadow_vfta[index] |= (1 << bit);
    ++sc->num_vlans;
} /* ixv_if_register_vlan */

/************************************************************************
 * ixv_if_unregister_vlan
 *
 *   Run via a vlan unconfig EVENT, remove our entry
 *   in the soft vfta.
 ************************************************************************/
static void
ixv_if_unregister_vlan(if_ctx_t ctx, u16 vtag)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    u16 index, bit;

    index = (vtag >> 5) & 0x7F;
    bit = vtag & 0x1F;
    sc->shadow_vfta[index] &= ~(1 << bit);
    --sc->num_vlans;
} /* ixv_if_unregister_vlan */

/************************************************************************
 * ixv_if_enable_intr
 ************************************************************************/
static void
ixv_if_enable_intr(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ixgbe_hw *hw = &sc->hw;
    struct ix_rx_queue *que = sc->rx_queues;
    u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

    IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);

    mask = IXGBE_EIMS_ENABLE_MASK;
    mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
    IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);

    for (int i = 0; i < sc->num_rx_queues; i++, que++)
        ixv_enable_queue(sc, que->msix);

    IXGBE_WRITE_FLUSH(hw);
} /* ixv_if_enable_intr */

/************************************************************************
 * ixv_if_disable_intr
 ************************************************************************/
static void
ixv_if_disable_intr(if_ctx_t ctx)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    IXGBE_WRITE_REG(&sc->hw, IXGBE_VTEIAC, 0);
    IXGBE_WRITE_REG(&sc->hw, IXGBE_VTEIMC, ~0);
    IXGBE_WRITE_FLUSH(&sc->hw);
} /* ixv_if_disable_intr */

/************************************************************************
 * ixv_if_rx_queue_intr_enable
 ************************************************************************/
static int
ixv_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
{
    struct ixgbe_softc *sc = iflib_get_softc(ctx);
    struct ix_rx_queue *que = &sc->rx_queues[rxqid];

    ixv_enable_queue(sc, que->rxr.me);

    return (0);
} /* ixv_if_rx_queue_intr_enable */

/************************************************************************
 * ixv_set_ivar
 *
 *   Setup the correct IVAR register for a particular MSI-X interrupt
 *     - entry is the register array entry
 *     - vector is the MSI-X vector for this queue
 *     - type is RX/TX/MISC
 ************************************************************************/
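/*
 * Layout note: each VTIVAR register holds the RX and TX vector numbers
 * for two queue entries (index = 16 * (entry & 1) + 8 * type), so, for
 * example, entry 0's RX vector occupies bits 7:0 of VTIVAR(0) and its
 * TX vector bits 15:8.
 */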
1677 static void
ixv_set_ivar(struct ixgbe_softc * sc,u8 entry,u8 vector,s8 type)1678 ixv_set_ivar(struct ixgbe_softc *sc, u8 entry, u8 vector, s8 type)
1679 {
1680 struct ixgbe_hw *hw = &sc->hw;
1681 u32 ivar, index;
1682
1683 vector |= IXGBE_IVAR_ALLOC_VAL;
1684
1685 if (type == -1) { /* MISC IVAR */
1686 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
1687 ivar &= ~0xFF;
1688 ivar |= vector;
1689 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
1690 } else { /* RX/TX IVARS */
1691 index = (16 * (entry & 1)) + (8 * type);
1692 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
1693 ivar &= ~(0xFF << index);
1694 ivar |= (vector << index);
1695 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
1696 }
1697 } /* ixv_set_ivar */
1698
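/*
 * Worked example (illustrative only): each 32-bit VTIVAR register packs
 * four 8-bit entries, covering two queues, with RX in the lower byte of
 * each pair and TX in the byte above it.  For entry = 3, type = 1 (TX),
 * vector = 2:
 *
 *     index    = 16 * (3 & 1) + 8 * 1 = 24
 *     register = VTIVAR(3 >> 1)       = VTIVAR(1)
 *
 * so bits 31:24 of VTIVAR(1) are loaded with (2 | IXGBE_IVAR_ALLOC_VAL).
 */
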
/************************************************************************
 * ixv_configure_ivars
 ************************************************************************/
static void
ixv_configure_ivars(struct ixgbe_softc *sc)
{
        struct ix_rx_queue *que = sc->rx_queues;

        MPASS(sc->num_rx_queues == sc->num_tx_queues);

        for (int i = 0; i < sc->num_rx_queues; i++, que++) {
                /* First the RX queue entry */
                ixv_set_ivar(sc, i, que->msix, 0);
                /* ... and the TX */
                ixv_set_ivar(sc, i, que->msix, 1);
                /* Set an initial value in EITR */
                IXGBE_WRITE_REG(&sc->hw, IXGBE_VTEITR(que->msix),
                    IXGBE_EITR_DEFAULT);
        }

        /* For the mailbox interrupt */
        ixv_set_ivar(sc, 1, sc->vector, -1);
} /* ixv_configure_ivars */

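/*
 * Resulting mapping, as a sketch (assuming two queue pairs): queue 0's RX
 * and TX interrupts both target vector rx_queues[0].msix via IVAR entry 0,
 * queue 1's target rx_queues[1].msix via entry 1, and the mailbox/link
 * ("misc") interrupt targets sc->vector through VTIVAR_MISC.
 */
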
/************************************************************************
 * ixv_save_stats
 *
 *   The VF stats registers never have a truly virgin
 *   starting point, so this routine establishes an
 *   artificial one, marking ground zero at attach time.
 ************************************************************************/
static void
ixv_save_stats(struct ixgbe_softc *sc)
{
        if (sc->stats.vf.vfgprc || sc->stats.vf.vfgptc) {
                sc->stats.vf.saved_reset_vfgprc +=
                    sc->stats.vf.vfgprc - sc->stats.vf.base_vfgprc;
                sc->stats.vf.saved_reset_vfgptc +=
                    sc->stats.vf.vfgptc - sc->stats.vf.base_vfgptc;
                sc->stats.vf.saved_reset_vfgorc +=
                    sc->stats.vf.vfgorc - sc->stats.vf.base_vfgorc;
                sc->stats.vf.saved_reset_vfgotc +=
                    sc->stats.vf.vfgotc - sc->stats.vf.base_vfgotc;
                sc->stats.vf.saved_reset_vfmprc +=
                    sc->stats.vf.vfmprc - sc->stats.vf.base_vfmprc;
        }
} /* ixv_save_stats */

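/*
 * Numeric sketch (illustrative values): if base_vfgprc was 1000 when the
 * baseline was last taken and vfgprc has since reached 1500, a reset adds
 * the 500-packet delta to saved_reset_vfgprc, so the packets counted
 * before the reset are not lost when the counters are re-based.
 */
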
/************************************************************************
 * ixv_init_stats
 ************************************************************************/
static void
ixv_init_stats(struct ixgbe_softc *sc)
{
        struct ixgbe_hw *hw = &sc->hw;

        sc->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
        sc->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
        sc->stats.vf.last_vfgorc |=
            (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);

        sc->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
        sc->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
        sc->stats.vf.last_vfgotc |=
            (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);

        sc->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

        sc->stats.vf.base_vfgprc = sc->stats.vf.last_vfgprc;
        sc->stats.vf.base_vfgorc = sc->stats.vf.last_vfgorc;
        sc->stats.vf.base_vfgptc = sc->stats.vf.last_vfgptc;
        sc->stats.vf.base_vfgotc = sc->stats.vf.last_vfgotc;
        sc->stats.vf.base_vfmprc = sc->stats.vf.last_vfmprc;
} /* ixv_init_stats */

#define UPDATE_STAT_32(reg, last, count)                \
{                                                       \
        u32 current = IXGBE_READ_REG(hw, reg);          \
        if (current < last)                             \
                count += 0x100000000LL;                 \
        last = current;                                 \
        count &= 0xFFFFFFFF00000000LL;                  \
        count |= current;                               \
}

#define UPDATE_STAT_36(lsb, msb, last, count)           \
{                                                       \
        u64 cur_lsb = IXGBE_READ_REG(hw, lsb);          \
        u64 cur_msb = IXGBE_READ_REG(hw, msb);          \
        u64 current = ((cur_msb << 32) | cur_lsb);      \
        if (current < last)                             \
                count += 0x1000000000LL;                \
        last = current;                                 \
        count &= 0xFFFFFFF000000000LL;                  \
        count |= current;                               \
}

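/*
 * Wrap-around sketch (illustrative values): the VF hardware counters are
 * free-running 32-bit (or 36-bit for the octet counts) registers, so a
 * reading smaller than the previous one means the register rolled over.
 * With UPDATE_STAT_32, if last = 0xFFFFFFF0 and the register now reads
 * 0x00000010, then current < last, so 0x100000000 is added to the 64-bit
 * running count and its low 32 bits are replaced with the new reading.
 */
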
/************************************************************************
 * ixv_update_stats - Update the board statistics counters.
 ************************************************************************/
void
ixv_update_stats(struct ixgbe_softc *sc)
{
        struct ixgbe_hw *hw = &sc->hw;
        struct ixgbevf_hw_stats *stats = &sc->stats.vf;

        UPDATE_STAT_32(IXGBE_VFGPRC, stats->last_vfgprc, stats->vfgprc);
        UPDATE_STAT_32(IXGBE_VFGPTC, stats->last_vfgptc, stats->vfgptc);
        UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
            stats->last_vfgorc, stats->vfgorc);
        UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
            stats->last_vfgotc, stats->vfgotc);
        UPDATE_STAT_32(IXGBE_VFMPRC, stats->last_vfmprc, stats->vfmprc);

        /* Fill out the OS statistics structure */
        IXGBE_SET_IPACKETS(sc, stats->vfgprc);
        IXGBE_SET_OPACKETS(sc, stats->vfgptc);
        IXGBE_SET_IBYTES(sc, stats->vfgorc);
        IXGBE_SET_OBYTES(sc, stats->vfgotc);
        IXGBE_SET_IMCASTS(sc, stats->vfmprc);
} /* ixv_update_stats */

/************************************************************************
 * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
 ************************************************************************/
static void
ixv_add_stats_sysctls(struct ixgbe_softc *sc)
{
        device_t dev = sc->dev;
        struct ix_tx_queue *tx_que = sc->tx_queues;
        struct ix_rx_queue *rx_que = sc->rx_queues;
        struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
        struct sysctl_oid *tree = device_get_sysctl_tree(dev);
        struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
        struct ixgbevf_hw_stats *stats = &sc->stats.vf;
        struct sysctl_oid *stat_node, *queue_node;
        struct sysctl_oid_list *stat_list, *queue_list;

#define QUEUE_NAME_LEN 32
        char namebuf[QUEUE_NAME_LEN];

        /* Driver Statistics */
        SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
            CTLFLAG_RD, &sc->watchdog_events, "Watchdog timeouts");
        SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
            CTLFLAG_RD, &sc->link_irq, "Link MSI-X IRQ Handled");

        for (int i = 0; i < sc->num_tx_queues; i++, tx_que++) {
                struct tx_ring *txr = &tx_que->txr;
                snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
                queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
                    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
                queue_list = SYSCTL_CHILDREN(queue_node);

                SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
                    CTLFLAG_RD, &(txr->tso_tx), "TSO Packets");
                SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
                    CTLFLAG_RD, &(txr->total_packets), "TX Packets");
        }

        for (int i = 0; i < sc->num_rx_queues; i++, rx_que++) {
                struct rx_ring *rxr = &rx_que->rxr;
                snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
                queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
                    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
                queue_list = SYSCTL_CHILDREN(queue_node);

                SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
                    CTLFLAG_RD, &(rx_que->irqs), "IRQs on queue");
                SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
                    CTLFLAG_RD, &(rxr->rx_packets), "RX packets");
                SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
                    CTLFLAG_RD, &(rxr->rx_bytes), "RX bytes");
                SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
                    CTLFLAG_RD, &(rxr->rx_discarded), "Discarded RX packets");
        }

        stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
            CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
            "VF Statistics (read from HW registers)");
        stat_list = SYSCTL_CHILDREN(stat_node);

        SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
            CTLFLAG_RD, &stats->vfgprc, "Good Packets Received");
        SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
            CTLFLAG_RD, &stats->vfgorc, "Good Octets Received");
        SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
            CTLFLAG_RD, &stats->vfmprc, "Multicast Packets Received");
        SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
            CTLFLAG_RD, &stats->vfgptc, "Good Packets Transmitted");
        SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
            CTLFLAG_RD, &stats->vfgotc, "Good Octets Transmitted");
} /* ixv_add_stats_sysctls */

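/*
 * Usage sketch (assuming the first ixv instance, unit 0): the nodes
 * created above hang off the device's sysctl tree and can be read from
 * userland with sysctl(8), e.g.:
 *
 *     sysctl dev.ixv.0.queue0.rx_packets
 *     sysctl dev.ixv.0.mac.good_pkts_rcvd
 *
 * The prefix varies with the device unit number.
 */
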
/************************************************************************
 * ixv_print_debug_info
 *
 *   Called only when the debug sysctl is set to 1.
 *   Provides a way to take a look at important statistics
 *   maintained by the driver and hardware.
 ************************************************************************/
static void
ixv_print_debug_info(struct ixgbe_softc *sc)
{
        device_t dev = sc->dev;
        struct ixgbe_hw *hw = &sc->hw;

        device_printf(dev, "Error Byte Count = %u\n",
            IXGBE_READ_REG(hw, IXGBE_ERRBC));

        device_printf(dev, "MBX IRQ Handled: %lu\n",
            (unsigned long)sc->link_irq);
} /* ixv_print_debug_info */

/************************************************************************
 * ixv_sysctl_debug
 ************************************************************************/
static int
ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
{
        struct ixgbe_softc *sc;
        int error, result;

        result = -1;
        error = sysctl_handle_int(oidp, &result, 0, req);

        if (error || !req->newptr)
                return (error);

        if (result == 1) {
                sc = (struct ixgbe_softc *)arg1;
                ixv_print_debug_info(sc);
        }

        return (error);
} /* ixv_sysctl_debug */

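/*
 * Usage sketch (assuming this handler is attached as a "debug" node under
 * the device's sysctl tree, which happens elsewhere in the driver):
 * writing 1 triggers the debug dump; any other value is accepted and
 * ignored, e.g.:
 *
 *     sysctl dev.ixv.0.debug=1
 */
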
/************************************************************************
 * ixv_init_device_features
 ************************************************************************/
static void
ixv_init_device_features(struct ixgbe_softc *sc)
{
        sc->feat_cap = IXGBE_FEATURE_NETMAP |
            IXGBE_FEATURE_VF |
            IXGBE_FEATURE_LEGACY_TX;

        /* A tad short on feature flags for VFs, atm. */
        switch (sc->hw.mac.type) {
        case ixgbe_mac_82599_vf:
                break;
        case ixgbe_mac_X540_vf:
                break;
        case ixgbe_mac_X550_vf:
        case ixgbe_mac_X550EM_x_vf:
        case ixgbe_mac_X550EM_a_vf:
                sc->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
                sc->feat_cap |= IXGBE_FEATURE_RSS;
                break;
        default:
                break;
        }

        /* Enabled by default... */
        /* Is a virtual function (VF) */
        if (sc->feat_cap & IXGBE_FEATURE_VF)
                sc->feat_en |= IXGBE_FEATURE_VF;
        /* Netmap */
        if (sc->feat_cap & IXGBE_FEATURE_NETMAP)
                sc->feat_en |= IXGBE_FEATURE_NETMAP;
        /* Receive-Side Scaling (RSS) */
        if (sc->feat_cap & IXGBE_FEATURE_RSS)
                sc->feat_en |= IXGBE_FEATURE_RSS;
        /* Needs advanced context descriptor regardless of offloads req'd */
        if (sc->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
                sc->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
} /* ixv_init_device_features */

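/*
 * Example outcome (a sketch derived from the switch above): on an X550
 * class VF, feat_cap holds NETMAP | VF | LEGACY_TX | NEEDS_CTXD | RSS,
 * and the default-enable block then sets VF, NETMAP, RSS, and NEEDS_CTXD
 * in feat_en; LEGACY_TX stays capability-only in this routine.  An 82599
 * or X540 VF gets just the common NETMAP | VF flags enabled.
 */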