1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 */
5
6 #include "opt_rss.h"
7
8 #include <sys/param.h>
9 #include <sys/systm.h>
10 #include <sys/kernel.h>
11 #include <sys/endian.h>
12 #include <sys/sockio.h>
13 #include <sys/mbuf.h>
14 #include <sys/malloc.h>
15 #include <sys/module.h>
16 #include <sys/socket.h>
17 #include <sys/sysctl.h>
18 #include <sys/smp.h>
19 #include <vm/vm.h>
20 #include <vm/pmap.h>
21
22 #include <net/ethernet.h>
23 #include <net/if.h>
24 #include <net/if_var.h>
25 #include <net/if_arp.h>
26 #include <net/if_dl.h>
27 #include <net/if_types.h>
28 #include <net/if_media.h>
29 #include <net/if_vlan_var.h>
30 #include <net/iflib.h>
31 #ifdef RSS
32 #include <net/rss_config.h>
33 #endif
34
35 #include <netinet/in_systm.h>
36 #include <netinet/in.h>
37 #include <netinet/ip.h>
38 #include <netinet/ip6.h>
39 #include <netinet6/ip6_var.h>
40 #include <netinet/udp.h>
41 #include <netinet/tcp.h>
42
43 #include <machine/bus.h>
44 #include <machine/resource.h>
45 #include <sys/bus.h>
46 #include <sys/rman.h>
47
48 #include <dev/pci/pcireg.h>
49 #include <dev/pci/pcivar.h>
50
51 #include "ifdi_if.h"
52 #include "enic.h"
53
54 #include "opt_inet.h"
55 #include "opt_inet6.h"
56
/* sysctl root for this driver: hw.enic.* */
static SYSCTL_NODE(_hw, OID_AUTO, enic, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "ENIC");
59
/*
 * PCI IDs probed by this driver: the Cisco VIC Ethernet NIC and its
 * SR-IOV virtual function.
 */
static const pci_vendor_info_t enic_vendor_info_array[] =
{
	PVID(CISCO_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET,
	    DRV_DESCRIPTION),
	PVID(CISCO_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET_VF,
	    DRV_DESCRIPTION " VF"),
	/* required last entry */

	PVID_END
};
70
/*
 * Forward declarations: iflib ifdi callbacks and internal helpers
 * defined later in this file.
 */
static void *enic_register(device_t);
static int enic_attach_pre(if_ctx_t);
static int enic_msix_intr_assign(if_ctx_t, int);

static int enic_attach_post(if_ctx_t);
static int enic_detach(if_ctx_t);

static int enic_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
static int enic_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
static void enic_queues_free(if_ctx_t);
static int enic_rxq_intr(void *);
static int enic_event_intr(void *);
static int enic_err_intr(void *);
static void enic_stop(if_ctx_t);
static void enic_init(if_ctx_t);
static void enic_multi_set(if_ctx_t);
static int enic_mtu_set(if_ctx_t, uint32_t);
static void enic_media_status(if_ctx_t, struct ifmediareq *);
static int enic_media_change(if_ctx_t);
static int enic_promisc_set(if_ctx_t, int);
static uint64_t enic_get_counter(if_ctx_t, ift_counter);
static void enic_update_admin_status(if_ctx_t);
static void enic_txq_timer(if_ctx_t, uint16_t);
static int enic_link_is_up(struct enic_softc *);
static void enic_link_status(struct enic_softc *);
static void enic_set_lladdr(struct enic_softc *);
static void enic_setup_txq_sysctl(struct vnic_wq *, int, struct sysctl_ctx_list *,
    struct sysctl_oid_list *);
static void enic_setup_rxq_sysctl(struct vnic_rq *, int, struct sysctl_ctx_list *,
    struct sysctl_oid_list *);
static void enic_setup_sysctl(struct enic_softc *);
static int enic_tx_queue_intr_enable(if_ctx_t, uint16_t);
static int enic_rx_queue_intr_enable(if_ctx_t, uint16_t);
static void enic_enable_intr(struct enic_softc *, int);
static void enic_disable_intr(struct enic_softc *, int);
static void enic_intr_enable_all(if_ctx_t);
static void enic_intr_disable_all(if_ctx_t);
static int enic_dev_open(struct enic *);
static int enic_dev_init(struct enic *);
static void *enic_alloc_consistent(void *, size_t, bus_addr_t *,
    struct iflib_dma_info *, u8 *);
static void enic_free_consistent(void *, size_t, void *, bus_addr_t,
    struct iflib_dma_info *);
static int enic_pci_mapping(struct enic_softc *);
static void enic_pci_mapping_free(struct enic_softc *);
static int enic_dev_wait(struct vnic_dev *, int (*) (struct vnic_dev *, int),
    int (*) (struct vnic_dev *, int *), int arg);
static int enic_map_bar(struct enic_softc *, struct enic_bar_info *, int, bool);
static void enic_update_packet_filter(struct enic *enic);
static bool enic_if_needs_restart(if_ctx_t, enum iflib_restart_event);
121
/* Memory barrier direction selectors (no uses visible in this chunk). */
typedef enum {
	ENIC_BARRIER_RD,
	ENIC_BARRIER_WR,
	ENIC_BARRIER_RDWR,
} enic_barrier_t;
127
/*
 * Newbus device methods.  Everything except device_register is routed
 * straight to the generic iflib entry points.
 */
static device_method_t enic_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, enic_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
	DEVMETHOD(device_suspend, iflib_device_suspend),
	DEVMETHOD(device_resume, iflib_device_resume),
	DEVMETHOD_END
};
139
/* Newbus driver glue and module metadata. */
static driver_t enic_driver = {
	"enic", enic_methods, sizeof(struct enic_softc)
};

DRIVER_MODULE(enic, pci, enic_driver, 0, 0);
IFLIB_PNP_INFO(pci, enic, enic_vendor_info_array);
MODULE_VERSION(enic, 2);

/* Hard dependencies: PCI bus, Ethernet layer, and the iflib framework. */
MODULE_DEPEND(enic, pci, 1, 1, 1);
MODULE_DEPEND(enic, ether, 1, 1, 1);
MODULE_DEPEND(enic, iflib, 1, 1, 1);
151
/* iflib ifdi method table: maps ifdi_* entry points to this driver. */
static device_method_t enic_iflib_methods[] = {
	DEVMETHOD(ifdi_tx_queues_alloc, enic_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, enic_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, enic_queues_free),

	DEVMETHOD(ifdi_attach_pre, enic_attach_pre),
	DEVMETHOD(ifdi_attach_post, enic_attach_post),
	DEVMETHOD(ifdi_detach, enic_detach),

	DEVMETHOD(ifdi_init, enic_init),
	DEVMETHOD(ifdi_stop, enic_stop),
	DEVMETHOD(ifdi_multi_set, enic_multi_set),
	DEVMETHOD(ifdi_mtu_set, enic_mtu_set),
	DEVMETHOD(ifdi_media_status, enic_media_status),
	DEVMETHOD(ifdi_media_change, enic_media_change),
	DEVMETHOD(ifdi_promisc_set, enic_promisc_set),
	DEVMETHOD(ifdi_get_counter, enic_get_counter),
	DEVMETHOD(ifdi_update_admin_status, enic_update_admin_status),
	DEVMETHOD(ifdi_timer, enic_txq_timer),

	DEVMETHOD(ifdi_tx_queue_intr_enable, enic_tx_queue_intr_enable),
	DEVMETHOD(ifdi_rx_queue_intr_enable, enic_rx_queue_intr_enable),
	DEVMETHOD(ifdi_intr_enable, enic_intr_enable_all),
	DEVMETHOD(ifdi_intr_disable, enic_intr_disable_all),
	DEVMETHOD(ifdi_msix_intr_assign, enic_msix_intr_assign),

	DEVMETHOD(ifdi_needs_restart, enic_if_needs_restart),

	DEVMETHOD_END
};
182
static driver_t enic_iflib_driver = {
	"enic", enic_iflib_methods, sizeof(struct enic_softc)
};

/* TX/RX fast-path entry points, defined in the companion txrx file. */
extern struct if_txrx enic_txrx;

/* Shared-context template handed to iflib from enic_register(). */
static struct if_shared_ctx enic_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_q_align = 512,

	.isc_tx_maxsize = ENIC_TX_MAX_PKT_SIZE,
	.isc_tx_maxsegsize = PAGE_SIZE,

	/*
	 * These values are used to configure the busdma tag used for receive
	 * descriptors. Each receive descriptor only points to one buffer.
	 */
	.isc_rx_maxsize = ENIC_DEFAULT_RX_MAX_PKT_SIZE,	/* One buf per
							 * descriptor */
	.isc_rx_nsegments = 1,	/* One mapping per descriptor */
	.isc_rx_maxsegsize = ENIC_DEFAULT_RX_MAX_PKT_SIZE,
	.isc_admin_intrcnt = 2,	/* event + error vectors; see
				 * enic_msix_intr_assign() */
	.isc_vendor_info = enic_vendor_info_array,
	.isc_driver_version = "1",
	.isc_driver = &enic_iflib_driver,
	.isc_flags = IFLIB_HAS_RXCQ | IFLIB_HAS_TXCQ | IFLIB_SKIP_MSIX,

	/*
	 * Number of receive queues per receive queue set, with associated
	 * descriptor settings for each.
	 */
	.isc_nrxqs = 2,
	.isc_nfl = 1,		/* one free list for each receive command
				 * queue */
	.isc_nrxd_min = {16, 16},
	.isc_nrxd_max = {2048, 2048},
	.isc_nrxd_default = {64, 64},

	/*
	 * Number of transmit queues per transmit queue set, with associated
	 * descriptor settings for each.
	 */
	.isc_ntxqs = 2,
	.isc_ntxd_min = {16, 16},
	.isc_ntxd_max = {2048, 2048},
	.isc_ntxd_default = {64, 64},
};
231
232 static void *
enic_register(device_t dev)233 enic_register(device_t dev)
234 {
235 return (&enic_sctx_init);
236 }
237
238 static int
enic_allocate_msix(struct enic_softc * softc)239 enic_allocate_msix(struct enic_softc *softc) {
240 if_ctx_t ctx;
241 if_softc_ctx_t scctx;
242 if_shared_ctx_t sctx;
243 device_t dev;
244 cpuset_t cpus;
245 int queues, vectors, requested;
246 int err = 0;
247
248 dev = softc->dev;
249 ctx = softc->ctx;
250 scctx = softc->scctx;
251 sctx = iflib_get_sctx(ctx);
252
253 if (bus_get_cpus(dev, INTR_CPUS, sizeof(cpus), &cpus) != 0) {
254 device_printf(dev, "Unable to fetch CPU list\n");
255 CPU_COPY(&all_cpus, &cpus);
256 }
257
258
259 queues = CPU_COUNT(&cpus);
260 queues = imin(queues, scctx->isc_nrxqsets);
261 queues = imin(queues, scctx->isc_ntxqsets);
262 requested = queues * 2 + sctx->isc_admin_intrcnt;
263 scctx->isc_nrxqsets = queues;
264 scctx->isc_ntxqsets = queues;
265
266 vectors = requested;
267 if ((err = pci_alloc_msix(dev, &vectors)) != 0) {
268 device_printf(dev,
269 "failed to allocate %d MSI-X vectors, err: %d\n", requested,
270 err);
271 err = 1;
272 goto enic_allocate_msix_out;
273 } else {
274 if (vectors != requested) {
275 device_printf(dev,
276 "Unable to allocate sufficient MSI-X vectors "
277 "(got %d, need %d)\n", requested, vectors);
278 pci_release_msi(dev);
279 err = 1;
280 goto enic_allocate_msix_out;
281 }
282 }
283
284 device_printf(dev, "Using MSI-X interrupts with %d vectors\n",
285 vectors);
286
287 scctx->isc_intr = IFLIB_INTR_MSIX;
288 scctx->isc_vectors = vectors;
289
290 enic_allocate_msix_out:
291 return (err);
292
293 }
294
/*
 * Default adaptive interrupt-moderation ranges, selected by link speed
 * in enic_set_rx_coal_setting().
 */
static struct enic_intr_mod_range mod_range[ENIC_MAX_LINK_SPEEDS] = {
	{0, 0},	/* 0 - 4 Gbps */
	{0, 3},	/* 4 - 10 Gbps */
	{3, 6},	/* 10 - 40 Gbps */
};
300
enic_set_rx_coal_setting(struct enic * enic)301 static void enic_set_rx_coal_setting(struct enic *enic)
302 {
303 unsigned int speed;
304 int index = -1;
305 struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
306
307 /* 1. Read the link speed from fw
308 * 2. Pick the default range for the speed
309 * 3. Update it in enic->rx_coalesce_setting
310 */
311 speed = vnic_dev_port_speed(enic->vdev);
312 if (ENIC_LINK_SPEED_10G < speed)
313 index = ENIC_LINK_40G_INDEX;
314 else if (ENIC_LINK_SPEED_4G < speed)
315 index = ENIC_LINK_10G_INDEX;
316 else
317 index = ENIC_LINK_4G_INDEX;
318
319 rx_coal->small_pkt_range_start = mod_range[index].small_pkt_range_start;
320 rx_coal->large_pkt_range_start = mod_range[index].large_pkt_range_start;
321 rx_coal->range_end = ENIC_RX_COALESCE_RANGE_END;
322
323 /* Start with the value provided by UCSM */
324 for (index = 0; index < enic->rq_count; index++)
325 enic->cq[index].cur_rx_coal_timeval =
326 enic->config.intr_timer_usec;
327
328 rx_coal->use_adaptive_rx_coalesce = 1;
329 }
330
331 static int
enic_attach_pre(if_ctx_t ctx)332 enic_attach_pre(if_ctx_t ctx)
333 {
334 if_softc_ctx_t scctx;
335 struct enic_softc *softc;
336 struct vnic_dev *vdev;
337 struct enic *enic;
338 device_t dev;
339
340 int err = -1;
341 int rc = 0;
342 int i;
343 u64 a0 = 0, a1 = 0;
344 int wait = 1000;
345 struct vnic_stats *stats;
346 int ret;
347
348 dev = iflib_get_dev(ctx);
349 softc = iflib_get_softc(ctx);
350 softc->dev = dev;
351 softc->ctx = ctx;
352 softc->sctx = iflib_get_sctx(ctx);
353 softc->scctx = iflib_get_softc_ctx(ctx);
354 softc->ifp = iflib_get_ifp(ctx);
355 softc->media = iflib_get_media(ctx);
356 softc->mta = malloc(sizeof(u8) * ETHER_ADDR_LEN *
357 ENIC_MAX_MULTICAST_ADDRESSES, M_DEVBUF,
358 M_NOWAIT | M_ZERO);
359 if (softc->mta == NULL)
360 return (ENOMEM);
361 scctx = softc->scctx;
362
363 mtx_init(&softc->enic_lock, "ENIC Lock", NULL, MTX_DEF);
364
365 pci_enable_busmaster(softc->dev);
366 if (enic_pci_mapping(softc))
367 return (ENXIO);
368
369 enic = &softc->enic;
370 enic->softc = softc;
371 vdev = &softc->vdev;
372 vdev->softc = softc;
373 enic->vdev = vdev;
374 vdev->priv = enic;
375
376 ENIC_LOCK(softc);
377 vnic_dev_register(vdev, &softc->mem, 1);
378 enic->vdev = vdev;
379 vnic_dev_cmd_init(enic->vdev);
380
381 vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
382
383 vnic_dev_cmd(vdev, CMD_INIT_v1, &a0, &a1, wait);
384 vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
385
386 bcopy((u_int8_t *) & a0, softc->mac_addr, ETHER_ADDR_LEN);
387 iflib_set_mac(ctx, softc->mac_addr);
388
389 vnic_register_cbacks(enic->vdev, enic_alloc_consistent,
390 enic_free_consistent);
391
392 /*
393 * Allocate the consistent memory for stats and counters upfront so
394 * both primary and secondary processes can access them.
395 */
396 ENIC_UNLOCK(softc);
397 err = vnic_dev_alloc_stats_mem(enic->vdev);
398 ENIC_LOCK(softc);
399 if (err) {
400 dev_err(enic, "Failed to allocate cmd memory, aborting\n");
401 goto err_out_unregister;
402 }
403 vnic_dev_stats_clear(enic->vdev);
404 ret = vnic_dev_stats_dump(enic->vdev, &stats);
405 if (ret) {
406 dev_err(enic, "Error in getting stats\n");
407 goto err_out_unregister;
408 }
409 err = vnic_dev_alloc_counter_mem(enic->vdev);
410 if (err) {
411 dev_err(enic, "Failed to allocate counter memory, aborting\n");
412 goto err_out_unregister;
413 }
414
415 /* Issue device open to get device in known state */
416 err = enic_dev_open(enic);
417 if (err) {
418 dev_err(enic, "vNIC dev open failed, aborting\n");
419 goto err_out_unregister;
420 }
421
422 /* Set ingress vlan rewrite mode before vnic initialization */
423 enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_UNTAG_DEFAULT_VLAN;
424 enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN;
425 err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
426 enic->ig_vlan_rewrite_mode);
427 if (err) {
428 dev_err(enic,
429 "Failed to set ingress vlan rewrite mode, aborting.\n");
430 goto err_out_dev_close;
431 }
432
433 /*
434 * Issue device init to initialize the vnic-to-switch link. We'll
435 * start with carrier off and wait for link UP notification later to
436 * turn on carrier. We don't need to wait here for the
437 * vnic-to-switch link initialization to complete; link UP
438 * notification is the indication that the process is complete.
439 */
440
441 err = vnic_dev_init(enic->vdev, 0);
442 if (err) {
443 dev_err(enic, "vNIC dev init failed, aborting\n");
444 goto err_out_dev_close;
445 }
446
447 err = enic_dev_init(enic);
448 if (err) {
449 dev_err(enic, "Device initialization failed, aborting\n");
450 goto err_out_dev_close;
451 }
452 ENIC_UNLOCK(softc);
453
454 enic->port_mtu = vnic_dev_mtu(enic->vdev);
455
456 softc->scctx = iflib_get_softc_ctx(ctx);
457 scctx = softc->scctx;
458 scctx->isc_txrx = &enic_txrx;
459 scctx->isc_capabilities = scctx->isc_capenable = \
460 IFCAP_HWCSUM;
461 scctx->isc_tx_csum_flags = 0;
462 if_setmtu(softc->ifp, enic->config.mtu);
463 scctx->isc_max_frame_size = enic->config.mtu + ETHER_HDR_LEN + \
464 ETHER_CRC_LEN;
465 scctx->isc_nrxqsets_max = enic->conf_rq_count;
466 scctx->isc_ntxqsets_max = enic->conf_wq_count;
467 scctx->isc_nrxqsets = enic->conf_rq_count;
468 scctx->isc_ntxqsets = enic->conf_wq_count;
469 for (i = 0; i < enic->conf_wq_count; i++) {
470 scctx->isc_ntxd[i] = enic->config.wq_desc_count;
471 scctx->isc_txqsizes[i] = sizeof(struct cq_enet_wq_desc)
472 * scctx->isc_ntxd[i];
473 scctx->isc_ntxd[i + enic->conf_wq_count] =
474 enic->config.wq_desc_count;
475 scctx->isc_txqsizes[i + enic->conf_wq_count] =
476 sizeof(struct cq_desc) * scctx->isc_ntxd[i +
477 enic->conf_wq_count];
478 }
479 for (i = 0; i < enic->conf_rq_count; i++) {
480 scctx->isc_nrxd[i] = enic->config.rq_desc_count;
481 scctx->isc_rxqsizes[i] = sizeof(struct cq_enet_rq_desc) *
482 scctx->isc_nrxd[i];
483 scctx->isc_nrxd[i + enic->conf_rq_count] =
484 enic->config.rq_desc_count;
485 scctx->isc_rxqsizes[i + enic->conf_rq_count] = sizeof(struct
486 cq_desc) * scctx->isc_nrxd[i + enic->conf_rq_count];
487 }
488 scctx->isc_tx_nsegments = 31;
489
490 scctx->isc_msix_bar = -1;
491
492 ifmedia_add(softc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
493 ifmedia_add(softc->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
494 ifmedia_add(softc->media, IFM_ETHER | IFM_10_FL, 0, NULL);
495
496 /*
497 * Allocate the CQ here since TX is called first before RX.
498 */
499 if (softc->enic.cq == NULL)
500 softc->enic.cq = malloc(sizeof(struct vnic_cq) *
501 softc->enic.wq_count + softc->enic.rq_count, M_DEVBUF,
502 M_NOWAIT | M_ZERO);
503 if (softc->enic.cq == NULL)
504 return (ENOMEM);
505
506 /*
507 * Allocate the consistent memory for stats and counters upfront so
508 * both primary and secondary processes can access them.
509 */
510 err = vnic_dev_alloc_stats_mem(enic->vdev);
511 if (err) {
512 dev_err(enic, "Failed to allocate cmd memory, aborting\n");
513 goto err_out_dev_close;
514 }
515
516 err = enic_allocate_msix(softc);
517 if (err) {
518 dev_err(enic, "Failed to allocate MSIX, aborting\n");
519 goto err_out_dev_close;
520 }
521
522 return (rc);
523
524 err_out_dev_close:
525 vnic_dev_close(enic->vdev);
526 vnic_dev_deinit_devcmd2(enic->vdev);
527 err_out_unregister:
528 free(softc->vdev.devcmd, M_DEVBUF);
529 free(softc->enic.intr_queues, M_DEVBUF);
530 free(softc->enic.cq, M_DEVBUF);
531 free(softc->mta, M_DEVBUF);
532 rc = -1;
533 pci_disable_busmaster(softc->dev);
534 enic_pci_mapping_free(softc);
535 mtx_destroy(&softc->enic_lock);
536 return (rc);
537 }
538
539 static int
enic_msix_intr_assign(if_ctx_t ctx,int msix)540 enic_msix_intr_assign(if_ctx_t ctx, int msix)
541 {
542 struct enic_softc *softc;
543 struct enic *enic;
544 if_softc_ctx_t scctx;
545
546 int error;
547 int i;
548 char irq_name[16];
549
550 softc = iflib_get_softc(ctx);
551 enic = &softc->enic;
552 scctx = softc->scctx;
553
554 ENIC_LOCK(softc);
555 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSIX);
556 ENIC_UNLOCK(softc);
557
558 enic->intr_queues = malloc(sizeof(*enic->intr_queues) *
559 enic->conf_intr_count, M_DEVBUF, M_NOWAIT | M_ZERO);
560 enic->intr = malloc(sizeof(*enic->intr) * msix, M_DEVBUF, M_NOWAIT
561 | M_ZERO);
562 for (i = 0; i < scctx->isc_nrxqsets; i++) {
563 snprintf(irq_name, sizeof(irq_name), "erxq%d:%d", i,
564 device_get_unit(softc->dev));
565
566 error = iflib_irq_alloc_generic(ctx,
567 &enic->intr_queues[i].intr_irq, i + 1, IFLIB_INTR_RX,
568 enic_rxq_intr, &enic->rq[i], i, irq_name);
569 if (error) {
570 device_printf(iflib_get_dev(ctx),
571 "Failed to register rxq %d interrupt handler\n", i);
572 return (error);
573 }
574 enic->intr[i].index = i;
575 enic->intr[i].vdev = enic->vdev;
576 ENIC_LOCK(softc);
577 enic->intr[i].ctrl = vnic_dev_get_res(enic->vdev,
578 RES_TYPE_INTR_CTRL, i);
579 vnic_intr_mask(&enic->intr[i]);
580 ENIC_UNLOCK(softc);
581 }
582
583 for (i = scctx->isc_nrxqsets; i < scctx->isc_nrxqsets + scctx->isc_ntxqsets; i++) {
584 snprintf(irq_name, sizeof(irq_name), "etxq%d:%d", i -
585 scctx->isc_nrxqsets, device_get_unit(softc->dev));
586
587 iflib_softirq_alloc_generic(ctx,
588 &enic->intr_queues[i].intr_irq, IFLIB_INTR_TX,
589 &enic->wq[i - scctx->isc_nrxqsets], i - scctx->isc_nrxqsets,
590 irq_name);
591
592 enic->intr[i].index = i;
593 enic->intr[i].vdev = enic->vdev;
594 ENIC_LOCK(softc);
595 enic->intr[i].ctrl = vnic_dev_get_res(enic->vdev,
596 RES_TYPE_INTR_CTRL, i);
597 vnic_intr_mask(&enic->intr[i]);
598 ENIC_UNLOCK(softc);
599 }
600
601 i = scctx->isc_nrxqsets + scctx->isc_ntxqsets;
602 error = iflib_irq_alloc_generic(ctx, &softc->enic_event_intr_irq,
603 i + 1, IFLIB_INTR_ADMIN, enic_event_intr, softc, 0, "event");
604 if (error) {
605 device_printf(iflib_get_dev(ctx),
606 "Failed to register event interrupt handler\n");
607 return (error);
608 }
609
610 enic->intr[i].index = i;
611 enic->intr[i].vdev = enic->vdev;
612 ENIC_LOCK(softc);
613 enic->intr[i].ctrl = vnic_dev_get_res(enic->vdev, RES_TYPE_INTR_CTRL,
614 i);
615 vnic_intr_mask(&enic->intr[i]);
616 ENIC_UNLOCK(softc);
617
618 i++;
619 error = iflib_irq_alloc_generic(ctx, &softc->enic_err_intr_irq,
620 i + 1, IFLIB_INTR_ADMIN, enic_err_intr, softc, 0, "err");
621 if (error) {
622 device_printf(iflib_get_dev(ctx),
623 "Failed to register event interrupt handler\n");
624 return (error);
625 }
626 enic->intr[i].index = i;
627 enic->intr[i].vdev = enic->vdev;
628 ENIC_LOCK(softc);
629 enic->intr[i].ctrl = vnic_dev_get_res(enic->vdev, RES_TYPE_INTR_CTRL,
630 i);
631 vnic_intr_mask(&enic->intr[i]);
632 ENIC_UNLOCK(softc);
633
634 enic->intr_count = msix;
635
636 return (0);
637 }
638
639 static void
enic_free_irqs(struct enic_softc * softc)640 enic_free_irqs(struct enic_softc *softc)
641 {
642 if_softc_ctx_t scctx;
643
644 struct enic *enic;
645 int i;
646
647 scctx = softc->scctx;
648 enic = &softc->enic;
649
650 for (i = 0; i < scctx->isc_nrxqsets + scctx->isc_ntxqsets; i++) {
651 iflib_irq_free(softc->ctx, &enic->intr_queues[i].intr_irq);
652 }
653
654 iflib_irq_free(softc->ctx, &softc->enic_event_intr_irq);
655 iflib_irq_free(softc->ctx, &softc->enic_err_intr_irq);
656 free(enic->intr_queues, M_DEVBUF);
657 free(enic->intr, M_DEVBUF);
658 }
659
660 static int
enic_attach_post(if_ctx_t ctx)661 enic_attach_post(if_ctx_t ctx)
662 {
663 struct enic *enic;
664 struct enic_softc *softc;
665 int error = 0;
666
667 softc = iflib_get_softc(ctx);
668 enic = &softc->enic;
669
670 enic_setup_sysctl(softc);
671
672 enic_init_vnic_resources(enic);
673 enic_set_rx_coal_setting(enic);
674 enic_setup_finish(enic);
675
676 ifmedia_add(softc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
677 ifmedia_set(softc->media, IFM_ETHER | IFM_AUTO);
678
679 return (error);
680 }
681
682 static int
enic_detach(if_ctx_t ctx)683 enic_detach(if_ctx_t ctx)
684 {
685 struct enic_softc *softc;
686 struct enic *enic;
687
688 softc = iflib_get_softc(ctx);
689 enic = &softc->enic;
690
691 vnic_dev_notify_unset(enic->vdev);
692
693 enic_free_irqs(softc);
694
695 ENIC_LOCK(softc);
696 vnic_dev_deinit(enic->vdev);
697 vnic_dev_close(enic->vdev);
698 vnic_dev_deinit_devcmd2(enic->vdev);
699 free(softc->vdev.devcmd, M_DEVBUF);
700 pci_disable_busmaster(softc->dev);
701 enic_pci_mapping_free(softc);
702 ENIC_UNLOCK(softc);
703
704 return 0;
705 }
706
707 static int
enic_tx_queues_alloc(if_ctx_t ctx,caddr_t * vaddrs,uint64_t * paddrs,int ntxqs,int ntxqsets)708 enic_tx_queues_alloc(if_ctx_t ctx, caddr_t * vaddrs, uint64_t * paddrs,
709 int ntxqs, int ntxqsets)
710 {
711 struct enic_softc *softc;
712 int q;
713
714 softc = iflib_get_softc(ctx);
715 /* Allocate the array of transmit queues */
716 softc->enic.wq = malloc(sizeof(struct vnic_wq) *
717 ntxqsets, M_DEVBUF, M_NOWAIT | M_ZERO);
718 if (softc->enic.wq == NULL)
719 return (ENOMEM);
720
721 /* Initialize driver state for each transmit queue */
722
723 /*
724 * Allocate queue state that is shared with the device. This check
725 * and call is performed in both enic_tx_queues_alloc() and
726 * enic_rx_queues_alloc() so that we don't have to care which order
727 * iflib invokes those routines in.
728 */
729
730 /* Record descriptor ring vaddrs and paddrs */
731 ENIC_LOCK(softc);
732 for (q = 0; q < ntxqsets; q++) {
733 struct vnic_wq *wq;
734 struct vnic_cq *cq;
735 unsigned int cq_wq;
736
737 wq = &softc->enic.wq[q];
738 cq_wq = enic_cq_wq(&softc->enic, q);
739 cq = &softc->enic.cq[cq_wq];
740
741 /* Completion ring */
742 wq->vdev = softc->enic.vdev;
743 wq->index = q;
744 wq->ctrl = vnic_dev_get_res(softc->enic.vdev, RES_TYPE_WQ,
745 wq->index);
746 vnic_wq_disable(wq);
747
748 wq->ring.desc_size = sizeof(struct wq_enet_desc);
749 wq->ring.desc_count = softc->scctx->isc_ntxd[q];
750 wq->ring.desc_avail = wq->ring.desc_count - 1;
751 wq->ring.last_count = wq->ring.desc_count;
752 wq->head_idx = 0;
753 wq->tail_idx = 0;
754
755 wq->ring.descs = vaddrs[q * ntxqs + 0];
756 wq->ring.base_addr = paddrs[q * ntxqs + 0];
757
758 /* Command ring */
759 cq->vdev = softc->enic.vdev;
760 cq->index = cq_wq;
761 cq->ctrl = vnic_dev_get_res(softc->enic.vdev,
762 RES_TYPE_CQ, cq->index);
763 cq->ring.desc_size = sizeof(struct cq_enet_wq_desc);
764 cq->ring.desc_count = softc->scctx->isc_ntxd[q];
765 cq->ring.desc_avail = cq->ring.desc_count - 1;
766
767 cq->ring.descs = vaddrs[q * ntxqs + 1];
768 cq->ring.base_addr = paddrs[q * ntxqs + 1];
769
770 }
771
772 ENIC_UNLOCK(softc);
773
774 return (0);
775 }
776
777
778
779 static int
enic_rx_queues_alloc(if_ctx_t ctx,caddr_t * vaddrs,uint64_t * paddrs,int nrxqs,int nrxqsets)780 enic_rx_queues_alloc(if_ctx_t ctx, caddr_t * vaddrs, uint64_t * paddrs,
781 int nrxqs, int nrxqsets)
782 {
783 struct enic_softc *softc;
784 int q;
785
786 softc = iflib_get_softc(ctx);
787 /* Allocate the array of receive queues */
788 softc->enic.rq = malloc(sizeof(struct vnic_rq) * nrxqsets, M_DEVBUF,
789 M_NOWAIT | M_ZERO);
790 if (softc->enic.rq == NULL)
791 return (ENOMEM);
792
793 /* Initialize driver state for each receive queue */
794
795 /*
796 * Allocate queue state that is shared with the device. This check
797 * and call is performed in both enic_tx_queues_alloc() and
798 * enic_rx_queues_alloc() so that we don't have to care which order
799 * iflib invokes those routines in.
800 */
801
802 /* Record descriptor ring vaddrs and paddrs */
803 ENIC_LOCK(softc);
804 for (q = 0; q < nrxqsets; q++) {
805 struct vnic_rq *rq;
806 struct vnic_cq *cq;
807 unsigned int cq_rq;
808
809 rq = &softc->enic.rq[q];
810 cq_rq = enic_cq_rq(&softc->enic, q);
811 cq = &softc->enic.cq[cq_rq];
812
813 /* Completion ring */
814 cq->vdev = softc->enic.vdev;
815 cq->index = cq_rq;
816 cq->ctrl = vnic_dev_get_res(softc->enic.vdev, RES_TYPE_CQ,
817 cq->index);
818 cq->ring.desc_size = sizeof(struct cq_enet_wq_desc);
819 cq->ring.desc_count = softc->scctx->isc_nrxd[1];
820 cq->ring.desc_avail = cq->ring.desc_count - 1;
821
822 cq->ring.descs = vaddrs[q * nrxqs + 0];
823 cq->ring.base_addr = paddrs[q * nrxqs + 0];
824
825 /* Command ring(s) */
826 rq->vdev = softc->enic.vdev;
827
828 rq->index = q;
829 rq->ctrl = vnic_dev_get_res(softc->enic.vdev,
830 RES_TYPE_RQ, rq->index);
831 vnic_rq_disable(rq);
832
833 rq->ring.desc_size = sizeof(struct rq_enet_desc);
834 rq->ring.desc_count = softc->scctx->isc_nrxd[0];
835 rq->ring.desc_avail = rq->ring.desc_count - 1;
836
837 rq->ring.descs = vaddrs[q * nrxqs + 1];
838 rq->ring.base_addr = paddrs[q * nrxqs + 1];
839 rq->need_initial_post = true;
840 }
841
842 ENIC_UNLOCK(softc);
843
844 return (0);
845 }
846
847 static void
enic_queues_free(if_ctx_t ctx)848 enic_queues_free(if_ctx_t ctx)
849 {
850 struct enic_softc *softc;
851 softc = iflib_get_softc(ctx);
852
853 free(softc->enic.rq, M_DEVBUF);
854 free(softc->enic.wq, M_DEVBUF);
855 free(softc->enic.cq, M_DEVBUF);
856 }
857
858 static int
enic_rxq_intr(void * rxq)859 enic_rxq_intr(void *rxq)
860 {
861 struct vnic_rq *rq;
862 if_t ifp;
863
864 rq = (struct vnic_rq *)rxq;
865 ifp = iflib_get_ifp(rq->vdev->softc->ctx);
866 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
867 return (FILTER_HANDLED);
868
869 return (FILTER_SCHEDULE_THREAD);
870 }
871
872 static int
enic_event_intr(void * vsc)873 enic_event_intr(void *vsc)
874 {
875 struct enic_softc *softc;
876 struct enic *enic;
877 uint32_t mtu;
878
879 softc = vsc;
880 enic = &softc->enic;
881
882 mtu = vnic_dev_mtu(enic->vdev);
883 if (mtu && mtu != enic->port_mtu) {
884 enic->port_mtu = mtu;
885 }
886
887 enic_link_status(softc);
888
889 return (FILTER_HANDLED);
890 }
891
892 static int
enic_err_intr(void * vsc)893 enic_err_intr(void *vsc)
894 {
895 struct enic_softc *softc;
896
897 softc = vsc;
898
899 enic_stop(softc->ctx);
900 enic_init(softc->ctx);
901
902 return (FILTER_HANDLED);
903 }
904
905 static void
enic_stop(if_ctx_t ctx)906 enic_stop(if_ctx_t ctx)
907 {
908 struct enic_softc *softc;
909 struct enic *enic;
910 if_softc_ctx_t scctx;
911 unsigned int index;
912 struct vnic_wq *wq;
913 struct vnic_rq *rq;
914 struct vnic_cq *cq;
915 unsigned int cq_wq, cq_rq;
916
917
918 softc = iflib_get_softc(ctx);
919 scctx = softc->scctx;
920 enic = &softc->enic;
921
922 if (softc->stopped)
923 return;
924 softc->link_active = 0;
925 softc->stopped = 1;
926
927 enic_dev_disable(enic);
928
929 for (index = 0; index < scctx->isc_ntxqsets; index++) {
930 enic_stop_wq(enic, index);
931 vnic_wq_clean(&enic->wq[index]);
932 vnic_cq_clean(&enic->cq[enic_cq_rq(enic, index)]);
933
934 wq = &softc->enic.wq[index];
935 wq->ring.desc_avail = wq->ring.desc_count - 1;
936 wq->ring.last_count = wq->ring.desc_count;
937 wq->head_idx = 0;
938 wq->tail_idx = 0;
939
940 cq_wq = enic_cq_wq(&softc->enic, index);
941 cq = &softc->enic.cq[cq_wq];
942 cq->ring.desc_avail = cq->ring.desc_count - 1;
943 }
944
945 for (index = 0; index < scctx->isc_nrxqsets; index++) {
946 enic_stop_rq(enic, index);
947 vnic_rq_clean(&enic->rq[index]);
948 vnic_cq_clean(&enic->cq[enic_cq_wq(enic, index)]);
949
950 rq = &softc->enic.rq[index];
951 cq_rq = enic_cq_rq(&softc->enic, index);
952 cq = &softc->enic.cq[cq_rq];
953
954 cq->ring.desc_avail = cq->ring.desc_count - 1;
955 rq->ring.desc_avail = rq->ring.desc_count - 1;
956 rq->need_initial_post = true;
957 }
958
959 for (index = 0; index < scctx->isc_vectors; index++) {
960 vnic_intr_clean(&enic->intr[index]);
961 }
962 }
963
964 static void
enic_init(if_ctx_t ctx)965 enic_init(if_ctx_t ctx)
966 {
967 struct enic_softc *softc;
968 struct enic *enic;
969 if_softc_ctx_t scctx;
970 unsigned int index;
971
972 softc = iflib_get_softc(ctx);
973 scctx = softc->scctx;
974 enic = &softc->enic;
975
976
977 enic_init_vnic_resources(enic);
978
979 for (index = 0; index < scctx->isc_ntxqsets; index++)
980 enic_prep_wq_for_simple_tx(&softc->enic, index);
981
982 for (index = 0; index < scctx->isc_ntxqsets; index++)
983 enic_start_wq(enic, index);
984
985 for (index = 0; index < scctx->isc_nrxqsets; index++)
986 enic_start_rq(enic, index);
987
988 /* Use the current MAC address. */
989 bcopy(if_getlladdr(softc->ifp), softc->lladdr, ETHER_ADDR_LEN);
990 enic_set_lladdr(softc);
991
992 ENIC_LOCK(softc);
993 vnic_dev_enable_wait(enic->vdev);
994 ENIC_UNLOCK(softc);
995
996 softc->stopped = 0;
997
998 enic_link_status(softc);
999 }
1000
1001 static void
enic_del_mcast(struct enic_softc * softc)1002 enic_del_mcast(struct enic_softc *softc) {
1003 struct enic *enic;
1004 int i;
1005
1006 enic = &softc->enic;
1007 for (i=0; i < softc->mc_count; i++) {
1008 vnic_dev_del_addr(enic->vdev, &softc->mta[i * ETHER_ADDR_LEN]);
1009 }
1010 softc->multicast = 0;
1011 softc->mc_count = 0;
1012 }
1013
1014 static void
enic_add_mcast(struct enic_softc * softc)1015 enic_add_mcast(struct enic_softc *softc) {
1016 struct enic *enic;
1017 int i;
1018
1019 enic = &softc->enic;
1020 for (i=0; i < softc->mc_count; i++) {
1021 vnic_dev_add_addr(enic->vdev, &softc->mta[i * ETHER_ADDR_LEN]);
1022 }
1023 softc->multicast = 1;
1024 }
1025
1026 static u_int
enic_copy_maddr(void * arg,struct sockaddr_dl * sdl,u_int idx)1027 enic_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int idx)
1028 {
1029 uint8_t *mta = arg;
1030
1031 if (idx == ENIC_MAX_MULTICAST_ADDRESSES)
1032 return (0);
1033
1034 bcopy(LLADDR(sdl), &mta[idx * ETHER_ADDR_LEN], ETHER_ADDR_LEN);
1035 return (1);
1036 }
1037
1038 static void
enic_multi_set(if_ctx_t ctx)1039 enic_multi_set(if_ctx_t ctx)
1040 {
1041 if_t ifp;
1042 struct enic_softc *softc;
1043 u_int count;
1044
1045 softc = iflib_get_softc(ctx);
1046 ifp = iflib_get_ifp(ctx);
1047
1048 ENIC_LOCK(softc);
1049 enic_del_mcast(softc);
1050 count = if_foreach_llmaddr(ifp, enic_copy_maddr, softc->mta);
1051 softc->mc_count = count;
1052 enic_add_mcast(softc);
1053 ENIC_UNLOCK(softc);
1054
1055 if (if_getflags(ifp) & IFF_PROMISC) {
1056 softc->promisc = 1;
1057 } else {
1058 softc->promisc = 0;
1059 }
1060 if (if_getflags(ifp) & IFF_ALLMULTI) {
1061 softc->allmulti = 1;
1062 } else {
1063 softc->allmulti = 0;
1064 }
1065 enic_update_packet_filter(&softc->enic);
1066 }
1067
1068 static int
enic_mtu_set(if_ctx_t ctx,uint32_t mtu)1069 enic_mtu_set(if_ctx_t ctx, uint32_t mtu)
1070 {
1071 struct enic_softc *softc;
1072 struct enic *enic;
1073 if_softc_ctx_t scctx = iflib_get_softc_ctx(ctx);
1074
1075 softc = iflib_get_softc(ctx);
1076 enic = &softc->enic;
1077
1078 enic_stop(softc->ctx);
1079 if (mtu > enic->port_mtu){
1080 return (EINVAL);
1081 }
1082
1083 enic->config.mtu = mtu;
1084 scctx->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1085 enic_init(softc->ctx);
1086
1087 return (0);
1088 }
1089
1090 static void
enic_media_status(if_ctx_t ctx,struct ifmediareq * ifmr)1091 enic_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
1092 {
1093 struct enic_softc *softc;
1094 struct ifmedia_entry *next;
1095 uint32_t speed;
1096 uint64_t target_baudrate;
1097
1098 softc = iflib_get_softc(ctx);
1099
1100 ifmr->ifm_status = IFM_AVALID;
1101 ifmr->ifm_active = IFM_ETHER;
1102
1103 if (enic_link_is_up(softc) != 0) {
1104 ENIC_LOCK(softc);
1105 speed = vnic_dev_port_speed(&softc->vdev);
1106 ENIC_UNLOCK(softc);
1107 target_baudrate = 1000ull * speed;
1108 LIST_FOREACH(next, &(iflib_get_media(ctx)->ifm_list), ifm_list) {
1109 if (ifmedia_baudrate(next->ifm_media) == target_baudrate) {
1110 ifmr->ifm_active |= next->ifm_media;
1111 }
1112 }
1113
1114 ifmr->ifm_status |= IFM_ACTIVE;
1115 ifmr->ifm_active |= IFM_AUTO;
1116 } else
1117 ifmr->ifm_active |= IFM_NONE;
1118 }
1119
1120 static int
enic_media_change(if_ctx_t ctx)1121 enic_media_change(if_ctx_t ctx)
1122 {
1123 return (ENODEV);
1124 }
1125
1126 static int
enic_promisc_set(if_ctx_t ctx,int flags)1127 enic_promisc_set(if_ctx_t ctx, int flags)
1128 {
1129 if_t ifp;
1130 struct enic_softc *softc;
1131
1132 softc = iflib_get_softc(ctx);
1133 ifp = iflib_get_ifp(ctx);
1134
1135 if (if_getflags(ifp) & IFF_PROMISC) {
1136 softc->promisc = 1;
1137 } else {
1138 softc->promisc = 0;
1139 }
1140 if (if_getflags(ifp) & IFF_ALLMULTI) {
1141 softc->allmulti = 1;
1142 } else {
1143 softc->allmulti = 0;
1144 }
1145 enic_update_packet_filter(&softc->enic);
1146
1147 return (0);
1148 }
1149
1150 static uint64_t
enic_get_counter(if_ctx_t ctx,ift_counter cnt)1151 enic_get_counter(if_ctx_t ctx, ift_counter cnt) {
1152 if_t ifp = iflib_get_ifp(ctx);
1153
1154 if (cnt < IFCOUNTERS)
1155 return if_get_counter_default(ifp, cnt);
1156
1157 return (0);
1158 }
1159
1160 static void
enic_update_admin_status(if_ctx_t ctx)1161 enic_update_admin_status(if_ctx_t ctx)
1162 {
1163 struct enic_softc *softc;
1164 softc = iflib_get_softc(ctx);
1165
1166 enic_link_status(softc);
1167 }
1168
1169 static void
enic_txq_timer(if_ctx_t ctx,uint16_t qid)1170 enic_txq_timer(if_ctx_t ctx, uint16_t qid)
1171 {
1172
1173 struct enic_softc *softc;
1174 struct enic *enic;
1175 struct vnic_stats *stats;
1176 int ret;
1177
1178 softc = iflib_get_softc(ctx);
1179 enic = &softc->enic;
1180
1181 ENIC_LOCK(softc);
1182 ret = vnic_dev_stats_dump(enic->vdev, &stats);
1183 ENIC_UNLOCK(softc);
1184 if (ret) {
1185 dev_err(enic, "Error in getting stats\n");
1186 }
1187 }
1188
1189 static int
enic_link_is_up(struct enic_softc * softc)1190 enic_link_is_up(struct enic_softc *softc)
1191 {
1192 return (vnic_dev_link_status(&softc->vdev) == 1);
1193 }
1194
1195 static void
enic_link_status(struct enic_softc * softc)1196 enic_link_status(struct enic_softc *softc)
1197 {
1198 if_ctx_t ctx;
1199 uint64_t speed;
1200 int link;
1201
1202 ctx = softc->ctx;
1203 link = enic_link_is_up(softc);
1204 speed = IF_Gbps(10);
1205
1206 ENIC_LOCK(softc);
1207 speed = vnic_dev_port_speed(&softc->vdev);
1208 ENIC_UNLOCK(softc);
1209
1210 if (link != 0 && softc->link_active == 0) {
1211 softc->link_active = 1;
1212 iflib_link_state_change(ctx, LINK_STATE_UP, speed);
1213 } else if (link == 0 && softc->link_active != 0) {
1214 softc->link_active = 0;
1215 iflib_link_state_change(ctx, LINK_STATE_DOWN, speed);
1216 }
1217 }
1218
1219 static void
enic_set_lladdr(struct enic_softc * softc)1220 enic_set_lladdr(struct enic_softc *softc)
1221 {
1222 struct enic *enic;
1223 enic = &softc->enic;
1224
1225 ENIC_LOCK(softc);
1226 vnic_dev_add_addr(enic->vdev, softc->lladdr);
1227 ENIC_UNLOCK(softc);
1228 }
1229
1230
1231 static void
enic_setup_txq_sysctl(struct vnic_wq * wq,int i,struct sysctl_ctx_list * ctx,struct sysctl_oid_list * child)1232 enic_setup_txq_sysctl(struct vnic_wq *wq, int i, struct sysctl_ctx_list *ctx,
1233 struct sysctl_oid_list *child)
1234 {
1235 struct sysctl_oid *txsnode;
1236 struct sysctl_oid_list *txslist;
1237 struct vnic_stats *stats;
1238
1239 stats = wq[i].vdev->stats;
1240
1241 txsnode = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "hstats",
1242 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Host Statistics");
1243 txslist = SYSCTL_CHILDREN(txsnode);
1244
1245 SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_frames_ok", CTLFLAG_RD,
1246 &stats->tx.tx_frames_ok, "TX Frames OK");
1247 SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_unicast_frames_ok", CTLFLAG_RD,
1248 &stats->tx.tx_unicast_frames_ok, "TX unicast frames OK");
1249 SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_multicast_frames_ok", CTLFLAG_RD,
1250 &stats->tx.tx_multicast_frames_ok, "TX multicast framse OK");
1251 SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_broadcast_frames_ok", CTLFLAG_RD,
1252 &stats->tx.tx_broadcast_frames_ok, "TX Broadcast frames OK");
1253 SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_bytes_ok", CTLFLAG_RD,
1254 &stats->tx.tx_bytes_ok, "TX bytes OK ");
1255 SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_unicast_bytes_ok", CTLFLAG_RD,
1256 &stats->tx.tx_unicast_bytes_ok, "TX unicast bytes OK");
1257 SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_multicast_bytes_ok", CTLFLAG_RD,
1258 &stats->tx.tx_multicast_bytes_ok, "TX multicast bytes OK");
1259 SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_broadcast_bytes_ok", CTLFLAG_RD,
1260 &stats->tx.tx_broadcast_bytes_ok, "TX broadcast bytes OK");
1261 SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_drops", CTLFLAG_RD,
1262 &stats->tx.tx_drops, "TX drops");
1263 SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_errors", CTLFLAG_RD,
1264 &stats->tx.tx_errors, "TX errors");
1265 SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_tso", CTLFLAG_RD,
1266 &stats->tx.tx_tso, "TX TSO");
1267 }
1268
/*
 * Attach read-only sysctl leaves exposing the hardware RX statistics
 * for receive queue 'i' under a "hstats" node.  The counters live in
 * the vnic_stats area shared with the device (rq[i].vdev->stats), so
 * reads reflect whatever the firmware last dumped there.
 */
static void
enic_setup_rxq_sysctl(struct vnic_rq *rq, int i, struct sysctl_ctx_list *ctx,
    struct sysctl_oid_list *child)
{
	struct sysctl_oid *rxsnode;
	struct sysctl_oid_list *rxslist;
	struct vnic_stats *stats;

	stats = rq[i].vdev->stats;

	rxsnode = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "hstats",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Host Statistics");
	rxslist = SYSCTL_CHILDREN(rxsnode);

	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_frames_ok", CTLFLAG_RD,
	    &stats->rx.rx_frames_ok, "RX Frames OK");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_frames_total", CTLFLAG_RD,
	    &stats->rx.rx_frames_total, "RX frames total");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_unicast_frames_ok", CTLFLAG_RD,
	    &stats->rx.rx_unicast_frames_ok, "RX unicast frames ok");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_multicast_frames_ok", CTLFLAG_RD,
	    &stats->rx.rx_multicast_frames_ok, "RX multicast Frames ok");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_broadcast_frames_ok", CTLFLAG_RD,
	    &stats->rx.rx_broadcast_frames_ok, "RX broadcast frames ok");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_bytes_ok", CTLFLAG_RD,
	    &stats->rx.rx_bytes_ok, "RX bytes ok");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_unicast_bytes_ok", CTLFLAG_RD,
	    &stats->rx.rx_unicast_bytes_ok, "RX unicast bytes ok");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_multicast_bytes_ok", CTLFLAG_RD,
	    &stats->rx.rx_multicast_bytes_ok, "RX multicast bytes ok");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_broadcast_bytes_ok", CTLFLAG_RD,
	    &stats->rx.rx_broadcast_bytes_ok, "RX broadcast bytes ok");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_drop", CTLFLAG_RD,
	    &stats->rx.rx_drop, "RX drop");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_errors", CTLFLAG_RD,
	    &stats->rx.rx_errors, "RX errors");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_rss", CTLFLAG_RD,
	    &stats->rx.rx_rss, "RX rss");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_crc_errors", CTLFLAG_RD,
	    &stats->rx.rx_crc_errors, "RX crc errors");
	/* Per-size frame histogram counters. */
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_frames_64", CTLFLAG_RD,
	    &stats->rx.rx_frames_64, "RX frames 64");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_frames_127", CTLFLAG_RD,
	    &stats->rx.rx_frames_127, "RX frames 127");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_frames_255", CTLFLAG_RD,
	    &stats->rx.rx_frames_255, "RX frames 255");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_frames_511", CTLFLAG_RD,
	    &stats->rx.rx_frames_511, "RX frames 511");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_frames_1023", CTLFLAG_RD,
	    &stats->rx.rx_frames_1023, "RX frames 1023");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_frames_1518", CTLFLAG_RD,
	    &stats->rx.rx_frames_1518, "RX frames 1518");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_frames_to_max", CTLFLAG_RD,
	    &stats->rx.rx_frames_to_max, "RX frames to max");
}
1324
/*
 * Register sysctl statistics for the first TX and first RX queue.
 *
 * NOTE(review): both helpers create a child node named "hstats" under
 * the same parent list; verify the second SYSCTL_ADD_NODE does not
 * clash with (or silently shadow) the first.
 */
static void
enic_setup_queue_sysctl(struct enic_softc *softc, struct sysctl_ctx_list *ctx,
    struct sysctl_oid_list *child)
{
	enic_setup_txq_sysctl(softc->enic.wq, 0, ctx, child);
	enic_setup_rxq_sysctl(softc->enic.rq, 0, ctx, child);
}
1332
1333 static void
enic_setup_sysctl(struct enic_softc * softc)1334 enic_setup_sysctl(struct enic_softc *softc)
1335 {
1336 device_t dev;
1337 struct sysctl_ctx_list *ctx;
1338 struct sysctl_oid *tree;
1339 struct sysctl_oid_list *child;
1340
1341 dev = softc->dev;
1342 ctx = device_get_sysctl_ctx(dev);
1343 tree = device_get_sysctl_tree(dev);
1344 child = SYSCTL_CHILDREN(tree);
1345
1346 enic_setup_queue_sysctl(softc, ctx, child);
1347 }
1348
1349 static void
enic_enable_intr(struct enic_softc * softc,int irq)1350 enic_enable_intr(struct enic_softc *softc, int irq)
1351 {
1352 struct enic *enic = &softc->enic;
1353
1354 vnic_intr_unmask(&enic->intr[irq]);
1355 vnic_intr_return_all_credits(&enic->intr[irq]);
1356 }
1357
1358 static void
enic_disable_intr(struct enic_softc * softc,int irq)1359 enic_disable_intr(struct enic_softc *softc, int irq)
1360 {
1361 struct enic *enic = &softc->enic;
1362
1363 vnic_intr_mask(&enic->intr[irq]);
1364 vnic_intr_masked(&enic->intr[irq]); /* flush write */
1365 }
1366
1367 static int
enic_tx_queue_intr_enable(if_ctx_t ctx,uint16_t qid)1368 enic_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
1369 {
1370 struct enic_softc *softc;
1371 if_softc_ctx_t scctx;
1372
1373 softc = iflib_get_softc(ctx);
1374 scctx = softc->scctx;
1375
1376 enic_enable_intr(softc, qid + scctx->isc_nrxqsets);
1377
1378 return 0;
1379 }
1380
1381 static int
enic_rx_queue_intr_enable(if_ctx_t ctx,uint16_t qid)1382 enic_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
1383 {
1384 struct enic_softc *softc;
1385
1386 softc = iflib_get_softc(ctx);
1387 enic_enable_intr(softc, qid);
1388
1389 return 0;
1390 }
1391
1392 static void
enic_intr_enable_all(if_ctx_t ctx)1393 enic_intr_enable_all(if_ctx_t ctx)
1394 {
1395 struct enic_softc *softc;
1396 if_softc_ctx_t scctx;
1397 int i;
1398
1399 softc = iflib_get_softc(ctx);
1400 scctx = softc->scctx;
1401
1402 for (i = 0; i < scctx->isc_vectors; i++) {
1403 enic_enable_intr(softc, i);
1404 }
1405 }
1406
1407 static void
enic_intr_disable_all(if_ctx_t ctx)1408 enic_intr_disable_all(if_ctx_t ctx)
1409 {
1410 struct enic_softc *softc;
1411 if_softc_ctx_t scctx;
1412 int i;
1413
1414 softc = iflib_get_softc(ctx);
1415 scctx = softc->scctx;
1416 /*
1417 * iflib may invoke this routine before enic_attach_post() has run,
1418 * which is before the top level shared data area is initialized and
1419 * the device made aware of it.
1420 */
1421
1422 for (i = 0; i < scctx->isc_vectors; i++) {
1423 enic_disable_intr(softc, i);
1424 }
1425 }
1426
1427 static int
enic_dev_open(struct enic * enic)1428 enic_dev_open(struct enic *enic)
1429 {
1430 int err;
1431 int flags = CMD_OPENF_IG_DESCCACHE;
1432
1433 err = enic_dev_wait(enic->vdev, vnic_dev_open,
1434 vnic_dev_open_done, flags);
1435 if (err)
1436 dev_err(enic_get_dev(enic),
1437 "vNIC device open failed, err %d\n", err);
1438
1439 return err;
1440 }
1441
1442 static int
enic_dev_init(struct enic * enic)1443 enic_dev_init(struct enic *enic)
1444 {
1445 int err;
1446
1447 vnic_dev_intr_coal_timer_info_default(enic->vdev);
1448
1449 /*
1450 * Get vNIC configuration
1451 */
1452 err = enic_get_vnic_config(enic);
1453 if (err) {
1454 dev_err(dev, "Get vNIC configuration failed, aborting\n");
1455 return err;
1456 }
1457
1458 /* Get available resource counts */
1459 enic_get_res_counts(enic);
1460
1461 /* Queue counts may be zeros. rte_zmalloc returns NULL in that case. */
1462 enic->intr_queues = malloc(sizeof(*enic->intr_queues) *
1463 enic->conf_intr_count, M_DEVBUF, M_NOWAIT | M_ZERO);
1464
1465 vnic_dev_set_reset_flag(enic->vdev, 0);
1466 enic->max_flow_counter = -1;
1467
1468 /* set up link status checking */
1469 vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */
1470
1471 enic->overlay_offload = false;
1472 if (enic->disable_overlay && enic->vxlan) {
1473 /*
1474 * Explicitly disable overlay offload as the setting is
1475 * sticky, and resetting vNIC does not disable it.
1476 */
1477 if (vnic_dev_overlay_offload_ctrl(enic->vdev,
1478 OVERLAY_FEATURE_VXLAN, OVERLAY_OFFLOAD_DISABLE)) {
1479 dev_err(enic, "failed to disable overlay offload\n");
1480 } else {
1481 dev_info(enic, "Overlay offload is disabled\n");
1482 }
1483 }
1484 if (!enic->disable_overlay && enic->vxlan &&
1485 /* 'VXLAN feature' enables VXLAN, NVGRE, and GENEVE. */
1486 vnic_dev_overlay_offload_ctrl(enic->vdev,
1487 OVERLAY_FEATURE_VXLAN, OVERLAY_OFFLOAD_ENABLE) == 0) {
1488 enic->overlay_offload = true;
1489 enic->vxlan_port = ENIC_DEFAULT_VXLAN_PORT;
1490 dev_info(enic, "Overlay offload is enabled\n");
1491 /*
1492 * Reset the vxlan port to the default, as the NIC firmware
1493 * does not reset it automatically and keeps the old setting.
1494 */
1495 if (vnic_dev_overlay_offload_cfg(enic->vdev,
1496 OVERLAY_CFG_VXLAN_PORT_UPDATE, ENIC_DEFAULT_VXLAN_PORT)) {
1497 dev_err(enic, "failed to update vxlan port\n");
1498 return (EINVAL);
1499 }
1500 }
1501 return 0;
1502 }
1503
1504 static void *
enic_alloc_consistent(void * priv,size_t size,bus_addr_t * dma_handle,struct iflib_dma_info * res,u8 * name)1505 enic_alloc_consistent(void *priv, size_t size, bus_addr_t * dma_handle,
1506 struct iflib_dma_info *res, u8 * name)
1507 {
1508 void *vaddr;
1509 *dma_handle = 0;
1510 struct enic *enic = (struct enic *)priv;
1511 int rz;
1512
1513 rz = iflib_dma_alloc(enic->softc->ctx, size, res, BUS_DMA_NOWAIT);
1514 if (rz) {
1515 pr_err("%s : Failed to allocate memory requested for %s\n",
1516 __func__, name);
1517 return NULL;
1518 }
1519
1520 vaddr = res->idi_vaddr;
1521 *dma_handle = res->idi_paddr;
1522
1523 return vaddr;
1524 }
1525
/*
 * vnic_dev free callback, counterpart to enic_alloc_consistent().
 * Only the iflib_dma_info descriptor is consulted; the remaining
 * parameters exist to satisfy the callback signature and are unused.
 */
static void
enic_free_consistent(void *priv, size_t size, void *vaddr,
    bus_addr_t dma_handle, struct iflib_dma_info *res)
{
	iflib_dma_free(res);
}
1532
1533 static int
enic_pci_mapping(struct enic_softc * softc)1534 enic_pci_mapping(struct enic_softc *softc)
1535 {
1536 int rc;
1537
1538 rc = enic_map_bar(softc, &softc->mem, 0, true);
1539 if (rc)
1540 return rc;
1541
1542 rc = enic_map_bar(softc, &softc->io, 2, false);
1543
1544 return rc;
1545 }
1546
1547 static void
enic_pci_mapping_free(struct enic_softc * softc)1548 enic_pci_mapping_free(struct enic_softc *softc)
1549 {
1550 if (softc->mem.res != NULL)
1551 bus_release_resource(softc->dev, SYS_RES_MEMORY,
1552 softc->mem.rid, softc->mem.res);
1553 softc->mem.res = NULL;
1554
1555 if (softc->io.res != NULL)
1556 bus_release_resource(softc->dev, SYS_RES_MEMORY,
1557 softc->io.rid, softc->io.res);
1558 softc->io.res = NULL;
1559 }
1560
1561 static int
enic_dev_wait(struct vnic_dev * vdev,int (* start)(struct vnic_dev *,int),int (* finished)(struct vnic_dev *,int *),int arg)1562 enic_dev_wait(struct vnic_dev *vdev, int (*start) (struct vnic_dev *, int),
1563 int (*finished) (struct vnic_dev *, int *), int arg)
1564 {
1565 int done;
1566 int err;
1567 int i;
1568
1569 err = start(vdev, arg);
1570 if (err)
1571 return err;
1572
1573 /* Wait for func to complete...2 seconds max */
1574 for (i = 0; i < 2000; i++) {
1575 err = finished(vdev, &done);
1576 if (err)
1577 return err;
1578 if (done)
1579 return 0;
1580 usleep(1000);
1581 }
1582 return (ETIMEDOUT);
1583 }
1584
1585 static int
enic_map_bar(struct enic_softc * softc,struct enic_bar_info * bar,int bar_num,bool shareable)1586 enic_map_bar(struct enic_softc *softc, struct enic_bar_info *bar, int bar_num,
1587 bool shareable)
1588 {
1589 uint32_t flag;
1590
1591 if (bar->res != NULL) {
1592 device_printf(softc->dev, "Bar %d already mapped\n", bar_num);
1593 return (EDOOFUS);
1594 }
1595
1596 bar->rid = PCIR_BAR(bar_num);
1597 flag = RF_ACTIVE;
1598 if (shareable)
1599 flag |= RF_SHAREABLE;
1600
1601 if ((bar->res = bus_alloc_resource_any(softc->dev,
1602 SYS_RES_MEMORY, &bar->rid, flag)) == NULL) {
1603 device_printf(softc->dev,
1604 "PCI BAR%d mapping failure\n", bar_num);
1605 return (ENXIO);
1606 }
1607 bar->tag = rman_get_bustag(bar->res);
1608 bar->handle = rman_get_bushandle(bar->res);
1609 bar->size = rman_get_size(bar->res);
1610
1611 return 0;
1612 }
1613
/*
 * (Re)initialize all hardware queue and interrupt resources: allocate
 * interrupt vectors, then clean and re-init every RX queue, TX queue,
 * and their completion queues, and finally program the interrupt
 * coalescing timers.  Called with queue counts already settled in the
 * iflib softc context.
 */
void
enic_init_vnic_resources(struct enic *enic)
{
	unsigned int error_interrupt_enable = 1;
	unsigned int error_interrupt_offset = 0;
	unsigned int rxq_interrupt_enable = 0;
	unsigned int rxq_interrupt_offset = ENICPMD_RXQ_INTR_OFFSET;
	unsigned int txq_interrupt_enable = 0;
	unsigned int txq_interrupt_offset;
	unsigned int index = 0;
	unsigned int cq_idx;
	if_softc_ctx_t scctx;

	scctx = enic->softc->scctx;

	/* RX completions interrupt; TX completions do not. */
	rxq_interrupt_enable = 1;
	txq_interrupt_enable = 0;

	/* RX vectors start at slot 0; TX vectors follow all RX vectors. */
	rxq_interrupt_offset = 0;
	txq_interrupt_offset = scctx->isc_nrxqsets;

	for (index = 0; index < enic->intr_count; index++) {
		vnic_intr_alloc(enic->vdev, &enic->intr[index], index);
	}

	/* Pair each RX queue with its completion queue and init both. */
	for (index = 0; index < scctx->isc_nrxqsets; index++) {
		cq_idx = enic_cq_rq(enic, index);

		vnic_rq_clean(&enic->rq[index]);
		vnic_rq_init(&enic->rq[index], cq_idx, error_interrupt_enable,
		    error_interrupt_offset);

		vnic_cq_clean(&enic->cq[cq_idx]);
		vnic_cq_init(&enic->cq[cq_idx],
		    0 /* flow_control_enable */ ,
		    1 /* color_enable */ ,
		    0 /* cq_head */ ,
		    0 /* cq_tail */ ,
		    1 /* cq_tail_color */ ,
		    rxq_interrupt_enable,
		    1 /* cq_entry_enable */ ,
		    0 /* cq_message_enable */ ,
		    rxq_interrupt_offset,
		    0 /* cq_message_addr */ );
		/* Each RX queue gets its own interrupt vector. */
		if (rxq_interrupt_enable)
			rxq_interrupt_offset++;
	}

	/* Same pairing for the TX queues. */
	for (index = 0; index < scctx->isc_ntxqsets; index++) {
		cq_idx = enic_cq_wq(enic, index);
		vnic_wq_clean(&enic->wq[index]);
		vnic_wq_init(&enic->wq[index], cq_idx, error_interrupt_enable,
		    error_interrupt_offset);
		/* Compute unsupported ol flags for enic_prep_pkts() */
		enic->wq[index].tx_offload_notsup_mask = 0;

		vnic_cq_clean(&enic->cq[cq_idx]);
		vnic_cq_init(&enic->cq[cq_idx],
		    0 /* flow_control_enable */ ,
		    1 /* color_enable */ ,
		    0 /* cq_head */ ,
		    0 /* cq_tail */ ,
		    1 /* cq_tail_color */ ,
		    txq_interrupt_enable,
		    1,
		    0,
		    txq_interrupt_offset,
		    0 /* (u64)enic->wq[index].cqmsg_rz->iova */ );

	}

	/* 125 usec coalescing timer; interrupts auto-mask on assertion. */
	for (index = 0; index < enic->intr_count; index++) {
		vnic_intr_init(&enic->intr[index], 125,
		    enic->config.intr_timer_type, /* mask_on_assertion */ 1);
	}
}
1690
1691 static void
enic_update_packet_filter(struct enic * enic)1692 enic_update_packet_filter(struct enic *enic)
1693 {
1694 struct enic_softc *softc = enic->softc;
1695
1696 ENIC_LOCK(softc);
1697 vnic_dev_packet_filter(enic->vdev,
1698 softc->directed,
1699 softc->multicast,
1700 softc->broadcast,
1701 softc->promisc,
1702 softc->allmulti);
1703 ENIC_UNLOCK(softc);
1704 }
1705
1706 static bool
enic_if_needs_restart(if_ctx_t ctx,enum iflib_restart_event event)1707 enic_if_needs_restart(if_ctx_t ctx, enum iflib_restart_event event)
1708 {
1709 switch (event) {
1710 case IFLIB_RESTART_VLAN_CONFIG:
1711 default:
1712 return (false);
1713 }
1714 }
1715
1716 int
enic_setup_finish(struct enic * enic)1717 enic_setup_finish(struct enic *enic)
1718 {
1719 struct enic_softc *softc = enic->softc;
1720
1721 /* Default conf */
1722 softc->directed = 1;
1723 softc->multicast = 0;
1724 softc->broadcast = 1;
1725 softc->promisc = 0;
1726 softc->allmulti = 1;
1727 enic_update_packet_filter(enic);
1728
1729 return 0;
1730 }
1731