xref: /freebsd/sys/dev/enic/if_enic.c (revision 0acab8b3d1336d4db73a9946ef76b4bcd0b0aabe)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2008-2017 Cisco Systems, Inc.  All rights reserved.
3  * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
4  */
5 
6 #include "opt_rss.h"
7 
8 #include <sys/param.h>
9 #include <sys/systm.h>
10 #include <sys/kernel.h>
11 #include <sys/endian.h>
12 #include <sys/sockio.h>
13 #include <sys/mbuf.h>
14 #include <sys/malloc.h>
15 #include <sys/module.h>
16 #include <sys/socket.h>
17 #include <sys/sysctl.h>
18 #include <sys/smp.h>
19 #include <vm/vm.h>
20 #include <vm/pmap.h>
21 
22 #include <net/ethernet.h>
23 #include <net/if.h>
24 #include <net/if_var.h>
25 #include <net/if_arp.h>
26 #include <net/if_dl.h>
27 #include <net/if_types.h>
28 #include <net/if_media.h>
29 #include <net/if_vlan_var.h>
30 #include <net/iflib.h>
31 #ifdef RSS
32 #include <net/rss_config.h>
33 #endif
34 
35 #include <netinet/in_systm.h>
36 #include <netinet/in.h>
37 #include <netinet/ip.h>
38 #include <netinet/ip6.h>
39 #include <netinet6/ip6_var.h>
40 #include <netinet/udp.h>
41 #include <netinet/tcp.h>
42 
43 #include <machine/bus.h>
44 #include <machine/resource.h>
45 #include <sys/bus.h>
46 #include <sys/rman.h>
47 
48 #include <dev/pci/pcireg.h>
49 #include <dev/pci/pcivar.h>
50 
51 #include "ifdi_if.h"
52 #include "enic.h"
53 
54 #include "opt_inet.h"
55 #include "opt_inet6.h"
56 
57 static SYSCTL_NODE(_hw, OID_AUTO, enic, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
58     "ENIC");
59 
60 static const pci_vendor_info_t enic_vendor_info_array[] =
61 {
62 	PVID(CISCO_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET,
63 	     DRV_DESCRIPTION),
64 		PVID(CISCO_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET_VF,
65 		     DRV_DESCRIPTION " VF"),
66 	/* required last entry */
67 
68 		PVID_END
69 };
70 
/* iflib device lifecycle. */
static void *enic_register(device_t);
static int enic_attach_pre(if_ctx_t);
static int enic_msix_intr_assign(if_ctx_t, int);

static int enic_attach_post(if_ctx_t);
static int enic_detach(if_ctx_t);

/* Queue allocation/teardown and interrupt filter routines. */
static int enic_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
static int enic_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
static void enic_queues_free(if_ctx_t);
static int enic_rxq_intr(void *);
static int enic_event_intr(void *);
static int enic_err_intr(void *);

/* Interface state control and ioctl-style operations. */
static void enic_stop(if_ctx_t);
static void enic_init(if_ctx_t);
static void enic_multi_set(if_ctx_t);
static int enic_mtu_set(if_ctx_t, uint32_t);
static void enic_media_status(if_ctx_t, struct ifmediareq *);
static int enic_media_change(if_ctx_t);
static int enic_promisc_set(if_ctx_t, int);
static uint64_t enic_get_counter(if_ctx_t, ift_counter);
static void enic_update_admin_status(if_ctx_t);
static void enic_txq_timer(if_ctx_t, uint16_t);

/* Link handling and sysctl plumbing. */
static int enic_link_is_up(struct enic_softc *);
static void enic_link_status(struct enic_softc *);
static void enic_set_lladdr(struct enic_softc *);
static void enic_setup_txq_sysctl(struct vnic_wq *, int, struct sysctl_ctx_list *,
    struct sysctl_oid_list *);
static void enic_setup_rxq_sysctl(struct vnic_rq *, int,  struct sysctl_ctx_list *,
    struct sysctl_oid_list *);
static void enic_setup_sysctl(struct enic_softc *);

/* Per-queue and global interrupt enable/disable helpers. */
static int enic_tx_queue_intr_enable(if_ctx_t, uint16_t);
static int enic_rx_queue_intr_enable(if_ctx_t, uint16_t);
static void enic_enable_intr(struct enic_softc *, int);
static void enic_disable_intr(struct enic_softc *, int);
static void enic_intr_enable_all(if_ctx_t);
static void enic_intr_disable_all(if_ctx_t);

/* Device command, DMA and PCI BAR helpers. */
static int enic_dev_open(struct enic *);
static int enic_dev_init(struct enic *);
static void *enic_alloc_consistent(void *, size_t, bus_addr_t *,
    struct iflib_dma_info *, u8 *);
static void enic_free_consistent(void *, size_t, void *, bus_addr_t,
    struct iflib_dma_info *);
static int enic_pci_mapping(struct enic_softc *);
static void enic_pci_mapping_free(struct enic_softc *);
static int enic_dev_wait(struct vnic_dev *, int (*) (struct vnic_dev *, int),
    int (*) (struct vnic_dev *, int *), int arg);
static int enic_map_bar(struct enic_softc *, struct enic_bar_info *, int, bool);
static void enic_update_packet_filter(struct enic *enic);
static bool enic_if_needs_restart(if_ctx_t, enum iflib_restart_event);

/* Barrier direction selector (read / write / both). */
typedef enum {
	ENIC_BARRIER_RD,
	ENIC_BARRIER_WR,
	ENIC_BARRIER_RDWR,
} enic_barrier_t;
127 
/*
 * Newbus device methods: registration returns the iflib shared context;
 * probe/attach/detach and power events are all delegated to iflib.
 */
static device_method_t enic_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, enic_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
	DEVMETHOD(device_suspend, iflib_device_suspend),
	DEVMETHOD(device_resume, iflib_device_resume),
	DEVMETHOD_END
};

static driver_t enic_driver = {
	"enic", enic_methods, sizeof(struct enic_softc)
};

DRIVER_MODULE(enic, pci, enic_driver, 0, 0);
IFLIB_PNP_INFO(pci, enic, enic_vendor_info_array);
MODULE_VERSION(enic, 2);

/* This driver requires the PCI bus glue, ethernet and iflib frameworks. */
MODULE_DEPEND(enic, pci, 1, 1, 1);
MODULE_DEPEND(enic, ether, 1, 1, 1);
MODULE_DEPEND(enic, iflib, 1, 1, 1);
151 
/*
 * iflib driver interface methods implementing the ifdi_* contract for the
 * forward declarations above.
 */
static device_method_t enic_iflib_methods[] = {
	DEVMETHOD(ifdi_tx_queues_alloc, enic_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, enic_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, enic_queues_free),

	DEVMETHOD(ifdi_attach_pre, enic_attach_pre),
	DEVMETHOD(ifdi_attach_post, enic_attach_post),
	DEVMETHOD(ifdi_detach, enic_detach),

	DEVMETHOD(ifdi_init, enic_init),
	DEVMETHOD(ifdi_stop, enic_stop),
	DEVMETHOD(ifdi_multi_set, enic_multi_set),
	DEVMETHOD(ifdi_mtu_set, enic_mtu_set),
	DEVMETHOD(ifdi_media_status, enic_media_status),
	DEVMETHOD(ifdi_media_change, enic_media_change),
	DEVMETHOD(ifdi_promisc_set, enic_promisc_set),
	DEVMETHOD(ifdi_get_counter, enic_get_counter),
	DEVMETHOD(ifdi_update_admin_status, enic_update_admin_status),
	DEVMETHOD(ifdi_timer, enic_txq_timer),

	DEVMETHOD(ifdi_tx_queue_intr_enable, enic_tx_queue_intr_enable),
	DEVMETHOD(ifdi_rx_queue_intr_enable, enic_rx_queue_intr_enable),
	DEVMETHOD(ifdi_intr_enable, enic_intr_enable_all),
	DEVMETHOD(ifdi_intr_disable, enic_intr_disable_all),
	DEVMETHOD(ifdi_msix_intr_assign, enic_msix_intr_assign),

	DEVMETHOD(ifdi_needs_restart, enic_if_needs_restart),

	DEVMETHOD_END
};

static driver_t enic_iflib_driver = {
	"enic", enic_iflib_methods, sizeof(struct enic_softc)
};

/* TX/RX fast-path entry points, defined elsewhere in the driver. */
extern struct if_txrx enic_txrx;
188 
/*
 * Shared context template handed to iflib by enic_register().  Each queue
 * set pairs a command ring with a completion ring, hence isc_nrxqs and
 * isc_ntxqs are both 2.
 */
static struct if_shared_ctx enic_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_q_align = 512,

	.isc_tx_maxsize = ENIC_TX_MAX_PKT_SIZE,
	.isc_tx_maxsegsize = PAGE_SIZE,

	/*
	 * These values are used to configure the busdma tag used for receive
	 * descriptors.  Each receive descriptor only points to one buffer.
	 */
	.isc_rx_maxsize = ENIC_DEFAULT_RX_MAX_PKT_SIZE,	/* One buf per
							 * descriptor */
	.isc_rx_nsegments = 1,	/* One mapping per descriptor */
	.isc_rx_maxsegsize = ENIC_DEFAULT_RX_MAX_PKT_SIZE,
	.isc_admin_intrcnt = 2,	/* event + error interrupts */
	.isc_vendor_info = enic_vendor_info_array,
	.isc_driver_version = "1",
	.isc_driver = &enic_iflib_driver,
	/* SKIP_MSIX: vectors are allocated manually in enic_allocate_msix(). */
	.isc_flags = IFLIB_HAS_RXCQ | IFLIB_HAS_TXCQ | IFLIB_SKIP_MSIX,

	/*
	 * Number of receive queues per receive queue set, with associated
	 * descriptor settings for each.
	 */

	.isc_nrxqs = 2,
	.isc_nfl = 1,		/* one free list for each receive command
				 * queue */
	.isc_nrxd_min = {16, 16},
	.isc_nrxd_max = {2048, 2048},
	.isc_nrxd_default = {64, 64},

	/*
	 * Number of transmit queues per transmit queue set, with associated
	 * descriptor settings for each.
	 */
	.isc_ntxqs = 2,
	.isc_ntxd_min = {16, 16},
	.isc_ntxd_max = {2048, 2048},
	.isc_ntxd_default = {64, 64},
};
231 
/*
 * device_register method: hand iflib the shared context template that
 * describes this driver's queue and descriptor layout.
 */
static void *
enic_register(device_t dev)
{
	return (&enic_sctx_init);
}
237 
238 static int
enic_allocate_msix(struct enic_softc * softc)239 enic_allocate_msix(struct enic_softc *softc) {
240 	if_ctx_t ctx;
241 	if_softc_ctx_t scctx;
242 	if_shared_ctx_t sctx;
243 	device_t dev;
244 	cpuset_t cpus;
245 	int queues, vectors, requested;
246 	int err = 0;
247 
248 	dev = softc->dev;
249 	ctx = softc->ctx;
250 	scctx = softc->scctx;
251 	sctx = iflib_get_sctx(ctx);
252 
253 	if (bus_get_cpus(dev, INTR_CPUS, sizeof(cpus), &cpus) != 0) {
254 		device_printf(dev, "Unable to fetch CPU list\n");
255 		CPU_COPY(&all_cpus, &cpus);
256 	}
257 
258 
259 	queues = CPU_COUNT(&cpus);
260 	queues = imin(queues, scctx->isc_nrxqsets);
261 	queues = imin(queues, scctx->isc_ntxqsets);
262 	requested = queues * 2 + sctx->isc_admin_intrcnt;
263 	scctx->isc_nrxqsets = queues;
264 	scctx->isc_ntxqsets = queues;
265 
266 	vectors = requested;
267 	if ((err = pci_alloc_msix(dev, &vectors)) != 0) {
268 		device_printf(dev,
269                     "failed to allocate %d MSI-X vectors, err: %d\n", requested,
270                     err);
271 		err = 1;
272 		goto enic_allocate_msix_out;
273 	} else {
274 		if (vectors != requested) {
275 			device_printf(dev,
276 			    "Unable to allocate sufficient MSI-X vectors "
277 			     "(got %d, need %d)\n", requested, vectors);
278 			pci_release_msi(dev);
279 			err = 1;
280 			goto enic_allocate_msix_out;
281 		}
282 	}
283 
284 	device_printf(dev, "Using MSI-X interrupts with %d vectors\n",
285 	    vectors);
286 
287 	scctx->isc_intr = IFLIB_INTR_MSIX;
288 	scctx->isc_vectors = vectors;
289 
290 enic_allocate_msix_out:
291 	return (err);
292 
293 }
294 
/*
 * Default adaptive RX interrupt-coalescing ranges, indexed by link-speed
 * class; consumed by enic_set_rx_coal_setting().
 */
static struct enic_intr_mod_range mod_range[ENIC_MAX_LINK_SPEEDS] = {
	{0,  0}, /* 0  - 4  Gbps */
	{0,  3}, /* 4  - 10 Gbps */
	{3,  6}, /* 10 - 40 Gbps */
};
300 
enic_set_rx_coal_setting(struct enic * enic)301 static void enic_set_rx_coal_setting(struct enic *enic)
302 {
303 	unsigned int speed;
304 	int index = -1;
305 	struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
306 
307 	/* 1. Read the link speed from fw
308 	 * 2. Pick the default range for the speed
309 	 * 3. Update it in enic->rx_coalesce_setting
310 	 */
311 	speed = vnic_dev_port_speed(enic->vdev);
312 	if (ENIC_LINK_SPEED_10G < speed)
313 		index = ENIC_LINK_40G_INDEX;
314 	else if (ENIC_LINK_SPEED_4G < speed)
315 		index = ENIC_LINK_10G_INDEX;
316 	else
317 		index = ENIC_LINK_4G_INDEX;
318 
319 	rx_coal->small_pkt_range_start = mod_range[index].small_pkt_range_start;
320 	rx_coal->large_pkt_range_start = mod_range[index].large_pkt_range_start;
321 	rx_coal->range_end = ENIC_RX_COALESCE_RANGE_END;
322 
323 	/* Start with the value provided by UCSM */
324 	for (index = 0; index < enic->rq_count; index++)
325 		enic->cq[index].cur_rx_coal_timeval =
326 		enic->config.intr_timer_usec;
327 
328 	rx_coal->use_adaptive_rx_coalesce = 1;
329 }
330 
331 static int
enic_attach_pre(if_ctx_t ctx)332 enic_attach_pre(if_ctx_t ctx)
333 {
334 	if_softc_ctx_t	scctx;
335 	struct enic_softc *softc;
336 	struct vnic_dev *vdev;
337 	struct enic *enic;
338 	device_t dev;
339 
340 	int err = -1;
341 	int rc = 0;
342 	int i;
343 	u64 a0 = 0, a1 = 0;
344 	int wait = 1000;
345 	struct vnic_stats *stats;
346 	int ret;
347 
348 	dev = iflib_get_dev(ctx);
349 	softc = iflib_get_softc(ctx);
350 	softc->dev = dev;
351 	softc->ctx = ctx;
352 	softc->sctx = iflib_get_sctx(ctx);
353 	softc->scctx = iflib_get_softc_ctx(ctx);
354 	softc->ifp = iflib_get_ifp(ctx);
355 	softc->media = iflib_get_media(ctx);
356 	softc->mta = malloc(sizeof(u8) * ETHER_ADDR_LEN *
357 		ENIC_MAX_MULTICAST_ADDRESSES, M_DEVBUF,
358 		     M_NOWAIT | M_ZERO);
359 	if (softc->mta == NULL)
360 		return (ENOMEM);
361 	scctx = softc->scctx;
362 
363 	mtx_init(&softc->enic_lock, "ENIC Lock", NULL, MTX_DEF);
364 
365 	pci_enable_busmaster(softc->dev);
366 	if (enic_pci_mapping(softc))
367 		return (ENXIO);
368 
369 	enic = &softc->enic;
370 	enic->softc = softc;
371 	vdev = &softc->vdev;
372 	vdev->softc = softc;
373 	enic->vdev = vdev;
374 	vdev->priv = enic;
375 
376 	ENIC_LOCK(softc);
377 	vnic_dev_register(vdev, &softc->mem, 1);
378 	enic->vdev = vdev;
379 	vnic_dev_cmd_init(enic->vdev);
380 
381 	vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
382 
383 	vnic_dev_cmd(vdev, CMD_INIT_v1, &a0, &a1, wait);
384 	vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
385 
386 	bcopy((u_int8_t *) & a0, softc->mac_addr, ETHER_ADDR_LEN);
387 	iflib_set_mac(ctx, softc->mac_addr);
388 
389 	vnic_register_cbacks(enic->vdev, enic_alloc_consistent,
390 	    enic_free_consistent);
391 
392 	/*
393 	 * Allocate the consistent memory for stats and counters upfront so
394 	 * both primary and secondary processes can access them.
395 	 */
396 	ENIC_UNLOCK(softc);
397 	err = vnic_dev_alloc_stats_mem(enic->vdev);
398 	ENIC_LOCK(softc);
399 	if (err) {
400 		dev_err(enic, "Failed to allocate cmd memory, aborting\n");
401 		goto err_out_unregister;
402 	}
403 	vnic_dev_stats_clear(enic->vdev);
404 	ret = vnic_dev_stats_dump(enic->vdev, &stats);
405 	if (ret) {
406 		dev_err(enic, "Error in getting stats\n");
407 		goto err_out_unregister;
408 	}
409 	err = vnic_dev_alloc_counter_mem(enic->vdev);
410 	if (err) {
411 		dev_err(enic, "Failed to allocate counter memory, aborting\n");
412 		goto err_out_unregister;
413 	}
414 
415 	/* Issue device open to get device in known state */
416 	err = enic_dev_open(enic);
417 	if (err) {
418 		dev_err(enic, "vNIC dev open failed, aborting\n");
419 		goto err_out_unregister;
420 	}
421 
422 	/* Set ingress vlan rewrite mode before vnic initialization */
423 	enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_UNTAG_DEFAULT_VLAN;
424 	enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN;
425 	err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
426 						enic->ig_vlan_rewrite_mode);
427 	if (err) {
428 		dev_err(enic,
429 		    "Failed to set ingress vlan rewrite mode, aborting.\n");
430 		goto err_out_dev_close;
431 	}
432 
433 	/*
434 	 * Issue device init to initialize the vnic-to-switch link. We'll
435 	 * start with carrier off and wait for link UP notification later to
436 	 * turn on carrier.  We don't need to wait here for the
437 	 * vnic-to-switch link initialization to complete; link UP
438 	 * notification is the indication that the process is complete.
439 	 */
440 
441 	err = vnic_dev_init(enic->vdev, 0);
442 	if (err) {
443 		dev_err(enic, "vNIC dev init failed, aborting\n");
444 		goto err_out_dev_close;
445 	}
446 
447 	err = enic_dev_init(enic);
448 	if (err) {
449 		dev_err(enic, "Device initialization failed, aborting\n");
450 		goto err_out_dev_close;
451 	}
452 	ENIC_UNLOCK(softc);
453 
454 	enic->port_mtu = vnic_dev_mtu(enic->vdev);
455 
456 	softc->scctx = iflib_get_softc_ctx(ctx);
457 	scctx = softc->scctx;
458 	scctx->isc_txrx = &enic_txrx;
459 	scctx->isc_capabilities = scctx->isc_capenable = \
460 		IFCAP_HWCSUM;
461 	scctx->isc_tx_csum_flags = 0;
462 	if_setmtu(softc->ifp, enic->config.mtu);
463 	scctx->isc_max_frame_size = enic->config.mtu + ETHER_HDR_LEN + \
464 		ETHER_CRC_LEN;
465 	scctx->isc_nrxqsets_max = enic->conf_rq_count;
466 	scctx->isc_ntxqsets_max = enic->conf_wq_count;
467 	scctx->isc_nrxqsets = enic->conf_rq_count;
468 	scctx->isc_ntxqsets = enic->conf_wq_count;
469 	for (i = 0; i < enic->conf_wq_count; i++) {
470 		scctx->isc_ntxd[i] = enic->config.wq_desc_count;
471 		scctx->isc_txqsizes[i] = sizeof(struct cq_enet_wq_desc)
472 			* scctx->isc_ntxd[i];
473 		scctx->isc_ntxd[i + enic->conf_wq_count] =
474 		    enic->config.wq_desc_count;
475 		scctx->isc_txqsizes[i + enic->conf_wq_count] =
476 		    sizeof(struct cq_desc) * scctx->isc_ntxd[i +
477 		    enic->conf_wq_count];
478 	}
479 	for (i = 0; i < enic->conf_rq_count; i++) {
480 		scctx->isc_nrxd[i] = enic->config.rq_desc_count;
481 		scctx->isc_rxqsizes[i] = sizeof(struct cq_enet_rq_desc) *
482 		    scctx->isc_nrxd[i];
483 		scctx->isc_nrxd[i + enic->conf_rq_count] =
484 		    enic->config.rq_desc_count;
485 		scctx->isc_rxqsizes[i + enic->conf_rq_count] = sizeof(struct
486 		    cq_desc) * scctx->isc_nrxd[i + enic->conf_rq_count];
487 	}
488 	scctx->isc_tx_nsegments = 31;
489 
490 	scctx->isc_msix_bar = -1;
491 
492 	ifmedia_add(softc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
493 	ifmedia_add(softc->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
494 	ifmedia_add(softc->media, IFM_ETHER | IFM_10_FL, 0, NULL);
495 
496 	/*
497 	 * Allocate the CQ here since TX is called first before RX for now
498 	 * assume RX and TX are the same
499 	 */
500 	if (softc->enic.cq == NULL)
501 		softc->enic.cq = malloc(sizeof(struct vnic_cq) *
502 		     softc->enic.wq_count + softc->enic.rq_count, M_DEVBUF,
503 		     M_NOWAIT | M_ZERO);
504 	if (softc->enic.cq == NULL)
505 		return (ENOMEM);
506 
507 	softc->enic.cq->ntxqsets = softc->enic.wq_count + softc->enic.rq_count;
508 
509 	/*
510 	 * Allocate the consistent memory for stats and counters upfront so
511 	 * both primary and secondary processes can access them.
512 	 */
513 	err = vnic_dev_alloc_stats_mem(enic->vdev);
514 	if (err) {
515 		dev_err(enic, "Failed to allocate cmd memory, aborting\n");
516 		goto err_out_dev_close;
517 	}
518 
519         err = enic_allocate_msix(softc);
520         if (err) {
521 		dev_err(enic, "Failed to allocate MSIX, aborting\n");
522 		goto err_out_dev_close;
523 	}
524 
525 	return (rc);
526 
527 err_out_dev_close:
528 	vnic_dev_close(enic->vdev);
529 	vnic_dev_deinit_devcmd2(enic->vdev);
530 err_out_unregister:
531 	free(softc->vdev.devcmd, M_DEVBUF);
532 	free(softc->enic.intr_queues, M_DEVBUF);
533 	free(softc->enic.cq, M_DEVBUF);
534 	free(softc->mta, M_DEVBUF);
535 	rc = -1;
536 	pci_disable_busmaster(softc->dev);
537 	enic_pci_mapping_free(softc);
538 	mtx_destroy(&softc->enic_lock);
539 	return (rc);
540 }
541 
542 static int
enic_msix_intr_assign(if_ctx_t ctx,int msix)543 enic_msix_intr_assign(if_ctx_t ctx, int msix)
544 {
545 	struct enic_softc *softc;
546 	struct enic *enic;
547 	if_softc_ctx_t scctx;
548 
549 	int error;
550 	int i;
551 	char irq_name[16];
552 
553 	softc = iflib_get_softc(ctx);
554 	enic = &softc->enic;
555 	scctx = softc->scctx;
556 
557 	ENIC_LOCK(softc);
558 	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSIX);
559 	ENIC_UNLOCK(softc);
560 
561 	enic->intr_queues = malloc(sizeof(*enic->intr_queues) *
562 	    enic->conf_intr_count, M_DEVBUF, M_NOWAIT | M_ZERO);
563 	enic->intr = malloc(sizeof(*enic->intr) * msix, M_DEVBUF, M_NOWAIT
564 	    | M_ZERO);
565 	for (i = 0; i < scctx->isc_nrxqsets; i++) {
566 		snprintf(irq_name, sizeof(irq_name), "erxq%d:%d", i,
567 		    device_get_unit(softc->dev));
568 
569 		error = iflib_irq_alloc_generic(ctx,
570 		    &enic->intr_queues[i].intr_irq, i + 1, IFLIB_INTR_RX,
571 		    enic_rxq_intr, &enic->rq[i], i, irq_name);
572 		if (error) {
573 			device_printf(iflib_get_dev(ctx),
574 			    "Failed to register rxq %d interrupt handler\n", i);
575 			return (error);
576 		}
577 		enic->intr[i].index = i;
578 		enic->intr[i].vdev = enic->vdev;
579 		ENIC_LOCK(softc);
580 		enic->intr[i].ctrl = vnic_dev_get_res(enic->vdev,
581 		    RES_TYPE_INTR_CTRL, i);
582 		vnic_intr_mask(&enic->intr[i]);
583 		ENIC_UNLOCK(softc);
584 	}
585 
586 	for (i = scctx->isc_nrxqsets; i < scctx->isc_nrxqsets + scctx->isc_ntxqsets; i++) {
587 		snprintf(irq_name, sizeof(irq_name), "etxq%d:%d", i -
588 		    scctx->isc_nrxqsets, device_get_unit(softc->dev));
589 
590 		iflib_softirq_alloc_generic(ctx,
591 		    &enic->intr_queues[i].intr_irq, IFLIB_INTR_TX,
592 		    &enic->wq[i - scctx->isc_nrxqsets], i - scctx->isc_nrxqsets,
593 		    irq_name);
594 
595 		enic->intr[i].index = i;
596 		enic->intr[i].vdev = enic->vdev;
597 		ENIC_LOCK(softc);
598 		enic->intr[i].ctrl = vnic_dev_get_res(enic->vdev,
599 		    RES_TYPE_INTR_CTRL, i);
600 		vnic_intr_mask(&enic->intr[i]);
601 		ENIC_UNLOCK(softc);
602 	}
603 
604 	i = scctx->isc_nrxqsets + scctx->isc_ntxqsets;
605 	error = iflib_irq_alloc_generic(ctx, &softc->enic_event_intr_irq,
606 		 i + 1, IFLIB_INTR_ADMIN, enic_event_intr, softc, 0, "event");
607 	if (error) {
608 		device_printf(iflib_get_dev(ctx),
609 		    "Failed to register event interrupt handler\n");
610 		return (error);
611 	}
612 
613 	enic->intr[i].index = i;
614 	enic->intr[i].vdev = enic->vdev;
615 	ENIC_LOCK(softc);
616 	enic->intr[i].ctrl = vnic_dev_get_res(enic->vdev, RES_TYPE_INTR_CTRL,
617 	    i);
618 	vnic_intr_mask(&enic->intr[i]);
619 	ENIC_UNLOCK(softc);
620 
621 	i++;
622 	error = iflib_irq_alloc_generic(ctx, &softc->enic_err_intr_irq,
623 		   i + 1, IFLIB_INTR_ADMIN, enic_err_intr, softc, 0, "err");
624 	if (error) {
625 		device_printf(iflib_get_dev(ctx),
626 		    "Failed to register event interrupt handler\n");
627 		return (error);
628 	}
629 	enic->intr[i].index = i;
630 	enic->intr[i].vdev = enic->vdev;
631 	ENIC_LOCK(softc);
632 	enic->intr[i].ctrl = vnic_dev_get_res(enic->vdev, RES_TYPE_INTR_CTRL,
633 	    i);
634 	vnic_intr_mask(&enic->intr[i]);
635 	ENIC_UNLOCK(softc);
636 
637 	enic->intr_count = msix;
638 
639 	return (0);
640 }
641 
642 static void
enic_free_irqs(struct enic_softc * softc)643 enic_free_irqs(struct enic_softc *softc)
644 {
645 	if_softc_ctx_t	scctx;
646 
647 	struct enic    *enic;
648 	int		i;
649 
650 	scctx = softc->scctx;
651 	enic = &softc->enic;
652 
653 	for (i = 0; i < scctx->isc_nrxqsets + scctx->isc_ntxqsets; i++) {
654 		iflib_irq_free(softc->ctx, &enic->intr_queues[i].intr_irq);
655 	}
656 
657 	iflib_irq_free(softc->ctx, &softc->enic_event_intr_irq);
658 	iflib_irq_free(softc->ctx, &softc->enic_err_intr_irq);
659 	free(enic->intr_queues, M_DEVBUF);
660 	free(enic->intr, M_DEVBUF);
661 }
662 
663 static int
enic_attach_post(if_ctx_t ctx)664 enic_attach_post(if_ctx_t ctx)
665 {
666 	struct enic *enic;
667 	struct enic_softc *softc;
668 	int error = 0;
669 
670 	softc = iflib_get_softc(ctx);
671 	enic = &softc->enic;
672 
673 	enic_setup_sysctl(softc);
674 
675 	enic_init_vnic_resources(enic);
676 	enic_set_rx_coal_setting(enic);
677 	enic_setup_finish(enic);
678 
679 	ifmedia_add(softc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
680 	ifmedia_set(softc->media, IFM_ETHER | IFM_AUTO);
681 
682 	return (error);
683 }
684 
685 static int
enic_detach(if_ctx_t ctx)686 enic_detach(if_ctx_t ctx)
687 {
688 	struct enic_softc *softc;
689 	struct enic *enic;
690 
691 	softc = iflib_get_softc(ctx);
692 	enic = &softc->enic;
693 
694 	vnic_dev_notify_unset(enic->vdev);
695 
696 	enic_free_irqs(softc);
697 
698 	ENIC_LOCK(softc);
699 	vnic_dev_deinit(enic->vdev);
700 	vnic_dev_close(enic->vdev);
701 	vnic_dev_deinit_devcmd2(enic->vdev);
702 	free(softc->vdev.devcmd, M_DEVBUF);
703 	pci_disable_busmaster(softc->dev);
704 	enic_pci_mapping_free(softc);
705 	ENIC_UNLOCK(softc);
706 
707 	return 0;
708 }
709 
/*
 * ifdi_tx_queues_alloc: wire up the transmit work queues and their
 * completion queues to the descriptor rings iflib has DMA-allocated.
 * vaddrs/paddrs hold ntxqs (= 2) rings per queue set: index 0 is the
 * work queue ring, index 1 its completion ring.
 */
static int
enic_tx_queues_alloc(if_ctx_t ctx, caddr_t * vaddrs, uint64_t * paddrs,
		     int ntxqs, int ntxqsets)
{
	struct enic_softc *softc;
	int q;

	softc = iflib_get_softc(ctx);
	/* Allocate the array of transmit queues */
	softc->enic.wq = malloc(sizeof(struct vnic_wq) *
				ntxqsets, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (softc->enic.wq == NULL)
		return (ENOMEM);

	/* Initialize driver state for each transmit queue */

	/*
	 * Allocate queue state that is shared with the device.  This check
	 * and call is performed in both enic_tx_queues_alloc() and
	 * enic_rx_queues_alloc() so that we don't have to care which order
	 * iflib invokes those routines in.
	 */

	/* Record descriptor ring vaddrs and paddrs */
	ENIC_LOCK(softc);
	for (q = 0; q < ntxqsets; q++) {
		struct vnic_wq *wq;
		struct vnic_cq *cq;
		unsigned int	cq_wq;

		wq = &softc->enic.wq[q];
		cq_wq = enic_cq_wq(&softc->enic, q);
		cq = &softc->enic.cq[cq_wq];

		/* Completion ring */
		wq->vdev = softc->enic.vdev;
		wq->index = q;
		wq->ctrl = vnic_dev_get_res(softc->enic.vdev, RES_TYPE_WQ,
		    wq->index);
		/* Quiesce the hardware WQ before (re)programming its ring. */
		vnic_wq_disable(wq);

		wq->ring.desc_size = sizeof(struct wq_enet_desc);
		wq->ring.desc_count = softc->scctx->isc_ntxd[q];
		/* One descriptor is kept unused to distinguish full/empty. */
		wq->ring.desc_avail = wq->ring.desc_count - 1;
		wq->ring.last_count = wq->ring.desc_count;
		wq->head_idx = 0;
		wq->tail_idx = 0;

		wq->ring.size = wq->ring.desc_count * wq->ring.desc_size;
		wq->ring.descs = vaddrs[q * ntxqs + 0];
		wq->ring.base_addr = paddrs[q * ntxqs + 0];

		/* Command ring */
		cq->vdev = softc->enic.vdev;
		cq->index = cq_wq;
		cq->ctrl = vnic_dev_get_res(softc->enic.vdev,
					    RES_TYPE_CQ, cq->index);
		cq->ring.desc_size = sizeof(struct cq_enet_wq_desc);
		cq->ring.desc_count = softc->scctx->isc_ntxd[q];
		cq->ring.desc_avail = cq->ring.desc_count - 1;

		cq->ring.size = cq->ring.desc_count * cq->ring.desc_size;
		cq->ring.descs = vaddrs[q * ntxqs + 1];
		cq->ring.base_addr = paddrs[q * ntxqs + 1];

	}

	ENIC_UNLOCK(softc);

	return (0);
}
781 
782 
783 
784 static int
enic_rx_queues_alloc(if_ctx_t ctx,caddr_t * vaddrs,uint64_t * paddrs,int nrxqs,int nrxqsets)785 enic_rx_queues_alloc(if_ctx_t ctx, caddr_t * vaddrs, uint64_t * paddrs,
786 		     int nrxqs, int nrxqsets)
787 {
788 	struct enic_softc *softc;
789 	int q;
790 
791 	softc = iflib_get_softc(ctx);
792 	/* Allocate the array of receive queues */
793 	softc->enic.rq = malloc(sizeof(struct vnic_rq) * nrxqsets, M_DEVBUF,
794 	    M_NOWAIT | M_ZERO);
795 	if (softc->enic.rq == NULL)
796 		return (ENOMEM);
797 
798 	/* Initialize driver state for each receive queue */
799 
800 	/*
801 	 * Allocate queue state that is shared with the device.  This check
802 	 * and call is performed in both enic_tx_queues_alloc() and
803 	 * enic_rx_queues_alloc() so that we don't have to care which order
804 	 * iflib invokes those routines in.
805 	 */
806 
807 	/* Record descriptor ring vaddrs and paddrs */
808 	ENIC_LOCK(softc);
809 	for (q = 0; q < nrxqsets; q++) {
810 		struct vnic_rq *rq;
811 		struct vnic_cq *cq;
812 		unsigned int	cq_rq;
813 
814 		rq = &softc->enic.rq[q];
815 		cq_rq = enic_cq_rq(&softc->enic, q);
816 		cq = &softc->enic.cq[cq_rq];
817 
818 		/* Completion ring */
819 		cq->vdev = softc->enic.vdev;
820 		cq->index = cq_rq;
821 		cq->ctrl = vnic_dev_get_res(softc->enic.vdev, RES_TYPE_CQ,
822 		    cq->index);
823 		cq->ring.desc_size = sizeof(struct cq_enet_wq_desc);
824 		cq->ring.desc_count = softc->scctx->isc_nrxd[1];
825 		cq->ring.desc_avail = cq->ring.desc_count - 1;
826 
827 		cq->ring.size = cq->ring.desc_count * cq->ring.desc_size;
828 		cq->ring.descs = vaddrs[q * nrxqs + 0];
829 		cq->ring.base_addr = paddrs[q * nrxqs + 0];
830 
831 		/* Command ring(s) */
832 		rq->vdev = softc->enic.vdev;
833 
834 		rq->index = q;
835 		rq->ctrl = vnic_dev_get_res(softc->enic.vdev,
836 					    RES_TYPE_RQ, rq->index);
837 		vnic_rq_disable(rq);
838 
839 		rq->ring.desc_size = sizeof(struct rq_enet_desc);
840 		rq->ring.desc_count = softc->scctx->isc_nrxd[0];
841 		rq->ring.desc_avail = rq->ring.desc_count - 1;
842 
843 		rq->ring.size = rq->ring.desc_count * rq->ring.desc_size;
844 		rq->ring.descs = vaddrs[q * nrxqs + 1];
845 		rq->ring.base_addr = paddrs[q * nrxqs + 1];
846 		rq->need_initial_post = true;
847 	}
848 
849 	ENIC_UNLOCK(softc);
850 
851 	return (0);
852 }
853 
854 static void
enic_queues_free(if_ctx_t ctx)855 enic_queues_free(if_ctx_t ctx)
856 {
857 	struct enic_softc *softc;
858 	softc = iflib_get_softc(ctx);
859 
860 	free(softc->enic.rq, M_DEVBUF);
861 	free(softc->enic.wq, M_DEVBUF);
862 	free(softc->enic.cq, M_DEVBUF);
863 }
864 
865 static int
enic_rxq_intr(void * rxq)866 enic_rxq_intr(void *rxq)
867 {
868 	struct vnic_rq *rq;
869 	if_t ifp;
870 
871 	rq = (struct vnic_rq *)rxq;
872 	ifp = iflib_get_ifp(rq->vdev->softc->ctx);
873 	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
874 		return (FILTER_HANDLED);
875 
876 	return (FILTER_SCHEDULE_THREAD);
877 }
878 
879 static int
enic_event_intr(void * vsc)880 enic_event_intr(void *vsc)
881 {
882 	struct enic_softc *softc;
883 	struct enic    *enic;
884 	uint32_t mtu;
885 
886 	softc = vsc;
887 	enic = &softc->enic;
888 
889 	mtu = vnic_dev_mtu(enic->vdev);
890 	if (mtu && mtu != enic->port_mtu) {
891 		enic->port_mtu = mtu;
892 	}
893 
894 	enic_link_status(softc);
895 
896 	return (FILTER_HANDLED);
897 }
898 
899 static int
enic_err_intr(void * vsc)900 enic_err_intr(void *vsc)
901 {
902 	struct enic_softc *softc;
903 
904 	softc = vsc;
905 
906 	enic_stop(softc->ctx);
907 	enic_init(softc->ctx);
908 
909 	return (FILTER_HANDLED);
910 }
911 
912 static void
enic_stop(if_ctx_t ctx)913 enic_stop(if_ctx_t ctx)
914 {
915 	struct enic_softc *softc;
916 	struct enic    *enic;
917 	if_softc_ctx_t	scctx;
918 	unsigned int	index;
919 	struct vnic_wq *wq;
920 	struct vnic_rq *rq;
921 	struct vnic_cq *cq;
922 	unsigned int	cq_wq, cq_rq;
923 
924 
925 	softc = iflib_get_softc(ctx);
926 	scctx = softc->scctx;
927 	enic = &softc->enic;
928 
929 	if (softc->stopped)
930 		return;
931 	softc->link_active = 0;
932 	softc->stopped = 1;
933 
934 	enic_dev_disable(enic);
935 
936 	for (index = 0; index < scctx->isc_ntxqsets; index++) {
937 		enic_stop_wq(enic, index);
938 		vnic_wq_clean(&enic->wq[index]);
939 		vnic_cq_clean(&enic->cq[enic_cq_rq(enic, index)]);
940 
941 		wq = &softc->enic.wq[index];
942 		wq->ring.desc_avail = wq->ring.desc_count - 1;
943 		wq->ring.last_count = wq->ring.desc_count;
944 		wq->head_idx = 0;
945 		wq->tail_idx = 0;
946 
947 		cq_wq = enic_cq_wq(&softc->enic, index);
948 		cq = &softc->enic.cq[cq_wq];
949 		cq->ring.desc_avail = cq->ring.desc_count - 1;
950 	}
951 
952 	for (index = 0; index < scctx->isc_nrxqsets; index++) {
953 		enic_stop_rq(enic, index);
954 		vnic_rq_clean(&enic->rq[index]);
955 		vnic_cq_clean(&enic->cq[enic_cq_wq(enic, index)]);
956 
957 		rq = &softc->enic.rq[index];
958 		cq_rq = enic_cq_rq(&softc->enic, index);
959 		cq = &softc->enic.cq[cq_rq];
960 
961 		cq->ring.desc_avail = cq->ring.desc_count - 1;
962 		rq->ring.desc_avail = rq->ring.desc_count - 1;
963 		rq->need_initial_post = true;
964 	}
965 
966 	for (index = 0; index < scctx->isc_vectors; index++) {
967 		vnic_intr_clean(&enic->intr[index]);
968 	}
969 }
970 
/*
 * ifdi_init: bring the interface up.  Re-programs vNIC resources, prepares
 * and starts every TX work queue and RX receive queue, programs the current
 * MAC address, then enables the device and refreshes link state.
 */
static void
enic_init(if_ctx_t ctx)
{
	struct enic_softc *softc;
	struct enic *enic;
	if_softc_ctx_t scctx;
	unsigned int index;

	softc = iflib_get_softc(ctx);
	scctx = softc->scctx;
	enic = &softc->enic;

	enic_init_vnic_resources(enic);

	/* All TX queues are prepared before any of them is started. */
	for (index = 0; index < scctx->isc_ntxqsets; index++)
		enic_prep_wq_for_simple_tx(&softc->enic, index);

	for (index = 0; index < scctx->isc_ntxqsets; index++)
		enic_start_wq(enic, index);

	for (index = 0; index < scctx->isc_nrxqsets; index++)
		enic_start_rq(enic, index);

	/* Use the current MAC address. */
	bcopy(if_getlladdr(softc->ifp), softc->lladdr, ETHER_ADDR_LEN);
	enic_set_lladdr(softc);

	ENIC_LOCK(softc);
	vnic_dev_enable_wait(enic->vdev);
	ENIC_UNLOCK(softc);

	/* Clear the stop latch so a future enic_stop() runs the teardown. */
	softc->stopped = 0;

	enic_link_status(softc);
}
1007 
1008 static void
enic_del_mcast(struct enic_softc * softc)1009 enic_del_mcast(struct enic_softc *softc) {
1010 	struct enic *enic;
1011 	int i;
1012 
1013 	enic = &softc->enic;
1014 	for (i=0; i < softc->mc_count; i++) {
1015 		vnic_dev_del_addr(enic->vdev, &softc->mta[i * ETHER_ADDR_LEN]);
1016 	}
1017 	softc->multicast = 0;
1018 	softc->mc_count = 0;
1019 }
1020 
1021 static void
enic_add_mcast(struct enic_softc * softc)1022 enic_add_mcast(struct enic_softc *softc) {
1023 	struct enic *enic;
1024 	int i;
1025 
1026 	enic = &softc->enic;
1027 	for (i=0; i < softc->mc_count; i++) {
1028 		vnic_dev_add_addr(enic->vdev, &softc->mta[i * ETHER_ADDR_LEN]);
1029 	}
1030 	softc->multicast = 1;
1031 }
1032 
1033 static u_int
enic_copy_maddr(void * arg,struct sockaddr_dl * sdl,u_int idx)1034 enic_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int idx)
1035 {
1036 	uint8_t *mta = arg;
1037 
1038 	if (idx == ENIC_MAX_MULTICAST_ADDRESSES)
1039 		return (0);
1040 
1041 	bcopy(LLADDR(sdl), &mta[idx * ETHER_ADDR_LEN], ETHER_ADDR_LEN);
1042 	return (1);
1043 }
1044 
1045 static void
enic_multi_set(if_ctx_t ctx)1046 enic_multi_set(if_ctx_t ctx)
1047 {
1048 	if_t ifp;
1049 	struct enic_softc *softc;
1050 	u_int count;
1051 
1052 	softc = iflib_get_softc(ctx);
1053 	ifp = iflib_get_ifp(ctx);
1054 
1055 	ENIC_LOCK(softc);
1056 	enic_del_mcast(softc);
1057 	count = if_foreach_llmaddr(ifp, enic_copy_maddr, softc->mta);
1058 	softc->mc_count = count;
1059 	enic_add_mcast(softc);
1060 	ENIC_UNLOCK(softc);
1061 
1062 	if (if_getflags(ifp) & IFF_PROMISC) {
1063 		softc->promisc = 1;
1064 	} else {
1065 		softc->promisc = 0;
1066 	}
1067 	if (if_getflags(ifp) & IFF_ALLMULTI) {
1068 		softc->allmulti = 1;
1069 	} else {
1070 		softc->allmulti = 0;
1071 	}
1072 	enic_update_packet_filter(&softc->enic);
1073 }
1074 
1075 static int
enic_mtu_set(if_ctx_t ctx,uint32_t mtu)1076 enic_mtu_set(if_ctx_t ctx, uint32_t mtu)
1077 {
1078 	struct enic_softc *softc;
1079 	struct enic *enic;
1080 	if_softc_ctx_t scctx = iflib_get_softc_ctx(ctx);
1081 
1082 	softc = iflib_get_softc(ctx);
1083 	enic = &softc->enic;
1084 
1085 	enic_stop(softc->ctx);
1086 	if (mtu > enic->port_mtu){
1087 		return (EINVAL);
1088 	}
1089 
1090 	enic->config.mtu = mtu;
1091 	scctx->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1092 	enic_init(softc->ctx);
1093 
1094 	return (0);
1095 }
1096 
1097 static void
enic_media_status(if_ctx_t ctx,struct ifmediareq * ifmr)1098 enic_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
1099 {
1100 	struct enic_softc *softc;
1101 	struct ifmedia_entry *next;
1102 	uint32_t speed;
1103 	uint64_t target_baudrate;
1104 
1105 	softc = iflib_get_softc(ctx);
1106 
1107 	ifmr->ifm_status = IFM_AVALID;
1108 	ifmr->ifm_active = IFM_ETHER;
1109 
1110 	if (enic_link_is_up(softc) != 0) {
1111 		ENIC_LOCK(softc);
1112 		speed = vnic_dev_port_speed(&softc->vdev);
1113 		ENIC_UNLOCK(softc);
1114 		target_baudrate = 1000ull * speed;
1115 		LIST_FOREACH(next, &(iflib_get_media(ctx)->ifm_list), ifm_list) {
1116 			if (ifmedia_baudrate(next->ifm_media) == target_baudrate) {
1117 				ifmr->ifm_active |= next->ifm_media;
1118 			}
1119 		}
1120 
1121 		ifmr->ifm_status |= IFM_ACTIVE;
1122 		ifmr->ifm_active |= IFM_AUTO;
1123 	} else
1124 		ifmr->ifm_active |= IFM_NONE;
1125 }
1126 
/* Manual media selection is not supported by this adapter. */
static int
enic_media_change(if_ctx_t ctx)
{
	return (ENODEV);
}
1132 
1133 static int
enic_promisc_set(if_ctx_t ctx,int flags)1134 enic_promisc_set(if_ctx_t ctx, int flags)
1135 {
1136 	if_t ifp;
1137 	struct enic_softc *softc;
1138 
1139 	softc = iflib_get_softc(ctx);
1140 	ifp = iflib_get_ifp(ctx);
1141 
1142 	if (if_getflags(ifp) & IFF_PROMISC) {
1143 		softc->promisc = 1;
1144 	} else {
1145 		softc->promisc = 0;
1146 	}
1147 	if (if_getflags(ifp) & IFF_ALLMULTI) {
1148 		softc->allmulti = 1;
1149 	} else {
1150 		softc->allmulti = 0;
1151 	}
1152 	enic_update_packet_filter(&softc->enic);
1153 
1154 	return (0);
1155 }
1156 
1157 static uint64_t
enic_get_counter(if_ctx_t ctx,ift_counter cnt)1158 enic_get_counter(if_ctx_t ctx, ift_counter cnt) {
1159 	if_t ifp = iflib_get_ifp(ctx);
1160 
1161 	if (cnt < IFCOUNTERS)
1162 		return if_get_counter_default(ifp, cnt);
1163 
1164 	return (0);
1165 }
1166 
1167 static void
enic_update_admin_status(if_ctx_t ctx)1168 enic_update_admin_status(if_ctx_t ctx)
1169 {
1170 	struct enic_softc *softc;
1171 	softc = iflib_get_softc(ctx);
1172 
1173 	enic_link_status(softc);
1174 }
1175 
1176 static void
enic_txq_timer(if_ctx_t ctx,uint16_t qid)1177 enic_txq_timer(if_ctx_t ctx, uint16_t qid)
1178 {
1179 
1180 	struct enic_softc *softc;
1181 	struct enic *enic;
1182 	struct vnic_stats *stats;
1183 	int ret;
1184 
1185 	softc = iflib_get_softc(ctx);
1186 	enic = &softc->enic;
1187 
1188 	ENIC_LOCK(softc);
1189 	ret = vnic_dev_stats_dump(enic->vdev, &stats);
1190 	ENIC_UNLOCK(softc);
1191 	if (ret) {
1192 		dev_err(enic, "Error in getting stats\n");
1193 	}
1194 }
1195 
/* Return non-zero when the adapter reports link up. */
static int
enic_link_is_up(struct enic_softc *softc)
{
	return (vnic_dev_link_status(&softc->vdev) == 1);
}
1201 
1202 static void
enic_link_status(struct enic_softc * softc)1203 enic_link_status(struct enic_softc *softc)
1204 {
1205 	if_ctx_t ctx;
1206 	uint64_t speed;
1207 	int link;
1208 
1209 	ctx = softc->ctx;
1210 	link = enic_link_is_up(softc);
1211 	speed = IF_Gbps(10);
1212 
1213 	ENIC_LOCK(softc);
1214 	speed = vnic_dev_port_speed(&softc->vdev);
1215 	ENIC_UNLOCK(softc);
1216 
1217 	if (link != 0 && softc->link_active == 0) {
1218 		softc->link_active = 1;
1219 		iflib_link_state_change(ctx, LINK_STATE_UP, speed);
1220 	} else if (link == 0 && softc->link_active != 0) {
1221 		softc->link_active = 0;
1222 		iflib_link_state_change(ctx, LINK_STATE_DOWN, speed);
1223 	}
1224 }
1225 
1226 static void
enic_set_lladdr(struct enic_softc * softc)1227 enic_set_lladdr(struct enic_softc *softc)
1228 {
1229 	struct enic *enic;
1230 	enic = &softc->enic;
1231 
1232 	ENIC_LOCK(softc);
1233 	vnic_dev_add_addr(enic->vdev, softc->lladdr);
1234 	ENIC_UNLOCK(softc);
1235 }
1236 
1237 
1238 static void
enic_setup_txq_sysctl(struct vnic_wq * wq,int i,struct sysctl_ctx_list * ctx,struct sysctl_oid_list * child)1239 enic_setup_txq_sysctl(struct vnic_wq *wq, int i, struct sysctl_ctx_list *ctx,
1240     struct sysctl_oid_list *child)
1241 {
1242 	struct sysctl_oid *txsnode;
1243 	struct sysctl_oid_list *txslist;
1244 	struct vnic_stats *stats = wq[i].vdev->stats;
1245 
1246 	txsnode = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "hstats",
1247 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Host Statistics");
1248 	txslist = SYSCTL_CHILDREN(txsnode);
1249 
1250 	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_frames_ok", CTLFLAG_RD,
1251 	   &stats->tx.tx_frames_ok, "TX Frames OK");
1252 	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_unicast_frames_ok", CTLFLAG_RD,
1253 	   &stats->tx.tx_unicast_frames_ok, "TX unicast frames OK");
1254 	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_multicast_frames_ok", CTLFLAG_RD,
1255 	    &stats->tx.tx_multicast_frames_ok, "TX multicast framse OK");
1256 	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_broadcast_frames_ok", CTLFLAG_RD,
1257 	    &stats->tx.tx_broadcast_frames_ok, "TX Broadcast frames OK");
1258 	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_bytes_ok", CTLFLAG_RD,
1259 	    &stats->tx.tx_bytes_ok, "TX bytes OK ");
1260 	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_unicast_bytes_ok", CTLFLAG_RD,
1261 	    &stats->tx.tx_unicast_bytes_ok, "TX unicast bytes OK");
1262 	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_multicast_bytes_ok", CTLFLAG_RD,
1263 	    &stats->tx.tx_multicast_bytes_ok, "TX multicast bytes OK");
1264 	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_broadcast_bytes_ok", CTLFLAG_RD,
1265 	    &stats->tx.tx_broadcast_bytes_ok, "TX broadcast bytes OK");
1266 	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_drops", CTLFLAG_RD,
1267 	    &stats->tx.tx_drops, "TX drops");
1268 	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_errors", CTLFLAG_RD,
1269 	    &stats->tx.tx_errors, "TX errors");
1270 	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_tso", CTLFLAG_RD,
1271 	    &stats->tx.tx_tso, "TX TSO");
1272 }
1273 
/*
 * Attach read-only sysctl nodes for the RX host statistics of queue i.
 * The counters live in the vnic_dev stats area shared by all queues.
 */
static void
enic_setup_rxq_sysctl(struct vnic_rq *rq, int i, struct sysctl_ctx_list *ctx,
    struct sysctl_oid_list *child)
{
	struct sysctl_oid *rxsnode;
	struct sysctl_oid_list *rxslist;
	struct vnic_stats *stats = rq[i].vdev->stats;

	rxsnode = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "hstats",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Host Statistics");
	rxslist = SYSCTL_CHILDREN(rxsnode);

	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_frames_ok", CTLFLAG_RD,
	    &stats->rx.rx_frames_ok, "RX Frames OK");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_frames_total", CTLFLAG_RD,
	    &stats->rx.rx_frames_total, "RX frames total");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_unicast_frames_ok", CTLFLAG_RD,
	    &stats->rx.rx_unicast_frames_ok, "RX unicast frames ok");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_multicast_frames_ok", CTLFLAG_RD,
	    &stats->rx.rx_multicast_frames_ok, "RX multicast Frames ok");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_broadcast_frames_ok", CTLFLAG_RD,
	    &stats->rx.rx_broadcast_frames_ok, "RX broadcast frames ok");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_bytes_ok", CTLFLAG_RD,
	    &stats->rx.rx_bytes_ok, "RX bytes ok");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_unicast_bytes_ok", CTLFLAG_RD,
	    &stats->rx.rx_unicast_bytes_ok, "RX unicast bytes ok");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_multicast_bytes_ok", CTLFLAG_RD,
	    &stats->rx.rx_multicast_bytes_ok, "RX multicast bytes ok");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_broadcast_bytes_ok", CTLFLAG_RD,
	    &stats->rx.rx_broadcast_bytes_ok, "RX broadcast bytes ok");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_drop", CTLFLAG_RD,
	    &stats->rx.rx_drop, "RX drop");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_errors", CTLFLAG_RD,
	    &stats->rx.rx_errors, "RX errors");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_rss", CTLFLAG_RD,
	    &stats->rx.rx_rss, "RX rss");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_crc_errors", CTLFLAG_RD,
	    &stats->rx.rx_crc_errors, "RX crc errors");
	/* Frame-size histogram buckets. */
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_frames_64", CTLFLAG_RD,
	    &stats->rx.rx_frames_64, "RX frames 64");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_frames_127", CTLFLAG_RD,
	    &stats->rx.rx_frames_127, "RX frames 127");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_frames_255", CTLFLAG_RD,
	    &stats->rx.rx_frames_255, "RX frames 255");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_frames_511", CTLFLAG_RD,
	    &stats->rx.rx_frames_511, "RX frames 511");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_frames_1023", CTLFLAG_RD,
	    &stats->rx.rx_frames_1023, "RX frames 1023");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_frames_1518", CTLFLAG_RD,
	    &stats->rx.rx_frames_1518, "RX frames 1518");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_frames_to_max", CTLFLAG_RD,
	    &stats->rx.rx_frames_to_max, "RX frames to max");
}
1327 
/*
 * Register the host statistics sysctls.  Only queue 0 is used: the
 * counters come from wq/rq[i].vdev->stats, which appears to be the
 * device-wide stats area rather than per-queue state.
 */
static void
enic_setup_queue_sysctl(struct enic_softc *softc, struct sysctl_ctx_list *ctx,
    struct sysctl_oid_list *child)
{
	enic_setup_txq_sysctl(softc->enic.wq, 0, ctx, child);
	enic_setup_rxq_sysctl(softc->enic.rq, 0, ctx, child);
}
1335 
1336 static void
enic_setup_sysctl(struct enic_softc * softc)1337 enic_setup_sysctl(struct enic_softc *softc)
1338 {
1339 	device_t dev;
1340 	struct sysctl_ctx_list *ctx;
1341 	struct sysctl_oid *tree;
1342 	struct sysctl_oid_list *child;
1343 
1344 	dev = softc->dev;
1345 	ctx = device_get_sysctl_ctx(dev);
1346 	tree = device_get_sysctl_tree(dev);
1347 	child = SYSCTL_CHILDREN(tree);
1348 
1349 	enic_setup_queue_sysctl(softc, ctx, child);
1350 }
1351 
1352 static void
enic_enable_intr(struct enic_softc * softc,int irq)1353 enic_enable_intr(struct enic_softc *softc, int irq)
1354 {
1355 	struct enic *enic = &softc->enic;
1356 
1357 	vnic_intr_unmask(&enic->intr[irq]);
1358 	vnic_intr_return_all_credits(&enic->intr[irq]);
1359 }
1360 
1361 static void
enic_disable_intr(struct enic_softc * softc,int irq)1362 enic_disable_intr(struct enic_softc *softc, int irq)
1363 {
1364 	struct enic *enic = &softc->enic;
1365 
1366 	vnic_intr_mask(&enic->intr[irq]);
1367 	vnic_intr_masked(&enic->intr[irq]);	/* flush write */
1368 }
1369 
1370 static int
enic_tx_queue_intr_enable(if_ctx_t ctx,uint16_t qid)1371 enic_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
1372 {
1373 	struct enic_softc *softc;
1374 	if_softc_ctx_t scctx;
1375 
1376 	softc = iflib_get_softc(ctx);
1377 	scctx = softc->scctx;
1378 
1379 	enic_enable_intr(softc, qid + scctx->isc_nrxqsets);
1380 
1381 	return 0;
1382 }
1383 
1384 static int
enic_rx_queue_intr_enable(if_ctx_t ctx,uint16_t qid)1385 enic_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
1386 {
1387 	struct enic_softc *softc;
1388 
1389 	softc = iflib_get_softc(ctx);
1390 	enic_enable_intr(softc, qid);
1391 
1392 	return 0;
1393 }
1394 
1395 static void
enic_intr_enable_all(if_ctx_t ctx)1396 enic_intr_enable_all(if_ctx_t ctx)
1397 {
1398 	struct enic_softc *softc;
1399 	if_softc_ctx_t scctx;
1400 	int i;
1401 
1402 	softc = iflib_get_softc(ctx);
1403 	scctx = softc->scctx;
1404 
1405 	for (i = 0; i < scctx->isc_vectors; i++) {
1406 		enic_enable_intr(softc, i);
1407 	}
1408 }
1409 
1410 static void
enic_intr_disable_all(if_ctx_t ctx)1411 enic_intr_disable_all(if_ctx_t ctx)
1412 {
1413 	struct enic_softc *softc;
1414 	if_softc_ctx_t scctx;
1415 	int i;
1416 
1417 	softc = iflib_get_softc(ctx);
1418 	scctx = softc->scctx;
1419 	/*
1420 	 * iflib may invoke this routine before enic_attach_post() has run,
1421 	 * which is before the top level shared data area is initialized and
1422 	 * the device made aware of it.
1423 	 */
1424 
1425 	for (i = 0; i < scctx->isc_vectors; i++) {
1426 		enic_disable_intr(softc, i);
1427 	}
1428 }
1429 
1430 static int
enic_dev_open(struct enic * enic)1431 enic_dev_open(struct enic *enic)
1432 {
1433 	int err;
1434 	int flags = CMD_OPENF_IG_DESCCACHE;
1435 
1436 	err = enic_dev_wait(enic->vdev, vnic_dev_open,
1437 			    vnic_dev_open_done, flags);
1438 	if (err)
1439 		dev_err(enic_get_dev(enic),
1440 			"vNIC device open failed, err %d\n", err);
1441 
1442 	return err;
1443 }
1444 
1445 static int
enic_dev_init(struct enic * enic)1446 enic_dev_init(struct enic *enic)
1447 {
1448 	int err;
1449 
1450 	vnic_dev_intr_coal_timer_info_default(enic->vdev);
1451 
1452 	/*
1453 	 * Get vNIC configuration
1454 	 */
1455 	err = enic_get_vnic_config(enic);
1456 	if (err) {
1457 		dev_err(dev, "Get vNIC configuration failed, aborting\n");
1458 		return err;
1459 	}
1460 
1461 	/* Get available resource counts */
1462 	enic_get_res_counts(enic);
1463 
1464 	/* Queue counts may be zeros. rte_zmalloc returns NULL in that case. */
1465 	enic->intr_queues = malloc(sizeof(*enic->intr_queues) *
1466 	    enic->conf_intr_count, M_DEVBUF, M_NOWAIT | M_ZERO);
1467 
1468 	vnic_dev_set_reset_flag(enic->vdev, 0);
1469 	enic->max_flow_counter = -1;
1470 
1471 	/* set up link status checking */
1472 	vnic_dev_notify_set(enic->vdev, -1);	/* No Intr for notify */
1473 
1474 	enic->overlay_offload = false;
1475 	if (enic->disable_overlay && enic->vxlan) {
1476 		/*
1477 		 * Explicitly disable overlay offload as the setting is
1478 		 * sticky, and resetting vNIC does not disable it.
1479 		 */
1480 		if (vnic_dev_overlay_offload_ctrl(enic->vdev,
1481 		    OVERLAY_FEATURE_VXLAN, OVERLAY_OFFLOAD_DISABLE)) {
1482 			dev_err(enic, "failed to disable overlay offload\n");
1483 		} else {
1484 			dev_info(enic, "Overlay offload is disabled\n");
1485 		}
1486 	}
1487 	if (!enic->disable_overlay && enic->vxlan &&
1488 	/* 'VXLAN feature' enables VXLAN, NVGRE, and GENEVE. */
1489 	    vnic_dev_overlay_offload_ctrl(enic->vdev,
1490 	    OVERLAY_FEATURE_VXLAN, OVERLAY_OFFLOAD_ENABLE) == 0) {
1491 		enic->overlay_offload = true;
1492 		enic->vxlan_port = ENIC_DEFAULT_VXLAN_PORT;
1493 		dev_info(enic, "Overlay offload is enabled\n");
1494 		/*
1495 		 * Reset the vxlan port to the default, as the NIC firmware
1496 		 * does not reset it automatically and keeps the old setting.
1497 		 */
1498 		if (vnic_dev_overlay_offload_cfg(enic->vdev,
1499 		   OVERLAY_CFG_VXLAN_PORT_UPDATE, ENIC_DEFAULT_VXLAN_PORT)) {
1500 			dev_err(enic, "failed to update vxlan port\n");
1501 			return (EINVAL);
1502 		}
1503 	}
1504 	return 0;
1505 }
1506 
1507 static void    *
enic_alloc_consistent(void * priv,size_t size,bus_addr_t * dma_handle,struct iflib_dma_info * res,u8 * name)1508 enic_alloc_consistent(void *priv, size_t size, bus_addr_t * dma_handle,
1509     struct iflib_dma_info *res, u8 * name)
1510 {
1511 	void	       *vaddr;
1512 	*dma_handle = 0;
1513 	struct enic    *enic = (struct enic *)priv;
1514 	int		rz;
1515 
1516 	rz = iflib_dma_alloc(enic->softc->ctx, size, res, BUS_DMA_NOWAIT);
1517 	if (rz) {
1518 		pr_err("%s : Failed to allocate memory requested for %s\n",
1519 		    __func__, name);
1520 		return NULL;
1521 	}
1522 
1523 	vaddr = res->idi_vaddr;
1524 	*dma_handle = res->idi_paddr;
1525 
1526 	return vaddr;
1527 }
1528 
/*
 * Free DMA memory obtained from enic_alloc_consistent().  Only the
 * iflib DMA descriptor is needed; size, vaddr and dma_handle are
 * unused but required by the callback signature.
 */
static void
enic_free_consistent(void *priv, size_t size, void *vaddr,
    bus_addr_t dma_handle, struct iflib_dma_info *res)
{
	iflib_dma_free(res);
}
1535 
1536 static int
enic_pci_mapping(struct enic_softc * softc)1537 enic_pci_mapping(struct enic_softc *softc)
1538 {
1539 	int rc;
1540 
1541 	rc = enic_map_bar(softc, &softc->mem, 0, true);
1542 	if (rc)
1543 		return rc;
1544 
1545 	rc = enic_map_bar(softc, &softc->io, 2, false);
1546 
1547 	return rc;
1548 }
1549 
1550 static void
enic_pci_mapping_free(struct enic_softc * softc)1551 enic_pci_mapping_free(struct enic_softc *softc)
1552 {
1553 	if (softc->mem.res != NULL)
1554 		bus_release_resource(softc->dev, SYS_RES_MEMORY,
1555 				     softc->mem.rid, softc->mem.res);
1556 	softc->mem.res = NULL;
1557 
1558 	if (softc->io.res != NULL)
1559 		bus_release_resource(softc->dev, SYS_RES_MEMORY,
1560 				     softc->io.rid, softc->io.res);
1561 	softc->io.res = NULL;
1562 }
1563 
/*
 * Start an asynchronous device command and poll for completion,
 * sleeping 1 ms between polls, for at most two seconds.
 */
static int
enic_dev_wait(struct vnic_dev *vdev, int (*start) (struct vnic_dev *, int),
    int (*finished) (struct vnic_dev *, int *), int arg)
{
	int done, err, i;

	err = start(vdev, arg);
	if (err != 0)
		return (err);

	for (i = 0; i < 2000; i++) {
		err = finished(vdev, &done);
		if (err != 0)
			return (err);
		if (done != 0)
			return (0);
		usleep(1000);
	}

	return (ETIMEDOUT);
}
1587 
1588 static int
enic_map_bar(struct enic_softc * softc,struct enic_bar_info * bar,int bar_num,bool shareable)1589 enic_map_bar(struct enic_softc *softc, struct enic_bar_info *bar, int bar_num,
1590     bool shareable)
1591 {
1592 	uint32_t flag;
1593 
1594 	if (bar->res != NULL) {
1595 		device_printf(softc->dev, "Bar %d already mapped\n", bar_num);
1596 		return (EDOOFUS);
1597 	}
1598 
1599 	bar->rid = PCIR_BAR(bar_num);
1600 	flag = RF_ACTIVE;
1601 	if (shareable)
1602 		flag |= RF_SHAREABLE;
1603 
1604 	if ((bar->res = bus_alloc_resource_any(softc->dev,
1605 	   SYS_RES_MEMORY, &bar->rid, flag)) == NULL) {
1606 		device_printf(softc->dev,
1607 			      "PCI BAR%d mapping failure\n", bar_num);
1608 		return (ENXIO);
1609 	}
1610 	bar->tag = rman_get_bustag(bar->res);
1611 	bar->handle = rman_get_bushandle(bar->res);
1612 	bar->size = rman_get_size(bar->res);
1613 
1614 	return 0;
1615 }
1616 
/*
 * (Re)initialize every vNIC resource: allocate interrupt vectors, then
 * clean and re-init each RX queue, TX queue and their completion queues.
 * Called from enic_init() before the queues are started.
 */
void
enic_init_vnic_resources(struct enic *enic)
{
	unsigned int error_interrupt_enable = 1;
	unsigned int error_interrupt_offset = 0;
	unsigned int rxq_interrupt_enable = 0;
	unsigned int rxq_interrupt_offset = ENICPMD_RXQ_INTR_OFFSET;
	unsigned int txq_interrupt_enable = 0;
	unsigned int txq_interrupt_offset;
	unsigned int index = 0;
	unsigned int cq_idx;
	if_softc_ctx_t scctx;

	scctx = enic->softc->scctx;

	/* RX completions interrupt; TX completion interrupts stay off. */
	rxq_interrupt_enable = 1;
	txq_interrupt_enable = 0;

	/* RX vectors occupy [0, nrxqsets); TX vectors follow them. */
	rxq_interrupt_offset = 0;
	txq_interrupt_offset = scctx->isc_nrxqsets;

	for (index = 0; index < enic->intr_count; index++) {
		vnic_intr_alloc(enic->vdev, &enic->intr[index], index);
	}

	for (index = 0; index < scctx->isc_nrxqsets; index++) {
		cq_idx = enic_cq_rq(enic, index);

		vnic_rq_clean(&enic->rq[index]);
		vnic_rq_init(&enic->rq[index], cq_idx, error_interrupt_enable,
		    error_interrupt_offset);

		vnic_cq_clean(&enic->cq[cq_idx]);
		vnic_cq_init(&enic->cq[cq_idx],
		    0 /* flow_control_enable */ ,
		    1 /* color_enable */ ,
		    0 /* cq_head */ ,
		    0 /* cq_tail */ ,
		    1 /* cq_tail_color */ ,
		    rxq_interrupt_enable,
		    1 /* cq_entry_enable */ ,
		    0 /* cq_message_enable */ ,
		    rxq_interrupt_offset,
		    0 /* cq_message_addr */ );
		/* Each RX CQ gets its own interrupt vector. */
		if (rxq_interrupt_enable)
			rxq_interrupt_offset++;
	}

	for (index = 0; index < scctx->isc_ntxqsets; index++) {
		cq_idx = enic_cq_wq(enic, index);
		vnic_wq_clean(&enic->wq[index]);
		vnic_wq_init(&enic->wq[index], cq_idx, error_interrupt_enable,
		    error_interrupt_offset);
		/* Compute unsupported ol flags for enic_prep_pkts() */
		enic->wq[index].tx_offload_notsup_mask = 0;

		vnic_cq_clean(&enic->cq[cq_idx]);
		vnic_cq_init(&enic->cq[cq_idx],
		   0 /* flow_control_enable */ ,
		   1 /* color_enable */ ,
		   0 /* cq_head */ ,
		   0 /* cq_tail */ ,
		   1 /* cq_tail_color */ ,
		   txq_interrupt_enable,
		   1 /* cq_entry_enable */ ,
		   0 /* cq_message_enable */ ,
		   txq_interrupt_offset,
		   0 /* (u64)enic->wq[index].cqmsg_rz->iova */ );

	}

	/* 125 usec is the coalescing timer tick; mask on assertion. */
	for (index = 0; index < enic->intr_count; index++) {
		vnic_intr_init(&enic->intr[index], 125,
		    enic->config.intr_timer_type, /* mask_on_assertion */ 1);
	}
}
1693 
1694 static void
enic_update_packet_filter(struct enic * enic)1695 enic_update_packet_filter(struct enic *enic)
1696 {
1697 	struct enic_softc *softc = enic->softc;
1698 
1699 	ENIC_LOCK(softc);
1700 	vnic_dev_packet_filter(enic->vdev,
1701 	    softc->directed,
1702 	    softc->multicast,
1703 	    softc->broadcast,
1704 	    softc->promisc,
1705 	    softc->allmulti);
1706 	ENIC_UNLOCK(softc);
1707 }
1708 
1709 static bool
enic_if_needs_restart(if_ctx_t ctx,enum iflib_restart_event event)1710 enic_if_needs_restart(if_ctx_t ctx, enum iflib_restart_event event)
1711 {
1712 	switch (event) {
1713 	case IFLIB_RESTART_VLAN_CONFIG:
1714 	default:
1715 		return (false);
1716 	}
1717 }
1718 
1719 int
enic_setup_finish(struct enic * enic)1720 enic_setup_finish(struct enic *enic)
1721 {
1722 	struct enic_softc *softc = enic->softc;
1723 
1724 	/* Default conf */
1725 	softc->directed = 1;
1726 	softc->multicast = 0;
1727 	softc->broadcast = 1;
1728 	softc->promisc = 0;
1729 	softc->allmulti = 1;
1730 	enic_update_packet_filter(enic);
1731 
1732 	return 0;
1733 }
1734