xref: /freebsd/sys/dev/vnic/nicvf_main.c (revision 895f86f15fbf6540071feb9328c3c50ed1f027b8)
/*
 * Copyright (C) 2015 Cavium Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitset.h>
#include <sys/bitstring.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/pciio.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/stdatomic.h>
#include <sys/cpuset.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <sys/dnv.h>
#include <sys/nv.h>
#include <sys/iov_schema.h>

#include <machine/bus.h>

#include "thunder_bgx.h"
#include "nic_reg.h"
#include "nic.h"
#include "nicvf_queues.h"

#define	VNIC_VF_DEVSTR		"Cavium Thunder NIC Virtual Function Driver"

#define	VNIC_VF_REG_RID		PCIR_BAR(PCI_CFG_REG_BAR_NUM)

/* Lock for core interface settings */
#define	NICVF_CORE_LOCK_INIT(nic)				\
    sx_init(&(nic)->core_sx, device_get_nameunit((nic)->dev))

#define	NICVF_CORE_LOCK_DESTROY(nic)				\
    sx_destroy(&(nic)->core_sx)

#define	NICVF_CORE_LOCK(nic)		sx_xlock(&(nic)->core_sx)
#define	NICVF_CORE_UNLOCK(nic)		sx_xunlock(&(nic)->core_sx)

#define	NICVF_CORE_LOCK_ASSERT(nic)	sx_assert(&(nic)->core_sx, SA_XLOCKED)

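/* Link speeds in Mbps, as reported by the PF in link status messages. */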
#define	SPEED_10	10
#define	SPEED_100	100
#define	SPEED_1000	1000
#define	SPEED_10000	10000
#define	SPEED_40000	40000

MALLOC_DEFINE(M_NICVF, "nicvf", "ThunderX VNIC VF dynamic memory");

static int nicvf_probe(device_t);
static int nicvf_attach(device_t);
static int nicvf_detach(device_t);

static device_method_t nicvf_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		nicvf_probe),
	DEVMETHOD(device_attach,	nicvf_attach),
	DEVMETHOD(device_detach,	nicvf_detach),

	DEVMETHOD_END,
};

static driver_t nicvf_driver = {
	"vnic",
	nicvf_methods,
	sizeof(struct nicvf),
};

static devclass_t nicvf_devclass;

DRIVER_MODULE(nicvf, pci, nicvf_driver, nicvf_devclass, 0, 0);
MODULE_DEPEND(nicvf, pci, 1, 1, 1);
MODULE_DEPEND(nicvf, ether, 1, 1, 1);
MODULE_DEPEND(nicvf, vnic_pf, 1, 1, 1);

static int nicvf_allocate_misc_interrupt(struct nicvf *);
static int nicvf_enable_misc_interrupt(struct nicvf *);
static int nicvf_allocate_net_interrupts(struct nicvf *);
static void nicvf_release_all_interrupts(struct nicvf *);
static int nicvf_hw_set_mac_addr(struct nicvf *, uint8_t *);
static void nicvf_config_cpi(struct nicvf *);
static int nicvf_init_resources(struct nicvf *);

static int nicvf_setup_ifnet(struct nicvf *);
static int nicvf_setup_ifmedia(struct nicvf *);
static void nicvf_hw_addr_random(uint8_t *);

static int nicvf_if_ioctl(struct ifnet *, u_long, caddr_t);
static void nicvf_if_init(void *);
static void nicvf_if_init_locked(struct nicvf *);
static int nicvf_if_transmit(struct ifnet *, struct mbuf *);
static void nicvf_if_qflush(struct ifnet *);
static uint64_t nicvf_if_getcounter(struct ifnet *, ift_counter);

static int nicvf_stop_locked(struct nicvf *);

static void nicvf_media_status(struct ifnet *, struct ifmediareq *);
static int nicvf_media_change(struct ifnet *);

static void nicvf_tick_stats(void *);

static int
nicvf_probe(device_t dev)
{
	uint16_t vendor_id;
	uint16_t device_id;

	vendor_id = pci_get_vendor(dev);
	device_id = pci_get_device(dev);

	if (vendor_id != PCI_VENDOR_ID_CAVIUM)
		return (ENXIO);

	if (device_id == PCI_DEVICE_ID_THUNDER_NIC_VF ||
	    device_id == PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF) {
		device_set_desc(dev, VNIC_VF_DEVSTR);
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}

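/*
 * Attach sequence: map the register BAR, size the queue set, bring up the
 * mailbox (misc) interrupt to query the PF, allocate the per-queue
 * interrupts, settle the MAC address, configure CPI and finally create
 * and attach the ifnet.
 */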
static int
nicvf_attach(device_t dev)
{
	struct nicvf *nic;
	int rid, qcount;
	int err = 0;
	uint8_t hwaddr[ETHER_ADDR_LEN];
	uint8_t zeromac[] = {[0 ... (ETHER_ADDR_LEN - 1)] = 0};

	nic = device_get_softc(dev);
	nic->dev = dev;
	nic->pnicvf = nic;

	NICVF_CORE_LOCK_INIT(nic);

	rid = VNIC_VF_REG_RID;
	nic->reg_base = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (nic->reg_base == NULL) {
		device_printf(dev, "Could not allocate registers memory\n");
		return (ENXIO);
	}

	qcount = MAX_CMP_QUEUES_PER_QS;
	nic->max_queues = qcount;

	err = nicvf_set_qset_resources(nic);
	if (err != 0)
		goto err_free_res;

	/* Check if PF is alive and get MAC address for this VF */
	err = nicvf_allocate_misc_interrupt(nic);
	if (err != 0)
		goto err_free_res;

	NICVF_CORE_LOCK(nic);
	err = nicvf_enable_misc_interrupt(nic);
	NICVF_CORE_UNLOCK(nic);
	if (err != 0)
		goto err_release_intr;
	err = nicvf_allocate_net_interrupts(nic);
	if (err != 0) {
		device_printf(dev,
		    "Could not allocate network interface interrupts\n");
		/* The ifnet does not exist yet; just release interrupts */
		goto err_release_intr;
	}

	/* If no MAC address was obtained from the PF, generate a random one */
	if (memcmp(nic->hwaddr, zeromac, ETHER_ADDR_LEN) == 0) {
		nicvf_hw_addr_random(hwaddr);
		memcpy(nic->hwaddr, hwaddr, ETHER_ADDR_LEN);
		NICVF_CORE_LOCK(nic);
		nicvf_hw_set_mac_addr(nic, hwaddr);
		NICVF_CORE_UNLOCK(nic);
	}

	/* Configure CPI algorithm */
	nic->cpi_alg = CPI_ALG_NONE;
	NICVF_CORE_LOCK(nic);
	nicvf_config_cpi(nic);
	NICVF_CORE_UNLOCK(nic);

	err = nicvf_setup_ifnet(nic);
	if (err != 0) {
		device_printf(dev, "Could not set up ifnet\n");
		goto err_release_intr;
	}

	err = nicvf_setup_ifmedia(nic);
	if (err != 0) {
		device_printf(dev, "Could not set up ifmedia\n");
		goto err_free_ifnet;
	}

	mtx_init(&nic->stats_mtx, "VNIC stats", NULL, MTX_DEF);
	callout_init_mtx(&nic->stats_callout, &nic->stats_mtx, 0);

	ether_ifattach(nic->ifp, nic->hwaddr);

	return (0);

err_free_ifnet:
	if_free(nic->ifp);
err_release_intr:
	nicvf_release_all_interrupts(nic);
err_free_res:
	bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(nic->reg_base),
	    nic->reg_base);

	return (err);
}

static int
nicvf_detach(device_t dev)
{
	struct nicvf *nic;

	nic = device_get_softc(dev);

	NICVF_CORE_LOCK(nic);
	/* Shut down the port and release ring resources */
	nicvf_stop_locked(nic);
	NICVF_CORE_UNLOCK(nic);

	/*
	 * Detach from the network stack before freeing the ifnet.
	 * ether_ifdetach() may sleep, so call it without the core lock held.
	 */
	ether_ifdetach(nic->ifp);

	/* Release stats lock */
	mtx_destroy(&nic->stats_mtx);
	/* Release interrupts */
	nicvf_release_all_interrupts(nic);
	/* Release memory resource */
	if (nic->reg_base != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(nic->reg_base), nic->reg_base);
	}

	/* Remove all ifmedia configurations */
	ifmedia_removeall(&nic->if_media);
	/* Free this ifnet */
	if_free(nic->ifp);
	/* Finally destroy the lock */
	NICVF_CORE_LOCK_DESTROY(nic);

	return (0);
}

static void
nicvf_hw_addr_random(uint8_t *hwaddr)
{
	uint32_t rnd;
	uint8_t addr[ETHER_ADDR_LEN];

	/*
	 * Create randomized MAC address.
	 * Set 'bsd' + random 24 low-order bits.
	 */
	rnd = arc4random() & 0x00ffffff;
	addr[0] = 'b';
	addr[1] = 's';
	addr[2] = 'd';
	addr[3] = rnd >> 16;
	addr[4] = rnd >> 8;
	addr[5] = rnd >> 0;

	memcpy(hwaddr, addr, ETHER_ADDR_LEN);
}

static int
nicvf_setup_ifnet(struct nicvf *nic)
{
	struct ifnet *ifp;

	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(nic->dev, "Could not allocate ifnet structure\n");
		return (ENOMEM);
	}

	nic->ifp = ifp;

	if_setsoftc(ifp, nic);
	if_initname(ifp, device_get_name(nic->dev), device_get_unit(nic->dev));
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX);

	if_settransmitfn(ifp, nicvf_if_transmit);
	if_setqflushfn(ifp, nicvf_if_qflush);
	if_setioctlfn(ifp, nicvf_if_ioctl);
	if_setinitfn(ifp, nicvf_if_init);
	if_setgetcounterfn(ifp, nicvf_if_getcounter);

	/* Set the send queue length to the default maximum */
	if_setsendqlen(ifp, IFQ_MAXLEN);
	if_setsendqready(ifp);
	if_setmtu(ifp, ETHERMTU);

	if_setcapabilities(ifp, IFCAP_VLAN_MTU);
#ifdef DEVICE_POLLING
#error "DEVICE_POLLING not supported in VNIC driver yet"
	if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
#endif
	if_setcapenable(ifp, if_getcapabilities(ifp));

	return (0);
}

static int
nicvf_setup_ifmedia(struct nicvf *nic)
{

	ifmedia_init(&nic->if_media, IFM_IMASK, nicvf_media_change,
	    nicvf_media_status);

	/*
	 * Advertise availability of all possible connection types,
	 * even though not all are possible at the same time.
	 */

	ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_10_T | IFM_FDX),
	    0, NULL);
	ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_100_TX | IFM_FDX),
	    0, NULL);
	ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_1000_T | IFM_FDX),
	    0, NULL);
	ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_10G_SR | IFM_FDX),
	    0, NULL);
	ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_40G_CR4 | IFM_FDX),
	    0, NULL);
	ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_AUTO | IFM_FDX),
	    0, NULL);

	ifmedia_set(&nic->if_media, (IFM_ETHER | IFM_AUTO | IFM_FDX));

	return (0);
}

static int
nicvf_if_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nicvf *nic;
	struct ifreq *ifr;
	uint32_t flags;
	int mask, err;
#if defined(INET) || defined(INET6)
	struct ifaddr *ifa;
	boolean_t avoid_reset = FALSE;
#endif

	nic = if_getsoftc(ifp);
	ifr = (struct ifreq *)data;
#if defined(INET) || defined(INET6)
	ifa = (struct ifaddr *)data;
#endif
	err = 0;
	switch (cmd) {
	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif

#if defined(INET) || defined(INET6)
		/* Avoid reinitialization unless it's necessary */
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
				nicvf_if_init(nic);
#ifdef INET
			if (!(if_getflags(ifp) & IFF_NOARP))
				arp_ifinit(ifp, ifa);
#endif

			return (0);
		}
#endif
		err = ether_ioctl(ifp, cmd, data);
		break;
	case SIOCSIFMTU:
		/*
		 * ARM64TODO: Needs to be implemented.
		 * Currently ETHERMTU is set by default.
		 */
		err = ether_ioctl(ifp, cmd, data);
		break;
	case SIOCSIFFLAGS:
		NICVF_CORE_LOCK(nic);
		if (if_getflags(ifp) & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				flags = ifp->if_flags ^ nic->if_flags;
				if ((nic->if_flags & ifp->if_flags) &
				    IFF_PROMISC) {
					/* Change promiscuous mode */
#if 0
					/* ARM64TODO */
					nicvf_set_promiscous(nic);
#endif
				}

				if ((nic->if_flags ^ ifp->if_flags) &
				    IFF_ALLMULTI) {
					/* Change multicast settings */
#if 0
					/* ARM64TODO */
					nicvf_set_multicast(nic);
#endif
				}
			} else {
				nicvf_if_init_locked(nic);
			}
		} else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
			nicvf_stop_locked(nic);

		nic->if_flags = ifp->if_flags;
		NICVF_CORE_UNLOCK(nic);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
#if 0
			NICVF_CORE_LOCK(nic);
			/* ARM64TODO */
			nicvf_set_multicast(nic);
			NICVF_CORE_UNLOCK(nic);
#endif
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		err = ifmedia_ioctl(ifp, ifr, &nic->if_media, cmd);
		break;

	case SIOCSIFCAP:
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
		if (mask & IFCAP_VLAN_MTU) {
			/* No work to do except acknowledge the change took effect */
			ifp->if_capenable ^= IFCAP_VLAN_MTU;
		}
		break;

	default:
		err = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (err);
}

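/*
 * Bring-up path (core lock held): stop the port if it is already running,
 * re-enable the mailbox interrupt, push any new MAC address to the PF,
 * initialize the queue set, unmask the per-queue interrupts, mark the
 * interface running and start the statistics callout.
 */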
static void
nicvf_if_init_locked(struct nicvf *nic)
{
	struct queue_set *qs = nic->qs;
	struct ifnet *ifp;
	int qidx;
	int err;
	caddr_t if_addr;

	NICVF_CORE_LOCK_ASSERT(nic);
	ifp = nic->ifp;

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
		nicvf_stop_locked(nic);

	err = nicvf_enable_misc_interrupt(nic);
	if (err != 0) {
		if_printf(ifp, "Could not reenable Mbox interrupt\n");
		return;
	}

	/* Get the latest MAC address */
	if_addr = if_getlladdr(ifp);
	/* Update MAC address if changed */
	if (memcmp(nic->hwaddr, if_addr, ETHER_ADDR_LEN) != 0) {
		memcpy(nic->hwaddr, if_addr, ETHER_ADDR_LEN);
		nicvf_hw_set_mac_addr(nic, if_addr);
	}

	/* Initialize the queues */
	err = nicvf_init_resources(nic);
	if (err != 0)
		goto error;

	/* Make sure queue initialization is written */
	wmb();

	nicvf_reg_write(nic, NIC_VF_INT, ~0UL);
	/* Enable Qset err interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);

	/* Enable completion queue interrupt */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++)
		nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);

	/* Enable RBDR threshold interrupt */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
		nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);

	nic->drv_stats.txq_stop = 0;
	nic->drv_stats.txq_wake = 0;

	/* Activate network interface */
	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);

	/* Schedule callout to update stats */
	callout_reset(&nic->stats_callout, hz, nicvf_tick_stats, nic);

	return;

error:
	/* Something went very wrong. Disable this ifnet for good */
	if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
}

static void
nicvf_if_init(void *if_softc)
{
	struct nicvf *nic = if_softc;

	NICVF_CORE_LOCK(nic);
	nicvf_if_init_locked(nic);
	NICVF_CORE_UNLOCK(nic);
}

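/*
 * Transmit entry point. The send queue is chosen from the mbuf flow ID
 * when the hash is valid, otherwise from the current CPU. The mbuf is
 * queued on the selected buf_ring and the actual hardware submission is
 * deferred to that queue's transmit taskqueue.
 */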
static int
nicvf_if_transmit(struct ifnet *ifp, struct mbuf *mbuf)
{
	struct nicvf *nic = if_getsoftc(ifp);
	struct queue_set *qs = nic->qs;
	struct snd_queue *sq;
	int qidx;
	int err = 0;

	if (__predict_false(qs == NULL)) {
		panic("%s: missing queue set for %s", __func__,
		    device_get_nameunit(nic->dev));
	}

	/* Select queue */
	if (M_HASHTYPE_GET(mbuf) != M_HASHTYPE_NONE)
		qidx = mbuf->m_pkthdr.flowid % qs->sq_cnt;
	else
		qidx = curcpu % qs->sq_cnt;

	sq = &qs->sq[qidx];

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING) {
		if (mbuf != NULL)
			err = drbr_enqueue(ifp, sq->br, mbuf);
		return (err);
	}

	if (mbuf != NULL) {
		err = drbr_enqueue(ifp, sq->br, mbuf);
		if (err != 0)
			return (err);
	}

	taskqueue_enqueue(sq->snd_taskq, &sq->snd_task);

	return (0);
}

static void
nicvf_if_qflush(struct ifnet *ifp)
{
	struct nicvf *nic;
	struct queue_set *qs;
	struct snd_queue *sq;
	struct mbuf *mbuf;
	size_t idx;

	nic = if_getsoftc(ifp);
	qs = nic->qs;

	for (idx = 0; idx < qs->sq_cnt; idx++) {
		sq = &qs->sq[idx];
		NICVF_TX_LOCK(sq);
		while ((mbuf = buf_ring_dequeue_sc(sq->br)) != NULL)
			m_freem(mbuf);
		NICVF_TX_UNLOCK(sq);
	}
	if_qflush(ifp);
}

static uint64_t
nicvf_if_getcounter(struct ifnet *ifp, ift_counter cnt)
{
	struct nicvf *nic;
	struct nicvf_hw_stats *hw_stats;
	struct nicvf_drv_stats *drv_stats;

	nic = if_getsoftc(ifp);
	hw_stats = &nic->hw_stats;
	drv_stats = &nic->drv_stats;

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (drv_stats->rx_frames_ok);
	case IFCOUNTER_OPACKETS:
		return (drv_stats->tx_frames_ok);
	case IFCOUNTER_IBYTES:
		return (hw_stats->rx_bytes);
	case IFCOUNTER_OBYTES:
		return (hw_stats->tx_bytes_ok);
	case IFCOUNTER_IMCASTS:
		return (hw_stats->rx_mcast_frames);
	case IFCOUNTER_COLLISIONS:
		return (0);
	case IFCOUNTER_IQDROPS:
		return (drv_stats->rx_drops);
	case IFCOUNTER_OQDROPS:
		return (drv_stats->tx_drops);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}

static void
nicvf_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nicvf *nic = if_getsoftc(ifp);

	NICVF_CORE_LOCK(nic);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (nic->link_up) {
		/* Device attached to working network */
		ifmr->ifm_status |= IFM_ACTIVE;
	}

	switch (nic->speed) {
	case SPEED_10:
		ifmr->ifm_active |= IFM_10_T;
		break;
	case SPEED_100:
		ifmr->ifm_active |= IFM_100_TX;
		break;
	case SPEED_1000:
		ifmr->ifm_active |= IFM_1000_T;
		break;
	case SPEED_10000:
		ifmr->ifm_active |= IFM_10G_SR;
		break;
	case SPEED_40000:
		ifmr->ifm_active |= IFM_40G_CR4;
		break;
	default:
		ifmr->ifm_active |= IFM_AUTO;
		break;
	}

	if (nic->duplex)
		ifmr->ifm_active |= IFM_FDX;
	else
		ifmr->ifm_active |= IFM_HDX;

	NICVF_CORE_UNLOCK(nic);
}

static int
nicvf_media_change(struct ifnet *ifp __unused)
{

	return (0);
}

/* Register read/write APIs */
void
nicvf_reg_write(struct nicvf *nic, bus_space_handle_t offset, uint64_t val)
{

	bus_write_8(nic->reg_base, offset, val);
}

uint64_t
nicvf_reg_read(struct nicvf *nic, uint64_t offset)
{

	return (bus_read_8(nic->reg_base, offset));
}

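/*
 * Per-queue registers are replicated at a fixed stride in the BAR;
 * shifting the queue index by NIC_Q_NUM_SHIFT selects the instance
 * belonging to the given queue.
 */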
void
nicvf_queue_reg_write(struct nicvf *nic, bus_space_handle_t offset,
    uint64_t qidx, uint64_t val)
{

	bus_write_8(nic->reg_base, offset + (qidx << NIC_Q_NUM_SHIFT), val);
}

uint64_t
nicvf_queue_reg_read(struct nicvf *nic, bus_space_handle_t offset,
    uint64_t qidx)
{

	return (bus_read_8(nic->reg_base, offset + (qidx << NIC_Q_NUM_SHIFT)));
}

/* VF -> PF mailbox communication */
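/*
 * The mailbox consists of two consecutive 64-bit registers. A message is
 * posted by writing both words; the PF's reply arrives via the mailbox
 * interrupt, whose handler sets pf_acked or pf_nacked accordingly.
 */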
static void
nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx)
{
	uint64_t *msg = (uint64_t *)mbx;

	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 0, msg[0]);
	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, msg[1]);
}

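/*
 * Post a message to the PF and busy-wait (in 2 ms DELAY() steps) until it
 * is ACKed or NACKed, or until the timeout expires. The core lock must be
 * held across the whole exchange.
 */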
int
nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
{
	int timeout = NIC_MBOX_MSG_TIMEOUT * 10;
	int sleep = 2;

	NICVF_CORE_LOCK_ASSERT(nic);

	nic->pf_acked = FALSE;
	nic->pf_nacked = FALSE;

	nicvf_write_to_mbx(nic, mbx);

	/* Wait for the message to be acked, timeout 2sec */
	while (!nic->pf_acked) {
		if (nic->pf_nacked)
			return (EINVAL);

		DELAY(sleep * 1000);

		if (nic->pf_acked)
			break;
		timeout -= sleep;
		if (!timeout) {
			device_printf(nic->dev,
			    "PF didn't ack mbox msg %d from VF%d\n",
			    (mbx->msg.msg & 0xFF), nic->vf_id);

			return (EBUSY);
		}
	}
	return (0);
}

/*
 * Check whether the VF is able to communicate with the PF
 * and also get the VNIC number this VF is associated with.
 */
static int
nicvf_check_pf_ready(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_READY;
	if (nicvf_send_msg_to_pf(nic, &mbx)) {
		device_printf(nic->dev,
		    "PF didn't respond to READY msg\n");
		return (0);
	}

	return (1);
}

static void
nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx)
{

	if (bgx->rx)
		nic->bgx_stats.rx_stats[bgx->idx] = bgx->stats;
	else
		nic->bgx_stats.tx_stats[bgx->idx] = bgx->stats;
}

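/*
 * Mailbox interrupt handler: read the two mailbox words into a nic_mbx
 * union and dispatch on the message type. READY carries the VF/node IDs
 * and MAC address; BGX_LINK_CHANGE updates link state, speed and duplex.
 */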
static void
nicvf_handle_mbx_intr(struct nicvf *nic)
{
	union nic_mbx mbx = {};
	uint64_t *mbx_data;
	uint64_t mbx_addr;
	int i;

	mbx_addr = NIC_VF_PF_MAILBOX_0_1;
	mbx_data = (uint64_t *)&mbx;

	for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
		*mbx_data = nicvf_reg_read(nic, mbx_addr);
		mbx_data++;
		mbx_addr += sizeof(uint64_t);
	}

	switch (mbx.msg.msg) {
	case NIC_MBOX_MSG_READY:
		nic->pf_acked = TRUE;
		nic->vf_id = mbx.nic_cfg.vf_id & 0x7F;
		nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
		nic->node = mbx.nic_cfg.node_id;
		memcpy(nic->hwaddr, mbx.nic_cfg.mac_addr, ETHER_ADDR_LEN);
		nic->loopback_supported = mbx.nic_cfg.loopback_supported;
		nic->link_up = FALSE;
		nic->duplex = 0;
		nic->speed = 0;
		break;
	case NIC_MBOX_MSG_ACK:
		nic->pf_acked = TRUE;
		break;
	case NIC_MBOX_MSG_NACK:
		nic->pf_nacked = TRUE;
		break;
	case NIC_MBOX_MSG_BGX_STATS:
		nicvf_read_bgx_stats(nic, &mbx.bgx_stats);
		nic->pf_acked = TRUE;
		break;
	case NIC_MBOX_MSG_BGX_LINK_CHANGE:
		nic->pf_acked = TRUE;
		nic->link_up = mbx.link_status.link_up;
		nic->duplex = mbx.link_status.duplex;
		nic->speed = mbx.link_status.speed;
		if (nic->link_up) {
			if_setbaudrate(nic->ifp, nic->speed * 1000000);
			if_link_state_change(nic->ifp, LINK_STATE_UP);
		} else {
			if_setbaudrate(nic->ifp, 0);
			if_link_state_change(nic->ifp, LINK_STATE_DOWN);
		}
		break;
	default:
		device_printf(nic->dev,
		    "Invalid message from PF, msg 0x%x\n", mbx.msg.msg);
		break;
	}
	nicvf_clear_intr(nic, NICVF_INTR_MBOX, 0);
}

static int
nicvf_hw_set_mac_addr(struct nicvf *nic, uint8_t *hwaddr)
{
	union nic_mbx mbx = {};

	mbx.mac.msg = NIC_MBOX_MSG_SET_MAC;
	mbx.mac.vf_id = nic->vf_id;
	memcpy(mbx.mac.mac_addr, hwaddr, ETHER_ADDR_LEN);

	return (nicvf_send_msg_to_pf(nic, &mbx));
}

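/*
 * Ask the PF to set up the CPI (channel parse index) configuration for
 * this VF. CPI_ALG_NONE requests no special RX classification.
 */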
static void
nicvf_config_cpi(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.cpi_cfg.msg = NIC_MBOX_MSG_CPI_CFG;
	mbx.cpi_cfg.vf_id = nic->vf_id;
	mbx.cpi_cfg.cpi_alg = nic->cpi_alg;
	mbx.cpi_cfg.rq_cnt = nic->qs->rq_cnt;

	nicvf_send_msg_to_pf(nic, &mbx);
}

static int
nicvf_init_resources(struct nicvf *nic)
{
	int err;
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;

	/* Enable Qset */
	nicvf_qset_config(nic, TRUE);

	/* Initialize queues and HW for data transfer */
	err = nicvf_config_data_transfer(nic, TRUE);
	if (err != 0) {
		device_printf(nic->dev,
		    "Failed to alloc/config VF's QSet resources\n");
		return (err);
	}

	/* Send VF config done msg to PF */
	nicvf_write_to_mbx(nic, &mbx);

	return (0);
}

static void
nicvf_misc_intr_handler(void *arg)
{
	struct nicvf *nic = (struct nicvf *)arg;
	uint64_t intr;

	intr = nicvf_reg_read(nic, NIC_VF_INT);
	/* Check for spurious interrupt */
	if (!(intr & NICVF_INTR_MBOX_MASK))
		return;

	nicvf_handle_mbx_intr(nic);
}

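/*
 * Filter-level completion queue interrupt handler: mask the CQ interrupt,
 * defer the completion processing to the CQ taskqueue and ack the source.
 */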
static int
nicvf_intr_handler(void *arg)
{
	struct nicvf *nic;
	struct cmp_queue *cq;
	int qidx;

	cq = (struct cmp_queue *)arg;
	nic = cq->nic;
	qidx = cq->idx;

	/* Disable interrupts */
	nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);

	taskqueue_enqueue(cq->cmp_taskq, &cq->cmp_task);

	/* Clear interrupt */
	nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);

	return (FILTER_HANDLED);
}

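/*
 * A single vector covers all RBDRs: walk every ring whose interrupt is
 * enabled, mask it and hand the refill work to that ring's taskqueue.
 */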
static void
nicvf_rbdr_intr_handler(void *arg)
{
	struct nicvf *nic;
	struct queue_set *qs;
	struct rbdr *rbdr;
	int qidx;

	nic = (struct nicvf *)arg;

	/* Disable RBDR interrupt and schedule softirq */
	for (qidx = 0; qidx < nic->qs->rbdr_cnt; qidx++) {
		if (!nicvf_is_intr_enabled(nic, NICVF_INTR_RBDR, qidx))
			continue;
		nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);

		qs = nic->qs;
		rbdr = &qs->rbdr[qidx];
		taskqueue_enqueue(rbdr->rbdr_taskq, &rbdr->rbdr_task_nowait);
		/* Clear interrupt */
		nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
	}
}

static void
nicvf_qs_err_intr_handler(void *arg)
{
	struct nicvf *nic = (struct nicvf *)arg;
	struct queue_set *qs = nic->qs;

	/* Disable Qset err interrupt and schedule softirq */
	nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
	taskqueue_enqueue(qs->qs_err_taskq, &qs->qs_err_task);
	nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);
}

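/*
 * Allocate the full set of NIC_VF_MSIX_VECTORS MSI-X vectors along with
 * the BAR that holds the MSI-X table. Partial allocations are rejected.
 */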
1024 nicvf_enable_msix(struct nicvf *nic)
1025 {
1026 	struct pci_devinfo *dinfo;
1027 	int rid, count;
1028 	int ret;
1029 
1030 	dinfo = device_get_ivars(nic->dev);
1031 	rid = dinfo->cfg.msix.msix_table_bar;
1032 	nic->msix_table_res =
1033 	    bus_alloc_resource_any(nic->dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
1034 	if (nic->msix_table_res == NULL) {
1035 		device_printf(nic->dev,
1036 		    "Could not allocate memory for MSI-X table\n");
1037 		return (ENXIO);
1038 	}
1039 
1040 	count = nic->num_vec = NIC_VF_MSIX_VECTORS;
1041 
1042 	ret = pci_alloc_msix(nic->dev, &count);
1043 	if ((ret != 0) || (count != nic->num_vec)) {
1044 		device_printf(nic->dev,
1045 		    "Request for #%d msix vectors failed, error: %d\n",
1046 		    nic->num_vec, ret);
1047 		return (ret);
1048 	}
1049 
1050 	nic->msix_enabled = 1;
1051 	return (0);
1052 }

static void
nicvf_disable_msix(struct nicvf *nic)
{

	if (nic->msix_enabled) {
		pci_release_msi(nic->dev);
		nic->msix_enabled = 0;
		nic->num_vec = 0;
	}
}

static void
nicvf_release_all_interrupts(struct nicvf *nic)
{
	struct resource *res;
	int irq;
	int err;

	/* Free registered interrupts */
	for (irq = 0; irq < nic->num_vec; irq++) {
		res = nic->msix_entries[irq].irq_res;
		if (res == NULL)
			continue;
		/* Teardown interrupt first */
		if (nic->msix_entries[irq].handle != NULL) {
			err = bus_teardown_intr(nic->dev,
			    nic->msix_entries[irq].irq_res,
			    nic->msix_entries[irq].handle);
			KASSERT(err == 0,
			    ("ERROR: Unable to teardown interrupt %d", irq));
			nic->msix_entries[irq].handle = NULL;
		}

		bus_release_resource(nic->dev, SYS_RES_IRQ,
		    rman_get_rid(res), nic->msix_entries[irq].irq_res);
		nic->msix_entries[irq].irq_res = NULL;
	}
	/* Disable MSI-X */
	nicvf_disable_msix(nic);
}

/*
 * Initialize MSI-X vectors and register the MISC interrupt.
 * Send a READY message to the PF to check if it's alive.
 */
static int
nicvf_allocate_misc_interrupt(struct nicvf *nic)
{
	struct resource *res;
	int irq, rid;
	int ret = 0;

	/* Return if mailbox interrupt is already registered */
	if (nic->msix_enabled)
		return (0);

	/* Enable MSI-X */
	if (nicvf_enable_msix(nic) != 0)
		return (ENXIO);

	irq = NICVF_INTR_ID_MISC;
	rid = irq + 1;
	nic->msix_entries[irq].irq_res = bus_alloc_resource_any(nic->dev,
	    SYS_RES_IRQ, &rid, (RF_SHAREABLE | RF_ACTIVE));
	if (nic->msix_entries[irq].irq_res == NULL) {
		device_printf(nic->dev,
		    "Could not allocate Mbox interrupt for VF%d\n",
		    device_get_unit(nic->dev));
		return (ENXIO);
	}

	ret = bus_setup_intr(nic->dev, nic->msix_entries[irq].irq_res,
	    (INTR_MPSAFE | INTR_TYPE_MISC), NULL, nicvf_misc_intr_handler, nic,
	    &nic->msix_entries[irq].handle);
	if (ret != 0) {
		res = nic->msix_entries[irq].irq_res;
		bus_release_resource(nic->dev, SYS_RES_IRQ,
		    rman_get_rid(res), res);
		nic->msix_entries[irq].irq_res = NULL;
		return (ret);
	}

	return (0);
}

static int
nicvf_enable_misc_interrupt(struct nicvf *nic)
{

	/* Enable mailbox interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_MBOX, 0);

	/* Check if VF is able to communicate with PF */
	if (!nicvf_check_pf_ready(nic)) {
		nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
		return (ENXIO);
	}

	return (0);
}

static void
nicvf_release_net_interrupts(struct nicvf *nic)
{
	struct resource *res;
	int irq;
	int err;

	for_each_cq_irq(irq) {
		res = nic->msix_entries[irq].irq_res;
		if (res == NULL)
			continue;
		/* Teardown active interrupts first */
		if (nic->msix_entries[irq].handle != NULL) {
			err = bus_teardown_intr(nic->dev,
			    nic->msix_entries[irq].irq_res,
			    nic->msix_entries[irq].handle);
			KASSERT(err == 0,
			    ("ERROR: Unable to teardown CQ interrupt %d",
			    (irq - NICVF_INTR_ID_CQ)));
			if (err != 0)
				continue;
		}

		/* Release resource */
		bus_release_resource(nic->dev, SYS_RES_IRQ, rman_get_rid(res),
		    res);
		nic->msix_entries[irq].irq_res = NULL;
	}

	for_each_rbdr_irq(irq) {
		res = nic->msix_entries[irq].irq_res;
		if (res == NULL)
			continue;
		/* Teardown active interrupts first */
		if (nic->msix_entries[irq].handle != NULL) {
			err = bus_teardown_intr(nic->dev,
			    nic->msix_entries[irq].irq_res,
			    nic->msix_entries[irq].handle);
			KASSERT(err == 0,
			    ("ERROR: Unable to teardown RBDR interrupt %d",
			    (irq - NICVF_INTR_ID_RBDR)));
			if (err != 0)
				continue;
		}

		/* Release resource */
		bus_release_resource(nic->dev, SYS_RES_IRQ, rman_get_rid(res),
		    res);
		nic->msix_entries[irq].irq_res = NULL;
	}

	irq = NICVF_INTR_ID_QS_ERR;
	res = nic->msix_entries[irq].irq_res;
	if (res != NULL) {
		/* Teardown active interrupts first */
		if (nic->msix_entries[irq].handle != NULL) {
			err = bus_teardown_intr(nic->dev,
			    nic->msix_entries[irq].irq_res,
			    nic->msix_entries[irq].handle);
			KASSERT(err == 0,
			    ("ERROR: Unable to teardown QS Error interrupt %d",
			    irq));
			if (err != 0)
				return;
		}

		/* Release resource */
		bus_release_resource(nic->dev, SYS_RES_IRQ, rman_get_rid(res),
		    res);
		nic->msix_entries[irq].irq_res = NULL;
	}
}

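/*
 * The MSI-X vector space is laid out with the CQ vectors first, then the
 * RBDR vectors, then the QS error vector. SYS_RES_IRQ rids for MSI-X
 * vectors start at 1, hence rid = irq + 1 below.
 */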
static int
nicvf_allocate_net_interrupts(struct nicvf *nic)
{
	int irq, rid;
	int qidx;
	int ret = 0;

	/* MSI-X must be configured by now */
	if (!nic->msix_enabled) {
		device_printf(nic->dev, "Cannot allocate queue interrupts. "
		    "MSI-X interrupts disabled.\n");
		return (ENXIO);
	}

	/* Register CQ interrupts */
	for_each_cq_irq(irq) {
		if (irq >= (NICVF_INTR_ID_CQ + nic->qs->cq_cnt))
			break;

		qidx = irq - NICVF_INTR_ID_CQ;
		rid = irq + 1;
		nic->msix_entries[irq].irq_res = bus_alloc_resource_any(nic->dev,
		    SYS_RES_IRQ, &rid, (RF_SHAREABLE | RF_ACTIVE));
		if (nic->msix_entries[irq].irq_res == NULL) {
			device_printf(nic->dev,
			    "Could not allocate CQ interrupt %d for VF%d\n",
			    (irq - NICVF_INTR_ID_CQ), device_get_unit(nic->dev));
			ret = ENXIO;
			goto error;
		}
		ret = bus_setup_intr(nic->dev, nic->msix_entries[irq].irq_res,
		    (INTR_MPSAFE | INTR_TYPE_NET), nicvf_intr_handler,
		    NULL, &nic->qs->cq[qidx], &nic->msix_entries[irq].handle);
		if (ret != 0) {
			device_printf(nic->dev,
			    "Could not setup CQ interrupt %d for VF%d\n",
			    (irq - NICVF_INTR_ID_CQ), device_get_unit(nic->dev));
			goto error;
		}
	}

	/* Register RBDR interrupts */
	for_each_rbdr_irq(irq) {
		if (irq >= (NICVF_INTR_ID_RBDR + nic->qs->rbdr_cnt))
			break;

		rid = irq + 1;
		nic->msix_entries[irq].irq_res = bus_alloc_resource_any(nic->dev,
		    SYS_RES_IRQ, &rid, (RF_SHAREABLE | RF_ACTIVE));
		if (nic->msix_entries[irq].irq_res == NULL) {
			device_printf(nic->dev,
			    "Could not allocate RBDR interrupt %d for VF%d\n",
			    (irq - NICVF_INTR_ID_RBDR),
			    device_get_unit(nic->dev));
			ret = ENXIO;
			goto error;
		}
		ret = bus_setup_intr(nic->dev, nic->msix_entries[irq].irq_res,
		    (INTR_MPSAFE | INTR_TYPE_NET), NULL,
		    nicvf_rbdr_intr_handler, nic,
		    &nic->msix_entries[irq].handle);
		if (ret != 0) {
			device_printf(nic->dev,
			    "Could not setup RBDR interrupt %d for VF%d\n",
			    (irq - NICVF_INTR_ID_RBDR),
			    device_get_unit(nic->dev));
			goto error;
		}
	}

	/* Register QS error interrupt */
	irq = NICVF_INTR_ID_QS_ERR;
	rid = irq + 1;
	nic->msix_entries[irq].irq_res = bus_alloc_resource_any(nic->dev,
	    SYS_RES_IRQ, &rid, (RF_SHAREABLE | RF_ACTIVE));
	if (nic->msix_entries[irq].irq_res == NULL) {
		device_printf(nic->dev,
		    "Could not allocate QS Error interrupt for VF%d\n",
		    device_get_unit(nic->dev));
		ret = ENXIO;
		goto error;
	}
	ret = bus_setup_intr(nic->dev, nic->msix_entries[irq].irq_res,
	    (INTR_MPSAFE | INTR_TYPE_NET), NULL, nicvf_qs_err_intr_handler,
	    nic, &nic->msix_entries[irq].handle);
	if (ret != 0) {
		device_printf(nic->dev,
		    "Could not setup QS Error interrupt for VF%d\n",
		    device_get_unit(nic->dev));
		goto error;
	}

	return (0);
error:
	nicvf_release_net_interrupts(nic);
	return (ret);
}

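/*
 * Teardown order: drain the stats callout, notify the PF of the shutdown,
 * mask and ack the RBDR and QS error interrupts, mark the ifnet down,
 * free the queue resources, disable the Qset and finally mask the
 * mailbox interrupt.
 */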
static int
nicvf_stop_locked(struct nicvf *nic)
{
	struct ifnet *ifp;
	int qidx;
	struct queue_set *qs = nic->qs;
	union nic_mbx mbx = {};

	NICVF_CORE_LOCK_ASSERT(nic);
	/* Stop callout. Can block here since we hold the SX lock */
	callout_drain(&nic->stats_callout);

	ifp = nic->ifp;

	mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
	nicvf_send_msg_to_pf(nic, &mbx);

	/* Disable RBDR & QS error interrupts */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
		nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
		nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
	}
	nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
	nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);

	/* Deactivate network interface */
	if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);

	/* Free resources */
	nicvf_config_data_transfer(nic, FALSE);

	/* Disable HW Qset */
	nicvf_qset_config(nic, FALSE);

	/* Disable mailbox interrupt */
	nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);

	return (0);
}

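/*
 * Hardware statistics registers are indexed; the GET_*_STATS() macros
 * turn an index into a register offset (8-byte stride via the << 3).
 */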
static void
nicvf_update_stats(struct nicvf *nic)
{
	int qidx;
	struct nicvf_hw_stats *stats = &nic->hw_stats;
	struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
	struct queue_set *qs = nic->qs;

#define	GET_RX_STATS(reg) \
    nicvf_reg_read(nic, NIC_VNIC_RX_STAT_0_13 | ((reg) << 3))
#define	GET_TX_STATS(reg) \
    nicvf_reg_read(nic, NIC_VNIC_TX_STAT_0_4 | ((reg) << 3))

	stats->rx_bytes = GET_RX_STATS(RX_OCTS);
	stats->rx_ucast_frames = GET_RX_STATS(RX_UCAST);
	stats->rx_bcast_frames = GET_RX_STATS(RX_BCAST);
	stats->rx_mcast_frames = GET_RX_STATS(RX_MCAST);
	stats->rx_fcs_errors = GET_RX_STATS(RX_FCS);
	stats->rx_l2_errors = GET_RX_STATS(RX_L2ERR);
	stats->rx_drop_red = GET_RX_STATS(RX_RED);
	stats->rx_drop_red_bytes = GET_RX_STATS(RX_RED_OCTS);
	stats->rx_drop_overrun = GET_RX_STATS(RX_ORUN);
	stats->rx_drop_overrun_bytes = GET_RX_STATS(RX_ORUN_OCTS);
	stats->rx_drop_bcast = GET_RX_STATS(RX_DRP_BCAST);
	stats->rx_drop_mcast = GET_RX_STATS(RX_DRP_MCAST);
	stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST);
	stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST);

	stats->tx_bytes_ok = GET_TX_STATS(TX_OCTS);
	stats->tx_ucast_frames_ok = GET_TX_STATS(TX_UCAST);
	stats->tx_bcast_frames_ok = GET_TX_STATS(TX_BCAST);
	stats->tx_mcast_frames_ok = GET_TX_STATS(TX_MCAST);
	stats->tx_drops = GET_TX_STATS(TX_DROP);

	drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok +
	    stats->tx_bcast_frames_ok + stats->tx_mcast_frames_ok;
	drv_stats->rx_drops = stats->rx_drop_red + stats->rx_drop_overrun;
	drv_stats->tx_drops = stats->tx_drops;

	/* Update RQ and SQ stats */
	for (qidx = 0; qidx < qs->rq_cnt; qidx++)
		nicvf_update_rq_stats(nic, qidx);
	for (qidx = 0; qidx < qs->sq_cnt; qidx++)
		nicvf_update_sq_stats(nic, qidx);
}

static void
nicvf_tick_stats(void *arg)
{
	struct nicvf *nic;

	nic = (struct nicvf *)arg;

	/* Read the statistics */
	nicvf_update_stats(nic);

	callout_reset(&nic->stats_callout, hz, nicvf_tick_stats, nic);
}