xref: /freebsd/sys/dev/vnic/nicvf_main.c (revision aa3860851b9f6a6002d135b1cac7736e0995eedc)
/*
 * Copyright (C) 2015 Cavium Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */
#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitset.h>
#include <sys/bitstring.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/pciio.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/stdatomic.h>
#include <sys/cpuset.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#include <netinet/tcp_lro.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <sys/dnv.h>
#include <sys/nv.h>
#include <sys/iov_schema.h>

#include <machine/bus.h>

#include "thunder_bgx.h"
#include "nic_reg.h"
#include "nic.h"
#include "nicvf_queues.h"

#define	VNIC_VF_DEVSTR		"Cavium Thunder NIC Virtual Function Driver"

#define	VNIC_VF_REG_RID		PCIR_BAR(PCI_CFG_REG_BAR_NUM)

/* Lock for core interface settings */
#define	NICVF_CORE_LOCK_INIT(nic)				\
    sx_init(&(nic)->core_sx, device_get_nameunit((nic)->dev))

#define	NICVF_CORE_LOCK_DESTROY(nic)				\
    sx_destroy(&(nic)->core_sx)

#define	NICVF_CORE_LOCK(nic)		sx_xlock(&(nic)->core_sx)
#define	NICVF_CORE_UNLOCK(nic)		sx_xunlock(&(nic)->core_sx)

#define	NICVF_CORE_LOCK_ASSERT(nic)	sx_assert(&(nic)->core_sx, SA_XLOCKED)

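/*
 * Link speeds (in Mbps) as reported by the PF in BGX link status
 * mailbox messages; translated to ifmedia types in nicvf_media_status().
 */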
#define	SPEED_10	10
#define	SPEED_100	100
#define	SPEED_1000	1000
#define	SPEED_10000	10000
#define	SPEED_40000	40000

MALLOC_DEFINE(M_NICVF, "nicvf", "ThunderX VNIC VF dynamic memory");

static int nicvf_probe(device_t);
static int nicvf_attach(device_t);
static int nicvf_detach(device_t);

static device_method_t nicvf_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		nicvf_probe),
	DEVMETHOD(device_attach,	nicvf_attach),
	DEVMETHOD(device_detach,	nicvf_detach),

	DEVMETHOD_END,
};

static driver_t nicvf_driver = {
	"vnic",
	nicvf_methods,
	sizeof(struct nicvf),
};

DRIVER_MODULE(vnicvf, pci, nicvf_driver, 0, 0);
MODULE_VERSION(vnicvf, 1);
MODULE_DEPEND(vnicvf, pci, 1, 1, 1);
MODULE_DEPEND(vnicvf, ether, 1, 1, 1);
MODULE_DEPEND(vnicvf, vnicpf, 1, 1, 1);

static int nicvf_allocate_misc_interrupt(struct nicvf *);
static int nicvf_enable_misc_interrupt(struct nicvf *);
static int nicvf_allocate_net_interrupts(struct nicvf *);
static void nicvf_release_all_interrupts(struct nicvf *);
static int nicvf_update_hw_max_frs(struct nicvf *, int);
static int nicvf_hw_set_mac_addr(struct nicvf *, uint8_t *);
static void nicvf_config_cpi(struct nicvf *);
static int nicvf_rss_init(struct nicvf *);
static int nicvf_init_resources(struct nicvf *);

static void nicvf_setup_ifnet(struct nicvf *);
static int nicvf_setup_ifmedia(struct nicvf *);
static void nicvf_hw_addr_random(uint8_t *);

static int nicvf_if_ioctl(if_t, u_long, caddr_t);
static void nicvf_if_init(void *);
static void nicvf_if_init_locked(struct nicvf *);
static int nicvf_if_transmit(if_t, struct mbuf *);
static void nicvf_if_qflush(if_t);
static uint64_t nicvf_if_getcounter(if_t, ift_counter);

static int nicvf_stop_locked(struct nicvf *);

static void nicvf_media_status(if_t, struct ifmediareq *);
static int nicvf_media_change(if_t);

static void nicvf_tick_stats(void *);

static int
nicvf_probe(device_t dev)
{
	uint16_t vendor_id;
	uint16_t device_id;

	vendor_id = pci_get_vendor(dev);
	device_id = pci_get_device(dev);

	if (vendor_id != PCI_VENDOR_ID_CAVIUM)
		return (ENXIO);

	if (device_id == PCI_DEVICE_ID_THUNDER_NIC_VF ||
	    device_id == PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF) {
		device_set_desc(dev, VNIC_VF_DEVSTR);
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}

static int
nicvf_attach(device_t dev)
{
	struct nicvf *nic;
	int rid, qcount;
	int err = 0;
	uint8_t hwaddr[ETHER_ADDR_LEN];
	uint8_t zeromac[] = {[0 ... (ETHER_ADDR_LEN - 1)] = 0};

	nic = device_get_softc(dev);
	nic->dev = dev;
	nic->pnicvf = nic;

	NICVF_CORE_LOCK_INIT(nic);
	/* Enable HW TSO on Pass2 */
	if (!pass1_silicon(dev))
		nic->hw_tso = TRUE;

	rid = VNIC_VF_REG_RID;
	nic->reg_base = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (nic->reg_base == NULL) {
		device_printf(dev, "Could not allocate registers memory\n");
		return (ENXIO);
	}

	qcount = MAX_CMP_QUEUES_PER_QS;
	nic->max_queues = qcount;

	err = nicvf_set_qset_resources(nic);
	if (err != 0)
		goto err_free_res;

	/* Check if PF is alive and get MAC address for this VF */
	err = nicvf_allocate_misc_interrupt(nic);
	if (err != 0)
		goto err_free_res;

	NICVF_CORE_LOCK(nic);
	err = nicvf_enable_misc_interrupt(nic);
	NICVF_CORE_UNLOCK(nic);
	if (err != 0)
		goto err_release_intr;

	err = nicvf_allocate_net_interrupts(nic);
	if (err != 0) {
		device_printf(dev,
		    "Could not allocate network interface interrupts\n");
		goto err_release_intr;
	}

	/* If no MAC address was obtained, generate a random one */
	if (memcmp(nic->hwaddr, zeromac, ETHER_ADDR_LEN) == 0) {
		nicvf_hw_addr_random(hwaddr);
		memcpy(nic->hwaddr, hwaddr, ETHER_ADDR_LEN);
		NICVF_CORE_LOCK(nic);
		nicvf_hw_set_mac_addr(nic, hwaddr);
		NICVF_CORE_UNLOCK(nic);
	}

	/* Configure CPI algorithm */
	nic->cpi_alg = CPI_ALG_NONE;
	NICVF_CORE_LOCK(nic);
	nicvf_config_cpi(nic);
	/* Configure receive side scaling */
	if (nic->qs->rq_cnt > 1)
		nicvf_rss_init(nic);
	NICVF_CORE_UNLOCK(nic);

	nicvf_setup_ifnet(nic);

	err = nicvf_setup_ifmedia(nic);
	if (err != 0) {
		device_printf(dev, "Could not set-up ifmedia\n");
		goto err_free_ifnet;
	}

	mtx_init(&nic->stats_mtx, "VNIC stats", NULL, MTX_DEF);
	callout_init_mtx(&nic->stats_callout, &nic->stats_mtx, 0);

	ether_ifattach(nic->ifp, nic->hwaddr);

	return (0);

err_free_ifnet:
	if_free(nic->ifp);
err_release_intr:
	nicvf_release_all_interrupts(nic);
err_free_res:
	bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(nic->reg_base),
	    nic->reg_base);

	return (err);
}

static int
nicvf_detach(device_t dev)
{
	struct nicvf *nic;

	nic = device_get_softc(dev);

	NICVF_CORE_LOCK(nic);
	/* Shut down the port and release ring resources */
	nicvf_stop_locked(nic);
	/* Release stats lock */
	mtx_destroy(&nic->stats_mtx);
	/* Release interrupts */
	nicvf_release_all_interrupts(nic);
	/* Release memory resource */
	if (nic->reg_base != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(nic->reg_base), nic->reg_base);
	}

	/* Remove all ifmedia configurations */
	ifmedia_removeall(&nic->if_media);
	/* Free this ifnet */
	if_free(nic->ifp);
	NICVF_CORE_UNLOCK(nic);
	/* Finally destroy the lock */
	NICVF_CORE_LOCK_DESTROY(nic);

	return (0);
}

static void
nicvf_hw_addr_random(uint8_t *hwaddr)
{
	uint32_t rnd;
	uint8_t addr[ETHER_ADDR_LEN];

	/*
	 * Create a randomized MAC address:
	 * ASCII 'bsd' plus 24 random low-order bits.
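	 * ASCII 'b' (0x62) happens to have the locally-administered bit
	 * (0x02) set and the multicast bit (0x01) clear, as required for
	 * a software-assigned unicast address.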
	 */
	rnd = arc4random() & 0x00ffffff;
	addr[0] = 'b';
	addr[1] = 's';
	addr[2] = 'd';
	addr[3] = rnd >> 16;
	addr[4] = rnd >> 8;
	addr[5] = rnd >> 0;

	memcpy(hwaddr, addr, ETHER_ADDR_LEN);
}

static void
nicvf_setup_ifnet(struct nicvf *nic)
{
	if_t ifp;

	ifp = if_alloc(IFT_ETHER);
	nic->ifp = ifp;

	if_setsoftc(ifp, nic);
	if_initname(ifp, device_get_name(nic->dev), device_get_unit(nic->dev));
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);

	if_settransmitfn(ifp, nicvf_if_transmit);
	if_setqflushfn(ifp, nicvf_if_qflush);
	if_setioctlfn(ifp, nicvf_if_ioctl);
	if_setinitfn(ifp, nicvf_if_init);
	if_setgetcounterfn(ifp, nicvf_if_getcounter);

	if_setmtu(ifp, ETHERMTU);

	/* Reset caps */
	if_setcapabilities(ifp, 0);

	/* Set the default values */
	if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU | IFCAP_JUMBO_MTU, 0);
	if_setcapabilitiesbit(ifp, IFCAP_LRO, 0);
	if (nic->hw_tso) {
		/* TSO */
		if_setcapabilitiesbit(ifp, IFCAP_TSO4, 0);
		/* TSO parameters */
		if_sethwtsomax(ifp, NICVF_TSO_MAXSIZE);
		if_sethwtsomaxsegcount(ifp, NICVF_TSO_NSEGS);
		if_sethwtsomaxsegsize(ifp, MCLBYTES);
	}
	/* IP/TCP/UDP HW checksums */
	if_setcapabilitiesbit(ifp, IFCAP_HWCSUM, 0);
	if_setcapabilitiesbit(ifp, IFCAP_HWSTATS, 0);
	/*
	 * HW offload enable
	 */
	if_clearhwassist(ifp);
	if_sethwassistbits(ifp, (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP), 0);
	if (nic->hw_tso)
		if_sethwassistbits(ifp, (CSUM_TSO), 0);
	if_setcapenable(ifp, if_getcapabilities(ifp));
}

static int
nicvf_setup_ifmedia(struct nicvf *nic)
{

	ifmedia_init(&nic->if_media, IFM_IMASK, nicvf_media_change,
	    nicvf_media_status);

	/*
	 * Advertise availability of all possible connection types,
	 * even though not all are possible at the same time.
	 */

	ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_10_T | IFM_FDX),
	    0, NULL);
	ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_100_TX | IFM_FDX),
	    0, NULL);
	ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_1000_T | IFM_FDX),
	    0, NULL);
	ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_10G_SR | IFM_FDX),
	    0, NULL);
	ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_40G_CR4 | IFM_FDX),
	    0, NULL);
	ifmedia_add(&nic->if_media, (IFM_ETHER | IFM_AUTO | IFM_FDX),
	    0, NULL);

	ifmedia_set(&nic->if_media, (IFM_ETHER | IFM_AUTO | IFM_FDX));

	return (0);
}

static int
nicvf_if_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct nicvf *nic;
	struct rcv_queue *rq;
	struct ifreq *ifr;
	uint32_t flags;
	int mask, err;
	int rq_idx;
#if defined(INET) || defined(INET6)
	struct ifaddr *ifa;
	boolean_t avoid_reset = FALSE;
#endif

	nic = if_getsoftc(ifp);
	ifr = (struct ifreq *)data;
#if defined(INET) || defined(INET6)
	ifa = (struct ifaddr *)data;
#endif
	err = 0;
	switch (cmd) {
	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif

#if defined(INET) || defined(INET6)
		/* Avoid reinitialization unless it's necessary */
		if (avoid_reset) {
			if_setflagbits(ifp, IFF_UP, 0);
			if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
				nicvf_if_init(nic);
#ifdef INET
			if (!(if_getflags(ifp) & IFF_NOARP))
				arp_ifinit(ifp, ifa);
#endif

			return (0);
		}
#endif
		err = ether_ioctl(ifp, cmd, data);
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < NIC_HW_MIN_FRS ||
		    ifr->ifr_mtu > NIC_HW_MAX_FRS) {
			err = EINVAL;
		} else {
			NICVF_CORE_LOCK(nic);
			err = nicvf_update_hw_max_frs(nic, ifr->ifr_mtu);
			if (err == 0)
				if_setmtu(ifp, ifr->ifr_mtu);
			NICVF_CORE_UNLOCK(nic);
		}
		break;
	case SIOCSIFFLAGS:
		NICVF_CORE_LOCK(nic);
		flags = if_getflags(ifp);
		if (flags & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				if ((flags ^ nic->if_flags) & IFF_PROMISC) {
					/* Change promiscuous mode */
#if 0 /* XXX */
					nicvf_set_promiscous(nic);
#endif
				}

				if ((flags ^ nic->if_flags) & IFF_ALLMULTI) {
					/* Change multicasting settings */
#if 0 /* XXX */
					nicvf_set_multicast(nic);
#endif
				}
			} else {
				nicvf_if_init_locked(nic);
			}
		} else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
			nicvf_stop_locked(nic);

		nic->if_flags = flags;
		NICVF_CORE_UNLOCK(nic);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
#if 0
			NICVF_CORE_LOCK(nic);
			/* ARM64TODO */
			nicvf_set_multicast(nic);
			NICVF_CORE_UNLOCK(nic);
#endif
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		err = ifmedia_ioctl(ifp, ifr, &nic->if_media, cmd);
		break;

	case SIOCSIFCAP:
		mask = if_getcapenable(ifp) ^ ifr->ifr_reqcap;
		if (mask & IFCAP_VLAN_MTU) {
			/* No work to do except acknowledge the change took effect. */
			if_togglecapenable(ifp, IFCAP_VLAN_MTU);
		}
		if (mask & IFCAP_TXCSUM)
			if_togglecapenable(ifp, IFCAP_TXCSUM);
		if (mask & IFCAP_RXCSUM)
			if_togglecapenable(ifp, IFCAP_RXCSUM);
		if ((mask & IFCAP_TSO4) && nic->hw_tso)
			if_togglecapenable(ifp, IFCAP_TSO4);
		if (mask & IFCAP_LRO) {
			/*
			 * Lock the driver for a moment to avoid
			 * a mismatch in per-queue settings.
			 */
			NICVF_CORE_LOCK(nic);
			if_togglecapenable(ifp, IFCAP_LRO);
			if ((if_getdrvflags(nic->ifp) & IFF_DRV_RUNNING) != 0) {
				/*
				 * Apply the new LRO setting to subsequent
				 * packets. Atomicity of this change is not
				 * necessary as we don't need a precise toggle
				 * of this feature for all threads processing
				 * the completion queue.
				 */
				for (rq_idx = 0;
				    rq_idx < nic->qs->rq_cnt; rq_idx++) {
					rq = &nic->qs->rq[rq_idx];
					rq->lro_enabled = !rq->lro_enabled;
				}
			}
			NICVF_CORE_UNLOCK(nic);
		}

		break;

	default:
		err = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (err);
}

static void
nicvf_if_init_locked(struct nicvf *nic)
{
	struct queue_set *qs = nic->qs;
	if_t ifp;
	int qidx;
	int err;
	caddr_t if_addr;

	NICVF_CORE_LOCK_ASSERT(nic);
	ifp = nic->ifp;

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
		nicvf_stop_locked(nic);

	err = nicvf_enable_misc_interrupt(nic);
	if (err != 0) {
		if_printf(ifp, "Could not reenable Mbox interrupt\n");
		return;
	}

	/* Get the latest MAC address */
	if_addr = if_getlladdr(ifp);
	/* Update MAC address if changed */
	if (memcmp(nic->hwaddr, if_addr, ETHER_ADDR_LEN) != 0) {
		memcpy(nic->hwaddr, if_addr, ETHER_ADDR_LEN);
		nicvf_hw_set_mac_addr(nic, if_addr);
	}

	/* Initialize the queues */
	err = nicvf_init_resources(nic);
	if (err != 0)
		goto error;

	/* Make sure queue initialization is written */
	wmb();

	nicvf_reg_write(nic, NIC_VF_INT, ~0UL);
	/* Enable Qset err interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);

	/* Enable completion queue interrupt */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++)
		nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);

	/* Enable RBDR threshold interrupt */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
		nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);

	nic->drv_stats.txq_stop = 0;
	nic->drv_stats.txq_wake = 0;

	/* Activate network interface */
	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);

	/* Schedule callout to update stats */
	callout_reset(&nic->stats_callout, hz, nicvf_tick_stats, nic);

	return;

error:
	/* Something went very wrong. Disable this ifnet for good */
	if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
}

static void
nicvf_if_init(void *if_softc)
{
	struct nicvf *nic = if_softc;

	NICVF_CORE_LOCK(nic);
	nicvf_if_init_locked(nic);
	NICVF_CORE_UNLOCK(nic);
}

static int
nicvf_if_transmit(if_t ifp, struct mbuf *mbuf)
{
	struct nicvf *nic = if_getsoftc(ifp);
	struct queue_set *qs = nic->qs;
	struct snd_queue *sq;
	struct mbuf *mtmp;
	int qidx;
	int err = 0;

	if (__predict_false(qs == NULL)) {
		panic("%s: missing queue set for %s", __func__,
		    device_get_nameunit(nic->dev));
	}

	/* Select queue */
	if (M_HASHTYPE_GET(mbuf) != M_HASHTYPE_NONE)
		qidx = mbuf->m_pkthdr.flowid % qs->sq_cnt;
	else
		qidx = curcpu % qs->sq_cnt;

	sq = &qs->sq[qidx];

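	/*
	 * The checksum offload path may modify packet headers in place,
	 * so duplicate a shared (read-only) mbuf chain before transmit.
	 */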
	if (mbuf->m_next != NULL &&
	    (mbuf->m_pkthdr.csum_flags &
	    (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP)) != 0) {
		if (M_WRITABLE(mbuf) == 0) {
			mtmp = m_dup(mbuf, M_NOWAIT);
			m_freem(mbuf);
			if (mtmp == NULL)
				return (ENOBUFS);
			mbuf = mtmp;
		}
	}

	/*
	 * Try to enqueue the packet into the ring buffer. If the driver
	 * is not active, the link is down, or the enqueue operation
	 * failed, return with the appropriate error code.
	 */
	err = drbr_enqueue(ifp, sq->br, mbuf);
	if (((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING) || !nic->link_up || (err != 0))
		return (err);

	if (NICVF_TX_TRYLOCK(sq) != 0) {
		err = nicvf_xmit_locked(sq);
		NICVF_TX_UNLOCK(sq);
		return (err);
	} else
		taskqueue_enqueue(sq->snd_taskq, &sq->snd_task);

	return (0);
}

static void
nicvf_if_qflush(if_t ifp)
{
	struct nicvf *nic;
	struct queue_set *qs;
	struct snd_queue *sq;
	struct mbuf *mbuf;
	size_t idx;

	nic = if_getsoftc(ifp);
	qs = nic->qs;

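	/* Drain and free every send queue's buf_ring before the generic flush */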
	for (idx = 0; idx < qs->sq_cnt; idx++) {
		sq = &qs->sq[idx];
		NICVF_TX_LOCK(sq);
		while ((mbuf = buf_ring_dequeue_sc(sq->br)) != NULL)
			m_freem(mbuf);
		NICVF_TX_UNLOCK(sq);
	}
	if_qflush(ifp);
}

static uint64_t
nicvf_if_getcounter(if_t ifp, ift_counter cnt)
{
	struct nicvf *nic;
	struct nicvf_hw_stats *hw_stats;
	struct nicvf_drv_stats *drv_stats;

	nic = if_getsoftc(ifp);
	hw_stats = &nic->hw_stats;
	drv_stats = &nic->drv_stats;

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (drv_stats->rx_frames_ok);
	case IFCOUNTER_OPACKETS:
		return (drv_stats->tx_frames_ok);
	case IFCOUNTER_IBYTES:
		return (hw_stats->rx_bytes);
	case IFCOUNTER_OBYTES:
		return (hw_stats->tx_bytes_ok);
	case IFCOUNTER_IMCASTS:
		return (hw_stats->rx_mcast_frames);
	case IFCOUNTER_COLLISIONS:
		return (0);
	case IFCOUNTER_IQDROPS:
		return (drv_stats->rx_drops);
	case IFCOUNTER_OQDROPS:
		return (drv_stats->tx_drops);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}

static void
nicvf_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	struct nicvf *nic = if_getsoftc(ifp);

	NICVF_CORE_LOCK(nic);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (nic->link_up) {
		/* Device attached to working network */
		ifmr->ifm_status |= IFM_ACTIVE;
	}

	switch (nic->speed) {
	case SPEED_10:
		ifmr->ifm_active |= IFM_10_T;
		break;
	case SPEED_100:
		ifmr->ifm_active |= IFM_100_TX;
		break;
	case SPEED_1000:
		ifmr->ifm_active |= IFM_1000_T;
		break;
	case SPEED_10000:
		ifmr->ifm_active |= IFM_10G_SR;
		break;
	case SPEED_40000:
		ifmr->ifm_active |= IFM_40G_CR4;
		break;
	default:
		ifmr->ifm_active |= IFM_AUTO;
		break;
	}

	if (nic->duplex)
		ifmr->ifm_active |= IFM_FDX;
	else
		ifmr->ifm_active |= IFM_HDX;

	NICVF_CORE_UNLOCK(nic);
}

static int
nicvf_media_change(if_t ifp __unused)
{

	return (0);
}

/* Register read/write APIs */
void
nicvf_reg_write(struct nicvf *nic, bus_space_handle_t offset, uint64_t val)
{

	bus_write_8(nic->reg_base, offset, val);
}

uint64_t
nicvf_reg_read(struct nicvf *nic, uint64_t offset)
{

	return (bus_read_8(nic->reg_base, offset));
}

void
nicvf_queue_reg_write(struct nicvf *nic, bus_space_handle_t offset,
    uint64_t qidx, uint64_t val)
{

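	/* Per-queue register copies are (1 << NIC_Q_NUM_SHIFT) bytes apart */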
	bus_write_8(nic->reg_base, offset + (qidx << NIC_Q_NUM_SHIFT), val);
}

uint64_t
nicvf_queue_reg_read(struct nicvf *nic, bus_space_handle_t offset,
    uint64_t qidx)
{

	return (bus_read_8(nic->reg_base, offset + (qidx << NIC_Q_NUM_SHIFT)));
}

/* VF -> PF mailbox communication */
static void
nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx)
{
	uint64_t *msg = (uint64_t *)mbx;

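	/*
	 * A mailbox message is two 64-bit words; copy it into the pair
	 * of consecutive VF-to-PF mailbox registers.
	 */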
	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 0, msg[0]);
	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, msg[1]);
}

int
nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
{
	int timeout = NIC_MBOX_MSG_TIMEOUT * 10;
	int sleep = 2;

	NICVF_CORE_LOCK_ASSERT(nic);

	nic->pf_acked = FALSE;
	nic->pf_nacked = FALSE;

	nicvf_write_to_mbx(nic, mbx);

	/* Wait for the previous message to be acked, with a bounded timeout */
	while (!nic->pf_acked) {
		if (nic->pf_nacked)
			return (EINVAL);

		DELAY(sleep * 1000);

		if (nic->pf_acked)
			break;
		timeout -= sleep;
		if (!timeout) {
			device_printf(nic->dev,
				   "PF didn't ack to mbox msg %d from VF%d\n",
				   (mbx->msg.msg & 0xFF), nic->vf_id);

			return (EBUSY);
		}
	}
	return (0);
}

/*
 * Checks if the VF is able to communicate with the PF
 * and also gets the VNIC number this VF is associated with.
 */
static int
nicvf_check_pf_ready(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_READY;
	if (nicvf_send_msg_to_pf(nic, &mbx)) {
		device_printf(nic->dev,
			   "PF didn't respond to READY msg\n");
		return (0);
	}

	return (1);
}

static void
nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx)
{

	if (bgx->rx)
		nic->bgx_stats.rx_stats[bgx->idx] = bgx->stats;
	else
		nic->bgx_stats.tx_stats[bgx->idx] = bgx->stats;
}

static void
nicvf_handle_mbx_intr(struct nicvf *nic)
{
	union nic_mbx mbx = {};
	uint64_t *mbx_data;
	uint64_t mbx_addr;
	int i;

	mbx_addr = NIC_VF_PF_MAILBOX_0_1;
	mbx_data = (uint64_t *)&mbx;

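	/*
	 * Drain the PF-to-VF mailbox: read NIC_PF_VF_MAILBOX_SIZE
	 * consecutive 64-bit words to reassemble the message.
	 */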
	for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
		*mbx_data = nicvf_reg_read(nic, mbx_addr);
		mbx_data++;
		mbx_addr += sizeof(uint64_t);
	}

	switch (mbx.msg.msg) {
	case NIC_MBOX_MSG_READY:
		nic->pf_acked = TRUE;
		nic->vf_id = mbx.nic_cfg.vf_id & 0x7F;
		nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
		nic->node = mbx.nic_cfg.node_id;
		memcpy(nic->hwaddr, mbx.nic_cfg.mac_addr, ETHER_ADDR_LEN);
		nic->loopback_supported = mbx.nic_cfg.loopback_supported;
		nic->link_up = FALSE;
		nic->duplex = 0;
		nic->speed = 0;
		break;
	case NIC_MBOX_MSG_ACK:
		nic->pf_acked = TRUE;
		break;
	case NIC_MBOX_MSG_NACK:
		nic->pf_nacked = TRUE;
		break;
	case NIC_MBOX_MSG_RSS_SIZE:
		nic->rss_info.rss_size = mbx.rss_size.ind_tbl_size;
		nic->pf_acked = TRUE;
		break;
	case NIC_MBOX_MSG_BGX_STATS:
		nicvf_read_bgx_stats(nic, &mbx.bgx_stats);
		nic->pf_acked = TRUE;
		break;
	case NIC_MBOX_MSG_BGX_LINK_CHANGE:
		nic->pf_acked = TRUE;
		nic->link_up = mbx.link_status.link_up;
		nic->duplex = mbx.link_status.duplex;
		nic->speed = mbx.link_status.speed;
		if (nic->link_up) {
			if_setbaudrate(nic->ifp, nic->speed * 1000000);
			if_link_state_change(nic->ifp, LINK_STATE_UP);
		} else {
			if_setbaudrate(nic->ifp, 0);
			if_link_state_change(nic->ifp, LINK_STATE_DOWN);
		}
		break;
	default:
		device_printf(nic->dev,
			   "Invalid message from PF, msg 0x%x\n", mbx.msg.msg);
		break;
	}
	nicvf_clear_intr(nic, NICVF_INTR_MBOX, 0);
}

static int
nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
{
	union nic_mbx mbx = {};

	mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
	mbx.frs.max_frs = mtu;
	mbx.frs.vf_id = nic->vf_id;

	return (nicvf_send_msg_to_pf(nic, &mbx));
}

static int
nicvf_hw_set_mac_addr(struct nicvf *nic, uint8_t *hwaddr)
{
	union nic_mbx mbx = {};

	mbx.mac.msg = NIC_MBOX_MSG_SET_MAC;
	mbx.mac.vf_id = nic->vf_id;
	memcpy(mbx.mac.mac_addr, hwaddr, ETHER_ADDR_LEN);

	return (nicvf_send_msg_to_pf(nic, &mbx));
}

static void
nicvf_config_cpi(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.cpi_cfg.msg = NIC_MBOX_MSG_CPI_CFG;
	mbx.cpi_cfg.vf_id = nic->vf_id;
	mbx.cpi_cfg.cpi_alg = nic->cpi_alg;
	mbx.cpi_cfg.rq_cnt = nic->qs->rq_cnt;

	nicvf_send_msg_to_pf(nic, &mbx);
}

static void
nicvf_get_rss_size(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
	mbx.rss_size.vf_id = nic->vf_id;
	nicvf_send_msg_to_pf(nic, &mbx);
}

static void
nicvf_config_rss(struct nicvf *nic)
{
	union nic_mbx mbx = {};
	struct nicvf_rss_info *rss;
	int ind_tbl_len;
	int i, nextq;

	rss = &nic->rss_info;
	ind_tbl_len = rss->rss_size;
	nextq = 0;

	mbx.rss_cfg.vf_id = nic->vf_id;
	mbx.rss_cfg.hash_bits = rss->hash_bits;
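	/*
	 * The indirection table may not fit in a single mailbox message;
	 * send it in chunks, marking every chunk after the first with
	 * the RSS_CFG_CONT message type.
	 */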
	while (ind_tbl_len != 0) {
		mbx.rss_cfg.tbl_offset = nextq;
		mbx.rss_cfg.tbl_len = MIN(ind_tbl_len,
		    RSS_IND_TBL_LEN_PER_MBX_MSG);
		mbx.rss_cfg.msg = mbx.rss_cfg.tbl_offset ?
		    NIC_MBOX_MSG_RSS_CFG_CONT : NIC_MBOX_MSG_RSS_CFG;

		for (i = 0; i < mbx.rss_cfg.tbl_len; i++)
			mbx.rss_cfg.ind_tbl[i] = rss->ind_tbl[nextq++];

		nicvf_send_msg_to_pf(nic, &mbx);

		ind_tbl_len -= mbx.rss_cfg.tbl_len;
	}
}

static void
nicvf_set_rss_key(struct nicvf *nic)
{
	struct nicvf_rss_info *rss;
	uint64_t key_addr;
	int idx;

	rss = &nic->rss_info;
	key_addr = NIC_VNIC_RSS_KEY_0_4;

	for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
		nicvf_reg_write(nic, key_addr, rss->key[idx]);
		key_addr += sizeof(uint64_t);
	}
}

static int
nicvf_rss_init(struct nicvf *nic)
{
	struct nicvf_rss_info *rss;
	int idx;

	nicvf_get_rss_size(nic);

	rss = &nic->rss_info;
	if (nic->cpi_alg != CPI_ALG_NONE) {
		rss->enable = FALSE;
		rss->hash_bits = 0;
		return (ENXIO);
	}

	rss->enable = TRUE;

	/* Using the HW reset value for now */
	rss->key[0] = 0xFEED0BADFEED0BADUL;
	rss->key[1] = 0xFEED0BADFEED0BADUL;
	rss->key[2] = 0xFEED0BADFEED0BADUL;
	rss->key[3] = 0xFEED0BADFEED0BADUL;
	rss->key[4] = 0xFEED0BADFEED0BADUL;

	nicvf_set_rss_key(nic);

	rss->cfg = RSS_IP_HASH_ENA | RSS_TCP_HASH_ENA | RSS_UDP_HASH_ENA;
	nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss->cfg);

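	/*
	 * hash_bits is effectively log2 of the indirection table size;
	 * spread the table entries round-robin across the receive queues.
	 */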
	rss->hash_bits = fls(rss->rss_size) - 1;
	for (idx = 0; idx < rss->rss_size; idx++)
		rss->ind_tbl[idx] = idx % nic->rx_queues;

	nicvf_config_rss(nic);

	return (0);
}

static int
nicvf_init_resources(struct nicvf *nic)
{
	int err;
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;

	/* Enable Qset */
	nicvf_qset_config(nic, TRUE);

	/* Initialize queues and HW for data transfer */
	err = nicvf_config_data_transfer(nic, TRUE);
	if (err) {
		device_printf(nic->dev,
		    "Failed to alloc/config VF's QSet resources\n");
		return (err);
	}

	/* Send VF config done msg to PF */
	nicvf_write_to_mbx(nic, &mbx);

	return (0);
}

static void
nicvf_misc_intr_handler(void *arg)
{
	struct nicvf *nic = (struct nicvf *)arg;
	uint64_t intr;

	intr = nicvf_reg_read(nic, NIC_VF_INT);
	/* Check for spurious interrupt */
	if (!(intr & NICVF_INTR_MBOX_MASK))
		return;

	nicvf_handle_mbx_intr(nic);
}

static int
nicvf_intr_handler(void *arg)
{
	struct nicvf *nic;
	struct cmp_queue *cq;
	int qidx;

	cq = (struct cmp_queue *)arg;
	nic = cq->nic;
	qidx = cq->idx;

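	/*
	 * Runs as an interrupt filter: mask the CQ vector and hand the
	 * actual completion processing off to the per-CQ taskqueue.
	 */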
	/* Disable interrupts */
	nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);

	taskqueue_enqueue(cq->cmp_taskq, &cq->cmp_task);

	/* Clear interrupt */
	nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);

	return (FILTER_HANDLED);
}

static void
nicvf_rbdr_intr_handler(void *arg)
{
	struct nicvf *nic;
	struct queue_set *qs;
	struct rbdr *rbdr;
	int qidx;

	nic = (struct nicvf *)arg;

	/* Disable the RBDR interrupt and defer processing to the taskqueue */
	for (qidx = 0; qidx < nic->qs->rbdr_cnt; qidx++) {
		if (!nicvf_is_intr_enabled(nic, NICVF_INTR_RBDR, qidx))
			continue;
		nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);

		qs = nic->qs;
		rbdr = &qs->rbdr[qidx];
		taskqueue_enqueue(rbdr->rbdr_taskq, &rbdr->rbdr_task_nowait);
		/* Clear interrupt */
		nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
	}
}

static void
nicvf_qs_err_intr_handler(void *arg)
{
	struct nicvf *nic = (struct nicvf *)arg;
	struct queue_set *qs = nic->qs;

	/* Disable the Qset error interrupt and defer handling to the taskqueue */
	nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
	taskqueue_enqueue(qs->qs_err_taskq, &qs->qs_err_task);
	nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);
}

static int
nicvf_enable_msix(struct nicvf *nic)
{
	struct pci_devinfo *dinfo;
	int rid, count;
	int ret;

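	/* Map the BAR that backs the MSI-X table before enabling MSI-X */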
	dinfo = device_get_ivars(nic->dev);
	rid = dinfo->cfg.msix.msix_table_bar;
	nic->msix_table_res =
	    bus_alloc_resource_any(nic->dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (nic->msix_table_res == NULL) {
		device_printf(nic->dev,
		    "Could not allocate memory for MSI-X table\n");
		return (ENXIO);
	}

	count = nic->num_vec = NIC_VF_MSIX_VECTORS;

	ret = pci_alloc_msix(nic->dev, &count);
	if ((ret != 0) || (count != nic->num_vec)) {
		device_printf(nic->dev,
		    "Request for %d MSI-X vectors failed, error: %d\n",
		    nic->num_vec, ret);
		return (ret);
	}

	nic->msix_enabled = 1;
	return (0);
}

static void
nicvf_disable_msix(struct nicvf *nic)
{

	if (nic->msix_enabled) {
		pci_release_msi(nic->dev);
		nic->msix_enabled = 0;
		nic->num_vec = 0;
	}
}

static void
nicvf_release_all_interrupts(struct nicvf *nic)
{
	struct resource *res;
	int irq;
	int err __diagused;

	/* Free registered interrupts */
	for (irq = 0; irq < nic->num_vec; irq++) {
		res = nic->msix_entries[irq].irq_res;
		if (res == NULL)
			continue;
		/* Teardown interrupt first */
		if (nic->msix_entries[irq].handle != NULL) {
			err = bus_teardown_intr(nic->dev,
			    nic->msix_entries[irq].irq_res,
			    nic->msix_entries[irq].handle);
			KASSERT(err == 0,
			    ("ERROR: Unable to teardown interrupt %d", irq));
			nic->msix_entries[irq].handle = NULL;
		}

		bus_release_resource(nic->dev, SYS_RES_IRQ,
			    rman_get_rid(res), nic->msix_entries[irq].irq_res);
		nic->msix_entries[irq].irq_res = NULL;
	}
	/* Disable MSI-X */
	nicvf_disable_msix(nic);
}

/*
 * Initialize MSI-X vectors and register the MISC interrupt.
 * Send a READY message to the PF to check if it is alive.
 */
static int
nicvf_allocate_misc_interrupt(struct nicvf *nic)
{
	struct resource *res;
	int irq, rid;
	int ret = 0;

	/* Return if mailbox interrupt is already registered */
	if (nic->msix_enabled)
		return (0);

	/* Enable MSI-X */
	if (nicvf_enable_msix(nic) != 0)
		return (ENXIO);

	irq = NICVF_INTR_ID_MISC;
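	/* SYS_RES_IRQ rids for MSI-X vectors are 1-based, hence irq + 1 */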
	rid = irq + 1;
	nic->msix_entries[irq].irq_res = bus_alloc_resource_any(nic->dev,
	    SYS_RES_IRQ, &rid, (RF_SHAREABLE | RF_ACTIVE));
	if (nic->msix_entries[irq].irq_res == NULL) {
		device_printf(nic->dev,
		    "Could not allocate Mbox interrupt for VF%d\n",
		    device_get_unit(nic->dev));
		return (ENXIO);
	}

	ret = bus_setup_intr(nic->dev, nic->msix_entries[irq].irq_res,
	    (INTR_MPSAFE | INTR_TYPE_MISC), NULL, nicvf_misc_intr_handler, nic,
	    &nic->msix_entries[irq].handle);
	if (ret != 0) {
		res = nic->msix_entries[irq].irq_res;
		bus_release_resource(nic->dev, SYS_RES_IRQ,
			    rman_get_rid(res), res);
		nic->msix_entries[irq].irq_res = NULL;
		return (ret);
	}

	return (0);
}

static int
nicvf_enable_misc_interrupt(struct nicvf *nic)
{

	/* Enable mailbox interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_MBOX, 0);

	/* Check if VF is able to communicate with PF */
	if (!nicvf_check_pf_ready(nic)) {
		nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
		return (ENXIO);
	}

	return (0);
}

static void
nicvf_release_net_interrupts(struct nicvf *nic)
{
	struct resource *res;
	int irq;
	int err;

	for_each_cq_irq(irq) {
		res = nic->msix_entries[irq].irq_res;
		if (res == NULL)
			continue;
		/* Teardown active interrupts first */
		if (nic->msix_entries[irq].handle != NULL) {
			err = bus_teardown_intr(nic->dev,
			    nic->msix_entries[irq].irq_res,
			    nic->msix_entries[irq].handle);
			KASSERT(err == 0,
			    ("ERROR: Unable to teardown CQ interrupt %d",
			    (irq - NICVF_INTR_ID_CQ)));
			if (err != 0)
				continue;
		}

		/* Release resource */
		bus_release_resource(nic->dev, SYS_RES_IRQ, rman_get_rid(res),
		    res);
		nic->msix_entries[irq].irq_res = NULL;
	}

	for_each_rbdr_irq(irq) {
		res = nic->msix_entries[irq].irq_res;
		if (res == NULL)
			continue;
		/* Teardown active interrupts first */
		if (nic->msix_entries[irq].handle != NULL) {
			err = bus_teardown_intr(nic->dev,
			    nic->msix_entries[irq].irq_res,
			    nic->msix_entries[irq].handle);
			KASSERT(err == 0,
			    ("ERROR: Unable to teardown RBDR interrupt %d",
			    (irq - NICVF_INTR_ID_RBDR)));
			if (err != 0)
				continue;
		}

		/* Release resource */
		bus_release_resource(nic->dev, SYS_RES_IRQ, rman_get_rid(res),
		    res);
		nic->msix_entries[irq].irq_res = NULL;
	}

	irq = NICVF_INTR_ID_QS_ERR;
	res = nic->msix_entries[irq].irq_res;
	if (res != NULL) {
		/* Teardown active interrupts first */
		if (nic->msix_entries[irq].handle != NULL) {
			err = bus_teardown_intr(nic->dev,
			    nic->msix_entries[irq].irq_res,
			    nic->msix_entries[irq].handle);
			KASSERT(err == 0,
			    ("ERROR: Unable to teardown QS Error interrupt %d",
			    irq));
			if (err != 0)
				return;
		}

		/* Release resource */
		bus_release_resource(nic->dev, SYS_RES_IRQ, rman_get_rid(res),
		    res);
		nic->msix_entries[irq].irq_res = NULL;
	}
}

static int
nicvf_allocate_net_interrupts(struct nicvf *nic)
{
	u_int cpuid;
	int irq, rid;
	int qidx;
	int ret = 0;

	/* MSI-X must be configured by now */
	if (!nic->msix_enabled) {
		device_printf(nic->dev, "Cannot allocate queue interrupts. "
		    "MSI-X interrupts disabled.\n");
		return (ENXIO);
	}

	/* Register CQ interrupts */
	for_each_cq_irq(irq) {
		if (irq >= (NICVF_INTR_ID_CQ + nic->qs->cq_cnt))
			break;

		qidx = irq - NICVF_INTR_ID_CQ;
		rid = irq + 1;
		nic->msix_entries[irq].irq_res = bus_alloc_resource_any(nic->dev,
		    SYS_RES_IRQ, &rid, (RF_SHAREABLE | RF_ACTIVE));
		if (nic->msix_entries[irq].irq_res == NULL) {
			device_printf(nic->dev,
			    "Could not allocate CQ interrupt %d for VF%d\n",
			    (irq - NICVF_INTR_ID_CQ), device_get_unit(nic->dev));
			ret = ENXIO;
			goto error;
		}
		ret = bus_setup_intr(nic->dev, nic->msix_entries[irq].irq_res,
		    (INTR_MPSAFE | INTR_TYPE_NET), nicvf_intr_handler,
		    NULL, &nic->qs->cq[qidx], &nic->msix_entries[irq].handle);
		if (ret != 0) {
			device_printf(nic->dev,
			    "Could not setup CQ interrupt %d for VF%d\n",
			    (irq - NICVF_INTR_ID_CQ), device_get_unit(nic->dev));
			goto error;
		}
		cpuid = (device_get_unit(nic->dev) * CMP_QUEUE_CNT) + qidx;
		cpuid %= mp_ncpus;
		/*
		 * Save CPU ID for later use when system-wide RSS is enabled.
		 * It will be used to pin the CQ task to the same CPU that got
		 * interrupted.
		 */
		nic->qs->cq[qidx].cmp_cpuid = cpuid;
		if (bootverbose) {
			device_printf(nic->dev, "bind CQ%d IRQ to CPU%d\n",
			    qidx, cpuid);
		}
		/* Bind interrupts to the given CPU */
		bus_bind_intr(nic->dev, nic->msix_entries[irq].irq_res, cpuid);
	}

	/* Register RBDR interrupt */
	for_each_rbdr_irq(irq) {
		if (irq >= (NICVF_INTR_ID_RBDR + nic->qs->rbdr_cnt))
			break;

		rid = irq + 1;
		nic->msix_entries[irq].irq_res = bus_alloc_resource_any(nic->dev,
		    SYS_RES_IRQ, &rid, (RF_SHAREABLE | RF_ACTIVE));
		if (nic->msix_entries[irq].irq_res == NULL) {
			device_printf(nic->dev,
			    "Could not allocate RBDR interrupt %d for VF%d\n",
			    (irq - NICVF_INTR_ID_RBDR),
			    device_get_unit(nic->dev));
			ret = ENXIO;
			goto error;
		}
		ret = bus_setup_intr(nic->dev, nic->msix_entries[irq].irq_res,
		    (INTR_MPSAFE | INTR_TYPE_NET), NULL,
		    nicvf_rbdr_intr_handler, nic,
		    &nic->msix_entries[irq].handle);
		if (ret != 0) {
			device_printf(nic->dev,
			    "Could not setup RBDR interrupt %d for VF%d\n",
			    (irq - NICVF_INTR_ID_RBDR),
			    device_get_unit(nic->dev));
			goto error;
		}
	}

	/* Register QS error interrupt */
	irq = NICVF_INTR_ID_QS_ERR;
	rid = irq + 1;
	nic->msix_entries[irq].irq_res = bus_alloc_resource_any(nic->dev,
	    SYS_RES_IRQ, &rid, (RF_SHAREABLE | RF_ACTIVE));
	if (nic->msix_entries[irq].irq_res == NULL) {
		device_printf(nic->dev,
		    "Could not allocate QS Error interrupt for VF%d\n",
		    device_get_unit(nic->dev));
		ret = ENXIO;
		goto error;
	}
	ret = bus_setup_intr(nic->dev, nic->msix_entries[irq].irq_res,
	    (INTR_MPSAFE | INTR_TYPE_NET), NULL, nicvf_qs_err_intr_handler,
	    nic, &nic->msix_entries[irq].handle);
	if (ret != 0) {
		device_printf(nic->dev,
		    "Could not setup QS Error interrupt for VF%d\n",
		    device_get_unit(nic->dev));
		goto error;
	}

	return (0);
error:
	nicvf_release_net_interrupts(nic);
	return (ret);
}

static int
nicvf_stop_locked(struct nicvf *nic)
{
	if_t ifp;
	int qidx;
	struct queue_set *qs = nic->qs;
	union nic_mbx mbx = {};

	NICVF_CORE_LOCK_ASSERT(nic);
	/* Stop callout. Can block here since holding SX lock */
	callout_drain(&nic->stats_callout);

	ifp = nic->ifp;

	mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
	nicvf_send_msg_to_pf(nic, &mbx);

	/* Disable RBDR & QS error interrupts */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
		nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
		nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
	}
	nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
	nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);

	/* Deactivate network interface */
	if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);

	/* Free resources */
	nicvf_config_data_transfer(nic, FALSE);

	/* Disable HW Qset */
	nicvf_qset_config(nic, FALSE);

	/* Disable mailbox interrupt */
	nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);

	return (0);
}

static void
nicvf_update_stats(struct nicvf *nic)
{
	int qidx;
	struct nicvf_hw_stats *stats = &nic->hw_stats;
	struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
	struct queue_set *qs = nic->qs;

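/* Each statistics register is 64 bits wide; (reg) << 3 is its byte offset */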
#define	GET_RX_STATS(reg) \
    nicvf_reg_read(nic, NIC_VNIC_RX_STAT_0_13 | ((reg) << 3))
#define	GET_TX_STATS(reg) \
    nicvf_reg_read(nic, NIC_VNIC_TX_STAT_0_4 | ((reg) << 3))

	stats->rx_bytes = GET_RX_STATS(RX_OCTS);
	stats->rx_ucast_frames = GET_RX_STATS(RX_UCAST);
	stats->rx_bcast_frames = GET_RX_STATS(RX_BCAST);
	stats->rx_mcast_frames = GET_RX_STATS(RX_MCAST);
	stats->rx_fcs_errors = GET_RX_STATS(RX_FCS);
	stats->rx_l2_errors = GET_RX_STATS(RX_L2ERR);
	stats->rx_drop_red = GET_RX_STATS(RX_RED);
	stats->rx_drop_red_bytes = GET_RX_STATS(RX_RED_OCTS);
	stats->rx_drop_overrun = GET_RX_STATS(RX_ORUN);
	stats->rx_drop_overrun_bytes = GET_RX_STATS(RX_ORUN_OCTS);
	stats->rx_drop_bcast = GET_RX_STATS(RX_DRP_BCAST);
	stats->rx_drop_mcast = GET_RX_STATS(RX_DRP_MCAST);
	stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST);
	stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST);

	stats->tx_bytes_ok = GET_TX_STATS(TX_OCTS);
	stats->tx_ucast_frames_ok = GET_TX_STATS(TX_UCAST);
	stats->tx_bcast_frames_ok = GET_TX_STATS(TX_BCAST);
	stats->tx_mcast_frames_ok = GET_TX_STATS(TX_MCAST);
	stats->tx_drops = GET_TX_STATS(TX_DROP);

	drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok +
	    stats->tx_bcast_frames_ok + stats->tx_mcast_frames_ok;
	drv_stats->rx_drops = stats->rx_drop_red + stats->rx_drop_overrun;
	drv_stats->tx_drops = stats->tx_drops;

	/* Update RQ and SQ stats */
	for (qidx = 0; qidx < qs->rq_cnt; qidx++)
		nicvf_update_rq_stats(nic, qidx);
	for (qidx = 0; qidx < qs->sq_cnt; qidx++)
		nicvf_update_sq_stats(nic, qidx);
}

static void
nicvf_tick_stats(void *arg)
{
	struct nicvf *nic;

	nic = (struct nicvf *)arg;

	/* Read the statistics */
	nicvf_update_stats(nic);

	callout_reset(&nic->stats_callout, hz, nicvf_tick_stats, nic);
}