xref: /freebsd/sys/dev/virtio/network/if_vtnet.c (revision 7661de35d15f582ab33e3bd6b8d909601557e436)
1 /*-
2  * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 /* Driver for VirtIO network devices. */
28 
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31 
32 #include <sys/param.h>
33 #include <sys/eventhandler.h>
34 #include <sys/systm.h>
35 #include <sys/kernel.h>
36 #include <sys/sockio.h>
37 #include <sys/mbuf.h>
38 #include <sys/malloc.h>
39 #include <sys/module.h>
40 #include <sys/socket.h>
41 #include <sys/sysctl.h>
42 #include <sys/random.h>
43 #include <sys/sglist.h>
44 #include <sys/lock.h>
45 #include <sys/mutex.h>
46 #include <sys/taskqueue.h>
47 #include <sys/smp.h>
48 #include <machine/smp.h>
49 
50 #include <vm/uma.h>
51 
52 #include <net/ethernet.h>
53 #include <net/if.h>
54 #include <net/if_var.h>
55 #include <net/if_arp.h>
56 #include <net/if_dl.h>
57 #include <net/if_types.h>
58 #include <net/if_media.h>
59 #include <net/if_vlan_var.h>
60 
61 #include <net/bpf.h>
62 
63 #include <netinet/in_systm.h>
64 #include <netinet/in.h>
65 #include <netinet/ip.h>
66 #include <netinet/ip6.h>
67 #include <netinet6/ip6_var.h>
68 #include <netinet/udp.h>
69 #include <netinet/tcp.h>
70 #include <netinet/sctp.h>
71 
72 #include <machine/bus.h>
73 #include <machine/resource.h>
74 #include <sys/bus.h>
75 #include <sys/rman.h>
76 
77 #include <dev/virtio/virtio.h>
78 #include <dev/virtio/virtqueue.h>
79 #include <dev/virtio/network/virtio_net.h>
80 #include <dev/virtio/network/if_vtnetvar.h>
81 
82 #include "virtio_if.h"
83 
84 #include "opt_inet.h"
85 #include "opt_inet6.h"
86 
87 static int	vtnet_modevent(module_t, int, void *);
88 
89 static int	vtnet_probe(device_t);
90 static int	vtnet_attach(device_t);
91 static int	vtnet_detach(device_t);
92 static int	vtnet_suspend(device_t);
93 static int	vtnet_resume(device_t);
94 static int	vtnet_shutdown(device_t);
95 static int	vtnet_attach_completed(device_t);
96 static int	vtnet_config_change(device_t);
97 
98 static void	vtnet_negotiate_features(struct vtnet_softc *);
99 static void	vtnet_setup_features(struct vtnet_softc *);
100 static int	vtnet_init_rxq(struct vtnet_softc *, int);
101 static int	vtnet_init_txq(struct vtnet_softc *, int);
102 static int	vtnet_alloc_rxtx_queues(struct vtnet_softc *);
103 static void	vtnet_free_rxtx_queues(struct vtnet_softc *);
104 static int	vtnet_alloc_rx_filters(struct vtnet_softc *);
105 static void	vtnet_free_rx_filters(struct vtnet_softc *);
106 static int	vtnet_alloc_virtqueues(struct vtnet_softc *);
107 static int	vtnet_setup_interface(struct vtnet_softc *);
108 static int	vtnet_change_mtu(struct vtnet_softc *, int);
109 static int	vtnet_ioctl(struct ifnet *, u_long, caddr_t);
110 
111 static int	vtnet_rxq_populate(struct vtnet_rxq *);
112 static void	vtnet_rxq_free_mbufs(struct vtnet_rxq *);
113 static struct mbuf *
114 		vtnet_rx_alloc_buf(struct vtnet_softc *, int, struct mbuf **);
115 static int	vtnet_rxq_replace_lro_nomgr_buf(struct vtnet_rxq *,
116 		    struct mbuf *, int);
117 static int	vtnet_rxq_replace_buf(struct vtnet_rxq *, struct mbuf *, int);
118 static int	vtnet_rxq_enqueue_buf(struct vtnet_rxq *, struct mbuf *);
119 static int	vtnet_rxq_new_buf(struct vtnet_rxq *);
120 static int	vtnet_rxq_csum(struct vtnet_rxq *, struct mbuf *,
121 		     struct virtio_net_hdr *);
122 static void	vtnet_rxq_discard_merged_bufs(struct vtnet_rxq *, int);
123 static void	vtnet_rxq_discard_buf(struct vtnet_rxq *, struct mbuf *);
124 static int	vtnet_rxq_merged_eof(struct vtnet_rxq *, struct mbuf *, int);
125 static void	vtnet_rxq_input(struct vtnet_rxq *, struct mbuf *,
126 		    struct virtio_net_hdr *);
127 static int	vtnet_rxq_eof(struct vtnet_rxq *);
128 static void	vtnet_rx_vq_intr(void *);
129 static void	vtnet_rxq_tq_intr(void *, int);
130 
131 static void	vtnet_txq_free_mbufs(struct vtnet_txq *);
132 static int	vtnet_txq_offload_ctx(struct vtnet_txq *, struct mbuf *,
133 		    int *, int *, int *);
134 static int	vtnet_txq_offload_tso(struct vtnet_txq *, struct mbuf *, int,
135 		    int, struct virtio_net_hdr *);
136 static struct mbuf *
137 		vtnet_txq_offload(struct vtnet_txq *, struct mbuf *,
138 		    struct virtio_net_hdr *);
139 static int	vtnet_txq_enqueue_buf(struct vtnet_txq *, struct mbuf **,
140 		    struct vtnet_tx_header *);
141 static int	vtnet_txq_encap(struct vtnet_txq *, struct mbuf **);
142 #ifdef VTNET_LEGACY_TX
143 static void	vtnet_start_locked(struct vtnet_txq *, struct ifnet *);
144 static void	vtnet_start(struct ifnet *);
145 #else
146 static int	vtnet_txq_mq_start_locked(struct vtnet_txq *, struct mbuf *);
147 static int	vtnet_txq_mq_start(struct ifnet *, struct mbuf *);
148 static void	vtnet_txq_tq_deferred(void *, int);
149 #endif
150 static void	vtnet_txq_start(struct vtnet_txq *);
151 static void	vtnet_txq_tq_intr(void *, int);
152 static void	vtnet_txq_eof(struct vtnet_txq *);
153 static void	vtnet_tx_vq_intr(void *);
154 static void	vtnet_tx_start_all(struct vtnet_softc *);
155 
156 #ifndef VTNET_LEGACY_TX
157 static void	vtnet_qflush(struct ifnet *);
158 #endif
159 
160 static int	vtnet_watchdog(struct vtnet_txq *);
161 static void	vtnet_rxq_accum_stats(struct vtnet_rxq *,
162 		    struct vtnet_rxq_stats *);
163 static void	vtnet_txq_accum_stats(struct vtnet_txq *,
164 		    struct vtnet_txq_stats *);
165 static void	vtnet_accumulate_stats(struct vtnet_softc *);
166 static void	vtnet_tick(void *);
167 
168 static void	vtnet_start_taskqueues(struct vtnet_softc *);
169 static void	vtnet_free_taskqueues(struct vtnet_softc *);
170 static void	vtnet_drain_taskqueues(struct vtnet_softc *);
171 
172 static void	vtnet_drain_rxtx_queues(struct vtnet_softc *);
173 static void	vtnet_stop_rendezvous(struct vtnet_softc *);
174 static void	vtnet_stop(struct vtnet_softc *);
175 static int	vtnet_virtio_reinit(struct vtnet_softc *);
176 static void	vtnet_init_rx_filters(struct vtnet_softc *);
177 static int	vtnet_init_rx_queues(struct vtnet_softc *);
178 static int	vtnet_init_tx_queues(struct vtnet_softc *);
179 static int	vtnet_init_rxtx_queues(struct vtnet_softc *);
180 static void	vtnet_set_active_vq_pairs(struct vtnet_softc *);
181 static int	vtnet_reinit(struct vtnet_softc *);
182 static void	vtnet_init_locked(struct vtnet_softc *);
183 static void	vtnet_init(void *);
184 
185 static void	vtnet_free_ctrl_vq(struct vtnet_softc *);
186 static void	vtnet_exec_ctrl_cmd(struct vtnet_softc *, void *,
187 		    struct sglist *, int, int);
188 static int	vtnet_ctrl_mac_cmd(struct vtnet_softc *, uint8_t *);
189 static int	vtnet_ctrl_mq_cmd(struct vtnet_softc *, uint16_t);
190 static int	vtnet_ctrl_rx_cmd(struct vtnet_softc *, int, int);
191 static int	vtnet_set_promisc(struct vtnet_softc *, int);
192 static int	vtnet_set_allmulti(struct vtnet_softc *, int);
193 static void	vtnet_attach_disable_promisc(struct vtnet_softc *);
194 static void	vtnet_rx_filter(struct vtnet_softc *);
195 static void	vtnet_rx_filter_mac(struct vtnet_softc *);
196 static int	vtnet_exec_vlan_filter(struct vtnet_softc *, int, uint16_t);
197 static void	vtnet_rx_filter_vlan(struct vtnet_softc *);
198 static void	vtnet_update_vlan_filter(struct vtnet_softc *, int, uint16_t);
199 static void	vtnet_register_vlan(void *, struct ifnet *, uint16_t);
200 static void	vtnet_unregister_vlan(void *, struct ifnet *, uint16_t);
201 
202 static int	vtnet_is_link_up(struct vtnet_softc *);
203 static void	vtnet_update_link_status(struct vtnet_softc *);
204 static int	vtnet_ifmedia_upd(struct ifnet *);
205 static void	vtnet_ifmedia_sts(struct ifnet *, struct ifmediareq *);
206 static void	vtnet_get_hwaddr(struct vtnet_softc *);
207 static void	vtnet_set_hwaddr(struct vtnet_softc *);
208 static void	vtnet_vlan_tag_remove(struct mbuf *);
209 
210 static void	vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *,
211 		    struct sysctl_oid_list *, struct vtnet_rxq *);
212 static void	vtnet_setup_txq_sysctl(struct sysctl_ctx_list *,
213 		    struct sysctl_oid_list *, struct vtnet_txq *);
214 static void	vtnet_setup_queue_sysctl(struct vtnet_softc *);
215 static void	vtnet_setup_sysctl(struct vtnet_softc *);
216 
217 static int	vtnet_rxq_enable_intr(struct vtnet_rxq *);
218 static void	vtnet_rxq_disable_intr(struct vtnet_rxq *);
219 static int	vtnet_txq_enable_intr(struct vtnet_txq *);
220 static void	vtnet_txq_disable_intr(struct vtnet_txq *);
221 static void	vtnet_enable_rx_interrupts(struct vtnet_softc *);
222 static void	vtnet_enable_tx_interrupts(struct vtnet_softc *);
223 static void	vtnet_enable_interrupts(struct vtnet_softc *);
224 static void	vtnet_disable_rx_interrupts(struct vtnet_softc *);
225 static void	vtnet_disable_tx_interrupts(struct vtnet_softc *);
226 static void	vtnet_disable_interrupts(struct vtnet_softc *);
227 
228 static int	vtnet_tunable_int(struct vtnet_softc *, const char *, int);
229 
230 /* Tunables. */
231 static int vtnet_csum_disable = 0;
232 TUNABLE_INT("hw.vtnet.csum_disable", &vtnet_csum_disable);
233 static int vtnet_tso_disable = 0;
234 TUNABLE_INT("hw.vtnet.tso_disable", &vtnet_tso_disable);
235 static int vtnet_lro_disable = 0;
236 TUNABLE_INT("hw.vtnet.lro_disable", &vtnet_lro_disable);
237 static int vtnet_mq_disable = 0;
238 TUNABLE_INT("hw.vtnet.mq_disable", &vtnet_mq_disable);
239 static int vtnet_mq_max_pairs = 0;
240 TUNABLE_INT("hw.vtnet.mq_max_pairs", &vtnet_mq_max_pairs);
241 static int vtnet_rx_process_limit = 512;
242 TUNABLE_INT("hw.vtnet.rx_process_limit", &vtnet_rx_process_limit);
243 
244 /*
245  * Reducing the number of transmit completed interrupts can improve
246  * performance. To do so, the define below keeps the Tx vq interrupt
247  * disabled and adds calls to vtnet_txeof() in the start and watchdog
248  * paths. The price to pay for this is the m_free'ing of transmitted
249  * mbufs may be delayed until the watchdog fires.
250  *
251  * BMV: Reintroduce this later as a run-time option, if it makes
252  * sense after the EVENT_IDX feature is supported.
253  *
254  * #define VTNET_TX_INTR_MODERATION
255  */
256 
257 static uma_zone_t vtnet_tx_header_zone;
258 
259 static struct virtio_feature_desc vtnet_feature_desc[] = {
260 	{ VIRTIO_NET_F_CSUM,		"TxChecksum"	},
261 	{ VIRTIO_NET_F_GUEST_CSUM,	"RxChecksum"	},
262 	{ VIRTIO_NET_F_MAC,		"MacAddress"	},
263 	{ VIRTIO_NET_F_GSO,		"TxAllGSO"	},
264 	{ VIRTIO_NET_F_GUEST_TSO4,	"RxTSOv4"	},
265 	{ VIRTIO_NET_F_GUEST_TSO6,	"RxTSOv6"	},
266 	{ VIRTIO_NET_F_GUEST_ECN,	"RxECN"		},
267 	{ VIRTIO_NET_F_GUEST_UFO,	"RxUFO"		},
268 	{ VIRTIO_NET_F_HOST_TSO4,	"TxTSOv4"	},
269 	{ VIRTIO_NET_F_HOST_TSO6,	"TxTSOv6"	},
270 	{ VIRTIO_NET_F_HOST_ECN,	"TxTSOECN"	},
271 	{ VIRTIO_NET_F_HOST_UFO,	"TxUFO"		},
272 	{ VIRTIO_NET_F_MRG_RXBUF,	"MrgRxBuf"	},
273 	{ VIRTIO_NET_F_STATUS,		"Status"	},
274 	{ VIRTIO_NET_F_CTRL_VQ,		"ControlVq"	},
275 	{ VIRTIO_NET_F_CTRL_RX,		"RxMode"	},
276 	{ VIRTIO_NET_F_CTRL_VLAN,	"VLanFilter"	},
277 	{ VIRTIO_NET_F_CTRL_RX_EXTRA,	"RxModeExtra"	},
278 	{ VIRTIO_NET_F_GUEST_ANNOUNCE,	"GuestAnnounce"	},
279 	{ VIRTIO_NET_F_MQ,		"Multiqueue"	},
280 	{ VIRTIO_NET_F_CTRL_MAC_ADDR,	"SetMacAddress"	},
281 
282 	{ 0, NULL }
283 };
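/*
 * Note: the descriptions above are purely informational; they appear in
 * place of the raw feature bit names when the negotiated feature set is
 * printed at attach time.
 */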
284 
285 static device_method_t vtnet_methods[] = {
286 	/* Device methods. */
287 	DEVMETHOD(device_probe,			vtnet_probe),
288 	DEVMETHOD(device_attach,		vtnet_attach),
289 	DEVMETHOD(device_detach,		vtnet_detach),
290 	DEVMETHOD(device_suspend,		vtnet_suspend),
291 	DEVMETHOD(device_resume,		vtnet_resume),
292 	DEVMETHOD(device_shutdown,		vtnet_shutdown),
293 
294 	/* VirtIO methods. */
295 	DEVMETHOD(virtio_attach_completed,	vtnet_attach_completed),
296 	DEVMETHOD(virtio_config_change,		vtnet_config_change),
297 
298 	DEVMETHOD_END
299 };
300 
301 static driver_t vtnet_driver = {
302 	"vtnet",
303 	vtnet_methods,
304 	sizeof(struct vtnet_softc)
305 };
306 static devclass_t vtnet_devclass;
307 
308 DRIVER_MODULE(vtnet, virtio_pci, vtnet_driver, vtnet_devclass,
309     vtnet_modevent, 0);
310 MODULE_VERSION(vtnet, 1);
311 MODULE_DEPEND(vtnet, virtio, 1, 1, 1);
312 
313 static int
314 vtnet_modevent(module_t mod, int type, void *unused)
315 {
316 	int error;
317 
318 	error = 0;
319 
320 	switch (type) {
321 	case MOD_LOAD:
322 		vtnet_tx_header_zone = uma_zcreate("vtnet_tx_hdr",
323 		    sizeof(struct vtnet_tx_header),
324 		    NULL, NULL, NULL, NULL, 0, 0);
325 		break;
326 	case MOD_QUIESCE:
327 	case MOD_UNLOAD:
328 		if (uma_zone_get_cur(vtnet_tx_header_zone) > 0)
329 			error = EBUSY;
330 		else if (type == MOD_UNLOAD) {
331 			uma_zdestroy(vtnet_tx_header_zone);
332 			vtnet_tx_header_zone = NULL;
333 		}
334 		break;
335 	case MOD_SHUTDOWN:
336 		break;
337 	default:
338 		error = EOPNOTSUPP;
339 		break;
340 	}
341 
342 	return (error);
343 }
344 
345 static int
346 vtnet_probe(device_t dev)
347 {
348 
349 	if (virtio_get_device_type(dev) != VIRTIO_ID_NETWORK)
350 		return (ENXIO);
351 
352 	device_set_desc(dev, "VirtIO Networking Adapter");
353 
354 	return (BUS_PROBE_DEFAULT);
355 }
356 
357 static int
358 vtnet_attach(device_t dev)
359 {
360 	struct vtnet_softc *sc;
361 	int error;
362 
363 	sc = device_get_softc(dev);
364 	sc->vtnet_dev = dev;
365 
366 	/* Register our feature descriptions. */
367 	virtio_set_feature_desc(dev, vtnet_feature_desc);
368 
369 	VTNET_CORE_LOCK_INIT(sc);
370 	callout_init_mtx(&sc->vtnet_tick_ch, VTNET_CORE_MTX(sc), 0);
371 
372 	vtnet_setup_sysctl(sc);
373 	vtnet_setup_features(sc);
374 
375 	error = vtnet_alloc_rx_filters(sc);
376 	if (error) {
377 		device_printf(dev, "cannot allocate Rx filters\n");
378 		goto fail;
379 	}
380 
381 	error = vtnet_alloc_rxtx_queues(sc);
382 	if (error) {
383 		device_printf(dev, "cannot allocate queues\n");
384 		goto fail;
385 	}
386 
387 	error = vtnet_alloc_virtqueues(sc);
388 	if (error) {
389 		device_printf(dev, "cannot allocate virtqueues\n");
390 		goto fail;
391 	}
392 
393 	error = vtnet_setup_interface(sc);
394 	if (error) {
395 		device_printf(dev, "cannot setup interface\n");
396 		goto fail;
397 	}
398 
399 	error = virtio_setup_intr(dev, INTR_TYPE_NET);
400 	if (error) {
401 		device_printf(dev, "cannot setup virtqueue interrupts\n");
402 		/* BMV: This will crash if called during boot! */
403 		ether_ifdetach(sc->vtnet_ifp);
404 		goto fail;
405 	}
406 
407 	vtnet_start_taskqueues(sc);
408 
409 fail:
410 	if (error)
411 		vtnet_detach(dev);
412 
413 	return (error);
414 }
415 
416 static int
417 vtnet_detach(device_t dev)
418 {
419 	struct vtnet_softc *sc;
420 	struct ifnet *ifp;
421 
422 	sc = device_get_softc(dev);
423 	ifp = sc->vtnet_ifp;
424 
425 	if (device_is_attached(dev)) {
426 		VTNET_CORE_LOCK(sc);
427 		vtnet_stop(sc);
428 		VTNET_CORE_UNLOCK(sc);
429 
430 		callout_drain(&sc->vtnet_tick_ch);
431 		vtnet_drain_taskqueues(sc);
432 
433 		ether_ifdetach(ifp);
434 	}
435 
436 	vtnet_free_taskqueues(sc);
437 
438 	if (sc->vtnet_vlan_attach != NULL) {
439 		EVENTHANDLER_DEREGISTER(vlan_config, sc->vtnet_vlan_attach);
440 		sc->vtnet_vlan_attach = NULL;
441 	}
442 	if (sc->vtnet_vlan_detach != NULL) {
443 		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vtnet_vlan_detach);
444 		sc->vtnet_vlan_detach = NULL;
445 	}
446 
447 	ifmedia_removeall(&sc->vtnet_media);
448 
449 	if (ifp != NULL) {
450 		if_free(ifp);
451 		sc->vtnet_ifp = NULL;
452 	}
453 
454 	vtnet_free_rxtx_queues(sc);
455 	vtnet_free_rx_filters(sc);
456 
457 	if (sc->vtnet_ctrl_vq != NULL)
458 		vtnet_free_ctrl_vq(sc);
459 
460 	VTNET_CORE_LOCK_DESTROY(sc);
461 
462 	return (0);
463 }
464 
465 static int
466 vtnet_suspend(device_t dev)
467 {
468 	struct vtnet_softc *sc;
469 
470 	sc = device_get_softc(dev);
471 
472 	VTNET_CORE_LOCK(sc);
473 	vtnet_stop(sc);
474 	sc->vtnet_flags |= VTNET_FLAG_SUSPENDED;
475 	VTNET_CORE_UNLOCK(sc);
476 
477 	return (0);
478 }
479 
480 static int
481 vtnet_resume(device_t dev)
482 {
483 	struct vtnet_softc *sc;
484 	struct ifnet *ifp;
485 
486 	sc = device_get_softc(dev);
487 	ifp = sc->vtnet_ifp;
488 
489 	VTNET_CORE_LOCK(sc);
490 	if (ifp->if_flags & IFF_UP)
491 		vtnet_init_locked(sc);
492 	sc->vtnet_flags &= ~VTNET_FLAG_SUSPENDED;
493 	VTNET_CORE_UNLOCK(sc);
494 
495 	return (0);
496 }
497 
498 static int
499 vtnet_shutdown(device_t dev)
500 {
501 
502 	/*
503 	 * Suspend already does all of what we need to
504 	 * do here; we just never expect to be resumed.
505 	 */
506 	return (vtnet_suspend(dev));
507 }
508 
509 static int
510 vtnet_attach_completed(device_t dev)
511 {
512 
513 	vtnet_attach_disable_promisc(device_get_softc(dev));
514 
515 	return (0);
516 }
517 
518 static int
519 vtnet_config_change(device_t dev)
520 {
521 	struct vtnet_softc *sc;
522 
523 	sc = device_get_softc(dev);
524 
525 	VTNET_CORE_LOCK(sc);
526 	vtnet_update_link_status(sc);
527 	if (sc->vtnet_link_active != 0)
528 		vtnet_tx_start_all(sc);
529 	VTNET_CORE_UNLOCK(sc);
530 
531 	return (0);
532 }
533 
534 static void
535 vtnet_negotiate_features(struct vtnet_softc *sc)
536 {
537 	device_t dev;
538 	uint64_t mask, features;
539 
540 	dev = sc->vtnet_dev;
541 	mask = 0;
542 
543 	/*
544 	 * TSO and LRO are only available when their corresponding checksum
545 	 * offload feature is also negotiated.
546 	 */
547 	if (vtnet_tunable_int(sc, "csum_disable", vtnet_csum_disable)) {
548 		mask |= VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM;
549 		mask |= VTNET_TSO_FEATURES | VTNET_LRO_FEATURES;
550 	}
551 	if (vtnet_tunable_int(sc, "tso_disable", vtnet_tso_disable))
552 		mask |= VTNET_TSO_FEATURES;
553 	if (vtnet_tunable_int(sc, "lro_disable", vtnet_lro_disable))
554 		mask |= VTNET_LRO_FEATURES;
555 	if (vtnet_tunable_int(sc, "mq_disable", vtnet_mq_disable))
556 		mask |= VIRTIO_NET_F_MQ;
557 #ifdef VTNET_LEGACY_TX
558 	mask |= VIRTIO_NET_F_MQ;
559 #endif
560 
561 	features = VTNET_FEATURES & ~mask;
562 	sc->vtnet_features = virtio_negotiate_features(dev, features);
563 
564 	if (virtio_with_feature(dev, VTNET_LRO_FEATURES) == 0)
565 		return;
566 	if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF))
567 		return;
568 
569 	/*
570 	 * LRO without mergeable buffers requires special care. This is not
571 	 * ideal because every receive buffer must be large enough to hold
572 	 * the maximum TCP packet, the Ethernet header, and the virtio-net
573 	 * header. This requires up to 34 descriptors with MCLBYTES clusters.
574 	 * If we do not have indirect descriptors, LRO is disabled since the
575 	 * virtqueue will not contain very many receive buffers.
576 	 */
577 	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC) == 0) {
578 		device_printf(dev,
579 		    "LRO disabled due to both mergeable buffers and indirect "
580 		    "descriptors not negotiated\n");
581 
582 		features &= ~VTNET_LRO_FEATURES;
583 		sc->vtnet_features = virtio_negotiate_features(dev, features);
584 	} else
585 		sc->vtnet_flags |= VTNET_FLAG_LRO_NOMRG;
586 }
587 
588 static void
589 vtnet_setup_features(struct vtnet_softc *sc)
590 {
591 	device_t dev;
592 	int max_pairs, max;
593 
594 	dev = sc->vtnet_dev;
595 
596 	vtnet_negotiate_features(sc);
597 
598 	if (virtio_with_feature(dev, VIRTIO_RING_F_EVENT_IDX))
599 		sc->vtnet_flags |= VTNET_FLAG_EVENT_IDX;
600 
601 	if (virtio_with_feature(dev, VIRTIO_NET_F_MAC)) {
602 		/* This feature should always be negotiated. */
603 		sc->vtnet_flags |= VTNET_FLAG_MAC;
604 	}
605 
606 	if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF)) {
607 		sc->vtnet_flags |= VTNET_FLAG_MRG_RXBUFS;
608 		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
609 	} else
610 		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
611 
612 	if (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS)
613 		sc->vtnet_rx_nsegs = VTNET_MRG_RX_SEGS;
614 	else if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG)
615 		sc->vtnet_rx_nsegs = VTNET_MAX_RX_SEGS;
616 	else
617 		sc->vtnet_rx_nsegs = VTNET_MIN_RX_SEGS;
618 
619 	if (virtio_with_feature(dev, VIRTIO_NET_F_GSO) ||
620 	    virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4) ||
621 	    virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
622 		sc->vtnet_tx_nsegs = VTNET_MAX_TX_SEGS;
623 	else
624 		sc->vtnet_tx_nsegs = VTNET_MIN_TX_SEGS;
625 
626 	if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VQ)) {
627 		sc->vtnet_flags |= VTNET_FLAG_CTRL_VQ;
628 
629 		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_RX))
630 			sc->vtnet_flags |= VTNET_FLAG_CTRL_RX;
631 		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VLAN))
632 			sc->vtnet_flags |= VTNET_FLAG_VLAN_FILTER;
633 		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_MAC_ADDR))
634 			sc->vtnet_flags |= VTNET_FLAG_CTRL_MAC;
635 	}
636 
637 	if (virtio_with_feature(dev, VIRTIO_NET_F_MQ) &&
638 	    sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
639 		max_pairs = virtio_read_dev_config_2(dev,
640 		    offsetof(struct virtio_net_config, max_virtqueue_pairs));
641 		if (max_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
642 		    max_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX)
643 			max_pairs = 1;
644 	} else
645 		max_pairs = 1;
646 
647 	if (max_pairs > 1) {
648 		/*
649 		 * Limit the maximum number of queue pairs to the number of
650 		 * CPUs or the configured maximum. The actual number of
651 		 * queues that get used may be less.
652 		 */
653 		max = vtnet_tunable_int(sc, "mq_max_pairs", vtnet_mq_max_pairs);
654 		if (max > 0 && max_pairs > max)
655 			max_pairs = max;
656 		if (max_pairs > mp_ncpus)
657 			max_pairs = mp_ncpus;
658 		if (max_pairs > VTNET_MAX_QUEUE_PAIRS)
659 			max_pairs = VTNET_MAX_QUEUE_PAIRS;
660 		if (max_pairs > 1)
661 			sc->vtnet_flags |= VTNET_FLAG_MULTIQ;
662 	}
663 
664 	sc->vtnet_max_vq_pairs = max_pairs;
665 }
666 
667 static int
668 vtnet_init_rxq(struct vtnet_softc *sc, int id)
669 {
670 	struct vtnet_rxq *rxq;
671 
672 	rxq = &sc->vtnet_rxqs[id];
673 
674 	snprintf(rxq->vtnrx_name, sizeof(rxq->vtnrx_name), "%s-rx%d",
675 	    device_get_nameunit(sc->vtnet_dev), id);
676 	mtx_init(&rxq->vtnrx_mtx, rxq->vtnrx_name, NULL, MTX_DEF);
677 
678 	rxq->vtnrx_sc = sc;
679 	rxq->vtnrx_id = id;
680 
681 	rxq->vtnrx_sg = sglist_alloc(sc->vtnet_rx_nsegs, M_NOWAIT);
682 	if (rxq->vtnrx_sg == NULL)
683 		return (ENOMEM);
684 
685 	TASK_INIT(&rxq->vtnrx_intrtask, 0, vtnet_rxq_tq_intr, rxq);
686 	rxq->vtnrx_tq = taskqueue_create(rxq->vtnrx_name, M_NOWAIT,
687 	    taskqueue_thread_enqueue, &rxq->vtnrx_tq);
688 
689 	return (rxq->vtnrx_tq == NULL ? ENOMEM : 0);
690 }
691 
692 static int
693 vtnet_init_txq(struct vtnet_softc *sc, int id)
694 {
695 	struct vtnet_txq *txq;
696 
697 	txq = &sc->vtnet_txqs[id];
698 
699 	snprintf(txq->vtntx_name, sizeof(txq->vtntx_name), "%s-tx%d",
700 	    device_get_nameunit(sc->vtnet_dev), id);
701 	mtx_init(&txq->vtntx_mtx, txq->vtntx_name, NULL, MTX_DEF);
702 
703 	txq->vtntx_sc = sc;
704 	txq->vtntx_id = id;
705 
706 	txq->vtntx_sg = sglist_alloc(sc->vtnet_tx_nsegs, M_NOWAIT);
707 	if (txq->vtntx_sg == NULL)
708 		return (ENOMEM);
709 
710 #ifndef VTNET_LEGACY_TX
711 	txq->vtntx_br = buf_ring_alloc(VTNET_DEFAULT_BUFRING_SIZE, M_DEVBUF,
712 	    M_NOWAIT, &txq->vtntx_mtx);
713 	if (txq->vtntx_br == NULL)
714 		return (ENOMEM);
715 
716 	TASK_INIT(&txq->vtntx_defrtask, 0, vtnet_txq_tq_deferred, txq);
717 #endif
718 	TASK_INIT(&txq->vtntx_intrtask, 0, vtnet_txq_tq_intr, txq);
719 	txq->vtntx_tq = taskqueue_create(txq->vtntx_name, M_NOWAIT,
720 	    taskqueue_thread_enqueue, &txq->vtntx_tq);
721 	if (txq->vtntx_tq == NULL)
722 		return (ENOMEM);
723 
724 	return (0);
725 }
726 
727 static int
728 vtnet_alloc_rxtx_queues(struct vtnet_softc *sc)
729 {
730 	int i, npairs, error;
731 
732 	npairs = sc->vtnet_max_vq_pairs;
733 
734 	sc->vtnet_rxqs = malloc(sizeof(struct vtnet_rxq) * npairs, M_DEVBUF,
735 	    M_NOWAIT | M_ZERO);
736 	sc->vtnet_txqs = malloc(sizeof(struct vtnet_txq) * npairs, M_DEVBUF,
737 	    M_NOWAIT | M_ZERO);
738 	if (sc->vtnet_rxqs == NULL || sc->vtnet_txqs == NULL)
739 		return (ENOMEM);
740 
741 	for (i = 0; i < npairs; i++) {
742 		error = vtnet_init_rxq(sc, i);
743 		if (error)
744 			return (error);
745 		error = vtnet_init_txq(sc, i);
746 		if (error)
747 			return (error);
748 	}
749 
750 	vtnet_setup_queue_sysctl(sc);
751 
752 	return (0);
753 }
754 
755 static void
756 vtnet_destroy_rxq(struct vtnet_rxq *rxq)
757 {
758 
759 	rxq->vtnrx_sc = NULL;
760 	rxq->vtnrx_id = -1;
761 
762 	if (rxq->vtnrx_sg != NULL) {
763 		sglist_free(rxq->vtnrx_sg);
764 		rxq->vtnrx_sg = NULL;
765 	}
766 
767 	if (mtx_initialized(&rxq->vtnrx_mtx) != 0)
768 		mtx_destroy(&rxq->vtnrx_mtx);
769 }
770 
771 static void
772 vtnet_destroy_txq(struct vtnet_txq *txq)
773 {
774 
775 	txq->vtntx_sc = NULL;
776 	txq->vtntx_id = -1;
777 
778 	if (txq->vtntx_sg != NULL) {
779 		sglist_free(txq->vtntx_sg);
780 		txq->vtntx_sg = NULL;
781 	}
782 
783 #ifndef VTNET_LEGACY_TX
784 	if (txq->vtntx_br != NULL) {
785 		buf_ring_free(txq->vtntx_br, M_DEVBUF);
786 		txq->vtntx_br = NULL;
787 	}
788 #endif
789 
790 	if (mtx_initialized(&txq->vtntx_mtx) != 0)
791 		mtx_destroy(&txq->vtntx_mtx);
792 }
793 
794 static void
795 vtnet_free_rxtx_queues(struct vtnet_softc *sc)
796 {
797 	int i;
798 
799 	if (sc->vtnet_rxqs != NULL) {
800 		for (i = 0; i < sc->vtnet_max_vq_pairs; i++)
801 			vtnet_destroy_rxq(&sc->vtnet_rxqs[i]);
802 		free(sc->vtnet_rxqs, M_DEVBUF);
803 		sc->vtnet_rxqs = NULL;
804 	}
805 
806 	if (sc->vtnet_txqs != NULL) {
807 		for (i = 0; i < sc->vtnet_max_vq_pairs; i++)
808 			vtnet_destroy_txq(&sc->vtnet_txqs[i]);
809 		free(sc->vtnet_txqs, M_DEVBUF);
810 		sc->vtnet_txqs = NULL;
811 	}
812 }
813 
814 static int
815 vtnet_alloc_rx_filters(struct vtnet_softc *sc)
816 {
817 
818 	if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
819 		sc->vtnet_mac_filter = malloc(sizeof(struct vtnet_mac_filter),
820 		    M_DEVBUF, M_NOWAIT | M_ZERO);
821 		if (sc->vtnet_mac_filter == NULL)
822 			return (ENOMEM);
823 	}
824 
825 	if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) {
826 		sc->vtnet_vlan_filter = malloc(sizeof(uint32_t) *
827 		    VTNET_VLAN_FILTER_NWORDS, M_DEVBUF, M_NOWAIT | M_ZERO);
828 		if (sc->vtnet_vlan_filter == NULL)
829 			return (ENOMEM);
830 	}
831 
832 	return (0);
833 }
834 
835 static void
836 vtnet_free_rx_filters(struct vtnet_softc *sc)
837 {
838 
839 	if (sc->vtnet_mac_filter != NULL) {
840 		free(sc->vtnet_mac_filter, M_DEVBUF);
841 		sc->vtnet_mac_filter = NULL;
842 	}
843 
844 	if (sc->vtnet_vlan_filter != NULL) {
845 		free(sc->vtnet_vlan_filter, M_DEVBUF);
846 		sc->vtnet_vlan_filter = NULL;
847 	}
848 }
849 
850 static int
851 vtnet_alloc_virtqueues(struct vtnet_softc *sc)
852 {
853 	device_t dev;
854 	struct vq_alloc_info *info;
855 	struct vtnet_rxq *rxq;
856 	struct vtnet_txq *txq;
857 	int i, idx, flags, nvqs, error;
858 
859 	dev = sc->vtnet_dev;
860 	flags = 0;
861 
862 	nvqs = sc->vtnet_max_vq_pairs * 2;
863 	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ)
864 		nvqs++;
865 
866 	info = malloc(sizeof(struct vq_alloc_info) * nvqs, M_TEMP, M_NOWAIT);
867 	if (info == NULL)
868 		return (ENOMEM);
869 
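	/*
	 * Virtqueue indices follow the VirtIO layout: each pair's receive
	 * queue precedes its transmit queue (RX0/TX0, RX1/TX1, ...), and
	 * the control virtqueue, when negotiated, is allocated last.
	 */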
870 	for (i = 0, idx = 0; i < sc->vtnet_max_vq_pairs; i++, idx+=2) {
871 		rxq = &sc->vtnet_rxqs[i];
872 		VQ_ALLOC_INFO_INIT(&info[idx], sc->vtnet_rx_nsegs,
873 		    vtnet_rx_vq_intr, rxq, &rxq->vtnrx_vq,
874 		    "%s-%d rx", device_get_nameunit(dev), rxq->vtnrx_id);
875 
876 		txq = &sc->vtnet_txqs[i];
877 		VQ_ALLOC_INFO_INIT(&info[idx+1], sc->vtnet_tx_nsegs,
878 		    vtnet_tx_vq_intr, txq, &txq->vtntx_vq,
879 		    "%s-%d tx", device_get_nameunit(dev), txq->vtntx_id);
880 	}
881 
882 	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
883 		VQ_ALLOC_INFO_INIT(&info[idx], 0, NULL, NULL,
884 		    &sc->vtnet_ctrl_vq, "%s ctrl", device_get_nameunit(dev));
885 	}
886 
887 	/*
888 	 * Enable interrupt binding if this is multiqueue. This only matters
889 	 * when per-vq MSIX is available.
890 	 */
891 	if (sc->vtnet_flags & VTNET_FLAG_MULTIQ)
892 		flags |= 0;
893 
894 	error = virtio_alloc_virtqueues(dev, flags, nvqs, info);
895 	free(info, M_TEMP);
896 
897 	return (error);
898 }
899 
900 static int
901 vtnet_setup_interface(struct vtnet_softc *sc)
902 {
903 	device_t dev;
904 	struct ifnet *ifp;
905 	int limit;
906 
907 	dev = sc->vtnet_dev;
908 
909 	ifp = sc->vtnet_ifp = if_alloc(IFT_ETHER);
910 	if (ifp == NULL) {
911 		device_printf(dev, "cannot allocate ifnet structure\n");
912 		return (ENOSPC);
913 	}
914 
915 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
916 	if_initbaudrate(ifp, IF_Gbps(10));	/* Approx. */
917 	ifp->if_softc = sc;
918 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
919 	ifp->if_init = vtnet_init;
920 	ifp->if_ioctl = vtnet_ioctl;
921 
922 #ifndef VTNET_LEGACY_TX
923 	ifp->if_transmit = vtnet_txq_mq_start;
924 	ifp->if_qflush = vtnet_qflush;
925 #else
926 	struct virtqueue *vq = sc->vtnet_txqs[0].vtntx_vq;
927 	ifp->if_start = vtnet_start;
928 	IFQ_SET_MAXLEN(&ifp->if_snd, virtqueue_size(vq) - 1);
929 	ifp->if_snd.ifq_drv_maxlen = virtqueue_size(vq) - 1;
930 	IFQ_SET_READY(&ifp->if_snd);
931 #endif
932 
933 	ifmedia_init(&sc->vtnet_media, IFM_IMASK, vtnet_ifmedia_upd,
934 	    vtnet_ifmedia_sts);
935 	ifmedia_add(&sc->vtnet_media, VTNET_MEDIATYPE, 0, NULL);
936 	ifmedia_set(&sc->vtnet_media, VTNET_MEDIATYPE);
937 
938 	/* Read (or generate) the MAC address for the adapter. */
939 	vtnet_get_hwaddr(sc);
940 
941 	ether_ifattach(ifp, sc->vtnet_hwaddr);
942 
943 	if (virtio_with_feature(dev, VIRTIO_NET_F_STATUS))
944 		ifp->if_capabilities |= IFCAP_LINKSTATE;
945 
946 	/* Tell the upper layer(s) we support long frames. */
947 	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
948 	ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU;
949 
950 	if (virtio_with_feature(dev, VIRTIO_NET_F_CSUM)) {
951 		ifp->if_capabilities |= IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6;
952 
953 		if (virtio_with_feature(dev, VIRTIO_NET_F_GSO)) {
954 			ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_TSO6;
955 			sc->vtnet_flags |= VTNET_FLAG_TSO_ECN;
956 		} else {
957 			if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4))
958 				ifp->if_capabilities |= IFCAP_TSO4;
959 			if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
960 				ifp->if_capabilities |= IFCAP_TSO6;
961 			if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_ECN))
962 				sc->vtnet_flags |= VTNET_FLAG_TSO_ECN;
963 		}
964 
965 		if (ifp->if_capabilities & IFCAP_TSO)
966 			ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
967 	}
968 
969 	if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_CSUM))
970 		ifp->if_capabilities |= IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6;
971 
972 	if (ifp->if_capabilities & IFCAP_HWCSUM) {
973 		/*
974 		 * VirtIO does not support VLAN tagging, but we can fake
975 		 * it by inserting and removing the 802.1Q header during
976 		 * transmit and receive. We are then able to do checksum
977 		 * offloading of VLAN frames.
978 		 */
979 		ifp->if_capabilities |=
980 		    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
981 	}
982 
983 	ifp->if_capenable = ifp->if_capabilities;
984 
985 	/*
986 	 * Capabilities after here are not enabled by default.
987 	 */
988 
989 	if (ifp->if_capabilities & IFCAP_RXCSUM) {
990 		if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO4) ||
991 		    virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO6))
992 			ifp->if_capabilities |= IFCAP_LRO;
993 	}
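	/*
	 * For example, LRO advertised here is only added to if_capabilities,
	 * not if_capenable; it is expected to be enabled at run time (e.g.
	 * "ifconfig vtnet0 lro"), which arrives as SIOCSIFCAP and forces a
	 * reinit in vtnet_ioctl() when the interface is running.
	 */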
994 
995 	if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) {
996 		ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
997 
998 		sc->vtnet_vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
999 		    vtnet_register_vlan, sc, EVENTHANDLER_PRI_FIRST);
1000 		sc->vtnet_vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
1001 		    vtnet_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);
1002 	}
1003 
1004 	limit = vtnet_tunable_int(sc, "rx_process_limit",
1005 	    vtnet_rx_process_limit);
1006 	if (limit < 0)
1007 		limit = INT_MAX;
1008 	sc->vtnet_rx_process_limit = limit;
1009 
1010 	return (0);
1011 }
1012 
1013 static int
1014 vtnet_change_mtu(struct vtnet_softc *sc, int new_mtu)
1015 {
1016 	struct ifnet *ifp;
1017 	int frame_size, clsize;
1018 
1019 	ifp = sc->vtnet_ifp;
1020 
1021 	if (new_mtu < ETHERMIN || new_mtu > VTNET_MAX_MTU)
1022 		return (EINVAL);
1023 
1024 	frame_size = sc->vtnet_hdr_size + sizeof(struct ether_vlan_header) +
1025 	    new_mtu;
1026 
1027 	/*
1028 	 * Based on the new MTU (and hence frame size) determine which
1029 	 * cluster size is most appropriate for the receive queues.
1030 	 */
1031 	if (frame_size <= MCLBYTES) {
1032 		clsize = MCLBYTES;
1033 	} else if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
1034 		/* Avoid going past 9K jumbos. */
1035 		if (frame_size > MJUM9BYTES)
1036 			return (EINVAL);
1037 		clsize = MJUM9BYTES;
1038 	} else
1039 		clsize = MJUMPAGESIZE;
1040 
1041 	ifp->if_mtu = new_mtu;
1042 	sc->vtnet_rx_new_clsize = clsize;
1043 
1044 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1045 		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1046 		vtnet_init_locked(sc);
1047 	}
1048 
1049 	return (0);
1050 }
1051 
1052 static int
1053 vtnet_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1054 {
1055 	struct vtnet_softc *sc;
1056 	struct ifreq *ifr;
1057 	int reinit, mask, error;
1058 
1059 	sc = ifp->if_softc;
1060 	ifr = (struct ifreq *) data;
1061 	error = 0;
1062 
1063 	switch (cmd) {
1064 	case SIOCSIFMTU:
1065 		if (ifp->if_mtu != ifr->ifr_mtu) {
1066 			VTNET_CORE_LOCK(sc);
1067 			error = vtnet_change_mtu(sc, ifr->ifr_mtu);
1068 			VTNET_CORE_UNLOCK(sc);
1069 		}
1070 		break;
1071 
1072 	case SIOCSIFFLAGS:
1073 		VTNET_CORE_LOCK(sc);
1074 		if ((ifp->if_flags & IFF_UP) == 0) {
1075 			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1076 				vtnet_stop(sc);
1077 		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1078 			if ((ifp->if_flags ^ sc->vtnet_if_flags) &
1079 			    (IFF_PROMISC | IFF_ALLMULTI)) {
1080 				if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX)
1081 					vtnet_rx_filter(sc);
1082 				else
1083 					error = ENOTSUP;
1084 			}
1085 		} else
1086 			vtnet_init_locked(sc);
1087 
1088 		if (error == 0)
1089 			sc->vtnet_if_flags = ifp->if_flags;
1090 		VTNET_CORE_UNLOCK(sc);
1091 		break;
1092 
1093 	case SIOCADDMULTI:
1094 	case SIOCDELMULTI:
1095 		if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) == 0)
1096 			break;
1097 		VTNET_CORE_LOCK(sc);
1098 		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1099 			vtnet_rx_filter_mac(sc);
1100 		VTNET_CORE_UNLOCK(sc);
1101 		break;
1102 
1103 	case SIOCSIFMEDIA:
1104 	case SIOCGIFMEDIA:
1105 		error = ifmedia_ioctl(ifp, ifr, &sc->vtnet_media, cmd);
1106 		break;
1107 
1108 	case SIOCSIFCAP:
1109 		VTNET_CORE_LOCK(sc);
1110 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1111 
1112 		if (mask & IFCAP_TXCSUM)
1113 			ifp->if_capenable ^= IFCAP_TXCSUM;
1114 		if (mask & IFCAP_TXCSUM_IPV6)
1115 			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
1116 		if (mask & IFCAP_TSO4)
1117 			ifp->if_capenable ^= IFCAP_TSO4;
1118 		if (mask & IFCAP_TSO6)
1119 			ifp->if_capenable ^= IFCAP_TSO6;
1120 
1121 		if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO |
1122 		    IFCAP_VLAN_HWFILTER)) {
1123 			/* These Rx features require us to renegotiate. */
1124 			reinit = 1;
1125 
1126 			if (mask & IFCAP_RXCSUM)
1127 				ifp->if_capenable ^= IFCAP_RXCSUM;
1128 			if (mask & IFCAP_RXCSUM_IPV6)
1129 				ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
1130 			if (mask & IFCAP_LRO)
1131 				ifp->if_capenable ^= IFCAP_LRO;
1132 			if (mask & IFCAP_VLAN_HWFILTER)
1133 				ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
1134 		} else
1135 			reinit = 0;
1136 
1137 		if (mask & IFCAP_VLAN_HWTSO)
1138 			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1139 		if (mask & IFCAP_VLAN_HWTAGGING)
1140 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1141 
1142 		if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1143 			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1144 			vtnet_init_locked(sc);
1145 		}
1146 
1147 		VTNET_CORE_UNLOCK(sc);
1148 		VLAN_CAPABILITIES(ifp);
1149 
1150 		break;
1151 
1152 	default:
1153 		error = ether_ioctl(ifp, cmd, data);
1154 		break;
1155 	}
1156 
1157 	VTNET_CORE_LOCK_ASSERT_NOTOWNED(sc);
1158 
1159 	return (error);
1160 }
1161 
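/*
 * Fill the receive virtqueue with empty mbuf clusters until it is full or
 * an allocation fails; called when a receive queue is (re)initialized.
 */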
1162 static int
1163 vtnet_rxq_populate(struct vtnet_rxq *rxq)
1164 {
1165 	struct virtqueue *vq;
1166 	int nbufs, error;
1167 
1168 	vq = rxq->vtnrx_vq;
1169 	error = ENOSPC;
1170 
1171 	for (nbufs = 0; !virtqueue_full(vq); nbufs++) {
1172 		error = vtnet_rxq_new_buf(rxq);
1173 		if (error)
1174 			break;
1175 	}
1176 
1177 	if (nbufs > 0) {
1178 		virtqueue_notify(vq);
1179 		/*
1180 		 * EMSGSIZE signifies the virtqueue did not have enough
1181 		 * entries available to hold the last mbuf. This is not
1182 		 * an error.
1183 		 */
1184 		if (error == EMSGSIZE)
1185 			error = 0;
1186 	}
1187 
1188 	return (error);
1189 }
1190 
1191 static void
1192 vtnet_rxq_free_mbufs(struct vtnet_rxq *rxq)
1193 {
1194 	struct virtqueue *vq;
1195 	struct mbuf *m;
1196 	int last;
1197 
1198 	vq = rxq->vtnrx_vq;
1199 	last = 0;
1200 
1201 	while ((m = virtqueue_drain(vq, &last)) != NULL)
1202 		m_freem(m);
1203 
1204 	KASSERT(virtqueue_empty(vq),
1205 	    ("%s: mbufs remaining in rx queue %p", __func__, rxq));
1206 }
1207 
1208 static struct mbuf *
1209 vtnet_rx_alloc_buf(struct vtnet_softc *sc, int nbufs, struct mbuf **m_tailp)
1210 {
1211 	struct mbuf *m_head, *m_tail, *m;
1212 	int i, clsize;
1213 
1214 	clsize = sc->vtnet_rx_clsize;
1215 
1216 	KASSERT(nbufs == 1 || sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
1217 	    ("%s: chained mbuf %d request without LRO_NOMRG", __func__, nbufs));
1218 
1219 	m_head = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, clsize);
1220 	if (m_head == NULL)
1221 		goto fail;
1222 
1223 	m_head->m_len = clsize;
1224 	m_tail = m_head;
1225 
1226 	/* Allocate the rest of the chain. */
1227 	for (i = 1; i < nbufs; i++) {
1228 		m = m_getjcl(M_NOWAIT, MT_DATA, 0, clsize);
1229 		if (m == NULL)
1230 			goto fail;
1231 
1232 		m->m_len = clsize;
1233 		m_tail->m_next = m;
1234 		m_tail = m;
1235 	}
1236 
1237 	if (m_tailp != NULL)
1238 		*m_tailp = m_tail;
1239 
1240 	return (m_head);
1241 
1242 fail:
1243 	sc->vtnet_stats.mbuf_alloc_failed++;
1244 	m_freem(m_head);
1245 
1246 	return (NULL);
1247 }
1248 
1249 /*
1250  * Slow path for when LRO without mergeable buffers is negotiated.
1251  */
1252 static int
1253 vtnet_rxq_replace_lro_nomgr_buf(struct vtnet_rxq *rxq, struct mbuf *m0,
1254     int len0)
1255 {
1256 	struct vtnet_softc *sc;
1257 	struct mbuf *m, *m_prev;
1258 	struct mbuf *m_new, *m_tail;
1259 	int len, clsize, nreplace, error;
1260 
1261 	sc = rxq->vtnrx_sc;
1262 	clsize = sc->vtnet_rx_clsize;
1263 
1264 	m_prev = NULL;
1265 	m_tail = NULL;
1266 	nreplace = 0;
1267 
1268 	m = m0;
1269 	len = len0;
1270 
1271 	/*
1272 	 * Since these mbuf chains are so large, we avoid allocating an
1273 	 * entire replacement chain if possible. When the received frame
1274 	 * did not consume the entire chain, the unused mbufs are moved
1275 	 * to the replacement chain.
1276 	 */
1277 	while (len > 0) {
1278 		/*
1279 		 * Something is seriously wrong if we received a frame
1280 		 * larger than the chain. Drop it.
1281 		 */
1282 		if (m == NULL) {
1283 			sc->vtnet_stats.rx_frame_too_large++;
1284 			return (EMSGSIZE);
1285 		}
1286 
1287 		/* We always allocate the same cluster size. */
1288 		KASSERT(m->m_len == clsize,
1289 		    ("%s: mbuf size %d is not the cluster size %d",
1290 		    __func__, m->m_len, clsize));
1291 
1292 		m->m_len = MIN(m->m_len, len);
1293 		len -= m->m_len;
1294 
1295 		m_prev = m;
1296 		m = m->m_next;
1297 		nreplace++;
1298 	}
1299 
1300 	KASSERT(nreplace <= sc->vtnet_rx_nmbufs,
1301 	    ("%s: too many replacement mbufs %d max %d", __func__, nreplace,
1302 	    sc->vtnet_rx_nmbufs));
1303 
1304 	m_new = vtnet_rx_alloc_buf(sc, nreplace, &m_tail);
1305 	if (m_new == NULL) {
1306 		m_prev->m_len = clsize;
1307 		return (ENOBUFS);
1308 	}
1309 
1310 	/*
1311 	 * Move any unused mbufs from the received chain onto the end
1312 	 * of the new chain.
1313 	 */
1314 	if (m_prev->m_next != NULL) {
1315 		m_tail->m_next = m_prev->m_next;
1316 		m_prev->m_next = NULL;
1317 	}
1318 
1319 	error = vtnet_rxq_enqueue_buf(rxq, m_new);
1320 	if (error) {
1321 		/*
1322 		 * BAD! We could not enqueue the replacement mbuf chain. We
1323 		 * must restore the m0 chain to the original state if it was
1324 		 * modified so we can subsequently discard it.
1325 		 *
1326 		 * NOTE: The replacement is supposed to be an identical copy
1327 		 * of the one just dequeued, so this is an unexpected error.
1328 		 */
1329 		sc->vtnet_stats.rx_enq_replacement_failed++;
1330 
1331 		if (m_tail->m_next != NULL) {
1332 			m_prev->m_next = m_tail->m_next;
1333 			m_tail->m_next = NULL;
1334 		}
1335 
1336 		m_prev->m_len = clsize;
1337 		m_freem(m_new);
1338 	}
1339 
1340 	return (error);
1341 }
1342 
1343 static int
1344 vtnet_rxq_replace_buf(struct vtnet_rxq *rxq, struct mbuf *m, int len)
1345 {
1346 	struct vtnet_softc *sc;
1347 	struct mbuf *m_new;
1348 	int error;
1349 
1350 	sc = rxq->vtnrx_sc;
1351 
1352 	KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG || m->m_next == NULL,
1353 	    ("%s: chained mbuf without LRO_NOMRG", __func__));
1354 
1355 	if (m->m_next == NULL) {
1356 		/* Fast-path for the common case of just one mbuf. */
1357 		if (m->m_len < len)
1358 			return (EINVAL);
1359 
1360 		m_new = vtnet_rx_alloc_buf(sc, 1, NULL);
1361 		if (m_new == NULL)
1362 			return (ENOBUFS);
1363 
1364 		error = vtnet_rxq_enqueue_buf(rxq, m_new);
1365 		if (error) {
1366 			/*
1367 			 * The new mbuf is supposed to be an identical
1368 			 * copy of the one just dequeued, so this is an
1369 			 * unexpected error.
1370 			 */
1371 			m_freem(m_new);
1372 			sc->vtnet_stats.rx_enq_replacement_failed++;
1373 		} else
1374 			m->m_len = len;
1375 	} else
1376 		error = vtnet_rxq_replace_lro_nomgr_buf(rxq, m, len);
1377 
1378 	return (error);
1379 }
1380 
1381 static int
1382 vtnet_rxq_enqueue_buf(struct vtnet_rxq *rxq, struct mbuf *m)
1383 {
1384 	struct vtnet_softc *sc;
1385 	struct sglist *sg;
1386 	struct vtnet_rx_header *rxhdr;
1387 	uint8_t *mdata;
1388 	int offset, error;
1389 
1390 	sc = rxq->vtnrx_sc;
1391 	sg = rxq->vtnrx_sg;
1392 	mdata = mtod(m, uint8_t *);
1393 
1394 	VTNET_RXQ_LOCK_ASSERT(rxq);
1395 	KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG || m->m_next == NULL,
1396 	    ("%s: chained mbuf without LRO_NOMRG", __func__));
1397 	KASSERT(m->m_len == sc->vtnet_rx_clsize,
1398 	    ("%s: unexpected cluster size %d/%d", __func__, m->m_len,
1399 	     sc->vtnet_rx_clsize));
1400 
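	/*
	 * Two buffer layouts are handled here. Without mergeable buffers, a
	 * vtnet_rx_header (virtio header plus padding) sits at the front of
	 * the cluster and gets its own sg entry; with mergeable buffers, the
	 * header is simply the first bytes of the receive buffer itself.
	 */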
1401 	sglist_reset(sg);
1402 	if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
1403 		MPASS(sc->vtnet_hdr_size == sizeof(struct virtio_net_hdr));
1404 		rxhdr = (struct vtnet_rx_header *) mdata;
1405 		sglist_append(sg, &rxhdr->vrh_hdr, sc->vtnet_hdr_size);
1406 		offset = sizeof(struct vtnet_rx_header);
1407 	} else
1408 		offset = 0;
1409 
1410 	sglist_append(sg, mdata + offset, m->m_len - offset);
1411 	if (m->m_next != NULL) {
1412 		error = sglist_append_mbuf(sg, m->m_next);
1413 		MPASS(error == 0);
1414 	}
1415 
1416 	error = virtqueue_enqueue(rxq->vtnrx_vq, m, sg, 0, sg->sg_nseg);
1417 
1418 	return (error);
1419 }
1420 
1421 static int
1422 vtnet_rxq_new_buf(struct vtnet_rxq *rxq)
1423 {
1424 	struct vtnet_softc *sc;
1425 	struct mbuf *m;
1426 	int error;
1427 
1428 	sc = rxq->vtnrx_sc;
1429 
1430 	m = vtnet_rx_alloc_buf(sc, sc->vtnet_rx_nmbufs, NULL);
1431 	if (m == NULL)
1432 		return (ENOBUFS);
1433 
1434 	error = vtnet_rxq_enqueue_buf(rxq, m);
1435 	if (error)
1436 		m_freem(m);
1437 
1438 	return (error);
1439 }
1440 
1441 /*
1442  * Use the checksum offset in the VirtIO header to set the
1443  * correct CSUM_* flags.
1444  */
1445 static int
1446 vtnet_rxq_csum_by_offset(struct vtnet_rxq *rxq, struct mbuf *m,
1447     uint16_t eth_type, int ip_start, struct virtio_net_hdr *hdr)
1448 {
1449 	struct vtnet_softc *sc;
1450 #if defined(INET) || defined(INET6)
1451 	int offset = hdr->csum_start + hdr->csum_offset;
1452 #endif
1453 
1454 	sc = rxq->vtnrx_sc;
1455 
1456 	/* Only do a basic sanity check on the offset. */
1457 	switch (eth_type) {
1458 #if defined(INET)
1459 	case ETHERTYPE_IP:
1460 		if (__predict_false(offset < ip_start + sizeof(struct ip)))
1461 			return (1);
1462 		break;
1463 #endif
1464 #if defined(INET6)
1465 	case ETHERTYPE_IPV6:
1466 		if (__predict_false(offset < ip_start + sizeof(struct ip6_hdr)))
1467 			return (1);
1468 		break;
1469 #endif
1470 	default:
1471 		sc->vtnet_stats.rx_csum_bad_ethtype++;
1472 		return (1);
1473 	}
1474 
1475 	/*
1476 	 * Use the offset to determine the appropriate CSUM_* flags. This is
1477 	 * a bit dirty, but we can get by with it since the checksum offsets
1478 	 * happen to be different. We assume the host does not do IPv4
1479 	 * header checksum offloading.
1480 	 */
1481 	switch (hdr->csum_offset) {
1482 	case offsetof(struct udphdr, uh_sum):
1483 	case offsetof(struct tcphdr, th_sum):
1484 		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1485 		m->m_pkthdr.csum_data = 0xFFFF;
1486 		break;
1487 	case offsetof(struct sctphdr, checksum):
1488 		m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID;
1489 		break;
1490 	default:
1491 		sc->vtnet_stats.rx_csum_bad_offset++;
1492 		return (1);
1493 	}
1494 
1495 	return (0);
1496 }
1497 
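/*
 * The host only told us the checksum is valid, not which protocol it
 * covers, so parse the IP/IPv6 headers to locate the transport protocol
 * before setting the corresponding CSUM_* flags.
 */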
1498 static int
1499 vtnet_rxq_csum_by_parse(struct vtnet_rxq *rxq, struct mbuf *m,
1500     uint16_t eth_type, int ip_start, struct virtio_net_hdr *hdr)
1501 {
1502 	struct vtnet_softc *sc;
1503 	int offset, proto;
1504 
1505 	sc = rxq->vtnrx_sc;
1506 
1507 	switch (eth_type) {
1508 #if defined(INET)
1509 	case ETHERTYPE_IP: {
1510 		struct ip *ip;
1511 		if (__predict_false(m->m_len < ip_start + sizeof(struct ip)))
1512 			return (1);
1513 		ip = (struct ip *)(m->m_data + ip_start);
1514 		proto = ip->ip_p;
1515 		offset = ip_start + (ip->ip_hl << 2);
1516 		break;
1517 	}
1518 #endif
1519 #if defined(INET6)
1520 	case ETHERTYPE_IPV6:
1521 		if (__predict_false(m->m_len < ip_start +
1522 		    sizeof(struct ip6_hdr)))
1523 			return (1);
1524 		offset = ip6_lasthdr(m, ip_start, IPPROTO_IPV6, &proto);
1525 		if (__predict_false(offset < 0))
1526 			return (1);
1527 		break;
1528 #endif
1529 	default:
1530 		sc->vtnet_stats.rx_csum_bad_ethtype++;
1531 		return (1);
1532 	}
1533 
1534 	switch (proto) {
1535 	case IPPROTO_TCP:
1536 		if (__predict_false(m->m_len < offset + sizeof(struct tcphdr)))
1537 			return (1);
1538 		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1539 		m->m_pkthdr.csum_data = 0xFFFF;
1540 		break;
1541 	case IPPROTO_UDP:
1542 		if (__predict_false(m->m_len < offset + sizeof(struct udphdr)))
1543 			return (1);
1544 		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1545 		m->m_pkthdr.csum_data = 0xFFFF;
1546 		break;
1547 	case IPPROTO_SCTP:
1548 		if (__predict_false(m->m_len < offset + sizeof(struct sctphdr)))
1549 			return (1);
1550 		m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID;
1551 		break;
1552 	default:
1553 		/*
1554 		 * For the remaining protocols, FreeBSD does not support
1555 		 * checksum offloading, so the checksum will be recomputed.
1556 		 */
1557 #if 0
1558 		if_printf(sc->vtnet_ifp, "%s: cksum offload of unsupported "
1559 		    "protocol eth_type=%#x proto=%d csum_start=%d "
1560 		    "csum_offset=%d\n", __func__, eth_type, proto,
1561 		    hdr->csum_start, hdr->csum_offset);
1562 #endif
1563 		break;
1564 	}
1565 
1566 	return (0);
1567 }
1568 
1569 /*
1570  * Set the appropriate CSUM_* flags. Unfortunately, the information
1571  * provided is not directly useful to us. The VirtIO header gives the
1572  * offset of the checksum, which is all Linux needs, but this is not
1573  * how FreeBSD does things. We are forced to peek inside the packet
1574  * a bit.
1575  *
1576  * It would be nice if VirtIO gave us the L4 protocol or if FreeBSD
1577  * could accept the offsets and let the stack figure it out.
1578  */
1579 static int
1580 vtnet_rxq_csum(struct vtnet_rxq *rxq, struct mbuf *m,
1581     struct virtio_net_hdr *hdr)
1582 {
1583 	struct ether_header *eh;
1584 	struct ether_vlan_header *evh;
1585 	uint16_t eth_type;
1586 	int offset, error;
1587 
1588 	eh = mtod(m, struct ether_header *);
1589 	eth_type = ntohs(eh->ether_type);
1590 	if (eth_type == ETHERTYPE_VLAN) {
1591 		/* BMV: We should handle nested VLAN tags too. */
1592 		evh = mtod(m, struct ether_vlan_header *);
1593 		eth_type = ntohs(evh->evl_proto);
1594 		offset = sizeof(struct ether_vlan_header);
1595 	} else
1596 		offset = sizeof(struct ether_header);
1597 
1598 	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
1599 		error = vtnet_rxq_csum_by_offset(rxq, m, eth_type, offset, hdr);
1600 	else
1601 		error = vtnet_rxq_csum_by_parse(rxq, m, eth_type, offset, hdr);
1602 
1603 	return (error);
1604 }
1605 
1606 static void
1607 vtnet_rxq_discard_merged_bufs(struct vtnet_rxq *rxq, int nbufs)
1608 {
1609 	struct mbuf *m;
1610 
1611 	while (--nbufs > 0) {
1612 		m = virtqueue_dequeue(rxq->vtnrx_vq, NULL);
1613 		if (m == NULL)
1614 			break;
1615 		vtnet_rxq_discard_buf(rxq, m);
1616 	}
1617 }
1618 
1619 static void
1620 vtnet_rxq_discard_buf(struct vtnet_rxq *rxq, struct mbuf *m)
1621 {
1622 	int error;
1623 
1624 	/*
1625 	 * Requeue the discarded mbuf. This should always be successful
1626 	 * since it was just dequeued.
1627 	 */
1628 	error = vtnet_rxq_enqueue_buf(rxq, m);
1629 	KASSERT(error == 0,
1630 	    ("%s: cannot requeue discarded mbuf %d", __func__, error));
1631 }
1632 
1633 static int
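/*
 * Dequeue the remaining buffers of a mergeable frame and chain them onto
 * m_head, replacing each descriptor with a fresh buffer as we go.
 */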
1634 vtnet_rxq_merged_eof(struct vtnet_rxq *rxq, struct mbuf *m_head, int nbufs)
1635 {
1636 	struct vtnet_softc *sc;
1637 	struct ifnet *ifp;
1638 	struct virtqueue *vq;
1639 	struct mbuf *m, *m_tail;
1640 	int len;
1641 
1642 	sc = rxq->vtnrx_sc;
1643 	vq = rxq->vtnrx_vq;
1644 	ifp = sc->vtnet_ifp;
1645 	m_tail = m_head;
1646 
1647 	while (--nbufs > 0) {
1648 		m = virtqueue_dequeue(vq, &len);
1649 		if (m == NULL) {
1650 			rxq->vtnrx_stats.vrxs_ierrors++;
1651 			goto fail;
1652 		}
1653 
1654 		if (vtnet_rxq_new_buf(rxq) != 0) {
1655 			rxq->vtnrx_stats.vrxs_iqdrops++;
1656 			vtnet_rxq_discard_buf(rxq, m);
1657 			if (nbufs > 1)
1658 				vtnet_rxq_discard_merged_bufs(rxq, nbufs);
1659 			goto fail;
1660 		}
1661 
1662 		if (m->m_len < len)
1663 			len = m->m_len;
1664 
1665 		m->m_len = len;
1666 		m->m_flags &= ~M_PKTHDR;
1667 
1668 		m_head->m_pkthdr.len += len;
1669 		m_tail->m_next = m;
1670 		m_tail = m;
1671 	}
1672 
1673 	return (0);
1674 
1675 fail:
1676 	sc->vtnet_stats.rx_mergeable_failed++;
1677 	m_freem(m_head);
1678 
1679 	return (1);
1680 }
1681 
1682 static void
1683 vtnet_rxq_input(struct vtnet_rxq *rxq, struct mbuf *m,
1684     struct virtio_net_hdr *hdr)
1685 {
1686 	struct vtnet_softc *sc;
1687 	struct ifnet *ifp;
1688 	struct ether_header *eh;
1689 
1690 	sc = rxq->vtnrx_sc;
1691 	ifp = sc->vtnet_ifp;
1692 
1693 	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1694 		eh = mtod(m, struct ether_header *);
1695 		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
1696 			vtnet_vlan_tag_remove(m);
1697 			/*
1698 			 * With the 802.1Q header removed, update the
1699 			 * checksum starting location accordingly.
1700 			 */
1701 			if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
1702 				hdr->csum_start -= ETHER_VLAN_ENCAP_LEN;
1703 		}
1704 	}
1705 
1706 	m->m_pkthdr.flowid = rxq->vtnrx_id;
1707 	m->m_flags |= M_FLOWID;
1708 
1709 	/*
1710 	 * BMV: FreeBSD does not have the UNNECESSARY and PARTIAL checksum
1711 	 * distinction that Linux does. Need to reevaluate if performing
1712 	 * offloading for the NEEDS_CSUM case is really appropriate.
1713 	 */
1714 	if (hdr->flags & (VIRTIO_NET_HDR_F_NEEDS_CSUM |
1715 	    VIRTIO_NET_HDR_F_DATA_VALID)) {
1716 		if (vtnet_rxq_csum(rxq, m, hdr) == 0)
1717 			rxq->vtnrx_stats.vrxs_csum++;
1718 		else
1719 			rxq->vtnrx_stats.vrxs_csum_failed++;
1720 	}
1721 
1722 	rxq->vtnrx_stats.vrxs_ipackets++;
1723 	rxq->vtnrx_stats.vrxs_ibytes += m->m_pkthdr.len;
1724 
1725 	VTNET_RXQ_UNLOCK(rxq);
1726 	(*ifp->if_input)(ifp, m);
1727 	VTNET_RXQ_LOCK(rxq);
1728 }
1729 
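/*
 * Process up to vtnet_rx_process_limit received frames; EAGAIN is returned
 * once the limit has been exhausted so the caller can reschedule.
 */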
1730 static int
1731 vtnet_rxq_eof(struct vtnet_rxq *rxq)
1732 {
1733 	struct virtio_net_hdr lhdr, *hdr;
1734 	struct vtnet_softc *sc;
1735 	struct ifnet *ifp;
1736 	struct virtqueue *vq;
1737 	struct mbuf *m;
1738 	struct virtio_net_hdr_mrg_rxbuf *mhdr;
1739 	int len, deq, nbufs, adjsz, count;
1740 
1741 	sc = rxq->vtnrx_sc;
1742 	vq = rxq->vtnrx_vq;
1743 	ifp = sc->vtnet_ifp;
1744 	hdr = &lhdr;
1745 	deq = 0;
1746 	count = sc->vtnet_rx_process_limit;
1747 
1748 	VTNET_RXQ_LOCK_ASSERT(rxq);
1749 
1750 	while (count-- > 0) {
1751 		m = virtqueue_dequeue(vq, &len);
1752 		if (m == NULL)
1753 			break;
1754 		deq++;
1755 
1756 		if (len < sc->vtnet_hdr_size + ETHER_HDR_LEN) {
1757 			rxq->vtnrx_stats.vrxs_ierrors++;
1758 			vtnet_rxq_discard_buf(rxq, m);
1759 			continue;
1760 		}
1761 
1762 		if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
1763 			nbufs = 1;
1764 			adjsz = sizeof(struct vtnet_rx_header);
1765 			/*
1766 			 * Account for our pad inserted between the header
1767 			 * and the actual start of the frame.
1768 			 */
1769 			len += VTNET_RX_HEADER_PAD;
1770 		} else {
1771 			mhdr = mtod(m, struct virtio_net_hdr_mrg_rxbuf *);
1772 			nbufs = mhdr->num_buffers;
1773 			adjsz = sizeof(struct virtio_net_hdr_mrg_rxbuf);
1774 		}
1775 
1776 		if (vtnet_rxq_replace_buf(rxq, m, len) != 0) {
1777 			rxq->vtnrx_stats.vrxs_iqdrops++;
1778 			vtnet_rxq_discard_buf(rxq, m);
1779 			if (nbufs > 1)
1780 				vtnet_rxq_discard_merged_bufs(rxq, nbufs);
1781 			continue;
1782 		}
1783 
1784 		m->m_pkthdr.len = len;
1785 		m->m_pkthdr.rcvif = ifp;
1786 		m->m_pkthdr.csum_flags = 0;
1787 
1788 		if (nbufs > 1) {
1789 			/* Dequeue the rest of chain. */
1790 			if (vtnet_rxq_merged_eof(rxq, m, nbufs) != 0)
1791 				continue;
1792 		}
1793 
1794 		/*
1795 		 * Save copy of header before we strip it. For both mergeable
1796 		 * and non-mergeable, the header is at the beginning of the
1797 		 * mbuf data. We no longer need num_buffers, so always use a
1798 		 * regular header.
1799 		 *
1800 		 * BMV: Is this memcpy() expensive? We know the mbuf data is
1801 		 * still valid even after the m_adj().
1802 		 */
1803 		memcpy(hdr, mtod(m, void *), sizeof(struct virtio_net_hdr));
1804 		m_adj(m, adjsz);
1805 
1806 		vtnet_rxq_input(rxq, m, hdr);
1807 
1808 		/* Must recheck after dropping the Rx lock. */
1809 		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1810 			break;
1811 	}
1812 
1813 	if (deq > 0)
1814 		virtqueue_notify(vq);
1815 
1816 	return (count > 0 ? 0 : EAGAIN);
1817 }
1818 
1819 static void
1820 vtnet_rx_vq_intr(void *xrxq)
1821 {
1822 	struct vtnet_softc *sc;
1823 	struct vtnet_rxq *rxq;
1824 	struct ifnet *ifp;
1825 	int tries, more;
1826 
1827 	rxq = xrxq;
1828 	sc = rxq->vtnrx_sc;
1829 	ifp = sc->vtnet_ifp;
1830 	tries = 0;
1831 
1832 	if (__predict_false(rxq->vtnrx_id >= sc->vtnet_act_vq_pairs)) {
1833 		/*
1834 		 * Ignore this interrupt. Either this is a spurious interrupt
1835 		 * or multiqueue without per-VQ MSIX so every queue needs to
1836 		 * be polled (a brain dead configuration we could try harder
1837 		 * to avoid).
1838 		 */
1839 		vtnet_rxq_disable_intr(rxq);
1840 		return;
1841 	}
1842 
1843 	VTNET_RXQ_LOCK(rxq);
1844 
1845 again:
1846 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1847 		VTNET_RXQ_UNLOCK(rxq);
1848 		return;
1849 	}
1850 
1851 	more = vtnet_rxq_eof(rxq);
1852 	if (more || vtnet_rxq_enable_intr(rxq) != 0) {
1853 		if (!more)
1854 			vtnet_rxq_disable_intr(rxq);
1855 		/*
1856 		 * This is an occasional condition or race (when !more),
1857 		 * so retry a few times before scheduling the taskqueue.
1858 		 */
1859 		if (tries++ < VTNET_INTR_DISABLE_RETRIES)
1860 			goto again;
1861 
1862 		VTNET_RXQ_UNLOCK(rxq);
1863 		rxq->vtnrx_stats.vrxs_rescheduled++;
1864 		taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
1865 	} else
1866 		VTNET_RXQ_UNLOCK(rxq);
1867 }
1868 
1869 static void
1870 vtnet_rxq_tq_intr(void *xrxq, int pending)
1871 {
1872 	struct vtnet_softc *sc;
1873 	struct vtnet_rxq *rxq;
1874 	struct ifnet *ifp;
1875 	int more;
1876 
1877 	rxq = xrxq;
1878 	sc = rxq->vtnrx_sc;
1879 	ifp = sc->vtnet_ifp;
1880 
1881 	VTNET_RXQ_LOCK(rxq);
1882 
1883 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1884 		VTNET_RXQ_UNLOCK(rxq);
1885 		return;
1886 	}
1887 
1888 	more = vtnet_rxq_eof(rxq);
1889 	if (more || vtnet_rxq_enable_intr(rxq) != 0) {
1890 		if (!more)
1891 			vtnet_rxq_disable_intr(rxq);
1892 		rxq->vtnrx_stats.vrxs_rescheduled++;
1893 		taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
1894 	}
1895 
1896 	VTNET_RXQ_UNLOCK(rxq);
1897 }
1898 
1899 static void
1900 vtnet_txq_free_mbufs(struct vtnet_txq *txq)
1901 {
1902 	struct virtqueue *vq;
1903 	struct vtnet_tx_header *txhdr;
1904 	int last;
1905 
1906 	vq = txq->vtntx_vq;
1907 	last = 0;
1908 
1909 	while ((txhdr = virtqueue_drain(vq, &last)) != NULL) {
1910 		m_freem(txhdr->vth_mbuf);
1911 		uma_zfree(vtnet_tx_header_zone, txhdr);
1912 	}
1913 
1914 	KASSERT(virtqueue_empty(vq),
1915 	    ("%s: mbufs remaining in tx queue %p", __func__, txq));
1916 }
1917 
1918 /*
1919  * BMV: Much of this can go away once we finally have offsets in
1920  * the mbuf packet header. Bug andre@.
1921  */
1922 static int
1923 vtnet_txq_offload_ctx(struct vtnet_txq *txq, struct mbuf *m,
1924     int *etype, int *proto, int *start)
1925 {
1926 	struct vtnet_softc *sc;
1927 	struct ether_vlan_header *evh;
1928 	int offset;
1929 
1930 	sc = txq->vtntx_sc;
1931 
1932 	evh = mtod(m, struct ether_vlan_header *);
1933 	if (evh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1934 		/* BMV: We should handle nested VLAN tags too. */
1935 		*etype = ntohs(evh->evl_proto);
1936 		offset = sizeof(struct ether_vlan_header);
1937 	} else {
1938 		*etype = ntohs(evh->evl_encap_proto);
1939 		offset = sizeof(struct ether_header);
1940 	}
1941 
1942 	switch (*etype) {
1943 #if defined(INET)
1944 	case ETHERTYPE_IP: {
1945 		struct ip *ip, iphdr;
1946 		if (__predict_false(m->m_len < offset + sizeof(struct ip))) {
1947 			m_copydata(m, offset, sizeof(struct ip),
1948 			    (caddr_t) &iphdr);
1949 			ip = &iphdr;
1950 		} else
1951 			ip = (struct ip *)(m->m_data + offset);
1952 		*proto = ip->ip_p;
1953 		*start = offset + (ip->ip_hl << 2);
1954 		break;
1955 	}
1956 #endif
1957 #if defined(INET6)
1958 	case ETHERTYPE_IPV6:
1959 		*proto = -1;
1960 		*start = ip6_lasthdr(m, offset, IPPROTO_IPV6, proto);
1961 		/* Assert the network stack sent us a valid packet. */
1962 		KASSERT(*start > offset,
1963 		    ("%s: mbuf %p start %d offset %d proto %d", __func__, m,
1964 		    *start, offset, *proto));
1965 		break;
1966 #endif
1967 	default:
1968 		sc->vtnet_stats.tx_csum_bad_ethtype++;
1969 		return (EINVAL);
1970 	}
1971 
1972 	return (0);
1973 }
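
/*
 * Worked example (illustrative): for an untagged IPv4/TCP frame with no
 * IP options, the parse above yields *etype = ETHERTYPE_IP, offset =
 * sizeof(struct ether_header) = 14, *proto = IPPROTO_TCP, and
 * *start = 14 + (ip_hl << 2) = 14 + 20 = 34, i.e. the byte offset of
 * the TCP header within the frame.
 */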
1974 
1975 static int
1976 vtnet_txq_offload_tso(struct vtnet_txq *txq, struct mbuf *m, int eth_type,
1977     int offset, struct virtio_net_hdr *hdr)
1978 {
1979 	static struct timeval lastecn;
1980 	static int curecn;
1981 	struct vtnet_softc *sc;
1982 	struct tcphdr *tcp, tcphdr;
1983 
1984 	sc = txq->vtntx_sc;
1985 
1986 	if (__predict_false(m->m_len < offset + sizeof(struct tcphdr))) {
1987 		m_copydata(m, offset, sizeof(struct tcphdr), (caddr_t) &tcphdr);
1988 		tcp = &tcphdr;
1989 	} else
1990 		tcp = (struct tcphdr *)(m->m_data + offset);
1991 
1992 	hdr->hdr_len = offset + (tcp->th_off << 2);
1993 	hdr->gso_size = m->m_pkthdr.tso_segsz;
1994 	hdr->gso_type = eth_type == ETHERTYPE_IP ? VIRTIO_NET_HDR_GSO_TCPV4 :
1995 	    VIRTIO_NET_HDR_GSO_TCPV6;
1996 
1997 	if (tcp->th_flags & TH_CWR) {
1998 		/*
1999 		 * Drop if VIRTIO_NET_F_HOST_ECN was not negotiated. In FreeBSD,
2000 		 * ECN support is not on a per-interface basis, but globally via
2001 		 * the net.inet.tcp.ecn.enable sysctl knob. The default is off.
2002 		 */
2003 		if ((sc->vtnet_flags & VTNET_FLAG_TSO_ECN) == 0) {
2004 			if (ppsratecheck(&lastecn, &curecn, 1))
2005 				if_printf(sc->vtnet_ifp,
2006 				    "TSO with ECN not negotiated with host\n");
2007 			return (ENOTSUP);
2008 		}
2009 		hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
2010 	}
2011 
2012 	txq->vtntx_stats.vtxs_tso++;
2013 
2014 	return (0);
2015 }
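
/*
 * Worked example (illustrative): continuing the IPv4/TCP case with
 * offset = 34 and no TCP options, the header filled in above becomes
 * hdr_len = 34 + (th_off << 2) = 54, gso_size = tso_segsz as provided
 * by the stack (typically the MSS), and gso_type = GSO_TCPV4, with
 * GSO_ECN or'd in only when TH_CWR is set and ECN was negotiated.
 */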
2016 
2017 static struct mbuf *
2018 vtnet_txq_offload(struct vtnet_txq *txq, struct mbuf *m,
2019     struct virtio_net_hdr *hdr)
2020 {
2021 	struct vtnet_softc *sc;
2022 	int flags, etype, csum_start, proto, error;
2023 
2024 	sc = txq->vtntx_sc;
2025 	flags = m->m_pkthdr.csum_flags;
2026 
2027 	error = vtnet_txq_offload_ctx(txq, m, &etype, &proto, &csum_start);
2028 	if (error)
2029 		goto drop;
2030 
2031 	if ((etype == ETHERTYPE_IP && flags & VTNET_CSUM_OFFLOAD) ||
2032 	    (etype == ETHERTYPE_IPV6 && flags & VTNET_CSUM_OFFLOAD_IPV6)) {
2033 		/*
2034 		 * We could compare the IP protocol vs the CSUM_ flag too,
2035 		 * but that really should not be necessary.
2036 		 */
2037 		hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM;
2038 		hdr->csum_start = csum_start;
2039 		hdr->csum_offset = m->m_pkthdr.csum_data;
2040 		txq->vtntx_stats.vtxs_csum++;
2041 	}
2042 
2043 	if (flags & CSUM_TSO) {
2044 		if (__predict_false(proto != IPPROTO_TCP)) {
2045 			/* Likely failed to correctly parse the mbuf. */
2046 			sc->vtnet_stats.tx_tso_not_tcp++;
2047 			goto drop;
2048 		}
2049 
2050 		KASSERT(hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM,
2051 		    ("%s: mbuf %p TSO without checksum offload %#x",
2052 		    __func__, m, flags));
2053 
2054 		error = vtnet_txq_offload_tso(txq, m, etype, csum_start, hdr);
2055 		if (error)
2056 			goto drop;
2057 	}
2058 
2059 	return (m);
2060 
2061 drop:
2062 	m_freem(m);
2063 	return (NULL);
2064 }
2065 
2066 static int
2067 vtnet_txq_enqueue_buf(struct vtnet_txq *txq, struct mbuf **m_head,
2068     struct vtnet_tx_header *txhdr)
2069 {
2070 	struct vtnet_softc *sc;
2071 	struct virtqueue *vq;
2072 	struct sglist *sg;
2073 	struct mbuf *m;
2074 	int error;
2075 
2076 	sc = txq->vtntx_sc;
2077 	vq = txq->vtntx_vq;
2078 	sg = txq->vtntx_sg;
2079 	m = *m_head;
2080 
2081 	sglist_reset(sg);
2082 	error = sglist_append(sg, &txhdr->vth_uhdr, sc->vtnet_hdr_size);
2083 	KASSERT(error == 0 && sg->sg_nseg == 1,
2084 	    ("%s: error %d adding header to sglist", __func__, error));
2085 
2086 	error = sglist_append_mbuf(sg, m);
2087 	if (error) {
2088 		m = m_defrag(m, M_NOWAIT);
2089 		if (m == NULL)
2090 			goto fail;
2091 
2092 		*m_head = m;
2093 		sc->vtnet_stats.tx_defragged++;
2094 
2095 		error = sglist_append_mbuf(sg, m);
2096 		if (error)
2097 			goto fail;
2098 	}
2099 
2100 	txhdr->vth_mbuf = m;
2101 	error = virtqueue_enqueue(vq, txhdr, sg, sg->sg_nseg, 0);
2102 
2103 	return (error);
2104 
2105 fail:
2106 	sc->vtnet_stats.tx_defrag_failed++;
2107 	m_freem(*m_head);
2108 	*m_head = NULL;
2109 
2110 	return (ENOBUFS);
2111 }
2112 
2113 static int
2114 vtnet_txq_encap(struct vtnet_txq *txq, struct mbuf **m_head)
2115 {
2116 	struct vtnet_softc *sc;
2117 	struct vtnet_tx_header *txhdr;
2118 	struct virtio_net_hdr *hdr;
2119 	struct mbuf *m;
2120 	int error;
2121 
2122 	sc = txq->vtntx_sc;
2123 	m = *m_head;
2124 	M_ASSERTPKTHDR(m);
2125 
2126 	txhdr = uma_zalloc(vtnet_tx_header_zone, M_NOWAIT | M_ZERO);
2127 	if (txhdr == NULL) {
2128 		m_freem(m);
2129 		*m_head = NULL;
2130 		return (ENOMEM);
2131 	}
2132 
2133 	/*
2134 	 * Always use the non-mergeable header, regardless of whether the
2135 	 * feature was negotiated. For transmit, num_buffers is always zero.
2136 	 * The vtnet_hdr_size is used to enqueue the correct header size.
2137 	 */
2138 	hdr = &txhdr->vth_uhdr.hdr;
2139 
2140 	if (m->m_flags & M_VLANTAG) {
2141 		m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
2142 		if ((*m_head = m) == NULL) {
2143 			error = ENOBUFS;
2144 			goto fail;
2145 		}
2146 		m->m_flags &= ~M_VLANTAG;
2147 	}
2148 
2149 	if (m->m_pkthdr.csum_flags & VTNET_CSUM_ALL_OFFLOAD) {
2150 		m = vtnet_txq_offload(txq, m, hdr);
2151 		if ((*m_head = m) == NULL) {
2152 			error = ENOBUFS;
2153 			goto fail;
2154 		}
2155 	}
2156 
2157 	error = vtnet_txq_enqueue_buf(txq, m_head, txhdr);
2158 	if (error == 0)
2159 		return (0);
2160 
2161 fail:
2162 	uma_zfree(vtnet_tx_header_zone, txhdr);
2163 
2164 	return (error);
2165 }
2166 
2167 #ifdef VTNET_LEGACY_TX
2168 
2169 static void
2170 vtnet_start_locked(struct vtnet_txq *txq, struct ifnet *ifp)
2171 {
2172 	struct vtnet_softc *sc;
2173 	struct virtqueue *vq;
2174 	struct mbuf *m0;
2175 	int enq;
2176 
2177 	sc = txq->vtntx_sc;
2178 	vq = txq->vtntx_vq;
2179 	enq = 0;
2180 
2181 	VTNET_TXQ_LOCK_ASSERT(txq);
2182 
2183 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
2184 	    sc->vtnet_link_active == 0)
2185 		return;
2186 
2187 	vtnet_txq_eof(txq);
2188 
2189 	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
2190 		if (virtqueue_full(vq))
2191 			break;
2192 
2193 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);
2194 		if (m0 == NULL)
2195 			break;
2196 
2197 		if (vtnet_txq_encap(txq, &m0) != 0) {
2198 			if (m0 != NULL)
2199 				IFQ_DRV_PREPEND(&ifp->if_snd, m0);
2200 			break;
2201 		}
2202 
2203 		enq++;
2204 		ETHER_BPF_MTAP(ifp, m0);
2205 	}
2206 
2207 	if (enq > 0) {
2208 		virtqueue_notify(vq);
2209 		txq->vtntx_watchdog = VTNET_TX_TIMEOUT;
2210 	}
2211 }
2212 
2213 static void
2214 vtnet_start(struct ifnet *ifp)
2215 {
2216 	struct vtnet_softc *sc;
2217 	struct vtnet_txq *txq;
2218 
2219 	sc = ifp->if_softc;
2220 	txq = &sc->vtnet_txqs[0];
2221 
2222 	VTNET_TXQ_LOCK(txq);
2223 	vtnet_start_locked(txq, ifp);
2224 	VTNET_TXQ_UNLOCK(txq);
2225 }
2226 
2227 #else /* !VTNET_LEGACY_TX */
2228 
2229 static int
2230 vtnet_txq_mq_start_locked(struct vtnet_txq *txq, struct mbuf *m)
2231 {
2232 	struct vtnet_softc *sc;
2233 	struct virtqueue *vq;
2234 	struct buf_ring *br;
2235 	struct ifnet *ifp;
2236 	int enq, error;
2237 
2238 	sc = txq->vtntx_sc;
2239 	vq = txq->vtntx_vq;
2240 	br = txq->vtntx_br;
2241 	ifp = sc->vtnet_ifp;
2242 	enq = 0;
2243 	error = 0;
2244 
2245 	VTNET_TXQ_LOCK_ASSERT(txq);
2246 
2247 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
2248 	    sc->vtnet_link_active == 0) {
2249 		if (m != NULL)
2250 			error = drbr_enqueue(ifp, br, m);
2251 		return (error);
2252 	}
2253 
2254 	if (m != NULL) {
2255 		error = drbr_enqueue(ifp, br, m);
2256 		if (error)
2257 			return (error);
2258 	}
2259 
2260 	vtnet_txq_eof(txq);
2261 
2262 	while ((m = drbr_peek(ifp, br)) != NULL) {
2263 		if (virtqueue_full(vq)) {
2264 			drbr_putback(ifp, br, m);
2265 			error = ENOBUFS;
2266 			break;
2267 		}
2268 
2269 		error = vtnet_txq_encap(txq, &m);
2270 		if (error) {
2271 			if (m != NULL)
2272 				drbr_putback(ifp, br, m);
2273 			else
2274 				drbr_advance(ifp, br);
2275 			break;
2276 		}
2277 		drbr_advance(ifp, br);
2278 
2279 		enq++;
2280 		ETHER_BPF_MTAP(ifp, m);
2281 	}
2282 
2283 	if (enq > 0) {
2284 		virtqueue_notify(vq);
2285 		txq->vtntx_watchdog = VTNET_TX_TIMEOUT;
2286 	}
2287 
2288 	return (error);
2289 }
2290 
2291 static int
2292 vtnet_txq_mq_start(struct ifnet *ifp, struct mbuf *m)
2293 {
2294 	struct vtnet_softc *sc;
2295 	struct vtnet_txq *txq;
2296 	int i, npairs, error;
2297 
2298 	sc = ifp->if_softc;
2299 	npairs = sc->vtnet_act_vq_pairs;
2300 
2301 	if (m->m_flags & M_FLOWID)
2302 		i = m->m_pkthdr.flowid % npairs;
2303 	else
2304 		i = curcpu % npairs;
2305 
2306 	txq = &sc->vtnet_txqs[i];
2307 
2308 	if (VTNET_TXQ_TRYLOCK(txq) != 0) {
2309 		error = vtnet_txq_mq_start_locked(txq, m);
2310 		VTNET_TXQ_UNLOCK(txq);
2311 	} else {
2312 		error = drbr_enqueue(ifp, txq->vtntx_br, m);
2313 		taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_defrtask);
2314 	}
2315 
2316 	return (error);
2317 }
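
/*
 * Example of the queue selection above (illustrative): with 4 active
 * queue pairs, a packet carrying flowid 7 maps to txq 7 % 4 = 3, while
 * a packet without M_FLOWID sent from CPU 5 maps to txq 5 % 4 = 1.
 */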
2318 
2319 static void
2320 vtnet_txq_tq_deferred(void *xtxq, int pending)
2321 {
2322 	struct vtnet_softc *sc;
2323 	struct vtnet_txq *txq;
2324 
2325 	txq = xtxq;
2326 	sc = txq->vtntx_sc;
2327 
2328 	VTNET_TXQ_LOCK(txq);
2329 	if (!drbr_empty(sc->vtnet_ifp, txq->vtntx_br))
2330 		vtnet_txq_mq_start_locked(txq, NULL);
2331 	VTNET_TXQ_UNLOCK(txq);
2332 }
2333 
2334 #endif /* VTNET_LEGACY_TX */
2335 
2336 static void
2337 vtnet_txq_start(struct vtnet_txq *txq)
2338 {
2339 	struct vtnet_softc *sc;
2340 	struct ifnet *ifp;
2341 
2342 	sc = txq->vtntx_sc;
2343 	ifp = sc->vtnet_ifp;
2344 
2345 #ifdef VTNET_LEGACY_TX
2346 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2347 		vtnet_start_locked(txq, ifp);
2348 #else
2349 	if (!drbr_empty(ifp, txq->vtntx_br))
2350 		vtnet_txq_mq_start_locked(txq, NULL);
2351 #endif
2352 }
2353 
2354 static void
2355 vtnet_txq_tq_intr(void *xtxq, int pending)
2356 {
2357 	struct vtnet_softc *sc;
2358 	struct vtnet_txq *txq;
2359 	struct ifnet *ifp;
2360 
2361 	txq = xtxq;
2362 	sc = txq->vtntx_sc;
2363 	ifp = sc->vtnet_ifp;
2364 
2365 	VTNET_TXQ_LOCK(txq);
2366 
2367 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2368 		VTNET_TXQ_UNLOCK(txq);
2369 		return;
2370 	}
2371 
2372 	vtnet_txq_eof(txq);
2373 
2374 	vtnet_txq_start(txq);
2375 
2376 	if (vtnet_txq_enable_intr(txq) != 0) {
2377 		vtnet_txq_disable_intr(txq);
2378 		txq->vtntx_stats.vtxs_rescheduled++;
2379 		taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_intrtask);
2380 	}
2381 
2382 	VTNET_TXQ_UNLOCK(txq);
2383 }
2384 
2385 static void
2386 vtnet_txq_eof(struct vtnet_txq *txq)
2387 {
2388 	struct virtqueue *vq;
2389 	struct vtnet_tx_header *txhdr;
2390 	struct mbuf *m;
2391 
2392 	vq = txq->vtntx_vq;
2393 	VTNET_TXQ_LOCK_ASSERT(txq);
2394 
2395 	while ((txhdr = virtqueue_dequeue(vq, NULL)) != NULL) {
2396 		m = txhdr->vth_mbuf;
2397 
2398 		txq->vtntx_stats.vtxs_opackets++;
2399 		txq->vtntx_stats.vtxs_obytes += m->m_pkthdr.len;
2400 		if (m->m_flags & M_MCAST)
2401 			txq->vtntx_stats.vtxs_omcasts++;
2402 
2403 		m_freem(m);
2404 		uma_zfree(vtnet_tx_header_zone, txhdr);
2405 	}
2406 
2407 	if (virtqueue_empty(vq))
2408 		txq->vtntx_watchdog = 0;
2409 }
2410 
2411 static void
2412 vtnet_tx_vq_intr(void *xtxq)
2413 {
2414 	struct vtnet_softc *sc;
2415 	struct vtnet_txq *txq;
2416 	struct ifnet *ifp;
2417 	int tries;
2418 
2419 	txq = xtxq;
2420 	sc = txq->vtntx_sc;
2421 	ifp = sc->vtnet_ifp;
2422 	tries = 0;
2423 
2424 	if (__predict_false(txq->vtntx_id >= sc->vtnet_act_vq_pairs)) {
2425 		/*
2426 		 * Ignore this interrupt. Either this is a spurious interrupt
2427 		 * or multiqueue without per-VQ MSIX so every queue needs to
2428 		 * be polled (a brain dead configuration we could try harder
2429 		 * to avoid).
2430 		 */
2431 		vtnet_txq_disable_intr(txq);
2432 		return;
2433 	}
2434 
2435 	VTNET_TXQ_LOCK(txq);
2436 
2437 again:
2438 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2439 		VTNET_TXQ_UNLOCK(txq);
2440 		return;
2441 	}
2442 
2443 	vtnet_txq_eof(txq);
2444 
2445 	vtnet_txq_start(txq);
2446 
2447 	if (vtnet_txq_enable_intr(txq) != 0) {
2448 		vtnet_txq_disable_intr(txq);
2449 		/*
2450 		 * This is an occasional race, so retry a few times
2451 		 * before scheduling the taskqueue.
2452 		 */
2453 		if (tries++ < VTNET_INTR_DISABLE_RETRIES)
2454 			goto again;
2455 
2456 		VTNET_TXQ_UNLOCK(txq);
2457 		txq->vtntx_stats.vtxs_rescheduled++;
2458 		taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_intrtask);
2459 	} else
2460 		VTNET_TXQ_UNLOCK(txq);
2461 }
2462 
2463 static void
2464 vtnet_tx_start_all(struct vtnet_softc *sc)
2465 {
2466 	struct vtnet_txq *txq;
2467 	int i;
2468 
2469 	VTNET_CORE_LOCK_ASSERT(sc);
2470 
2471 	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
2472 		txq = &sc->vtnet_txqs[i];
2473 
2474 		VTNET_TXQ_LOCK(txq);
2475 		vtnet_txq_start(txq);
2476 		VTNET_TXQ_UNLOCK(txq);
2477 	}
2478 }
2479 
2480 #ifndef VTNET_LEGACY_TX
2481 static void
2482 vtnet_qflush(struct ifnet *ifp)
2483 {
2484 	struct vtnet_softc *sc;
2485 	struct vtnet_txq *txq;
2486 	struct mbuf *m;
2487 	int i;
2488 
2489 	sc = ifp->if_softc;
2490 
2491 	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
2492 		txq = &sc->vtnet_txqs[i];
2493 
2494 		VTNET_TXQ_LOCK(txq);
2495 		while ((m = buf_ring_dequeue_sc(txq->vtntx_br)) != NULL)
2496 			m_freem(m);
2497 		VTNET_TXQ_UNLOCK(txq);
2498 	}
2499 
2500 	if_qflush(ifp);
2501 }
2502 #endif
2503 
2504 static int
2505 vtnet_watchdog(struct vtnet_txq *txq)
2506 {
2507 	struct vtnet_softc *sc;
2508 
2509 	sc = txq->vtntx_sc;
2510 
2511 	VTNET_TXQ_LOCK(txq);
2512 	if (sc->vtnet_flags & VTNET_FLAG_EVENT_IDX)
2513 		vtnet_txq_eof(txq);
2514 	if (txq->vtntx_watchdog == 0 || --txq->vtntx_watchdog) {
2515 		VTNET_TXQ_UNLOCK(txq);
2516 		return (0);
2517 	}
2518 	VTNET_TXQ_UNLOCK(txq);
2519 
2520 	if_printf(sc->vtnet_ifp, "watchdog timeout on queue %d\n",
2521 	    txq->vtntx_id);
2522 	return (1);
2523 }
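
/*
 * Timing sketch (illustrative): vtntx_watchdog is set to VTNET_TX_TIMEOUT
 * when frames are enqueued and cleared once the virtqueue drains, so the
 * check above only reports a timeout after the counter, decremented once
 * per vtnet_tick() call (roughly once per second), reaches zero with
 * transmit completions still outstanding.
 */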
2524 
2525 static void
2526 vtnet_rxq_accum_stats(struct vtnet_rxq *rxq, struct vtnet_rxq_stats *accum)
2527 {
2528 	struct vtnet_rxq_stats *st;
2529 
2530 	st = &rxq->vtnrx_stats;
2531 
2532 	accum->vrxs_ipackets += st->vrxs_ipackets;
2533 	accum->vrxs_ibytes += st->vrxs_ibytes;
2534 	accum->vrxs_iqdrops += st->vrxs_iqdrops;
	accum->vrxs_ierrors += st->vrxs_ierrors;
2535 	accum->vrxs_csum += st->vrxs_csum;
2536 	accum->vrxs_csum_failed += st->vrxs_csum_failed;
2537 	accum->vrxs_rescheduled += st->vrxs_rescheduled;
2538 }
2539 
2540 static void
2541 vtnet_txq_accum_stats(struct vtnet_txq *txq, struct vtnet_txq_stats *accum)
2542 {
2543 	struct vtnet_txq_stats *st;
2544 
2545 	st = &txq->vtntx_stats;
2546 
2547 	accum->vtxs_opackets += st->vtxs_opackets;
2548 	accum->vtxs_obytes += st->vtxs_obytes;
	accum->vtxs_omcasts += st->vtxs_omcasts;
2549 	accum->vtxs_csum += st->vtxs_csum;
2550 	accum->vtxs_tso += st->vtxs_tso;
2551 	accum->vtxs_rescheduled += st->vtxs_rescheduled;
2552 }
2553 
2554 static void
2555 vtnet_accumulate_stats(struct vtnet_softc *sc)
2556 {
2557 	struct ifnet *ifp;
2558 	struct vtnet_statistics *st;
2559 	struct vtnet_rxq_stats rxaccum;
2560 	struct vtnet_txq_stats txaccum;
2561 	int i;
2562 
2563 	ifp = sc->vtnet_ifp;
2564 	st = &sc->vtnet_stats;
2565 	bzero(&rxaccum, sizeof(struct vtnet_rxq_stats));
2566 	bzero(&txaccum, sizeof(struct vtnet_txq_stats));
2567 
2568 	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
2569 		vtnet_rxq_accum_stats(&sc->vtnet_rxqs[i], &rxaccum);
2570 		vtnet_txq_accum_stats(&sc->vtnet_txqs[i], &txaccum);
2571 	}
2572 
2573 	st->rx_csum_offloaded = rxaccum.vrxs_csum;
2574 	st->rx_csum_failed = rxaccum.vrxs_csum_failed;
2575 	st->rx_task_rescheduled = rxaccum.vrxs_rescheduled;
2576 	st->tx_csum_offloaded = txaccum.vtxs_csum;
2577 	st->tx_tso_offloaded = txaccum.vtxs_tso;
2578 	st->tx_task_rescheduled = txaccum.vtxs_rescheduled;
2579 
2580 	/*
2581 	 * With the exception of if_ierrors, these ifnet statistics are
2582 	 * only updated in the driver, so just set them to our accumulated
2583 	 * values. if_ierrors is updated in ether_input() for malformed
2584 	 * frames that we should have already discarded.
2585 	 */
2586 	ifp->if_ipackets = rxaccum.vrxs_ipackets;
2587 	ifp->if_iqdrops = rxaccum.vrxs_iqdrops;
2588 	ifp->if_ierrors = rxaccum.vrxs_ierrors;
2589 	ifp->if_opackets = txaccum.vtxs_opackets;
2590 #ifndef VTNET_LEGACY_TX
2591 	ifp->if_obytes = txaccum.vtxs_obytes;
2592 	ifp->if_omcasts = txaccum.vtxs_omcasts;
2593 #endif
2594 }
2595 
2596 static void
2597 vtnet_tick(void *xsc)
2598 {
2599 	struct vtnet_softc *sc;
2600 	struct ifnet *ifp;
2601 	int i, timedout;
2602 
2603 	sc = xsc;
2604 	ifp = sc->vtnet_ifp;
2605 	timedout = 0;
2606 
2607 	VTNET_CORE_LOCK_ASSERT(sc);
2608 	vtnet_accumulate_stats(sc);
2609 
2610 	for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
2611 		timedout |= vtnet_watchdog(&sc->vtnet_txqs[i]);
2612 
2613 	if (timedout != 0) {
2614 		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2615 		vtnet_init_locked(sc);
2616 	} else
2617 		callout_schedule(&sc->vtnet_tick_ch, hz);
2618 }
2619 
2620 static void
2621 vtnet_start_taskqueues(struct vtnet_softc *sc)
2622 {
2623 	device_t dev;
2624 	struct vtnet_rxq *rxq;
2625 	struct vtnet_txq *txq;
2626 	int i, error;
2627 
2628 	dev = sc->vtnet_dev;
2629 
2630 	/*
2631 	 * Errors here are very difficult to recover from: we cannot
2632 	 * easily fail because, if this happens during boot, we would hang
2633 	 * when freeing any successfully started taskqueues since the
2634 	 * scheduler is not up yet.
2635 	 *
2636 	 * Most drivers just ignore the return value - it only fails
2637 	 * with ENOMEM so an error is not likely.
2638 	 */
2639 	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
2640 		rxq = &sc->vtnet_rxqs[i];
2641 		error = taskqueue_start_threads(&rxq->vtnrx_tq, 1, PI_NET,
2642 		    "%s rxq %d", device_get_nameunit(dev), rxq->vtnrx_id);
2643 		if (error) {
2644 			device_printf(dev, "failed to start rx taskq %d\n",
2645 			    rxq->vtnrx_id);
2646 		}
2647 
2648 		txq = &sc->vtnet_txqs[i];
2649 		error = taskqueue_start_threads(&txq->vtntx_tq, 1, PI_NET,
2650 		    "%s txq %d", device_get_nameunit(dev), txq->vtntx_id);
2651 		if (error) {
2652 			device_printf(dev, "failed to start tx taskq %d\n",
2653 			    txq->vtntx_id);
2654 		}
2655 	}
2656 }
2657 
2658 static void
2659 vtnet_free_taskqueues(struct vtnet_softc *sc)
2660 {
2661 	struct vtnet_rxq *rxq;
2662 	struct vtnet_txq *txq;
2663 	int i;
2664 
2665 	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
2666 		rxq = &sc->vtnet_rxqs[i];
2667 		if (rxq->vtnrx_tq != NULL) {
2668 			taskqueue_free(rxq->vtnrx_tq);
2669 			rxq->vtnrx_tq = NULL;
2670 		}
2671 
2672 		txq = &sc->vtnet_txqs[i];
2673 		if (txq->vtntx_tq != NULL) {
2674 			taskqueue_free(txq->vtntx_tq);
2675 			txq->vtntx_tq = NULL;
2676 		}
2677 	}
2678 }
2679 
2680 static void
2681 vtnet_drain_taskqueues(struct vtnet_softc *sc)
2682 {
2683 	struct vtnet_rxq *rxq;
2684 	struct vtnet_txq *txq;
2685 	int i;
2686 
2687 	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
2688 		rxq = &sc->vtnet_rxqs[i];
2689 		if (rxq->vtnrx_tq != NULL)
2690 			taskqueue_drain(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
2691 
2692 		txq = &sc->vtnet_txqs[i];
2693 		if (txq->vtntx_tq != NULL) {
2694 			taskqueue_drain(txq->vtntx_tq, &txq->vtntx_intrtask);
2695 #ifndef VTNET_LEGACY_TX
2696 			taskqueue_drain(txq->vtntx_tq, &txq->vtntx_defrtask);
2697 #endif
2698 		}
2699 	}
2700 }
2701 
2702 static void
2703 vtnet_drain_rxtx_queues(struct vtnet_softc *sc)
2704 {
2705 	struct vtnet_rxq *rxq;
2706 	struct vtnet_txq *txq;
2707 	int i;
2708 
2709 	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
2710 		rxq = &sc->vtnet_rxqs[i];
2711 		vtnet_rxq_free_mbufs(rxq);
2712 
2713 		txq = &sc->vtnet_txqs[i];
2714 		vtnet_txq_free_mbufs(txq);
2715 	}
2716 }
2717 
2718 static void
2719 vtnet_stop_rendezvous(struct vtnet_softc *sc)
2720 {
2721 	struct vtnet_rxq *rxq;
2722 	struct vtnet_txq *txq;
2723 	int i;
2724 
2725 	/*
2726 	 * Lock and unlock the per-queue mutex so we know the stop
2727 	 * state is visible. Doing only the active queues should be
2728 	 * sufficient, but it does not cost much extra to do all the
2729 	 * queues. Note we hold the core mutex here too.
2730 	 */
2731 	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
2732 		rxq = &sc->vtnet_rxqs[i];
2733 		VTNET_RXQ_LOCK(rxq);
2734 		VTNET_RXQ_UNLOCK(rxq);
2735 
2736 		txq = &sc->vtnet_txqs[i];
2737 		VTNET_TXQ_LOCK(txq);
2738 		VTNET_TXQ_UNLOCK(txq);
2739 	}
2740 }
2741 
2742 static void
2743 vtnet_stop(struct vtnet_softc *sc)
2744 {
2745 	device_t dev;
2746 	struct ifnet *ifp;
2747 
2748 	dev = sc->vtnet_dev;
2749 	ifp = sc->vtnet_ifp;
2750 
2751 	VTNET_CORE_LOCK_ASSERT(sc);
2752 
2753 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2754 	sc->vtnet_link_active = 0;
2755 	callout_stop(&sc->vtnet_tick_ch);
2756 
2757 	/* Only advisory. */
2758 	vtnet_disable_interrupts(sc);
2759 
2760 	/*
2761 	 * Stop the host adapter. This resets it to the pre-initialized
2762 	 * state. It will not generate any interrupts until after it is
2763 	 * reinitialized.
2764 	 */
2765 	virtio_stop(dev);
2766 	vtnet_stop_rendezvous(sc);
2767 
2768 	/* Free any mbufs left in the virtqueues. */
2769 	vtnet_drain_rxtx_queues(sc);
2770 }
2771 
2772 static int
2773 vtnet_virtio_reinit(struct vtnet_softc *sc)
2774 {
2775 	device_t dev;
2776 	struct ifnet *ifp;
2777 	uint64_t features;
2778 	int mask, error;
2779 
2780 	dev = sc->vtnet_dev;
2781 	ifp = sc->vtnet_ifp;
2782 	features = sc->vtnet_features;
2783 
2784 	mask = 0;
2785 #if defined(INET)
2786 	mask |= IFCAP_RXCSUM;
2787 #endif
2788 #if defined (INET6)
2789 	mask |= IFCAP_RXCSUM_IPV6;
2790 #endif
2791 
2792 	/*
2793 	 * Re-negotiate with the host, removing any disabled receive
2794 	 * features. Transmit features are disabled only on our side
2795 	 * via if_capenable and if_hwassist.
2796 	 */
2797 
2798 	if (ifp->if_capabilities & mask) {
2799 		/*
2800 		 * We require both IPv4 and IPv6 offloading to be enabled
2801 		 * in order to negotiate it: VirtIO does not distinguish
2802 		 * between the two.
2803 		 */
2804 		if ((ifp->if_capenable & mask) != mask)
2805 			features &= ~VIRTIO_NET_F_GUEST_CSUM;
2806 	}
2807 
2808 	if (ifp->if_capabilities & IFCAP_LRO) {
2809 		if ((ifp->if_capenable & IFCAP_LRO) == 0)
2810 			features &= ~VTNET_LRO_FEATURES;
2811 	}
2812 
2813 	if (ifp->if_capabilities & IFCAP_VLAN_HWFILTER) {
2814 		if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
2815 			features &= ~VIRTIO_NET_F_CTRL_VLAN;
2816 	}
2817 
2818 	error = virtio_reinit(dev, features);
2819 	if (error)
2820 		device_printf(dev, "virtio reinit error %d\n", error);
2821 
2822 	return (error);
2823 }
2824 
2825 static void
2826 vtnet_init_rx_filters(struct vtnet_softc *sc)
2827 {
2828 	struct ifnet *ifp;
2829 
2830 	ifp = sc->vtnet_ifp;
2831 
2832 	if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
2833 		/* Restore promiscuous and all-multicast modes. */
2834 		vtnet_rx_filter(sc);
2835 		/* Restore filtered MAC addresses. */
2836 		vtnet_rx_filter_mac(sc);
2837 	}
2838 
2839 	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
2840 		vtnet_rx_filter_vlan(sc);
2841 }
2842 
2843 static int
2844 vtnet_init_rx_queues(struct vtnet_softc *sc)
2845 {
2846 	device_t dev;
2847 	struct vtnet_rxq *rxq;
2848 	int i, clsize, error;
2849 
2850 	dev = sc->vtnet_dev;
2851 
2852 	/*
2853 	 * Use the new cluster size if one has been set (via an MTU
2854 	 * change). Otherwise, use the standard 2K clusters.
2855 	 *
2856 	 * BMV: It might make sense to use page sized clusters as
2857 	 * the default (depending on the features negotiated).
2858 	 */
2859 	if (sc->vtnet_rx_new_clsize != 0) {
2860 		clsize = sc->vtnet_rx_new_clsize;
2861 		sc->vtnet_rx_new_clsize = 0;
2862 	} else
2863 		clsize = MCLBYTES;
2864 
2865 	sc->vtnet_rx_clsize = clsize;
2866 	sc->vtnet_rx_nmbufs = VTNET_NEEDED_RX_MBUFS(sc, clsize);
2867 
2868 	KASSERT(sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS ||
2869 	    sc->vtnet_rx_nmbufs < sc->vtnet_rx_nsegs,
2870 	    ("%s: too many rx mbufs %d for %d segments", __func__,
2871 	    sc->vtnet_rx_nmbufs, sc->vtnet_rx_nsegs));
2872 
2873 	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
2874 		rxq = &sc->vtnet_rxqs[i];
2875 
2876 		/* Hold the lock to satisfy asserts. */
2877 		VTNET_RXQ_LOCK(rxq);
2878 		error = vtnet_rxq_populate(rxq);
2879 		VTNET_RXQ_UNLOCK(rxq);
2880 
2881 		if (error) {
2882 			device_printf(dev,
2883 			    "cannot allocate mbufs for Rx queue %d\n", i);
2884 			return (error);
2885 		}
2886 	}
2887 
2888 	return (0);
2889 }
2890 
2891 static int
2892 vtnet_init_tx_queues(struct vtnet_softc *sc)
2893 {
2894 	struct vtnet_txq *txq;
2895 	int i;
2896 
2897 	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
2898 		txq = &sc->vtnet_txqs[i];
2899 		txq->vtntx_watchdog = 0;
2900 	}
2901 
2902 	return (0);
2903 }
2904 
2905 static int
2906 vtnet_init_rxtx_queues(struct vtnet_softc *sc)
2907 {
2908 	int error;
2909 
2910 	error = vtnet_init_rx_queues(sc);
2911 	if (error)
2912 		return (error);
2913 
2914 	error = vtnet_init_tx_queues(sc);
2915 	if (error)
2916 		return (error);
2917 
2918 	return (0);
2919 }
2920 
2921 static void
2922 vtnet_set_active_vq_pairs(struct vtnet_softc *sc)
2923 {
2924 	device_t dev;
2925 	int npairs;
2926 
2927 	dev = sc->vtnet_dev;
2928 
2929 	if ((sc->vtnet_flags & VTNET_FLAG_MULTIQ) == 0) {
2930 		MPASS(sc->vtnet_max_vq_pairs == 1);
2931 		sc->vtnet_act_vq_pairs = 1;
2932 		return;
2933 	}
2934 
2935 	/* BMV: Just use the maximum configured for now. */
2936 	npairs = sc->vtnet_max_vq_pairs;
2937 
2938 	if (vtnet_ctrl_mq_cmd(sc, npairs) != 0) {
2939 		device_printf(dev,
2940 		    "cannot set active queue pairs to %d\n", npairs);
2941 		npairs = 1;
2942 	}
2943 
2944 	sc->vtnet_act_vq_pairs = npairs;
2945 }
2946 
2947 static int
2948 vtnet_reinit(struct vtnet_softc *sc)
2949 {
2950 	device_t dev;
2951 	struct ifnet *ifp;
2952 	int error;
2953 
2954 	dev = sc->vtnet_dev;
2955 	ifp = sc->vtnet_ifp;
2956 
2957 	/* Use the current MAC address. */
2958 	bcopy(IF_LLADDR(ifp), sc->vtnet_hwaddr, ETHER_ADDR_LEN);
2959 	vtnet_set_hwaddr(sc);
2960 
2961 	vtnet_set_active_vq_pairs(sc);
2962 
2963 	ifp->if_hwassist = 0;
2964 	if (ifp->if_capenable & IFCAP_TXCSUM)
2965 		ifp->if_hwassist |= VTNET_CSUM_OFFLOAD;
2966 	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
2967 		ifp->if_hwassist |= VTNET_CSUM_OFFLOAD_IPV6;
2968 	if (ifp->if_capenable & IFCAP_TSO4)
2969 		ifp->if_hwassist |= CSUM_TSO;
2970 	if (ifp->if_capenable & IFCAP_TSO6)
2971 		ifp->if_hwassist |= CSUM_TSO; /* No CSUM_TSO_IPV6. */
2972 
2973 	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ)
2974 		vtnet_init_rx_filters(sc);
2975 
2976 	error = vtnet_init_rxtx_queues(sc);
2977 	if (error)
2978 		return (error);
2979 
2980 	vtnet_enable_interrupts(sc);
2981 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2982 
2983 	return (0);
2984 }
2985 
2986 static void
2987 vtnet_init_locked(struct vtnet_softc *sc)
2988 {
2989 	device_t dev;
2990 	struct ifnet *ifp;
2991 
2992 	dev = sc->vtnet_dev;
2993 	ifp = sc->vtnet_ifp;
2994 
2995 	VTNET_CORE_LOCK_ASSERT(sc);
2996 
2997 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2998 		return;
2999 
3000 	vtnet_stop(sc);
3001 
3002 	/* Reinitialize with the host. */
3003 	if (vtnet_virtio_reinit(sc) != 0)
3004 		goto fail;
3005 
3006 	if (vtnet_reinit(sc) != 0)
3007 		goto fail;
3008 
3009 	virtio_reinit_complete(dev);
3010 
3011 	vtnet_update_link_status(sc);
3012 	callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc);
3013 
3014 	return;
3015 
3016 fail:
3017 	vtnet_stop(sc);
3018 }
3019 
3020 static void
3021 vtnet_init(void *xsc)
3022 {
3023 	struct vtnet_softc *sc;
3024 
3025 	sc = xsc;
3026 
3027 	VTNET_CORE_LOCK(sc);
3028 	vtnet_init_locked(sc);
3029 	VTNET_CORE_UNLOCK(sc);
3030 }
3031 
3032 static void
3033 vtnet_free_ctrl_vq(struct vtnet_softc *sc)
3034 {
3035 	struct virtqueue *vq;
3036 
3037 	vq = sc->vtnet_ctrl_vq;
3038 
3039 	/*
3040 	 * The control virtqueue is only polled and therefore it should
3041 	 * already be empty.
3042 	 */
3043 	KASSERT(virtqueue_empty(vq),
3044 	    ("%s: ctrl vq %p not empty", __func__, vq));
3045 }
3046 
3047 static void
3048 vtnet_exec_ctrl_cmd(struct vtnet_softc *sc, void *cookie,
3049     struct sglist *sg, int readable, int writable)
3050 {
3051 	struct virtqueue *vq;
3052 
3053 	vq = sc->vtnet_ctrl_vq;
3054 
3055 	VTNET_CORE_LOCK_ASSERT(sc);
3056 	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_VQ,
3057 	    ("%s: CTRL_VQ feature not negotiated", __func__));
3058 
3059 	if (!virtqueue_empty(vq))
3060 		return;
3061 	if (virtqueue_enqueue(vq, cookie, sg, readable, writable) != 0)
3062 		return;
3063 
3064 	/*
3065 	 * Poll for the response, but the command is likely already
3066 	 * done when we return from the notify.
3067 	 */
3068 	virtqueue_notify(vq);
3069 	virtqueue_poll(vq, NULL);
3070 }
3071 
3072 static int
3073 vtnet_ctrl_mac_cmd(struct vtnet_softc *sc, uint8_t *hwaddr)
3074 {
3075 	struct virtio_net_ctrl_hdr hdr;
3076 	struct sglist_seg segs[3];
3077 	struct sglist sg;
3078 	uint8_t ack;
3079 	int error;
3080 
3081 	hdr.class = VIRTIO_NET_CTRL_MAC;
3082 	hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;
3083 	ack = VIRTIO_NET_ERR;
3084 
3085 	sglist_init(&sg, 3, segs);
3086 	error = 0;
3087 	error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
3088 	error |= sglist_append(&sg, hwaddr, ETHER_ADDR_LEN);
3089 	error |= sglist_append(&sg, &ack, sizeof(uint8_t));
3090 	KASSERT(error == 0 && sg.sg_nseg == 3,
3091 	    ("%s: error %d adding set MAC msg to sglist", __func__, error));
3092 
3093 	vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);
3094 
3095 	return (ack == VIRTIO_NET_OK ? 0 : EIO);
3096 }
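
/*
 * Buffer layout note (illustrative): the sglist built above carries three
 * segments; the command header and MAC address are handed to
 * vtnet_exec_ctrl_cmd() as the sg.sg_nseg - 1 = 2 readable (device-read)
 * segments, and the trailing ack byte is the single writable segment the
 * host fills in with VIRTIO_NET_OK or VIRTIO_NET_ERR.
 */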
3097 
3098 static int
3099 vtnet_ctrl_mq_cmd(struct vtnet_softc *sc, uint16_t npairs)
3100 {
3101 	struct sglist_seg segs[3];
3102 	struct sglist sg;
3103 	struct {
3104 		struct virtio_net_ctrl_hdr hdr;
3105 		uint8_t pad1;
3106 		struct virtio_net_ctrl_mq mq;
3107 		uint8_t pad2;
3108 		uint8_t ack;
3109 	} s;
3110 	int error;
3111 
3112 	s.hdr.class = VIRTIO_NET_CTRL_MQ;
3113 	s.hdr.cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET;
3114 	s.mq.virtqueue_pairs = npairs;
3115 	s.ack = VIRTIO_NET_ERR;
3116 
3117 	sglist_init(&sg, 3, segs);
3118 	error = 0;
3119 	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
3120 	error |= sglist_append(&sg, &s.mq, sizeof(struct virtio_net_ctrl_mq));
3121 	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
3122 	KASSERT(error == 0 && sg.sg_nseg == 3,
3123 	    ("%s: error %d adding MQ message to sglist", __func__, error));
3124 
3125 	vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
3126 
3127 	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
3128 }
3129 
3130 static int
3131 vtnet_ctrl_rx_cmd(struct vtnet_softc *sc, int cmd, int on)
3132 {
3133 	struct sglist_seg segs[3];
3134 	struct sglist sg;
3135 	struct {
3136 		struct virtio_net_ctrl_hdr hdr;
3137 		uint8_t pad1;
3138 		uint8_t onoff;
3139 		uint8_t pad2;
3140 		uint8_t ack;
3141 	} s;
3142 	int error;
3143 
3144 	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
3145 	    ("%s: CTRL_RX feature not negotiated", __func__));
3146 
3147 	s.hdr.class = VIRTIO_NET_CTRL_RX;
3148 	s.hdr.cmd = cmd;
3149 	s.onoff = !!on;
3150 	s.ack = VIRTIO_NET_ERR;
3151 
3152 	sglist_init(&sg, 3, segs);
3153 	error = 0;
3154 	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
3155 	error |= sglist_append(&sg, &s.onoff, sizeof(uint8_t));
3156 	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
3157 	KASSERT(error == 0 && sg.sg_nseg == 3,
3158 	    ("%s: error %d adding Rx message to sglist", __func__, error));
3159 
3160 	vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
3161 
3162 	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
3163 }
3164 
3165 static int
3166 vtnet_set_promisc(struct vtnet_softc *sc, int on)
3167 {
3168 
3169 	return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_PROMISC, on));
3170 }
3171 
3172 static int
3173 vtnet_set_allmulti(struct vtnet_softc *sc, int on)
3174 {
3175 
3176 	return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, on));
3177 }
3178 
3179 /*
3180  * The device defaults to promiscuous mode for backwards compatibility.
3181  * Turn it off at attach time if possible.
3182  */
3183 static void
3184 vtnet_attach_disable_promisc(struct vtnet_softc *sc)
3185 {
3186 	struct ifnet *ifp;
3187 
3188 	ifp = sc->vtnet_ifp;
3189 
3190 	VTNET_CORE_LOCK(sc);
3191 	if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) == 0) {
3192 		ifp->if_flags |= IFF_PROMISC;
3193 	} else if (vtnet_set_promisc(sc, 0) != 0) {
3194 		ifp->if_flags |= IFF_PROMISC;
3195 		device_printf(sc->vtnet_dev,
3196 		    "cannot disable default promiscuous mode\n");
3197 	}
3198 	VTNET_CORE_UNLOCK(sc);
3199 }
3200 
3201 static void
3202 vtnet_rx_filter(struct vtnet_softc *sc)
3203 {
3204 	device_t dev;
3205 	struct ifnet *ifp;
3206 
3207 	dev = sc->vtnet_dev;
3208 	ifp = sc->vtnet_ifp;
3209 
3210 	VTNET_CORE_LOCK_ASSERT(sc);
3211 
3212 	if (vtnet_set_promisc(sc, ifp->if_flags & IFF_PROMISC) != 0)
3213 		device_printf(dev, "cannot %s promiscuous mode\n",
3214 		    ifp->if_flags & IFF_PROMISC ? "enable" : "disable");
3215 
3216 	if (vtnet_set_allmulti(sc, ifp->if_flags & IFF_ALLMULTI) != 0)
3217 		device_printf(dev, "cannot %s all-multicast mode\n",
3218 		    ifp->if_flags & IFF_ALLMULTI ? "enable" : "disable");
3219 }
3220 
3221 static void
3222 vtnet_rx_filter_mac(struct vtnet_softc *sc)
3223 {
3224 	struct virtio_net_ctrl_hdr hdr;
3225 	struct vtnet_mac_filter *filter;
3226 	struct sglist_seg segs[4];
3227 	struct sglist sg;
3228 	struct ifnet *ifp;
3229 	struct ifaddr *ifa;
3230 	struct ifmultiaddr *ifma;
3231 	int ucnt, mcnt, promisc, allmulti, error;
3232 	uint8_t ack;
3233 
3234 	ifp = sc->vtnet_ifp;
3235 	filter = sc->vtnet_mac_filter;
3236 	ucnt = 0;
3237 	mcnt = 0;
3238 	promisc = 0;
3239 	allmulti = 0;
3240 
3241 	VTNET_CORE_LOCK_ASSERT(sc);
3242 	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
3243 	    ("%s: CTRL_RX feature not negotiated", __func__));
3244 
3245 	/* Unicast MAC addresses: */
3246 	if_addr_rlock(ifp);
3247 	TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
3248 		if (ifa->ifa_addr->sa_family != AF_LINK)
3249 			continue;
3250 		else if (memcmp(LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
3251 		    sc->vtnet_hwaddr, ETHER_ADDR_LEN) == 0)
3252 			continue;
3253 		else if (ucnt == VTNET_MAX_MAC_ENTRIES) {
3254 			promisc = 1;
3255 			break;
3256 		}
3257 
3258 		bcopy(LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
3259 		    &filter->vmf_unicast.macs[ucnt], ETHER_ADDR_LEN);
3260 		ucnt++;
3261 	}
3262 	if_addr_runlock(ifp);
3263 
3264 	if (promisc != 0) {
3265 		filter->vmf_unicast.nentries = 0;
3266 		if_printf(ifp, "more than %d MAC addresses assigned, "
3267 		    "falling back to promiscuous mode\n",
3268 		    VTNET_MAX_MAC_ENTRIES);
3269 	} else
3270 		filter->vmf_unicast.nentries = ucnt;
3271 
3272 	/* Multicast MAC addresses: */
3273 	if_maddr_rlock(ifp);
3274 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
3275 		if (ifma->ifma_addr->sa_family != AF_LINK)
3276 			continue;
3277 		else if (mcnt == VTNET_MAX_MAC_ENTRIES) {
3278 			allmulti = 1;
3279 			break;
3280 		}
3281 
3282 		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
3283 		    &filter->vmf_multicast.macs[mcnt], ETHER_ADDR_LEN);
3284 		mcnt++;
3285 	}
3286 	if_maddr_runlock(ifp);
3287 
3288 	if (allmulti != 0) {
3289 		filter->vmf_multicast.nentries = 0;
3290 		if_printf(ifp, "more than %d multicast MAC addresses "
3291 		    "assigned, falling back to all-multicast mode\n",
3292 		    VTNET_MAX_MAC_ENTRIES);
3293 	} else
3294 		filter->vmf_multicast.nentries = mcnt;
3295 
3296 	if (promisc != 0 && allmulti != 0)
3297 		goto out;
3298 
3299 	hdr.class = VIRTIO_NET_CTRL_MAC;
3300 	hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
3301 	ack = VIRTIO_NET_ERR;
3302 
3303 	sglist_init(&sg, 4, segs);
3304 	error = 0;
3305 	error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
3306 	error |= sglist_append(&sg, &filter->vmf_unicast,
3307 	    sizeof(uint32_t) + filter->vmf_unicast.nentries * ETHER_ADDR_LEN);
3308 	error |= sglist_append(&sg, &filter->vmf_multicast,
3309 	    sizeof(uint32_t) + filter->vmf_multicast.nentries * ETHER_ADDR_LEN);
3310 	error |= sglist_append(&sg, &ack, sizeof(uint8_t));
3311 	KASSERT(error == 0 && sg.sg_nseg == 4,
3312 	    ("%s: error %d adding MAC filter msg to sglist", __func__, error));
3313 
3314 	vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);
3315 
3316 	if (ack != VIRTIO_NET_OK)
3317 		if_printf(ifp, "error setting host MAC filter table\n");
3318 
3319 out:
3320 	if (promisc != 0 && vtnet_set_promisc(sc, 1) != 0)
3321 		if_printf(ifp, "cannot enable promiscuous mode\n");
3322 	if (allmulti != 0 && vtnet_set_allmulti(sc, 1) != 0)
3323 		if_printf(ifp, "cannot enable all-multicast mode\n");
3324 }
3325 
3326 static int
3327 vtnet_exec_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
3328 {
3329 	struct sglist_seg segs[3];
3330 	struct sglist sg;
3331 	struct {
3332 		struct virtio_net_ctrl_hdr hdr;
3333 		uint8_t pad1;
3334 		uint16_t tag;
3335 		uint8_t pad2;
3336 		uint8_t ack;
3337 	} s;
3338 	int error;
3339 
3340 	s.hdr.class = VIRTIO_NET_CTRL_VLAN;
3341 	s.hdr.cmd = add ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL;
3342 	s.tag = tag;
3343 	s.ack = VIRTIO_NET_ERR;
3344 
3345 	sglist_init(&sg, 3, segs);
3346 	error = 0;
3347 	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
3348 	error |= sglist_append(&sg, &s.tag, sizeof(uint16_t));
3349 	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
3350 	KASSERT(error == 0 && sg.sg_nseg == 3,
3351 	    ("%s: error %d adding VLAN message to sglist", __func__, error));
3352 
3353 	vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
3354 
3355 	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
3356 }
3357 
3358 static void
3359 vtnet_rx_filter_vlan(struct vtnet_softc *sc)
3360 {
3361 	uint32_t w;
3362 	uint16_t tag;
3363 	int i, bit;
3364 
3365 	VTNET_CORE_LOCK_ASSERT(sc);
3366 	KASSERT(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER,
3367 	    ("%s: VLAN_FILTER feature not negotiated", __func__));
3368 
3369 	/* Enable the filter for each configured VLAN. */
3370 	for (i = 0; i < VTNET_VLAN_FILTER_NWORDS; i++) {
3371 		w = sc->vtnet_vlan_filter[i];
3372 
3373 		while ((bit = ffs(w) - 1) != -1) {
3374 			w &= ~(1 << bit);
3375 			tag = sizeof(w) * CHAR_BIT * i + bit;
3376 
3377 			if (vtnet_exec_vlan_filter(sc, 1, tag) != 0) {
3378 				device_printf(sc->vtnet_dev,
3379 				    "cannot enable VLAN %d filter\n", tag);
3380 			}
3381 		}
3382 	}
3383 }
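
/*
 * Worked example of the bitmap walk above (illustrative): if word i = 2
 * of vtnet_vlan_filter holds 0x22, ffs() yields bits 1 and 5, so the
 * host filter is enabled for VLAN tags 32 * 2 + 1 = 65 and
 * 32 * 2 + 5 = 69.
 */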
3384 
3385 static void
3386 vtnet_update_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
3387 {
3388 	struct ifnet *ifp;
3389 	int idx, bit;
3390 
3391 	ifp = sc->vtnet_ifp;
3392 	idx = (tag >> 5) & 0x7F;
3393 	bit = tag & 0x1F;
3394 
3395 	if (tag == 0 || tag > 4095)
3396 		return;
3397 
3398 	VTNET_CORE_LOCK(sc);
3399 
3400 	if (add)
3401 		sc->vtnet_vlan_filter[idx] |= (1 << bit);
3402 	else
3403 		sc->vtnet_vlan_filter[idx] &= ~(1 << bit);
3404 
3405 	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER &&
3406 	    vtnet_exec_vlan_filter(sc, add, tag) != 0) {
3407 		device_printf(sc->vtnet_dev,
3408 		    "cannot %s VLAN %d %s the host filter table\n",
3409 		    add ? "add" : "remove", tag, add ? "to" : "from");
3410 	}
3411 
3412 	VTNET_CORE_UNLOCK(sc);
3413 }
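
/*
 * Worked example of the index math above (illustrative): VLAN tag 100
 * lands in word idx = (100 >> 5) & 0x7F = 3, bit = 100 & 0x1F = 4, i.e.
 * bit 4 of vtnet_vlan_filter[3]; tag 0 and tags above 4095 are rejected
 * before the table is touched.
 */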
3414 
3415 static void
3416 vtnet_register_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
3417 {
3418 
3419 	if (ifp->if_softc != arg)
3420 		return;
3421 
3422 	vtnet_update_vlan_filter(arg, 1, tag);
3423 }
3424 
3425 static void
3426 vtnet_unregister_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
3427 {
3428 
3429 	if (ifp->if_softc != arg)
3430 		return;
3431 
3432 	vtnet_update_vlan_filter(arg, 0, tag);
3433 }
3434 
3435 static int
3436 vtnet_is_link_up(struct vtnet_softc *sc)
3437 {
3438 	device_t dev;
3439 	struct ifnet *ifp;
3440 	uint16_t status;
3441 
3442 	dev = sc->vtnet_dev;
3443 	ifp = sc->vtnet_ifp;
3444 
3445 	if ((ifp->if_capabilities & IFCAP_LINKSTATE) == 0)
3446 		status = VIRTIO_NET_S_LINK_UP;
3447 	else
3448 		status = virtio_read_dev_config_2(dev,
3449 		    offsetof(struct virtio_net_config, status));
3450 
3451 	return ((status & VIRTIO_NET_S_LINK_UP) != 0);
3452 }
3453 
3454 static void
3455 vtnet_update_link_status(struct vtnet_softc *sc)
3456 {
3457 	struct ifnet *ifp;
3458 	int link;
3459 
3460 	ifp = sc->vtnet_ifp;
3461 
3462 	VTNET_CORE_LOCK_ASSERT(sc);
3463 	link = vtnet_is_link_up(sc);
3464 
3465 	/* Notify if the link status has changed. */
3466 	if (link != 0 && sc->vtnet_link_active == 0) {
3467 		sc->vtnet_link_active = 1;
3468 		if_link_state_change(ifp, LINK_STATE_UP);
3469 	} else if (link == 0 && sc->vtnet_link_active != 0) {
3470 		sc->vtnet_link_active = 0;
3471 		if_link_state_change(ifp, LINK_STATE_DOWN);
3472 	}
3473 }
3474 
3475 static int
3476 vtnet_ifmedia_upd(struct ifnet *ifp)
3477 {
3478 	struct vtnet_softc *sc;
3479 	struct ifmedia *ifm;
3480 
3481 	sc = ifp->if_softc;
3482 	ifm = &sc->vtnet_media;
3483 
3484 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3485 		return (EINVAL);
3486 
3487 	return (0);
3488 }
3489 
3490 static void
3491 vtnet_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3492 {
3493 	struct vtnet_softc *sc;
3494 
3495 	sc = ifp->if_softc;
3496 
3497 	ifmr->ifm_status = IFM_AVALID;
3498 	ifmr->ifm_active = IFM_ETHER;
3499 
3500 	VTNET_CORE_LOCK(sc);
3501 	if (vtnet_is_link_up(sc) != 0) {
3502 		ifmr->ifm_status |= IFM_ACTIVE;
3503 		ifmr->ifm_active |= VTNET_MEDIATYPE;
3504 	} else
3505 		ifmr->ifm_active |= IFM_NONE;
3506 	VTNET_CORE_UNLOCK(sc);
3507 }
3508 
3509 static void
3510 vtnet_set_hwaddr(struct vtnet_softc *sc)
3511 {
3512 	device_t dev;
3513 	int i;
3514 
3515 	dev = sc->vtnet_dev;
3516 
3517 	if (sc->vtnet_flags & VTNET_FLAG_CTRL_MAC) {
3518 		if (vtnet_ctrl_mac_cmd(sc, sc->vtnet_hwaddr) != 0)
3519 			device_printf(dev, "unable to set MAC address\n");
3520 	} else if (sc->vtnet_flags & VTNET_FLAG_MAC) {
3521 		for (i = 0; i < ETHER_ADDR_LEN; i++) {
3522 			virtio_write_dev_config_1(dev,
3523 			    offsetof(struct virtio_net_config, mac) + i,
3524 			    sc->vtnet_hwaddr[i]);
3525 		}
3526 	}
3527 }
3528 
3529 static void
3530 vtnet_get_hwaddr(struct vtnet_softc *sc)
3531 {
3532 	device_t dev;
3533 	int i;
3534 
3535 	dev = sc->vtnet_dev;
3536 
3537 	if ((sc->vtnet_flags & VTNET_FLAG_MAC) == 0) {
3538 		/*
3539 		 * Generate a random locally administered unicast address.
3540 		 *
3541 		 * It would be nice to generate the same MAC address across
3542 		 * reboots, but it seems all the hosts currently available
3543 		 * support the MAC feature, so this isn't too important.
3544 		 */
3545 		sc->vtnet_hwaddr[0] = 0xB2;
3546 		arc4rand(&sc->vtnet_hwaddr[1], ETHER_ADDR_LEN - 1, 0);
3547 		vtnet_set_hwaddr(sc);
3548 		return;
3549 	}
3550 
3551 	for (i = 0; i < ETHER_ADDR_LEN; i++) {
3552 		sc->vtnet_hwaddr[i] = virtio_read_dev_config_1(dev,
3553 		    offsetof(struct virtio_net_config, mac) + i);
3554 	}
3555 }
3556 
3557 static void
3558 vtnet_vlan_tag_remove(struct mbuf *m)
3559 {
3560 	struct ether_vlan_header *evh;
3561 
3562 	evh = mtod(m, struct ether_vlan_header *);
3563 	m->m_pkthdr.ether_vtag = ntohs(evh->evl_tag);
3564 	m->m_flags |= M_VLANTAG;
3565 
3566 	/* Strip the 802.1Q header. */
3567 	bcopy((char *) evh, (char *) evh + ETHER_VLAN_ENCAP_LEN,
3568 	    ETHER_HDR_LEN - ETHER_TYPE_LEN);
3569 	m_adj(m, ETHER_VLAN_ENCAP_LEN);
3570 }
3571 
3572 static void
3573 vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *ctx,
3574     struct sysctl_oid_list *child, struct vtnet_rxq *rxq)
3575 {
3576 	struct sysctl_oid *node;
3577 	struct sysctl_oid_list *list;
3578 	struct vtnet_rxq_stats *stats;
3579 	char namebuf[16];
3580 
3581 	snprintf(namebuf, sizeof(namebuf), "rxq%d", rxq->vtnrx_id);
3582 	node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
3583 	    CTLFLAG_RD, NULL, "Receive Queue");
3584 	list = SYSCTL_CHILDREN(node);
3585 
3586 	stats = &rxq->vtnrx_stats;
3587 
3588 	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ipackets", CTLFLAG_RD,
3589 	    &stats->vrxs_ipackets, "Receive packets");
3590 	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ibytes", CTLFLAG_RD,
3591 	    &stats->vrxs_ibytes, "Receive bytes");
3592 	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "iqdrops", CTLFLAG_RD,
3593 	    &stats->vrxs_iqdrops, "Receive drops");
3594 	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ierrors", CTLFLAG_RD,
3595 	    &stats->vrxs_ierrors, "Receive errors");
3596 	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD,
3597 	    &stats->vrxs_csum, "Receive checksum offloaded");
3598 	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum_failed", CTLFLAG_RD,
3599 	    &stats->vrxs_csum_failed, "Receive checksum offload failed");
3600 	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD,
3601 	    &stats->vrxs_rescheduled,
3602 	    "Receive interrupt handler rescheduled");
3603 }
3604 
3605 static void
3606 vtnet_setup_txq_sysctl(struct sysctl_ctx_list *ctx,
3607     struct sysctl_oid_list *child, struct vtnet_txq *txq)
3608 {
3609 	struct sysctl_oid *node;
3610 	struct sysctl_oid_list *list;
3611 	struct vtnet_txq_stats *stats;
3612 	char namebuf[16];
3613 
3614 	snprintf(namebuf, sizeof(namebuf), "txq%d", txq->vtntx_id);
3615 	node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
3616 	    CTLFLAG_RD, NULL, "Transmit Queue");
3617 	list = SYSCTL_CHILDREN(node);
3618 
3619 	stats = &txq->vtntx_stats;
3620 
3621 	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "opackets", CTLFLAG_RD,
3622 	    &stats->vtxs_opackets, "Transmit packets");
3623 	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "obytes", CTLFLAG_RD,
3624 	    &stats->vtxs_obytes, "Transmit bytes");
3625 	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "omcasts", CTLFLAG_RD,
3626 	    &stats->vtxs_omcasts, "Transmit multicasts");
3627 	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD,
3628 	    &stats->vtxs_csum, "Transmit checksum offloaded");
3629 	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "tso", CTLFLAG_RD,
3630 	    &stats->vtxs_tso, "Transmit segmentation offloaded");
3631 	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD,
3632 	    &stats->vtxs_rescheduled,
3633 	    "Transmit interrupt handler rescheduled");
3634 }
3635 
3636 static void
3637 vtnet_setup_queue_sysctl(struct vtnet_softc *sc)
3638 {
3639 	device_t dev;
3640 	struct sysctl_ctx_list *ctx;
3641 	struct sysctl_oid *tree;
3642 	struct sysctl_oid_list *child;
3643 	int i;
3644 
3645 	dev = sc->vtnet_dev;
3646 	ctx = device_get_sysctl_ctx(dev);
3647 	tree = device_get_sysctl_tree(dev);
3648 	child = SYSCTL_CHILDREN(tree);
3649 
3650 	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
3651 		vtnet_setup_rxq_sysctl(ctx, child, &sc->vtnet_rxqs[i]);
3652 		vtnet_setup_txq_sysctl(ctx, child, &sc->vtnet_txqs[i]);
3653 	}
3654 }
3655 
3656 static void
3657 vtnet_setup_stat_sysctl(struct sysctl_ctx_list *ctx,
3658     struct sysctl_oid_list *child, struct vtnet_softc *sc)
3659 {
3660 	struct vtnet_statistics *stats;
3661 
3662 	stats = &sc->vtnet_stats;
3663 
3664 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "mbuf_alloc_failed",
3665 	    CTLFLAG_RD, &stats->mbuf_alloc_failed,
3666 	    "Mbuf cluster allocation failures");
3667 
3668 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_frame_too_large",
3669 	    CTLFLAG_RD, &stats->rx_frame_too_large,
3670 	    "Received frame larger than the mbuf chain");
3671 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_enq_replacement_failed",
3672 	    CTLFLAG_RD, &stats->rx_enq_replacement_failed,
3673 	    "Enqueuing the replacement receive mbuf failed");
3674 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_mergeable_failed",
3675 	    CTLFLAG_RD, &stats->rx_mergeable_failed,
3676 	    "Mergeable buffers receive failures");
3677 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ethtype",
3678 	    CTLFLAG_RD, &stats->rx_csum_bad_ethtype,
3679 	    "Received checksum offloaded buffer with unsupported "
3680 	    "Ethernet type");
3681 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ipproto",
3682 	    CTLFLAG_RD, &stats->rx_csum_bad_ipproto,
3683 	    "Received checksum offloaded buffer with incorrect IP protocol");
3684 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_offset",
3685 	    CTLFLAG_RD, &stats->rx_csum_bad_offset,
3686 	    "Received checksum offloaded buffer with incorrect offset");
3687 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_proto",
3688 	    CTLFLAG_RD, &stats->rx_csum_bad_proto,
3689 	    "Received checksum offloaded buffer with incorrect protocol");
3690 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_failed",
3691 	    CTLFLAG_RD, &stats->rx_csum_failed,
3692 	    "Received buffer checksum offload failed");
3693 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_offloaded",
3694 	    CTLFLAG_RD, &stats->rx_csum_offloaded,
3695 	    "Received buffer checksum offload succeeded");
3696 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_task_rescheduled",
3697 	    CTLFLAG_RD, &stats->rx_task_rescheduled,
3698 	    "Times the receive interrupt task rescheduled itself");
3699 
3700 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_bad_ethtype",
3701 	    CTLFLAG_RD, &stats->tx_csum_bad_ethtype,
3702 	    "Aborted transmit of checksum offloaded buffer with unknown "
3703 	    "Ethernet type");
3704 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_bad_ethtype",
3705 	    CTLFLAG_RD, &stats->tx_tso_bad_ethtype,
3706 	    "Aborted transmit of TSO buffer with unknown Ethernet type");
3707 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_not_tcp",
3708 	    CTLFLAG_RD, &stats->tx_tso_not_tcp,
3709 	    "Aborted transmit of TSO buffer with non TCP protocol");
3710 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defragged",
3711 	    CTLFLAG_RD, &stats->tx_defragged,
3712 	    "Transmit mbufs defragged");
3713 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defrag_failed",
3714 	    CTLFLAG_RD, &stats->tx_defrag_failed,
3715 	    "Aborted transmit of buffer because defrag failed");
3716 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_offloaded",
3717 	    CTLFLAG_RD, &stats->tx_csum_offloaded,
3718 	    "Offloaded checksum of transmitted buffer");
3719 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_offloaded",
3720 	    CTLFLAG_RD, &stats->tx_tso_offloaded,
3721 	    "Segmentation offload of transmitted buffer");
3722 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_task_rescheduled",
3723 	    CTLFLAG_RD, &stats->tx_task_rescheduled,
3724 	    "Times the transmit interrupt task rescheduled itself");
3725 }
3726 
3727 static void
3728 vtnet_setup_sysctl(struct vtnet_softc *sc)
3729 {
3730 	device_t dev;
3731 	struct sysctl_ctx_list *ctx;
3732 	struct sysctl_oid *tree;
3733 	struct sysctl_oid_list *child;
3734 
3735 	dev = sc->vtnet_dev;
3736 	ctx = device_get_sysctl_ctx(dev);
3737 	tree = device_get_sysctl_tree(dev);
3738 	child = SYSCTL_CHILDREN(tree);
3739 
3740 	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "max_vq_pairs",
3741 	    CTLFLAG_RD, &sc->vtnet_max_vq_pairs, 0,
3742 	    "Maximum number of supported virtqueue pairs");
3743 	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "act_vq_pairs",
3744 	    CTLFLAG_RD, &sc->vtnet_act_vq_pairs, 0,
3745 	    "Number of active virtqueue pairs");
3746 
3747 	vtnet_setup_stat_sysctl(ctx, child, sc);
3748 }
3749 
3750 static int
3751 vtnet_rxq_enable_intr(struct vtnet_rxq *rxq)
3752 {
3753 
3754 	return (virtqueue_enable_intr(rxq->vtnrx_vq));
3755 }
3756 
3757 static void
3758 vtnet_rxq_disable_intr(struct vtnet_rxq *rxq)
3759 {
3760 
3761 	virtqueue_disable_intr(rxq->vtnrx_vq);
3762 }
3763 
3764 static int
3765 vtnet_txq_enable_intr(struct vtnet_txq *txq)
3766 {
3767 
3768 	return (virtqueue_postpone_intr(txq->vtntx_vq, VQ_POSTPONE_LONG));
3769 }
3770 
3771 static void
3772 vtnet_txq_disable_intr(struct vtnet_txq *txq)
3773 {
3774 
3775 	virtqueue_disable_intr(txq->vtntx_vq);
3776 }
3777 
3778 static void
3779 vtnet_enable_rx_interrupts(struct vtnet_softc *sc)
3780 {
3781 	int i;
3782 
3783 	for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
3784 		vtnet_rxq_enable_intr(&sc->vtnet_rxqs[i]);
3785 }
3786 
3787 static void
3788 vtnet_enable_tx_interrupts(struct vtnet_softc *sc)
3789 {
3790 	int i;
3791 
3792 	for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
3793 		vtnet_txq_enable_intr(&sc->vtnet_txqs[i]);
3794 }
3795 
3796 static void
3797 vtnet_enable_interrupts(struct vtnet_softc *sc)
3798 {
3799 
3800 	vtnet_enable_rx_interrupts(sc);
3801 	vtnet_enable_tx_interrupts(sc);
3802 }
3803 
3804 static void
3805 vtnet_disable_rx_interrupts(struct vtnet_softc *sc)
3806 {
3807 	int i;
3808 
3809 	for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
3810 		vtnet_rxq_disable_intr(&sc->vtnet_rxqs[i]);
3811 }
3812 
3813 static void
3814 vtnet_disable_tx_interrupts(struct vtnet_softc *sc)
3815 {
3816 	int i;
3817 
3818 	for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
3819 		vtnet_txq_disable_intr(&sc->vtnet_txqs[i]);
3820 }
3821 
3822 static void
3823 vtnet_disable_interrupts(struct vtnet_softc *sc)
3824 {
3825 
3826 	vtnet_disable_rx_interrupts(sc);
3827 	vtnet_disable_tx_interrupts(sc);
3828 }
3829 
3830 static int
3831 vtnet_tunable_int(struct vtnet_softc *sc, const char *knob, int def)
3832 {
3833 	char path[64];
3834 
3835 	snprintf(path, sizeof(path),
3836 	    "hw.vtnet.%d.%s", device_get_unit(sc->vtnet_dev), knob);
3837 	TUNABLE_INT_FETCH(path, &def);
3838 
3839 	return (def);
3840 }
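
/*
 * Usage sketch (knob name assumed, for illustration only): a per-device
 * override such as
 *
 *	hw.vtnet.0.rx_process_limit="256"
 *
 * in loader.conf(5) is fetched here at attach time and replaces the
 * compiled-in default passed in via 'def'; if no such tunable is set,
 * TUNABLE_INT_FETCH() leaves 'def' unchanged and it is returned as-is.
 */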
3841