/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* Driver for VirtIO network devices. */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/random.h>
#include <sys/sglist.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/taskqueue.h>
#include <sys/smp.h>
#include <machine/smp.h>

#include <vm/uma.h>

#include <net/debugnet.h>
#include <net/ethernet.h>
#include <net/pfil.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/if_vlan_var.h>

#include <net/bpf.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>
#include <netinet/tcp_lro.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/network/virtio_net.h>
#include <dev/virtio/network/if_vtnetvar.h>
#include "virtio_if.h"

#include "opt_inet.h"
#include "opt_inet6.h"

#if defined(INET) || defined(INET6)
#include <machine/in_cksum.h>
#endif

static int	vtnet_modevent(module_t, int, void *);

static int	vtnet_probe(device_t);
static int	vtnet_attach(device_t);
static int	vtnet_detach(device_t);
static int	vtnet_suspend(device_t);
static int	vtnet_resume(device_t);
static int	vtnet_shutdown(device_t);
static int	vtnet_attach_completed(device_t);
static int	vtnet_config_change(device_t);

static int	vtnet_negotiate_features(struct vtnet_softc *);
static int	vtnet_setup_features(struct vtnet_softc *);
static int	vtnet_init_rxq(struct vtnet_softc *, int);
static int	vtnet_init_txq(struct vtnet_softc *, int);
static int	vtnet_alloc_rxtx_queues(struct vtnet_softc *);
static void	vtnet_free_rxtx_queues(struct vtnet_softc *);
static int	vtnet_alloc_rx_filters(struct vtnet_softc *);
static void	vtnet_free_rx_filters(struct vtnet_softc *);
static int	vtnet_alloc_virtqueues(struct vtnet_softc *);
static int	vtnet_alloc_interface(struct vtnet_softc *);
static int	vtnet_setup_interface(struct vtnet_softc *);
static int	vtnet_ioctl_mtu(struct vtnet_softc *, u_int);
static int	vtnet_ioctl_ifflags(struct vtnet_softc *);
static int	vtnet_ioctl_multi(struct vtnet_softc *);
static int	vtnet_ioctl_ifcap(struct vtnet_softc *, struct ifreq *);
static int	vtnet_ioctl(struct ifnet *, u_long, caddr_t);
static uint64_t	vtnet_get_counter(struct ifnet *, ift_counter);

static int	vtnet_rxq_populate(struct vtnet_rxq *);
static void	vtnet_rxq_free_mbufs(struct vtnet_rxq *);
static struct mbuf *
		vtnet_rx_alloc_buf(struct vtnet_softc *, int, struct mbuf **);
static int	vtnet_rxq_replace_lro_nomrg_buf(struct vtnet_rxq *,
		    struct mbuf *, int);
static int	vtnet_rxq_replace_buf(struct vtnet_rxq *, struct mbuf *, int);
static int	vtnet_rxq_enqueue_buf(struct vtnet_rxq *, struct mbuf *);
static int	vtnet_rxq_new_buf(struct vtnet_rxq *);
static int	vtnet_rxq_csum_needs_csum(struct vtnet_rxq *, struct mbuf *,
		     uint16_t, int, struct virtio_net_hdr *);
static int	vtnet_rxq_csum_data_valid(struct vtnet_rxq *, struct mbuf *,
		     uint16_t, int, struct virtio_net_hdr *);
static int	vtnet_rxq_csum(struct vtnet_rxq *, struct mbuf *,
		     struct virtio_net_hdr *);
static void	vtnet_rxq_discard_merged_bufs(struct vtnet_rxq *, int);
static void	vtnet_rxq_discard_buf(struct vtnet_rxq *, struct mbuf *);
static int	vtnet_rxq_merged_eof(struct vtnet_rxq *, struct mbuf *, int);
static void	vtnet_rxq_input(struct vtnet_rxq *, struct mbuf *,
		    struct virtio_net_hdr *);
static int	vtnet_rxq_eof(struct vtnet_rxq *);
static void	vtnet_rx_vq_process(struct vtnet_rxq *rxq, int tries);
static void	vtnet_rx_vq_intr(void *);
static void	vtnet_rxq_tq_intr(void *, int);

static int	vtnet_txq_intr_threshold(struct vtnet_txq *);
static int	vtnet_txq_below_threshold(struct vtnet_txq *);
static int	vtnet_txq_notify(struct vtnet_txq *);
static void	vtnet_txq_free_mbufs(struct vtnet_txq *);
static int	vtnet_txq_offload_ctx(struct vtnet_txq *, struct mbuf *,
		    int *, int *, int *);
static int	vtnet_txq_offload_tso(struct vtnet_txq *, struct mbuf *, int,
		    int, struct virtio_net_hdr *);
static struct mbuf *
		vtnet_txq_offload(struct vtnet_txq *, struct mbuf *,
		    struct virtio_net_hdr *);
static int	vtnet_txq_enqueue_buf(struct vtnet_txq *, struct mbuf **,
		    struct vtnet_tx_header *);
static int	vtnet_txq_encap(struct vtnet_txq *, struct mbuf **, int);
#ifdef VTNET_LEGACY_TX
static void	vtnet_start_locked(struct vtnet_txq *, struct ifnet *);
static void	vtnet_start(struct ifnet *);
#else
static int	vtnet_txq_mq_start_locked(struct vtnet_txq *, struct mbuf *);
static int	vtnet_txq_mq_start(struct ifnet *, struct mbuf *);
static void	vtnet_txq_tq_deferred(void *, int);
#endif
static void	vtnet_txq_start(struct vtnet_txq *);
static void	vtnet_txq_tq_intr(void *, int);
static int	vtnet_txq_eof(struct vtnet_txq *);
static void	vtnet_tx_vq_intr(void *);
static void	vtnet_tx_start_all(struct vtnet_softc *);

#ifndef VTNET_LEGACY_TX
static void	vtnet_qflush(struct ifnet *);
#endif

static int	vtnet_watchdog(struct vtnet_txq *);
static void	vtnet_accum_stats(struct vtnet_softc *,
		    struct vtnet_rxq_stats *, struct vtnet_txq_stats *);
static void	vtnet_tick(void *);

static void	vtnet_start_taskqueues(struct vtnet_softc *);
static void	vtnet_free_taskqueues(struct vtnet_softc *);
static void	vtnet_drain_taskqueues(struct vtnet_softc *);

static void	vtnet_drain_rxtx_queues(struct vtnet_softc *);
static void	vtnet_stop_rendezvous(struct vtnet_softc *);
static void	vtnet_stop(struct vtnet_softc *);
static int	vtnet_virtio_reinit(struct vtnet_softc *);
static void	vtnet_init_rx_filters(struct vtnet_softc *);
static int	vtnet_init_rx_queues(struct vtnet_softc *);
static int	vtnet_init_tx_queues(struct vtnet_softc *);
static int	vtnet_init_rxtx_queues(struct vtnet_softc *);
static void	vtnet_set_active_vq_pairs(struct vtnet_softc *);
static void	vtnet_update_rx_offloads(struct vtnet_softc *);
static int	vtnet_reinit(struct vtnet_softc *);
static void	vtnet_init_locked(struct vtnet_softc *, int);
static void	vtnet_init(void *);

static void	vtnet_free_ctrl_vq(struct vtnet_softc *);
static void	vtnet_exec_ctrl_cmd(struct vtnet_softc *, void *,
		    struct sglist *, int, int);
static int	vtnet_ctrl_mac_cmd(struct vtnet_softc *, uint8_t *);
static int	vtnet_ctrl_guest_offloads(struct vtnet_softc *, uint64_t);
static int	vtnet_ctrl_mq_cmd(struct vtnet_softc *, uint16_t);
static int	vtnet_ctrl_rx_cmd(struct vtnet_softc *, uint8_t, bool);
static int	vtnet_set_promisc(struct vtnet_softc *, bool);
static int	vtnet_set_allmulti(struct vtnet_softc *, bool);
static void	vtnet_rx_filter(struct vtnet_softc *);
static void	vtnet_rx_filter_mac(struct vtnet_softc *);
static int	vtnet_exec_vlan_filter(struct vtnet_softc *, int, uint16_t);
static void	vtnet_rx_filter_vlan(struct vtnet_softc *);
static void	vtnet_update_vlan_filter(struct vtnet_softc *, int, uint16_t);
static void	vtnet_register_vlan(void *, struct ifnet *, uint16_t);
static void	vtnet_unregister_vlan(void *, struct ifnet *, uint16_t);

static void	vtnet_update_speed_duplex(struct vtnet_softc *);
static int	vtnet_is_link_up(struct vtnet_softc *);
static void	vtnet_update_link_status(struct vtnet_softc *);
static int	vtnet_ifmedia_upd(struct ifnet *);
static void	vtnet_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void	vtnet_get_macaddr(struct vtnet_softc *);
static void	vtnet_set_macaddr(struct vtnet_softc *);
static void	vtnet_attached_set_macaddr(struct vtnet_softc *);
static void	vtnet_vlan_tag_remove(struct mbuf *);
static void	vtnet_set_rx_process_limit(struct vtnet_softc *);

static void	vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *,
		    struct sysctl_oid_list *, struct vtnet_rxq *);
static void	vtnet_setup_txq_sysctl(struct sysctl_ctx_list *,
		    struct sysctl_oid_list *, struct vtnet_txq *);
static void	vtnet_setup_queue_sysctl(struct vtnet_softc *);
static void	vtnet_load_tunables(struct vtnet_softc *);
static void	vtnet_setup_sysctl(struct vtnet_softc *);

static int	vtnet_rxq_enable_intr(struct vtnet_rxq *);
static void	vtnet_rxq_disable_intr(struct vtnet_rxq *);
static int	vtnet_txq_enable_intr(struct vtnet_txq *);
static void	vtnet_txq_disable_intr(struct vtnet_txq *);
static void	vtnet_enable_rx_interrupts(struct vtnet_softc *);
static void	vtnet_enable_tx_interrupts(struct vtnet_softc *);
static void	vtnet_enable_interrupts(struct vtnet_softc *);
static void	vtnet_disable_rx_interrupts(struct vtnet_softc *);
static void	vtnet_disable_tx_interrupts(struct vtnet_softc *);
static void	vtnet_disable_interrupts(struct vtnet_softc *);

static int	vtnet_tunable_int(struct vtnet_softc *, const char *, int);

DEBUGNET_DEFINE(vtnet);

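/*
 * Byte-order helpers: modern (VIRTIO_F_VERSION_1) devices use little-endian
 * configuration and header fields, while legacy devices use the guest's
 * native byte order, so these select the conversion based on vtnet_modern().
 */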
#define vtnet_htog16(_sc, _val)	virtio_htog16(vtnet_modern(_sc), _val)
#define vtnet_htog32(_sc, _val)	virtio_htog32(vtnet_modern(_sc), _val)
#define vtnet_htog64(_sc, _val)	virtio_htog64(vtnet_modern(_sc), _val)
#define vtnet_gtoh16(_sc, _val)	virtio_gtoh16(vtnet_modern(_sc), _val)
#define vtnet_gtoh32(_sc, _val)	virtio_gtoh32(vtnet_modern(_sc), _val)
#define vtnet_gtoh64(_sc, _val)	virtio_gtoh64(vtnet_modern(_sc), _val)

/* Tunables. */
static SYSCTL_NODE(_hw, OID_AUTO, vtnet, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "VirtIO Net driver parameters");

static int vtnet_csum_disable = 0;
SYSCTL_INT(_hw_vtnet, OID_AUTO, csum_disable, CTLFLAG_RDTUN,
    &vtnet_csum_disable, 0, "Disables receive and send checksum offload");

static int vtnet_fixup_needs_csum = 0;
SYSCTL_INT(_hw_vtnet, OID_AUTO, fixup_needs_csum, CTLFLAG_RDTUN,
    &vtnet_fixup_needs_csum, 0,
    "Calculate valid checksum for NEEDS_CSUM packets");

static int vtnet_tso_disable = 0;
SYSCTL_INT(_hw_vtnet, OID_AUTO, tso_disable, CTLFLAG_RDTUN,
    &vtnet_tso_disable, 0, "Disables TSO");

static int vtnet_lro_disable = 0;
SYSCTL_INT(_hw_vtnet, OID_AUTO, lro_disable, CTLFLAG_RDTUN,
    &vtnet_lro_disable, 0, "Disables hardware LRO");

static int vtnet_mq_disable = 0;
SYSCTL_INT(_hw_vtnet, OID_AUTO, mq_disable, CTLFLAG_RDTUN,
    &vtnet_mq_disable, 0, "Disables multiqueue support");

static int vtnet_mq_max_pairs = VTNET_MAX_QUEUE_PAIRS;
SYSCTL_INT(_hw_vtnet, OID_AUTO, mq_max_pairs, CTLFLAG_RDTUN,
    &vtnet_mq_max_pairs, 0, "Maximum number of multiqueue pairs");

static int vtnet_tso_maxlen = IP_MAXPACKET;
SYSCTL_INT(_hw_vtnet, OID_AUTO, tso_maxlen, CTLFLAG_RDTUN,
    &vtnet_tso_maxlen, 0, "TSO burst limit");

static int vtnet_rx_process_limit = 1024;
SYSCTL_INT(_hw_vtnet, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
    &vtnet_rx_process_limit, 0,
    "Number of RX segments processed in one pass");

static int vtnet_lro_entry_count = 128;
SYSCTL_INT(_hw_vtnet, OID_AUTO, lro_entry_count, CTLFLAG_RDTUN,
    &vtnet_lro_entry_count, 0, "Software LRO entry count");
/* A nonzero value enables sorted LRO and sets the mbuf queue depth. */
static int vtnet_lro_mbufq_depth = 0;
SYSCTL_INT(_hw_vtnet, OID_AUTO, lro_mbufq_depth, CTLFLAG_RDTUN,
    &vtnet_lro_mbufq_depth, 0, "Depth of software LRO mbuf queue");
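
/*
 * Example (illustrative): the knobs above are tunables, set from
 * loader.conf(5) rather than at runtime, e.g.:
 *
 *	hw.vtnet.csum_disable=1
 *	hw.vtnet.0.mq_max_pairs=4
 *
 * vtnet_tunable_int() consults the per-device "hw.vtnet.<unit>.<name>"
 * form first and falls back to the global value.
 */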

static uma_zone_t vtnet_tx_header_zone;

static struct virtio_feature_desc vtnet_feature_desc[] = {
	{ VIRTIO_NET_F_CSUM,			"TxChecksum"		},
	{ VIRTIO_NET_F_GUEST_CSUM,		"RxChecksum"		},
	{ VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,	"CtrlRxOffloads"	},
	{ VIRTIO_NET_F_MAC,			"MAC"			},
	{ VIRTIO_NET_F_GSO,			"TxGSO"			},
	{ VIRTIO_NET_F_GUEST_TSO4,		"RxLROv4"		},
	{ VIRTIO_NET_F_GUEST_TSO6,		"RxLROv6"		},
	{ VIRTIO_NET_F_GUEST_ECN,		"RxLROECN"		},
	{ VIRTIO_NET_F_GUEST_UFO,		"RxUFO"			},
	{ VIRTIO_NET_F_HOST_TSO4,		"TxTSOv4"		},
	{ VIRTIO_NET_F_HOST_TSO6,		"TxTSOv6"		},
	{ VIRTIO_NET_F_HOST_ECN,		"TxTSOECN"		},
	{ VIRTIO_NET_F_HOST_UFO,		"TxUFO"			},
	{ VIRTIO_NET_F_MRG_RXBUF,		"MrgRxBuf"		},
	{ VIRTIO_NET_F_STATUS,			"Status"		},
	{ VIRTIO_NET_F_CTRL_VQ,			"CtrlVq"		},
	{ VIRTIO_NET_F_CTRL_RX,			"CtrlRxMode"		},
	{ VIRTIO_NET_F_CTRL_VLAN,		"CtrlVLANFilter"	},
	{ VIRTIO_NET_F_CTRL_RX_EXTRA,		"CtrlRxModeExtra"	},
	{ VIRTIO_NET_F_GUEST_ANNOUNCE,		"GuestAnnounce"		},
	{ VIRTIO_NET_F_MQ,			"Multiqueue"		},
	{ VIRTIO_NET_F_CTRL_MAC_ADDR,		"CtrlMacAddr"		},
	{ VIRTIO_NET_F_SPEED_DUPLEX,		"SpeedDuplex"		},

	{ 0, NULL }
};

static device_method_t vtnet_methods[] = {
	/* Device methods. */
	DEVMETHOD(device_probe,			vtnet_probe),
	DEVMETHOD(device_attach,		vtnet_attach),
	DEVMETHOD(device_detach,		vtnet_detach),
	DEVMETHOD(device_suspend,		vtnet_suspend),
	DEVMETHOD(device_resume,		vtnet_resume),
	DEVMETHOD(device_shutdown,		vtnet_shutdown),

	/* VirtIO methods. */
	DEVMETHOD(virtio_attach_completed,	vtnet_attach_completed),
	DEVMETHOD(virtio_config_change,		vtnet_config_change),

	DEVMETHOD_END
};

#ifdef DEV_NETMAP
#include <dev/netmap/if_vtnet_netmap.h>
#endif

static driver_t vtnet_driver = {
    .name = "vtnet",
    .methods = vtnet_methods,
    .size = sizeof(struct vtnet_softc)
};
static devclass_t vtnet_devclass;

VIRTIO_DRIVER_MODULE(vtnet, vtnet_driver, vtnet_devclass,
    vtnet_modevent, 0);
MODULE_VERSION(vtnet, 1);
MODULE_DEPEND(vtnet, virtio, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(vtnet, netmap, 1, 1, 1);
#endif

VIRTIO_SIMPLE_PNPINFO(vtnet, VIRTIO_ID_NETWORK, "VirtIO Networking Adapter");

static int
vtnet_modevent(module_t mod __unused, int type, void *unused __unused)
{
	int error = 0;
	static int loaded = 0;

	switch (type) {
	case MOD_LOAD:
		if (loaded++ == 0) {
			vtnet_tx_header_zone = uma_zcreate("vtnet_tx_hdr",
				sizeof(struct vtnet_tx_header),
				NULL, NULL, NULL, NULL, 0, 0);
#ifdef DEBUGNET
			/*
			 * We need to allocate from this zone in the
			 * transmit path, so ensure that we have at least
			 * one item per header available.
			 * XXX add a separate zone like we do for mbufs?
			 * otherwise we may alloc buckets
			 */
			uma_zone_reserve(vtnet_tx_header_zone,
			    DEBUGNET_MAX_IN_FLIGHT * 2);
			uma_prealloc(vtnet_tx_header_zone,
			    DEBUGNET_MAX_IN_FLIGHT * 2);
#endif
		}
		break;
	case MOD_QUIESCE:
		if (uma_zone_get_cur(vtnet_tx_header_zone) > 0)
			error = EBUSY;
		break;
	case MOD_UNLOAD:
		if (--loaded == 0) {
			uma_zdestroy(vtnet_tx_header_zone);
			vtnet_tx_header_zone = NULL;
		}
		break;
	case MOD_SHUTDOWN:
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}

static int
vtnet_probe(device_t dev)
{
	return (VIRTIO_SIMPLE_PROBE(dev, vtnet));
}

static int
vtnet_attach(device_t dev)
{
	struct vtnet_softc *sc;
	int error;

	sc = device_get_softc(dev);
	sc->vtnet_dev = dev;
	virtio_set_feature_desc(dev, vtnet_feature_desc);

	VTNET_CORE_LOCK_INIT(sc);
	callout_init_mtx(&sc->vtnet_tick_ch, VTNET_CORE_MTX(sc), 0);
	vtnet_load_tunables(sc);

	error = vtnet_alloc_interface(sc);
	if (error) {
		device_printf(dev, "cannot allocate interface\n");
		goto fail;
	}

	vtnet_setup_sysctl(sc);

	error = vtnet_setup_features(sc);
	if (error) {
		device_printf(dev, "cannot setup features\n");
		goto fail;
	}

	error = vtnet_alloc_rx_filters(sc);
	if (error) {
		device_printf(dev, "cannot allocate Rx filters\n");
		goto fail;
	}

	error = vtnet_alloc_rxtx_queues(sc);
	if (error) {
		device_printf(dev, "cannot allocate queues\n");
		goto fail;
	}

	error = vtnet_alloc_virtqueues(sc);
	if (error) {
		device_printf(dev, "cannot allocate virtqueues\n");
		goto fail;
	}

	error = vtnet_setup_interface(sc);
	if (error) {
		device_printf(dev, "cannot setup interface\n");
		goto fail;
	}

	error = virtio_setup_intr(dev, INTR_TYPE_NET);
	if (error) {
		device_printf(dev, "cannot setup interrupts\n");
		ether_ifdetach(sc->vtnet_ifp);
		goto fail;
	}

#ifdef DEV_NETMAP
	vtnet_netmap_attach(sc);
#endif
	vtnet_start_taskqueues(sc);

fail:
	if (error)
		vtnet_detach(dev);

	return (error);
}

static int
vtnet_detach(device_t dev)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = sc->vtnet_ifp;

	if (device_is_attached(dev)) {
		VTNET_CORE_LOCK(sc);
		vtnet_stop(sc);
		VTNET_CORE_UNLOCK(sc);

		callout_drain(&sc->vtnet_tick_ch);
		vtnet_drain_taskqueues(sc);

		ether_ifdetach(ifp);
	}

#ifdef DEV_NETMAP
	netmap_detach(ifp);
#endif

	vtnet_free_taskqueues(sc);

	if (sc->vtnet_vlan_attach != NULL) {
		EVENTHANDLER_DEREGISTER(vlan_config, sc->vtnet_vlan_attach);
		sc->vtnet_vlan_attach = NULL;
	}
	if (sc->vtnet_vlan_detach != NULL) {
		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vtnet_vlan_detach);
		sc->vtnet_vlan_detach = NULL;
	}

	ifmedia_removeall(&sc->vtnet_media);

	if (ifp != NULL) {
		if_free(ifp);
		sc->vtnet_ifp = NULL;
	}

	vtnet_free_rxtx_queues(sc);
	vtnet_free_rx_filters(sc);

	if (sc->vtnet_ctrl_vq != NULL)
		vtnet_free_ctrl_vq(sc);

	VTNET_CORE_LOCK_DESTROY(sc);

	return (0);
}

static int
vtnet_suspend(device_t dev)
{
	struct vtnet_softc *sc;

	sc = device_get_softc(dev);

	VTNET_CORE_LOCK(sc);
	vtnet_stop(sc);
	sc->vtnet_flags |= VTNET_FLAG_SUSPENDED;
	VTNET_CORE_UNLOCK(sc);

	return (0);
}

static int
vtnet_resume(device_t dev)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = sc->vtnet_ifp;

	VTNET_CORE_LOCK(sc);
	if (ifp->if_flags & IFF_UP)
		vtnet_init_locked(sc, 0);
	sc->vtnet_flags &= ~VTNET_FLAG_SUSPENDED;
	VTNET_CORE_UNLOCK(sc);

	return (0);
}

static int
vtnet_shutdown(device_t dev)
{
	/*
	 * Suspend already does all of what we need to
	 * do here; we just never expect to be resumed.
	 */
	return (vtnet_suspend(dev));
}

static int
vtnet_attach_completed(device_t dev)
{
	struct vtnet_softc *sc;

	sc = device_get_softc(dev);

	VTNET_CORE_LOCK(sc);
	vtnet_attached_set_macaddr(sc);
	VTNET_CORE_UNLOCK(sc);

	return (0);
}

static int
vtnet_config_change(device_t dev)
{
	struct vtnet_softc *sc;

	sc = device_get_softc(dev);

	VTNET_CORE_LOCK(sc);
	vtnet_update_link_status(sc);
	if (sc->vtnet_link_active != 0)
		vtnet_tx_start_all(sc);
	VTNET_CORE_UNLOCK(sc);

	return (0);
}

static int
vtnet_negotiate_features(struct vtnet_softc *sc)
{
	device_t dev;
	uint64_t features, negotiated_features;
	int no_csum;

	dev = sc->vtnet_dev;
	features = virtio_bus_is_modern(dev) ? VTNET_MODERN_FEATURES :
	    VTNET_LEGACY_FEATURES;

	/*
	 * TSO and LRO are only available when their corresponding checksum
	 * offload feature is also negotiated.
	 */
	no_csum = vtnet_tunable_int(sc, "csum_disable", vtnet_csum_disable);
	if (no_csum)
		features &= ~(VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM);
	if (no_csum || vtnet_tunable_int(sc, "tso_disable", vtnet_tso_disable))
		features &= ~VTNET_TSO_FEATURES;
	if (no_csum || vtnet_tunable_int(sc, "lro_disable", vtnet_lro_disable))
		features &= ~VTNET_LRO_FEATURES;

#ifndef VTNET_LEGACY_TX
	if (vtnet_tunable_int(sc, "mq_disable", vtnet_mq_disable))
		features &= ~VIRTIO_NET_F_MQ;
#else
	features &= ~VIRTIO_NET_F_MQ;
#endif

	negotiated_features = virtio_negotiate_features(dev, features);

	if (virtio_with_feature(dev, VIRTIO_NET_F_MTU)) {
		uint16_t mtu;

		mtu = virtio_read_dev_config_2(dev,
		    offsetof(struct virtio_net_config, mtu));
		if (mtu < VTNET_MIN_MTU /* || mtu > VTNET_MAX_MTU */) {
			device_printf(dev, "Invalid MTU value: %d. "
			    "MTU feature disabled.\n", mtu);
			features &= ~VIRTIO_NET_F_MTU;
			negotiated_features =
			    virtio_negotiate_features(dev, features);
		}
	}

	if (virtio_with_feature(dev, VIRTIO_NET_F_MQ)) {
		uint16_t npairs;

		npairs = virtio_read_dev_config_2(dev,
		    offsetof(struct virtio_net_config, max_virtqueue_pairs));
		if (npairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
		    npairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX) {
			device_printf(dev, "Invalid max_virtqueue_pairs value: "
			    "%d. Multiqueue feature disabled.\n", npairs);
			features &= ~VIRTIO_NET_F_MQ;
			negotiated_features =
			    virtio_negotiate_features(dev, features);
		}
	}

	if (virtio_with_feature(dev, VTNET_LRO_FEATURES) &&
	    virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF) == 0) {
		/*
		 * LRO without mergeable buffers requires special care. This
		 * is not ideal because every receive buffer must be large
		 * enough to hold the maximum TCP packet, the Ethernet header,
		 * and the virtio-net header. This requires up to 34
		 * descriptors with MCLBYTES clusters. If we do not have
		 * indirect descriptors, LRO is disabled since the virtqueue
		 * will not contain very many receive buffers.
		 */
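		/*
		 * Back-of-the-envelope sketch of the figure above: a
		 * 65535 byte TCP packet plus a 14 byte Ethernet header
		 * split across 2048 byte (MCLBYTES) clusters needs
		 * ceil(65549 / 2048) = 33 data segments, plus one more
		 * descriptor for the separately-carried header.
		 */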
		if (!virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC)) {
			device_printf(dev,
			    "Host LRO disabled since both mergeable buffers "
			    "and indirect descriptors were not negotiated\n");
			features &= ~VTNET_LRO_FEATURES;
			negotiated_features =
			    virtio_negotiate_features(dev, features);
		} else
			sc->vtnet_flags |= VTNET_FLAG_LRO_NOMRG;
	}

	sc->vtnet_features = negotiated_features;
	sc->vtnet_negotiated_features = negotiated_features;

	return (virtio_finalize_features(dev));
}

static int
vtnet_setup_features(struct vtnet_softc *sc)
{
	device_t dev;
	int error;

	dev = sc->vtnet_dev;

	error = vtnet_negotiate_features(sc);
	if (error)
		return (error);

	if (virtio_with_feature(dev, VIRTIO_F_VERSION_1))
		sc->vtnet_flags |= VTNET_FLAG_MODERN;
	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
		sc->vtnet_flags |= VTNET_FLAG_INDIRECT;
	if (virtio_with_feature(dev, VIRTIO_RING_F_EVENT_IDX))
		sc->vtnet_flags |= VTNET_FLAG_EVENT_IDX;

	if (virtio_with_feature(dev, VIRTIO_NET_F_MAC)) {
		/* This feature should always be negotiated. */
		sc->vtnet_flags |= VTNET_FLAG_MAC;
	}

	if (virtio_with_feature(dev, VIRTIO_NET_F_MTU)) {
		sc->vtnet_max_mtu = virtio_read_dev_config_2(dev,
		    offsetof(struct virtio_net_config, mtu));
	} else
		sc->vtnet_max_mtu = VTNET_MAX_MTU;

	if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF)) {
		sc->vtnet_flags |= VTNET_FLAG_MRG_RXBUFS;
		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	} else if (vtnet_modern(sc)) {
		/* This is identical to the mergeable header. */
		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_v1);
	} else
		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr);

	if (vtnet_modern(sc) || sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS)
		sc->vtnet_rx_nsegs = VTNET_RX_SEGS_HDR_INLINE;
	else if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG)
		sc->vtnet_rx_nsegs = VTNET_RX_SEGS_LRO_NOMRG;
	else
		sc->vtnet_rx_nsegs = VTNET_RX_SEGS_HDR_SEPARATE;

	/*
	 * Favor "hardware" LRO if negotiated, but support software LRO as
	 * a fallback; enabling both usually brings little benefit and can
	 * even hurt performance.
	 */
	if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO4) == 0 &&
	    virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO6) == 0)
		sc->vtnet_flags |= VTNET_FLAG_SW_LRO;

	if (virtio_with_feature(dev, VIRTIO_NET_F_GSO) ||
	    virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4) ||
	    virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
		sc->vtnet_tx_nsegs = VTNET_TX_SEGS_MAX;
	else
		sc->vtnet_tx_nsegs = VTNET_TX_SEGS_MIN;

	sc->vtnet_req_vq_pairs = 1;
	sc->vtnet_max_vq_pairs = 1;

	if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VQ)) {
		sc->vtnet_flags |= VTNET_FLAG_CTRL_VQ;

		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_RX))
			sc->vtnet_flags |= VTNET_FLAG_CTRL_RX;
		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VLAN))
			sc->vtnet_flags |= VTNET_FLAG_VLAN_FILTER;
		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_MAC_ADDR))
			sc->vtnet_flags |= VTNET_FLAG_CTRL_MAC;

		if (virtio_with_feature(dev, VIRTIO_NET_F_MQ)) {
			sc->vtnet_max_vq_pairs = virtio_read_dev_config_2(dev,
			    offsetof(struct virtio_net_config,
			    max_virtqueue_pairs));
		}
	}

	if (sc->vtnet_max_vq_pairs > 1) {
		int req;

		/*
		 * Limit the maximum number of requested queue pairs to the
		 * number of CPUs and the configured maximum.
		 */
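		/*
		 * For example (illustrative): with 8 CPUs and a device
		 * maximum of 4 pairs, req is clamped to 4; a tunable
		 * value of 0 means "one pair per CPU".
		 */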
		req = vtnet_tunable_int(sc, "mq_max_pairs", vtnet_mq_max_pairs);
		if (req < 0)
			req = 1;
		if (req == 0)
			req = mp_ncpus;
		if (req > sc->vtnet_max_vq_pairs)
			req = sc->vtnet_max_vq_pairs;
		if (req > mp_ncpus)
			req = mp_ncpus;
		if (req > 1) {
			sc->vtnet_req_vq_pairs = req;
			sc->vtnet_flags |= VTNET_FLAG_MQ;
		}
	}

	return (0);
}

static int
vtnet_init_rxq(struct vtnet_softc *sc, int id)
{
	struct vtnet_rxq *rxq;

	rxq = &sc->vtnet_rxqs[id];

	snprintf(rxq->vtnrx_name, sizeof(rxq->vtnrx_name), "%s-rx%d",
	    device_get_nameunit(sc->vtnet_dev), id);
	mtx_init(&rxq->vtnrx_mtx, rxq->vtnrx_name, NULL, MTX_DEF);

	rxq->vtnrx_sc = sc;
	rxq->vtnrx_id = id;

	rxq->vtnrx_sg = sglist_alloc(sc->vtnet_rx_nsegs, M_NOWAIT);
	if (rxq->vtnrx_sg == NULL)
		return (ENOMEM);

#if defined(INET) || defined(INET6)
	if (vtnet_software_lro(sc)) {
		if (tcp_lro_init_args(&rxq->vtnrx_lro, sc->vtnet_ifp,
		    sc->vtnet_lro_entry_count, sc->vtnet_lro_mbufq_depth) != 0)
			return (ENOMEM);
	}
#endif

	NET_TASK_INIT(&rxq->vtnrx_intrtask, 0, vtnet_rxq_tq_intr, rxq);
	rxq->vtnrx_tq = taskqueue_create(rxq->vtnrx_name, M_NOWAIT,
	    taskqueue_thread_enqueue, &rxq->vtnrx_tq);

	return (rxq->vtnrx_tq == NULL ? ENOMEM : 0);
}

static int
vtnet_init_txq(struct vtnet_softc *sc, int id)
{
	struct vtnet_txq *txq;

	txq = &sc->vtnet_txqs[id];

	snprintf(txq->vtntx_name, sizeof(txq->vtntx_name), "%s-tx%d",
	    device_get_nameunit(sc->vtnet_dev), id);
	mtx_init(&txq->vtntx_mtx, txq->vtntx_name, NULL, MTX_DEF);

	txq->vtntx_sc = sc;
	txq->vtntx_id = id;

	txq->vtntx_sg = sglist_alloc(sc->vtnet_tx_nsegs, M_NOWAIT);
	if (txq->vtntx_sg == NULL)
		return (ENOMEM);

#ifndef VTNET_LEGACY_TX
	txq->vtntx_br = buf_ring_alloc(VTNET_DEFAULT_BUFRING_SIZE, M_DEVBUF,
	    M_NOWAIT, &txq->vtntx_mtx);
	if (txq->vtntx_br == NULL)
		return (ENOMEM);

	TASK_INIT(&txq->vtntx_defrtask, 0, vtnet_txq_tq_deferred, txq);
#endif
	TASK_INIT(&txq->vtntx_intrtask, 0, vtnet_txq_tq_intr, txq);
	txq->vtntx_tq = taskqueue_create(txq->vtntx_name, M_NOWAIT,
	    taskqueue_thread_enqueue, &txq->vtntx_tq);
	if (txq->vtntx_tq == NULL)
		return (ENOMEM);

	return (0);
}

static int
vtnet_alloc_rxtx_queues(struct vtnet_softc *sc)
{
	int i, npairs, error;

	npairs = sc->vtnet_max_vq_pairs;

	sc->vtnet_rxqs = malloc(sizeof(struct vtnet_rxq) * npairs, M_DEVBUF,
	    M_NOWAIT | M_ZERO);
	sc->vtnet_txqs = malloc(sizeof(struct vtnet_txq) * npairs, M_DEVBUF,
	    M_NOWAIT | M_ZERO);
	if (sc->vtnet_rxqs == NULL || sc->vtnet_txqs == NULL)
		return (ENOMEM);

	for (i = 0; i < npairs; i++) {
		error = vtnet_init_rxq(sc, i);
		if (error)
			return (error);
		error = vtnet_init_txq(sc, i);
		if (error)
			return (error);
	}

	vtnet_set_rx_process_limit(sc);
	vtnet_setup_queue_sysctl(sc);

	return (0);
}

static void
vtnet_destroy_rxq(struct vtnet_rxq *rxq)
{

	rxq->vtnrx_sc = NULL;
	rxq->vtnrx_id = -1;

#if defined(INET) || defined(INET6)
	tcp_lro_free(&rxq->vtnrx_lro);
#endif

	if (rxq->vtnrx_sg != NULL) {
		sglist_free(rxq->vtnrx_sg);
		rxq->vtnrx_sg = NULL;
	}

	if (mtx_initialized(&rxq->vtnrx_mtx) != 0)
		mtx_destroy(&rxq->vtnrx_mtx);
}

static void
vtnet_destroy_txq(struct vtnet_txq *txq)
{

	txq->vtntx_sc = NULL;
	txq->vtntx_id = -1;

	if (txq->vtntx_sg != NULL) {
		sglist_free(txq->vtntx_sg);
		txq->vtntx_sg = NULL;
	}

#ifndef VTNET_LEGACY_TX
	if (txq->vtntx_br != NULL) {
		buf_ring_free(txq->vtntx_br, M_DEVBUF);
		txq->vtntx_br = NULL;
	}
#endif

	if (mtx_initialized(&txq->vtntx_mtx) != 0)
		mtx_destroy(&txq->vtntx_mtx);
}

static void
vtnet_free_rxtx_queues(struct vtnet_softc *sc)
{
	int i;

	if (sc->vtnet_rxqs != NULL) {
		for (i = 0; i < sc->vtnet_max_vq_pairs; i++)
			vtnet_destroy_rxq(&sc->vtnet_rxqs[i]);
		free(sc->vtnet_rxqs, M_DEVBUF);
		sc->vtnet_rxqs = NULL;
	}

	if (sc->vtnet_txqs != NULL) {
		for (i = 0; i < sc->vtnet_max_vq_pairs; i++)
			vtnet_destroy_txq(&sc->vtnet_txqs[i]);
		free(sc->vtnet_txqs, M_DEVBUF);
		sc->vtnet_txqs = NULL;
	}
}

static int
vtnet_alloc_rx_filters(struct vtnet_softc *sc)
{

	if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
		sc->vtnet_mac_filter = malloc(sizeof(struct vtnet_mac_filter),
		    M_DEVBUF, M_NOWAIT | M_ZERO);
		if (sc->vtnet_mac_filter == NULL)
			return (ENOMEM);
	}

	if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) {
		sc->vtnet_vlan_filter = malloc(sizeof(uint32_t) *
		    VTNET_VLAN_FILTER_NWORDS, M_DEVBUF, M_NOWAIT | M_ZERO);
		if (sc->vtnet_vlan_filter == NULL)
			return (ENOMEM);
	}

	return (0);
}

static void
vtnet_free_rx_filters(struct vtnet_softc *sc)
{

	if (sc->vtnet_mac_filter != NULL) {
		free(sc->vtnet_mac_filter, M_DEVBUF);
		sc->vtnet_mac_filter = NULL;
	}

	if (sc->vtnet_vlan_filter != NULL) {
		free(sc->vtnet_vlan_filter, M_DEVBUF);
		sc->vtnet_vlan_filter = NULL;
	}
}

static int
vtnet_alloc_virtqueues(struct vtnet_softc *sc)
{
	device_t dev;
	struct vq_alloc_info *info;
	struct vtnet_rxq *rxq;
	struct vtnet_txq *txq;
	int i, idx, flags, nvqs, error;

	dev = sc->vtnet_dev;
	flags = 0;

	nvqs = sc->vtnet_max_vq_pairs * 2;
	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ)
		nvqs++;

	info = malloc(sizeof(struct vq_alloc_info) * nvqs, M_TEMP, M_NOWAIT);
	if (info == NULL)
		return (ENOMEM);

	for (i = 0, idx = 0; i < sc->vtnet_req_vq_pairs; i++, idx += 2) {
		rxq = &sc->vtnet_rxqs[i];
		VQ_ALLOC_INFO_INIT(&info[idx], sc->vtnet_rx_nsegs,
		    vtnet_rx_vq_intr, rxq, &rxq->vtnrx_vq,
		    "%s-rx%d", device_get_nameunit(dev), rxq->vtnrx_id);

		txq = &sc->vtnet_txqs[i];
		VQ_ALLOC_INFO_INIT(&info[idx+1], sc->vtnet_tx_nsegs,
		    vtnet_tx_vq_intr, txq, &txq->vtntx_vq,
		    "%s-tx%d", device_get_nameunit(dev), txq->vtntx_id);
	}

	/* These queues will not be used, so allocate the minimum resources. */
	for (/**/; i < sc->vtnet_max_vq_pairs; i++, idx += 2) {
		rxq = &sc->vtnet_rxqs[i];
		VQ_ALLOC_INFO_INIT(&info[idx], 0, NULL, rxq, &rxq->vtnrx_vq,
		    "%s-rx%d", device_get_nameunit(dev), rxq->vtnrx_id);

		txq = &sc->vtnet_txqs[i];
		VQ_ALLOC_INFO_INIT(&info[idx+1], 0, NULL, txq, &txq->vtntx_vq,
		    "%s-tx%d", device_get_nameunit(dev), txq->vtntx_id);
	}

	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
		VQ_ALLOC_INFO_INIT(&info[idx], 0, NULL, NULL,
		    &sc->vtnet_ctrl_vq, "%s ctrl", device_get_nameunit(dev));
	}

	/*
	 * TODO: Enable interrupt binding if this is multiqueue. This will
	 * only matter when per-virtqueue MSIX is available.
	 */
	if (sc->vtnet_flags & VTNET_FLAG_MQ)
		flags |= 0;

	error = virtio_alloc_virtqueues(dev, flags, nvqs, info);
	free(info, M_TEMP);

	return (error);
}

static int
vtnet_alloc_interface(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;

	dev = sc->vtnet_dev;

	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		return (ENOMEM);

	sc->vtnet_ifp = ifp;
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	return (0);
}

static int
vtnet_setup_interface(struct vtnet_softc *sc)
{
	device_t dev;
	struct pfil_head_args pa;
	struct ifnet *ifp;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;

	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST |
	    IFF_KNOWSEPOCH;
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = vtnet_init;
	ifp->if_ioctl = vtnet_ioctl;
	ifp->if_get_counter = vtnet_get_counter;
#ifndef VTNET_LEGACY_TX
	ifp->if_transmit = vtnet_txq_mq_start;
	ifp->if_qflush = vtnet_qflush;
#else
	struct virtqueue *vq = sc->vtnet_txqs[0].vtntx_vq;
	ifp->if_start = vtnet_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, virtqueue_size(vq) - 1);
	ifp->if_snd.ifq_drv_maxlen = virtqueue_size(vq) - 1;
	IFQ_SET_READY(&ifp->if_snd);
#endif

	vtnet_get_macaddr(sc);

	if (virtio_with_feature(dev, VIRTIO_NET_F_STATUS))
		ifp->if_capabilities |= IFCAP_LINKSTATE;

	ifmedia_init(&sc->vtnet_media, 0, vtnet_ifmedia_upd, vtnet_ifmedia_sts);
	ifmedia_add(&sc->vtnet_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->vtnet_media, IFM_ETHER | IFM_AUTO);

	if (virtio_with_feature(dev, VIRTIO_NET_F_CSUM)) {
		int gso;

		ifp->if_capabilities |= IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6;

		gso = virtio_with_feature(dev, VIRTIO_NET_F_GSO);
		if (gso || virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4))
			ifp->if_capabilities |= IFCAP_TSO4;
		if (gso || virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
			ifp->if_capabilities |= IFCAP_TSO6;
		if (gso || virtio_with_feature(dev, VIRTIO_NET_F_HOST_ECN))
			sc->vtnet_flags |= VTNET_FLAG_TSO_ECN;

		if (ifp->if_capabilities & (IFCAP_TSO4 | IFCAP_TSO6)) {
			int tso_maxlen;

			ifp->if_capabilities |= IFCAP_VLAN_HWTSO;

			tso_maxlen = vtnet_tunable_int(sc, "tso_maxlen",
			    vtnet_tso_maxlen);
			ifp->if_hw_tsomax = tso_maxlen -
			    (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
			ifp->if_hw_tsomaxsegcount = sc->vtnet_tx_nsegs - 1;
			ifp->if_hw_tsomaxsegsize = PAGE_SIZE;
		}
	}

	if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_CSUM)) {
		ifp->if_capabilities |= IFCAP_RXCSUM;
#ifdef notyet
		/* BMV: Rx checksums not distinguished between IPv4 and IPv6. */
		ifp->if_capabilities |= IFCAP_RXCSUM_IPV6;
#endif

		if (vtnet_tunable_int(sc, "fixup_needs_csum",
		    vtnet_fixup_needs_csum) != 0)
			sc->vtnet_flags |= VTNET_FLAG_FIXUP_NEEDS_CSUM;

		/* Support either "hardware" or software LRO. */
		ifp->if_capabilities |= IFCAP_LRO;
	}

	if (ifp->if_capabilities & (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6)) {
		/*
		 * VirtIO does not support VLAN tagging, but we can fake
		 * it by inserting and removing the 802.1Q header during
		 * transmit and receive. We are then able to do checksum
		 * offloading of VLAN frames.
		 */
		ifp->if_capabilities |=
		    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
	}

	if (sc->vtnet_max_mtu >= ETHERMTU_JUMBO)
		ifp->if_capabilities |= IFCAP_JUMBO_MTU;
	ifp->if_capabilities |= IFCAP_VLAN_MTU;

	/*
	 * Capabilities after here are not enabled by default.
	 */
	ifp->if_capenable = ifp->if_capabilities;

	if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) {
		ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

		sc->vtnet_vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
		    vtnet_register_vlan, sc, EVENTHANDLER_PRI_FIRST);
		sc->vtnet_vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
		    vtnet_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);
	}

	ether_ifattach(ifp, sc->vtnet_hwaddr);

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	DEBUGNET_SET(ifp, vtnet);

	pa.pa_version = PFIL_VERSION;
	pa.pa_flags = PFIL_IN;
	pa.pa_type = PFIL_TYPE_ETHERNET;
	pa.pa_headname = ifp->if_xname;
	sc->vtnet_pfil = pfil_head_register(&pa);

	return (0);
}

static int
vtnet_rx_cluster_size(struct vtnet_softc *sc, int mtu)
{
	int framesz;

	if (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS)
		return (MJUMPAGESIZE);
	else if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG)
		return (MCLBYTES);

	/*
	 * Try to scale the receive mbuf cluster size from the MTU. We
	 * could also use the VQ size to influence the selected size,
	 * but that would only matter for very small queues.
	 */
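	/*
	 * Illustrative sizing (amd64 values): for a modern device at the
	 * default 1500 byte MTU, framesz is 12 (virtio_net_hdr_v1) + 18
	 * (ether_vlan_header) + 1500 = 1530 bytes, which fits in an
	 * MCLBYTES (2048 byte) cluster; a 4000 byte MTU would select
	 * MJUMPAGESIZE instead.
	 */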
	if (vtnet_modern(sc)) {
		MPASS(sc->vtnet_hdr_size == sizeof(struct virtio_net_hdr_v1));
		framesz = sizeof(struct virtio_net_hdr_v1);
	} else
		framesz = sizeof(struct vtnet_rx_header);
	framesz += sizeof(struct ether_vlan_header) + mtu;

	if (framesz <= MCLBYTES)
		return (MCLBYTES);
	else if (framesz <= MJUMPAGESIZE)
		return (MJUMPAGESIZE);
	else if (framesz <= MJUM9BYTES)
		return (MJUM9BYTES);

	/* Sane default; avoid 16KB clusters. */
	return (MCLBYTES);
}

static int
vtnet_ioctl_mtu(struct vtnet_softc *sc, u_int mtu)
{
	struct ifnet *ifp;
	int clustersz;

	ifp = sc->vtnet_ifp;
	VTNET_CORE_LOCK_ASSERT(sc);

	if (ifp->if_mtu == mtu)
		return (0);
	else if (mtu < ETHERMIN || mtu > sc->vtnet_max_mtu)
		return (EINVAL);

	ifp->if_mtu = mtu;
	clustersz = vtnet_rx_cluster_size(sc, mtu);

	if (clustersz != sc->vtnet_rx_clustersz &&
	    ifp->if_drv_flags & IFF_DRV_RUNNING) {
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		vtnet_init_locked(sc, 0);
	}

	return (0);
}

static int
vtnet_ioctl_ifflags(struct vtnet_softc *sc)
{
	struct ifnet *ifp;
	int drv_running;

	ifp = sc->vtnet_ifp;
	drv_running = (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0;

	VTNET_CORE_LOCK_ASSERT(sc);

	if ((ifp->if_flags & IFF_UP) == 0) {
		if (drv_running)
			vtnet_stop(sc);
		goto out;
	}

	if (!drv_running) {
		vtnet_init_locked(sc, 0);
		goto out;
	}

	if ((ifp->if_flags ^ sc->vtnet_if_flags) &
	    (IFF_PROMISC | IFF_ALLMULTI)) {
		if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) == 0)
			return (ENOTSUP);
		vtnet_rx_filter(sc);
	}

out:
	sc->vtnet_if_flags = ifp->if_flags;
	return (0);
}

static int
vtnet_ioctl_multi(struct vtnet_softc *sc)
{
	struct ifnet *ifp;

	ifp = sc->vtnet_ifp;

	VTNET_CORE_LOCK_ASSERT(sc);

	if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX &&
	    ifp->if_drv_flags & IFF_DRV_RUNNING)
		vtnet_rx_filter_mac(sc);

	return (0);
}

static int
vtnet_ioctl_ifcap(struct vtnet_softc *sc, struct ifreq *ifr)
{
	struct ifnet *ifp;
	int mask, reinit, update;

	ifp = sc->vtnet_ifp;
	mask = (ifr->ifr_reqcap & ifp->if_capabilities) ^ ifp->if_capenable;
	reinit = update = 0;

	VTNET_CORE_LOCK_ASSERT(sc);

	if (mask & IFCAP_TXCSUM)
		ifp->if_capenable ^= IFCAP_TXCSUM;
	if (mask & IFCAP_TXCSUM_IPV6)
		ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
	if (mask & IFCAP_TSO4)
		ifp->if_capenable ^= IFCAP_TSO4;
	if (mask & IFCAP_TSO6)
		ifp->if_capenable ^= IFCAP_TSO6;

	if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO)) {
		/*
		 * These Rx features require the negotiated features to
		 * be updated. Avoid a full reinit if possible.
		 */
		if (sc->vtnet_features & VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)
			update = 1;
		else
			reinit = 1;

		/* BMV: Avoid needless renegotiation for just software LRO. */
		if ((mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO)) ==
		    IFCAP_LRO && vtnet_software_lro(sc))
			reinit = update = 0;

		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_RXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;

		/*
		 * VirtIO does not distinguish between IPv4 and IPv6 checksums
		 * so treat them as a pair. Guest TSO (LRO) requires receive
		 * checksums.
		 */
		if (ifp->if_capenable & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
			ifp->if_capenable |= IFCAP_RXCSUM;
#ifdef notyet
			ifp->if_capenable |= IFCAP_RXCSUM_IPV6;
#endif
		} else
			ifp->if_capenable &=
			    ~(IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO);
	}

	if (mask & IFCAP_VLAN_HWFILTER) {
		/* This Rx feature requires renegotiation. */
		reinit = 1;
		ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
	}

	if (mask & IFCAP_VLAN_HWTSO)
		ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
	if (mask & IFCAP_VLAN_HWTAGGING)
		ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		if (reinit) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			vtnet_init_locked(sc, 0);
		} else if (update)
			vtnet_update_rx_offloads(sc);
	}

	return (0);
}

static int
vtnet_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct vtnet_softc *sc;
	struct ifreq *ifr;
	int error;

	sc = ifp->if_softc;
	ifr = (struct ifreq *) data;
	error = 0;

	switch (cmd) {
	case SIOCSIFMTU:
		VTNET_CORE_LOCK(sc);
		error = vtnet_ioctl_mtu(sc, ifr->ifr_mtu);
		VTNET_CORE_UNLOCK(sc);
		break;

	case SIOCSIFFLAGS:
		VTNET_CORE_LOCK(sc);
		error = vtnet_ioctl_ifflags(sc);
		VTNET_CORE_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		VTNET_CORE_LOCK(sc);
		error = vtnet_ioctl_multi(sc);
		VTNET_CORE_UNLOCK(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->vtnet_media, cmd);
		break;

	case SIOCSIFCAP:
		VTNET_CORE_LOCK(sc);
		error = vtnet_ioctl_ifcap(sc, ifr);
		VTNET_CORE_UNLOCK(sc);
		VLAN_CAPABILITIES(ifp);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	VTNET_CORE_LOCK_ASSERT_NOTOWNED(sc);

	return (error);
}

static int
vtnet_rxq_populate(struct vtnet_rxq *rxq)
{
	struct virtqueue *vq;
	int nbufs, error;

#ifdef DEV_NETMAP
	error = vtnet_netmap_rxq_populate(rxq);
	if (error >= 0)
		return (error);
#endif  /* DEV_NETMAP */

	vq = rxq->vtnrx_vq;
	error = ENOSPC;

	for (nbufs = 0; !virtqueue_full(vq); nbufs++) {
		error = vtnet_rxq_new_buf(rxq);
		if (error)
			break;
	}

	if (nbufs > 0) {
		virtqueue_notify(vq);
		/*
		 * EMSGSIZE signifies the virtqueue did not have enough
		 * entries available to hold the last mbuf. This is not
		 * an error.
		 */
		if (error == EMSGSIZE)
			error = 0;
	}

	return (error);
}

static void
vtnet_rxq_free_mbufs(struct vtnet_rxq *rxq)
{
	struct virtqueue *vq;
	struct mbuf *m;
	int last;
#ifdef DEV_NETMAP
	struct netmap_kring *kring = netmap_kring_on(NA(rxq->vtnrx_sc->vtnet_ifp),
							rxq->vtnrx_id, NR_RX);
#else  /* !DEV_NETMAP */
	void *kring = NULL;
#endif /* !DEV_NETMAP */

	vq = rxq->vtnrx_vq;
	last = 0;

	while ((m = virtqueue_drain(vq, &last)) != NULL) {
		if (kring == NULL)
			m_freem(m);
	}

	KASSERT(virtqueue_empty(vq),
	    ("%s: mbufs remaining in rx queue %p", __func__, rxq));
}

static struct mbuf *
vtnet_rx_alloc_buf(struct vtnet_softc *sc, int nbufs, struct mbuf **m_tailp)
{
	struct mbuf *m_head, *m_tail, *m;
	int i, size;

	m_head = NULL;
	size = sc->vtnet_rx_clustersz;

	KASSERT(nbufs == 1 || sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
	    ("%s: mbuf %d chain requested without LRO_NOMRG", __func__, nbufs));

	for (i = 0; i < nbufs; i++) {
		m = m_getjcl(M_NOWAIT, MT_DATA, i == 0 ? M_PKTHDR : 0, size);
		if (m == NULL) {
			sc->vtnet_stats.mbuf_alloc_failed++;
			m_freem(m_head);
			return (NULL);
		}

		m->m_len = size;
		if (m_head != NULL) {
			m_tail->m_next = m;
			m_tail = m;
		} else
			m_head = m_tail = m;
	}

	if (m_tailp != NULL)
		*m_tailp = m_tail;

	return (m_head);
}

/*
 * Slow path for when LRO without mergeable buffers is negotiated.
 */
static int
vtnet_rxq_replace_lro_nomrg_buf(struct vtnet_rxq *rxq, struct mbuf *m0,
    int len0)
{
	struct vtnet_softc *sc;
	struct mbuf *m, *m_prev, *m_new, *m_tail;
	int len, clustersz, nreplace, error;

	sc = rxq->vtnrx_sc;
	clustersz = sc->vtnet_rx_clustersz;

	m_prev = NULL;
	m_tail = NULL;
	nreplace = 0;

	m = m0;
	len = len0;

	/*
	 * Since these mbuf chains are so large, avoid allocating a complete
	 * replacement when the received frame did not consume the entire
	 * chain. Unused mbufs are moved to the tail of the replacement mbuf.
	 */
	while (len > 0) {
		if (m == NULL) {
			sc->vtnet_stats.rx_frame_too_large++;
			return (EMSGSIZE);
		}

		/*
		 * Every mbuf should have the expected cluster size since that
		 * is also used to allocate the replacements.
		 */
		KASSERT(m->m_len == clustersz,
		    ("%s: mbuf size %d not expected cluster size %d", __func__,
		    m->m_len, clustersz));

		m->m_len = MIN(m->m_len, len);
		len -= m->m_len;

		m_prev = m;
		m = m->m_next;
		nreplace++;
	}

	KASSERT(nreplace > 0 && nreplace <= sc->vtnet_rx_nmbufs,
	    ("%s: invalid replacement mbuf count %d max %d", __func__,
	    nreplace, sc->vtnet_rx_nmbufs));

	m_new = vtnet_rx_alloc_buf(sc, nreplace, &m_tail);
	if (m_new == NULL) {
		m_prev->m_len = clustersz;
		return (ENOBUFS);
	}

	/*
	 * Move any unused mbufs from the received mbuf chain onto the
	 * end of the replacement chain.
	 */
	if (m_prev->m_next != NULL) {
		m_tail->m_next = m_prev->m_next;
		m_prev->m_next = NULL;
	}

	error = vtnet_rxq_enqueue_buf(rxq, m_new);
	if (error) {
		/*
		 * The replacement is supposed to be a copy of the one
		 * just dequeued, so this is a very unexpected error.
		 *
		 * Restore the m0 chain to its original state if it was
		 * modified so we can then discard it.
		 */
		if (m_tail->m_next != NULL) {
			m_prev->m_next = m_tail->m_next;
			m_tail->m_next = NULL;
		}
		m_prev->m_len = clustersz;
		sc->vtnet_stats.rx_enq_replacement_failed++;
		m_freem(m_new);
	}

	return (error);
}

static int
vtnet_rxq_replace_buf(struct vtnet_rxq *rxq, struct mbuf *m, int len)
{
	struct vtnet_softc *sc;
	struct mbuf *m_new;
	int error;

	sc = rxq->vtnrx_sc;

	if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG)
		return (vtnet_rxq_replace_lro_nomrg_buf(rxq, m, len));

	MPASS(m->m_next == NULL);
	if (m->m_len < len)
		return (EMSGSIZE);

	m_new = vtnet_rx_alloc_buf(sc, 1, NULL);
	if (m_new == NULL)
		return (ENOBUFS);

	error = vtnet_rxq_enqueue_buf(rxq, m_new);
	if (error) {
		sc->vtnet_stats.rx_enq_replacement_failed++;
		m_freem(m_new);
	} else
		m->m_len = len;

	return (error);
}

static int
vtnet_rxq_enqueue_buf(struct vtnet_rxq *rxq, struct mbuf *m)
{
	struct vtnet_softc *sc;
	struct sglist *sg;
	int header_inlined, error;

	sc = rxq->vtnrx_sc;
	sg = rxq->vtnrx_sg;

	KASSERT(m->m_next == NULL || sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
	    ("%s: mbuf chain without LRO_NOMRG", __func__));
	VTNET_RXQ_LOCK_ASSERT(rxq);

	sglist_reset(sg);
	header_inlined = vtnet_modern(sc) ||
	    (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) != 0; /* TODO: ANY_LAYOUT */

	if (header_inlined)
		error = sglist_append_mbuf(sg, m);
	else {
		struct vtnet_rx_header *rxhdr =
		    mtod(m, struct vtnet_rx_header *);
		MPASS(sc->vtnet_hdr_size == sizeof(struct virtio_net_hdr));

		/* Append the header and remaining mbuf data. */
		error = sglist_append(sg, &rxhdr->vrh_hdr, sc->vtnet_hdr_size);
		if (error)
			return (error);
		error = sglist_append(sg, &rxhdr[1],
		    m->m_len - sizeof(struct vtnet_rx_header));
		if (error)
			return (error);

		if (m->m_next != NULL)
			error = sglist_append_mbuf(sg, m->m_next);
	}

	if (error)
		return (error);

	return (virtqueue_enqueue(rxq->vtnrx_vq, m, sg, 0, sg->sg_nseg));
}

static int
vtnet_rxq_new_buf(struct vtnet_rxq *rxq)
{
	struct vtnet_softc *sc;
	struct mbuf *m;
	int error;

	sc = rxq->vtnrx_sc;

	m = vtnet_rx_alloc_buf(sc, sc->vtnet_rx_nmbufs, NULL);
	if (m == NULL)
		return (ENOBUFS);

	error = vtnet_rxq_enqueue_buf(rxq, m);
	if (error)
		m_freem(m);

	return (error);
}

static int
vtnet_rxq_csum_needs_csum(struct vtnet_rxq *rxq, struct mbuf *m, uint16_t etype,
    int hoff, struct virtio_net_hdr *hdr)
{
	struct vtnet_softc *sc;
	int error;

	sc = rxq->vtnrx_sc;

1744 
1745 	/*
1746 	 * NEEDS_CSUM corresponds to Linux's CHECKSUM_PARTIAL, but FreeBSD does
1747 	 * not have an analogous CSUM flag. The checksum has been validated,
1748 	 * but is incomplete (TCP/UDP pseudo header).
1749 	 *
1750 	 * The packet is likely from another VM on the same host that itself
1751 	 * performed checksum offloading so Tx/Rx is basically a memcpy and
1752 	 * the checksum has little value.
1753 	 *
1754 	 * Default to receiving the packet as-is for performance reasons, but
1755 	 * this can cause issues if the packet is to be forwarded because it
1756 	 * does not contain a valid checksum. This patch may be helpful:
1757 	 * https://reviews.freebsd.org/D6611. In the meantime, have the driver
1758 	 * compute the checksum if requested.
1759 	 *
1760 	 * BMV: Need to add an CSUM_PARTIAL flag?
1761 	 */
	if ((sc->vtnet_flags & VTNET_FLAG_FIXUP_NEEDS_CSUM) == 0) {
		error = vtnet_rxq_csum_data_valid(rxq, m, etype, hoff, hdr);
		return (error);
	}

	/*
	 * Compute the checksum in the driver so the packet will contain a
	 * valid checksum. The checksum is at csum_offset from csum_start.
	 */
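	/*
	 * For example (illustrative): for TCP over IPv4 with no VLAN tag,
	 * csum_start is 34 (14 byte Ethernet + 20 byte IP header) and
	 * csum_offset is 16 (offset of th_sum in the TCP header), so the
	 * checksum field lives at byte 50 of the packet.
	 */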
1771 	switch (etype) {
1772 #if defined(INET) || defined(INET6)
1773 	case ETHERTYPE_IP:
1774 	case ETHERTYPE_IPV6: {
1775 		int csum_off, csum_end;
1776 		uint16_t csum;
1777 
1778 		csum_off = hdr->csum_start + hdr->csum_offset;
1779 		csum_end = csum_off + sizeof(uint16_t);
1780 
1781 		/* Assume checksum will be in the first mbuf. */
1782 		if (m->m_len < csum_end || m->m_pkthdr.len < csum_end)
1783 			return (1);
1784 
1785 		/*
1786 		 * Like in_delayed_cksum()/in6_delayed_cksum(), compute the
1787 		 * checksum and write it at the specified offset. We could
1788 		 * try to verify the packet: csum_start should probably
1789 		 * correspond to the start of the TCP/UDP header.
1790 		 *
1791 		 * BMV: Need to properly handle UDP with zero checksum. Is
1792 		 * the IPv4 header checksum implicitly validated?
1793 		 */
1794 		csum = in_cksum_skip(m, m->m_pkthdr.len, hdr->csum_start);
1795 		*(uint16_t *)(mtodo(m, csum_off)) = csum;
1796 		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1797 		m->m_pkthdr.csum_data = 0xFFFF;
1798 		break;
1799 	}
1800 #endif
1801 	default:
1802 		sc->vtnet_stats.rx_csum_bad_ethtype++;
1803 		return (1);
1804 	}
1805 
1806 	return (0);
1807 }
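
/*
 * Worked example of the fixup above, assuming a minimal untagged
 * TCP/IPv4 frame with no IP options (values are illustrative):
 *
 *   csum_start  = ETHER_HDR_LEN + sizeof(struct ip) = 14 + 20 = 34
 *   csum_offset = offsetof(struct tcphdr, th_sum)   = 16
 *   csum_off    = 34 + 16 = 50, csum_end = 52
 *
 * in_cksum_skip() then checksums everything from byte 34 to the end of
 * the packet and the result is written at byte offset 50.
 */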
1808 
1809 static int
1810 vtnet_rxq_csum_data_valid(struct vtnet_rxq *rxq, struct mbuf *m,
1811     uint16_t etype, int hoff, struct virtio_net_hdr *hdr __unused)
1812 {
1813 	struct vtnet_softc *sc;
1814 	int protocol;
1815 
1816 	sc = rxq->vtnrx_sc;
1817 
1818 	switch (etype) {
1819 #if defined(INET)
1820 	case ETHERTYPE_IP:
1821 		if (__predict_false(m->m_len < hoff + sizeof(struct ip)))
1822 			protocol = IPPROTO_DONE;
1823 		else {
1824 			struct ip *ip = (struct ip *)(m->m_data + hoff);
1825 			protocol = ip->ip_p;
1826 		}
1827 		break;
1828 #endif
1829 #if defined(INET6)
1830 	case ETHERTYPE_IPV6:
1831 		if (__predict_false(m->m_len < hoff + sizeof(struct ip6_hdr))
1832 		    || ip6_lasthdr(m, hoff, IPPROTO_IPV6, &protocol) < 0)
1833 			protocol = IPPROTO_DONE;
1834 		break;
1835 #endif
1836 	default:
1837 		protocol = IPPROTO_DONE;
1838 		break;
1839 	}
1840 
1841 	switch (protocol) {
1842 	case IPPROTO_TCP:
1843 	case IPPROTO_UDP:
1844 		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1845 		m->m_pkthdr.csum_data = 0xFFFF;
1846 		break;
1847 	default:
1848 		/*
1849 		 * FreeBSD does not support checksum offloading of this
1850 		 * protocol. Let the stack re-verify the checksum later
1851 		 * if the protocol is supported.
1852 		 */
1853 #if 0
1854 		if_printf(sc->vtnet_ifp,
1855 		    "%s: checksum offload of unsupported protocol "
1856 		    "etype=%#x protocol=%d csum_start=%d csum_offset=%d\n",
1857 		    __func__, etype, protocol, hdr->csum_start,
1858 		    hdr->csum_offset);
1859 #endif
1860 		break;
1861 	}
1862 
1863 	return (0);
1864 }
1865 
1866 static int
1867 vtnet_rxq_csum(struct vtnet_rxq *rxq, struct mbuf *m,
1868     struct virtio_net_hdr *hdr)
1869 {
1870 	const struct ether_header *eh;
1871 	int hoff;
1872 	uint16_t etype;
1873 
1874 	eh = mtod(m, const struct ether_header *);
1875 	etype = ntohs(eh->ether_type);
1876 	if (etype == ETHERTYPE_VLAN) {
1877 		/* TODO BMV: Handle QinQ. */
1878 		const struct ether_vlan_header *evh =
1879 		    mtod(m, const struct ether_vlan_header *);
1880 		etype = ntohs(evh->evl_proto);
1881 		hoff = sizeof(struct ether_vlan_header);
1882 	} else
1883 		hoff = sizeof(struct ether_header);
1884 
1885 	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
1886 		return (vtnet_rxq_csum_needs_csum(rxq, m, etype, hoff, hdr));
1887 	else /* VIRTIO_NET_HDR_F_DATA_VALID */
1888 		return (vtnet_rxq_csum_data_valid(rxq, m, etype, hoff, hdr));
1889 }
1890 
1891 static void
1892 vtnet_rxq_discard_merged_bufs(struct vtnet_rxq *rxq, int nbufs)
1893 {
1894 	struct mbuf *m;
1895 
1896 	while (--nbufs > 0) {
1897 		m = virtqueue_dequeue(rxq->vtnrx_vq, NULL);
1898 		if (m == NULL)
1899 			break;
1900 		vtnet_rxq_discard_buf(rxq, m);
1901 	}
1902 }
1903 
1904 static void
1905 vtnet_rxq_discard_buf(struct vtnet_rxq *rxq, struct mbuf *m)
1906 {
1907 	int error;
1908 
1909 	/*
1910 	 * Requeue the discarded mbuf. This should always be successful
1911 	 * since it was just dequeued.
1912 	 */
1913 	error = vtnet_rxq_enqueue_buf(rxq, m);
1914 	KASSERT(error == 0,
1915 	    ("%s: cannot requeue discarded mbuf %d", __func__, error));
1916 }
1917 
1918 static int
1919 vtnet_rxq_merged_eof(struct vtnet_rxq *rxq, struct mbuf *m_head, int nbufs)
1920 {
1921 	struct vtnet_softc *sc;
1922 	struct virtqueue *vq;
1923 	struct mbuf *m_tail;
1924 
1925 	sc = rxq->vtnrx_sc;
1926 	vq = rxq->vtnrx_vq;
1927 	m_tail = m_head;
1928 
1929 	while (--nbufs > 0) {
1930 		struct mbuf *m;
1931 		uint32_t len;
1932 
1933 		m = virtqueue_dequeue(vq, &len);
1934 		if (m == NULL) {
1935 			rxq->vtnrx_stats.vrxs_ierrors++;
1936 			goto fail;
1937 		}
1938 
1939 		if (vtnet_rxq_new_buf(rxq) != 0) {
1940 			rxq->vtnrx_stats.vrxs_iqdrops++;
1941 			vtnet_rxq_discard_buf(rxq, m);
1942 			if (nbufs > 1)
1943 				vtnet_rxq_discard_merged_bufs(rxq, nbufs);
1944 			goto fail;
1945 		}
1946 
1947 		if (m->m_len < len)
1948 			len = m->m_len;
1949 
1950 		m->m_len = len;
1951 		m->m_flags &= ~M_PKTHDR;
1952 
1953 		m_head->m_pkthdr.len += len;
1954 		m_tail->m_next = m;
1955 		m_tail = m;
1956 	}
1957 
1958 	return (0);
1959 
1960 fail:
1961 	sc->vtnet_stats.rx_mergeable_failed++;
1962 	m_freem(m_head);
1963 
1964 	return (1);
1965 }
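
/*
 * Worked example (assumed sizes): with VIRTIO_NET_F_MRG_RXBUF and
 * 2048-byte clusters, a 5000-byte frame plus the 12-byte mergeable
 * header arrives as num_buffers == 3 descriptors of 2048, 2048 and
 * 916 bytes. The loop above links the last two mbufs onto m_head:
 *
 *   m_head (2048) -> m (2048) -> m (916), m_pkthdr.len == 5012
 *
 * and the caller later strips the header with m_adj().
 */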
1966 
1967 #if defined(INET) || defined(INET6)
1968 static int
1969 vtnet_lro_rx(struct vtnet_rxq *rxq, struct mbuf *m)
1970 {
1971 	struct lro_ctrl *lro;
1972 
1973 	lro = &rxq->vtnrx_lro;
1974 
1975 	if (lro->lro_mbuf_max != 0) {
1976 		tcp_lro_queue_mbuf(lro, m);
1977 		return (0);
1978 	}
1979 
1980 	return (tcp_lro_rx(lro, m, 0));
1981 }
1982 #endif
1983 
1984 static void
1985 vtnet_rxq_input(struct vtnet_rxq *rxq, struct mbuf *m,
1986     struct virtio_net_hdr *hdr)
1987 {
1988 	struct vtnet_softc *sc;
1989 	struct ifnet *ifp;
1990 
1991 	sc = rxq->vtnrx_sc;
1992 	ifp = sc->vtnet_ifp;
1993 
1994 	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1995 		struct ether_header *eh = mtod(m, struct ether_header *);
1996 		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
1997 			vtnet_vlan_tag_remove(m);
1998 			/*
1999 			 * With the 802.1Q header removed, update the
2000 			 * checksum starting location accordingly.
2001 			 */
2002 			if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
2003 				hdr->csum_start -= ETHER_VLAN_ENCAP_LEN;
2004 		}
2005 	}
2006 
2007 	m->m_pkthdr.flowid = rxq->vtnrx_id;
2008 	M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
2009 
2010 	if (hdr->flags &
2011 	    (VIRTIO_NET_HDR_F_NEEDS_CSUM | VIRTIO_NET_HDR_F_DATA_VALID)) {
2012 		if (vtnet_rxq_csum(rxq, m, hdr) == 0)
2013 			rxq->vtnrx_stats.vrxs_csum++;
2014 		else
2015 			rxq->vtnrx_stats.vrxs_csum_failed++;
2016 	}
2017 
2018 	if (hdr->gso_size != 0) {
2019 		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
2020 		case VIRTIO_NET_HDR_GSO_TCPV4:
2021 		case VIRTIO_NET_HDR_GSO_TCPV6:
2022 			m->m_pkthdr.lro_nsegs =
2023 			    howmany(m->m_pkthdr.len, hdr->gso_size);
2024 			rxq->vtnrx_stats.vrxs_host_lro++;
2025 			break;
2026 		}
2027 	}
2028 
2029 	rxq->vtnrx_stats.vrxs_ipackets++;
2030 	rxq->vtnrx_stats.vrxs_ibytes += m->m_pkthdr.len;
2031 
2032 #if defined(INET) || defined(INET6)
2033 	if (vtnet_software_lro(sc) && (ifp->if_capenable & IFCAP_LRO)) {
2034 		if (vtnet_lro_rx(rxq, m) == 0)
2035 			return;
2036 	}
2037 #endif
2038 
2039 	(*ifp->if_input)(ifp, m);
2040 }
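
/*
 * Example of the csum_start adjustment above (assumed offsets): for a
 * tagged TCP/IPv4 frame the host sets csum_start to 38 (18-byte 802.1Q
 * Ethernet header plus a 20-byte IP header). Once the 4-byte tag is
 * stripped, subtracting ETHER_VLAN_ENCAP_LEN restores csum_start to 34,
 * still pointing at the start of the TCP header.
 */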
2041 
2042 static int
2043 vtnet_rxq_eof(struct vtnet_rxq *rxq)
2044 {
2045 	struct virtio_net_hdr lhdr, *hdr;
2046 	struct vtnet_softc *sc;
2047 	struct ifnet *ifp;
2048 	struct virtqueue *vq;
2049 	int deq, count;
2050 
2051 	sc = rxq->vtnrx_sc;
2052 	vq = rxq->vtnrx_vq;
2053 	ifp = sc->vtnet_ifp;
2054 	deq = 0;
2055 	count = sc->vtnet_rx_process_limit;
2056 
2057 	VTNET_RXQ_LOCK_ASSERT(rxq);
2058 
2059 	while (count-- > 0) {
2060 		struct mbuf *m;
2061 		uint32_t len, nbufs, adjsz;
2062 
2063 		m = virtqueue_dequeue(vq, &len);
2064 		if (m == NULL)
2065 			break;
2066 		deq++;
2067 
2068 		if (len < sc->vtnet_hdr_size + ETHER_HDR_LEN) {
2069 			rxq->vtnrx_stats.vrxs_ierrors++;
2070 			vtnet_rxq_discard_buf(rxq, m);
2071 			continue;
2072 		}
2073 
2074 		if (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) {
2075 			struct virtio_net_hdr_mrg_rxbuf *mhdr =
2076 			    mtod(m, struct virtio_net_hdr_mrg_rxbuf *);
2077 			nbufs = vtnet_htog16(sc, mhdr->num_buffers);
2078 			adjsz = sizeof(struct virtio_net_hdr_mrg_rxbuf);
2079 		} else if (vtnet_modern(sc)) {
2080 			nbufs = 1; /* num_buffers is always 1 */
2081 			adjsz = sizeof(struct virtio_net_hdr_v1);
2082 		} else {
2083 			nbufs = 1;
2084 			adjsz = sizeof(struct vtnet_rx_header);
2085 			/*
2086 			 * Account for our gap between the header and start of
2087 			 * data to keep the segments separated.
2088 			 */
2089 			len += VTNET_RX_HEADER_PAD;
2090 		}
2091 
2092 		if (vtnet_rxq_replace_buf(rxq, m, len) != 0) {
2093 			rxq->vtnrx_stats.vrxs_iqdrops++;
2094 			vtnet_rxq_discard_buf(rxq, m);
2095 			if (nbufs > 1)
2096 				vtnet_rxq_discard_merged_bufs(rxq, nbufs);
2097 			continue;
2098 		}
2099 
2100 		m->m_pkthdr.len = len;
2101 		m->m_pkthdr.rcvif = ifp;
2102 		m->m_pkthdr.csum_flags = 0;
2103 
2104 		if (nbufs > 1) {
2105 		/* Dequeue the rest of the chain. */
2106 			if (vtnet_rxq_merged_eof(rxq, m, nbufs) != 0)
2107 				continue;
2108 		}
2109 
2110 		/*
2111 		 * Save an endian swapped version of the header prior to it
2112 		 * being stripped. The header is always at the start of the
2113 		 * mbuf data. num_buffers was already saved (and is not
2114 		 * needed), so use the standard header.
2115 		 */
2116 		hdr = mtod(m, struct virtio_net_hdr *);
2117 		lhdr.flags = hdr->flags;
2118 		lhdr.gso_type = hdr->gso_type;
2119 		lhdr.hdr_len = vtnet_htog16(sc, hdr->hdr_len);
2120 		lhdr.gso_size = vtnet_htog16(sc, hdr->gso_size);
2121 		lhdr.csum_start = vtnet_htog16(sc, hdr->csum_start);
2122 		lhdr.csum_offset = vtnet_htog16(sc, hdr->csum_offset);
2123 		m_adj(m, adjsz);
2124 
2125 		if (PFIL_HOOKED_IN(sc->vtnet_pfil)) {
2126 			pfil_return_t pfil;
2127 
2128 			pfil = pfil_run_hooks(sc->vtnet_pfil, &m, ifp, PFIL_IN,
2129 			    NULL);
2130 			switch (pfil) {
2131 			case PFIL_REALLOCED:
2132 				m = pfil_mem2mbuf(m->m_data);
2133 				break;
2134 			case PFIL_DROPPED:
2135 			case PFIL_CONSUMED:
2136 				continue;
2137 			default:
2138 				KASSERT(pfil == PFIL_PASS,
2139 				    ("Filter returned %d!", pfil));
2140 			}
2141 		}
2142 
2143 		vtnet_rxq_input(rxq, m, &lhdr);
2144 	}
2145 
2146 	if (deq > 0) {
2147 #if defined(INET) || defined(INET6)
2148 		if (vtnet_software_lro(sc))
2149 			tcp_lro_flush_all(&rxq->vtnrx_lro);
2150 #endif
2151 		virtqueue_notify(vq);
2152 	}
2153 
2154 	return (count > 0 ? 0 : EAGAIN);
2155 }
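
/*
 * Summary of the header sizes stripped above via m_adj(m, adjsz); the
 * first two are fixed by the VirtIO specification:
 *
 *   mergeable: sizeof(struct virtio_net_hdr_mrg_rxbuf) (12 bytes)
 *   modern V1: sizeof(struct virtio_net_hdr_v1)        (12 bytes)
 *   legacy:    sizeof(struct vtnet_rx_header), i.e. the 10-byte header
 *              plus the VTNET_RX_HEADER_PAD gap that the loop above
 *              adds back to len
 */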
2156 
2157 static void
2158 vtnet_rx_vq_process(struct vtnet_rxq *rxq, int tries)
2159 {
2160 	struct vtnet_softc *sc;
2161 	struct ifnet *ifp;
2162 	u_int more;
2163 #ifdef DEV_NETMAP
2164 	int nmirq;
2165 #endif /* DEV_NETMAP */
2166 
2167 	sc = rxq->vtnrx_sc;
2168 	ifp = sc->vtnet_ifp;
2169 
2170 	if (__predict_false(rxq->vtnrx_id >= sc->vtnet_act_vq_pairs)) {
2171 		/*
2172 		 * Ignore this interrupt. Either this is a spurious interrupt
2173 		 * or multiqueue without per-VQ MSIX so every queue needs to
2174 		 * be polled (a brain dead configuration we could try harder
2175 		 * to avoid).
2176 		 */
2177 		vtnet_rxq_disable_intr(rxq);
2178 		return;
2179 	}
2180 
2181 	VTNET_RXQ_LOCK(rxq);
2182 
2183 #ifdef DEV_NETMAP
2184 	/*
2185 	 * We call netmap_rx_irq() under lock to prevent concurrent calls.
2186 	 * This is not necessary to serialize the access to the RX vq, but
2187 	 * rather to avoid races that may happen if this interface is
2188 	 * attached to a VALE switch, which would cause received packets
2189 	 * to stall in the RX queue (nm_kr_tryget() could find the kring
2190 	 * busy when called from netmap_bwrap_intr_notify()).
2191 	 */
2192 	nmirq = netmap_rx_irq(ifp, rxq->vtnrx_id, &more);
2193 	if (nmirq != NM_IRQ_PASS) {
2194 		VTNET_RXQ_UNLOCK(rxq);
2195 		if (nmirq == NM_IRQ_RESCHED) {
2196 			taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
2197 		}
2198 		return;
2199 	}
2200 #endif /* DEV_NETMAP */
2201 
2202 again:
2203 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2204 		VTNET_RXQ_UNLOCK(rxq);
2205 		return;
2206 	}
2207 
2208 	more = vtnet_rxq_eof(rxq);
2209 	if (more || vtnet_rxq_enable_intr(rxq) != 0) {
2210 		if (!more)
2211 			vtnet_rxq_disable_intr(rxq);
2212 		/*
2213 		 * This is an occasional condition or race (when !more),
2214 		 * so retry a few times before scheduling the taskqueue.
2215 		 */
2216 		if (tries-- > 0)
2217 			goto again;
2218 
2219 		rxq->vtnrx_stats.vrxs_rescheduled++;
2220 		VTNET_RXQ_UNLOCK(rxq);
2221 		taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
2222 	} else
2223 		VTNET_RXQ_UNLOCK(rxq);
2224 }
2225 
2226 static void
2227 vtnet_rx_vq_intr(void *xrxq)
2228 {
2229 	struct vtnet_rxq *rxq;
2230 
2231 	rxq = xrxq;
2232 	vtnet_rx_vq_process(rxq, VTNET_INTR_DISABLE_RETRIES);
2233 }
2234 
2235 static void
2236 vtnet_rxq_tq_intr(void *xrxq, int pending __unused)
2237 {
2238 	struct vtnet_rxq *rxq;
2239 
2240 	rxq = xrxq;
2241 	vtnet_rx_vq_process(rxq, 0);
2242 }
2243 
2244 static int
2245 vtnet_txq_intr_threshold(struct vtnet_txq *txq)
2246 {
2247 	struct vtnet_softc *sc;
2248 	int threshold;
2249 
2250 	sc = txq->vtntx_sc;
2251 
2252 	/*
2253 	 * The Tx interrupt is disabled until the queue free count falls
2254 	 * below our threshold. Completed frames are drained from the Tx
2255 	 * virtqueue before transmitting new frames and in the watchdog
2256 	 * callout, so the frequency of Tx interrupts is greatly reduced,
2257 	 * at the cost of not freeing mbufs as quickly as they otherwise
2258 	 * would be.
2259 	 */
2260 	threshold = virtqueue_size(txq->vtntx_vq) / 4;
2261 
2262 	/*
2263 	 * Without indirect descriptors, leave enough room for the largest
2264 	 * number of segments we handle.
2265 	 */
2266 	if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) == 0 &&
2267 	    threshold < sc->vtnet_tx_nsegs)
2268 		threshold = sc->vtnet_tx_nsegs;
2269 
2270 	return (threshold);
2271 }
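
/*
 * Example of the calculation above: a 256-entry Tx virtqueue yields a
 * threshold of 256 / 4 = 64 free descriptors. If indirect descriptors
 * were not negotiated and vtnet_tx_nsegs were, say, 65 (a hypothetical
 * value), the threshold would be raised to 65 so that a maximally
 * fragmented frame can still be enqueued once the interrupt fires.
 */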
2272 
2273 static int
2274 vtnet_txq_below_threshold(struct vtnet_txq *txq)
2275 {
2276 	struct virtqueue *vq;
2277 
2278 	vq = txq->vtntx_vq;
2279 
2280 	return (virtqueue_nfree(vq) <= txq->vtntx_intr_threshold);
2281 }
2282 
2283 static int
2284 vtnet_txq_notify(struct vtnet_txq *txq)
2285 {
2286 	struct virtqueue *vq;
2287 
2288 	vq = txq->vtntx_vq;
2289 
2290 	txq->vtntx_watchdog = VTNET_TX_TIMEOUT;
2291 	virtqueue_notify(vq);
2292 
2293 	if (vtnet_txq_enable_intr(txq) == 0)
2294 		return (0);
2295 
2296 	/*
2297 	 * Drain frames that were completed since last checked. If this
2298 	 * causes the queue to go above the threshold, the caller should
2299 	 * continue transmitting.
2300 	 */
2301 	if (vtnet_txq_eof(txq) != 0 && vtnet_txq_below_threshold(txq) == 0) {
2302 		virtqueue_disable_intr(vq);
2303 		return (1);
2304 	}
2305 
2306 	return (0);
2307 }
2308 
2309 static void
2310 vtnet_txq_free_mbufs(struct vtnet_txq *txq)
2311 {
2312 	struct virtqueue *vq;
2313 	struct vtnet_tx_header *txhdr;
2314 	int last;
2315 #ifdef DEV_NETMAP
2316 	struct netmap_kring *kring = netmap_kring_on(NA(txq->vtntx_sc->vtnet_ifp),
2317 							txq->vtntx_id, NR_TX);
2318 #else  /* !DEV_NETMAP */
2319 	void *kring = NULL;
2320 #endif /* !DEV_NETMAP */
2321 
2322 	vq = txq->vtntx_vq;
2323 	last = 0;
2324 
2325 	while ((txhdr = virtqueue_drain(vq, &last)) != NULL) {
2326 		if (kring == NULL) {
2327 			m_freem(txhdr->vth_mbuf);
2328 			uma_zfree(vtnet_tx_header_zone, txhdr);
2329 		}
2330 	}
2331 
2332 	KASSERT(virtqueue_empty(vq),
2333 	    ("%s: mbufs remaining in tx queue %p", __func__, txq));
2334 }
2335 
2336 /*
2337  * BMV: This can go away once we finally have offsets in the mbuf header.
2338  */
2339 static int
2340 vtnet_txq_offload_ctx(struct vtnet_txq *txq, struct mbuf *m, int *etype,
2341     int *proto, int *start)
2342 {
2343 	struct vtnet_softc *sc;
2344 	struct ether_vlan_header *evh;
2345 	int offset;
2346 
2347 	sc = txq->vtntx_sc;
2348 
2349 	evh = mtod(m, struct ether_vlan_header *);
2350 	if (evh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2351 		/* BMV: We should handle nested VLAN tags too. */
2352 		*etype = ntohs(evh->evl_proto);
2353 		offset = sizeof(struct ether_vlan_header);
2354 	} else {
2355 		*etype = ntohs(evh->evl_encap_proto);
2356 		offset = sizeof(struct ether_header);
2357 	}
2358 
2359 	switch (*etype) {
2360 #if defined(INET)
2361 	case ETHERTYPE_IP: {
2362 		struct ip *ip, iphdr;
2363 		if (__predict_false(m->m_len < offset + sizeof(struct ip))) {
2364 			m_copydata(m, offset, sizeof(struct ip),
2365 			    (caddr_t) &iphdr);
2366 			ip = &iphdr;
2367 		} else
2368 			ip = (struct ip *)(m->m_data + offset);
2369 		*proto = ip->ip_p;
2370 		*start = offset + (ip->ip_hl << 2);
2371 		break;
2372 	}
2373 #endif
2374 #if defined(INET6)
2375 	case ETHERTYPE_IPV6:
2376 		*proto = -1;
2377 		*start = ip6_lasthdr(m, offset, IPPROTO_IPV6, proto);
2378 		/* Assert the network stack sent us a valid packet. */
2379 		KASSERT(*start > offset,
2380 		    ("%s: mbuf %p start %d offset %d proto %d", __func__, m,
2381 		    *start, offset, *proto));
2382 		break;
2383 #endif
2384 	default:
2385 		sc->vtnet_stats.tx_csum_unknown_ethtype++;
2386 		return (EINVAL);
2387 	}
2388 
2389 	return (0);
2390 }
2391 
2392 static int
2393 vtnet_txq_offload_tso(struct vtnet_txq *txq, struct mbuf *m, int eth_type,
2394     int offset, struct virtio_net_hdr *hdr)
2395 {
2396 	static struct timeval lastecn;
2397 	static int curecn;
2398 	struct vtnet_softc *sc;
2399 	struct tcphdr *tcp, tcphdr;
2400 
2401 	sc = txq->vtntx_sc;
2402 
2403 	if (__predict_false(m->m_len < offset + sizeof(struct tcphdr))) {
2404 		m_copydata(m, offset, sizeof(struct tcphdr), (caddr_t) &tcphdr);
2405 		tcp = &tcphdr;
2406 	} else
2407 		tcp = (struct tcphdr *)(m->m_data + offset);
2408 
2409 	hdr->hdr_len = vtnet_gtoh16(sc, offset + (tcp->th_off << 2));
2410 	hdr->gso_size = vtnet_gtoh16(sc, m->m_pkthdr.tso_segsz);
2411 	hdr->gso_type = eth_type == ETHERTYPE_IP ? VIRTIO_NET_HDR_GSO_TCPV4 :
2412 	    VIRTIO_NET_HDR_GSO_TCPV6;
2413 
2414 	if (__predict_false(tcp->th_flags & TH_CWR)) {
2415 		/*
2416 		 * Drop if VIRTIO_NET_F_HOST_ECN was not negotiated. In
2417 		 * FreeBSD, ECN support is not on a per-interface basis,
2418 		 * but is controlled globally via the net.inet.tcp.ecn.enable
2419 		 * sysctl knob. The default is off.
2420 		 */
2421 		if ((sc->vtnet_flags & VTNET_FLAG_TSO_ECN) == 0) {
2422 			if (ppsratecheck(&lastecn, &curecn, 1))
2423 				if_printf(sc->vtnet_ifp,
2424 				    "TSO with ECN not negotiated with host\n");
2425 			return (ENOTSUP);
2426 		}
2427 		hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
2428 	}
2429 
2430 	txq->vtntx_stats.vtxs_tso++;
2431 
2432 	return (0);
2433 }
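
/*
 * Example of the header fields set above (hypothetical values): for an
 * untagged TCP/IPv4 frame with no TCP options, offset is 34 and
 * tcp->th_off << 2 is 20, so hdr_len = 54. With a 1448-byte tso_segsz,
 * the host splits a 60000-byte TSO frame into ceil(59946 / 1448) = 42
 * MSS-sized TCP segments on the guest's behalf.
 */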
2434 
2435 static struct mbuf *
2436 vtnet_txq_offload(struct vtnet_txq *txq, struct mbuf *m,
2437     struct virtio_net_hdr *hdr)
2438 {
2439 	struct vtnet_softc *sc;
2440 	int flags, etype, csum_start, proto, error;
2441 
2442 	sc = txq->vtntx_sc;
2443 	flags = m->m_pkthdr.csum_flags;
2444 
2445 	error = vtnet_txq_offload_ctx(txq, m, &etype, &proto, &csum_start);
2446 	if (error)
2447 		goto drop;
2448 
2449 	if (flags & (VTNET_CSUM_OFFLOAD | VTNET_CSUM_OFFLOAD_IPV6)) {
2450 		/* Sanity check the parsed mbuf matches the offload flags. */
2451 		if (__predict_false((flags & VTNET_CSUM_OFFLOAD &&
2452 		    etype != ETHERTYPE_IP) || (flags & VTNET_CSUM_OFFLOAD_IPV6
2453 		    && etype != ETHERTYPE_IPV6))) {
2454 			sc->vtnet_stats.tx_csum_proto_mismatch++;
2455 			goto drop;
2456 		}
2457 
2458 		hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM;
2459 		hdr->csum_start = vtnet_gtoh16(sc, csum_start);
2460 		hdr->csum_offset = vtnet_gtoh16(sc, m->m_pkthdr.csum_data);
2461 		txq->vtntx_stats.vtxs_csum++;
2462 	}
2463 
2464 	if (flags & (CSUM_IP_TSO | CSUM_IP6_TSO)) {
2465 		/*
2466 		 * Sanity check that the parsed IP protocol is TCP; VirtIO
2467 		 * TSO requires the checksum offloading above.
2468 		 */
2469 		if (__predict_false(proto != IPPROTO_TCP)) {
2470 			sc->vtnet_stats.tx_tso_not_tcp++;
2471 			goto drop;
2472 		} else if (__predict_false((hdr->flags &
2473 		    VIRTIO_NET_HDR_F_NEEDS_CSUM) == 0)) {
2474 			sc->vtnet_stats.tx_tso_without_csum++;
2475 			goto drop;
2476 		}
2477 
2478 		error = vtnet_txq_offload_tso(txq, m, etype, csum_start, hdr);
2479 		if (error)
2480 			goto drop;
2481 	}
2482 
2483 	return (m);
2484 
2485 drop:
2486 	m_freem(m);
2487 	return (NULL);
2488 }
2489 
2490 static int
2491 vtnet_txq_enqueue_buf(struct vtnet_txq *txq, struct mbuf **m_head,
2492     struct vtnet_tx_header *txhdr)
2493 {
2494 	struct vtnet_softc *sc;
2495 	struct virtqueue *vq;
2496 	struct sglist *sg;
2497 	struct mbuf *m;
2498 	int error;
2499 
2500 	sc = txq->vtntx_sc;
2501 	vq = txq->vtntx_vq;
2502 	sg = txq->vtntx_sg;
2503 	m = *m_head;
2504 
2505 	sglist_reset(sg);
2506 	error = sglist_append(sg, &txhdr->vth_uhdr, sc->vtnet_hdr_size);
2507 	if (error != 0 || sg->sg_nseg != 1) {
2508 		KASSERT(0, ("%s: cannot add header to sglist error %d nseg %d",
2509 		    __func__, error, sg->sg_nseg));
2510 		goto fail;
2511 	}
2512 
2513 	error = sglist_append_mbuf(sg, m);
2514 	if (error) {
2515 		m = m_defrag(m, M_NOWAIT);
2516 		if (m == NULL)
2517 			goto fail;
2518 
2519 		*m_head = m;
2520 		sc->vtnet_stats.tx_defragged++;
2521 
2522 		error = sglist_append_mbuf(sg, m);
2523 		if (error)
2524 			goto fail;
2525 	}
2526 
2527 	txhdr->vth_mbuf = m;
2528 	error = virtqueue_enqueue(vq, txhdr, sg, sg->sg_nseg, 0);
2529 
2530 	return (error);
2531 
2532 fail:
2533 	sc->vtnet_stats.tx_defrag_failed++;
2534 	m_freem(*m_head);
2535 	*m_head = NULL;
2536 
2537 	return (ENOBUFS);
2538 }
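
/*
 * Sketch of the Tx descriptor layout built above (all segments are
 * readable; a writable count of 0 is passed to virtqueue_enqueue()):
 *
 *   seg 0:    vtnet_hdr_size bytes of virtio_net_hdr (from txhdr)
 *   seg 1..n: the mbuf chain's data
 *
 * sglist_append_mbuf() fails once the chain needs more segments than
 * the sglist provides, in which case m_defrag() compacts the chain and
 * the append is retried exactly once.
 */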
2539 
2540 static int
2541 vtnet_txq_encap(struct vtnet_txq *txq, struct mbuf **m_head, int flags)
2542 {
2543 	struct vtnet_tx_header *txhdr;
2544 	struct virtio_net_hdr *hdr;
2545 	struct mbuf *m;
2546 	int error;
2547 
2548 	m = *m_head;
2549 	M_ASSERTPKTHDR(m);
2550 
2551 	txhdr = uma_zalloc(vtnet_tx_header_zone, flags | M_ZERO);
2552 	if (txhdr == NULL) {
2553 		m_freem(m);
2554 		*m_head = NULL;
2555 		return (ENOMEM);
2556 	}
2557 
2558 	/*
2559 	 * Always use the non-mergeable header, regardless of whether mergeable
2560 	 * headers were negotiated, because for transmit num_buffers is always
2561 	 * zero. vtnet_hdr_size selects the correct header size to enqueue.
2562 	 */
2563 	hdr = &txhdr->vth_uhdr.hdr;
2564 
2565 	if (m->m_flags & M_VLANTAG) {
2566 		m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
2567 		if ((*m_head = m) == NULL) {
2568 			error = ENOBUFS;
2569 			goto fail;
2570 		}
2571 		m->m_flags &= ~M_VLANTAG;
2572 	}
2573 
2574 	if (m->m_pkthdr.csum_flags & VTNET_CSUM_ALL_OFFLOAD) {
2575 		m = vtnet_txq_offload(txq, m, hdr);
2576 		if ((*m_head = m) == NULL) {
2577 			error = ENOBUFS;
2578 			goto fail;
2579 		}
2580 	}
2581 
2582 	error = vtnet_txq_enqueue_buf(txq, m_head, txhdr);
2583 fail:
2584 	if (error)
2585 		uma_zfree(vtnet_tx_header_zone, txhdr);
2586 
2587 	return (error);
2588 }
2589 
2590 #ifdef VTNET_LEGACY_TX
2591 
2592 static void
2593 vtnet_start_locked(struct vtnet_txq *txq, struct ifnet *ifp)
2594 {
2595 	struct vtnet_softc *sc;
2596 	struct virtqueue *vq;
2597 	struct mbuf *m0;
2598 	int tries, enq;
2599 
2600 	sc = txq->vtntx_sc;
2601 	vq = txq->vtntx_vq;
2602 	tries = 0;
2603 
2604 	VTNET_TXQ_LOCK_ASSERT(txq);
2605 
2606 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
2607 	    sc->vtnet_link_active == 0)
2608 		return;
2609 
2610 	vtnet_txq_eof(txq);
2611 
2612 again:
2613 	enq = 0;
2614 
2615 	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
2616 		if (virtqueue_full(vq))
2617 			break;
2618 
2619 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);
2620 		if (m0 == NULL)
2621 			break;
2622 
2623 		if (vtnet_txq_encap(txq, &m0, M_NOWAIT) != 0) {
2624 			if (m0 != NULL)
2625 				IFQ_DRV_PREPEND(&ifp->if_snd, m0);
2626 			break;
2627 		}
2628 
2629 		enq++;
2630 		ETHER_BPF_MTAP(ifp, m0);
2631 	}
2632 
2633 	if (enq > 0 && vtnet_txq_notify(txq) != 0) {
2634 		if (tries++ < VTNET_NOTIFY_RETRIES)
2635 			goto again;
2636 
2637 		txq->vtntx_stats.vtxs_rescheduled++;
2638 		taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_intrtask);
2639 	}
2640 }
2641 
2642 static void
2643 vtnet_start(struct ifnet *ifp)
2644 {
2645 	struct vtnet_softc *sc;
2646 	struct vtnet_txq *txq;
2647 
2648 	sc = ifp->if_softc;
2649 	txq = &sc->vtnet_txqs[0];
2650 
2651 	VTNET_TXQ_LOCK(txq);
2652 	vtnet_start_locked(txq, ifp);
2653 	VTNET_TXQ_UNLOCK(txq);
2654 }
2655 
2656 #else /* !VTNET_LEGACY_TX */
2657 
2658 static int
2659 vtnet_txq_mq_start_locked(struct vtnet_txq *txq, struct mbuf *m)
2660 {
2661 	struct vtnet_softc *sc;
2662 	struct virtqueue *vq;
2663 	struct buf_ring *br;
2664 	struct ifnet *ifp;
2665 	int enq, tries, error;
2666 
2667 	sc = txq->vtntx_sc;
2668 	vq = txq->vtntx_vq;
2669 	br = txq->vtntx_br;
2670 	ifp = sc->vtnet_ifp;
2671 	tries = 0;
2672 	error = 0;
2673 
2674 	VTNET_TXQ_LOCK_ASSERT(txq);
2675 
2676 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
2677 	    sc->vtnet_link_active == 0) {
2678 		if (m != NULL)
2679 			error = drbr_enqueue(ifp, br, m);
2680 		return (error);
2681 	}
2682 
2683 	if (m != NULL) {
2684 		error = drbr_enqueue(ifp, br, m);
2685 		if (error)
2686 			return (error);
2687 	}
2688 
2689 	vtnet_txq_eof(txq);
2690 
2691 again:
2692 	enq = 0;
2693 
2694 	while ((m = drbr_peek(ifp, br)) != NULL) {
2695 		if (virtqueue_full(vq)) {
2696 			drbr_putback(ifp, br, m);
2697 			break;
2698 		}
2699 
2700 		if (vtnet_txq_encap(txq, &m, M_NOWAIT) != 0) {
2701 			if (m != NULL)
2702 				drbr_putback(ifp, br, m);
2703 			else
2704 				drbr_advance(ifp, br);
2705 			break;
2706 		}
2707 		drbr_advance(ifp, br);
2708 
2709 		enq++;
2710 		ETHER_BPF_MTAP(ifp, m);
2711 	}
2712 
2713 	if (enq > 0 && vtnet_txq_notify(txq) != 0) {
2714 		if (tries++ < VTNET_NOTIFY_RETRIES)
2715 			goto again;
2716 
2717 		txq->vtntx_stats.vtxs_rescheduled++;
2718 		taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_intrtask);
2719 	}
2720 
2721 	return (0);
2722 }
2723 
2724 static int
2725 vtnet_txq_mq_start(struct ifnet *ifp, struct mbuf *m)
2726 {
2727 	struct vtnet_softc *sc;
2728 	struct vtnet_txq *txq;
2729 	int i, npairs, error;
2730 
2731 	sc = ifp->if_softc;
2732 	npairs = sc->vtnet_act_vq_pairs;
2733 
2734 	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
2735 		i = m->m_pkthdr.flowid % npairs;
2736 	else
2737 		i = curcpu % npairs;
2738 
2739 	txq = &sc->vtnet_txqs[i];
2740 
2741 	if (VTNET_TXQ_TRYLOCK(txq) != 0) {
2742 		error = vtnet_txq_mq_start_locked(txq, m);
2743 		VTNET_TXQ_UNLOCK(txq);
2744 	} else {
2745 		error = drbr_enqueue(ifp, txq->vtntx_br, m);
2746 		taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_defrtask);
2747 	}
2748 
2749 	return (error);
2750 }
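
/*
 * Example of the queue selection above: with vtnet_act_vq_pairs == 4,
 * a packet carrying RSS flowid 13 is steered to txq 1 (13 % 4), so all
 * packets of that flow stay ordered on one queue; packets without a
 * hash fall back to the current CPU modulo the pair count.
 */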
2751 
2752 static void
2753 vtnet_txq_tq_deferred(void *xtxq, int pending __unused)
2754 {
2755 	struct vtnet_softc *sc;
2756 	struct vtnet_txq *txq;
2757 
2758 	txq = xtxq;
2759 	sc = txq->vtntx_sc;
2760 
2761 	VTNET_TXQ_LOCK(txq);
2762 	if (!drbr_empty(sc->vtnet_ifp, txq->vtntx_br))
2763 		vtnet_txq_mq_start_locked(txq, NULL);
2764 	VTNET_TXQ_UNLOCK(txq);
2765 }
2766 
2767 #endif /* VTNET_LEGACY_TX */
2768 
2769 static void
2770 vtnet_txq_start(struct vtnet_txq *txq)
2771 {
2772 	struct vtnet_softc *sc;
2773 	struct ifnet *ifp;
2774 
2775 	sc = txq->vtntx_sc;
2776 	ifp = sc->vtnet_ifp;
2777 
2778 #ifdef VTNET_LEGACY_TX
2779 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2780 		vtnet_start_locked(txq, ifp);
2781 #else
2782 	if (!drbr_empty(ifp, txq->vtntx_br))
2783 		vtnet_txq_mq_start_locked(txq, NULL);
2784 #endif
2785 }
2786 
2787 static void
2788 vtnet_txq_tq_intr(void *xtxq, int pending __unused)
2789 {
2790 	struct vtnet_softc *sc;
2791 	struct vtnet_txq *txq;
2792 	struct ifnet *ifp;
2793 
2794 	txq = xtxq;
2795 	sc = txq->vtntx_sc;
2796 	ifp = sc->vtnet_ifp;
2797 
2798 	VTNET_TXQ_LOCK(txq);
2799 
2800 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2801 		VTNET_TXQ_UNLOCK(txq);
2802 		return;
2803 	}
2804 
2805 	vtnet_txq_eof(txq);
2806 	vtnet_txq_start(txq);
2807 
2808 	VTNET_TXQ_UNLOCK(txq);
2809 }
2810 
2811 static int
2812 vtnet_txq_eof(struct vtnet_txq *txq)
2813 {
2814 	struct virtqueue *vq;
2815 	struct vtnet_tx_header *txhdr;
2816 	struct mbuf *m;
2817 	int deq;
2818 
2819 	vq = txq->vtntx_vq;
2820 	deq = 0;
2821 	VTNET_TXQ_LOCK_ASSERT(txq);
2822 
2823 	while ((txhdr = virtqueue_dequeue(vq, NULL)) != NULL) {
2824 		m = txhdr->vth_mbuf;
2825 		deq++;
2826 
2827 		txq->vtntx_stats.vtxs_opackets++;
2828 		txq->vtntx_stats.vtxs_obytes += m->m_pkthdr.len;
2829 		if (m->m_flags & M_MCAST)
2830 			txq->vtntx_stats.vtxs_omcasts++;
2831 
2832 		m_freem(m);
2833 		uma_zfree(vtnet_tx_header_zone, txhdr);
2834 	}
2835 
2836 	if (virtqueue_empty(vq))
2837 		txq->vtntx_watchdog = 0;
2838 
2839 	return (deq);
2840 }
2841 
2842 static void
2843 vtnet_tx_vq_intr(void *xtxq)
2844 {
2845 	struct vtnet_softc *sc;
2846 	struct vtnet_txq *txq;
2847 	struct ifnet *ifp;
2848 
2849 	txq = xtxq;
2850 	sc = txq->vtntx_sc;
2851 	ifp = sc->vtnet_ifp;
2852 
2853 	if (__predict_false(txq->vtntx_id >= sc->vtnet_act_vq_pairs)) {
2854 		/*
2855 		 * Ignore this interrupt. Either this is a spurious interrupt
2856 		 * or multiqueue without per-VQ MSIX so every queue needs to
2857 		 * be polled (a brain dead configuration we could try harder
2858 		 * to avoid).
2859 		 */
2860 		vtnet_txq_disable_intr(txq);
2861 		return;
2862 	}
2863 
2864 #ifdef DEV_NETMAP
2865 	if (netmap_tx_irq(ifp, txq->vtntx_id) != NM_IRQ_PASS)
2866 		return;
2867 #endif /* DEV_NETMAP */
2868 
2869 	VTNET_TXQ_LOCK(txq);
2870 
2871 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2872 		VTNET_TXQ_UNLOCK(txq);
2873 		return;
2874 	}
2875 
2876 	vtnet_txq_eof(txq);
2877 	vtnet_txq_start(txq);
2878 
2879 	VTNET_TXQ_UNLOCK(txq);
2880 }
2881 
2882 static void
2883 vtnet_tx_start_all(struct vtnet_softc *sc)
2884 {
2885 	struct vtnet_txq *txq;
2886 	int i;
2887 
2888 	VTNET_CORE_LOCK_ASSERT(sc);
2889 
2890 	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
2891 		txq = &sc->vtnet_txqs[i];
2892 
2893 		VTNET_TXQ_LOCK(txq);
2894 		vtnet_txq_start(txq);
2895 		VTNET_TXQ_UNLOCK(txq);
2896 	}
2897 }
2898 
2899 #ifndef VTNET_LEGACY_TX
2900 static void
2901 vtnet_qflush(struct ifnet *ifp)
2902 {
2903 	struct vtnet_softc *sc;
2904 	struct vtnet_txq *txq;
2905 	struct mbuf *m;
2906 	int i;
2907 
2908 	sc = ifp->if_softc;
2909 
2910 	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
2911 		txq = &sc->vtnet_txqs[i];
2912 
2913 		VTNET_TXQ_LOCK(txq);
2914 		while ((m = buf_ring_dequeue_sc(txq->vtntx_br)) != NULL)
2915 			m_freem(m);
2916 		VTNET_TXQ_UNLOCK(txq);
2917 	}
2918 
2919 	if_qflush(ifp);
2920 }
2921 #endif
2922 
2923 static int
2924 vtnet_watchdog(struct vtnet_txq *txq)
2925 {
2926 	struct ifnet *ifp;
2927 
2928 	ifp = txq->vtntx_sc->vtnet_ifp;
2929 
2930 	VTNET_TXQ_LOCK(txq);
2931 	if (txq->vtntx_watchdog == 1) {
2932 		/*
2933 		 * Only drain completed frames if the watchdog is about to
2934 		 * expire. If any frames were drained, there may be enough
2935 		 * free descriptors now available to transmit queued frames.
2936 		 * In that case, the timer will immediately be decremented
2937 		 * below, but the timeout is generous enough that this should
2938 		 * not be a problem.
2939 		 */
2940 		if (vtnet_txq_eof(txq) != 0)
2941 			vtnet_txq_start(txq);
2942 	}
2943 
2944 	if (txq->vtntx_watchdog == 0 || --txq->vtntx_watchdog) {
2945 		VTNET_TXQ_UNLOCK(txq);
2946 		return (0);
2947 	}
2948 	VTNET_TXQ_UNLOCK(txq);
2949 
2950 	if_printf(ifp, "watchdog timeout on queue %d\n", txq->vtntx_id);
2951 	return (1);
2952 }
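
/*
 * Timing sketch: vtnet_txq_notify() arms vtntx_watchdog with
 * VTNET_TX_TIMEOUT, and vtnet_tick() runs this check once per second
 * (the callout is rescheduled with hz ticks), so a queue that completes
 * no frames for roughly VTNET_TX_TIMEOUT seconds reports a timeout and
 * triggers a full reinit from vtnet_tick().
 */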
2953 
2954 static void
2955 vtnet_accum_stats(struct vtnet_softc *sc, struct vtnet_rxq_stats *rxacc,
2956     struct vtnet_txq_stats *txacc)
2957 {
2958 
2959 	bzero(rxacc, sizeof(struct vtnet_rxq_stats));
2960 	bzero(txacc, sizeof(struct vtnet_txq_stats));
2961 
2962 	for (int i = 0; i < sc->vtnet_max_vq_pairs; i++) {
2963 		struct vtnet_rxq_stats *rxst;
2964 		struct vtnet_txq_stats *txst;
2965 
2966 		rxst = &sc->vtnet_rxqs[i].vtnrx_stats;
2967 		rxacc->vrxs_ipackets += rxst->vrxs_ipackets;
2968 		rxacc->vrxs_ibytes += rxst->vrxs_ibytes;
2969 		rxacc->vrxs_iqdrops += rxst->vrxs_iqdrops;
2970 		rxacc->vrxs_csum += rxst->vrxs_csum;
2971 		rxacc->vrxs_csum_failed += rxst->vrxs_csum_failed;
2972 		rxacc->vrxs_rescheduled += rxst->vrxs_rescheduled;
2973 
2974 		txst = &sc->vtnet_txqs[i].vtntx_stats;
2975 		txacc->vtxs_opackets += txst->vtxs_opackets;
2976 		txacc->vtxs_obytes += txst->vtxs_obytes;
2977 		txacc->vtxs_csum += txst->vtxs_csum;
2978 		txacc->vtxs_tso += txst->vtxs_tso;
2979 		txacc->vtxs_rescheduled += txst->vtxs_rescheduled;
2980 	}
2981 }
2982 
2983 static uint64_t
2984 vtnet_get_counter(if_t ifp, ift_counter cnt)
2985 {
2986 	struct vtnet_softc *sc;
2987 	struct vtnet_rxq_stats rxaccum;
2988 	struct vtnet_txq_stats txaccum;
2989 
2990 	sc = if_getsoftc(ifp);
2991 	vtnet_accum_stats(sc, &rxaccum, &txaccum);
2992 
2993 	switch (cnt) {
2994 	case IFCOUNTER_IPACKETS:
2995 		return (rxaccum.vrxs_ipackets);
2996 	case IFCOUNTER_IQDROPS:
2997 		return (rxaccum.vrxs_iqdrops);
2998 	case IFCOUNTER_IERRORS:
2999 		return (rxaccum.vrxs_ierrors);
3000 	case IFCOUNTER_OPACKETS:
3001 		return (txaccum.vtxs_opackets);
3002 #ifndef VTNET_LEGACY_TX
3003 	case IFCOUNTER_OBYTES:
3004 		return (txaccum.vtxs_obytes);
3005 	case IFCOUNTER_OMCASTS:
3006 		return (txaccum.vtxs_omcasts);
3007 #endif
3008 	default:
3009 		return (if_get_counter_default(ifp, cnt));
3010 	}
3011 }
3012 
3013 static void
3014 vtnet_tick(void *xsc)
3015 {
3016 	struct vtnet_softc *sc;
3017 	struct ifnet *ifp;
3018 	int i, timedout;
3019 
3020 	sc = xsc;
3021 	ifp = sc->vtnet_ifp;
3022 	timedout = 0;
3023 
3024 	VTNET_CORE_LOCK_ASSERT(sc);
3025 
3026 	for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
3027 		timedout |= vtnet_watchdog(&sc->vtnet_txqs[i]);
3028 
3029 	if (timedout != 0) {
3030 		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3031 		vtnet_init_locked(sc, 0);
3032 	} else
3033 		callout_schedule(&sc->vtnet_tick_ch, hz);
3034 }
3035 
3036 static void
3037 vtnet_start_taskqueues(struct vtnet_softc *sc)
3038 {
3039 	device_t dev;
3040 	struct vtnet_rxq *rxq;
3041 	struct vtnet_txq *txq;
3042 	int i, error;
3043 
3044 	dev = sc->vtnet_dev;
3045 
3046 	/*
3047 	 * Errors here are very difficult to recover from - we cannot
3048 	 * easily fail because, if this is during boot, we will hang
3049 	 * when freeing any successfully started taskqueues because
3050 	 * the scheduler isn't up yet.
3051 	 *
3052 	 * Most drivers just ignore the return value - it only fails
3053 	 * with ENOMEM so an error is not likely.
3054 	 */
3055 	for (i = 0; i < sc->vtnet_req_vq_pairs; i++) {
3056 		rxq = &sc->vtnet_rxqs[i];
3057 		error = taskqueue_start_threads(&rxq->vtnrx_tq, 1, PI_NET,
3058 		    "%s rxq %d", device_get_nameunit(dev), rxq->vtnrx_id);
3059 		if (error) {
3060 			device_printf(dev, "failed to start rx taskq %d\n",
3061 			    rxq->vtnrx_id);
3062 		}
3063 
3064 		txq = &sc->vtnet_txqs[i];
3065 		error = taskqueue_start_threads(&txq->vtntx_tq, 1, PI_NET,
3066 		    "%s txq %d", device_get_nameunit(dev), txq->vtntx_id);
3067 		if (error) {
3068 			device_printf(dev, "failed to start tx taskq %d\n",
3069 			    txq->vtntx_id);
3070 		}
3071 	}
3072 }
3073 
3074 static void
3075 vtnet_free_taskqueues(struct vtnet_softc *sc)
3076 {
3077 	struct vtnet_rxq *rxq;
3078 	struct vtnet_txq *txq;
3079 	int i;
3080 
3081 	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
3082 		rxq = &sc->vtnet_rxqs[i];
3083 		if (rxq->vtnrx_tq != NULL) {
3084 			taskqueue_free(rxq->vtnrx_tq);
3085 			rxq->vtnrx_tq = NULL;
3086 		}
3087 
3088 		txq = &sc->vtnet_txqs[i];
3089 		if (txq->vtntx_tq != NULL) {
3090 			taskqueue_free(txq->vtntx_tq);
3091 			txq->vtntx_tq = NULL;
3092 		}
3093 	}
3094 }
3095 
3096 static void
3097 vtnet_drain_taskqueues(struct vtnet_softc *sc)
3098 {
3099 	struct vtnet_rxq *rxq;
3100 	struct vtnet_txq *txq;
3101 	int i;
3102 
3103 	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
3104 		rxq = &sc->vtnet_rxqs[i];
3105 		if (rxq->vtnrx_tq != NULL)
3106 			taskqueue_drain(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
3107 
3108 		txq = &sc->vtnet_txqs[i];
3109 		if (txq->vtntx_tq != NULL) {
3110 			taskqueue_drain(txq->vtntx_tq, &txq->vtntx_intrtask);
3111 #ifndef VTNET_LEGACY_TX
3112 			taskqueue_drain(txq->vtntx_tq, &txq->vtntx_defrtask);
3113 #endif
3114 		}
3115 	}
3116 }
3117 
3118 static void
3119 vtnet_drain_rxtx_queues(struct vtnet_softc *sc)
3120 {
3121 	struct vtnet_rxq *rxq;
3122 	struct vtnet_txq *txq;
3123 	int i;
3124 
3125 	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
3126 		rxq = &sc->vtnet_rxqs[i];
3127 		vtnet_rxq_free_mbufs(rxq);
3128 
3129 		txq = &sc->vtnet_txqs[i];
3130 		vtnet_txq_free_mbufs(txq);
3131 	}
3132 }
3133 
3134 static void
3135 vtnet_stop_rendezvous(struct vtnet_softc *sc)
3136 {
3137 	struct vtnet_rxq *rxq;
3138 	struct vtnet_txq *txq;
3139 	int i;
3140 
3141 	VTNET_CORE_LOCK_ASSERT(sc);
3142 
3143 	/*
3144 	 * Lock and unlock the per-queue mutex so we know the stop
3145 	 * state is visible. Doing only the active queues should be
3146 	 * sufficient, but it does not cost much extra to do all the
3147 	 * queues.
3148 	 */
3149 	for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
3150 		rxq = &sc->vtnet_rxqs[i];
3151 		VTNET_RXQ_LOCK(rxq);
3152 		VTNET_RXQ_UNLOCK(rxq);
3153 
3154 		txq = &sc->vtnet_txqs[i];
3155 		VTNET_TXQ_LOCK(txq);
3156 		VTNET_TXQ_UNLOCK(txq);
3157 	}
3158 }
3159 
3160 static void
3161 vtnet_stop(struct vtnet_softc *sc)
3162 {
3163 	device_t dev;
3164 	struct ifnet *ifp;
3165 
3166 	dev = sc->vtnet_dev;
3167 	ifp = sc->vtnet_ifp;
3168 
3169 	VTNET_CORE_LOCK_ASSERT(sc);
3170 
3171 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3172 	sc->vtnet_link_active = 0;
3173 	callout_stop(&sc->vtnet_tick_ch);
3174 
3175 	/* Only advisory. */
3176 	vtnet_disable_interrupts(sc);
3177 
3178 #ifdef DEV_NETMAP
3179 	/* Stop any pending txsync/rxsync and disable them. */
3180 	netmap_disable_all_rings(ifp);
3181 #endif /* DEV_NETMAP */
3182 
3183 	/*
3184 	 * Stop the host adapter. This resets it to the pre-initialized
3185 	 * state. It will not generate any interrupts until after it is
3186 	 * reinitialized.
3187 	 */
3188 	virtio_stop(dev);
3189 	vtnet_stop_rendezvous(sc);
3190 
3191 	vtnet_drain_rxtx_queues(sc);
3192 	sc->vtnet_act_vq_pairs = 1;
3193 }
3194 
3195 static int
3196 vtnet_virtio_reinit(struct vtnet_softc *sc)
3197 {
3198 	device_t dev;
3199 	struct ifnet *ifp;
3200 	uint64_t features;
3201 	int error;
3202 
3203 	dev = sc->vtnet_dev;
3204 	ifp = sc->vtnet_ifp;
3205 	features = sc->vtnet_negotiated_features;
3206 
3207 	/*
3208 	 * Re-negotiate with the host, removing any disabled receive
3209 	 * features. Transmit features are disabled only on our side
3210 	 * via if_capenable and if_hwassist.
3211 	 */
3212 
3213 	if ((ifp->if_capenable & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) == 0)
3214 		features &= ~(VIRTIO_NET_F_GUEST_CSUM | VTNET_LRO_FEATURES);
3215 
3216 	if ((ifp->if_capenable & IFCAP_LRO) == 0)
3217 		features &= ~VTNET_LRO_FEATURES;
3218 
3219 	if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
3220 		features &= ~VIRTIO_NET_F_CTRL_VLAN;
3221 
3222 	error = virtio_reinit(dev, features);
3223 	if (error) {
3224 		device_printf(dev, "virtio reinit error %d\n", error);
3225 		return (error);
3226 	}
3227 
3228 	sc->vtnet_features = features;
3229 	virtio_reinit_complete(dev);
3230 
3231 	return (0);
3232 }
3233 
3234 static void
3235 vtnet_init_rx_filters(struct vtnet_softc *sc)
3236 {
3237 	struct ifnet *ifp;
3238 
3239 	ifp = sc->vtnet_ifp;
3240 
3241 	if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
3242 		vtnet_rx_filter(sc);
3243 		vtnet_rx_filter_mac(sc);
3244 	}
3245 
3246 	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
3247 		vtnet_rx_filter_vlan(sc);
3248 }
3249 
3250 static int
3251 vtnet_init_rx_queues(struct vtnet_softc *sc)
3252 {
3253 	device_t dev;
3254 	struct ifnet *ifp;
3255 	struct vtnet_rxq *rxq;
3256 	int i, clustersz, error;
3257 
3258 	dev = sc->vtnet_dev;
3259 	ifp = sc->vtnet_ifp;
3260 
3261 	clustersz = vtnet_rx_cluster_size(sc, ifp->if_mtu);
3262 	sc->vtnet_rx_clustersz = clustersz;
3263 
3264 	if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG) {
3265 		sc->vtnet_rx_nmbufs = howmany(sizeof(struct vtnet_rx_header) +
3266 		    VTNET_MAX_RX_SIZE, clustersz);
3267 		KASSERT(sc->vtnet_rx_nmbufs < sc->vtnet_rx_nsegs,
3268 		    ("%s: too many rx mbufs %d for %d segments", __func__,
3269 		    sc->vtnet_rx_nmbufs, sc->vtnet_rx_nsegs));
3270 	} else
3271 		sc->vtnet_rx_nmbufs = 1;
3272 
3273 	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
3274 		rxq = &sc->vtnet_rxqs[i];
3275 
3276 		/* Hold the lock to satisfy asserts. */
3277 		VTNET_RXQ_LOCK(rxq);
3278 		error = vtnet_rxq_populate(rxq);
3279 		VTNET_RXQ_UNLOCK(rxq);
3280 
3281 		if (error) {
3282 			device_printf(dev, "cannot populate Rx queue %d\n", i);
3283 			return (error);
3284 		}
3285 	}
3286 
3287 	return (0);
3288 }
3289 
3290 static int
3291 vtnet_init_tx_queues(struct vtnet_softc *sc)
3292 {
3293 	struct vtnet_txq *txq;
3294 	int i;
3295 
3296 	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
3297 		txq = &sc->vtnet_txqs[i];
3298 		txq->vtntx_watchdog = 0;
3299 		txq->vtntx_intr_threshold = vtnet_txq_intr_threshold(txq);
3300 #ifdef DEV_NETMAP
3301 		netmap_reset(NA(sc->vtnet_ifp), NR_TX, i, 0);
3302 #endif /* DEV_NETMAP */
3303 	}
3304 
3305 	return (0);
3306 }
3307 
3308 static int
3309 vtnet_init_rxtx_queues(struct vtnet_softc *sc)
3310 {
3311 	int error;
3312 
3313 	error = vtnet_init_rx_queues(sc);
3314 	if (error)
3315 		return (error);
3316 
3317 	error = vtnet_init_tx_queues(sc);
3318 	if (error)
3319 		return (error);
3320 
3321 	return (0);
3322 }
3323 
3324 static void
3325 vtnet_set_active_vq_pairs(struct vtnet_softc *sc)
3326 {
3327 	device_t dev;
3328 	int npairs;
3329 
3330 	dev = sc->vtnet_dev;
3331 
3332 	if ((sc->vtnet_flags & VTNET_FLAG_MQ) == 0) {
3333 		sc->vtnet_act_vq_pairs = 1;
3334 		return;
3335 	}
3336 
3337 	npairs = sc->vtnet_req_vq_pairs;
3338 
3339 	if (vtnet_ctrl_mq_cmd(sc, npairs) != 0) {
3340 		device_printf(dev, "cannot set active queue pairs to %d, "
3341 		    "falling back to 1 queue pair\n", npairs);
3342 		npairs = 1;
3343 	}
3344 
3345 	sc->vtnet_act_vq_pairs = npairs;
3346 }
3347 
3348 static void
3349 vtnet_update_rx_offloads(struct vtnet_softc *sc)
3350 {
3351 	struct ifnet *ifp;
3352 	uint64_t features;
3353 	int error;
3354 
3355 	ifp = sc->vtnet_ifp;
3356 	features = sc->vtnet_features;
3357 
3358 	VTNET_CORE_LOCK_ASSERT(sc);
3359 
3360 	if (ifp->if_capabilities & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
3361 		if (ifp->if_capenable & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6))
3362 			features |= VIRTIO_NET_F_GUEST_CSUM;
3363 		else
3364 			features &= ~VIRTIO_NET_F_GUEST_CSUM;
3365 	}
3366 
3367 	if ((ifp->if_capabilities & IFCAP_LRO) && !vtnet_software_lro(sc)) {
3368 		if (ifp->if_capenable & IFCAP_LRO)
3369 			features |= VTNET_LRO_FEATURES;
3370 		else
3371 			features &= ~VTNET_LRO_FEATURES;
3372 	}
3373 
3374 	error = vtnet_ctrl_guest_offloads(sc,
3375 	    features & (VIRTIO_NET_F_GUEST_CSUM | VIRTIO_NET_F_GUEST_TSO4 |
3376 		        VIRTIO_NET_F_GUEST_TSO6 | VIRTIO_NET_F_GUEST_ECN  |
3377 			VIRTIO_NET_F_GUEST_UFO));
3378 	if (error) {
3379 		device_printf(sc->vtnet_dev,
3380 		    "%s: cannot update Rx features\n", __func__);
3381 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3382 			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3383 			vtnet_init_locked(sc, 0);
3384 		}
3385 	} else
3386 		sc->vtnet_features = features;
3387 }
3388 
3389 static int
3390 vtnet_reinit(struct vtnet_softc *sc)
3391 {
3392 	device_t dev;
3393 	struct ifnet *ifp;
3394 	int error;
3395 
3396 	dev = sc->vtnet_dev;
3397 	ifp = sc->vtnet_ifp;
3398 
3399 	bcopy(IF_LLADDR(ifp), sc->vtnet_hwaddr, ETHER_ADDR_LEN);
3400 
3401 	error = vtnet_virtio_reinit(sc);
3402 	if (error)
3403 		return (error);
3404 
3405 	vtnet_set_macaddr(sc);
3406 	vtnet_set_active_vq_pairs(sc);
3407 
3408 	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ)
3409 		vtnet_init_rx_filters(sc);
3410 
3411 	ifp->if_hwassist = 0;
3412 	if (ifp->if_capenable & IFCAP_TXCSUM)
3413 		ifp->if_hwassist |= VTNET_CSUM_OFFLOAD;
3414 	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
3415 		ifp->if_hwassist |= VTNET_CSUM_OFFLOAD_IPV6;
3416 	if (ifp->if_capenable & IFCAP_TSO4)
3417 		ifp->if_hwassist |= CSUM_IP_TSO;
3418 	if (ifp->if_capenable & IFCAP_TSO6)
3419 		ifp->if_hwassist |= CSUM_IP6_TSO;
3420 
3421 	error = vtnet_init_rxtx_queues(sc);
3422 	if (error)
3423 		return (error);
3424 
3425 	return (0);
3426 }
3427 
3428 static void
3429 vtnet_init_locked(struct vtnet_softc *sc, int init_mode)
3430 {
3431 	device_t dev;
3432 	struct ifnet *ifp;
3433 
3434 	dev = sc->vtnet_dev;
3435 	ifp = sc->vtnet_ifp;
3436 
3437 	VTNET_CORE_LOCK_ASSERT(sc);
3438 
3439 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3440 		return;
3441 
3442 	vtnet_stop(sc);
3443 
3444 #ifdef DEV_NETMAP
3445 	/* Once stopped we can update the netmap flags, if necessary. */
3446 	switch (init_mode) {
3447 	case VTNET_INIT_NETMAP_ENTER:
3448 		nm_set_native_flags(NA(ifp));
3449 		break;
3450 	case VTNET_INIT_NETMAP_EXIT:
3451 		nm_clear_native_flags(NA(ifp));
3452 		break;
3453 	}
3454 #endif /* DEV_NETMAP */
3455 
3456 	if (vtnet_reinit(sc) != 0) {
3457 		vtnet_stop(sc);
3458 		return;
3459 	}
3460 
3461 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
3462 	vtnet_update_link_status(sc);
3463 	vtnet_enable_interrupts(sc);
3464 	callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc);
3465 
3466 #ifdef DEV_NETMAP
3467 	/* Re-enable txsync/rxsync. */
3468 	netmap_enable_all_rings(ifp);
3469 #endif /* DEV_NETMAP */
3470 }
3471 
3472 static void
3473 vtnet_init(void *xsc)
3474 {
3475 	struct vtnet_softc *sc;
3476 
3477 	sc = xsc;
3478 
3479 	VTNET_CORE_LOCK(sc);
3480 	vtnet_init_locked(sc, 0);
3481 	VTNET_CORE_UNLOCK(sc);
3482 }
3483 
3484 static void
3485 vtnet_free_ctrl_vq(struct vtnet_softc *sc)
3486 {
3487 
3488 	/*
3489 	 * The control virtqueue is only polled and therefore it should
3490 	 * already be empty.
3491 	 */
3492 	KASSERT(virtqueue_empty(sc->vtnet_ctrl_vq),
3493 	    ("%s: ctrl vq %p not empty", __func__, sc->vtnet_ctrl_vq));
3494 }
3495 
3496 static void
3497 vtnet_exec_ctrl_cmd(struct vtnet_softc *sc, void *cookie,
3498     struct sglist *sg, int readable, int writable)
3499 {
3500 	struct virtqueue *vq;
3501 
3502 	vq = sc->vtnet_ctrl_vq;
3503 
3504 	MPASS(sc->vtnet_flags & VTNET_FLAG_CTRL_VQ);
3505 	VTNET_CORE_LOCK_ASSERT(sc);
3506 
3507 	if (!virtqueue_empty(vq))
3508 		return;
3509 
3510 	/*
3511 	 * Poll for the response, though the host has likely already
3512 	 * completed the command by the time the notify returns.
3513 	 */
3514 	if (virtqueue_enqueue(vq, cookie, sg, readable, writable) == 0)  {
3515 		virtqueue_notify(vq);
3516 		virtqueue_poll(vq, NULL);
3517 	}
3518 }
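
/*
 * All control commands below share the same sglist shape, e.g. for
 * VIRTIO_NET_CTRL_RX_PROMISC:
 *
 *   seg 0 (readable): struct virtio_net_ctrl_hdr { class, cmd }
 *   seg 1 (readable): the command payload (here a uint8_t on/off)
 *   seg 2 (writable): a uint8_t ack the host sets to VIRTIO_NET_OK/ERR
 *
 * which is why every caller passes "sg.sg_nseg - 1" readable segments
 * and exactly one writable segment.
 */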
3519 
3520 static int
3521 vtnet_ctrl_mac_cmd(struct vtnet_softc *sc, uint8_t *hwaddr)
3522 {
3523 	struct sglist_seg segs[3];
3524 	struct sglist sg;
3525 	struct {
3526 		struct virtio_net_ctrl_hdr hdr __aligned(2);
3527 		uint8_t pad1;
3528 		uint8_t addr[ETHER_ADDR_LEN] __aligned(8);
3529 		uint8_t pad2;
3530 		uint8_t ack;
3531 	} s;
3532 	int error;
3533 
3534 	error = 0;
3535 	MPASS(sc->vtnet_flags & VTNET_FLAG_CTRL_MAC);
3536 
3537 	s.hdr.class = VIRTIO_NET_CTRL_MAC;
3538 	s.hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;
3539 	bcopy(hwaddr, &s.addr[0], ETHER_ADDR_LEN);
3540 	s.ack = VIRTIO_NET_ERR;
3541 
3542 	sglist_init(&sg, nitems(segs), segs);
3543 	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
3544 	error |= sglist_append(&sg, &s.addr[0], ETHER_ADDR_LEN);
3545 	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
3546 	MPASS(error == 0 && sg.sg_nseg == nitems(segs));
3547 
3548 	if (error == 0)
3549 		vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
3550 
3551 	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
3552 }
3553 
3554 static int
3555 vtnet_ctrl_guest_offloads(struct vtnet_softc *sc, uint64_t offloads)
3556 {
3557 	struct sglist_seg segs[3];
3558 	struct sglist sg;
3559 	struct {
3560 		struct virtio_net_ctrl_hdr hdr __aligned(2);
3561 		uint8_t pad1;
3562 		uint64_t offloads __aligned(8);
3563 		uint8_t pad2;
3564 		uint8_t ack;
3565 	} s;
3566 	int error;
3567 
3568 	error = 0;
3569 	MPASS(sc->vtnet_features & VIRTIO_NET_F_CTRL_GUEST_OFFLOADS);
3570 
3571 	s.hdr.class = VIRTIO_NET_CTRL_GUEST_OFFLOADS;
3572 	s.hdr.cmd = VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET;
3573 	s.offloads = vtnet_gtoh64(sc, offloads);
3574 	s.ack = VIRTIO_NET_ERR;
3575 
3576 	sglist_init(&sg, nitems(segs), segs);
3577 	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
3578 	error |= sglist_append(&sg, &s.offloads, sizeof(uint64_t));
3579 	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
3580 	MPASS(error == 0 && sg.sg_nseg == nitems(segs));
3581 
3582 	if (error == 0)
3583 		vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
3584 
3585 	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
3586 }
3587 
3588 static int
3589 vtnet_ctrl_mq_cmd(struct vtnet_softc *sc, uint16_t npairs)
3590 {
3591 	struct sglist_seg segs[3];
3592 	struct sglist sg;
3593 	struct {
3594 		struct virtio_net_ctrl_hdr hdr __aligned(2);
3595 		uint8_t pad1;
3596 		struct virtio_net_ctrl_mq mq __aligned(2);
3597 		uint8_t pad2;
3598 		uint8_t ack;
3599 	} s;
3600 	int error;
3601 
3602 	error = 0;
3603 	MPASS(sc->vtnet_flags & VTNET_FLAG_MQ);
3604 
3605 	s.hdr.class = VIRTIO_NET_CTRL_MQ;
3606 	s.hdr.cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET;
3607 	s.mq.virtqueue_pairs = vtnet_gtoh16(sc, npairs);
3608 	s.ack = VIRTIO_NET_ERR;
3609 
3610 	sglist_init(&sg, nitems(segs), segs);
3611 	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
3612 	error |= sglist_append(&sg, &s.mq, sizeof(struct virtio_net_ctrl_mq));
3613 	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
3614 	MPASS(error == 0 && sg.sg_nseg == nitems(segs));
3615 
3616 	if (error == 0)
3617 		vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
3618 
3619 	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
3620 }
3621 
3622 static int
3623 vtnet_ctrl_rx_cmd(struct vtnet_softc *sc, uint8_t cmd, bool on)
3624 {
3625 	struct sglist_seg segs[3];
3626 	struct sglist sg;
3627 	struct {
3628 		struct virtio_net_ctrl_hdr hdr __aligned(2);
3629 		uint8_t pad1;
3630 		uint8_t onoff;
3631 		uint8_t pad2;
3632 		uint8_t ack;
3633 	} s;
3634 	int error;
3635 
3636 	error = 0;
3637 	MPASS(sc->vtnet_flags & VTNET_FLAG_CTRL_RX);
3638 
3639 	s.hdr.class = VIRTIO_NET_CTRL_RX;
3640 	s.hdr.cmd = cmd;
3641 	s.onoff = on;
3642 	s.ack = VIRTIO_NET_ERR;
3643 
3644 	sglist_init(&sg, nitems(segs), segs);
3645 	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
3646 	error |= sglist_append(&sg, &s.onoff, sizeof(uint8_t));
3647 	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
3648 	MPASS(error == 0 && sg.sg_nseg == nitems(segs));
3649 
3650 	if (error == 0)
3651 		vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
3652 
3653 	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
3654 }
3655 
3656 static int
3657 vtnet_set_promisc(struct vtnet_softc *sc, bool on)
3658 {
3659 	return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_PROMISC, on));
3660 }
3661 
3662 static int
3663 vtnet_set_allmulti(struct vtnet_softc *sc, bool on)
3664 {
3665 	return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, on));
3666 }
3667 
3668 static void
3669 vtnet_rx_filter(struct vtnet_softc *sc)
3670 {
3671 	device_t dev;
3672 	struct ifnet *ifp;
3673 
3674 	dev = sc->vtnet_dev;
3675 	ifp = sc->vtnet_ifp;
3676 
3677 	VTNET_CORE_LOCK_ASSERT(sc);
3678 
3679 	if (vtnet_set_promisc(sc, ifp->if_flags & IFF_PROMISC) != 0) {
3680 		device_printf(dev, "cannot %s promiscuous mode\n",
3681 		    ifp->if_flags & IFF_PROMISC ? "enable" : "disable");
3682 	}
3683 
3684 	if (vtnet_set_allmulti(sc, ifp->if_flags & IFF_ALLMULTI) != 0) {
3685 		device_printf(dev, "cannot %s all-multicast mode\n",
3686 		    ifp->if_flags & IFF_ALLMULTI ? "enable" : "disable");
3687 	}
3688 }
3689 
3690 static u_int
3691 vtnet_copy_ifaddr(void *arg, struct sockaddr_dl *sdl, u_int ucnt)
3692 {
3693 	struct vtnet_softc *sc = arg;
3694 
3695 	if (memcmp(LLADDR(sdl), sc->vtnet_hwaddr, ETHER_ADDR_LEN) == 0)
3696 		return (0);
3697 
3698 	if (ucnt < VTNET_MAX_MAC_ENTRIES)
3699 		bcopy(LLADDR(sdl),
3700 		    &sc->vtnet_mac_filter->vmf_unicast.macs[ucnt],
3701 		    ETHER_ADDR_LEN);
3702 
3703 	return (1);
3704 }
3705 
3706 static u_int
3707 vtnet_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int mcnt)
3708 {
3709 	struct vtnet_mac_filter *filter = arg;
3710 
3711 	if (mcnt < VTNET_MAX_MAC_ENTRIES)
3712 		bcopy(LLADDR(sdl), &filter->vmf_multicast.macs[mcnt],
3713 		    ETHER_ADDR_LEN);
3714 
3715 	return (1);
3716 }
3717 
3718 static void
3719 vtnet_rx_filter_mac(struct vtnet_softc *sc)
3720 {
3721 	struct virtio_net_ctrl_hdr hdr __aligned(2);
3722 	struct vtnet_mac_filter *filter;
3723 	struct sglist_seg segs[4];
3724 	struct sglist sg;
3725 	struct ifnet *ifp;
3726 	bool promisc, allmulti;
3727 	u_int ucnt, mcnt;
3728 	int error;
3729 	uint8_t ack;
3730 
3731 	ifp = sc->vtnet_ifp;
3732 	filter = sc->vtnet_mac_filter;
3733 	error = 0;
3734 
3735 	MPASS(sc->vtnet_flags & VTNET_FLAG_CTRL_RX);
3736 	VTNET_CORE_LOCK_ASSERT(sc);
3737 
3738 	/* Unicast MAC addresses: */
3739 	ucnt = if_foreach_lladdr(ifp, vtnet_copy_ifaddr, sc);
3740 	promisc = (ucnt > VTNET_MAX_MAC_ENTRIES);
3741 
3742 	if (promisc) {
3743 		ucnt = 0;
3744 		if_printf(ifp, "more than %d MAC addresses assigned, "
3745 		    "falling back to promiscuous mode\n",
3746 		    VTNET_MAX_MAC_ENTRIES);
3747 	}
3748 
3749 	/* Multicast MAC addresses: */
3750 	mcnt = if_foreach_llmaddr(ifp, vtnet_copy_maddr, filter);
3751 	allmulti = (mcnt > VTNET_MAX_MAC_ENTRIES);
3752 
3753 	if (allmulti) {
3754 		mcnt = 0;
3755 		if_printf(ifp, "more than %d multicast MAC addresses "
3756 		    "assigned, falling back to all-multicast mode\n",
3757 		    VTNET_MAX_MAC_ENTRIES);
3758 	}
3759 
3760 	if (promisc && allmulti)
3761 		goto out;
3762 
3763 	filter->vmf_unicast.nentries = vtnet_gtoh32(sc, ucnt);
3764 	filter->vmf_multicast.nentries = vtnet_gtoh32(sc, mcnt);
3765 
3766 	hdr.class = VIRTIO_NET_CTRL_MAC;
3767 	hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
3768 	ack = VIRTIO_NET_ERR;
3769 
3770 	sglist_init(&sg, nitems(segs), segs);
3771 	error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
3772 	error |= sglist_append(&sg, &filter->vmf_unicast,
3773 	    sizeof(uint32_t) + ucnt * ETHER_ADDR_LEN);
3774 	error |= sglist_append(&sg, &filter->vmf_multicast,
3775 	    sizeof(uint32_t) + mcnt * ETHER_ADDR_LEN);
3776 	error |= sglist_append(&sg, &ack, sizeof(uint8_t));
3777 	MPASS(error == 0 && sg.sg_nseg == nitems(segs));
3778 
3779 	if (error == 0)
3780 		vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);
3781 	if (ack != VIRTIO_NET_OK)
3782 		if_printf(ifp, "error setting host MAC filter table\n");
3783 
3784 out:
3785 	if (promisc != 0 && vtnet_set_promisc(sc, true) != 0)
3786 		if_printf(ifp, "cannot enable promiscuous mode\n");
3787 	if (allmulti != 0 && vtnet_set_allmulti(sc, true) != 0)
3788 		if_printf(ifp, "cannot enable all-multicast mode\n");
3789 }
3790 
3791 static int
3792 vtnet_exec_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
3793 {
3794 	struct sglist_seg segs[3];
3795 	struct sglist sg;
3796 	struct {
3797 		struct virtio_net_ctrl_hdr hdr __aligned(2);
3798 		uint8_t pad1;
3799 		uint16_t tag __aligned(2);
3800 		uint8_t pad2;
3801 		uint8_t ack;
3802 	} s;
3803 	int error;
3804 
3805 	error = 0;
3806 	MPASS(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER);
3807 
3808 	s.hdr.class = VIRTIO_NET_CTRL_VLAN;
3809 	s.hdr.cmd = add ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL;
3810 	s.tag = vtnet_gtoh16(sc, tag);
3811 	s.ack = VIRTIO_NET_ERR;
3812 
3813 	sglist_init(&sg, nitems(segs), segs);
3814 	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
3815 	error |= sglist_append(&sg, &s.tag, sizeof(uint16_t));
3816 	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
3817 	MPASS(error == 0 && sg.sg_nseg == nitems(segs));
3818 
3819 	if (error == 0)
3820 		vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
3821 
3822 	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
3823 }
3824 
3825 static void
3826 vtnet_rx_filter_vlan(struct vtnet_softc *sc)
3827 {
3828 	int i, bit;
3829 	uint32_t w;
3830 	uint16_t tag;
3831 
3832 	MPASS(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER);
3833 	VTNET_CORE_LOCK_ASSERT(sc);
3834 
3835 	/* Enable the filter for each configured VLAN. */
3836 	for (i = 0; i < VTNET_VLAN_FILTER_NWORDS; i++) {
3837 		w = sc->vtnet_vlan_filter[i];
3838 
3839 		while ((bit = ffs(w) - 1) != -1) {
3840 			w &= ~(1 << bit);
3841 			tag = sizeof(w) * CHAR_BIT * i + bit;
3842 
3843 			if (vtnet_exec_vlan_filter(sc, 1, tag) != 0) {
3844 				device_printf(sc->vtnet_dev,
3845 				    "cannot enable VLAN %d filter\n", tag);
3846 			}
3847 		}
3848 	}
3849 }
3850 
3851 static void
3852 vtnet_update_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
3853 {
3854 	struct ifnet *ifp;
3855 	int idx, bit;
3856 
3857 	ifp = sc->vtnet_ifp;
3858 	idx = (tag >> 5) & 0x7F;
3859 	bit = tag & 0x1F;
3860 
3861 	if (tag == 0 || tag > 4095)
3862 		return;
3863 
3864 	VTNET_CORE_LOCK(sc);
3865 
3866 	if (add)
3867 		sc->vtnet_vlan_filter[idx] |= (1 << bit);
3868 	else
3869 		sc->vtnet_vlan_filter[idx] &= ~(1 << bit);
3870 
3871 	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER &&
3872 	    ifp->if_drv_flags & IFF_DRV_RUNNING &&
3873 	    vtnet_exec_vlan_filter(sc, add, tag) != 0) {
3874 		device_printf(sc->vtnet_dev,
3875 		    "cannot %s VLAN %d %s the host filter table\n",
3876 		    add ? "add" : "remove", tag, add ? "to" : "from");
3877 	}
3878 
3879 	VTNET_CORE_UNLOCK(sc);
3880 }
3881 
3882 static void
3883 vtnet_register_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
3884 {
3885 
3886 	if (ifp->if_softc != arg)
3887 		return;
3888 
3889 	vtnet_update_vlan_filter(arg, 1, tag);
3890 }
3891 
3892 static void
3893 vtnet_unregister_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
3894 {
3895 
3896 	if (ifp->if_softc != arg)
3897 		return;
3898 
3899 	vtnet_update_vlan_filter(arg, 0, tag);
3900 }
3901 
3902 static void
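/*
 * With VIRTIO_NET_F_SPEED_DUPLEX the device reports its link speed in
 * Mbps in config space; per the VirtIO spec a value of 0xffffffff
 * (UINT32_MAX) means the speed is unknown, in which case the baudrate
 * is left unchanged.
 */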
3903 vtnet_update_speed_duplex(struct vtnet_softc *sc)
3904 {
3905 	struct ifnet *ifp;
3906 	uint32_t speed;
3907 
3908 	ifp = sc->vtnet_ifp;
3909 
3910 	if ((sc->vtnet_features & VIRTIO_NET_F_SPEED_DUPLEX) == 0)
3911 		return;
3912 
3913 	/* BMV: Ignore duplex. */
3914 	speed = virtio_read_dev_config_4(sc->vtnet_dev,
3915 	    offsetof(struct virtio_net_config, speed));
3916 	if (speed != UINT32_MAX)
3917 		ifp->if_baudrate = IF_Mbps(speed);
3918 }
3919 
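/*
 * Without VIRTIO_NET_F_STATUS there is no status field to read, so the
 * link is assumed to always be up.
 */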
3920 static int
3921 vtnet_is_link_up(struct vtnet_softc *sc)
3922 {
3923 	uint16_t status;
3924 
3925 	if ((sc->vtnet_features & VIRTIO_NET_F_STATUS) == 0)
3926 		return (1);
3927 
3928 	status = virtio_read_dev_config_2(sc->vtnet_dev,
3929 	    offsetof(struct virtio_net_config, status));
3930 
3931 	return ((status & VIRTIO_NET_S_LINK_UP) != 0);
3932 }
3933 
3934 static void
3935 vtnet_update_link_status(struct vtnet_softc *sc)
3936 {
3937 	struct ifnet *ifp;
3938 	int link;
3939 
3940 	ifp = sc->vtnet_ifp;
3941 	VTNET_CORE_LOCK_ASSERT(sc);
3942 	link = vtnet_is_link_up(sc);
3943 
3944 	/* Notify if the link status has changed. */
3945 	if (link != 0 && sc->vtnet_link_active == 0) {
3946 		vtnet_update_speed_duplex(sc);
3947 		sc->vtnet_link_active = 1;
3948 		if_link_state_change(ifp, LINK_STATE_UP);
3949 	} else if (link == 0 && sc->vtnet_link_active != 0) {
3950 		sc->vtnet_link_active = 0;
3951 		if_link_state_change(ifp, LINK_STATE_DOWN);
3952 	}
3953 }
3954 
3955 static int
3956 vtnet_ifmedia_upd(struct ifnet *ifp __unused)
3957 {
3958 	return (EOPNOTSUPP);
3959 }
3960 
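/*
 * VirtIO has no real PHY or selectable media, so media changes are
 * rejected (EOPNOTSUPP above) and an active link is reported as a
 * synthetic 10G full-duplex medium.
 */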
3961 static void
3962 vtnet_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3963 {
3964 	struct vtnet_softc *sc;
3965 
3966 	sc = ifp->if_softc;
3967 
3968 	ifmr->ifm_status = IFM_AVALID;
3969 	ifmr->ifm_active = IFM_ETHER;
3970 
3971 	VTNET_CORE_LOCK(sc);
3972 	if (vtnet_is_link_up(sc) != 0) {
3973 		ifmr->ifm_status |= IFM_ACTIVE;
3974 		ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
3975 	} else
3976 		ifmr->ifm_active |= IFM_NONE;
3977 	VTNET_CORE_UNLOCK(sc);
3978 }
3979 
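/*
 * Read the host-provided MAC address, or synthesize one if the device
 * does not supply it.  The fixed 0xB2 first octet has the locally
 * administered bit (0x02) set and the multicast bit (0x01) clear, so
 * the random address is a valid unicast LAA.
 */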
3980 static void
3981 vtnet_get_macaddr(struct vtnet_softc *sc)
3982 {
3983 
3984 	if (sc->vtnet_flags & VTNET_FLAG_MAC) {
3985 		virtio_read_device_config_array(sc->vtnet_dev,
3986 		    offsetof(struct virtio_net_config, mac),
3987 		    &sc->vtnet_hwaddr[0], sizeof(uint8_t), ETHER_ADDR_LEN);
3988 	} else {
3989 		/* Generate a random locally administered unicast address. */
3990 		sc->vtnet_hwaddr[0] = 0xB2;
3991 		arc4rand(&sc->vtnet_hwaddr[1], ETHER_ADDR_LEN - 1, 0);
3992 	}
3993 }
3994 
3995 static void
3996 vtnet_set_macaddr(struct vtnet_softc *sc)
3997 {
3998 	device_t dev;
3999 	int error;
4000 
4001 	dev = sc->vtnet_dev;
4002 
4003 	if (sc->vtnet_flags & VTNET_FLAG_CTRL_MAC) {
4004 		error = vtnet_ctrl_mac_cmd(sc, sc->vtnet_hwaddr);
4005 		if (error)
4006 			device_printf(dev, "unable to set MAC address\n");
4007 		return;
4008 	}
4009 
4010 	/* MAC in config is read-only in modern VirtIO. */
4011 	if (!vtnet_modern(sc) && sc->vtnet_flags & VTNET_FLAG_MAC) {
4012 		for (int i = 0; i < ETHER_ADDR_LEN; i++) {
4013 			virtio_write_dev_config_1(dev,
4014 			    offsetof(struct virtio_net_config, mac) + i,
4015 			    sc->vtnet_hwaddr[i]);
4016 		}
4017 	}
4018 }
4019 
4020 static void
4021 vtnet_attached_set_macaddr(struct vtnet_softc *sc)
4022 {
4023 
4024 	/* Push a locally generated MAC address to the host. */
4025 	if ((sc->vtnet_flags & VTNET_FLAG_MAC) == 0)
4026 		vtnet_set_macaddr(sc);
4027 }
4028 
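/*
 * Untag a frame in software when the host did not strip the 802.1Q
 * header.  The 12 bytes of destination and source MAC (ETHER_HDR_LEN -
 * ETHER_TYPE_LEN) are slid 4 bytes toward the payload, overwriting the
 * TPID/TCI, and m_adj() then trims the now-dead leading 4 bytes.  The
 * tag itself is saved in the mbuf packet header for the stack.
 */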
4029 static void
4030 vtnet_vlan_tag_remove(struct mbuf *m)
4031 {
4032 	struct ether_vlan_header *evh;
4033 
4034 	evh = mtod(m, struct ether_vlan_header *);
4035 	m->m_pkthdr.ether_vtag = ntohs(evh->evl_tag);
4036 	m->m_flags |= M_VLANTAG;
4037 
4038 	/* Strip the 802.1Q header. */
4039 	bcopy((char *) evh, (char *) evh + ETHER_VLAN_ENCAP_LEN,
4040 	    ETHER_HDR_LEN - ETHER_TYPE_LEN);
4041 	m_adj(m, ETHER_VLAN_ENCAP_LEN);
4042 }
4043 
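/*
 * A negative rx_process_limit tunable disables the per-interrupt
 * processing limit by clamping it to INT_MAX.
 */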
4044 static void
4045 vtnet_set_rx_process_limit(struct vtnet_softc *sc)
4046 {
4047 	int limit;
4048 
4049 	limit = vtnet_tunable_int(sc, "rx_process_limit",
4050 	    vtnet_rx_process_limit);
4051 	if (limit < 0)
4052 		limit = INT_MAX;
4053 	sc->vtnet_rx_process_limit = limit;
4054 }
4055 
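/*
 * Export per-queue statistics under the device's sysctl tree.  With the
 * standard device sysctl layout these land under dev.vtnet.<unit>, for
 * example (illustrative, unit 0 assumed):
 *
 *	# sysctl dev.vtnet.0.rxq0.ipackets
 */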
4056 static void
4057 vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *ctx,
4058     struct sysctl_oid_list *child, struct vtnet_rxq *rxq)
4059 {
4060 	struct sysctl_oid *node;
4061 	struct sysctl_oid_list *list;
4062 	struct vtnet_rxq_stats *stats;
4063 	char namebuf[16];
4064 
4065 	snprintf(namebuf, sizeof(namebuf), "rxq%d", rxq->vtnrx_id);
4066 	node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4067 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Receive Queue");
4068 	list = SYSCTL_CHILDREN(node);
4069 
4070 	stats = &rxq->vtnrx_stats;
4071 
4072 	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ipackets", CTLFLAG_RD,
4073 	    &stats->vrxs_ipackets, "Receive packets");
4074 	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ibytes", CTLFLAG_RD,
4075 	    &stats->vrxs_ibytes, "Receive bytes");
4076 	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "iqdrops", CTLFLAG_RD,
4077 	    &stats->vrxs_iqdrops, "Receive drops");
4078 	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ierrors", CTLFLAG_RD,
4079 	    &stats->vrxs_ierrors, "Receive errors");
4080 	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD,
4081 	    &stats->vrxs_csum, "Receive checksum offloaded");
4082 	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum_failed", CTLFLAG_RD,
4083 	    &stats->vrxs_csum_failed, "Receive checksum offload failed");
4084 	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "host_lro", CTLFLAG_RD,
4085 	    &stats->vrxs_host_lro, "Receive packets coalesced by host LRO");
4086 	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD,
4087 	    &stats->vrxs_rescheduled,
4088 	    "Receive interrupt handler rescheduled");
4089 }
4090 
4091 static void
4092 vtnet_setup_txq_sysctl(struct sysctl_ctx_list *ctx,
4093     struct sysctl_oid_list *child, struct vtnet_txq *txq)
4094 {
4095 	struct sysctl_oid *node;
4096 	struct sysctl_oid_list *list;
4097 	struct vtnet_txq_stats *stats;
4098 	char namebuf[16];
4099 
4100 	snprintf(namebuf, sizeof(namebuf), "txq%d", txq->vtntx_id);
4101 	node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
4102 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Transmit Queue");
4103 	list = SYSCTL_CHILDREN(node);
4104 
4105 	stats = &txq->vtntx_stats;
4106 
4107 	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "opackets", CTLFLAG_RD,
4108 	    &stats->vtxs_opackets, "Transmit packets");
4109 	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "obytes", CTLFLAG_RD,
4110 	    &stats->vtxs_obytes, "Transmit bytes");
4111 	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "omcasts", CTLFLAG_RD,
4112 	    &stats->vtxs_omcasts, "Transmit multicasts");
4113 	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD,
4114 	    &stats->vtxs_csum, "Transmit checksum offloaded");
4115 	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "tso", CTLFLAG_RD,
4116 	    &stats->vtxs_tso, "Transmit TCP segmentation offloaded");
4117 	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD,
4118 	    &stats->vtxs_rescheduled,
4119 	    "Transmit interrupt handler rescheduled");
4120 }
4121 
4122 static void
4123 vtnet_setup_queue_sysctl(struct vtnet_softc *sc)
4124 {
4125 	device_t dev;
4126 	struct sysctl_ctx_list *ctx;
4127 	struct sysctl_oid *tree;
4128 	struct sysctl_oid_list *child;
4129 	int i;
4130 
4131 	dev = sc->vtnet_dev;
4132 	ctx = device_get_sysctl_ctx(dev);
4133 	tree = device_get_sysctl_tree(dev);
4134 	child = SYSCTL_CHILDREN(tree);
4135 
4136 	for (i = 0; i < sc->vtnet_req_vq_pairs; i++) {
4137 		vtnet_setup_rxq_sysctl(ctx, child, &sc->vtnet_rxqs[i]);
4138 		vtnet_setup_txq_sysctl(ctx, child, &sc->vtnet_txqs[i]);
4139 	}
4140 }
4141 
4142 static void
4143 vtnet_setup_stat_sysctl(struct sysctl_ctx_list *ctx,
4144     struct sysctl_oid_list *child, struct vtnet_softc *sc)
4145 {
4146 	struct vtnet_statistics *stats;
4147 	struct vtnet_rxq_stats rxaccum;
4148 	struct vtnet_txq_stats txaccum;
4149 
4150 	vtnet_accum_stats(sc, &rxaccum, &txaccum);
4151 
4152 	stats = &sc->vtnet_stats;
4153 	stats->rx_csum_offloaded = rxaccum.vrxs_csum;
4154 	stats->rx_csum_failed = rxaccum.vrxs_csum_failed;
4155 	stats->rx_task_rescheduled = rxaccum.vrxs_rescheduled;
4156 	stats->tx_csum_offloaded = txaccum.vtxs_csum;
4157 	stats->tx_tso_offloaded = txaccum.vtxs_tso;
4158 	stats->tx_task_rescheduled = txaccum.vtxs_rescheduled;
4159 
4160 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "mbuf_alloc_failed",
4161 	    CTLFLAG_RD, &stats->mbuf_alloc_failed,
4162 	    "Mbuf cluster allocation failures");
4163 
4164 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_frame_too_large",
4165 	    CTLFLAG_RD, &stats->rx_frame_too_large,
4166 	    "Received frame larger than the mbuf chain");
4167 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_enq_replacement_failed",
4168 	    CTLFLAG_RD, &stats->rx_enq_replacement_failed,
4169 	    "Enqueuing the replacement receive mbuf failed");
4170 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_mergeable_failed",
4171 	    CTLFLAG_RD, &stats->rx_mergeable_failed,
4172 	    "Mergeable receive buffer failures");
4173 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ethtype",
4174 	    CTLFLAG_RD, &stats->rx_csum_bad_ethtype,
4175 	    "Received checksum offloaded buffer with unsupported "
4176 	    "Ethernet type");
4177 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ipproto",
4178 	    CTLFLAG_RD, &stats->rx_csum_bad_ipproto,
4179 	    "Received checksum offloaded buffer with incorrect IP protocol");
4180 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_offset",
4181 	    CTLFLAG_RD, &stats->rx_csum_bad_offset,
4182 	    "Received checksum offloaded buffer with incorrect offset");
4183 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_proto",
4184 	    CTLFLAG_RD, &stats->rx_csum_bad_proto,
4185 	    "Received checksum offloaded buffer with incorrect protocol");
4186 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_failed",
4187 	    CTLFLAG_RD, &stats->rx_csum_failed,
4188 	    "Received buffer checksum offload failed");
4189 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_offloaded",
4190 	    CTLFLAG_RD, &stats->rx_csum_offloaded,
4191 	    "Received buffer checksum offload succeeded");
4192 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_task_rescheduled",
4193 	    CTLFLAG_RD, &stats->rx_task_rescheduled,
4194 	    "Times the receive interrupt task rescheduled itself");
4195 
4196 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_unknown_ethtype",
4197 	    CTLFLAG_RD, &stats->tx_csum_unknown_ethtype,
4198 	    "Aborted transmit of checksum offloaded buffer with unknown "
4199 	    "Ethernet type");
4200 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_proto_mismatch",
4201 	    CTLFLAG_RD, &stats->tx_csum_proto_mismatch,
4202 	    "Aborted transmit of checksum offloaded buffer because of "
4203 	    "mismatched protocols");
4204 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_not_tcp",
4205 	    CTLFLAG_RD, &stats->tx_tso_not_tcp,
4206 	    "Aborted transmit of TSO buffer with non-TCP protocol");
4207 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_without_csum",
4208 	    CTLFLAG_RD, &stats->tx_tso_without_csum,
4209 	    "Aborted transmit of TSO buffer without TCP checksum offload");
4210 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defragged",
4211 	    CTLFLAG_RD, &stats->tx_defragged,
4212 	    "Transmit mbufs defragged");
4213 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defrag_failed",
4214 	    CTLFLAG_RD, &stats->tx_defrag_failed,
4215 	    "Aborted transmit of buffer because mbuf defragmentation failed");
4216 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_offloaded",
4217 	    CTLFLAG_RD, &stats->tx_csum_offloaded,
4218 	    "Offloaded checksum of transmitted buffer");
4219 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_offloaded",
4220 	    CTLFLAG_RD, &stats->tx_tso_offloaded,
4221 	    "Segmentation offload of transmitted buffer");
4222 	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_task_rescheduled",
4223 	    CTLFLAG_RD, &stats->tx_task_rescheduled,
4224 	    "Times the transmit interrupt task rescheduled itself");
4225 }
4226 
4227 static void
4228 vtnet_setup_sysctl(struct vtnet_softc *sc)
4229 {
4230 	device_t dev;
4231 	struct sysctl_ctx_list *ctx;
4232 	struct sysctl_oid *tree;
4233 	struct sysctl_oid_list *child;
4234 
4235 	dev = sc->vtnet_dev;
4236 	ctx = device_get_sysctl_ctx(dev);
4237 	tree = device_get_sysctl_tree(dev);
4238 	child = SYSCTL_CHILDREN(tree);
4239 
4240 	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "max_vq_pairs",
4241 	    CTLFLAG_RD, &sc->vtnet_max_vq_pairs, 0,
4242 	    "Maximum number of supported virtqueue pairs");
4243 	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "req_vq_pairs",
4244 	    CTLFLAG_RD, &sc->vtnet_req_vq_pairs, 0,
4245 	    "Number of requested virtqueue pairs");
4246 	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "act_vq_pairs",
4247 	    CTLFLAG_RD, &sc->vtnet_act_vq_pairs, 0,
4248 	    "Number of active virtqueue pairs");
4249 
4250 	vtnet_setup_stat_sysctl(ctx, child, sc);
4251 }
4252 
4253 static void
4254 vtnet_load_tunables(struct vtnet_softc *sc)
4255 {
4256 
4257 	sc->vtnet_lro_entry_count = vtnet_tunable_int(sc,
4258 	    "lro_entry_count", vtnet_lro_entry_count);
4259 	if (sc->vtnet_lro_entry_count < TCP_LRO_ENTRIES)
4260 		sc->vtnet_lro_entry_count = TCP_LRO_ENTRIES;
4261 
4262 	sc->vtnet_lro_mbufq_depth = vtnet_tunable_int(sc,
4263 	    "lro_mbufq_depth", vtnet_lro_mbufq_depth);
4264 }
4265 
4266 static int
4267 vtnet_rxq_enable_intr(struct vtnet_rxq *rxq)
4268 {
4269 
4270 	return (virtqueue_enable_intr(rxq->vtnrx_vq));
4271 }
4272 
4273 static void
4274 vtnet_rxq_disable_intr(struct vtnet_rxq *rxq)
4275 {
4276 
4277 	virtqueue_disable_intr(rxq->vtnrx_vq);
4278 }
4279 
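/*
 * Only arm the Tx interrupt when the free descriptor count has fallen
 * to the threshold, and even then postpone it (VQ_POSTPONE_LONG) so
 * completions are batched; with ample free slots the interrupt stays
 * disabled, as the comment below notes.
 */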
4280 static int
4281 vtnet_txq_enable_intr(struct vtnet_txq *txq)
4282 {
4283 	struct virtqueue *vq;
4284 
4285 	vq = txq->vtntx_vq;
4286 
4287 	if (vtnet_txq_below_threshold(txq) != 0)
4288 		return (virtqueue_postpone_intr(vq, VQ_POSTPONE_LONG));
4289 
4290 	/*
4291 	 * The free count is above our threshold. Keep the Tx interrupt
4292 	 * disabled until the queue is fuller.
4293 	 */
4294 	return (0);
4295 }
4296 
4297 static void
4298 vtnet_txq_disable_intr(struct vtnet_txq *txq)
4299 {
4300 
4301 	virtqueue_disable_intr(txq->vtntx_vq);
4302 }
4303 
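/*
 * If virtqueue_enable_intr() reports that buffers were already pending
 * when the interrupt was re-enabled, enqueue the interrupt task
 * directly so those buffers are not stranded waiting for the next
 * interrupt.
 */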
4304 static void
4305 vtnet_enable_rx_interrupts(struct vtnet_softc *sc)
4306 {
4307 	struct vtnet_rxq *rxq;
4308 	int i;
4309 
4310 	for (i = 0; i < sc->vtnet_act_vq_pairs; i++) {
4311 		rxq = &sc->vtnet_rxqs[i];
4312 		if (vtnet_rxq_enable_intr(rxq) != 0)
4313 			taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
4314 	}
4315 }
4316 
4317 static void
4318 vtnet_enable_tx_interrupts(struct vtnet_softc *sc)
4319 {
4320 	int i;
4321 
4322 	for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
4323 		vtnet_txq_enable_intr(&sc->vtnet_txqs[i]);
4324 }
4325 
4326 static void
4327 vtnet_enable_interrupts(struct vtnet_softc *sc)
4328 {
4329 
4330 	vtnet_enable_rx_interrupts(sc);
4331 	vtnet_enable_tx_interrupts(sc);
4332 }
4333 
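/*
 * Interrupts are disabled across all allocated queue pairs
 * (vtnet_max_vq_pairs), not just the active ones, so that queues beyond
 * the currently negotiated count cannot fire either.
 */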
4334 static void
4335 vtnet_disable_rx_interrupts(struct vtnet_softc *sc)
4336 {
4337 	int i;
4338 
4339 	for (i = 0; i < sc->vtnet_max_vq_pairs; i++)
4340 		vtnet_rxq_disable_intr(&sc->vtnet_rxqs[i]);
4341 }
4342 
4343 static void
4344 vtnet_disable_tx_interrupts(struct vtnet_softc *sc)
4345 {
4346 	int i;
4347 
4348 	for (i = 0; i < sc->vtnet_max_vq_pairs; i++)
4349 		vtnet_txq_disable_intr(&sc->vtnet_txqs[i]);
4350 }
4351 
4352 static void
4353 vtnet_disable_interrupts(struct vtnet_softc *sc)
4354 {
4355 
4356 	vtnet_disable_rx_interrupts(sc);
4357 	vtnet_disable_tx_interrupts(sc);
4358 }
4359 
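/*
 * Fetch a per-device tunable, falling back to the compiled-in default.
 * The per-unit knobs may be set from loader.conf(5), for example
 * (illustrative, unit 0 assumed):
 *
 *	hw.vtnet.0.rx_process_limit="512"
 */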
4360 static int
4361 vtnet_tunable_int(struct vtnet_softc *sc, const char *knob, int def)
4362 {
4363 	char path[64];
4364 
4365 	snprintf(path, sizeof(path),
4366 	    "hw.vtnet.%d.%s", device_get_unit(sc->vtnet_dev), knob);
4367 	TUNABLE_INT_FETCH(path, &def);
4368 
4369 	return (def);
4370 }
4371 
4372 #ifdef DEBUGNET
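/*
 * Debugnet (netdump/netgdb) hooks.  These typically run after a panic
 * with interrupts unusable, so transmission goes straight to queue 0
 * with M_NOWAIT | M_USE_RESERVE and completed descriptors are reaped by
 * polling rather than by interrupt.
 */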
4373 static void
4374 vtnet_debugnet_init(struct ifnet *ifp, int *nrxr, int *ncl, int *clsize)
4375 {
4376 	struct vtnet_softc *sc;
4377 
4378 	sc = if_getsoftc(ifp);
4379 
4380 	VTNET_CORE_LOCK(sc);
4381 	*nrxr = sc->vtnet_req_vq_pairs;
4382 	*ncl = DEBUGNET_MAX_IN_FLIGHT;
4383 	*clsize = sc->vtnet_rx_clustersz;
4384 	VTNET_CORE_UNLOCK(sc);
4385 }
4386 
4387 static void
4388 vtnet_debugnet_event(struct ifnet *ifp __unused, enum debugnet_ev event __unused)
4389 {
4390 }
4391 
4392 static int
4393 vtnet_debugnet_transmit(struct ifnet *ifp, struct mbuf *m)
4394 {
4395 	struct vtnet_softc *sc;
4396 	struct vtnet_txq *txq;
4397 	int error;
4398 
4399 	sc = if_getsoftc(ifp);
4400 	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
4401 	    IFF_DRV_RUNNING)
4402 		return (EBUSY);
4403 
4404 	txq = &sc->vtnet_txqs[0];
4405 	error = vtnet_txq_encap(txq, &m, M_NOWAIT | M_USE_RESERVE);
4406 	if (error == 0)
4407 		(void)vtnet_txq_notify(txq);
4408 	return (error);
4409 }
4410 
4411 static int
4412 vtnet_debugnet_poll(struct ifnet *ifp, int count)
4413 {
4414 	struct vtnet_softc *sc;
4415 	int i;
4416 
4417 	sc = if_getsoftc(ifp);
4418 	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
4419 	    IFF_DRV_RUNNING)
4420 		return (EBUSY);
4421 
4422 	(void)vtnet_txq_eof(&sc->vtnet_txqs[0]);
4423 	for (i = 0; i < sc->vtnet_act_vq_pairs; i++)
4424 		(void)vtnet_rxq_eof(&sc->vtnet_rxqs[i]);
4425 	return (0);
4426 }
4427 #endif /* DEBUGNET */
4428