xref: /freebsd/sys/dev/vmware/vmxnet3/if_vmx.c (revision 1f4bcc459a76b7aa664f3fd557684cd0ba6da352)
1 /*-
2  * Copyright (c) 2013 Tsubai Masanari
3  * Copyright (c) 2013 Bryan Venteicher <bryanv@FreeBSD.org>
4  *
5  * Permission to use, copy, modify, and distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  *
17  * $OpenBSD: src/sys/dev/pci/if_vmx.c,v 1.11 2013/06/22 00:28:10 uebayasi Exp $
18  */
19 
20 /* Driver for VMware vmxnet3 virtual ethernet devices. */
21 
22 #include <sys/cdefs.h>
23 __FBSDID("$FreeBSD$");
24 
25 #include <sys/param.h>
26 #include <sys/systm.h>
27 #include <sys/eventhandler.h>
28 #include <sys/kernel.h>
29 #include <sys/endian.h>
30 #include <sys/sockio.h>
31 #include <sys/mbuf.h>
32 #include <sys/malloc.h>
33 #include <sys/module.h>
34 #include <sys/socket.h>
35 #include <sys/sysctl.h>
36 #include <sys/smp.h>
37 #include <sys/taskqueue.h>
38 #include <vm/vm.h>
39 #include <vm/pmap.h>
40 
41 #include <net/ethernet.h>
42 #include <net/if.h>
43 #include <net/if_var.h>
44 #include <net/if_arp.h>
45 #include <net/if_dl.h>
46 #include <net/if_types.h>
47 #include <net/if_media.h>
48 #include <net/if_vlan_var.h>
49 
50 #include <net/bpf.h>
51 
52 #include <netinet/in_systm.h>
53 #include <netinet/in.h>
54 #include <netinet/ip.h>
55 #include <netinet/ip6.h>
56 #include <netinet6/ip6_var.h>
57 #include <netinet/udp.h>
58 #include <netinet/tcp.h>
59 
60 #include <machine/in_cksum.h>
61 
62 #include <machine/bus.h>
63 #include <machine/resource.h>
64 #include <sys/bus.h>
65 #include <sys/rman.h>
66 
67 #include <dev/pci/pcireg.h>
68 #include <dev/pci/pcivar.h>
69 
70 #include "if_vmxreg.h"
71 #include "if_vmxvar.h"
72 
73 #include "opt_inet.h"
74 #include "opt_inet6.h"
75 
76 #ifdef VMXNET3_FAILPOINTS
77 #include <sys/fail.h>
78 static SYSCTL_NODE(DEBUG_FP, OID_AUTO, vmxnet3, CTLFLAG_RW, 0,
79     "vmxnet3 fail points");
80 #define VMXNET3_FP	_debug_fail_point_vmxnet3
81 #endif
82 
83 static int	vmxnet3_probe(device_t);
84 static int	vmxnet3_attach(device_t);
85 static int	vmxnet3_detach(device_t);
86 static int	vmxnet3_shutdown(device_t);
87 
88 static int	vmxnet3_alloc_resources(struct vmxnet3_softc *);
89 static void	vmxnet3_free_resources(struct vmxnet3_softc *);
90 static int	vmxnet3_check_version(struct vmxnet3_softc *);
91 static void	vmxnet3_initial_config(struct vmxnet3_softc *);
92 static void	vmxnet3_check_multiqueue(struct vmxnet3_softc *);
93 
94 static int	vmxnet3_alloc_msix_interrupts(struct vmxnet3_softc *);
95 static int	vmxnet3_alloc_msi_interrupts(struct vmxnet3_softc *);
96 static int	vmxnet3_alloc_legacy_interrupts(struct vmxnet3_softc *);
97 static int	vmxnet3_alloc_interrupt(struct vmxnet3_softc *, int, int,
98 		    struct vmxnet3_interrupt *);
99 static int	vmxnet3_alloc_intr_resources(struct vmxnet3_softc *);
100 static int	vmxnet3_setup_msix_interrupts(struct vmxnet3_softc *);
101 static int	vmxnet3_setup_legacy_interrupt(struct vmxnet3_softc *);
102 static int	vmxnet3_setup_interrupts(struct vmxnet3_softc *);
103 static int	vmxnet3_alloc_interrupts(struct vmxnet3_softc *);
104 
105 static void	vmxnet3_free_interrupt(struct vmxnet3_softc *,
106 		    struct vmxnet3_interrupt *);
107 static void	vmxnet3_free_interrupts(struct vmxnet3_softc *);
108 
109 #ifndef VMXNET3_LEGACY_TX
110 static int	vmxnet3_alloc_taskqueue(struct vmxnet3_softc *);
111 static void	vmxnet3_start_taskqueue(struct vmxnet3_softc *);
112 static void	vmxnet3_drain_taskqueue(struct vmxnet3_softc *);
113 static void	vmxnet3_free_taskqueue(struct vmxnet3_softc *);
114 #endif
115 
116 static int	vmxnet3_init_rxq(struct vmxnet3_softc *, int);
117 static int	vmxnet3_init_txq(struct vmxnet3_softc *, int);
118 static int	vmxnet3_alloc_rxtx_queues(struct vmxnet3_softc *);
119 static void	vmxnet3_destroy_rxq(struct vmxnet3_rxqueue *);
120 static void	vmxnet3_destroy_txq(struct vmxnet3_txqueue *);
121 static void	vmxnet3_free_rxtx_queues(struct vmxnet3_softc *);
122 
123 static int	vmxnet3_alloc_shared_data(struct vmxnet3_softc *);
124 static void	vmxnet3_free_shared_data(struct vmxnet3_softc *);
125 static int	vmxnet3_alloc_txq_data(struct vmxnet3_softc *);
126 static void	vmxnet3_free_txq_data(struct vmxnet3_softc *);
127 static int	vmxnet3_alloc_rxq_data(struct vmxnet3_softc *);
128 static void	vmxnet3_free_rxq_data(struct vmxnet3_softc *);
129 static int	vmxnet3_alloc_queue_data(struct vmxnet3_softc *);
130 static void	vmxnet3_free_queue_data(struct vmxnet3_softc *);
131 static int	vmxnet3_alloc_mcast_table(struct vmxnet3_softc *);
132 static void	vmxnet3_init_shared_data(struct vmxnet3_softc *);
133 static void	vmxnet3_reinit_interface(struct vmxnet3_softc *);
134 static void	vmxnet3_reinit_rss_shared_data(struct vmxnet3_softc *);
135 static void	vmxnet3_reinit_shared_data(struct vmxnet3_softc *);
136 static int	vmxnet3_alloc_data(struct vmxnet3_softc *);
137 static void	vmxnet3_free_data(struct vmxnet3_softc *);
138 static int	vmxnet3_setup_interface(struct vmxnet3_softc *);
139 
140 static void	vmxnet3_evintr(struct vmxnet3_softc *);
141 static void	vmxnet3_txq_eof(struct vmxnet3_txqueue *);
142 static void	vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *, struct mbuf *);
143 static int	vmxnet3_newbuf(struct vmxnet3_softc *, struct vmxnet3_rxring *);
144 static void	vmxnet3_rxq_eof_discard(struct vmxnet3_rxqueue *,
145 		    struct vmxnet3_rxring *, int);
146 static void	vmxnet3_rxq_eof(struct vmxnet3_rxqueue *);
147 static void	vmxnet3_legacy_intr(void *);
148 static void	vmxnet3_txq_intr(void *);
149 static void	vmxnet3_rxq_intr(void *);
150 static void	vmxnet3_event_intr(void *);
151 
152 static void	vmxnet3_txstop(struct vmxnet3_softc *, struct vmxnet3_txqueue *);
153 static void	vmxnet3_rxstop(struct vmxnet3_softc *, struct vmxnet3_rxqueue *);
154 static void	vmxnet3_stop(struct vmxnet3_softc *);
155 
156 static void	vmxnet3_txinit(struct vmxnet3_softc *, struct vmxnet3_txqueue *);
157 static int	vmxnet3_rxinit(struct vmxnet3_softc *, struct vmxnet3_rxqueue *);
158 static int	vmxnet3_reinit_queues(struct vmxnet3_softc *);
159 static int	vmxnet3_enable_device(struct vmxnet3_softc *);
160 static void	vmxnet3_reinit_rxfilters(struct vmxnet3_softc *);
161 static int	vmxnet3_reinit(struct vmxnet3_softc *);
162 static void	vmxnet3_init_locked(struct vmxnet3_softc *);
163 static void	vmxnet3_init(void *);
164 
165 static int	vmxnet3_txq_offload_ctx(struct vmxnet3_txqueue *, struct mbuf *,
166 		    int *, int *, int *);
167 static int	vmxnet3_txq_load_mbuf(struct vmxnet3_txqueue *, struct mbuf **,
168 		    bus_dmamap_t, bus_dma_segment_t [], int *);
169 static void	vmxnet3_txq_unload_mbuf(struct vmxnet3_txqueue *, bus_dmamap_t);
170 static int	vmxnet3_txq_encap(struct vmxnet3_txqueue *, struct mbuf **);
171 #ifdef VMXNET3_LEGACY_TX
172 static void	vmxnet3_start_locked(struct ifnet *);
173 static void	vmxnet3_start(struct ifnet *);
174 #else
175 static int	vmxnet3_txq_mq_start_locked(struct vmxnet3_txqueue *,
176 		    struct mbuf *);
177 static int	vmxnet3_txq_mq_start(struct ifnet *, struct mbuf *);
178 static void	vmxnet3_txq_tq_deferred(void *, int);
179 #endif
180 static void	vmxnet3_txq_start(struct vmxnet3_txqueue *);
181 static void	vmxnet3_tx_start_all(struct vmxnet3_softc *);
182 
183 static void	vmxnet3_update_vlan_filter(struct vmxnet3_softc *, int,
184 		    uint16_t);
185 static void	vmxnet3_register_vlan(void *, struct ifnet *, uint16_t);
186 static void	vmxnet3_unregister_vlan(void *, struct ifnet *, uint16_t);
187 static void	vmxnet3_set_rxfilter(struct vmxnet3_softc *);
188 static int	vmxnet3_change_mtu(struct vmxnet3_softc *, int);
189 static int	vmxnet3_ioctl(struct ifnet *, u_long, caddr_t);
190 static uint64_t	vmxnet3_get_counter(struct ifnet *, ift_counter);
191 
192 #ifndef VMXNET3_LEGACY_TX
193 static void	vmxnet3_qflush(struct ifnet *);
194 #endif
195 
196 static int	vmxnet3_watchdog(struct vmxnet3_txqueue *);
197 static void	vmxnet3_refresh_host_stats(struct vmxnet3_softc *);
198 static void	vmxnet3_tick(void *);
199 static void	vmxnet3_link_status(struct vmxnet3_softc *);
200 static void	vmxnet3_media_status(struct ifnet *, struct ifmediareq *);
201 static int	vmxnet3_media_change(struct ifnet *);
202 static void	vmxnet3_set_lladdr(struct vmxnet3_softc *);
203 static void	vmxnet3_get_lladdr(struct vmxnet3_softc *);
204 
205 static void	vmxnet3_setup_txq_sysctl(struct vmxnet3_txqueue *,
206 		    struct sysctl_ctx_list *, struct sysctl_oid_list *);
207 static void	vmxnet3_setup_rxq_sysctl(struct vmxnet3_rxqueue *,
208 		    struct sysctl_ctx_list *, struct sysctl_oid_list *);
209 static void	vmxnet3_setup_queue_sysctl(struct vmxnet3_softc *,
210 		    struct sysctl_ctx_list *, struct sysctl_oid_list *);
211 static void	vmxnet3_setup_sysctl(struct vmxnet3_softc *);
212 
213 static void	vmxnet3_write_bar0(struct vmxnet3_softc *, bus_size_t,
214 		    uint32_t);
215 static uint32_t	vmxnet3_read_bar1(struct vmxnet3_softc *, bus_size_t);
216 static void	vmxnet3_write_bar1(struct vmxnet3_softc *, bus_size_t,
217 		    uint32_t);
218 static void	vmxnet3_write_cmd(struct vmxnet3_softc *, uint32_t);
219 static uint32_t	vmxnet3_read_cmd(struct vmxnet3_softc *, uint32_t);
220 
221 static void	vmxnet3_enable_intr(struct vmxnet3_softc *, int);
222 static void	vmxnet3_disable_intr(struct vmxnet3_softc *, int);
223 static void	vmxnet3_enable_all_intrs(struct vmxnet3_softc *);
224 static void	vmxnet3_disable_all_intrs(struct vmxnet3_softc *);
225 
226 static int	vmxnet3_dma_malloc(struct vmxnet3_softc *, bus_size_t,
227 		    bus_size_t, struct vmxnet3_dma_alloc *);
228 static void	vmxnet3_dma_free(struct vmxnet3_softc *,
229 		    struct vmxnet3_dma_alloc *);
230 static int	vmxnet3_tunable_int(struct vmxnet3_softc *,
231 		    const char *, int);
232 
233 typedef enum {
234 	VMXNET3_BARRIER_RD,
235 	VMXNET3_BARRIER_WR,
236 	VMXNET3_BARRIER_RDWR,
237 } vmxnet3_barrier_t;
238 
239 static void	vmxnet3_barrier(struct vmxnet3_softc *, vmxnet3_barrier_t);
240 
241 /* Tunables. */
242 static int vmxnet3_mq_disable = 0;
243 TUNABLE_INT("hw.vmx.mq_disable", &vmxnet3_mq_disable);
244 static int vmxnet3_default_txnqueue = VMXNET3_DEF_TX_QUEUES;
245 TUNABLE_INT("hw.vmx.txnqueue", &vmxnet3_default_txnqueue);
246 static int vmxnet3_default_rxnqueue = VMXNET3_DEF_RX_QUEUES;
247 TUNABLE_INT("hw.vmx.rxnqueue", &vmxnet3_default_rxnqueue);
248 static int vmxnet3_default_txndesc = VMXNET3_DEF_TX_NDESC;
249 TUNABLE_INT("hw.vmx.txndesc", &vmxnet3_default_txndesc);
250 static int vmxnet3_default_rxndesc = VMXNET3_DEF_RX_NDESC;
251 TUNABLE_INT("hw.vmx.rxndesc", &vmxnet3_default_rxndesc);
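
/*
 * Example: these knobs are fetched with TUNABLE_INT() and so can be set
 * from the loader, e.g. in /boot/loader.conf:
 *
 *	hw.vmx.mq_disable=1
 *	hw.vmx.txnqueue=2
 *
 * vmxnet3_initial_config() below also consults vmxnet3_tunable_int() with
 * these values as the defaults.
 */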
252 
253 static device_method_t vmxnet3_methods[] = {
254 	/* Device interface. */
255 	DEVMETHOD(device_probe,		vmxnet3_probe),
256 	DEVMETHOD(device_attach,	vmxnet3_attach),
257 	DEVMETHOD(device_detach,	vmxnet3_detach),
258 	DEVMETHOD(device_shutdown,	vmxnet3_shutdown),
259 
260 	DEVMETHOD_END
261 };
262 
263 static driver_t vmxnet3_driver = {
264 	"vmx", vmxnet3_methods, sizeof(struct vmxnet3_softc)
265 };
266 
267 static devclass_t vmxnet3_devclass;
268 DRIVER_MODULE(vmx, pci, vmxnet3_driver, vmxnet3_devclass, 0, 0);
269 
270 MODULE_DEPEND(vmx, pci, 1, 1, 1);
271 MODULE_DEPEND(vmx, ether, 1, 1, 1);
272 
273 #define VMXNET3_VMWARE_VENDOR_ID	0x15AD
274 #define VMXNET3_VMWARE_DEVICE_ID	0x07B0
275 
276 static int
277 vmxnet3_probe(device_t dev)
278 {
279 
280 	if (pci_get_vendor(dev) == VMXNET3_VMWARE_VENDOR_ID &&
281 	    pci_get_device(dev) == VMXNET3_VMWARE_DEVICE_ID) {
282 		device_set_desc(dev, "VMware VMXNET3 Ethernet Adapter");
283 		return (BUS_PROBE_DEFAULT);
284 	}
285 
286 	return (ENXIO);
287 }
288 
289 static int
290 vmxnet3_attach(device_t dev)
291 {
292 	struct vmxnet3_softc *sc;
293 	int error;
294 
295 	sc = device_get_softc(dev);
296 	sc->vmx_dev = dev;
297 
298 	pci_enable_busmaster(dev);
299 
300 	VMXNET3_CORE_LOCK_INIT(sc, device_get_nameunit(dev));
301 	callout_init_mtx(&sc->vmx_tick, &sc->vmx_mtx, 0);
302 
303 	vmxnet3_initial_config(sc);
304 
305 	error = vmxnet3_alloc_resources(sc);
306 	if (error)
307 		goto fail;
308 
309 	error = vmxnet3_check_version(sc);
310 	if (error)
311 		goto fail;
312 
313 	error = vmxnet3_alloc_rxtx_queues(sc);
314 	if (error)
315 		goto fail;
316 
317 #ifndef VMXNET3_LEGACY_TX
318 	error = vmxnet3_alloc_taskqueue(sc);
319 	if (error)
320 		goto fail;
321 #endif
322 
323 	error = vmxnet3_alloc_interrupts(sc);
324 	if (error)
325 		goto fail;
326 
327 	vmxnet3_check_multiqueue(sc);
328 
329 	error = vmxnet3_alloc_data(sc);
330 	if (error)
331 		goto fail;
332 
333 	error = vmxnet3_setup_interface(sc);
334 	if (error)
335 		goto fail;
336 
337 	error = vmxnet3_setup_interrupts(sc);
338 	if (error) {
339 		ether_ifdetach(sc->vmx_ifp);
340 		device_printf(dev, "could not set up interrupt\n");
341 		goto fail;
342 	}
343 
344 	vmxnet3_setup_sysctl(sc);
345 #ifndef VMXNET3_LEGACY_TX
346 	vmxnet3_start_taskqueue(sc);
347 #endif
348 
349 fail:
350 	if (error)
351 		vmxnet3_detach(dev);
352 
353 	return (error);
354 }
355 
356 static int
357 vmxnet3_detach(device_t dev)
358 {
359 	struct vmxnet3_softc *sc;
360 	struct ifnet *ifp;
361 
362 	sc = device_get_softc(dev);
363 	ifp = sc->vmx_ifp;
364 
365 	if (device_is_attached(dev)) {
366 		VMXNET3_CORE_LOCK(sc);
367 		vmxnet3_stop(sc);
368 		VMXNET3_CORE_UNLOCK(sc);
369 
370 		callout_drain(&sc->vmx_tick);
371 #ifndef VMXNET3_LEGACY_TX
372 		vmxnet3_drain_taskqueue(sc);
373 #endif
374 
375 		ether_ifdetach(ifp);
376 	}
377 
378 	if (sc->vmx_vlan_attach != NULL) {
379 		EVENTHANDLER_DEREGISTER(vlan_config, sc->vmx_vlan_attach);
380 		sc->vmx_vlan_attach = NULL;
381 	}
382 	if (sc->vmx_vlan_detach != NULL) {
383 		EVENTHANDLER_DEREGISTER(vlan_config, sc->vmx_vlan_detach);
384 		sc->vmx_vlan_detach = NULL;
385 	}
386 
387 #ifndef VMXNET3_LEGACY_TX
388 	vmxnet3_free_taskqueue(sc);
389 #endif
390 	vmxnet3_free_interrupts(sc);
391 
392 	if (ifp != NULL) {
393 		if_free(ifp);
394 		sc->vmx_ifp = NULL;
395 	}
396 
397 	ifmedia_removeall(&sc->vmx_media);
398 
399 	vmxnet3_free_data(sc);
400 	vmxnet3_free_resources(sc);
401 	vmxnet3_free_rxtx_queues(sc);
402 
403 	VMXNET3_CORE_LOCK_DESTROY(sc);
404 
405 	return (0);
406 }
407 
408 static int
409 vmxnet3_shutdown(device_t dev)
410 {
411 
412 	return (0);
413 }
414 
415 static int
416 vmxnet3_alloc_resources(struct vmxnet3_softc *sc)
417 {
418 	device_t dev;
419 	int rid;
420 
421 	dev = sc->vmx_dev;
422 
423 	rid = PCIR_BAR(0);
424 	sc->vmx_res0 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
425 	    RF_ACTIVE);
426 	if (sc->vmx_res0 == NULL) {
427 		device_printf(dev,
428 		    "could not map BAR0 memory\n");
429 		return (ENXIO);
430 	}
431 
432 	sc->vmx_iot0 = rman_get_bustag(sc->vmx_res0);
433 	sc->vmx_ioh0 = rman_get_bushandle(sc->vmx_res0);
434 
435 	rid = PCIR_BAR(1);
436 	sc->vmx_res1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
437 	    RF_ACTIVE);
438 	if (sc->vmx_res1 == NULL) {
439 		device_printf(dev,
440 		    "could not map BAR1 memory\n");
441 		return (ENXIO);
442 	}
443 
444 	sc->vmx_iot1 = rman_get_bustag(sc->vmx_res1);
445 	sc->vmx_ioh1 = rman_get_bushandle(sc->vmx_res1);
446 
447 	if (pci_find_cap(dev, PCIY_MSIX, NULL) == 0) {
448 		rid = PCIR_BAR(2);
449 		sc->vmx_msix_res = bus_alloc_resource_any(dev,
450 		    SYS_RES_MEMORY, &rid, RF_ACTIVE);
451 	}
452 
453 	if (sc->vmx_msix_res == NULL)
454 		sc->vmx_flags |= VMXNET3_FLAG_NO_MSIX;
455 
456 	return (0);
457 }
458 
459 static void
460 vmxnet3_free_resources(struct vmxnet3_softc *sc)
461 {
462 	device_t dev;
463 	int rid;
464 
465 	dev = sc->vmx_dev;
466 
467 	if (sc->vmx_res0 != NULL) {
468 		rid = PCIR_BAR(0);
469 		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->vmx_res0);
470 		sc->vmx_res0 = NULL;
471 	}
472 
473 	if (sc->vmx_res1 != NULL) {
474 		rid = PCIR_BAR(1);
475 		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->vmx_res1);
476 		sc->vmx_res1 = NULL;
477 	}
478 
479 	if (sc->vmx_msix_res != NULL) {
480 		rid = PCIR_BAR(2);
481 		bus_release_resource(dev, SYS_RES_MEMORY, rid,
482 		    sc->vmx_msix_res);
483 		sc->vmx_msix_res = NULL;
484 	}
485 }
486 
487 static int
488 vmxnet3_check_version(struct vmxnet3_softc *sc)
489 {
490 	device_t dev;
491 	uint32_t version;
492 
493 	dev = sc->vmx_dev;
494 
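	/*
	 * Bit 0 of each revision register advertises version 1 support;
	 * writing 1 back selects that revision.
	 */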
495 	version = vmxnet3_read_bar1(sc, VMXNET3_BAR1_VRRS);
496 	if ((version & 0x01) == 0) {
497 		device_printf(dev, "unsupported hardware version %#x\n",
498 		    version);
499 		return (ENOTSUP);
500 	}
501 	vmxnet3_write_bar1(sc, VMXNET3_BAR1_VRRS, 1);
502 
503 	version = vmxnet3_read_bar1(sc, VMXNET3_BAR1_UVRS);
504 	if ((version & 0x01) == 0) {
505 		device_printf(dev, "unsupported UPT version %#x\n", version);
506 		return (ENOTSUP);
507 	}
508 	vmxnet3_write_bar1(sc, VMXNET3_BAR1_UVRS, 1);
509 
510 	return (0);
511 }
512 
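/* Round a positive value down to the nearest power of two. */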
513 static int
514 trunc_powerof2(int val)
515 {
516 
517 	return (1U << (fls(val) - 1));
518 }
519 
520 static void
521 vmxnet3_initial_config(struct vmxnet3_softc *sc)
522 {
523 	int nqueue, ndesc;
524 
525 	nqueue = vmxnet3_tunable_int(sc, "txnqueue", vmxnet3_default_txnqueue);
526 	if (nqueue > VMXNET3_MAX_TX_QUEUES || nqueue < 1)
527 		nqueue = VMXNET3_DEF_TX_QUEUES;
528 	if (nqueue > mp_ncpus)
529 		nqueue = mp_ncpus;
530 	sc->vmx_max_ntxqueues = trunc_powerof2(nqueue);
531 
532 	nqueue = vmxnet3_tunable_int(sc, "rxnqueue", vmxnet3_default_rxnqueue);
533 	if (nqueue > VMXNET3_MAX_RX_QUEUES || nqueue < 1)
534 		nqueue = VMXNET3_DEF_RX_QUEUES;
535 	if (nqueue > mp_ncpus)
536 		nqueue = mp_ncpus;
537 	sc->vmx_max_nrxqueues = trunc_powerof2(nqueue);
538 
539 	if (vmxnet3_tunable_int(sc, "mq_disable", vmxnet3_mq_disable)) {
540 		sc->vmx_max_nrxqueues = 1;
541 		sc->vmx_max_ntxqueues = 1;
542 	}
543 
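	/* Clamp the ring sizes to their supported range and multiple. */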
544 	ndesc = vmxnet3_tunable_int(sc, "txd", vmxnet3_default_txndesc);
545 	if (ndesc > VMXNET3_MAX_TX_NDESC || ndesc < VMXNET3_MIN_TX_NDESC)
546 		ndesc = VMXNET3_DEF_TX_NDESC;
547 	if (ndesc & VMXNET3_MASK_TX_NDESC)
548 		ndesc &= ~VMXNET3_MASK_TX_NDESC;
549 	sc->vmx_ntxdescs = ndesc;
550 
551 	ndesc = vmxnet3_tunable_int(sc, "rxd", vmxnet3_default_rxndesc);
552 	if (ndesc > VMXNET3_MAX_RX_NDESC || ndesc < VMXNET3_MIN_RX_NDESC)
553 		ndesc = VMXNET3_DEF_RX_NDESC;
554 	if (ndesc & VMXNET3_MASK_RX_NDESC)
555 		ndesc &= ~VMXNET3_MASK_RX_NDESC;
556 	sc->vmx_nrxdescs = ndesc;
557 	sc->vmx_max_rxsegs = VMXNET3_MAX_RX_SEGS;
558 }
559 
560 static void
561 vmxnet3_check_multiqueue(struct vmxnet3_softc *sc)
562 {
563 
564 	if (sc->vmx_intr_type != VMXNET3_IT_MSIX)
565 		goto out;
566 
567 	/* BMV: Just use the maximum configured for now. */
568 	sc->vmx_nrxqueues = sc->vmx_max_nrxqueues;
569 	sc->vmx_ntxqueues = sc->vmx_max_ntxqueues;
570 
571 	if (sc->vmx_nrxqueues > 1)
572 		sc->vmx_flags |= VMXNET3_FLAG_RSS;
573 
574 	return;
575 
576 out:
577 	sc->vmx_ntxqueues = 1;
578 	sc->vmx_nrxqueues = 1;
579 }
580 
581 static int
582 vmxnet3_alloc_msix_interrupts(struct vmxnet3_softc *sc)
583 {
584 	device_t dev;
585 	int nmsix, cnt, required;
586 
587 	dev = sc->vmx_dev;
588 
589 	if (sc->vmx_flags & VMXNET3_FLAG_NO_MSIX)
590 		return (1);
591 
592 	/* Allocate an additional vector for the events interrupt. */
593 	required = sc->vmx_max_nrxqueues + sc->vmx_max_ntxqueues + 1;
594 
595 	nmsix = pci_msix_count(dev);
596 	if (nmsix < required)
597 		return (1);
598 
599 	cnt = required;
600 	if (pci_alloc_msix(dev, &cnt) == 0 && cnt >= required) {
601 		sc->vmx_nintrs = required;
602 		return (0);
603 	} else
604 		pci_release_msi(dev);
605 
606 	/* BMV TODO Fallback to sharing MSIX vectors if possible. */
607 
608 	return (1);
609 }
610 
611 static int
612 vmxnet3_alloc_msi_interrupts(struct vmxnet3_softc *sc)
613 {
614 	device_t dev;
615 	int nmsi, cnt, required;
616 
617 	dev = sc->vmx_dev;
618 	required = 1;
619 
620 	nmsi = pci_msi_count(dev);
621 	if (nmsi < required)
622 		return (1);
623 
624 	cnt = required;
625 	if (pci_alloc_msi(dev, &cnt) == 0 && cnt >= required) {
626 		sc->vmx_nintrs = 1;
627 		return (0);
628 	} else
629 		pci_release_msi(dev);
630 
631 	return (1);
632 }
633 
634 static int
635 vmxnet3_alloc_legacy_interrupts(struct vmxnet3_softc *sc)
636 {
637 
638 	sc->vmx_nintrs = 1;
639 	return (0);
640 }
641 
642 static int
643 vmxnet3_alloc_interrupt(struct vmxnet3_softc *sc, int rid, int flags,
644     struct vmxnet3_interrupt *intr)
645 {
646 	struct resource *irq;
647 
648 	irq = bus_alloc_resource_any(sc->vmx_dev, SYS_RES_IRQ, &rid, flags);
649 	if (irq == NULL)
650 		return (ENXIO);
651 
652 	intr->vmxi_irq = irq;
653 	intr->vmxi_rid = rid;
654 
655 	return (0);
656 }
657 
658 static int
659 vmxnet3_alloc_intr_resources(struct vmxnet3_softc *sc)
660 {
661 	int i, rid, flags, error;
662 
663 	rid = 0;
664 	flags = RF_ACTIVE;
665 
666 	if (sc->vmx_intr_type == VMXNET3_IT_LEGACY)
667 		flags |= RF_SHAREABLE;
668 	else
669 		rid = 1;
670 
671 	for (i = 0; i < sc->vmx_nintrs; i++, rid++) {
672 		error = vmxnet3_alloc_interrupt(sc, rid, flags,
673 		    &sc->vmx_intrs[i]);
674 		if (error)
675 			return (error);
676 	}
677 
678 	return (0);
679 }
680 
681 static int
682 vmxnet3_setup_msix_interrupts(struct vmxnet3_softc *sc)
683 {
684 	device_t dev;
685 	struct vmxnet3_txqueue *txq;
686 	struct vmxnet3_rxqueue *rxq;
687 	struct vmxnet3_interrupt *intr;
688 	enum intr_type type;
689 	int i, error;
690 
691 	dev = sc->vmx_dev;
692 	intr = &sc->vmx_intrs[0];
693 	type = INTR_TYPE_NET | INTR_MPSAFE;
694 
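	/*
	 * Non-legacy interrupt resource IDs start at 1, so rid - 1 below is
	 * the zero-based vector index later reported to the device.
	 */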
695 	for (i = 0; i < sc->vmx_ntxqueues; i++, intr++) {
696 		txq = &sc->vmx_txq[i];
697 		error = bus_setup_intr(dev, intr->vmxi_irq, type, NULL,
698 		     vmxnet3_txq_intr, txq, &intr->vmxi_handler);
699 		if (error)
700 			return (error);
701 		bus_describe_intr(dev, intr->vmxi_irq, intr->vmxi_handler,
702 		    "tq%d", i);
703 		txq->vxtxq_intr_idx = intr->vmxi_rid - 1;
704 	}
705 
706 	for (i = 0; i < sc->vmx_nrxqueues; i++, intr++) {
707 		rxq = &sc->vmx_rxq[i];
708 		error = bus_setup_intr(dev, intr->vmxi_irq, type, NULL,
709 		    vmxnet3_rxq_intr, rxq, &intr->vmxi_handler);
710 		if (error)
711 			return (error);
712 		bus_describe_intr(dev, intr->vmxi_irq, intr->vmxi_handler,
713 		    "rq%d", i);
714 		rxq->vxrxq_intr_idx = intr->vmxi_rid - 1;
715 	}
716 
717 	error = bus_setup_intr(dev, intr->vmxi_irq, type, NULL,
718 	    vmxnet3_event_intr, sc, &intr->vmxi_handler);
719 	if (error)
720 		return (error);
721 	bus_describe_intr(dev, intr->vmxi_irq, intr->vmxi_handler, "event");
722 	sc->vmx_event_intr_idx = intr->vmxi_rid - 1;
723 
724 	return (0);
725 }
726 
727 static int
728 vmxnet3_setup_legacy_interrupt(struct vmxnet3_softc *sc)
729 {
730 	struct vmxnet3_interrupt *intr;
731 	int i, error;
732 
733 	intr = &sc->vmx_intrs[0];
734 	error = bus_setup_intr(sc->vmx_dev, intr->vmxi_irq,
735 	    INTR_TYPE_NET | INTR_MPSAFE, NULL, vmxnet3_legacy_intr, sc,
736 	    &intr->vmxi_handler);
737 
738 	for (i = 0; i < sc->vmx_ntxqueues; i++)
739 		sc->vmx_txq[i].vxtxq_intr_idx = 0;
740 	for (i = 0; i < sc->vmx_nrxqueues; i++)
741 		sc->vmx_rxq[i].vxrxq_intr_idx = 0;
742 	sc->vmx_event_intr_idx = 0;
743 
744 	return (error);
745 }
746 
747 static void
748 vmxnet3_set_interrupt_idx(struct vmxnet3_softc *sc)
749 {
750 	struct vmxnet3_txqueue *txq;
751 	struct vmxnet3_txq_shared *txs;
752 	struct vmxnet3_rxqueue *rxq;
753 	struct vmxnet3_rxq_shared *rxs;
754 	int i;
755 
756 	sc->vmx_ds->evintr = sc->vmx_event_intr_idx;
757 
758 	for (i = 0; i < sc->vmx_ntxqueues; i++) {
759 		txq = &sc->vmx_txq[i];
760 		txs = txq->vxtxq_ts;
761 		txs->intr_idx = txq->vxtxq_intr_idx;
762 	}
763 
764 	for (i = 0; i < sc->vmx_nrxqueues; i++) {
765 		rxq = &sc->vmx_rxq[i];
766 		rxs = rxq->vxrxq_rs;
767 		rxs->intr_idx = rxq->vxrxq_intr_idx;
768 	}
769 }
770 
771 static int
772 vmxnet3_setup_interrupts(struct vmxnet3_softc *sc)
773 {
774 	int error;
775 
776 	error = vmxnet3_alloc_intr_resources(sc);
777 	if (error)
778 		return (error);
779 
780 	switch (sc->vmx_intr_type) {
781 	case VMXNET3_IT_MSIX:
782 		error = vmxnet3_setup_msix_interrupts(sc);
783 		break;
784 	case VMXNET3_IT_MSI:
785 	case VMXNET3_IT_LEGACY:
786 		error = vmxnet3_setup_legacy_interrupt(sc);
787 		break;
788 	default:
789 		panic("%s: invalid interrupt type %d", __func__,
790 		    sc->vmx_intr_type);
791 	}
792 
793 	if (error == 0)
794 		vmxnet3_set_interrupt_idx(sc);
795 
796 	return (error);
797 }
798 
799 static int
800 vmxnet3_alloc_interrupts(struct vmxnet3_softc *sc)
801 {
802 	device_t dev;
803 	uint32_t config;
804 	int error;
805 
806 	dev = sc->vmx_dev;
807 	config = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_INTRCFG);
808 
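	/* Low two bits: interrupt type; next two bits: interrupt mask mode. */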
809 	sc->vmx_intr_type = config & 0x03;
810 	sc->vmx_intr_mask_mode = (config >> 2) & 0x03;
811 
812 	switch (sc->vmx_intr_type) {
813 	case VMXNET3_IT_AUTO:
814 		sc->vmx_intr_type = VMXNET3_IT_MSIX;
815 		/* FALLTHROUGH */
816 	case VMXNET3_IT_MSIX:
817 		error = vmxnet3_alloc_msix_interrupts(sc);
818 		if (error == 0)
819 			break;
820 		sc->vmx_intr_type = VMXNET3_IT_MSI;
821 		/* FALLTHROUGH */
822 	case VMXNET3_IT_MSI:
823 		error = vmxnet3_alloc_msi_interrupts(sc);
824 		if (error == 0)
825 			break;
826 		sc->vmx_intr_type = VMXNET3_IT_LEGACY;
827 		/* FALLTHROUGH */
828 	case VMXNET3_IT_LEGACY:
829 		error = vmxnet3_alloc_legacy_interrupts(sc);
830 		if (error == 0)
831 			break;
832 		/* FALLTHROUGH */
833 	default:
834 		sc->vmx_intr_type = -1;
835 		device_printf(dev, "cannot allocate any interrupt resources\n");
836 		return (ENXIO);
837 	}
838 
839 	return (error);
840 }
841 
842 static void
843 vmxnet3_free_interrupt(struct vmxnet3_softc *sc,
844     struct vmxnet3_interrupt *intr)
845 {
846 	device_t dev;
847 
848 	dev = sc->vmx_dev;
849 
850 	if (intr->vmxi_handler != NULL) {
851 		bus_teardown_intr(dev, intr->vmxi_irq, intr->vmxi_handler);
852 		intr->vmxi_handler = NULL;
853 	}
854 
855 	if (intr->vmxi_irq != NULL) {
856 		bus_release_resource(dev, SYS_RES_IRQ, intr->vmxi_rid,
857 		    intr->vmxi_irq);
858 		intr->vmxi_irq = NULL;
859 		intr->vmxi_rid = -1;
860 	}
861 }
862 
863 static void
864 vmxnet3_free_interrupts(struct vmxnet3_softc *sc)
865 {
866 	int i;
867 
868 	for (i = 0; i < sc->vmx_nintrs; i++)
869 		vmxnet3_free_interrupt(sc, &sc->vmx_intrs[i]);
870 
871 	if (sc->vmx_intr_type == VMXNET3_IT_MSI ||
872 	    sc->vmx_intr_type == VMXNET3_IT_MSIX)
873 		pci_release_msi(sc->vmx_dev);
874 }
875 
876 #ifndef VMXNET3_LEGACY_TX
877 static int
878 vmxnet3_alloc_taskqueue(struct vmxnet3_softc *sc)
879 {
880 	device_t dev;
881 
882 	dev = sc->vmx_dev;
883 
884 	sc->vmx_tq = taskqueue_create(device_get_nameunit(dev), M_NOWAIT,
885 	    taskqueue_thread_enqueue, &sc->vmx_tq);
886 	if (sc->vmx_tq == NULL)
887 		return (ENOMEM);
888 
889 	return (0);
890 }
891 
892 static void
893 vmxnet3_start_taskqueue(struct vmxnet3_softc *sc)
894 {
895 	device_t dev;
896 	int nthreads, error;
897 
898 	dev = sc->vmx_dev;
899 
900 	/*
901 	 * The taskqueue is not used frequently, so a dedicated thread for
902 	 * each queue is unnecessary.
903 	 */
904 	nthreads = MAX(1, sc->vmx_ntxqueues / 2);
905 
906 	/*
907 	 * Most drivers just ignore the return value, since it can only fail
908 	 * with ENOMEM and an error is unlikely. It is also hard for us to
909 	 * recover from an error here.
910 	 */
911 	error = taskqueue_start_threads(&sc->vmx_tq, nthreads, PI_NET,
912 	    "%s taskq", device_get_nameunit(dev));
913 	if (error)
914 		device_printf(dev, "failed to start taskqueue: %d\n", error);
915 }
916 
917 static void
918 vmxnet3_drain_taskqueue(struct vmxnet3_softc *sc)
919 {
920 	struct vmxnet3_txqueue *txq;
921 	int i;
922 
923 	if (sc->vmx_tq != NULL) {
924 		for (i = 0; i < sc->vmx_max_ntxqueues; i++) {
925 			txq = &sc->vmx_txq[i];
926 			taskqueue_drain(sc->vmx_tq, &txq->vxtxq_defrtask);
927 		}
928 	}
929 }
930 
931 static void
932 vmxnet3_free_taskqueue(struct vmxnet3_softc *sc)
933 {
934 	if (sc->vmx_tq != NULL) {
935 		taskqueue_free(sc->vmx_tq);
936 		sc->vmx_tq = NULL;
937 	}
938 }
939 #endif
940 
941 static int
942 vmxnet3_init_rxq(struct vmxnet3_softc *sc, int q)
943 {
944 	struct vmxnet3_rxqueue *rxq;
945 	struct vmxnet3_rxring *rxr;
946 	int i;
947 
948 	rxq = &sc->vmx_rxq[q];
949 
950 	snprintf(rxq->vxrxq_name, sizeof(rxq->vxrxq_name), "%s-rx%d",
951 	    device_get_nameunit(sc->vmx_dev), q);
952 	mtx_init(&rxq->vxrxq_mtx, rxq->vxrxq_name, NULL, MTX_DEF);
953 
954 	rxq->vxrxq_sc = sc;
955 	rxq->vxrxq_id = q;
956 
957 	for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
958 		rxr = &rxq->vxrxq_cmd_ring[i];
959 		rxr->vxrxr_rid = i;
960 		rxr->vxrxr_ndesc = sc->vmx_nrxdescs;
961 		rxr->vxrxr_rxbuf = malloc(rxr->vxrxr_ndesc *
962 		    sizeof(struct vmxnet3_rxbuf), M_DEVBUF, M_NOWAIT | M_ZERO);
963 		if (rxr->vxrxr_rxbuf == NULL)
964 			return (ENOMEM);
965 
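		/* The completion ring covers both command rings. */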
966 		rxq->vxrxq_comp_ring.vxcr_ndesc += sc->vmx_nrxdescs;
967 	}
968 
969 	return (0);
970 }
971 
972 static int
973 vmxnet3_init_txq(struct vmxnet3_softc *sc, int q)
974 {
975 	struct vmxnet3_txqueue *txq;
976 	struct vmxnet3_txring *txr;
977 
978 	txq = &sc->vmx_txq[q];
979 	txr = &txq->vxtxq_cmd_ring;
980 
981 	snprintf(txq->vxtxq_name, sizeof(txq->vxtxq_name), "%s-tx%d",
982 	    device_get_nameunit(sc->vmx_dev), q);
983 	mtx_init(&txq->vxtxq_mtx, txq->vxtxq_name, NULL, MTX_DEF);
984 
985 	txq->vxtxq_sc = sc;
986 	txq->vxtxq_id = q;
987 
988 	txr->vxtxr_ndesc = sc->vmx_ntxdescs;
989 	txr->vxtxr_txbuf = malloc(txr->vxtxr_ndesc *
990 	    sizeof(struct vmxnet3_txbuf), M_DEVBUF, M_NOWAIT | M_ZERO);
991 	if (txr->vxtxr_txbuf == NULL)
992 		return (ENOMEM);
993 
994 	txq->vxtxq_comp_ring.vxcr_ndesc = sc->vmx_ntxdescs;
995 
996 #ifndef VMXNET3_LEGACY_TX
997 	TASK_INIT(&txq->vxtxq_defrtask, 0, vmxnet3_txq_tq_deferred, txq);
998 
999 	txq->vxtxq_br = buf_ring_alloc(VMXNET3_DEF_BUFRING_SIZE, M_DEVBUF,
1000 	    M_NOWAIT, &txq->vxtxq_mtx);
1001 	if (txq->vxtxq_br == NULL)
1002 		return (ENOMEM);
1003 #endif
1004 
1005 	return (0);
1006 }
1007 
1008 static int
1009 vmxnet3_alloc_rxtx_queues(struct vmxnet3_softc *sc)
1010 {
1011 	int i, error;
1012 
1013 	/*
1014 	 * Only attempt to create multiple queues if MSIX is available. MSIX is
1015 	 * disabled by default because it's apparently broken for devices passed
1016 	 * through by at least ESXi 5.1. The hw.pci.honor_msi_blacklist tunable
1017 	 * must be set to zero for MSIX. This check prevents us from allocating
1018 	 * queue structures that we will not use.
1019 	 */
1020 	if (sc->vmx_flags & VMXNET3_FLAG_NO_MSIX) {
1021 		sc->vmx_max_nrxqueues = 1;
1022 		sc->vmx_max_ntxqueues = 1;
1023 	}
1024 
1025 	sc->vmx_rxq = malloc(sizeof(struct vmxnet3_rxqueue) *
1026 	    sc->vmx_max_nrxqueues, M_DEVBUF, M_NOWAIT | M_ZERO);
1027 	sc->vmx_txq = malloc(sizeof(struct vmxnet3_txqueue) *
1028 	    sc->vmx_max_ntxqueues, M_DEVBUF, M_NOWAIT | M_ZERO);
1029 	if (sc->vmx_rxq == NULL || sc->vmx_txq == NULL)
1030 		return (ENOMEM);
1031 
1032 	for (i = 0; i < sc->vmx_max_nrxqueues; i++) {
1033 		error = vmxnet3_init_rxq(sc, i);
1034 		if (error)
1035 			return (error);
1036 	}
1037 
1038 	for (i = 0; i < sc->vmx_max_ntxqueues; i++) {
1039 		error = vmxnet3_init_txq(sc, i);
1040 		if (error)
1041 			return (error);
1042 	}
1043 
1044 	return (0);
1045 }
1046 
1047 static void
1048 vmxnet3_destroy_rxq(struct vmxnet3_rxqueue *rxq)
1049 {
1050 	struct vmxnet3_rxring *rxr;
1051 	int i;
1052 
1053 	rxq->vxrxq_sc = NULL;
1054 	rxq->vxrxq_id = -1;
1055 
1056 	for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
1057 		rxr = &rxq->vxrxq_cmd_ring[i];
1058 
1059 		if (rxr->vxrxr_rxbuf != NULL) {
1060 			free(rxr->vxrxr_rxbuf, M_DEVBUF);
1061 			rxr->vxrxr_rxbuf = NULL;
1062 		}
1063 	}
1064 
1065 	if (mtx_initialized(&rxq->vxrxq_mtx) != 0)
1066 		mtx_destroy(&rxq->vxrxq_mtx);
1067 }
1068 
1069 static void
1070 vmxnet3_destroy_txq(struct vmxnet3_txqueue *txq)
1071 {
1072 	struct vmxnet3_txring *txr;
1073 
1074 	txr = &txq->vxtxq_cmd_ring;
1075 
1076 	txq->vxtxq_sc = NULL;
1077 	txq->vxtxq_id = -1;
1078 
1079 #ifndef VMXNET3_LEGACY_TX
1080 	if (txq->vxtxq_br != NULL) {
1081 		buf_ring_free(txq->vxtxq_br, M_DEVBUF);
1082 		txq->vxtxq_br = NULL;
1083 	}
1084 #endif
1085 
1086 	if (txr->vxtxr_txbuf != NULL) {
1087 		free(txr->vxtxr_txbuf, M_DEVBUF);
1088 		txr->vxtxr_txbuf = NULL;
1089 	}
1090 
1091 	if (mtx_initialized(&txq->vxtxq_mtx) != 0)
1092 		mtx_destroy(&txq->vxtxq_mtx);
1093 }
1094 
1095 static void
1096 vmxnet3_free_rxtx_queues(struct vmxnet3_softc *sc)
1097 {
1098 	int i;
1099 
1100 	if (sc->vmx_rxq != NULL) {
1101 		for (i = 0; i < sc->vmx_max_nrxqueues; i++)
1102 			vmxnet3_destroy_rxq(&sc->vmx_rxq[i]);
1103 		free(sc->vmx_rxq, M_DEVBUF);
1104 		sc->vmx_rxq = NULL;
1105 	}
1106 
1107 	if (sc->vmx_txq != NULL) {
1108 		for (i = 0; i < sc->vmx_max_ntxqueues; i++)
1109 			vmxnet3_destroy_txq(&sc->vmx_txq[i]);
1110 		free(sc->vmx_txq, M_DEVBUF);
1111 		sc->vmx_txq = NULL;
1112 	}
1113 }
1114 
1115 static int
1116 vmxnet3_alloc_shared_data(struct vmxnet3_softc *sc)
1117 {
1118 	device_t dev;
1119 	uint8_t *kva;
1120 	size_t size;
1121 	int i, error;
1122 
1123 	dev = sc->vmx_dev;
1124 
1125 	size = sizeof(struct vmxnet3_driver_shared);
1126 	error = vmxnet3_dma_malloc(sc, size, 1, &sc->vmx_ds_dma);
1127 	if (error) {
1128 		device_printf(dev, "cannot alloc shared memory\n");
1129 		return (error);
1130 	}
1131 	sc->vmx_ds = (struct vmxnet3_driver_shared *) sc->vmx_ds_dma.dma_vaddr;
1132 
1133 	size = sc->vmx_ntxqueues * sizeof(struct vmxnet3_txq_shared) +
1134 	    sc->vmx_nrxqueues * sizeof(struct vmxnet3_rxq_shared);
1135 	error = vmxnet3_dma_malloc(sc, size, 128, &sc->vmx_qs_dma);
1136 	if (error) {
1137 		device_printf(dev, "cannot alloc queue shared memory\n");
1138 		return (error);
1139 	}
1140 	sc->vmx_qs = (void *) sc->vmx_qs_dma.dma_vaddr;
1141 	kva = sc->vmx_qs;
1142 
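	/* Carve the queue shared area into per-queue Tx and Rx structures. */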
1143 	for (i = 0; i < sc->vmx_ntxqueues; i++) {
1144 		sc->vmx_txq[i].vxtxq_ts = (struct vmxnet3_txq_shared *) kva;
1145 		kva += sizeof(struct vmxnet3_txq_shared);
1146 	}
1147 	for (i = 0; i < sc->vmx_nrxqueues; i++) {
1148 		sc->vmx_rxq[i].vxrxq_rs = (struct vmxnet3_rxq_shared *) kva;
1149 		kva += sizeof(struct vmxnet3_rxq_shared);
1150 	}
1151 
1152 	if (sc->vmx_flags & VMXNET3_FLAG_RSS) {
1153 		size = sizeof(struct vmxnet3_rss_shared);
1154 		error = vmxnet3_dma_malloc(sc, size, 128, &sc->vmx_rss_dma);
1155 		if (error) {
1156 			device_printf(dev, "cannot alloc rss shared memory\n");
1157 			return (error);
1158 		}
1159 		sc->vmx_rss =
1160 		    (struct vmxnet3_rss_shared *) sc->vmx_rss_dma.dma_vaddr;
1161 	}
1162 
1163 	return (0);
1164 }
1165 
1166 static void
1167 vmxnet3_free_shared_data(struct vmxnet3_softc *sc)
1168 {
1169 
1170 	if (sc->vmx_rss != NULL) {
1171 		vmxnet3_dma_free(sc, &sc->vmx_rss_dma);
1172 		sc->vmx_rss = NULL;
1173 	}
1174 
1175 	if (sc->vmx_qs != NULL) {
1176 		vmxnet3_dma_free(sc, &sc->vmx_qs_dma);
1177 		sc->vmx_qs = NULL;
1178 	}
1179 
1180 	if (sc->vmx_ds != NULL) {
1181 		vmxnet3_dma_free(sc, &sc->vmx_ds_dma);
1182 		sc->vmx_ds = NULL;
1183 	}
1184 }
1185 
1186 static int
1187 vmxnet3_alloc_txq_data(struct vmxnet3_softc *sc)
1188 {
1189 	device_t dev;
1190 	struct vmxnet3_txqueue *txq;
1191 	struct vmxnet3_txring *txr;
1192 	struct vmxnet3_comp_ring *txc;
1193 	size_t descsz, compsz;
1194 	int i, q, error;
1195 
1196 	dev = sc->vmx_dev;
1197 
1198 	for (q = 0; q < sc->vmx_ntxqueues; q++) {
1199 		txq = &sc->vmx_txq[q];
1200 		txr = &txq->vxtxq_cmd_ring;
1201 		txc = &txq->vxtxq_comp_ring;
1202 
1203 		descsz = txr->vxtxr_ndesc * sizeof(struct vmxnet3_txdesc);
1204 		compsz = txr->vxtxr_ndesc * sizeof(struct vmxnet3_txcompdesc);
1205 
1206 		error = bus_dma_tag_create(bus_get_dma_tag(dev),
1207 		    1, 0,			/* alignment, boundary */
1208 		    BUS_SPACE_MAXADDR,		/* lowaddr */
1209 		    BUS_SPACE_MAXADDR,		/* highaddr */
1210 		    NULL, NULL,			/* filter, filterarg */
1211 		    VMXNET3_TX_MAXSIZE,		/* maxsize */
1212 		    VMXNET3_TX_MAXSEGS,		/* nsegments */
1213 		    VMXNET3_TX_MAXSEGSIZE,	/* maxsegsize */
1214 		    0,				/* flags */
1215 		    NULL, NULL,			/* lockfunc, lockarg */
1216 		    &txr->vxtxr_txtag);
1217 		if (error) {
1218 			device_printf(dev,
1219 			    "unable to create Tx buffer tag for queue %d\n", q);
1220 			return (error);
1221 		}
1222 
1223 		error = vmxnet3_dma_malloc(sc, descsz, 512, &txr->vxtxr_dma);
1224 		if (error) {
1225 			device_printf(dev, "cannot alloc Tx descriptors for "
1226 			    "queue %d error %d\n", q, error);
1227 			return (error);
1228 		}
1229 		txr->vxtxr_txd =
1230 		    (struct vmxnet3_txdesc *) txr->vxtxr_dma.dma_vaddr;
1231 
1232 		error = vmxnet3_dma_malloc(sc, compsz, 512, &txc->vxcr_dma);
1233 		if (error) {
1234 			device_printf(dev, "cannot alloc Tx comp descriptors "
1235 			   "for queue %d error %d\n", q, error);
1236 			return (error);
1237 		}
1238 		txc->vxcr_u.txcd =
1239 		    (struct vmxnet3_txcompdesc *) txc->vxcr_dma.dma_vaddr;
1240 
1241 		for (i = 0; i < txr->vxtxr_ndesc; i++) {
1242 			error = bus_dmamap_create(txr->vxtxr_txtag, 0,
1243 			    &txr->vxtxr_txbuf[i].vtxb_dmamap);
1244 			if (error) {
1245 				device_printf(dev, "unable to create Tx buf "
1246 				    "dmamap for queue %d idx %d\n", q, i);
1247 				return (error);
1248 			}
1249 		}
1250 	}
1251 
1252 	return (0);
1253 }
1254 
1255 static void
1256 vmxnet3_free_txq_data(struct vmxnet3_softc *sc)
1257 {
1258 	device_t dev;
1259 	struct vmxnet3_txqueue *txq;
1260 	struct vmxnet3_txring *txr;
1261 	struct vmxnet3_comp_ring *txc;
1262 	struct vmxnet3_txbuf *txb;
1263 	int i, q;
1264 
1265 	dev = sc->vmx_dev;
1266 
1267 	for (q = 0; q < sc->vmx_ntxqueues; q++) {
1268 		txq = &sc->vmx_txq[q];
1269 		txr = &txq->vxtxq_cmd_ring;
1270 		txc = &txq->vxtxq_comp_ring;
1271 
1272 		for (i = 0; i < txr->vxtxr_ndesc; i++) {
1273 			txb = &txr->vxtxr_txbuf[i];
1274 			if (txb->vtxb_dmamap != NULL) {
1275 				bus_dmamap_destroy(txr->vxtxr_txtag,
1276 				    txb->vtxb_dmamap);
1277 				txb->vtxb_dmamap = NULL;
1278 			}
1279 		}
1280 
1281 		if (txc->vxcr_u.txcd != NULL) {
1282 			vmxnet3_dma_free(sc, &txc->vxcr_dma);
1283 			txc->vxcr_u.txcd = NULL;
1284 		}
1285 
1286 		if (txr->vxtxr_txd != NULL) {
1287 			vmxnet3_dma_free(sc, &txr->vxtxr_dma);
1288 			txr->vxtxr_txd = NULL;
1289 		}
1290 
1291 		if (txr->vxtxr_txtag != NULL) {
1292 			bus_dma_tag_destroy(txr->vxtxr_txtag);
1293 			txr->vxtxr_txtag = NULL;
1294 		}
1295 	}
1296 }
1297 
1298 static int
1299 vmxnet3_alloc_rxq_data(struct vmxnet3_softc *sc)
1300 {
1301 	device_t dev;
1302 	struct vmxnet3_rxqueue *rxq;
1303 	struct vmxnet3_rxring *rxr;
1304 	struct vmxnet3_comp_ring *rxc;
1305 	int descsz, compsz;
1306 	int i, j, q, error;
1307 
1308 	dev = sc->vmx_dev;
1309 
1310 	for (q = 0; q < sc->vmx_nrxqueues; q++) {
1311 		rxq = &sc->vmx_rxq[q];
1312 		rxc = &rxq->vxrxq_comp_ring;
1313 		compsz = 0;
1314 
1315 		for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
1316 			rxr = &rxq->vxrxq_cmd_ring[i];
1317 
1318 			descsz = rxr->vxrxr_ndesc *
1319 			    sizeof(struct vmxnet3_rxdesc);
1320 			compsz += rxr->vxrxr_ndesc *
1321 			    sizeof(struct vmxnet3_rxcompdesc);
1322 
1323 			error = bus_dma_tag_create(bus_get_dma_tag(dev),
1324 			    1, 0,		/* alignment, boundary */
1325 			    BUS_SPACE_MAXADDR,	/* lowaddr */
1326 			    BUS_SPACE_MAXADDR,	/* highaddr */
1327 			    NULL, NULL,		/* filter, filterarg */
1328 			    MJUMPAGESIZE,	/* maxsize */
1329 			    1,			/* nsegments */
1330 			    MJUMPAGESIZE,	/* maxsegsize */
1331 			    0,			/* flags */
1332 			    NULL, NULL,		/* lockfunc, lockarg */
1333 			    &rxr->vxrxr_rxtag);
1334 			if (error) {
1335 				device_printf(dev,
1336 				    "unable to create Rx buffer tag for "
1337 				    "queue %d\n", q);
1338 				return (error);
1339 			}
1340 
1341 			error = vmxnet3_dma_malloc(sc, descsz, 512,
1342 			    &rxr->vxrxr_dma);
1343 			if (error) {
1344 				device_printf(dev, "cannot allocate Rx "
1345 				    "descriptors for queue %d/%d error %d\n",
1346 				    q, i, error);
1347 				return (error);
1348 			}
1349 			rxr->vxrxr_rxd =
1350 			    (struct vmxnet3_rxdesc *) rxr->vxrxr_dma.dma_vaddr;
1351 		}
1352 
1353 		error = vmxnet3_dma_malloc(sc, compsz, 512, &rxc->vxcr_dma);
1354 		if (error) {
1355 			device_printf(dev, "cannot alloc Rx comp descriptors "
1356 			    "for queue %d error %d\n", q, error);
1357 			return (error);
1358 		}
1359 		rxc->vxcr_u.rxcd =
1360 		    (struct vmxnet3_rxcompdesc *) rxc->vxcr_dma.dma_vaddr;
1361 
1362 		for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
1363 			rxr = &rxq->vxrxq_cmd_ring[i];
1364 
1365 			error = bus_dmamap_create(rxr->vxrxr_rxtag, 0,
1366 			    &rxr->vxrxr_spare_dmap);
1367 			if (error) {
1368 				device_printf(dev, "unable to create spare "
1369 				    "dmamap for queue %d/%d error %d\n",
1370 				    q, i, error);
1371 				return (error);
1372 			}
1373 
1374 			for (j = 0; j < rxr->vxrxr_ndesc; j++) {
1375 				error = bus_dmamap_create(rxr->vxrxr_rxtag, 0,
1376 				    &rxr->vxrxr_rxbuf[j].vrxb_dmamap);
1377 				if (error) {
1378 					device_printf(dev, "unable to create "
1379 					    "dmamap for queue %d/%d slot %d "
1380 					    "error %d\n",
1381 					    q, i, j, error);
1382 					return (error);
1383 				}
1384 			}
1385 		}
1386 	}
1387 
1388 	return (0);
1389 }
1390 
1391 static void
1392 vmxnet3_free_rxq_data(struct vmxnet3_softc *sc)
1393 {
1394 	device_t dev;
1395 	struct vmxnet3_rxqueue *rxq;
1396 	struct vmxnet3_rxring *rxr;
1397 	struct vmxnet3_comp_ring *rxc;
1398 	struct vmxnet3_rxbuf *rxb;
1399 	int i, j, q;
1400 
1401 	dev = sc->vmx_dev;
1402 
1403 	for (q = 0; q < sc->vmx_nrxqueues; q++) {
1404 		rxq = &sc->vmx_rxq[q];
1405 		rxc = &rxq->vxrxq_comp_ring;
1406 
1407 		for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
1408 			rxr = &rxq->vxrxq_cmd_ring[i];
1409 
1410 			if (rxr->vxrxr_spare_dmap != NULL) {
1411 				bus_dmamap_destroy(rxr->vxrxr_rxtag,
1412 				    rxr->vxrxr_spare_dmap);
1413 				rxr->vxrxr_spare_dmap = NULL;
1414 			}
1415 
1416 			for (j = 0; j < rxr->vxrxr_ndesc; j++) {
1417 				rxb = &rxr->vxrxr_rxbuf[j];
1418 				if (rxb->vrxb_dmamap != NULL) {
1419 					bus_dmamap_destroy(rxr->vxrxr_rxtag,
1420 					    rxb->vrxb_dmamap);
1421 					rxb->vrxb_dmamap = NULL;
1422 				}
1423 			}
1424 		}
1425 
1426 		if (rxc->vxcr_u.rxcd != NULL) {
1427 			vmxnet3_dma_free(sc, &rxc->vxcr_dma);
1428 			rxc->vxcr_u.rxcd = NULL;
1429 		}
1430 
1431 		for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
1432 			rxr = &rxq->vxrxq_cmd_ring[i];
1433 
1434 			if (rxr->vxrxr_rxd != NULL) {
1435 				vmxnet3_dma_free(sc, &rxr->vxrxr_dma);
1436 				rxr->vxrxr_rxd = NULL;
1437 			}
1438 
1439 			if (rxr->vxrxr_rxtag != NULL) {
1440 				bus_dma_tag_destroy(rxr->vxrxr_rxtag);
1441 				rxr->vxrxr_rxtag = NULL;
1442 			}
1443 		}
1444 	}
1445 }
1446 
1447 static int
1448 vmxnet3_alloc_queue_data(struct vmxnet3_softc *sc)
1449 {
1450 	int error;
1451 
1452 	error = vmxnet3_alloc_txq_data(sc);
1453 	if (error)
1454 		return (error);
1455 
1456 	error = vmxnet3_alloc_rxq_data(sc);
1457 	if (error)
1458 		return (error);
1459 
1460 	return (0);
1461 }
1462 
1463 static void
1464 vmxnet3_free_queue_data(struct vmxnet3_softc *sc)
1465 {
1466 
1467 	if (sc->vmx_rxq != NULL)
1468 		vmxnet3_free_rxq_data(sc);
1469 
1470 	if (sc->vmx_txq != NULL)
1471 		vmxnet3_free_txq_data(sc);
1472 }
1473 
1474 static int
1475 vmxnet3_alloc_mcast_table(struct vmxnet3_softc *sc)
1476 {
1477 	int error;
1478 
1479 	error = vmxnet3_dma_malloc(sc, VMXNET3_MULTICAST_MAX * ETHER_ADDR_LEN,
1480 	    32, &sc->vmx_mcast_dma);
1481 	if (error)
1482 		device_printf(sc->vmx_dev, "unable to alloc multicast table\n");
1483 	else
1484 		sc->vmx_mcast = sc->vmx_mcast_dma.dma_vaddr;
1485 
1486 	return (error);
1487 }
1488 
1489 static void
1490 vmxnet3_free_mcast_table(struct vmxnet3_softc *sc)
1491 {
1492 
1493 	if (sc->vmx_mcast != NULL) {
1494 		vmxnet3_dma_free(sc, &sc->vmx_mcast_dma);
1495 		sc->vmx_mcast = NULL;
1496 	}
1497 }
1498 
1499 static void
1500 vmxnet3_init_shared_data(struct vmxnet3_softc *sc)
1501 {
1502 	struct vmxnet3_driver_shared *ds;
1503 	struct vmxnet3_txqueue *txq;
1504 	struct vmxnet3_txq_shared *txs;
1505 	struct vmxnet3_rxqueue *rxq;
1506 	struct vmxnet3_rxq_shared *rxs;
1507 	int i;
1508 
1509 	ds = sc->vmx_ds;
1510 
1511 	/*
1512 	 * Initialize fields of the shared data that remain the same across
1513 	 * reinits. Note the shared data is zeroed when allocated.
1514 	 */
1515 
1516 	ds->magic = VMXNET3_REV1_MAGIC;
1517 
1518 	/* DriverInfo */
1519 	ds->version = VMXNET3_DRIVER_VERSION;
1520 	ds->guest = VMXNET3_GOS_FREEBSD |
1521 #ifdef __LP64__
1522 	    VMXNET3_GOS_64BIT;
1523 #else
1524 	    VMXNET3_GOS_32BIT;
1525 #endif
1526 	ds->vmxnet3_revision = 1;
1527 	ds->upt_version = 1;
1528 
1529 	/* Misc. conf */
1530 	ds->driver_data = vtophys(sc);
1531 	ds->driver_data_len = sizeof(struct vmxnet3_softc);
1532 	ds->queue_shared = sc->vmx_qs_dma.dma_paddr;
1533 	ds->queue_shared_len = sc->vmx_qs_dma.dma_size;
1534 	ds->nrxsg_max = sc->vmx_max_rxsegs;
1535 
1536 	/* RSS conf */
1537 	if (sc->vmx_flags & VMXNET3_FLAG_RSS) {
1538 		ds->rss.version = 1;
1539 		ds->rss.paddr = sc->vmx_rss_dma.dma_paddr;
1540 		ds->rss.len = sc->vmx_rss_dma.dma_size;
1541 	}
1542 
1543 	/* Interrupt control. */
1544 	ds->automask = sc->vmx_intr_mask_mode == VMXNET3_IMM_AUTO;
1545 	ds->nintr = sc->vmx_nintrs;
1546 	ds->evintr = sc->vmx_event_intr_idx;
1547 	ds->ictrl = VMXNET3_ICTRL_DISABLE_ALL;
1548 
1549 	for (i = 0; i < sc->vmx_nintrs; i++)
1550 		ds->modlevel[i] = UPT1_IMOD_ADAPTIVE;
1551 
1552 	/* Receive filter. */
1553 	ds->mcast_table = sc->vmx_mcast_dma.dma_paddr;
1554 	ds->mcast_tablelen = sc->vmx_mcast_dma.dma_size;
1555 
1556 	/* Tx queues */
1557 	for (i = 0; i < sc->vmx_ntxqueues; i++) {
1558 		txq = &sc->vmx_txq[i];
1559 		txs = txq->vxtxq_ts;
1560 
1561 		txs->cmd_ring = txq->vxtxq_cmd_ring.vxtxr_dma.dma_paddr;
1562 		txs->cmd_ring_len = txq->vxtxq_cmd_ring.vxtxr_ndesc;
1563 		txs->comp_ring = txq->vxtxq_comp_ring.vxcr_dma.dma_paddr;
1564 		txs->comp_ring_len = txq->vxtxq_comp_ring.vxcr_ndesc;
1565 		txs->driver_data = vtophys(txq);
1566 		txs->driver_data_len = sizeof(struct vmxnet3_txqueue);
1567 	}
1568 
1569 	/* Rx queues */
1570 	for (i = 0; i < sc->vmx_nrxqueues; i++) {
1571 		rxq = &sc->vmx_rxq[i];
1572 		rxs = rxq->vxrxq_rs;
1573 
1574 		rxs->cmd_ring[0] = rxq->vxrxq_cmd_ring[0].vxrxr_dma.dma_paddr;
1575 		rxs->cmd_ring_len[0] = rxq->vxrxq_cmd_ring[0].vxrxr_ndesc;
1576 		rxs->cmd_ring[1] = rxq->vxrxq_cmd_ring[1].vxrxr_dma.dma_paddr;
1577 		rxs->cmd_ring_len[1] = rxq->vxrxq_cmd_ring[1].vxrxr_ndesc;
1578 		rxs->comp_ring = rxq->vxrxq_comp_ring.vxcr_dma.dma_paddr;
1579 		rxs->comp_ring_len = rxq->vxrxq_comp_ring.vxcr_ndesc;
1580 		rxs->driver_data = vtophys(rxq);
1581 		rxs->driver_data_len = sizeof(struct vmxnet3_rxqueue);
1582 	}
1583 }
1584 
1585 static void
1586 vmxnet3_reinit_interface(struct vmxnet3_softc *sc)
1587 {
1588 	struct ifnet *ifp;
1589 
1590 	ifp = sc->vmx_ifp;
1591 
1592 	/* Use the current MAC address. */
1593 	bcopy(IF_LLADDR(sc->vmx_ifp), sc->vmx_lladdr, ETHER_ADDR_LEN);
1594 	vmxnet3_set_lladdr(sc);
1595 
1596 	ifp->if_hwassist = 0;
1597 	if (ifp->if_capenable & IFCAP_TXCSUM)
1598 		ifp->if_hwassist |= VMXNET3_CSUM_OFFLOAD;
1599 	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
1600 		ifp->if_hwassist |= VMXNET3_CSUM_OFFLOAD_IPV6;
1601 	if (ifp->if_capenable & IFCAP_TSO4)
1602 		ifp->if_hwassist |= CSUM_IP_TSO;
1603 	if (ifp->if_capenable & IFCAP_TSO6)
1604 		ifp->if_hwassist |= CSUM_IP6_TSO;
1605 }
1606 
1607 static void
1608 vmxnet3_reinit_rss_shared_data(struct vmxnet3_softc *sc)
1609 {
1610 	/*
1611 	 * Use the same key as the Linux driver until FreeBSD can do
1612 	 * RSS (presumably Toeplitz) in software.
1613 	 */
1614 	static const uint8_t rss_key[UPT1_RSS_MAX_KEY_SIZE] = {
1615 	    0x3b, 0x56, 0xd1, 0x56, 0x13, 0x4a, 0xe7, 0xac,
1616 	    0xe8, 0x79, 0x09, 0x75, 0xe8, 0x65, 0x79, 0x28,
1617 	    0x35, 0x12, 0xb9, 0x56, 0x7c, 0x76, 0x4b, 0x70,
1618 	    0xd8, 0x56, 0xa3, 0x18, 0x9b, 0x0a, 0xee, 0xf3,
1619 	    0x96, 0xa6, 0x9f, 0x8f, 0x9e, 0x8c, 0x90, 0xc9,
1620 	};
1621 
1622 	struct vmxnet3_driver_shared *ds;
1623 	struct vmxnet3_rss_shared *rss;
1624 	int i;
1625 
1626 	ds = sc->vmx_ds;
1627 	rss = sc->vmx_rss;
1628 
1629 	rss->hash_type =
1630 	    UPT1_RSS_HASH_TYPE_IPV4 | UPT1_RSS_HASH_TYPE_TCP_IPV4 |
1631 	    UPT1_RSS_HASH_TYPE_IPV6 | UPT1_RSS_HASH_TYPE_TCP_IPV6;
1632 	rss->hash_func = UPT1_RSS_HASH_FUNC_TOEPLITZ;
1633 	rss->hash_key_size = UPT1_RSS_MAX_KEY_SIZE;
1634 	rss->ind_table_size = UPT1_RSS_MAX_IND_TABLE_SIZE;
1635 	memcpy(rss->hash_key, rss_key, UPT1_RSS_MAX_KEY_SIZE);
1636 
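	/* Spread the indirection table round-robin across the Rx queues. */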
1637 	for (i = 0; i < UPT1_RSS_MAX_IND_TABLE_SIZE; i++)
1638 		rss->ind_table[i] = i % sc->vmx_nrxqueues;
1639 }
1640 
1641 static void
1642 vmxnet3_reinit_shared_data(struct vmxnet3_softc *sc)
1643 {
1644 	struct ifnet *ifp;
1645 	struct vmxnet3_driver_shared *ds;
1646 
1647 	ifp = sc->vmx_ifp;
1648 	ds = sc->vmx_ds;
1649 
1650 	ds->mtu = ifp->if_mtu;
1651 	ds->ntxqueue = sc->vmx_ntxqueues;
1652 	ds->nrxqueue = sc->vmx_nrxqueues;
1653 
1654 	ds->upt_features = 0;
1655 	if (ifp->if_capenable & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6))
1656 		ds->upt_features |= UPT1_F_CSUM;
1657 	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1658 		ds->upt_features |= UPT1_F_VLAN;
1659 	if (ifp->if_capenable & IFCAP_LRO)
1660 		ds->upt_features |= UPT1_F_LRO;
1661 
1662 	if (sc->vmx_flags & VMXNET3_FLAG_RSS) {
1663 		ds->upt_features |= UPT1_F_RSS;
1664 		vmxnet3_reinit_rss_shared_data(sc);
1665 	}
1666 
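	/* Hand the device the shared area address, low then high 32 bits. */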
1667 	vmxnet3_write_bar1(sc, VMXNET3_BAR1_DSL, sc->vmx_ds_dma.dma_paddr);
1668 	vmxnet3_write_bar1(sc, VMXNET3_BAR1_DSH,
1669 	    (uint64_t) sc->vmx_ds_dma.dma_paddr >> 32);
1670 }
1671 
1672 static int
1673 vmxnet3_alloc_data(struct vmxnet3_softc *sc)
1674 {
1675 	int error;
1676 
1677 	error = vmxnet3_alloc_shared_data(sc);
1678 	if (error)
1679 		return (error);
1680 
1681 	error = vmxnet3_alloc_queue_data(sc);
1682 	if (error)
1683 		return (error);
1684 
1685 	error = vmxnet3_alloc_mcast_table(sc);
1686 	if (error)
1687 		return (error);
1688 
1689 	vmxnet3_init_shared_data(sc);
1690 
1691 	return (0);
1692 }
1693 
1694 static void
1695 vmxnet3_free_data(struct vmxnet3_softc *sc)
1696 {
1697 
1698 	vmxnet3_free_mcast_table(sc);
1699 	vmxnet3_free_queue_data(sc);
1700 	vmxnet3_free_shared_data(sc);
1701 }
1702 
1703 static int
1704 vmxnet3_setup_interface(struct vmxnet3_softc *sc)
1705 {
1706 	device_t dev;
1707 	struct ifnet *ifp;
1708 
1709 	dev = sc->vmx_dev;
1710 
1711 	ifp = sc->vmx_ifp = if_alloc(IFT_ETHER);
1712 	if (ifp == NULL) {
1713 		device_printf(dev, "cannot allocate ifnet structure\n");
1714 		return (ENOSPC);
1715 	}
1716 
1717 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1718 #if __FreeBSD_version < 1000025
1719 	ifp->if_baudrate = 1000000000;
1720 #elif __FreeBSD_version < 1100011
1721 	if_initbaudrate(ifp, IF_Gbps(10));
1722 #else
1723 	ifp->if_baudrate = IF_Gbps(10);
1724 #endif
1725 	ifp->if_softc = sc;
1726 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1727 	ifp->if_init = vmxnet3_init;
1728 	ifp->if_ioctl = vmxnet3_ioctl;
1729 	ifp->if_get_counter = vmxnet3_get_counter;
1730 	ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
1731 	ifp->if_hw_tsomaxsegcount = VMXNET3_TX_MAXSEGS;
1732 	ifp->if_hw_tsomaxsegsize = VMXNET3_TX_MAXSEGSIZE;
1733 
1734 #ifdef VMXNET3_LEGACY_TX
1735 	ifp->if_start = vmxnet3_start;
1736 	ifp->if_snd.ifq_drv_maxlen = sc->vmx_ntxdescs - 1;
1737 	IFQ_SET_MAXLEN(&ifp->if_snd, sc->vmx_ntxdescs - 1);
1738 	IFQ_SET_READY(&ifp->if_snd);
1739 #else
1740 	ifp->if_transmit = vmxnet3_txq_mq_start;
1741 	ifp->if_qflush = vmxnet3_qflush;
1742 #endif
1743 
1744 	vmxnet3_get_lladdr(sc);
1745 	ether_ifattach(ifp, sc->vmx_lladdr);
1746 
1747 	ifp->if_capabilities |= IFCAP_RXCSUM | IFCAP_TXCSUM;
1748 	ifp->if_capabilities |= IFCAP_RXCSUM_IPV6 | IFCAP_TXCSUM_IPV6;
1749 	ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_TSO6;
1750 	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |
1751 	    IFCAP_VLAN_HWCSUM;
1752 	ifp->if_capenable = ifp->if_capabilities;
1753 
1754 	/* These capabilities are not enabled by default. */
1755 	ifp->if_capabilities |= IFCAP_LRO | IFCAP_VLAN_HWFILTER;
1756 
1757 	sc->vmx_vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
1758 	    vmxnet3_register_vlan, sc, EVENTHANDLER_PRI_FIRST);
1759 	sc->vmx_vlan_detach = EVENTHANDLER_REGISTER(vlan_config,
1760 	    vmxnet3_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);
1761 
1762 	ifmedia_init(&sc->vmx_media, 0, vmxnet3_media_change,
1763 	    vmxnet3_media_status);
1764 	ifmedia_add(&sc->vmx_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1765 	ifmedia_set(&sc->vmx_media, IFM_ETHER | IFM_AUTO);
1766 
1767 	return (0);
1768 }
1769 
1770 static void
1771 vmxnet3_evintr(struct vmxnet3_softc *sc)
1772 {
1773 	device_t dev;
1774 	struct ifnet *ifp;
1775 	struct vmxnet3_txq_shared *ts;
1776 	struct vmxnet3_rxq_shared *rs;
1777 	uint32_t event;
1778 	int reset;
1779 
1780 	dev = sc->vmx_dev;
1781 	ifp = sc->vmx_ifp;
1782 	reset = 0;
1783 
1784 	VMXNET3_CORE_LOCK(sc);
1785 
1786 	/* Clear events. */
1787 	event = sc->vmx_ds->event;
1788 	vmxnet3_write_bar1(sc, VMXNET3_BAR1_EVENT, event);
1789 
1790 	if (event & VMXNET3_EVENT_LINK) {
1791 		vmxnet3_link_status(sc);
1792 		if (sc->vmx_link_active != 0)
1793 			vmxnet3_tx_start_all(sc);
1794 	}
1795 
1796 	if (event & (VMXNET3_EVENT_TQERROR | VMXNET3_EVENT_RQERROR)) {
1797 		reset = 1;
1798 		vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_STATUS);
1799 		ts = sc->vmx_txq[0].vxtxq_ts;
1800 		if (ts->stopped != 0)
1801 			device_printf(dev, "Tx queue error %#x\n", ts->error);
1802 		rs = sc->vmx_rxq[0].vxrxq_rs;
1803 		if (rs->stopped != 0)
1804 			device_printf(dev, "Rx queue error %#x\n", rs->error);
1805 		device_printf(dev, "Rx/Tx queue error event ... resetting\n");
1806 	}
1807 
1808 	if (event & VMXNET3_EVENT_DIC)
1809 		device_printf(dev, "device implementation change event\n");
1810 	if (event & VMXNET3_EVENT_DEBUG)
1811 		device_printf(dev, "debug event\n");
1812 
1813 	if (reset != 0) {
1814 		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1815 		vmxnet3_init_locked(sc);
1816 	}
1817 
1818 	VMXNET3_CORE_UNLOCK(sc);
1819 }
1820 
1821 static void
1822 vmxnet3_txq_eof(struct vmxnet3_txqueue *txq)
1823 {
1824 	struct vmxnet3_softc *sc;
1825 	struct ifnet *ifp;
1826 	struct vmxnet3_txring *txr;
1827 	struct vmxnet3_comp_ring *txc;
1828 	struct vmxnet3_txcompdesc *txcd;
1829 	struct vmxnet3_txbuf *txb;
1830 	struct mbuf *m;
1831 	u_int sop;
1832 
1833 	sc = txq->vxtxq_sc;
1834 	ifp = sc->vmx_ifp;
1835 	txr = &txq->vxtxq_cmd_ring;
1836 	txc = &txq->vxtxq_comp_ring;
1837 
1838 	VMXNET3_TXQ_LOCK_ASSERT(txq);
1839 
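	/*
	 * A completion descriptor is valid only while its generation bit
	 * matches the ring's; the generation flips on each ring wrap.
	 */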
1840 	for (;;) {
1841 		txcd = &txc->vxcr_u.txcd[txc->vxcr_next];
1842 		if (txcd->gen != txc->vxcr_gen)
1843 			break;
1844 		vmxnet3_barrier(sc, VMXNET3_BARRIER_RD);
1845 
1846 		if (++txc->vxcr_next == txc->vxcr_ndesc) {
1847 			txc->vxcr_next = 0;
1848 			txc->vxcr_gen ^= 1;
1849 		}
1850 
1851 		sop = txr->vxtxr_next;
1852 		txb = &txr->vxtxr_txbuf[sop];
1853 
1854 		if ((m = txb->vtxb_m) != NULL) {
1855 			bus_dmamap_sync(txr->vxtxr_txtag, txb->vtxb_dmamap,
1856 			    BUS_DMASYNC_POSTWRITE);
1857 			bus_dmamap_unload(txr->vxtxr_txtag, txb->vtxb_dmamap);
1858 
1859 			txq->vxtxq_stats.vmtxs_opackets++;
1860 			txq->vxtxq_stats.vmtxs_obytes += m->m_pkthdr.len;
1861 			if (m->m_flags & M_MCAST)
1862 				txq->vxtxq_stats.vmtxs_omcasts++;
1863 
1864 			m_freem(m);
1865 			txb->vtxb_m = NULL;
1866 		}
1867 
1868 		txr->vxtxr_next = (txcd->eop_idx + 1) % txr->vxtxr_ndesc;
1869 	}
1870 
1871 	if (txr->vxtxr_head == txr->vxtxr_next)
1872 		txq->vxtxq_watchdog = 0;
1873 }
1874 
1875 static int
1876 vmxnet3_newbuf(struct vmxnet3_softc *sc, struct vmxnet3_rxring *rxr)
1877 {
1878 	struct ifnet *ifp;
1879 	struct mbuf *m;
1880 	struct vmxnet3_rxdesc *rxd;
1881 	struct vmxnet3_rxbuf *rxb;
1882 	bus_dma_tag_t tag;
1883 	bus_dmamap_t dmap;
1884 	bus_dma_segment_t segs[1];
1885 	int idx, clsize, btype, flags, nsegs, error;
1886 
1887 	ifp = sc->vmx_ifp;
1888 	tag = rxr->vxrxr_rxtag;
1889 	dmap = rxr->vxrxr_spare_dmap;
1890 	idx = rxr->vxrxr_fill;
1891 	rxd = &rxr->vxrxr_rxd[idx];
1892 	rxb = &rxr->vxrxr_rxbuf[idx];
1893 
1894 #ifdef VMXNET3_FAILPOINTS
1895 	KFAIL_POINT_CODE(VMXNET3_FP, newbuf, return ENOBUFS);
1896 	if (rxr->vxrxr_rid != 0)
1897 		KFAIL_POINT_CODE(VMXNET3_FP, newbuf_body_only, return ENOBUFS);
1898 #endif
1899 
1900 	if (rxr->vxrxr_rid == 0 && (idx % sc->vmx_rx_max_chain) == 0) {
1901 		flags = M_PKTHDR;
1902 		clsize = MCLBYTES;
1903 		btype = VMXNET3_BTYPE_HEAD;
1904 	} else {
1905 #if __FreeBSD_version < 902001
1906 		/*
1907 		 * These mbufs will never be used for the start of a frame.
1908 		 * Roughly prior to branching releng/9.2,
1909 		 * bus_dmamap_load_mbuf_sg() required the mbuf to always be a
1910 		 * packet header. Avoid unnecessary mbuf initialization in
1911 		 * newer versions where that is not the case.
1912 		 */
1913 		flags = M_PKTHDR;
1914 #else
1915 		flags = 0;
1916 #endif
1917 		clsize = MJUMPAGESIZE;
1918 		btype = VMXNET3_BTYPE_BODY;
1919 	}
1920 
1921 	m = m_getjcl(M_NOWAIT, MT_DATA, flags, clsize);
1922 	if (m == NULL) {
1923 		sc->vmx_stats.vmst_mgetcl_failed++;
1924 		return (ENOBUFS);
1925 	}
1926 
1927 	if (btype == VMXNET3_BTYPE_HEAD) {
1928 		m->m_len = m->m_pkthdr.len = clsize;
1929 		m_adj(m, ETHER_ALIGN);
1930 	} else
1931 		m->m_len = clsize;
1932 
1933 	error = bus_dmamap_load_mbuf_sg(tag, dmap, m, &segs[0], &nsegs,
1934 	    BUS_DMA_NOWAIT);
1935 	if (error) {
1936 		m_freem(m);
1937 		sc->vmx_stats.vmst_mbuf_load_failed++;
1938 		return (error);
1939 	}
1940 	KASSERT(nsegs == 1,
1941 	    ("%s: mbuf %p with too many segments %d", __func__, m, nsegs));
1942 #if __FreeBSD_version < 902001
1943 	if (btype == VMXNET3_BTYPE_BODY)
1944 		m->m_flags &= ~M_PKTHDR;
1945 #endif
1946 
1947 	if (rxb->vrxb_m != NULL) {
1948 		bus_dmamap_sync(tag, rxb->vrxb_dmamap, BUS_DMASYNC_POSTREAD);
1949 		bus_dmamap_unload(tag, rxb->vrxb_dmamap);
1950 	}
1951 
1952 	rxr->vxrxr_spare_dmap = rxb->vrxb_dmamap;
1953 	rxb->vrxb_dmamap = dmap;
1954 	rxb->vrxb_m = m;
1955 
1956 	rxd->addr = segs[0].ds_addr;
1957 	rxd->len = segs[0].ds_len;
1958 	rxd->btype = btype;
1959 	rxd->gen = rxr->vxrxr_gen;
1960 
1961 	vmxnet3_rxr_increment_fill(rxr);
1962 	return (0);
1963 }
1964 
1965 static void
1966 vmxnet3_rxq_eof_discard(struct vmxnet3_rxqueue *rxq,
1967     struct vmxnet3_rxring *rxr, int idx)
1968 {
1969 	struct vmxnet3_rxdesc *rxd;
1970 
1971 	rxd = &rxr->vxrxr_rxd[idx];
1972 	rxd->gen = rxr->vxrxr_gen;
1973 	vmxnet3_rxr_increment_fill(rxr);
1974 }
1975 
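/*
 * Drop the remaining completions of a partially received frame, handing
 * each underlying Rx descriptor back to the device.
 */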
1976 static void
1977 vmxnet3_rxq_discard_chain(struct vmxnet3_rxqueue *rxq)
1978 {
1979 	struct vmxnet3_softc *sc;
1980 	struct vmxnet3_rxring *rxr;
1981 	struct vmxnet3_comp_ring *rxc;
1982 	struct vmxnet3_rxcompdesc *rxcd;
1983 	int idx, eof;
1984 
1985 	sc = rxq->vxrxq_sc;
1986 	rxc = &rxq->vxrxq_comp_ring;
1987 
1988 	do {
1989 		rxcd = &rxc->vxcr_u.rxcd[rxc->vxcr_next];
1990 		if (rxcd->gen != rxc->vxcr_gen)
1991 			break;		/* Not expected. */
1992 		vmxnet3_barrier(sc, VMXNET3_BARRIER_RD);
1993 
1994 		if (++rxc->vxcr_next == rxc->vxcr_ndesc) {
1995 			rxc->vxcr_next = 0;
1996 			rxc->vxcr_gen ^= 1;
1997 		}
1998 
1999 		idx = rxcd->rxd_idx;
2000 		eof = rxcd->eop;
2001 		if (rxcd->qid < sc->vmx_nrxqueues)
2002 			rxr = &rxq->vxrxq_cmd_ring[0];
2003 		else
2004 			rxr = &rxq->vxrxq_cmd_ring[1];
2005 		vmxnet3_rxq_eof_discard(rxq, rxr, idx);
2006 	} while (!eof);
2007 }
2008 
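/*
 * Translate the checksum bits in the Rx completion descriptor into the
 * mbuf csum_flags consumed by the network stack.
 */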
2009 static void
2010 vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *rxcd, struct mbuf *m)
2011 {
2012 
2013 	if (rxcd->ipv4) {
2014 		m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2015 		if (rxcd->ipcsum_ok)
2016 			m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2017 	}
2018 
2019 	if (!rxcd->fragment) {
2020 		if (rxcd->csum_ok && (rxcd->tcp || rxcd->udp)) {
2021 			m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
2022 			    CSUM_PSEUDO_HDR;
2023 			m->m_pkthdr.csum_data = 0xFFFF;
2024 		}
2025 	}
2026 }
2027 
2028 static void
2029 vmxnet3_rxq_input(struct vmxnet3_rxqueue *rxq,
2030     struct vmxnet3_rxcompdesc *rxcd, struct mbuf *m)
2031 {
2032 	struct vmxnet3_softc *sc;
2033 	struct ifnet *ifp;
2034 
2035 	sc = rxq->vxrxq_sc;
2036 	ifp = sc->vmx_ifp;
2037 
2038 	if (rxcd->error) {
2039 		rxq->vxrxq_stats.vmrxs_ierrors++;
2040 		m_freem(m);
2041 		return;
2042 	}
2043 
2044 #ifdef notyet
2045 	switch (rxcd->rss_type) {
2046 	case VMXNET3_RCD_RSS_TYPE_IPV4:
2047 		m->m_pkthdr.flowid = rxcd->rss_hash;
2048 		M_HASHTYPE_SET(m, M_HASHTYPE_RSS_IPV4);
2049 		break;
2050 	case VMXNET3_RCD_RSS_TYPE_TCPIPV4:
2051 		m->m_pkthdr.flowid = rxcd->rss_hash;
2052 		M_HASHTYPE_SET(m, M_HASHTYPE_RSS_TCP_IPV4);
2053 		break;
2054 	case VMXNET3_RCD_RSS_TYPE_IPV6:
2055 		m->m_pkthdr.flowid = rxcd->rss_hash;
2056 		M_HASHTYPE_SET(m, M_HASHTYPE_RSS_IPV6);
2057 		break;
2058 	case VMXNET3_RCD_RSS_TYPE_TCPIPV6:
2059 		m->m_pkthdr.flowid = rxcd->rss_hash;
2060 		M_HASHTYPE_SET(m, M_HASHTYPE_RSS_TCP_IPV6);
2061 		break;
2062 	default: /* VMXNET3_RCD_RSS_TYPE_NONE */
2063 		m->m_pkthdr.flowid = rxq->vxrxq_id;
2064 		M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
2065 		break;
2066 	}
2067 #else
2068 	m->m_pkthdr.flowid = rxq->vxrxq_id;
2069 	M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
2070 #endif
2071 
2072 	if (!rxcd->no_csum)
2073 		vmxnet3_rx_csum(rxcd, m);
2074 	if (rxcd->vlan) {
2075 		m->m_flags |= M_VLANTAG;
2076 		m->m_pkthdr.ether_vtag = rxcd->vtag;
2077 	}
2078 
2079 	rxq->vxrxq_stats.vmrxs_ipackets++;
2080 	rxq->vxrxq_stats.vmrxs_ibytes += m->m_pkthdr.len;
2081 
2082 	VMXNET3_RXQ_UNLOCK(rxq);
2083 	(*ifp->if_input)(ifp, m);
2084 	VMXNET3_RXQ_LOCK(rxq);
2085 }
2086 
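/*
 * Receive completion processing: reassemble frames that may span several
 * descriptors and hand completed packets to the stack. Called with the
 * Rx queue lock held; the lock is dropped around if_input().
 */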
2087 static void
2088 vmxnet3_rxq_eof(struct vmxnet3_rxqueue *rxq)
2089 {
2090 	struct vmxnet3_softc *sc;
2091 	struct ifnet *ifp;
2092 	struct vmxnet3_rxring *rxr;
2093 	struct vmxnet3_comp_ring *rxc;
2094 	struct vmxnet3_rxdesc *rxd;
2095 	struct vmxnet3_rxcompdesc *rxcd;
2096 	struct mbuf *m, *m_head, *m_tail;
2097 	int idx, length;
2098 
2099 	sc = rxq->vxrxq_sc;
2100 	ifp = sc->vmx_ifp;
2101 	rxc = &rxq->vxrxq_comp_ring;
2102 
2103 	VMXNET3_RXQ_LOCK_ASSERT(rxq);
2104 
2105 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2106 		return;
2107 
2108 	m_head = rxq->vxrxq_mhead;
2109 	rxq->vxrxq_mhead = NULL;
2110 	m_tail = rxq->vxrxq_mtail;
2111 	rxq->vxrxq_mtail = NULL;
2112 	MPASS(m_head == NULL || m_tail != NULL);
2113 
2114 	for (;;) {
2115 		rxcd = &rxc->vxcr_u.rxcd[rxc->vxcr_next];
2116 		if (rxcd->gen != rxc->vxcr_gen) {
2117 			rxq->vxrxq_mhead = m_head;
2118 			rxq->vxrxq_mtail = m_tail;
2119 			break;
2120 		}
2121 		vmxnet3_barrier(sc, VMXNET3_BARRIER_RD);
2122 
2123 		if (++rxc->vxcr_next == rxc->vxcr_ndesc) {
2124 			rxc->vxcr_next = 0;
2125 			rxc->vxcr_gen ^= 1;
2126 		}
2127 
2128 		idx = rxcd->rxd_idx;
2129 		length = rxcd->len;
2130 		if (rxcd->qid < sc->vmx_nrxqueues)
2131 			rxr = &rxq->vxrxq_cmd_ring[0];
2132 		else
2133 			rxr = &rxq->vxrxq_cmd_ring[1];
2134 		rxd = &rxr->vxrxr_rxd[idx];
2135 
2136 		m = rxr->vxrxr_rxbuf[idx].vrxb_m;
2137 		KASSERT(m != NULL, ("%s: queue %d idx %d without mbuf",
2138 		    __func__, rxcd->qid, idx));
2139 
2140 		/*
2141 		 * The host may skip descriptors. We detect this when the
2142 		 * completed descriptor's index does not match the ring's
2143 		 * current fill index. Catch up with the host now.
2144 		 */
2145 		if (__predict_false(rxr->vxrxr_fill != idx)) {
2146 			while (rxr->vxrxr_fill != idx) {
2147 				rxr->vxrxr_rxd[rxr->vxrxr_fill].gen =
2148 				    rxr->vxrxr_gen;
2149 				vmxnet3_rxr_increment_fill(rxr);
2150 			}
2151 		}
2152 
2153 		if (rxcd->sop) {
2154 			KASSERT(rxd->btype == VMXNET3_BTYPE_HEAD,
2155 			    ("%s: start of frame w/o head buffer", __func__));
2156 			KASSERT(rxr == &rxq->vxrxq_cmd_ring[0],
2157 			    ("%s: start of frame not in ring 0", __func__));
2158 			KASSERT((idx % sc->vmx_rx_max_chain) == 0,
2159 			    ("%s: start of frame at unexpected index %d (%d)",
2160 			     __func__, idx, sc->vmx_rx_max_chain));
2161 			KASSERT(m_head == NULL,
2162 			    ("%s: duplicate start of frame?", __func__));
2163 
2164 			if (length == 0) {
2165 				/* Just ignore this descriptor. */
2166 				vmxnet3_rxq_eof_discard(rxq, rxr, idx);
2167 				goto nextp;
2168 			}
2169 
2170 			if (vmxnet3_newbuf(sc, rxr) != 0) {
2171 				rxq->vxrxq_stats.vmrxs_iqdrops++;
2172 				vmxnet3_rxq_eof_discard(rxq, rxr, idx);
2173 				if (!rxcd->eop)
2174 					vmxnet3_rxq_discard_chain(rxq);
2175 				goto nextp;
2176 			}
2177 
2178 			m->m_pkthdr.rcvif = ifp;
2179 			m->m_pkthdr.len = m->m_len = length;
2180 			m->m_pkthdr.csum_flags = 0;
2181 			m_head = m_tail = m;
2182 
2183 		} else {
2184 			KASSERT(rxd->btype == VMXNET3_BTYPE_BODY,
2185 			    ("%s: non start of frame w/o body buffer", __func__));
2186 			KASSERT(m_head != NULL,
2187 			    ("%s: frame not started?", __func__));
2188 
2189 			if (vmxnet3_newbuf(sc, rxr) != 0) {
2190 				rxq->vxrxq_stats.vmrxs_iqdrops++;
2191 				vmxnet3_rxq_eof_discard(rxq, rxr, idx);
2192 				if (!rxcd->eop)
2193 					vmxnet3_rxq_discard_chain(rxq);
2194 				m_freem(m_head);
2195 				m_head = m_tail = NULL;
2196 				goto nextp;
2197 			}
2198 
2199 			m->m_len = length;
2200 			m_head->m_pkthdr.len += length;
2201 			m_tail->m_next = m;
2202 			m_tail = m;
2203 		}
2204 
2205 		if (rxcd->eop) {
2206 			vmxnet3_rxq_input(rxq, rxcd, m_head);
2207 			m_head = m_tail = NULL;
2208 
2209 			/* Must recheck after dropping the Rx lock. */
2210 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2211 				break;
2212 		}
2213 
2214 nextp:
2215 		if (__predict_false(rxq->vxrxq_rs->update_rxhead)) {
2216 			int qid = rxcd->qid;
2217 			bus_size_t r;
2218 
2219 			idx = (idx + 1) % rxr->vxrxr_ndesc;
2220 			if (qid >= sc->vmx_nrxqueues) {
2221 				qid -= sc->vmx_nrxqueues;
2222 				r = VMXNET3_BAR0_RXH2(qid);
2223 			} else
2224 				r = VMXNET3_BAR0_RXH1(qid);
2225 			vmxnet3_write_bar0(sc, r, idx);
2226 		}
2227 	}
2228 }
2229 
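/*
 * Single-vector (legacy INTx or MSI) interrupt handler: one vector
 * services device events, the first Rx queue, and the first Tx queue.
 */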
2230 static void
2231 vmxnet3_legacy_intr(void *xsc)
2232 {
2233 	struct vmxnet3_softc *sc;
2234 	struct vmxnet3_rxqueue *rxq;
2235 	struct vmxnet3_txqueue *txq;
2236 
2237 	sc = xsc;
2238 	rxq = &sc->vmx_rxq[0];
2239 	txq = &sc->vmx_txq[0];
2240 
2241 	if (sc->vmx_intr_type == VMXNET3_IT_LEGACY) {
2242 		if (vmxnet3_read_bar1(sc, VMXNET3_BAR1_INTR) == 0)
2243 			return;
2244 	}
2245 	if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
2246 		vmxnet3_disable_all_intrs(sc);
2247 
2248 	if (sc->vmx_ds->event != 0)
2249 		vmxnet3_evintr(sc);
2250 
2251 	VMXNET3_RXQ_LOCK(rxq);
2252 	vmxnet3_rxq_eof(rxq);
2253 	VMXNET3_RXQ_UNLOCK(rxq);
2254 
2255 	VMXNET3_TXQ_LOCK(txq);
2256 	vmxnet3_txq_eof(txq);
2257 	vmxnet3_txq_start(txq);
2258 	VMXNET3_TXQ_UNLOCK(txq);
2259 
2260 	vmxnet3_enable_all_intrs(sc);
2261 }
2262 
2263 static void
2264 vmxnet3_txq_intr(void *xtxq)
2265 {
2266 	struct vmxnet3_softc *sc;
2267 	struct vmxnet3_txqueue *txq;
2268 
2269 	txq = xtxq;
2270 	sc = txq->vxtxq_sc;
2271 
2272 	if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
2273 		vmxnet3_disable_intr(sc, txq->vxtxq_intr_idx);
2274 
2275 	VMXNET3_TXQ_LOCK(txq);
2276 	vmxnet3_txq_eof(txq);
2277 	vmxnet3_txq_start(txq);
2278 	VMXNET3_TXQ_UNLOCK(txq);
2279 
2280 	vmxnet3_enable_intr(sc, txq->vxtxq_intr_idx);
2281 }
2282 
2283 static void
2284 vmxnet3_rxq_intr(void *xrxq)
2285 {
2286 	struct vmxnet3_softc *sc;
2287 	struct vmxnet3_rxqueue *rxq;
2288 
2289 	rxq = xrxq;
2290 	sc = rxq->vxrxq_sc;
2291 
2292 	if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
2293 		vmxnet3_disable_intr(sc, rxq->vxrxq_intr_idx);
2294 
2295 	VMXNET3_RXQ_LOCK(rxq);
2296 	vmxnet3_rxq_eof(rxq);
2297 	VMXNET3_RXQ_UNLOCK(rxq);
2298 
2299 	vmxnet3_enable_intr(sc, rxq->vxrxq_intr_idx);
2300 }
2301 
2302 static void
2303 vmxnet3_event_intr(void *xsc)
2304 {
2305 	struct vmxnet3_softc *sc;
2306 
2307 	sc = xsc;
2308 
2309 	if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
2310 		vmxnet3_disable_intr(sc, sc->vmx_event_intr_idx);
2311 
2312 	if (sc->vmx_ds->event != 0)
2313 		vmxnet3_evintr(sc);
2314 
2315 	vmxnet3_enable_intr(sc, sc->vmx_event_intr_idx);
2316 }
2317 
2318 static void
2319 vmxnet3_txstop(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *txq)
2320 {
2321 	struct vmxnet3_txring *txr;
2322 	struct vmxnet3_txbuf *txb;
2323 	int i;
2324 
2325 	txr = &txq->vxtxq_cmd_ring;
2326 
2327 	for (i = 0; i < txr->vxtxr_ndesc; i++) {
2328 		txb = &txr->vxtxr_txbuf[i];
2329 
2330 		if (txb->vtxb_m == NULL)
2331 			continue;
2332 
2333 		bus_dmamap_sync(txr->vxtxr_txtag, txb->vtxb_dmamap,
2334 		    BUS_DMASYNC_POSTWRITE);
2335 		bus_dmamap_unload(txr->vxtxr_txtag, txb->vtxb_dmamap);
2336 		m_freem(txb->vtxb_m);
2337 		txb->vtxb_m = NULL;
2338 	}
2339 }
2340 
2341 static void
2342 vmxnet3_rxstop(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rxq)
2343 {
2344 	struct vmxnet3_rxring *rxr;
2345 	struct vmxnet3_rxbuf *rxb;
2346 	int i, j;
2347 
2348 	if (rxq->vxrxq_mhead != NULL) {
2349 		m_freem(rxq->vxrxq_mhead);
2350 		rxq->vxrxq_mhead = NULL;
2351 		rxq->vxrxq_mtail = NULL;
2352 	}
2353 
2354 	for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
2355 		rxr = &rxq->vxrxq_cmd_ring[i];
2356 
2357 		for (j = 0; j < rxr->vxrxr_ndesc; j++) {
2358 			rxb = &rxr->vxrxr_rxbuf[j];
2359 
2360 			if (rxb->vrxb_m == NULL)
2361 				continue;
2362 
2363 			bus_dmamap_sync(rxr->vxrxr_rxtag, rxb->vrxb_dmamap,
2364 			    BUS_DMASYNC_POSTREAD);
2365 			bus_dmamap_unload(rxr->vxrxr_rxtag, rxb->vrxb_dmamap);
2366 			m_freem(rxb->vrxb_m);
2367 			rxb->vrxb_m = NULL;
2368 		}
2369 	}
2370 }
2371 
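/*
 * Briefly acquire and release each queue lock so that any queue
 * interrupt handler still running has drained before the rings are
 * torn down.
 */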
2372 static void
2373 vmxnet3_stop_rendezvous(struct vmxnet3_softc *sc)
2374 {
2375 	struct vmxnet3_rxqueue *rxq;
2376 	struct vmxnet3_txqueue *txq;
2377 	int i;
2378 
2379 	for (i = 0; i < sc->vmx_nrxqueues; i++) {
2380 		rxq = &sc->vmx_rxq[i];
2381 		VMXNET3_RXQ_LOCK(rxq);
2382 		VMXNET3_RXQ_UNLOCK(rxq);
2383 	}
2384 
2385 	for (i = 0; i < sc->vmx_ntxqueues; i++) {
2386 		txq = &sc->vmx_txq[i];
2387 		VMXNET3_TXQ_LOCK(txq);
2388 		VMXNET3_TXQ_UNLOCK(txq);
2389 	}
2390 }
2391 
2392 static void
2393 vmxnet3_stop(struct vmxnet3_softc *sc)
2394 {
2395 	struct ifnet *ifp;
2396 	int q;
2397 
2398 	ifp = sc->vmx_ifp;
2399 	VMXNET3_CORE_LOCK_ASSERT(sc);
2400 
2401 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2402 	sc->vmx_link_active = 0;
2403 	callout_stop(&sc->vmx_tick);
2404 
2405 	/* Disable interrupts. */
2406 	vmxnet3_disable_all_intrs(sc);
2407 	vmxnet3_write_cmd(sc, VMXNET3_CMD_DISABLE);
2408 
2409 	vmxnet3_stop_rendezvous(sc);
2410 
2411 	for (q = 0; q < sc->vmx_ntxqueues; q++)
2412 		vmxnet3_txstop(sc, &sc->vmx_txq[q]);
2413 	for (q = 0; q < sc->vmx_nrxqueues; q++)
2414 		vmxnet3_rxstop(sc, &sc->vmx_rxq[q]);
2415 
2416 	vmxnet3_write_cmd(sc, VMXNET3_CMD_RESET);
2417 }
2418 
2419 static void
2420 vmxnet3_txinit(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *txq)
2421 {
2422 	struct vmxnet3_txring *txr;
2423 	struct vmxnet3_comp_ring *txc;
2424 
2425 	txr = &txq->vxtxq_cmd_ring;
2426 	txr->vxtxr_head = 0;
2427 	txr->vxtxr_next = 0;
2428 	txr->vxtxr_gen = VMXNET3_INIT_GEN;
2429 	bzero(txr->vxtxr_txd,
2430 	    txr->vxtxr_ndesc * sizeof(struct vmxnet3_txdesc));
2431 
2432 	txc = &txq->vxtxq_comp_ring;
2433 	txc->vxcr_next = 0;
2434 	txc->vxcr_gen = VMXNET3_INIT_GEN;
2435 	bzero(txc->vxcr_u.txcd,
2436 	    txc->vxcr_ndesc * sizeof(struct vmxnet3_txcompdesc));
2437 }
2438 
2439 static int
2440 vmxnet3_rxinit(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rxq)
2441 {
2442 	struct ifnet *ifp;
2443 	struct vmxnet3_rxring *rxr;
2444 	struct vmxnet3_comp_ring *rxc;
2445 	int i, populate, idx, frame_size, error;
2446 
2447 	ifp = sc->vmx_ifp;
2448 	frame_size = ETHER_ALIGN + sizeof(struct ether_vlan_header) +
2449 	    ifp->if_mtu;
2450 
2451 	/*
2452 	 * If the MTU causes us to exceed what a regular-sized cluster can
2453 	 * handle, we allocate a second MJUMPAGESIZE cluster after it in
2454 	 * ring 0. If in use, ring 1 always contains MJUMPAGESIZE clusters.
2455 	 *
2456 	 * Keep rx_max_chain a divisor of the maximum Rx ring size to make
2457 	 * our life easier. We do not support changing the ring size after
2458 	 * the attach.
2459 	 */
2460 	if (frame_size <= MCLBYTES)
2461 		sc->vmx_rx_max_chain = 1;
2462 	else
2463 		sc->vmx_rx_max_chain = 2;
2464 
2465 	/*
2466 	 * Only populate ring 1 if the configuration will take advantage
2467 	 * of it. That is either when LRO is enabled or the frame size
2468 	 * exceeds what ring 0 can contain.
2469 	 */
2470 	if ((ifp->if_capenable & IFCAP_LRO) == 0 &&
2471 	    frame_size <= MCLBYTES + MJUMPAGESIZE)
2472 		populate = 1;
2473 	else
2474 		populate = VMXNET3_RXRINGS_PERQ;
2475 
2476 	for (i = 0; i < populate; i++) {
2477 		rxr = &rxq->vxrxq_cmd_ring[i];
2478 		rxr->vxrxr_fill = 0;
2479 		rxr->vxrxr_gen = VMXNET3_INIT_GEN;
2480 		bzero(rxr->vxrxr_rxd,
2481 		    rxr->vxrxr_ndesc * sizeof(struct vmxnet3_rxdesc));
2482 
2483 		for (idx = 0; idx < rxr->vxrxr_ndesc; idx++) {
2484 			error = vmxnet3_newbuf(sc, rxr);
2485 			if (error)
2486 				return (error);
2487 		}
2488 	}
2489 
2490 	for (/**/; i < VMXNET3_RXRINGS_PERQ; i++) {
2491 		rxr = &rxq->vxrxq_cmd_ring[i];
2492 		rxr->vxrxr_fill = 0;
2493 		rxr->vxrxr_gen = 0;
2494 		bzero(rxr->vxrxr_rxd,
2495 		    rxr->vxrxr_ndesc * sizeof(struct vmxnet3_rxdesc));
2496 	}
2497 
2498 	rxc = &rxq->vxrxq_comp_ring;
2499 	rxc->vxcr_next = 0;
2500 	rxc->vxcr_gen = VMXNET3_INIT_GEN;
2501 	bzero(rxc->vxcr_u.rxcd,
2502 	    rxc->vxcr_ndesc * sizeof(struct vmxnet3_rxcompdesc));
2503 
2504 	return (0);
2505 }
2506 
2507 static int
2508 vmxnet3_reinit_queues(struct vmxnet3_softc *sc)
2509 {
2510 	device_t dev;
2511 	int q, error;
2512 
2513 	dev = sc->vmx_dev;
2514 
2515 	for (q = 0; q < sc->vmx_ntxqueues; q++)
2516 		vmxnet3_txinit(sc, &sc->vmx_txq[q]);
2517 
2518 	for (q = 0; q < sc->vmx_nrxqueues; q++) {
2519 		error = vmxnet3_rxinit(sc, &sc->vmx_rxq[q]);
2520 		if (error) {
2521 			device_printf(dev, "cannot populate Rx queue %d\n", q);
2522 			return (error);
2523 		}
2524 	}
2525 
2526 	return (0);
2527 }
2528 
2529 static int
2530 vmxnet3_enable_device(struct vmxnet3_softc *sc)
2531 {
2532 	int q;
2533 
2534 	if (vmxnet3_read_cmd(sc, VMXNET3_CMD_ENABLE) != 0) {
2535 		device_printf(sc->vmx_dev, "device enable command failed!\n");
2536 		return (1);
2537 	}
2538 
2539 	/* Reset the Rx queue heads. */
2540 	for (q = 0; q < sc->vmx_nrxqueues; q++) {
2541 		vmxnet3_write_bar0(sc, VMXNET3_BAR0_RXH1(q), 0);
2542 		vmxnet3_write_bar0(sc, VMXNET3_BAR0_RXH2(q), 0);
2543 	}
2544 
2545 	return (0);
2546 }
2547 
2548 static void
2549 vmxnet3_reinit_rxfilters(struct vmxnet3_softc *sc)
2550 {
2551 	struct ifnet *ifp;
2552 
2553 	ifp = sc->vmx_ifp;
2554 
2555 	vmxnet3_set_rxfilter(sc);
2556 
2557 	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
2558 		bcopy(sc->vmx_vlan_filter, sc->vmx_ds->vlan_filter,
2559 		    sizeof(sc->vmx_ds->vlan_filter));
2560 	else
2561 		bzero(sc->vmx_ds->vlan_filter,
2562 		    sizeof(sc->vmx_ds->vlan_filter));
2563 	vmxnet3_write_cmd(sc, VMXNET3_CMD_VLAN_FILTER);
2564 }
2565 
2566 static int
2567 vmxnet3_reinit(struct vmxnet3_softc *sc)
2568 {
2569 
2570 	vmxnet3_reinit_interface(sc);
2571 	vmxnet3_reinit_shared_data(sc);
2572 
2573 	if (vmxnet3_reinit_queues(sc) != 0)
2574 		return (ENXIO);
2575 
2576 	if (vmxnet3_enable_device(sc) != 0)
2577 		return (ENXIO);
2578 
2579 	vmxnet3_reinit_rxfilters(sc);
2580 
2581 	return (0);
2582 }
2583 
2584 static void
2585 vmxnet3_init_locked(struct vmxnet3_softc *sc)
2586 {
2587 	struct ifnet *ifp;
2588 
2589 	ifp = sc->vmx_ifp;
2590 
2591 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2592 		return;
2593 
2594 	vmxnet3_stop(sc);
2595 
2596 	if (vmxnet3_reinit(sc) != 0) {
2597 		vmxnet3_stop(sc);
2598 		return;
2599 	}
2600 
2601 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2602 	vmxnet3_link_status(sc);
2603 
2604 	vmxnet3_enable_all_intrs(sc);
2605 	callout_reset(&sc->vmx_tick, hz, vmxnet3_tick, sc);
2606 }
2607 
2608 static void
2609 vmxnet3_init(void *xsc)
2610 {
2611 	struct vmxnet3_softc *sc;
2612 
2613 	sc = xsc;
2614 
2615 	VMXNET3_CORE_LOCK(sc);
2616 	vmxnet3_init_locked(sc);
2617 	VMXNET3_CORE_UNLOCK(sc);
2618 }
2619 
2620 /*
2621  * BMV: Much of this can go away once we finally have offsets in
2622  * the mbuf packet header. Bug andre@.
2623  */
2624 static int
2625 vmxnet3_txq_offload_ctx(struct vmxnet3_txqueue *txq, struct mbuf *m,
2626     int *etype, int *proto, int *start)
2627 {
2628 	struct ether_vlan_header *evh;
2629 	int offset;
2630 #if defined(INET)
2631 	struct ip *ip = NULL;
2632 	struct ip iphdr;
2633 #endif
2634 #if defined(INET6)
2635 	struct ip6_hdr *ip6 = NULL;
2636 	struct ip6_hdr ip6hdr;
2637 #endif
2638 
2639 	evh = mtod(m, struct ether_vlan_header *);
2640 	if (evh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2641 		/* BMV: We should handle nested VLAN tags too. */
2642 		*etype = ntohs(evh->evl_proto);
2643 		offset = sizeof(struct ether_vlan_header);
2644 	} else {
2645 		*etype = ntohs(evh->evl_encap_proto);
2646 		offset = sizeof(struct ether_header);
2647 	}
2648 
2649 	switch (*etype) {
2650 #if defined(INET)
2651 	case ETHERTYPE_IP:
2652 		if (__predict_false(m->m_len < offset + sizeof(struct ip))) {
2653 			m_copydata(m, offset, sizeof(struct ip),
2654 			    (caddr_t) &iphdr);
2655 			ip = &iphdr;
2656 		} else
2657 			ip = mtodo(m, offset);
2658 		*proto = ip->ip_p;
2659 		*start = offset + (ip->ip_hl << 2);
2660 		break;
2661 #endif
2662 #if defined(INET6)
2663 	case ETHERTYPE_IPV6:
2664 		if (__predict_false(m->m_len <
2665 		    offset + sizeof(struct ip6_hdr))) {
2666 			m_copydata(m, offset, sizeof(struct ip6_hdr),
2667 			    (caddr_t) &ip6hdr);
2668 			ip6 = &ip6hdr;
2669 		} else
2670 			ip6 = mtodo(m, offset);
2671 		*proto = -1;
2672 		*start = ip6_lasthdr(m, offset, IPPROTO_IPV6, proto);
2673 		/* Assert the network stack sent us a valid packet. */
2674 		KASSERT(*start > offset,
2675 		    ("%s: mbuf %p start %d offset %d proto %d", __func__, m,
2676 		    *start, offset, *proto));
2677 		break;
2678 #endif
2679 	default:
2680 		return (EINVAL);
2681 	}
2682 
2683 	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
2684 		struct tcphdr *tcp, tcphdr;
2685 		uint16_t sum;
2686 
2687 		if (__predict_false(*proto != IPPROTO_TCP)) {
2688 			/* Likely failed to correctly parse the mbuf. */
2689 			return (EINVAL);
2690 		}
2691 
2692 		txq->vxtxq_stats.vmtxs_tso++;
2693 
2694 		switch (*etype) {
2695 #if defined(INET)
2696 		case ETHERTYPE_IP:
2697 			sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
2698 			    htons(IPPROTO_TCP));
2699 			break;
2700 #endif
2701 #if defined(INET6)
2702 		case ETHERTYPE_IPV6:
2703 			sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
2704 			break;
2705 #endif
2706 		default:
2707 			sum = 0;
2708 			break;
2709 		}
2710 
2711 		if (m->m_len < *start + sizeof(struct tcphdr)) {
2712 			m_copyback(m, *start + offsetof(struct tcphdr, th_sum),
2713 			    sizeof(uint16_t), (caddr_t) &sum);
2714 			m_copydata(m, *start, sizeof(struct tcphdr),
2715 			    (caddr_t) &tcphdr);
2716 			tcp = &tcphdr;
2717 		} else {
2718 			tcp = mtodo(m, *start);
2719 			tcp->th_sum = sum;
2720 		}
2721 
2722 		/*
2723 		 * For TSO, the size of the protocol header is also
2724 		 * included in the descriptor header size.
2725 		 */
2726 		*start += (tcp->th_off << 2);
2727 	} else
2728 		txq->vxtxq_stats.vmtxs_csum++;
2729 
2730 	return (0);
2731 }
2732 
2733 static int
2734 vmxnet3_txq_load_mbuf(struct vmxnet3_txqueue *txq, struct mbuf **m0,
2735     bus_dmamap_t dmap, bus_dma_segment_t segs[], int *nsegs)
2736 {
2737 	struct vmxnet3_txring *txr;
2738 	struct mbuf *m;
2739 	bus_dma_tag_t tag;
2740 	int error;
2741 
2742 	txr = &txq->vxtxq_cmd_ring;
2743 	m = *m0;
2744 	tag = txr->vxtxr_txtag;
2745 
2746 	error = bus_dmamap_load_mbuf_sg(tag, dmap, m, segs, nsegs, 0);
2747 	if (error == 0 || error != EFBIG)
2748 		return (error);
2749 
2750 	m = m_defrag(m, M_NOWAIT);
2751 	if (m != NULL) {
2752 		*m0 = m;
2753 		error = bus_dmamap_load_mbuf_sg(tag, dmap, m, segs, nsegs, 0);
2754 	} else
2755 		error = ENOBUFS;
2756 
2757 	if (error) {
2758 		m_freem(*m0);
2759 		*m0 = NULL;
2760 		txq->vxtxq_sc->vmx_stats.vmst_defrag_failed++;
2761 	} else
2762 		txq->vxtxq_sc->vmx_stats.vmst_defragged++;
2763 
2764 	return (error);
2765 }
2766 
2767 static void
2768 vmxnet3_txq_unload_mbuf(struct vmxnet3_txqueue *txq, bus_dmamap_t dmap)
2769 {
2770 	struct vmxnet3_txring *txr;
2771 
2772 	txr = &txq->vxtxq_cmd_ring;
2773 	bus_dmamap_unload(txr->vxtxr_txtag, dmap);
2774 }
2775 
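/*
 * Enqueue one mbuf chain: load it into a DMA map, fill one Tx descriptor
 * per segment, set up any VLAN/checksum/TSO offload in the SOP
 * descriptor, and finally flip the generation bit to hand the packet to
 * the device.
 */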
2776 static int
2777 vmxnet3_txq_encap(struct vmxnet3_txqueue *txq, struct mbuf **m0)
2778 {
2779 	struct vmxnet3_softc *sc;
2780 	struct vmxnet3_txring *txr;
2781 	struct vmxnet3_txdesc *txd, *sop;
2782 	struct mbuf *m;
2783 	bus_dmamap_t dmap;
2784 	bus_dma_segment_t segs[VMXNET3_TX_MAXSEGS];
2785 	int i, gen, nsegs, etype, proto, start, error;
2786 
2787 	sc = txq->vxtxq_sc;
2788 	start = 0;
2789 	txd = NULL;
2790 	txr = &txq->vxtxq_cmd_ring;
2791 	dmap = txr->vxtxr_txbuf[txr->vxtxr_head].vtxb_dmamap;
2792 
2793 	error = vmxnet3_txq_load_mbuf(txq, m0, dmap, segs, &nsegs);
2794 	if (error)
2795 		return (error);
2796 
2797 	m = *m0;
2798 	M_ASSERTPKTHDR(m);
2799 	KASSERT(nsegs <= VMXNET3_TX_MAXSEGS,
2800 	    ("%s: mbuf %p with too many segments %d", __func__, m, nsegs));
2801 
2802 	if (VMXNET3_TXRING_AVAIL(txr) < nsegs) {
2803 		txq->vxtxq_stats.vmtxs_full++;
2804 		vmxnet3_txq_unload_mbuf(txq, dmap);
2805 		return (ENOSPC);
2806 	} else if (m->m_pkthdr.csum_flags & VMXNET3_CSUM_ALL_OFFLOAD) {
2807 		error = vmxnet3_txq_offload_ctx(txq, m, &etype, &proto, &start);
2808 		if (error) {
2809 			txq->vxtxq_stats.vmtxs_offload_failed++;
2810 			vmxnet3_txq_unload_mbuf(txq, dmap);
2811 			m_freem(m);
2812 			*m0 = NULL;
2813 			return (error);
2814 		}
2815 	}
2816 
2817 	txr->vxtxr_txbuf[txr->vxtxr_head].vtxb_m = m;
2818 	sop = &txr->vxtxr_txd[txr->vxtxr_head];
2819 	gen = txr->vxtxr_gen ^ 1;	/* Still owned by the CPU. */
2820 
2821 	for (i = 0; i < nsegs; i++) {
2822 		txd = &txr->vxtxr_txd[txr->vxtxr_head];
2823 
2824 		txd->addr = segs[i].ds_addr;
2825 		txd->len = segs[i].ds_len;
2826 		txd->gen = gen;
2827 		txd->dtype = 0;
2828 		txd->offload_mode = VMXNET3_OM_NONE;
2829 		txd->offload_pos = 0;
2830 		txd->hlen = 0;
2831 		txd->eop = 0;
2832 		txd->compreq = 0;
2833 		txd->vtag_mode = 0;
2834 		txd->vtag = 0;
2835 
2836 		if (++txr->vxtxr_head == txr->vxtxr_ndesc) {
2837 			txr->vxtxr_head = 0;
2838 			txr->vxtxr_gen ^= 1;
2839 		}
2840 		gen = txr->vxtxr_gen;
2841 	}
2842 	txd->eop = 1;
2843 	txd->compreq = 1;
2844 
2845 	if (m->m_flags & M_VLANTAG) {
2846 		sop->vtag_mode = 1;
2847 		sop->vtag = m->m_pkthdr.ether_vtag;
2848 	}
2849 
2850 	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
2851 		sop->offload_mode = VMXNET3_OM_TSO;
2852 		sop->hlen = start;
2853 		sop->offload_pos = m->m_pkthdr.tso_segsz;
2854 	} else if (m->m_pkthdr.csum_flags & (VMXNET3_CSUM_OFFLOAD |
2855 	    VMXNET3_CSUM_OFFLOAD_IPV6)) {
2856 		sop->offload_mode = VMXNET3_OM_CSUM;
2857 		sop->hlen = start;
2858 		sop->offload_pos = start + m->m_pkthdr.csum_data;
2859 	}
2860 
2861 	/* Finally, change the ownership. */
2862 	vmxnet3_barrier(sc, VMXNET3_BARRIER_WR);
2863 	sop->gen ^= 1;
2864 
2865 	txq->vxtxq_ts->npending += nsegs;
2866 	if (txq->vxtxq_ts->npending >= txq->vxtxq_ts->intr_threshold) {
2867 		txq->vxtxq_ts->npending = 0;
2868 		vmxnet3_write_bar0(sc, VMXNET3_BAR0_TXH(txq->vxtxq_id),
2869 		    txr->vxtxr_head);
2870 	}
2871 
2872 	return (0);
2873 }
2874 
2875 #ifdef VMXNET3_LEGACY_TX
2876 
2877 static void
2878 vmxnet3_start_locked(struct ifnet *ifp)
2879 {
2880 	struct vmxnet3_softc *sc;
2881 	struct vmxnet3_txqueue *txq;
2882 	struct vmxnet3_txring *txr;
2883 	struct mbuf *m_head;
2884 	int tx, avail;
2885 
2886 	sc = ifp->if_softc;
2887 	txq = &sc->vmx_txq[0];
2888 	txr = &txq->vxtxq_cmd_ring;
2889 	tx = 0;
2890 
2891 	VMXNET3_TXQ_LOCK_ASSERT(txq);
2892 
2893 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
2894 	    sc->vmx_link_active == 0)
2895 		return;
2896 
2897 	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
2898 		if ((avail = VMXNET3_TXRING_AVAIL(txr)) < 2)
2899 			break;
2900 
2901 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2902 		if (m_head == NULL)
2903 			break;
2904 
2905 		/* Assume the worst case if this mbuf is the head of a chain. */
2906 		if (m_head->m_next != NULL && avail < VMXNET3_TX_MAXSEGS) {
2907 			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2908 			break;
2909 		}
2910 
2911 		if (vmxnet3_txq_encap(txq, &m_head) != 0) {
2912 			if (m_head != NULL)
2913 				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2914 			break;
2915 		}
2916 
2917 		tx++;
2918 		ETHER_BPF_MTAP(ifp, m_head);
2919 	}
2920 
2921 	if (tx > 0)
2922 		txq->vxtxq_watchdog = VMXNET3_WATCHDOG_TIMEOUT;
2923 }
2924 
2925 static void
2926 vmxnet3_start(struct ifnet *ifp)
2927 {
2928 	struct vmxnet3_softc *sc;
2929 	struct vmxnet3_txqueue *txq;
2930 
2931 	sc = ifp->if_softc;
2932 	txq = &sc->vmx_txq[0];
2933 
2934 	VMXNET3_TXQ_LOCK(txq);
2935 	vmxnet3_start_locked(ifp);
2936 	VMXNET3_TXQ_UNLOCK(txq);
2937 }
2938 
2939 #else /* !VMXNET3_LEGACY_TX */
2940 
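/*
 * Multiqueue transmit: drain the per-queue buf_ring, encapsulating as
 * many packets as the command ring has room for. Called with the Tx
 * queue lock held.
 */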
2941 static int
2942 vmxnet3_txq_mq_start_locked(struct vmxnet3_txqueue *txq, struct mbuf *m)
2943 {
2944 	struct vmxnet3_softc *sc;
2945 	struct vmxnet3_txring *txr;
2946 	struct buf_ring *br;
2947 	struct ifnet *ifp;
2948 	int tx, avail, error;
2949 
2950 	sc = txq->vxtxq_sc;
2951 	br = txq->vxtxq_br;
2952 	ifp = sc->vmx_ifp;
2953 	txr = &txq->vxtxq_cmd_ring;
2954 	tx = 0;
2955 	error = 0;
2956 
2957 	VMXNET3_TXQ_LOCK_ASSERT(txq);
2958 
2959 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
2960 	    sc->vmx_link_active == 0) {
2961 		if (m != NULL)
2962 			error = drbr_enqueue(ifp, br, m);
2963 		return (error);
2964 	}
2965 
2966 	if (m != NULL) {
2967 		error = drbr_enqueue(ifp, br, m);
2968 		if (error)
2969 			return (error);
2970 	}
2971 
2972 	while ((avail = VMXNET3_TXRING_AVAIL(txr)) >= 2) {
2973 		m = drbr_peek(ifp, br);
2974 		if (m == NULL)
2975 			break;
2976 
2977 		/* Assume the worst case if this mbuf is the head of a chain. */
2978 		if (m->m_next != NULL && avail < VMXNET3_TX_MAXSEGS) {
2979 			drbr_putback(ifp, br, m);
2980 			break;
2981 		}
2982 
2983 		if (vmxnet3_txq_encap(txq, &m) != 0) {
2984 			if (m != NULL)
2985 				drbr_putback(ifp, br, m);
2986 			else
2987 				drbr_advance(ifp, br);
2988 			break;
2989 		}
2990 		drbr_advance(ifp, br);
2991 
2992 		tx++;
2993 		ETHER_BPF_MTAP(ifp, m);
2994 	}
2995 
2996 	if (tx > 0)
2997 		txq->vxtxq_watchdog = VMXNET3_WATCHDOG_TIMEOUT;
2998 
2999 	return (0);
3000 }
3001 
3002 static int
3003 vmxnet3_txq_mq_start(struct ifnet *ifp, struct mbuf *m)
3004 {
3005 	struct vmxnet3_softc *sc;
3006 	struct vmxnet3_txqueue *txq;
3007 	int i, ntxq, error;
3008 
3009 	sc = ifp->if_softc;
3010 	ntxq = sc->vmx_ntxqueues;
3011 
3012 	/* Check if the flowid is set. */
3013 	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
3014 		i = m->m_pkthdr.flowid % ntxq;
3015 	else
3016 		i = curcpu % ntxq;
3017 
3018 	txq = &sc->vmx_txq[i];
3019 
3020 	if (VMXNET3_TXQ_TRYLOCK(txq) != 0) {
3021 		error = vmxnet3_txq_mq_start_locked(txq, m);
3022 		VMXNET3_TXQ_UNLOCK(txq);
3023 	} else {
3024 		error = drbr_enqueue(ifp, txq->vxtxq_br, m);
3025 		taskqueue_enqueue(sc->vmx_tq, &txq->vxtxq_defrtask);
3026 	}
3027 
3028 	return (error);
3029 }
3030 
3031 static void
3032 vmxnet3_txq_tq_deferred(void *xtxq, int pending)
3033 {
3034 	struct vmxnet3_softc *sc;
3035 	struct vmxnet3_txqueue *txq;
3036 
3037 	txq = xtxq;
3038 	sc = txq->vxtxq_sc;
3039 
3040 	VMXNET3_TXQ_LOCK(txq);
3041 	if (!drbr_empty(sc->vmx_ifp, txq->vxtxq_br))
3042 		vmxnet3_txq_mq_start_locked(txq, NULL);
3043 	VMXNET3_TXQ_UNLOCK(txq);
3044 }
3045 
3046 #endif /* VMXNET3_LEGACY_TX */
3047 
3048 static void
3049 vmxnet3_txq_start(struct vmxnet3_txqueue *txq)
3050 {
3051 	struct vmxnet3_softc *sc;
3052 	struct ifnet *ifp;
3053 
3054 	sc = txq->vxtxq_sc;
3055 	ifp = sc->vmx_ifp;
3056 
3057 #ifdef VMXNET3_LEGACY_TX
3058 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3059 		vmxnet3_start_locked(ifp);
3060 #else
3061 	if (!drbr_empty(ifp, txq->vxtxq_br))
3062 		vmxnet3_txq_mq_start_locked(txq, NULL);
3063 #endif
3064 }
3065 
3066 static void
3067 vmxnet3_tx_start_all(struct vmxnet3_softc *sc)
3068 {
3069 	struct vmxnet3_txqueue *txq;
3070 	int i;
3071 
3072 	VMXNET3_CORE_LOCK_ASSERT(sc);
3073 
3074 	for (i = 0; i < sc->vmx_ntxqueues; i++) {
3075 		txq = &sc->vmx_txq[i];
3076 
3077 		VMXNET3_TXQ_LOCK(txq);
3078 		vmxnet3_txq_start(txq);
3079 		VMXNET3_TXQ_UNLOCK(txq);
3080 	}
3081 }
3082 
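/*
 * Set or clear a VLAN tag in the driver's private bitmap and, when
 * hardware VLAN filtering is enabled, in the shared filter table.
 */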
3083 static void
3084 vmxnet3_update_vlan_filter(struct vmxnet3_softc *sc, int add, uint16_t tag)
3085 {
3086 	struct ifnet *ifp;
3087 	int idx, bit;
3088 
3089 	ifp = sc->vmx_ifp;
3090 	idx = (tag >> 5) & 0x7F;
3091 	bit = tag & 0x1F;
3092 
3093 	if (tag == 0 || tag > 4095)
3094 		return;
3095 
3096 	VMXNET3_CORE_LOCK(sc);
3097 
3098 	/* Update our private VLAN bitvector. */
3099 	if (add)
3100 		sc->vmx_vlan_filter[idx] |= (1 << bit);
3101 	else
3102 		sc->vmx_vlan_filter[idx] &= ~(1 << bit);
3103 
3104 	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
3105 		if (add)
3106 			sc->vmx_ds->vlan_filter[idx] |= (1 << bit);
3107 		else
3108 			sc->vmx_ds->vlan_filter[idx] &= ~(1 << bit);
3109 		vmxnet3_write_cmd(sc, VMXNET3_CMD_VLAN_FILTER);
3110 	}
3111 
3112 	VMXNET3_CORE_UNLOCK(sc);
3113 }
3114 
3115 static void
3116 vmxnet3_register_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
3117 {
3118 
3119 	if (ifp->if_softc == arg)
3120 		vmxnet3_update_vlan_filter(arg, 1, tag);
3121 }
3122 
3123 static void
3124 vmxnet3_unregister_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
3125 {
3126 
3127 	if (ifp->if_softc == arg)
3128 		vmxnet3_update_vlan_filter(arg, 0, tag);
3129 }
3130 
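/*
 * Program the Rx mode and multicast filter table from the interface
 * flags and multicast address list. Falls back to ALLMULTI when the
 * address list exceeds VMXNET3_MULTICAST_MAX entries.
 */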
3131 static void
3132 vmxnet3_set_rxfilter(struct vmxnet3_softc *sc)
3133 {
3134 	struct ifnet *ifp;
3135 	struct vmxnet3_driver_shared *ds;
3136 	struct ifmultiaddr *ifma;
3137 	u_int mode;
3138 
3139 	ifp = sc->vmx_ifp;
3140 	ds = sc->vmx_ds;
3141 
3142 	mode = VMXNET3_RXMODE_UCAST | VMXNET3_RXMODE_BCAST;
3143 	if (ifp->if_flags & IFF_PROMISC)
3144 		mode |= VMXNET3_RXMODE_PROMISC;
3145 	if (ifp->if_flags & IFF_ALLMULTI)
3146 		mode |= VMXNET3_RXMODE_ALLMULTI;
3147 	else {
3148 		int cnt = 0, overflow = 0;
3149 
3150 		if_maddr_rlock(ifp);
3151 		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
3152 			if (ifma->ifma_addr->sa_family != AF_LINK)
3153 				continue;
3154 			else if (cnt == VMXNET3_MULTICAST_MAX) {
3155 				overflow = 1;
3156 				break;
3157 			}
3158 
3159 			bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
3160 			   &sc->vmx_mcast[cnt*ETHER_ADDR_LEN], ETHER_ADDR_LEN);
3161 			cnt++;
3162 		}
3163 		if_maddr_runlock(ifp);
3164 
3165 		if (overflow != 0) {
3166 			cnt = 0;
3167 			mode |= VMXNET3_RXMODE_ALLMULTI;
3168 		} else if (cnt > 0)
3169 			mode |= VMXNET3_RXMODE_MCAST;
3170 		ds->mcast_tablelen = cnt * ETHER_ADDR_LEN;
3171 	}
3172 
3173 	ds->rxmode = mode;
3174 
3175 	vmxnet3_write_cmd(sc, VMXNET3_CMD_SET_FILTER);
3176 	vmxnet3_write_cmd(sc, VMXNET3_CMD_SET_RXMODE);
3177 }
3178 
3179 static int
3180 vmxnet3_change_mtu(struct vmxnet3_softc *sc, int mtu)
3181 {
3182 	struct ifnet *ifp;
3183 
3184 	ifp = sc->vmx_ifp;
3185 
3186 	if (mtu < VMXNET3_MIN_MTU || mtu > VMXNET3_MAX_MTU)
3187 		return (EINVAL);
3188 
3189 	ifp->if_mtu = mtu;
3190 
3191 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3192 		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3193 		vmxnet3_init_locked(sc);
3194 	}
3195 
3196 	return (0);
3197 }
3198 
3199 static int
3200 vmxnet3_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
3201 {
3202 	struct vmxnet3_softc *sc;
3203 	struct ifreq *ifr;
3204 	int reinit, mask, error;
3205 
3206 	sc = ifp->if_softc;
3207 	ifr = (struct ifreq *) data;
3208 	error = 0;
3209 
3210 	switch (cmd) {
3211 	case SIOCSIFMTU:
3212 		if (ifp->if_mtu != ifr->ifr_mtu) {
3213 			VMXNET3_CORE_LOCK(sc);
3214 			error = vmxnet3_change_mtu(sc, ifr->ifr_mtu);
3215 			VMXNET3_CORE_UNLOCK(sc);
3216 		}
3217 		break;
3218 
3219 	case SIOCSIFFLAGS:
3220 		VMXNET3_CORE_LOCK(sc);
3221 		if (ifp->if_flags & IFF_UP) {
3222 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3223 				if ((ifp->if_flags ^ sc->vmx_if_flags) &
3224 				    (IFF_PROMISC | IFF_ALLMULTI)) {
3225 					vmxnet3_set_rxfilter(sc);
3226 				}
3227 			} else
3228 				vmxnet3_init_locked(sc);
3229 		} else {
3230 			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3231 				vmxnet3_stop(sc);
3232 		}
3233 		sc->vmx_if_flags = ifp->if_flags;
3234 		VMXNET3_CORE_UNLOCK(sc);
3235 		break;
3236 
3237 	case SIOCADDMULTI:
3238 	case SIOCDELMULTI:
3239 		VMXNET3_CORE_LOCK(sc);
3240 		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3241 			vmxnet3_set_rxfilter(sc);
3242 		VMXNET3_CORE_UNLOCK(sc);
3243 		break;
3244 
3245 	case SIOCSIFMEDIA:
3246 	case SIOCGIFMEDIA:
3247 		error = ifmedia_ioctl(ifp, ifr, &sc->vmx_media, cmd);
3248 		break;
3249 
3250 	case SIOCSIFCAP:
3251 		VMXNET3_CORE_LOCK(sc);
3252 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3253 
3254 		if (mask & IFCAP_TXCSUM)
3255 			ifp->if_capenable ^= IFCAP_TXCSUM;
3256 		if (mask & IFCAP_TXCSUM_IPV6)
3257 			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
3258 		if (mask & IFCAP_TSO4)
3259 			ifp->if_capenable ^= IFCAP_TSO4;
3260 		if (mask & IFCAP_TSO6)
3261 			ifp->if_capenable ^= IFCAP_TSO6;
3262 
3263 		if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO |
3264 		    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWFILTER)) {
3265 			/* Changing these features requires us to reinit. */
3266 			reinit = 1;
3267 
3268 			if (mask & IFCAP_RXCSUM)
3269 				ifp->if_capenable ^= IFCAP_RXCSUM;
3270 			if (mask & IFCAP_RXCSUM_IPV6)
3271 				ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
3272 			if (mask & IFCAP_LRO)
3273 				ifp->if_capenable ^= IFCAP_LRO;
3274 			if (mask & IFCAP_VLAN_HWTAGGING)
3275 				ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
3276 			if (mask & IFCAP_VLAN_HWFILTER)
3277 				ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
3278 		} else
3279 			reinit = 0;
3280 
3281 		if (mask & IFCAP_VLAN_HWTSO)
3282 			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
3283 
3284 		if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3285 			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3286 			vmxnet3_init_locked(sc);
3287 		}
3288 
3289 		VMXNET3_CORE_UNLOCK(sc);
3290 		VLAN_CAPABILITIES(ifp);
3291 		break;
3292 
3293 	default:
3294 		error = ether_ioctl(ifp, cmd, data);
3295 		break;
3296 	}
3297 
3298 	VMXNET3_CORE_LOCK_ASSERT_NOTOWNED(sc);
3299 
3300 	return (error);
3301 }
3302 
3303 #ifndef VMXNET3_LEGACY_TX
3304 static void
3305 vmxnet3_qflush(struct ifnet *ifp)
3306 {
3307 	struct vmxnet3_softc *sc;
3308 	struct vmxnet3_txqueue *txq;
3309 	struct mbuf *m;
3310 	int i;
3311 
3312 	sc = ifp->if_softc;
3313 
3314 	for (i = 0; i < sc->vmx_ntxqueues; i++) {
3315 		txq = &sc->vmx_txq[i];
3316 
3317 		VMXNET3_TXQ_LOCK(txq);
3318 		while ((m = buf_ring_dequeue_sc(txq->vxtxq_br)) != NULL)
3319 			m_freem(m);
3320 		VMXNET3_TXQ_UNLOCK(txq);
3321 	}
3322 
3323 	if_qflush(ifp);
3324 }
3325 #endif
3326 
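/*
 * Per-queue Tx watchdog, run once per second from vmxnet3_tick().
 * Returns nonzero when no Tx completions have been seen for
 * VMXNET3_WATCHDOG_TIMEOUT ticks so the caller can reset the device.
 */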
3327 static int
3328 vmxnet3_watchdog(struct vmxnet3_txqueue *txq)
3329 {
3330 	struct vmxnet3_softc *sc;
3331 
3332 	sc = txq->vxtxq_sc;
3333 
3334 	VMXNET3_TXQ_LOCK(txq);
3335 	if (txq->vxtxq_watchdog == 0 || --txq->vxtxq_watchdog) {
3336 		VMXNET3_TXQ_UNLOCK(txq);
3337 		return (0);
3338 	}
3339 	VMXNET3_TXQ_UNLOCK(txq);
3340 
3341 	if_printf(sc->vmx_ifp, "watchdog timeout on queue %d\n",
3342 	    txq->vxtxq_id);
3343 	return (1);
3344 }
3345 
3346 static void
3347 vmxnet3_refresh_host_stats(struct vmxnet3_softc *sc)
3348 {
3349 
3350 	vmxnet3_write_cmd(sc, VMXNET3_CMD_GET_STATS);
3351 }
3352 
3353 static uint64_t
3354 vmxnet3_get_counter(struct ifnet *ifp, ift_counter cnt)
3355 {
3356 	struct vmxnet3_softc *sc;
3357 	uint64_t rv;
3358 
3359 	sc = if_getsoftc(ifp);
3360 	rv = 0;
3361 
3362 	/*
3363 	 * With the exception of if_ierrors, these ifnet statistics are
3364 	 * only updated in the driver, so just set them to our accumulated
3365 	 * values. if_ierrors is updated in ether_input() for malformed
3366 	 * frames that we should have already discarded.
3367 	 */
3368 	switch (cnt) {
3369 	case IFCOUNTER_IPACKETS:
3370 		for (int i = 0; i < sc->vmx_nrxqueues; i++)
3371 			rv += sc->vmx_rxq[i].vxrxq_stats.vmrxs_ipackets;
3372 		return (rv);
3373 	case IFCOUNTER_IQDROPS:
3374 		for (int i = 0; i < sc->vmx_nrxqueues; i++)
3375 			rv += sc->vmx_rxq[i].vxrxq_stats.vmrxs_iqdrops;
3376 		return (rv);
3377 	case IFCOUNTER_IERRORS:
3378 		for (int i = 0; i < sc->vmx_nrxqueues; i++)
3379 			rv += sc->vmx_rxq[i].vxrxq_stats.vmrxs_ierrors;
3380 		return (rv);
3381 	case IFCOUNTER_OPACKETS:
3382 		for (int i = 0; i < sc->vmx_ntxqueues; i++)
3383 			rv += sc->vmx_txq[i].vxtxq_stats.vmtxs_opackets;
3384 		return (rv);
3385 #ifndef VMXNET3_LEGACY_TX
3386 	case IFCOUNTER_OBYTES:
3387 		for (int i = 0; i < sc->vmx_ntxqueues; i++)
3388 			rv += sc->vmx_txq[i].vxtxq_stats.vmtxs_obytes;
3389 		return (rv);
3390 	case IFCOUNTER_OMCASTS:
3391 		for (int i = 0; i < sc->vmx_ntxqueues; i++)
3392 			rv += sc->vmx_txq[i].vxtxq_stats.vmtxs_omcasts;
3393 		return (rv);
3394 #endif
3395 	default:
3396 		return (if_get_counter_default(ifp, cnt));
3397 	}
3398 }
3399 
3400 static void
3401 vmxnet3_tick(void *xsc)
3402 {
3403 	struct vmxnet3_softc *sc;
3404 	struct ifnet *ifp;
3405 	int i, timedout;
3406 
3407 	sc = xsc;
3408 	ifp = sc->vmx_ifp;
3409 	timedout = 0;
3410 
3411 	VMXNET3_CORE_LOCK_ASSERT(sc);
3412 
3413 	vmxnet3_refresh_host_stats(sc);
3414 
3415 	for (i = 0; i < sc->vmx_ntxqueues; i++)
3416 		timedout |= vmxnet3_watchdog(&sc->vmx_txq[i]);
3417 
3418 	if (timedout != 0) {
3419 		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3420 		vmxnet3_init_locked(sc);
3421 	} else
3422 		callout_reset(&sc->vmx_tick, hz, vmxnet3_tick, sc);
3423 }
3424 
3425 static int
3426 vmxnet3_link_is_up(struct vmxnet3_softc *sc)
3427 {
3428 	uint32_t status;
3429 
3430 	/* Also update the link speed while here. */
3431 	status = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_LINK);
3432 	sc->vmx_link_speed = status >> 16;
3433 	return (!!(status & 0x1));
3434 }
3435 
3436 static void
3437 vmxnet3_link_status(struct vmxnet3_softc *sc)
3438 {
3439 	struct ifnet *ifp;
3440 	int link;
3441 
3442 	ifp = sc->vmx_ifp;
3443 	link = vmxnet3_link_is_up(sc);
3444 
3445 	if (link != 0 && sc->vmx_link_active == 0) {
3446 		sc->vmx_link_active = 1;
3447 		if_link_state_change(ifp, LINK_STATE_UP);
3448 	} else if (link == 0 && sc->vmx_link_active != 0) {
3449 		sc->vmx_link_active = 0;
3450 		if_link_state_change(ifp, LINK_STATE_DOWN);
3451 	}
3452 }
3453 
3454 static void
3455 vmxnet3_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
3456 {
3457 	struct vmxnet3_softc *sc;
3458 
3459 	sc = ifp->if_softc;
3460 
3461 	ifmr->ifm_active = IFM_ETHER | IFM_AUTO;
3462 	ifmr->ifm_status = IFM_AVALID;
3463 
3464 	VMXNET3_CORE_LOCK(sc);
3465 	if (vmxnet3_link_is_up(sc) != 0)
3466 		ifmr->ifm_status |= IFM_ACTIVE;
3467 	else
3468 		ifmr->ifm_status |= IFM_NONE;
3469 	VMXNET3_CORE_UNLOCK(sc);
3470 }
3471 
3472 static int
3473 vmxnet3_media_change(struct ifnet *ifp)
3474 {
3475 
3476 	/* Ignore. */
3477 	return (0);
3478 }
3479 
3480 static void
3481 vmxnet3_set_lladdr(struct vmxnet3_softc *sc)
3482 {
3483 	uint32_t ml, mh;
3484 
3485 	ml  = sc->vmx_lladdr[0];
3486 	ml |= sc->vmx_lladdr[1] << 8;
3487 	ml |= sc->vmx_lladdr[2] << 16;
3488 	ml |= sc->vmx_lladdr[3] << 24;
3489 	vmxnet3_write_bar1(sc, VMXNET3_BAR1_MACL, ml);
3490 
3491 	mh  = sc->vmx_lladdr[4];
3492 	mh |= sc->vmx_lladdr[5] << 8;
3493 	vmxnet3_write_bar1(sc, VMXNET3_BAR1_MACH, mh);
3494 }
3495 
3496 static void
3497 vmxnet3_get_lladdr(struct vmxnet3_softc *sc)
3498 {
3499 	uint32_t ml, mh;
3500 
3501 	ml = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_MACL);
3502 	mh = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_MACH);
3503 
3504 	sc->vmx_lladdr[0] = ml;
3505 	sc->vmx_lladdr[1] = ml >> 8;
3506 	sc->vmx_lladdr[2] = ml >> 16;
3507 	sc->vmx_lladdr[3] = ml >> 24;
3508 	sc->vmx_lladdr[4] = mh;
3509 	sc->vmx_lladdr[5] = mh >> 8;
3510 }
3511 
3512 static void
3513 vmxnet3_setup_txq_sysctl(struct vmxnet3_txqueue *txq,
3514     struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child)
3515 {
3516 	struct sysctl_oid *node, *txsnode;
3517 	struct sysctl_oid_list *list, *txslist;
3518 	struct vmxnet3_txq_stats *stats;
3519 	struct UPT1_TxStats *txstats;
3520 	char namebuf[16];
3521 
3522 	stats = &txq->vxtxq_stats;
3523 	txstats = &txq->vxtxq_ts->stats;
3524 
3525 	snprintf(namebuf, sizeof(namebuf), "txq%d", txq->vxtxq_id);
3526 	node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD,
3527 	    NULL, "Transmit Queue");
3528 	txq->vxtxq_sysctl = list = SYSCTL_CHILDREN(node);
3529 
3530 	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "opackets", CTLFLAG_RD,
3531 	    &stats->vmtxs_opackets, "Transmit packets");
3532 	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "obytes", CTLFLAG_RD,
3533 	    &stats->vmtxs_obytes, "Transmit bytes");
3534 	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "omcasts", CTLFLAG_RD,
3535 	    &stats->vmtxs_omcasts, "Transmit multicasts");
3536 	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD,
3537 	    &stats->vmtxs_csum, "Transmit checksum offloaded");
3538 	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "tso", CTLFLAG_RD,
3539 	    &stats->vmtxs_tso, "Transmit TCP segmentation offloaded");
3540 	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ringfull", CTLFLAG_RD,
3541 	    &stats->vmtxs_full, "Transmit ring full");
3542 	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "offload_failed", CTLFLAG_RD,
3543 	    &stats->vmtxs_offload_failed, "Transmit checksum offload failed");
3544 
3545 	/*
3546 	 * Add statistics reported by the host. These are updated once
3547 	 * per second.
3548 	 */
3549 	txsnode = SYSCTL_ADD_NODE(ctx, list, OID_AUTO, "hstats", CTLFLAG_RD,
3550 	    NULL, "Host Statistics");
3551 	txslist = SYSCTL_CHILDREN(txsnode);
3552 	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tso_packets", CTLFLAG_RD,
3553 	    &txstats->TSO_packets, "TSO packets");
3554 	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tso_bytes", CTLFLAG_RD,
3555 	    &txstats->TSO_bytes, "TSO bytes");
3556 	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "ucast_packets", CTLFLAG_RD,
3557 	    &txstats->ucast_packets, "Unicast packets");
3558 	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "unicast_bytes", CTLFLAG_RD,
3559 	    &txstats->ucast_bytes, "Unicast bytes");
3560 	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "mcast_packets", CTLFLAG_RD,
3561 	    &txstats->mcast_packets, "Multicast packets");
3562 	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "mcast_bytes", CTLFLAG_RD,
3563 	    &txstats->mcast_bytes, "Multicast bytes");
3564 	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "error", CTLFLAG_RD,
3565 	    &txstats->error, "Errors");
3566 	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "discard", CTLFLAG_RD,
3567 	    &txstats->discard, "Discards");
3568 }
3569 
3570 static void
3571 vmxnet3_setup_rxq_sysctl(struct vmxnet3_rxqueue *rxq,
3572     struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child)
3573 {
3574 	struct sysctl_oid *node, *rxsnode;
3575 	struct sysctl_oid_list *list, *rxslist;
3576 	struct vmxnet3_rxq_stats *stats;
3577 	struct UPT1_RxStats *rxstats;
3578 	char namebuf[16];
3579 
3580 	stats = &rxq->vxrxq_stats;
3581 	rxstats = &rxq->vxrxq_rs->stats;
3582 
3583 	snprintf(namebuf, sizeof(namebuf), "rxq%d", rxq->vxrxq_id);
3584 	node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD,
3585 	    NULL, "Receive Queue");
3586 	rxq->vxrxq_sysctl = list = SYSCTL_CHILDREN(node);
3587 
3588 	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ipackets", CTLFLAG_RD,
3589 	    &stats->vmrxs_ipackets, "Receive packets");
3590 	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ibytes", CTLFLAG_RD,
3591 	    &stats->vmrxs_ibytes, "Receive bytes");
3592 	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "iqdrops", CTLFLAG_RD,
3593 	    &stats->vmrxs_iqdrops, "Receive drops");
3594 	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ierrors", CTLFLAG_RD,
3595 	    &stats->vmrxs_ierrors, "Receive errors");
3596 
3597 	/*
3598 	 * Add statistics reported by the host. These are updated once
3599 	 * per second.
3600 	 */
3601 	rxsnode = SYSCTL_ADD_NODE(ctx, list, OID_AUTO, "hstats", CTLFLAG_RD,
3602 	    NULL, "Host Statistics");
3603 	rxslist = SYSCTL_CHILDREN(rxsnode);
3604 	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "lro_packets", CTLFLAG_RD,
3605 	    &rxstats->LRO_packets, "LRO packets");
3606 	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "lro_bytes", CTLFLAG_RD,
3607 	    &rxstats->LRO_bytes, "LRO bytes");
3608 	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "ucast_packets", CTLFLAG_RD,
3609 	    &rxstats->ucast_packets, "Unicast packets");
3610 	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "unicast_bytes", CTLFLAG_RD,
3611 	    &rxstats->ucast_bytes, "Unicast bytes");
3612 	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "mcast_packets", CTLFLAG_RD,
3613 	    &rxstats->mcast_packets, "Multicast packets");
3614 	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "mcast_bytes", CTLFLAG_RD,
3615 	    &rxstats->mcast_bytes, "Multicast bytes");
3616 	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "bcast_packets", CTLFLAG_RD,
3617 	    &rxstats->bcast_packets, "Broadcast packets");
3618 	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "bcast_bytes", CTLFLAG_RD,
3619 	    &rxstats->bcast_bytes, "Broadcast bytes");
3620 	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "nobuffer", CTLFLAG_RD,
3621 	    &rxstats->nobuffer, "No buffer");
3622 	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "error", CTLFLAG_RD,
3623 	    &rxstats->error, "Errors");
3624 }
3625 
3626 static void
3627 vmxnet3_setup_debug_sysctl(struct vmxnet3_softc *sc,
3628     struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child)
3629 {
3630 	struct sysctl_oid *node;
3631 	struct sysctl_oid_list *list;
3632 	int i;
3633 
3634 	for (i = 0; i < sc->vmx_ntxqueues; i++) {
3635 		struct vmxnet3_txqueue *txq = &sc->vmx_txq[i];
3636 
3637 		node = SYSCTL_ADD_NODE(ctx, txq->vxtxq_sysctl, OID_AUTO,
3638 		    "debug", CTLFLAG_RD, NULL, "");
3639 		list = SYSCTL_CHILDREN(node);
3640 
3641 		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd_head", CTLFLAG_RD,
3642 		    &txq->vxtxq_cmd_ring.vxtxr_head, 0, "");
3643 		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd_next", CTLFLAG_RD,
3644 		    &txq->vxtxq_cmd_ring.vxtxr_next, 0, "");
3645 		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd_ndesc", CTLFLAG_RD,
3646 		    &txq->vxtxq_cmd_ring.vxtxr_ndesc, 0, "");
3647 		SYSCTL_ADD_INT(ctx, list, OID_AUTO, "cmd_gen", CTLFLAG_RD,
3648 		    &txq->vxtxq_cmd_ring.vxtxr_gen, 0, "");
3649 		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_next", CTLFLAG_RD,
3650 		    &txq->vxtxq_comp_ring.vxcr_next, 0, "");
3651 		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_ndesc", CTLFLAG_RD,
3652 		    &txq->vxtxq_comp_ring.vxcr_ndesc, 0, "");
3653 		SYSCTL_ADD_INT(ctx, list, OID_AUTO, "comp_gen", CTLFLAG_RD,
3654 		    &txq->vxtxq_comp_ring.vxcr_gen, 0, "");
3655 	}
3656 
3657 	for (i = 0; i < sc->vmx_nrxqueues; i++) {
3658 		struct vmxnet3_rxqueue *rxq = &sc->vmx_rxq[i];
3659 
3660 		node = SYSCTL_ADD_NODE(ctx, rxq->vxrxq_sysctl, OID_AUTO,
3661 		    "debug", CTLFLAG_RD, NULL, "");
3662 		list = SYSCTL_CHILDREN(node);
3663 
3664 		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd0_fill", CTLFLAG_RD,
3665 		    &rxq->vxrxq_cmd_ring[0].vxrxr_fill, 0, "");
3666 		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd0_ndesc", CTLFLAG_RD,
3667 		    &rxq->vxrxq_cmd_ring[0].vxrxr_ndesc, 0, "");
3668 		SYSCTL_ADD_INT(ctx, list, OID_AUTO, "cmd0_gen", CTLFLAG_RD,
3669 		    &rxq->vxrxq_cmd_ring[0].vxrxr_gen, 0, "");
3670 		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd1_fill", CTLFLAG_RD,
3671 		    &rxq->vxrxq_cmd_ring[1].vxrxr_fill, 0, "");
3672 		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd1_ndesc", CTLFLAG_RD,
3673 		    &rxq->vxrxq_cmd_ring[1].vxrxr_ndesc, 0, "");
3674 		SYSCTL_ADD_INT(ctx, list, OID_AUTO, "cmd1_gen", CTLFLAG_RD,
3675 		    &rxq->vxrxq_cmd_ring[1].vxrxr_gen, 0, "");
3676 		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_next", CTLFLAG_RD,
3677 		    &rxq->vxrxq_comp_ring.vxcr_next, 0, "");
3678 		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_ndesc", CTLFLAG_RD,
3679 		    &rxq->vxrxq_comp_ring.vxcr_ndesc, 0, "");
3680 		SYSCTL_ADD_INT(ctx, list, OID_AUTO, "comp_gen", CTLFLAG_RD,
3681 		    &rxq->vxrxq_comp_ring.vxcr_gen, 0, "");
3682 	}
3683 }
3684 
3685 static void
3686 vmxnet3_setup_queue_sysctl(struct vmxnet3_softc *sc,
3687     struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child)
3688 {
3689 	int i;
3690 
3691 	for (i = 0; i < sc->vmx_ntxqueues; i++)
3692 		vmxnet3_setup_txq_sysctl(&sc->vmx_txq[i], ctx, child);
3693 	for (i = 0; i < sc->vmx_nrxqueues; i++)
3694 		vmxnet3_setup_rxq_sysctl(&sc->vmx_rxq[i], ctx, child);
3695 
3696 	vmxnet3_setup_debug_sysctl(sc, ctx, child);
3697 }
3698 
3699 static void
3700 vmxnet3_setup_sysctl(struct vmxnet3_softc *sc)
3701 {
3702 	device_t dev;
3703 	struct vmxnet3_statistics *stats;
3704 	struct sysctl_ctx_list *ctx;
3705 	struct sysctl_oid *tree;
3706 	struct sysctl_oid_list *child;
3707 
3708 	dev = sc->vmx_dev;
3709 	ctx = device_get_sysctl_ctx(dev);
3710 	tree = device_get_sysctl_tree(dev);
3711 	child = SYSCTL_CHILDREN(tree);
3712 
3713 	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "max_ntxqueues", CTLFLAG_RD,
3714 	    &sc->vmx_max_ntxqueues, 0, "Maximum number of Tx queues");
3715 	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "max_nrxqueues", CTLFLAG_RD,
3716 	    &sc->vmx_max_nrxqueues, 0, "Maximum number of Rx queues");
3717 	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "ntxqueues", CTLFLAG_RD,
3718 	    &sc->vmx_ntxqueues, 0, "Number of Tx queues");
3719 	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "nrxqueues", CTLFLAG_RD,
3720 	    &sc->vmx_nrxqueues, 0, "Number of Rx queues");
3721 
3722 	stats = &sc->vmx_stats;
3723 	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "defragged", CTLFLAG_RD,
3724 	    &stats->vmst_defragged, 0, "Tx mbuf chains defragged");
3725 	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "defrag_failed", CTLFLAG_RD,
3726 	    &stats->vmst_defrag_failed, 0,
3727 	    "Tx mbuf dropped because defrag failed");
3728 	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "mgetcl_failed", CTLFLAG_RD,
3729 	    &stats->vmst_mgetcl_failed, 0, "mbuf cluster allocation failed");
3730 	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "mbuf_load_failed", CTLFLAG_RD,
3731 	    &stats->vmst_mbuf_load_failed, 0, "mbuf load segments failed");
3732 
3733 	vmxnet3_setup_queue_sysctl(sc, ctx, child);
3734 }
3735 
3736 static void
3737 vmxnet3_write_bar0(struct vmxnet3_softc *sc, bus_size_t r, uint32_t v)
3738 {
3739 
3740 	bus_space_write_4(sc->vmx_iot0, sc->vmx_ioh0, r, v);
3741 }
3742 
3743 static uint32_t
3744 vmxnet3_read_bar1(struct vmxnet3_softc *sc, bus_size_t r)
3745 {
3746 
3747 	return (bus_space_read_4(sc->vmx_iot1, sc->vmx_ioh1, r));
3748 }
3749 
3750 static void
3751 vmxnet3_write_bar1(struct vmxnet3_softc *sc, bus_size_t r, uint32_t v)
3752 {
3753 
3754 	bus_space_write_4(sc->vmx_iot1, sc->vmx_ioh1, r, v);
3755 }
3756 
3757 static void
3758 vmxnet3_write_cmd(struct vmxnet3_softc *sc, uint32_t cmd)
3759 {
3760 
3761 	vmxnet3_write_bar1(sc, VMXNET3_BAR1_CMD, cmd);
3762 }
3763 
3764 static uint32_t
3765 vmxnet3_read_cmd(struct vmxnet3_softc *sc, uint32_t cmd)
3766 {
3767 
3768 	vmxnet3_write_cmd(sc, cmd);
3769 	bus_space_barrier(sc->vmx_iot1, sc->vmx_ioh1, 0, 0,
3770 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
3771 	return (vmxnet3_read_bar1(sc, VMXNET3_BAR1_CMD));
3772 }
3773 
3774 static void
3775 vmxnet3_enable_intr(struct vmxnet3_softc *sc, int irq)
3776 {
3777 
3778 	vmxnet3_write_bar0(sc, VMXNET3_BAR0_IMASK(irq), 0);
3779 }
3780 
3781 static void
3782 vmxnet3_disable_intr(struct vmxnet3_softc *sc, int irq)
3783 {
3784 
3785 	vmxnet3_write_bar0(sc, VMXNET3_BAR0_IMASK(irq), 1);
3786 }
3787 
3788 static void
3789 vmxnet3_enable_all_intrs(struct vmxnet3_softc *sc)
3790 {
3791 	int i;
3792 
3793 	sc->vmx_ds->ictrl &= ~VMXNET3_ICTRL_DISABLE_ALL;
3794 	for (i = 0; i < sc->vmx_nintrs; i++)
3795 		vmxnet3_enable_intr(sc, i);
3796 }
3797 
3798 static void
3799 vmxnet3_disable_all_intrs(struct vmxnet3_softc *sc)
3800 {
3801 	int i;
3802 
3803 	sc->vmx_ds->ictrl |= VMXNET3_ICTRL_DISABLE_ALL;
3804 	for (i = 0; i < sc->vmx_nintrs; i++)
3805 		vmxnet3_disable_intr(sc, i);
3806 }
3807 
3808 static void
3809 vmxnet3_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
3810 {
3811 	bus_addr_t *baddr = arg;
3812 
3813 	if (error == 0)
3814 		*baddr = segs->ds_addr;
3815 }
3816 
3817 static int
3818 vmxnet3_dma_malloc(struct vmxnet3_softc *sc, bus_size_t size, bus_size_t align,
3819     struct vmxnet3_dma_alloc *dma)
3820 {
3821 	device_t dev;
3822 	int error;
3823 
3824 	dev = sc->vmx_dev;
3825 	bzero(dma, sizeof(struct vmxnet3_dma_alloc));
3826 
3827 	error = bus_dma_tag_create(bus_get_dma_tag(dev),
3828 	    align, 0,		/* alignment, bounds */
3829 	    BUS_SPACE_MAXADDR,	/* lowaddr */
3830 	    BUS_SPACE_MAXADDR,	/* highaddr */
3831 	    NULL, NULL,		/* filter, filterarg */
3832 	    size,		/* maxsize */
3833 	    1,			/* nsegments */
3834 	    size,		/* maxsegsize */
3835 	    BUS_DMA_ALLOCNOW,	/* flags */
3836 	    NULL,		/* lockfunc */
3837 	    NULL,		/* lockfuncarg */
3838 	    &dma->dma_tag);
3839 	if (error) {
3840 		device_printf(dev, "bus_dma_tag_create failed: %d\n", error);
3841 		goto fail;
3842 	}
3843 
3844 	error = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
3845 	    BUS_DMA_ZERO | BUS_DMA_NOWAIT, &dma->dma_map);
3846 	if (error) {
3847 		device_printf(dev, "bus_dmamem_alloc failed: %d\n", error);
3848 		goto fail;
3849 	}
3850 
3851 	error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
3852 	    size, vmxnet3_dmamap_cb, &dma->dma_paddr, BUS_DMA_NOWAIT);
3853 	if (error) {
3854 		device_printf(dev, "bus_dmamap_load failed: %d\n", error);
3855 		goto fail;
3856 	}
3857 
3858 	dma->dma_size = size;
3859 
3860 fail:
3861 	if (error)
3862 		vmxnet3_dma_free(sc, dma);
3863 
3864 	return (error);
3865 }
3866 
3867 static void
3868 vmxnet3_dma_free(struct vmxnet3_softc *sc, struct vmxnet3_dma_alloc *dma)
3869 {
3870 
3871 	if (dma->dma_tag != NULL) {
3872 		if (dma->dma_paddr != 0) {
3873 			bus_dmamap_sync(dma->dma_tag, dma->dma_map,
3874 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3875 			bus_dmamap_unload(dma->dma_tag, dma->dma_map);
3876 		}
3877 
3878 		if (dma->dma_vaddr != NULL) {
3879 			bus_dmamem_free(dma->dma_tag, dma->dma_vaddr,
3880 			    dma->dma_map);
3881 		}
3882 
3883 		bus_dma_tag_destroy(dma->dma_tag);
3884 	}
3885 	bzero(dma, sizeof(struct vmxnet3_dma_alloc));
3886 }
3887 
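/*
 * Fetch a per-device tunable of the form hw.vmx.<unit>.<knob>, returning
 * the supplied default when the tunable is not set.
 */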
3888 static int
3889 vmxnet3_tunable_int(struct vmxnet3_softc *sc, const char *knob, int def)
3890 {
3891 	char path[64];
3892 
3893 	snprintf(path, sizeof(path),
3894 	    "hw.vmx.%d.%s", device_get_unit(sc->vmx_dev), knob);
3895 	TUNABLE_INT_FETCH(path, &def);
3896 
3897 	return (def);
3898 }
3899 
3900 /*
3901  * Since this is a purely paravirtualized device, we do not have
3902  * to worry about DMA coherency. But at times, we must make sure
3903  * both the compiler and CPU do not reorder memory operations.
3904  */
3905 static inline void
3906 vmxnet3_barrier(struct vmxnet3_softc *sc, vmxnet3_barrier_t type)
3907 {
3908 
3909 	switch (type) {
3910 	case VMXNET3_BARRIER_RD:
3911 		rmb();
3912 		break;
3913 	case VMXNET3_BARRIER_WR:
3914 		wmb();
3915 		break;
3916 	case VMXNET3_BARRIER_RDWR:
3917 		mb();
3918 		break;
3919 	default:
3920 		panic("%s: bad barrier type %d", __func__, type);
3921 	}
3922 }
3923