xref: /freebsd/sys/dev/vmware/vmxnet3/if_vmx.c (revision 076ad2f836d5f49dc1375f1677335a48fe0d4b82)
1 /*-
2  * Copyright (c) 2013 Tsubai Masanari
3  * Copyright (c) 2013 Bryan Venteicher <bryanv@FreeBSD.org>
4  *
5  * Permission to use, copy, modify, and distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  *
17  * $OpenBSD: src/sys/dev/pci/if_vmx.c,v 1.11 2013/06/22 00:28:10 uebayasi Exp $
18  */
19 
20 /* Driver for VMware vmxnet3 virtual ethernet devices. */
21 
22 #include <sys/cdefs.h>
23 __FBSDID("$FreeBSD$");
24 
25 #include <sys/param.h>
26 #include <sys/systm.h>
27 #include <sys/eventhandler.h>
28 #include <sys/kernel.h>
29 #include <sys/endian.h>
30 #include <sys/sockio.h>
31 #include <sys/mbuf.h>
32 #include <sys/malloc.h>
33 #include <sys/module.h>
34 #include <sys/socket.h>
35 #include <sys/sysctl.h>
36 #include <sys/smp.h>
37 #include <sys/taskqueue.h>
38 #include <vm/vm.h>
39 #include <vm/pmap.h>
40 
41 #include <net/ethernet.h>
42 #include <net/if.h>
43 #include <net/if_var.h>
44 #include <net/if_arp.h>
45 #include <net/if_dl.h>
46 #include <net/if_types.h>
47 #include <net/if_media.h>
48 #include <net/if_vlan_var.h>
49 
50 #include <net/bpf.h>
51 
52 #include <netinet/in_systm.h>
53 #include <netinet/in.h>
54 #include <netinet/ip.h>
55 #include <netinet/ip6.h>
56 #include <netinet6/ip6_var.h>
57 #include <netinet/udp.h>
58 #include <netinet/tcp.h>
59 
60 #include <machine/in_cksum.h>
61 
62 #include <machine/bus.h>
63 #include <machine/resource.h>
64 #include <sys/bus.h>
65 #include <sys/rman.h>
66 
67 #include <dev/pci/pcireg.h>
68 #include <dev/pci/pcivar.h>
69 
70 #include "if_vmxreg.h"
71 #include "if_vmxvar.h"
72 
73 #include "opt_inet.h"
74 #include "opt_inet6.h"
75 
76 #ifdef VMXNET3_FAILPOINTS
77 #include <sys/fail.h>
78 static SYSCTL_NODE(DEBUG_FP, OID_AUTO, vmxnet3, CTLFLAG_RW, 0,
79     "vmxnet3 fail points");
80 #define VMXNET3_FP	_debug_fail_point_vmxnet3
81 #endif
82 
83 static int	vmxnet3_probe(device_t);
84 static int	vmxnet3_attach(device_t);
85 static int	vmxnet3_detach(device_t);
86 static int	vmxnet3_shutdown(device_t);
87 
88 static int	vmxnet3_alloc_resources(struct vmxnet3_softc *);
89 static void	vmxnet3_free_resources(struct vmxnet3_softc *);
90 static int	vmxnet3_check_version(struct vmxnet3_softc *);
91 static void	vmxnet3_initial_config(struct vmxnet3_softc *);
92 static void	vmxnet3_check_multiqueue(struct vmxnet3_softc *);
93 
94 static int	vmxnet3_alloc_msix_interrupts(struct vmxnet3_softc *);
95 static int	vmxnet3_alloc_msi_interrupts(struct vmxnet3_softc *);
96 static int	vmxnet3_alloc_legacy_interrupts(struct vmxnet3_softc *);
97 static int	vmxnet3_alloc_interrupt(struct vmxnet3_softc *, int, int,
98 		    struct vmxnet3_interrupt *);
99 static int	vmxnet3_alloc_intr_resources(struct vmxnet3_softc *);
100 static int	vmxnet3_setup_msix_interrupts(struct vmxnet3_softc *);
101 static int	vmxnet3_setup_legacy_interrupt(struct vmxnet3_softc *);
102 static int	vmxnet3_setup_interrupts(struct vmxnet3_softc *);
103 static int	vmxnet3_alloc_interrupts(struct vmxnet3_softc *);
104 
105 static void	vmxnet3_free_interrupt(struct vmxnet3_softc *,
106 		    struct vmxnet3_interrupt *);
107 static void	vmxnet3_free_interrupts(struct vmxnet3_softc *);
108 
109 #ifndef VMXNET3_LEGACY_TX
110 static int	vmxnet3_alloc_taskqueue(struct vmxnet3_softc *);
111 static void	vmxnet3_start_taskqueue(struct vmxnet3_softc *);
112 static void	vmxnet3_drain_taskqueue(struct vmxnet3_softc *);
113 static void	vmxnet3_free_taskqueue(struct vmxnet3_softc *);
114 #endif
115 
116 static int	vmxnet3_init_rxq(struct vmxnet3_softc *, int);
117 static int	vmxnet3_init_txq(struct vmxnet3_softc *, int);
118 static int	vmxnet3_alloc_rxtx_queues(struct vmxnet3_softc *);
119 static void	vmxnet3_destroy_rxq(struct vmxnet3_rxqueue *);
120 static void	vmxnet3_destroy_txq(struct vmxnet3_txqueue *);
121 static void	vmxnet3_free_rxtx_queues(struct vmxnet3_softc *);
122 
123 static int	vmxnet3_alloc_shared_data(struct vmxnet3_softc *);
124 static void	vmxnet3_free_shared_data(struct vmxnet3_softc *);
125 static int	vmxnet3_alloc_txq_data(struct vmxnet3_softc *);
126 static void	vmxnet3_free_txq_data(struct vmxnet3_softc *);
127 static int	vmxnet3_alloc_rxq_data(struct vmxnet3_softc *);
128 static void	vmxnet3_free_rxq_data(struct vmxnet3_softc *);
129 static int	vmxnet3_alloc_queue_data(struct vmxnet3_softc *);
130 static void	vmxnet3_free_queue_data(struct vmxnet3_softc *);
131 static int	vmxnet3_alloc_mcast_table(struct vmxnet3_softc *);
132 static void	vmxnet3_init_shared_data(struct vmxnet3_softc *);
133 static void	vmxnet3_init_hwassist(struct vmxnet3_softc *);
134 static void	vmxnet3_reinit_interface(struct vmxnet3_softc *);
135 static void	vmxnet3_reinit_rss_shared_data(struct vmxnet3_softc *);
136 static void	vmxnet3_reinit_shared_data(struct vmxnet3_softc *);
137 static int	vmxnet3_alloc_data(struct vmxnet3_softc *);
138 static void	vmxnet3_free_data(struct vmxnet3_softc *);
139 static int	vmxnet3_setup_interface(struct vmxnet3_softc *);
140 
141 static void	vmxnet3_evintr(struct vmxnet3_softc *);
142 static void	vmxnet3_txq_eof(struct vmxnet3_txqueue *);
143 static void	vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *, struct mbuf *);
144 static int	vmxnet3_newbuf(struct vmxnet3_softc *, struct vmxnet3_rxring *);
145 static void	vmxnet3_rxq_eof_discard(struct vmxnet3_rxqueue *,
146 		    struct vmxnet3_rxring *, int);
147 static void	vmxnet3_rxq_eof(struct vmxnet3_rxqueue *);
148 static void	vmxnet3_legacy_intr(void *);
149 static void	vmxnet3_txq_intr(void *);
150 static void	vmxnet3_rxq_intr(void *);
151 static void	vmxnet3_event_intr(void *);
152 
153 static void	vmxnet3_txstop(struct vmxnet3_softc *, struct vmxnet3_txqueue *);
154 static void	vmxnet3_rxstop(struct vmxnet3_softc *, struct vmxnet3_rxqueue *);
155 static void	vmxnet3_stop(struct vmxnet3_softc *);
156 
157 static void	vmxnet3_txinit(struct vmxnet3_softc *, struct vmxnet3_txqueue *);
158 static int	vmxnet3_rxinit(struct vmxnet3_softc *, struct vmxnet3_rxqueue *);
159 static int	vmxnet3_reinit_queues(struct vmxnet3_softc *);
160 static int	vmxnet3_enable_device(struct vmxnet3_softc *);
161 static void	vmxnet3_reinit_rxfilters(struct vmxnet3_softc *);
162 static int	vmxnet3_reinit(struct vmxnet3_softc *);
163 static void	vmxnet3_init_locked(struct vmxnet3_softc *);
164 static void	vmxnet3_init(void *);
165 
166 static int	vmxnet3_txq_offload_ctx(struct vmxnet3_txqueue *,struct mbuf *,
167 		    int *, int *, int *);
168 static int	vmxnet3_txq_load_mbuf(struct vmxnet3_txqueue *, struct mbuf **,
169 		    bus_dmamap_t, bus_dma_segment_t [], int *);
170 static void	vmxnet3_txq_unload_mbuf(struct vmxnet3_txqueue *, bus_dmamap_t);
171 static int	vmxnet3_txq_encap(struct vmxnet3_txqueue *, struct mbuf **);
172 #ifdef VMXNET3_LEGACY_TX
173 static void	vmxnet3_start_locked(struct ifnet *);
174 static void	vmxnet3_start(struct ifnet *);
175 #else
176 static int	vmxnet3_txq_mq_start_locked(struct vmxnet3_txqueue *,
177 		    struct mbuf *);
178 static int	vmxnet3_txq_mq_start(struct ifnet *, struct mbuf *);
179 static void	vmxnet3_txq_tq_deferred(void *, int);
180 #endif
181 static void	vmxnet3_txq_start(struct vmxnet3_txqueue *);
182 static void	vmxnet3_tx_start_all(struct vmxnet3_softc *);
183 
184 static void	vmxnet3_update_vlan_filter(struct vmxnet3_softc *, int,
185 		    uint16_t);
186 static void	vmxnet3_register_vlan(void *, struct ifnet *, uint16_t);
187 static void	vmxnet3_unregister_vlan(void *, struct ifnet *, uint16_t);
188 static void	vmxnet3_set_rxfilter(struct vmxnet3_softc *);
189 static int	vmxnet3_change_mtu(struct vmxnet3_softc *, int);
190 static int	vmxnet3_ioctl(struct ifnet *, u_long, caddr_t);
191 static uint64_t	vmxnet3_get_counter(struct ifnet *, ift_counter);
192 
193 #ifndef VMXNET3_LEGACY_TX
194 static void	vmxnet3_qflush(struct ifnet *);
195 #endif
196 
197 static int	vmxnet3_watchdog(struct vmxnet3_txqueue *);
198 static void	vmxnet3_refresh_host_stats(struct vmxnet3_softc *);
199 static void	vmxnet3_tick(void *);
200 static void	vmxnet3_link_status(struct vmxnet3_softc *);
201 static void	vmxnet3_media_status(struct ifnet *, struct ifmediareq *);
202 static int	vmxnet3_media_change(struct ifnet *);
203 static void	vmxnet3_set_lladdr(struct vmxnet3_softc *);
204 static void	vmxnet3_get_lladdr(struct vmxnet3_softc *);
205 
206 static void	vmxnet3_setup_txq_sysctl(struct vmxnet3_txqueue *,
207 		    struct sysctl_ctx_list *, struct sysctl_oid_list *);
208 static void	vmxnet3_setup_rxq_sysctl(struct vmxnet3_rxqueue *,
209 		    struct sysctl_ctx_list *, struct sysctl_oid_list *);
210 static void	vmxnet3_setup_queue_sysctl(struct vmxnet3_softc *,
211 		    struct sysctl_ctx_list *, struct sysctl_oid_list *);
212 static void	vmxnet3_setup_sysctl(struct vmxnet3_softc *);
213 
214 static void	vmxnet3_write_bar0(struct vmxnet3_softc *, bus_size_t,
215 		    uint32_t);
216 static uint32_t	vmxnet3_read_bar1(struct vmxnet3_softc *, bus_size_t);
217 static void	vmxnet3_write_bar1(struct vmxnet3_softc *, bus_size_t,
218 		    uint32_t);
219 static void	vmxnet3_write_cmd(struct vmxnet3_softc *, uint32_t);
220 static uint32_t	vmxnet3_read_cmd(struct vmxnet3_softc *, uint32_t);
221 
222 static void	vmxnet3_enable_intr(struct vmxnet3_softc *, int);
223 static void	vmxnet3_disable_intr(struct vmxnet3_softc *, int);
224 static void	vmxnet3_enable_all_intrs(struct vmxnet3_softc *);
225 static void	vmxnet3_disable_all_intrs(struct vmxnet3_softc *);
226 
227 static int	vmxnet3_dma_malloc(struct vmxnet3_softc *, bus_size_t,
228 		    bus_size_t, struct vmxnet3_dma_alloc *);
229 static void	vmxnet3_dma_free(struct vmxnet3_softc *,
230 		    struct vmxnet3_dma_alloc *);
231 static int	vmxnet3_tunable_int(struct vmxnet3_softc *,
232 		    const char *, int);
233 
234 typedef enum {
235 	VMXNET3_BARRIER_RD,
236 	VMXNET3_BARRIER_WR,
237 	VMXNET3_BARRIER_RDWR,
238 } vmxnet3_barrier_t;
239 
240 static void	vmxnet3_barrier(struct vmxnet3_softc *, vmxnet3_barrier_t);
241 
242 /* Tunables. */
243 static int vmxnet3_mq_disable = 0;
244 TUNABLE_INT("hw.vmx.mq_disable", &vmxnet3_mq_disable);
245 static int vmxnet3_default_txnqueue = VMXNET3_DEF_TX_QUEUES;
246 TUNABLE_INT("hw.vmx.txnqueue", &vmxnet3_default_txnqueue);
247 static int vmxnet3_default_rxnqueue = VMXNET3_DEF_RX_QUEUES;
248 TUNABLE_INT("hw.vmx.rxnqueue", &vmxnet3_default_rxnqueue);
249 static int vmxnet3_default_txndesc = VMXNET3_DEF_TX_NDESC;
250 TUNABLE_INT("hw.vmx.txndesc", &vmxnet3_default_txndesc);
251 static int vmxnet3_default_rxndesc = VMXNET3_DEF_RX_NDESC;
252 TUNABLE_INT("hw.vmx.rxndesc", &vmxnet3_default_rxndesc);
253 
254 static device_method_t vmxnet3_methods[] = {
255 	/* Device interface. */
256 	DEVMETHOD(device_probe,		vmxnet3_probe),
257 	DEVMETHOD(device_attach,	vmxnet3_attach),
258 	DEVMETHOD(device_detach,	vmxnet3_detach),
259 	DEVMETHOD(device_shutdown,	vmxnet3_shutdown),
260 
261 	DEVMETHOD_END
262 };
263 
264 static driver_t vmxnet3_driver = {
265 	"vmx", vmxnet3_methods, sizeof(struct vmxnet3_softc)
266 };
267 
268 static devclass_t vmxnet3_devclass;
269 DRIVER_MODULE(vmx, pci, vmxnet3_driver, vmxnet3_devclass, 0, 0);
270 
271 MODULE_DEPEND(vmx, pci, 1, 1, 1);
272 MODULE_DEPEND(vmx, ether, 1, 1, 1);
273 
274 #define VMXNET3_VMWARE_VENDOR_ID	0x15AD
275 #define VMXNET3_VMWARE_DEVICE_ID	0x07B0
276 
277 static int
278 vmxnet3_probe(device_t dev)
279 {
280 
281 	if (pci_get_vendor(dev) == VMXNET3_VMWARE_VENDOR_ID &&
282 	    pci_get_device(dev) == VMXNET3_VMWARE_DEVICE_ID) {
283 		device_set_desc(dev, "VMware VMXNET3 Ethernet Adapter");
284 		return (BUS_PROBE_DEFAULT);
285 	}
286 
287 	return (ENXIO);
288 }
289 
290 static int
291 vmxnet3_attach(device_t dev)
292 {
293 	struct vmxnet3_softc *sc;
294 	int error;
295 
296 	sc = device_get_softc(dev);
297 	sc->vmx_dev = dev;
298 
299 	pci_enable_busmaster(dev);
300 
301 	VMXNET3_CORE_LOCK_INIT(sc, device_get_nameunit(dev));
302 	callout_init_mtx(&sc->vmx_tick, &sc->vmx_mtx, 0);
303 
304 	vmxnet3_initial_config(sc);
305 
306 	error = vmxnet3_alloc_resources(sc);
307 	if (error)
308 		goto fail;
309 
310 	error = vmxnet3_check_version(sc);
311 	if (error)
312 		goto fail;
313 
314 	error = vmxnet3_alloc_rxtx_queues(sc);
315 	if (error)
316 		goto fail;
317 
318 #ifndef VMXNET3_LEGACY_TX
319 	error = vmxnet3_alloc_taskqueue(sc);
320 	if (error)
321 		goto fail;
322 #endif
323 
324 	error = vmxnet3_alloc_interrupts(sc);
325 	if (error)
326 		goto fail;
327 
328 	vmxnet3_check_multiqueue(sc);
329 
330 	error = vmxnet3_alloc_data(sc);
331 	if (error)
332 		goto fail;
333 
334 	error = vmxnet3_setup_interface(sc);
335 	if (error)
336 		goto fail;
337 
338 	error = vmxnet3_setup_interrupts(sc);
339 	if (error) {
340 		ether_ifdetach(sc->vmx_ifp);
341 		device_printf(dev, "could not set up interrupt\n");
342 		goto fail;
343 	}
344 
345 	vmxnet3_setup_sysctl(sc);
346 #ifndef VMXNET3_LEGACY_TX
347 	vmxnet3_start_taskqueue(sc);
348 #endif
349 
350 fail:
351 	if (error)
352 		vmxnet3_detach(dev);
353 
354 	return (error);
355 }
356 
357 static int
358 vmxnet3_detach(device_t dev)
359 {
360 	struct vmxnet3_softc *sc;
361 	struct ifnet *ifp;
362 
363 	sc = device_get_softc(dev);
364 	ifp = sc->vmx_ifp;
365 
366 	if (device_is_attached(dev)) {
367 		VMXNET3_CORE_LOCK(sc);
368 		vmxnet3_stop(sc);
369 		VMXNET3_CORE_UNLOCK(sc);
370 
371 		callout_drain(&sc->vmx_tick);
372 #ifndef VMXNET3_LEGACY_TX
373 		vmxnet3_drain_taskqueue(sc);
374 #endif
375 
376 		ether_ifdetach(ifp);
377 	}
378 
379 	if (sc->vmx_vlan_attach != NULL) {
380 		EVENTHANDLER_DEREGISTER(vlan_config, sc->vmx_vlan_attach);
381 		sc->vmx_vlan_attach = NULL;
382 	}
383 	if (sc->vmx_vlan_detach != NULL) {
384 		EVENTHANDLER_DEREGISTER(vlan_config, sc->vmx_vlan_detach);
385 		sc->vmx_vlan_detach = NULL;
386 	}
387 
388 #ifndef VMXNET3_LEGACY_TX
389 	vmxnet3_free_taskqueue(sc);
390 #endif
391 	vmxnet3_free_interrupts(sc);
392 
393 	if (ifp != NULL) {
394 		if_free(ifp);
395 		sc->vmx_ifp = NULL;
396 	}
397 
398 	ifmedia_removeall(&sc->vmx_media);
399 
400 	vmxnet3_free_data(sc);
401 	vmxnet3_free_resources(sc);
402 	vmxnet3_free_rxtx_queues(sc);
403 
404 	VMXNET3_CORE_LOCK_DESTROY(sc);
405 
406 	return (0);
407 }
408 
409 static int
410 vmxnet3_shutdown(device_t dev)
411 {
412 
413 	return (0);
414 }
415 
416 static int
417 vmxnet3_alloc_resources(struct vmxnet3_softc *sc)
418 {
419 	device_t dev;
420 	int rid;
421 
422 	dev = sc->vmx_dev;
423 
424 	rid = PCIR_BAR(0);
425 	sc->vmx_res0 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
426 	    RF_ACTIVE);
427 	if (sc->vmx_res0 == NULL) {
428 		device_printf(dev,
429 		    "could not map BAR0 memory\n");
430 		return (ENXIO);
431 	}
432 
433 	sc->vmx_iot0 = rman_get_bustag(sc->vmx_res0);
434 	sc->vmx_ioh0 = rman_get_bushandle(sc->vmx_res0);
435 
436 	rid = PCIR_BAR(1);
437 	sc->vmx_res1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
438 	    RF_ACTIVE);
439 	if (sc->vmx_res1 == NULL) {
440 		device_printf(dev,
441 		    "could not map BAR1 memory\n");
442 		return (ENXIO);
443 	}
444 
445 	sc->vmx_iot1 = rman_get_bustag(sc->vmx_res1);
446 	sc->vmx_ioh1 = rman_get_bushandle(sc->vmx_res1);
447 
448 	if (pci_find_cap(dev, PCIY_MSIX, NULL) == 0) {
449 		rid = PCIR_BAR(2);
450 		sc->vmx_msix_res = bus_alloc_resource_any(dev,
451 		    SYS_RES_MEMORY, &rid, RF_ACTIVE);
452 	}
453 
454 	if (sc->vmx_msix_res == NULL)
455 		sc->vmx_flags |= VMXNET3_FLAG_NO_MSIX;
456 
457 	return (0);
458 }
459 
460 static void
461 vmxnet3_free_resources(struct vmxnet3_softc *sc)
462 {
463 	device_t dev;
464 	int rid;
465 
466 	dev = sc->vmx_dev;
467 
468 	if (sc->vmx_res0 != NULL) {
469 		rid = PCIR_BAR(0);
470 		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->vmx_res0);
471 		sc->vmx_res0 = NULL;
472 	}
473 
474 	if (sc->vmx_res1 != NULL) {
475 		rid = PCIR_BAR(1);
476 		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->vmx_res1);
477 		sc->vmx_res1 = NULL;
478 	}
479 
480 	if (sc->vmx_msix_res != NULL) {
481 		rid = PCIR_BAR(2);
482 		bus_release_resource(dev, SYS_RES_MEMORY, rid,
483 		    sc->vmx_msix_res);
484 		sc->vmx_msix_res = NULL;
485 	}
486 }
487 
488 static int
489 vmxnet3_check_version(struct vmxnet3_softc *sc)
490 {
491 	device_t dev;
492 	uint32_t version;
493 
494 	dev = sc->vmx_dev;
495 
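	/*
	 * The device reports supported API revisions as a bit mask; verify
	 * that revision 1 of both the device and UPT interfaces is supported
	 * and select it.
	 */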
496 	version = vmxnet3_read_bar1(sc, VMXNET3_BAR1_VRRS);
497 	if ((version & 0x01) == 0) {
498 		device_printf(dev, "unsupported hardware version %#x\n",
499 		    version);
500 		return (ENOTSUP);
501 	}
502 	vmxnet3_write_bar1(sc, VMXNET3_BAR1_VRRS, 1);
503 
504 	version = vmxnet3_read_bar1(sc, VMXNET3_BAR1_UVRS);
505 	if ((version & 0x01) == 0) {
506 		device_printf(dev, "unsupported UPT version %#x\n", version);
507 		return (ENOTSUP);
508 	}
509 	vmxnet3_write_bar1(sc, VMXNET3_BAR1_UVRS, 1);
510 
511 	return (0);
512 }
513 
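/* Round a positive value down to the nearest power of two. */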
514 static int
515 trunc_powerof2(int val)
516 {
517 
518 	return (1U << (fls(val) - 1));
519 }
520 
521 static void
522 vmxnet3_initial_config(struct vmxnet3_softc *sc)
523 {
524 	int nqueue, ndesc;
525 
526 	nqueue = vmxnet3_tunable_int(sc, "txnqueue", vmxnet3_default_txnqueue);
527 	if (nqueue > VMXNET3_MAX_TX_QUEUES || nqueue < 1)
528 		nqueue = VMXNET3_DEF_TX_QUEUES;
529 	if (nqueue > mp_ncpus)
530 		nqueue = mp_ncpus;
531 	sc->vmx_max_ntxqueues = trunc_powerof2(nqueue);
532 
533 	nqueue = vmxnet3_tunable_int(sc, "rxnqueue", vmxnet3_default_rxnqueue);
534 	if (nqueue > VMXNET3_MAX_RX_QUEUES || nqueue < 1)
535 		nqueue = VMXNET3_DEF_RX_QUEUES;
536 	if (nqueue > mp_ncpus)
537 		nqueue = mp_ncpus;
538 	sc->vmx_max_nrxqueues = trunc_powerof2(nqueue);
539 
540 	if (vmxnet3_tunable_int(sc, "mq_disable", vmxnet3_mq_disable)) {
541 		sc->vmx_max_nrxqueues = 1;
542 		sc->vmx_max_ntxqueues = 1;
543 	}
544 
545 	ndesc = vmxnet3_tunable_int(sc, "txd", vmxnet3_default_txndesc);
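	/*
	 * The descriptor ring sizes must be a multiple of the ring alignment;
	 * round the tunable values down when necessary.
	 */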
546 	if (ndesc > VMXNET3_MAX_TX_NDESC || ndesc < VMXNET3_MIN_TX_NDESC)
547 		ndesc = VMXNET3_DEF_TX_NDESC;
548 	if (ndesc & VMXNET3_MASK_TX_NDESC)
549 		ndesc &= ~VMXNET3_MASK_TX_NDESC;
550 	sc->vmx_ntxdescs = ndesc;
551 
552 	ndesc = vmxnet3_tunable_int(sc, "rxd", vmxnet3_default_rxndesc);
553 	if (ndesc > VMXNET3_MAX_RX_NDESC || ndesc < VMXNET3_MIN_RX_NDESC)
554 		ndesc = VMXNET3_DEF_RX_NDESC;
555 	if (ndesc & VMXNET3_MASK_RX_NDESC)
556 		ndesc &= ~VMXNET3_MASK_RX_NDESC;
557 	sc->vmx_nrxdescs = ndesc;
558 	sc->vmx_max_rxsegs = VMXNET3_MAX_RX_SEGS;
559 }
560 
561 static void
562 vmxnet3_check_multiqueue(struct vmxnet3_softc *sc)
563 {
564 
565 	if (sc->vmx_intr_type != VMXNET3_IT_MSIX)
566 		goto out;
567 
568 	/* BMV: Just use the maximum configured for now. */
569 	sc->vmx_nrxqueues = sc->vmx_max_nrxqueues;
570 	sc->vmx_ntxqueues = sc->vmx_max_ntxqueues;
571 
572 	if (sc->vmx_nrxqueues > 1)
573 		sc->vmx_flags |= VMXNET3_FLAG_RSS;
574 
575 	return;
576 
577 out:
578 	sc->vmx_ntxqueues = 1;
579 	sc->vmx_nrxqueues = 1;
580 }
581 
582 static int
583 vmxnet3_alloc_msix_interrupts(struct vmxnet3_softc *sc)
584 {
585 	device_t dev;
586 	int nmsix, cnt, required;
587 
588 	dev = sc->vmx_dev;
589 
590 	if (sc->vmx_flags & VMXNET3_FLAG_NO_MSIX)
591 		return (1);
592 
593 	/* Allocate an additional vector for the events interrupt. */
594 	required = sc->vmx_max_nrxqueues + sc->vmx_max_ntxqueues + 1;
595 
596 	nmsix = pci_msix_count(dev);
597 	if (nmsix < required)
598 		return (1);
599 
600 	cnt = required;
601 	if (pci_alloc_msix(dev, &cnt) == 0 && cnt >= required) {
602 		sc->vmx_nintrs = required;
603 		return (0);
604 	} else
605 		pci_release_msi(dev);
606 
607 	/* BMV TODO Fallback to sharing MSIX vectors if possible. */
608 	/* BMV: TODO: Fall back to sharing MSI-X vectors if possible. */
609 	return (1);
610 }
611 
612 static int
613 vmxnet3_alloc_msi_interrupts(struct vmxnet3_softc *sc)
614 {
615 	device_t dev;
616 	int nmsi, cnt, required;
617 
618 	dev = sc->vmx_dev;
619 	required = 1;
620 
621 	nmsi = pci_msi_count(dev);
622 	if (nmsi < required)
623 		return (1);
624 
625 	cnt = required;
626 	if (pci_alloc_msi(dev, &cnt) == 0 && cnt >= required) {
627 		sc->vmx_nintrs = 1;
628 		return (0);
629 	} else
630 		pci_release_msi(dev);
631 
632 	return (1);
633 }
634 
635 static int
636 vmxnet3_alloc_legacy_interrupts(struct vmxnet3_softc *sc)
637 {
638 
639 	sc->vmx_nintrs = 1;
640 	return (0);
641 }
642 
643 static int
644 vmxnet3_alloc_interrupt(struct vmxnet3_softc *sc, int rid, int flags,
645     struct vmxnet3_interrupt *intr)
646 {
647 	struct resource *irq;
648 
649 	irq = bus_alloc_resource_any(sc->vmx_dev, SYS_RES_IRQ, &rid, flags);
650 	if (irq == NULL)
651 		return (ENXIO);
652 
653 	intr->vmxi_irq = irq;
654 	intr->vmxi_rid = rid;
655 
656 	return (0);
657 }
658 
659 static int
660 vmxnet3_alloc_intr_resources(struct vmxnet3_softc *sc)
661 {
662 	int i, rid, flags, error;
663 
664 	rid = 0;
665 	flags = RF_ACTIVE;
666 
667 	if (sc->vmx_intr_type == VMXNET3_IT_LEGACY)
668 		flags |= RF_SHAREABLE;
669 	else
670 		rid = 1;
671 
672 	for (i = 0; i < sc->vmx_nintrs; i++, rid++) {
673 		error = vmxnet3_alloc_interrupt(sc, rid, flags,
674 		    &sc->vmx_intrs[i]);
675 		if (error)
676 			return (error);
677 	}
678 
679 	return (0);
680 }
681 
682 static int
683 vmxnet3_setup_msix_interrupts(struct vmxnet3_softc *sc)
684 {
685 	device_t dev;
686 	struct vmxnet3_txqueue *txq;
687 	struct vmxnet3_rxqueue *rxq;
688 	struct vmxnet3_interrupt *intr;
689 	enum intr_type type;
690 	int i, error;
691 
692 	dev = sc->vmx_dev;
693 	intr = &sc->vmx_intrs[0];
694 	type = INTR_TYPE_NET | INTR_MPSAFE;
695 
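	/*
	 * Vectors are assigned in order: one per Tx queue, one per Rx queue,
	 * and a final vector for events. The IRQ resource IDs are 1-based, so
	 * the interrupt index programmed into the device is the resource ID
	 * minus one.
	 */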
696 	for (i = 0; i < sc->vmx_ntxqueues; i++, intr++) {
697 		txq = &sc->vmx_txq[i];
698 		error = bus_setup_intr(dev, intr->vmxi_irq, type, NULL,
699 		     vmxnet3_txq_intr, txq, &intr->vmxi_handler);
700 		if (error)
701 			return (error);
702 		bus_describe_intr(dev, intr->vmxi_irq, intr->vmxi_handler,
703 		    "tq%d", i);
704 		txq->vxtxq_intr_idx = intr->vmxi_rid - 1;
705 	}
706 
707 	for (i = 0; i < sc->vmx_nrxqueues; i++, intr++) {
708 		rxq = &sc->vmx_rxq[i];
709 		error = bus_setup_intr(dev, intr->vmxi_irq, type, NULL,
710 		    vmxnet3_rxq_intr, rxq, &intr->vmxi_handler);
711 		if (error)
712 			return (error);
713 		bus_describe_intr(dev, intr->vmxi_irq, intr->vmxi_handler,
714 		    "rq%d", i);
715 		rxq->vxrxq_intr_idx = intr->vmxi_rid - 1;
716 	}
717 
718 	error = bus_setup_intr(dev, intr->vmxi_irq, type, NULL,
719 	    vmxnet3_event_intr, sc, &intr->vmxi_handler);
720 	if (error)
721 		return (error);
722 	bus_describe_intr(dev, intr->vmxi_irq, intr->vmxi_handler, "event");
723 	sc->vmx_event_intr_idx = intr->vmxi_rid - 1;
724 
725 	return (0);
726 }
727 
728 static int
729 vmxnet3_setup_legacy_interrupt(struct vmxnet3_softc *sc)
730 {
731 	struct vmxnet3_interrupt *intr;
732 	int i, error;
733 
734 	intr = &sc->vmx_intrs[0];
735 	error = bus_setup_intr(sc->vmx_dev, intr->vmxi_irq,
736 	    INTR_TYPE_NET | INTR_MPSAFE, NULL, vmxnet3_legacy_intr, sc,
737 	    &intr->vmxi_handler);
738 
739 	for (i = 0; i < sc->vmx_ntxqueues; i++)
740 		sc->vmx_txq[i].vxtxq_intr_idx = 0;
741 	for (i = 0; i < sc->vmx_nrxqueues; i++)
742 		sc->vmx_rxq[i].vxrxq_intr_idx = 0;
743 	sc->vmx_event_intr_idx = 0;
744 
745 	return (error);
746 }
747 
748 static void
749 vmxnet3_set_interrupt_idx(struct vmxnet3_softc *sc)
750 {
751 	struct vmxnet3_txqueue *txq;
752 	struct vmxnet3_txq_shared *txs;
753 	struct vmxnet3_rxqueue *rxq;
754 	struct vmxnet3_rxq_shared *rxs;
755 	int i;
756 
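	/*
	 * Propagate the assigned interrupt indices into the shared data
	 * consumed by the device.
	 */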
757 	sc->vmx_ds->evintr = sc->vmx_event_intr_idx;
758 
759 	for (i = 0; i < sc->vmx_ntxqueues; i++) {
760 		txq = &sc->vmx_txq[i];
761 		txs = txq->vxtxq_ts;
762 		txs->intr_idx = txq->vxtxq_intr_idx;
763 	}
764 
765 	for (i = 0; i < sc->vmx_nrxqueues; i++) {
766 		rxq = &sc->vmx_rxq[i];
767 		rxs = rxq->vxrxq_rs;
768 		rxs->intr_idx = rxq->vxrxq_intr_idx;
769 	}
770 }
771 
772 static int
773 vmxnet3_setup_interrupts(struct vmxnet3_softc *sc)
774 {
775 	int error;
776 
777 	error = vmxnet3_alloc_intr_resources(sc);
778 	if (error)
779 		return (error);
780 
781 	switch (sc->vmx_intr_type) {
782 	case VMXNET3_IT_MSIX:
783 		error = vmxnet3_setup_msix_interrupts(sc);
784 		break;
785 	case VMXNET3_IT_MSI:
786 	case VMXNET3_IT_LEGACY:
787 		error = vmxnet3_setup_legacy_interrupt(sc);
788 		break;
789 	default:
790 		panic("%s: invalid interrupt type %d", __func__,
791 		    sc->vmx_intr_type);
792 	}
793 
794 	if (error == 0)
795 		vmxnet3_set_interrupt_idx(sc);
796 
797 	return (error);
798 }
799 
800 static int
801 vmxnet3_alloc_interrupts(struct vmxnet3_softc *sc)
802 {
803 	device_t dev;
804 	uint32_t config;
805 	int error;
806 
807 	dev = sc->vmx_dev;
808 	config = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_INTRCFG);
809 
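	/*
	 * The low two bits report the interrupt type preferred by the
	 * hypervisor; the next two bits report the interrupt mask mode.
	 */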
810 	sc->vmx_intr_type = config & 0x03;
811 	sc->vmx_intr_mask_mode = (config >> 2) & 0x03;
812 
813 	switch (sc->vmx_intr_type) {
814 	case VMXNET3_IT_AUTO:
815 		sc->vmx_intr_type = VMXNET3_IT_MSIX;
816 		/* FALLTHROUGH */
817 	case VMXNET3_IT_MSIX:
818 		error = vmxnet3_alloc_msix_interrupts(sc);
819 		if (error == 0)
820 			break;
821 		sc->vmx_intr_type = VMXNET3_IT_MSI;
822 		/* FALLTHROUGH */
823 	case VMXNET3_IT_MSI:
824 		error = vmxnet3_alloc_msi_interrupts(sc);
825 		if (error == 0)
826 			break;
827 		sc->vmx_intr_type = VMXNET3_IT_LEGACY;
828 		/* FALLTHROUGH */
829 	case VMXNET3_IT_LEGACY:
830 		error = vmxnet3_alloc_legacy_interrupts(sc);
831 		if (error == 0)
832 			break;
833 		/* FALLTHROUGH */
834 	default:
835 		sc->vmx_intr_type = -1;
836 		device_printf(dev, "cannot allocate any interrupt resources\n");
837 		return (ENXIO);
838 	}
839 
840 	return (error);
841 }
842 
843 static void
844 vmxnet3_free_interrupt(struct vmxnet3_softc *sc,
845     struct vmxnet3_interrupt *intr)
846 {
847 	device_t dev;
848 
849 	dev = sc->vmx_dev;
850 
851 	if (intr->vmxi_handler != NULL) {
852 		bus_teardown_intr(dev, intr->vmxi_irq, intr->vmxi_handler);
853 		intr->vmxi_handler = NULL;
854 	}
855 
856 	if (intr->vmxi_irq != NULL) {
857 		bus_release_resource(dev, SYS_RES_IRQ, intr->vmxi_rid,
858 		    intr->vmxi_irq);
859 		intr->vmxi_irq = NULL;
860 		intr->vmxi_rid = -1;
861 	}
862 }
863 
864 static void
865 vmxnet3_free_interrupts(struct vmxnet3_softc *sc)
866 {
867 	int i;
868 
869 	for (i = 0; i < sc->vmx_nintrs; i++)
870 		vmxnet3_free_interrupt(sc, &sc->vmx_intrs[i]);
871 
872 	if (sc->vmx_intr_type == VMXNET3_IT_MSI ||
873 	    sc->vmx_intr_type == VMXNET3_IT_MSIX)
874 		pci_release_msi(sc->vmx_dev);
875 }
876 
877 #ifndef VMXNET3_LEGACY_TX
878 static int
879 vmxnet3_alloc_taskqueue(struct vmxnet3_softc *sc)
880 {
881 	device_t dev;
882 
883 	dev = sc->vmx_dev;
884 
885 	sc->vmx_tq = taskqueue_create(device_get_nameunit(dev), M_NOWAIT,
886 	    taskqueue_thread_enqueue, &sc->vmx_tq);
887 	if (sc->vmx_tq == NULL)
888 		return (ENOMEM);
889 
890 	return (0);
891 }
892 
893 static void
894 vmxnet3_start_taskqueue(struct vmxnet3_softc *sc)
895 {
896 	device_t dev;
897 	int nthreads, error;
898 
899 	dev = sc->vmx_dev;
900 
901 	/*
902 	 * The taskqueue is typically used infrequently, so a dedicated
903 	 * thread for each queue is unnecessary.
904 	 */
905 	nthreads = MAX(1, sc->vmx_ntxqueues / 2);
906 
907 	/*
908 	 * Most drivers just ignore the return value - it only fails
909 	 * with ENOMEM so an error is not likely. It is hard for us
910 	 * to recover from an error here.
911 	 */
912 	error = taskqueue_start_threads(&sc->vmx_tq, nthreads, PI_NET,
913 	    "%s taskq", device_get_nameunit(dev));
914 	if (error)
915 		device_printf(dev, "failed to start taskqueue: %d\n", error);
916 }
917 
918 static void
919 vmxnet3_drain_taskqueue(struct vmxnet3_softc *sc)
920 {
921 	struct vmxnet3_txqueue *txq;
922 	int i;
923 
924 	if (sc->vmx_tq != NULL) {
925 		for (i = 0; i < sc->vmx_max_ntxqueues; i++) {
926 			txq = &sc->vmx_txq[i];
927 			taskqueue_drain(sc->vmx_tq, &txq->vxtxq_defrtask);
928 		}
929 	}
930 }
931 
932 static void
933 vmxnet3_free_taskqueue(struct vmxnet3_softc *sc)
934 {
935 	if (sc->vmx_tq != NULL) {
936 		taskqueue_free(sc->vmx_tq);
937 		sc->vmx_tq = NULL;
938 	}
939 }
940 #endif
941 
942 static int
943 vmxnet3_init_rxq(struct vmxnet3_softc *sc, int q)
944 {
945 	struct vmxnet3_rxqueue *rxq;
946 	struct vmxnet3_rxring *rxr;
947 	int i;
948 
949 	rxq = &sc->vmx_rxq[q];
950 
951 	snprintf(rxq->vxrxq_name, sizeof(rxq->vxrxq_name), "%s-rx%d",
952 	    device_get_nameunit(sc->vmx_dev), q);
953 	mtx_init(&rxq->vxrxq_mtx, rxq->vxrxq_name, NULL, MTX_DEF);
954 
955 	rxq->vxrxq_sc = sc;
956 	rxq->vxrxq_id = q;
957 
958 	for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
959 		rxr = &rxq->vxrxq_cmd_ring[i];
960 		rxr->vxrxr_rid = i;
961 		rxr->vxrxr_ndesc = sc->vmx_nrxdescs;
962 		rxr->vxrxr_rxbuf = malloc(rxr->vxrxr_ndesc *
963 		    sizeof(struct vmxnet3_rxbuf), M_DEVBUF, M_NOWAIT | M_ZERO);
964 		if (rxr->vxrxr_rxbuf == NULL)
965 			return (ENOMEM);
966 
967 		rxq->vxrxq_comp_ring.vxcr_ndesc += sc->vmx_nrxdescs;
968 	}
969 
970 	return (0);
971 }
972 
973 static int
974 vmxnet3_init_txq(struct vmxnet3_softc *sc, int q)
975 {
976 	struct vmxnet3_txqueue *txq;
977 	struct vmxnet3_txring *txr;
978 
979 	txq = &sc->vmx_txq[q];
980 	txr = &txq->vxtxq_cmd_ring;
981 
982 	snprintf(txq->vxtxq_name, sizeof(txq->vxtxq_name), "%s-tx%d",
983 	    device_get_nameunit(sc->vmx_dev), q);
984 	mtx_init(&txq->vxtxq_mtx, txq->vxtxq_name, NULL, MTX_DEF);
985 
986 	txq->vxtxq_sc = sc;
987 	txq->vxtxq_id = q;
988 
989 	txr->vxtxr_ndesc = sc->vmx_ntxdescs;
990 	txr->vxtxr_txbuf = malloc(txr->vxtxr_ndesc *
991 	    sizeof(struct vmxnet3_txbuf), M_DEVBUF, M_NOWAIT | M_ZERO);
992 	if (txr->vxtxr_txbuf == NULL)
993 		return (ENOMEM);
994 
995 	txq->vxtxq_comp_ring.vxcr_ndesc = sc->vmx_ntxdescs;
996 
997 #ifndef VMXNET3_LEGACY_TX
998 	TASK_INIT(&txq->vxtxq_defrtask, 0, vmxnet3_txq_tq_deferred, txq);
999 
1000 	txq->vxtxq_br = buf_ring_alloc(VMXNET3_DEF_BUFRING_SIZE, M_DEVBUF,
1001 	    M_NOWAIT, &txq->vxtxq_mtx);
1002 	if (txq->vxtxq_br == NULL)
1003 		return (ENOMEM);
1004 #endif
1005 
1006 	return (0);
1007 }
1008 
1009 static int
1010 vmxnet3_alloc_rxtx_queues(struct vmxnet3_softc *sc)
1011 {
1012 	int i, error;
1013 
1014 	/*
1015 	 * Only attempt to create multiple queues if MSI-X is available. MSI-X
1016 	 * is disabled by default because it is apparently broken for devices
1017 	 * passed through by at least ESXi 5.1. The hw.pci.honor_msi_blacklist
1018 	 * tunable must be set to zero to allow MSI-X to be used. This check
1019 	 * prevents us from allocating queue structures that we will not use.
1020 	 */
1021 	if (sc->vmx_flags & VMXNET3_FLAG_NO_MSIX) {
1022 		sc->vmx_max_nrxqueues = 1;
1023 		sc->vmx_max_ntxqueues = 1;
1024 	}
1025 
1026 	sc->vmx_rxq = malloc(sizeof(struct vmxnet3_rxqueue) *
1027 	    sc->vmx_max_nrxqueues, M_DEVBUF, M_NOWAIT | M_ZERO);
1028 	sc->vmx_txq = malloc(sizeof(struct vmxnet3_txqueue) *
1029 	    sc->vmx_max_ntxqueues, M_DEVBUF, M_NOWAIT | M_ZERO);
1030 	if (sc->vmx_rxq == NULL || sc->vmx_txq == NULL)
1031 		return (ENOMEM);
1032 
1033 	for (i = 0; i < sc->vmx_max_nrxqueues; i++) {
1034 		error = vmxnet3_init_rxq(sc, i);
1035 		if (error)
1036 			return (error);
1037 	}
1038 
1039 	for (i = 0; i < sc->vmx_max_ntxqueues; i++) {
1040 		error = vmxnet3_init_txq(sc, i);
1041 		if (error)
1042 			return (error);
1043 	}
1044 
1045 	return (0);
1046 }
1047 
1048 static void
1049 vmxnet3_destroy_rxq(struct vmxnet3_rxqueue *rxq)
1050 {
1051 	struct vmxnet3_rxring *rxr;
1052 	int i;
1053 
1054 	rxq->vxrxq_sc = NULL;
1055 	rxq->vxrxq_id = -1;
1056 
1057 	for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
1058 		rxr = &rxq->vxrxq_cmd_ring[i];
1059 
1060 		if (rxr->vxrxr_rxbuf != NULL) {
1061 			free(rxr->vxrxr_rxbuf, M_DEVBUF);
1062 			rxr->vxrxr_rxbuf = NULL;
1063 		}
1064 	}
1065 
1066 	if (mtx_initialized(&rxq->vxrxq_mtx) != 0)
1067 		mtx_destroy(&rxq->vxrxq_mtx);
1068 }
1069 
1070 static void
1071 vmxnet3_destroy_txq(struct vmxnet3_txqueue *txq)
1072 {
1073 	struct vmxnet3_txring *txr;
1074 
1075 	txr = &txq->vxtxq_cmd_ring;
1076 
1077 	txq->vxtxq_sc = NULL;
1078 	txq->vxtxq_id = -1;
1079 
1080 #ifndef VMXNET3_LEGACY_TX
1081 	if (txq->vxtxq_br != NULL) {
1082 		buf_ring_free(txq->vxtxq_br, M_DEVBUF);
1083 		txq->vxtxq_br = NULL;
1084 	}
1085 #endif
1086 
1087 	if (txr->vxtxr_txbuf != NULL) {
1088 		free(txr->vxtxr_txbuf, M_DEVBUF);
1089 		txr->vxtxr_txbuf = NULL;
1090 	}
1091 
1092 	if (mtx_initialized(&txq->vxtxq_mtx) != 0)
1093 		mtx_destroy(&txq->vxtxq_mtx);
1094 }
1095 
1096 static void
1097 vmxnet3_free_rxtx_queues(struct vmxnet3_softc *sc)
1098 {
1099 	int i;
1100 
1101 	if (sc->vmx_rxq != NULL) {
1102 		for (i = 0; i < sc->vmx_max_nrxqueues; i++)
1103 			vmxnet3_destroy_rxq(&sc->vmx_rxq[i]);
1104 		free(sc->vmx_rxq, M_DEVBUF);
1105 		sc->vmx_rxq = NULL;
1106 	}
1107 
1108 	if (sc->vmx_txq != NULL) {
1109 		for (i = 0; i < sc->vmx_max_ntxqueues; i++)
1110 			vmxnet3_destroy_txq(&sc->vmx_txq[i]);
1111 		free(sc->vmx_txq, M_DEVBUF);
1112 		sc->vmx_txq = NULL;
1113 	}
1114 }
1115 
1116 static int
1117 vmxnet3_alloc_shared_data(struct vmxnet3_softc *sc)
1118 {
1119 	device_t dev;
1120 	uint8_t *kva;
1121 	size_t size;
1122 	int i, error;
1123 
1124 	dev = sc->vmx_dev;
1125 
1126 	size = sizeof(struct vmxnet3_driver_shared);
1127 	error = vmxnet3_dma_malloc(sc, size, 1, &sc->vmx_ds_dma);
1128 	if (error) {
1129 		device_printf(dev, "cannot alloc shared memory\n");
1130 		return (error);
1131 	}
1132 	sc->vmx_ds = (struct vmxnet3_driver_shared *) sc->vmx_ds_dma.dma_vaddr;
1133 
1134 	size = sc->vmx_ntxqueues * sizeof(struct vmxnet3_txq_shared) +
1135 	    sc->vmx_nrxqueues * sizeof(struct vmxnet3_rxq_shared);
1136 	error = vmxnet3_dma_malloc(sc, size, 128, &sc->vmx_qs_dma);
1137 	if (error) {
1138 		device_printf(dev, "cannot alloc queue shared memory\n");
1139 		return (error);
1140 	}
1141 	sc->vmx_qs = (void *) sc->vmx_qs_dma.dma_vaddr;
1142 	kva = sc->vmx_qs;
1143 
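	/*
	 * Carve the per-queue shared regions out of the single allocation:
	 * the Tx queue structures first, followed by the Rx queue structures.
	 */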
1144 	for (i = 0; i < sc->vmx_ntxqueues; i++) {
1145 		sc->vmx_txq[i].vxtxq_ts = (struct vmxnet3_txq_shared *) kva;
1146 		kva += sizeof(struct vmxnet3_txq_shared);
1147 	}
1148 	for (i = 0; i < sc->vmx_nrxqueues; i++) {
1149 		sc->vmx_rxq[i].vxrxq_rs = (struct vmxnet3_rxq_shared *) kva;
1150 		kva += sizeof(struct vmxnet3_rxq_shared);
1151 	}
1152 
1153 	if (sc->vmx_flags & VMXNET3_FLAG_RSS) {
1154 		size = sizeof(struct vmxnet3_rss_shared);
1155 		error = vmxnet3_dma_malloc(sc, size, 128, &sc->vmx_rss_dma);
1156 		if (error) {
1157 			device_printf(dev, "cannot alloc rss shared memory\n");
1158 			return (error);
1159 		}
1160 		sc->vmx_rss =
1161 		    (struct vmxnet3_rss_shared *) sc->vmx_rss_dma.dma_vaddr;
1162 	}
1163 
1164 	return (0);
1165 }
1166 
1167 static void
1168 vmxnet3_free_shared_data(struct vmxnet3_softc *sc)
1169 {
1170 
1171 	if (sc->vmx_rss != NULL) {
1172 		vmxnet3_dma_free(sc, &sc->vmx_rss_dma);
1173 		sc->vmx_rss = NULL;
1174 	}
1175 
1176 	if (sc->vmx_qs != NULL) {
1177 		vmxnet3_dma_free(sc, &sc->vmx_qs_dma);
1178 		sc->vmx_qs = NULL;
1179 	}
1180 
1181 	if (sc->vmx_ds != NULL) {
1182 		vmxnet3_dma_free(sc, &sc->vmx_ds_dma);
1183 		sc->vmx_ds = NULL;
1184 	}
1185 }
1186 
1187 static int
1188 vmxnet3_alloc_txq_data(struct vmxnet3_softc *sc)
1189 {
1190 	device_t dev;
1191 	struct vmxnet3_txqueue *txq;
1192 	struct vmxnet3_txring *txr;
1193 	struct vmxnet3_comp_ring *txc;
1194 	size_t descsz, compsz;
1195 	int i, q, error;
1196 
1197 	dev = sc->vmx_dev;
1198 
1199 	for (q = 0; q < sc->vmx_ntxqueues; q++) {
1200 		txq = &sc->vmx_txq[q];
1201 		txr = &txq->vxtxq_cmd_ring;
1202 		txc = &txq->vxtxq_comp_ring;
1203 
1204 		descsz = txr->vxtxr_ndesc * sizeof(struct vmxnet3_txdesc);
1205 		compsz = txr->vxtxr_ndesc * sizeof(struct vmxnet3_txcompdesc);
1206 
1207 		error = bus_dma_tag_create(bus_get_dma_tag(dev),
1208 		    1, 0,			/* alignment, boundary */
1209 		    BUS_SPACE_MAXADDR,		/* lowaddr */
1210 		    BUS_SPACE_MAXADDR,		/* highaddr */
1211 		    NULL, NULL,			/* filter, filterarg */
1212 		    VMXNET3_TX_MAXSIZE,		/* maxsize */
1213 		    VMXNET3_TX_MAXSEGS,		/* nsegments */
1214 		    VMXNET3_TX_MAXSEGSIZE,	/* maxsegsize */
1215 		    0,				/* flags */
1216 		    NULL, NULL,			/* lockfunc, lockarg */
1217 		    &txr->vxtxr_txtag);
1218 		if (error) {
1219 			device_printf(dev,
1220 			    "unable to create Tx buffer tag for queue %d\n", q);
1221 			return (error);
1222 		}
1223 
1224 		error = vmxnet3_dma_malloc(sc, descsz, 512, &txr->vxtxr_dma);
1225 		if (error) {
1226 			device_printf(dev, "cannot alloc Tx descriptors for "
1227 			    "queue %d error %d\n", q, error);
1228 			return (error);
1229 		}
1230 		txr->vxtxr_txd =
1231 		    (struct vmxnet3_txdesc *) txr->vxtxr_dma.dma_vaddr;
1232 
1233 		error = vmxnet3_dma_malloc(sc, compsz, 512, &txc->vxcr_dma);
1234 		if (error) {
1235 			device_printf(dev, "cannot alloc Tx comp descriptors "
1236 			   "for queue %d error %d\n", q, error);
1237 			return (error);
1238 		}
1239 		txc->vxcr_u.txcd =
1240 		    (struct vmxnet3_txcompdesc *) txc->vxcr_dma.dma_vaddr;
1241 
1242 		for (i = 0; i < txr->vxtxr_ndesc; i++) {
1243 			error = bus_dmamap_create(txr->vxtxr_txtag, 0,
1244 			    &txr->vxtxr_txbuf[i].vtxb_dmamap);
1245 			if (error) {
1246 				device_printf(dev, "unable to create Tx buf "
1247 				    "dmamap for queue %d idx %d\n", q, i);
1248 				return (error);
1249 			}
1250 		}
1251 	}
1252 
1253 	return (0);
1254 }
1255 
1256 static void
1257 vmxnet3_free_txq_data(struct vmxnet3_softc *sc)
1258 {
1259 	device_t dev;
1260 	struct vmxnet3_txqueue *txq;
1261 	struct vmxnet3_txring *txr;
1262 	struct vmxnet3_comp_ring *txc;
1263 	struct vmxnet3_txbuf *txb;
1264 	int i, q;
1265 
1266 	dev = sc->vmx_dev;
1267 
1268 	for (q = 0; q < sc->vmx_ntxqueues; q++) {
1269 		txq = &sc->vmx_txq[q];
1270 		txr = &txq->vxtxq_cmd_ring;
1271 		txc = &txq->vxtxq_comp_ring;
1272 
1273 		for (i = 0; i < txr->vxtxr_ndesc; i++) {
1274 			txb = &txr->vxtxr_txbuf[i];
1275 			if (txb->vtxb_dmamap != NULL) {
1276 				bus_dmamap_destroy(txr->vxtxr_txtag,
1277 				    txb->vtxb_dmamap);
1278 				txb->vtxb_dmamap = NULL;
1279 			}
1280 		}
1281 
1282 		if (txc->vxcr_u.txcd != NULL) {
1283 			vmxnet3_dma_free(sc, &txc->vxcr_dma);
1284 			txc->vxcr_u.txcd = NULL;
1285 		}
1286 
1287 		if (txr->vxtxr_txd != NULL) {
1288 			vmxnet3_dma_free(sc, &txr->vxtxr_dma);
1289 			txr->vxtxr_txd = NULL;
1290 		}
1291 
1292 		if (txr->vxtxr_txtag != NULL) {
1293 			bus_dma_tag_destroy(txr->vxtxr_txtag);
1294 			txr->vxtxr_txtag = NULL;
1295 		}
1296 	}
1297 }
1298 
1299 static int
1300 vmxnet3_alloc_rxq_data(struct vmxnet3_softc *sc)
1301 {
1302 	device_t dev;
1303 	struct vmxnet3_rxqueue *rxq;
1304 	struct vmxnet3_rxring *rxr;
1305 	struct vmxnet3_comp_ring *rxc;
1306 	int descsz, compsz;
1307 	int i, j, q, error;
1308 
1309 	dev = sc->vmx_dev;
1310 
1311 	for (q = 0; q < sc->vmx_nrxqueues; q++) {
1312 		rxq = &sc->vmx_rxq[q];
1313 		rxc = &rxq->vxrxq_comp_ring;
1314 		compsz = 0;
1315 
1316 		for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
1317 			rxr = &rxq->vxrxq_cmd_ring[i];
1318 
1319 			descsz = rxr->vxrxr_ndesc *
1320 			    sizeof(struct vmxnet3_rxdesc);
1321 			compsz += rxr->vxrxr_ndesc *
1322 			    sizeof(struct vmxnet3_rxcompdesc);
1323 
1324 			error = bus_dma_tag_create(bus_get_dma_tag(dev),
1325 			    1, 0,		/* alignment, boundary */
1326 			    BUS_SPACE_MAXADDR,	/* lowaddr */
1327 			    BUS_SPACE_MAXADDR,	/* highaddr */
1328 			    NULL, NULL,		/* filter, filterarg */
1329 			    MJUMPAGESIZE,	/* maxsize */
1330 			    1,			/* nsegments */
1331 			    MJUMPAGESIZE,	/* maxsegsize */
1332 			    0,			/* flags */
1333 			    NULL, NULL,		/* lockfunc, lockarg */
1334 			    &rxr->vxrxr_rxtag);
1335 			if (error) {
1336 				device_printf(dev,
1337 				    "unable to create Rx buffer tag for "
1338 				    "queue %d\n", q);
1339 				return (error);
1340 			}
1341 
1342 			error = vmxnet3_dma_malloc(sc, descsz, 512,
1343 			    &rxr->vxrxr_dma);
1344 			if (error) {
1345 				device_printf(dev, "cannot allocate Rx "
1346 				    "descriptors for queue %d/%d error %d\n",
1347 				    q, i, error);
1348 				return (error);
1349 			}
1350 			rxr->vxrxr_rxd =
1351 			    (struct vmxnet3_rxdesc *) rxr->vxrxr_dma.dma_vaddr;
1352 		}
1353 
1354 		error = vmxnet3_dma_malloc(sc, compsz, 512, &rxc->vxcr_dma);
1355 		if (error) {
1356 			device_printf(dev, "cannot alloc Rx comp descriptors "
1357 			    "for queue %d error %d\n", q, error);
1358 			return (error);
1359 		}
1360 		rxc->vxcr_u.rxcd =
1361 		    (struct vmxnet3_rxcompdesc *) rxc->vxcr_dma.dma_vaddr;
1362 
1363 		for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
1364 			rxr = &rxq->vxrxq_cmd_ring[i];
1365 
1366 			error = bus_dmamap_create(rxr->vxrxr_rxtag, 0,
1367 			    &rxr->vxrxr_spare_dmap);
1368 			if (error) {
1369 				device_printf(dev, "unable to create spare "
1370 				    "dmamap for queue %d/%d error %d\n",
1371 				    q, i, error);
1372 				return (error);
1373 			}
1374 
1375 			for (j = 0; j < rxr->vxrxr_ndesc; j++) {
1376 				error = bus_dmamap_create(rxr->vxrxr_rxtag, 0,
1377 				    &rxr->vxrxr_rxbuf[j].vrxb_dmamap);
1378 				if (error) {
1379 					device_printf(dev, "unable to create "
1380 					    "dmamap for queue %d/%d slot %d "
1381 					    "error %d\n",
1382 					    q, i, j, error);
1383 					return (error);
1384 				}
1385 			}
1386 		}
1387 	}
1388 
1389 	return (0);
1390 }
1391 
1392 static void
1393 vmxnet3_free_rxq_data(struct vmxnet3_softc *sc)
1394 {
1395 	device_t dev;
1396 	struct vmxnet3_rxqueue *rxq;
1397 	struct vmxnet3_rxring *rxr;
1398 	struct vmxnet3_comp_ring *rxc;
1399 	struct vmxnet3_rxbuf *rxb;
1400 	int i, j, q;
1401 
1402 	dev = sc->vmx_dev;
1403 
1404 	for (q = 0; q < sc->vmx_nrxqueues; q++) {
1405 		rxq = &sc->vmx_rxq[q];
1406 		rxc = &rxq->vxrxq_comp_ring;
1407 
1408 		for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
1409 			rxr = &rxq->vxrxq_cmd_ring[i];
1410 
1411 			if (rxr->vxrxr_spare_dmap != NULL) {
1412 				bus_dmamap_destroy(rxr->vxrxr_rxtag,
1413 				    rxr->vxrxr_spare_dmap);
1414 				rxr->vxrxr_spare_dmap = NULL;
1415 			}
1416 
1417 			for (j = 0; j < rxr->vxrxr_ndesc; j++) {
1418 				rxb = &rxr->vxrxr_rxbuf[j];
1419 				if (rxb->vrxb_dmamap != NULL) {
1420 					bus_dmamap_destroy(rxr->vxrxr_rxtag,
1421 					    rxb->vrxb_dmamap);
1422 					rxb->vrxb_dmamap = NULL;
1423 				}
1424 			}
1425 		}
1426 
1427 		if (rxc->vxcr_u.rxcd != NULL) {
1428 			vmxnet3_dma_free(sc, &rxc->vxcr_dma);
1429 			rxc->vxcr_u.rxcd = NULL;
1430 		}
1431 
1432 		for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
1433 			rxr = &rxq->vxrxq_cmd_ring[i];
1434 
1435 			if (rxr->vxrxr_rxd != NULL) {
1436 				vmxnet3_dma_free(sc, &rxr->vxrxr_dma);
1437 				rxr->vxrxr_rxd = NULL;
1438 			}
1439 
1440 			if (rxr->vxrxr_rxtag != NULL) {
1441 				bus_dma_tag_destroy(rxr->vxrxr_rxtag);
1442 				rxr->vxrxr_rxtag = NULL;
1443 			}
1444 		}
1445 	}
1446 }
1447 
1448 static int
1449 vmxnet3_alloc_queue_data(struct vmxnet3_softc *sc)
1450 {
1451 	int error;
1452 
1453 	error = vmxnet3_alloc_txq_data(sc);
1454 	if (error)
1455 		return (error);
1456 
1457 	error = vmxnet3_alloc_rxq_data(sc);
1458 	if (error)
1459 		return (error);
1460 
1461 	return (0);
1462 }
1463 
1464 static void
1465 vmxnet3_free_queue_data(struct vmxnet3_softc *sc)
1466 {
1467 
1468 	if (sc->vmx_rxq != NULL)
1469 		vmxnet3_free_rxq_data(sc);
1470 
1471 	if (sc->vmx_txq != NULL)
1472 		vmxnet3_free_txq_data(sc);
1473 }
1474 
1475 static int
1476 vmxnet3_alloc_mcast_table(struct vmxnet3_softc *sc)
1477 {
1478 	int error;
1479 
1480 	error = vmxnet3_dma_malloc(sc, VMXNET3_MULTICAST_MAX * ETHER_ADDR_LEN,
1481 	    32, &sc->vmx_mcast_dma);
1482 	if (error)
1483 		device_printf(sc->vmx_dev, "unable to alloc multicast table\n");
1484 	else
1485 		sc->vmx_mcast = sc->vmx_mcast_dma.dma_vaddr;
1486 
1487 	return (error);
1488 }
1489 
1490 static void
1491 vmxnet3_free_mcast_table(struct vmxnet3_softc *sc)
1492 {
1493 
1494 	if (sc->vmx_mcast != NULL) {
1495 		vmxnet3_dma_free(sc, &sc->vmx_mcast_dma);
1496 		sc->vmx_mcast = NULL;
1497 	}
1498 }
1499 
1500 static void
1501 vmxnet3_init_shared_data(struct vmxnet3_softc *sc)
1502 {
1503 	struct vmxnet3_driver_shared *ds;
1504 	struct vmxnet3_txqueue *txq;
1505 	struct vmxnet3_txq_shared *txs;
1506 	struct vmxnet3_rxqueue *rxq;
1507 	struct vmxnet3_rxq_shared *rxs;
1508 	int i;
1509 
1510 	ds = sc->vmx_ds;
1511 
1512 	/*
1513 	 * Initialize the fields of the shared data that remain the same
1514 	 * across reinits. Note the shared data is zeroed when allocated.
1515 	 */
1516 
1517 	ds->magic = VMXNET3_REV1_MAGIC;
1518 
1519 	/* DriverInfo */
1520 	ds->version = VMXNET3_DRIVER_VERSION;
1521 	ds->guest = VMXNET3_GOS_FREEBSD |
1522 #ifdef __LP64__
1523 	    VMXNET3_GOS_64BIT;
1524 #else
1525 	    VMXNET3_GOS_32BIT;
1526 #endif
1527 	ds->vmxnet3_revision = 1;
1528 	ds->upt_version = 1;
1529 
1530 	/* Misc. conf */
1531 	ds->driver_data = vtophys(sc);
1532 	ds->driver_data_len = sizeof(struct vmxnet3_softc);
1533 	ds->queue_shared = sc->vmx_qs_dma.dma_paddr;
1534 	ds->queue_shared_len = sc->vmx_qs_dma.dma_size;
1535 	ds->nrxsg_max = sc->vmx_max_rxsegs;
1536 
1537 	/* RSS conf */
1538 	if (sc->vmx_flags & VMXNET3_FLAG_RSS) {
1539 		ds->rss.version = 1;
1540 		ds->rss.paddr = sc->vmx_rss_dma.dma_paddr;
1541 		ds->rss.len = sc->vmx_rss_dma.dma_size;
1542 	}
1543 
1544 	/* Interrupt control. */
1545 	ds->automask = sc->vmx_intr_mask_mode == VMXNET3_IMM_AUTO;
1546 	ds->nintr = sc->vmx_nintrs;
1547 	ds->evintr = sc->vmx_event_intr_idx;
1548 	ds->ictrl = VMXNET3_ICTRL_DISABLE_ALL;
1549 
1550 	for (i = 0; i < sc->vmx_nintrs; i++)
1551 		ds->modlevel[i] = UPT1_IMOD_ADAPTIVE;
1552 
1553 	/* Receive filter. */
1554 	ds->mcast_table = sc->vmx_mcast_dma.dma_paddr;
1555 	ds->mcast_tablelen = sc->vmx_mcast_dma.dma_size;
1556 
1557 	/* Tx queues */
1558 	for (i = 0; i < sc->vmx_ntxqueues; i++) {
1559 		txq = &sc->vmx_txq[i];
1560 		txs = txq->vxtxq_ts;
1561 
1562 		txs->cmd_ring = txq->vxtxq_cmd_ring.vxtxr_dma.dma_paddr;
1563 		txs->cmd_ring_len = txq->vxtxq_cmd_ring.vxtxr_ndesc;
1564 		txs->comp_ring = txq->vxtxq_comp_ring.vxcr_dma.dma_paddr;
1565 		txs->comp_ring_len = txq->vxtxq_comp_ring.vxcr_ndesc;
1566 		txs->driver_data = vtophys(txq);
1567 		txs->driver_data_len = sizeof(struct vmxnet3_txqueue);
1568 	}
1569 
1570 	/* Rx queues */
1571 	for (i = 0; i < sc->vmx_nrxqueues; i++) {
1572 		rxq = &sc->vmx_rxq[i];
1573 		rxs = rxq->vxrxq_rs;
1574 
1575 		rxs->cmd_ring[0] = rxq->vxrxq_cmd_ring[0].vxrxr_dma.dma_paddr;
1576 		rxs->cmd_ring_len[0] = rxq->vxrxq_cmd_ring[0].vxrxr_ndesc;
1577 		rxs->cmd_ring[1] = rxq->vxrxq_cmd_ring[1].vxrxr_dma.dma_paddr;
1578 		rxs->cmd_ring_len[1] = rxq->vxrxq_cmd_ring[1].vxrxr_ndesc;
1579 		rxs->comp_ring = rxq->vxrxq_comp_ring.vxcr_dma.dma_paddr;
1580 		rxs->comp_ring_len = rxq->vxrxq_comp_ring.vxcr_ndesc;
1581 		rxs->driver_data = vtophys(rxq);
1582 		rxs->driver_data_len = sizeof(struct vmxnet3_rxqueue);
1583 	}
1584 }
1585 
1586 static void
1587 vmxnet3_init_hwassist(struct vmxnet3_softc *sc)
1588 {
1589 	struct ifnet *ifp = sc->vmx_ifp;
1590 	uint64_t hwassist;
1591 
1592 	hwassist = 0;
1593 	if (ifp->if_capenable & IFCAP_TXCSUM)
1594 		hwassist |= VMXNET3_CSUM_OFFLOAD;
1595 	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
1596 		hwassist |= VMXNET3_CSUM_OFFLOAD_IPV6;
1597 	if (ifp->if_capenable & IFCAP_TSO4)
1598 		hwassist |= CSUM_IP_TSO;
1599 	if (ifp->if_capenable & IFCAP_TSO6)
1600 		hwassist |= CSUM_IP6_TSO;
1601 	ifp->if_hwassist = hwassist;
1602 }
1603 
1604 static void
1605 vmxnet3_reinit_interface(struct vmxnet3_softc *sc)
1606 {
1607 	struct ifnet *ifp;
1608 
1609 	ifp = sc->vmx_ifp;
1610 
1611 	/* Use the current MAC address. */
1612 	bcopy(IF_LLADDR(sc->vmx_ifp), sc->vmx_lladdr, ETHER_ADDR_LEN);
1613 	vmxnet3_set_lladdr(sc);
1614 
1615 	vmxnet3_init_hwassist(sc);
1616 }
1617 
1618 static void
1619 vmxnet3_reinit_rss_shared_data(struct vmxnet3_softc *sc)
1620 {
1621 	/*
1622 	 * Use the same key as the Linux driver until FreeBSD can do
1623 	 * RSS (presumably Toeplitz) in software.
1624 	 */
1625 	static const uint8_t rss_key[UPT1_RSS_MAX_KEY_SIZE] = {
1626 	    0x3b, 0x56, 0xd1, 0x56, 0x13, 0x4a, 0xe7, 0xac,
1627 	    0xe8, 0x79, 0x09, 0x75, 0xe8, 0x65, 0x79, 0x28,
1628 	    0x35, 0x12, 0xb9, 0x56, 0x7c, 0x76, 0x4b, 0x70,
1629 	    0xd8, 0x56, 0xa3, 0x18, 0x9b, 0x0a, 0xee, 0xf3,
1630 	    0x96, 0xa6, 0x9f, 0x8f, 0x9e, 0x8c, 0x90, 0xc9,
1631 	};
1632 
1633 	struct vmxnet3_driver_shared *ds;
1634 	struct vmxnet3_rss_shared *rss;
1635 	int i;
1636 
1637 	ds = sc->vmx_ds;
1638 	rss = sc->vmx_rss;
1639 
1640 	rss->hash_type =
1641 	    UPT1_RSS_HASH_TYPE_IPV4 | UPT1_RSS_HASH_TYPE_TCP_IPV4 |
1642 	    UPT1_RSS_HASH_TYPE_IPV6 | UPT1_RSS_HASH_TYPE_TCP_IPV6;
1643 	rss->hash_func = UPT1_RSS_HASH_FUNC_TOEPLITZ;
1644 	rss->hash_key_size = UPT1_RSS_MAX_KEY_SIZE;
1645 	rss->ind_table_size = UPT1_RSS_MAX_IND_TABLE_SIZE;
1646 	memcpy(rss->hash_key, rss_key, UPT1_RSS_MAX_KEY_SIZE);
1647 
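	/* Spread the indirection table entries evenly across the Rx queues. */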
1648 	for (i = 0; i < UPT1_RSS_MAX_IND_TABLE_SIZE; i++)
1649 		rss->ind_table[i] = i % sc->vmx_nrxqueues;
1650 }
1651 
1652 static void
1653 vmxnet3_reinit_shared_data(struct vmxnet3_softc *sc)
1654 {
1655 	struct ifnet *ifp;
1656 	struct vmxnet3_driver_shared *ds;
1657 
1658 	ifp = sc->vmx_ifp;
1659 	ds = sc->vmx_ds;
1660 
1661 	ds->mtu = ifp->if_mtu;
1662 	ds->ntxqueue = sc->vmx_ntxqueues;
1663 	ds->nrxqueue = sc->vmx_nrxqueues;
1664 
1665 	ds->upt_features = 0;
1666 	if (ifp->if_capenable & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6))
1667 		ds->upt_features |= UPT1_F_CSUM;
1668 	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1669 		ds->upt_features |= UPT1_F_VLAN;
1670 	if (ifp->if_capenable & IFCAP_LRO)
1671 		ds->upt_features |= UPT1_F_LRO;
1672 
1673 	if (sc->vmx_flags & VMXNET3_FLAG_RSS) {
1674 		ds->upt_features |= UPT1_F_RSS;
1675 		vmxnet3_reinit_rss_shared_data(sc);
1676 	}
1677 
1678 	vmxnet3_write_bar1(sc, VMXNET3_BAR1_DSL, sc->vmx_ds_dma.dma_paddr);
1679 	vmxnet3_write_bar1(sc, VMXNET3_BAR1_DSH,
1680 	    (uint64_t) sc->vmx_ds_dma.dma_paddr >> 32);
1681 }
1682 
1683 static int
1684 vmxnet3_alloc_data(struct vmxnet3_softc *sc)
1685 {
1686 	int error;
1687 
1688 	error = vmxnet3_alloc_shared_data(sc);
1689 	if (error)
1690 		return (error);
1691 
1692 	error = vmxnet3_alloc_queue_data(sc);
1693 	if (error)
1694 		return (error);
1695 
1696 	error = vmxnet3_alloc_mcast_table(sc);
1697 	if (error)
1698 		return (error);
1699 
1700 	vmxnet3_init_shared_data(sc);
1701 
1702 	return (0);
1703 }
1704 
1705 static void
1706 vmxnet3_free_data(struct vmxnet3_softc *sc)
1707 {
1708 
1709 	vmxnet3_free_mcast_table(sc);
1710 	vmxnet3_free_queue_data(sc);
1711 	vmxnet3_free_shared_data(sc);
1712 }
1713 
1714 static int
1715 vmxnet3_setup_interface(struct vmxnet3_softc *sc)
1716 {
1717 	device_t dev;
1718 	struct ifnet *ifp;
1719 
1720 	dev = sc->vmx_dev;
1721 
1722 	ifp = sc->vmx_ifp = if_alloc(IFT_ETHER);
1723 	if (ifp == NULL) {
1724 		device_printf(dev, "cannot allocate ifnet structure\n");
1725 		return (ENOSPC);
1726 	}
1727 
1728 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1729 #if __FreeBSD_version < 1000025
1730 	ifp->if_baudrate = 1000000000;
1731 #elif __FreeBSD_version < 1100011
1732 	if_initbaudrate(ifp, IF_Gbps(10));
1733 #else
1734 	ifp->if_baudrate = IF_Gbps(10);
1735 #endif
1736 	ifp->if_softc = sc;
1737 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1738 	ifp->if_init = vmxnet3_init;
1739 	ifp->if_ioctl = vmxnet3_ioctl;
1740 	ifp->if_get_counter = vmxnet3_get_counter;
1741 	ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
1742 	ifp->if_hw_tsomaxsegcount = VMXNET3_TX_MAXSEGS;
1743 	ifp->if_hw_tsomaxsegsize = VMXNET3_TX_MAXSEGSIZE;
1744 
1745 #ifdef VMXNET3_LEGACY_TX
1746 	ifp->if_start = vmxnet3_start;
1747 	ifp->if_snd.ifq_drv_maxlen = sc->vmx_ntxdescs - 1;
1748 	IFQ_SET_MAXLEN(&ifp->if_snd, sc->vmx_ntxdescs - 1);
1749 	IFQ_SET_READY(&ifp->if_snd);
1750 #else
1751 	ifp->if_transmit = vmxnet3_txq_mq_start;
1752 	ifp->if_qflush = vmxnet3_qflush;
1753 #endif
1754 
1755 	vmxnet3_get_lladdr(sc);
1756 	ether_ifattach(ifp, sc->vmx_lladdr);
1757 
1758 	ifp->if_capabilities |= IFCAP_RXCSUM | IFCAP_TXCSUM;
1759 	ifp->if_capabilities |= IFCAP_RXCSUM_IPV6 | IFCAP_TXCSUM_IPV6;
1760 	ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_TSO6;
1761 	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |
1762 	    IFCAP_VLAN_HWCSUM;
1763 	ifp->if_capenable = ifp->if_capabilities;
1764 
1765 	/* These capabilities are not enabled by default. */
1766 	ifp->if_capabilities |= IFCAP_LRO | IFCAP_VLAN_HWFILTER;
1767 
1768 	sc->vmx_vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
1769 	    vmxnet3_register_vlan, sc, EVENTHANDLER_PRI_FIRST);
1770 	sc->vmx_vlan_detach = EVENTHANDLER_REGISTER(vlan_config,
1771 	    vmxnet3_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);
1772 
1773 	ifmedia_init(&sc->vmx_media, 0, vmxnet3_media_change,
1774 	    vmxnet3_media_status);
1775 	ifmedia_add(&sc->vmx_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1776 	ifmedia_set(&sc->vmx_media, IFM_ETHER | IFM_AUTO);
1777 
1778 	return (0);
1779 }
1780 
1781 static void
1782 vmxnet3_evintr(struct vmxnet3_softc *sc)
1783 {
1784 	device_t dev;
1785 	struct ifnet *ifp;
1786 	struct vmxnet3_txq_shared *ts;
1787 	struct vmxnet3_rxq_shared *rs;
1788 	uint32_t event;
1789 	int reset;
1790 
1791 	dev = sc->vmx_dev;
1792 	ifp = sc->vmx_ifp;
1793 	reset = 0;
1794 
1795 	VMXNET3_CORE_LOCK(sc);
1796 
1797 	/* Clear events. */
1798 	event = sc->vmx_ds->event;
1799 	vmxnet3_write_bar1(sc, VMXNET3_BAR1_EVENT, event);
1800 
1801 	if (event & VMXNET3_EVENT_LINK) {
1802 		vmxnet3_link_status(sc);
1803 		if (sc->vmx_link_active != 0)
1804 			vmxnet3_tx_start_all(sc);
1805 	}
1806 
1807 	if (event & (VMXNET3_EVENT_TQERROR | VMXNET3_EVENT_RQERROR)) {
1808 		reset = 1;
1809 		vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_STATUS);
1810 		ts = sc->vmx_txq[0].vxtxq_ts;
1811 		if (ts->stopped != 0)
1812 			device_printf(dev, "Tx queue error %#x\n", ts->error);
1813 		rs = sc->vmx_rxq[0].vxrxq_rs;
1814 		if (rs->stopped != 0)
1815 			device_printf(dev, "Rx queue error %#x\n", rs->error);
1816 		device_printf(dev, "Rx/Tx queue error event ... resetting\n");
1817 	}
1818 
1819 	if (event & VMXNET3_EVENT_DIC)
1820 		device_printf(dev, "device implementation change event\n");
1821 	if (event & VMXNET3_EVENT_DEBUG)
1822 		device_printf(dev, "debug event\n");
1823 
1824 	if (reset != 0) {
1825 		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1826 		vmxnet3_init_locked(sc);
1827 	}
1828 
1829 	VMXNET3_CORE_UNLOCK(sc);
1830 }
1831 
1832 static void
1833 vmxnet3_txq_eof(struct vmxnet3_txqueue *txq)
1834 {
1835 	struct vmxnet3_softc *sc;
1836 	struct ifnet *ifp;
1837 	struct vmxnet3_txring *txr;
1838 	struct vmxnet3_comp_ring *txc;
1839 	struct vmxnet3_txcompdesc *txcd;
1840 	struct vmxnet3_txbuf *txb;
1841 	struct mbuf *m;
1842 	u_int sop;
1843 
1844 	sc = txq->vxtxq_sc;
1845 	ifp = sc->vmx_ifp;
1846 	txr = &txq->vxtxq_cmd_ring;
1847 	txc = &txq->vxtxq_comp_ring;
1848 
1849 	VMXNET3_TXQ_LOCK_ASSERT(txq);
1850 
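	/*
	 * Walk the completion ring, freeing the mbuf of each completed
	 * packet and advancing the command ring past its end-of-packet
	 * descriptor.
	 */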
1851 	for (;;) {
1852 		txcd = &txc->vxcr_u.txcd[txc->vxcr_next];
1853 		if (txcd->gen != txc->vxcr_gen)
1854 			break;
1855 		vmxnet3_barrier(sc, VMXNET3_BARRIER_RD);
1856 
1857 		if (++txc->vxcr_next == txc->vxcr_ndesc) {
1858 			txc->vxcr_next = 0;
1859 			txc->vxcr_gen ^= 1;
1860 		}
1861 
1862 		sop = txr->vxtxr_next;
1863 		txb = &txr->vxtxr_txbuf[sop];
1864 
1865 		if ((m = txb->vtxb_m) != NULL) {
1866 			bus_dmamap_sync(txr->vxtxr_txtag, txb->vtxb_dmamap,
1867 			    BUS_DMASYNC_POSTWRITE);
1868 			bus_dmamap_unload(txr->vxtxr_txtag, txb->vtxb_dmamap);
1869 
1870 			txq->vxtxq_stats.vmtxs_opackets++;
1871 			txq->vxtxq_stats.vmtxs_obytes += m->m_pkthdr.len;
1872 			if (m->m_flags & M_MCAST)
1873 				txq->vxtxq_stats.vmtxs_omcasts++;
1874 
1875 			m_freem(m);
1876 			txb->vtxb_m = NULL;
1877 		}
1878 
1879 		txr->vxtxr_next = (txcd->eop_idx + 1) % txr->vxtxr_ndesc;
1880 	}
1881 
1882 	if (txr->vxtxr_head == txr->vxtxr_next)
1883 		txq->vxtxq_watchdog = 0;
1884 }
1885 
1886 static int
1887 vmxnet3_newbuf(struct vmxnet3_softc *sc, struct vmxnet3_rxring *rxr)
1888 {
1889 	struct ifnet *ifp;
1890 	struct mbuf *m;
1891 	struct vmxnet3_rxdesc *rxd;
1892 	struct vmxnet3_rxbuf *rxb;
1893 	bus_dma_tag_t tag;
1894 	bus_dmamap_t dmap;
1895 	bus_dma_segment_t segs[1];
1896 	int idx, clsize, btype, flags, nsegs, error;
1897 
1898 	ifp = sc->vmx_ifp;
1899 	tag = rxr->vxrxr_rxtag;
1900 	dmap = rxr->vxrxr_spare_dmap;
1901 	idx = rxr->vxrxr_fill;
1902 	rxd = &rxr->vxrxr_rxd[idx];
1903 	rxb = &rxr->vxrxr_rxbuf[idx];
1904 
1905 #ifdef VMXNET3_FAILPOINTS
1906 	KFAIL_POINT_CODE(VMXNET3_FP, newbuf, return ENOBUFS);
1907 	if (rxr->vxrxr_rid != 0)
1908 		KFAIL_POINT_CODE(VMXNET3_FP, newbuf_body_only, return ENOBUFS);
1909 #endif
1910 
1911 	if (rxr->vxrxr_rid == 0 && (idx % sc->vmx_rx_max_chain) == 0) {
1912 		flags = M_PKTHDR;
1913 		clsize = MCLBYTES;
1914 		btype = VMXNET3_BTYPE_HEAD;
1915 	} else {
1916 #if __FreeBSD_version < 902001
1917 		/*
1918 		 * These mbufs will never be used for the start of a frame.
1919 		 * Roughly prior to branching releng/9.2, load_mbuf_sg()
1920 		 * required the mbuf to always be a packet header. Avoid
1921 		 * unnecessary mbuf initialization in newer versions where
1922 		 * that is not the case.
1923 		 */
1924 		flags = M_PKTHDR;
1925 #else
1926 		flags = 0;
1927 #endif
1928 		clsize = MJUMPAGESIZE;
1929 		btype = VMXNET3_BTYPE_BODY;
1930 	}
1931 
1932 	m = m_getjcl(M_NOWAIT, MT_DATA, flags, clsize);
1933 	if (m == NULL) {
1934 		sc->vmx_stats.vmst_mgetcl_failed++;
1935 		return (ENOBUFS);
1936 	}
1937 
1938 	if (btype == VMXNET3_BTYPE_HEAD) {
1939 		m->m_len = m->m_pkthdr.len = clsize;
1940 		m_adj(m, ETHER_ALIGN);
1941 	} else
1942 		m->m_len = clsize;
1943 
1944 	error = bus_dmamap_load_mbuf_sg(tag, dmap, m, &segs[0], &nsegs,
1945 	    BUS_DMA_NOWAIT);
1946 	if (error) {
1947 		m_freem(m);
1948 		sc->vmx_stats.vmst_mbuf_load_failed++;
1949 		return (error);
1950 	}
1951 	KASSERT(nsegs == 1,
1952 	    ("%s: mbuf %p with too many segments %d", __func__, m, nsegs));
1953 #if __FreeBSD_version < 902001
1954 	if (btype == VMXNET3_BTYPE_BODY)
1955 		m->m_flags &= ~M_PKTHDR;
1956 #endif
1957 
1958 	if (rxb->vrxb_m != NULL) {
1959 		bus_dmamap_sync(tag, rxb->vrxb_dmamap, BUS_DMASYNC_POSTREAD);
1960 		bus_dmamap_unload(tag, rxb->vrxb_dmamap);
1961 	}
1962 
1963 	rxr->vxrxr_spare_dmap = rxb->vrxb_dmamap;
1964 	rxb->vrxb_dmamap = dmap;
1965 	rxb->vrxb_m = m;
1966 
1967 	rxd->addr = segs[0].ds_addr;
1968 	rxd->len = segs[0].ds_len;
1969 	rxd->btype = btype;
1970 	rxd->gen = rxr->vxrxr_gen;
1971 
1972 	vmxnet3_rxr_increment_fill(rxr);
1973 	return (0);
1974 }
1975 
1976 static void
1977 vmxnet3_rxq_eof_discard(struct vmxnet3_rxqueue *rxq,
1978     struct vmxnet3_rxring *rxr, int idx)
1979 {
1980 	struct vmxnet3_rxdesc *rxd;
1981 
1982 	rxd = &rxr->vxrxr_rxd[idx];
1983 	rxd->gen = rxr->vxrxr_gen;
1984 	vmxnet3_rxr_increment_fill(rxr);
1985 }
1986 
1987 static void
1988 vmxnet3_rxq_discard_chain(struct vmxnet3_rxqueue *rxq)
1989 {
1990 	struct vmxnet3_softc *sc;
1991 	struct vmxnet3_rxring *rxr;
1992 	struct vmxnet3_comp_ring *rxc;
1993 	struct vmxnet3_rxcompdesc *rxcd;
1994 	int idx, eof;
1995 
1996 	sc = rxq->vxrxq_sc;
1997 	rxc = &rxq->vxrxq_comp_ring;
1998 
1999 	do {
2000 		rxcd = &rxc->vxcr_u.rxcd[rxc->vxcr_next];
2001 		if (rxcd->gen != rxc->vxcr_gen)
2002 			break;		/* Not expected. */
2003 		vmxnet3_barrier(sc, VMXNET3_BARRIER_RD);
2004 
2005 		if (++rxc->vxcr_next == rxc->vxcr_ndesc) {
2006 			rxc->vxcr_next = 0;
2007 			rxc->vxcr_gen ^= 1;
2008 		}
2009 
2010 		idx = rxcd->rxd_idx;
2011 		eof = rxcd->eop;
2012 		if (rxcd->qid < sc->vmx_nrxqueues)
2013 			rxr = &rxq->vxrxq_cmd_ring[0];
2014 		else
2015 			rxr = &rxq->vxrxq_cmd_ring[1];
2016 		vmxnet3_rxq_eof_discard(rxq, rxr, idx);
2017 	} while (!eof);
2018 }
2019 
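/*
 * Example of how the Rx checksum bits map to mbuf flags: for a
 * non-fragmented TCP or UDP packet over IPv4 whose checksums the host
 * already verified, vmxnet3_rx_csum() below sets CSUM_IP_CHECKED |
 * CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR and csum_data to
 * 0xFFFF, so the stack skips its own verification of both the IP header
 * checksum and the transport checksum.
 */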
2020 static void
2021 vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *rxcd, struct mbuf *m)
2022 {
2023 
2024 	if (rxcd->ipv4) {
2025 		m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2026 		if (rxcd->ipcsum_ok)
2027 			m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2028 	}
2029 
2030 	if (!rxcd->fragment) {
2031 		if (rxcd->csum_ok && (rxcd->tcp || rxcd->udp)) {
2032 			m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
2033 			    CSUM_PSEUDO_HDR;
2034 			m->m_pkthdr.csum_data = 0xFFFF;
2035 		}
2036 	}
2037 }
2038 
2039 static void
2040 vmxnet3_rxq_input(struct vmxnet3_rxqueue *rxq,
2041     struct vmxnet3_rxcompdesc *rxcd, struct mbuf *m)
2042 {
2043 	struct vmxnet3_softc *sc;
2044 	struct ifnet *ifp;
2045 
2046 	sc = rxq->vxrxq_sc;
2047 	ifp = sc->vmx_ifp;
2048 
2049 	if (rxcd->error) {
2050 		rxq->vxrxq_stats.vmrxs_ierrors++;
2051 		m_freem(m);
2052 		return;
2053 	}
2054 
2055 #ifdef notyet
2056 	switch (rxcd->rss_type) {
2057 	case VMXNET3_RCD_RSS_TYPE_IPV4:
2058 		m->m_pkthdr.flowid = rxcd->rss_hash;
2059 		M_HASHTYPE_SET(m, M_HASHTYPE_RSS_IPV4);
2060 		break;
2061 	case VMXNET3_RCD_RSS_TYPE_TCPIPV4:
2062 		m->m_pkthdr.flowid = rxcd->rss_hash;
2063 		M_HASHTYPE_SET(m, M_HASHTYPE_RSS_TCP_IPV4);
2064 		break;
2065 	case VMXNET3_RCD_RSS_TYPE_IPV6:
2066 		m->m_pkthdr.flowid = rxcd->rss_hash;
2067 		M_HASHTYPE_SET(m, M_HASHTYPE_RSS_IPV6);
2068 		break;
2069 	case VMXNET3_RCD_RSS_TYPE_TCPIPV6:
2070 		m->m_pkthdr.flowid = rxcd->rss_hash;
2071 		M_HASHTYPE_SET(m, M_HASHTYPE_RSS_TCP_IPV6);
2072 		break;
2073 	default: /* VMXNET3_RCD_RSS_TYPE_NONE */
2074 		m->m_pkthdr.flowid = rxq->vxrxq_id;
2075 		M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
2076 		break;
2077 	}
2078 #else
2079 	m->m_pkthdr.flowid = rxq->vxrxq_id;
2080 	M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
2081 #endif
2082 
2083 	if (!rxcd->no_csum)
2084 		vmxnet3_rx_csum(rxcd, m);
2085 	if (rxcd->vlan) {
2086 		m->m_flags |= M_VLANTAG;
2087 		m->m_pkthdr.ether_vtag = rxcd->vtag;
2088 	}
2089 
2090 	rxq->vxrxq_stats.vmrxs_ipackets++;
2091 	rxq->vxrxq_stats.vmrxs_ibytes += m->m_pkthdr.len;
2092 
2093 	VMXNET3_RXQ_UNLOCK(rxq);
2094 	(*ifp->if_input)(ifp, m);
2095 	VMXNET3_RXQ_LOCK(rxq);
2096 }
2097 
2098 static void
2099 vmxnet3_rxq_eof(struct vmxnet3_rxqueue *rxq)
2100 {
2101 	struct vmxnet3_softc *sc;
2102 	struct ifnet *ifp;
2103 	struct vmxnet3_rxring *rxr;
2104 	struct vmxnet3_comp_ring *rxc;
2105 	struct vmxnet3_rxdesc *rxd;
2106 	struct vmxnet3_rxcompdesc *rxcd;
2107 	struct mbuf *m, *m_head, *m_tail;
2108 	int idx, length;
2109 
2110 	sc = rxq->vxrxq_sc;
2111 	ifp = sc->vmx_ifp;
2112 	rxc = &rxq->vxrxq_comp_ring;
2113 
2114 	VMXNET3_RXQ_LOCK_ASSERT(rxq);
2115 
2116 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2117 		return;
2118 
2119 	m_head = rxq->vxrxq_mhead;
2120 	rxq->vxrxq_mhead = NULL;
2121 	m_tail = rxq->vxrxq_mtail;
2122 	rxq->vxrxq_mtail = NULL;
2123 	MPASS(m_head == NULL || m_tail != NULL);
2124 
2125 	for (;;) {
2126 		rxcd = &rxc->vxcr_u.rxcd[rxc->vxcr_next];
2127 		if (rxcd->gen != rxc->vxcr_gen) {
2128 			rxq->vxrxq_mhead = m_head;
2129 			rxq->vxrxq_mtail = m_tail;
2130 			break;
2131 		}
2132 		vmxnet3_barrier(sc, VMXNET3_BARRIER_RD);
2133 
2134 		if (++rxc->vxcr_next == rxc->vxcr_ndesc) {
2135 			rxc->vxcr_next = 0;
2136 			rxc->vxcr_gen ^= 1;
2137 		}
2138 
2139 		idx = rxcd->rxd_idx;
2140 		length = rxcd->len;
2141 		if (rxcd->qid < sc->vmx_nrxqueues)
2142 			rxr = &rxq->vxrxq_cmd_ring[0];
2143 		else
2144 			rxr = &rxq->vxrxq_cmd_ring[1];
2145 		rxd = &rxr->vxrxr_rxd[idx];
2146 
2147 		m = rxr->vxrxr_rxbuf[idx].vrxb_m;
2148 		KASSERT(m != NULL, ("%s: queue %d idx %d without mbuf",
2149 		    __func__, rxcd->qid, idx));
2150 
2151 		/*
2152 		 * The host may skip descriptors. We detect this when the
2153 		 * completed index does not match the ring's fill index. Catch
2154 		 * up with the host now.
2155 		 */
2156 		if (__predict_false(rxr->vxrxr_fill != idx)) {
2157 			while (rxr->vxrxr_fill != idx) {
2158 				rxr->vxrxr_rxd[rxr->vxrxr_fill].gen =
2159 				    rxr->vxrxr_gen;
2160 				vmxnet3_rxr_increment_fill(rxr);
2161 			}
2162 		}
2163 
2164 		if (rxcd->sop) {
2165 			KASSERT(rxd->btype == VMXNET3_BTYPE_HEAD,
2166 			    ("%s: start of frame w/o head buffer", __func__));
2167 			KASSERT(rxr == &rxq->vxrxq_cmd_ring[0],
2168 			    ("%s: start of frame not in ring 0", __func__));
2169 			KASSERT((idx % sc->vmx_rx_max_chain) == 0,
2170 			    ("%s: start of frame at unexcepted index %d (%d)",
2171 			     __func__, idx, sc->vmx_rx_max_chain));
2172 			KASSERT(m_head == NULL,
2173 			    ("%s: duplicate start of frame?", __func__));
2174 
2175 			if (length == 0) {
2176 				/* Just ignore this descriptor. */
2177 				vmxnet3_rxq_eof_discard(rxq, rxr, idx);
2178 				goto nextp;
2179 			}
2180 
2181 			if (vmxnet3_newbuf(sc, rxr) != 0) {
2182 				rxq->vxrxq_stats.vmrxs_iqdrops++;
2183 				vmxnet3_rxq_eof_discard(rxq, rxr, idx);
2184 				if (!rxcd->eop)
2185 					vmxnet3_rxq_discard_chain(rxq);
2186 				goto nextp;
2187 			}
2188 
2189 			m->m_pkthdr.rcvif = ifp;
2190 			m->m_pkthdr.len = m->m_len = length;
2191 			m->m_pkthdr.csum_flags = 0;
2192 			m_head = m_tail = m;
2193 
2194 		} else {
2195 			KASSERT(rxd->btype == VMXNET3_BTYPE_BODY,
2196 			    ("%s: non start of frame w/o body buffer", __func__));
2197 			KASSERT(m_head != NULL,
2198 			    ("%s: frame not started?", __func__));
2199 
2200 			if (vmxnet3_newbuf(sc, rxr) != 0) {
2201 				rxq->vxrxq_stats.vmrxs_iqdrops++;
2202 				vmxnet3_rxq_eof_discard(rxq, rxr, idx);
2203 				if (!rxcd->eop)
2204 					vmxnet3_rxq_discard_chain(rxq);
2205 				m_freem(m_head);
2206 				m_head = m_tail = NULL;
2207 				goto nextp;
2208 			}
2209 
2210 			m->m_len = length;
2211 			m_head->m_pkthdr.len += length;
2212 			m_tail->m_next = m;
2213 			m_tail = m;
2214 		}
2215 
2216 		if (rxcd->eop) {
2217 			vmxnet3_rxq_input(rxq, rxcd, m_head);
2218 			m_head = m_tail = NULL;
2219 
2220 			/* Must recheck after dropping the Rx lock. */
2221 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2222 				break;
2223 		}
2224 
2225 nextp:
2226 		if (__predict_false(rxq->vxrxq_rs->update_rxhead)) {
2227 			int qid = rxcd->qid;
2228 			bus_size_t r;
2229 
2230 			idx = (idx + 1) % rxr->vxrxr_ndesc;
2231 			if (qid >= sc->vmx_nrxqueues) {
2232 				qid -= sc->vmx_nrxqueues;
2233 				r = VMXNET3_BAR0_RXH2(qid);
2234 			} else
2235 				r = VMXNET3_BAR0_RXH1(qid);
2236 			vmxnet3_write_bar0(sc, r, idx);
2237 		}
2238 	}
2239 }
2240 
2241 static void
2242 vmxnet3_legacy_intr(void *xsc)
2243 {
2244 	struct vmxnet3_softc *sc;
2245 	struct vmxnet3_rxqueue *rxq;
2246 	struct vmxnet3_txqueue *txq;
2247 
2248 	sc = xsc;
2249 	rxq = &sc->vmx_rxq[0];
2250 	txq = &sc->vmx_txq[0];
2251 
2252 	if (sc->vmx_intr_type == VMXNET3_IT_LEGACY) {
2253 		if (vmxnet3_read_bar1(sc, VMXNET3_BAR1_INTR) == 0)
2254 			return;
2255 	}
2256 	if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
2257 		vmxnet3_disable_all_intrs(sc);
2258 
2259 	if (sc->vmx_ds->event != 0)
2260 		vmxnet3_evintr(sc);
2261 
2262 	VMXNET3_RXQ_LOCK(rxq);
2263 	vmxnet3_rxq_eof(rxq);
2264 	VMXNET3_RXQ_UNLOCK(rxq);
2265 
2266 	VMXNET3_TXQ_LOCK(txq);
2267 	vmxnet3_txq_eof(txq);
2268 	vmxnet3_txq_start(txq);
2269 	VMXNET3_TXQ_UNLOCK(txq);
2270 
2271 	vmxnet3_enable_all_intrs(sc);
2272 }
2273 
2274 static void
2275 vmxnet3_txq_intr(void *xtxq)
2276 {
2277 	struct vmxnet3_softc *sc;
2278 	struct vmxnet3_txqueue *txq;
2279 
2280 	txq = xtxq;
2281 	sc = txq->vxtxq_sc;
2282 
2283 	if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
2284 		vmxnet3_disable_intr(sc, txq->vxtxq_intr_idx);
2285 
2286 	VMXNET3_TXQ_LOCK(txq);
2287 	vmxnet3_txq_eof(txq);
2288 	vmxnet3_txq_start(txq);
2289 	VMXNET3_TXQ_UNLOCK(txq);
2290 
2291 	vmxnet3_enable_intr(sc, txq->vxtxq_intr_idx);
2292 }
2293 
2294 static void
2295 vmxnet3_rxq_intr(void *xrxq)
2296 {
2297 	struct vmxnet3_softc *sc;
2298 	struct vmxnet3_rxqueue *rxq;
2299 
2300 	rxq = xrxq;
2301 	sc = rxq->vxrxq_sc;
2302 
2303 	if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
2304 		vmxnet3_disable_intr(sc, rxq->vxrxq_intr_idx);
2305 
2306 	VMXNET3_RXQ_LOCK(rxq);
2307 	vmxnet3_rxq_eof(rxq);
2308 	VMXNET3_RXQ_UNLOCK(rxq);
2309 
2310 	vmxnet3_enable_intr(sc, rxq->vxrxq_intr_idx);
2311 }
2312 
2313 static void
2314 vmxnet3_event_intr(void *xsc)
2315 {
2316 	struct vmxnet3_softc *sc;
2317 
2318 	sc = xsc;
2319 
2320 	if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
2321 		vmxnet3_disable_intr(sc, sc->vmx_event_intr_idx);
2322 
2323 	if (sc->vmx_ds->event != 0)
2324 		vmxnet3_evintr(sc);
2325 
2326 	vmxnet3_enable_intr(sc, sc->vmx_event_intr_idx);
2327 }
2328 
2329 static void
2330 vmxnet3_txstop(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *txq)
2331 {
2332 	struct vmxnet3_txring *txr;
2333 	struct vmxnet3_txbuf *txb;
2334 	int i;
2335 
2336 	txr = &txq->vxtxq_cmd_ring;
2337 
2338 	for (i = 0; i < txr->vxtxr_ndesc; i++) {
2339 		txb = &txr->vxtxr_txbuf[i];
2340 
2341 		if (txb->vtxb_m == NULL)
2342 			continue;
2343 
2344 		bus_dmamap_sync(txr->vxtxr_txtag, txb->vtxb_dmamap,
2345 		    BUS_DMASYNC_POSTWRITE);
2346 		bus_dmamap_unload(txr->vxtxr_txtag, txb->vtxb_dmamap);
2347 		m_freem(txb->vtxb_m);
2348 		txb->vtxb_m = NULL;
2349 	}
2350 }
2351 
2352 static void
2353 vmxnet3_rxstop(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rxq)
2354 {
2355 	struct vmxnet3_rxring *rxr;
2356 	struct vmxnet3_rxbuf *rxb;
2357 	int i, j;
2358 
2359 	if (rxq->vxrxq_mhead != NULL) {
2360 		m_freem(rxq->vxrxq_mhead);
2361 		rxq->vxrxq_mhead = NULL;
2362 		rxq->vxrxq_mtail = NULL;
2363 	}
2364 
2365 	for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
2366 		rxr = &rxq->vxrxq_cmd_ring[i];
2367 
2368 		for (j = 0; j < rxr->vxrxr_ndesc; j++) {
2369 			rxb = &rxr->vxrxr_rxbuf[j];
2370 
2371 			if (rxb->vrxb_m == NULL)
2372 				continue;
2373 
2374 			bus_dmamap_sync(rxr->vxrxr_rxtag, rxb->vrxb_dmamap,
2375 			    BUS_DMASYNC_POSTREAD);
2376 			bus_dmamap_unload(rxr->vxrxr_rxtag, rxb->vrxb_dmamap);
2377 			m_freem(rxb->vrxb_m);
2378 			rxb->vrxb_m = NULL;
2379 		}
2380 	}
2381 }
2382 
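/*
 * The empty lock/unlock pairs below act as a rendezvous: once each queue
 * lock has been acquired and released, any interrupt handler that was
 * running inside that queue's critical section has finished, so it is
 * safe for the caller to tear the queues down.
 */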
2383 static void
2384 vmxnet3_stop_rendezvous(struct vmxnet3_softc *sc)
2385 {
2386 	struct vmxnet3_rxqueue *rxq;
2387 	struct vmxnet3_txqueue *txq;
2388 	int i;
2389 
2390 	for (i = 0; i < sc->vmx_nrxqueues; i++) {
2391 		rxq = &sc->vmx_rxq[i];
2392 		VMXNET3_RXQ_LOCK(rxq);
2393 		VMXNET3_RXQ_UNLOCK(rxq);
2394 	}
2395 
2396 	for (i = 0; i < sc->vmx_ntxqueues; i++) {
2397 		txq = &sc->vmx_txq[i];
2398 		VMXNET3_TXQ_LOCK(txq);
2399 		VMXNET3_TXQ_UNLOCK(txq);
2400 	}
2401 }
2402 
2403 static void
2404 vmxnet3_stop(struct vmxnet3_softc *sc)
2405 {
2406 	struct ifnet *ifp;
2407 	int q;
2408 
2409 	ifp = sc->vmx_ifp;
2410 	VMXNET3_CORE_LOCK_ASSERT(sc);
2411 
2412 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2413 	sc->vmx_link_active = 0;
2414 	callout_stop(&sc->vmx_tick);
2415 
2416 	/* Disable interrupts. */
2417 	vmxnet3_disable_all_intrs(sc);
2418 	vmxnet3_write_cmd(sc, VMXNET3_CMD_DISABLE);
2419 
2420 	vmxnet3_stop_rendezvous(sc);
2421 
2422 	for (q = 0; q < sc->vmx_ntxqueues; q++)
2423 		vmxnet3_txstop(sc, &sc->vmx_txq[q]);
2424 	for (q = 0; q < sc->vmx_nrxqueues; q++)
2425 		vmxnet3_rxstop(sc, &sc->vmx_rxq[q]);
2426 
2427 	vmxnet3_write_cmd(sc, VMXNET3_CMD_RESET);
2428 }
2429 
2430 static void
2431 vmxnet3_txinit(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *txq)
2432 {
2433 	struct vmxnet3_txring *txr;
2434 	struct vmxnet3_comp_ring *txc;
2435 
2436 	txr = &txq->vxtxq_cmd_ring;
2437 	txr->vxtxr_head = 0;
2438 	txr->vxtxr_next = 0;
2439 	txr->vxtxr_gen = VMXNET3_INIT_GEN;
2440 	bzero(txr->vxtxr_txd,
2441 	    txr->vxtxr_ndesc * sizeof(struct vmxnet3_txdesc));
2442 
2443 	txc = &txq->vxtxq_comp_ring;
2444 	txc->vxcr_next = 0;
2445 	txc->vxcr_gen = VMXNET3_INIT_GEN;
2446 	bzero(txc->vxcr_u.txcd,
2447 	    txc->vxcr_ndesc * sizeof(struct vmxnet3_txcompdesc));
2448 }
2449 
2450 static int
2451 vmxnet3_rxinit(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rxq)
2452 {
2453 	struct ifnet *ifp;
2454 	struct vmxnet3_rxring *rxr;
2455 	struct vmxnet3_comp_ring *rxc;
2456 	int i, populate, idx, frame_size, error;
2457 
2458 	ifp = sc->vmx_ifp;
2459 	frame_size = ETHER_ALIGN + sizeof(struct ether_vlan_header) +
2460 	    ifp->if_mtu;
2461 
2462 	/*
2463 	 * If the MTU causes us to exceed what a regular sized cluster can
2464 	 * handle, we allocate a second MJUMPAGESIZE cluster after it in
2465 	 * ring 0. If in use, ring 1 always contains MJUMPAGESIZE clusters.
2466 	 *
2467 	 * Keep rx_max_chain a divisor of the maximum Rx ring size to make
2468 	 * our life easier. We do not support changing the ring size after
2469 	 * the attach.
2470 	 */
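	/*
	 * For example, with the default 1500 byte MTU the frame size is
	 * 2 (ETHER_ALIGN) + 18 (ether_vlan_header) + 1500 = 1520 bytes,
	 * which fits in a single MCLBYTES (2048 byte) cluster, so each
	 * received frame occupies one ring 0 descriptor. With a 9000 byte
	 * MTU the frame no longer fits and every other ring 0 descriptor
	 * becomes the MJUMPAGESIZE body buffer of a two-buffer chain.
	 */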
2471 	if (frame_size <= MCLBYTES)
2472 		sc->vmx_rx_max_chain = 1;
2473 	else
2474 		sc->vmx_rx_max_chain = 2;
2475 
2476 	/*
2477 	 * Only populate ring 1 if the configuration will take advantage
2478 	 * of it. That is, either when LRO is enabled or when the frame
2479 	 * size exceeds what ring 0 can contain.
2480 	 */
2481 	if ((ifp->if_capenable & IFCAP_LRO) == 0 &&
2482 	    frame_size <= MCLBYTES + MJUMPAGESIZE)
2483 		populate = 1;
2484 	else
2485 		populate = VMXNET3_RXRINGS_PERQ;
2486 
2487 	for (i = 0; i < populate; i++) {
2488 		rxr = &rxq->vxrxq_cmd_ring[i];
2489 		rxr->vxrxr_fill = 0;
2490 		rxr->vxrxr_gen = VMXNET3_INIT_GEN;
2491 		bzero(rxr->vxrxr_rxd,
2492 		    rxr->vxrxr_ndesc * sizeof(struct vmxnet3_rxdesc));
2493 
2494 		for (idx = 0; idx < rxr->vxrxr_ndesc; idx++) {
2495 			error = vmxnet3_newbuf(sc, rxr);
2496 			if (error)
2497 				return (error);
2498 		}
2499 	}
2500 
2501 	for (/**/; i < VMXNET3_RXRINGS_PERQ; i++) {
2502 		rxr = &rxq->vxrxq_cmd_ring[i];
2503 		rxr->vxrxr_fill = 0;
2504 		rxr->vxrxr_gen = 0;
2505 		bzero(rxr->vxrxr_rxd,
2506 		    rxr->vxrxr_ndesc * sizeof(struct vmxnet3_rxdesc));
2507 	}
2508 
2509 	rxc = &rxq->vxrxq_comp_ring;
2510 	rxc->vxcr_next = 0;
2511 	rxc->vxcr_gen = VMXNET3_INIT_GEN;
2512 	bzero(rxc->vxcr_u.rxcd,
2513 	    rxc->vxcr_ndesc * sizeof(struct vmxnet3_rxcompdesc));
2514 
2515 	return (0);
2516 }
2517 
2518 static int
2519 vmxnet3_reinit_queues(struct vmxnet3_softc *sc)
2520 {
2521 	device_t dev;
2522 	int q, error;
2523 
2524 	dev = sc->vmx_dev;
2525 
2526 	for (q = 0; q < sc->vmx_ntxqueues; q++)
2527 		vmxnet3_txinit(sc, &sc->vmx_txq[q]);
2528 
2529 	for (q = 0; q < sc->vmx_nrxqueues; q++) {
2530 		error = vmxnet3_rxinit(sc, &sc->vmx_rxq[q]);
2531 		if (error) {
2532 			device_printf(dev, "cannot populate Rx queue %d\n", q);
2533 			return (error);
2534 		}
2535 	}
2536 
2537 	return (0);
2538 }
2539 
2540 static int
2541 vmxnet3_enable_device(struct vmxnet3_softc *sc)
2542 {
2543 	int q;
2544 
2545 	if (vmxnet3_read_cmd(sc, VMXNET3_CMD_ENABLE) != 0) {
2546 		device_printf(sc->vmx_dev, "device enable command failed!\n");
2547 		return (1);
2548 	}
2549 
2550 	/* Reset the Rx queue heads. */
2551 	for (q = 0; q < sc->vmx_nrxqueues; q++) {
2552 		vmxnet3_write_bar0(sc, VMXNET3_BAR0_RXH1(q), 0);
2553 		vmxnet3_write_bar0(sc, VMXNET3_BAR0_RXH2(q), 0);
2554 	}
2555 
2556 	return (0);
2557 }
2558 
2559 static void
2560 vmxnet3_reinit_rxfilters(struct vmxnet3_softc *sc)
2561 {
2562 	struct ifnet *ifp;
2563 
2564 	ifp = sc->vmx_ifp;
2565 
2566 	vmxnet3_set_rxfilter(sc);
2567 
2568 	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
2569 		bcopy(sc->vmx_vlan_filter, sc->vmx_ds->vlan_filter,
2570 		    sizeof(sc->vmx_ds->vlan_filter));
2571 	else
2572 		bzero(sc->vmx_ds->vlan_filter,
2573 		    sizeof(sc->vmx_ds->vlan_filter));
2574 	vmxnet3_write_cmd(sc, VMXNET3_CMD_VLAN_FILTER);
2575 }
2576 
2577 static int
2578 vmxnet3_reinit(struct vmxnet3_softc *sc)
2579 {
2580 
2581 	vmxnet3_reinit_interface(sc);
2582 	vmxnet3_reinit_shared_data(sc);
2583 
2584 	if (vmxnet3_reinit_queues(sc) != 0)
2585 		return (ENXIO);
2586 
2587 	if (vmxnet3_enable_device(sc) != 0)
2588 		return (ENXIO);
2589 
2590 	vmxnet3_reinit_rxfilters(sc);
2591 
2592 	return (0);
2593 }
2594 
2595 static void
2596 vmxnet3_init_locked(struct vmxnet3_softc *sc)
2597 {
2598 	struct ifnet *ifp;
2599 
2600 	ifp = sc->vmx_ifp;
2601 
2602 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2603 		return;
2604 
2605 	vmxnet3_stop(sc);
2606 
2607 	if (vmxnet3_reinit(sc) != 0) {
2608 		vmxnet3_stop(sc);
2609 		return;
2610 	}
2611 
2612 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2613 	vmxnet3_link_status(sc);
2614 
2615 	vmxnet3_enable_all_intrs(sc);
2616 	callout_reset(&sc->vmx_tick, hz, vmxnet3_tick, sc);
2617 }
2618 
2619 static void
2620 vmxnet3_init(void *xsc)
2621 {
2622 	struct vmxnet3_softc *sc;
2623 
2624 	sc = xsc;
2625 
2626 	VMXNET3_CORE_LOCK(sc);
2627 	vmxnet3_init_locked(sc);
2628 	VMXNET3_CORE_UNLOCK(sc);
2629 }
2630 
2631 /*
2632  * BMV: Much of this can go away once we finally have offsets in
2633  * the mbuf packet header. Bug andre@.
2634  */
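/*
 * Worked example of the parse below: for an untagged TCP/IPv4 frame with
 * no IP options, *etype is ETHERTYPE_IP, the L3 header begins at offset
 * 14 (sizeof(struct ether_header)), and *start becomes 14 + 20 = 34, the
 * offset of the TCP header. For TSO, the TCP header length (20 bytes
 * without options) is added as well, so the descriptor header length
 * covers the full 54 byte Ethernet + IP + TCP header.
 */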
2635 static int
2636 vmxnet3_txq_offload_ctx(struct vmxnet3_txqueue *txq, struct mbuf *m,
2637     int *etype, int *proto, int *start)
2638 {
2639 	struct ether_vlan_header *evh;
2640 	int offset;
2641 #if defined(INET)
2642 	struct ip *ip = NULL;
2643 	struct ip iphdr;
2644 #endif
2645 #if defined(INET6)
2646 	struct ip6_hdr *ip6 = NULL;
2647 	struct ip6_hdr ip6hdr;
2648 #endif
2649 
2650 	evh = mtod(m, struct ether_vlan_header *);
2651 	if (evh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2652 		/* BMV: We should handle nested VLAN tags too. */
2653 		*etype = ntohs(evh->evl_proto);
2654 		offset = sizeof(struct ether_vlan_header);
2655 	} else {
2656 		*etype = ntohs(evh->evl_encap_proto);
2657 		offset = sizeof(struct ether_header);
2658 	}
2659 
2660 	switch (*etype) {
2661 #if defined(INET)
2662 	case ETHERTYPE_IP:
2663 		if (__predict_false(m->m_len < offset + sizeof(struct ip))) {
2664 			m_copydata(m, offset, sizeof(struct ip),
2665 			    (caddr_t) &iphdr);
2666 			ip = &iphdr;
2667 		} else
2668 			ip = mtodo(m, offset);
2669 		*proto = ip->ip_p;
2670 		*start = offset + (ip->ip_hl << 2);
2671 		break;
2672 #endif
2673 #if defined(INET6)
2674 	case ETHERTYPE_IPV6:
2675 		if (__predict_false(m->m_len <
2676 		    offset + sizeof(struct ip6_hdr))) {
2677 			m_copydata(m, offset, sizeof(struct ip6_hdr),
2678 			    (caddr_t) &ip6hdr);
2679 			ip6 = &ip6hdr;
2680 		} else
2681 			ip6 = mtodo(m, offset);
2682 		*proto = -1;
2683 		*start = ip6_lasthdr(m, offset, IPPROTO_IPV6, proto);
2684 		/* Assert the network stack sent us a valid packet. */
2685 		KASSERT(*start > offset,
2686 		    ("%s: mbuf %p start %d offset %d proto %d", __func__, m,
2687 		    *start, offset, *proto));
2688 		break;
2689 #endif
2690 	default:
2691 		return (EINVAL);
2692 	}
2693 
2694 	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
2695 		struct tcphdr *tcp, tcphdr;
2696 		uint16_t sum;
2697 
2698 		if (__predict_false(*proto != IPPROTO_TCP)) {
2699 			/* Likely failed to correctly parse the mbuf. */
2700 			return (EINVAL);
2701 		}
2702 
2703 		txq->vxtxq_stats.vmtxs_tso++;
2704 
2705 		switch (*etype) {
2706 #if defined(INET)
2707 		case ETHERTYPE_IP:
2708 			sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
2709 			    htons(IPPROTO_TCP));
2710 			break;
2711 #endif
2712 #if defined(INET6)
2713 		case ETHERTYPE_IPV6:
2714 			sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
2715 			break;
2716 #endif
2717 		default:
2718 			sum = 0;
2719 			break;
2720 		}
2721 
2722 		if (m->m_len < *start + sizeof(struct tcphdr)) {
2723 			m_copyback(m, *start + offsetof(struct tcphdr, th_sum),
2724 			    sizeof(uint16_t), (caddr_t) &sum);
2725 			m_copydata(m, *start, sizeof(struct tcphdr),
2726 			    (caddr_t) &tcphdr);
2727 			tcp = &tcphdr;
2728 		} else {
2729 			tcp = mtodo(m, *start);
2730 			tcp->th_sum = sum;
2731 		}
2732 
2733 		/*
2734 		 * For TSO, the size of the protocol header is also
2735 		 * included in the descriptor header size.
2736 		 */
2737 		*start += (tcp->th_off << 2);
2738 	} else
2739 		txq->vxtxq_stats.vmtxs_csum++;
2740 
2741 	return (0);
2742 }
2743 
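/*
 * Load an mbuf chain for transmit. If the initial load fails with EFBIG,
 * meaning the chain has more segments than the Tx DMA tag allows, the
 * chain is collapsed into fewer clusters with m_defrag() and the load is
 * retried once; if that also fails, the mbuf is freed and the failure is
 * counted in the driver statistics.
 */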
2744 static int
2745 vmxnet3_txq_load_mbuf(struct vmxnet3_txqueue *txq, struct mbuf **m0,
2746     bus_dmamap_t dmap, bus_dma_segment_t segs[], int *nsegs)
2747 {
2748 	struct vmxnet3_txring *txr;
2749 	struct mbuf *m;
2750 	bus_dma_tag_t tag;
2751 	int error;
2752 
2753 	txr = &txq->vxtxq_cmd_ring;
2754 	m = *m0;
2755 	tag = txr->vxtxr_txtag;
2756 
2757 	error = bus_dmamap_load_mbuf_sg(tag, dmap, m, segs, nsegs, 0);
2758 	if (error == 0 || error != EFBIG)
2759 		return (error);
2760 
2761 	m = m_defrag(m, M_NOWAIT);
2762 	if (m != NULL) {
2763 		*m0 = m;
2764 		error = bus_dmamap_load_mbuf_sg(tag, dmap, m, segs, nsegs, 0);
2765 	} else
2766 		error = ENOBUFS;
2767 
2768 	if (error) {
2769 		m_freem(*m0);
2770 		*m0 = NULL;
2771 		txq->vxtxq_sc->vmx_stats.vmst_defrag_failed++;
2772 	} else
2773 		txq->vxtxq_sc->vmx_stats.vmst_defragged++;
2774 
2775 	return (error);
2776 }
2777 
2778 static void
2779 vmxnet3_txq_unload_mbuf(struct vmxnet3_txqueue *txq, bus_dmamap_t dmap)
2780 {
2781 	struct vmxnet3_txring *txr;
2782 
2783 	txr = &txq->vxtxq_cmd_ring;
2784 	bus_dmamap_unload(txr->vxtxr_txtag, dmap);
2785 }
2786 
2787 static int
2788 vmxnet3_txq_encap(struct vmxnet3_txqueue *txq, struct mbuf **m0)
2789 {
2790 	struct vmxnet3_softc *sc;
2791 	struct vmxnet3_txring *txr;
2792 	struct vmxnet3_txdesc *txd, *sop;
2793 	struct mbuf *m;
2794 	bus_dmamap_t dmap;
2795 	bus_dma_segment_t segs[VMXNET3_TX_MAXSEGS];
2796 	int i, gen, nsegs, etype, proto, start, error;
2797 
2798 	sc = txq->vxtxq_sc;
2799 	start = 0;
2800 	txd = NULL;
2801 	txr = &txq->vxtxq_cmd_ring;
2802 	dmap = txr->vxtxr_txbuf[txr->vxtxr_head].vtxb_dmamap;
2803 
2804 	error = vmxnet3_txq_load_mbuf(txq, m0, dmap, segs, &nsegs);
2805 	if (error)
2806 		return (error);
2807 
2808 	m = *m0;
2809 	M_ASSERTPKTHDR(m);
2810 	KASSERT(nsegs <= VMXNET3_TX_MAXSEGS,
2811 	    ("%s: mbuf %p with too many segments %d", __func__, m, nsegs));
2812 
2813 	if (VMXNET3_TXRING_AVAIL(txr) < nsegs) {
2814 		txq->vxtxq_stats.vmtxs_full++;
2815 		vmxnet3_txq_unload_mbuf(txq, dmap);
2816 		return (ENOSPC);
2817 	} else if (m->m_pkthdr.csum_flags & VMXNET3_CSUM_ALL_OFFLOAD) {
2818 		error = vmxnet3_txq_offload_ctx(txq, m, &etype, &proto, &start);
2819 		if (error) {
2820 			txq->vxtxq_stats.vmtxs_offload_failed++;
2821 			vmxnet3_txq_unload_mbuf(txq, dmap);
2822 			m_freem(m);
2823 			*m0 = NULL;
2824 			return (error);
2825 		}
2826 	}
2827 
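	/*
	 * The SOP descriptor is written with the inverted generation bit
	 * so the device does not process the chain while it is being
	 * built; the remaining segment descriptors get the current
	 * generation. Only after every segment is written and a write
	 * barrier is issued below is the SOP gen bit flipped, handing the
	 * whole chain to the device at once.
	 */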
2828 	txr->vxtxr_txbuf[txr->vxtxr_head].vtxb_m = m;
2829 	sop = &txr->vxtxr_txd[txr->vxtxr_head];
2830 	gen = txr->vxtxr_gen ^ 1;	/* Still owned by the cpu */
2831 
2832 	for (i = 0; i < nsegs; i++) {
2833 		txd = &txr->vxtxr_txd[txr->vxtxr_head];
2834 
2835 		txd->addr = segs[i].ds_addr;
2836 		txd->len = segs[i].ds_len;
2837 		txd->gen = gen;
2838 		txd->dtype = 0;
2839 		txd->offload_mode = VMXNET3_OM_NONE;
2840 		txd->offload_pos = 0;
2841 		txd->hlen = 0;
2842 		txd->eop = 0;
2843 		txd->compreq = 0;
2844 		txd->vtag_mode = 0;
2845 		txd->vtag = 0;
2846 
2847 		if (++txr->vxtxr_head == txr->vxtxr_ndesc) {
2848 			txr->vxtxr_head = 0;
2849 			txr->vxtxr_gen ^= 1;
2850 		}
2851 		gen = txr->vxtxr_gen;
2852 	}
2853 	txd->eop = 1;
2854 	txd->compreq = 1;
2855 
2856 	if (m->m_flags & M_VLANTAG) {
2857 		sop->vtag_mode = 1;
2858 		sop->vtag = m->m_pkthdr.ether_vtag;
2859 	}
2860 
2861 	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
2862 		sop->offload_mode = VMXNET3_OM_TSO;
2863 		sop->hlen = start;
2864 		sop->offload_pos = m->m_pkthdr.tso_segsz;
2865 	} else if (m->m_pkthdr.csum_flags & (VMXNET3_CSUM_OFFLOAD |
2866 	    VMXNET3_CSUM_OFFLOAD_IPV6)) {
2867 		sop->offload_mode = VMXNET3_OM_CSUM;
2868 		sop->hlen = start;
2869 		sop->offload_pos = start + m->m_pkthdr.csum_data;
2870 	}
2871 
2872 	/* Finally, change the ownership. */
2873 	vmxnet3_barrier(sc, VMXNET3_BARRIER_WR);
2874 	sop->gen ^= 1;
2875 
2876 	txq->vxtxq_ts->npending += nsegs;
2877 	if (txq->vxtxq_ts->npending >= txq->vxtxq_ts->intr_threshold) {
2878 		txq->vxtxq_ts->npending = 0;
2879 		vmxnet3_write_bar0(sc, VMXNET3_BAR0_TXH(txq->vxtxq_id),
2880 		    txr->vxtxr_head);
2881 	}
2882 
2883 	return (0);
2884 }
2885 
2886 #ifdef VMXNET3_LEGACY_TX
2887 
2888 static void
2889 vmxnet3_start_locked(struct ifnet *ifp)
2890 {
2891 	struct vmxnet3_softc *sc;
2892 	struct vmxnet3_txqueue *txq;
2893 	struct vmxnet3_txring *txr;
2894 	struct mbuf *m_head;
2895 	int tx, avail;
2896 
2897 	sc = ifp->if_softc;
2898 	txq = &sc->vmx_txq[0];
2899 	txr = &txq->vxtxq_cmd_ring;
2900 	tx = 0;
2901 
2902 	VMXNET3_TXQ_LOCK_ASSERT(txq);
2903 
2904 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
2905 	    sc->vmx_link_active == 0)
2906 		return;
2907 
2908 	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
2909 		if ((avail = VMXNET3_TXRING_AVAIL(txr)) < 2)
2910 			break;
2911 
2912 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2913 		if (m_head == NULL)
2914 			break;
2915 
2916 		/* Assume worse case if this mbuf is the head of a chain. */
2917 		if (m_head->m_next != NULL && avail < VMXNET3_TX_MAXSEGS) {
2918 			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2919 			break;
2920 		}
2921 
2922 		if (vmxnet3_txq_encap(txq, &m_head) != 0) {
2923 			if (m_head != NULL)
2924 				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2925 			break;
2926 		}
2927 
2928 		tx++;
2929 		ETHER_BPF_MTAP(ifp, m_head);
2930 	}
2931 
2932 	if (tx > 0)
2933 		txq->vxtxq_watchdog = VMXNET3_WATCHDOG_TIMEOUT;
2934 }
2935 
2936 static void
2937 vmxnet3_start(struct ifnet *ifp)
2938 {
2939 	struct vmxnet3_softc *sc;
2940 	struct vmxnet3_txqueue *txq;
2941 
2942 	sc = ifp->if_softc;
2943 	txq = &sc->vmx_txq[0];
2944 
2945 	VMXNET3_TXQ_LOCK(txq);
2946 	vmxnet3_start_locked(ifp);
2947 	VMXNET3_TXQ_UNLOCK(txq);
2948 }
2949 
2950 #else /* !VMXNET3_LEGACY_TX */
2951 
2952 static int
2953 vmxnet3_txq_mq_start_locked(struct vmxnet3_txqueue *txq, struct mbuf *m)
2954 {
2955 	struct vmxnet3_softc *sc;
2956 	struct vmxnet3_txring *txr;
2957 	struct buf_ring *br;
2958 	struct ifnet *ifp;
2959 	int tx, avail, error;
2960 
2961 	sc = txq->vxtxq_sc;
2962 	br = txq->vxtxq_br;
2963 	ifp = sc->vmx_ifp;
2964 	txr = &txq->vxtxq_cmd_ring;
2965 	tx = 0;
2966 	error = 0;
2967 
2968 	VMXNET3_TXQ_LOCK_ASSERT(txq);
2969 
2970 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
2971 	    sc->vmx_link_active == 0) {
2972 		if (m != NULL)
2973 			error = drbr_enqueue(ifp, br, m);
2974 		return (error);
2975 	}
2976 
2977 	if (m != NULL) {
2978 		error = drbr_enqueue(ifp, br, m);
2979 		if (error)
2980 			return (error);
2981 	}
2982 
2983 	while ((avail = VMXNET3_TXRING_AVAIL(txr)) >= 2) {
2984 		m = drbr_peek(ifp, br);
2985 		if (m == NULL)
2986 			break;
2987 
2988 		/* Assume worse case if this mbuf is the head of a chain. */
2989 		if (m->m_next != NULL && avail < VMXNET3_TX_MAXSEGS) {
2990 			drbr_putback(ifp, br, m);
2991 			break;
2992 		}
2993 
2994 		if (vmxnet3_txq_encap(txq, &m) != 0) {
2995 			if (m != NULL)
2996 				drbr_putback(ifp, br, m);
2997 			else
2998 				drbr_advance(ifp, br);
2999 			break;
3000 		}
3001 		drbr_advance(ifp, br);
3002 
3003 		tx++;
3004 		ETHER_BPF_MTAP(ifp, m);
3005 	}
3006 
3007 	if (tx > 0)
3008 		txq->vxtxq_watchdog = VMXNET3_WATCHDOG_TIMEOUT;
3009 
3010 	return (0);
3011 }
3012 
3013 static int
3014 vmxnet3_txq_mq_start(struct ifnet *ifp, struct mbuf *m)
3015 {
3016 	struct vmxnet3_softc *sc;
3017 	struct vmxnet3_txqueue *txq;
3018 	int i, ntxq, error;
3019 
3020 	sc = ifp->if_softc;
3021 	ntxq = sc->vmx_ntxqueues;
3022 
3023 	/* Check if the flowid is set. */
3024 	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
3025 		i = m->m_pkthdr.flowid % ntxq;
3026 	else
3027 		i = curcpu % ntxq;
3028 
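	/*
	 * For example, with 4 Tx queues a packet carrying flowid 13 is
	 * steered to queue 13 % 4 = 1, while packets without a flowid are
	 * spread across the queues by the sending CPU instead.
	 */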
3029 	txq = &sc->vmx_txq[i];
3030 
3031 	if (VMXNET3_TXQ_TRYLOCK(txq) != 0) {
3032 		error = vmxnet3_txq_mq_start_locked(txq, m);
3033 		VMXNET3_TXQ_UNLOCK(txq);
3034 	} else {
3035 		error = drbr_enqueue(ifp, txq->vxtxq_br, m);
3036 		taskqueue_enqueue(sc->vmx_tq, &txq->vxtxq_defrtask);
3037 	}
3038 
3039 	return (error);
3040 }
3041 
3042 static void
3043 vmxnet3_txq_tq_deferred(void *xtxq, int pending)
3044 {
3045 	struct vmxnet3_softc *sc;
3046 	struct vmxnet3_txqueue *txq;
3047 
3048 	txq = xtxq;
3049 	sc = txq->vxtxq_sc;
3050 
3051 	VMXNET3_TXQ_LOCK(txq);
3052 	if (!drbr_empty(sc->vmx_ifp, txq->vxtxq_br))
3053 		vmxnet3_txq_mq_start_locked(txq, NULL);
3054 	VMXNET3_TXQ_UNLOCK(txq);
3055 }
3056 
3057 #endif /* VMXNET3_LEGACY_TX */
3058 
3059 static void
3060 vmxnet3_txq_start(struct vmxnet3_txqueue *txq)
3061 {
3062 	struct vmxnet3_softc *sc;
3063 	struct ifnet *ifp;
3064 
3065 	sc = txq->vxtxq_sc;
3066 	ifp = sc->vmx_ifp;
3067 
3068 #ifdef VMXNET3_LEGACY_TX
3069 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3070 		vmxnet3_start_locked(ifp);
3071 #else
3072 	if (!drbr_empty(ifp, txq->vxtxq_br))
3073 		vmxnet3_txq_mq_start_locked(txq, NULL);
3074 #endif
3075 }
3076 
3077 static void
3078 vmxnet3_tx_start_all(struct vmxnet3_softc *sc)
3079 {
3080 	struct vmxnet3_txqueue *txq;
3081 	int i;
3082 
3083 	VMXNET3_CORE_LOCK_ASSERT(sc);
3084 
3085 	for (i = 0; i < sc->vmx_ntxqueues; i++) {
3086 		txq = &sc->vmx_txq[i];
3087 
3088 		VMXNET3_TXQ_LOCK(txq);
3089 		vmxnet3_txq_start(txq);
3090 		VMXNET3_TXQ_UNLOCK(txq);
3091 	}
3092 }
3093 
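/*
 * The 4096 possible VLAN IDs are tracked in a bitvector of 32-bit words:
 * bits 5-11 of the tag select the word and bits 0-4 select the bit within
 * it. For example, VLAN tag 1000 maps to word 31 (1000 >> 5), bit 8
 * (1000 & 0x1F).
 */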
3094 static void
3095 vmxnet3_update_vlan_filter(struct vmxnet3_softc *sc, int add, uint16_t tag)
3096 {
3097 	struct ifnet *ifp;
3098 	int idx, bit;
3099 
3100 	ifp = sc->vmx_ifp;
3101 	idx = (tag >> 5) & 0x7F;
3102 	bit = tag & 0x1F;
3103 
3104 	if (tag == 0 || tag > 4095)
3105 		return;
3106 
3107 	VMXNET3_CORE_LOCK(sc);
3108 
3109 	/* Update our private VLAN bitvector. */
3110 	if (add)
3111 		sc->vmx_vlan_filter[idx] |= (1 << bit);
3112 	else
3113 		sc->vmx_vlan_filter[idx] &= ~(1 << bit);
3114 
3115 	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
3116 		if (add)
3117 			sc->vmx_ds->vlan_filter[idx] |= (1 << bit);
3118 		else
3119 			sc->vmx_ds->vlan_filter[idx] &= ~(1 << bit);
3120 		vmxnet3_write_cmd(sc, VMXNET3_CMD_VLAN_FILTER);
3121 	}
3122 
3123 	VMXNET3_CORE_UNLOCK(sc);
3124 }
3125 
3126 static void
3127 vmxnet3_register_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
3128 {
3129 
3130 	if (ifp->if_softc == arg)
3131 		vmxnet3_update_vlan_filter(arg, 1, tag);
3132 }
3133 
3134 static void
3135 vmxnet3_unregister_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
3136 {
3137 
3138 	if (ifp->if_softc == arg)
3139 		vmxnet3_update_vlan_filter(arg, 0, tag);
3140 }
3141 
3142 static void
3143 vmxnet3_set_rxfilter(struct vmxnet3_softc *sc)
3144 {
3145 	struct ifnet *ifp;
3146 	struct vmxnet3_driver_shared *ds;
3147 	struct ifmultiaddr *ifma;
3148 	u_int mode;
3149 
3150 	ifp = sc->vmx_ifp;
3151 	ds = sc->vmx_ds;
3152 
3153 	mode = VMXNET3_RXMODE_UCAST | VMXNET3_RXMODE_BCAST;
3154 	if (ifp->if_flags & IFF_PROMISC)
3155 		mode |= VMXNET3_RXMODE_PROMISC;
3156 	if (ifp->if_flags & IFF_ALLMULTI)
3157 		mode |= VMXNET3_RXMODE_ALLMULTI;
3158 	else {
3159 		int cnt = 0, overflow = 0;
3160 
3161 		if_maddr_rlock(ifp);
3162 		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
3163 			if (ifma->ifma_addr->sa_family != AF_LINK)
3164 				continue;
3165 			else if (cnt == VMXNET3_MULTICAST_MAX) {
3166 				overflow = 1;
3167 				break;
3168 			}
3169 
3170 			bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
3171 			   &sc->vmx_mcast[cnt*ETHER_ADDR_LEN], ETHER_ADDR_LEN);
3172 			cnt++;
3173 		}
3174 		if_maddr_runlock(ifp);
3175 
3176 		if (overflow != 0) {
3177 			cnt = 0;
3178 			mode |= VMXNET3_RXMODE_ALLMULTI;
3179 		} else if (cnt > 0)
3180 			mode |= VMXNET3_RXMODE_MCAST;
3181 		ds->mcast_tablelen = cnt * ETHER_ADDR_LEN;
3182 	}
3183 
3184 	ds->rxmode = mode;
3185 
3186 	vmxnet3_write_cmd(sc, VMXNET3_CMD_SET_FILTER);
3187 	vmxnet3_write_cmd(sc, VMXNET3_CMD_SET_RXMODE);
3188 }
3189 
3190 static int
3191 vmxnet3_change_mtu(struct vmxnet3_softc *sc, int mtu)
3192 {
3193 	struct ifnet *ifp;
3194 
3195 	ifp = sc->vmx_ifp;
3196 
3197 	if (mtu < VMXNET3_MIN_MTU || mtu > VMXNET3_MAX_MTU)
3198 		return (EINVAL);
3199 
3200 	ifp->if_mtu = mtu;
3201 
3202 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3203 		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3204 		vmxnet3_init_locked(sc);
3205 	}
3206 
3207 	return (0);
3208 }
3209 
3210 static int
3211 vmxnet3_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
3212 {
3213 	struct vmxnet3_softc *sc;
3214 	struct ifreq *ifr;
3215 	int reinit, mask, error;
3216 
3217 	sc = ifp->if_softc;
3218 	ifr = (struct ifreq *) data;
3219 	error = 0;
3220 
3221 	switch (cmd) {
3222 	case SIOCSIFMTU:
3223 		if (ifp->if_mtu != ifr->ifr_mtu) {
3224 			VMXNET3_CORE_LOCK(sc);
3225 			error = vmxnet3_change_mtu(sc, ifr->ifr_mtu);
3226 			VMXNET3_CORE_UNLOCK(sc);
3227 		}
3228 		break;
3229 
3230 	case SIOCSIFFLAGS:
3231 		VMXNET3_CORE_LOCK(sc);
3232 		if (ifp->if_flags & IFF_UP) {
3233 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3234 				if ((ifp->if_flags ^ sc->vmx_if_flags) &
3235 				    (IFF_PROMISC | IFF_ALLMULTI)) {
3236 					vmxnet3_set_rxfilter(sc);
3237 				}
3238 			} else
3239 				vmxnet3_init_locked(sc);
3240 		} else {
3241 			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3242 				vmxnet3_stop(sc);
3243 		}
3244 		sc->vmx_if_flags = ifp->if_flags;
3245 		VMXNET3_CORE_UNLOCK(sc);
3246 		break;
3247 
3248 	case SIOCADDMULTI:
3249 	case SIOCDELMULTI:
3250 		VMXNET3_CORE_LOCK(sc);
3251 		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3252 			vmxnet3_set_rxfilter(sc);
3253 		VMXNET3_CORE_UNLOCK(sc);
3254 		break;
3255 
3256 	case SIOCSIFMEDIA:
3257 	case SIOCGIFMEDIA:
3258 		error = ifmedia_ioctl(ifp, ifr, &sc->vmx_media, cmd);
3259 		break;
3260 
3261 	case SIOCSIFCAP:
3262 		VMXNET3_CORE_LOCK(sc);
3263 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3264 
3265 		if (mask & IFCAP_TXCSUM)
3266 			ifp->if_capenable ^= IFCAP_TXCSUM;
3267 		if (mask & IFCAP_TXCSUM_IPV6)
3268 			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
3269 		if (mask & IFCAP_TSO4)
3270 			ifp->if_capenable ^= IFCAP_TSO4;
3271 		if (mask & IFCAP_TSO6)
3272 			ifp->if_capenable ^= IFCAP_TSO6;
3273 
3274 		if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO |
3275 		    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWFILTER)) {
3276 			/* Changing these features requires us to reinit. */
3277 			reinit = 1;
3278 
3279 			if (mask & IFCAP_RXCSUM)
3280 				ifp->if_capenable ^= IFCAP_RXCSUM;
3281 			if (mask & IFCAP_RXCSUM_IPV6)
3282 				ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
3283 			if (mask & IFCAP_LRO)
3284 				ifp->if_capenable ^= IFCAP_LRO;
3285 			if (mask & IFCAP_VLAN_HWTAGGING)
3286 				ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
3287 			if (mask & IFCAP_VLAN_HWFILTER)
3288 				ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
3289 		} else
3290 			reinit = 0;
3291 
3292 		if (mask & IFCAP_VLAN_HWTSO)
3293 			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
3294 
3295 		if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3296 			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3297 			vmxnet3_init_locked(sc);
3298 		} else {
3299 			vmxnet3_init_hwassist(sc);
3300 		}
3301 
3302 		VMXNET3_CORE_UNLOCK(sc);
3303 		VLAN_CAPABILITIES(ifp);
3304 		break;
3305 
3306 	default:
3307 		error = ether_ioctl(ifp, cmd, data);
3308 		break;
3309 	}
3310 
3311 	VMXNET3_CORE_LOCK_ASSERT_NOTOWNED(sc);
3312 
3313 	return (error);
3314 }
3315 
3316 #ifndef VMXNET3_LEGACY_TX
3317 static void
3318 vmxnet3_qflush(struct ifnet *ifp)
3319 {
3320 	struct vmxnet3_softc *sc;
3321 	struct vmxnet3_txqueue *txq;
3322 	struct mbuf *m;
3323 	int i;
3324 
3325 	sc = ifp->if_softc;
3326 
3327 	for (i = 0; i < sc->vmx_ntxqueues; i++) {
3328 		txq = &sc->vmx_txq[i];
3329 
3330 		VMXNET3_TXQ_LOCK(txq);
3331 		while ((m = buf_ring_dequeue_sc(txq->vxtxq_br)) != NULL)
3332 			m_freem(m);
3333 		VMXNET3_TXQ_UNLOCK(txq);
3334 	}
3335 
3336 	if_qflush(ifp);
3337 }
3338 #endif
3339 
3340 static int
3341 vmxnet3_watchdog(struct vmxnet3_txqueue *txq)
3342 {
3343 	struct vmxnet3_softc *sc;
3344 
3345 	sc = txq->vxtxq_sc;
3346 
3347 	VMXNET3_TXQ_LOCK(txq);
3348 	if (txq->vxtxq_watchdog == 0 || --txq->vxtxq_watchdog) {
3349 		VMXNET3_TXQ_UNLOCK(txq);
3350 		return (0);
3351 	}
3352 	VMXNET3_TXQ_UNLOCK(txq);
3353 
3354 	if_printf(sc->vmx_ifp, "watchdog timeout on queue %d\n",
3355 	    txq->vxtxq_id);
3356 	return (1);
3357 }
3358 
3359 static void
3360 vmxnet3_refresh_host_stats(struct vmxnet3_softc *sc)
3361 {
3362 
3363 	vmxnet3_write_cmd(sc, VMXNET3_CMD_GET_STATS);
3364 }
3365 
3366 static uint64_t
3367 vmxnet3_get_counter(struct ifnet *ifp, ift_counter cnt)
3368 {
3369 	struct vmxnet3_softc *sc;
3370 	uint64_t rv;
3371 
3372 	sc = if_getsoftc(ifp);
3373 	rv = 0;
3374 
3375 	/*
3376 	 * With the exception of if_ierrors, these ifnet statistics are
3377 	 * only updated in the driver, so just set them to our accumulated
3378 	 * values. if_ierrors is updated in ether_input() for malformed
3379 	 * frames that we should have already discarded.
3380 	 */
3381 	switch (cnt) {
3382 	case IFCOUNTER_IPACKETS:
3383 		for (int i = 0; i < sc->vmx_nrxqueues; i++)
3384 			rv += sc->vmx_rxq[i].vxrxq_stats.vmrxs_ipackets;
3385 		return (rv);
3386 	case IFCOUNTER_IQDROPS:
3387 		for (int i = 0; i < sc->vmx_nrxqueues; i++)
3388 			rv += sc->vmx_rxq[i].vxrxq_stats.vmrxs_iqdrops;
3389 		return (rv);
3390 	case IFCOUNTER_IERRORS:
3391 		for (int i = 0; i < sc->vmx_nrxqueues; i++)
3392 			rv += sc->vmx_rxq[i].vxrxq_stats.vmrxs_ierrors;
3393 		return (rv);
3394 	case IFCOUNTER_OPACKETS:
3395 		for (int i = 0; i < sc->vmx_ntxqueues; i++)
3396 			rv += sc->vmx_txq[i].vxtxq_stats.vmtxs_opackets;
3397 		return (rv);
3398 #ifndef VMXNET3_LEGACY_TX
3399 	case IFCOUNTER_OBYTES:
3400 		for (int i = 0; i < sc->vmx_ntxqueues; i++)
3401 			rv += sc->vmx_txq[i].vxtxq_stats.vmtxs_obytes;
3402 		return (rv);
3403 	case IFCOUNTER_OMCASTS:
3404 		for (int i = 0; i < sc->vmx_ntxqueues; i++)
3405 			rv += sc->vmx_txq[i].vxtxq_stats.vmtxs_omcasts;
3406 		return (rv);
3407 #endif
3408 	default:
3409 		return (if_get_counter_default(ifp, cnt));
3410 	}
3411 }
3412 
3413 static void
3414 vmxnet3_tick(void *xsc)
3415 {
3416 	struct vmxnet3_softc *sc;
3417 	struct ifnet *ifp;
3418 	int i, timedout;
3419 
3420 	sc = xsc;
3421 	ifp = sc->vmx_ifp;
3422 	timedout = 0;
3423 
3424 	VMXNET3_CORE_LOCK_ASSERT(sc);
3425 
3426 	vmxnet3_refresh_host_stats(sc);
3427 
3428 	for (i = 0; i < sc->vmx_ntxqueues; i++)
3429 		timedout |= vmxnet3_watchdog(&sc->vmx_txq[i]);
3430 
3431 	if (timedout != 0) {
3432 		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3433 		vmxnet3_init_locked(sc);
3434 	} else
3435 		callout_reset(&sc->vmx_tick, hz, vmxnet3_tick, sc);
3436 }
3437 
3438 static int
3439 vmxnet3_link_is_up(struct vmxnet3_softc *sc)
3440 {
3441 	uint32_t status;
3442 
3443 	/* Also update the link speed while here. */
3444 	status = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_LINK);
3445 	sc->vmx_link_speed = status >> 16;
3446 	return (!!(status & 0x1));
3447 }
3448 
3449 static void
3450 vmxnet3_link_status(struct vmxnet3_softc *sc)
3451 {
3452 	struct ifnet *ifp;
3453 	int link;
3454 
3455 	ifp = sc->vmx_ifp;
3456 	link = vmxnet3_link_is_up(sc);
3457 
3458 	if (link != 0 && sc->vmx_link_active == 0) {
3459 		sc->vmx_link_active = 1;
3460 		if_link_state_change(ifp, LINK_STATE_UP);
3461 	} else if (link == 0 && sc->vmx_link_active != 0) {
3462 		sc->vmx_link_active = 0;
3463 		if_link_state_change(ifp, LINK_STATE_DOWN);
3464 	}
3465 }
3466 
3467 static void
3468 vmxnet3_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
3469 {
3470 	struct vmxnet3_softc *sc;
3471 
3472 	sc = ifp->if_softc;
3473 
3474 	ifmr->ifm_active = IFM_ETHER | IFM_AUTO;
3475 	ifmr->ifm_status = IFM_AVALID;
3476 
3477 	VMXNET3_CORE_LOCK(sc);
3478 	if (vmxnet3_link_is_up(sc) != 0)
3479 		ifmr->ifm_status |= IFM_ACTIVE;
3480 	else
3481 		ifmr->ifm_status |= IFM_NONE;
3482 	VMXNET3_CORE_UNLOCK(sc);
3483 }
3484 
3485 static int
3486 vmxnet3_media_change(struct ifnet *ifp)
3487 {
3488 
3489 	/* Ignore. */
3490 	return (0);
3491 }
3492 
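/*
 * The MAC address is exchanged with the device through two little-endian
 * registers: MACL holds bytes 0-3 and MACH holds bytes 4-5. For example,
 * the address 00:0c:29:aa:bb:cc is written as MACL = 0xaa290c00 and
 * MACH = 0x0000ccbb.
 */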
3493 static void
3494 vmxnet3_set_lladdr(struct vmxnet3_softc *sc)
3495 {
3496 	uint32_t ml, mh;
3497 
3498 	ml  = sc->vmx_lladdr[0];
3499 	ml |= sc->vmx_lladdr[1] << 8;
3500 	ml |= sc->vmx_lladdr[2] << 16;
3501 	ml |= sc->vmx_lladdr[3] << 24;
3502 	vmxnet3_write_bar1(sc, VMXNET3_BAR1_MACL, ml);
3503 
3504 	mh  = sc->vmx_lladdr[4];
3505 	mh |= sc->vmx_lladdr[5] << 8;
3506 	vmxnet3_write_bar1(sc, VMXNET3_BAR1_MACH, mh);
3507 }
3508 
3509 static void
3510 vmxnet3_get_lladdr(struct vmxnet3_softc *sc)
3511 {
3512 	uint32_t ml, mh;
3513 
3514 	ml = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_MACL);
3515 	mh = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_MACH);
3516 
3517 	sc->vmx_lladdr[0] = ml;
3518 	sc->vmx_lladdr[1] = ml >> 8;
3519 	sc->vmx_lladdr[2] = ml >> 16;
3520 	sc->vmx_lladdr[3] = ml >> 24;
3521 	sc->vmx_lladdr[4] = mh;
3522 	sc->vmx_lladdr[5] = mh >> 8;
3523 }
3524 
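/*
 * The per-queue statistics below hang off the device's sysctl tree, so
 * on unit 0, for instance, the driver-maintained transmit packet count of
 * the first queue should show up as dev.vmx.0.txq0.opackets and the
 * host-reported numbers under dev.vmx.0.txq0.hstats.
 */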
3525 static void
3526 vmxnet3_setup_txq_sysctl(struct vmxnet3_txqueue *txq,
3527     struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child)
3528 {
3529 	struct sysctl_oid *node, *txsnode;
3530 	struct sysctl_oid_list *list, *txslist;
3531 	struct vmxnet3_txq_stats *stats;
3532 	struct UPT1_TxStats *txstats;
3533 	char namebuf[16];
3534 
3535 	stats = &txq->vxtxq_stats;
3536 	txstats = &txq->vxtxq_ts->stats;
3537 
3538 	snprintf(namebuf, sizeof(namebuf), "txq%d", txq->vxtxq_id);
3539 	node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD,
3540 	    NULL, "Transmit Queue");
3541 	txq->vxtxq_sysctl = list = SYSCTL_CHILDREN(node);
3542 
3543 	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "opackets", CTLFLAG_RD,
3544 	    &stats->vmtxs_opackets, "Transmit packets");
3545 	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "obytes", CTLFLAG_RD,
3546 	    &stats->vmtxs_obytes, "Transmit bytes");
3547 	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "omcasts", CTLFLAG_RD,
3548 	    &stats->vmtxs_omcasts, "Transmit multicasts");
3549 	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD,
3550 	    &stats->vmtxs_csum, "Transmit checksum offloaded");
3551 	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "tso", CTLFLAG_RD,
3552 	    &stats->vmtxs_tso, "Transmit TCP segmentation offloaded");
3553 	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ringfull", CTLFLAG_RD,
3554 	    &stats->vmtxs_full, "Transmit ring full");
3555 	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "offload_failed", CTLFLAG_RD,
3556 	    &stats->vmtxs_offload_failed, "Transmit checksum offload failed");
3557 
3558 	/*
3559 	 * Add statistics reported by the host. These are updated once
3560 	 * per second.
3561 	 */
3562 	txsnode = SYSCTL_ADD_NODE(ctx, list, OID_AUTO, "hstats", CTLFLAG_RD,
3563 	    NULL, "Host Statistics");
3564 	txslist = SYSCTL_CHILDREN(txsnode);
3565 	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tso_packets", CTLFLAG_RD,
3566 	    &txstats->TSO_packets, "TSO packets");
3567 	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tso_bytes", CTLFLAG_RD,
3568 	    &txstats->TSO_bytes, "TSO bytes");
3569 	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "ucast_packets", CTLFLAG_RD,
3570 	    &txstats->ucast_packets, "Unicast packets");
3571 	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "unicast_bytes", CTLFLAG_RD,
3572 	    &txstats->ucast_bytes, "Unicast bytes");
3573 	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "mcast_packets", CTLFLAG_RD,
3574 	    &txstats->mcast_packets, "Multicast packets");
3575 	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "mcast_bytes", CTLFLAG_RD,
3576 	    &txstats->mcast_bytes, "Multicast bytes");
3577 	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "error", CTLFLAG_RD,
3578 	    &txstats->error, "Errors");
3579 	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "discard", CTLFLAG_RD,
3580 	    &txstats->discard, "Discards");
3581 }
3582 
3583 static void
3584 vmxnet3_setup_rxq_sysctl(struct vmxnet3_rxqueue *rxq,
3585     struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child)
3586 {
3587 	struct sysctl_oid *node, *rxsnode;
3588 	struct sysctl_oid_list *list, *rxslist;
3589 	struct vmxnet3_rxq_stats *stats;
3590 	struct UPT1_RxStats *rxstats;
3591 	char namebuf[16];
3592 
3593 	stats = &rxq->vxrxq_stats;
3594 	rxstats = &rxq->vxrxq_rs->stats;
3595 
3596 	snprintf(namebuf, sizeof(namebuf), "rxq%d", rxq->vxrxq_id);
3597 	node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD,
3598 	    NULL, "Receive Queue");
3599 	rxq->vxrxq_sysctl = list = SYSCTL_CHILDREN(node);
3600 
3601 	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ipackets", CTLFLAG_RD,
3602 	    &stats->vmrxs_ipackets, "Receive packets");
3603 	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ibytes", CTLFLAG_RD,
3604 	    &stats->vmrxs_ibytes, "Receive bytes");
3605 	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "iqdrops", CTLFLAG_RD,
3606 	    &stats->vmrxs_iqdrops, "Receive drops");
3607 	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ierrors", CTLFLAG_RD,
3608 	    &stats->vmrxs_ierrors, "Receive errors");
3609 
3610 	/*
3611 	 * Add statistics reported by the host. These are updated once
3612 	 * per second.
3613 	 */
3614 	rxsnode = SYSCTL_ADD_NODE(ctx, list, OID_AUTO, "hstats", CTLFLAG_RD,
3615 	    NULL, "Host Statistics");
3616 	rxslist = SYSCTL_CHILDREN(rxsnode);
3617 	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "lro_packets", CTLFLAG_RD,
3618 	    &rxstats->LRO_packets, "LRO packets");
3619 	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "lro_bytes", CTLFLAG_RD,
3620 	    &rxstats->LRO_bytes, "LRO bytes");
3621 	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "ucast_packets", CTLFLAG_RD,
3622 	    &rxstats->ucast_packets, "Unicast packets");
3623 	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "unicast_bytes", CTLFLAG_RD,
3624 	    &rxstats->ucast_bytes, "Unicast bytes");
3625 	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "mcast_packets", CTLFLAG_RD,
3626 	    &rxstats->mcast_packets, "Multicast packets");
3627 	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "mcast_bytes", CTLFLAG_RD,
3628 	    &rxstats->mcast_bytes, "Multicast bytes");
3629 	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "bcast_packets", CTLFLAG_RD,
3630 	    &rxstats->bcast_packets, "Broadcast packets");
3631 	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "bcast_bytes", CTLFLAG_RD,
3632 	    &rxstats->bcast_bytes, "Broadcast bytes");
3633 	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "nobuffer", CTLFLAG_RD,
3634 	    &rxstats->nobuffer, "No buffer");
3635 	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "error", CTLFLAG_RD,
3636 	    &rxstats->error, "Errors");
3637 }
3638 
3639 static void
3640 vmxnet3_setup_debug_sysctl(struct vmxnet3_softc *sc,
3641     struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child)
3642 {
3643 	struct sysctl_oid *node;
3644 	struct sysctl_oid_list *list;
3645 	int i;
3646 
3647 	for (i = 0; i < sc->vmx_ntxqueues; i++) {
3648 		struct vmxnet3_txqueue *txq = &sc->vmx_txq[i];
3649 
3650 		node = SYSCTL_ADD_NODE(ctx, txq->vxtxq_sysctl, OID_AUTO,
3651 		    "debug", CTLFLAG_RD, NULL, "");
3652 		list = SYSCTL_CHILDREN(node);
3653 
3654 		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd_head", CTLFLAG_RD,
3655 		    &txq->vxtxq_cmd_ring.vxtxr_head, 0, "");
3656 		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd_next", CTLFLAG_RD,
3657 		    &txq->vxtxq_cmd_ring.vxtxr_next, 0, "");
3658 		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd_ndesc", CTLFLAG_RD,
3659 		    &txq->vxtxq_cmd_ring.vxtxr_ndesc, 0, "");
3660 		SYSCTL_ADD_INT(ctx, list, OID_AUTO, "cmd_gen", CTLFLAG_RD,
3661 		    &txq->vxtxq_cmd_ring.vxtxr_gen, 0, "");
3662 		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_next", CTLFLAG_RD,
3663 		    &txq->vxtxq_comp_ring.vxcr_next, 0, "");
3664 		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_ndesc", CTLFLAG_RD,
3665 		    &txq->vxtxq_comp_ring.vxcr_ndesc, 0, "");
3666 		SYSCTL_ADD_INT(ctx, list, OID_AUTO, "comp_gen", CTLFLAG_RD,
3667 		    &txq->vxtxq_comp_ring.vxcr_gen, 0, "");
3668 	}
3669 
3670 	for (i = 0; i < sc->vmx_nrxqueues; i++) {
3671 		struct vmxnet3_rxqueue *rxq = &sc->vmx_rxq[i];
3672 
3673 		node = SYSCTL_ADD_NODE(ctx, rxq->vxrxq_sysctl, OID_AUTO,
3674 		    "debug", CTLFLAG_RD, NULL, "");
3675 		list = SYSCTL_CHILDREN(node);
3676 
3677 		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd0_fill", CTLFLAG_RD,
3678 		    &rxq->vxrxq_cmd_ring[0].vxrxr_fill, 0, "");
3679 		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd0_ndesc", CTLFLAG_RD,
3680 		    &rxq->vxrxq_cmd_ring[0].vxrxr_ndesc, 0, "");
3681 		SYSCTL_ADD_INT(ctx, list, OID_AUTO, "cmd0_gen", CTLFLAG_RD,
3682 		    &rxq->vxrxq_cmd_ring[0].vxrxr_gen, 0, "");
3683 		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd1_fill", CTLFLAG_RD,
3684 		    &rxq->vxrxq_cmd_ring[1].vxrxr_fill, 0, "");
3685 		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd1_ndesc", CTLFLAG_RD,
3686 		    &rxq->vxrxq_cmd_ring[1].vxrxr_ndesc, 0, "");
3687 		SYSCTL_ADD_INT(ctx, list, OID_AUTO, "cmd1_gen", CTLFLAG_RD,
3688 		    &rxq->vxrxq_cmd_ring[1].vxrxr_gen, 0, "");
3689 		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_next", CTLFLAG_RD,
3690 		    &rxq->vxrxq_comp_ring.vxcr_next, 0, "");
3691 		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_ndesc", CTLFLAG_RD,
3692 		    &rxq->vxrxq_comp_ring.vxcr_ndesc, 0, "");
3693 		SYSCTL_ADD_INT(ctx, list, OID_AUTO, "comp_gen", CTLFLAG_RD,
3694 		    &rxq->vxrxq_comp_ring.vxcr_gen, 0, "");
3695 	}
3696 }
3697 
3698 static void
3699 vmxnet3_setup_queue_sysctl(struct vmxnet3_softc *sc,
3700     struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child)
3701 {
3702 	int i;
3703 
3704 	for (i = 0; i < sc->vmx_ntxqueues; i++)
3705 		vmxnet3_setup_txq_sysctl(&sc->vmx_txq[i], ctx, child);
3706 	for (i = 0; i < sc->vmx_nrxqueues; i++)
3707 		vmxnet3_setup_rxq_sysctl(&sc->vmx_rxq[i], ctx, child);
3708 
3709 	vmxnet3_setup_debug_sysctl(sc, ctx, child);
3710 }
3711 
3712 static void
3713 vmxnet3_setup_sysctl(struct vmxnet3_softc *sc)
3714 {
3715 	device_t dev;
3716 	struct vmxnet3_statistics *stats;
3717 	struct sysctl_ctx_list *ctx;
3718 	struct sysctl_oid *tree;
3719 	struct sysctl_oid_list *child;
3720 
3721 	dev = sc->vmx_dev;
3722 	ctx = device_get_sysctl_ctx(dev);
3723 	tree = device_get_sysctl_tree(dev);
3724 	child = SYSCTL_CHILDREN(tree);
3725 
3726 	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "max_ntxqueues", CTLFLAG_RD,
3727 	    &sc->vmx_max_ntxqueues, 0, "Maximum number of Tx queues");
3728 	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "max_nrxqueues", CTLFLAG_RD,
3729 	    &sc->vmx_max_nrxqueues, 0, "Maximum number of Rx queues");
3730 	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "ntxqueues", CTLFLAG_RD,
3731 	    &sc->vmx_ntxqueues, 0, "Number of Tx queues");
3732 	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "nrxqueues", CTLFLAG_RD,
3733 	    &sc->vmx_nrxqueues, 0, "Number of Rx queues");
3734 
3735 	stats = &sc->vmx_stats;
3736 	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "defragged", CTLFLAG_RD,
3737 	    &stats->vmst_defragged, 0, "Tx mbuf chains defragged");
3738 	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "defrag_failed", CTLFLAG_RD,
3739 	    &stats->vmst_defrag_failed, 0,
3740 	    "Tx mbuf dropped because defrag failed");
3741 	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "mgetcl_failed", CTLFLAG_RD,
3742 	    &stats->vmst_mgetcl_failed, 0, "mbuf cluster allocation failed");
3743 	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "mbuf_load_failed", CTLFLAG_RD,
3744 	    &stats->vmst_mbuf_load_failed, 0, "mbuf load segments failed");
3745 
3746 	vmxnet3_setup_queue_sysctl(sc, ctx, child);
3747 }
3748 
3749 static void
3750 vmxnet3_write_bar0(struct vmxnet3_softc *sc, bus_size_t r, uint32_t v)
3751 {
3752 
3753 	bus_space_write_4(sc->vmx_iot0, sc->vmx_ioh0, r, v);
3754 }
3755 
3756 static uint32_t
3757 vmxnet3_read_bar1(struct vmxnet3_softc *sc, bus_size_t r)
3758 {
3759 
3760 	return (bus_space_read_4(sc->vmx_iot1, sc->vmx_ioh1, r));
3761 }
3762 
3763 static void
3764 vmxnet3_write_bar1(struct vmxnet3_softc *sc, bus_size_t r, uint32_t v)
3765 {
3766 
3767 	bus_space_write_4(sc->vmx_iot1, sc->vmx_ioh1, r, v);
3768 }
3769 
3770 static void
3771 vmxnet3_write_cmd(struct vmxnet3_softc *sc, uint32_t cmd)
3772 {
3773 
3774 	vmxnet3_write_bar1(sc, VMXNET3_BAR1_CMD, cmd);
3775 }
3776 
3777 static uint32_t
3778 vmxnet3_read_cmd(struct vmxnet3_softc *sc, uint32_t cmd)
3779 {
3780 
3781 	vmxnet3_write_cmd(sc, cmd);
3782 	bus_space_barrier(sc->vmx_iot1, sc->vmx_ioh1, 0, 0,
3783 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
3784 	return (vmxnet3_read_bar1(sc, VMXNET3_BAR1_CMD));
3785 }
3786 
3787 static void
3788 vmxnet3_enable_intr(struct vmxnet3_softc *sc, int irq)
3789 {
3790 
3791 	vmxnet3_write_bar0(sc, VMXNET3_BAR0_IMASK(irq), 0);
3792 }
3793 
3794 static void
3795 vmxnet3_disable_intr(struct vmxnet3_softc *sc, int irq)
3796 {
3797 
3798 	vmxnet3_write_bar0(sc, VMXNET3_BAR0_IMASK(irq), 1);
3799 }
3800 
3801 static void
3802 vmxnet3_enable_all_intrs(struct vmxnet3_softc *sc)
3803 {
3804 	int i;
3805 
3806 	sc->vmx_ds->ictrl &= ~VMXNET3_ICTRL_DISABLE_ALL;
3807 	for (i = 0; i < sc->vmx_nintrs; i++)
3808 		vmxnet3_enable_intr(sc, i);
3809 }
3810 
3811 static void
3812 vmxnet3_disable_all_intrs(struct vmxnet3_softc *sc)
3813 {
3814 	int i;
3815 
3816 	sc->vmx_ds->ictrl |= VMXNET3_ICTRL_DISABLE_ALL;
3817 	for (i = 0; i < sc->vmx_nintrs; i++)
3818 		vmxnet3_disable_intr(sc, i);
3819 }
3820 
3821 static void
3822 vmxnet3_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
3823 {
3824 	bus_addr_t *baddr = arg;
3825 
3826 	if (error == 0)
3827 		*baddr = segs->ds_addr;
3828 }
3829 
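/*
 * Allocate a physically contiguous, DMA-able region using the usual
 * busdma sequence: create a tag describing the constraints, allocate the
 * memory, then load the map to obtain the bus address, which is handed
 * back through vmxnet3_dmamap_cb() above.
 */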
3830 static int
3831 vmxnet3_dma_malloc(struct vmxnet3_softc *sc, bus_size_t size, bus_size_t align,
3832     struct vmxnet3_dma_alloc *dma)
3833 {
3834 	device_t dev;
3835 	int error;
3836 
3837 	dev = sc->vmx_dev;
3838 	bzero(dma, sizeof(struct vmxnet3_dma_alloc));
3839 
3840 	error = bus_dma_tag_create(bus_get_dma_tag(dev),
3841 	    align, 0,		/* alignment, bounds */
3842 	    BUS_SPACE_MAXADDR,	/* lowaddr */
3843 	    BUS_SPACE_MAXADDR,	/* highaddr */
3844 	    NULL, NULL,		/* filter, filterarg */
3845 	    size,		/* maxsize */
3846 	    1,			/* nsegments */
3847 	    size,		/* maxsegsize */
3848 	    BUS_DMA_ALLOCNOW,	/* flags */
3849 	    NULL,		/* lockfunc */
3850 	    NULL,		/* lockfuncarg */
3851 	    &dma->dma_tag);
3852 	if (error) {
3853 		device_printf(dev, "bus_dma_tag_create failed: %d\n", error);
3854 		goto fail;
3855 	}
3856 
3857 	error = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
3858 	    BUS_DMA_ZERO | BUS_DMA_NOWAIT, &dma->dma_map);
3859 	if (error) {
3860 		device_printf(dev, "bus_dmamem_alloc failed: %d\n", error);
3861 		goto fail;
3862 	}
3863 
3864 	error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
3865 	    size, vmxnet3_dmamap_cb, &dma->dma_paddr, BUS_DMA_NOWAIT);
3866 	if (error) {
3867 		device_printf(dev, "bus_dmamap_load failed: %d\n", error);
3868 		goto fail;
3869 	}
3870 
3871 	dma->dma_size = size;
3872 
3873 fail:
3874 	if (error)
3875 		vmxnet3_dma_free(sc, dma);
3876 
3877 	return (error);
3878 }
3879 
3880 static void
3881 vmxnet3_dma_free(struct vmxnet3_softc *sc, struct vmxnet3_dma_alloc *dma)
3882 {
3883 
3884 	if (dma->dma_tag != NULL) {
3885 		if (dma->dma_paddr != 0) {
3886 			bus_dmamap_sync(dma->dma_tag, dma->dma_map,
3887 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3888 			bus_dmamap_unload(dma->dma_tag, dma->dma_map);
3889 		}
3890 
3891 		if (dma->dma_vaddr != NULL) {
3892 			bus_dmamem_free(dma->dma_tag, dma->dma_vaddr,
3893 			    dma->dma_map);
3894 		}
3895 
3896 		bus_dma_tag_destroy(dma->dma_tag);
3897 	}
3898 	bzero(dma, sizeof(struct vmxnet3_dma_alloc));
3899 }
3900 
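/*
 * Per-device tunables are fetched from the kernel environment using the
 * pattern hw.vmx.<unit>.<knob>, so a knob can be overridden for unit 0 by
 * adding a line such as the following (with a real knob name) to
 * /boot/loader.conf:
 *
 *	hw.vmx.0.<knob>="1"
 */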
3901 static int
3902 vmxnet3_tunable_int(struct vmxnet3_softc *sc, const char *knob, int def)
3903 {
3904 	char path[64];
3905 
3906 	snprintf(path, sizeof(path),
3907 	    "hw.vmx.%d.%s", device_get_unit(sc->vmx_dev), knob);
3908 	TUNABLE_INT_FETCH(path, &def);
3909 
3910 	return (def);
3911 }
3912 
3913 /*
3914  * Since this is a purely paravirtualized device, we do not have
3915  * to worry about DMA coherency. But at times, we must make sure
3916  * both the compiler and CPU do not reorder memory operations.
3917  */
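/*
 * The usual pattern is a read barrier between checking a completion
 * descriptor's gen bit and consuming the rest of the descriptor, e.g.:
 *
 *	if (txcd->gen != txc->vxcr_gen)
 *		break;
 *	vmxnet3_barrier(sc, VMXNET3_BARRIER_RD);
 *
 * and a write barrier before flipping the SOP gen bit that hands a Tx
 * descriptor chain to the device.
 */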
3918 static inline void
3919 vmxnet3_barrier(struct vmxnet3_softc *sc, vmxnet3_barrier_t type)
3920 {
3921 
3922 	switch (type) {
3923 	case VMXNET3_BARRIER_RD:
3924 		rmb();
3925 		break;
3926 	case VMXNET3_BARRIER_WR:
3927 		wmb();
3928 		break;
3929 	case VMXNET3_BARRIER_RDWR:
3930 		mb();
3931 		break;
3932 	default:
3933 		panic("%s: bad barrier type %d", __func__, type);
3934 	}
3935 }
3936