xref: /freebsd/sys/dev/vmware/vmxnet3/if_vmx.c (revision dda5b39711dab90ae1c5624bdd6ff7453177df31)
1 /*-
2  * Copyright (c) 2013 Tsubai Masanari
3  * Copyright (c) 2013 Bryan Venteicher <bryanv@FreeBSD.org>
4  *
5  * Permission to use, copy, modify, and distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  *
17  * $OpenBSD: src/sys/dev/pci/if_vmx.c,v 1.11 2013/06/22 00:28:10 uebayasi Exp $
18  */
19 
20 /* Driver for VMware vmxnet3 virtual ethernet devices. */
21 
22 #include <sys/cdefs.h>
23 __FBSDID("$FreeBSD$");
24 
25 #include <sys/param.h>
26 #include <sys/systm.h>
27 #include <sys/eventhandler.h>
28 #include <sys/kernel.h>
29 #include <sys/endian.h>
30 #include <sys/sockio.h>
31 #include <sys/mbuf.h>
32 #include <sys/malloc.h>
33 #include <sys/module.h>
34 #include <sys/socket.h>
35 #include <sys/sysctl.h>
36 #include <vm/vm.h>
37 #include <vm/pmap.h>
38 
39 #include <net/ethernet.h>
40 #include <net/if.h>
41 #include <net/if_var.h>
42 #include <net/if_arp.h>
43 #include <net/if_dl.h>
44 #include <net/if_types.h>
45 #include <net/if_media.h>
46 #include <net/if_vlan_var.h>
47 
48 #include <net/bpf.h>
49 
50 #include <netinet/in_systm.h>
51 #include <netinet/in.h>
52 #include <netinet/ip.h>
53 #include <netinet/ip6.h>
54 #include <netinet6/ip6_var.h>
55 #include <netinet/udp.h>
56 #include <netinet/tcp.h>
57 
58 #include <machine/bus.h>
59 #include <machine/resource.h>
60 #include <sys/bus.h>
61 #include <sys/rman.h>
62 
63 #include <dev/pci/pcireg.h>
64 #include <dev/pci/pcivar.h>
65 
66 #include "if_vmxreg.h"
67 #include "if_vmxvar.h"
68 
69 #include "opt_inet.h"
70 #include "opt_inet6.h"
71 
72 /* Always enable for now - useful for debugging queue hangs. */
73 #define VMXNET3_DEBUG_SYSCTL
74 
75 #ifdef VMXNET3_FAILPOINTS
76 #include <sys/fail.h>
77 static SYSCTL_NODE(DEBUG_FP, OID_AUTO, vmxnet3, CTLFLAG_RW, 0,
78     "vmxnet3 fail points");
79 #define VMXNET3_FP	_debug_fail_point_vmxnet3
80 #endif
81 
82 static int	vmxnet3_probe(device_t);
83 static int	vmxnet3_attach(device_t);
84 static int	vmxnet3_detach(device_t);
85 static int	vmxnet3_shutdown(device_t);
86 
87 static int	vmxnet3_alloc_resources(struct vmxnet3_softc *);
88 static void	vmxnet3_free_resources(struct vmxnet3_softc *);
89 static int	vmxnet3_check_version(struct vmxnet3_softc *);
90 static void	vmxnet3_initial_config(struct vmxnet3_softc *);
91 
92 static int	vmxnet3_alloc_msix_interrupts(struct vmxnet3_softc *);
93 static int	vmxnet3_alloc_msi_interrupts(struct vmxnet3_softc *);
94 static int	vmxnet3_alloc_legacy_interrupts(struct vmxnet3_softc *);
95 static int	vmxnet3_alloc_interrupt(struct vmxnet3_softc *, int, int,
96 		    struct vmxnet3_interrupt *);
97 static int	vmxnet3_alloc_intr_resources(struct vmxnet3_softc *);
98 static int	vmxnet3_setup_msix_interrupts(struct vmxnet3_softc *);
99 static int	vmxnet3_setup_legacy_interrupt(struct vmxnet3_softc *);
100 static int	vmxnet3_setup_interrupts(struct vmxnet3_softc *);
101 static int	vmxnet3_alloc_interrupts(struct vmxnet3_softc *);
102 
103 static void	vmxnet3_free_interrupt(struct vmxnet3_softc *,
104 		    struct vmxnet3_interrupt *);
105 static void	vmxnet3_free_interrupts(struct vmxnet3_softc *);
106 
107 static int	vmxnet3_init_rxq(struct vmxnet3_softc *, int);
108 static int	vmxnet3_init_txq(struct vmxnet3_softc *, int);
109 static int	vmxnet3_alloc_rxtx_queues(struct vmxnet3_softc *);
110 static void	vmxnet3_destroy_rxq(struct vmxnet3_rxqueue *);
111 static void	vmxnet3_destroy_txq(struct vmxnet3_txqueue *);
112 static void	vmxnet3_free_rxtx_queues(struct vmxnet3_softc *);
113 
114 static int	vmxnet3_alloc_shared_data(struct vmxnet3_softc *);
115 static void	vmxnet3_free_shared_data(struct vmxnet3_softc *);
116 static int	vmxnet3_alloc_txq_data(struct vmxnet3_softc *);
117 static void	vmxnet3_free_txq_data(struct vmxnet3_softc *);
118 static int	vmxnet3_alloc_rxq_data(struct vmxnet3_softc *);
119 static void	vmxnet3_free_rxq_data(struct vmxnet3_softc *);
120 static int	vmxnet3_alloc_queue_data(struct vmxnet3_softc *);
121 static void	vmxnet3_free_queue_data(struct vmxnet3_softc *);
122 static int	vmxnet3_alloc_mcast_table(struct vmxnet3_softc *);
123 static void	vmxnet3_init_shared_data(struct vmxnet3_softc *);
124 static void	vmxnet3_reinit_interface(struct vmxnet3_softc *);
125 static void	vmxnet3_reinit_shared_data(struct vmxnet3_softc *);
126 static int	vmxnet3_alloc_data(struct vmxnet3_softc *);
127 static void	vmxnet3_free_data(struct vmxnet3_softc *);
128 static int	vmxnet3_setup_interface(struct vmxnet3_softc *);
129 
130 static void	vmxnet3_evintr(struct vmxnet3_softc *);
131 static void	vmxnet3_txq_eof(struct vmxnet3_txqueue *);
132 static void	vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *, struct mbuf *);
133 static int	vmxnet3_newbuf(struct vmxnet3_softc *, struct vmxnet3_rxring *);
134 static void	vmxnet3_rxq_eof_discard(struct vmxnet3_rxqueue *,
135 		    struct vmxnet3_rxring *, int);
136 static void	vmxnet3_rxq_eof(struct vmxnet3_rxqueue *);
137 static void	vmxnet3_legacy_intr(void *);
138 static void	vmxnet3_txq_intr(void *);
139 static void	vmxnet3_rxq_intr(void *);
140 static void	vmxnet3_event_intr(void *);
141 
142 static void	vmxnet3_txstop(struct vmxnet3_softc *, struct vmxnet3_txqueue *);
143 static void	vmxnet3_rxstop(struct vmxnet3_softc *, struct vmxnet3_rxqueue *);
144 static void	vmxnet3_stop(struct vmxnet3_softc *);
145 
146 static void	vmxnet3_txinit(struct vmxnet3_softc *, struct vmxnet3_txqueue *);
147 static int	vmxnet3_rxinit(struct vmxnet3_softc *, struct vmxnet3_rxqueue *);
148 static int	vmxnet3_reinit_queues(struct vmxnet3_softc *);
149 static int	vmxnet3_enable_device(struct vmxnet3_softc *);
150 static void	vmxnet3_reinit_rxfilters(struct vmxnet3_softc *);
151 static int	vmxnet3_reinit(struct vmxnet3_softc *);
152 static void	vmxnet3_init_locked(struct vmxnet3_softc *);
153 static void	vmxnet3_init(void *);
154 
155 static int	vmxnet3_txq_offload_ctx(struct mbuf *, int *, int *, int *);
156 static int	vmxnet3_txq_load_mbuf(struct vmxnet3_txqueue *, struct mbuf **,
157 		    bus_dmamap_t, bus_dma_segment_t [], int *);
158 static void	vmxnet3_txq_unload_mbuf(struct vmxnet3_txqueue *, bus_dmamap_t);
159 static int	vmxnet3_txq_encap(struct vmxnet3_txqueue *, struct mbuf **);
160 static void	vmxnet3_start_locked(struct ifnet *);
161 static void	vmxnet3_start(struct ifnet *);
162 
163 static void	vmxnet3_update_vlan_filter(struct vmxnet3_softc *, int,
164 		    uint16_t);
165 static void	vmxnet3_register_vlan(void *, struct ifnet *, uint16_t);
166 static void	vmxnet3_unregister_vlan(void *, struct ifnet *, uint16_t);
167 static void	vmxnet3_set_rxfilter(struct vmxnet3_softc *);
168 static int	vmxnet3_change_mtu(struct vmxnet3_softc *, int);
169 static int	vmxnet3_ioctl(struct ifnet *, u_long, caddr_t);
170 
171 static int	vmxnet3_watchdog(struct vmxnet3_txqueue *);
172 static void	vmxnet3_tick(void *);
173 static void	vmxnet3_link_status(struct vmxnet3_softc *);
174 static void	vmxnet3_media_status(struct ifnet *, struct ifmediareq *);
175 static int	vmxnet3_media_change(struct ifnet *);
176 static void	vmxnet3_set_lladdr(struct vmxnet3_softc *);
177 static void	vmxnet3_get_lladdr(struct vmxnet3_softc *);
178 
179 static void	vmxnet3_setup_txq_sysctl(struct vmxnet3_txqueue *,
180 		    struct sysctl_ctx_list *, struct sysctl_oid_list *);
181 static void	vmxnet3_setup_rxq_sysctl(struct vmxnet3_rxqueue *,
182 		    struct sysctl_ctx_list *, struct sysctl_oid_list *);
183 static void	vmxnet3_setup_queue_sysctl(struct vmxnet3_softc *,
184 		    struct sysctl_ctx_list *, struct sysctl_oid_list *);
185 static void	vmxnet3_setup_sysctl(struct vmxnet3_softc *);
186 
187 static void	vmxnet3_write_bar0(struct vmxnet3_softc *, bus_size_t,
188 		    uint32_t);
189 static uint32_t	vmxnet3_read_bar1(struct vmxnet3_softc *, bus_size_t);
190 static void	vmxnet3_write_bar1(struct vmxnet3_softc *, bus_size_t,
191 		    uint32_t);
192 static void	vmxnet3_write_cmd(struct vmxnet3_softc *, uint32_t);
193 static uint32_t	vmxnet3_read_cmd(struct vmxnet3_softc *, uint32_t);
194 
195 static void	vmxnet3_enable_intr(struct vmxnet3_softc *, int);
196 static void	vmxnet3_disable_intr(struct vmxnet3_softc *, int);
197 static void	vmxnet3_enable_all_intrs(struct vmxnet3_softc *);
198 static void	vmxnet3_disable_all_intrs(struct vmxnet3_softc *);
199 
200 static int	vmxnet3_dma_malloc(struct vmxnet3_softc *, bus_size_t,
201 		    bus_size_t, struct vmxnet3_dma_alloc *);
202 static void	vmxnet3_dma_free(struct vmxnet3_softc *,
203 		    struct vmxnet3_dma_alloc *);
204 static int	vmxnet3_tunable_int(struct vmxnet3_softc *,
205 		    const char *, int);
206 
207 typedef enum {
208 	VMXNET3_BARRIER_RD,
209 	VMXNET3_BARRIER_WR,
210 	VMXNET3_BARRIER_RDWR,
211 } vmxnet3_barrier_t;
212 
213 static void	vmxnet3_barrier(struct vmxnet3_softc *, vmxnet3_barrier_t);
214 
215 /* Tunables. */
216 static int vmxnet3_default_txndesc = VMXNET3_DEF_TX_NDESC;
217 TUNABLE_INT("hw.vmx.txndesc", &vmxnet3_default_txndesc);
218 static int vmxnet3_default_rxndesc = VMXNET3_DEF_RX_NDESC;
219 TUNABLE_INT("hw.vmx.rxndesc", &vmxnet3_default_rxndesc);
220 
221 static device_method_t vmxnet3_methods[] = {
222 	/* Device interface. */
223 	DEVMETHOD(device_probe,		vmxnet3_probe),
224 	DEVMETHOD(device_attach,	vmxnet3_attach),
225 	DEVMETHOD(device_detach,	vmxnet3_detach),
226 	DEVMETHOD(device_shutdown,	vmxnet3_shutdown),
227 
228 	DEVMETHOD_END
229 };
230 
231 static driver_t vmxnet3_driver = {
232 	"vmx", vmxnet3_methods, sizeof(struct vmxnet3_softc)
233 };
234 
235 static devclass_t vmxnet3_devclass;
236 DRIVER_MODULE(vmx, pci, vmxnet3_driver, vmxnet3_devclass, 0, 0);
237 
238 MODULE_DEPEND(vmx, pci, 1, 1, 1);
239 MODULE_DEPEND(vmx, ether, 1, 1, 1);
240 
241 #define VMXNET3_VMWARE_VENDOR_ID	0x15AD
242 #define VMXNET3_VMWARE_DEVICE_ID	0x07B0
243 
244 static int
245 vmxnet3_probe(device_t dev)
246 {
247 
248 	if (pci_get_vendor(dev) == VMXNET3_VMWARE_VENDOR_ID &&
249 	    pci_get_device(dev) == VMXNET3_VMWARE_DEVICE_ID) {
250 		device_set_desc(dev, "VMware VMXNET3 Ethernet Adapter");
251 		return (BUS_PROBE_DEFAULT);
252 	}
253 
254 	return (ENXIO);
255 }
256 
257 static int
258 vmxnet3_attach(device_t dev)
259 {
260 	struct vmxnet3_softc *sc;
261 	int error;
262 
263 	sc = device_get_softc(dev);
264 	sc->vmx_dev = dev;
265 
266 	pci_enable_busmaster(dev);
267 
268 	VMXNET3_CORE_LOCK_INIT(sc, device_get_nameunit(dev));
269 	callout_init_mtx(&sc->vmx_tick, &sc->vmx_mtx, 0);
270 
271 	vmxnet3_initial_config(sc);
272 
273 	error = vmxnet3_alloc_resources(sc);
274 	if (error)
275 		goto fail;
276 
277 	error = vmxnet3_check_version(sc);
278 	if (error)
279 		goto fail;
280 
281 	error = vmxnet3_alloc_rxtx_queues(sc);
282 	if (error)
283 		goto fail;
284 
285 	error = vmxnet3_alloc_interrupts(sc);
286 	if (error)
287 		goto fail;
288 
289 	error = vmxnet3_alloc_data(sc);
290 	if (error)
291 		goto fail;
292 
293 	error = vmxnet3_setup_interface(sc);
294 	if (error)
295 		goto fail;
296 
297 	error = vmxnet3_setup_interrupts(sc);
298 	if (error) {
299 		ether_ifdetach(sc->vmx_ifp);
300 		device_printf(dev, "could not set up interrupt\n");
301 		goto fail;
302 	}
303 
304 	vmxnet3_setup_sysctl(sc);
305 	vmxnet3_link_status(sc);
306 
307 fail:
308 	if (error)
309 		vmxnet3_detach(dev);
310 
311 	return (error);
312 }
313 
314 static int
315 vmxnet3_detach(device_t dev)
316 {
317 	struct vmxnet3_softc *sc;
318 	struct ifnet *ifp;
319 
320 	sc = device_get_softc(dev);
321 	ifp = sc->vmx_ifp;
322 
323 	if (device_is_attached(dev)) {
324 		ether_ifdetach(ifp);
325 		VMXNET3_CORE_LOCK(sc);
326 		vmxnet3_stop(sc);
327 		VMXNET3_CORE_UNLOCK(sc);
328 		callout_drain(&sc->vmx_tick);
329 	}
330 
331 	if (sc->vmx_vlan_attach != NULL) {
332 		EVENTHANDLER_DEREGISTER(vlan_config, sc->vmx_vlan_attach);
333 		sc->vmx_vlan_attach = NULL;
334 	}
335 	if (sc->vmx_vlan_detach != NULL) {
336 		EVENTHANDLER_DEREGISTER(vlan_config, sc->vmx_vlan_detach);
337 		sc->vmx_vlan_detach = NULL;
338 	}
339 
340 	vmxnet3_free_interrupts(sc);
341 
342 	if (ifp != NULL) {
343 		if_free(ifp);
344 		sc->vmx_ifp = NULL;
345 	}
346 
347 	ifmedia_removeall(&sc->vmx_media);
348 
349 	vmxnet3_free_data(sc);
350 	vmxnet3_free_resources(sc);
351 	vmxnet3_free_rxtx_queues(sc);
352 
353 	VMXNET3_CORE_LOCK_DESTROY(sc);
354 
355 	return (0);
356 }
357 
358 static int
359 vmxnet3_shutdown(device_t dev)
360 {
361 
362 	return (0);
363 }
364 
365 static int
366 vmxnet3_alloc_resources(struct vmxnet3_softc *sc)
367 {
368 	device_t dev;
369 	int rid;
370 
371 	dev = sc->vmx_dev;
372 
373 	rid = PCIR_BAR(0);
374 	sc->vmx_res0 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
375 	    RF_ACTIVE);
376 	if (sc->vmx_res0 == NULL) {
377 		device_printf(dev,
378 		    "could not map BAR0 memory\n");
379 		return (ENXIO);
380 	}
381 
382 	sc->vmx_iot0 = rman_get_bustag(sc->vmx_res0);
383 	sc->vmx_ioh0 = rman_get_bushandle(sc->vmx_res0);
384 
385 	rid = PCIR_BAR(1);
386 	sc->vmx_res1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
387 	    RF_ACTIVE);
388 	if (sc->vmx_res1 == NULL) {
389 		device_printf(dev,
390 		    "could not map BAR1 memory\n");
391 		return (ENXIO);
392 	}
393 
394 	sc->vmx_iot1 = rman_get_bustag(sc->vmx_res1);
395 	sc->vmx_ioh1 = rman_get_bushandle(sc->vmx_res1);
396 
397 	if (pci_find_cap(dev, PCIY_MSIX, NULL) == 0) {
398 		rid = PCIR_BAR(2);
399 		sc->vmx_msix_res = bus_alloc_resource_any(dev,
400 		    SYS_RES_MEMORY, &rid, RF_ACTIVE);
401 	}
402 
403 	if (sc->vmx_msix_res == NULL)
404 		sc->vmx_flags |= VMXNET3_FLAG_NO_MSIX;
405 
406 	return (0);
407 }
408 
409 static void
410 vmxnet3_free_resources(struct vmxnet3_softc *sc)
411 {
412 	device_t dev;
413 	int rid;
414 
415 	dev = sc->vmx_dev;
416 
417 	if (sc->vmx_res0 != NULL) {
418 		rid = PCIR_BAR(0);
419 		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->vmx_res0);
420 		sc->vmx_res0 = NULL;
421 	}
422 
423 	if (sc->vmx_res1 != NULL) {
424 		rid = PCIR_BAR(1);
425 		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->vmx_res1);
426 		sc->vmx_res1 = NULL;
427 	}
428 
429 	if (sc->vmx_msix_res != NULL) {
430 		rid = PCIR_BAR(2);
431 		bus_release_resource(dev, SYS_RES_MEMORY, rid,
432 		    sc->vmx_msix_res);
433 		sc->vmx_msix_res = NULL;
434 	}
435 }
436 
437 static int
438 vmxnet3_check_version(struct vmxnet3_softc *sc)
439 {
440 	device_t dev;
441 	uint32_t version;
442 
443 	dev = sc->vmx_dev;
444 
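	/*
	 * BAR1_VRRS reports the device revisions supported as a bitmask
	 * (bit 0 is revision 1); writing a value back selects the revision
	 * to use.  BAR1_UVRS does the same for the UPT version.
	 */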
445 	version = vmxnet3_read_bar1(sc, VMXNET3_BAR1_VRRS);
446 	if ((version & 0x01) == 0) {
447 		device_printf(dev, "unsupported hardware version %#x\n",
448 		    version);
449 		return (ENOTSUP);
450 	}
451 	vmxnet3_write_bar1(sc, VMXNET3_BAR1_VRRS, 1);
452 
453 	version = vmxnet3_read_bar1(sc, VMXNET3_BAR1_UVRS);
454 	if ((version & 0x01) == 0) {
455 		device_printf(dev, "unsupported UPT version %#x\n", version);
456 		return (ENOTSUP);
457 	}
458 	vmxnet3_write_bar1(sc, VMXNET3_BAR1_UVRS, 1);
459 
460 	return (0);
461 }
462 
463 static void
464 vmxnet3_initial_config(struct vmxnet3_softc *sc)
465 {
466 	int ndesc;
467 
468 	/*
469 	 * BMV Much of the work is already done, but this driver does
470 	 * not support multiqueue yet.
471 	 */
472 	sc->vmx_ntxqueues = VMXNET3_TX_QUEUES;
473 	sc->vmx_nrxqueues = VMXNET3_RX_QUEUES;
474 
475 	ndesc = vmxnet3_tunable_int(sc, "txd", vmxnet3_default_txndesc);
476 	if (ndesc > VMXNET3_MAX_TX_NDESC || ndesc < VMXNET3_MIN_TX_NDESC)
477 		ndesc = VMXNET3_DEF_TX_NDESC;
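	/*
	 * The descriptor count must be a multiple of the ring alignment;
	 * clear the masked bits to round an unaligned tunable down.
	 */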
478 	if (ndesc & VMXNET3_MASK_TX_NDESC)
479 		ndesc &= ~VMXNET3_MASK_TX_NDESC;
480 	sc->vmx_ntxdescs = ndesc;
481 
482 	ndesc = vmxnet3_tunable_int(sc, "rxd", vmxnet3_default_rxndesc);
483 	if (ndesc > VMXNET3_MAX_RX_NDESC || ndesc < VMXNET3_MIN_RX_NDESC)
484 		ndesc = VMXNET3_DEF_RX_NDESC;
485 	if (ndesc & VMXNET3_MASK_RX_NDESC)
486 		ndesc &= ~VMXNET3_MASK_RX_NDESC;
487 	sc->vmx_nrxdescs = ndesc;
488 	sc->vmx_max_rxsegs = VMXNET3_MAX_RX_SEGS;
489 }
490 
491 static int
492 vmxnet3_alloc_msix_interrupts(struct vmxnet3_softc *sc)
493 {
494 	device_t dev;
495 	int nmsix, cnt, required;
496 
497 	dev = sc->vmx_dev;
498 
499 	if (sc->vmx_flags & VMXNET3_FLAG_NO_MSIX)
500 		return (1);
501 
502 	/* Allocate an additional vector for the events interrupt. */
503 	required = sc->vmx_nrxqueues + sc->vmx_ntxqueues + 1;
504 
505 	nmsix = pci_msix_count(dev);
506 	if (nmsix < required)
507 		return (1);
508 
509 	cnt = required;
510 	if (pci_alloc_msix(dev, &cnt) == 0 && cnt >= required) {
511 		sc->vmx_nintrs = required;
512 		return (0);
513 	} else
514 		pci_release_msi(dev);
515 
516 	return (1);
517 }
518 
519 static int
520 vmxnet3_alloc_msi_interrupts(struct vmxnet3_softc *sc)
521 {
522 	device_t dev;
523 	int nmsi, cnt, required;
524 
525 	dev = sc->vmx_dev;
526 	required = 1;
527 
528 	nmsi = pci_msi_count(dev);
529 	if (nmsi < required)
530 		return (1);
531 
532 	cnt = required;
533 	if (pci_alloc_msi(dev, &cnt) == 0 && cnt >= required) {
534 		sc->vmx_nintrs = 1;
535 		return (0);
536 	} else
537 		pci_release_msi(dev);
538 
539 	return (1);
540 }
541 
542 static int
543 vmxnet3_alloc_legacy_interrupts(struct vmxnet3_softc *sc)
544 {
545 
546 	sc->vmx_nintrs = 1;
547 	return (0);
548 }
549 
550 static int
551 vmxnet3_alloc_interrupt(struct vmxnet3_softc *sc, int rid, int flags,
552     struct vmxnet3_interrupt *intr)
553 {
554 	struct resource *irq;
555 
556 	irq = bus_alloc_resource_any(sc->vmx_dev, SYS_RES_IRQ, &rid, flags);
557 	if (irq == NULL)
558 		return (ENXIO);
559 
560 	intr->vmxi_irq = irq;
561 	intr->vmxi_rid = rid;
562 
563 	return (0);
564 }
565 
566 static int
567 vmxnet3_alloc_intr_resources(struct vmxnet3_softc *sc)
568 {
569 	int i, rid, flags, error;
570 
571 	rid = 0;
572 	flags = RF_ACTIVE;
573 
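	/*
	 * A legacy INTx interrupt is rid 0 and may be shared; MSI and
	 * MSI-X message resources are numbered starting at rid 1.
	 */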
574 	if (sc->vmx_intr_type == VMXNET3_IT_LEGACY)
575 		flags |= RF_SHAREABLE;
576 	else
577 		rid = 1;
578 
579 	for (i = 0; i < sc->vmx_nintrs; i++, rid++) {
580 		error = vmxnet3_alloc_interrupt(sc, rid, flags,
581 		    &sc->vmx_intrs[i]);
582 		if (error)
583 			return (error);
584 	}
585 
586 	return (0);
587 }
588 
589 /*
590  * NOTE: We only support the simple case of each Rx and Tx queue on its
591  * own MSIX vector. This is good enough until we support multiqueue.
592  */
593 static int
594 vmxnet3_setup_msix_interrupts(struct vmxnet3_softc *sc)
595 {
596 	device_t dev;
597 	struct vmxnet3_txqueue *txq;
598 	struct vmxnet3_rxqueue *rxq;
599 	struct vmxnet3_interrupt *intr;
600 	enum intr_type type;
601 	int i, error;
602 
603 	dev = sc->vmx_dev;
604 	intr = &sc->vmx_intrs[0];
605 	type = INTR_TYPE_NET | INTR_MPSAFE;
606 
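	/*
	 * MSI-X resource ids start at 1, but the device expects zero-based
	 * interrupt indices, hence the "vmxi_rid - 1" assignments below.
	 */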
607 	for (i = 0; i < sc->vmx_ntxqueues; i++, intr++) {
608 		txq = &sc->vmx_txq[i];
609 		error = bus_setup_intr(dev, intr->vmxi_irq, type, NULL,
610 		     vmxnet3_txq_intr, txq, &intr->vmxi_handler);
611 		if (error)
612 			return (error);
613 		txq->vxtxq_intr_idx = intr->vmxi_rid - 1;
614 	}
615 
616 	for (i = 0; i < sc->vmx_nrxqueues; i++, intr++) {
617 		rxq = &sc->vmx_rxq[i];
618 		error = bus_setup_intr(dev, intr->vmxi_irq, type, NULL,
619 		    vmxnet3_rxq_intr, rxq, &intr->vmxi_handler);
620 		if (error)
621 			return (error);
622 		rxq->vxrxq_intr_idx = intr->vmxi_rid - 1;
623 	}
624 
625 	error = bus_setup_intr(dev, intr->vmxi_irq, type, NULL,
626 	    vmxnet3_event_intr, sc, &intr->vmxi_handler);
627 	if (error)
628 		return (error);
629 	sc->vmx_event_intr_idx = intr->vmxi_rid - 1;
630 
631 	return (0);
632 }
633 
634 static int
635 vmxnet3_setup_legacy_interrupt(struct vmxnet3_softc *sc)
636 {
637 	struct vmxnet3_interrupt *intr;
638 	int i, error;
639 
640 	intr = &sc->vmx_intrs[0];
641 	error = bus_setup_intr(sc->vmx_dev, intr->vmxi_irq,
642 	    INTR_TYPE_NET | INTR_MPSAFE, NULL, vmxnet3_legacy_intr, sc,
643 	    &intr->vmxi_handler);
644 
645 	for (i = 0; i < sc->vmx_ntxqueues; i++)
646 		sc->vmx_txq[i].vxtxq_intr_idx = 0;
647 	for (i = 0; i < sc->vmx_nrxqueues; i++)
648 		sc->vmx_rxq[i].vxrxq_intr_idx = 0;
649 	sc->vmx_event_intr_idx = 0;
650 
651 	return (error);
652 }
653 
654 /*
655  * XXX BMV Should probably reorganize the attach and just do
656  * this in vmxnet3_init_shared_data().
657  */
658 static void
659 vmxnet3_set_interrupt_idx(struct vmxnet3_softc *sc)
660 {
661 	struct vmxnet3_txqueue *txq;
662 	struct vmxnet3_txq_shared *txs;
663 	struct vmxnet3_rxqueue *rxq;
664 	struct vmxnet3_rxq_shared *rxs;
665 	int i;
666 
667 	sc->vmx_ds->evintr = sc->vmx_event_intr_idx;
668 
669 	for (i = 0; i < sc->vmx_ntxqueues; i++) {
670 		txq = &sc->vmx_txq[i];
671 		txs = txq->vxtxq_ts;
672 		txs->intr_idx = txq->vxtxq_intr_idx;
673 	}
674 
675 	for (i = 0; i < sc->vmx_nrxqueues; i++) {
676 		rxq = &sc->vmx_rxq[i];
677 		rxs = rxq->vxrxq_rs;
678 		rxs->intr_idx = rxq->vxrxq_intr_idx;
679 	}
680 }
681 
682 static int
683 vmxnet3_setup_interrupts(struct vmxnet3_softc *sc)
684 {
685 	int error;
686 
687 	error = vmxnet3_alloc_intr_resources(sc);
688 	if (error)
689 		return (error);
690 
691 	switch (sc->vmx_intr_type) {
692 	case VMXNET3_IT_MSIX:
693 		error = vmxnet3_setup_msix_interrupts(sc);
694 		break;
695 	case VMXNET3_IT_MSI:
696 	case VMXNET3_IT_LEGACY:
697 		error = vmxnet3_setup_legacy_interrupt(sc);
698 		break;
699 	default:
700 		panic("%s: invalid interrupt type %d", __func__,
701 		    sc->vmx_intr_type);
702 	}
703 
704 	if (error == 0)
705 		vmxnet3_set_interrupt_idx(sc);
706 
707 	return (error);
708 }
709 
710 static int
711 vmxnet3_alloc_interrupts(struct vmxnet3_softc *sc)
712 {
713 	device_t dev;
714 	uint32_t config;
715 	int error;
716 
717 	dev = sc->vmx_dev;
718 	config = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_INTRCFG);
719 
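	/*
	 * The low two bits of the returned configuration select the
	 * preferred interrupt type (auto, MSI-X, MSI, or legacy INTx);
	 * the next two bits select the interrupt masking mode.
	 */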
720 	sc->vmx_intr_type = config & 0x03;
721 	sc->vmx_intr_mask_mode = (config >> 2) & 0x03;
722 
723 	switch (sc->vmx_intr_type) {
724 	case VMXNET3_IT_AUTO:
725 		sc->vmx_intr_type = VMXNET3_IT_MSIX;
726 		/* FALLTHROUGH */
727 	case VMXNET3_IT_MSIX:
728 		error = vmxnet3_alloc_msix_interrupts(sc);
729 		if (error == 0)
730 			break;
731 		sc->vmx_intr_type = VMXNET3_IT_MSI;
732 		/* FALLTHROUGH */
733 	case VMXNET3_IT_MSI:
734 		error = vmxnet3_alloc_msi_interrupts(sc);
735 		if (error == 0)
736 			break;
737 		sc->vmx_intr_type = VMXNET3_IT_LEGACY;
738 		/* FALLTHROUGH */
739 	case VMXNET3_IT_LEGACY:
740 		error = vmxnet3_alloc_legacy_interrupts(sc);
741 		if (error == 0)
742 			break;
743 		/* FALLTHROUGH */
744 	default:
745 		sc->vmx_intr_type = -1;
746 		device_printf(dev, "cannot allocate any interrupt resources\n");
747 		return (ENXIO);
748 	}
749 
750 	return (error);
751 }
752 
753 static void
754 vmxnet3_free_interrupt(struct vmxnet3_softc *sc,
755     struct vmxnet3_interrupt *intr)
756 {
757 	device_t dev;
758 
759 	dev = sc->vmx_dev;
760 
761 	if (intr->vmxi_handler != NULL) {
762 		bus_teardown_intr(dev, intr->vmxi_irq, intr->vmxi_handler);
763 		intr->vmxi_handler = NULL;
764 	}
765 
766 	if (intr->vmxi_irq != NULL) {
767 		bus_release_resource(dev, SYS_RES_IRQ, intr->vmxi_rid,
768 		    intr->vmxi_irq);
769 		intr->vmxi_irq = NULL;
770 		intr->vmxi_rid = -1;
771 	}
772 }
773 
774 static void
775 vmxnet3_free_interrupts(struct vmxnet3_softc *sc)
776 {
777 	int i;
778 
779 	for (i = 0; i < sc->vmx_nintrs; i++)
780 		vmxnet3_free_interrupt(sc, &sc->vmx_intrs[i]);
781 
782 	if (sc->vmx_intr_type == VMXNET3_IT_MSI ||
783 	    sc->vmx_intr_type == VMXNET3_IT_MSIX)
784 		pci_release_msi(sc->vmx_dev);
785 }
786 
787 static int
788 vmxnet3_init_rxq(struct vmxnet3_softc *sc, int q)
789 {
790 	struct vmxnet3_rxqueue *rxq;
791 	struct vmxnet3_rxring *rxr;
792 	int i;
793 
794 	rxq = &sc->vmx_rxq[q];
795 
796 	snprintf(rxq->vxrxq_name, sizeof(rxq->vxrxq_name), "%s-rx%d",
797 	    device_get_nameunit(sc->vmx_dev), q);
798 	mtx_init(&rxq->vxrxq_mtx, rxq->vxrxq_name, NULL, MTX_DEF);
799 
800 	rxq->vxrxq_sc = sc;
801 	rxq->vxrxq_id = q;
802 
803 	for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
804 		rxr = &rxq->vxrxq_cmd_ring[i];
805 		rxr->vxrxr_rid = i;
806 		rxr->vxrxr_ndesc = sc->vmx_nrxdescs;
807 		rxr->vxrxr_rxbuf = malloc(rxr->vxrxr_ndesc *
808 		    sizeof(struct vmxnet3_rxbuf), M_DEVBUF, M_NOWAIT | M_ZERO);
809 		if (rxr->vxrxr_rxbuf == NULL)
810 			return (ENOMEM);
811 
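		/*
		 * The single completion ring services both command rings,
		 * so size it for the sum of their descriptors.
		 */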
812 		rxq->vxrxq_comp_ring.vxcr_ndesc += sc->vmx_nrxdescs;
813 	}
814 
815 	return (0);
816 }
817 
818 static int
819 vmxnet3_init_txq(struct vmxnet3_softc *sc, int q)
820 {
821 	struct vmxnet3_txqueue *txq;
822 	struct vmxnet3_txring *txr;
823 
824 	txq = &sc->vmx_txq[q];
825 	txr = &txq->vxtxq_cmd_ring;
826 
827 	snprintf(txq->vxtxq_name, sizeof(txq->vxtxq_name), "%s-tx%d",
828 	    device_get_nameunit(sc->vmx_dev), q);
829 	mtx_init(&txq->vxtxq_mtx, txq->vxtxq_name, NULL, MTX_DEF);
830 
831 	txq->vxtxq_sc = sc;
832 	txq->vxtxq_id = q;
833 
834 	txr->vxtxr_ndesc = sc->vmx_ntxdescs;
835 	txr->vxtxr_txbuf = malloc(txr->vxtxr_ndesc *
836 	    sizeof(struct vmxnet3_txbuf), M_DEVBUF, M_NOWAIT | M_ZERO);
837 	if (txr->vxtxr_txbuf == NULL)
838 		return (ENOMEM);
839 
840 	txq->vxtxq_comp_ring.vxcr_ndesc = sc->vmx_ntxdescs;
841 
842 	return (0);
843 }
844 
845 static int
846 vmxnet3_alloc_rxtx_queues(struct vmxnet3_softc *sc)
847 {
848 	int i, error;
849 
850 	sc->vmx_rxq = malloc(sizeof(struct vmxnet3_rxqueue) *
851 	    sc->vmx_nrxqueues, M_DEVBUF, M_NOWAIT | M_ZERO);
852 	sc->vmx_txq = malloc(sizeof(struct vmxnet3_txqueue) *
853 	    sc->vmx_ntxqueues, M_DEVBUF, M_NOWAIT | M_ZERO);
854 	if (sc->vmx_rxq == NULL || sc->vmx_txq == NULL)
855 		return (ENOMEM);
856 
857 	for (i = 0; i < sc->vmx_nrxqueues; i++) {
858 		error = vmxnet3_init_rxq(sc, i);
859 		if (error)
860 			return (error);
861 	}
862 
863 	for (i = 0; i < sc->vmx_ntxqueues; i++) {
864 		error = vmxnet3_init_txq(sc, i);
865 		if (error)
866 			return (error);
867 	}
868 
869 	return (0);
870 }
871 
872 static void
873 vmxnet3_destroy_rxq(struct vmxnet3_rxqueue *rxq)
874 {
875 	struct vmxnet3_rxring *rxr;
876 	int i;
877 
878 	rxq->vxrxq_sc = NULL;
879 	rxq->vxrxq_id = -1;
880 
881 	for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
882 		rxr = &rxq->vxrxq_cmd_ring[i];
883 
884 		if (rxr->vxrxr_rxbuf != NULL) {
885 			free(rxr->vxrxr_rxbuf, M_DEVBUF);
886 			rxr->vxrxr_rxbuf = NULL;
887 		}
888 	}
889 
890 	if (mtx_initialized(&rxq->vxrxq_mtx) != 0)
891 		mtx_destroy(&rxq->vxrxq_mtx);
892 }
893 
894 static void
895 vmxnet3_destroy_txq(struct vmxnet3_txqueue *txq)
896 {
897 	struct vmxnet3_txring *txr;
898 
899 	txr = &txq->vxtxq_cmd_ring;
900 
901 	txq->vxtxq_sc = NULL;
902 	txq->vxtxq_id = -1;
903 
904 	if (txr->vxtxr_txbuf != NULL) {
905 		free(txr->vxtxr_txbuf, M_DEVBUF);
906 		txr->vxtxr_txbuf = NULL;
907 	}
908 
909 	if (mtx_initialized(&txq->vxtxq_mtx) != 0)
910 		mtx_destroy(&txq->vxtxq_mtx);
911 }
912 
913 static void
914 vmxnet3_free_rxtx_queues(struct vmxnet3_softc *sc)
915 {
916 	int i;
917 
918 	if (sc->vmx_rxq != NULL) {
919 		for (i = 0; i < sc->vmx_nrxqueues; i++)
920 			vmxnet3_destroy_rxq(&sc->vmx_rxq[i]);
921 		free(sc->vmx_rxq, M_DEVBUF);
922 		sc->vmx_rxq = NULL;
923 	}
924 
925 	if (sc->vmx_txq != NULL) {
926 		for (i = 0; i < sc->vmx_ntxqueues; i++)
927 			vmxnet3_destroy_txq(&sc->vmx_txq[i]);
928 		free(sc->vmx_txq, M_DEVBUF);
929 		sc->vmx_txq = NULL;
930 	}
931 }
932 
933 static int
934 vmxnet3_alloc_shared_data(struct vmxnet3_softc *sc)
935 {
936 	device_t dev;
937 	uint8_t *kva;
938 	size_t size;
939 	int i, error;
940 
941 	dev = sc->vmx_dev;
942 
943 	size = sizeof(struct vmxnet3_driver_shared);
944 	error = vmxnet3_dma_malloc(sc, size, 1, &sc->vmx_ds_dma);
945 	if (error) {
946 		device_printf(dev, "cannot alloc shared memory\n");
947 		return (error);
948 	}
949 	sc->vmx_ds = (struct vmxnet3_driver_shared *) sc->vmx_ds_dma.dma_vaddr;
950 
951 	size = sc->vmx_ntxqueues * sizeof(struct vmxnet3_txq_shared) +
952 	    sc->vmx_nrxqueues * sizeof(struct vmxnet3_rxq_shared);
953 	error = vmxnet3_dma_malloc(sc, size, 128, &sc->vmx_qs_dma);
954 	if (error) {
955 		device_printf(dev, "cannot alloc queue shared memory\n");
956 		return (error);
957 	}
958 	sc->vmx_qs = (void *) sc->vmx_qs_dma.dma_vaddr;
959 	kva = sc->vmx_qs;
960 
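	/*
	 * Carve the per-queue shared areas out of the single allocation:
	 * all Tx queue structures first, followed by the Rx queues.
	 */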
961 	for (i = 0; i < sc->vmx_ntxqueues; i++) {
962 		sc->vmx_txq[i].vxtxq_ts = (struct vmxnet3_txq_shared *) kva;
963 		kva += sizeof(struct vmxnet3_txq_shared);
964 	}
965 	for (i = 0; i < sc->vmx_nrxqueues; i++) {
966 		sc->vmx_rxq[i].vxrxq_rs = (struct vmxnet3_rxq_shared *) kva;
967 		kva += sizeof(struct vmxnet3_rxq_shared);
968 	}
969 
970 	return (0);
971 }
972 
973 static void
974 vmxnet3_free_shared_data(struct vmxnet3_softc *sc)
975 {
976 
977 	if (sc->vmx_qs != NULL) {
978 		vmxnet3_dma_free(sc, &sc->vmx_qs_dma);
979 		sc->vmx_qs = NULL;
980 	}
981 
982 	if (sc->vmx_ds != NULL) {
983 		vmxnet3_dma_free(sc, &sc->vmx_ds_dma);
984 		sc->vmx_ds = NULL;
985 	}
986 }
987 
988 static int
989 vmxnet3_alloc_txq_data(struct vmxnet3_softc *sc)
990 {
991 	device_t dev;
992 	struct vmxnet3_txqueue *txq;
993 	struct vmxnet3_txring *txr;
994 	struct vmxnet3_comp_ring *txc;
995 	size_t descsz, compsz;
996 	int i, q, error;
997 
998 	dev = sc->vmx_dev;
999 
1000 	for (q = 0; q < sc->vmx_ntxqueues; q++) {
1001 		txq = &sc->vmx_txq[q];
1002 		txr = &txq->vxtxq_cmd_ring;
1003 		txc = &txq->vxtxq_comp_ring;
1004 
1005 		descsz = txr->vxtxr_ndesc * sizeof(struct vmxnet3_txdesc);
1006 		compsz = txr->vxtxr_ndesc * sizeof(struct vmxnet3_txcompdesc);
1007 
1008 		error = bus_dma_tag_create(bus_get_dma_tag(dev),
1009 		    1, 0,			/* alignment, boundary */
1010 		    BUS_SPACE_MAXADDR,		/* lowaddr */
1011 		    BUS_SPACE_MAXADDR,		/* highaddr */
1012 		    NULL, NULL,			/* filter, filterarg */
1013 		    VMXNET3_TSO_MAXSIZE,	/* maxsize */
1014 		    VMXNET3_TX_MAXSEGS,		/* nsegments */
1015 		    VMXNET3_TX_MAXSEGSIZE,	/* maxsegsize */
1016 		    0,				/* flags */
1017 		    NULL, NULL,			/* lockfunc, lockarg */
1018 		    &txr->vxtxr_txtag);
1019 		if (error) {
1020 			device_printf(dev,
1021 			    "unable to create Tx buffer tag for queue %d\n", q);
1022 			return (error);
1023 		}
1024 
1025 		error = vmxnet3_dma_malloc(sc, descsz, 512, &txr->vxtxr_dma);
1026 		if (error) {
1027 			device_printf(dev, "cannot alloc Tx descriptors for "
1028 			    "queue %d error %d\n", q, error);
1029 			return (error);
1030 		}
1031 		txr->vxtxr_txd =
1032 		    (struct vmxnet3_txdesc *) txr->vxtxr_dma.dma_vaddr;
1033 
1034 		error = vmxnet3_dma_malloc(sc, compsz, 512, &txc->vxcr_dma);
1035 		if (error) {
1036 			device_printf(dev, "cannot alloc Tx comp descriptors "
1037 			    "for queue %d error %d\n", q, error);
1038 			return (error);
1039 		}
1040 		txc->vxcr_u.txcd =
1041 		    (struct vmxnet3_txcompdesc *) txc->vxcr_dma.dma_vaddr;
1042 
1043 		for (i = 0; i < txr->vxtxr_ndesc; i++) {
1044 			error = bus_dmamap_create(txr->vxtxr_txtag, 0,
1045 			    &txr->vxtxr_txbuf[i].vtxb_dmamap);
1046 			if (error) {
1047 				device_printf(dev, "unable to create Tx buf "
1048 				    "dmamap for queue %d idx %d\n", q, i);
1049 				return (error);
1050 			}
1051 		}
1052 	}
1053 
1054 	return (0);
1055 }
1056 
1057 static void
1058 vmxnet3_free_txq_data(struct vmxnet3_softc *sc)
1059 {
1060 	device_t dev;
1061 	struct vmxnet3_txqueue *txq;
1062 	struct vmxnet3_txring *txr;
1063 	struct vmxnet3_comp_ring *txc;
1064 	struct vmxnet3_txbuf *txb;
1065 	int i, q;
1066 
1067 	dev = sc->vmx_dev;
1068 
1069 	for (q = 0; q < sc->vmx_ntxqueues; q++) {
1070 		txq = &sc->vmx_txq[q];
1071 		txr = &txq->vxtxq_cmd_ring;
1072 		txc = &txq->vxtxq_comp_ring;
1073 
1074 		for (i = 0; i < txr->vxtxr_ndesc; i++) {
1075 			txb = &txr->vxtxr_txbuf[i];
1076 			if (txb->vtxb_dmamap != NULL) {
1077 				bus_dmamap_destroy(txr->vxtxr_txtag,
1078 				    txb->vtxb_dmamap);
1079 				txb->vtxb_dmamap = NULL;
1080 			}
1081 		}
1082 
1083 		if (txc->vxcr_u.txcd != NULL) {
1084 			vmxnet3_dma_free(sc, &txc->vxcr_dma);
1085 			txc->vxcr_u.txcd = NULL;
1086 		}
1087 
1088 		if (txr->vxtxr_txd != NULL) {
1089 			vmxnet3_dma_free(sc, &txr->vxtxr_dma);
1090 			txr->vxtxr_txd = NULL;
1091 		}
1092 
1093 		if (txr->vxtxr_txtag != NULL) {
1094 			bus_dma_tag_destroy(txr->vxtxr_txtag);
1095 			txr->vxtxr_txtag = NULL;
1096 		}
1097 	}
1098 }
1099 
1100 static int
1101 vmxnet3_alloc_rxq_data(struct vmxnet3_softc *sc)
1102 {
1103 	device_t dev;
1104 	struct vmxnet3_rxqueue *rxq;
1105 	struct vmxnet3_rxring *rxr;
1106 	struct vmxnet3_comp_ring *rxc;
1107 	int descsz, compsz;
1108 	int i, j, q, error;
1109 
1110 	dev = sc->vmx_dev;
1111 
1112 	for (q = 0; q < sc->vmx_nrxqueues; q++) {
1113 		rxq = &sc->vmx_rxq[q];
1114 		rxc = &rxq->vxrxq_comp_ring;
1115 		compsz = 0;
1116 
1117 		for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
1118 			rxr = &rxq->vxrxq_cmd_ring[i];
1119 
1120 			descsz = rxr->vxrxr_ndesc *
1121 			    sizeof(struct vmxnet3_rxdesc);
1122 			compsz += rxr->vxrxr_ndesc *
1123 			    sizeof(struct vmxnet3_rxcompdesc);
1124 
1125 			error = bus_dma_tag_create(bus_get_dma_tag(dev),
1126 			    1, 0,		/* alignment, boundary */
1127 			    BUS_SPACE_MAXADDR,	/* lowaddr */
1128 			    BUS_SPACE_MAXADDR,	/* highaddr */
1129 			    NULL, NULL,		/* filter, filterarg */
1130 			    MJUMPAGESIZE,	/* maxsize */
1131 			    1,			/* nsegments */
1132 			    MJUMPAGESIZE,	/* maxsegsize */
1133 			    0,			/* flags */
1134 			    NULL, NULL,		/* lockfunc, lockarg */
1135 			    &rxr->vxrxr_rxtag);
1136 			if (error) {
1137 				device_printf(dev,
1138 				    "unable to create Rx buffer tag for "
1139 				    "queue %d\n", q);
1140 				return (error);
1141 			}
1142 
1143 			error = vmxnet3_dma_malloc(sc, descsz, 512,
1144 			    &rxr->vxrxr_dma);
1145 			if (error) {
1146 				device_printf(dev, "cannot allocate Rx "
1147 				    "descriptors for queue %d/%d error %d\n",
1148 				    q, i, error);
1149 				return (error);
1150 			}
1151 			rxr->vxrxr_rxd =
1152 			    (struct vmxnet3_rxdesc *) rxr->vxrxr_dma.dma_vaddr;
1153 		}
1154 
1155 		error = vmxnet3_dma_malloc(sc, compsz, 512, &rxc->vxcr_dma);
1156 		if (error) {
1157 			device_printf(dev, "cannot alloc Rx comp descriptors "
1158 			    "for queue %d error %d\n", q, error);
1159 			return (error);
1160 		}
1161 		rxc->vxcr_u.rxcd =
1162 		    (struct vmxnet3_rxcompdesc *) rxc->vxcr_dma.dma_vaddr;
1163 
1164 		for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
1165 			rxr = &rxq->vxrxq_cmd_ring[i];
1166 
1167 			error = bus_dmamap_create(rxr->vxrxr_rxtag, 0,
1168 			    &rxr->vxrxr_spare_dmap);
1169 			if (error) {
1170 				device_printf(dev, "unable to create spare "
1171 				    "dmamap for queue %d/%d error %d\n",
1172 				    q, i, error);
1173 				return (error);
1174 			}
1175 
1176 			for (j = 0; j < rxr->vxrxr_ndesc; j++) {
1177 				error = bus_dmamap_create(rxr->vxrxr_rxtag, 0,
1178 				    &rxr->vxrxr_rxbuf[j].vrxb_dmamap);
1179 				if (error) {
1180 					device_printf(dev, "unable to create "
1181 					    "dmamap for queue %d/%d slot %d "
1182 					    "error %d\n",
1183 					    q, i, j, error);
1184 					return (error);
1185 				}
1186 			}
1187 		}
1188 	}
1189 
1190 	return (0);
1191 }
1192 
1193 static void
1194 vmxnet3_free_rxq_data(struct vmxnet3_softc *sc)
1195 {
1196 	device_t dev;
1197 	struct vmxnet3_rxqueue *rxq;
1198 	struct vmxnet3_rxring *rxr;
1199 	struct vmxnet3_comp_ring *rxc;
1200 	struct vmxnet3_rxbuf *rxb;
1201 	int i, j, q;
1202 
1203 	dev = sc->vmx_dev;
1204 
1205 	for (q = 0; q < sc->vmx_nrxqueues; q++) {
1206 		rxq = &sc->vmx_rxq[q];
1207 		rxc = &rxq->vxrxq_comp_ring;
1208 
1209 		for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
1210 			rxr = &rxq->vxrxq_cmd_ring[i];
1211 
1212 			if (rxr->vxrxr_spare_dmap != NULL) {
1213 				bus_dmamap_destroy(rxr->vxrxr_rxtag,
1214 				    rxr->vxrxr_spare_dmap);
1215 				rxr->vxrxr_spare_dmap = NULL;
1216 			}
1217 
1218 			for (j = 0; j < rxr->vxrxr_ndesc; j++) {
1219 				rxb = &rxr->vxrxr_rxbuf[j];
1220 				if (rxb->vrxb_dmamap != NULL) {
1221 					bus_dmamap_destroy(rxr->vxrxr_rxtag,
1222 					    rxb->vrxb_dmamap);
1223 					rxb->vrxb_dmamap = NULL;
1224 				}
1225 			}
1226 		}
1227 
1228 		if (rxc->vxcr_u.rxcd != NULL) {
1229 			vmxnet3_dma_free(sc, &rxc->vxcr_dma);
1230 			rxc->vxcr_u.rxcd = NULL;
1231 		}
1232 
1233 		for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
1234 			rxr = &rxq->vxrxq_cmd_ring[i];
1235 
1236 			if (rxr->vxrxr_rxd != NULL) {
1237 				vmxnet3_dma_free(sc, &rxr->vxrxr_dma);
1238 				rxr->vxrxr_rxd = NULL;
1239 			}
1240 
1241 			if (rxr->vxrxr_rxtag != NULL) {
1242 				bus_dma_tag_destroy(rxr->vxrxr_rxtag);
1243 				rxr->vxrxr_rxtag = NULL;
1244 			}
1245 		}
1246 	}
1247 }
1248 
1249 static int
1250 vmxnet3_alloc_queue_data(struct vmxnet3_softc *sc)
1251 {
1252 	int error;
1253 
1254 	error = vmxnet3_alloc_txq_data(sc);
1255 	if (error)
1256 		return (error);
1257 
1258 	error = vmxnet3_alloc_rxq_data(sc);
1259 	if (error)
1260 		return (error);
1261 
1262 	return (0);
1263 }
1264 
1265 static void
1266 vmxnet3_free_queue_data(struct vmxnet3_softc *sc)
1267 {
1268 
1269 	if (sc->vmx_rxq != NULL)
1270 		vmxnet3_free_rxq_data(sc);
1271 
1272 	if (sc->vmx_txq != NULL)
1273 		vmxnet3_free_txq_data(sc);
1274 }
1275 
1276 static int
1277 vmxnet3_alloc_mcast_table(struct vmxnet3_softc *sc)
1278 {
1279 	int error;
1280 
1281 	error = vmxnet3_dma_malloc(sc, VMXNET3_MULTICAST_MAX * ETHER_ADDR_LEN,
1282 	    32, &sc->vmx_mcast_dma);
1283 	if (error)
1284 		device_printf(sc->vmx_dev, "unable to alloc multicast table\n");
1285 	else
1286 		sc->vmx_mcast = sc->vmx_mcast_dma.dma_vaddr;
1287 
1288 	return (error);
1289 }
1290 
1291 static void
1292 vmxnet3_free_mcast_table(struct vmxnet3_softc *sc)
1293 {
1294 
1295 	if (sc->vmx_mcast != NULL) {
1296 		vmxnet3_dma_free(sc, &sc->vmx_mcast_dma);
1297 		sc->vmx_mcast = NULL;
1298 	}
1299 }
1300 
1301 static void
1302 vmxnet3_init_shared_data(struct vmxnet3_softc *sc)
1303 {
1304 	struct vmxnet3_driver_shared *ds;
1305 	struct vmxnet3_txqueue *txq;
1306 	struct vmxnet3_txq_shared *txs;
1307 	struct vmxnet3_rxqueue *rxq;
1308 	struct vmxnet3_rxq_shared *rxs;
1309 	int i;
1310 
1311 	ds = sc->vmx_ds;
1312 
1313 	/*
1314 	 * Initialize fields of the shared data that remain the same across
1315 	 * reinits. Note the shared data is zeroed when allocated.
1316 	 */
1317 
1318 	ds->magic = VMXNET3_REV1_MAGIC;
1319 
1320 	/* DriverInfo */
1321 	ds->version = VMXNET3_DRIVER_VERSION;
1322 	ds->guest = VMXNET3_GOS_FREEBSD |
1323 #ifdef __LP64__
1324 	    VMXNET3_GOS_64BIT;
1325 #else
1326 	    VMXNET3_GOS_32BIT;
1327 #endif
1328 	ds->vmxnet3_revision = 1;
1329 	ds->upt_version = 1;
1330 
1331 	/* Misc. conf */
1332 	ds->driver_data = vtophys(sc);
1333 	ds->driver_data_len = sizeof(struct vmxnet3_softc);
1334 	ds->queue_shared = sc->vmx_qs_dma.dma_paddr;
1335 	ds->queue_shared_len = sc->vmx_qs_dma.dma_size;
1336 	ds->nrxsg_max = sc->vmx_max_rxsegs;
1337 
1338 	/* Interrupt control. */
1339 	ds->automask = sc->vmx_intr_mask_mode == VMXNET3_IMM_AUTO;
1340 	ds->nintr = sc->vmx_nintrs;
1341 	ds->evintr = sc->vmx_event_intr_idx;
1342 	ds->ictrl = VMXNET3_ICTRL_DISABLE_ALL;
1343 
1344 	for (i = 0; i < sc->vmx_nintrs; i++)
1345 		ds->modlevel[i] = UPT1_IMOD_ADAPTIVE;
1346 
1347 	/* Receive filter. */
1348 	ds->mcast_table = sc->vmx_mcast_dma.dma_paddr;
1349 	ds->mcast_tablelen = sc->vmx_mcast_dma.dma_size;
1350 
1351 	/* Tx queues */
1352 	for (i = 0; i < sc->vmx_ntxqueues; i++) {
1353 		txq = &sc->vmx_txq[i];
1354 		txs = txq->vxtxq_ts;
1355 
1356 		txs->cmd_ring = txq->vxtxq_cmd_ring.vxtxr_dma.dma_paddr;
1357 		txs->cmd_ring_len = txq->vxtxq_cmd_ring.vxtxr_ndesc;
1358 		txs->comp_ring = txq->vxtxq_comp_ring.vxcr_dma.dma_paddr;
1359 		txs->comp_ring_len = txq->vxtxq_comp_ring.vxcr_ndesc;
1360 		txs->driver_data = vtophys(txq);
1361 		txs->driver_data_len = sizeof(struct vmxnet3_txqueue);
1362 	}
1363 
1364 	/* Rx queues */
1365 	for (i = 0; i < sc->vmx_nrxqueues; i++) {
1366 		rxq = &sc->vmx_rxq[i];
1367 		rxs = rxq->vxrxq_rs;
1368 
1369 		rxs->cmd_ring[0] = rxq->vxrxq_cmd_ring[0].vxrxr_dma.dma_paddr;
1370 		rxs->cmd_ring_len[0] = rxq->vxrxq_cmd_ring[0].vxrxr_ndesc;
1371 		rxs->cmd_ring[1] = rxq->vxrxq_cmd_ring[1].vxrxr_dma.dma_paddr;
1372 		rxs->cmd_ring_len[1] = rxq->vxrxq_cmd_ring[1].vxrxr_ndesc;
1373 		rxs->comp_ring = rxq->vxrxq_comp_ring.vxcr_dma.dma_paddr;
1374 		rxs->comp_ring_len = rxq->vxrxq_comp_ring.vxcr_ndesc;
1375 		rxs->driver_data = vtophys(rxq);
1376 		rxs->driver_data_len = sizeof(struct vmxnet3_rxqueue);
1377 	}
1378 }
1379 
1380 static void
1381 vmxnet3_reinit_interface(struct vmxnet3_softc *sc)
1382 {
1383 	struct ifnet *ifp;
1384 
1385 	ifp = sc->vmx_ifp;
1386 
1387 	/* Use the current MAC address. */
1388 	bcopy(IF_LLADDR(sc->vmx_ifp), sc->vmx_lladdr, ETHER_ADDR_LEN);
1389 	vmxnet3_set_lladdr(sc);
1390 
1391 	ifp->if_hwassist = 0;
1392 	if (ifp->if_capenable & IFCAP_TXCSUM)
1393 		ifp->if_hwassist |= VMXNET3_CSUM_OFFLOAD;
1394 	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
1395 		ifp->if_hwassist |= VMXNET3_CSUM_OFFLOAD_IPV6;
1396 	if (ifp->if_capenable & IFCAP_TSO4)
1397 		ifp->if_hwassist |= CSUM_TSO;
1398 	if (ifp->if_capenable & IFCAP_TSO6)
1399 		ifp->if_hwassist |= CSUM_TSO; /* No CSUM_TSO_IPV6. */
1400 }
1401 
1402 static void
1403 vmxnet3_reinit_shared_data(struct vmxnet3_softc *sc)
1404 {
1405 	struct ifnet *ifp;
1406 	struct vmxnet3_driver_shared *ds;
1407 
1408 	ifp = sc->vmx_ifp;
1409 	ds = sc->vmx_ds;
1410 
1411 	ds->upt_features = 0;
1412 	if (ifp->if_capenable & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6))
1413 		ds->upt_features |= UPT1_F_CSUM;
1414 	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1415 		ds->upt_features |= UPT1_F_VLAN;
1416 	if (ifp->if_capenable & IFCAP_LRO)
1417 		ds->upt_features |= UPT1_F_LRO;
1418 
1419 	ds->mtu = ifp->if_mtu;
1420 	ds->ntxqueue = sc->vmx_ntxqueues;
1421 	ds->nrxqueue = sc->vmx_nrxqueues;
1422 
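	/* Pass the physical address of the shared data to the device. */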
1423 	vmxnet3_write_bar1(sc, VMXNET3_BAR1_DSL, sc->vmx_ds_dma.dma_paddr);
1424 	vmxnet3_write_bar1(sc, VMXNET3_BAR1_DSH,
1425 	    (uint64_t) sc->vmx_ds_dma.dma_paddr >> 32);
1426 }
1427 
1428 static int
1429 vmxnet3_alloc_data(struct vmxnet3_softc *sc)
1430 {
1431 	int error;
1432 
1433 	error = vmxnet3_alloc_shared_data(sc);
1434 	if (error)
1435 		return (error);
1436 
1437 	error = vmxnet3_alloc_queue_data(sc);
1438 	if (error)
1439 		return (error);
1440 
1441 	error = vmxnet3_alloc_mcast_table(sc);
1442 	if (error)
1443 		return (error);
1444 
1445 	vmxnet3_init_shared_data(sc);
1446 
1447 	return (0);
1448 }
1449 
1450 static void
1451 vmxnet3_free_data(struct vmxnet3_softc *sc)
1452 {
1453 
1454 	vmxnet3_free_mcast_table(sc);
1455 	vmxnet3_free_queue_data(sc);
1456 	vmxnet3_free_shared_data(sc);
1457 }
1458 
1459 static int
1460 vmxnet3_setup_interface(struct vmxnet3_softc *sc)
1461 {
1462 	device_t dev;
1463 	struct ifnet *ifp;
1464 
1465 	dev = sc->vmx_dev;
1466 
1467 	ifp = sc->vmx_ifp = if_alloc(IFT_ETHER);
1468 	if (ifp == NULL) {
1469 		device_printf(dev, "cannot allocate ifnet structure\n");
1470 		return (ENOSPC);
1471 	}
1472 
1473 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1474 	ifp->if_baudrate = IF_Gbps(10);
1475 	ifp->if_softc = sc;
1476 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1477 	ifp->if_init = vmxnet3_init;
1478 	ifp->if_ioctl = vmxnet3_ioctl;
1479 	ifp->if_start = vmxnet3_start;
1480 	ifp->if_snd.ifq_drv_maxlen = sc->vmx_ntxdescs - 1;
1481 	IFQ_SET_MAXLEN(&ifp->if_snd, sc->vmx_ntxdescs - 1);
1482 	IFQ_SET_READY(&ifp->if_snd);
1483 
1484 	vmxnet3_get_lladdr(sc);
1485 	ether_ifattach(ifp, sc->vmx_lladdr);
1486 
1487 	ifp->if_capabilities |= IFCAP_RXCSUM | IFCAP_TXCSUM;
1488 	ifp->if_capabilities |= IFCAP_RXCSUM_IPV6 | IFCAP_TXCSUM_IPV6;
1489 	ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_TSO6;
1490 	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |
1491 	    IFCAP_VLAN_HWCSUM;
1492 	ifp->if_capenable = ifp->if_capabilities;
1493 
1494 	/* These capabilities are not enabled by default. */
1495 	ifp->if_capabilities |= IFCAP_LRO | IFCAP_VLAN_HWFILTER;
1496 
1497 	sc->vmx_vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
1498 	    vmxnet3_register_vlan, sc, EVENTHANDLER_PRI_FIRST);
1499 	sc->vmx_vlan_detach = EVENTHANDLER_REGISTER(vlan_config,
1500 	    vmxnet3_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);
1501 
1502 	ifmedia_init(&sc->vmx_media, 0, vmxnet3_media_change,
1503 	    vmxnet3_media_status);
1504 	ifmedia_add(&sc->vmx_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1505 	ifmedia_set(&sc->vmx_media, IFM_ETHER | IFM_AUTO);
1506 
1507 	return (0);
1508 }
1509 
1510 static void
1511 vmxnet3_evintr(struct vmxnet3_softc *sc)
1512 {
1513 	device_t dev;
1514 	struct ifnet *ifp;
1515 	struct vmxnet3_txq_shared *ts;
1516 	struct vmxnet3_rxq_shared *rs;
1517 	uint32_t event;
1518 	int reset;
1519 
1520 	dev = sc->vmx_dev;
1521 	ifp = sc->vmx_ifp;
1522 	reset = 0;
1523 
1524 	VMXNET3_CORE_LOCK(sc);
1525 
1526 	/* Clear events. */
1527 	event = sc->vmx_ds->event;
1528 	vmxnet3_write_bar1(sc, VMXNET3_BAR1_EVENT, event);
1529 
1530 	if (event & VMXNET3_EVENT_LINK)
1531 		vmxnet3_link_status(sc);
1532 
1533 	if (event & (VMXNET3_EVENT_TQERROR | VMXNET3_EVENT_RQERROR)) {
1534 		reset = 1;
1535 		vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_STATUS);
1536 		ts = sc->vmx_txq[0].vxtxq_ts;
1537 		if (ts->stopped != 0)
1538 			device_printf(dev, "Tx queue error %#x\n", ts->error);
1539 		rs = sc->vmx_rxq[0].vxrxq_rs;
1540 		if (rs->stopped != 0)
1541 			device_printf(dev, "Rx queue error %#x\n", rs->error);
1542 		device_printf(dev, "Rx/Tx queue error event ... resetting\n");
1543 	}
1544 
1545 	if (event & VMXNET3_EVENT_DIC)
1546 		device_printf(dev, "device implementation change event\n");
1547 	if (event & VMXNET3_EVENT_DEBUG)
1548 		device_printf(dev, "debug event\n");
1549 
1550 	if (reset != 0) {
1551 		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1552 		vmxnet3_init_locked(sc);
1553 	}
1554 
1555 	VMXNET3_CORE_UNLOCK(sc);
1556 }
1557 
1558 static void
1559 vmxnet3_txq_eof(struct vmxnet3_txqueue *txq)
1560 {
1561 	struct vmxnet3_softc *sc;
1562 	struct ifnet *ifp;
1563 	struct vmxnet3_txring *txr;
1564 	struct vmxnet3_comp_ring *txc;
1565 	struct vmxnet3_txcompdesc *txcd;
1566 	struct vmxnet3_txbuf *txb;
1567 	u_int sop;
1568 
1569 	sc = txq->vxtxq_sc;
1570 	ifp = sc->vmx_ifp;
1571 	txr = &txq->vxtxq_cmd_ring;
1572 	txc = &txq->vxtxq_comp_ring;
1573 
1574 	VMXNET3_TXQ_LOCK_ASSERT(txq);
1575 
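	/*
	 * A completion descriptor is valid only when its generation bit
	 * matches the ring's current generation; the generation flips
	 * each time the completion ring wraps.
	 */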
1576 	for (;;) {
1577 		txcd = &txc->vxcr_u.txcd[txc->vxcr_next];
1578 		if (txcd->gen != txc->vxcr_gen)
1579 			break;
1580 		vmxnet3_barrier(sc, VMXNET3_BARRIER_RD);
1581 
1582 		if (++txc->vxcr_next == txc->vxcr_ndesc) {
1583 			txc->vxcr_next = 0;
1584 			txc->vxcr_gen ^= 1;
1585 		}
1586 
1587 		sop = txr->vxtxr_next;
1588 		txb = &txr->vxtxr_txbuf[sop];
1589 
1590 		if (txb->vtxb_m != NULL) {
1591 			bus_dmamap_sync(txr->vxtxr_txtag, txb->vtxb_dmamap,
1592 			    BUS_DMASYNC_POSTWRITE);
1593 			bus_dmamap_unload(txr->vxtxr_txtag, txb->vtxb_dmamap);
1594 
1595 			m_freem(txb->vtxb_m);
1596 			txb->vtxb_m = NULL;
1597 
1598 			ifp->if_opackets++;
1599 		}
1600 
1601 		txr->vxtxr_next = (txcd->eop_idx + 1) % txr->vxtxr_ndesc;
1602 	}
1603 
1604 	if (txr->vxtxr_head == txr->vxtxr_next)
1605 		txq->vxtxq_watchdog = 0;
1606 }
1607 
1608 static int
1609 vmxnet3_newbuf(struct vmxnet3_softc *sc, struct vmxnet3_rxring *rxr)
1610 {
1611 	struct ifnet *ifp;
1612 	struct mbuf *m;
1613 	struct vmxnet3_rxdesc *rxd;
1614 	struct vmxnet3_rxbuf *rxb;
1615 	bus_dma_tag_t tag;
1616 	bus_dmamap_t dmap;
1617 	bus_dma_segment_t segs[1];
1618 	int idx, clsize, btype, flags, nsegs, error;
1619 
1620 	ifp = sc->vmx_ifp;
1621 	tag = rxr->vxrxr_rxtag;
1622 	dmap = rxr->vxrxr_spare_dmap;
1623 	idx = rxr->vxrxr_fill;
1624 	rxd = &rxr->vxrxr_rxd[idx];
1625 	rxb = &rxr->vxrxr_rxbuf[idx];
1626 
1627 #ifdef VMXNET3_FAILPOINTS
1628 	KFAIL_POINT_CODE(VMXNET3_FP, newbuf, return ENOBUFS);
1629 	if (rxr->vxrxr_rid != 0)
1630 		KFAIL_POINT_CODE(VMXNET3_FP, newbuf_body_only, return ENOBUFS);
1631 #endif
1632 
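	/*
	 * Descriptors at the start of a frame chain (ring 0, chain-aligned
	 * index) get a regular cluster as the head buffer; all others get
	 * page-sized clusters as body buffers for the rest of a large frame.
	 */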
1633 	if (rxr->vxrxr_rid == 0 && (idx % sc->vmx_rx_max_chain) == 0) {
1634 		flags = M_PKTHDR;
1635 		clsize = MCLBYTES;
1636 		btype = VMXNET3_BTYPE_HEAD;
1637 	} else {
1638 #if __FreeBSD_version < 902001
1639 		/*
1640 		 * These mbufs will never be used for the start of a frame.
1641 		 * Roughly prior to branching releng/9.2, the load_mbuf_sg()
1642 		 * required the mbuf to always be a packet header. Avoid
1643 		 * unnecessary mbuf initialization in newer versions where
1644 		 * that is not the case.
1645 		 */
1646 		flags = M_PKTHDR;
1647 #else
1648 		flags = 0;
1649 #endif
1650 		clsize = MJUMPAGESIZE;
1651 		btype = VMXNET3_BTYPE_BODY;
1652 	}
1653 
1654 	m = m_getjcl(M_NOWAIT, MT_DATA, flags, clsize);
1655 	if (m == NULL) {
1656 		sc->vmx_stats.vmst_mgetcl_failed++;
1657 		return (ENOBUFS);
1658 	}
1659 
1660 	if (btype == VMXNET3_BTYPE_HEAD) {
1661 		m->m_len = m->m_pkthdr.len = clsize;
1662 		m_adj(m, ETHER_ALIGN);
1663 	} else
1664 		m->m_len = clsize;
1665 
1666 	error = bus_dmamap_load_mbuf_sg(tag, dmap, m, &segs[0], &nsegs,
1667 	    BUS_DMA_NOWAIT);
1668 	if (error) {
1669 		m_freem(m);
1670 		sc->vmx_stats.vmst_mbuf_load_failed++;
1671 		return (error);
1672 	}
1673 	KASSERT(nsegs == 1,
1674 	    ("%s: mbuf %p with too many segments %d", __func__, m, nsegs));
1675 #if __FreeBSD_version < 902001
1676 	if (btype == VMXNET3_BTYPE_BODY)
1677 		m->m_flags &= ~M_PKTHDR;
1678 #endif
1679 
1680 	if (rxb->vrxb_m != NULL) {
1681 		bus_dmamap_sync(tag, rxb->vrxb_dmamap, BUS_DMASYNC_POSTREAD);
1682 		bus_dmamap_unload(tag, rxb->vrxb_dmamap);
1683 	}
1684 
1685 	rxr->vxrxr_spare_dmap = rxb->vrxb_dmamap;
1686 	rxb->vrxb_dmamap = dmap;
1687 	rxb->vrxb_m = m;
1688 
1689 	rxd->addr = segs[0].ds_addr;
1690 	rxd->len = segs[0].ds_len;
1691 	rxd->btype = btype;
1692 	rxd->gen = rxr->vxrxr_gen;
1693 
1694 	vmxnet3_rxr_increment_fill(rxr);
1695 	return (0);
1696 }
1697 
1698 static void
1699 vmxnet3_rxq_eof_discard(struct vmxnet3_rxqueue *rxq,
1700     struct vmxnet3_rxring *rxr, int idx)
1701 {
1702 	struct vmxnet3_rxdesc *rxd;
1703 
1704 	rxd = &rxr->vxrxr_rxd[idx];
1705 	rxd->gen = rxr->vxrxr_gen;
1706 	vmxnet3_rxr_increment_fill(rxr);
1707 }
1708 
1709 static void
1710 vmxnet3_rxq_discard_chain(struct vmxnet3_rxqueue *rxq)
1711 {
1712 	struct vmxnet3_softc *sc;
1713 	struct vmxnet3_rxring *rxr;
1714 	struct vmxnet3_comp_ring *rxc;
1715 	struct vmxnet3_rxcompdesc *rxcd;
1716 	int idx, eof;
1717 
1718 	sc = rxq->vxrxq_sc;
1719 	rxc = &rxq->vxrxq_comp_ring;
1720 
1721 	do {
1722 		rxcd = &rxc->vxcr_u.rxcd[rxc->vxcr_next];
1723 		if (rxcd->gen != rxc->vxcr_gen)
1724 			break;		/* Not expected. */
1725 		vmxnet3_barrier(sc, VMXNET3_BARRIER_RD);
1726 
1727 		if (++rxc->vxcr_next == rxc->vxcr_ndesc) {
1728 			rxc->vxcr_next = 0;
1729 			rxc->vxcr_gen ^= 1;
1730 		}
1731 
1732 		idx = rxcd->rxd_idx;
1733 		eof = rxcd->eop;
1734 		if (rxcd->qid < sc->vmx_nrxqueues)
1735 			rxr = &rxq->vxrxq_cmd_ring[0];
1736 		else
1737 			rxr = &rxq->vxrxq_cmd_ring[1];
1738 		vmxnet3_rxq_eof_discard(rxq, rxr, idx);
1739 	} while (!eof);
1740 }
1741 
1742 static void
1743 vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *rxcd, struct mbuf *m)
1744 {
1745 
1746 	if (rxcd->ipv4) {
1747 		m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
1748 		if (rxcd->ipcsum_ok)
1749 			m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1750 	}
1751 
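	/*
	 * For a non-fragmented TCP or UDP packet with a good checksum,
	 * report the data checksum as fully verified (0xFFFF is the
	 * conventional "pseudo-header already checked" value).
	 */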
1752 	if (!rxcd->fragment) {
1753 		if (rxcd->csum_ok && (rxcd->tcp || rxcd->udp)) {
1754 			m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
1755 			    CSUM_PSEUDO_HDR;
1756 			m->m_pkthdr.csum_data = 0xFFFF;
1757 		}
1758 	}
1759 }
1760 
1761 static void
1762 vmxnet3_rxq_input(struct vmxnet3_rxqueue *rxq,
1763     struct vmxnet3_rxcompdesc *rxcd, struct mbuf *m)
1764 {
1765 	struct vmxnet3_softc *sc;
1766 	struct ifnet *ifp;
1767 
1768 	sc = rxq->vxrxq_sc;
1769 	ifp = sc->vmx_ifp;
1770 
1771 	if (rxcd->error) {
1772 		ifp->if_ierrors++;
1773 		m_freem(m);
1774 		return;
1775 	}
1776 
1777 	if (!rxcd->no_csum)
1778 		vmxnet3_rx_csum(rxcd, m);
1779 	if (rxcd->vlan) {
1780 		m->m_flags |= M_VLANTAG;
1781 		m->m_pkthdr.ether_vtag = rxcd->vtag;
1782 	}
1783 
1784 	ifp->if_ipackets++;
1785 	VMXNET3_RXQ_UNLOCK(rxq);
1786 	(*ifp->if_input)(ifp, m);
1787 	VMXNET3_RXQ_LOCK(rxq);
1788 }
1789 
1790 static void
1791 vmxnet3_rxq_eof(struct vmxnet3_rxqueue *rxq)
1792 {
1793 	struct vmxnet3_softc *sc;
1794 	struct ifnet *ifp;
1795 	struct vmxnet3_rxring *rxr;
1796 	struct vmxnet3_comp_ring *rxc;
1797 	struct vmxnet3_rxdesc *rxd;
1798 	struct vmxnet3_rxcompdesc *rxcd;
1799 	struct mbuf *m, *m_head, *m_tail;
1800 	int idx, length;
1801 
1802 	sc = rxq->vxrxq_sc;
1803 	ifp = sc->vmx_ifp;
1804 	rxc = &rxq->vxrxq_comp_ring;
1805 	m_head = m_tail = NULL;
1806 
1807 	VMXNET3_RXQ_LOCK_ASSERT(rxq);
1808 
1809 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1810 		return;
1811 
1812 	for (;;) {
1813 		rxcd = &rxc->vxcr_u.rxcd[rxc->vxcr_next];
1814 		if (rxcd->gen != rxc->vxcr_gen)
1815 			break;
1816 		vmxnet3_barrier(sc, VMXNET3_BARRIER_RD);
1817 
1818 		if (++rxc->vxcr_next == rxc->vxcr_ndesc) {
1819 			rxc->vxcr_next = 0;
1820 			rxc->vxcr_gen ^= 1;
1821 		}
1822 
1823 		idx = rxcd->rxd_idx;
1824 		length = rxcd->len;
1825 		if (rxcd->qid < sc->vmx_nrxqueues)
1826 			rxr = &rxq->vxrxq_cmd_ring[0];
1827 		else
1828 			rxr = &rxq->vxrxq_cmd_ring[1];
1829 		rxd = &rxr->vxrxr_rxd[idx];
1830 
1831 		m = rxr->vxrxr_rxbuf[idx].vrxb_m;
1832 		KASSERT(m != NULL, ("%s: queue %d idx %d without mbuf",
1833 		    __func__, rxcd->qid, idx));
1834 
1835 		/*
1836 		 * The host may skip descriptors. We detect this when this
1837 		 * descriptor does not match the previous fill index. Catch
1838 		 * up with the host now.
1839 		 */
1840 		if (__predict_false(rxr->vxrxr_fill != idx)) {
1841 			while (rxr->vxrxr_fill != idx) {
1842 				rxr->vxrxr_rxd[rxr->vxrxr_fill].gen =
1843 				    rxr->vxrxr_gen;
1844 				vmxnet3_rxr_increment_fill(rxr);
1845 			}
1846 		}
1847 
1848 		if (rxcd->sop) {
1849 			KASSERT(rxd->btype == VMXNET3_BTYPE_HEAD,
1850 			    ("%s: start of frame w/o head buffer", __func__));
1851 			KASSERT(rxr == &rxq->vxrxq_cmd_ring[0],
1852 			    ("%s: start of frame not in ring 0", __func__));
1853 			KASSERT((idx % sc->vmx_rx_max_chain) == 0,
1854 			    ("%s: start of frame at unexpected index %d (%d)",
1855 			     __func__, idx, sc->vmx_rx_max_chain));
1856 			KASSERT(m_head == NULL,
1857 			    ("%s: duplicate start of frame?", __func__));
1858 
1859 			if (length == 0) {
1860 				/* Just ignore this descriptor. */
1861 				vmxnet3_rxq_eof_discard(rxq, rxr, idx);
1862 				goto nextp;
1863 			}
1864 
1865 			if (vmxnet3_newbuf(sc, rxr) != 0) {
1866 				ifp->if_iqdrops++;
1867 				vmxnet3_rxq_eof_discard(rxq, rxr, idx);
1868 				if (!rxcd->eop)
1869 					vmxnet3_rxq_discard_chain(rxq);
1870 				goto nextp;
1871 			}
1872 
1873 			m->m_pkthdr.rcvif = ifp;
1874 			m->m_pkthdr.len = m->m_len = length;
1875 			m->m_pkthdr.csum_flags = 0;
1876 			m_head = m_tail = m;
1877 
1878 		} else {
1879 			KASSERT(rxd->btype == VMXNET3_BTYPE_BODY,
1880 			    ("%s: non-start of frame w/o body buffer", __func__));
1881 			KASSERT(m_head != NULL,
1882 			    ("%s: frame not started?", __func__));
1883 
1884 			if (vmxnet3_newbuf(sc, rxr) != 0) {
1885 				ifp->if_iqdrops++;
1886 				vmxnet3_rxq_eof_discard(rxq, rxr, idx);
1887 				if (!rxcd->eop)
1888 					vmxnet3_rxq_discard_chain(rxq);
1889 				m_freem(m_head);
1890 				m_head = m_tail = NULL;
1891 				goto nextp;
1892 			}
1893 
1894 			m->m_len = length;
1895 			m_head->m_pkthdr.len += length;
1896 			m_tail->m_next = m;
1897 			m_tail = m;
1898 		}
1899 
1900 		if (rxcd->eop) {
1901 			vmxnet3_rxq_input(rxq, rxcd, m_head);
1902 			m_head = m_tail = NULL;
1903 
1904 			/* Must recheck after dropping the Rx lock. */
1905 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1906 				break;
1907 		}
1908 
1909 nextp:
1910 		if (__predict_false(rxq->vxrxq_rs->update_rxhead)) {
1911 			int qid = rxcd->qid;
1912 			bus_size_t r;
1913 
1914 			idx = (idx + 1) % rxr->vxrxr_ndesc;
1915 			if (qid >= sc->vmx_nrxqueues) {
1916 				qid -= sc->vmx_nrxqueues;
1917 				r = VMXNET3_BAR0_RXH2(qid);
1918 			} else
1919 				r = VMXNET3_BAR0_RXH1(qid);
1920 			vmxnet3_write_bar0(sc, r, idx);
1921 		}
1922 	}
1923 }
1924 
1925 static void
1926 vmxnet3_legacy_intr(void *xsc)
1927 {
1928 	struct vmxnet3_softc *sc;
1929 	struct vmxnet3_rxqueue *rxq;
1930 	struct vmxnet3_txqueue *txq;
1931 	struct ifnet *ifp;
1932 
1933 	sc = xsc;
1934 	rxq = &sc->vmx_rxq[0];
1935 	txq = &sc->vmx_txq[0];
1936 	ifp = sc->vmx_ifp;
1937 
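	/*
	 * With a shared INTx line, a zero read of BAR1_INTR means the
	 * interrupt was not raised by this device.
	 */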
1938 	if (sc->vmx_intr_type == VMXNET3_IT_LEGACY) {
1939 		if (vmxnet3_read_bar1(sc, VMXNET3_BAR1_INTR) == 0)
1940 			return;
1941 	}
1942 	if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
1943 		vmxnet3_disable_all_intrs(sc);
1944 
1945 	if (sc->vmx_ds->event != 0)
1946 		vmxnet3_evintr(sc);
1947 
1948 	VMXNET3_RXQ_LOCK(rxq);
1949 	vmxnet3_rxq_eof(rxq);
1950 	VMXNET3_RXQ_UNLOCK(rxq);
1951 
1952 	VMXNET3_TXQ_LOCK(txq);
1953 	vmxnet3_txq_eof(txq);
1954 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1955 		vmxnet3_start_locked(ifp);
1956 	VMXNET3_TXQ_UNLOCK(txq);
1957 
1958 	vmxnet3_enable_all_intrs(sc);
1959 }
1960 
1961 static void
1962 vmxnet3_txq_intr(void *xtxq)
1963 {
1964 	struct vmxnet3_softc *sc;
1965 	struct vmxnet3_txqueue *txq;
1966 	struct ifnet *ifp;
1967 
1968 	txq = xtxq;
1969 	sc = txq->vxtxq_sc;
1970 	ifp = sc->vmx_ifp;
1971 
1972 	if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
1973 		vmxnet3_disable_intr(sc, txq->vxtxq_intr_idx);
1974 
1975 	VMXNET3_TXQ_LOCK(txq);
1976 	vmxnet3_txq_eof(txq);
1977 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1978 		vmxnet3_start_locked(ifp);
1979 	VMXNET3_TXQ_UNLOCK(txq);
1980 
1981 	vmxnet3_enable_intr(sc, txq->vxtxq_intr_idx);
1982 }
1983 
1984 static void
1985 vmxnet3_rxq_intr(void *xrxq)
1986 {
1987 	struct vmxnet3_softc *sc;
1988 	struct vmxnet3_rxqueue *rxq;
1989 
1990 	rxq = xrxq;
1991 	sc = rxq->vxrxq_sc;
1992 
1993 	if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
1994 		vmxnet3_disable_intr(sc, rxq->vxrxq_intr_idx);
1995 
1996 	VMXNET3_RXQ_LOCK(rxq);
1997 	vmxnet3_rxq_eof(rxq);
1998 	VMXNET3_RXQ_UNLOCK(rxq);
1999 
2000 	vmxnet3_enable_intr(sc, rxq->vxrxq_intr_idx);
2001 }
2002 
2003 static void
2004 vmxnet3_event_intr(void *xsc)
2005 {
2006 	struct vmxnet3_softc *sc;
2007 
2008 	sc = xsc;
2009 
2010 	if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
2011 		vmxnet3_disable_intr(sc, sc->vmx_event_intr_idx);
2012 
2013 	if (sc->vmx_ds->event != 0)
2014 		vmxnet3_evintr(sc);
2015 
2016 	vmxnet3_enable_intr(sc, sc->vmx_event_intr_idx);
2017 }
2018 
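/*
 * Release the transmit buffers left on the ring when the interface is
 * stopped: unload each busy DMA map and free the attached mbuf.
 */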
2019 static void
2020 vmxnet3_txstop(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *txq)
2021 {
2022 	struct vmxnet3_txring *txr;
2023 	struct vmxnet3_txbuf *txb;
2024 	int i;
2025 
2026 	txr = &txq->vxtxq_cmd_ring;
2027 
2028 	for (i = 0; i < txr->vxtxr_ndesc; i++) {
2029 		txb = &txr->vxtxr_txbuf[i];
2030 
2031 		if (txb->vtxb_m == NULL)
2032 			continue;
2033 
2034 		bus_dmamap_sync(txr->vxtxr_txtag, txb->vtxb_dmamap,
2035 		    BUS_DMASYNC_POSTWRITE);
2036 		bus_dmamap_unload(txr->vxtxr_txtag, txb->vtxb_dmamap);
2037 		m_freem(txb->vtxb_m);
2038 		txb->vtxb_m = NULL;
2039 	}
2040 }
2041 
2042 static void
2043 vmxnet3_rxstop(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rxq)
2044 {
2045 	struct vmxnet3_rxring *rxr;
2046 	struct vmxnet3_rxbuf *rxb;
2047 	int i, j;
2048 
2049 	for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
2050 		rxr = &rxq->vxrxq_cmd_ring[i];
2051 
2052 		for (j = 0; j < rxr->vxrxr_ndesc; j++) {
2053 			rxb = &rxr->vxrxr_rxbuf[j];
2054 
2055 			if (rxb->vrxb_m == NULL)
2056 				continue;
2057 			bus_dmamap_sync(rxr->vxrxr_rxtag, rxb->vrxb_dmamap,
2058 			    BUS_DMASYNC_POSTREAD);
2059 			bus_dmamap_unload(rxr->vxrxr_rxtag, rxb->vrxb_dmamap);
2060 			m_freem(rxb->vrxb_m);
2061 			rxb->vrxb_m = NULL;
2062 		}
2063 	}
2064 }
2065 
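/*
 * Briefly acquire and release every queue lock. The interrupt handlers
 * hold the corresponding queue lock while they run, so this ensures none
 * of them are still active before the queues are torn down.
 */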
2066 static void
2067 vmxnet3_stop_rendezvous(struct vmxnet3_softc *sc)
2068 {
2069 	struct vmxnet3_rxqueue *rxq;
2070 	struct vmxnet3_txqueue *txq;
2071 	int i;
2072 
2073 	for (i = 0; i < sc->vmx_nrxqueues; i++) {
2074 		rxq = &sc->vmx_rxq[i];
2075 		VMXNET3_RXQ_LOCK(rxq);
2076 		VMXNET3_RXQ_UNLOCK(rxq);
2077 	}
2078 
2079 	for (i = 0; i < sc->vmx_ntxqueues; i++) {
2080 		txq = &sc->vmx_txq[i];
2081 		VMXNET3_TXQ_LOCK(txq);
2082 		VMXNET3_TXQ_UNLOCK(txq);
2083 	}
2084 }
2085 
2086 static void
2087 vmxnet3_stop(struct vmxnet3_softc *sc)
2088 {
2089 	struct ifnet *ifp;
2090 	int q;
2091 
2092 	ifp = sc->vmx_ifp;
2093 	VMXNET3_CORE_LOCK_ASSERT(sc);
2094 
2095 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2096 	sc->vmx_link_active = 0;
2097 	callout_stop(&sc->vmx_tick);
2098 
2099 	/* Disable interrupts. */
2100 	vmxnet3_disable_all_intrs(sc);
2101 	vmxnet3_write_cmd(sc, VMXNET3_CMD_DISABLE);
2102 
2103 	vmxnet3_stop_rendezvous(sc);
2104 
2105 	for (q = 0; q < sc->vmx_ntxqueues; q++)
2106 		vmxnet3_txstop(sc, &sc->vmx_txq[q]);
2107 	for (q = 0; q < sc->vmx_nrxqueues; q++)
2108 		vmxnet3_rxstop(sc, &sc->vmx_rxq[q]);
2109 
2110 	vmxnet3_write_cmd(sc, VMXNET3_CMD_RESET);
2111 }
2112 
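/*
 * Reset the Tx command and completion rings to their initial state: head
 * and next indices back to zero, generation bits to the initial value, and
 * all descriptors cleared.
 */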
2113 static void
2114 vmxnet3_txinit(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *txq)
2115 {
2116 	struct vmxnet3_txring *txr;
2117 	struct vmxnet3_comp_ring *txc;
2118 
2119 	txr = &txq->vxtxq_cmd_ring;
2120 	txr->vxtxr_head = 0;
2121 	txr->vxtxr_next = 0;
2122 	txr->vxtxr_gen = VMXNET3_INIT_GEN;
2123 	bzero(txr->vxtxr_txd,
2124 	    txr->vxtxr_ndesc * sizeof(struct vmxnet3_txdesc));
2125 
2126 	txc = &txq->vxtxq_comp_ring;
2127 	txc->vxcr_next = 0;
2128 	txc->vxcr_gen = VMXNET3_INIT_GEN;
2129 	bzero(txc->vxcr_u.txcd,
2130 	    txc->vxcr_ndesc * sizeof(struct vmxnet3_txcompdesc));
2131 }
2132 
2133 static int
2134 vmxnet3_rxinit(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rxq)
2135 {
2136 	struct ifnet *ifp;
2137 	struct vmxnet3_rxring *rxr;
2138 	struct vmxnet3_comp_ring *rxc;
2139 	int i, populate, idx, frame_size, error;
2140 
2141 	ifp = sc->vmx_ifp;
2142 	frame_size = ETHER_ALIGN + sizeof(struct ether_vlan_header) +
2143 	    ifp->if_mtu;
2144 
2145 	/*
2146 	 * If the MTU causes us to exceed what a regular sized cluster can
2147 	 * handle, we allocate a second MJUMPAGESIZE cluster after it in
2148 	 * ring 0. If in use, ring 1 always contains MJUMPAGESIZE clusters.
2149 	 *
2150 	 * Keep rx_max_chain a divisor of the maximum Rx ring size to make
2151 	 * our life easier. We do not support changing the ring size after
2152 	 * the attach.
2153 	 */
2154 	if (frame_size <= MCLBYTES)
2155 		sc->vmx_rx_max_chain = 1;
2156 	else
2157 		sc->vmx_rx_max_chain = 2;
2158 
2159 	/*
2160 	 * Only populate ring 1 if the configuration will take advantage
2161 	 * of it. That is either when LRO is enabled or the frame size
2162 	 * exceeds what ring 0 can contain.
2163 	 */
2164 	if ((ifp->if_capenable & IFCAP_LRO) == 0 &&
2165 	    frame_size <= MCLBYTES + MJUMPAGESIZE)
2166 		populate = 1;
2167 	else
2168 		populate = VMXNET3_RXRINGS_PERQ;
2169 
2170 	for (i = 0; i < populate; i++) {
2171 		rxr = &rxq->vxrxq_cmd_ring[i];
2172 		rxr->vxrxr_fill = 0;
2173 		rxr->vxrxr_gen = VMXNET3_INIT_GEN;
2174 		bzero(rxr->vxrxr_rxd,
2175 		    rxr->vxrxr_ndesc * sizeof(struct vmxnet3_rxdesc));
2176 
2177 		for (idx = 0; idx < rxr->vxrxr_ndesc; idx++) {
2178 			error = vmxnet3_newbuf(sc, rxr);
2179 			if (error)
2180 				return (error);
2181 		}
2182 	}
2183 
2184 	for (/**/; i < VMXNET3_RXRINGS_PERQ; i++) {
2185 		rxr = &rxq->vxrxq_cmd_ring[i];
2186 		rxr->vxrxr_fill = 0;
2187 		rxr->vxrxr_gen = 0;
2188 		bzero(rxr->vxrxr_rxd,
2189 		    rxr->vxrxr_ndesc * sizeof(struct vmxnet3_rxdesc));
2190 	}
2191 
2192 	rxc = &rxq->vxrxq_comp_ring;
2193 	rxc->vxcr_next = 0;
2194 	rxc->vxcr_gen = VMXNET3_INIT_GEN;
2195 	bzero(rxc->vxcr_u.rxcd,
2196 	    rxc->vxcr_ndesc * sizeof(struct vmxnet3_rxcompdesc));
2197 
2198 	return (0);
2199 }
2200 
2201 static int
2202 vmxnet3_reinit_queues(struct vmxnet3_softc *sc)
2203 {
2204 	device_t dev;
2205 	int q, error;
2206 
2207 	dev = sc->vmx_dev;
2208 
2209 	for (q = 0; q < sc->vmx_ntxqueues; q++)
2210 		vmxnet3_txinit(sc, &sc->vmx_txq[q]);
2211 
2212 	for (q = 0; q < sc->vmx_nrxqueues; q++) {
2213 		error = vmxnet3_rxinit(sc, &sc->vmx_rxq[q]);
2214 		if (error) {
2215 			device_printf(dev, "cannot populate Rx queue %d\n", q);
2216 			return (error);
2217 		}
2218 	}
2219 
2220 	return (0);
2221 }
2222 
2223 static int
2224 vmxnet3_enable_device(struct vmxnet3_softc *sc)
2225 {
2226 	int q;
2227 
2228 	if (vmxnet3_read_cmd(sc, VMXNET3_CMD_ENABLE) != 0) {
2229 		device_printf(sc->vmx_dev, "device enable command failed!\n");
2230 		return (1);
2231 	}
2232 
2233 	/* Reset the Rx queue heads. */
2234 	for (q = 0; q < sc->vmx_nrxqueues; q++) {
2235 		vmxnet3_write_bar0(sc, VMXNET3_BAR0_RXH1(q), 0);
2236 		vmxnet3_write_bar0(sc, VMXNET3_BAR0_RXH2(q), 0);
2237 	}
2238 
2239 	return (0);
2240 }
2241 
2242 static void
2243 vmxnet3_reinit_rxfilters(struct vmxnet3_softc *sc)
2244 {
2245 	struct ifnet *ifp;
2246 
2247 	ifp = sc->vmx_ifp;
2248 
2249 	vmxnet3_set_rxfilter(sc);
2250 
2251 	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
2252 		bcopy(sc->vmx_vlan_filter, sc->vmx_ds->vlan_filter,
2253 		    sizeof(sc->vmx_ds->vlan_filter));
2254 	else
2255 		bzero(sc->vmx_ds->vlan_filter,
2256 		    sizeof(sc->vmx_ds->vlan_filter));
2257 	vmxnet3_write_cmd(sc, VMXNET3_CMD_VLAN_FILTER);
2258 }
2259 
2260 static int
2261 vmxnet3_reinit(struct vmxnet3_softc *sc)
2262 {
2263 
2264 	vmxnet3_reinit_interface(sc);
2265 	vmxnet3_reinit_shared_data(sc);
2266 
2267 	if (vmxnet3_reinit_queues(sc) != 0)
2268 		return (ENXIO);
2269 
2270 	if (vmxnet3_enable_device(sc) != 0)
2271 		return (ENXIO);
2272 
2273 	vmxnet3_reinit_rxfilters(sc);
2274 
2275 	return (0);
2276 }
2277 
2278 static void
2279 vmxnet3_init_locked(struct vmxnet3_softc *sc)
2280 {
2281 	struct ifnet *ifp;
2282 
2283 	ifp = sc->vmx_ifp;
2284 
2285 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2286 		return;
2287 
2288 	vmxnet3_stop(sc);
2289 
2290 	if (vmxnet3_reinit(sc) != 0) {
2291 		vmxnet3_stop(sc);
2292 		return;
2293 	}
2294 
2295 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2296 	vmxnet3_link_status(sc);
2297 
2298 	vmxnet3_enable_all_intrs(sc);
2299 	callout_reset(&sc->vmx_tick, hz, vmxnet3_tick, sc);
2300 }
2301 
2302 static void
2303 vmxnet3_init(void *xsc)
2304 {
2305 	struct vmxnet3_softc *sc;
2306 
2307 	sc = xsc;
2308 
2309 	VMXNET3_CORE_LOCK(sc);
2310 	vmxnet3_init_locked(sc);
2311 	VMXNET3_CORE_UNLOCK(sc);
2312 }
2313 
2314 /*
2315  * BMV: Much of this can go away once we finally have offsets in
2316  * the mbuf packet header. Bug andre@.
2317  */
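/*
 * Parse the headers of an outgoing frame for checksum/TSO offload: *etype
 * is set to the Ethernet type, *proto to the L4 protocol, and *start to
 * the byte offset of the L4 header. For TSO, *start additionally includes
 * the TCP header length, since the device expects the full header size.
 */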
2318 static int
2319 vmxnet3_txq_offload_ctx(struct mbuf *m, int *etype, int *proto, int *start)
2320 {
2321 	struct ether_vlan_header *evh;
2322 	int offset;
2323 
2324 	evh = mtod(m, struct ether_vlan_header *);
2325 	if (evh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2326 		/* BMV: We should handle nested VLAN tags too. */
2327 		*etype = ntohs(evh->evl_proto);
2328 		offset = sizeof(struct ether_vlan_header);
2329 	} else {
2330 		*etype = ntohs(evh->evl_encap_proto);
2331 		offset = sizeof(struct ether_header);
2332 	}
2333 
2334 	switch (*etype) {
2335 #if defined(INET)
2336 	case ETHERTYPE_IP: {
2337 		struct ip *ip, iphdr;
2338 		if (__predict_false(m->m_len < offset + sizeof(struct ip))) {
2339 			m_copydata(m, offset, sizeof(struct ip),
2340 			    (caddr_t) &iphdr);
2341 			ip = &iphdr;
2342 		} else
2343 			ip = (struct ip *)(m->m_data + offset);
2344 		*proto = ip->ip_p;
2345 		*start = offset + (ip->ip_hl << 2);
2346 		break;
2347 	}
2348 #endif
2349 #if defined(INET6)
2350 	case ETHERTYPE_IPV6:
2351 		*proto = -1;
2352 		*start = ip6_lasthdr(m, offset, IPPROTO_IPV6, proto);
2353 		/* Assert the network stack sent us a valid packet. */
2354 		KASSERT(*start > offset,
2355 		    ("%s: mbuf %p start %d offset %d proto %d", __func__, m,
2356 		    *start, offset, *proto));
2357 		break;
2358 #endif
2359 	default:
2360 		return (EINVAL);
2361 	}
2362 
2363 	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
2364 		struct tcphdr *tcp, tcphdr;
2365 
2366 		if (__predict_false(*proto != IPPROTO_TCP)) {
2367 			/* Likely failed to correctly parse the mbuf. */
2368 			return (EINVAL);
2369 		}
2370 
2371 		if (m->m_len < *start + sizeof(struct tcphdr)) {
2372 			m_copydata(m, *start, sizeof(struct tcphdr),
2373 			    (caddr_t) &tcphdr);
2374 			tcp = &tcphdr;
2375 		} else
2376 			tcp = (struct tcphdr *)(m->m_data + *start);
2377 
2378 		/*
2379 		 * For TSO, the size of the protocol header is also
2380 		 * included in the descriptor header size.
2381 		 */
2382 		*start += (tcp->th_off << 2);
2383 	}
2384 
2385 	return (0);
2386 }
2387 
2388 static int
2389 vmxnet3_txq_load_mbuf(struct vmxnet3_txqueue *txq, struct mbuf **m0,
2390     bus_dmamap_t dmap, bus_dma_segment_t segs[], int *nsegs)
2391 {
2392 	struct vmxnet3_txring *txr;
2393 	struct mbuf *m;
2394 	bus_dma_tag_t tag;
2395 	int maxsegs, error;
2396 
2397 	txr = &txq->vxtxq_cmd_ring;
2398 	m = *m0;
2399 	tag = txr->vxtxr_txtag;
2400 	maxsegs = VMXNET3_TX_MAXSEGS;
2401 
2402 	error = bus_dmamap_load_mbuf_sg(tag, dmap, m, segs, nsegs, 0);
2403 	if (error == 0 || error != EFBIG)
2404 		return (error);
2405 
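	/*
	 * EFBIG means the chain maps to more DMA segments than the Tx tag
	 * allows. Try to compact the chain into at most VMXNET3_TX_MAXSEGS
	 * clusters and retry the load once.
	 */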
2406 	m = m_collapse(m, M_NOWAIT, maxsegs);
2407 	if (m != NULL) {
2408 		*m0 = m;
2409 		error = bus_dmamap_load_mbuf_sg(tag, dmap, m, segs, nsegs, 0);
2410 	} else
2411 		error = ENOBUFS;
2412 
2413 	if (error) {
2414 		m_freem(*m0);
2415 		*m0 = NULL;
2416 	} else
2417 		txq->vxtxq_sc->vmx_stats.vmst_collapsed++;
2418 
2419 	return (error);
2420 }
2421 
2422 static void
2423 vmxnet3_txq_unload_mbuf(struct vmxnet3_txqueue *txq, bus_dmamap_t dmap)
2424 {
2425 	struct vmxnet3_txring *txr;
2426 
2427 	txr = &txq->vxtxq_cmd_ring;
2428 	bus_dmamap_unload(txr->vxtxr_txtag, dmap);
2429 }
2430 
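/*
 * Enqueue a single frame: load the mbuf chain into DMA segments, fill one
 * Tx descriptor per segment, program the VLAN/checksum/TSO offload fields
 * in the start-of-packet descriptor, and finally flip that descriptor's
 * generation bit to hand the whole chain over to the device.
 */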
2431 static int
2432 vmxnet3_txq_encap(struct vmxnet3_txqueue *txq, struct mbuf **m0)
2433 {
2434 	struct vmxnet3_softc *sc;
2435 	struct ifnet *ifp;
2436 	struct vmxnet3_txring *txr;
2437 	struct vmxnet3_txdesc *txd, *sop;
2438 	struct mbuf *m;
2439 	bus_dmamap_t dmap;
2440 	bus_dma_segment_t segs[VMXNET3_TX_MAXSEGS];
2441 	int i, gen, nsegs, etype, proto, start, error;
2442 
2443 	sc = txq->vxtxq_sc;
2444 	ifp = sc->vmx_ifp;
2445 	start = 0;
2446 	txd = NULL;
2447 	txr = &txq->vxtxq_cmd_ring;
2448 	dmap = txr->vxtxr_txbuf[txr->vxtxr_head].vtxb_dmamap;
2449 
2450 	error = vmxnet3_txq_load_mbuf(txq, m0, dmap, segs, &nsegs);
2451 	if (error)
2452 		return (error);
2453 
2454 	m = *m0;
2455 	M_ASSERTPKTHDR(m);
2456 	KASSERT(nsegs <= VMXNET3_TX_MAXSEGS,
2457 	    ("%s: mbuf %p with too many segments %d", __func__, m, nsegs));
2458 
2459 	if (VMXNET3_TXRING_AVAIL(txr) < nsegs) {
2460 		txq->vxtxq_stats.vtxrs_full++;
2461 		vmxnet3_txq_unload_mbuf(txq, dmap);
2462 		return (ENOSPC);
2463 	} else if (m->m_pkthdr.csum_flags & VMXNET3_CSUM_ALL_OFFLOAD) {
2464 		error = vmxnet3_txq_offload_ctx(m, &etype, &proto, &start);
2465 		if (error) {
2466 			txq->vxtxq_stats.vtxrs_offload_failed++;
2467 			vmxnet3_txq_unload_mbuf(txq, dmap);
2468 			m_freem(m);
2469 			*m0 = NULL;
2470 			return (error);
2471 		}
2472 	}
2473 
2474 	txr->vxtxr_txbuf[txr->vxtxr_head].vtxb_m = m = *m0;
2475 	sop = &txr->vxtxr_txd[txr->vxtxr_head];
2476 	gen = txr->vxtxr_gen ^ 1;	/* Not yet owned by the device. */
2477 
2478 	for (i = 0; i < nsegs; i++) {
2479 		txd = &txr->vxtxr_txd[txr->vxtxr_head];
2480 
2481 		txd->addr = segs[i].ds_addr;
2482 		txd->len = segs[i].ds_len;
2483 		txd->gen = gen;
2484 		txd->dtype = 0;
2485 		txd->offload_mode = VMXNET3_OM_NONE;
2486 		txd->offload_pos = 0;
2487 		txd->hlen = 0;
2488 		txd->eop = 0;
2489 		txd->compreq = 0;
2490 		txd->vtag_mode = 0;
2491 		txd->vtag = 0;
2492 
2493 		if (++txr->vxtxr_head == txr->vxtxr_ndesc) {
2494 			txr->vxtxr_head = 0;
2495 			txr->vxtxr_gen ^= 1;
2496 		}
2497 		gen = txr->vxtxr_gen;
2498 	}
2499 	txd->eop = 1;
2500 	txd->compreq = 1;
2501 
2502 	if (m->m_flags & M_VLANTAG) {
2503 		sop->vtag_mode = 1;
2504 		sop->vtag = m->m_pkthdr.ether_vtag;
2505 	}
2506 
2507 	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
2508 		sop->offload_mode = VMXNET3_OM_TSO;
2509 		sop->hlen = start;
2510 		sop->offload_pos = m->m_pkthdr.tso_segsz;
2511 	} else if (m->m_pkthdr.csum_flags & (VMXNET3_CSUM_OFFLOAD |
2512 	    VMXNET3_CSUM_OFFLOAD_IPV6)) {
2513 		sop->offload_mode = VMXNET3_OM_CSUM;
2514 		sop->hlen = start;
2515 		sop->offload_pos = start + m->m_pkthdr.csum_data;
2516 	}
2517 
2518 	/* Finally, change the ownership. */
2519 	vmxnet3_barrier(sc, VMXNET3_BARRIER_WR);
2520 	sop->gen ^= 1;
2521 
2522 	if (++txq->vxtxq_ts->npending >= txq->vxtxq_ts->intr_threshold) {
2523 		txq->vxtxq_ts->npending = 0;
2524 		vmxnet3_write_bar0(sc, VMXNET3_BAR0_TXH(txq->vxtxq_id),
2525 		    txr->vxtxr_head);
2526 	}
2527 
2528 	return (0);
2529 }
2530 
2531 static void
2532 vmxnet3_start_locked(struct ifnet *ifp)
2533 {
2534 	struct vmxnet3_softc *sc;
2535 	struct vmxnet3_txqueue *txq;
2536 	struct vmxnet3_txring *txr;
2537 	struct mbuf *m_head;
2538 	int tx, avail;
2539 
2540 	sc = ifp->if_softc;
2541 	txq = &sc->vmx_txq[0];
2542 	txr = &txq->vxtxq_cmd_ring;
2543 	tx = 0;
2544 
2545 	VMXNET3_TXQ_LOCK_ASSERT(txq);
2546 
2547 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
2548 	    sc->vmx_link_active == 0)
2549 		return;
2550 
2551 	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
2552 		if ((avail = VMXNET3_TXRING_AVAIL(txr)) < 2)
2553 			break;
2554 
2555 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2556 		if (m_head == NULL)
2557 			break;
2558 
2559 		/* Assume the worst case if this mbuf is the head of a chain. */
2560 		if (m_head->m_next != NULL && avail < VMXNET3_TX_MAXSEGS) {
2561 			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2562 			break;
2563 		}
2564 
2565 		if (vmxnet3_txq_encap(txq, &m_head) != 0) {
2566 			if (m_head != NULL)
2567 				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2568 			break;
2569 		}
2570 
2571 		tx++;
2572 		ETHER_BPF_MTAP(ifp, m_head);
2573 	}
2574 
2575 	if (tx > 0) {
2576 		if (txq->vxtxq_ts->npending > 0) {
2577 			txq->vxtxq_ts->npending = 0;
2578 			vmxnet3_write_bar0(sc, VMXNET3_BAR0_TXH(txq->vxtxq_id),
2579 			    txr->vxtxr_head);
2580 		}
2581 		txq->vxtxq_watchdog = VMXNET3_WATCHDOG_TIMEOUT;
2582 	}
2583 }
2584 
2585 static void
2586 vmxnet3_start(struct ifnet *ifp)
2587 {
2588 	struct vmxnet3_softc *sc;
2589 	struct vmxnet3_txqueue *txq;
2590 
2591 	sc = ifp->if_softc;
2592 	txq = &sc->vmx_txq[0];
2593 
2594 	VMXNET3_TXQ_LOCK(txq);
2595 	vmxnet3_start_locked(ifp);
2596 	VMXNET3_TXQ_UNLOCK(txq);
2597 }
2598 
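/*
 * The VLAN filter is a 4096-bit vector kept as an array of 32-bit words:
 * "idx" selects the word (tag >> 5) and "bit" the position within it
 * (tag & 0x1f). For example, VLAN tag 100 maps to word 3, bit 4.
 */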
2599 static void
2600 vmxnet3_update_vlan_filter(struct vmxnet3_softc *sc, int add, uint16_t tag)
2601 {
2602 	struct ifnet *ifp;
2603 	int idx, bit;
2604 
2605 	ifp = sc->vmx_ifp;
2606 	idx = (tag >> 5) & 0x7F;
2607 	bit = tag & 0x1F;
2608 
2609 	if (tag == 0 || tag > 4095)
2610 		return;
2611 
2612 	VMXNET3_CORE_LOCK(sc);
2613 
2614 	/* Update our private VLAN bitvector. */
2615 	if (add)
2616 		sc->vmx_vlan_filter[idx] |= (1 << bit);
2617 	else
2618 		sc->vmx_vlan_filter[idx] &= ~(1 << bit);
2619 
2620 	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
2621 		if (add)
2622 			sc->vmx_ds->vlan_filter[idx] |= (1 << bit);
2623 		else
2624 			sc->vmx_ds->vlan_filter[idx] &= ~(1 << bit);
2625 		vmxnet3_write_cmd(sc, VMXNET3_CMD_VLAN_FILTER);
2626 	}
2627 
2628 	VMXNET3_CORE_UNLOCK(sc);
2629 }
2630 
2631 static void
2632 vmxnet3_register_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
2633 {
2634 
2635 	if (ifp->if_softc == arg)
2636 		vmxnet3_update_vlan_filter(arg, 1, tag);
2637 }
2638 
2639 static void
2640 vmxnet3_unregister_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
2641 {
2642 
2643 	if (ifp->if_softc == arg)
2644 		vmxnet3_update_vlan_filter(arg, 0, tag);
2645 }
2646 
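/*
 * Program the receive filter in the shared data area: build the Rx mode
 * (unicast always; broadcast, promiscuous, and all-multicast from the
 * interface flags), copy up to VMXNET3_MULTICAST_MAX link-level multicast
 * addresses into the multicast table (falling back to all-multicast on
 * overflow), and notify the host with the SET_FILTER and SET_RXMODE
 * commands.
 */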
2647 static void
2648 vmxnet3_set_rxfilter(struct vmxnet3_softc *sc)
2649 {
2650 	struct ifnet *ifp;
2651 	struct vmxnet3_driver_shared *ds;
2652 	struct ifmultiaddr *ifma;
2653 	u_int mode;
2654 
2655 	ifp = sc->vmx_ifp;
2656 	ds = sc->vmx_ds;
2657 
2658 	mode = VMXNET3_RXMODE_UCAST;
2659 	if (ifp->if_flags & IFF_BROADCAST)
2660 		mode |= VMXNET3_RXMODE_BCAST;
2661 	if (ifp->if_flags & IFF_PROMISC)
2662 		mode |= VMXNET3_RXMODE_PROMISC;
2663 	if (ifp->if_flags & IFF_ALLMULTI)
2664 		mode |= VMXNET3_RXMODE_ALLMULTI;
2665 	else {
2666 		int cnt = 0, overflow = 0;
2667 
2668 		if_maddr_rlock(ifp);
2669 		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2670 			if (ifma->ifma_addr->sa_family != AF_LINK)
2671 				continue;
2672 			else if (cnt == VMXNET3_MULTICAST_MAX) {
2673 				overflow = 1;
2674 				break;
2675 			}
2676 
2677 			bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
2678 			    &sc->vmx_mcast[cnt * ETHER_ADDR_LEN], ETHER_ADDR_LEN);
2679 			cnt++;
2680 		}
2681 		if_maddr_runlock(ifp);
2682 
2683 		if (overflow != 0) {
2684 			cnt = 0;
2685 			mode |= VMXNET3_RXMODE_ALLMULTI;
2686 		} else if (cnt > 0)
2687 			mode |= VMXNET3_RXMODE_MCAST;
2688 		ds->mcast_tablelen = cnt * ETHER_ADDR_LEN;
2689 	}
2690 
2691 	ds->rxmode = mode;
2692 
2693 	vmxnet3_write_cmd(sc, VMXNET3_CMD_SET_FILTER);
2694 	vmxnet3_write_cmd(sc, VMXNET3_CMD_SET_RXMODE);
2695 }
2696 
2697 static int
2698 vmxnet3_change_mtu(struct vmxnet3_softc *sc, int mtu)
2699 {
2700 	struct ifnet *ifp;
2701 
2702 	ifp = sc->vmx_ifp;
2703 
2704 	if (mtu < VMXNET3_MIN_MTU || mtu > VMXNET3_MAX_MTU)
2705 		return (EINVAL);
2706 
2707 	ifp->if_mtu = mtu;
2708 
2709 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2710 		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2711 		vmxnet3_init_locked(sc);
2712 	}
2713 
2714 	return (0);
2715 }
2716 
2717 static int
2718 vmxnet3_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2719 {
2720 	struct vmxnet3_softc *sc;
2721 	struct ifreq *ifr;
2722 	int reinit, mask, error;
2723 
2724 	sc = ifp->if_softc;
2725 	ifr = (struct ifreq *) data;
2726 	error = 0;
2727 
2728 	switch (cmd) {
2729 	case SIOCSIFMTU:
2730 		if (ifp->if_mtu != ifr->ifr_mtu) {
2731 			VMXNET3_CORE_LOCK(sc);
2732 			error = vmxnet3_change_mtu(sc, ifr->ifr_mtu);
2733 			VMXNET3_CORE_UNLOCK(sc);
2734 		}
2735 		break;
2736 
2737 	case SIOCSIFFLAGS:
2738 		VMXNET3_CORE_LOCK(sc);
2739 		if (ifp->if_flags & IFF_UP) {
2740 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2741 				if ((ifp->if_flags ^ sc->vmx_if_flags) &
2742 				    (IFF_PROMISC | IFF_ALLMULTI)) {
2743 					vmxnet3_set_rxfilter(sc);
2744 				}
2745 			} else
2746 				vmxnet3_init_locked(sc);
2747 		} else {
2748 			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2749 				vmxnet3_stop(sc);
2750 		}
2751 		sc->vmx_if_flags = ifp->if_flags;
2752 		VMXNET3_CORE_UNLOCK(sc);
2753 		break;
2754 
2755 	case SIOCADDMULTI:
2756 	case SIOCDELMULTI:
2757 		VMXNET3_CORE_LOCK(sc);
2758 		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2759 			vmxnet3_set_rxfilter(sc);
2760 		VMXNET3_CORE_UNLOCK(sc);
2761 		break;
2762 
2763 	case SIOCSIFMEDIA:
2764 	case SIOCGIFMEDIA:
2765 		error = ifmedia_ioctl(ifp, ifr, &sc->vmx_media, cmd);
2766 		break;
2767 
2768 	case SIOCSIFCAP:
2769 		VMXNET3_CORE_LOCK(sc);
2770 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2771 
2772 		if (mask & IFCAP_TXCSUM)
2773 			ifp->if_capenable ^= IFCAP_TXCSUM;
2774 		if (mask & IFCAP_TXCSUM_IPV6)
2775 			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
2776 		if (mask & IFCAP_TSO4)
2777 			ifp->if_capenable ^= IFCAP_TSO4;
2778 		if (mask & IFCAP_TSO6)
2779 			ifp->if_capenable ^= IFCAP_TSO6;
2780 
2781 		if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO |
2782 		    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWFILTER)) {
2783 			/* Changing these features requires us to reinit. */
2784 			reinit = 1;
2785 
2786 			if (mask & IFCAP_RXCSUM)
2787 				ifp->if_capenable ^= IFCAP_RXCSUM;
2788 			if (mask & IFCAP_RXCSUM_IPV6)
2789 				ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
2790 			if (mask & IFCAP_LRO)
2791 				ifp->if_capenable ^= IFCAP_LRO;
2792 			if (mask & IFCAP_VLAN_HWTAGGING)
2793 				ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
2794 			if (mask & IFCAP_VLAN_HWFILTER)
2795 				ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
2796 		} else
2797 			reinit = 0;
2798 
2799 		if (mask & IFCAP_VLAN_HWTSO)
2800 			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
2801 
2802 		if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
2803 			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2804 			vmxnet3_init_locked(sc);
2805 		}
2806 
2807 		VMXNET3_CORE_UNLOCK(sc);
2808 		VLAN_CAPABILITIES(ifp);
2809 		break;
2810 
2811 	default:
2812 		error = ether_ioctl(ifp, cmd, data);
2813 		break;
2814 	}
2815 
2816 	VMXNET3_CORE_LOCK_ASSERT_NOTOWNED(sc);
2817 
2818 	return (error);
2819 }
2820 
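/*
 * Per-queue watchdog: the counter is armed in vmxnet3_start_locked() each
 * time frames are queued and decremented once per tick. Reaching zero is
 * reported as a timeout, causing the caller to reinitialize the interface.
 */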
2821 static int
2822 vmxnet3_watchdog(struct vmxnet3_txqueue *txq)
2823 {
2824 	struct vmxnet3_softc *sc;
2825 
2826 	sc = txq->vxtxq_sc;
2827 
2828 	VMXNET3_TXQ_LOCK(txq);
2829 	if (txq->vxtxq_watchdog == 0 || --txq->vxtxq_watchdog) {
2830 		VMXNET3_TXQ_UNLOCK(txq);
2831 		return (0);
2832 	}
2833 	VMXNET3_TXQ_UNLOCK(txq);
2834 
2835 	if_printf(sc->vmx_ifp, "watchdog timeout on queue %d\n",
2836 	    txq->vxtxq_id);
2837 	return (1);
2838 }
2839 
2840 static void
2841 vmxnet3_refresh_stats(struct vmxnet3_softc *sc)
2842 {
2843 
2844 	vmxnet3_write_cmd(sc, VMXNET3_CMD_GET_STATS);
2845 }
2846 
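/*
 * Once-per-second callout run under the core lock: refresh the host
 * statistics and check each Tx queue's watchdog, reinitializing the
 * interface if any queue timed out.
 */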
2847 static void
2848 vmxnet3_tick(void *xsc)
2849 {
2850 	struct vmxnet3_softc *sc;
2851 	struct ifnet *ifp;
2852 	int i, timedout;
2853 
2854 	sc = xsc;
2855 	ifp = sc->vmx_ifp;
2856 	timedout = 0;
2857 
2858 	VMXNET3_CORE_LOCK_ASSERT(sc);
2859 	vmxnet3_refresh_stats(sc);
2860 
2861 	for (i = 0; i < sc->vmx_ntxqueues; i++)
2862 		timedout |= vmxnet3_watchdog(&sc->vmx_txq[i]);
2863 
2864 	if (timedout != 0) {
2865 		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2866 		vmxnet3_init_locked(sc);
2867 	} else
2868 		callout_reset(&sc->vmx_tick, hz, vmxnet3_tick, sc);
2869 }
2870 
2871 static int
2872 vmxnet3_link_is_up(struct vmxnet3_softc *sc)
2873 {
2874 	uint32_t status;
2875 
2876 	/* Also update the link speed while here. */
2877 	status = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_LINK);
2878 	sc->vmx_link_speed = status >> 16;
2879 	return (!!(status & 0x1));
2880 }
2881 
2882 static void
2883 vmxnet3_link_status(struct vmxnet3_softc *sc)
2884 {
2885 	struct ifnet *ifp;
2886 	int link;
2887 
2888 	ifp = sc->vmx_ifp;
2889 	link = vmxnet3_link_is_up(sc);
2890 
2891 	if (link != 0 && sc->vmx_link_active == 0) {
2892 		sc->vmx_link_active = 1;
2893 		if_link_state_change(ifp, LINK_STATE_UP);
2894 	} else if (link == 0 && sc->vmx_link_active != 0) {
2895 		sc->vmx_link_active = 0;
2896 		if_link_state_change(ifp, LINK_STATE_DOWN);
2897 	}
2898 }
2899 
2900 static void
2901 vmxnet3_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2902 {
2903 	struct vmxnet3_softc *sc;
2904 
2905 	sc = ifp->if_softc;
2906 
2907 	ifmr->ifm_active = IFM_ETHER | IFM_AUTO;
2908 	ifmr->ifm_status = IFM_AVALID;
2909 
2910 	VMXNET3_CORE_LOCK(sc);
2911 	if (vmxnet3_link_is_up(sc) != 0)
2912 		ifmr->ifm_status |= IFM_ACTIVE;
2913 	else
2914 		ifmr->ifm_active |= IFM_NONE;
2915 	VMXNET3_CORE_UNLOCK(sc);
2916 }
2917 
2918 static int
2919 vmxnet3_media_change(struct ifnet *ifp)
2920 {
2921 
2922 	/* Ignore. */
2923 	return (0);
2924 }
2925 
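/*
 * The MAC address is programmed as two little-endian words: bytes 0-3 in
 * the MACL register and bytes 4-5 in MACH. For example, 00:0c:29:aa:bb:cc
 * is written as MACL = 0xaa290c00 and MACH = 0x0000ccbb.
 */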
2926 static void
2927 vmxnet3_set_lladdr(struct vmxnet3_softc *sc)
2928 {
2929 	uint32_t ml, mh;
2930 
2931 	ml  = sc->vmx_lladdr[0];
2932 	ml |= sc->vmx_lladdr[1] << 8;
2933 	ml |= sc->vmx_lladdr[2] << 16;
2934 	ml |= sc->vmx_lladdr[3] << 24;
2935 	vmxnet3_write_bar1(sc, VMXNET3_BAR1_MACL, ml);
2936 
2937 	mh  = sc->vmx_lladdr[4];
2938 	mh |= sc->vmx_lladdr[5] << 8;
2939 	vmxnet3_write_bar1(sc, VMXNET3_BAR1_MACH, mh);
2940 }
2941 
2942 static void
2943 vmxnet3_get_lladdr(struct vmxnet3_softc *sc)
2944 {
2945 	uint32_t ml, mh;
2946 
2947 	ml = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_MACL);
2948 	mh = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_MACH);
2949 
2950 	sc->vmx_lladdr[0] = ml;
2951 	sc->vmx_lladdr[1] = ml >> 8;
2952 	sc->vmx_lladdr[2] = ml >> 16;
2953 	sc->vmx_lladdr[3] = ml >> 24;
2954 	sc->vmx_lladdr[4] = mh;
2955 	sc->vmx_lladdr[5] = mh >> 8;
2956 }
2957 
2958 static void
2959 vmxnet3_setup_txq_sysctl(struct vmxnet3_txqueue *txq,
2960     struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child)
2961 {
2962 	struct sysctl_oid *node, *txsnode;
2963 	struct sysctl_oid_list *list, *txslist;
2964 	struct vmxnet3_txq_stats *stats;
2965 	struct UPT1_TxStats *txstats;
2966 	char namebuf[16];
2967 
2968 	stats = &txq->vxtxq_stats;
2969 	txstats = &txq->vxtxq_ts->stats;
2970 
2971 	snprintf(namebuf, sizeof(namebuf), "txq%d", txq->vxtxq_id);
2972 	node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD,
2973 	    NULL, "Transmit Queue");
2974 	txq->vxtxq_sysctl = list = SYSCTL_CHILDREN(node);
2975 
2976 	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ringfull", CTLFLAG_RD,
2977 	    &stats->vtxrs_full, "Tx ring full");
2978 	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "offload_failed", CTLFLAG_RD,
2979 	    &stats->vtxrs_offload_failed, "Tx checksum offload failed");
2980 
2981 	/*
2982 	 * Add statistics reported by the host. These are updated once
2983 	 * per second.
2984 	 */
2985 	txsnode = SYSCTL_ADD_NODE(ctx, list, OID_AUTO, "hstats", CTLFLAG_RD,
2986 	    NULL, "Host Statistics");
2987 	txslist = SYSCTL_CHILDREN(txsnode);
2988 	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tso_packets", CTLFLAG_RD,
2989 	    &txstats->TSO_packets, "TSO packets");
2990 	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tso_bytes", CTLFLAG_RD,
2991 	    &txstats->TSO_bytes, "TSO bytes");
2992 	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "ucast_packets", CTLFLAG_RD,
2993 	    &txstats->ucast_packets, "Unicast packets");
2994 	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "unicast_bytes", CTLFLAG_RD,
2995 	    &txstats->ucast_bytes, "Unicast bytes");
2996 	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "mcast_packets", CTLFLAG_RD,
2997 	    &txstats->mcast_packets, "Multicast packets");
2998 	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "mcast_bytes", CTLFLAG_RD,
2999 	    &txstats->mcast_bytes, "Multicast bytes");
3000 	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "error", CTLFLAG_RD,
3001 	    &txstats->error, "Errors");
3002 	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "discard", CTLFLAG_RD,
3003 	    &txstats->discard, "Discards");
3004 }
3005 
3006 static void
3007 vmxnet3_setup_rxq_sysctl(struct vmxnet3_rxqueue *rxq,
3008     struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child)
3009 {
3010 	struct sysctl_oid *node, *rxsnode;
3011 	struct sysctl_oid_list *list, *rxslist;
3012 	struct vmxnet3_rxq_stats *stats;
3013 	struct UPT1_RxStats *rxstats;
3014 	char namebuf[16];
3015 
3016 	stats = &rxq->vxrxq_stats;
3017 	rxstats = &rxq->vxrxq_rs->stats;
3018 
3019 	snprintf(namebuf, sizeof(namebuf), "rxq%d", rxq->vxrxq_id);
3020 	node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD,
3021 	    NULL, "Receive Queue");
3022 	rxq->vxrxq_sysctl = list = SYSCTL_CHILDREN(node);
3023 
3024 	/*
3025 	 * Add statistics reported by the host. These are updated once
3026 	 * per second.
3027 	 */
3028 	rxsnode = SYSCTL_ADD_NODE(ctx, list, OID_AUTO, "hstats", CTLFLAG_RD,
3029 	    NULL, "Host Statistics");
3030 	rxslist = SYSCTL_CHILDREN(rxsnode);
3031 	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "lro_packets", CTLFLAG_RD,
3032 	    &rxstats->LRO_packets, "LRO packets");
3033 	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "lro_bytes", CTLFLAG_RD,
3034 	    &rxstats->LRO_bytes, "LRO bytes");
3035 	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "ucast_packets", CTLFLAG_RD,
3036 	    &rxstats->ucast_packets, "Unicast packets");
3037 	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "unicast_bytes", CTLFLAG_RD,
3038 	    &rxstats->ucast_bytes, "Unicast bytes");
3039 	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "mcast_packets", CTLFLAG_RD,
3040 	    &rxstats->mcast_packets, "Multicast packets");
3041 	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "mcast_bytes", CTLFLAG_RD,
3042 	    &rxstats->mcast_bytes, "Multicast bytes");
3043 	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "bcast_packets", CTLFLAG_RD,
3044 	    &rxstats->bcast_packets, "Broadcast packets");
3045 	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "bcast_bytes", CTLFLAG_RD,
3046 	    &rxstats->bcast_bytes, "Broadcast bytes");
3047 	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "nobuffer", CTLFLAG_RD,
3048 	    &rxstats->nobuffer, "No buffer");
3049 	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "error", CTLFLAG_RD,
3050 	    &rxstats->error, "Errors");
3051 }
3052 
3053 #ifdef VMXNET3_DEBUG_SYSCTL
3054 static void
3055 vmxnet3_setup_debug_sysctl(struct vmxnet3_softc *sc,
3056     struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child)
3057 {
3058 	struct sysctl_oid *node;
3059 	struct sysctl_oid_list *list;
3060 	int i;
3061 
3062 	for (i = 0; i < sc->vmx_ntxqueues; i++) {
3063 		struct vmxnet3_txqueue *txq = &sc->vmx_txq[i];
3064 
3065 		node = SYSCTL_ADD_NODE(ctx, txq->vxtxq_sysctl, OID_AUTO,
3066 		    "debug", CTLFLAG_RD, NULL, "");
3067 		list = SYSCTL_CHILDREN(node);
3068 
3069 		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd_head", CTLFLAG_RD,
3070 		    &txq->vxtxq_cmd_ring.vxtxr_head, 0, "");
3071 		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd_next", CTLFLAG_RD,
3072 		    &txq->vxtxq_cmd_ring.vxtxr_next, 0, "");
3073 		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd_ndesc", CTLFLAG_RD,
3074 		    &txq->vxtxq_cmd_ring.vxtxr_ndesc, 0, "");
3075 		SYSCTL_ADD_INT(ctx, list, OID_AUTO, "cmd_gen", CTLFLAG_RD,
3076 		    &txq->vxtxq_cmd_ring.vxtxr_gen, 0, "");
3077 		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_next", CTLFLAG_RD,
3078 		    &txq->vxtxq_comp_ring.vxcr_next, 0, "");
3079 		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_ndesc", CTLFLAG_RD,
3080 		    &txq->vxtxq_comp_ring.vxcr_ndesc, 0, "");
3081 		SYSCTL_ADD_INT(ctx, list, OID_AUTO, "comp_gen", CTLFLAG_RD,
3082 		    &txq->vxtxq_comp_ring.vxcr_gen, 0, "");
3083 	}
3084 
3085 	for (i = 0; i < sc->vmx_nrxqueues; i++) {
3086 		struct vmxnet3_rxqueue *rxq = &sc->vmx_rxq[i];
3087 
3088 		node = SYSCTL_ADD_NODE(ctx, rxq->vxrxq_sysctl, OID_AUTO,
3089 		    "debug", CTLFLAG_RD, NULL, "");
3090 		list = SYSCTL_CHILDREN(node);
3091 
3092 		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd0_fill", CTLFLAG_RD,
3093 		    &rxq->vxrxq_cmd_ring[0].vxrxr_fill, 0, "");
3094 		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd0_ndesc", CTLFLAG_RD,
3095 		    &rxq->vxrxq_cmd_ring[0].vxrxr_ndesc, 0, "");
3096 		SYSCTL_ADD_INT(ctx, list, OID_AUTO, "cmd0_gen", CTLFLAG_RD,
3097 		    &rxq->vxrxq_cmd_ring[0].vxrxr_gen, 0, "");
3098 		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd1_fill", CTLFLAG_RD,
3099 		    &rxq->vxrxq_cmd_ring[1].vxrxr_fill, 0, "");
3100 		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd1_ndesc", CTLFLAG_RD,
3101 		    &rxq->vxrxq_cmd_ring[1].vxrxr_ndesc, 0, "");
3102 		SYSCTL_ADD_INT(ctx, list, OID_AUTO, "cmd1_gen", CTLFLAG_RD,
3103 		    &rxq->vxrxq_cmd_ring[1].vxrxr_gen, 0, "");
3104 		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_next", CTLFLAG_RD,
3105 		    &rxq->vxrxq_comp_ring.vxcr_next, 0, "");
3106 		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_ndesc", CTLFLAG_RD,
3107 		    &rxq->vxrxq_comp_ring.vxcr_ndesc, 0, "");
3108 		SYSCTL_ADD_INT(ctx, list, OID_AUTO, "comp_gen", CTLFLAG_RD,
3109 		    &rxq->vxrxq_comp_ring.vxcr_gen, 0, "");
3110 	}
3111 }
3112 #endif
3113 
3114 static void
3115 vmxnet3_setup_queue_sysctl(struct vmxnet3_softc *sc,
3116     struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child)
3117 {
3118 	int i;
3119 
3120 	for (i = 0; i < sc->vmx_ntxqueues; i++)
3121 		vmxnet3_setup_txq_sysctl(&sc->vmx_txq[i], ctx, child);
3122 	for (i = 0; i < sc->vmx_nrxqueues; i++)
3123 		vmxnet3_setup_rxq_sysctl(&sc->vmx_rxq[i], ctx, child);
3124 
3125 #ifdef VMXNET3_DEBUG_SYSCTL
3126 	vmxnet3_setup_debug_sysctl(sc, ctx, child);
3127 #endif
3128 }
3129 
3130 static void
3131 vmxnet3_setup_sysctl(struct vmxnet3_softc *sc)
3132 {
3133 	device_t dev;
3134 	struct vmxnet3_statistics *stats;
3135 	struct sysctl_ctx_list *ctx;
3136 	struct sysctl_oid *tree;
3137 	struct sysctl_oid_list *child;
3138 
3139 	dev = sc->vmx_dev;
3140 	ctx = device_get_sysctl_ctx(dev);
3141 	tree = device_get_sysctl_tree(dev);
3142 	child = SYSCTL_CHILDREN(tree);
3143 
3144 	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "ntxqueues", CTLFLAG_RD,
3145 	    &sc->vmx_ntxqueues, 0, "Number of Tx queues");
3146 	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "nrxqueues", CTLFLAG_RD,
3147 	    &sc->vmx_nrxqueues, 0, "Number of Rx queues");
3148 
3149 	stats = &sc->vmx_stats;
3150 	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "collapsed", CTLFLAG_RD,
3151 	    &stats->vmst_collapsed, 0, "Tx mbuf chains collapsed");
3152 	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "mgetcl_failed", CTLFLAG_RD,
3153 	    &stats->vmst_mgetcl_failed, 0, "mbuf cluster allocation failed");
3154 	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "mbuf_load_failed", CTLFLAG_RD,
3155 	    &stats->vmst_mbuf_load_failed, 0, "mbuf load segments failed");
3156 
3157 	vmxnet3_setup_queue_sysctl(sc, ctx, child);
3158 }
3159 
3160 static void
3161 vmxnet3_write_bar0(struct vmxnet3_softc *sc, bus_size_t r, uint32_t v)
3162 {
3163 
3164 	bus_space_write_4(sc->vmx_iot0, sc->vmx_ioh0, r, v);
3165 }
3166 
3167 static uint32_t
3168 vmxnet3_read_bar1(struct vmxnet3_softc *sc, bus_size_t r)
3169 {
3170 
3171 	return (bus_space_read_4(sc->vmx_iot1, sc->vmx_ioh1, r));
3172 }
3173 
3174 static void
3175 vmxnet3_write_bar1(struct vmxnet3_softc *sc, bus_size_t r, uint32_t v)
3176 {
3177 
3178 	bus_space_write_4(sc->vmx_iot1, sc->vmx_ioh1, r, v);
3179 }
3180 
3181 static void
3182 vmxnet3_write_cmd(struct vmxnet3_softc *sc, uint32_t cmd)
3183 {
3184 
3185 	vmxnet3_write_bar1(sc, VMXNET3_BAR1_CMD, cmd);
3186 }
3187 
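/*
 * Commands are issued by writing the command code to the BAR1 CMD register.
 * For commands that return a result, the value is read back from the same
 * register after a bus-space barrier.
 */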
3188 static uint32_t
3189 vmxnet3_read_cmd(struct vmxnet3_softc *sc, uint32_t cmd)
3190 {
3191 
3192 	vmxnet3_write_cmd(sc, cmd);
3193 	bus_space_barrier(sc->vmx_iot1, sc->vmx_ioh1, 0, 0,
3194 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
3195 	return (vmxnet3_read_bar1(sc, VMXNET3_BAR1_CMD));
3196 }
3197 
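/*
 * Interrupts are masked per vector through the BAR0 IMASK registers:
 * writing 0 unmasks the vector, writing 1 masks it.
 */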
3198 static void
3199 vmxnet3_enable_intr(struct vmxnet3_softc *sc, int irq)
3200 {
3201 
3202 	vmxnet3_write_bar0(sc, VMXNET3_BAR0_IMASK(irq), 0);
3203 }
3204 
3205 static void
3206 vmxnet3_disable_intr(struct vmxnet3_softc *sc, int irq)
3207 {
3208 
3209 	vmxnet3_write_bar0(sc, VMXNET3_BAR0_IMASK(irq), 1);
3210 }
3211 
3212 static void
3213 vmxnet3_enable_all_intrs(struct vmxnet3_softc *sc)
3214 {
3215 	int i;
3216 
3217 	sc->vmx_ds->ictrl &= ~VMXNET3_ICTRL_DISABLE_ALL;
3218 	for (i = 0; i < sc->vmx_nintrs; i++)
3219 		vmxnet3_enable_intr(sc, i);
3220 }
3221 
3222 static void
3223 vmxnet3_disable_all_intrs(struct vmxnet3_softc *sc)
3224 {
3225 	int i;
3226 
3227 	sc->vmx_ds->ictrl |= VMXNET3_ICTRL_DISABLE_ALL;
3228 	for (i = 0; i < sc->vmx_nintrs; i++)
3229 		vmxnet3_disable_intr(sc, i);
3230 }
3231 
3232 static void
3233 vmxnet3_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
3234 {
3235 	bus_addr_t *baddr = arg;
3236 
3237 	if (error == 0)
3238 		*baddr = segs->ds_addr;
3239 }
3240 
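/*
 * Allocate a zeroed, physically contiguous DMA buffer: create a tag limited
 * to a single segment of the requested size and alignment, allocate and map
 * the memory, and record the bus address via the load callback.
 */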
3241 static int
3242 vmxnet3_dma_malloc(struct vmxnet3_softc *sc, bus_size_t size, bus_size_t align,
3243     struct vmxnet3_dma_alloc *dma)
3244 {
3245 	device_t dev;
3246 	int error;
3247 
3248 	dev = sc->vmx_dev;
3249 	bzero(dma, sizeof(struct vmxnet3_dma_alloc));
3250 
3251 	error = bus_dma_tag_create(bus_get_dma_tag(dev),
3252 	    align, 0,		/* alignment, bounds */
3253 	    BUS_SPACE_MAXADDR,	/* lowaddr */
3254 	    BUS_SPACE_MAXADDR,	/* highaddr */
3255 	    NULL, NULL,		/* filter, filterarg */
3256 	    size,		/* maxsize */
3257 	    1,			/* nsegments */
3258 	    size,		/* maxsegsize */
3259 	    BUS_DMA_ALLOCNOW,	/* flags */
3260 	    NULL,		/* lockfunc */
3261 	    NULL,		/* lockfuncarg */
3262 	    &dma->dma_tag);
3263 	if (error) {
3264 		device_printf(dev, "bus_dma_tag_create failed: %d\n", error);
3265 		goto fail;
3266 	}
3267 
3268 	error = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
3269 	    BUS_DMA_ZERO | BUS_DMA_NOWAIT, &dma->dma_map);
3270 	if (error) {
3271 		device_printf(dev, "bus_dmamem_alloc failed: %d\n", error);
3272 		goto fail;
3273 	}
3274 
3275 	error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
3276 	    size, vmxnet3_dmamap_cb, &dma->dma_paddr, BUS_DMA_NOWAIT);
3277 	if (error) {
3278 		device_printf(dev, "bus_dmamap_load failed: %d\n", error);
3279 		goto fail;
3280 	}
3281 
3282 	dma->dma_size = size;
3283 
3284 fail:
3285 	if (error)
3286 		vmxnet3_dma_free(sc, dma);
3287 
3288 	return (error);
3289 }
3290 
3291 static void
3292 vmxnet3_dma_free(struct vmxnet3_softc *sc, struct vmxnet3_dma_alloc *dma)
3293 {
3294 
3295 	if (dma->dma_tag != NULL) {
3296 		if (dma->dma_map != NULL) {
3297 			bus_dmamap_sync(dma->dma_tag, dma->dma_map,
3298 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3299 			bus_dmamap_unload(dma->dma_tag, dma->dma_map);
3300 		}
3301 
3302 		if (dma->dma_vaddr != NULL) {
3303 			bus_dmamem_free(dma->dma_tag, dma->dma_vaddr,
3304 			    dma->dma_map);
3305 		}
3306 
3307 		bus_dma_tag_destroy(dma->dma_tag);
3308 	}
3309 	bzero(dma, sizeof(struct vmxnet3_dma_alloc));
3310 }
3311 
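/*
 * Fetch a per-device integer tunable of the form "hw.vmx.<unit>.<knob>"
 * from the kernel environment, returning the supplied default when the
 * tunable is not set.
 */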
3312 static int
3313 vmxnet3_tunable_int(struct vmxnet3_softc *sc, const char *knob, int def)
3314 {
3315 	char path[64];
3316 
3317 	snprintf(path, sizeof(path),
3318 	    "hw.vmx.%d.%s", device_get_unit(sc->vmx_dev), knob);
3319 	TUNABLE_INT_FETCH(path, &def);
3320 
3321 	return (def);
3322 }
3323 
3324 /*
3325  * Since this is a purely paravirtualized device, we do not have
3326  * to worry about DMA coherency. But at times, we must make sure
3327  * both the compiler and CPU do not reorder memory operations.
3328  */
3329 static inline void
3330 vmxnet3_barrier(struct vmxnet3_softc *sc, vmxnet3_barrier_t type)
3331 {
3332 
3333 	switch (type) {
3334 	case VMXNET3_BARRIER_RD:
3335 		rmb();
3336 		break;
3337 	case VMXNET3_BARRIER_WR:
3338 		wmb();
3339 		break;
3340 	case VMXNET3_BARRIER_RDWR:
3341 		mb();
3342 		break;
3343 	default:
3344 		panic("%s: bad barrier type %d", __func__, type);
3345 	}
3346 }
3347