xref: /freebsd/sys/dev/vmware/vmxnet3/if_vmx.c (revision 8f82136aec620025030eaee0cc4ae7c463a8057e)
1e3c97c2cSBryan Venteicher /*-
2e3c97c2cSBryan Venteicher  * Copyright (c) 2013 Tsubai Masanari
3e3c97c2cSBryan Venteicher  * Copyright (c) 2013 Bryan Venteicher <bryanv@FreeBSD.org>
4*8f82136aSPatrick Kelsey  * Copyright (c) 2018 Patrick Kelsey
5e3c97c2cSBryan Venteicher  *
6e3c97c2cSBryan Venteicher  * Permission to use, copy, modify, and distribute this software for any
7e3c97c2cSBryan Venteicher  * purpose with or without fee is hereby granted, provided that the above
8e3c97c2cSBryan Venteicher  * copyright notice and this permission notice appear in all copies.
9e3c97c2cSBryan Venteicher  *
10e3c97c2cSBryan Venteicher  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11e3c97c2cSBryan Venteicher  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12e3c97c2cSBryan Venteicher  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13e3c97c2cSBryan Venteicher  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14e3c97c2cSBryan Venteicher  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15e3c97c2cSBryan Venteicher  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16e3c97c2cSBryan Venteicher  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17e3c97c2cSBryan Venteicher  *
18e3c97c2cSBryan Venteicher  * $OpenBSD: src/sys/dev/pci/if_vmx.c,v 1.11 2013/06/22 00:28:10 uebayasi Exp $
19e3c97c2cSBryan Venteicher  */
20e3c97c2cSBryan Venteicher 
21e3c97c2cSBryan Venteicher /* Driver for VMware vmxnet3 virtual ethernet devices. */
22e3c97c2cSBryan Venteicher 
23e3c97c2cSBryan Venteicher #include <sys/cdefs.h>
24e3c97c2cSBryan Venteicher __FBSDID("$FreeBSD$");
25e3c97c2cSBryan Venteicher 
26e3c97c2cSBryan Venteicher #include <sys/param.h>
27e3c97c2cSBryan Venteicher #include <sys/systm.h>
28e3c97c2cSBryan Venteicher #include <sys/kernel.h>
29e3c97c2cSBryan Venteicher #include <sys/endian.h>
30e3c97c2cSBryan Venteicher #include <sys/sockio.h>
31e3c97c2cSBryan Venteicher #include <sys/mbuf.h>
32e3c97c2cSBryan Venteicher #include <sys/malloc.h>
33e3c97c2cSBryan Venteicher #include <sys/module.h>
34e3c97c2cSBryan Venteicher #include <sys/socket.h>
35e3c97c2cSBryan Venteicher #include <sys/sysctl.h>
36e557c1ddSBryan Venteicher #include <sys/smp.h>
37e3c97c2cSBryan Venteicher #include <vm/vm.h>
38e3c97c2cSBryan Venteicher #include <vm/pmap.h>
39e3c97c2cSBryan Venteicher 
40e3c97c2cSBryan Venteicher #include <net/ethernet.h>
41e3c97c2cSBryan Venteicher #include <net/if.h>
4276039bc8SGleb Smirnoff #include <net/if_var.h>
43e3c97c2cSBryan Venteicher #include <net/if_arp.h>
44e3c97c2cSBryan Venteicher #include <net/if_dl.h>
45e3c97c2cSBryan Venteicher #include <net/if_types.h>
46e3c97c2cSBryan Venteicher #include <net/if_media.h>
47e3c97c2cSBryan Venteicher #include <net/if_vlan_var.h>
48*8f82136aSPatrick Kelsey #include <net/iflib.h>
49e3c97c2cSBryan Venteicher 
50e3c97c2cSBryan Venteicher #include <netinet/in_systm.h>
51e3c97c2cSBryan Venteicher #include <netinet/in.h>
52e3c97c2cSBryan Venteicher #include <netinet/ip.h>
53e3c97c2cSBryan Venteicher #include <netinet/ip6.h>
54e3c97c2cSBryan Venteicher #include <netinet6/ip6_var.h>
55e3c97c2cSBryan Venteicher #include <netinet/udp.h>
56e3c97c2cSBryan Venteicher #include <netinet/tcp.h>
57e3c97c2cSBryan Venteicher 
58e3c97c2cSBryan Venteicher #include <machine/bus.h>
59e3c97c2cSBryan Venteicher #include <machine/resource.h>
60e3c97c2cSBryan Venteicher #include <sys/bus.h>
61e3c97c2cSBryan Venteicher #include <sys/rman.h>
62e3c97c2cSBryan Venteicher 
63e3c97c2cSBryan Venteicher #include <dev/pci/pcireg.h>
64e3c97c2cSBryan Venteicher #include <dev/pci/pcivar.h>
65e3c97c2cSBryan Venteicher 
66*8f82136aSPatrick Kelsey #include "ifdi_if.h"
67*8f82136aSPatrick Kelsey 
68e3c97c2cSBryan Venteicher #include "if_vmxreg.h"
69e3c97c2cSBryan Venteicher #include "if_vmxvar.h"
70e3c97c2cSBryan Venteicher 
71e3c97c2cSBryan Venteicher #include "opt_inet.h"
72e3c97c2cSBryan Venteicher #include "opt_inet6.h"
73e3c97c2cSBryan Venteicher 
74e3c97c2cSBryan Venteicher 
/* PCI IDs of the single VMware vendor/device pair that identifies vmxnet3. */
#define VMXNET3_VMWARE_VENDOR_ID	0x15AD
#define VMXNET3_VMWARE_DEVICE_ID	0x07B0

/*
 * PnP/probe table consumed by iflib_device_probe() (and exported below
 * via IFLIB_PNP_INFO) to match this driver to vmxnet3 adapters.
 */
static pci_vendor_info_t vmxnet3_vendor_info_array[] =
{
	PVID(VMXNET3_VMWARE_VENDOR_ID, VMXNET3_VMWARE_DEVICE_ID, "VMware VMXNET3 Ethernet Adapter"),
	/* required last entry */
	PVID_END
};
84*8f82136aSPatrick Kelsey 
85*8f82136aSPatrick Kelsey static void	*vmxnet3_register(device_t);
86*8f82136aSPatrick Kelsey static int	vmxnet3_attach_pre(if_ctx_t);
87*8f82136aSPatrick Kelsey static int	vmxnet3_msix_intr_assign(if_ctx_t, int);
88*8f82136aSPatrick Kelsey static void	vmxnet3_free_irqs(struct vmxnet3_softc *);
89*8f82136aSPatrick Kelsey static int	vmxnet3_attach_post(if_ctx_t);
90*8f82136aSPatrick Kelsey static int	vmxnet3_detach(if_ctx_t);
91*8f82136aSPatrick Kelsey static int	vmxnet3_shutdown(if_ctx_t);
92*8f82136aSPatrick Kelsey static int	vmxnet3_suspend(if_ctx_t);
93*8f82136aSPatrick Kelsey static int	vmxnet3_resume(if_ctx_t);
94e3c97c2cSBryan Venteicher 
95e3c97c2cSBryan Venteicher static int	vmxnet3_alloc_resources(struct vmxnet3_softc *);
96e3c97c2cSBryan Venteicher static void	vmxnet3_free_resources(struct vmxnet3_softc *);
97e3c97c2cSBryan Venteicher static int	vmxnet3_check_version(struct vmxnet3_softc *);
98*8f82136aSPatrick Kelsey static void	vmxnet3_set_interrupt_idx(struct vmxnet3_softc *);
99e3c97c2cSBryan Venteicher 
100*8f82136aSPatrick Kelsey static int	vmxnet3_queues_shared_alloc(struct vmxnet3_softc *);
101*8f82136aSPatrick Kelsey static void	vmxnet3_init_txq(struct vmxnet3_softc *, int);
102*8f82136aSPatrick Kelsey static int	vmxnet3_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
103*8f82136aSPatrick Kelsey static void	vmxnet3_init_rxq(struct vmxnet3_softc *, int, int);
104*8f82136aSPatrick Kelsey static int	vmxnet3_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
105*8f82136aSPatrick Kelsey static void	vmxnet3_queues_free(if_ctx_t);
106e3c97c2cSBryan Venteicher 
107e3c97c2cSBryan Venteicher static int	vmxnet3_alloc_shared_data(struct vmxnet3_softc *);
108e3c97c2cSBryan Venteicher static void	vmxnet3_free_shared_data(struct vmxnet3_softc *);
109e3c97c2cSBryan Venteicher static int	vmxnet3_alloc_mcast_table(struct vmxnet3_softc *);
110*8f82136aSPatrick Kelsey static void	vmxnet3_free_mcast_table(struct vmxnet3_softc *);
111e3c97c2cSBryan Venteicher static void	vmxnet3_init_shared_data(struct vmxnet3_softc *);
112e557c1ddSBryan Venteicher static void	vmxnet3_reinit_rss_shared_data(struct vmxnet3_softc *);
113e3c97c2cSBryan Venteicher static void	vmxnet3_reinit_shared_data(struct vmxnet3_softc *);
114e3c97c2cSBryan Venteicher static int	vmxnet3_alloc_data(struct vmxnet3_softc *);
115e3c97c2cSBryan Venteicher static void	vmxnet3_free_data(struct vmxnet3_softc *);
116e3c97c2cSBryan Venteicher 
117e3c97c2cSBryan Venteicher static void	vmxnet3_evintr(struct vmxnet3_softc *);
118*8f82136aSPatrick Kelsey static int	vmxnet3_isc_txd_encap(void *, if_pkt_info_t);
119*8f82136aSPatrick Kelsey static void	vmxnet3_isc_txd_flush(void *, uint16_t, qidx_t);
120*8f82136aSPatrick Kelsey static int	vmxnet3_isc_txd_credits_update(void *, uint16_t, bool);
121*8f82136aSPatrick Kelsey static int	vmxnet3_isc_rxd_available(void *, uint16_t, qidx_t, qidx_t);
122*8f82136aSPatrick Kelsey static int	vmxnet3_isc_rxd_pkt_get(void *, if_rxd_info_t);
123*8f82136aSPatrick Kelsey static void	vmxnet3_isc_rxd_refill(void *, if_rxd_update_t);
124*8f82136aSPatrick Kelsey static void	vmxnet3_isc_rxd_flush(void *, uint16_t, uint8_t, qidx_t);
125*8f82136aSPatrick Kelsey static int	vmxnet3_legacy_intr(void *);
126*8f82136aSPatrick Kelsey static int	vmxnet3_rxq_intr(void *);
127*8f82136aSPatrick Kelsey static int	vmxnet3_event_intr(void *);
128e3c97c2cSBryan Venteicher 
129*8f82136aSPatrick Kelsey static void	vmxnet3_stop(if_ctx_t);
130e3c97c2cSBryan Venteicher 
131e3c97c2cSBryan Venteicher static void	vmxnet3_txinit(struct vmxnet3_softc *, struct vmxnet3_txqueue *);
132*8f82136aSPatrick Kelsey static void	vmxnet3_rxinit(struct vmxnet3_softc *, struct vmxnet3_rxqueue *);
133*8f82136aSPatrick Kelsey static void	vmxnet3_reinit_queues(struct vmxnet3_softc *);
134e3c97c2cSBryan Venteicher static int	vmxnet3_enable_device(struct vmxnet3_softc *);
135e3c97c2cSBryan Venteicher static void	vmxnet3_reinit_rxfilters(struct vmxnet3_softc *);
136*8f82136aSPatrick Kelsey static void	vmxnet3_init(if_ctx_t);
137*8f82136aSPatrick Kelsey static void	vmxnet3_multi_set(if_ctx_t);
138*8f82136aSPatrick Kelsey static int	vmxnet3_mtu_set(if_ctx_t, uint32_t);
139*8f82136aSPatrick Kelsey static void	vmxnet3_media_status(if_ctx_t, struct ifmediareq *);
140*8f82136aSPatrick Kelsey static int	vmxnet3_media_change(if_ctx_t);
141*8f82136aSPatrick Kelsey static int	vmxnet3_promisc_set(if_ctx_t, int);
142*8f82136aSPatrick Kelsey static uint64_t	vmxnet3_get_counter(if_ctx_t, ift_counter);
143*8f82136aSPatrick Kelsey static void	vmxnet3_update_admin_status(if_ctx_t);
144*8f82136aSPatrick Kelsey static void	vmxnet3_txq_timer(if_ctx_t, uint16_t);
145e3c97c2cSBryan Venteicher 
146e3c97c2cSBryan Venteicher static void	vmxnet3_update_vlan_filter(struct vmxnet3_softc *, int,
147e3c97c2cSBryan Venteicher 		    uint16_t);
148*8f82136aSPatrick Kelsey static void	vmxnet3_vlan_register(if_ctx_t, uint16_t);
149*8f82136aSPatrick Kelsey static void	vmxnet3_vlan_unregister(if_ctx_t, uint16_t);
150*8f82136aSPatrick Kelsey static void	vmxnet3_set_rxfilter(struct vmxnet3_softc *, int);
151e3c97c2cSBryan Venteicher 
152e557c1ddSBryan Venteicher static void	vmxnet3_refresh_host_stats(struct vmxnet3_softc *);
153*8f82136aSPatrick Kelsey static int	vmxnet3_link_is_up(struct vmxnet3_softc *);
154e3c97c2cSBryan Venteicher static void	vmxnet3_link_status(struct vmxnet3_softc *);
155e3c97c2cSBryan Venteicher static void	vmxnet3_set_lladdr(struct vmxnet3_softc *);
156e3c97c2cSBryan Venteicher static void	vmxnet3_get_lladdr(struct vmxnet3_softc *);
157e3c97c2cSBryan Venteicher 
158e3c97c2cSBryan Venteicher static void	vmxnet3_setup_txq_sysctl(struct vmxnet3_txqueue *,
159e3c97c2cSBryan Venteicher 		    struct sysctl_ctx_list *, struct sysctl_oid_list *);
160e3c97c2cSBryan Venteicher static void	vmxnet3_setup_rxq_sysctl(struct vmxnet3_rxqueue *,
161e3c97c2cSBryan Venteicher 		    struct sysctl_ctx_list *, struct sysctl_oid_list *);
162e3c97c2cSBryan Venteicher static void	vmxnet3_setup_queue_sysctl(struct vmxnet3_softc *,
163e3c97c2cSBryan Venteicher 		    struct sysctl_ctx_list *, struct sysctl_oid_list *);
164e3c97c2cSBryan Venteicher static void	vmxnet3_setup_sysctl(struct vmxnet3_softc *);
165e3c97c2cSBryan Venteicher 
166e3c97c2cSBryan Venteicher static void	vmxnet3_write_bar0(struct vmxnet3_softc *, bus_size_t,
167e3c97c2cSBryan Venteicher 		    uint32_t);
168e3c97c2cSBryan Venteicher static uint32_t	vmxnet3_read_bar1(struct vmxnet3_softc *, bus_size_t);
169e3c97c2cSBryan Venteicher static void	vmxnet3_write_bar1(struct vmxnet3_softc *, bus_size_t,
170e3c97c2cSBryan Venteicher 		    uint32_t);
171e3c97c2cSBryan Venteicher static void	vmxnet3_write_cmd(struct vmxnet3_softc *, uint32_t);
172e3c97c2cSBryan Venteicher static uint32_t	vmxnet3_read_cmd(struct vmxnet3_softc *, uint32_t);
173e3c97c2cSBryan Venteicher 
174*8f82136aSPatrick Kelsey static int	vmxnet3_tx_queue_intr_enable(if_ctx_t, uint16_t);
175*8f82136aSPatrick Kelsey static int	vmxnet3_rx_queue_intr_enable(if_ctx_t, uint16_t);
176*8f82136aSPatrick Kelsey static void	vmxnet3_link_intr_enable(if_ctx_t);
177e3c97c2cSBryan Venteicher static void	vmxnet3_enable_intr(struct vmxnet3_softc *, int);
178e3c97c2cSBryan Venteicher static void	vmxnet3_disable_intr(struct vmxnet3_softc *, int);
179*8f82136aSPatrick Kelsey static void	vmxnet3_intr_enable_all(if_ctx_t);
180*8f82136aSPatrick Kelsey static void	vmxnet3_intr_disable_all(if_ctx_t);
181e3c97c2cSBryan Venteicher 
/*
 * Barrier flavors accepted by vmxnet3_barrier(), which orders accesses
 * to the descriptor memory shared with the hypervisor.
 */
typedef enum {
	VMXNET3_BARRIER_RD,	/* read barrier */
	VMXNET3_BARRIER_WR,	/* write barrier */
	VMXNET3_BARRIER_RDWR,	/* full read/write barrier */
} vmxnet3_barrier_t;

static void	vmxnet3_barrier(struct vmxnet3_softc *, vmxnet3_barrier_t);
1903c5dfe89SBryan Venteicher 
/*
 * newbus device methods.  Apart from device_register (which hands iflib
 * our shared-context template), every entry point is delegated to the
 * generic iflib device glue, which in turn calls back into the
 * ifdi_* methods declared in vmxnet3_iflib_methods below.
 */
static device_method_t vmxnet3_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, vmxnet3_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
	DEVMETHOD(device_suspend, iflib_device_suspend),
	DEVMETHOD(device_resume, iflib_device_resume),
	DEVMETHOD_END
};
202e3c97c2cSBryan Venteicher 
/* newbus driver glue: unit name "vmx", per-device softc sized for iflib. */
static driver_t vmxnet3_driver = {
	"vmx", vmxnet3_methods, sizeof(struct vmxnet3_softc)
};
206e3c97c2cSBryan Venteicher 
static devclass_t vmxnet3_devclass;
/* Register the driver on the PCI bus and export its PnP match table. */
DRIVER_MODULE(vmx, pci, vmxnet3_driver, vmxnet3_devclass, 0, 0);
IFLIB_PNP_INFO(pci, vmx, vmxnet3_vendor_info_array);
MODULE_VERSION(vmx, 2);

MODULE_DEPEND(vmx, pci, 1, 1, 1);
MODULE_DEPEND(vmx, ether, 1, 1, 1);
MODULE_DEPEND(vmx, iflib, 1, 1, 1);
215e3c97c2cSBryan Venteicher 
/*
 * iflib driver-side (ifdi) methods: the hooks iflib invokes for queue
 * allocation, lifecycle, configuration, interrupts, and VLAN events.
 */
static device_method_t vmxnet3_iflib_methods[] = {
	/* Queue set allocation/teardown */
	DEVMETHOD(ifdi_tx_queues_alloc, vmxnet3_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, vmxnet3_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, vmxnet3_queues_free),

	/* Attach/detach lifecycle */
	DEVMETHOD(ifdi_attach_pre, vmxnet3_attach_pre),
	DEVMETHOD(ifdi_attach_post, vmxnet3_attach_post),
	DEVMETHOD(ifdi_detach, vmxnet3_detach),

	/* Runtime control and status */
	DEVMETHOD(ifdi_init, vmxnet3_init),
	DEVMETHOD(ifdi_stop, vmxnet3_stop),
	DEVMETHOD(ifdi_multi_set, vmxnet3_multi_set),
	DEVMETHOD(ifdi_mtu_set, vmxnet3_mtu_set),
	DEVMETHOD(ifdi_media_status, vmxnet3_media_status),
	DEVMETHOD(ifdi_media_change, vmxnet3_media_change),
	DEVMETHOD(ifdi_promisc_set, vmxnet3_promisc_set),
	DEVMETHOD(ifdi_get_counter, vmxnet3_get_counter),
	DEVMETHOD(ifdi_update_admin_status, vmxnet3_update_admin_status),
	DEVMETHOD(ifdi_timer, vmxnet3_txq_timer),

	/* Interrupt management */
	DEVMETHOD(ifdi_tx_queue_intr_enable, vmxnet3_tx_queue_intr_enable),
	DEVMETHOD(ifdi_rx_queue_intr_enable, vmxnet3_rx_queue_intr_enable),
	DEVMETHOD(ifdi_link_intr_enable, vmxnet3_link_intr_enable),
	DEVMETHOD(ifdi_intr_enable, vmxnet3_intr_enable_all),
	DEVMETHOD(ifdi_intr_disable, vmxnet3_intr_disable_all),
	DEVMETHOD(ifdi_msix_intr_assign, vmxnet3_msix_intr_assign),

	/* VLAN hardware filter maintenance */
	DEVMETHOD(ifdi_vlan_register, vmxnet3_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister, vmxnet3_vlan_unregister),

	/* Power/state transitions */
	DEVMETHOD(ifdi_shutdown, vmxnet3_shutdown),
	DEVMETHOD(ifdi_suspend, vmxnet3_suspend),
	DEVMETHOD(ifdi_resume, vmxnet3_resume),

	DEVMETHOD_END
};
252*8f82136aSPatrick Kelsey 
/* Driver description referenced from vmxnet3_sctx_init.isc_driver. */
static driver_t vmxnet3_iflib_driver = {
	"vmx", vmxnet3_iflib_methods, sizeof(struct vmxnet3_softc)
};
256*8f82136aSPatrick Kelsey 
/*
 * Fast-path descriptor operations installed into the softc context in
 * vmxnet3_attach_pre(); iflib calls these from its TX/RX hot paths.
 */
struct if_txrx vmxnet3_txrx = {
	.ift_txd_encap = vmxnet3_isc_txd_encap,
	.ift_txd_flush = vmxnet3_isc_txd_flush,
	.ift_txd_credits_update = vmxnet3_isc_txd_credits_update,
	.ift_rxd_available = vmxnet3_isc_rxd_available,
	.ift_rxd_pkt_get = vmxnet3_isc_rxd_pkt_get,
	.ift_rxd_refill = vmxnet3_isc_rxd_refill,
	.ift_rxd_flush = vmxnet3_isc_rxd_flush,
	.ift_legacy_intr = vmxnet3_legacy_intr
};
267*8f82136aSPatrick Kelsey 
/*
 * Shared-context template returned from vmxnet3_register().  It fixes
 * the driver's queue topology and descriptor/DMA constraints before
 * iflib allocates anything.
 */
static struct if_shared_ctx vmxnet3_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_q_align = 512, /* descriptor ring alignment required by the device */

	.isc_tx_maxsize = VMXNET3_TX_MAXSIZE,
	.isc_tx_maxsegsize = VMXNET3_TX_MAXSEGSIZE,
	.isc_tso_maxsize = VMXNET3_TSO_MAXSIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = VMXNET3_TX_MAXSEGSIZE,

	/*
	 * These values are used to configure the busdma tag used for
	 * receive descriptors.  Each receive descriptor only points to one
	 * buffer.
	 */
	.isc_rx_maxsize = VMXNET3_RX_MAXSEGSIZE, /* One buf per descriptor */
	.isc_rx_nsegments = 1,  /* One mapping per descriptor */
	.isc_rx_maxsegsize = VMXNET3_RX_MAXSEGSIZE,

	.isc_admin_intrcnt = 1, /* one extra vector for the event interrupt */
	.isc_vendor_info = vmxnet3_vendor_info_array,
	.isc_driver_version = "2",
	.isc_driver = &vmxnet3_iflib_driver,
	.isc_flags = IFLIB_HAS_RXCQ | IFLIB_HAS_TXCQ,

	/*
	 * Number of receive queues per receive queue set, with associated
	 * descriptor settings for each.
	 */
	.isc_nrxqs = 3,
	.isc_nfl = 2, /* one free list for each receive command queue */
	.isc_nrxd_min = {VMXNET3_MIN_RX_NDESC, VMXNET3_MIN_RX_NDESC, VMXNET3_MIN_RX_NDESC},
	.isc_nrxd_max = {VMXNET3_MAX_RX_NDESC, VMXNET3_MAX_RX_NDESC, VMXNET3_MAX_RX_NDESC},
	.isc_nrxd_default = {VMXNET3_DEF_RX_NDESC, VMXNET3_DEF_RX_NDESC, VMXNET3_DEF_RX_NDESC},

	/*
	 * Number of transmit queues per transmit queue set, with associated
	 * descriptor settings for each.
	 */
	.isc_ntxqs = 2,
	.isc_ntxd_min = {VMXNET3_MIN_TX_NDESC, VMXNET3_MIN_TX_NDESC},
	.isc_ntxd_max = {VMXNET3_MAX_TX_NDESC, VMXNET3_MAX_TX_NDESC},
	.isc_ntxd_default = {VMXNET3_DEF_TX_NDESC, VMXNET3_DEF_TX_NDESC},
};
311*8f82136aSPatrick Kelsey 
/*
 * device_register method: hand iflib the shared-context template that
 * describes this driver's queue and descriptor requirements.
 */
static void *
vmxnet3_register(device_t dev)
{
	return (&vmxnet3_sctx_init);
}
317e3c97c2cSBryan Venteicher 
/*
 * ifdi_attach_pre: first driver entry point called during attach, before
 * iflib allocates interrupts or queue memory.  Caches iflib handles in
 * the softc, fills in the softc-context descriptor/queue geometry, maps
 * the PCI BARs, verifies the device version, selects the interrupt mode
 * advertised by the hypervisor, and publishes capabilities and the MAC
 * address.  Returns 0 on success or a bus/ENXIO-style error, after
 * undoing its own allocations.
 */
static int
vmxnet3_attach_pre(if_ctx_t ctx)
{
	device_t dev;
	if_softc_ctx_t scctx;
	struct vmxnet3_softc *sc;
	uint32_t intr_config;
	int error;

	dev = iflib_get_dev(ctx);
	sc = iflib_get_softc(ctx);
	sc->vmx_dev = dev;
	sc->vmx_ctx = ctx;
	sc->vmx_sctx = iflib_get_sctx(ctx);
	sc->vmx_scctx = iflib_get_softc_ctx(ctx);
	sc->vmx_ifp = iflib_get_ifp(ctx);
	sc->vmx_media = iflib_get_media(ctx);
	scctx = sc->vmx_scctx;

	scctx->isc_tx_nsegments = VMXNET3_TX_MAXSEGS;
	scctx->isc_tx_tso_segments_max = VMXNET3_TX_MAXSEGS;
	/* isc_tx_tso_size_max doesn't include possible vlan header */
	scctx->isc_tx_tso_size_max = VMXNET3_TSO_MAXSIZE;
	scctx->isc_tx_tso_segsize_max = VMXNET3_TX_MAXSEGSIZE;
	scctx->isc_txrx = &vmxnet3_txrx;

	/* If 0, the iflib tunable was not set, so set to the default */
	if (scctx->isc_nrxqsets == 0)
		scctx->isc_nrxqsets = VMXNET3_DEF_RX_QUEUES;
	scctx->isc_nrxqsets_max = min(VMXNET3_MAX_RX_QUEUES, mp_ncpus);

	/* If 0, the iflib tunable was not set, so set to the default */
	if (scctx->isc_ntxqsets == 0)
		scctx->isc_ntxqsets = VMXNET3_DEF_TX_QUEUES;
	scctx->isc_ntxqsets_max = min(VMXNET3_MAX_TX_QUEUES, mp_ncpus);

	/*
	 * Enforce that the transmit completion queue descriptor count is
	 * the same as the transmit command queue descriptor count.
	 */
	scctx->isc_ntxd[0] = scctx->isc_ntxd[1];
	scctx->isc_txqsizes[0] =
	    sizeof(struct vmxnet3_txcompdesc) * scctx->isc_ntxd[0];
	scctx->isc_txqsizes[1] =
	    sizeof(struct vmxnet3_txdesc) * scctx->isc_ntxd[1];

	/*
	 * Enforce that the receive completion queue descriptor count is the
	 * sum of the receive command queue descriptor counts, and that the
	 * second receive command queue descriptor count is the same as the
	 * first one.
	 */
	scctx->isc_nrxd[2] = scctx->isc_nrxd[1];
	scctx->isc_nrxd[0] = scctx->isc_nrxd[1] + scctx->isc_nrxd[2];
	scctx->isc_rxqsizes[0] =
	    sizeof(struct vmxnet3_rxcompdesc) * scctx->isc_nrxd[0];
	scctx->isc_rxqsizes[1] =
	    sizeof(struct vmxnet3_rxdesc) * scctx->isc_nrxd[1];
	scctx->isc_rxqsizes[2] =
	    sizeof(struct vmxnet3_rxdesc) * scctx->isc_nrxd[2];

	scctx->isc_rss_table_size = UPT1_RSS_MAX_IND_TABLE_SIZE;

	/* Map PCI BARs */
	error = vmxnet3_alloc_resources(sc);
	if (error)
		goto fail;

	/* Check device versions */
	error = vmxnet3_check_version(sc);
	if (error)
		goto fail;

	/*
	 * The interrupt mode can be set in the hypervisor configuration via
	 * the parameter ethernet<N>.intrMode.
	 */
	intr_config = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_INTRCFG);
	/* Bits 3:2 select how interrupts are masked (auto/active/lazy). */
	sc->vmx_intr_mask_mode = (intr_config >> 2) & 0x03;

	/*
	 * Configure the softc context to attempt to configure the interrupt
	 * mode now indicated by intr_config.  iflib will follow the usual
	 * fallback path MSIX -> MSI -> LEGACY, starting at the configured
	 * starting mode.
	 */
	switch (intr_config & 0x03) {
	case VMXNET3_IT_AUTO:
	case VMXNET3_IT_MSIX:
		scctx->isc_msix_bar = pci_msix_table_bar(dev);
		break;
	case VMXNET3_IT_MSI:
		/* -1 tells iflib to skip MSI-X; explicitly disable it too. */
		scctx->isc_msix_bar = -1;
		scctx->isc_disable_msix = 1;
		break;
	case VMXNET3_IT_LEGACY:
		scctx->isc_msix_bar = 0;
		break;
	}

	scctx->isc_tx_csum_flags = VMXNET3_CSUM_ALL_OFFLOAD;
	scctx->isc_capabilities = scctx->isc_capenable =
	    IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6 |
	    IFCAP_TSO4 | IFCAP_TSO6 |
	    IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 |
	    IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |
	    IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO |
	    IFCAP_JUMBO_MTU;

	/* These capabilities are not enabled by default. */
	scctx->isc_capabilities |= IFCAP_LRO | IFCAP_VLAN_HWFILTER;

	vmxnet3_get_lladdr(sc);
	iflib_set_mac(ctx, sc->vmx_lladdr);

	return (0);
fail:
	/*
	 * We must completely clean up anything allocated above as iflib
	 * will not invoke any other driver entry points as a result of this
	 * failure.
	 */
	vmxnet3_free_resources(sc);

	return (error);
}
444e3c97c2cSBryan Venteicher 
/*
 * ifdi_msix_intr_assign: allocate and wire up MSI-X vectors.  Vector
 * layout: vectors 1..nrxqsets service the receive queues, vector
 * nrxqsets+1 services device events (admin); transmit queues get
 * software task queues only, no dedicated hardware vector.
 *
 * NOTE(review): if a later allocation fails, earlier irqs are not freed
 * here before returning — presumably iflib tears them down on an
 * msix_intr_assign failure; verify against the iflib attach path.
 */
static int
vmxnet3_msix_intr_assign(if_ctx_t ctx, int msix)
{
	struct vmxnet3_softc *sc;
	if_softc_ctx_t scctx;
	struct vmxnet3_rxqueue *rxq;
	int error;
	int i;
	char irq_name[16];

	sc = iflib_get_softc(ctx);
	scctx = sc->vmx_scctx;

	for (i = 0; i < scctx->isc_nrxqsets; i++) {
		snprintf(irq_name, sizeof(irq_name), "rxq%d", i);

		rxq = &sc->vmx_rxq[i];
		/* Vector i + 1: vector 0 is left for the event interrupt path. */
		error = iflib_irq_alloc_generic(ctx, &rxq->vxrxq_irq, i + 1,
		    IFLIB_INTR_RX, vmxnet3_rxq_intr, rxq, i, irq_name);
		if (error) {
			device_printf(iflib_get_dev(ctx),
			    "Failed to register rxq %d interrupt handler\n", i);
			return (error);
		}
	}

	for (i = 0; i < scctx->isc_ntxqsets; i++) {
		snprintf(irq_name, sizeof(irq_name), "txq%d", i);

		/*
		 * Don't provide the corresponding rxq irq for reference -
		 * we want the transmit task to be attached to a task queue
		 * that is different from the one used by the corresponding
		 * rxq irq.  That is because the TX doorbell writes are very
		 * expensive as virtualized MMIO operations, so we want to
		 * be able to defer them to another core when possible so
		 * that they don't steal receive processing cycles during
		 * stack turnarounds like TCP ACK generation.  The other
		 * piece to this approach is enabling the iflib abdicate
		 * option (currently via an interface-specific
		 * tunable/sysctl).
		 */
		iflib_softirq_alloc_generic(ctx, NULL, IFLIB_INTR_TX, NULL, i,
		    irq_name);
	}

	/* The event (admin) interrupt uses the vector after all rxq vectors. */
	error = iflib_irq_alloc_generic(ctx, &sc->vmx_event_intr_irq,
	    scctx->isc_nrxqsets + 1, IFLIB_INTR_ADMIN, vmxnet3_event_intr, sc, 0,
	    "event");
	if (error) {
		device_printf(iflib_get_dev(ctx),
		    "Failed to register event interrupt handler\n");
		return (error);
	}

	return (0);
}
502e3c97c2cSBryan Venteicher 
503*8f82136aSPatrick Kelsey static void
504*8f82136aSPatrick Kelsey vmxnet3_free_irqs(struct vmxnet3_softc *sc)
505*8f82136aSPatrick Kelsey {
506*8f82136aSPatrick Kelsey 	if_softc_ctx_t scctx;
507*8f82136aSPatrick Kelsey 	struct vmxnet3_rxqueue *rxq;
508*8f82136aSPatrick Kelsey 	int i;
509*8f82136aSPatrick Kelsey 
510*8f82136aSPatrick Kelsey 	scctx = sc->vmx_scctx;
511*8f82136aSPatrick Kelsey 
512*8f82136aSPatrick Kelsey 	for (i = 0; i < scctx->isc_nrxqsets; i++) {
513*8f82136aSPatrick Kelsey 		rxq = &sc->vmx_rxq[i];
514*8f82136aSPatrick Kelsey 		iflib_irq_free(sc->vmx_ctx, &rxq->vxrxq_irq);
515*8f82136aSPatrick Kelsey 	}
516*8f82136aSPatrick Kelsey 
517*8f82136aSPatrick Kelsey 	iflib_irq_free(sc->vmx_ctx, &sc->vmx_event_intr_irq);
518*8f82136aSPatrick Kelsey }
519*8f82136aSPatrick Kelsey 
520*8f82136aSPatrick Kelsey static int
521*8f82136aSPatrick Kelsey vmxnet3_attach_post(if_ctx_t ctx)
522*8f82136aSPatrick Kelsey {
523*8f82136aSPatrick Kelsey 	device_t dev;
524*8f82136aSPatrick Kelsey 	if_softc_ctx_t scctx;
525*8f82136aSPatrick Kelsey 	struct vmxnet3_softc *sc;
526*8f82136aSPatrick Kelsey 	int error;
527*8f82136aSPatrick Kelsey 
528*8f82136aSPatrick Kelsey 	dev = iflib_get_dev(ctx);
529*8f82136aSPatrick Kelsey 	scctx = iflib_get_softc_ctx(ctx);
530*8f82136aSPatrick Kelsey 	sc = iflib_get_softc(ctx);
531*8f82136aSPatrick Kelsey 
532*8f82136aSPatrick Kelsey 	if (scctx->isc_nrxqsets > 1)
533*8f82136aSPatrick Kelsey 		sc->vmx_flags |= VMXNET3_FLAG_RSS;
534*8f82136aSPatrick Kelsey 
535*8f82136aSPatrick Kelsey 	error = vmxnet3_alloc_data(sc);
536*8f82136aSPatrick Kelsey 	if (error)
537*8f82136aSPatrick Kelsey 		goto fail;
538*8f82136aSPatrick Kelsey 
539*8f82136aSPatrick Kelsey 	vmxnet3_set_interrupt_idx(sc);
540*8f82136aSPatrick Kelsey 	vmxnet3_setup_sysctl(sc);
541*8f82136aSPatrick Kelsey 
542*8f82136aSPatrick Kelsey 	ifmedia_add(sc->vmx_media, IFM_ETHER | IFM_AUTO, 0, NULL);
543*8f82136aSPatrick Kelsey 	ifmedia_set(sc->vmx_media, IFM_ETHER | IFM_AUTO);
544*8f82136aSPatrick Kelsey 
545*8f82136aSPatrick Kelsey fail:
546*8f82136aSPatrick Kelsey 	return (error);
547*8f82136aSPatrick Kelsey }
548*8f82136aSPatrick Kelsey 
549*8f82136aSPatrick Kelsey static int
550*8f82136aSPatrick Kelsey vmxnet3_detach(if_ctx_t ctx)
551*8f82136aSPatrick Kelsey {
552*8f82136aSPatrick Kelsey 	struct vmxnet3_softc *sc;
553*8f82136aSPatrick Kelsey 
554*8f82136aSPatrick Kelsey 	sc = iflib_get_softc(ctx);
555*8f82136aSPatrick Kelsey 
556*8f82136aSPatrick Kelsey 	vmxnet3_free_irqs(sc);
557e3c97c2cSBryan Venteicher 	vmxnet3_free_data(sc);
558e3c97c2cSBryan Venteicher 	vmxnet3_free_resources(sc);
559e3c97c2cSBryan Venteicher 
560e3c97c2cSBryan Venteicher 	return (0);
561e3c97c2cSBryan Venteicher }
562e3c97c2cSBryan Venteicher 
563e3c97c2cSBryan Venteicher static int
564*8f82136aSPatrick Kelsey vmxnet3_shutdown(if_ctx_t ctx)
565*8f82136aSPatrick Kelsey {
566*8f82136aSPatrick Kelsey 
567*8f82136aSPatrick Kelsey 	return (0);
568*8f82136aSPatrick Kelsey }
569*8f82136aSPatrick Kelsey 
570*8f82136aSPatrick Kelsey static int
571*8f82136aSPatrick Kelsey vmxnet3_suspend(if_ctx_t ctx)
572*8f82136aSPatrick Kelsey {
573*8f82136aSPatrick Kelsey 
574*8f82136aSPatrick Kelsey 	return (0);
575*8f82136aSPatrick Kelsey }
576*8f82136aSPatrick Kelsey 
577*8f82136aSPatrick Kelsey static int
578*8f82136aSPatrick Kelsey vmxnet3_resume(if_ctx_t ctx)
579e3c97c2cSBryan Venteicher {
580e3c97c2cSBryan Venteicher 
581e3c97c2cSBryan Venteicher 	return (0);
582e3c97c2cSBryan Venteicher }
583e3c97c2cSBryan Venteicher 
584e3c97c2cSBryan Venteicher static int
585e3c97c2cSBryan Venteicher vmxnet3_alloc_resources(struct vmxnet3_softc *sc)
586e3c97c2cSBryan Venteicher {
587e3c97c2cSBryan Venteicher 	device_t dev;
588e3c97c2cSBryan Venteicher 	int rid;
589e3c97c2cSBryan Venteicher 
590e3c97c2cSBryan Venteicher 	dev = sc->vmx_dev;
591e3c97c2cSBryan Venteicher 
592e3c97c2cSBryan Venteicher 	rid = PCIR_BAR(0);
593e3c97c2cSBryan Venteicher 	sc->vmx_res0 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
594e3c97c2cSBryan Venteicher 	    RF_ACTIVE);
595e3c97c2cSBryan Venteicher 	if (sc->vmx_res0 == NULL) {
596e3c97c2cSBryan Venteicher 		device_printf(dev,
597e3c97c2cSBryan Venteicher 		    "could not map BAR0 memory\n");
598e3c97c2cSBryan Venteicher 		return (ENXIO);
599e3c97c2cSBryan Venteicher 	}
600e3c97c2cSBryan Venteicher 
601e3c97c2cSBryan Venteicher 	sc->vmx_iot0 = rman_get_bustag(sc->vmx_res0);
602e3c97c2cSBryan Venteicher 	sc->vmx_ioh0 = rman_get_bushandle(sc->vmx_res0);
603e3c97c2cSBryan Venteicher 
604e3c97c2cSBryan Venteicher 	rid = PCIR_BAR(1);
605e3c97c2cSBryan Venteicher 	sc->vmx_res1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
606e3c97c2cSBryan Venteicher 	    RF_ACTIVE);
607e3c97c2cSBryan Venteicher 	if (sc->vmx_res1 == NULL) {
608e3c97c2cSBryan Venteicher 		device_printf(dev,
609e3c97c2cSBryan Venteicher 		    "could not map BAR1 memory\n");
610e3c97c2cSBryan Venteicher 		return (ENXIO);
611e3c97c2cSBryan Venteicher 	}
612e3c97c2cSBryan Venteicher 
613e3c97c2cSBryan Venteicher 	sc->vmx_iot1 = rman_get_bustag(sc->vmx_res1);
614e3c97c2cSBryan Venteicher 	sc->vmx_ioh1 = rman_get_bushandle(sc->vmx_res1);
615e3c97c2cSBryan Venteicher 
616e3c97c2cSBryan Venteicher 	return (0);
617e3c97c2cSBryan Venteicher }
618e3c97c2cSBryan Venteicher 
619e3c97c2cSBryan Venteicher static void
620e3c97c2cSBryan Venteicher vmxnet3_free_resources(struct vmxnet3_softc *sc)
621e3c97c2cSBryan Venteicher {
622e3c97c2cSBryan Venteicher 	device_t dev;
623e3c97c2cSBryan Venteicher 	int rid;
624e3c97c2cSBryan Venteicher 
625e3c97c2cSBryan Venteicher 	dev = sc->vmx_dev;
626e3c97c2cSBryan Venteicher 
627e3c97c2cSBryan Venteicher 	if (sc->vmx_res0 != NULL) {
628e3c97c2cSBryan Venteicher 		rid = PCIR_BAR(0);
629e3c97c2cSBryan Venteicher 		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->vmx_res0);
630e3c97c2cSBryan Venteicher 		sc->vmx_res0 = NULL;
631e3c97c2cSBryan Venteicher 	}
632e3c97c2cSBryan Venteicher 
633e3c97c2cSBryan Venteicher 	if (sc->vmx_res1 != NULL) {
634e3c97c2cSBryan Venteicher 		rid = PCIR_BAR(1);
635e3c97c2cSBryan Venteicher 		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->vmx_res1);
636e3c97c2cSBryan Venteicher 		sc->vmx_res1 = NULL;
637e3c97c2cSBryan Venteicher 	}
638e3c97c2cSBryan Venteicher }
639e3c97c2cSBryan Venteicher 
640e3c97c2cSBryan Venteicher static int
641e3c97c2cSBryan Venteicher vmxnet3_check_version(struct vmxnet3_softc *sc)
642e3c97c2cSBryan Venteicher {
643e3c97c2cSBryan Venteicher 	device_t dev;
644e3c97c2cSBryan Venteicher 	uint32_t version;
645e3c97c2cSBryan Venteicher 
646e3c97c2cSBryan Venteicher 	dev = sc->vmx_dev;
647e3c97c2cSBryan Venteicher 
648e3c97c2cSBryan Venteicher 	version = vmxnet3_read_bar1(sc, VMXNET3_BAR1_VRRS);
649e3c97c2cSBryan Venteicher 	if ((version & 0x01) == 0) {
650e3c97c2cSBryan Venteicher 		device_printf(dev, "unsupported hardware version %#x\n",
651e3c97c2cSBryan Venteicher 		    version);
652e3c97c2cSBryan Venteicher 		return (ENOTSUP);
6533c965775SBryan Venteicher 	}
654e3c97c2cSBryan Venteicher 	vmxnet3_write_bar1(sc, VMXNET3_BAR1_VRRS, 1);
655e3c97c2cSBryan Venteicher 
656e3c97c2cSBryan Venteicher 	version = vmxnet3_read_bar1(sc, VMXNET3_BAR1_UVRS);
657e3c97c2cSBryan Venteicher 	if ((version & 0x01) == 0) {
658e3c97c2cSBryan Venteicher 		device_printf(dev, "unsupported UPT version %#x\n", version);
659e3c97c2cSBryan Venteicher 		return (ENOTSUP);
6603c965775SBryan Venteicher 	}
661e3c97c2cSBryan Venteicher 	vmxnet3_write_bar1(sc, VMXNET3_BAR1_UVRS, 1);
662e3c97c2cSBryan Venteicher 
663e3c97c2cSBryan Venteicher 	return (0);
664e3c97c2cSBryan Venteicher }
665e3c97c2cSBryan Venteicher 
666e3c97c2cSBryan Venteicher static void
667e3c97c2cSBryan Venteicher vmxnet3_set_interrupt_idx(struct vmxnet3_softc *sc)
668e3c97c2cSBryan Venteicher {
669*8f82136aSPatrick Kelsey 	if_softc_ctx_t scctx;
670e3c97c2cSBryan Venteicher 	struct vmxnet3_txqueue *txq;
671e3c97c2cSBryan Venteicher 	struct vmxnet3_txq_shared *txs;
672e3c97c2cSBryan Venteicher 	struct vmxnet3_rxqueue *rxq;
673e3c97c2cSBryan Venteicher 	struct vmxnet3_rxq_shared *rxs;
674*8f82136aSPatrick Kelsey 	int intr_idx;
675e3c97c2cSBryan Venteicher 	int i;
676e3c97c2cSBryan Venteicher 
677*8f82136aSPatrick Kelsey 	scctx = sc->vmx_scctx;
678e3c97c2cSBryan Venteicher 
679*8f82136aSPatrick Kelsey 	/*
680*8f82136aSPatrick Kelsey 	 * There is either one interrupt, or there is one interrupt per
681*8f82136aSPatrick Kelsey 	 * receive queue.  If there is one interrupt, then all interrupt
682*8f82136aSPatrick Kelsey 	 * indexes are zero.  If there is one interrupt per receive queue,
683*8f82136aSPatrick Kelsey 	 * the transmit queue interrupt indexes are assigned the receive
684*8f82136aSPatrick Kelsey 	 * queue interrupt indexesin round-robin fashion.
685*8f82136aSPatrick Kelsey 	 *
686*8f82136aSPatrick Kelsey 	 * The event interrupt is always the last interrupt index.
687*8f82136aSPatrick Kelsey 	 */
688*8f82136aSPatrick Kelsey 	sc->vmx_event_intr_idx = scctx->isc_vectors - 1;
689e3c97c2cSBryan Venteicher 
690*8f82136aSPatrick Kelsey 	intr_idx = 0;
691*8f82136aSPatrick Kelsey 	for (i = 0; i < scctx->isc_nrxqsets; i++, intr_idx++) {
692e3c97c2cSBryan Venteicher 		rxq = &sc->vmx_rxq[i];
693e3c97c2cSBryan Venteicher 		rxs = rxq->vxrxq_rs;
694*8f82136aSPatrick Kelsey 		rxq->vxrxq_intr_idx = intr_idx;
695e3c97c2cSBryan Venteicher 		rxs->intr_idx = rxq->vxrxq_intr_idx;
696e3c97c2cSBryan Venteicher 	}
697*8f82136aSPatrick Kelsey 
698*8f82136aSPatrick Kelsey 	/*
699*8f82136aSPatrick Kelsey 	 * Assign the tx queues interrupt indexes above what we are actually
700*8f82136aSPatrick Kelsey 	 * using.  These interrupts will never be enabled.
701*8f82136aSPatrick Kelsey 	 */
702*8f82136aSPatrick Kelsey 	intr_idx = scctx->isc_vectors;
703*8f82136aSPatrick Kelsey 	for (i = 0; i < scctx->isc_ntxqsets; i++, intr_idx++) {
704*8f82136aSPatrick Kelsey 		txq = &sc->vmx_txq[i];
705*8f82136aSPatrick Kelsey 		txs = txq->vxtxq_ts;
706*8f82136aSPatrick Kelsey 		txq->vxtxq_intr_idx = intr_idx;
707*8f82136aSPatrick Kelsey 		txs->intr_idx = txq->vxtxq_intr_idx;
708*8f82136aSPatrick Kelsey 	}
709e3c97c2cSBryan Venteicher }
710e3c97c2cSBryan Venteicher 
711e3c97c2cSBryan Venteicher static int
712*8f82136aSPatrick Kelsey vmxnet3_queues_shared_alloc(struct vmxnet3_softc *sc)
713e3c97c2cSBryan Venteicher {
714*8f82136aSPatrick Kelsey 	if_softc_ctx_t scctx;
715*8f82136aSPatrick Kelsey 	int size;
716e3c97c2cSBryan Venteicher 	int error;
717e3c97c2cSBryan Venteicher 
718*8f82136aSPatrick Kelsey 	scctx = sc->vmx_scctx;
719e3c97c2cSBryan Venteicher 
720*8f82136aSPatrick Kelsey 	/*
721*8f82136aSPatrick Kelsey 	 * The txq and rxq shared data areas must be allocated contiguously
722*8f82136aSPatrick Kelsey 	 * as vmxnet3_driver_shared contains only a single address member
723*8f82136aSPatrick Kelsey 	 * for the shared queue data area.
724*8f82136aSPatrick Kelsey 	 */
725*8f82136aSPatrick Kelsey 	size = scctx->isc_ntxqsets * sizeof(struct vmxnet3_txq_shared) +
726*8f82136aSPatrick Kelsey 	    scctx->isc_nrxqsets * sizeof(struct vmxnet3_rxq_shared);
727*8f82136aSPatrick Kelsey 	error = iflib_dma_alloc_align(sc->vmx_ctx, size, 128, &sc->vmx_qs_dma, 0);
728*8f82136aSPatrick Kelsey 	if (error) {
729*8f82136aSPatrick Kelsey 		device_printf(sc->vmx_dev, "cannot alloc queue shared memory\n");
730e3c97c2cSBryan Venteicher 		return (error);
731e3c97c2cSBryan Venteicher 	}
732e3c97c2cSBryan Venteicher 
733e557c1ddSBryan Venteicher 	return (0);
734e557c1ddSBryan Venteicher }
735e557c1ddSBryan Venteicher 
736e557c1ddSBryan Venteicher static void
737*8f82136aSPatrick Kelsey vmxnet3_init_txq(struct vmxnet3_softc *sc, int q)
738e557c1ddSBryan Venteicher {
739e557c1ddSBryan Venteicher 	struct vmxnet3_txqueue *txq;
740*8f82136aSPatrick Kelsey 	struct vmxnet3_comp_ring *txc;
741*8f82136aSPatrick Kelsey 	struct vmxnet3_txring *txr;
742*8f82136aSPatrick Kelsey 	if_softc_ctx_t scctx;
743e557c1ddSBryan Venteicher 
744*8f82136aSPatrick Kelsey 	txq = &sc->vmx_txq[q];
745*8f82136aSPatrick Kelsey 	txc = &txq->vxtxq_comp_ring;
746*8f82136aSPatrick Kelsey 	txr = &txq->vxtxq_cmd_ring;
747*8f82136aSPatrick Kelsey 	scctx = sc->vmx_scctx;
748*8f82136aSPatrick Kelsey 
749*8f82136aSPatrick Kelsey 	snprintf(txq->vxtxq_name, sizeof(txq->vxtxq_name), "%s-tx%d",
750*8f82136aSPatrick Kelsey 	    device_get_nameunit(sc->vmx_dev), q);
751*8f82136aSPatrick Kelsey 
752*8f82136aSPatrick Kelsey 	txq->vxtxq_sc = sc;
753*8f82136aSPatrick Kelsey 	txq->vxtxq_id = q;
754*8f82136aSPatrick Kelsey 	txc->vxcr_ndesc = scctx->isc_ntxd[0];
755*8f82136aSPatrick Kelsey 	txr->vxtxr_ndesc = scctx->isc_ntxd[1];
756e557c1ddSBryan Venteicher }
757*8f82136aSPatrick Kelsey 
758*8f82136aSPatrick Kelsey static int
759*8f82136aSPatrick Kelsey vmxnet3_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
760*8f82136aSPatrick Kelsey     int ntxqs, int ntxqsets)
761*8f82136aSPatrick Kelsey {
762*8f82136aSPatrick Kelsey 	struct vmxnet3_softc *sc;
763*8f82136aSPatrick Kelsey 	int q;
764*8f82136aSPatrick Kelsey 	int error;
765*8f82136aSPatrick Kelsey 	caddr_t kva;
766*8f82136aSPatrick Kelsey 
767*8f82136aSPatrick Kelsey 	sc = iflib_get_softc(ctx);
768*8f82136aSPatrick Kelsey 
769*8f82136aSPatrick Kelsey 	/* Allocate the array of transmit queues */
770*8f82136aSPatrick Kelsey 	sc->vmx_txq = malloc(sizeof(struct vmxnet3_txqueue) *
771*8f82136aSPatrick Kelsey 	    ntxqsets, M_DEVBUF, M_NOWAIT | M_ZERO);
772*8f82136aSPatrick Kelsey 	if (sc->vmx_txq == NULL)
773*8f82136aSPatrick Kelsey 		return (ENOMEM);
774*8f82136aSPatrick Kelsey 
775*8f82136aSPatrick Kelsey 	/* Initialize driver state for each transmit queue */
776*8f82136aSPatrick Kelsey 	for (q = 0; q < ntxqsets; q++)
777*8f82136aSPatrick Kelsey 		vmxnet3_init_txq(sc, q);
778*8f82136aSPatrick Kelsey 
779*8f82136aSPatrick Kelsey 	/*
780*8f82136aSPatrick Kelsey 	 * Allocate queue state that is shared with the device.  This check
781*8f82136aSPatrick Kelsey 	 * and call is performed in both vmxnet3_tx_queues_alloc() and
782*8f82136aSPatrick Kelsey 	 * vmxnet3_rx_queues_alloc() so that we don't have to care which
783*8f82136aSPatrick Kelsey 	 * order iflib invokes those routines in.
784*8f82136aSPatrick Kelsey 	 */
785*8f82136aSPatrick Kelsey 	if (sc->vmx_qs_dma.idi_size == 0) {
786*8f82136aSPatrick Kelsey 		error = vmxnet3_queues_shared_alloc(sc);
787*8f82136aSPatrick Kelsey 		if (error)
788*8f82136aSPatrick Kelsey 			return (error);
789e557c1ddSBryan Venteicher 	}
790*8f82136aSPatrick Kelsey 
791*8f82136aSPatrick Kelsey 	kva = sc->vmx_qs_dma.idi_vaddr;
792*8f82136aSPatrick Kelsey 	for (q = 0; q < ntxqsets; q++) {
793*8f82136aSPatrick Kelsey 		sc->vmx_txq[q].vxtxq_ts = (struct vmxnet3_txq_shared *) kva;
794*8f82136aSPatrick Kelsey 		kva += sizeof(struct vmxnet3_txq_shared);
795*8f82136aSPatrick Kelsey 	}
796*8f82136aSPatrick Kelsey 
797*8f82136aSPatrick Kelsey 	/* Record descriptor ring vaddrs and paddrs */
798*8f82136aSPatrick Kelsey 	for (q = 0; q < ntxqsets; q++) {
799*8f82136aSPatrick Kelsey 		struct vmxnet3_txqueue *txq;
800*8f82136aSPatrick Kelsey 		struct vmxnet3_txring *txr;
801*8f82136aSPatrick Kelsey 		struct vmxnet3_comp_ring *txc;
802*8f82136aSPatrick Kelsey 
803*8f82136aSPatrick Kelsey 		txq = &sc->vmx_txq[q];
804*8f82136aSPatrick Kelsey 		txc = &txq->vxtxq_comp_ring;
805*8f82136aSPatrick Kelsey 		txr = &txq->vxtxq_cmd_ring;
806*8f82136aSPatrick Kelsey 
807*8f82136aSPatrick Kelsey 		/* Completion ring */
808*8f82136aSPatrick Kelsey 		txc->vxcr_u.txcd =
809*8f82136aSPatrick Kelsey 		    (struct vmxnet3_txcompdesc *) vaddrs[q * ntxqs + 0];
810*8f82136aSPatrick Kelsey 		txc->vxcr_paddr = paddrs[q * ntxqs + 0];
811*8f82136aSPatrick Kelsey 
812*8f82136aSPatrick Kelsey 		/* Command ring */
813*8f82136aSPatrick Kelsey 		txr->vxtxr_txd =
814*8f82136aSPatrick Kelsey 		    (struct vmxnet3_txdesc *) vaddrs[q * ntxqs + 1];
815*8f82136aSPatrick Kelsey 		txr->vxtxr_paddr = paddrs[q * ntxqs + 1];
816*8f82136aSPatrick Kelsey 	}
817*8f82136aSPatrick Kelsey 
818*8f82136aSPatrick Kelsey 	return (0);
819e557c1ddSBryan Venteicher }
820e557c1ddSBryan Venteicher 
821e557c1ddSBryan Venteicher static void
822*8f82136aSPatrick Kelsey vmxnet3_init_rxq(struct vmxnet3_softc *sc, int q, int nrxqs)
823e3c97c2cSBryan Venteicher {
824e3c97c2cSBryan Venteicher 	struct vmxnet3_rxqueue *rxq;
825*8f82136aSPatrick Kelsey 	struct vmxnet3_comp_ring *rxc;
826e3c97c2cSBryan Venteicher 	struct vmxnet3_rxring *rxr;
827*8f82136aSPatrick Kelsey 	if_softc_ctx_t scctx;
828e3c97c2cSBryan Venteicher 	int i;
829e3c97c2cSBryan Venteicher 
830e3c97c2cSBryan Venteicher 	rxq = &sc->vmx_rxq[q];
831*8f82136aSPatrick Kelsey 	rxc = &rxq->vxrxq_comp_ring;
832*8f82136aSPatrick Kelsey 	scctx = sc->vmx_scctx;
833e3c97c2cSBryan Venteicher 
834e3c97c2cSBryan Venteicher 	snprintf(rxq->vxrxq_name, sizeof(rxq->vxrxq_name), "%s-rx%d",
835e3c97c2cSBryan Venteicher 	    device_get_nameunit(sc->vmx_dev), q);
836e3c97c2cSBryan Venteicher 
837e3c97c2cSBryan Venteicher 	rxq->vxrxq_sc = sc;
838e3c97c2cSBryan Venteicher 	rxq->vxrxq_id = q;
839e3c97c2cSBryan Venteicher 
840*8f82136aSPatrick Kelsey 	/*
841*8f82136aSPatrick Kelsey 	 * First rxq is the completion queue, so there are nrxqs - 1 command
842*8f82136aSPatrick Kelsey 	 * rings starting at iflib queue id 1.
843*8f82136aSPatrick Kelsey 	 */
844*8f82136aSPatrick Kelsey 	rxc->vxcr_ndesc = scctx->isc_nrxd[0];
845*8f82136aSPatrick Kelsey 	for (i = 0; i < nrxqs - 1; i++) {
846e3c97c2cSBryan Venteicher 		rxr = &rxq->vxrxq_cmd_ring[i];
847*8f82136aSPatrick Kelsey 		rxr->vxrxr_ndesc = scctx->isc_nrxd[i + 1];
8483c965775SBryan Venteicher 	}
849e3c97c2cSBryan Venteicher }
850e3c97c2cSBryan Venteicher 
851e3c97c2cSBryan Venteicher static int
852*8f82136aSPatrick Kelsey vmxnet3_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
853*8f82136aSPatrick Kelsey     int nrxqs, int nrxqsets)
854e3c97c2cSBryan Venteicher {
855*8f82136aSPatrick Kelsey 	struct vmxnet3_softc *sc;
856*8f82136aSPatrick Kelsey 	if_softc_ctx_t scctx;
857*8f82136aSPatrick Kelsey 	int q;
858*8f82136aSPatrick Kelsey 	int i;
859*8f82136aSPatrick Kelsey 	int error;
860*8f82136aSPatrick Kelsey 	caddr_t kva;
861e3c97c2cSBryan Venteicher 
862*8f82136aSPatrick Kelsey 	sc = iflib_get_softc(ctx);
863*8f82136aSPatrick Kelsey 	scctx = sc->vmx_scctx;
864e3c97c2cSBryan Venteicher 
865*8f82136aSPatrick Kelsey 	/* Allocate the array of receive queues */
866*8f82136aSPatrick Kelsey 	sc->vmx_rxq = malloc(sizeof(struct vmxnet3_rxqueue) *
867*8f82136aSPatrick Kelsey 	    nrxqsets, M_DEVBUF, M_NOWAIT | M_ZERO);
868*8f82136aSPatrick Kelsey 	if (sc->vmx_rxq == NULL)
869e3c97c2cSBryan Venteicher 		return (ENOMEM);
870e3c97c2cSBryan Venteicher 
871*8f82136aSPatrick Kelsey 	/* Initialize driver state for each receive queue */
872*8f82136aSPatrick Kelsey 	for (q = 0; q < nrxqsets; q++)
873*8f82136aSPatrick Kelsey 		vmxnet3_init_rxq(sc, q, nrxqs);
874e3c97c2cSBryan Venteicher 
875e557c1ddSBryan Venteicher 	/*
876*8f82136aSPatrick Kelsey 	 * Allocate queue state that is shared with the device.  This check
877*8f82136aSPatrick Kelsey 	 * and call is performed in both vmxnet3_tx_queues_alloc() and
878*8f82136aSPatrick Kelsey 	 * vmxnet3_rx_queues_alloc() so that we don't have to care which
879*8f82136aSPatrick Kelsey 	 * order iflib invokes those routines in.
880e557c1ddSBryan Venteicher 	 */
881*8f82136aSPatrick Kelsey 	if (sc->vmx_qs_dma.idi_size == 0) {
882*8f82136aSPatrick Kelsey 		error = vmxnet3_queues_shared_alloc(sc);
883e3c97c2cSBryan Venteicher 		if (error)
884e3c97c2cSBryan Venteicher 			return (error);
885e3c97c2cSBryan Venteicher 	}
886e3c97c2cSBryan Venteicher 
887*8f82136aSPatrick Kelsey 	kva = sc->vmx_qs_dma.idi_vaddr +
888*8f82136aSPatrick Kelsey 	    scctx->isc_ntxqsets * sizeof(struct vmxnet3_txq_shared);
889*8f82136aSPatrick Kelsey 	for (q = 0; q < nrxqsets; q++) {
890*8f82136aSPatrick Kelsey 		sc->vmx_rxq[q].vxrxq_rs = (struct vmxnet3_rxq_shared *) kva;
891*8f82136aSPatrick Kelsey 		kva += sizeof(struct vmxnet3_rxq_shared);
892*8f82136aSPatrick Kelsey 	}
893*8f82136aSPatrick Kelsey 
894*8f82136aSPatrick Kelsey 	/* Record descriptor ring vaddrs and paddrs */
895*8f82136aSPatrick Kelsey 	for (q = 0; q < nrxqsets; q++) {
896*8f82136aSPatrick Kelsey 		struct vmxnet3_rxqueue *rxq;
897*8f82136aSPatrick Kelsey 		struct vmxnet3_rxring *rxr;
898*8f82136aSPatrick Kelsey 		struct vmxnet3_comp_ring *rxc;
899*8f82136aSPatrick Kelsey 
900*8f82136aSPatrick Kelsey 		rxq = &sc->vmx_rxq[q];
901*8f82136aSPatrick Kelsey 		rxc = &rxq->vxrxq_comp_ring;
902*8f82136aSPatrick Kelsey 
903*8f82136aSPatrick Kelsey 		/* Completion ring */
904*8f82136aSPatrick Kelsey 		rxc->vxcr_u.rxcd =
905*8f82136aSPatrick Kelsey 		    (struct vmxnet3_rxcompdesc *) vaddrs[q * nrxqs + 0];
906*8f82136aSPatrick Kelsey 		rxc->vxcr_paddr = paddrs[q * nrxqs + 0];
907*8f82136aSPatrick Kelsey 
908*8f82136aSPatrick Kelsey 		/* Command ring(s) */
909*8f82136aSPatrick Kelsey 		for (i = 0; i < nrxqs - 1; i++) {
910*8f82136aSPatrick Kelsey 			rxr = &rxq->vxrxq_cmd_ring[i];
911*8f82136aSPatrick Kelsey 
912*8f82136aSPatrick Kelsey 			rxr->vxrxr_rxd =
913*8f82136aSPatrick Kelsey 			    (struct vmxnet3_rxdesc *) vaddrs[q * nrxqs + 1 + i];
914*8f82136aSPatrick Kelsey 			rxr->vxrxr_paddr = paddrs[q * nrxqs + 1 + i];
915*8f82136aSPatrick Kelsey 		}
916e3c97c2cSBryan Venteicher 	}
917e3c97c2cSBryan Venteicher 
918e3c97c2cSBryan Venteicher 	return (0);
919e3c97c2cSBryan Venteicher }
920e3c97c2cSBryan Venteicher 
921e3c97c2cSBryan Venteicher static void
922*8f82136aSPatrick Kelsey vmxnet3_queues_free(if_ctx_t ctx)
923e3c97c2cSBryan Venteicher {
924*8f82136aSPatrick Kelsey 	struct vmxnet3_softc *sc;
925e3c97c2cSBryan Venteicher 
926*8f82136aSPatrick Kelsey 	sc = iflib_get_softc(ctx);
927e3c97c2cSBryan Venteicher 
928*8f82136aSPatrick Kelsey 	/* Free queue state area that is shared with the device */
929*8f82136aSPatrick Kelsey 	if (sc->vmx_qs_dma.idi_size != 0) {
930*8f82136aSPatrick Kelsey 		iflib_dma_free(&sc->vmx_qs_dma);
931*8f82136aSPatrick Kelsey 		sc->vmx_qs_dma.idi_size = 0;
932e3c97c2cSBryan Venteicher 	}
933e3c97c2cSBryan Venteicher 
934*8f82136aSPatrick Kelsey 	/* Free array of receive queues */
935e3c97c2cSBryan Venteicher 	if (sc->vmx_rxq != NULL) {
936e3c97c2cSBryan Venteicher 		free(sc->vmx_rxq, M_DEVBUF);
937e3c97c2cSBryan Venteicher 		sc->vmx_rxq = NULL;
938e3c97c2cSBryan Venteicher 	}
939e3c97c2cSBryan Venteicher 
940*8f82136aSPatrick Kelsey 	/* Free array of transmit queues */
941e3c97c2cSBryan Venteicher 	if (sc->vmx_txq != NULL) {
942e3c97c2cSBryan Venteicher 		free(sc->vmx_txq, M_DEVBUF);
943e3c97c2cSBryan Venteicher 		sc->vmx_txq = NULL;
944e3c97c2cSBryan Venteicher 	}
945e3c97c2cSBryan Venteicher }
946e3c97c2cSBryan Venteicher 
947e3c97c2cSBryan Venteicher static int
948e3c97c2cSBryan Venteicher vmxnet3_alloc_shared_data(struct vmxnet3_softc *sc)
949e3c97c2cSBryan Venteicher {
950e3c97c2cSBryan Venteicher 	device_t dev;
951e3c97c2cSBryan Venteicher 	size_t size;
952*8f82136aSPatrick Kelsey 	int error;
953e3c97c2cSBryan Venteicher 
954e3c97c2cSBryan Venteicher 	dev = sc->vmx_dev;
955e3c97c2cSBryan Venteicher 
956*8f82136aSPatrick Kelsey 	/* Top level state structure shared with the device */
957e3c97c2cSBryan Venteicher 	size = sizeof(struct vmxnet3_driver_shared);
958*8f82136aSPatrick Kelsey 	error = iflib_dma_alloc_align(sc->vmx_ctx, size, 1, &sc->vmx_ds_dma, 0);
959e3c97c2cSBryan Venteicher 	if (error) {
960e3c97c2cSBryan Venteicher 		device_printf(dev, "cannot alloc shared memory\n");
961e3c97c2cSBryan Venteicher 		return (error);
962e3c97c2cSBryan Venteicher 	}
963*8f82136aSPatrick Kelsey 	sc->vmx_ds = (struct vmxnet3_driver_shared *) sc->vmx_ds_dma.idi_vaddr;
964e3c97c2cSBryan Venteicher 
965*8f82136aSPatrick Kelsey 	/* RSS table state shared with the device */
966e557c1ddSBryan Venteicher 	if (sc->vmx_flags & VMXNET3_FLAG_RSS) {
967e557c1ddSBryan Venteicher 		size = sizeof(struct vmxnet3_rss_shared);
968*8f82136aSPatrick Kelsey 		error = iflib_dma_alloc_align(sc->vmx_ctx, size, 128,
969*8f82136aSPatrick Kelsey 		    &sc->vmx_rss_dma, 0);
970e557c1ddSBryan Venteicher 		if (error) {
971e557c1ddSBryan Venteicher 			device_printf(dev, "cannot alloc rss shared memory\n");
972e557c1ddSBryan Venteicher 			return (error);
973e557c1ddSBryan Venteicher 		}
974e557c1ddSBryan Venteicher 		sc->vmx_rss =
975*8f82136aSPatrick Kelsey 		    (struct vmxnet3_rss_shared *) sc->vmx_rss_dma.idi_vaddr;
976e557c1ddSBryan Venteicher 	}
977e557c1ddSBryan Venteicher 
978e3c97c2cSBryan Venteicher 	return (0);
979e3c97c2cSBryan Venteicher }
980e3c97c2cSBryan Venteicher 
981e3c97c2cSBryan Venteicher static void
982e3c97c2cSBryan Venteicher vmxnet3_free_shared_data(struct vmxnet3_softc *sc)
983e3c97c2cSBryan Venteicher {
984e3c97c2cSBryan Venteicher 
985*8f82136aSPatrick Kelsey 	/* Free RSS table state shared with the device */
986e557c1ddSBryan Venteicher 	if (sc->vmx_rss != NULL) {
987*8f82136aSPatrick Kelsey 		iflib_dma_free(&sc->vmx_rss_dma);
988e557c1ddSBryan Venteicher 		sc->vmx_rss = NULL;
989e557c1ddSBryan Venteicher 	}
990e557c1ddSBryan Venteicher 
991*8f82136aSPatrick Kelsey 	/* Free top level state structure shared with the device */
992e3c97c2cSBryan Venteicher 	if (sc->vmx_ds != NULL) {
993*8f82136aSPatrick Kelsey 		iflib_dma_free(&sc->vmx_ds_dma);
994e3c97c2cSBryan Venteicher 		sc->vmx_ds = NULL;
995e3c97c2cSBryan Venteicher 	}
996e3c97c2cSBryan Venteicher }
997e3c97c2cSBryan Venteicher 
998e3c97c2cSBryan Venteicher static int
999e3c97c2cSBryan Venteicher vmxnet3_alloc_mcast_table(struct vmxnet3_softc *sc)
1000e3c97c2cSBryan Venteicher {
1001e3c97c2cSBryan Venteicher 	int error;
1002e3c97c2cSBryan Venteicher 
1003*8f82136aSPatrick Kelsey 	/* Multicast table state shared with the device */
1004*8f82136aSPatrick Kelsey 	error = iflib_dma_alloc_align(sc->vmx_ctx,
1005*8f82136aSPatrick Kelsey 	    VMXNET3_MULTICAST_MAX * ETHER_ADDR_LEN, 32, &sc->vmx_mcast_dma, 0);
1006e3c97c2cSBryan Venteicher 	if (error)
1007e3c97c2cSBryan Venteicher 		device_printf(sc->vmx_dev, "unable to alloc multicast table\n");
1008e3c97c2cSBryan Venteicher 	else
1009*8f82136aSPatrick Kelsey 		sc->vmx_mcast = sc->vmx_mcast_dma.idi_vaddr;
1010e3c97c2cSBryan Venteicher 
1011e3c97c2cSBryan Venteicher 	return (error);
1012e3c97c2cSBryan Venteicher }
1013e3c97c2cSBryan Venteicher 
1014e3c97c2cSBryan Venteicher static void
1015e3c97c2cSBryan Venteicher vmxnet3_free_mcast_table(struct vmxnet3_softc *sc)
1016e3c97c2cSBryan Venteicher {
1017e3c97c2cSBryan Venteicher 
1018*8f82136aSPatrick Kelsey 	/* Free multicast table state shared with the device */
1019e3c97c2cSBryan Venteicher 	if (sc->vmx_mcast != NULL) {
1020*8f82136aSPatrick Kelsey 		iflib_dma_free(&sc->vmx_mcast_dma);
1021e3c97c2cSBryan Venteicher 		sc->vmx_mcast = NULL;
1022e3c97c2cSBryan Venteicher 	}
1023e3c97c2cSBryan Venteicher }
1024e3c97c2cSBryan Venteicher 
1025e3c97c2cSBryan Venteicher static void
1026e3c97c2cSBryan Venteicher vmxnet3_init_shared_data(struct vmxnet3_softc *sc)
1027e3c97c2cSBryan Venteicher {
1028e3c97c2cSBryan Venteicher 	struct vmxnet3_driver_shared *ds;
1029*8f82136aSPatrick Kelsey 	if_shared_ctx_t sctx;
1030*8f82136aSPatrick Kelsey 	if_softc_ctx_t scctx;
1031e3c97c2cSBryan Venteicher 	struct vmxnet3_txqueue *txq;
1032e3c97c2cSBryan Venteicher 	struct vmxnet3_txq_shared *txs;
1033e3c97c2cSBryan Venteicher 	struct vmxnet3_rxqueue *rxq;
1034e3c97c2cSBryan Venteicher 	struct vmxnet3_rxq_shared *rxs;
1035e3c97c2cSBryan Venteicher 	int i;
1036e3c97c2cSBryan Venteicher 
1037e3c97c2cSBryan Venteicher 	ds = sc->vmx_ds;
1038*8f82136aSPatrick Kelsey 	sctx = sc->vmx_sctx;
1039*8f82136aSPatrick Kelsey 	scctx = sc->vmx_scctx;
1040e3c97c2cSBryan Venteicher 
1041e3c97c2cSBryan Venteicher 	/*
1042e3c97c2cSBryan Venteicher 	 * Initialize fields of the shared data that remains the same across
1043e3c97c2cSBryan Venteicher 	 * reinits. Note the shared data is zero'd when allocated.
1044e3c97c2cSBryan Venteicher 	 */
1045e3c97c2cSBryan Venteicher 
1046e3c97c2cSBryan Venteicher 	ds->magic = VMXNET3_REV1_MAGIC;
1047e3c97c2cSBryan Venteicher 
1048e3c97c2cSBryan Venteicher 	/* DriverInfo */
1049e3c97c2cSBryan Venteicher 	ds->version = VMXNET3_DRIVER_VERSION;
1050ce3be286SBryan Venteicher 	ds->guest = VMXNET3_GOS_FREEBSD |
1051e3c97c2cSBryan Venteicher #ifdef __LP64__
1052e3c97c2cSBryan Venteicher 	    VMXNET3_GOS_64BIT;
1053e3c97c2cSBryan Venteicher #else
1054e3c97c2cSBryan Venteicher 	    VMXNET3_GOS_32BIT;
1055e3c97c2cSBryan Venteicher #endif
1056e3c97c2cSBryan Venteicher 	ds->vmxnet3_revision = 1;
1057e3c97c2cSBryan Venteicher 	ds->upt_version = 1;
1058e3c97c2cSBryan Venteicher 
1059e3c97c2cSBryan Venteicher 	/* Misc. conf */
1060e3c97c2cSBryan Venteicher 	ds->driver_data = vtophys(sc);
1061e3c97c2cSBryan Venteicher 	ds->driver_data_len = sizeof(struct vmxnet3_softc);
1062*8f82136aSPatrick Kelsey 	ds->queue_shared = sc->vmx_qs_dma.idi_paddr;
1063*8f82136aSPatrick Kelsey 	ds->queue_shared_len = sc->vmx_qs_dma.idi_size;
1064*8f82136aSPatrick Kelsey 	ds->nrxsg_max = IFLIB_MAX_RX_SEGS;
1065e3c97c2cSBryan Venteicher 
1066e557c1ddSBryan Venteicher 	/* RSS conf */
1067e557c1ddSBryan Venteicher 	if (sc->vmx_flags & VMXNET3_FLAG_RSS) {
1068e557c1ddSBryan Venteicher 		ds->rss.version = 1;
1069*8f82136aSPatrick Kelsey 		ds->rss.paddr = sc->vmx_rss_dma.idi_paddr;
1070*8f82136aSPatrick Kelsey 		ds->rss.len = sc->vmx_rss_dma.idi_size;
1071e557c1ddSBryan Venteicher 	}
1072e557c1ddSBryan Venteicher 
1073e3c97c2cSBryan Venteicher 	/* Interrupt control. */
1074e3c97c2cSBryan Venteicher 	ds->automask = sc->vmx_intr_mask_mode == VMXNET3_IMM_AUTO;
1075*8f82136aSPatrick Kelsey 	/*
1076*8f82136aSPatrick Kelsey 	 * Total number of interrupt indexes we are using in the shared
1077*8f82136aSPatrick Kelsey 	 * config data, even though we don't actually allocate MSIX
1078*8f82136aSPatrick Kelsey 	 * resources for the tx queues.  Some versions of the device will
1079*8f82136aSPatrick Kelsey 	 * fail to initialize successfully if interrupt indexes are used in
1080*8f82136aSPatrick Kelsey 	 * the shared config that exceed the number of interrupts configured
1081*8f82136aSPatrick Kelsey 	 * here.
1082*8f82136aSPatrick Kelsey 	 */
1083*8f82136aSPatrick Kelsey 	ds->nintr = (scctx->isc_vectors == 1) ?
1084*8f82136aSPatrick Kelsey 	    1 : (scctx->isc_nrxqsets + scctx->isc_ntxqsets + 1);
1085e3c97c2cSBryan Venteicher 	ds->evintr = sc->vmx_event_intr_idx;
1086e3c97c2cSBryan Venteicher 	ds->ictrl = VMXNET3_ICTRL_DISABLE_ALL;
1087e3c97c2cSBryan Venteicher 
1088*8f82136aSPatrick Kelsey 	for (i = 0; i < ds->nintr; i++)
1089e3c97c2cSBryan Venteicher 		ds->modlevel[i] = UPT1_IMOD_ADAPTIVE;
1090e3c97c2cSBryan Venteicher 
1091e3c97c2cSBryan Venteicher 	/* Receive filter. */
1092*8f82136aSPatrick Kelsey 	ds->mcast_table = sc->vmx_mcast_dma.idi_paddr;
1093*8f82136aSPatrick Kelsey 	ds->mcast_tablelen = sc->vmx_mcast_dma.idi_size;
1094e3c97c2cSBryan Venteicher 
1095e3c97c2cSBryan Venteicher 	/* Tx queues */
1096*8f82136aSPatrick Kelsey 	for (i = 0; i < scctx->isc_ntxqsets; i++) {
1097e3c97c2cSBryan Venteicher 		txq = &sc->vmx_txq[i];
1098e3c97c2cSBryan Venteicher 		txs = txq->vxtxq_ts;
1099e3c97c2cSBryan Venteicher 
1100*8f82136aSPatrick Kelsey 		txs->cmd_ring = txq->vxtxq_cmd_ring.vxtxr_paddr;
11013c965775SBryan Venteicher 		txs->cmd_ring_len = txq->vxtxq_cmd_ring.vxtxr_ndesc;
1102*8f82136aSPatrick Kelsey 		txs->comp_ring = txq->vxtxq_comp_ring.vxcr_paddr;
11033c965775SBryan Venteicher 		txs->comp_ring_len = txq->vxtxq_comp_ring.vxcr_ndesc;
1104e3c97c2cSBryan Venteicher 		txs->driver_data = vtophys(txq);
1105e3c97c2cSBryan Venteicher 		txs->driver_data_len = sizeof(struct vmxnet3_txqueue);
1106e3c97c2cSBryan Venteicher 	}
1107e3c97c2cSBryan Venteicher 
1108e3c97c2cSBryan Venteicher 	/* Rx queues */
1109*8f82136aSPatrick Kelsey 	for (i = 0; i < scctx->isc_nrxqsets; i++) {
1110e3c97c2cSBryan Venteicher 		rxq = &sc->vmx_rxq[i];
1111e3c97c2cSBryan Venteicher 		rxs = rxq->vxrxq_rs;
1112e3c97c2cSBryan Venteicher 
1113*8f82136aSPatrick Kelsey 		rxs->cmd_ring[0] = rxq->vxrxq_cmd_ring[0].vxrxr_paddr;
1114e3c97c2cSBryan Venteicher 		rxs->cmd_ring_len[0] = rxq->vxrxq_cmd_ring[0].vxrxr_ndesc;
1115*8f82136aSPatrick Kelsey 		rxs->cmd_ring[1] = rxq->vxrxq_cmd_ring[1].vxrxr_paddr;
1116e3c97c2cSBryan Venteicher 		rxs->cmd_ring_len[1] = rxq->vxrxq_cmd_ring[1].vxrxr_ndesc;
1117*8f82136aSPatrick Kelsey 		rxs->comp_ring = rxq->vxrxq_comp_ring.vxcr_paddr;
11183c965775SBryan Venteicher 		rxs->comp_ring_len = rxq->vxrxq_comp_ring.vxcr_ndesc;
1119e3c97c2cSBryan Venteicher 		rxs->driver_data = vtophys(rxq);
1120e3c97c2cSBryan Venteicher 		rxs->driver_data_len = sizeof(struct vmxnet3_rxqueue);
1121e3c97c2cSBryan Venteicher 	}
1122e3c97c2cSBryan Venteicher }
1123e3c97c2cSBryan Venteicher 
1124e3c97c2cSBryan Venteicher static void
1125e557c1ddSBryan Venteicher vmxnet3_reinit_rss_shared_data(struct vmxnet3_softc *sc)
1126e557c1ddSBryan Venteicher {
1127e557c1ddSBryan Venteicher 	/*
1128e557c1ddSBryan Venteicher 	 * Use the same key as the Linux driver until FreeBSD can do
1129e557c1ddSBryan Venteicher 	 * RSS (presumably Toeplitz) in software.
1130e557c1ddSBryan Venteicher 	 */
1131e557c1ddSBryan Venteicher 	static const uint8_t rss_key[UPT1_RSS_MAX_KEY_SIZE] = {
1132e557c1ddSBryan Venteicher 	    0x3b, 0x56, 0xd1, 0x56, 0x13, 0x4a, 0xe7, 0xac,
1133e557c1ddSBryan Venteicher 	    0xe8, 0x79, 0x09, 0x75, 0xe8, 0x65, 0x79, 0x28,
1134e557c1ddSBryan Venteicher 	    0x35, 0x12, 0xb9, 0x56, 0x7c, 0x76, 0x4b, 0x70,
1135e557c1ddSBryan Venteicher 	    0xd8, 0x56, 0xa3, 0x18, 0x9b, 0x0a, 0xee, 0xf3,
1136e557c1ddSBryan Venteicher 	    0x96, 0xa6, 0x9f, 0x8f, 0x9e, 0x8c, 0x90, 0xc9,
1137e557c1ddSBryan Venteicher 	};
1138e557c1ddSBryan Venteicher 
1139e557c1ddSBryan Venteicher 	struct vmxnet3_driver_shared *ds;
1140*8f82136aSPatrick Kelsey 	if_softc_ctx_t scctx;
1141e557c1ddSBryan Venteicher 	struct vmxnet3_rss_shared *rss;
1142e557c1ddSBryan Venteicher 	int i;
1143e557c1ddSBryan Venteicher 
1144e557c1ddSBryan Venteicher 	ds = sc->vmx_ds;
1145*8f82136aSPatrick Kelsey 	scctx = sc->vmx_scctx;
1146e557c1ddSBryan Venteicher 	rss = sc->vmx_rss;
1147e557c1ddSBryan Venteicher 
1148e557c1ddSBryan Venteicher 	rss->hash_type =
1149e557c1ddSBryan Venteicher 	    UPT1_RSS_HASH_TYPE_IPV4 | UPT1_RSS_HASH_TYPE_TCP_IPV4 |
1150e557c1ddSBryan Venteicher 	    UPT1_RSS_HASH_TYPE_IPV6 | UPT1_RSS_HASH_TYPE_TCP_IPV6;
1151e557c1ddSBryan Venteicher 	rss->hash_func = UPT1_RSS_HASH_FUNC_TOEPLITZ;
1152e557c1ddSBryan Venteicher 	rss->hash_key_size = UPT1_RSS_MAX_KEY_SIZE;
1153e557c1ddSBryan Venteicher 	rss->ind_table_size = UPT1_RSS_MAX_IND_TABLE_SIZE;
1154e557c1ddSBryan Venteicher 	memcpy(rss->hash_key, rss_key, UPT1_RSS_MAX_KEY_SIZE);
1155e557c1ddSBryan Venteicher 
1156e557c1ddSBryan Venteicher 	for (i = 0; i < UPT1_RSS_MAX_IND_TABLE_SIZE; i++)
1157*8f82136aSPatrick Kelsey 		rss->ind_table[i] = i % scctx->isc_nrxqsets;
1158e3c97c2cSBryan Venteicher }
1159e3c97c2cSBryan Venteicher 
1160e3c97c2cSBryan Venteicher static void
1161e3c97c2cSBryan Venteicher vmxnet3_reinit_shared_data(struct vmxnet3_softc *sc)
1162e3c97c2cSBryan Venteicher {
1163e3c97c2cSBryan Venteicher 	struct ifnet *ifp;
1164e3c97c2cSBryan Venteicher 	struct vmxnet3_driver_shared *ds;
1165*8f82136aSPatrick Kelsey 	if_softc_ctx_t scctx;
1166e3c97c2cSBryan Venteicher 
1167e3c97c2cSBryan Venteicher 	ifp = sc->vmx_ifp;
1168e3c97c2cSBryan Venteicher 	ds = sc->vmx_ds;
1169*8f82136aSPatrick Kelsey 	scctx = sc->vmx_scctx;
1170e3c97c2cSBryan Venteicher 
1171e557c1ddSBryan Venteicher 	ds->mtu = ifp->if_mtu;
1172*8f82136aSPatrick Kelsey 	ds->ntxqueue = scctx->isc_ntxqsets;
1173*8f82136aSPatrick Kelsey 	ds->nrxqueue = scctx->isc_nrxqsets;
1174e557c1ddSBryan Venteicher 
1175e3c97c2cSBryan Venteicher 	ds->upt_features = 0;
1176e3c97c2cSBryan Venteicher 	if (ifp->if_capenable & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6))
1177e3c97c2cSBryan Venteicher 		ds->upt_features |= UPT1_F_CSUM;
11783c5dfe89SBryan Venteicher 	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
11793c5dfe89SBryan Venteicher 		ds->upt_features |= UPT1_F_VLAN;
1180e3c97c2cSBryan Venteicher 	if (ifp->if_capenable & IFCAP_LRO)
1181e3c97c2cSBryan Venteicher 		ds->upt_features |= UPT1_F_LRO;
1182e3c97c2cSBryan Venteicher 
1183e557c1ddSBryan Venteicher 	if (sc->vmx_flags & VMXNET3_FLAG_RSS) {
1184e557c1ddSBryan Venteicher 		ds->upt_features |= UPT1_F_RSS;
1185e557c1ddSBryan Venteicher 		vmxnet3_reinit_rss_shared_data(sc);
1186e557c1ddSBryan Venteicher 	}
1187e3c97c2cSBryan Venteicher 
1188*8f82136aSPatrick Kelsey 	vmxnet3_write_bar1(sc, VMXNET3_BAR1_DSL, sc->vmx_ds_dma.idi_paddr);
1189e3c97c2cSBryan Venteicher 	vmxnet3_write_bar1(sc, VMXNET3_BAR1_DSH,
1190*8f82136aSPatrick Kelsey 	    (uint64_t) sc->vmx_ds_dma.idi_paddr >> 32);
1191e3c97c2cSBryan Venteicher }
1192e3c97c2cSBryan Venteicher 
/*
 * Allocate the driver/device shared memory regions (shared area and
 * multicast filter table) and populate the shared data.  Returns 0 on
 * success or an errno from the failing allocation.
 */
static int
vmxnet3_alloc_data(struct vmxnet3_softc *sc)
{
	int error;

	if ((error = vmxnet3_alloc_shared_data(sc)) != 0)
		return (error);
	if ((error = vmxnet3_alloc_mcast_table(sc)) != 0)
		return (error);

	vmxnet3_init_shared_data(sc);

	return (0);
}
1210e3c97c2cSBryan Venteicher 
/*
 * Release the shared memory set up by vmxnet3_alloc_data(), in reverse
 * order of allocation.
 */
static void
vmxnet3_free_data(struct vmxnet3_softc *sc)
{

	vmxnet3_free_mcast_table(sc);
	vmxnet3_free_shared_data(sc);
}
1218e3c97c2cSBryan Venteicher 
/*
 * Process device events posted in the shared area: link changes,
 * Tx/Rx queue errors, and diagnostic events.  Runs from the admin/event
 * interrupt path.
 */
static void
vmxnet3_evintr(struct vmxnet3_softc *sc)
{
	device_t dev;
	struct vmxnet3_txq_shared *ts;
	struct vmxnet3_rxq_shared *rs;
	uint32_t event;

	dev = sc->vmx_dev;

	/* Read and acknowledge the pending events. */
	event = sc->vmx_ds->event;
	vmxnet3_write_bar1(sc, VMXNET3_BAR1_EVENT, event);

	if (event & VMXNET3_EVENT_LINK)
		vmxnet3_link_status(sc);

	if (event & (VMXNET3_EVENT_TQERROR | VMXNET3_EVENT_RQERROR)) {
		/* Ask the device to refresh queue status, then report it. */
		vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_STATUS);
		ts = sc->vmx_txq[0].vxtxq_ts;
		if (ts->stopped != 0)
			device_printf(dev, "Tx queue error %#x\n", ts->error);
		rs = sc->vmx_rxq[0].vxrxq_rs;
		if (rs->stopped != 0)
			device_printf(dev, "Rx queue error %#x\n", rs->error);

		/* XXX - rely on iflib watchdog to reset us? */
		device_printf(dev, "Rx/Tx queue error event ... "
		    "waiting for iflib watchdog reset\n");
	}

	if (event & VMXNET3_EVENT_DIC)
		device_printf(dev, "device implementation change event\n");
	if (event & VMXNET3_EVENT_DEBUG)
		device_printf(dev, "debug event\n");
}
1255e3c97c2cSBryan Venteicher 
/*
 * iflib TX encapsulation entry point.  Write one TX command-ring
 * descriptor per DMA segment of the packet described by 'pi', set the
 * VLAN and checksum/TSO offload fields on the start-of-packet (sop)
 * descriptor, and transfer ownership of the chain to the device last.
 * Always returns 0.
 */
static int
vmxnet3_isc_txd_encap(void *vsc, if_pkt_info_t pi)
{
	struct vmxnet3_softc *sc;
	struct vmxnet3_txqueue *txq;
	struct vmxnet3_txring *txr;
	struct vmxnet3_txdesc *txd, *sop;
	bus_dma_segment_t *segs;
	int nsegs;
	int pidx;
	int hdrlen;
	int i;
	int gen;

	sc = vsc;
	txq = &sc->vmx_txq[pi->ipi_qsidx];
	txr = &txq->vxtxq_cmd_ring;
	segs = pi->ipi_segs;
	nsegs = pi->ipi_nsegs;
	pidx = pi->ipi_pidx;

	KASSERT(nsegs <= VMXNET3_TX_MAXSEGS,
	    ("%s: packet with too many segments %d", __func__, nsegs));

	/*
	 * The sop descriptor keeps the inverted generation bit until the
	 * very end, so the device will not see a partially-written chain.
	 */
	sop = &txr->vxtxr_txd[pidx];
	gen = txr->vxtxr_gen ^ 1;	/* Owned by cpu until the final flip */

	for (i = 0; i < nsegs; i++) {
		txd = &txr->vxtxr_txd[pidx];

		txd->addr = segs[i].ds_addr;
		txd->len = segs[i].ds_len;
		txd->gen = gen;
		txd->dtype = 0;
		txd->offload_mode = VMXNET3_OM_NONE;
		txd->offload_pos = 0;
		txd->hlen = 0;
		txd->eop = 0;
		txd->compreq = 0;
		txd->vtag_mode = 0;
		txd->vtag = 0;

		/* Wrap the producer index, toggling the ring generation. */
		if (++pidx == txr->vxtxr_ndesc) {
			pidx = 0;
			txr->vxtxr_gen ^= 1;
		}
		/* Descriptors after sop carry the live generation bit. */
		gen = txr->vxtxr_gen;
	}
	/* Mark end-of-packet; request a completion only if iflib asked. */
	txd->eop = 1;
	txd->compreq = !!(pi->ipi_flags & IPI_TX_INTR);
	pi->ipi_new_pidx = pidx;

	/*
	 * VLAN
	 */
	if (pi->ipi_mflags & M_VLANTAG) {
		sop->vtag_mode = 1;
		sop->vtag = pi->ipi_vtag;
	}

	/*
	 * TSO and checksum offloads: for TSO, hlen is the L2+L3 header
	 * length and offload_pos carries the segment size; for checksum
	 * offload, offload_pos is the offset of the L4 checksum field.
	 */
	hdrlen = pi->ipi_ehdrlen + pi->ipi_ip_hlen;
	if (pi->ipi_csum_flags & CSUM_TSO) {
		sop->offload_mode = VMXNET3_OM_TSO;
		sop->hlen = hdrlen;
		sop->offload_pos = pi->ipi_tso_segsz;
	} else if (pi->ipi_csum_flags & (VMXNET3_CSUM_OFFLOAD |
	    VMXNET3_CSUM_OFFLOAD_IPV6)) {
		sop->offload_mode = VMXNET3_OM_CSUM;
		sop->hlen = hdrlen;
		sop->offload_pos = hdrlen +
		    ((pi->ipi_ipproto == IPPROTO_TCP) ?
			offsetof(struct tcphdr, th_sum) :
			offsetof(struct udphdr, uh_sum));
	}

	/*
	 * Finally, change the ownership: the write barrier guarantees the
	 * device observes fully-written descriptors before the sop
	 * generation bit flips to device ownership.
	 */
	vmxnet3_barrier(sc, VMXNET3_BARRIER_WR);
	sop->gen ^= 1;

	return (0);
}
1340e3c97c2cSBryan Venteicher 
1341e3c97c2cSBryan Venteicher static void
1342*8f82136aSPatrick Kelsey vmxnet3_isc_txd_flush(void *vsc, uint16_t txqid, qidx_t pidx)
1343e3c97c2cSBryan Venteicher {
1344e3c97c2cSBryan Venteicher 	struct vmxnet3_softc *sc;
1345*8f82136aSPatrick Kelsey 	struct vmxnet3_txqueue *txq;
1346*8f82136aSPatrick Kelsey 
1347*8f82136aSPatrick Kelsey 	sc = vsc;
1348*8f82136aSPatrick Kelsey 	txq = &sc->vmx_txq[txqid];
1349*8f82136aSPatrick Kelsey 
1350*8f82136aSPatrick Kelsey 	/*
1351*8f82136aSPatrick Kelsey 	 * pidx is what we last set ipi_new_pidx to in
1352*8f82136aSPatrick Kelsey 	 * vmxnet3_isc_txd_encap()
1353*8f82136aSPatrick Kelsey 	 */
1354*8f82136aSPatrick Kelsey 
1355*8f82136aSPatrick Kelsey 	/*
1356*8f82136aSPatrick Kelsey 	 * Avoid expensive register updates if the flush request is
1357*8f82136aSPatrick Kelsey 	 * redundant.
1358*8f82136aSPatrick Kelsey 	 */
1359*8f82136aSPatrick Kelsey 	if (txq->vxtxq_last_flush == pidx)
1360*8f82136aSPatrick Kelsey 		return;
1361*8f82136aSPatrick Kelsey 	txq->vxtxq_last_flush = pidx;
1362*8f82136aSPatrick Kelsey 	vmxnet3_write_bar0(sc, VMXNET3_BAR0_TXH(txq->vxtxq_id), pidx);
1363*8f82136aSPatrick Kelsey }
1364*8f82136aSPatrick Kelsey 
/*
 * iflib TX credits entry point.  Walk the TX completion ring and count
 * how many TX command-ring descriptors the device has consumed.
 */
static int
vmxnet3_isc_txd_credits_update(void *vsc, uint16_t txqid, bool clear)
{
	struct vmxnet3_softc *sc;
	struct vmxnet3_txqueue *txq;
	struct vmxnet3_comp_ring *txc;
	struct vmxnet3_txcompdesc *txcd;
	struct vmxnet3_txring *txr;
	int processed;

	sc = vsc;
	txq = &sc->vmx_txq[txqid];
	txc = &txq->vxtxq_comp_ring;
	txr = &txq->vxtxq_cmd_ring;

	/*
	 * If clear is true, we need to report the number of TX command ring
	 * descriptors that have been processed by the device.  If clear is
	 * false, we just need to report whether or not at least one TX
	 * command ring descriptor has been processed by the device.
	 */
	processed = 0;
	for (;;) {
		txcd = &txc->vxcr_u.txcd[txc->vxcr_next];
		/* A stale generation bit means no further completions. */
		if (txcd->gen != txc->vxcr_gen)
			break;
		else if (!clear)
			return (1);
		/* Read the descriptor body only after seeing its gen bit. */
		vmxnet3_barrier(sc, VMXNET3_BARRIER_RD);

		/* Advance the completion ring, toggling gen on wrap. */
		if (++txc->vxcr_next == txc->vxcr_ndesc) {
			txc->vxcr_next = 0;
			txc->vxcr_gen ^= 1;
		}

		/*
		 * Each completion covers the command-ring descriptors from
		 * vxtxr_next through eop_idx inclusive; account for ring
		 * wraparound when eop_idx is behind vxtxr_next.
		 */
		if (txcd->eop_idx < txr->vxtxr_next)
			processed += txr->vxtxr_ndesc -
			    (txr->vxtxr_next - txcd->eop_idx) + 1;
		else
			processed += txcd->eop_idx - txr->vxtxr_next + 1;
		txr->vxtxr_next = (txcd->eop_idx + 1) % txr->vxtxr_ndesc;
	}

	return (processed);
}
1410e3c97c2cSBryan Venteicher 
/*
 * iflib RX available entry point.  Scan the RX completion ring starting
 * at 'idx' and report how many complete, non-zero-length packets the
 * device has delivered, stopping once the count exceeds 'budget'.
 * Ring state is not modified; a local generation copy tracks wraps.
 */
static int
vmxnet3_isc_rxd_available(void *vsc, uint16_t rxqid, qidx_t idx, qidx_t budget)
{
	struct vmxnet3_softc *sc;
	struct vmxnet3_rxqueue *rxq;
	struct vmxnet3_comp_ring *rxc;
	struct vmxnet3_rxcompdesc *rxcd;
	int avail;
	int completed_gen;
#ifdef INVARIANTS
	int expect_sop = 1;
#endif
	sc = vsc;
	rxq = &sc->vmx_rxq[rxqid];
	rxc = &rxq->vxrxq_comp_ring;

	avail = 0;
	completed_gen = rxc->vxcr_gen;
	for (;;) {
		rxcd = &rxc->vxcr_u.rxcd[idx];
		/* A stale generation bit means no further completions. */
		if (rxcd->gen != completed_gen)
			break;
		/* Read the descriptor body only after seeing its gen bit. */
		vmxnet3_barrier(sc, VMXNET3_BARRIER_RD);

#ifdef INVARIANTS
		/* Completions must alternate sop...eop with no gaps. */
		if (expect_sop)
			KASSERT(rxcd->sop, ("%s: expected sop", __func__));
		else
			KASSERT(!rxcd->sop, ("%s: unexpected sop", __func__));
		expect_sop = rxcd->eop;
#endif
		/* Only a non-empty eop descriptor completes a packet. */
		if (rxcd->eop && (rxcd->len != 0))
			avail++;
		if (avail > budget)
			break;
		if (++idx == rxc->vxcr_ndesc) {
			idx = 0;
			completed_gen ^= 1;
		}
	}

	return (avail);
}
1454*8f82136aSPatrick Kelsey 
1455*8f82136aSPatrick Kelsey static int
1456*8f82136aSPatrick Kelsey vmxnet3_isc_rxd_pkt_get(void *vsc, if_rxd_info_t ri)
1457*8f82136aSPatrick Kelsey {
1458*8f82136aSPatrick Kelsey 	struct vmxnet3_softc *sc;
1459*8f82136aSPatrick Kelsey 	if_softc_ctx_t scctx;
1460*8f82136aSPatrick Kelsey 	struct vmxnet3_rxqueue *rxq;
1461*8f82136aSPatrick Kelsey 	struct vmxnet3_comp_ring *rxc;
1462*8f82136aSPatrick Kelsey 	struct vmxnet3_rxcompdesc *rxcd;
1463*8f82136aSPatrick Kelsey 	struct vmxnet3_rxring *rxr;
1464e3c97c2cSBryan Venteicher 	struct vmxnet3_rxdesc *rxd;
1465*8f82136aSPatrick Kelsey 	if_rxd_frag_t frag;
1466*8f82136aSPatrick Kelsey 	int cqidx;
1467*8f82136aSPatrick Kelsey 	uint16_t total_len;
1468*8f82136aSPatrick Kelsey 	uint8_t nfrags;
1469*8f82136aSPatrick Kelsey 	uint8_t flid;
1470e3c97c2cSBryan Venteicher 
1471*8f82136aSPatrick Kelsey 	sc = vsc;
1472*8f82136aSPatrick Kelsey 	scctx = sc->vmx_scctx;
1473*8f82136aSPatrick Kelsey 	rxq = &sc->vmx_rxq[ri->iri_qsidx];
1474*8f82136aSPatrick Kelsey 	rxc = &rxq->vxrxq_comp_ring;
1475e3c97c2cSBryan Venteicher 
1476e3c97c2cSBryan Venteicher 	/*
1477*8f82136aSPatrick Kelsey 	 * Get a single packet starting at the given index in the completion
1478*8f82136aSPatrick Kelsey 	 * queue.  That we have been called indicates that
1479*8f82136aSPatrick Kelsey 	 * vmxnet3_isc_rxd_available() has already verified that either
1480*8f82136aSPatrick Kelsey 	 * there is a complete packet available starting at the given index,
1481*8f82136aSPatrick Kelsey 	 * or there are one or more zero length packets starting at the
1482*8f82136aSPatrick Kelsey 	 * given index followed by a complete packet, so no verification of
1483*8f82136aSPatrick Kelsey 	 * ownership of the descriptors (and no associated read barrier) is
1484*8f82136aSPatrick Kelsey 	 * required here.
1485e3c97c2cSBryan Venteicher 	 */
1486*8f82136aSPatrick Kelsey 	cqidx = ri->iri_cidx;
1487*8f82136aSPatrick Kelsey 	rxcd = &rxc->vxcr_u.rxcd[cqidx];
1488*8f82136aSPatrick Kelsey 	while (rxcd->len == 0) {
1489*8f82136aSPatrick Kelsey 		KASSERT(rxcd->sop && rxcd->eop,
1490*8f82136aSPatrick Kelsey 		    ("%s: zero-length packet without both sop and eop set",
1491*8f82136aSPatrick Kelsey 			__func__));
1492*8f82136aSPatrick Kelsey 		if (++cqidx == rxc->vxcr_ndesc) {
1493*8f82136aSPatrick Kelsey 			cqidx = 0;
1494*8f82136aSPatrick Kelsey 			rxc->vxcr_gen ^= 1;
1495*8f82136aSPatrick Kelsey 		}
1496*8f82136aSPatrick Kelsey 		rxcd = &rxc->vxcr_u.rxcd[cqidx];
1497*8f82136aSPatrick Kelsey 	}
1498*8f82136aSPatrick Kelsey 	KASSERT(rxcd->sop, ("%s: expected sop", __func__));
1499*8f82136aSPatrick Kelsey 
1500*8f82136aSPatrick Kelsey 	/*
1501*8f82136aSPatrick Kelsey 	 * RSS and flow ID
1502*8f82136aSPatrick Kelsey 	 */
1503*8f82136aSPatrick Kelsey 	ri->iri_flowid = rxcd->rss_hash;
1504*8f82136aSPatrick Kelsey 	switch (rxcd->rss_type) {
1505*8f82136aSPatrick Kelsey 	case VMXNET3_RCD_RSS_TYPE_NONE:
1506*8f82136aSPatrick Kelsey 		ri->iri_flowid = ri->iri_qsidx;
1507*8f82136aSPatrick Kelsey 		ri->iri_rsstype = M_HASHTYPE_NONE;
1508*8f82136aSPatrick Kelsey 		break;
1509*8f82136aSPatrick Kelsey 	case VMXNET3_RCD_RSS_TYPE_IPV4:
1510*8f82136aSPatrick Kelsey 		ri->iri_rsstype = M_HASHTYPE_RSS_IPV4;
1511*8f82136aSPatrick Kelsey 		break;
1512*8f82136aSPatrick Kelsey 	case VMXNET3_RCD_RSS_TYPE_TCPIPV4:
1513*8f82136aSPatrick Kelsey 		ri->iri_rsstype = M_HASHTYPE_RSS_TCP_IPV4;
1514*8f82136aSPatrick Kelsey 		break;
1515*8f82136aSPatrick Kelsey 	case VMXNET3_RCD_RSS_TYPE_IPV6:
1516*8f82136aSPatrick Kelsey 		ri->iri_rsstype = M_HASHTYPE_RSS_IPV6;
1517*8f82136aSPatrick Kelsey 		break;
1518*8f82136aSPatrick Kelsey 	case VMXNET3_RCD_RSS_TYPE_TCPIPV6:
1519*8f82136aSPatrick Kelsey 		ri->iri_rsstype = M_HASHTYPE_RSS_TCP_IPV6;
1520*8f82136aSPatrick Kelsey 		break;
1521*8f82136aSPatrick Kelsey 	default:
1522*8f82136aSPatrick Kelsey 		ri->iri_rsstype = M_HASHTYPE_OPAQUE_HASH;
1523*8f82136aSPatrick Kelsey 		break;
1524e3c97c2cSBryan Venteicher 	}
1525e3c97c2cSBryan Venteicher 
1526*8f82136aSPatrick Kelsey 	/* VLAN */
1527*8f82136aSPatrick Kelsey 	if (rxcd->vlan) {
1528*8f82136aSPatrick Kelsey 		ri->iri_flags |= M_VLANTAG;
1529*8f82136aSPatrick Kelsey 		ri->iri_vtag = rxcd->vtag;
1530e3c97c2cSBryan Venteicher 	}
1531e3c97c2cSBryan Venteicher 
1532*8f82136aSPatrick Kelsey 	/* Checksum offload */
1533*8f82136aSPatrick Kelsey 	if (!rxcd->no_csum) {
1534*8f82136aSPatrick Kelsey 		uint32_t csum_flags = 0;
1535e3c97c2cSBryan Venteicher 
1536*8f82136aSPatrick Kelsey 		if (rxcd->ipv4) {
1537*8f82136aSPatrick Kelsey 			csum_flags |= CSUM_IP_CHECKED;
1538*8f82136aSPatrick Kelsey 			if (rxcd->ipcsum_ok)
1539*8f82136aSPatrick Kelsey 				csum_flags |= CSUM_IP_VALID;
1540e3c97c2cSBryan Venteicher 		}
1541*8f82136aSPatrick Kelsey 		if (!rxcd->fragment && (rxcd->tcp || rxcd->udp)) {
1542*8f82136aSPatrick Kelsey 			csum_flags |= CSUM_L4_CALC;
1543*8f82136aSPatrick Kelsey 			if (rxcd->csum_ok) {
1544*8f82136aSPatrick Kelsey 				csum_flags |= CSUM_L4_VALID;
1545*8f82136aSPatrick Kelsey 				ri->iri_csum_data = 0xffff;
1546*8f82136aSPatrick Kelsey 			}
1547*8f82136aSPatrick Kelsey 		}
1548*8f82136aSPatrick Kelsey 		ri->iri_csum_flags = csum_flags;
1549e3c97c2cSBryan Venteicher 	}
1550e3c97c2cSBryan Venteicher 
1551*8f82136aSPatrick Kelsey 	/*
1552*8f82136aSPatrick Kelsey 	 * The queue numbering scheme used for rxcd->qid is as follows:
1553*8f82136aSPatrick Kelsey 	 *  - All of the command ring 0s are numbered [0, nrxqsets - 1]
1554*8f82136aSPatrick Kelsey 	 *  - All of the command ring 1s are numbered [nrxqsets, 2*nrxqsets - 1]
1555*8f82136aSPatrick Kelsey 	 *
1556*8f82136aSPatrick Kelsey 	 * Thus, rxcd->qid less than nrxqsets indicates command ring (and
1557*8f82136aSPatrick Kelsey 	 * flid) 0, and rxcd->qid greater than or equal to nrxqsets
1558*8f82136aSPatrick Kelsey 	 * indicates command ring (and flid) 1.
1559*8f82136aSPatrick Kelsey 	 */
1560*8f82136aSPatrick Kelsey 	nfrags = 0;
1561*8f82136aSPatrick Kelsey 	total_len = 0;
1562*8f82136aSPatrick Kelsey 	do {
1563*8f82136aSPatrick Kelsey 		rxcd = &rxc->vxcr_u.rxcd[cqidx];
1564*8f82136aSPatrick Kelsey 		KASSERT(rxcd->gen == rxc->vxcr_gen,
1565*8f82136aSPatrick Kelsey 		    ("%s: generation mismatch", __func__));
1566*8f82136aSPatrick Kelsey 		flid = (rxcd->qid >= scctx->isc_nrxqsets) ? 1 : 0;
1567*8f82136aSPatrick Kelsey 		rxr = &rxq->vxrxq_cmd_ring[flid];
1568*8f82136aSPatrick Kelsey 		rxd = &rxr->vxrxr_rxd[rxcd->rxd_idx];
1569e3c97c2cSBryan Venteicher 
1570*8f82136aSPatrick Kelsey 		frag = &ri->iri_frags[nfrags];
1571*8f82136aSPatrick Kelsey 		frag->irf_flid = flid;
1572*8f82136aSPatrick Kelsey 		frag->irf_idx = rxcd->rxd_idx;
1573*8f82136aSPatrick Kelsey 		frag->irf_len = rxcd->len;
1574*8f82136aSPatrick Kelsey 		total_len += rxcd->len;
1575*8f82136aSPatrick Kelsey 		nfrags++;
1576*8f82136aSPatrick Kelsey 		if (++cqidx == rxc->vxcr_ndesc) {
1577*8f82136aSPatrick Kelsey 			cqidx = 0;
1578*8f82136aSPatrick Kelsey 			rxc->vxcr_gen ^= 1;
1579*8f82136aSPatrick Kelsey 		}
1580*8f82136aSPatrick Kelsey 	} while (!rxcd->eop);
1581e3c97c2cSBryan Venteicher 
1582*8f82136aSPatrick Kelsey 	ri->iri_cidx = cqidx;
1583*8f82136aSPatrick Kelsey 	ri->iri_nfrags = nfrags;
1584*8f82136aSPatrick Kelsey 	ri->iri_len = total_len;
1585*8f82136aSPatrick Kelsey 
1586e3c97c2cSBryan Venteicher 	return (0);
1587e3c97c2cSBryan Venteicher }
1588e3c97c2cSBryan Venteicher 
1589e3c97c2cSBryan Venteicher static void
1590*8f82136aSPatrick Kelsey vmxnet3_isc_rxd_refill(void *vsc, if_rxd_update_t iru)
1591e3c97c2cSBryan Venteicher {
1592e3c97c2cSBryan Venteicher 	struct vmxnet3_softc *sc;
1593*8f82136aSPatrick Kelsey 	struct vmxnet3_rxqueue *rxq;
1594e3c97c2cSBryan Venteicher 	struct vmxnet3_rxring *rxr;
1595e3c97c2cSBryan Venteicher 	struct vmxnet3_rxdesc *rxd;
1596*8f82136aSPatrick Kelsey 	uint64_t *paddrs;
1597*8f82136aSPatrick Kelsey 	int count;
1598*8f82136aSPatrick Kelsey 	int len;
1599*8f82136aSPatrick Kelsey 	int pidx;
1600*8f82136aSPatrick Kelsey 	int i;
1601*8f82136aSPatrick Kelsey 	uint8_t flid;
1602*8f82136aSPatrick Kelsey 	uint8_t btype;
1603e3c97c2cSBryan Venteicher 
1604*8f82136aSPatrick Kelsey 	count = iru->iru_count;
1605*8f82136aSPatrick Kelsey 	len = iru->iru_buf_size;
1606*8f82136aSPatrick Kelsey 	pidx = iru->iru_pidx;
1607*8f82136aSPatrick Kelsey 	flid = iru->iru_flidx;
1608*8f82136aSPatrick Kelsey 	paddrs = iru->iru_paddrs;
1609e3c97c2cSBryan Venteicher 
1610*8f82136aSPatrick Kelsey 	sc = vsc;
1611*8f82136aSPatrick Kelsey 	rxq = &sc->vmx_rxq[iru->iru_qsidx];
1612*8f82136aSPatrick Kelsey 	rxr = &rxq->vxrxq_cmd_ring[flid];
1613*8f82136aSPatrick Kelsey 	rxd = rxr->vxrxr_rxd;
1614e3c97c2cSBryan Venteicher 
1615e3c97c2cSBryan Venteicher 	/*
1616*8f82136aSPatrick Kelsey 	 * Command ring 0 is filled with BTYPE_HEAD descriptors, and
1617*8f82136aSPatrick Kelsey 	 * command ring 1 is filled with BTYPE_BODY descriptors.
1618e3c97c2cSBryan Venteicher 	 */
1619*8f82136aSPatrick Kelsey 	btype = (flid == 0) ? VMXNET3_BTYPE_HEAD : VMXNET3_BTYPE_BODY;
1620*8f82136aSPatrick Kelsey 	for (i = 0; i < count; i++) {
1621*8f82136aSPatrick Kelsey 		rxd[pidx].addr = paddrs[i];
1622*8f82136aSPatrick Kelsey 		rxd[pidx].len = len;
1623*8f82136aSPatrick Kelsey 		rxd[pidx].btype = btype;
1624*8f82136aSPatrick Kelsey 		rxd[pidx].gen = rxr->vxrxr_gen;
1625*8f82136aSPatrick Kelsey 
1626*8f82136aSPatrick Kelsey 		if (++pidx == rxr->vxrxr_ndesc) {
1627*8f82136aSPatrick Kelsey 			pidx = 0;
1628*8f82136aSPatrick Kelsey 			rxr->vxrxr_gen ^= 1;
1629*8f82136aSPatrick Kelsey 		}
1630e3c97c2cSBryan Venteicher 	}
1631e3c97c2cSBryan Venteicher }
1632e3c97c2cSBryan Venteicher 
1633*8f82136aSPatrick Kelsey static void
1634*8f82136aSPatrick Kelsey vmxnet3_isc_rxd_flush(void *vsc, uint16_t rxqid, uint8_t flid, qidx_t pidx)
1635*8f82136aSPatrick Kelsey {
1636*8f82136aSPatrick Kelsey 	struct vmxnet3_softc *sc;
1637*8f82136aSPatrick Kelsey 	struct vmxnet3_rxqueue *rxq;
1638*8f82136aSPatrick Kelsey 	struct vmxnet3_rxring *rxr;
1639e3c97c2cSBryan Venteicher 	bus_size_t r;
1640e3c97c2cSBryan Venteicher 
1641*8f82136aSPatrick Kelsey 	sc = vsc;
1642*8f82136aSPatrick Kelsey 	rxq = &sc->vmx_rxq[rxqid];
1643*8f82136aSPatrick Kelsey 	rxr = &rxq->vxrxq_cmd_ring[flid];
1644*8f82136aSPatrick Kelsey 
1645*8f82136aSPatrick Kelsey 	if (flid == 0)
1646*8f82136aSPatrick Kelsey 		r = VMXNET3_BAR0_RXH1(rxqid);
1647*8f82136aSPatrick Kelsey 	else
1648*8f82136aSPatrick Kelsey 		r = VMXNET3_BAR0_RXH2(rxqid);
1649*8f82136aSPatrick Kelsey 
1650*8f82136aSPatrick Kelsey 	/*
1651*8f82136aSPatrick Kelsey 	 * pidx is the index of the last descriptor with a buffer the device
1652*8f82136aSPatrick Kelsey 	 * can use, and the device needs to be told which index is one past
1653*8f82136aSPatrick Kelsey 	 * that.
1654*8f82136aSPatrick Kelsey 	 */
1655*8f82136aSPatrick Kelsey 	if (++pidx == rxr->vxrxr_ndesc)
1656*8f82136aSPatrick Kelsey 		pidx = 0;
1657*8f82136aSPatrick Kelsey 	vmxnet3_write_bar0(sc, r, pidx);
1658e3c97c2cSBryan Venteicher }
1659e3c97c2cSBryan Venteicher 
1660*8f82136aSPatrick Kelsey static int
1661e3c97c2cSBryan Venteicher vmxnet3_legacy_intr(void *xsc)
1662e3c97c2cSBryan Venteicher {
1663e3c97c2cSBryan Venteicher 	struct vmxnet3_softc *sc;
1664*8f82136aSPatrick Kelsey 	if_softc_ctx_t scctx;
1665*8f82136aSPatrick Kelsey 	if_ctx_t ctx;
1666e3c97c2cSBryan Venteicher 
1667e3c97c2cSBryan Venteicher 	sc = xsc;
1668*8f82136aSPatrick Kelsey 	scctx = sc->vmx_scctx;
1669*8f82136aSPatrick Kelsey 	ctx = sc->vmx_ctx;
1670e3c97c2cSBryan Venteicher 
1671*8f82136aSPatrick Kelsey 	/*
1672*8f82136aSPatrick Kelsey 	 * When there is only a single interrupt configured, this routine
1673*8f82136aSPatrick Kelsey 	 * runs in fast interrupt context, following which the rxq 0 task
1674*8f82136aSPatrick Kelsey 	 * will be enqueued.
1675*8f82136aSPatrick Kelsey 	 */
1676*8f82136aSPatrick Kelsey 	if (scctx->isc_intr == IFLIB_INTR_LEGACY) {
1677e3c97c2cSBryan Venteicher 		if (vmxnet3_read_bar1(sc, VMXNET3_BAR1_INTR) == 0)
1678*8f82136aSPatrick Kelsey 			return (FILTER_HANDLED);
1679e3c97c2cSBryan Venteicher 	}
1680e3c97c2cSBryan Venteicher 	if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
1681*8f82136aSPatrick Kelsey 		vmxnet3_intr_disable_all(ctx);
1682e3c97c2cSBryan Venteicher 
1683e3c97c2cSBryan Venteicher 	if (sc->vmx_ds->event != 0)
1684*8f82136aSPatrick Kelsey 		iflib_admin_intr_deferred(ctx);
1685e3c97c2cSBryan Venteicher 
1686*8f82136aSPatrick Kelsey 	/*
1687*8f82136aSPatrick Kelsey 	 * XXX - When there is both rxq and event activity, do we care
1688*8f82136aSPatrick Kelsey 	 * whether the rxq 0 task or the admin task re-enables the interrupt
1689*8f82136aSPatrick Kelsey 	 * first?
1690*8f82136aSPatrick Kelsey 	 */
1691*8f82136aSPatrick Kelsey 	return (FILTER_SCHEDULE_THREAD);
1692e3c97c2cSBryan Venteicher }
1693e3c97c2cSBryan Venteicher 
1694*8f82136aSPatrick Kelsey static int
1695*8f82136aSPatrick Kelsey vmxnet3_rxq_intr(void *vrxq)
1696e3c97c2cSBryan Venteicher {
1697e3c97c2cSBryan Venteicher 	struct vmxnet3_softc *sc;
1698e3c97c2cSBryan Venteicher 	struct vmxnet3_rxqueue *rxq;
1699e3c97c2cSBryan Venteicher 
1700*8f82136aSPatrick Kelsey 	rxq = vrxq;
1701e3c97c2cSBryan Venteicher 	sc = rxq->vxrxq_sc;
1702e3c97c2cSBryan Venteicher 
1703e3c97c2cSBryan Venteicher 	if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
1704e3c97c2cSBryan Venteicher 		vmxnet3_disable_intr(sc, rxq->vxrxq_intr_idx);
1705e3c97c2cSBryan Venteicher 
1706*8f82136aSPatrick Kelsey 	return (FILTER_SCHEDULE_THREAD);
1707e3c97c2cSBryan Venteicher }
1708e3c97c2cSBryan Venteicher 
/*
 * Event interrupt filter: mask the event vector when the device is in
 * active interrupt-mask mode, then schedule the admin task.
 */
static int
vmxnet3_event_intr(void *vsc)
{
	struct vmxnet3_softc *sc;

	sc = vsc;

	if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(sc, sc->vmx_event_intr_idx);

	/*
	 * The work will be done via vmxnet3_update_admin_status(), and the
	 * interrupt will be re-enabled by vmxnet3_link_intr_enable().
	 */
	return (FILTER_SCHEDULE_THREAD);
}
1727e3c97c2cSBryan Venteicher 
/*
 * ifdi_stop handler: mark the link inactive and quiesce the device.
 * DISABLE stops DMA/interrupt activity and the subsequent RESET returns
 * the device to a state from which a later init can bring it back up.
 */
1728e3c97c2cSBryan Venteicher static void
1729*8f82136aSPatrick Kelsey vmxnet3_stop(if_ctx_t ctx)
1730e3c97c2cSBryan Venteicher {
1731*8f82136aSPatrick Kelsey 	struct vmxnet3_softc *sc;
1732e3c97c2cSBryan Venteicher 
1733*8f82136aSPatrick Kelsey 	sc = iflib_get_softc(ctx);
1734e3c97c2cSBryan Venteicher 
1735e3c97c2cSBryan Venteicher 	sc->vmx_link_active = 0;
1736e3c97c2cSBryan Venteicher 	vmxnet3_write_cmd(sc, VMXNET3_CMD_DISABLE);
1737e3c97c2cSBryan Venteicher 	vmxnet3_write_cmd(sc, VMXNET3_CMD_RESET);
1738e3c97c2cSBryan Venteicher }
1739e3c97c2cSBryan Venteicher 
/*
 * Reset a transmit queue's software state: rewind the command and
 * completion ring indices and reset their generation bits.  The
 * descriptor memory itself is not touched here; iflib has already
 * zeroed it (see the inline comments below).
 */
1740e3c97c2cSBryan Venteicher static void
1741e3c97c2cSBryan Venteicher vmxnet3_txinit(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *txq)
1742e3c97c2cSBryan Venteicher {
1743e3c97c2cSBryan Venteicher 	struct vmxnet3_txring *txr;
1744e3c97c2cSBryan Venteicher 	struct vmxnet3_comp_ring *txc;
1745e3c97c2cSBryan Venteicher 
1746*8f82136aSPatrick Kelsey 	txq->vxtxq_last_flush = -1;
1747*8f82136aSPatrick Kelsey 
1748e3c97c2cSBryan Venteicher 	txr = &txq->vxtxq_cmd_ring;
1749e3c97c2cSBryan Venteicher 	txr->vxtxr_next = 0;
1750e3c97c2cSBryan Venteicher 	txr->vxtxr_gen = VMXNET3_INIT_GEN;
1751*8f82136aSPatrick Kelsey 	/*
1752*8f82136aSPatrick Kelsey 	 * iflib has zeroed out the descriptor array during the prior attach
1753*8f82136aSPatrick Kelsey 	 * or stop
1754*8f82136aSPatrick Kelsey 	 */
1755e3c97c2cSBryan Venteicher 
1756e3c97c2cSBryan Venteicher 	txc = &txq->vxtxq_comp_ring;
1757e3c97c2cSBryan Venteicher 	txc->vxcr_next = 0;
1758e3c97c2cSBryan Venteicher 	txc->vxcr_gen = VMXNET3_INIT_GEN;
1759*8f82136aSPatrick Kelsey 	/*
1760*8f82136aSPatrick Kelsey 	 * iflib has zeroed out the descriptor array during the prior attach
1761*8f82136aSPatrick Kelsey 	 * or stop
1762*8f82136aSPatrick Kelsey 	 */
1763e3c97c2cSBryan Venteicher }
1764e3c97c2cSBryan Venteicher 
/*
 * Reset a receive queue's software state.  The first (isc_nrxqs - 1)
 * command rings are the ones iflib actually fills; any remaining rings
 * up to VMXNET3_RXRINGS_PERQ are unused and are explicitly zeroed with
 * generation 0.  Note that the loop index 'i' intentionally carries over
 * from the first loop into the second.
 */
1765*8f82136aSPatrick Kelsey static void
1766e3c97c2cSBryan Venteicher vmxnet3_rxinit(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rxq)
1767e3c97c2cSBryan Venteicher {
1768e3c97c2cSBryan Venteicher 	struct vmxnet3_rxring *rxr;
1769e3c97c2cSBryan Venteicher 	struct vmxnet3_comp_ring *rxc;
1770*8f82136aSPatrick Kelsey 	int i;
1771e3c97c2cSBryan Venteicher 
1772e3c97c2cSBryan Venteicher 	/*
1773*8f82136aSPatrick Kelsey 	 * The descriptors will be populated with buffers during a
1774*8f82136aSPatrick Kelsey 	 * subsequent invocation of vmxnet3_isc_rxd_refill()
1775e3c97c2cSBryan Venteicher 	 */
1776*8f82136aSPatrick Kelsey 	for (i = 0; i < sc->vmx_sctx->isc_nrxqs - 1; i++) {
1777e3c97c2cSBryan Venteicher 		rxr = &rxq->vxrxq_cmd_ring[i];
1778e3c97c2cSBryan Venteicher 		rxr->vxrxr_gen = VMXNET3_INIT_GEN;
1779*8f82136aSPatrick Kelsey 		/*
1780*8f82136aSPatrick Kelsey 		 * iflib has zeroed out the descriptor array during the
1781*8f82136aSPatrick Kelsey 		 * prior attach or stop
1782*8f82136aSPatrick Kelsey 		 */
1783e3c97c2cSBryan Venteicher 	}
1784e3c97c2cSBryan Venteicher 
	/* Unused rings: generation 0 and zeroed descriptors. */
1785e3c97c2cSBryan Venteicher 	for (/**/; i < VMXNET3_RXRINGS_PERQ; i++) {
1786e3c97c2cSBryan Venteicher 		rxr = &rxq->vxrxq_cmd_ring[i];
1787e3c97c2cSBryan Venteicher 		rxr->vxrxr_gen = 0;
1788e3c97c2cSBryan Venteicher 		bzero(rxr->vxrxr_rxd,
1789e3c97c2cSBryan Venteicher 		    rxr->vxrxr_ndesc * sizeof(struct vmxnet3_rxdesc));
1790e3c97c2cSBryan Venteicher 	}
1791e3c97c2cSBryan Venteicher 
1792e3c97c2cSBryan Venteicher 	rxc = &rxq->vxrxq_comp_ring;
1793e3c97c2cSBryan Venteicher 	rxc->vxcr_next = 0;
1794e3c97c2cSBryan Venteicher 	rxc->vxcr_gen = VMXNET3_INIT_GEN;
1795*8f82136aSPatrick Kelsey 	/*
1796*8f82136aSPatrick Kelsey 	 * iflib has zeroed out the descriptor array during the prior attach
1797*8f82136aSPatrick Kelsey 	 * or stop
1798*8f82136aSPatrick Kelsey 	 */
1799e3c97c2cSBryan Venteicher }
1800e3c97c2cSBryan Venteicher 
/*
 * Reset the software state of every TX and RX queue set, as configured
 * in the iflib softc context.
 */
1801*8f82136aSPatrick Kelsey static void
1802e3c97c2cSBryan Venteicher vmxnet3_reinit_queues(struct vmxnet3_softc *sc)
1803e3c97c2cSBryan Venteicher {
1804*8f82136aSPatrick Kelsey 	if_softc_ctx_t scctx;
1805*8f82136aSPatrick Kelsey 	int q;
1806e3c97c2cSBryan Venteicher 
1807*8f82136aSPatrick Kelsey 	scctx = sc->vmx_scctx;
1808e3c97c2cSBryan Venteicher 
1809*8f82136aSPatrick Kelsey 	for (q = 0; q < scctx->isc_ntxqsets; q++)
1810e3c97c2cSBryan Venteicher 		vmxnet3_txinit(sc, &sc->vmx_txq[q]);
1811e3c97c2cSBryan Venteicher 
1812*8f82136aSPatrick Kelsey 	for (q = 0; q < scctx->isc_nrxqsets; q++)
1813*8f82136aSPatrick Kelsey 		vmxnet3_rxinit(sc, &sc->vmx_rxq[q]);
1814e3c97c2cSBryan Venteicher }
1815e3c97c2cSBryan Venteicher 
/*
 * Issue the ENABLE command to the device and reset the head pointers of
 * both RX rings on every RX queue.
 *
 * Returns 0 on success, non-zero (1) if the device rejects the ENABLE
 * command.
 */
1816e3c97c2cSBryan Venteicher static int
1817e3c97c2cSBryan Venteicher vmxnet3_enable_device(struct vmxnet3_softc *sc)
1818e3c97c2cSBryan Venteicher {
1819*8f82136aSPatrick Kelsey 	if_softc_ctx_t scctx;
1820e3c97c2cSBryan Venteicher 	int q;
1821e3c97c2cSBryan Venteicher 
1822*8f82136aSPatrick Kelsey 	scctx = sc->vmx_scctx;
1823*8f82136aSPatrick Kelsey 
	/* A non-zero result from the ENABLE command indicates failure. */
1824e3c97c2cSBryan Venteicher 	if (vmxnet3_read_cmd(sc, VMXNET3_CMD_ENABLE) != 0) {
1825e3c97c2cSBryan Venteicher 		device_printf(sc->vmx_dev, "device enable command failed!\n");
1826e3c97c2cSBryan Venteicher 		return (1);
1827e3c97c2cSBryan Venteicher 	}
1828e3c97c2cSBryan Venteicher 
1829e3c97c2cSBryan Venteicher 	/* Reset the Rx queue heads. */
1830*8f82136aSPatrick Kelsey 	for (q = 0; q < scctx->isc_nrxqsets; q++) {
1831e3c97c2cSBryan Venteicher 		vmxnet3_write_bar0(sc, VMXNET3_BAR0_RXH1(q), 0);
1832e3c97c2cSBryan Venteicher 		vmxnet3_write_bar0(sc, VMXNET3_BAR0_RXH2(q), 0);
1833e3c97c2cSBryan Venteicher 	}
1834e3c97c2cSBryan Venteicher 
1835e3c97c2cSBryan Venteicher 	return (0);
1836e3c97c2cSBryan Venteicher }
1837e3c97c2cSBryan Venteicher 
/*
 * Re-program the device's receive filters after a (re)initialization:
 * push the current rx mode / multicast table, then either restore the
 * cached VLAN filter bitmap (when HW VLAN filtering is enabled) or clear
 * the device's filter so all VLANs pass.
 */
1838e3c97c2cSBryan Venteicher static void
1839e3c97c2cSBryan Venteicher vmxnet3_reinit_rxfilters(struct vmxnet3_softc *sc)
1840e3c97c2cSBryan Venteicher {
1841e3c97c2cSBryan Venteicher 	struct ifnet *ifp;
1842e3c97c2cSBryan Venteicher 
1843e3c97c2cSBryan Venteicher 	ifp = sc->vmx_ifp;
1844e3c97c2cSBryan Venteicher 
1845*8f82136aSPatrick Kelsey 	vmxnet3_set_rxfilter(sc, if_getflags(ifp));
1846e3c97c2cSBryan Venteicher 
1847e3c97c2cSBryan Venteicher 	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
1848e3c97c2cSBryan Venteicher 		bcopy(sc->vmx_vlan_filter, sc->vmx_ds->vlan_filter,
1849e3c97c2cSBryan Venteicher 		    sizeof(sc->vmx_ds->vlan_filter));
1850e3c97c2cSBryan Venteicher 	else
1851e3c97c2cSBryan Venteicher 		bzero(sc->vmx_ds->vlan_filter,
1852e3c97c2cSBryan Venteicher 		    sizeof(sc->vmx_ds->vlan_filter));
1853e3c97c2cSBryan Venteicher 	vmxnet3_write_cmd(sc, VMXNET3_CMD_VLAN_FILTER);
1854e3c97c2cSBryan Venteicher }
1855e3c97c2cSBryan Venteicher 
/*
 * ifdi_init handler: bring the device up.  Computes the max frame size
 * from the current MTU, programs the current MAC address, rebuilds the
 * shared-memory state and queue state, enables the device, restores the
 * receive filters, and reports the current link state to iflib.
 */
1856*8f82136aSPatrick Kelsey static void
1857*8f82136aSPatrick Kelsey vmxnet3_init(if_ctx_t ctx)
1858e3c97c2cSBryan Venteicher {
1859*8f82136aSPatrick Kelsey 	struct vmxnet3_softc *sc;
1860*8f82136aSPatrick Kelsey 	if_softc_ctx_t scctx;
1861e3c97c2cSBryan Venteicher 
1862*8f82136aSPatrick Kelsey 	sc = iflib_get_softc(ctx);
1863*8f82136aSPatrick Kelsey 	scctx = sc->vmx_scctx;
1864*8f82136aSPatrick Kelsey 
1865*8f82136aSPatrick Kelsey 	scctx->isc_max_frame_size = if_getmtu(iflib_get_ifp(ctx)) +
1866*8f82136aSPatrick Kelsey 	    ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ETHER_CRC_LEN;
1867*8f82136aSPatrick Kelsey 
1868*8f82136aSPatrick Kelsey 	/* Use the current MAC address. */
1869*8f82136aSPatrick Kelsey 	bcopy(IF_LLADDR(sc->vmx_ifp), sc->vmx_lladdr, ETHER_ADDR_LEN);
1870*8f82136aSPatrick Kelsey 	vmxnet3_set_lladdr(sc);
1871*8f82136aSPatrick Kelsey 
1872e3c97c2cSBryan Venteicher 	vmxnet3_reinit_shared_data(sc);
1873*8f82136aSPatrick Kelsey 	vmxnet3_reinit_queues(sc);
1874e3c97c2cSBryan Venteicher 
	/* NOTE(review): enable failure is not propagated here — ifdi_init
	 * returns void, so a failed ENABLE leaves the device down. */
1875*8f82136aSPatrick Kelsey 	vmxnet3_enable_device(sc);
1876e3c97c2cSBryan Venteicher 
1877e3c97c2cSBryan Venteicher 	vmxnet3_reinit_rxfilters(sc);
1878e3c97c2cSBryan Venteicher 	vmxnet3_link_status(sc);
1879e3c97c2cSBryan Venteicher }
1880e3c97c2cSBryan Venteicher 
/*
 * ifdi_multi_set handler: rebuild the receive filter (including the
 * multicast table) from the interface's current flags and address list.
 */
1881e3c97c2cSBryan Venteicher static void
1882*8f82136aSPatrick Kelsey vmxnet3_multi_set(if_ctx_t ctx)
1883e3c97c2cSBryan Venteicher {
1884e3c97c2cSBryan Venteicher 
1885*8f82136aSPatrick Kelsey 	vmxnet3_set_rxfilter(iflib_get_softc(ctx),
1886*8f82136aSPatrick Kelsey 	    if_getflags(iflib_get_ifp(ctx)));
1887e3c97c2cSBryan Venteicher }
1888e3c97c2cSBryan Venteicher 
/*
 * ifdi_mtu_set handler: validate a requested MTU.  Returns EINVAL when
 * the resulting Ethernet frame (headers + optional VLAN tag + CRC) would
 * exceed VMXNET3_TX_MAXSIZE; returns 0 to accept the new MTU.
 */
1889e3c97c2cSBryan Venteicher static int
1890*8f82136aSPatrick Kelsey vmxnet3_mtu_set(if_ctx_t ctx, uint32_t mtu)
1891e3c97c2cSBryan Venteicher {
1892e3c97c2cSBryan Venteicher 
1893*8f82136aSPatrick Kelsey 	if (mtu > VMXNET3_TX_MAXSIZE - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
1894*8f82136aSPatrick Kelsey 		ETHER_CRC_LEN))
1895e3c97c2cSBryan Venteicher 		return (EINVAL);
1896e3c97c2cSBryan Venteicher 
1897e3c97c2cSBryan Venteicher 	return (0);
1898e3c97c2cSBryan Venteicher }
1899e3c97c2cSBryan Venteicher 
/*
 * ifdi_media_status handler: report the (virtual) media state.  The link
 * is always reported as valid; when up it is shown as active/auto, and
 * as "none" otherwise.
 */
1900*8f82136aSPatrick Kelsey static void
1901*8f82136aSPatrick Kelsey vmxnet3_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
1902e3c97c2cSBryan Venteicher {
1903*8f82136aSPatrick Kelsey 	struct vmxnet3_softc *sc;
1904e3c97c2cSBryan Venteicher 
1905*8f82136aSPatrick Kelsey 	sc = iflib_get_softc(ctx);
1906e3c97c2cSBryan Venteicher 
1907*8f82136aSPatrick Kelsey 	ifmr->ifm_status = IFM_AVALID;
1908*8f82136aSPatrick Kelsey 	ifmr->ifm_active = IFM_ETHER;
1909e3c97c2cSBryan Venteicher 
1910*8f82136aSPatrick Kelsey 	if (vmxnet3_link_is_up(sc) != 0) {
1911*8f82136aSPatrick Kelsey 		ifmr->ifm_status |= IFM_ACTIVE;
1912*8f82136aSPatrick Kelsey 		ifmr->ifm_active |= IFM_AUTO;
1913e3c97c2cSBryan Venteicher 	} else
1914*8f82136aSPatrick Kelsey 		ifmr->ifm_active |= IFM_NONE;
1915e3c97c2cSBryan Venteicher }
1916e3c97c2cSBryan Venteicher 
/*
 * ifdi_media_change handler: media selection has no meaning on this
 * virtual device, so requests are accepted and ignored.
 */
1917e3c97c2cSBryan Venteicher static int
1918*8f82136aSPatrick Kelsey vmxnet3_media_change(if_ctx_t ctx)
1919e3c97c2cSBryan Venteicher {
1920e3c97c2cSBryan Venteicher 
1921*8f82136aSPatrick Kelsey 	/* Ignore. */
1922c7156fe9SLuigi Rizzo 	return (0);
1923e557c1ddSBryan Venteicher }
1924e557c1ddSBryan Venteicher 
/*
 * ifdi_promisc_set handler: rebuild the receive filter using the
 * supplied interface flags (IFF_PROMISC / IFF_ALLMULTI).  Always
 * succeeds.
 */
1925e557c1ddSBryan Venteicher static int
1926*8f82136aSPatrick Kelsey vmxnet3_promisc_set(if_ctx_t ctx, int flags)
1927e557c1ddSBryan Venteicher {
1928e557c1ddSBryan Venteicher 
1929*8f82136aSPatrick Kelsey 	vmxnet3_set_rxfilter(iflib_get_softc(ctx), flags);
1930e557c1ddSBryan Venteicher 
1931*8f82136aSPatrick Kelsey 	return (0);
1932e557c1ddSBryan Venteicher }
1933e557c1ddSBryan Venteicher 
/*
 * ifdi_get_counter handler.  All standard counters (cnt < IFCOUNTERS)
 * are answered from the stack's default accounting; this driver keeps
 * no additional driver-specific counters, so anything else reads as 0.
 */
1934*8f82136aSPatrick Kelsey static uint64_t
1935*8f82136aSPatrick Kelsey vmxnet3_get_counter(if_ctx_t ctx, ift_counter cnt)
1936*8f82136aSPatrick Kelsey {
1937*8f82136aSPatrick Kelsey 	if_t ifp = iflib_get_ifp(ctx);
1938*8f82136aSPatrick Kelsey 
1939*8f82136aSPatrick Kelsey 	if (cnt < IFCOUNTERS)
1940*8f82136aSPatrick Kelsey 		return if_get_counter_default(ifp, cnt);
1941*8f82136aSPatrick Kelsey 
1942*8f82136aSPatrick Kelsey 	return (0);
1943e557c1ddSBryan Venteicher }
1944e557c1ddSBryan Venteicher 
/*
 * ifdi_update_admin_status handler, run from the iflib admin task:
 * process any pending device events flagged in the shared-memory area,
 * then ask the device to refresh its statistics.
 */
1945e557c1ddSBryan Venteicher static void
1946*8f82136aSPatrick Kelsey vmxnet3_update_admin_status(if_ctx_t ctx)
1947e557c1ddSBryan Venteicher {
1948e557c1ddSBryan Venteicher 	struct vmxnet3_softc *sc;
1949e557c1ddSBryan Venteicher 
1950*8f82136aSPatrick Kelsey 	sc = iflib_get_softc(ctx);
1951*8f82136aSPatrick Kelsey 	if (sc->vmx_ds->event != 0)
1952*8f82136aSPatrick Kelsey 		vmxnet3_evintr(sc);
1953e557c1ddSBryan Venteicher 
1954*8f82136aSPatrick Kelsey 	vmxnet3_refresh_host_stats(sc);
1955e557c1ddSBryan Venteicher }
1956e557c1ddSBryan Venteicher 
/*
 * ifdi_timer handler, invoked periodically per TX queue.  The host
 * statistics refresh is device-global, so it is triggered only from
 * queue 0 to avoid issuing the command once per queue.
 */
1957e557c1ddSBryan Venteicher static void
1958*8f82136aSPatrick Kelsey vmxnet3_txq_timer(if_ctx_t ctx, uint16_t qid)
1959e557c1ddSBryan Venteicher {
1960*8f82136aSPatrick Kelsey 	/* Host stats refresh is global, so just trigger it on txq 0 */
1961*8f82136aSPatrick Kelsey 	if (qid == 0)
1962*8f82136aSPatrick Kelsey 		vmxnet3_refresh_host_stats(iflib_get_softc(ctx));
1963e557c1ddSBryan Venteicher }
1964e557c1ddSBryan Venteicher 
/*
 * Set (add != 0) or clear a VLAN tag in the driver's cached VLAN filter
 * bitmap.  The bitmap is an array of 32-bit words: tag >> 5 selects the
 * word (128 words cover tags 0..4095) and tag & 0x1F selects the bit.
 * Tags 0 and > 4095 are ignored.  Only the cached copy is updated here;
 * it is pushed to the device by vmxnet3_reinit_rxfilters().
 */
1965e3c97c2cSBryan Venteicher static void
1966e3c97c2cSBryan Venteicher vmxnet3_update_vlan_filter(struct vmxnet3_softc *sc, int add, uint16_t tag)
1967e3c97c2cSBryan Venteicher {
1968e3c97c2cSBryan Venteicher 	int idx, bit;
1969e3c97c2cSBryan Venteicher 
1970e3c97c2cSBryan Venteicher 	if (tag == 0 || tag > 4095)
1971e3c97c2cSBryan Venteicher 		return;
1972e3c97c2cSBryan Venteicher 
1973*8f82136aSPatrick Kelsey 	idx = (tag >> 5) & 0x7F;
1974*8f82136aSPatrick Kelsey 	bit = tag & 0x1F;
1975e3c97c2cSBryan Venteicher 
1976e3c97c2cSBryan Venteicher 	/* Update our private VLAN bitvector. */
1977e3c97c2cSBryan Venteicher 	if (add)
1978e3c97c2cSBryan Venteicher 		sc->vmx_vlan_filter[idx] |= (1 << bit);
1979e3c97c2cSBryan Venteicher 	else
1980e3c97c2cSBryan Venteicher 		sc->vmx_vlan_filter[idx] &= ~(1 << bit);
1981e3c97c2cSBryan Venteicher }
1982e3c97c2cSBryan Venteicher 
/* ifdi_vlan_register handler: add a VLAN tag to the cached filter. */
1983e3c97c2cSBryan Venteicher static void
1984*8f82136aSPatrick Kelsey vmxnet3_vlan_register(if_ctx_t ctx, uint16_t tag)
1985e3c97c2cSBryan Venteicher {
1986e3c97c2cSBryan Venteicher 
1987*8f82136aSPatrick Kelsey 	vmxnet3_update_vlan_filter(iflib_get_softc(ctx), 1, tag);
1988e3c97c2cSBryan Venteicher }
1989e3c97c2cSBryan Venteicher 
/* ifdi_vlan_unregister handler: remove a VLAN tag from the cached filter. */
1990e3c97c2cSBryan Venteicher static void
1991*8f82136aSPatrick Kelsey vmxnet3_vlan_unregister(if_ctx_t ctx, uint16_t tag)
1992e3c97c2cSBryan Venteicher {
1993e3c97c2cSBryan Venteicher 
1994*8f82136aSPatrick Kelsey 	vmxnet3_update_vlan_filter(iflib_get_softc(ctx), 0, tag);
1995e3c97c2cSBryan Venteicher }
1996e3c97c2cSBryan Venteicher 
/*
 * Program the device's receive filter from the given interface flags.
 * Unicast and broadcast reception are always on.  IFF_PROMISC and
 * IFF_ALLMULTI map directly to the corresponding rx modes; otherwise the
 * interface's multicast list is copied into the shared multicast table
 * (up to VMXNET3_MULTICAST_MAX entries — overflow falls back to
 * ALLMULTI).  Both the filter table and the rx mode are then pushed to
 * the device.
 */
1997e3c97c2cSBryan Venteicher static void
1998*8f82136aSPatrick Kelsey vmxnet3_set_rxfilter(struct vmxnet3_softc *sc, int flags)
1999e3c97c2cSBryan Venteicher {
2000e3c97c2cSBryan Venteicher 	struct ifnet *ifp;
2001e3c97c2cSBryan Venteicher 	struct vmxnet3_driver_shared *ds;
2002e3c97c2cSBryan Venteicher 	struct ifmultiaddr *ifma;
2003e3c97c2cSBryan Venteicher 	u_int mode;
2004e3c97c2cSBryan Venteicher 
2005e3c97c2cSBryan Venteicher 	ifp = sc->vmx_ifp;
2006e3c97c2cSBryan Venteicher 	ds = sc->vmx_ds;
2007e3c97c2cSBryan Venteicher 
2008e557c1ddSBryan Venteicher 	mode = VMXNET3_RXMODE_UCAST | VMXNET3_RXMODE_BCAST;
2009*8f82136aSPatrick Kelsey 	if (flags & IFF_PROMISC)
2010e3c97c2cSBryan Venteicher 		mode |= VMXNET3_RXMODE_PROMISC;
2011*8f82136aSPatrick Kelsey 	if (flags & IFF_ALLMULTI)
2012e3c97c2cSBryan Venteicher 		mode |= VMXNET3_RXMODE_ALLMULTI;
2013e3c97c2cSBryan Venteicher 	else {
2014e3c97c2cSBryan Venteicher 		int cnt = 0, overflow = 0;
2015e3c97c2cSBryan Venteicher 
		/* Copy link-layer multicast addresses into the shared table. */
2016e3c97c2cSBryan Venteicher 		if_maddr_rlock(ifp);
2017d7c5a620SMatt Macy 		CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2018e3c97c2cSBryan Venteicher 			if (ifma->ifma_addr->sa_family != AF_LINK)
2019e3c97c2cSBryan Venteicher 				continue;
2020e3c97c2cSBryan Venteicher 			else if (cnt == VMXNET3_MULTICAST_MAX) {
2021e3c97c2cSBryan Venteicher 				overflow = 1;
2022e3c97c2cSBryan Venteicher 				break;
2023e3c97c2cSBryan Venteicher 			}
2024e3c97c2cSBryan Venteicher 
2025e3c97c2cSBryan Venteicher 			bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
2026e3c97c2cSBryan Venteicher 			   &sc->vmx_mcast[cnt*ETHER_ADDR_LEN], ETHER_ADDR_LEN);
2027e3c97c2cSBryan Venteicher 			cnt++;
2028e3c97c2cSBryan Venteicher 		}
2029e3c97c2cSBryan Venteicher 		if_maddr_runlock(ifp);
2030e3c97c2cSBryan Venteicher 
		/* Too many groups for the table: receive all multicast. */
2031e3c97c2cSBryan Venteicher 		if (overflow != 0) {
2032e3c97c2cSBryan Venteicher 			cnt = 0;
2033e3c97c2cSBryan Venteicher 			mode |= VMXNET3_RXMODE_ALLMULTI;
2034e3c97c2cSBryan Venteicher 		} else if (cnt > 0)
2035e3c97c2cSBryan Venteicher 			mode |= VMXNET3_RXMODE_MCAST;
2036e3c97c2cSBryan Venteicher 		ds->mcast_tablelen = cnt * ETHER_ADDR_LEN;
2037e3c97c2cSBryan Venteicher 	}
2038e3c97c2cSBryan Venteicher 
2039e3c97c2cSBryan Venteicher 	ds->rxmode = mode;
2040e3c97c2cSBryan Venteicher 
2041e3c97c2cSBryan Venteicher 	vmxnet3_write_cmd(sc, VMXNET3_CMD_SET_FILTER);
2042e3c97c2cSBryan Venteicher 	vmxnet3_write_cmd(sc, VMXNET3_CMD_SET_RXMODE);
2043e3c97c2cSBryan Venteicher }
2044e3c97c2cSBryan Venteicher 
/*
 * Ask the device to update the statistics blocks in shared memory
 * (read back via the per-queue "hstats" sysctl nodes).
 */
2045e3c97c2cSBryan Venteicher static void
2046e557c1ddSBryan Venteicher vmxnet3_refresh_host_stats(struct vmxnet3_softc *sc)
2047e3c97c2cSBryan Venteicher {
2048e3c97c2cSBryan Venteicher 
2049e3c97c2cSBryan Venteicher 	vmxnet3_write_cmd(sc, VMXNET3_CMD_GET_STATS);
2050e3c97c2cSBryan Venteicher }
2051e3c97c2cSBryan Venteicher 
/*
 * Query the device for link state.  Bit 0 of the GET_LINK result is the
 * up/down flag; returns 1 when the link is up, 0 otherwise.
 */
2052e3c97c2cSBryan Venteicher static int
2053e3c97c2cSBryan Venteicher vmxnet3_link_is_up(struct vmxnet3_softc *sc)
2054e3c97c2cSBryan Venteicher {
2055e3c97c2cSBryan Venteicher 	uint32_t status;
2056e3c97c2cSBryan Venteicher 
2057e3c97c2cSBryan Venteicher 	status = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_LINK);
2058e3c97c2cSBryan Venteicher 	return !!(status & 0x1);
2059e3c97c2cSBryan Venteicher }
2060e3c97c2cSBryan Venteicher 
/*
 * Poll the device link state and, on a transition, report it to iflib.
 * The link speed is always advertised as a fixed 10 Gb/s since the
 * virtual device has no real media.
 */
2061e3c97c2cSBryan Venteicher static void
2062e3c97c2cSBryan Venteicher vmxnet3_link_status(struct vmxnet3_softc *sc)
2063e3c97c2cSBryan Venteicher {
2064*8f82136aSPatrick Kelsey 	if_ctx_t ctx;
2065*8f82136aSPatrick Kelsey 	uint64_t speed;
2066e3c97c2cSBryan Venteicher 	int link;
2067e3c97c2cSBryan Venteicher 
2068*8f82136aSPatrick Kelsey 	ctx = sc->vmx_ctx;
2069e3c97c2cSBryan Venteicher 	link = vmxnet3_link_is_up(sc);
2070*8f82136aSPatrick Kelsey 	speed = IF_Gbps(10);
2071e3c97c2cSBryan Venteicher 
	/* Only notify iflib on an actual up/down transition. */
2072e3c97c2cSBryan Venteicher 	if (link != 0 && sc->vmx_link_active == 0) {
2073e3c97c2cSBryan Venteicher 		sc->vmx_link_active = 1;
2074*8f82136aSPatrick Kelsey 		iflib_link_state_change(ctx, LINK_STATE_UP, speed);
2075e3c97c2cSBryan Venteicher 	} else if (link == 0 && sc->vmx_link_active != 0) {
2076e3c97c2cSBryan Venteicher 		sc->vmx_link_active = 0;
2077*8f82136aSPatrick Kelsey 		iflib_link_state_change(ctx, LINK_STATE_DOWN, speed);
2078e3c97c2cSBryan Venteicher 	}
2079e3c97c2cSBryan Venteicher }
2080e3c97c2cSBryan Venteicher 
/*
 * Program the device MAC address from sc->vmx_lladdr.  The six bytes are
 * packed little-endian into two registers: bytes 0-3 into MACL and
 * bytes 4-5 into MACH.
 */
2081e3c97c2cSBryan Venteicher static void
2082e3c97c2cSBryan Venteicher vmxnet3_set_lladdr(struct vmxnet3_softc *sc)
2083e3c97c2cSBryan Venteicher {
2084e3c97c2cSBryan Venteicher 	uint32_t ml, mh;
2085e3c97c2cSBryan Venteicher 
2086e3c97c2cSBryan Venteicher 	ml  = sc->vmx_lladdr[0];
2087e3c97c2cSBryan Venteicher 	ml |= sc->vmx_lladdr[1] << 8;
2088e3c97c2cSBryan Venteicher 	ml |= sc->vmx_lladdr[2] << 16;
2089e3c97c2cSBryan Venteicher 	ml |= sc->vmx_lladdr[3] << 24;
2090e3c97c2cSBryan Venteicher 	vmxnet3_write_bar1(sc, VMXNET3_BAR1_MACL, ml);
2091e3c97c2cSBryan Venteicher 
2092e3c97c2cSBryan Venteicher 	mh  = sc->vmx_lladdr[4];
2093e3c97c2cSBryan Venteicher 	mh |= sc->vmx_lladdr[5] << 8;
2094e3c97c2cSBryan Venteicher 	vmxnet3_write_bar1(sc, VMXNET3_BAR1_MACH, mh);
2095e3c97c2cSBryan Venteicher }
2096e3c97c2cSBryan Venteicher 
/*
 * Read the device MAC address into sc->vmx_lladdr.  Inverse of
 * vmxnet3_set_lladdr(): MACL holds bytes 0-3 and MACH bytes 4-5,
 * little-endian.
 */
2097e3c97c2cSBryan Venteicher static void
2098e3c97c2cSBryan Venteicher vmxnet3_get_lladdr(struct vmxnet3_softc *sc)
2099e3c97c2cSBryan Venteicher {
2100e3c97c2cSBryan Venteicher 	uint32_t ml, mh;
2101e3c97c2cSBryan Venteicher 
2102e3c97c2cSBryan Venteicher 	ml = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_MACL);
2103e3c97c2cSBryan Venteicher 	mh = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_MACH);
2104e3c97c2cSBryan Venteicher 
2105e3c97c2cSBryan Venteicher 	sc->vmx_lladdr[0] = ml;
2106e3c97c2cSBryan Venteicher 	sc->vmx_lladdr[1] = ml >> 8;
2107e3c97c2cSBryan Venteicher 	sc->vmx_lladdr[2] = ml >> 16;
2108e3c97c2cSBryan Venteicher 	sc->vmx_lladdr[3] = ml >> 24;
2109e3c97c2cSBryan Venteicher 	sc->vmx_lladdr[4] = mh;
2110e3c97c2cSBryan Venteicher 	sc->vmx_lladdr[5] = mh >> 8;
2111e3c97c2cSBryan Venteicher }
2112e3c97c2cSBryan Venteicher 
/*
 * Create the sysctl subtree for one TX queue ("txqN") and populate its
 * "hstats" node with the host-maintained UPT1 transmit statistics.  The
 * stat values live in shared memory and are refreshed by the device via
 * VMXNET3_CMD_GET_STATS.
 */
2113e3c97c2cSBryan Venteicher static void
2114e3c97c2cSBryan Venteicher vmxnet3_setup_txq_sysctl(struct vmxnet3_txqueue *txq,
2115e3c97c2cSBryan Venteicher     struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child)
2116e3c97c2cSBryan Venteicher {
2117e3c97c2cSBryan Venteicher 	struct sysctl_oid *node, *txsnode;
2118e3c97c2cSBryan Venteicher 	struct sysctl_oid_list *list, *txslist;
2119e3c97c2cSBryan Venteicher 	struct UPT1_TxStats *txstats;
2120e3c97c2cSBryan Venteicher 	char namebuf[16];
2121e3c97c2cSBryan Venteicher 
2122e3c97c2cSBryan Venteicher 	txstats = &txq->vxtxq_ts->stats;
2123e3c97c2cSBryan Venteicher 
2124e3c97c2cSBryan Venteicher 	snprintf(namebuf, sizeof(namebuf), "txq%d", txq->vxtxq_id);
2125e3c97c2cSBryan Venteicher 	node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD,
2126e3c97c2cSBryan Venteicher 	    NULL, "Transmit Queue");
2127e3c97c2cSBryan Venteicher 	txq->vxtxq_sysctl = list = SYSCTL_CHILDREN(node);
2128e3c97c2cSBryan Venteicher 
2129e3c97c2cSBryan Venteicher 	/*
2130*8f82136aSPatrick Kelsey 	 * Add statistics reported by the host. These are updated by the
2131*8f82136aSPatrick Kelsey 	 * iflib txq timer on txq 0.
2132e3c97c2cSBryan Venteicher 	 */
2133e3c97c2cSBryan Venteicher 	txsnode = SYSCTL_ADD_NODE(ctx, list, OID_AUTO, "hstats", CTLFLAG_RD,
2134e3c97c2cSBryan Venteicher 	    NULL, "Host Statistics");
2135e3c97c2cSBryan Venteicher 	txslist = SYSCTL_CHILDREN(txsnode);
2136e3c97c2cSBryan Venteicher 	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tso_packets", CTLFLAG_RD,
2137e3c97c2cSBryan Venteicher 	    &txstats->TSO_packets, "TSO packets");
2138e3c97c2cSBryan Venteicher 	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tso_bytes", CTLFLAG_RD,
2139e3c97c2cSBryan Venteicher 	    &txstats->TSO_bytes, "TSO bytes");
2140e3c97c2cSBryan Venteicher 	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "ucast_packets", CTLFLAG_RD,
2141e3c97c2cSBryan Venteicher 	    &txstats->ucast_packets, "Unicast packets");
2142e3c97c2cSBryan Venteicher 	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "unicast_bytes", CTLFLAG_RD,
2143e3c97c2cSBryan Venteicher 	    &txstats->ucast_bytes, "Unicast bytes");
2144e3c97c2cSBryan Venteicher 	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "mcast_packets", CTLFLAG_RD,
2145e3c97c2cSBryan Venteicher 	    &txstats->mcast_packets, "Multicast packets");
2146e3c97c2cSBryan Venteicher 	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "mcast_bytes", CTLFLAG_RD,
2147e3c97c2cSBryan Venteicher 	    &txstats->mcast_bytes, "Multicast bytes");
2148e3c97c2cSBryan Venteicher 	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "error", CTLFLAG_RD,
2149e3c97c2cSBryan Venteicher 	    &txstats->error, "Errors");
2150e3c97c2cSBryan Venteicher 	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "discard", CTLFLAG_RD,
2151e3c97c2cSBryan Venteicher 	    &txstats->discard, "Discards");
2152e3c97c2cSBryan Venteicher }
2153e3c97c2cSBryan Venteicher 
/*
 * Create the sysctl subtree for one RX queue ("rxqN") and populate its
 * "hstats" node with the host-maintained UPT1 receive statistics,
 * mirroring vmxnet3_setup_txq_sysctl() for the transmit side.
 */
2154e3c97c2cSBryan Venteicher static void
2155e3c97c2cSBryan Venteicher vmxnet3_setup_rxq_sysctl(struct vmxnet3_rxqueue *rxq,
2156e3c97c2cSBryan Venteicher     struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child)
2157e3c97c2cSBryan Venteicher {
2158e3c97c2cSBryan Venteicher 	struct sysctl_oid *node, *rxsnode;
2159e3c97c2cSBryan Venteicher 	struct sysctl_oid_list *list, *rxslist;
2160e3c97c2cSBryan Venteicher 	struct UPT1_RxStats *rxstats;
2161e3c97c2cSBryan Venteicher 	char namebuf[16];
2162e3c97c2cSBryan Venteicher 
2163e3c97c2cSBryan Venteicher 	rxstats = &rxq->vxrxq_rs->stats;
2164e3c97c2cSBryan Venteicher 
2165e3c97c2cSBryan Venteicher 	snprintf(namebuf, sizeof(namebuf), "rxq%d", rxq->vxrxq_id);
2166e3c97c2cSBryan Venteicher 	node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD,
2167e3c97c2cSBryan Venteicher 	    NULL, "Receive Queue");
2168e3c97c2cSBryan Venteicher 	rxq->vxrxq_sysctl = list = SYSCTL_CHILDREN(node);
2169e3c97c2cSBryan Venteicher 
2170e3c97c2cSBryan Venteicher 	/*
2171*8f82136aSPatrick Kelsey 	 * Add statistics reported by the host. These are updated by the
2172*8f82136aSPatrick Kelsey 	 * iflib txq timer on txq 0.
2173e3c97c2cSBryan Venteicher 	 */
2174e3c97c2cSBryan Venteicher 	rxsnode = SYSCTL_ADD_NODE(ctx, list, OID_AUTO, "hstats", CTLFLAG_RD,
2175e3c97c2cSBryan Venteicher 	    NULL, "Host Statistics");
2176e3c97c2cSBryan Venteicher 	rxslist = SYSCTL_CHILDREN(rxsnode);
2177e3c97c2cSBryan Venteicher 	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "lro_packets", CTLFLAG_RD,
2178e3c97c2cSBryan Venteicher 	    &rxstats->LRO_packets, "LRO packets");
2179e3c97c2cSBryan Venteicher 	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "lro_bytes", CTLFLAG_RD,
2180e3c97c2cSBryan Venteicher 	    &rxstats->LRO_bytes, "LRO bytes");
2181e3c97c2cSBryan Venteicher 	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "ucast_packets", CTLFLAG_RD,
2182e3c97c2cSBryan Venteicher 	    &rxstats->ucast_packets, "Unicast packets");
2183e3c97c2cSBryan Venteicher 	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "unicast_bytes", CTLFLAG_RD,
2184e3c97c2cSBryan Venteicher 	    &rxstats->ucast_bytes, "Unicast bytes");
2185e3c97c2cSBryan Venteicher 	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "mcast_packets", CTLFLAG_RD,
2186e3c97c2cSBryan Venteicher 	    &rxstats->mcast_packets, "Multicast packets");
2187e3c97c2cSBryan Venteicher 	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "mcast_bytes", CTLFLAG_RD,
2188e3c97c2cSBryan Venteicher 	    &rxstats->mcast_bytes, "Multicast bytes");
2189e3c97c2cSBryan Venteicher 	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "bcast_packets", CTLFLAG_RD,
2190e3c97c2cSBryan Venteicher 	    &rxstats->bcast_packets, "Broadcast packets");
2191e3c97c2cSBryan Venteicher 	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "bcast_bytes", CTLFLAG_RD,
2192e3c97c2cSBryan Venteicher 	    &rxstats->bcast_bytes, "Broadcast bytes");
2193e3c97c2cSBryan Venteicher 	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "nobuffer", CTLFLAG_RD,
2194e3c97c2cSBryan Venteicher 	    &rxstats->nobuffer, "No buffer");
2195e3c97c2cSBryan Venteicher 	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "error", CTLFLAG_RD,
2196e3c97c2cSBryan Venteicher 	    &rxstats->error, "Errors");
2197e3c97c2cSBryan Venteicher }
2198e3c97c2cSBryan Venteicher 
/*
 * Add a read-only "debug" sysctl node under every TX and RX queue's
 * subtree, exposing the driver's ring bookkeeping (next index, ring
 * size, generation bit) for the command and completion rings.
 */
2199e3c97c2cSBryan Venteicher static void
2200e3c97c2cSBryan Venteicher vmxnet3_setup_debug_sysctl(struct vmxnet3_softc *sc,
2201e3c97c2cSBryan Venteicher     struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child)
2202e3c97c2cSBryan Venteicher {
2203*8f82136aSPatrick Kelsey 	if_softc_ctx_t scctx;
2204e3c97c2cSBryan Venteicher 	struct sysctl_oid *node;
2205e3c97c2cSBryan Venteicher 	struct sysctl_oid_list *list;
2206e3c97c2cSBryan Venteicher 	int i;
2207e3c97c2cSBryan Venteicher 
2208*8f82136aSPatrick Kelsey 	scctx = sc->vmx_scctx;
2209*8f82136aSPatrick Kelsey 
2210*8f82136aSPatrick Kelsey 	for (i = 0; i < scctx->isc_ntxqsets; i++) {
2211e3c97c2cSBryan Venteicher 		struct vmxnet3_txqueue *txq = &sc->vmx_txq[i];
2212e3c97c2cSBryan Venteicher 
2213e3c97c2cSBryan Venteicher 		node = SYSCTL_ADD_NODE(ctx, txq->vxtxq_sysctl, OID_AUTO,
2214e3c97c2cSBryan Venteicher 		    "debug", CTLFLAG_RD, NULL, "");
2215e3c97c2cSBryan Venteicher 		list = SYSCTL_CHILDREN(node);
2216e3c97c2cSBryan Venteicher 
2217e3c97c2cSBryan Venteicher 		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd_next", CTLFLAG_RD,
2218e3c97c2cSBryan Venteicher 		    &txq->vxtxq_cmd_ring.vxtxr_next, 0, "");
2219e3c97c2cSBryan Venteicher 		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd_ndesc", CTLFLAG_RD,
2220e3c97c2cSBryan Venteicher 		    &txq->vxtxq_cmd_ring.vxtxr_ndesc, 0, "");
2221e3c97c2cSBryan Venteicher 		SYSCTL_ADD_INT(ctx, list, OID_AUTO, "cmd_gen", CTLFLAG_RD,
2222e3c97c2cSBryan Venteicher 		    &txq->vxtxq_cmd_ring.vxtxr_gen, 0, "");
2223e3c97c2cSBryan Venteicher 		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_next", CTLFLAG_RD,
2224e3c97c2cSBryan Venteicher 		    &txq->vxtxq_comp_ring.vxcr_next, 0, "");
2225e3c97c2cSBryan Venteicher 		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_ndesc", CTLFLAG_RD,
2226e3c97c2cSBryan Venteicher 		    &txq->vxtxq_comp_ring.vxcr_ndesc, 0,"");
2227e3c97c2cSBryan Venteicher 		SYSCTL_ADD_INT(ctx, list, OID_AUTO, "comp_gen", CTLFLAG_RD,
2228e3c97c2cSBryan Venteicher 		    &txq->vxtxq_comp_ring.vxcr_gen, 0, "");
2229e3c97c2cSBryan Venteicher 	}
2230e3c97c2cSBryan Venteicher 
2231*8f82136aSPatrick Kelsey 	for (i = 0; i < scctx->isc_nrxqsets; i++) {
2232e3c97c2cSBryan Venteicher 		struct vmxnet3_rxqueue *rxq = &sc->vmx_rxq[i];
2233e3c97c2cSBryan Venteicher 
2234e3c97c2cSBryan Venteicher 		node = SYSCTL_ADD_NODE(ctx, rxq->vxrxq_sysctl, OID_AUTO,
2235e3c97c2cSBryan Venteicher 		    "debug", CTLFLAG_RD, NULL, "");
2236e3c97c2cSBryan Venteicher 		list = SYSCTL_CHILDREN(node);
2237e3c97c2cSBryan Venteicher 
2238e3c97c2cSBryan Venteicher 		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd0_ndesc", CTLFLAG_RD,
2239e3c97c2cSBryan Venteicher 		    &rxq->vxrxq_cmd_ring[0].vxrxr_ndesc, 0, "");
2240e3c97c2cSBryan Venteicher 		SYSCTL_ADD_INT(ctx, list, OID_AUTO, "cmd0_gen", CTLFLAG_RD,
2241e3c97c2cSBryan Venteicher 		    &rxq->vxrxq_cmd_ring[0].vxrxr_gen, 0, "");
2242e3c97c2cSBryan Venteicher 		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd1_ndesc", CTLFLAG_RD,
2243e3c97c2cSBryan Venteicher 		    &rxq->vxrxq_cmd_ring[1].vxrxr_ndesc, 0, "");
2244e3c97c2cSBryan Venteicher 		SYSCTL_ADD_INT(ctx, list, OID_AUTO, "cmd1_gen", CTLFLAG_RD,
2245e3c97c2cSBryan Venteicher 		    &rxq->vxrxq_cmd_ring[1].vxrxr_gen, 0, "");
2246e3c97c2cSBryan Venteicher 		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_ndesc", CTLFLAG_RD,
2247e3c97c2cSBryan Venteicher 		    &rxq->vxrxq_comp_ring.vxcr_ndesc, 0,"");
2248e3c97c2cSBryan Venteicher 		SYSCTL_ADD_INT(ctx, list, OID_AUTO, "comp_gen", CTLFLAG_RD,
2249e3c97c2cSBryan Venteicher 		    &rxq->vxrxq_comp_ring.vxcr_gen, 0, "");
2250e3c97c2cSBryan Venteicher 	}
2251e3c97c2cSBryan Venteicher }
2252e3c97c2cSBryan Venteicher 
/*
 * Build the per-queue sysctl trees for all TX and RX queue sets, then
 * attach the debug nodes beneath them.
 */
2253e3c97c2cSBryan Venteicher static void
2254e3c97c2cSBryan Venteicher vmxnet3_setup_queue_sysctl(struct vmxnet3_softc *sc,
2255e3c97c2cSBryan Venteicher     struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child)
2256e3c97c2cSBryan Venteicher {
2257*8f82136aSPatrick Kelsey 	if_softc_ctx_t scctx;
2258e3c97c2cSBryan Venteicher 	int i;
2259e3c97c2cSBryan Venteicher 
2260*8f82136aSPatrick Kelsey 	scctx = sc->vmx_scctx;
2261*8f82136aSPatrick Kelsey 
2262*8f82136aSPatrick Kelsey 	for (i = 0; i < scctx->isc_ntxqsets; i++)
2263e3c97c2cSBryan Venteicher 		vmxnet3_setup_txq_sysctl(&sc->vmx_txq[i], ctx, child);
2264*8f82136aSPatrick Kelsey 	for (i = 0; i < scctx->isc_nrxqsets; i++)
2265e3c97c2cSBryan Venteicher 		vmxnet3_setup_rxq_sysctl(&sc->vmx_rxq[i], ctx, child);
2266e3c97c2cSBryan Venteicher 
2267e3c97c2cSBryan Venteicher 	vmxnet3_setup_debug_sysctl(sc, ctx, child);
2268e3c97c2cSBryan Venteicher }
2269e3c97c2cSBryan Venteicher 
/*
 * Top-level sysctl setup: anchor the driver's queue sysctl trees under
 * the device's sysctl node.
 */
2270e3c97c2cSBryan Venteicher static void
2271e3c97c2cSBryan Venteicher vmxnet3_setup_sysctl(struct vmxnet3_softc *sc)
2272e3c97c2cSBryan Venteicher {
2273e3c97c2cSBryan Venteicher 	device_t dev;
2274e3c97c2cSBryan Venteicher 	struct sysctl_ctx_list *ctx;
2275e3c97c2cSBryan Venteicher 	struct sysctl_oid *tree;
2276e3c97c2cSBryan Venteicher 	struct sysctl_oid_list *child;
2277e3c97c2cSBryan Venteicher 
2278e3c97c2cSBryan Venteicher 	dev = sc->vmx_dev;
2279e3c97c2cSBryan Venteicher 	ctx = device_get_sysctl_ctx(dev);
2280e3c97c2cSBryan Venteicher 	tree = device_get_sysctl_tree(dev);
2281e3c97c2cSBryan Venteicher 	child = SYSCTL_CHILDREN(tree);
2282e3c97c2cSBryan Venteicher 
2283e3c97c2cSBryan Venteicher 	vmxnet3_setup_queue_sysctl(sc, ctx, child);
2284e3c97c2cSBryan Venteicher }
2285e3c97c2cSBryan Venteicher 
/* Write a 32-bit value to a BAR0 (doorbell/IMASK) register. */
2286e3c97c2cSBryan Venteicher static void
2287e3c97c2cSBryan Venteicher vmxnet3_write_bar0(struct vmxnet3_softc *sc, bus_size_t r, uint32_t v)
2288e3c97c2cSBryan Venteicher {
2289e3c97c2cSBryan Venteicher 
2290e3c97c2cSBryan Venteicher 	bus_space_write_4(sc->vmx_iot0, sc->vmx_ioh0, r, v);
2291e3c97c2cSBryan Venteicher }
2292e3c97c2cSBryan Venteicher 
/* Read a 32-bit value from a BAR1 (command/config) register. */
2293e3c97c2cSBryan Venteicher static uint32_t
2294e3c97c2cSBryan Venteicher vmxnet3_read_bar1(struct vmxnet3_softc *sc, bus_size_t r)
2295e3c97c2cSBryan Venteicher {
2296e3c97c2cSBryan Venteicher 
2297e3c97c2cSBryan Venteicher 	return (bus_space_read_4(sc->vmx_iot1, sc->vmx_ioh1, r));
2298e3c97c2cSBryan Venteicher }
2299e3c97c2cSBryan Venteicher 
/* Write a 32-bit value to a BAR1 (command/config) register. */
2300e3c97c2cSBryan Venteicher static void
2301e3c97c2cSBryan Venteicher vmxnet3_write_bar1(struct vmxnet3_softc *sc, bus_size_t r, uint32_t v)
2302e3c97c2cSBryan Venteicher {
2303e3c97c2cSBryan Venteicher 
2304e3c97c2cSBryan Venteicher 	bus_space_write_4(sc->vmx_iot1, sc->vmx_ioh1, r, v);
2305e3c97c2cSBryan Venteicher }
2306e3c97c2cSBryan Venteicher 
/* Issue a device command by writing it to the BAR1 CMD register. */
2307e3c97c2cSBryan Venteicher static void
2308e3c97c2cSBryan Venteicher vmxnet3_write_cmd(struct vmxnet3_softc *sc, uint32_t cmd)
2309e3c97c2cSBryan Venteicher {
2310e3c97c2cSBryan Venteicher 
2311e3c97c2cSBryan Venteicher 	vmxnet3_write_bar1(sc, VMXNET3_BAR1_CMD, cmd);
2312e3c97c2cSBryan Venteicher }
2313e3c97c2cSBryan Venteicher 
/*
 * Issue a command to the device and return the 32-bit result read back
 * from the BAR1 command register.  The bus-space barrier keeps the
 * command write ordered before the result read.
 */
static uint32_t
vmxnet3_read_cmd(struct vmxnet3_softc *sc, uint32_t cmd)
{

	vmxnet3_write_cmd(sc, cmd);
	bus_space_barrier(sc->vmx_iot1, sc->vmx_ioh1, 0, 0,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	return (vmxnet3_read_bar1(sc, VMXNET3_BAR1_CMD));
}
2323e3c97c2cSBryan Venteicher 
/*
 * Unmask interrupt vector 'irq' by writing 0 to its BAR0 interrupt
 * mask register.
 */
static void
vmxnet3_enable_intr(struct vmxnet3_softc *sc, int irq)
{

	vmxnet3_write_bar0(sc, VMXNET3_BAR0_IMASK(irq), 0);
}
2330e3c97c2cSBryan Venteicher 
/*
 * Mask interrupt vector 'irq' by writing 1 to its BAR0 interrupt mask
 * register.
 */
static void
vmxnet3_disable_intr(struct vmxnet3_softc *sc, int irq)
{

	vmxnet3_write_bar0(sc, VMXNET3_BAR0_IMASK(irq), 1);
}
2337e3c97c2cSBryan Venteicher 
/*
 * iflib callback to enable interrupts for TX queue 'qid'.  This driver
 * does not use TX interrupts, so this is deliberately a no-op that
 * reports success; 'ctx' and 'qid' are unused.
 */
static int
vmxnet3_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
{
	/* Not using interrupts for TX */
	return (0);
}
2344*8f82136aSPatrick Kelsey 
/*
 * iflib callback to enable interrupts for RX queue 'qid': unmask the
 * interrupt vector assigned to that queue.  Always succeeds.
 */
static int
vmxnet3_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
{
	struct vmxnet3_softc *sc;

	sc = iflib_get_softc(ctx);
	vmxnet3_enable_intr(sc, sc->vmx_rxq[qid].vxrxq_intr_idx);
	return (0);
}
2354*8f82136aSPatrick Kelsey 
/*
 * iflib callback to enable the link-state/event interrupt: unmask the
 * vector assigned to device events.
 */
static void
vmxnet3_link_intr_enable(if_ctx_t ctx)
{
	struct vmxnet3_softc *sc;

	sc = iflib_get_softc(ctx);
	vmxnet3_enable_intr(sc, sc->vmx_event_intr_idx);
}
2363*8f82136aSPatrick Kelsey 
2364*8f82136aSPatrick Kelsey static void
2365*8f82136aSPatrick Kelsey vmxnet3_intr_enable_all(if_ctx_t ctx)
2366*8f82136aSPatrick Kelsey {
2367*8f82136aSPatrick Kelsey 	struct vmxnet3_softc *sc;
2368*8f82136aSPatrick Kelsey 	if_softc_ctx_t scctx;
2369e3c97c2cSBryan Venteicher 	int i;
2370e3c97c2cSBryan Venteicher 
2371*8f82136aSPatrick Kelsey 	sc = iflib_get_softc(ctx);
2372*8f82136aSPatrick Kelsey 	scctx = sc->vmx_scctx;
2373e3c97c2cSBryan Venteicher 	sc->vmx_ds->ictrl &= ~VMXNET3_ICTRL_DISABLE_ALL;
2374*8f82136aSPatrick Kelsey 	for (i = 0; i < scctx->isc_vectors; i++)
2375e3c97c2cSBryan Venteicher 		vmxnet3_enable_intr(sc, i);
2376e3c97c2cSBryan Venteicher }
2377e3c97c2cSBryan Venteicher 
2378e3c97c2cSBryan Venteicher static void
2379*8f82136aSPatrick Kelsey vmxnet3_intr_disable_all(if_ctx_t ctx)
2380e3c97c2cSBryan Venteicher {
2381*8f82136aSPatrick Kelsey 	struct vmxnet3_softc *sc;
2382e3c97c2cSBryan Venteicher 	int i;
2383e3c97c2cSBryan Venteicher 
2384*8f82136aSPatrick Kelsey 	sc = iflib_get_softc(ctx);
2385*8f82136aSPatrick Kelsey 	/*
2386*8f82136aSPatrick Kelsey 	 * iflib may invoke this routine before vmxnet3_attach_post() has
2387*8f82136aSPatrick Kelsey 	 * run, which is before the top level shared data area is
2388*8f82136aSPatrick Kelsey 	 * initialized and the device made aware of it.
2389*8f82136aSPatrick Kelsey 	 */
2390*8f82136aSPatrick Kelsey 	if (sc->vmx_ds != NULL)
2391e3c97c2cSBryan Venteicher 		sc->vmx_ds->ictrl |= VMXNET3_ICTRL_DISABLE_ALL;
2392*8f82136aSPatrick Kelsey 	for (i = 0; i < VMXNET3_MAX_INTRS; i++)
2393e3c97c2cSBryan Venteicher 		vmxnet3_disable_intr(sc, i);
2394e3c97c2cSBryan Venteicher }
2395e3c97c2cSBryan Venteicher 
2396e3c97c2cSBryan Venteicher /*
2397e3c97c2cSBryan Venteicher  * Since this is a purely paravirtualized device, we do not have
2398e3c97c2cSBryan Venteicher  * to worry about DMA coherency. But at times, we must make sure
2399e3c97c2cSBryan Venteicher  * both the compiler and CPU do not reorder memory operations.
2400e3c97c2cSBryan Venteicher  */
2401e3c97c2cSBryan Venteicher static inline void
2402e3c97c2cSBryan Venteicher vmxnet3_barrier(struct vmxnet3_softc *sc, vmxnet3_barrier_t type)
2403e3c97c2cSBryan Venteicher {
2404e3c97c2cSBryan Venteicher 
2405e3c97c2cSBryan Venteicher 	switch (type) {
2406e3c97c2cSBryan Venteicher 	case VMXNET3_BARRIER_RD:
2407e3c97c2cSBryan Venteicher 		rmb();
2408e3c97c2cSBryan Venteicher 		break;
2409e3c97c2cSBryan Venteicher 	case VMXNET3_BARRIER_WR:
2410e3c97c2cSBryan Venteicher 		wmb();
2411e3c97c2cSBryan Venteicher 		break;
2412e3c97c2cSBryan Venteicher 	case VMXNET3_BARRIER_RDWR:
2413e3c97c2cSBryan Venteicher 		mb();
2414e3c97c2cSBryan Venteicher 		break;
2415e3c97c2cSBryan Venteicher 	default:
2416e3c97c2cSBryan Venteicher 		panic("%s: bad barrier type %d", __func__, type);
2417e3c97c2cSBryan Venteicher 	}
2418e3c97c2cSBryan Venteicher }
2419