/*
 * Copyright (C) 2011 Luigi Rizzo. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * $FreeBSD$
 * $Id: if_re_netmap.h 9662 2011-11-16 13:18:06Z luigi $
 *
 * netmap support for if_re
 */

#include <net/netmap.h>
#include <sys/selinfo.h>
#include <vm/vm.h>
#include <vm/pmap.h>	/* vtophys ? */
#include <dev/netmap/netmap_kern.h>

static int re_netmap_reg(struct ifnet *, int onoff);
static int re_netmap_txsync(void *, u_int, int);
static int re_netmap_rxsync(void *, u_int, int);
static void re_netmap_lock_wrapper(void *, int, u_int);

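/*
 * The attach routine only fills a netmap_adapter descriptor
 * (ring sizes, buffer size, sync/lock/register callbacks)
 * and hands it to the netmap core via netmap_attach().
 */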
static void
re_netmap_attach(struct rl_softc *sc)
{
	struct netmap_adapter na;

	bzero(&na, sizeof(na));

	na.ifp = sc->rl_ifp;
	na.separate_locks = 0;
	na.num_tx_desc = sc->rl_ldata.rl_tx_desc_cnt;
	na.num_rx_desc = sc->rl_ldata.rl_rx_desc_cnt;
	na.nm_txsync = re_netmap_txsync;
	na.nm_rxsync = re_netmap_rxsync;
	na.nm_lock = re_netmap_lock_wrapper;
	na.nm_register = re_netmap_reg;
	na.buff_size = MCLBYTES;
	netmap_attach(&na, 1);
}


/*
 * Wrapper to export locks to the generic netmap code.
 * This driver has no separate tx/rx locks, only the core lock is used.
 */
static void
re_netmap_lock_wrapper(void *_a, int what, u_int queueid)
{
	struct rl_softc *adapter = _a;

	switch (what) {
	case NETMAP_CORE_LOCK:
		RL_LOCK(adapter);
		break;
	case NETMAP_CORE_UNLOCK:
		RL_UNLOCK(adapter);
		break;

	case NETMAP_TX_LOCK:
	case NETMAP_RX_LOCK:
	case NETMAP_TX_UNLOCK:
	case NETMAP_RX_UNLOCK:
		D("invalid lock call %d, no tx/rx locks here", what);
		break;
	}
}


/*
 * Support for netmap register/unregister. We are already under core lock.
 * Only called on the first register or the last unregister.
 */
static int
re_netmap_reg(struct ifnet *ifp, int onoff)
{
	struct rl_softc *adapter = ifp->if_softc;
	struct netmap_adapter *na = NA(ifp);
	int error = 0;

	if (!na)
		return EINVAL;
	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	re_stop(adapter);

	if (onoff) {
		ifp->if_capenable |= IFCAP_NETMAP;

		/* save if_transmit so it can be restored on unregister */
		na->if_transmit = ifp->if_transmit;
		/* XXX if_start and if_qflush ??? */
		ifp->if_transmit = netmap_start;

		re_init_locked(adapter);

		if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) == 0) {
			error = ENOMEM;
			goto fail;
		}
	} else {
fail:
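		/* a failed enable also comes here, to return to normal mode */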
		/* restore if_transmit */
		ifp->if_transmit = na->if_transmit;
		ifp->if_capenable &= ~IFCAP_NETMAP;
		re_init_locked(adapter);	/* also enables intr */
	}
	return (error);
}


/*
 * Reconcile kernel and user view of the transmit ring.
 *
 * Userspace has filled tx slots up to cur (excluded).
 * The last unused slot previously known to the kernel was nr_hwcur,
 * and the last interrupt reported nr_hwavail slots available
 * (using the special value -1 to indicate an idle transmit ring).
 * The function must first update avail to what the kernel
 * knows (translating the -1 to nkr_num_slots - 1),
 * subtract the newly used slots (cur - nr_hwcur)
 * from both avail and nr_hwavail, and set nr_hwcur = cur,
 * issuing a dmamap_sync on all slots.
 */
static int
re_netmap_txsync(void *a, u_int ring_nr, int do_lock)
{
	struct rl_softc *sc = a;
	struct rl_txdesc *txd = sc->rl_ldata.rl_tx_desc;
	struct netmap_adapter *na = NA(sc->rl_ifp);
	struct netmap_kring *kring = &na->tx_rings[ring_nr];
	struct netmap_ring *ring = kring->ring;
	int j, k, n, lim = kring->nkr_num_slots - 1;

	k = ring->cur;
	if ((kring->nr_kflags & NR_REINIT) || k > lim)
		return netmap_ring_reinit(kring);

	if (do_lock)
		RL_LOCK(sc);

	/* Sync the TX descriptor list */
	bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
	    sc->rl_ldata.rl_tx_list_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* record completed transmissions */
	for (n = 0, j = sc->rl_ldata.rl_tx_considx;
	    j != sc->rl_ldata.rl_tx_prodidx;
	    n++, j = RL_TX_DESC_NXT(sc, j)) {
		uint32_t cmdstat =
			le32toh(sc->rl_ldata.rl_tx_list[j].rl_cmdstat);
		if (cmdstat & RL_TDESC_STAT_OWN)
			break;
	}
	if (n > 0) {
		sc->rl_ldata.rl_tx_considx = j;
		sc->rl_ldata.rl_tx_free += n;
		kring->nr_hwavail += n;
	}

	/* update avail to what the hardware knows */
	ring->avail = kring->nr_hwavail;

	/* we trust prodidx, not hwcur */
	j = kring->nr_hwcur = sc->rl_ldata.rl_tx_prodidx;
	if (j != k) {	/* we have new packets to send */
		n = 0;
		while (j != k) {
			struct netmap_slot *slot = &ring->slot[j];
			struct rl_desc *desc = &sc->rl_ldata.rl_tx_list[j];
			int cmd = slot->len | RL_TDESC_CMD_EOF |
				RL_TDESC_CMD_OWN | RL_TDESC_CMD_SOF;
			void *addr = NMB(slot);
			int len = slot->len;

			if (addr == netmap_buffer_base || len > NETMAP_BUF_SIZE) {
				if (do_lock)
					RL_UNLOCK(sc);
				return netmap_ring_reinit(kring);
			}

			if (j == lim)	/* mark end of ring */
				cmd |= RL_TDESC_CMD_EOR;

			if (slot->flags & NS_BUF_CHANGED) {
				uint64_t paddr = vtophys(addr);
				desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr));
				desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr));
				/* buffer has changed, unload and reload map */
				netmap_reload_map(sc->rl_ldata.rl_tx_mtag,
					txd[j].tx_dmamap, addr, na->buff_size);
				slot->flags &= ~NS_BUF_CHANGED;
			}
			slot->flags &= ~NS_REPORT;
			desc->rl_cmdstat = htole32(cmd);
			bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag,
				txd[j].tx_dmamap, BUS_DMASYNC_PREWRITE);
			j = (j == lim) ? 0 : j + 1;
			n++;
		}
		sc->rl_ldata.rl_tx_prodidx = kring->nr_hwcur = ring->cur;

		/* decrease avail by number of sent packets */
		ring->avail -= n;
		kring->nr_hwavail = ring->avail;

		bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
		    sc->rl_ldata.rl_tx_list_map,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

		/* start ? */
		CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
	}
	if (do_lock)
		RL_UNLOCK(sc);
	return 0;
}


/*
 * Reconcile kernel and user view of the receive ring.
 *
 * Userspace has read rx slots up to cur (excluded).
 * The last unread slot previously known to the kernel was nr_hwcur,
 * and the last interrupt reported nr_hwavail slots available.
 * We must subtract the newly consumed slots (cur - nr_hwcur)
 * from nr_hwavail, clear the descriptors for the next read,
 * tell the hardware that they are available, and set
 * nr_hwcur = cur and avail = nr_hwavail,
 * issuing a dmamap_sync on all slots.
 */
static int
re_netmap_rxsync(void *a, u_int ring_nr, int do_lock)
{
	struct rl_softc *sc = a;
	struct rl_rxdesc *rxd = sc->rl_ldata.rl_rx_desc;
	struct netmap_adapter *na = NA(sc->rl_ifp);
	struct netmap_kring *kring = &na->rx_rings[ring_nr];
	struct netmap_ring *ring = kring->ring;
	int j, k, n, lim = kring->nkr_num_slots - 1;

	k = ring->cur;
	if ((kring->nr_kflags & NR_REINIT) || k > lim)
		return netmap_ring_reinit(kring);

	if (do_lock)
		RL_LOCK(sc);
	/* XXX check sync modes */
	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
	    sc->rl_ldata.rl_rx_list_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * The device uses all the buffers in the ring, so we need
	 * another termination condition in addition to RL_RDESC_STAT_OWN
	 * cleared (all buffers could have it cleared). The easiest one
	 * is to limit the amount of data reported up to 'lim'.
	 */
	j = sc->rl_ldata.rl_rx_prodidx;
	for (n = kring->nr_hwavail; n < lim; n++) {
		struct rl_desc *cur_rx = &sc->rl_ldata.rl_rx_list[j];
		uint32_t rxstat = le32toh(cur_rx->rl_cmdstat);
		uint32_t total_len;

		if ((rxstat & RL_RDESC_STAT_OWN) != 0)
			break;
		total_len = rxstat & sc->rl_rxlenmask;
		/* XXX subtract crc */
		total_len = (total_len < 4) ? 0 : total_len - 4;
		kring->ring->slot[j].len = total_len;
		/* sync was in re_newbuf() */
		bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
		    rxd[j].rx_dmamap, BUS_DMASYNC_POSTREAD);
		j = RL_RX_DESC_NXT(sc, j);
	}
	if (n != kring->nr_hwavail) {
		sc->rl_ldata.rl_rx_prodidx = j;
		sc->rl_ifp->if_ipackets += n - kring->nr_hwavail;
		kring->nr_hwavail = n;
	}

	/*
	 * Skip past packets that userspace has already processed,
	 * making their buffers available again for reception:
	 * advance nr_hwcur and issue a bus_dmamap_sync on the
	 * buffers so it is safe to write to them,
	 * and decrease nr_hwavail accordingly.
	 */
	j = kring->nr_hwcur;
	if (j != k) {	/* userspace has read some packets. */
		n = 0;
		while (j != k) {
			struct netmap_slot *slot = ring->slot + j;
			struct rl_desc *desc = &sc->rl_ldata.rl_rx_list[j];
			int cmd = na->buff_size | RL_RDESC_CMD_OWN;
			void *addr = NMB(slot);

			if (addr == netmap_buffer_base) { /* bad buf */
				if (do_lock)
					RL_UNLOCK(sc);
				return netmap_ring_reinit(kring);
			}

			if (j == lim)	/* mark end of ring */
				cmd |= RL_RDESC_CMD_EOR;

			desc->rl_cmdstat = htole32(cmd);
			slot->flags &= ~NS_REPORT;
			if (slot->flags & NS_BUF_CHANGED) {
				uint64_t paddr = vtophys(addr);
				desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr));
				desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr));
				netmap_reload_map(sc->rl_ldata.rl_rx_mtag,
					rxd[j].rx_dmamap, addr, na->buff_size);
				slot->flags &= ~NS_BUF_CHANGED;
			}
			bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
				rxd[j].rx_dmamap, BUS_DMASYNC_PREREAD);
			j = (j == lim) ? 0 : j + 1;
			n++;
		}
		kring->nr_hwavail -= n;
		kring->nr_hwcur = k;

		/* Flush the RX DMA ring */
		bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
		    sc->rl_ldata.rl_rx_list_map,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	/* tell userspace that there are new packets */
	ring->avail = kring->nr_hwavail;
	if (do_lock)
		RL_UNLOCK(sc);
	return 0;
}

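/*
 * Initialize the tx ring for netmap mode: overwrite the buffer
 * addresses in the descriptors with those of the netmap buffers
 * and load the corresponding dmamaps.
 * This is a no-op unless the interface is in netmap mode.
 */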
static void
re_netmap_tx_init(struct rl_softc *sc)
{
	struct rl_txdesc *txd;
	struct rl_desc *desc;
	int i;
	struct netmap_adapter *na = NA(sc->rl_ifp);
	struct netmap_slot *slot = netmap_reset(na, NR_TX, 0, 0);

	/* slot is NULL if we are not in netmap mode */
	if (!slot)
		return;
	/* in netmap mode, overwrite addresses and maps */
	txd = sc->rl_ldata.rl_tx_desc;
	desc = sc->rl_ldata.rl_tx_list;

	for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
		void *addr = NMB(slot + i);
		uint64_t paddr = vtophys(addr);

		desc[i].rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr));
		desc[i].rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr));
		netmap_load_map(sc->rl_ldata.rl_tx_mtag,
			txd[i].tx_dmamap, addr, na->buff_size);
	}
}

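/*
 * Same as above for the rx ring: point each descriptor at a netmap
 * buffer, set the buffer length and give ownership to the NIC.
 * This is a no-op unless the interface is in netmap mode.
 */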
static void
re_netmap_rx_init(struct rl_softc *sc)
{
	/* slot is NULL if we are not in netmap mode */
	struct netmap_adapter *na = NA(sc->rl_ifp);
	struct netmap_slot *slot = netmap_reset(na, NR_RX, 0, 0);
	struct rl_desc *desc = sc->rl_ldata.rl_rx_list;
	uint32_t cmdstat;
	int i;

	if (!slot)
		return;

	for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
		void *addr = NMB(slot + i);
		uint64_t paddr = vtophys(addr);

		desc[i].rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr));
		desc[i].rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr));
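		/* the low bits of cmdstat hold the receive buffer length */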
		cmdstat = slot[i].len = na->buff_size; // XXX
		if (i == sc->rl_ldata.rl_rx_desc_cnt - 1)
			cmdstat |= RL_RDESC_CMD_EOR;
		desc[i].rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN);

		netmap_reload_map(sc->rl_ldata.rl_rx_mtag,
			sc->rl_ldata.rl_rx_desc[i].rx_dmamap,
			addr, na->buff_size);
	}
}