/*
 * Copyright (C) 2011 Luigi Rizzo. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * $FreeBSD$
 * $Id: if_re_netmap.h 9662 2011-11-16 13:18:06Z luigi $
 *
 * netmap support for if_re
 */
32 
33 #include <net/netmap.h>
34 #include <sys/selinfo.h>
35 #include <vm/vm.h>
36 #include <vm/pmap.h>    /* vtophys ? */
37 #include <dev/netmap/netmap_kern.h>
38 
39 static int re_netmap_reg(struct ifnet *, int onoff);
40 static int re_netmap_txsync(void *, u_int, int);
41 static int re_netmap_rxsync(void *, u_int, int);
42 static void re_netmap_lock_wrapper(void *, int, u_int);
43 
static void
re_netmap_attach(struct rl_softc *sc)
{
	struct netmap_adapter na;

	bzero(&na, sizeof(na));

	na.ifp = sc->rl_ifp;
	na.separate_locks = 0;
	na.num_tx_desc = sc->rl_ldata.rl_tx_desc_cnt;
	na.num_rx_desc = sc->rl_ldata.rl_rx_desc_cnt;
	na.nm_txsync = re_netmap_txsync;
	na.nm_rxsync = re_netmap_rxsync;
	na.nm_lock = re_netmap_lock_wrapper;
	na.nm_register = re_netmap_reg;
	na.buff_size = MCLBYTES;
	netmap_attach(&na, 1);
}
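
/*
 * A minimal sketch (illustrative only, not compiled) of how the hook
 * above is assumed to be wired into the driver: re_attach() in if_re.c
 * would call re_netmap_attach() near the end of attach, once rl_ifp
 * and the descriptor counts are valid.
 */
#if 0
	/* ... at the end of re_attach(), before returning success ... */
	re_netmap_attach(sc);
#endif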


/*
 * Wrapper to export locks to the generic netmap code.
 * This driver has a single core lock and no separate tx/rx locks,
 * so the tx/rx variants should never be requested.
 */
static void
re_netmap_lock_wrapper(void *_a, int what, u_int queueid)
{
	struct rl_softc *adapter = _a;

	switch (what) {
	case NETMAP_CORE_LOCK:
		RL_LOCK(adapter);
		break;
	case NETMAP_CORE_UNLOCK:
		RL_UNLOCK(adapter);
		break;

	case NETMAP_TX_LOCK:
	case NETMAP_RX_LOCK:
	case NETMAP_TX_UNLOCK:
	case NETMAP_RX_UNLOCK:
		D("invalid lock call %d, no tx/rx locks here", what);
		break;
	}
}
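
/*
 * Illustrative only (not compiled): since separate_locks is 0 in
 * re_netmap_attach(), the generic code is expected to request only the
 * core lock; as the cast in the wrapper implies, the opaque argument
 * is the driver softc, e.g.:
 */
#if 0
	na->nm_lock(ifp->if_softc, NETMAP_CORE_LOCK, 0);
	/* ... txsync/rxsync critical section ... */
	na->nm_lock(ifp->if_softc, NETMAP_CORE_UNLOCK, 0);
#endif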


/*
 * Support for netmap register/unregister. We are already under the
 * core lock. Only called on the first register or the last unregister.
 */
static int
re_netmap_reg(struct ifnet *ifp, int onoff)
{
	struct rl_softc *adapter = ifp->if_softc;
	struct netmap_adapter *na = NA(ifp);
	int error = 0;

	if (!na)
		return (EINVAL);
	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	re_stop(adapter);

	if (onoff) {
		ifp->if_capenable |= IFCAP_NETMAP;

		/* save if_transmit so it can be restored on unregister */
		na->if_transmit = ifp->if_transmit;
		/* XXX if_start and if_qflush ??? */
		ifp->if_transmit = netmap_start;

		re_init_locked(adapter);

		if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) == 0) {
			error = ENOMEM;
			goto fail;
		}
	} else {
fail:
		/* restore if_transmit */
		ifp->if_transmit = na->if_transmit;
		ifp->if_capenable &= ~IFCAP_NETMAP;
		re_init_locked(adapter);	/* also enables intr */
	}
	return (error);
}
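
/*
 * Illustrative only (not compiled): the calling convention assumed by
 * the comment above. The generic code takes the core lock via nm_lock
 * and invokes nm_register on the first NIOCREGIF and the last close:
 */
#if 0
	na->nm_lock(ifp->if_softc, NETMAP_CORE_LOCK, 0);
	error = na->nm_register(ifp, 1);	/* 0 on the last unregister */
	na->nm_lock(ifp->if_softc, NETMAP_CORE_UNLOCK, 0);
#endif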


/*
 * Reconcile kernel and user view of the transmit ring.
 *
 * Userspace has filled tx slots up to cur (excluded).
 * The last unused slot previously known to the kernel was nr_hwcur,
 * and the last interrupt reported nr_hwavail slots available
 * (using the special value -1 to indicate an idle transmit ring).
 * The function must first update avail to what the kernel
 * knows (translating the -1 to nkr_num_slots - 1),
 * subtract the newly used slots (cur - nr_hwcur, modulo ring size)
 * from both avail and nr_hwavail, and set nr_hwcur = cur,
 * issuing a dmamap_sync on all slots.
 * (See the index-arithmetic sketch after this function.)
 */
static int
re_netmap_txsync(void *a, u_int ring_nr, int do_lock)
{
	struct rl_softc *sc = a;
	struct rl_txdesc *txd = sc->rl_ldata.rl_tx_desc;
	struct netmap_adapter *na = NA(sc->rl_ifp);
	struct netmap_kring *kring = &na->tx_rings[ring_nr];
	struct netmap_ring *ring = kring->ring;
	int j, k, n, lim = kring->nkr_num_slots - 1;

	k = ring->cur;
	if ((kring->nr_kflags & NR_REINIT) || k > lim)
		return netmap_ring_reinit(kring);

	if (do_lock)
		RL_LOCK(sc);

	/* Sync the TX descriptor list */
	bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
	    sc->rl_ldata.rl_tx_list_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* record completed transmissions */
	for (n = 0, j = sc->rl_ldata.rl_tx_considx;
	    j != sc->rl_ldata.rl_tx_prodidx;
	    n++, j = RL_TX_DESC_NXT(sc, j)) {
		uint32_t cmdstat =
			le32toh(sc->rl_ldata.rl_tx_list[j].rl_cmdstat);
		if (cmdstat & RL_TDESC_STAT_OWN)
			break;
	}
	if (n > 0) {
		sc->rl_ldata.rl_tx_considx = j;
		sc->rl_ldata.rl_tx_free += n;
		kring->nr_hwavail += n;
	}

	/* update avail to what the hardware knows */
	ring->avail = kring->nr_hwavail;

	/* we trust prodidx, not hwcur */
	j = kring->nr_hwcur = sc->rl_ldata.rl_tx_prodidx;
	if (j != k) {	/* we have new packets to send */
		n = 0;
		while (j != k) {
			struct netmap_slot *slot = &ring->slot[j];
			struct rl_desc *desc = &sc->rl_ldata.rl_tx_list[j];
			int cmd = slot->len | RL_TDESC_CMD_EOF |
				RL_TDESC_CMD_OWN | RL_TDESC_CMD_SOF;
			void *addr = NMB(slot);
			int len = slot->len;

			if (addr == netmap_buffer_base || len > NETMAP_BUF_SIZE) {
				if (do_lock)
					RL_UNLOCK(sc);
				return netmap_ring_reinit(kring);
			}

			if (j == lim)	/* mark end of ring */
				cmd |= RL_TDESC_CMD_EOR;

			if (slot->flags & NS_BUF_CHANGED) {
				uint64_t paddr = vtophys(addr);
				desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr));
				desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr));
				/* buffer has changed, unload and reload map */
				netmap_reload_map(sc->rl_ldata.rl_tx_mtag,
					txd[j].tx_dmamap, addr, na->buff_size);
				slot->flags &= ~NS_BUF_CHANGED;
			}
			slot->flags &= ~NS_REPORT;
			desc->rl_cmdstat = htole32(cmd);
			bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag,
				txd[j].tx_dmamap, BUS_DMASYNC_PREWRITE);
			j = (j == lim) ? 0 : j + 1;
			n++;
		}
		sc->rl_ldata.rl_tx_prodidx = kring->nr_hwcur = ring->cur;

		/* decrease avail by the number of packets sent */
		ring->avail -= n;
		kring->nr_hwavail = ring->avail;

		bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
		    sc->rl_ldata.rl_tx_list_map,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

		/* start the transmitter */
		CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
	}
	if (do_lock)
		RL_UNLOCK(sc);
	return 0;
}
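
/*
 * Index-arithmetic sketch referenced above (illustrative only, not
 * compiled; the helper name is hypothetical). With lim equal to
 * nkr_num_slots - 1, the number of slots newly filled by userspace is
 * (cur - nr_hwcur) modulo the ring size:
 */
#if 0
static inline u_int
nm_tx_slots_used(u_int hwcur, u_int cur, u_int lim)
{
	return (cur >= hwcur) ? cur - hwcur : cur + lim + 1 - hwcur;
}
/* e.g. lim = 255, hwcur = 250, cur = 3 -> 9 slots newly filled */
#endif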


/*
 * Reconcile kernel and user view of the receive ring.
 *
 * Userspace has read rx slots up to cur (excluded).
 * The last unread slot previously known to the kernel was nr_hwcur,
 * and the last interrupt reported nr_hwavail slots available.
 * We must subtract the newly consumed slots (cur - nr_hwcur)
 * from nr_hwavail, clearing the descriptors for the next
 * read, tell the hardware that they are available,
 * and set nr_hwcur = cur and avail = nr_hwavail,
 * issuing a dmamap_sync on all slots.
 * (See the userspace-side sketch after this function.)
 */
static int
re_netmap_rxsync(void *a, u_int ring_nr, int do_lock)
{
	struct rl_softc *sc = a;
	struct rl_rxdesc *rxd = sc->rl_ldata.rl_rx_desc;
	struct netmap_adapter *na = NA(sc->rl_ifp);
	struct netmap_kring *kring = &na->rx_rings[ring_nr];
	struct netmap_ring *ring = kring->ring;
	int j, k, n, lim = kring->nkr_num_slots - 1;

	k = ring->cur;
	if ((kring->nr_kflags & NR_REINIT) || k > lim)
		return netmap_ring_reinit(kring);

	if (do_lock)
		RL_LOCK(sc);
	/* XXX check sync modes */
	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
	    sc->rl_ldata.rl_rx_list_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	/*
	 * The device uses all the buffers in the ring, so we need
	 * another termination condition in addition to RL_RDESC_STAT_OWN
	 * being cleared (all buffers could have it cleared). The easiest
	 * one is to limit the amount of data reported up to 'lim'.
	 */
	j = sc->rl_ldata.rl_rx_prodidx;
	for (n = kring->nr_hwavail; n < lim; n++) {
		struct rl_desc *cur_rx = &sc->rl_ldata.rl_rx_list[j];
		uint32_t rxstat = le32toh(cur_rx->rl_cmdstat);
		uint32_t total_len;

		if ((rxstat & RL_RDESC_STAT_OWN) != 0)
			break;
		total_len = rxstat & sc->rl_rxlenmask;
		/* XXX subtract crc */
		total_len = (total_len < 4) ? 0 : total_len - 4;
		kring->ring->slot[j].len = total_len;
		/* sync was in re_newbuf() */
		bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
		    rxd[j].rx_dmamap, BUS_DMASYNC_POSTREAD);
		j = RL_RX_DESC_NXT(sc, j);
	}
	if (n != kring->nr_hwavail) {
		sc->rl_ldata.rl_rx_prodidx = j;
		sc->rl_ifp->if_ipackets += n - kring->nr_hwavail;
		kring->nr_hwavail = n;
	}

	/*
	 * Skip past packets that userspace has already processed,
	 * making their buffers available again for reception:
	 * re-init the descriptors, advance nr_hwcur, decrease
	 * nr_hwavail accordingly, and issue a bus_dmamap_sync on the
	 * buffers so the hardware can safely write to them.
	 */
	j = kring->nr_hwcur;
	if (j != k) {	/* userspace has read some packets. */
		n = 0;
		while (j != k) {
			struct netmap_slot *slot = ring->slot + j;
			struct rl_desc *desc = &sc->rl_ldata.rl_rx_list[j];
			int cmd = na->buff_size | RL_RDESC_CMD_OWN;
			void *addr = NMB(slot);

			if (addr == netmap_buffer_base) { /* bad buf */
				if (do_lock)
					RL_UNLOCK(sc);
				return netmap_ring_reinit(kring);
			}

			if (j == lim)	/* mark end of ring */
				cmd |= RL_RDESC_CMD_EOR;

			desc->rl_cmdstat = htole32(cmd);
			slot->flags &= ~NS_REPORT;
			if (slot->flags & NS_BUF_CHANGED) {
				uint64_t paddr = vtophys(addr);
				desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr));
				desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr));
				netmap_reload_map(sc->rl_ldata.rl_rx_mtag,
					rxd[j].rx_dmamap, addr, na->buff_size);
				slot->flags &= ~NS_BUF_CHANGED;
			}
			bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
				rxd[j].rx_dmamap, BUS_DMASYNC_PREREAD);
			j = (j == lim) ? 0 : j + 1;
			n++;
		}
		kring->nr_hwavail -= n;
		kring->nr_hwcur = k;

		/* Flush the RX DMA ring */
		bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
		    sc->rl_ldata.rl_rx_list_map,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	/* tell userspace that there are new packets */
	ring->avail = kring->nr_hwavail;
	if (do_lock)
		RL_UNLOCK(sc);
	return 0;
}
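
/*
 * Userspace-side sketch referenced above (illustrative only, not part
 * of the driver). Assuming the netmap userspace API of this era from
 * <net/netmap_user.h>, an application drains the ring and advances
 * cur/avail so that the next rxsync can recycle the buffers:
 */
#if 0
	struct netmap_ring *ring = NETMAP_RXRING(nifp, 0);

	while (ring->avail > 0) {
		struct netmap_slot *slot = &ring->slot[ring->cur];
		char *buf = NETMAP_BUF(ring, slot->buf_idx);

		/* consume slot->len bytes at buf here */
		ring->cur = NETMAP_RING_NEXT(ring, ring->cur);
		ring->avail--;
	}
#endif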

static void
re_netmap_tx_init(struct rl_softc *sc)
{
	struct rl_txdesc *txd;
	struct rl_desc *desc;
	int i;
	struct netmap_adapter *na = NA(sc->rl_ifp);
	struct netmap_slot *slot = netmap_reset(na, NR_TX, 0, 0);

	/* slot is NULL if we are not in netmap mode */
	if (!slot)
		return;
	/* in netmap mode, overwrite addresses and maps */
	txd = sc->rl_ldata.rl_tx_desc;
	desc = sc->rl_ldata.rl_tx_list;

	for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
		void *addr = NMB(slot + i);
		uint64_t paddr = vtophys(addr);

		desc[i].rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr));
		desc[i].rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr));
		netmap_load_map(sc->rl_ldata.rl_tx_mtag,
			txd[i].tx_dmamap, addr, na->buff_size);
	}
}

static void
re_netmap_rx_init(struct rl_softc *sc)
{
	struct netmap_adapter *na = NA(sc->rl_ifp);
	struct netmap_slot *slot = netmap_reset(na, NR_RX, 0, 0);
	struct rl_desc *desc = sc->rl_ldata.rl_rx_list;
	uint32_t cmdstat;
	int i;

	/* slot is NULL if we are not in netmap mode */
	if (!slot)
		return;

	for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
		void *addr = NMB(slot + i);
		uint64_t paddr = vtophys(addr);

		desc[i].rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr));
		desc[i].rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr));
		cmdstat = slot[i].len = na->buff_size;	/* XXX */
		if (i == sc->rl_ldata.rl_rx_desc_cnt - 1)
			cmdstat |= RL_RDESC_CMD_EOR;
		desc[i].rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN);

		netmap_reload_map(sc->rl_ldata.rl_rx_mtag,
			sc->rl_ldata.rl_rx_desc[i].rx_dmamap,
			addr, na->buff_size);
	}
}
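
/*
 * Illustrative only (not compiled): how the two init helpers above are
 * assumed to be wired into if_re.c, following the usual pattern of the
 * netmap driver patches: the list-init routines set up the standard
 * rings first, then the netmap helpers overwrite buffer addresses and
 * maps when the interface is in netmap mode.
 */
#if 0
	/* in re_tx_list_init(), after the standard setup: */
	re_netmap_tx_init(sc);
	/* in re_rx_list_init(), after the standard setup: */
	re_netmap_rx_init(sc);
#endif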