/*
 * Copyright (C) 2011 Luigi Rizzo. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * $FreeBSD$
 *
 * netmap support for: re
 *
 * For more details on netmap support please see ixgbe_netmap.h
 */


#include <net/netmap.h>
#include <sys/selinfo.h>
#include <vm/vm.h>
#include <vm/pmap.h>    /* vtophys ? */
#include <dev/netmap/netmap_kern.h>
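
/*
 * Illustrative sketch, not compiled into the driver: how a userspace
 * client typically reaches the routines below through the standard
 * netmap ioctl interface. The interface name "re0" and the omitted
 * error handling are placeholders.
 *
 *	int fd = open("/dev/netmap", O_RDWR);
 *	struct nmreq req;
 *
 *	bzero(&req, sizeof(req));
 *	strlcpy(req.nr_name, "re0", sizeof(req.nr_name));
 *	req.nr_version = NETMAP_API;
 *	ioctl(fd, NIOCREGIF, &req);	// reaches re_netmap_reg()
 *	char *mem = mmap(NULL, req.nr_memsize,
 *	    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	struct netmap_if *nifp = NETMAP_IF(mem, req.nr_offset);
 *	struct netmap_ring *txring = NETMAP_TXRING(nifp, 0);
 *	// fill txring->slot[], advance txring->cur/avail, then:
 *	ioctl(fd, NIOCTXSYNC, NULL);	// reaches re_netmap_txsync()
 */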


/*
 * Register/unregister. We are already under netmap lock.
 */
static int
re_netmap_reg(struct netmap_adapter *na, int onoff)
{
	struct ifnet *ifp = na->ifp;
	struct rl_softc *adapter = ifp->if_softc;

	RL_LOCK(adapter);
	re_stop(adapter); /* also clears IFF_DRV_RUNNING */
	if (onoff) {
		nm_set_native_flags(na);
	} else {
		nm_clear_native_flags(na);
	}
	re_init_locked(adapter);	/* also enables intr */
	RL_UNLOCK(adapter);
	return (ifp->if_drv_flags & IFF_DRV_RUNNING ? 0 : 1);
}


/*
 * Reconcile kernel and user view of the transmit ring.
 */
static int
re_netmap_txsync(struct netmap_adapter *na, u_int ring_nr, int flags)
{
	struct ifnet *ifp = na->ifp;
	struct netmap_kring *kring = &na->tx_rings[ring_nr];
	struct netmap_ring *ring = kring->ring;
	u_int nm_i;	/* index into the netmap ring */
	u_int nic_i;	/* index into the NIC ring */
	u_int n, new_slots;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const cur = nm_txsync_prologue(kring, &new_slots);

	/* device-specific */
	struct rl_softc *sc = ifp->if_softc;
	struct rl_txdesc *txd = sc->rl_ldata.rl_tx_desc;

	if (cur > lim)	/* error checking in nm_txsync_prologue() */
		return netmap_ring_reinit(kring);

	bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
	    sc->rl_ldata.rl_tx_list_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); // XXX extra postwrite ?

	/*
	 * First part: process new packets to send.
	 */
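	/*
	 * Slots between nr_hwcur and cur (exclusive) have been filled by
	 * userspace; push them to the NIC. nm_i walks the netmap ring and
	 * nic_i the hardware ring; the two may be offset, hence the
	 * netmap_idx_k2n() alternative noted below.
	 */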
	nm_i = kring->nr_hwcur;
	if (nm_i != cur) {	/* we have new packets to send */
		nic_i = sc->rl_ldata.rl_tx_prodidx;
		// XXX or netmap_idx_k2n(kring, nm_i);

		for (n = 0; nm_i != cur; n++) {
			struct netmap_slot *slot = &ring->slot[nm_i];
			u_int len = slot->len;
			uint64_t paddr;
			void *addr = PNMB(slot, &paddr);

			/* device-specific */
			struct rl_desc *desc = &sc->rl_ldata.rl_tx_list[nic_i];
			int cmd = len | RL_TDESC_CMD_EOF |
				RL_TDESC_CMD_OWN | RL_TDESC_CMD_SOF;

			NM_CHECK_ADDR_LEN(addr, len);

			if (nic_i == lim)	/* mark end of ring */
				cmd |= RL_TDESC_CMD_EOR;

			if (slot->flags & NS_BUF_CHANGED) {
				/* buffer has changed, reload map */
				desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr));
				desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr));
				netmap_reload_map(sc->rl_ldata.rl_tx_mtag,
					txd[nic_i].tx_dmamap, addr);
			}
			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);

			/* Fill the slot in the NIC ring. */
			desc->rl_cmdstat = htole32(cmd);

			/* make sure changes to the buffer are synced */
			bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag,
				txd[nic_i].tx_dmamap,
				BUS_DMASYNC_PREWRITE);

			nm_i = nm_next(nm_i, lim);
			nic_i = nm_next(nic_i, lim);
		}
		sc->rl_ldata.rl_tx_prodidx = nic_i;
		/* decrease avail by # of packets sent minus previous ones */
		kring->nr_hwcur = cur; /* the saved ring->cur */
		kring->nr_hwavail -= new_slots;

		/* synchronize the NIC ring */
		bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
			sc->rl_ldata.rl_tx_list_map,
			BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* start ? */
		CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
	}

	/*
	 * Second part: reclaim buffers for completed transmissions.
	 */
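	/*
	 * Completed descriptors are those between rl_tx_considx and
	 * rl_tx_prodidx whose OWN bit the NIC has already cleared;
	 * reclaiming them makes the corresponding slots available again.
	 */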
	if (flags & NAF_FORCE_RECLAIM || kring->nr_hwavail < 1) {
		nic_i = sc->rl_ldata.rl_tx_considx;
		for (n = 0; nic_i != sc->rl_ldata.rl_tx_prodidx;
		    n++, nic_i = RL_TX_DESC_NXT(sc, nic_i)) {
			uint32_t cmdstat =
				le32toh(sc->rl_ldata.rl_tx_list[nic_i].rl_cmdstat);
			if (cmdstat & RL_TDESC_STAT_OWN)
				break;
		}
		if (n > 0) {
			sc->rl_ldata.rl_tx_considx = nic_i;
			sc->rl_ldata.rl_tx_free += n;
			kring->nr_hwavail += n;
		}
	}

	nm_txsync_finalize(kring, cur);

	return 0;
}


/*
 * Reconcile kernel and user view of the receive ring.
 */
static int
re_netmap_rxsync(struct netmap_adapter *na, u_int ring_nr, int flags)
{
	struct ifnet *ifp = na->ifp;
	struct netmap_kring *kring = &na->rx_rings[ring_nr];
	struct netmap_ring *ring = kring->ring;
	u_int nm_i;	/* index into the netmap ring */
	u_int nic_i;	/* index into the NIC ring */
	u_int n, resvd;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const cur = nm_rxsync_prologue(kring, &resvd); /* cur + res */
	int force_update = (flags & NAF_FORCE_READ) ||
	    (kring->nr_kflags & NKR_PENDINTR);

	/* device-specific */
	struct rl_softc *sc = ifp->if_softc;
	struct rl_rxdesc *rxd = sc->rl_ldata.rl_rx_desc;

	if (cur > lim)
		return netmap_ring_reinit(kring);

	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
			sc->rl_ldata.rl_rx_list_map,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * First part: import newly received packets.
	 *
	 * This device uses all the buffers in the ring, so we need
	 * another termination condition in addition to RL_RDESC_STAT_OWN
	 * being cleared (all buffers could have it cleared). The easiest
	 * one is to limit the amount of data reported up to 'lim'.
	 */
	if (netmap_no_pendintr || force_update) {
		uint16_t slot_flags = kring->nkr_slot_flags;

		nic_i = sc->rl_ldata.rl_rx_prodidx; /* next pkt to check */
		nm_i = netmap_idx_n2k(kring, nic_i);

		for (n = kring->nr_hwavail; n < lim; n++) {
			struct rl_desc *cur_rx = &sc->rl_ldata.rl_rx_list[nic_i];
			uint32_t rxstat = le32toh(cur_rx->rl_cmdstat);
			uint32_t total_len;

			if ((rxstat & RL_RDESC_STAT_OWN) != 0)
				break;
			total_len = rxstat & sc->rl_rxlenmask;
			/* XXX subtract crc */
			total_len = (total_len < 4) ? 0 : total_len - 4;
			ring->slot[nm_i].len = total_len;
			ring->slot[nm_i].flags = slot_flags;
			/* sync was in re_newbuf() */
			bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
			    rxd[nic_i].rx_dmamap, BUS_DMASYNC_POSTREAD);
			nm_i = nm_next(nm_i, lim);
			nic_i = nm_next(nic_i, lim);
		}
		if (n != kring->nr_hwavail) {
			sc->rl_ldata.rl_rx_prodidx = nic_i;
			sc->rl_ifp->if_ipackets += n - kring->nr_hwavail;
			kring->nr_hwavail = n;
		}
		kring->nr_kflags &= ~NKR_PENDINTR;
	}

	/*
	 * Second part: skip past packets that userspace has released.
	 */
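	/*
	 * Slots between nr_hwcur and cur have been returned by userspace;
	 * hand their buffers back to the NIC by setting the OWN bit again,
	 * reloading the DMA map if the buffer was swapped.
	 */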
	nm_i = kring->nr_hwcur;
	if (nm_i != cur) {
		nic_i = netmap_idx_k2n(kring, nm_i);
		for (n = 0; nm_i != cur; n++) {
			struct netmap_slot *slot = &ring->slot[nm_i];
			uint64_t paddr;
			void *addr = PNMB(slot, &paddr);

			struct rl_desc *desc = &sc->rl_ldata.rl_rx_list[nic_i];
			int cmd = NETMAP_BUF_SIZE | RL_RDESC_CMD_OWN;

			if (addr == netmap_buffer_base) /* bad buf */
				goto ring_reset;

			if (nic_i == lim)	/* mark end of ring */
				cmd |= RL_RDESC_CMD_EOR;

			if (slot->flags & NS_BUF_CHANGED) {
				/* buffer has changed, reload map */
				desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr));
				desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr));
				netmap_reload_map(sc->rl_ldata.rl_rx_mtag,
					rxd[nic_i].rx_dmamap, addr);
				slot->flags &= ~NS_BUF_CHANGED;
			}
			desc->rl_cmdstat = htole32(cmd);
			bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
			    rxd[nic_i].rx_dmamap,
			    BUS_DMASYNC_PREREAD);
			nm_i = nm_next(nm_i, lim);
			nic_i = nm_next(nic_i, lim);
		}
		kring->nr_hwavail -= n;
		kring->nr_hwcur = cur;

		bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
		    sc->rl_ldata.rl_rx_list_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	/* tell userspace that there might be new packets */
	ring->avail = kring->nr_hwavail - resvd;

	return 0;

ring_reset:
	return netmap_ring_reinit(kring);
}


/*
 * Additional routines to init the tx and rx rings.
 * In other drivers we do that inline in the main code.
 */
static void
re_netmap_tx_init(struct rl_softc *sc)
{
	struct rl_txdesc *txd;
	struct rl_desc *desc;
	int i, n;
	struct netmap_adapter *na = NA(sc->rl_ifp);
	struct netmap_slot *slot;

	if (!na || !(na->na_flags & NAF_NATIVE_ON)) {
		return;
	}

	slot = netmap_reset(na, NR_TX, 0, 0);
	/* slot is NULL if we are not in netmap mode */
	if (!slot)
		return;  // XXX cannot happen
	/* in netmap mode, overwrite addresses and maps */
	txd = sc->rl_ldata.rl_tx_desc;
	desc = sc->rl_ldata.rl_tx_list;
	n = sc->rl_ldata.rl_tx_desc_cnt;

	/* l points in the netmap ring, i points in the NIC ring */
	for (i = 0; i < n; i++) {
		uint64_t paddr;
		int l = netmap_idx_n2k(&na->tx_rings[0], i);
		void *addr = PNMB(slot + l, &paddr);

		desc[i].rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr));
		desc[i].rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr));
		netmap_load_map(sc->rl_ldata.rl_tx_mtag,
			txd[i].tx_dmamap, addr);
	}
}

static void
re_netmap_rx_init(struct rl_softc *sc)
{
	struct netmap_adapter *na = NA(sc->rl_ifp);
	struct netmap_slot *slot = netmap_reset(na, NR_RX, 0, 0);
	struct rl_desc *desc = sc->rl_ldata.rl_rx_list;
	uint32_t cmdstat;
	int i, n, max_avail;

	if (!slot)
		return;
	n = sc->rl_ldata.rl_rx_desc_cnt;
	/*
	 * Userspace owned hwavail packets before the reset, so the last
	 * hwavail descriptors of the ring are still owned by the driver
	 * and must not be handed to the NIC (we also keep one slot empty).
	 */
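	/*
	 * Example: with n == 256 and nr_hwavail == 10, max_avail is 245,
	 * so descriptors 0..244 below get RL_RDESC_CMD_OWN and the
	 * remaining 11 stay with the host.
	 */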
	max_avail = n - 1 - na->rx_rings[0].nr_hwavail;
	for (i = 0; i < n; i++) {
		void *addr;
		uint64_t paddr;
		int l = netmap_idx_n2k(&na->rx_rings[0], i);

		addr = PNMB(slot + l, &paddr);

		netmap_reload_map(sc->rl_ldata.rl_rx_mtag,
		    sc->rl_ldata.rl_rx_desc[i].rx_dmamap, addr);
		bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
		    sc->rl_ldata.rl_rx_desc[i].rx_dmamap, BUS_DMASYNC_PREREAD);
		desc[i].rl_bufaddr_lo = htole32(RL_ADDR_LO(paddr));
		desc[i].rl_bufaddr_hi = htole32(RL_ADDR_HI(paddr));
		cmdstat = NETMAP_BUF_SIZE;
		if (i == n - 1) /* mark the end of ring */
			cmdstat |= RL_RDESC_CMD_EOR;
		if (i < max_avail)
			cmdstat |= RL_RDESC_CMD_OWN;
		desc[i].rl_cmdstat = htole32(cmdstat);
	}
}


static void
re_netmap_attach(struct rl_softc *sc)
{
	struct netmap_adapter na;

	bzero(&na, sizeof(na));

	na.ifp = sc->rl_ifp;
	na.na_flags = NAF_BDG_MAYSLEEP;
	na.num_tx_desc = sc->rl_ldata.rl_tx_desc_cnt;
	na.num_rx_desc = sc->rl_ldata.rl_rx_desc_cnt;
	na.nm_txsync = re_netmap_txsync;
	na.nm_rxsync = re_netmap_rxsync;
	na.nm_register = re_netmap_reg;
	na.num_tx_rings = na.num_rx_rings = 1;
	netmap_attach(&na);
}
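
/*
 * Illustrative sketch, not part of this file: the hooks above are
 * typically wired into if_re.c under #ifdef DEV_NETMAP, roughly as
 * follows, with attach called once the rings have been sized and
 * detach on teardown.
 *
 *	// near the end of re_attach():
 *	re_netmap_attach(sc);
 *
 *	// in re_detach():
 *	netmap_detach(ifp);
 */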

/* end of file */