xref: /freebsd/sys/dev/netmap/netmap.c (revision 9a41df2a0e6408e9b329bbd8b9e37c2b44461a1b)
1 /*
2  * Copyright (C) 2011-2012 Matteo Landi, Luigi Rizzo. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  *   1. Redistributions of source code must retain the above copyright
8  *      notice, this list of conditions and the following disclaimer.
9  *   2. Redistributions in binary form must reproduce the above copyright
10  *      notice, this list of conditions and the following disclaimer in the
11  *      documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  */
25 
26 #define NM_BRIDGE
27 
28 /*
29  * This module supports memory mapped access to network devices,
30  * see netmap(4).
31  *
32  * The module uses a large memory pool allocated by the kernel
33  * and accessible as mmapped memory by multiple userspace threads/processes.
34  * The memory pool contains packet buffers and "netmap rings",
35  * i.e. user-accessible copies of the interface's queues.
36  *
37  * Access to the network card works like this:
38  * 1. a process/thread issues one or more open() on /dev/netmap, to create
39  *    a select()able file descriptor on which events are reported.
40  * 2. on each descriptor, the process issues an ioctl() to identify
41  *    the interface that should report events to the file descriptor.
42  * 3. on each descriptor, the process issues an mmap() request to
43  *    map the shared memory region within the process' address space.
44  *    The list of interesting queues is indicated by a location in
45  *    the shared memory region.
46  * 4. using the functions in the netmap(4) userspace API, a process
47  *    can look up the occupation state of a queue, access memory buffers,
48  *    and retrieve received packets or enqueue packets to transmit.
49  * 5. using some ioctl()s the process can synchronize the userspace view
50  *    of the queue with the actual status in the kernel. This includes both
51  *    receiving the notification of new packets, and transmitting new
52  *    packets on the output interface.
53  * 6. select() or poll() can be used to wait for events on individual
54  *    transmit or receive queues (or all queues for a given interface).
55  */
56 
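/*
 * A minimal userspace sketch of the sequence above (error handling
 * omitted; the interface name "em0" and the use of rx ring 0 are just
 * examples). struct nmreq and the ring accessors come from
 * net/netmap.h and net/netmap_user.h:
 *
 *	int fd = open("/dev/netmap", O_RDWR);
 *	struct nmreq req;
 *
 *	bzero(&req, sizeof(req));
 *	req.nr_version = NETMAP_API;
 *	strcpy(req.nr_name, "em0");
 *	ioctl(fd, NIOCREGIF, &req);		// bind fd to em0
 *	char *mem = mmap(0, req.nr_memsize, PROT_READ | PROT_WRITE,
 *		MAP_SHARED, fd, 0);
 *	struct netmap_if *nifp = NETMAP_IF(mem, req.nr_offset);
 *	struct netmap_ring *rxring = NETMAP_RXRING(nifp, 0);
 *	ioctl(fd, NIOCRXSYNC, NULL);		// sync the userspace view
 */
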
57 #ifdef linux
58 #include "bsd_glue.h"
59 static netdev_tx_t linux_netmap_start(struct sk_buff *skb, struct net_device *dev);
60 #endif /* linux */
61 
62 #ifdef __APPLE__
63 #include "osx_glue.h"
64 #endif /* __APPLE__ */
65 
66 #ifdef __FreeBSD__
67 #include <sys/cdefs.h> /* prerequisite */
68 __FBSDID("$FreeBSD$");
69 
70 #include <sys/types.h>
71 #include <sys/module.h>
72 #include <sys/errno.h>
73 #include <sys/param.h>	/* defines used in kernel.h */
74 #include <sys/jail.h>
75 #include <sys/kernel.h>	/* types used in module initialization */
76 #include <sys/conf.h>	/* cdevsw struct */
77 #include <sys/uio.h>	/* uio struct */
78 #include <sys/sockio.h>
79 #include <sys/socketvar.h>	/* struct socket */
80 #include <sys/malloc.h>
81 #include <sys/mman.h>	/* PROT_EXEC */
82 #include <sys/poll.h>
83 #include <sys/proc.h>
84 #include <vm/vm.h>	/* vtophys */
85 #include <vm/pmap.h>	/* vtophys */
86 #include <sys/socket.h> /* sockaddrs */
87 #include <machine/bus.h>
88 #include <sys/selinfo.h>
89 #include <sys/sysctl.h>
90 #include <net/if.h>
91 #include <net/bpf.h>		/* BIOCIMMEDIATE */
92 #include <net/vnet.h>
93 #include <machine/bus.h>	/* bus_dmamap_* */
94 
95 MALLOC_DEFINE(M_NETMAP, "netmap", "Network memory map");
96 #endif /* __FreeBSD__ */
97 
98 #include <net/netmap.h>
99 #include <dev/netmap/netmap_kern.h>
100 
101 /*
102  * lock and unlock for the netmap memory allocator
103  */
104 #define NMA_LOCK()	mtx_lock(&nm_mem->nm_mtx);
105 #define NMA_UNLOCK()	mtx_unlock(&nm_mem->nm_mtx);
106 struct netmap_mem_d;
107 static struct netmap_mem_d *nm_mem;	/* Our memory allocator. */
108 
109 u_int netmap_total_buffers;
110 char *netmap_buffer_base;	/* address of an invalid buffer */
111 
112 /* user-controlled variables */
113 int netmap_verbose;
114 
115 static int netmap_no_timestamp; /* don't timestamp on rxsync */
116 
117 SYSCTL_NODE(_dev, OID_AUTO, netmap, CTLFLAG_RW, 0, "Netmap args");
118 SYSCTL_INT(_dev_netmap, OID_AUTO, verbose,
119     CTLFLAG_RW, &netmap_verbose, 0, "Verbose mode");
120 SYSCTL_INT(_dev_netmap, OID_AUTO, no_timestamp,
121     CTLFLAG_RW, &netmap_no_timestamp, 0, "Don't timestamp on rxsync");
122 u_int netmap_buf_size = 2048;
123 TUNABLE_INT("hw.netmap.buf_size", (u_int *)&netmap_buf_size);
124 SYSCTL_INT(_dev_netmap, OID_AUTO, buf_size,
125     CTLFLAG_RD, &netmap_buf_size, 0, "Size of packet buffers");
126 int netmap_mitigate = 1;
127 SYSCTL_INT(_dev_netmap, OID_AUTO, mitigate, CTLFLAG_RW, &netmap_mitigate, 0, "Interrupt mitigation");
128 int netmap_no_pendintr = 1;
129 SYSCTL_INT(_dev_netmap, OID_AUTO, no_pendintr,
130     CTLFLAG_RW, &netmap_no_pendintr, 0, "Always look for new received packets.");
131 
132 int netmap_drop = 0;	/* debugging */
133 int netmap_flags = 0;	/* debug flags */
134 int netmap_copy = 0;	/* debugging, copy content */
135 
136 SYSCTL_INT(_dev_netmap, OID_AUTO, drop, CTLFLAG_RW, &netmap_drop, 0, "Drop packets (debugging)");
137 SYSCTL_INT(_dev_netmap, OID_AUTO, flags, CTLFLAG_RW, &netmap_flags, 0, "Debug flags");
138 SYSCTL_INT(_dev_netmap, OID_AUTO, copy, CTLFLAG_RW, &netmap_copy, 0, "Copy packet content (debugging)");
139 
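/*
 * For reference, the knobs above live under the dev.netmap sysctl
 * tree; the CTLFLAG_RW ones can be changed at runtime, e.g.
 *	sysctl dev.netmap.verbose=1
 * buf_size is read-only at runtime and is set through the loader
 * tunable, e.g. hw.netmap.buf_size="2048" in loader.conf(5).
 */
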
140 #ifdef NM_BRIDGE /* support for netmap bridge */
141 
142 /*
143  * system parameters.
144  *
145  * All switched ports have prefix NM_NAME.
146  * The switch has a max of NM_BDG_MAXPORTS ports (often stored in a bitmap,
147  * so a practical upper bound is 64).
148  * Each tx ring is read-write, whereas rx rings are readonly (XXX not done yet).
149  * The virtual interfaces use per-queue lock instead of core lock.
150  * In the tx loop, we aggregate traffic in batches to make all operations
151  * faster. The batch size is NM_BDG_BATCH.
152  */
153 #define	NM_NAME			"vale"	/* prefix for the interface */
154 #define NM_BDG_MAXPORTS		16	/* up to 64 ? */
155 #define NM_BRIDGE_RINGSIZE	1024	/* in the device */
156 #define NM_BDG_HASH		1024	/* forwarding table entries */
157 #define NM_BDG_BATCH		1024	/* entries in the forwarding buffer */
158 #define	NM_BRIDGES		4	/* number of bridges */
159 int netmap_bridge = NM_BDG_BATCH; /* bridge batch size */
160 SYSCTL_INT(_dev_netmap, OID_AUTO, bridge, CTLFLAG_RW, &netmap_bridge, 0, "Bridge batch size");
161 
162 #ifdef linux
163 #define	ADD_BDG_REF(ifp)	(NA(ifp)->if_refcount++)
164 #define	DROP_BDG_REF(ifp)	(NA(ifp)->if_refcount-- <= 1)
165 #else /* !linux */
166 #define	ADD_BDG_REF(ifp)	(ifp)->if_refcount++
167 #define	DROP_BDG_REF(ifp)	refcount_release(&(ifp)->if_refcount)
168 #ifdef __FreeBSD__
169 #include <sys/endian.h>
170 #include <sys/refcount.h>
171 #endif /* __FreeBSD__ */
172 #define prefetch(x)	__builtin_prefetch(x)
173 #endif /* !linux */
174 
175 static void bdg_netmap_attach(struct ifnet *ifp);
176 static int bdg_netmap_reg(struct ifnet *ifp, int onoff);
177 /* per-tx-queue entry */
178 struct nm_bdg_fwd {	/* forwarding entry for a bridge */
179 	void *buf;
180 	uint64_t dst;	/* dst mask */
181 	uint32_t src;	/* src index ? */
182 	uint16_t len;	/* src len */
183 };
184 
185 struct nm_hash_ent {
186 	uint64_t	mac;	/* the top 2 bytes are the epoch */
187 	uint64_t	ports;
188 };
189 
190 /*
191  * Interfaces for a bridge are all in ports[].
192  * The array has fixed size, an empty entry does not terminate
193  * the search.
194  */
195 struct nm_bridge {
196 	struct ifnet *bdg_ports[NM_BDG_MAXPORTS];
197 	int n_ports;
198 	uint64_t act_ports;
199 	int freelist;	/* first buffer index */
200 	NM_SELINFO_T si;	/* poll/select wait queue */
201 	NM_LOCK_T bdg_lock;	/* protect the selinfo ? */
202 
203 	/* the forwarding table, MAC+ports */
204 	struct nm_hash_ent ht[NM_BDG_HASH];
205 
206 	int namelen;	/* 0 means free */
207 	char basename[IFNAMSIZ];
208 };
209 
210 struct nm_bridge nm_bridges[NM_BRIDGES];
211 
212 #define BDG_LOCK(b)	mtx_lock(&(b)->bdg_lock)
213 #define BDG_UNLOCK(b)	mtx_unlock(&(b)->bdg_lock)
214 
215 /*
216  * NA(ifp)->bdg_port	port index
217  */
218 
219 // XXX only for multiples of 64 bytes, non-overlapping buffers.
220 static inline void
221 pkt_copy(void *_src, void *_dst, int l)
222 {
223 	uint64_t *src = _src;
224 	uint64_t *dst = _dst;
225 	if (unlikely(l >= 1024)) {
226 		bcopy(src, dst, l);
227 		return;
228 	}
229 	for (; likely(l > 0); l -= 64) {	/* copy 8 words = 64 bytes */
230 		*dst++ = *src++;
231 		*dst++ = *src++;
232 		*dst++ = *src++;
233 		*dst++ = *src++;
234 		*dst++ = *src++;
235 		*dst++ = *src++;
236 		*dst++ = *src++;
237 		*dst++ = *src++;
238 	}
239 }
240 
241 /*
242  * locate a bridge among the existing ones.
243  * A ':' in the name terminates the bridge name, e.g. "vale0:p1"
 * selects bridge "vale0"; otherwise the bridge name is just NM_NAME.
244  * We assume that this is called with a name of at least NM_NAME chars.
245  */
246 static struct nm_bridge *
247 nm_find_bridge(const char *name)
248 {
249 	int i, l, namelen, e;
250 	struct nm_bridge *b = NULL;
251 
252 	namelen = strlen(NM_NAME);	/* base length */
253 	l = strlen(name);		/* actual length */
254 	for (i = namelen + 1; i < l; i++) {
255 		if (name[i] == ':') {
256 			namelen = i;
257 			break;
258 		}
259 	}
260 	if (namelen >= IFNAMSIZ)
261 		namelen = IFNAMSIZ;
262 	ND("--- prefix is '%.*s' ---", namelen, name);
263 
264 	/* use the first entry for locking */
265 	BDG_LOCK(nm_bridges); // XXX do better
266 	for (e = -1, i = 1; i < NM_BRIDGES; i++) {
267 		b = nm_bridges + i;
268 		if (b->namelen == 0)
269 			e = i;	/* record empty slot */
270 		else if (strncmp(name, b->basename, namelen) == 0) {
271 			ND("found '%.*s' at %d", namelen, name, i);
272 			break;
273 		}
274 	}
275 	if (i == NM_BRIDGES) { /* all full */
276 		if (e == -1) { /* no empty slot */
277 			b = NULL;
278 		} else {
279 			b = nm_bridges + e;
280 			strncpy(b->basename, name, namelen);
281 			b->namelen = namelen;
282 		}
283 	}
284 	BDG_UNLOCK(nm_bridges);
285 	return b;
286 }
287 #endif /* NM_BRIDGE */
288 
289 /*------------- memory allocator -----------------*/
290 #ifdef NETMAP_MEM2
291 #include "netmap_mem2.c"
292 #else /* !NETMAP_MEM2 */
293 #include "netmap_mem1.c"
294 #endif /* !NETMAP_MEM2 */
295 /*------------ end of memory allocator ----------*/
296 
297 /* Structure associated to each thread which registered an interface. */
298 struct netmap_priv_d {
299 	struct netmap_if *np_nifp;	/* netmap interface descriptor. */
300 
301 	struct ifnet	*np_ifp;	/* device for which we hold a reference */
302 	int		np_ringid;	/* from the ioctl */
303 	u_int		np_qfirst, np_qlast;	/* range of rings to scan */
304 	uint16_t	np_txpoll;
305 };
306 
307 
308 /*
309  * File descriptor's private data destructor.
310  *
311  * Call nm_register(ifp,0) to stop netmap mode on the interface and
312  * revert to normal operation. We expect that np_ifp has not gone away.
313  */
314 static void
315 netmap_dtor_locked(void *data)
316 {
317 	struct netmap_priv_d *priv = data;
318 	struct ifnet *ifp = priv->np_ifp;
319 	struct netmap_adapter *na = NA(ifp);
320 	struct netmap_if *nifp = priv->np_nifp;
321 
322 	na->refcount--;
323 	if (na->refcount <= 0) {	/* last instance */
324 		u_int i, j, lim;
325 
326 		D("deleting last netmap instance for %s", ifp->if_xname);
327 		/*
328 		 * there is a race here with *_netmap_task() and
329 		 * netmap_poll(), which don't run under NETMAP_REG_LOCK.
330 		 * na->refcount == 0 && na->ifp->if_capenable & IFCAP_NETMAP
331 		 * (aka NETMAP_DELETING(na)) are a unique marker that the
332 		 * device is dying.
333 		 * Before destroying stuff we sleep a bit, and then complete
334 		 * the job. NIOCREG should realize the condition and
335 		 * loop until they can continue; the other routines
336 		 * should check the condition at entry and quit if
337 		 * they cannot run.
338 		 */
339 		na->nm_lock(ifp, NETMAP_REG_UNLOCK, 0);
340 		tsleep(na, 0, "NIOCUNREG", 4);
341 		na->nm_lock(ifp, NETMAP_REG_LOCK, 0);
342 		na->nm_register(ifp, 0); /* off, clear IFCAP_NETMAP */
343 		/* Wake up any sleeping threads. netmap_poll will
344 		 * then return POLLERR
345 		 */
346 		for (i = 0; i < na->num_tx_rings + 1; i++)
347 			selwakeuppri(&na->tx_rings[i].si, PI_NET);
348 		for (i = 0; i < na->num_rx_rings + 1; i++)
349 			selwakeuppri(&na->rx_rings[i].si, PI_NET);
350 		selwakeuppri(&na->tx_si, PI_NET);
351 		selwakeuppri(&na->rx_si, PI_NET);
352 		/* release all buffers */
353 		NMA_LOCK();
354 		for (i = 0; i < na->num_tx_rings + 1; i++) {
355 			struct netmap_ring *ring = na->tx_rings[i].ring;
356 			lim = na->tx_rings[i].nkr_num_slots;
357 			for (j = 0; j < lim; j++)
358 				netmap_free_buf(nifp, ring->slot[j].buf_idx);
359 			/* knlist_destroy(&na->tx_rings[i].si.si_note); */
360 			mtx_destroy(&na->tx_rings[i].q_lock);
361 		}
362 		for (i = 0; i < na->num_rx_rings + 1; i++) {
363 			struct netmap_ring *ring = na->rx_rings[i].ring;
364 			lim = na->rx_rings[i].nkr_num_slots;
365 			for (j = 0; j < lim; j++)
366 				netmap_free_buf(nifp, ring->slot[j].buf_idx);
367 			/* knlist_destroy(&na->rx_rings[i].si.si_note); */
368 			mtx_destroy(&na->rx_rings[i].q_lock);
369 		}
370 		/* XXX kqueue(9) needed; these will mirror knlist_init. */
371 		/* knlist_destroy(&na->tx_si.si_note); */
372 		/* knlist_destroy(&na->rx_si.si_note); */
373 		NMA_UNLOCK();
374 		netmap_free_rings(na);
375 		wakeup(na);
376 	}
377 	netmap_if_free(nifp);
378 }
379 
380 static void
381 nm_if_rele(struct ifnet *ifp)
382 {
383 #ifndef NM_BRIDGE
384 	if_rele(ifp);
385 #else /* NM_BRIDGE */
386 	int i, full;
387 	struct nm_bridge *b;
388 
389 	if (strncmp(ifp->if_xname, NM_NAME, sizeof(NM_NAME) - 1)) {
390 		if_rele(ifp);
391 		return;
392 	}
393 	if (!DROP_BDG_REF(ifp))
394 		return;
395 	b = ifp->if_bridge;
396 	BDG_LOCK(nm_bridges);
397 	BDG_LOCK(b);
398 	ND("want to disconnect %s from the bridge", ifp->if_xname);
399 	full = 0;
400 	for (i = 0; i < NM_BDG_MAXPORTS; i++) {
401 		if (b->bdg_ports[i] == ifp) {
402 			b->bdg_ports[i] = NULL;
403 			bzero(ifp, sizeof(*ifp));
404 			free(ifp, M_DEVBUF);
405 			break;
406 		}
407 		else if (b->bdg_ports[i] != NULL)
408 			full = 1;
409 	}
410 	BDG_UNLOCK(b);
411 	if (full == 0) {
412 		ND("freeing bridge %d", b - nm_bridges);
413 		b->namelen = 0;
414 	}
415 	BDG_UNLOCK(nm_bridges);
416 	if (i == NM_BDG_MAXPORTS)
417 		D("ouch, cannot find ifp to remove");
418 #endif /* NM_BRIDGE */
419 }
420 
421 static void
422 netmap_dtor(void *data)
423 {
424 	struct netmap_priv_d *priv = data;
425 	struct ifnet *ifp = priv->np_ifp;
426 	struct netmap_adapter *na = NA(ifp);
427 
428 	na->nm_lock(ifp, NETMAP_REG_LOCK, 0);
429 	netmap_dtor_locked(data);
430 	na->nm_lock(ifp, NETMAP_REG_UNLOCK, 0);
431 
432 	nm_if_rele(ifp);
433 	bzero(priv, sizeof(*priv));	/* XXX for safety */
434 	free(priv, M_DEVBUF);
435 }
436 
437 
438 /*
439  * mmap(2) support for the "netmap" device.
440  *
441  * Expose all the memory previously allocated by our custom memory
442  * allocator: this way the user has only to issue a single mmap(2), and
443  * can work on all the data structures flawlessly.
444  *
445  * Return 0 on success, -1 otherwise.
446  */
447 
448 #ifdef __FreeBSD__
449 static int
450 netmap_mmap(__unused struct cdev *dev,
451 #if __FreeBSD_version < 900000
452 		vm_offset_t offset, vm_paddr_t *paddr, int nprot
453 #else
454 		vm_ooffset_t offset, vm_paddr_t *paddr, int nprot,
455 		__unused vm_memattr_t *memattr
456 #endif
457 	)
458 {
459 	if (nprot & PROT_EXEC)
460 		return (-1);	// XXX -1 or EINVAL ?
461 
462 	ND("request for offset 0x%x", (uint32_t)offset);
463 	*paddr = netmap_ofstophys(offset);
464 
465 	return (0);
466 }
467 #endif /* __FreeBSD__ */
468 
469 
470 /*
471  * Handlers for synchronization of the queues from/to the host.
472  *
473  * netmap_sync_to_host() passes packets up. We are called from a
474  * system call in user process context, and the only contention
475  * can be among multiple user threads erroneously calling
476  * this routine concurrently. In principle we should not even
477  * need to lock.
478  */
479 static void
480 netmap_sync_to_host(struct netmap_adapter *na)
481 {
482 	struct netmap_kring *kring = &na->tx_rings[na->num_tx_rings];
483 	struct netmap_ring *ring = kring->ring;
484 	struct mbuf *head = NULL, *tail = NULL, *m;
485 	u_int k, n, lim = kring->nkr_num_slots - 1;
486 
487 	k = ring->cur;
488 	if (k > lim) {
489 		netmap_ring_reinit(kring);
490 		return;
491 	}
492 	// na->nm_lock(na->ifp, NETMAP_CORE_LOCK, 0);
493 
494 	/* Take packets from hwcur to cur and pass them up.
495 	 * In case of no buffers we give up. At the end of the loop,
496 	 * the queue is drained in all cases.
497 	 */
498 	for (n = kring->nr_hwcur; n != k;) {
499 		struct netmap_slot *slot = &ring->slot[n];
500 
501 		n = (n == lim) ? 0 : n + 1;
502 		if (slot->len < 14 || slot->len > NETMAP_BUF_SIZE) {	/* 14 = ethernet header len */
503 			D("bad pkt at %d len %d", n, slot->len);
504 			continue;
505 		}
506 		m = m_devget(NMB(slot), slot->len, 0, na->ifp, NULL);
507 
508 		if (m == NULL)
509 			break;
510 		if (tail)
511 			tail->m_nextpkt = m;
512 		else
513 			head = m;
514 		tail = m;
515 		m->m_nextpkt = NULL;
516 	}
517 	kring->nr_hwcur = k;
518 	kring->nr_hwavail = ring->avail = lim;
519 	// na->nm_lock(na->ifp, NETMAP_CORE_UNLOCK, 0);
520 
521 	/* send packets up, outside the lock */
522 	while ((m = head) != NULL) {
523 		head = head->m_nextpkt;
524 		m->m_nextpkt = NULL;
525 		if (netmap_verbose & NM_VERB_HOST)
526 			D("sending up pkt %p size %d", m, MBUF_LEN(m));
527 		NM_SEND_UP(na->ifp, m);
528 	}
529 }
530 
531 /*
532  * rxsync backend for packets coming from the host stack.
533  * They have been put in the queue by netmap_start() so we
534  * need to protect access to the kring using a lock.
535  *
536  * This routine also does the selrecord if called from the poll handler
537  * (we know because td != NULL).
538  *
539  * NOTE: on linux, selrecord() is defined as a macro and uses pwait
540  *     as an additional hidden argument.
541  */
542 static void
543 netmap_sync_from_host(struct netmap_adapter *na, struct thread *td, void *pwait)
544 {
545 	struct netmap_kring *kring = &na->rx_rings[na->num_rx_rings];
546 	struct netmap_ring *ring = kring->ring;
547 	u_int j, n, lim = kring->nkr_num_slots;
548 	u_int k = ring->cur, resvd = ring->reserved;
549 
550 	(void)pwait;	/* disable unused warnings */
551 	na->nm_lock(na->ifp, NETMAP_CORE_LOCK, 0);
552 	if (k >= lim) {
553 		netmap_ring_reinit(kring);
 		na->nm_lock(na->ifp, NETMAP_CORE_UNLOCK, 0); /* release the lock taken above */
554 		return;
555 	}
556 	/* new packets are already set in nr_hwavail */
557 	/* skip past packets that userspace has released */
558 	j = kring->nr_hwcur;
559 	if (resvd > 0) {
560 		if (resvd + ring->avail >= lim + 1) {
561 			D("XXX invalid reserve/avail %d %d", resvd, ring->avail);
562 			ring->reserved = resvd = 0; // XXX panic...
563 		}
564 		k = (k >= resvd) ? k - resvd : k + lim - resvd;
565 	}
566 	if (j != k) {
567 		n = k >= j ? k - j : k + lim - j;
568 		kring->nr_hwavail -= n;
569 		kring->nr_hwcur = k;
570 	}
571 	k = ring->avail = kring->nr_hwavail - resvd;
572 	if (k == 0 && td)
573 		selrecord(td, &kring->si);
574 	if (k && (netmap_verbose & NM_VERB_HOST))
575 		D("%d pkts from stack", k);
576 	na->nm_lock(na->ifp, NETMAP_CORE_UNLOCK, 0);
577 }
578 
579 
580 /*
581  * get a refcounted reference to an interface.
582  * Return ENXIO if the interface does not exist, EINVAL if netmap
583  * is not supported by the interface.
584  * If successful, we hold a reference to the interface.
585  */
586 static int
587 get_ifp(const char *name, struct ifnet **ifp)
588 {
589 #ifdef NM_BRIDGE
590 	struct ifnet *iter = NULL;
591 
592 	do {
593 		struct nm_bridge *b;
594 		int i, l, cand = -1;
595 
596 		if (strncmp(name, NM_NAME, sizeof(NM_NAME) - 1))
597 			break;
598 		b = nm_find_bridge(name);
599 		if (b == NULL) {
600 			D("no bridges available for '%s'", name);
601 			return (ENXIO);
602 		}
603 		/* XXX locking */
604 		BDG_LOCK(b);
605 		/* lookup in the local list of ports */
606 		for (i = 0; i < NM_BDG_MAXPORTS; i++) {
607 			iter = b->bdg_ports[i];
608 			if (iter == NULL) {
609 				if (cand == -1)
610 					cand = i; /* potential insert point */
611 				continue;
612 			}
613 			if (!strcmp(iter->if_xname, name)) {
614 				ADD_BDG_REF(iter);
615 				ND("found existing interface");
616 				BDG_UNLOCK(b);
617 				break;
618 			}
619 		}
620 		if (i < NM_BDG_MAXPORTS) /* already unlocked */
621 			break;
622 		if (cand == -1) {
623 			D("bridge full, cannot create new port");
624 no_port:
625 			BDG_UNLOCK(b);
626 			*ifp = NULL;
627 			return EINVAL;
628 		}
629 		ND("create new bridge port %s", name);
630 		/* space for forwarding list after the ifnet */
631 		l = sizeof(*iter) +
632 			 sizeof(struct nm_bdg_fwd) * NM_BDG_BATCH;
633 		iter = malloc(l, M_DEVBUF, M_NOWAIT | M_ZERO);
634 		if (!iter)
635 			goto no_port;
636 		strcpy(iter->if_xname, name);
637 		bdg_netmap_attach(iter);
638 		b->bdg_ports[cand] = iter;
639 		iter->if_bridge = b;
640 		ADD_BDG_REF(iter);
641 		BDG_UNLOCK(b);
642 		ND("attaching virtual bridge %p", b);
643 	} while (0);
644 	*ifp = iter;
645 	if (! *ifp)
646 #endif /* NM_BRIDGE */
647 	*ifp = ifunit_ref(name);
648 	if (*ifp == NULL)
649 		return (ENXIO);
650 	/* can do this if the capability exists and if_pspare[0]
651 	 * points to the netmap descriptor.
652 	 */
653 	if ((*ifp)->if_capabilities & IFCAP_NETMAP && NA(*ifp))
654 		return 0;	/* valid pointer, we hold the refcount */
655 	nm_if_rele(*ifp);
656 	return EINVAL;	// not NETMAP capable
657 }
658 
659 
660 /*
661  * Error routine called when txsync/rxsync detects an error.
662  * Can't do much more than resetting cur = hwcur, avail = hwavail.
663  * Return 1 on reinit.
664  *
665  * This routine is only called by the upper half of the kernel.
666  * It only reads hwcur (which is changed only by the upper half, too)
667  * and hwavail (which may be changed by the lower half, but only on
668  * a tx ring and only to increase it, so any error will be recovered
669  * on the next call). For the above, we don't strictly need to call
670  * it under lock.
671  */
672 int
673 netmap_ring_reinit(struct netmap_kring *kring)
674 {
675 	struct netmap_ring *ring = kring->ring;
676 	u_int i, lim = kring->nkr_num_slots - 1;
677 	int errors = 0;
678 
679 	D("called for %s", kring->na->ifp->if_xname);
680 	if (ring->cur > lim)
681 		errors++;
682 	for (i = 0; i <= lim; i++) {
683 		u_int idx = ring->slot[i].buf_idx;
684 		u_int len = ring->slot[i].len;
685 		if (idx < 2 || idx >= netmap_total_buffers) {
686 			if (!errors++)
687 				D("bad buffer at slot %d idx %d len %d ", i, idx, len);
688 			ring->slot[i].buf_idx = 0;
689 			ring->slot[i].len = 0;
690 		} else if (len > NETMAP_BUF_SIZE) {
691 			ring->slot[i].len = 0;
692 			if (!errors++)
693 				D("bad len %d at slot %d idx %d",
694 					len, i, idx);
695 		}
696 	}
697 	if (errors) {
698 		int pos = kring - kring->na->tx_rings;
699 		int n = kring->na->num_tx_rings + 1;
700 
701 		D("total %d errors", errors);
702 		errors++;
703 		D("%s %s[%d] reinit, cur %d -> %d avail %d -> %d",
704 			kring->na->ifp->if_xname,
705 			pos < n ?  "TX" : "RX", pos < n ? pos : pos - n,
706 			ring->cur, kring->nr_hwcur,
707 			ring->avail, kring->nr_hwavail);
708 		ring->cur = kring->nr_hwcur;
709 		ring->avail = kring->nr_hwavail;
710 	}
711 	return (errors ? 1 : 0);
712 }
713 
714 
715 /*
716  * Set the ring ID. For devices with a single queue, a request
717  * for all rings is the same as a single ring.
718  */
719 static int
720 netmap_set_ringid(struct netmap_priv_d *priv, u_int ringid)
721 {
722 	struct ifnet *ifp = priv->np_ifp;
723 	struct netmap_adapter *na = NA(ifp);
724 	u_int i = ringid & NETMAP_RING_MASK;
725 	/* initially (np_qfirst == np_qlast) we don't want to lock */
726 	int need_lock = (priv->np_qfirst != priv->np_qlast);
727 	int lim = na->num_rx_rings;
728 
729 	if (na->num_tx_rings > lim)
730 		lim = na->num_tx_rings;
731 	if ( (ringid & NETMAP_HW_RING) && i >= lim) {
732 		D("invalid ring id %d", i);
733 		return (EINVAL);
734 	}
735 	if (need_lock)
736 		na->nm_lock(ifp, NETMAP_CORE_LOCK, 0);
737 	priv->np_ringid = ringid;
738 	if (ringid & NETMAP_SW_RING) {
739 		priv->np_qfirst = NETMAP_SW_RING;
740 		priv->np_qlast = 0;
741 	} else if (ringid & NETMAP_HW_RING) {
742 		priv->np_qfirst = i;
743 		priv->np_qlast = i + 1;
744 	} else {
745 		priv->np_qfirst = 0;
746 		priv->np_qlast = NETMAP_HW_RING;
747 	}
748 	priv->np_txpoll = (ringid & NETMAP_NO_TX_POLL) ? 0 : 1;
749 	if (need_lock)
750 		na->nm_lock(ifp, NETMAP_CORE_UNLOCK, 0);
751 	if (ringid & NETMAP_SW_RING)
752 		D("ringid %s set to SW RING", ifp->if_xname);
753 	else if (ringid & NETMAP_HW_RING)
754 		D("ringid %s set to HW RING %d", ifp->if_xname,
755 			priv->np_qfirst);
756 	else
757 		D("ringid %s set to all %d HW RINGS", ifp->if_xname, lim);
758 	return 0;
759 }
760 
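/*
 * For reference, the nr_ringid encodings handled above (constants
 * from net/netmap.h):
 *	0			bind all hardware rings
 *	NETMAP_HW_RING | i	bind only hardware ring i
 *	NETMAP_SW_RING		bind only the host (software) ring
 * any of which may be OR-ed with NETMAP_NO_TX_POLL to disable the
 * implicit txsync on poll().
 */
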
761 /*
762  * ioctl(2) support for the "netmap" device.
763  *
764  * Following is the list of accepted commands:
765  * - NIOCGINFO
766  * - SIOCGIFADDR	just for convenience
767  * - NIOCREGIF
768  * - NIOCUNREGIF
769  * - NIOCTXSYNC
770  * - NIOCRXSYNC
771  *
772  * Return 0 on success, errno otherwise.
773  */
774 static int
775 netmap_ioctl(struct cdev *dev, u_long cmd, caddr_t data,
776 	int fflag, struct thread *td)
777 {
778 	struct netmap_priv_d *priv = NULL;
779 	struct ifnet *ifp;
780 	struct nmreq *nmr = (struct nmreq *) data;
781 	struct netmap_adapter *na;
782 	int error;
783 	u_int i, lim;
784 	struct netmap_if *nifp;
785 
786 	(void)dev;	/* UNUSED */
787 	(void)fflag;	/* UNUSED */
788 #ifdef linux
789 #define devfs_get_cdevpriv(pp)				\
790 	({ *(struct netmap_priv_d **)pp = ((struct file *)td)->private_data; 	\
791 		(*pp ? 0 : ENOENT); })
792 
793 /* devfs_set_cdevpriv cannot fail on linux */
794 #define devfs_set_cdevpriv(p, fn)				\
795 	({ ((struct file *)td)->private_data = p; (p ? 0 : EINVAL); })
796 
797 
798 #define devfs_clear_cdevpriv()	do {				\
799 		netmap_dtor(priv); ((struct file *)td)->private_data = 0;	\
800 	} while (0)
801 #endif /* linux */
802 
803 	CURVNET_SET(TD_TO_VNET(td));
804 
805 	error = devfs_get_cdevpriv((void **)&priv);
806 	if (error != ENOENT && error != 0) {
807 		CURVNET_RESTORE();
808 		return (error);
809 	}
810 
811 	error = 0;	/* Could be ENOENT */
812 	nmr->nr_name[sizeof(nmr->nr_name) - 1] = '\0';	/* truncate name */
813 	switch (cmd) {
814 	case NIOCGINFO:		/* return capabilities etc */
815 		/* memsize is always valid */
816 		nmr->nr_memsize = nm_mem->nm_totalsize;
817 		nmr->nr_offset = 0;
818 		nmr->nr_rx_rings = nmr->nr_tx_rings = 0;
819 		nmr->nr_rx_slots = nmr->nr_tx_slots = 0;
820 		if (nmr->nr_version != NETMAP_API) {
821 			D("API mismatch got %d have %d",
822 				nmr->nr_version, NETMAP_API);
823 			nmr->nr_version = NETMAP_API;
824 			error = EINVAL;
825 			break;
826 		}
827 		if (nmr->nr_name[0] == '\0')	/* just get memory info */
828 			break;
829 		error = get_ifp(nmr->nr_name, &ifp); /* get a refcount */
830 		if (error)
831 			break;
832 		na = NA(ifp); /* retrieve netmap_adapter */
833 		nmr->nr_rx_rings = na->num_rx_rings;
834 		nmr->nr_tx_rings = na->num_tx_rings;
835 		nmr->nr_rx_slots = na->num_rx_desc;
836 		nmr->nr_tx_slots = na->num_tx_desc;
837 		nm_if_rele(ifp);	/* return the refcount */
838 		break;
839 
840 	case NIOCREGIF:
841 		if (nmr->nr_version != NETMAP_API) {
842 			nmr->nr_version = NETMAP_API;
843 			error = EINVAL;
844 			break;
845 		}
846 		if (priv != NULL) {	/* thread already registered */
847 			error = netmap_set_ringid(priv, nmr->nr_ringid);
848 			break;
849 		}
850 		/* find the interface and a reference */
851 		error = get_ifp(nmr->nr_name, &ifp); /* keep reference */
852 		if (error)
853 			break;
854 		na = NA(ifp); /* retrieve netmap adapter */
855 		/*
856 		 * Allocate the private per-thread structure.
857 		 * XXX perhaps we can use a blocking malloc ?
858 		 */
859 		priv = malloc(sizeof(struct netmap_priv_d), M_DEVBUF,
860 			      M_NOWAIT | M_ZERO);
861 		if (priv == NULL) {
862 			error = ENOMEM;
863 			nm_if_rele(ifp);   /* return the refcount */
864 			break;
865 		}
866 
867 		for (i = 10; i > 0; i--) {
868 			na->nm_lock(ifp, NETMAP_REG_LOCK, 0);
869 			if (!NETMAP_DELETING(na))
870 				break;
871 			na->nm_lock(ifp, NETMAP_REG_UNLOCK, 0);
872 			tsleep(na, 0, "NIOCREGIF", hz/10);
873 		}
874 		if (i == 0) {
875 			D("too many NIOCREGIF attempts, give up");
876 			error = EINVAL;
877 			free(priv, M_DEVBUF);
878 			nm_if_rele(ifp);	/* return the refcount */
879 			break;
880 		}
881 
882 		priv->np_ifp = ifp;	/* store the reference */
883 		error = netmap_set_ringid(priv, nmr->nr_ringid);
884 		if (error)
885 			goto error;
886 		priv->np_nifp = nifp = netmap_if_new(nmr->nr_name, na);
887 		if (nifp == NULL) { /* allocation failed */
888 			error = ENOMEM;
889 		} else if (ifp->if_capenable & IFCAP_NETMAP) {
890 			/* was already set */
891 		} else {
892 			/* Otherwise set the card in netmap mode
893 			 * and make it use the shared buffers.
894 			 */
895 			for (i = 0 ; i < na->num_tx_rings + 1; i++)
896 				mtx_init(&na->tx_rings[i].q_lock, "nm_txq_lock", MTX_NETWORK_LOCK, MTX_DEF);
897 			for (i = 0 ; i < na->num_rx_rings + 1; i++) {
898 				mtx_init(&na->rx_rings[i].q_lock, "nm_rxq_lock", MTX_NETWORK_LOCK, MTX_DEF);
899 			}
900 			error = na->nm_register(ifp, 1); /* mode on */
901 			if (error)
902 				netmap_dtor_locked(priv);
903 		}
904 
905 		if (error) {	/* reg. failed, release priv and ref */
906 error:
907 			na->nm_lock(ifp, NETMAP_REG_UNLOCK, 0);
908 			nm_if_rele(ifp);	/* return the refcount */
909 			bzero(priv, sizeof(*priv));
910 			free(priv, M_DEVBUF);
911 			break;
912 		}
913 
914 		na->nm_lock(ifp, NETMAP_REG_UNLOCK, 0);
915 		error = devfs_set_cdevpriv(priv, netmap_dtor);
916 
917 		if (error != 0) {
918 			/* could not assign the private storage for the
919 			 * thread, call the destructor explicitly.
920 			 */
921 			netmap_dtor(priv);
922 			break;
923 		}
924 
925 		/* return the offset of the netmap_if object */
926 		nmr->nr_rx_rings = na->num_rx_rings;
927 		nmr->nr_tx_rings = na->num_tx_rings;
928 		nmr->nr_rx_slots = na->num_rx_desc;
929 		nmr->nr_tx_slots = na->num_tx_desc;
930 		nmr->nr_memsize = nm_mem->nm_totalsize;
931 		nmr->nr_offset = netmap_if_offset(nifp);
932 		break;
933 
934 	case NIOCUNREGIF:
935 		if (priv == NULL) {
936 			error = ENXIO;
937 			break;
938 		}
939 
940 		/* the interface is unregistered inside the
941 		   destructor of the private data. */
942 		devfs_clear_cdevpriv();
943 		break;
944 
945 	case NIOCTXSYNC:
946 	case NIOCRXSYNC:
947 		if (priv == NULL) {
948 			error = ENXIO;
949 			break;
950 		}
951 		ifp = priv->np_ifp;	/* we have a reference */
952 		na = NA(ifp); /* retrieve netmap adapter */
953 		if (priv->np_qfirst == NETMAP_SW_RING) { /* host rings */
954 			if (cmd == NIOCTXSYNC)
955 				netmap_sync_to_host(na);
956 			else
957 				netmap_sync_from_host(na, NULL, NULL);
958 			break;
959 		}
960 		/* find the last ring to scan */
961 		lim = priv->np_qlast;
962 		if (lim == NETMAP_HW_RING)
963 			lim = (cmd == NIOCTXSYNC) ?
964 			    na->num_tx_rings : na->num_rx_rings;
965 
966 		for (i = priv->np_qfirst; i < lim; i++) {
967 			if (cmd == NIOCTXSYNC) {
968 				struct netmap_kring *kring = &na->tx_rings[i];
969 				if (netmap_verbose & NM_VERB_TXSYNC)
970 					D("pre txsync ring %d cur %d hwcur %d",
971 					    i, kring->ring->cur,
972 					    kring->nr_hwcur);
973 				na->nm_txsync(ifp, i, 1 /* do lock */);
974 				if (netmap_verbose & NM_VERB_TXSYNC)
975 					D("post txsync ring %d cur %d hwcur %d",
976 					    i, kring->ring->cur,
977 					    kring->nr_hwcur);
978 			} else {
979 				na->nm_rxsync(ifp, i, 1 /* do lock */);
980 				microtime(&na->rx_rings[i].ring->ts);
981 			}
982 		}
983 
984 		break;
985 
986 #ifdef __FreeBSD__
987 	case BIOCIMMEDIATE:
988 	case BIOCGHDRCMPLT:
989 	case BIOCSHDRCMPLT:
990 	case BIOCSSEESENT:
991 		D("ignore BIOCIMMEDIATE/BIOCSHDRCMPLT/BIOCSHDRCMPLT/BIOCSSEESENT");
992 		break;
993 
994 	default:	/* allow device-specific ioctls */
995 	    {
996 		struct socket so;
997 		bzero(&so, sizeof(so));
998 		error = get_ifp(nmr->nr_name, &ifp); /* keep reference */
999 		if (error)
1000 			break;
1001 		so.so_vnet = ifp->if_vnet;
1002 		// so->so_proto not null.
1003 		error = ifioctl(&so, cmd, data, td);
1004 		nm_if_rele(ifp);
1005 		break;
1006 	    }
1007 
1008 #else /* linux */
1009 	default:
1010 		error = EOPNOTSUPP;
1011 #endif /* linux */
1012 	}
1013 
1014 	CURVNET_RESTORE();
1015 	return (error);
1016 }
1017 
1018 
1019 /*
1020  * select(2) and poll(2) handlers for the "netmap" device.
1021  *
1022  * Can be called for one or more queues.
1023  * Return the event mask corresponding to ready events.
1024  * If there are no ready events, do a selrecord on either individual
1025  * selfd or on the global one.
1026  * Device-dependent parts (locking and sync of tx/rx rings)
1027  * are done through callbacks.
1028  *
1029  * On linux, 'dev' is really pwait, the poll table, and 'td' is a struct file *.
1030  * The first one is remapped to pwait as selrecord() uses the name as a
1031  * hidden argument.
1032  */
1033 static int
1034 netmap_poll(struct cdev *dev, int events, struct thread *td)
1035 {
1036 	struct netmap_priv_d *priv = NULL;
1037 	struct netmap_adapter *na;
1038 	struct ifnet *ifp;
1039 	struct netmap_kring *kring;
1040 	u_int core_lock, i, check_all, want_tx, want_rx, revents = 0;
1041 	u_int lim_tx, lim_rx;
1042 	enum { NO_CL, NEED_CL, LOCKED_CL };	/* see below */
1043 	void *pwait = dev;	/* linux compatibility */
1044 
1045 	(void)pwait;
1046 
1047 	if (devfs_get_cdevpriv((void **)&priv) != 0 || priv == NULL)
1048 		return POLLERR;
1049 
1050 	ifp = priv->np_ifp;
1051 	// XXX check for deleting() ?
1052 	if ( (ifp->if_capenable & IFCAP_NETMAP) == 0)
1053 		return POLLERR;
1054 
1055 	if (netmap_verbose & 0x8000)
1056 		D("device %s events 0x%x", ifp->if_xname, events);
1057 	want_tx = events & (POLLOUT | POLLWRNORM);
1058 	want_rx = events & (POLLIN | POLLRDNORM);
1059 
1060 	na = NA(ifp); /* retrieve netmap adapter */
1061 
1062 	lim_tx = na->num_tx_rings;
1063 	lim_rx = na->num_rx_rings;
1064 	/* how many queues we are scanning */
1065 	if (priv->np_qfirst == NETMAP_SW_RING) {
1066 		if (priv->np_txpoll || want_tx) {
1067 			/* push any packets up, then we are always ready */
1068 			kring = &na->tx_rings[lim_tx];
1069 			netmap_sync_to_host(na);
1070 			revents |= want_tx;
1071 		}
1072 		if (want_rx) {
1073 			kring = &na->rx_rings[lim_rx];
1074 			if (kring->ring->avail == 0)
1075 				netmap_sync_from_host(na, td, dev);
1076 			if (kring->ring->avail > 0) {
1077 				revents |= want_rx;
1078 			}
1079 		}
1080 		return (revents);
1081 	}
1082 
1083 	/*
1084 	 * check_all is set if the card has more than one queue and
1085 	 * the client is polling all of them. If true, we sleep on
1086 	 * the "global" selfd, otherwise we sleep on individual selfd
1087 	 * (we can only sleep on one of them per direction).
1088 	 * The interrupt routine in the driver should always wake on
1089 	 * the individual selfd, and also on the global one if the card
1090 	 * has more than one ring.
1091 	 *
1092 	 * If the card has only one lock, we just use that.
1093 	 * If the card has separate ring locks, we just use those
1094 	 * unless we are doing check_all, in which case the whole
1095 	 * loop is wrapped by the global lock.
1096 	 * We acquire locks only when necessary: if poll is called
1097 	 * when buffers are available, we can just return without locks.
1098 	 *
1099 	 * rxsync() is only called if we run out of buffers on a POLLIN.
1100 	 * txsync() is called if we run out of buffers on POLLOUT, or
1101 	 * there are pending packets to send. The latter can be disabled
1102 	 * passing NETMAP_NO_TX_POLL in the NIOCREG call.
1103 	 */
1104 	check_all = (priv->np_qlast == NETMAP_HW_RING) && (lim_tx > 1 || lim_rx > 1);
1105 
1106 	/*
1107 	 * core_lock indicates what to do with the core lock.
1108 	 * The core lock is used when either the card has no individual
1109 	 * locks, or it has individual locks but we are checking all
1110 	 * rings so we need the core lock to avoid missing wakeup events.
1111 	 *
1112 	 * It has three possible states:
1113 	 * NO_CL	we don't need to use the core lock, e.g.
1114 	 *		because we are protected by individual locks.
1115 	 * NEED_CL	we need the core lock. In this case, when we
1116 	 *		call the lock routine, move to LOCKED_CL
1117 	 *		to remember to release the lock once done.
1118 	 * LOCKED_CL	core lock is set, so we need to release it.
1119 	 */
1120 	core_lock = (check_all || !na->separate_locks) ? NEED_CL : NO_CL;
1121 #ifdef NM_BRIDGE
1122 	/* the bridge uses separate locks */
1123 	if (na->nm_register == bdg_netmap_reg) {
1124 		ND("not using core lock for %s", ifp->if_xname);
1125 		core_lock = NO_CL;
1126 	}
1127 #endif /* NM_BRIDGE */
1128 	if (priv->np_qlast != NETMAP_HW_RING) {
1129 		lim_tx = lim_rx = priv->np_qlast;
1130 	}
1131 
1132 	/*
1133 	 * We start with a lock free round which is good if we have
1134 	 * data available. If this fails, then lock and call the sync
1135 	 * routines.
1136 	 */
1137 	for (i = priv->np_qfirst; want_rx && i < lim_rx; i++) {
1138 		kring = &na->rx_rings[i];
1139 		if (kring->ring->avail > 0) {
1140 			revents |= want_rx;
1141 			want_rx = 0;	/* also breaks the loop */
1142 		}
1143 	}
1144 	for (i = priv->np_qfirst; want_tx && i < lim_tx; i++) {
1145 		kring = &na->tx_rings[i];
1146 		if (kring->ring->avail > 0) {
1147 			revents |= want_tx;
1148 			want_tx = 0;	/* also breaks the loop */
1149 		}
1150 	}
1151 
1152 	/*
1153 	 * If we need to push packets out (priv->np_txpoll) or want_tx is
1154 	 * still set, we do need to run the txsync calls (on all rings,
1155 	 * so that the tx rings do not stall).
1156 	 */
1157 	if (priv->np_txpoll || want_tx) {
1158 		for (i = priv->np_qfirst; i < lim_tx; i++) {
1159 			kring = &na->tx_rings[i];
1160 			/*
1161 			 * Skip the current ring if want_tx == 0
1162 			 * (we have already done a successful sync on
1163 			 * a previous ring) AND kring->cur == kring->hwcur
1164 			 * (there are no pending transmissions for this ring).
1165 			 */
1166 			if (!want_tx && kring->ring->cur == kring->nr_hwcur)
1167 				continue;
1168 			if (core_lock == NEED_CL) {
1169 				na->nm_lock(ifp, NETMAP_CORE_LOCK, 0);
1170 				core_lock = LOCKED_CL;
1171 			}
1172 			if (na->separate_locks)
1173 				na->nm_lock(ifp, NETMAP_TX_LOCK, i);
1174 			if (netmap_verbose & NM_VERB_TXSYNC)
1175 				D("send %d on %s %d",
1176 					kring->ring->cur,
1177 					ifp->if_xname, i);
1178 			if (na->nm_txsync(ifp, i, 0 /* no lock */))
1179 				revents |= POLLERR;
1180 
1181 			/* Check avail/call selrecord only if called with POLLOUT */
1182 			if (want_tx) {
1183 				if (kring->ring->avail > 0) {
1184 					/* stop at the first ring. We don't risk
1185 					 * starvation.
1186 					 */
1187 					revents |= want_tx;
1188 					want_tx = 0;
1189 				} else if (!check_all)
1190 					selrecord(td, &kring->si);
1191 			}
1192 			if (na->separate_locks)
1193 				na->nm_lock(ifp, NETMAP_TX_UNLOCK, i);
1194 		}
1195 	}
1196 
1197 	/*
1198 	 * now if want_rx is still set we need to lock and rxsync.
1199 	 * Do it on all rings because otherwise we starve.
1200 	 */
1201 	if (want_rx) {
1202 		for (i = priv->np_qfirst; i < lim_rx; i++) {
1203 			kring = &na->rx_rings[i];
1204 			if (core_lock == NEED_CL) {
1205 				na->nm_lock(ifp, NETMAP_CORE_LOCK, 0);
1206 				core_lock = LOCKED_CL;
1207 			}
1208 			if (na->separate_locks)
1209 				na->nm_lock(ifp, NETMAP_RX_LOCK, i);
1210 
1211 			if (na->nm_rxsync(ifp, i, 0 /* no lock */))
1212 				revents |= POLLERR;
1213 			if (netmap_no_timestamp == 0 ||
1214 					kring->ring->flags & NR_TIMESTAMP) {
1215 				microtime(&kring->ring->ts);
1216 			}
1217 
1218 			if (kring->ring->avail > 0)
1219 				revents |= want_rx;
1220 			else if (!check_all)
1221 				selrecord(td, &kring->si);
1222 			if (na->separate_locks)
1223 				na->nm_lock(ifp, NETMAP_RX_UNLOCK, i);
1224 		}
1225 	}
1226 	if (check_all && revents == 0) { /* signal on the global queue */
1227 		if (want_tx)
1228 			selrecord(td, &na->tx_si);
1229 		if (want_rx)
1230 			selrecord(td, &na->rx_si);
1231 	}
1232 	if (core_lock == LOCKED_CL)
1233 		na->nm_lock(ifp, NETMAP_CORE_UNLOCK, 0);
1234 
1235 	return (revents);
1236 }
1237 
1238 /*------- driver support routines ------*/
1239 
1240 /*
1241  * default lock wrapper.
1242  */
1243 static void
1244 netmap_lock_wrapper(struct ifnet *dev, int what, u_int queueid)
1245 {
1246 	struct netmap_adapter *na = NA(dev);
1247 
1248 	switch (what) {
1249 #ifdef linux	/* some systems do not need a lock on register */
1250 	case NETMAP_REG_LOCK:
1251 	case NETMAP_REG_UNLOCK:
1252 		break;
1253 #endif /* linux */
1254 
1255 	case NETMAP_CORE_LOCK:
1256 		mtx_lock(&na->core_lock);
1257 		break;
1258 
1259 	case NETMAP_CORE_UNLOCK:
1260 		mtx_unlock(&na->core_lock);
1261 		break;
1262 
1263 	case NETMAP_TX_LOCK:
1264 		mtx_lock(&na->tx_rings[queueid].q_lock);
1265 		break;
1266 
1267 	case NETMAP_TX_UNLOCK:
1268 		mtx_unlock(&na->tx_rings[queueid].q_lock);
1269 		break;
1270 
1271 	case NETMAP_RX_LOCK:
1272 		mtx_lock(&na->rx_rings[queueid].q_lock);
1273 		break;
1274 
1275 	case NETMAP_RX_UNLOCK:
1276 		mtx_unlock(&na->rx_rings[queueid].q_lock);
1277 		break;
1278 	}
1279 }
1280 
1281 
1282 /*
1283  * Initialize a ``netmap_adapter`` object created by driver on attach.
1284  * We allocate a block of memory with room for a struct netmap_adapter
1285  * plus two sets of N+1 struct netmap_kring (where N is the number
1286  * of hardware rings in that direction):
1287  * krings	0..N-1	are for the hardware queues.
1288  * kring	N	is for the host stack queue.
1289  * (The selinfo covering all queues lives in na->tx_si and na->rx_si.)
1290  * Return 0 on success, ENOMEM otherwise.
1291  *
1292  * By default the receive and transmit adapter ring counts are both initialized
1293  * to num_queues.  na->num_tx_rings can be set for cards with different tx/rx
1294  * setups.
1295  */
1296 int
1297 netmap_attach(struct netmap_adapter *na, int num_queues)
1298 {
1299 	int n, size;
1300 	void *buf;
1301 	struct ifnet *ifp = na->ifp;
1302 
1303 	if (ifp == NULL) {
1304 		D("ifp not set, giving up");
1305 		return EINVAL;
1306 	}
1307 	/* clear other fields ? */
1308 	na->refcount = 0;
1309 	if (na->num_tx_rings == 0)
1310 		na->num_tx_rings = num_queues;
1311 	na->num_rx_rings = num_queues;
1312 	/* in each direction we have N+1 resources
1313 	 * 0..n-1	are the hardware rings
1314 	 * n		is the ring attached to the stack.
1315 	 */
1316 	n = na->num_rx_rings + na->num_tx_rings + 2;
1317 	size = sizeof(*na) + n * sizeof(struct netmap_kring);
1318 
1319 	buf = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
1320 	if (buf) {
1321 		WNA(ifp) = buf;
1322 		na->tx_rings = (void *)((char *)buf + sizeof(*na));
1323 		na->rx_rings = na->tx_rings + na->num_tx_rings + 1;
1324 		bcopy(na, buf, sizeof(*na));
1325 		ifp->if_capabilities |= IFCAP_NETMAP;
1326 
1327 		na = buf;
1328 		/* Core lock initialized here.  Others are initialized after
1329 		 * netmap_if_new.
1330 		 */
1331 		mtx_init(&na->core_lock, "netmap core lock", MTX_NETWORK_LOCK,
1332 		    MTX_DEF);
1333 		if (na->nm_lock == NULL) {
1334 			ND("using default locks for %s", ifp->if_xname);
1335 			na->nm_lock = netmap_lock_wrapper;
1336 		}
1337 	}
1338 #ifdef linux
1339 	if (ifp->netdev_ops) {
1340 		D("netdev_ops %p", ifp->netdev_ops);
1341 		/* prepare a clone of the netdev ops */
1342 		na->nm_ndo = *ifp->netdev_ops;
1343 	}
1344 	na->nm_ndo.ndo_start_xmit = linux_netmap_start;
1345 #endif
1346 	D("%s for %s", buf ? "ok" : "failed", ifp->if_xname);
1347 
1348 	return (buf ? 0 : ENOMEM);
1349 }
1350 
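/*
 * Sketch of the expected call from a NIC driver's attach routine.
 * Field values and the foo_* callbacks are illustrative, loosely
 * modeled on the netmap-enabled drivers:
 *
 *	struct netmap_adapter na;
 *
 *	bzero(&na, sizeof(na));
 *	na.ifp = adapter->ifp;
 *	na.separate_locks = 0;
 *	na.num_tx_desc = adapter->num_tx_desc;
 *	na.num_rx_desc = adapter->num_rx_desc;
 *	na.nm_txsync = foo_netmap_txsync;
 *	na.nm_rxsync = foo_netmap_rxsync;
 *	na.nm_register = foo_netmap_reg;
 *	netmap_attach(&na, adapter->num_queues);
 */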
1351 
1352 /*
1353  * Free the allocated memory linked to the given ``netmap_adapter``
1354  * object.
1355  */
1356 void
1357 netmap_detach(struct ifnet *ifp)
1358 {
1359 	struct netmap_adapter *na = NA(ifp);
1360 
1361 	if (!na)
1362 		return;
1363 
1364 	mtx_destroy(&na->core_lock);
1365 
1366 	bzero(na, sizeof(*na));
1367 	WNA(ifp) = NULL;
1368 	free(na, M_DEVBUF);
1369 }
1370 
1371 
1372 /*
1373  * Intercept packets from the network stack and pass them
1374  * to netmap as incoming packets on the 'software' ring.
1375  * We are not locked when called.
1376  */
1377 int
1378 netmap_start(struct ifnet *ifp, struct mbuf *m)
1379 {
1380 	struct netmap_adapter *na = NA(ifp);
1381 	struct netmap_kring *kring = &na->rx_rings[na->num_rx_rings];
1382 	u_int i, len = MBUF_LEN(m);
1383 	u_int error = EBUSY, lim = kring->nkr_num_slots - 1;
1384 	struct netmap_slot *slot;
1385 
1386 	if (netmap_verbose & NM_VERB_HOST)
1387 		D("%s packet %d len %d from the stack", ifp->if_xname,
1388 			kring->nr_hwcur + kring->nr_hwavail, len);
1389 	na->nm_lock(ifp, NETMAP_CORE_LOCK, 0);
1390 	if (kring->nr_hwavail >= lim) {
1391 		if (netmap_verbose)
1392 			D("stack ring %s full\n", ifp->if_xname);
1393 		goto done;	/* no space */
1394 	}
1395 	if (len > NETMAP_BUF_SIZE) {
1396 		D("drop packet size %d > %d", len, NETMAP_BUF_SIZE);
1397 		goto done;	/* too long for us */
1398 	}
1399 
1400 	/* compute the insert position */
1401 	i = kring->nr_hwcur + kring->nr_hwavail;
1402 	if (i > lim)
1403 		i -= lim + 1;
1404 	slot = &kring->ring->slot[i];
1405 	m_copydata(m, 0, len, NMB(slot));
1406 	slot->len = len;
1407 	kring->nr_hwavail++;
1408 	if (netmap_verbose  & NM_VERB_HOST)
1409 		D("wake up host ring %s %d", na->ifp->if_xname, na->num_rx_rings);
1410 	selwakeuppri(&kring->si, PI_NET);
1411 	error = 0;
1412 done:
1413 	na->nm_lock(ifp, NETMAP_CORE_UNLOCK, 0);
1414 
1415 	/* release the mbuf in either case, success or failure. As an
1416 	 * alternative, put the mbuf in a free list and free the list
1417 	 * only when really necessary.
1418 	 */
1419 	m_freem(m);
1420 
1421 	return (error);
1422 }
1423 
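/*
 * Sketch of how a driver's nm_register callback diverts the host
 * stack output to this function when entering netmap mode (and
 * restores the saved pointer when leaving it); names illustrative:
 *
 *	na->if_transmit = ifp->if_transmit;	// save the stack path
 *	ifp->if_transmit = netmap_start;	// divert mbufs to netmap
 */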
1424 
1425 /*
1426  * netmap_reset() is called by the driver routines when reinitializing
1427  * a ring. The driver is in charge of locking to protect the kring.
1428  * If netmap mode is not set just return NULL.
1429  */
1430 struct netmap_slot *
1431 netmap_reset(struct netmap_adapter *na, enum txrx tx, int n,
1432 	u_int new_cur)
1433 {
1434 	struct netmap_kring *kring;
1435 	int new_hwofs, lim;
1436 
1437 	if (na == NULL)
1438 		return NULL;	/* no netmap support here */
1439 	if (!(na->ifp->if_capenable & IFCAP_NETMAP))
1440 		return NULL;	/* nothing to reinitialize */
1441 
1442 	if (tx == NR_TX) {
1443 		kring = na->tx_rings + n;
1444 		new_hwofs = kring->nr_hwcur - new_cur;
1445 	} else {
1446 		kring = na->rx_rings + n;
1447 		new_hwofs = kring->nr_hwcur + kring->nr_hwavail - new_cur;
1448 	}
1449 	lim = kring->nkr_num_slots - 1;
1450 	if (new_hwofs > lim)
1451 		new_hwofs -= lim + 1;
1452 
1453 	/* Always set the new offset value and realign the ring. */
1454 	kring->nkr_hwofs = new_hwofs;
1455 	if (tx == NR_TX)
1456 		kring->nr_hwavail = kring->nkr_num_slots - 1;
1457 	D("new hwofs %d on %s %s[%d]",
1458 			kring->nkr_hwofs, na->ifp->if_xname,
1459 			tx == NR_TX ? "TX" : "RX", n);
1460 
1461 #if 0 // def linux
1462 	/* XXX check that the mappings are correct */
1463 	/* need ring_nr, adapter->pdev, direction */
1464 	buffer_info->dma = dma_map_single(&pdev->dev, addr, adapter->rx_buffer_len, DMA_FROM_DEVICE);
1465 	if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
1466 		D("error mapping rx netmap buffer %d", i);
1467 		// XXX fix error handling
1468 	}
1469 
1470 #endif /* linux */
1471 	/*
1472 	 * Wakeup on the individual and the global selinfo.
1473 	 * We do the wakeup here, but the ring is not yet reconfigured.
1474 	 * However, we are under lock so there are no races.
1475 	 */
1476 	selwakeuppri(&kring->si, PI_NET);
1477 	selwakeuppri(tx == NR_TX ? &na->tx_si : &na->rx_si, PI_NET);
1478 	return kring->ring->slot;
1479 }
1480 
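/*
 * Sketch of a driver's ring-init path calling netmap_reset(); 'rxr'
 * and the descriptor-programming step are illustrative:
 *
 *	struct netmap_slot *slot = netmap_reset(na, NR_RX, rxr->me, 0);
 *
 *	if (slot != NULL) {
 *		// netmap mode: point the NIC descriptors to the
 *		// netmap buffers instead of the mbuf ones
 *	}
 */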
1481 
1482 /*
1483  * Default functions to handle rx/tx interrupts
1484  * we have 4 cases:
1485  * 1 ring, single lock:
1486  *	lock(core); wake(i=0); unlock(core)
1487  * N rings, single lock:
1488  *	lock(core); wake(i); wake(N+1) unlock(core)
1489  * 1 ring, separate locks: (i=0)
1490  *	lock(i); wake(i); unlock(i)
1491  * N rings, separate locks:
1492  *	lock(i); wake(i); unlock(i); lock(core) wake(N+1) unlock(core)
1493  * work_done is non-null on the RX path.
1494  */
1495 int
1496 netmap_rx_irq(struct ifnet *ifp, int q, int *work_done)
1497 {
1498 	struct netmap_adapter *na;
1499 	struct netmap_kring *r;
1500 	NM_SELINFO_T *main_wq;
1501 
1502 	if (!(ifp->if_capenable & IFCAP_NETMAP))
1503 		return 0;
1504 	na = NA(ifp);
1505 	if (work_done) { /* RX path */
1506 		r = na->rx_rings + q;
1507 		r->nr_kflags |= NKR_PENDINTR;
1508 		main_wq = (na->num_rx_rings > 1) ? &na->rx_si : NULL;
1509 	} else { /* tx path */
1510 		r = na->tx_rings + q;
1511 		main_wq = (na->num_tx_rings > 1) ? &na->tx_si : NULL;
1512 		work_done = &q; /* dummy */
1513 	}
1514 	if (na->separate_locks) {
1515 		mtx_lock(&r->q_lock);
1516 		selwakeuppri(&r->si, PI_NET);
1517 		mtx_unlock(&r->q_lock);
1518 		if (main_wq) {
1519 			mtx_lock(&na->core_lock);
1520 			selwakeuppri(main_wq, PI_NET);
1521 			mtx_unlock(&na->core_lock);
1522 		}
1523 	} else {
1524 		mtx_lock(&na->core_lock);
1525 		selwakeuppri(&r->si, PI_NET);
1526 		if (main_wq)
1527 			selwakeuppri(main_wq, PI_NET);
1528 		mtx_unlock(&na->core_lock);
1529 	}
1530 	*work_done = 1; /* do not fire napi again */
1531 	return 1;
1532 }
1533 
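/*
 * Sketch of the expected call from a driver's rx interrupt/rxeof
 * path ('rxr' and 'work_done' are illustrative):
 *
 *	if (netmap_rx_irq(ifp, rxr->me, &work_done))
 *		return;	// netmap clients have been woken up
 */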
1534 
1535 #ifdef linux	/* linux-specific routines */
1536 
1537 /*
1538  * Remap linux arguments into the FreeBSD call.
1539  * - pwait is the poll table, passed as 'dev';
1540  *   If pwait == NULL someone else already woke up before. We can report
1541  *   events but they are filtered upstream.
1542  *   If pwait != NULL, then pwait->key contains the list of events.
1543  * - events is computed from pwait as above.
1544  * - file is passed as 'td';
1545  */
1546 static u_int
1547 linux_netmap_poll(struct file * file, struct poll_table_struct *pwait)
1548 {
1549 #if LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0)
1550 	int events = pwait ? pwait->key : POLLIN | POLLOUT;
1551 #else /* in 3.4.0 field 'key' was renamed to '_key' */
1552 	int events = pwait ? pwait->_key : POLLIN | POLLOUT;
1553 #endif
1554 	return netmap_poll((void *)pwait, events, (void *)file);
1555 }
1556 
1557 static int
1558 linux_netmap_mmap(struct file *f, struct vm_area_struct *vma)
1559 {
1560 	int lut_skip, i, j;
1561 	int user_skip = 0;
1562 	struct lut_entry *l_entry;
1563 	const struct netmap_obj_pool *p[] = {
1564 		nm_mem->nm_if_pool,
1565 		nm_mem->nm_ring_pool,
1566 		nm_mem->nm_buf_pool };
1567 	/*
1568 	 * vma->vm_start: start of mapping user address space
1569 	 * vma->vm_end: end of the mapping user address space
1570 	 */
1571 
1572 	(void)f;	/* UNUSED */
1573 	// XXX security checks
1574 
1575 	for (i = 0; i < 3; i++) {  /* loop through obj_pools */
1576 		/*
1577 		 * In each pool, memory is allocated in clusters
1578 		 * of size _clustsize, each containing clustentries
1579 		 * entries. For each object k we already store the
1580 		 * vtophys mapping in lut[k] so we use that, scanning
1581 		 * the lut[] array in steps of clustentries,
1582 		 * and we map each cluster (not individual pages,
1583 		 * which would be overkill).
1584 		 */
1585 		for (lut_skip = 0, j = 0; j < p[i]->_numclusters; j++) {
1586 			l_entry = &p[i]->lut[lut_skip];
1587 			if (remap_pfn_range(vma, vma->vm_start + user_skip,
1588 					l_entry->paddr >> PAGE_SHIFT, p[i]->_clustsize,
1589 					vma->vm_page_prot))
1590 				return -EAGAIN; // XXX check return value
1591 			lut_skip += p[i]->clustentries;
1592 			user_skip += p[i]->_clustsize;
1593 		}
1594 	}
1595 
1596 	return 0;
1597 }
1598 
1599 static netdev_tx_t
1600 linux_netmap_start(struct sk_buff *skb, struct net_device *dev)
1601 {
1602 	netmap_start(dev, skb);
1603 	return (NETDEV_TX_OK);
1604 }
1605 
1606 
1607 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37)	// XXX was 38
1608 #define LIN_IOCTL_NAME	.ioctl
1609 int
1610 linux_netmap_ioctl(struct inode *inode, struct file *file, u_int cmd, u_long data /* arg */)
1611 #else
1612 #define LIN_IOCTL_NAME	.unlocked_ioctl
1613 long
1614 linux_netmap_ioctl(struct file *file, u_int cmd, u_long data /* arg */)
1615 #endif
1616 {
1617 	int ret;
1618 	struct nmreq nmr;
1619 	bzero(&nmr, sizeof(nmr));
1620 
1621 	if (data && copy_from_user(&nmr, (void *)data, sizeof(nmr) ) != 0)
1622 		return -EFAULT;
1623 	ret = netmap_ioctl(NULL, cmd, (caddr_t)&nmr, 0, (void *)file);
1624 	if (data && copy_to_user((void*)data, &nmr, sizeof(nmr) ) != 0)
1625 		return -EFAULT;
1626 	return -ret;
1627 }
1628 
1629 
1630 static int
1631 netmap_release(struct inode *inode, struct file *file)
1632 {
1633 	(void)inode;	/* UNUSED */
1634 	if (file->private_data)
1635 		netmap_dtor(file->private_data);
1636 	return (0);
1637 }
1638 
1639 
1640 static struct file_operations netmap_fops = {
1641     .mmap = linux_netmap_mmap,
1642     LIN_IOCTL_NAME = linux_netmap_ioctl,
1643     .poll = linux_netmap_poll,
1644     .release = netmap_release,
1645 };
1646 
1647 static struct miscdevice netmap_cdevsw = {	/* same name as FreeBSD */
1648 	MISC_DYNAMIC_MINOR,
1649 	"netmap",
1650 	&netmap_fops,
1651 };
1652 
1653 static int netmap_init(void);
1654 static void netmap_fini(void);
1655 
1656 /* Errors have negative values on linux */
1657 static int linux_netmap_init(void)
1658 {
1659 	return -netmap_init();
1660 }
1661 
1662 module_init(linux_netmap_init);
1663 module_exit(netmap_fini);
1664 /* export certain symbols to other modules */
1665 EXPORT_SYMBOL(netmap_attach);		// driver attach routines
1666 EXPORT_SYMBOL(netmap_detach);		// driver detach routines
1667 EXPORT_SYMBOL(netmap_ring_reinit);	// ring init on error
1668 EXPORT_SYMBOL(netmap_buffer_lut);
1669 EXPORT_SYMBOL(netmap_total_buffers);	// index check
1670 EXPORT_SYMBOL(netmap_buffer_base);
1671 EXPORT_SYMBOL(netmap_reset);		// ring init routines
1672 EXPORT_SYMBOL(netmap_buf_size);
1673 EXPORT_SYMBOL(netmap_rx_irq);		// default irq handler
1674 EXPORT_SYMBOL(netmap_no_pendintr);	// XXX mitigation - should go away
1675 
1676 
1677 MODULE_AUTHOR("http://info.iet.unipi.it/~luigi/netmap/");
1678 MODULE_DESCRIPTION("The netmap packet I/O framework");
1679 MODULE_LICENSE("Dual BSD/GPL"); /* the code here is all BSD. */
1680 
1681 #else /* __FreeBSD__ */
1682 
1683 static struct cdevsw netmap_cdevsw = {
1684 	.d_version = D_VERSION,
1685 	.d_name = "netmap",
1686 	.d_mmap = netmap_mmap,
1687 	.d_ioctl = netmap_ioctl,
1688 	.d_poll = netmap_poll,
1689 };
1690 #endif /* __FreeBSD__ */
1691 
1692 #ifdef NM_BRIDGE
1693 /*
1694  *---- support for virtual bridge -----
1695  */
1696 
1697 /* ----- FreeBSD if_bridge hash function ------- */
1698 
1699 /*
1700  * The following hash function is adapted from "Hash Functions" by Bob Jenkins
1701  * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
1702  *
1703  * http://www.burtleburtle.net/bob/hash/spooky.html
1704  */
1705 #define mix(a, b, c)                                                    \
1706 do {                                                                    \
1707         a -= b; a -= c; a ^= (c >> 13);                                 \
1708         b -= c; b -= a; b ^= (a << 8);                                  \
1709         c -= a; c -= b; c ^= (b >> 13);                                 \
1710         a -= b; a -= c; a ^= (c >> 12);                                 \
1711         b -= c; b -= a; b ^= (a << 16);                                 \
1712         c -= a; c -= b; c ^= (b >> 5);                                  \
1713         a -= b; a -= c; a ^= (c >> 3);                                  \
1714         b -= c; b -= a; b ^= (a << 10);                                 \
1715         c -= a; c -= b; c ^= (b >> 15);                                 \
1716 } while (/*CONSTCOND*/0)
1717 
1718 static __inline uint32_t
1719 nm_bridge_rthash(const uint8_t *addr)
1720 {
        uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = 0; // hash key
1722 
1723         b += addr[5] << 8;
1724         b += addr[4];
1725         a += addr[3] << 24;
1726         a += addr[2] << 16;
1727         a += addr[1] << 8;
1728         a += addr[0];
1729 
1730         mix(a, b, c);
1731 #define BRIDGE_RTHASH_MASK	(NM_BDG_HASH-1)
1732         return (c & BRIDGE_RTHASH_MASK);
1733 }
1734 
1735 #undef mix
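
/*
 * Usage sketch (illustration only, not compiled): how a MAC address in
 * wire order maps to a bucket of the forwarding hash table. The address
 * bytes below are hypothetical.
 */
#if 0
	uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint32_t bucket = nm_bridge_rthash(mac);	/* in [0, NM_BDG_HASH-1] */
#endif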
1736 
1737 
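/*
 * Callback invoked by netmap when a bridge port enters (onoff != 0) or
 * leaves netmap mode. Ports are preallocated in b->bdg_ports[]; this
 * routine only toggles the corresponding bit in the b->act_ports mask.
 */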
1738 static int
1739 bdg_netmap_reg(struct ifnet *ifp, int onoff)
1740 {
1741 	int i, err = 0;
1742 	struct nm_bridge *b = ifp->if_bridge;
1743 
1744 	BDG_LOCK(b);
1745 	if (onoff) {
		/* the interface must already be in the list;
		 * we only need to mark the port as active
		 */
1749 		ND("should attach %s to the bridge", ifp->if_xname);
1750 		for (i=0; i < NM_BDG_MAXPORTS; i++)
1751 			if (b->bdg_ports[i] == ifp)
1752 				break;
1753 		if (i == NM_BDG_MAXPORTS) {
			D("%s not in the bridge port list", ifp->if_xname);
1755 			err = EINVAL;
1756 			goto done;
1757 		}
1758 		ND("setting %s in netmap mode", ifp->if_xname);
1759 		ifp->if_capenable |= IFCAP_NETMAP;
1760 		NA(ifp)->bdg_port = i;
		b->act_ports |= ((uint64_t)1 << i);
1762 		b->bdg_ports[i] = ifp;
1763 	} else {
1764 		/* should be in the list, too -- remove from the mask */
1765 		ND("removing %s from netmap mode", ifp->if_xname);
1766 		ifp->if_capenable &= ~IFCAP_NETMAP;
1767 		i = NA(ifp)->bdg_port;
		b->act_ports &= ~((uint64_t)1 << i);
1769 	}
1770 done:
1771 	BDG_UNLOCK(b);
1772 	return err;
1773 }
1774 
1775 
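/*
 * Flush a batch of n packets from the forwarding table ft of port ifp.
 * First pass: learn each source MAC (store the source port mask in the
 * hash table) and compute each packet's destination port mask, falling
 * back to flooding all active ports when the destination is unknown or
 * multicast. Second pass: for each destination port, copy the matching
 * packets into rx ring 0 under the ring lock and wake up sleepers.
 */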
1776 static int
1777 nm_bdg_flush(struct nm_bdg_fwd *ft, int n, struct ifnet *ifp)
1778 {
1779 	int i, ifn;
1780 	uint64_t all_dst, dst;
1781 	uint32_t sh, dh;
	uint64_t mysrc = (uint64_t)1 << NA(ifp)->bdg_port; /* 64-bit shift */
1783 	uint64_t smac, dmac;
1784 	struct netmap_slot *slot;
1785 	struct nm_bridge *b = ifp->if_bridge;
1786 
	ND("prepare to send %d packets, act_ports 0x%x", n, (uint32_t)b->act_ports);
1788 	/* only consider valid destinations */
1789 	all_dst = (b->act_ports & ~mysrc);
1790 	/* first pass: hash and find destinations */
1791 	for (i = 0; likely(i < n); i++) {
1792 		uint8_t *buf = ft[i].buf;
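		/*
		 * Load the dst and src MAC addresses with two unaligned
		 * little-endian 64-bit reads: bytes 0..5 are the
		 * destination (masked down to 48 bits), bytes 6..11 the
		 * source (read at buf + 4, then shifted right by 16).
		 */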
1793 		dmac = le64toh(*(uint64_t *)(buf)) & 0xffffffffffff;
1794 		smac = le64toh(*(uint64_t *)(buf + 4));
1795 		smac >>= 16;
1796 		if (unlikely(netmap_verbose)) {
1797 		    uint8_t *s = buf+6, *d = buf;
1798 		    D("%d len %4d %02x:%02x:%02x:%02x:%02x:%02x -> %02x:%02x:%02x:%02x:%02x:%02x",
1799 			i,
1800 			ft[i].len,
1801 			s[0], s[1], s[2], s[3], s[4], s[5],
1802 			d[0], d[1], d[2], d[3], d[4], d[5]);
1803 		}
		/*
		 * The hash is somewhat expensive; there might be some
		 * worthwhile optimizations here.
		 */
1808 		if ((buf[6] & 1) == 0) { /* valid src */
			uint8_t *s = buf + 6;
1810 			sh = nm_bridge_rthash(buf+6); // XXX hash of source
1811 			/* update source port forwarding entry */
1812 			b->ht[sh].mac = smac;	/* XXX expire ? */
1813 			b->ht[sh].ports = mysrc;
1814 			if (netmap_verbose)
1815 			    D("src %02x:%02x:%02x:%02x:%02x:%02x on port %d",
1816 				s[0], s[1], s[2], s[3], s[4], s[5], NA(ifp)->bdg_port);
1817 		}
1818 		dst = 0;
1819 		if ( (buf[0] & 1) == 0) { /* unicast */
			uint8_t *d = buf;
1821 			dh = nm_bridge_rthash(buf); // XXX hash of dst
1822 			if (b->ht[dh].mac == dmac) {	/* found dst */
1823 				dst = b->ht[dh].ports;
1824 				if (netmap_verbose)
1825 				    D("dst %02x:%02x:%02x:%02x:%02x:%02x to port %x",
1826 					d[0], d[1], d[2], d[3], d[4], d[5], (uint32_t)(dst >> 16));
1827 			}
1828 		}
1829 		if (dst == 0)
1830 			dst = all_dst;
1831 		dst &= all_dst; /* only consider valid ports */
1832 		if (unlikely(netmap_verbose))
1833 			D("pkt goes to ports 0x%x", (uint32_t)dst);
1834 		ft[i].dst = dst;
1835 	}
1836 
1837 	/* second pass, scan interfaces and forward */
1838 	all_dst = (b->act_ports & ~mysrc);
1839 	for (ifn = 0; all_dst; ifn++) {
1840 		struct ifnet *dst_ifp = b->bdg_ports[ifn];
1841 		struct netmap_adapter *na;
1842 		struct netmap_kring *kring;
1843 		struct netmap_ring *ring;
1844 		int j, lim, sent, locked;
1845 
1846 		if (!dst_ifp)
1847 			continue;
1848 		ND("scan port %d %s", ifn, dst_ifp->if_xname);
		dst = (uint64_t)1 << ifn;
1850 		if ((dst & all_dst) == 0)	/* skip if not set */
1851 			continue;
1852 		all_dst &= ~dst;	/* clear current node */
1853 		na = NA(dst_ifp);
1854 
1855 		ring = NULL;
1856 		kring = NULL;
1857 		lim = sent = locked = 0;
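		/* take the destination ring lock lazily, only when the
		 * first packet for this port is found */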
1858 		/* inside, scan slots */
1859 		for (i = 0; likely(i < n); i++) {
1860 			if ((ft[i].dst & dst) == 0)
1861 				continue;	/* not here */
1862 			if (!locked) {
1863 				kring = &na->rx_rings[0];
1864 				ring = kring->ring;
1865 				lim = kring->nkr_num_slots - 1;
1866 				na->nm_lock(dst_ifp, NETMAP_RX_LOCK, 0);
1867 				locked = 1;
1868 			}
1869 			if (unlikely(kring->nr_hwavail >= lim)) {
1870 				if (netmap_verbose)
1871 					D("rx ring full on %s", ifp->if_xname);
1872 				break;
1873 			}
1874 			j = kring->nr_hwcur + kring->nr_hwavail;
1875 			if (j > lim)
1876 				j -= kring->nkr_num_slots;
1877 			slot = &ring->slot[j];
1878 			ND("send %d %d bytes at %s:%d", i, ft[i].len, dst_ifp->if_xname, j);
1879 			pkt_copy(ft[i].buf, NMB(slot), ft[i].len);
1880 			slot->len = ft[i].len;
1881 			kring->nr_hwavail++;
1882 			sent++;
1883 		}
1884 		if (locked) {
1885 			ND("sent %d on %s", sent, dst_ifp->if_xname);
1886 			if (sent)
1887 				selwakeuppri(&kring->si, PI_NET);
1888 			na->nm_lock(dst_ifp, NETMAP_RX_UNLOCK, 0);
1889 		}
1890 	}
1891 	return 0;
1892 }
1893 
/*
 * Main dispatch routine: collect packets from the tx ring of a bridge
 * port into the forwarding table and flush them in batches.
 */
1897 static int
1898 bdg_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
1899 {
1900 	struct netmap_adapter *na = NA(ifp);
1901 	struct netmap_kring *kring = &na->tx_rings[ring_nr];
1902 	struct netmap_ring *ring = kring->ring;
1903 	int i, j, k, lim = kring->nkr_num_slots - 1;
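	/* the forwarding table for this port is allocated in the same
	 * chunk of memory, right past the struct ifnet, hence (ifp + 1) */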
1904 	struct nm_bdg_fwd *ft = (struct nm_bdg_fwd *)(ifp + 1);
1905 	int ft_i;	/* position in the forwarding table */
1906 
1907 	k = ring->cur;
1908 	if (k > lim)
1909 		return netmap_ring_reinit(kring);
1910 	if (do_lock)
1911 		na->nm_lock(ifp, NETMAP_TX_LOCK, ring_nr);
1912 
1913 	if (netmap_bridge <= 0) { /* testing only */
1914 		j = k; // used all
1915 		goto done;
1916 	}
1917 	if (netmap_bridge > NM_BDG_BATCH)
1918 		netmap_bridge = NM_BDG_BATCH;
1919 
1920 	ft_i = 0;	/* start from 0 */
1921 	for (j = kring->nr_hwcur; likely(j != k); j = unlikely(j == lim) ? 0 : j+1) {
1922 		struct netmap_slot *slot = &ring->slot[j];
1923 		int len = ft[ft_i].len = slot->len;
1924 		char *buf = ft[ft_i].buf = NMB(slot);
1925 
1926 		prefetch(buf);
		if (unlikely(len < 14))
			continue;	/* drop frames shorter than an ethernet header */
1929 		if (unlikely(++ft_i == netmap_bridge))
1930 			ft_i = nm_bdg_flush(ft, ft_i, ifp);
1931 	}
1932 	if (ft_i)
1933 		ft_i = nm_bdg_flush(ft, ft_i, ifp);
1934 	/* count how many packets we sent */
1935 	i = k - j;
1936 	if (i < 0)
1937 		i += kring->nkr_num_slots;
1938 	kring->nr_hwavail = kring->nkr_num_slots - 1 - i;
1939 	if (j != k)
		D("early break at %d / %d, avail %d", j, k, kring->nr_hwavail);
1941 
1942 done:
1943 	kring->nr_hwcur = j;
1944 	ring->avail = kring->nr_hwavail;
1945 	if (do_lock)
1946 		na->nm_lock(ifp, NETMAP_TX_UNLOCK, ring_nr);
1947 
1948 	if (netmap_verbose)
1949 		D("%s ring %d lock %d", ifp->if_xname, ring_nr, do_lock);
1950 	return 0;
1951 }
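
/*
 * Userspace usage sketch (illustration only, not compiled): the loop
 * that drives the txsync above through the netmap(4) userspace API.
 * fd, nifp and the frame-filling step are assumed/hypothetical.
 */
#if 0
	struct netmap_ring *ring = NETMAP_TXRING(nifp, 0);

	while (ring->avail > 0) {
		struct netmap_slot *slot = &ring->slot[ring->cur];
		/* fill NETMAP_BUF(ring, slot->buf_idx) and set slot->len */
		ring->cur = NETMAP_RING_NEXT(ring, ring->cur);
		ring->avail--;
	}
	ioctl(fd, NIOCTXSYNC, NULL);	/* ends up in bdg_netmap_txsync() */
#endif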
1952 
1953 static int
1954 bdg_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
1955 {
1956 	struct netmap_adapter *na = NA(ifp);
1957 	struct netmap_kring *kring = &na->rx_rings[ring_nr];
1958 	struct netmap_ring *ring = kring->ring;
1959 	u_int j, n, lim = kring->nkr_num_slots - 1;
1960 	u_int k = ring->cur, resvd = ring->reserved;
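	/* ring->reserved counts slots before cur that userspace still
	 * holds on to, so the effective release point is cur - reserved */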
1961 
1962 	ND("%s ring %d lock %d avail %d",
1963 		ifp->if_xname, ring_nr, do_lock, kring->nr_hwavail);
1964 
1965 	if (k > lim)
1966 		return netmap_ring_reinit(kring);
1967 	if (do_lock)
1968 		na->nm_lock(ifp, NETMAP_RX_LOCK, ring_nr);
1969 
1970 	/* skip past packets that userspace has released */
1971 	j = kring->nr_hwcur;    /* netmap ring index */
1972 	if (resvd > 0) {
1973 		if (resvd + ring->avail >= lim + 1) {
1974 			D("XXX invalid reserve/avail %d %d", resvd, ring->avail);
1975 			ring->reserved = resvd = 0; // XXX panic...
1976 		}
1977 		k = (k >= resvd) ? k - resvd : k + lim + 1 - resvd;
1978 	}
1979 
	if (j != k) { /* userspace has released some packets. */
		/* n is unsigned, so compute the wraparound difference
		 * explicitly instead of testing for a negative value */
		n = (k >= j) ? k - j : k + kring->nkr_num_slots - j;
		ND("userspace releases %d packets", n);
		for (n = 0; likely(j != k); n++) {
			struct netmap_slot *slot = &ring->slot[j];
			void *addr = NMB(slot);

			if (addr == netmap_buffer_base) { /* bad buf */
				if (do_lock)
					na->nm_lock(ifp, NETMAP_RX_UNLOCK, ring_nr);
				return netmap_ring_reinit(kring);
			}
			/* here we would decrease the refcount for the
			 * buffer, if buffers were refcounted */
			slot->flags &= ~NS_BUF_CHANGED;
			j = unlikely(j == lim) ? 0 : j + 1;
		}
		kring->nr_hwavail -= n;
		kring->nr_hwcur = k;
	}
	/* tell userspace that there are new packets */
	ring->avail = kring->nr_hwavail - resvd;
2004 
2005 	if (do_lock)
2006 		na->nm_lock(ifp, NETMAP_RX_UNLOCK, ring_nr);
2007 	return 0;
2008 }
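
/*
 * Userspace counterpart sketch (illustration only, not compiled):
 * draining the rx ring that the rxsync above refills. fd, nifp and
 * consume() are assumed/hypothetical.
 */
#if 0
	struct netmap_ring *ring = NETMAP_RXRING(nifp, 0);

	ioctl(fd, NIOCRXSYNC, NULL);	/* ends up in bdg_netmap_rxsync() */
	while (ring->avail > 0) {
		struct netmap_slot *slot = &ring->slot[ring->cur];
		consume(NETMAP_BUF(ring, slot->buf_idx), slot->len);
		ring->cur = NETMAP_RING_NEXT(ring, ring->cur);
		ring->avail--;
	}
#endif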
2009 
2010 static void
2011 bdg_netmap_attach(struct ifnet *ifp)
2012 {
2013 	struct netmap_adapter na;
2014 
2015 	ND("attaching virtual bridge");
2016 	bzero(&na, sizeof(na));
2017 
2018 	na.ifp = ifp;
2019 	na.separate_locks = 1;
2020 	na.num_tx_desc = NM_BRIDGE_RINGSIZE;
2021 	na.num_rx_desc = NM_BRIDGE_RINGSIZE;
2022 	na.nm_txsync = bdg_netmap_txsync;
2023 	na.nm_rxsync = bdg_netmap_rxsync;
2024 	na.nm_register = bdg_netmap_reg;
2025 	netmap_attach(&na, 1);
2026 }
2027 
2028 #endif /* NM_BRIDGE */
2029 
2030 static struct cdev *netmap_dev; /* /dev/netmap character device. */
2031 
2032 
2033 /*
2034  * Module loader.
2035  *
2036  * Create the /dev/netmap device and initialize all global
2037  * variables.
2038  *
2039  * Return 0 on success, errno on failure.
2040  */
2041 static int
2042 netmap_init(void)
2043 {
2044 	int error;
2045 
2046 	error = netmap_memory_init();
2047 	if (error != 0) {
2048 		printf("netmap: unable to initialize the memory allocator.\n");
2049 		return (error);
2050 	}
2051 	printf("netmap: loaded module with %d Mbytes\n",
2052 		(int)(nm_mem->nm_totalsize >> 20));
2053 	netmap_dev = make_dev(&netmap_cdevsw, 0, UID_ROOT, GID_WHEEL, 0660,
2054 			      "netmap");
2055 
2056 #ifdef NM_BRIDGE
2057 	{
2058 	int i;
2059 	for (i = 0; i < NM_BRIDGES; i++)
2060 		mtx_init(&nm_bridges[i].bdg_lock, "bdg lock", "bdg_lock", MTX_DEF);
2061 	}
2062 #endif
2063 	return (error);
2064 }
2065 
2066 
2067 /*
2068  * Module unloader.
2069  *
2070  * Free all the memory, and destroy the ``/dev/netmap`` device.
2071  */
2072 static void
2073 netmap_fini(void)
2074 {
2075 	destroy_dev(netmap_dev);
2076 	netmap_memory_fini();
2077 	printf("netmap: unloaded module.\n");
2078 }
2079 
2080 
2081 #ifdef __FreeBSD__
2082 /*
2083  * Kernel entry point.
2084  *
2085  * Initialize/finalize the module and return.
2086  *
2087  * Return 0 on success, errno on failure.
2088  */
2089 static int
2090 netmap_loader(__unused struct module *module, int event, __unused void *arg)
2091 {
2092 	int error = 0;
2093 
2094 	switch (event) {
2095 	case MOD_LOAD:
2096 		error = netmap_init();
2097 		break;
2098 
2099 	case MOD_UNLOAD:
2100 		netmap_fini();
2101 		break;
2102 
2103 	default:
2104 		error = EOPNOTSUPP;
2105 		break;
2106 	}
2107 
2108 	return (error);
2109 }
2110 
2111 
2112 DEV_MODULE(netmap, netmap_loader, NULL);
2113 #endif /* __FreeBSD__ */
2114