xref: /freebsd/sys/dev/netmap/netmap.c (revision 4c0d7cdf5d3b64e235140553601c0dd5827429a7)
/*
 * Copyright (C) 2011-2012 Matteo Landi, Luigi Rizzo. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *   1. Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#define NM_BRIDGE

/*
 * This module supports memory-mapped access to network devices,
 * see netmap(4).
 *
 * The module uses a large memory pool allocated by the kernel
 * and accessible as mmapped memory by multiple userspace threads/processes.
 * The memory pool contains packet buffers and "netmap rings",
 * i.e. user-accessible copies of the interface's queues.
 *
 * Access to the network card works like this:
 * 1. a process/thread issues one or more open() on /dev/netmap, to create
 *    select()able file descriptors on which events are reported.
 * 2. on each descriptor, the process issues an ioctl() to identify
 *    the interface that should report events to the file descriptor.
 * 3. on each descriptor, the process issues an mmap() request to
 *    map the shared memory region within the process' address space.
 *    The list of interesting queues is indicated by a location in
 *    the shared memory region.
 * 4. using the functions in the netmap(4) userspace API, a process
 *    can look up the occupation state of a queue, access memory buffers,
 *    and retrieve received packets or enqueue packets to transmit.
 * 5. using some ioctl()s the process can synchronize the userspace view
 *    of the queue with the actual status in the kernel. This includes both
 *    receiving the notification of new packets, and transmitting new
 *    packets on the output interface.
 * 6. select() or poll() can be used to wait for events on individual
 *    transmit or receive queues (or all queues for a given interface).
 */
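
/*
 * For illustration only (not part of this module): a minimal sketch
 * of the user-side sequence above, using the macros from
 * <net/netmap_user.h> and assuming an interface named "em0";
 * error handling is omitted:
 *
 *	struct nmreq req;
 *	struct netmap_if *nifp;
 *	struct netmap_ring *rxring;
 *	char *mem;
 *
 *	int fd = open("/dev/netmap", O_RDWR);		// step 1
 *	bzero(&req, sizeof(req));
 *	req.nr_version = NETMAP_API;
 *	strncpy(req.nr_name, "em0", sizeof(req.nr_name));
 *	ioctl(fd, NIOCREGIF, &req);			// step 2
 *	mem = mmap(0, req.nr_memsize, PROT_WRITE | PROT_READ,
 *	    MAP_SHARED, fd, 0);				// step 3
 *	nifp = NETMAP_IF(mem, req.nr_offset);		// step 4
 *	rxring = NETMAP_RXRING(nifp, 0);
 *	ioctl(fd, NIOCRXSYNC, NULL);			// step 5
 *	// step 6: poll(2) on fd with POLLIN to wait for more packets
 */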

#ifdef linux
#include "bsd_glue.h"
static netdev_tx_t linux_netmap_start(struct sk_buff *skb, struct net_device *dev);
#endif /* linux */

#ifdef __APPLE__
#include "osx_glue.h"
#endif /* __APPLE__ */

#ifdef __FreeBSD__
#include <sys/cdefs.h> /* prerequisite */
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/module.h>
#include <sys/errno.h>
#include <sys/param.h>	/* defines used in kernel.h */
#include <sys/jail.h>
#include <sys/kernel.h>	/* types used in module initialization */
#include <sys/conf.h>	/* cdevsw struct */
#include <sys/uio.h>	/* uio struct */
#include <sys/sockio.h>
#include <sys/socketvar.h>	/* struct socket */
#include <sys/malloc.h>
#include <sys/mman.h>	/* PROT_EXEC */
#include <sys/poll.h>
#include <sys/proc.h>
#include <vm/vm.h>	/* vtophys */
#include <vm/pmap.h>	/* vtophys */
#include <sys/socket.h> /* sockaddrs */
#include <sys/selinfo.h>
#include <sys/sysctl.h>
#include <net/if.h>
#include <net/bpf.h>		/* BIOCIMMEDIATE */
#include <net/vnet.h>
#include <machine/bus.h>	/* bus_dmamap_* */

MALLOC_DEFINE(M_NETMAP, "netmap", "Network memory map");
#endif /* __FreeBSD__ */

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>

u_int netmap_total_buffers;
u_int netmap_buf_size;
char *netmap_buffer_base;	/* address of an invalid buffer */

/* user-controlled variables */
int netmap_verbose;

static int netmap_no_timestamp; /* don't timestamp on rxsync */

SYSCTL_NODE(_dev, OID_AUTO, netmap, CTLFLAG_RW, 0, "Netmap args");
SYSCTL_INT(_dev_netmap, OID_AUTO, verbose,
    CTLFLAG_RW, &netmap_verbose, 0, "Verbose mode");
SYSCTL_INT(_dev_netmap, OID_AUTO, no_timestamp,
    CTLFLAG_RW, &netmap_no_timestamp, 0, "no_timestamp");
int netmap_mitigate = 1;
SYSCTL_INT(_dev_netmap, OID_AUTO, mitigate, CTLFLAG_RW, &netmap_mitigate, 0, "");
int netmap_no_pendintr = 1;
SYSCTL_INT(_dev_netmap, OID_AUTO, no_pendintr,
    CTLFLAG_RW, &netmap_no_pendintr, 0, "Always look for new received packets.");

int netmap_drop = 0;	/* debugging */
int netmap_flags = 0;	/* debug flags */
int netmap_copy = 0;	/* debugging, copy content */

SYSCTL_INT(_dev_netmap, OID_AUTO, drop, CTLFLAG_RW, &netmap_drop, 0, "");
SYSCTL_INT(_dev_netmap, OID_AUTO, flags, CTLFLAG_RW, &netmap_flags, 0, "");
SYSCTL_INT(_dev_netmap, OID_AUTO, copy, CTLFLAG_RW, &netmap_copy, 0, "");
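
/*
 * The knobs above appear under the dev.netmap sysctl tree, e.g.
 * (a usage sketch, values are only examples):
 *
 *	# sysctl dev.netmap.verbose=1
 *	# sysctl dev.netmap.no_timestamp=1
 */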

#ifdef NM_BRIDGE /* support for netmap bridge */

/*
 * System parameters.
 *
 * All switched ports have prefix NM_NAME.
 * The switch has a max of NM_BDG_MAXPORTS ports (often stored in a bitmap,
 * so a practical upper bound is 64).
 * Each tx ring is read-write, whereas rx rings are read-only (XXX not done yet).
 * The virtual interfaces use per-queue locks instead of the core lock.
 * In the tx loop, we aggregate traffic in batches to make all operations
 * faster. The batch size is NM_BDG_BATCH.
 */
#define	NM_NAME			"vale"	/* prefix for the interface */
#define NM_BDG_MAXPORTS		16	/* up to 64 ? */
#define NM_BRIDGE_RINGSIZE	1024	/* in the device */
#define NM_BDG_HASH		1024	/* forwarding table entries */
#define NM_BDG_BATCH		1024	/* entries in the forwarding buffer */
#define	NM_BRIDGES		4	/* number of bridges */
int netmap_bridge = NM_BDG_BATCH; /* bridge batch size */
SYSCTL_INT(_dev_netmap, OID_AUTO, bridge, CTLFLAG_RW, &netmap_bridge, 0, "");

#ifdef linux
#define	ADD_BDG_REF(ifp)	(NA(ifp)->if_refcount++)
#define	DROP_BDG_REF(ifp)	(NA(ifp)->if_refcount-- <= 1)
#else /* !linux */
#define	ADD_BDG_REF(ifp)	(ifp)->if_refcount++
#define	DROP_BDG_REF(ifp)	refcount_release(&(ifp)->if_refcount)
#ifdef __FreeBSD__
#include <sys/endian.h>
#include <sys/refcount.h>
#endif /* __FreeBSD__ */
#define prefetch(x)	__builtin_prefetch(x)
#endif /* !linux */

static void bdg_netmap_attach(struct ifnet *ifp);
static int bdg_netmap_reg(struct ifnet *ifp, int onoff);
/* per-tx-queue entry */
struct nm_bdg_fwd {	/* forwarding entry for a bridge */
	void *buf;
	uint64_t dst;	/* dst mask */
	uint32_t src;	/* src index ? */
	uint16_t len;	/* src len */
};

struct nm_hash_ent {
	uint64_t	mac;	/* the top 2 bytes are the epoch */
	uint64_t	ports;
};

/*
 * Interfaces for a bridge are all in ports[].
 * The array has fixed size, an empty entry does not terminate
 * the search.
 */
struct nm_bridge {
	struct ifnet *bdg_ports[NM_BDG_MAXPORTS];
	int n_ports;
	uint64_t act_ports;
	int freelist;	/* first buffer index */
	NM_SELINFO_T si;	/* poll/select wait queue */
	NM_LOCK_T bdg_lock;	/* protect the selinfo ? */

	/* the forwarding table, MAC+ports */
	struct nm_hash_ent ht[NM_BDG_HASH];

	int namelen;	/* 0 means free */
	char basename[IFNAMSIZ];
};

struct nm_bridge nm_bridges[NM_BRIDGES];

#define BDG_LOCK(b)	mtx_lock(&(b)->bdg_lock)
#define BDG_UNLOCK(b)	mtx_unlock(&(b)->bdg_lock)

/*
 * NA(ifp)->bdg_port	port index
 */

// XXX only for multiples of 64 bytes, non-overlapping.
static inline void
pkt_copy(void *_src, void *_dst, int l)
{
	uint64_t *src = _src;
	uint64_t *dst = _dst;
	if (unlikely(l >= 1024)) {
		bcopy(src, dst, l);
		return;
	}
	for (; likely(l > 0); l -= 64) {
		*dst++ = *src++;
		*dst++ = *src++;
		*dst++ = *src++;
		*dst++ = *src++;
		*dst++ = *src++;
		*dst++ = *src++;
		*dst++ = *src++;
		*dst++ = *src++;
	}
}
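
/*
 * Note that the loop above rounds l up to a multiple of 64 bytes:
 * e.g. pkt_copy(src, dst, 60) still writes 64 bytes, so callers must
 * guarantee that much slack in both buffers.
 */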

/*
 * Locate a bridge among the existing ones.
 * A ':' in the name terminates the bridge name; otherwise the bridge
 * name is just the NM_NAME prefix (e.g. "vale1:em0" selects bridge
 * "vale1"). We assume that this is called with a name of at least
 * NM_NAME chars.
 */
static struct nm_bridge *
nm_find_bridge(const char *name)
{
	int i, l, namelen, e;
	struct nm_bridge *b = NULL;

	namelen = strlen(NM_NAME);	/* base length */
	l = strlen(name);		/* actual length */
	for (i = namelen + 1; i < l; i++) {
		if (name[i] == ':') {
			namelen = i;
			break;
		}
	}
	if (namelen >= IFNAMSIZ)
		namelen = IFNAMSIZ;
	ND("--- prefix is '%.*s' ---", namelen, name);

	/* use the first entry for locking */
	BDG_LOCK(nm_bridges); // XXX do better
	for (e = -1, i = 1; i < NM_BRIDGES; i++) {
		b = nm_bridges + i;
		if (b->namelen == 0)
			e = i;	/* record empty slot */
		else if (strncmp(name, b->basename, namelen) == 0) {
			ND("found '%.*s' at %d", namelen, name, i);
			break;
		}
	}
	if (i == NM_BRIDGES) { /* all full */
		if (e == -1) { /* no empty slot */
			b = NULL;
		} else {
			b = nm_bridges + e;
			strncpy(b->basename, name, namelen);
			b->namelen = namelen;
		}
	}
	BDG_UNLOCK(nm_bridges);
	return b;
}
#endif /* NM_BRIDGE */

/*------------- memory allocator -----------------*/
#ifdef NETMAP_MEM2
#include "netmap_mem2.c"
#else /* !NETMAP_MEM2 */
#include "netmap_mem1.c"
#endif /* !NETMAP_MEM2 */
/*------------ end of memory allocator ----------*/


/* Structure associated with each thread that registered an interface.
 *
 * The first 4 fields of this structure are written by NIOCREGIF and
 * read by poll() and NIOC?XSYNC.
 * There is low contention among writers (actually, a correct user program
 * should have no contention among writers) and among writers and readers,
 * so we use a single global lock to protect the structure initialization.
 * Since initialization involves the allocation of memory, we reuse the memory
 * allocator lock.
 * Read access to the structure is lock-free. Readers must check that
 * np_nifp is not NULL before using the other fields.
 * If np_nifp is NULL, initialization has not been performed, so they
 * should return an error to userlevel.
 *
 * The ref_done field is used to regulate access to the refcount in the
 * memory allocator. The refcount must be incremented at most once for
 * each open("/dev/netmap"). The increment is performed by the first
 * function that calls netmap_get_memory() (currently called by
 * mmap(), NIOCGINFO and NIOCREGIF).
 * If the refcount is incremented, it is then decremented when the
 * private structure is destroyed.
 */
struct netmap_priv_d {
	struct netmap_if * volatile np_nifp;	/* netmap interface descriptor. */

	struct ifnet	*np_ifp;	/* device for which we hold a reference */
	int		np_ringid;	/* from the ioctl */
	u_int		np_qfirst, np_qlast;	/* range of rings to scan */
	uint16_t	np_txpoll;

	unsigned long	ref_done;	/* use with NMA_LOCK held */
};
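
/*
 * Reader-side sketch of the protocol described above; this is, in
 * essence, what netmap_poll() and the NIOC?XSYNC handlers below do:
 *
 *	if (priv->np_nifp == NULL)
 *		return ENXIO;	// not yet initialized
 *	rmb();	// pairs with the wmb() issued before np_nifp is set
 *	// now np_ifp, np_qfirst, np_qlast can be used safely
 */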


static int
netmap_get_memory(struct netmap_priv_d* p)
{
	int error = 0;
	NMA_LOCK();
	if (!p->ref_done) {
		error = netmap_memory_finalize();
		if (!error)
			p->ref_done = 1;
	}
	NMA_UNLOCK();
	return error;
}

/*
 * File descriptor's private data destructor.
 *
 * Call nm_register(ifp,0) to stop netmap mode on the interface and
 * revert to normal operation. We expect that np_ifp has not gone away.
 */
/* call with NMA_LOCK held */
static void
netmap_dtor_locked(void *data)
{
	struct netmap_priv_d *priv = data;
	struct ifnet *ifp = priv->np_ifp;
	struct netmap_adapter *na = NA(ifp);
	struct netmap_if *nifp = priv->np_nifp;

	na->refcount--;
	if (na->refcount <= 0) {	/* last instance */
		u_int i, j, lim;

		D("deleting last netmap instance for %s", ifp->if_xname);
		/*
		 * there is a race here with *_netmap_task() and
		 * netmap_poll(), which don't run under NETMAP_REG_LOCK.
		 * na->refcount == 0 && na->ifp->if_capenable & IFCAP_NETMAP
		 * (aka NETMAP_DELETING(na)) are a unique marker that the
		 * device is dying.
		 * Before destroying stuff we sleep a bit, and then complete
		 * the job. NIOCREG should realize the condition and
		 * loop until it can continue; the other routines
		 * should check the condition at entry and quit if
		 * they cannot run.
		 */
		na->nm_lock(ifp, NETMAP_REG_UNLOCK, 0);
		tsleep(na, 0, "NIOCUNREG", 4);
		na->nm_lock(ifp, NETMAP_REG_LOCK, 0);
		na->nm_register(ifp, 0); /* off, clear IFCAP_NETMAP */
		/* Wake up any sleeping threads. netmap_poll will
		 * then return POLLERR
		 */
		for (i = 0; i < na->num_tx_rings + 1; i++)
			selwakeuppri(&na->tx_rings[i].si, PI_NET);
		for (i = 0; i < na->num_rx_rings + 1; i++)
			selwakeuppri(&na->rx_rings[i].si, PI_NET);
		selwakeuppri(&na->tx_si, PI_NET);
		selwakeuppri(&na->rx_si, PI_NET);
		/* release all buffers */
		for (i = 0; i < na->num_tx_rings + 1; i++) {
			struct netmap_ring *ring = na->tx_rings[i].ring;
			lim = na->tx_rings[i].nkr_num_slots;
			for (j = 0; j < lim; j++)
				netmap_free_buf(nifp, ring->slot[j].buf_idx);
			/* knlist_destroy(&na->tx_rings[i].si.si_note); */
			mtx_destroy(&na->tx_rings[i].q_lock);
		}
		for (i = 0; i < na->num_rx_rings + 1; i++) {
			struct netmap_ring *ring = na->rx_rings[i].ring;
			lim = na->rx_rings[i].nkr_num_slots;
			for (j = 0; j < lim; j++)
				netmap_free_buf(nifp, ring->slot[j].buf_idx);
			/* knlist_destroy(&na->rx_rings[i].si.si_note); */
			mtx_destroy(&na->rx_rings[i].q_lock);
		}
		/* XXX kqueue(9) needed; these will mirror knlist_init. */
		/* knlist_destroy(&na->tx_si.si_note); */
		/* knlist_destroy(&na->rx_si.si_note); */
		netmap_free_rings(na);
		wakeup(na);
	}
	netmap_if_free(nifp);
}

static void
nm_if_rele(struct ifnet *ifp)
{
#ifndef NM_BRIDGE
	if_rele(ifp);
#else /* NM_BRIDGE */
	int i, full;
	struct nm_bridge *b;

	if (strncmp(ifp->if_xname, NM_NAME, sizeof(NM_NAME) - 1)) {
		if_rele(ifp);
		return;
	}
	if (!DROP_BDG_REF(ifp))
		return;
	b = ifp->if_bridge;
	BDG_LOCK(nm_bridges);
	BDG_LOCK(b);
	ND("want to disconnect %s from the bridge", ifp->if_xname);
	full = 0;
	for (i = 0; i < NM_BDG_MAXPORTS; i++) {
		if (b->bdg_ports[i] == ifp) {
			b->bdg_ports[i] = NULL;
			bzero(ifp, sizeof(*ifp));
			free(ifp, M_DEVBUF);
			break;
		}
		else if (b->bdg_ports[i] != NULL)
			full = 1;
	}
	BDG_UNLOCK(b);
	if (full == 0) {
		ND("freeing bridge %d", b - nm_bridges);
		b->namelen = 0;
	}
	BDG_UNLOCK(nm_bridges);
	if (i == NM_BDG_MAXPORTS)
		D("ouch, cannot find ifp to remove");
#endif /* NM_BRIDGE */
}

static void
netmap_dtor(void *data)
{
	struct netmap_priv_d *priv = data;
	struct ifnet *ifp = priv->np_ifp;
	struct netmap_adapter *na;

	NMA_LOCK();
	if (ifp) {
		na = NA(ifp);
		na->nm_lock(ifp, NETMAP_REG_LOCK, 0);
		netmap_dtor_locked(data);
		na->nm_lock(ifp, NETMAP_REG_UNLOCK, 0);

		nm_if_rele(ifp);
	}
	if (priv->ref_done) {
		netmap_memory_deref();
	}
	NMA_UNLOCK();
	bzero(priv, sizeof(*priv));	/* XXX for safety */
	free(priv, M_DEVBUF);
}

#ifdef __FreeBSD__
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>

static struct cdev_pager_ops saved_cdev_pager_ops;

static int
netmap_dev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color)
{
	D("first mmap for %p", handle);
	return saved_cdev_pager_ops.cdev_pg_ctor(handle,
			size, prot, foff, cred, color);
}

static void
netmap_dev_pager_dtor(void *handle)
{
	saved_cdev_pager_ops.cdev_pg_dtor(handle);
	D("ready to release memory for %p", handle);
}


static struct cdev_pager_ops netmap_cdev_pager_ops = {
	.cdev_pg_ctor = netmap_dev_pager_ctor,
	.cdev_pg_dtor = netmap_dev_pager_dtor,
	.cdev_pg_fault = NULL,
};
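
/*
 * The table above interposes on the default device pager:
 * netmap_mmap_single() below saves the system ops on the first
 * mmap() and reuses the original fault handler, so only the
 * constructor and destructor are wrapped, currently just for
 * diagnostics.
 */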

static int
netmap_mmap_single(struct cdev *cdev, vm_ooffset_t *foff,
	vm_size_t objsize, vm_object_t *objp, int prot)
{
	vm_object_t obj;

	D("cdev %p foff %jd size %jd objp %p prot %d", cdev,
	    (intmax_t)*foff, (intmax_t)objsize, objp, prot);
	obj = vm_pager_allocate(OBJT_DEVICE, cdev, objsize, prot, *foff,
	    curthread->td_ucred);
	ND("returns obj %p", obj);
	if (obj == NULL)
		return EINVAL;
	if (saved_cdev_pager_ops.cdev_pg_fault == NULL) {
		D("initialize cdev_pager_ops");
		saved_cdev_pager_ops = *(obj->un_pager.devp.ops);
		netmap_cdev_pager_ops.cdev_pg_fault =
			saved_cdev_pager_ops.cdev_pg_fault;
	}
	obj->un_pager.devp.ops = &netmap_cdev_pager_ops;
	*objp = obj;
	return 0;
}
#endif /* __FreeBSD__ */


/*
 * mmap(2) support for the "netmap" device.
 *
 * Expose all the memory previously allocated by our custom memory
 * allocator: this way the user has only to issue a single mmap(2), and
 * can work on all the data structures flawlessly.
 *
 * Return 0 on success, -1 otherwise.
 */

#ifdef __FreeBSD__
static int
netmap_mmap(__unused struct cdev *dev,
#if __FreeBSD_version < 900000
		vm_offset_t offset, vm_paddr_t *paddr, int nprot
#else
		vm_ooffset_t offset, vm_paddr_t *paddr, int nprot,
		__unused vm_memattr_t *memattr
#endif
	)
{
	int error = 0;
	struct netmap_priv_d *priv;

	if (nprot & PROT_EXEC)
		return (-1);	// XXX -1 or EINVAL ?

	error = devfs_get_cdevpriv((void **)&priv);
	if (error == EBADF) {	/* called on fault, memory is initialized */
		ND(5, "handling fault at ofs 0x%x", offset);
		error = 0;
	} else if (error == 0)	/* make sure memory is set */
		error = netmap_get_memory(priv);
	if (error)
		return (error);

	ND("request for offset 0x%x", (uint32_t)offset);
	*paddr = netmap_ofstophys(offset);

	return (*paddr ? 0 : ENOMEM);
}

static int
netmap_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{
	D("dev %p fflag 0x%x devtype %d td %p", dev, fflag, devtype, td);
	return 0;
}

static int
netmap_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
	struct netmap_priv_d *priv;
	int error;

	priv = malloc(sizeof(struct netmap_priv_d), M_DEVBUF,
			      M_NOWAIT | M_ZERO);
	if (priv == NULL)
		return ENOMEM;

	error = devfs_set_cdevpriv(priv, netmap_dtor);
	if (error)
		return error;

	return 0;
}
#endif /* __FreeBSD__ */


/*
 * Handlers for synchronization of the queues from/to the host.
 *
 * netmap_sync_to_host() passes packets up. We are called from a
 * system call in user process context, and the only contention
 * can be among multiple user threads erroneously calling
 * this routine concurrently. In principle we should not even
 * need to lock.
 */
static void
netmap_sync_to_host(struct netmap_adapter *na)
{
	struct netmap_kring *kring = &na->tx_rings[na->num_tx_rings];
	struct netmap_ring *ring = kring->ring;
	struct mbuf *head = NULL, *tail = NULL, *m;
	u_int k, n, lim = kring->nkr_num_slots - 1;

	k = ring->cur;
	if (k > lim) {
		netmap_ring_reinit(kring);
		return;
	}
	// na->nm_lock(na->ifp, NETMAP_CORE_LOCK, 0);

	/* Take packets from hwcur to cur and pass them up.
	 * In case of no buffers we give up. At the end of the loop,
	 * the queue is drained in all cases.
	 */
	for (n = kring->nr_hwcur; n != k;) {
		struct netmap_slot *slot = &ring->slot[n];

		n = (n == lim) ? 0 : n + 1;
		if (slot->len < 14 || slot->len > NETMAP_BUF_SIZE) {
			D("bad pkt at %d len %d", n, slot->len);
			continue;
		}
		m = m_devget(NMB(slot), slot->len, 0, na->ifp, NULL);

		if (m == NULL)
			break;
		if (tail)
			tail->m_nextpkt = m;
		else
			head = m;
		tail = m;
		m->m_nextpkt = NULL;
	}
	kring->nr_hwcur = k;
	kring->nr_hwavail = ring->avail = lim;
	// na->nm_lock(na->ifp, NETMAP_CORE_UNLOCK, 0);

	/* send packets up, outside the lock */
	while ((m = head) != NULL) {
		head = head->m_nextpkt;
		m->m_nextpkt = NULL;
		if (netmap_verbose & NM_VERB_HOST)
			D("sending up pkt %p size %d", m, MBUF_LEN(m));
		NM_SEND_UP(na->ifp, m);
	}
}

/*
 * rxsync backend for packets coming from the host stack.
 * They have been put in the queue by netmap_start() so we
 * need to protect access to the kring using a lock.
 *
 * This routine also does the selrecord if called from the poll handler
 * (we know because td != NULL).
 *
 * NOTE: on linux, selrecord() is defined as a macro and uses pwait
 *     as an additional hidden argument.
 */
static void
netmap_sync_from_host(struct netmap_adapter *na, struct thread *td, void *pwait)
{
	struct netmap_kring *kring = &na->rx_rings[na->num_rx_rings];
	struct netmap_ring *ring = kring->ring;
	u_int j, n, lim = kring->nkr_num_slots;
	u_int k = ring->cur, resvd = ring->reserved;

	(void)pwait;	/* disable unused warnings */
	na->nm_lock(na->ifp, NETMAP_CORE_LOCK, 0);
	if (k >= lim) {
		netmap_ring_reinit(kring);
		/* release the lock taken above before bailing out */
		na->nm_lock(na->ifp, NETMAP_CORE_UNLOCK, 0);
		return;
	}
	/* new packets are already set in nr_hwavail */
	/* skip past packets that userspace has released */
	j = kring->nr_hwcur;
	if (resvd > 0) {
		if (resvd + ring->avail >= lim + 1) {
			D("XXX invalid reserve/avail %d %d", resvd, ring->avail);
			ring->reserved = resvd = 0; // XXX panic...
		}
		k = (k >= resvd) ? k - resvd : k + lim - resvd;
	}
	if (j != k) {
		n = k >= j ? k - j : k + lim - j;
		kring->nr_hwavail -= n;
		kring->nr_hwcur = k;
	}
	k = ring->avail = kring->nr_hwavail - resvd;
	if (k == 0 && td)
		selrecord(td, &kring->si);
	if (k && (netmap_verbose & NM_VERB_HOST))
		D("%d pkts from stack", k);
	na->nm_lock(na->ifp, NETMAP_CORE_UNLOCK, 0);
}
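
/*
 * Example of the reserved-slot arithmetic above: with lim = 256,
 * cur = 5 and reserved = 10, the oldest reserved slot is
 * 5 + 256 - 10 = 251, so nr_hwcur only advances to 251 and slots
 * 251..255 and 0..4 remain owned by userspace.
 */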


/*
 * get a refcounted reference to an interface.
 * Return ENXIO if the interface does not exist, EINVAL if netmap
 * is not supported by the interface.
 * If successful, hold a reference.
 */
static int
get_ifp(const char *name, struct ifnet **ifp)
{
#ifdef NM_BRIDGE
	struct ifnet *iter = NULL;

	do {
		struct nm_bridge *b;
		int i, l, cand = -1;

		if (strncmp(name, NM_NAME, sizeof(NM_NAME) - 1))
			break;
		b = nm_find_bridge(name);
		if (b == NULL) {
			D("no bridges available for '%s'", name);
			return (ENXIO);
		}
		/* XXX locking */
		BDG_LOCK(b);
		/* lookup in the local list of ports */
		for (i = 0; i < NM_BDG_MAXPORTS; i++) {
			iter = b->bdg_ports[i];
			if (iter == NULL) {
				if (cand == -1)
					cand = i; /* potential insert point */
				continue;
			}
			if (!strcmp(iter->if_xname, name)) {
				ADD_BDG_REF(iter);
				ND("found existing interface");
				BDG_UNLOCK(b);
				break;
			}
		}
		if (i < NM_BDG_MAXPORTS) /* already unlocked */
			break;
		if (cand == -1) {
			D("bridge full, cannot create new port");
no_port:
			BDG_UNLOCK(b);
			*ifp = NULL;
			return EINVAL;
		}
		ND("create new bridge port %s", name);
		/* space for forwarding list after the ifnet */
		l = sizeof(*iter) +
			sizeof(struct nm_bdg_fwd) * NM_BDG_BATCH;
		iter = malloc(l, M_DEVBUF, M_NOWAIT | M_ZERO);
		if (!iter)
			goto no_port;
		strcpy(iter->if_xname, name);
		bdg_netmap_attach(iter);
		b->bdg_ports[cand] = iter;
		iter->if_bridge = b;
		ADD_BDG_REF(iter);
		BDG_UNLOCK(b);
		ND("attaching virtual bridge %p", b);
	} while (0);
	*ifp = iter;
	if (! *ifp)
#endif /* NM_BRIDGE */
	*ifp = ifunit_ref(name);
	if (*ifp == NULL)
		return (ENXIO);
	/* can do this if the capability exists and if_pspare[0]
	 * points to the netmap descriptor.
	 */
	if (NETMAP_CAPABLE(*ifp))
		return 0;	/* valid pointer, we hold the refcount */
	nm_if_rele(*ifp);
	return EINVAL;	// not NETMAP capable
}


/*
 * Error routine called when txsync/rxsync detects an error.
 * Can't do much more than resetting cur = hwcur, avail = hwavail.
 * Return 1 on reinit.
 *
 * This routine is only called by the upper half of the kernel.
 * It only reads hwcur (which is changed only by the upper half, too)
 * and hwavail (which may be changed by the lower half, but only on
 * a tx ring and only to increase it, so any error will be recovered
 * on the next call). For the above, we don't strictly need to call
 * it under lock.
 */
int
netmap_ring_reinit(struct netmap_kring *kring)
{
	struct netmap_ring *ring = kring->ring;
	u_int i, lim = kring->nkr_num_slots - 1;
	int errors = 0;

	RD(10, "called for %s", kring->na->ifp->if_xname);
	if (ring->cur > lim)
		errors++;
	for (i = 0; i <= lim; i++) {
		u_int idx = ring->slot[i].buf_idx;
		u_int len = ring->slot[i].len;
		if (idx < 2 || idx >= netmap_total_buffers) {
			if (!errors++)
				D("bad buffer at slot %d idx %d len %d ", i, idx, len);
			ring->slot[i].buf_idx = 0;
			ring->slot[i].len = 0;
		} else if (len > NETMAP_BUF_SIZE) {
			ring->slot[i].len = 0;
			if (!errors++)
				D("bad len %d at slot %d idx %d",
					len, i, idx);
		}
	}
	if (errors) {
		int pos = kring - kring->na->tx_rings;
		int n = kring->na->num_tx_rings + 1;

		RD(10, "total %d errors", errors);
		errors++;
		RD(10, "%s %s[%d] reinit, cur %d -> %d avail %d -> %d",
			kring->na->ifp->if_xname,
			pos < n ? "TX" : "RX", pos < n ? pos : pos - n,
			ring->cur, kring->nr_hwcur,
			ring->avail, kring->nr_hwavail);
		ring->cur = kring->nr_hwcur;
		ring->avail = kring->nr_hwavail;
	}
	return (errors ? 1 : 0);
}


/*
 * Set the ring ID. For devices with a single queue, a request
 * for all rings is the same as a single ring.
 */
static int
netmap_set_ringid(struct netmap_priv_d *priv, u_int ringid)
{
	struct ifnet *ifp = priv->np_ifp;
	struct netmap_adapter *na = NA(ifp);
	u_int i = ringid & NETMAP_RING_MASK;
	/* initially (np_qfirst == np_qlast) we don't want to lock */
	int need_lock = (priv->np_qfirst != priv->np_qlast);
	int lim = na->num_rx_rings;

	if (na->num_tx_rings > lim)
		lim = na->num_tx_rings;
	if ( (ringid & NETMAP_HW_RING) && i >= lim) {
		D("invalid ring id %d", i);
		return (EINVAL);
	}
	if (need_lock)
		na->nm_lock(ifp, NETMAP_CORE_LOCK, 0);
	priv->np_ringid = ringid;
	if (ringid & NETMAP_SW_RING) {
		priv->np_qfirst = NETMAP_SW_RING;
		priv->np_qlast = 0;
	} else if (ringid & NETMAP_HW_RING) {
		priv->np_qfirst = i;
		priv->np_qlast = i + 1;
	} else {
		priv->np_qfirst = 0;
		priv->np_qlast = NETMAP_HW_RING;
	}
	priv->np_txpoll = (ringid & NETMAP_NO_TX_POLL) ? 0 : 1;
	if (need_lock)
		na->nm_lock(ifp, NETMAP_CORE_UNLOCK, 0);
	if (ringid & NETMAP_SW_RING)
		D("ringid %s set to SW RING", ifp->if_xname);
	else if (ringid & NETMAP_HW_RING)
		D("ringid %s set to HW RING %d", ifp->if_xname,
			priv->np_qfirst);
	else
		D("ringid %s set to all %d HW RINGS", ifp->if_xname, lim);
	return 0;
}
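
/*
 * Examples of ring id encodings handled above (a usage sketch):
 *
 *	nr_ringid = 0				bind to all hardware rings
 *	nr_ringid = NETMAP_HW_RING | 2		bind to hardware ring 2 only
 *	nr_ringid = NETMAP_SW_RING		bind to the host stack rings
 *	nr_ringid |= NETMAP_NO_TX_POLL		do not txsync on poll()
 */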

/*
 * ioctl(2) support for the "netmap" device.
 *
 * The following commands are accepted:
 * - NIOCGINFO
 * - SIOCGIFADDR	just for convenience
 * - NIOCREGIF
 * - NIOCUNREGIF
 * - NIOCTXSYNC
 * - NIOCRXSYNC
 *
 * Return 0 on success, errno otherwise.
 */
static int
netmap_ioctl(struct cdev *dev, u_long cmd, caddr_t data,
	int fflag, struct thread *td)
{
	struct netmap_priv_d *priv = NULL;
	struct ifnet *ifp;
	struct nmreq *nmr = (struct nmreq *) data;
	struct netmap_adapter *na;
	int error;
	u_int i, lim;
	struct netmap_if *nifp;

	(void)dev;	/* UNUSED */
	(void)fflag;	/* UNUSED */
#ifdef linux
#define devfs_get_cdevpriv(pp)				\
	({ *(struct netmap_priv_d **)pp = ((struct file *)td)->private_data;	\
		(*pp ? 0 : ENOENT); })

/* devfs_set_cdevpriv cannot fail on linux */
#define devfs_set_cdevpriv(p, fn)				\
	({ ((struct file *)td)->private_data = p; (p ? 0 : EINVAL); })


#define devfs_clear_cdevpriv()	do {				\
		netmap_dtor(priv); ((struct file *)td)->private_data = 0;	\
	} while (0)
#endif /* linux */

	CURVNET_SET(TD_TO_VNET(td));

	error = devfs_get_cdevpriv((void **)&priv);
	if (error) {
		CURVNET_RESTORE();
		/* XXX ENOENT should be impossible, since the priv
		 * is now created in the open */
		return (error == ENOENT ? ENXIO : error);
	}

	nmr->nr_name[sizeof(nmr->nr_name) - 1] = '\0';	/* truncate name */
	switch (cmd) {
	case NIOCGINFO:		/* return capabilities etc */
		if (nmr->nr_version != NETMAP_API) {
			D("API mismatch got %d have %d",
				nmr->nr_version, NETMAP_API);
			nmr->nr_version = NETMAP_API;
			error = EINVAL;
			break;
		}
		/* update configuration */
		error = netmap_get_memory(priv);
		ND("get_memory returned %d", error);
		if (error)
			break;
		/* memsize is always valid */
		nmr->nr_memsize = nm_mem.nm_totalsize;
		nmr->nr_offset = 0;
		nmr->nr_rx_rings = nmr->nr_tx_rings = 0;
		nmr->nr_rx_slots = nmr->nr_tx_slots = 0;
		if (nmr->nr_name[0] == '\0')	/* just get memory info */
			break;
		error = get_ifp(nmr->nr_name, &ifp); /* get a refcount */
		if (error)
			break;
		na = NA(ifp); /* retrieve netmap_adapter */
		nmr->nr_rx_rings = na->num_rx_rings;
		nmr->nr_tx_rings = na->num_tx_rings;
		nmr->nr_rx_slots = na->num_rx_desc;
		nmr->nr_tx_slots = na->num_tx_desc;
		nm_if_rele(ifp);	/* return the refcount */
		break;

	case NIOCREGIF:
		if (nmr->nr_version != NETMAP_API) {
			nmr->nr_version = NETMAP_API;
			error = EINVAL;
			break;
		}
		/* ensure allocators are ready */
		error = netmap_get_memory(priv);
		ND("get_memory returned %d", error);
		if (error)
			break;

		/* protect access to priv from concurrent NIOCREGIF */
		NMA_LOCK();
		if (priv->np_ifp != NULL) {	/* thread already registered */
			error = netmap_set_ringid(priv, nmr->nr_ringid);
			NMA_UNLOCK();
			break;
		}
		/* find the interface and a reference */
		error = get_ifp(nmr->nr_name, &ifp); /* keep reference */
		if (error) {
			NMA_UNLOCK();
			break;
		}
		na = NA(ifp); /* retrieve netmap adapter */

		for (i = 10; i > 0; i--) {
			na->nm_lock(ifp, NETMAP_REG_LOCK, 0);
			if (!NETMAP_DELETING(na))
				break;
			na->nm_lock(ifp, NETMAP_REG_UNLOCK, 0);
			tsleep(na, 0, "NIOCREGIF", hz/10);
		}
		if (i == 0) {
			D("too many NIOCREGIF attempts, give up");
			error = EINVAL;
			nm_if_rele(ifp);	/* return the refcount */
			NMA_UNLOCK();
			break;
		}

		priv->np_ifp = ifp;	/* store the reference */
		error = netmap_set_ringid(priv, nmr->nr_ringid);
		if (error)
			goto error;
		nifp = netmap_if_new(nmr->nr_name, na);
		if (nifp == NULL) { /* allocation failed */
			error = ENOMEM;
		} else if (ifp->if_capenable & IFCAP_NETMAP) {
			/* was already set */
		} else {
			/* Otherwise set the card in netmap mode
			 * and make it use the shared buffers.
			 */
			for (i = 0 ; i < na->num_tx_rings + 1; i++)
				mtx_init(&na->tx_rings[i].q_lock, "nm_txq_lock", MTX_NETWORK_LOCK, MTX_DEF);
			for (i = 0 ; i < na->num_rx_rings + 1; i++) {
				mtx_init(&na->rx_rings[i].q_lock, "nm_rxq_lock", MTX_NETWORK_LOCK, MTX_DEF);
			}
			error = na->nm_register(ifp, 1); /* mode on */
			if (error) {
				netmap_dtor_locked(priv);
				netmap_if_free(nifp);
			}
		}

		if (error) {	/* reg. failed, release priv and ref */
error:
			na->nm_lock(ifp, NETMAP_REG_UNLOCK, 0);
			nm_if_rele(ifp);	/* return the refcount */
			priv->np_ifp = NULL;
			priv->np_nifp = NULL;
			NMA_UNLOCK();
			break;
		}

		na->nm_lock(ifp, NETMAP_REG_UNLOCK, 0);

		/* the following assignment is a commitment.
		 * Readers (i.e., poll and *SYNC) check for
		 * np_nifp != NULL without locking
		 */
		wmb(); /* make sure previous writes are visible to all CPUs */
		priv->np_nifp = nifp;
		NMA_UNLOCK();

		/* return the offset of the netmap_if object */
		nmr->nr_rx_rings = na->num_rx_rings;
		nmr->nr_tx_rings = na->num_tx_rings;
		nmr->nr_rx_slots = na->num_rx_desc;
		nmr->nr_tx_slots = na->num_tx_desc;
		nmr->nr_memsize = nm_mem.nm_totalsize;
		nmr->nr_offset = netmap_if_offset(nifp);
		break;

	case NIOCUNREGIF:
		// XXX we have no data here ?
		D("deprecated, data is %p", nmr);
		error = EINVAL;
		break;

	case NIOCTXSYNC:
	case NIOCRXSYNC:
		nifp = priv->np_nifp;

		if (nifp == NULL) {
			error = ENXIO;
			break;
		}
		rmb(); /* make sure following reads are not from cache */


		ifp = priv->np_ifp;	/* we have a reference */

		if (ifp == NULL) {
			D("Internal error: nifp != NULL && ifp == NULL");
			error = ENXIO;
			break;
		}

		na = NA(ifp); /* retrieve netmap adapter */
		if (priv->np_qfirst == NETMAP_SW_RING) { /* host rings */
			if (cmd == NIOCTXSYNC)
				netmap_sync_to_host(na);
			else
				netmap_sync_from_host(na, NULL, NULL);
			break;
		}
		/* find the last ring to scan */
		lim = priv->np_qlast;
		if (lim == NETMAP_HW_RING)
			lim = (cmd == NIOCTXSYNC) ?
			    na->num_tx_rings : na->num_rx_rings;

		for (i = priv->np_qfirst; i < lim; i++) {
			if (cmd == NIOCTXSYNC) {
				struct netmap_kring *kring = &na->tx_rings[i];
				if (netmap_verbose & NM_VERB_TXSYNC)
					D("pre txsync ring %d cur %d hwcur %d",
					    i, kring->ring->cur,
					    kring->nr_hwcur);
				na->nm_txsync(ifp, i, 1 /* do lock */);
				if (netmap_verbose & NM_VERB_TXSYNC)
					D("post txsync ring %d cur %d hwcur %d",
					    i, kring->ring->cur,
					    kring->nr_hwcur);
			} else {
				na->nm_rxsync(ifp, i, 1 /* do lock */);
				microtime(&na->rx_rings[i].ring->ts);
			}
		}

		break;

#ifdef __FreeBSD__
	case BIOCIMMEDIATE:
	case BIOCGHDRCMPLT:
	case BIOCSHDRCMPLT:
	case BIOCSSEESENT:
		D("ignore BIOCIMMEDIATE/BIOCGHDRCMPLT/BIOCSHDRCMPLT/BIOCSSEESENT");
		break;

	default:	/* allow device-specific ioctls */
	    {
		struct socket so;
		bzero(&so, sizeof(so));
		error = get_ifp(nmr->nr_name, &ifp); /* keep reference */
		if (error)
			break;
		so.so_vnet = ifp->if_vnet;
		// so->so_proto not null.
		error = ifioctl(&so, cmd, data, td);
		nm_if_rele(ifp);
		break;
	    }

#else /* linux */
	default:
		error = EOPNOTSUPP;
#endif /* linux */
	}

	CURVNET_RESTORE();
	return (error);
}


/*
 * select(2) and poll(2) handlers for the "netmap" device.
 *
 * Can be called for one or more queues.
 * Return the event mask corresponding to ready events.
 * If there are no ready events, do a selrecord on either individual
 * selfd or on the global one.
 * Device-dependent parts (locking and sync of tx/rx rings)
 * are done through callbacks.
 *
 * On linux, 'dev' is really pwait, the poll table, and 'td' is a
 * struct file *. The first argument is remapped to pwait because
 * selrecord() uses that name as a hidden argument.
 */
static int
netmap_poll(struct cdev *dev, int events, struct thread *td)
{
	struct netmap_priv_d *priv = NULL;
	struct netmap_adapter *na;
	struct ifnet *ifp;
	struct netmap_kring *kring;
	u_int core_lock, i, check_all, want_tx, want_rx, revents = 0;
	u_int lim_tx, lim_rx;
	enum {NO_CL, NEED_CL, LOCKED_CL}; /* see below */
	void *pwait = dev;	/* linux compatibility */

	(void)pwait;

	if (devfs_get_cdevpriv((void **)&priv) != 0 || priv == NULL)
		return POLLERR;

	if (priv->np_nifp == NULL) {
		D("No if registered");
		return POLLERR;
	}
	rmb(); /* make sure following reads are not from cache */

	ifp = priv->np_ifp;
	// XXX check for deleting() ?
	if ( (ifp->if_capenable & IFCAP_NETMAP) == 0)
		return POLLERR;

	if (netmap_verbose & 0x8000)
		D("device %s events 0x%x", ifp->if_xname, events);
	want_tx = events & (POLLOUT | POLLWRNORM);
	want_rx = events & (POLLIN | POLLRDNORM);

	na = NA(ifp); /* retrieve netmap adapter */

	lim_tx = na->num_tx_rings;
	lim_rx = na->num_rx_rings;
	/* how many queues we are scanning */
	if (priv->np_qfirst == NETMAP_SW_RING) {
		if (priv->np_txpoll || want_tx) {
			/* push any packets up, then we are always ready */
			kring = &na->tx_rings[lim_tx];
			netmap_sync_to_host(na);
			revents |= want_tx;
		}
		if (want_rx) {
			kring = &na->rx_rings[lim_rx];
			if (kring->ring->avail == 0)
				netmap_sync_from_host(na, td, dev);
			if (kring->ring->avail > 0) {
				revents |= want_rx;
			}
		}
		return (revents);
	}

	/*
	 * check_all is set if the card has more than one queue and
	 * the client is polling all of them. If true, we sleep on
	 * the "global" selfd, otherwise we sleep on individual selfd
	 * (we can only sleep on one of them per direction).
	 * The interrupt routine in the driver should always wake on
	 * the individual selfd, and also on the global one if the card
	 * has more than one ring.
	 *
	 * If the card has only one lock, we just use that.
	 * If the card has separate ring locks, we just use those
	 * unless we are doing check_all, in which case the whole
	 * loop is wrapped by the global lock.
	 * We acquire locks only when necessary: if poll is called
	 * when buffers are available, we can just return without locks.
	 *
	 * rxsync() is only called if we run out of buffers on a POLLIN.
	 * txsync() is called if we run out of buffers on POLLOUT, or
	 * there are pending packets to send. The latter can be disabled
	 * passing NETMAP_NO_TX_POLL in the NIOCREG call.
	 */
	check_all = (priv->np_qlast == NETMAP_HW_RING) && (lim_tx > 1 || lim_rx > 1);

	/*
	 * core_lock indicates what to do with the core lock.
	 * The core lock is used when either the card has no individual
	 * locks, or it has individual locks but we are checking all
	 * rings so we need the core lock to avoid missing wakeup events.
	 *
	 * It has three possible states:
	 * NO_CL	we don't need to use the core lock, e.g.
	 *		because we are protected by individual locks.
	 * NEED_CL	we need the core lock. In this case, when we
	 *		call the lock routine, move to LOCKED_CL
	 *		to remember to release the lock once done.
	 * LOCKED_CL	core lock is set, so we need to release it.
	 */
	core_lock = (check_all || !na->separate_locks) ? NEED_CL : NO_CL;
#ifdef NM_BRIDGE
	/* the bridge uses separate locks */
	if (na->nm_register == bdg_netmap_reg) {
		ND("not using core lock for %s", ifp->if_xname);
		core_lock = NO_CL;
	}
#endif /* NM_BRIDGE */
	if (priv->np_qlast != NETMAP_HW_RING) {
		lim_tx = lim_rx = priv->np_qlast;
	}

	/*
	 * We start with a lock-free round, which is good if we have
	 * data available. If this fails, then lock and call the sync
	 * routines.
	 */
	for (i = priv->np_qfirst; want_rx && i < lim_rx; i++) {
		kring = &na->rx_rings[i];
		if (kring->ring->avail > 0) {
			revents |= want_rx;
			want_rx = 0;	/* also breaks the loop */
		}
	}
	for (i = priv->np_qfirst; want_tx && i < lim_tx; i++) {
		kring = &na->tx_rings[i];
		if (kring->ring->avail > 0) {
			revents |= want_tx;
			want_tx = 0;	/* also breaks the loop */
		}
	}

	/*
	 * If we need to push packets out (priv->np_txpoll) or want_tx is
	 * still set, we do need to run the txsync calls (on all rings,
	 * to avoid that the tx rings stall).
	 */
	if (priv->np_txpoll || want_tx) {
		for (i = priv->np_qfirst; i < lim_tx; i++) {
			kring = &na->tx_rings[i];
			/*
			 * Skip the current ring if want_tx == 0
			 * (we have already done a successful sync on
			 * a previous ring) AND kring->cur == kring->hwcur
			 * (there are no pending transmissions for this ring).
			 */
			if (!want_tx && kring->ring->cur == kring->nr_hwcur)
				continue;
			if (core_lock == NEED_CL) {
				na->nm_lock(ifp, NETMAP_CORE_LOCK, 0);
				core_lock = LOCKED_CL;
			}
			if (na->separate_locks)
				na->nm_lock(ifp, NETMAP_TX_LOCK, i);
			if (netmap_verbose & NM_VERB_TXSYNC)
				D("send %d on %s %d",
					kring->ring->cur,
					ifp->if_xname, i);
			if (na->nm_txsync(ifp, i, 0 /* no lock */))
				revents |= POLLERR;

			/* Check avail/call selrecord only if called with POLLOUT */
			if (want_tx) {
				if (kring->ring->avail > 0) {
					/* stop at the first ring. We don't risk
					 * starvation.
					 */
					revents |= want_tx;
					want_tx = 0;
				} else if (!check_all)
					selrecord(td, &kring->si);
			}
			if (na->separate_locks)
				na->nm_lock(ifp, NETMAP_TX_UNLOCK, i);
		}
	}

	/*
	 * now if want_rx is still set we need to lock and rxsync.
	 * Do it on all rings because otherwise we starve.
	 */
	if (want_rx) {
		for (i = priv->np_qfirst; i < lim_rx; i++) {
			kring = &na->rx_rings[i];
			if (core_lock == NEED_CL) {
				na->nm_lock(ifp, NETMAP_CORE_LOCK, 0);
				core_lock = LOCKED_CL;
			}
			if (na->separate_locks)
				na->nm_lock(ifp, NETMAP_RX_LOCK, i);

			if (na->nm_rxsync(ifp, i, 0 /* no lock */))
				revents |= POLLERR;
			if (netmap_no_timestamp == 0 ||
					kring->ring->flags & NR_TIMESTAMP) {
				microtime(&kring->ring->ts);
			}

			if (kring->ring->avail > 0)
				revents |= want_rx;
			else if (!check_all)
				selrecord(td, &kring->si);
			if (na->separate_locks)
				na->nm_lock(ifp, NETMAP_RX_UNLOCK, i);
		}
	}
	if (check_all && revents == 0) { /* signal on the global queue */
		if (want_tx)
			selrecord(td, &na->tx_si);
		if (want_rx)
			selrecord(td, &na->rx_si);
	}
	if (core_lock == LOCKED_CL)
		na->nm_lock(ifp, NETMAP_CORE_UNLOCK, 0);

	return (revents);
}

/*------- driver support routines ------*/

/*
 * default lock wrapper.
 */
static void
netmap_lock_wrapper(struct ifnet *dev, int what, u_int queueid)
{
	struct netmap_adapter *na = NA(dev);

	switch (what) {
#ifdef linux	/* some systems do not need a lock on register */
	case NETMAP_REG_LOCK:
	case NETMAP_REG_UNLOCK:
		break;
#endif /* linux */

	case NETMAP_CORE_LOCK:
		mtx_lock(&na->core_lock);
		break;

	case NETMAP_CORE_UNLOCK:
		mtx_unlock(&na->core_lock);
		break;

	case NETMAP_TX_LOCK:
		mtx_lock(&na->tx_rings[queueid].q_lock);
		break;

	case NETMAP_TX_UNLOCK:
		mtx_unlock(&na->tx_rings[queueid].q_lock);
		break;

	case NETMAP_RX_LOCK:
		mtx_lock(&na->rx_rings[queueid].q_lock);
		break;

	case NETMAP_RX_UNLOCK:
		mtx_unlock(&na->rx_rings[queueid].q_lock);
		break;
	}
}


/*
 * Initialize a ``netmap_adapter`` object created by a driver on attach.
 * We allocate a block of memory with room for a struct netmap_adapter
 * plus two sets of N+2 struct netmap_kring (where N is the number
 * of hardware rings):
 * krings	0..N-1	are for the hardware queues.
 * kring	N	is for the host stack queue
 * kring	N+1	is only used for the selinfo for all queues.
 * Return 0 on success, ENOMEM otherwise.
 *
 * By default the receive and transmit adapter ring counts are both
 * initialized to num_queues. na->num_tx_rings can be set for cards
 * with different tx/rx setups.
 */
int
netmap_attach(struct netmap_adapter *na, int num_queues)
{
	int n, size;
	void *buf;
	struct ifnet *ifp = na->ifp;

	if (ifp == NULL) {
		D("ifp not set, giving up");
		return EINVAL;
	}
	/* clear other fields ? */
	na->refcount = 0;
	if (na->num_tx_rings == 0)
		na->num_tx_rings = num_queues;
	na->num_rx_rings = num_queues;
	/* on each direction we have N+1 resources
	 * 0..n-1	are the hardware rings
	 * n		is the ring attached to the stack.
	 */
	n = na->num_rx_rings + na->num_tx_rings + 2;
	size = sizeof(*na) + n * sizeof(struct netmap_kring);

	buf = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (buf) {
		WNA(ifp) = buf;
		na->tx_rings = (void *)((char *)buf + sizeof(*na));
		na->rx_rings = na->tx_rings + na->num_tx_rings + 1;
		bcopy(na, buf, sizeof(*na));
		NETMAP_SET_CAPABLE(ifp);

		na = buf;
		/* Core lock initialized here.  Others are initialized after
		 * netmap_if_new.
		 */
		mtx_init(&na->core_lock, "netmap core lock", MTX_NETWORK_LOCK,
		    MTX_DEF);
		if (na->nm_lock == NULL) {
			ND("using default locks for %s", ifp->if_xname);
			na->nm_lock = netmap_lock_wrapper;
		}
	}
#ifdef linux
	if (ifp->netdev_ops) {
		ND("netdev_ops %p", ifp->netdev_ops);
		/* prepare a clone of the netdev ops */
		na->nm_ndo = *ifp->netdev_ops;
	}
	na->nm_ndo.ndo_start_xmit = linux_netmap_start;
#endif
	D("%s for %s", buf ? "ok" : "failed", ifp->if_xname);

	return (buf ? 0 : ENOMEM);
}
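
/*
 * For illustration only: a sketch of how a hypothetical "foo" driver
 * would call netmap_attach() at the end of its attach routine (real
 * callers are the per-driver netmap support files); the foo_* names
 * and softc fields are assumptions:
 *
 *	struct netmap_adapter na;
 *
 *	bzero(&na, sizeof(na));
 *	na.ifp = sc->ifp;
 *	na.separate_locks = 0;
 *	na.num_tx_desc = sc->num_tx_desc;
 *	na.num_rx_desc = sc->num_rx_desc;
 *	na.nm_register = foo_netmap_reg;
 *	na.nm_txsync = foo_netmap_txsync;
 *	na.nm_rxsync = foo_netmap_rxsync;
 *	netmap_attach(&na, sc->num_queues);
 */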


/*
 * Free the allocated memory linked to the given ``netmap_adapter``
 * object.
 */
void
netmap_detach(struct ifnet *ifp)
{
	struct netmap_adapter *na = NA(ifp);

	if (!na)
		return;

	mtx_destroy(&na->core_lock);

	bzero(na, sizeof(*na));
	WNA(ifp) = NULL;
	free(na, M_DEVBUF);
}


/*
 * Intercept packets from the network stack and pass them
 * to netmap as incoming packets on the 'software' ring.
 * We are not locked when called.
 */
int
netmap_start(struct ifnet *ifp, struct mbuf *m)
{
	struct netmap_adapter *na = NA(ifp);
	struct netmap_kring *kring = &na->rx_rings[na->num_rx_rings];
	u_int i, len = MBUF_LEN(m);
	u_int error = EBUSY, lim = kring->nkr_num_slots - 1;
	struct netmap_slot *slot;

	if (netmap_verbose & NM_VERB_HOST)
		D("%s packet %d len %d from the stack", ifp->if_xname,
			kring->nr_hwcur + kring->nr_hwavail, len);
	na->nm_lock(ifp, NETMAP_CORE_LOCK, 0);
	if (kring->nr_hwavail >= lim) {
		if (netmap_verbose)
			D("stack ring %s full\n", ifp->if_xname);
		goto done;	/* no space */
	}
	if (len > NETMAP_BUF_SIZE) {
		D("drop packet size %d > %d", len, NETMAP_BUF_SIZE);
		goto done;	/* too long for us */
	}

	/* compute the insert position */
	i = kring->nr_hwcur + kring->nr_hwavail;
	if (i > lim)
		i -= lim + 1;
	slot = &kring->ring->slot[i];
	m_copydata(m, 0, len, NMB(slot));
	slot->len = len;
	kring->nr_hwavail++;
	if (netmap_verbose & NM_VERB_HOST)
		D("wake up host ring %s %d", na->ifp->if_xname, na->num_rx_rings);
	selwakeuppri(&kring->si, PI_NET);
	error = 0;
done:
	na->nm_lock(ifp, NETMAP_CORE_UNLOCK, 0);

	/* release the mbuf in both cases, success or failure. As an
	 * alternative, put the mbuf in a free list and free the list
	 * only when really necessary.
	 */
	m_freem(m);

	return (error);
}


/*
 * netmap_reset() is called by the driver routines when reinitializing
 * a ring. The driver is in charge of locking to protect the kring.
 * If netmap mode is not set just return NULL.
 */
struct netmap_slot *
netmap_reset(struct netmap_adapter *na, enum txrx tx, int n,
	u_int new_cur)
{
	struct netmap_kring *kring;
	int new_hwofs, lim;

	if (na == NULL)
		return NULL;	/* no netmap support here */
	if (!(na->ifp->if_capenable & IFCAP_NETMAP))
		return NULL;	/* nothing to reinitialize */

	if (tx == NR_TX) {
		if (n >= na->num_tx_rings)
			return NULL;
		kring = na->tx_rings + n;
		new_hwofs = kring->nr_hwcur - new_cur;
	} else {
		if (n >= na->num_rx_rings)
			return NULL;
		kring = na->rx_rings + n;
		new_hwofs = kring->nr_hwcur + kring->nr_hwavail - new_cur;
	}
	lim = kring->nkr_num_slots - 1;
	if (new_hwofs > lim)
		new_hwofs -= lim + 1;

	/* Always set the new offset value and realign the ring. */
	kring->nkr_hwofs = new_hwofs;
	if (tx == NR_TX)
		kring->nr_hwavail = kring->nkr_num_slots - 1;
	ND(10, "new hwofs %d on %s %s[%d]",
			kring->nkr_hwofs, na->ifp->if_xname,
			tx == NR_TX ? "TX" : "RX", n);

#if 0 // def linux
	/* XXX check that the mappings are correct */
	/* need ring_nr, adapter->pdev, direction */
	buffer_info->dma = dma_map_single(&pdev->dev, addr, adapter->rx_buffer_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
		D("error mapping rx netmap buffer %d", i);
		// XXX fix error handling
	}

#endif /* linux */
	/*
	 * Wakeup on the individual and global selinfo.
	 * We do the wakeup here, but the ring is not yet reconfigured.
	 * However, we are under lock so there are no races.
	 */
	selwakeuppri(&kring->si, PI_NET);
	selwakeuppri(tx == NR_TX ? &na->tx_si : &na->rx_si, PI_NET);
	return kring->ring->slot;
}
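
/*
 * Usage sketch for netmap_reset() (hypothetical driver code): after a
 * hardware reset that rewinds tx ring ring_nr to slot 0,
 *
 *	slot = netmap_reset(na, NR_TX, ring_nr, 0);
 *
 * returns the slot array whose buffers must be reloaded into the NIC
 * descriptors, or NULL if the interface is not in netmap mode.
 */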


/*
 * Default functions to handle rx/tx interrupts
 * we have 4 cases:
 * 1 ring, single lock:
 *	lock(core); wake(i=0); unlock(core)
 * N rings, single lock:
 *	lock(core); wake(i); wake(N+1) unlock(core)
 * 1 ring, separate locks: (i=0)
 *	lock(i); wake(i); unlock(i)
 * N rings, separate locks:
 *	lock(i); wake(i); unlock(i); lock(core) wake(N+1) unlock(core)
 * work_done is non-null on the RX path.
 */
int
netmap_rx_irq(struct ifnet *ifp, int q, int *work_done)
{
	struct netmap_adapter *na;
	struct netmap_kring *r;
	NM_SELINFO_T *main_wq;

	if (!(ifp->if_capenable & IFCAP_NETMAP))
		return 0;
	ND(5, "received %s queue %d", work_done ? "RX" : "TX" , q);
	na = NA(ifp);
	if (na->na_flags & NAF_SKIP_INTR) {
		ND("use regular interrupt");
		return 0;
	}

	if (work_done) { /* RX path */
		if (q >= na->num_rx_rings)
			return 0;	// regular queue
		r = na->rx_rings + q;
		r->nr_kflags |= NKR_PENDINTR;
		main_wq = (na->num_rx_rings > 1) ? &na->rx_si : NULL;
	} else { /* tx path */
		if (q >= na->num_tx_rings)
			return 0;	// regular queue
		r = na->tx_rings + q;
		main_wq = (na->num_tx_rings > 1) ? &na->tx_si : NULL;
		work_done = &q; /* dummy */
	}
	if (na->separate_locks) {
		mtx_lock(&r->q_lock);
		selwakeuppri(&r->si, PI_NET);
		mtx_unlock(&r->q_lock);
		if (main_wq) {
			mtx_lock(&na->core_lock);
			selwakeuppri(main_wq, PI_NET);
			mtx_unlock(&na->core_lock);
		}
	} else {
		mtx_lock(&na->core_lock);
		selwakeuppri(&r->si, PI_NET);
		if (main_wq)
			selwakeuppri(main_wq, PI_NET);
		mtx_unlock(&na->core_lock);
	}
	*work_done = 1; /* do not fire napi again */
	return 1;
}
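
/*
 * Illustrative use of netmap_rx_irq() from a driver's rx interrupt
 * handler (a sketch; the field names are assumptions):
 *
 *	if (netmap_rx_irq(adapter->ifp, rxr->me, &work_done))
 *		return;	// netmap consumed the event, skip the normal path
 */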


#ifdef linux	/* linux-specific routines */

/*
 * Remap linux arguments into the FreeBSD call.
 * - pwait is the poll table, passed as 'dev';
 *   If pwait == NULL someone else already woke up before. We can report
 *   events but they are filtered upstream.
 *   If pwait != NULL, then pwait->key contains the list of events.
 * - events is computed from pwait as above.
 * - file is passed as 'td';
 */
static u_int
linux_netmap_poll(struct file * file, struct poll_table_struct *pwait)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0)
	int events = pwait ? pwait->key : POLLIN | POLLOUT;
#else /* in 3.4.0 field 'key' was renamed to '_key' */
	int events = pwait ? pwait->_key : POLLIN | POLLOUT;
#endif
	return netmap_poll((void *)pwait, events, (void *)file);
}

static int
linux_netmap_mmap(struct file *f, struct vm_area_struct *vma)
{
	int lut_skip, i, j;
	int user_skip = 0;
	struct lut_entry *l_entry;
	int error = 0;
	unsigned long off, tomap;
	/*
	 * vma->vm_start: start of mapping user address space
	 * vma->vm_end: end of the mapping user address space
	 * vma->vm_pgoff: offset of first page in the device
	 */
1734 
1735 	// XXX security checks
1736 
1737 	error = netmap_get_memory(f->private_data);
1738 	ND("get_memory returned %d", error);
1739 	if (error)
1740 		return -error;
1741 
1742 	off = vma->vm_pgoff << PAGE_SHIFT; /* offset in bytes */
1743 	tomap = vma->vm_end - vma->vm_start;
1744 	for (i = 0; i < NETMAP_POOLS_NR; i++) {  /* loop through obj_pools */
1745 		const struct netmap_obj_pool *p = &nm_mem.pools[i];
1746 		/*
1747 		 * In each pool memory is allocated in clusters
1748 		 * of size _clustsize, each containing clustentries
1749 		 * entries. For each object k we already store the
1750 		 * vtophys mapping in lut[k] so we use that, scanning
1751 		 * the lut[] array in steps of clustentries,
1752 		 * and we map each cluster (not individual pages,
1753 		 * it would be overkill).
1754 		 */
1755 
1756 		/*
1757 		 * We interpret vm_pgoff as an offset into the whole
1758 		 * netmap memory, as if all clusters were contiguous.
1759 		 */
1760 		for (lut_skip = 0, j = 0; j < p->_numclusters; j++, lut_skip += p->clustentries) {
1761 			unsigned long paddr, mapsize;
1762 			if (p->_clustsize <= off) {
1763 				off -= p->_clustsize;
1764 				continue;
1765 			}
1766 			l_entry = &p->lut[lut_skip]; /* first obj in the cluster */
1767 			paddr = l_entry->paddr + off;
1768 			mapsize = p->_clustsize - off;
1769 			off = 0;
1770 			if (mapsize > tomap)
1771 				mapsize = tomap;
1772 			ND("remap_pfn_range(%lx, %lx, %lx)",
1773 				vma->vm_start + user_skip,
1774 				paddr >> PAGE_SHIFT, mapsize);
1775 			if (remap_pfn_range(vma, vma->vm_start + user_skip,
1776 					paddr >> PAGE_SHIFT, mapsize,
1777 					vma->vm_page_prot))
1778 				return -EAGAIN; // XXX check return value
1779 			user_skip += mapsize;
1780 			tomap -= mapsize;
1781 			if (tomap == 0)
1782 				goto done;
1783 		}
1784 	}
1785 done:
1786 
1787 	return 0;
1788 }
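
/*
 * Illustrative sketch (userspace view, not compiled here): the cluster
 * walk above is what lets a single contiguous mmap() of /dev/netmap
 * cover kernel memory that is actually split into pools and clusters.
 * nr_memsize, NIOCGINFO and NETMAP_API are from the netmap(4) API.
 */
#if 0
	struct nmreq req;

	bzero(&req, sizeof(req));
	req.nr_version = NETMAP_API;
	ioctl(fd, NIOCGINFO, &req);	/* learn the size of the region */
	mem = mmap(NULL, req.nr_memsize, PROT_READ | PROT_WRITE,
	    MAP_SHARED, fd, 0);		/* offset 0 = start of netmap memory */
#endif /* 0 */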
1789 
1790 static netdev_tx_t
1791 linux_netmap_start(struct sk_buff *skb, struct net_device *dev)
1792 {
1793 	netmap_start(dev, skb);
1794 	return (NETDEV_TX_OK);
1795 }
1796 
1797 
1798 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37)	// XXX was 38
1799 #define LIN_IOCTL_NAME	.ioctl
1800 int
1801 linux_netmap_ioctl(struct inode *inode, struct file *file, u_int cmd, u_long data /* arg */)
1802 #else
1803 #define LIN_IOCTL_NAME	.unlocked_ioctl
1804 long
1805 linux_netmap_ioctl(struct file *file, u_int cmd, u_long data /* arg */)
1806 #endif
1807 {
1808 	int ret;
1809 	struct nmreq nmr;
1810 	bzero(&nmr, sizeof(nmr));
1811 
1812 	if (data && copy_from_user(&nmr, (void *)data, sizeof(nmr)) != 0)
1813 		return -EFAULT;
1814 	ret = netmap_ioctl(NULL, cmd, (caddr_t)&nmr, 0, (void *)file);
1815 	if (data && copy_to_user((void *)data, &nmr, sizeof(nmr)) != 0)
1816 		return -EFAULT;
1817 	return -ret;
1818 }
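
/*
 * Note on the sign convention above: netmap_ioctl() follows the FreeBSD
 * practice of returning 0 or a positive errno value, while Linux callers
 * expect 0 or a negative errno (e.g. -EINVAL), hence the negation of the
 * final result and the explicit -EFAULT on copy_{from,to}_user() failure.
 */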
1819 
1820 
1821 static int
1822 netmap_release(struct inode *inode, struct file *file)
1823 {
1824 	(void)inode;	/* UNUSED */
1825 	if (file->private_data)
1826 		netmap_dtor(file->private_data);
1827 	return (0);
1828 }
1829 
1830 static int
1831 linux_netmap_open(struct inode *inode, struct file *file)
1832 {
1833 	struct netmap_priv_d *priv;
1834 	(void)inode;	/* UNUSED */
1835 
1836 	priv = malloc(sizeof(struct netmap_priv_d), M_DEVBUF,
1837 			      M_NOWAIT | M_ZERO);
1838 	if (priv == NULL)
1839 		return -ENOMEM;
1840 
1841 	file->private_data = priv;
1842 
1843 	return (0);
1844 }
1845 
1846 static struct file_operations netmap_fops = {
1847     .open = linux_netmap_open,
1848     .mmap = linux_netmap_mmap,
1849     LIN_IOCTL_NAME = linux_netmap_ioctl,
1850     .poll = linux_netmap_poll,
1851     .release = netmap_release,
1852 };
1853 
1854 static struct miscdevice netmap_cdevsw = {	/* same name as FreeBSD */
1855 	MISC_DYNAMIC_MINOR,
1856 	"netmap",
1857 	&netmap_fops,
1858 };
1859 
1860 static int netmap_init(void);
1861 static void netmap_fini(void);
1862 
1863 /* Errors have negative values on Linux */
1864 static int linux_netmap_init(void)
1865 {
1866 	return -netmap_init();
1867 }
1868 
1869 module_init(linux_netmap_init);
1870 module_exit(netmap_fini);
1871 /* export certain symbols to other modules */
1872 EXPORT_SYMBOL(netmap_attach);		// driver attach routines
1873 EXPORT_SYMBOL(netmap_detach);		// driver detach routines
1874 EXPORT_SYMBOL(netmap_ring_reinit);	// ring init on error
1875 EXPORT_SYMBOL(netmap_buffer_lut);
1876 EXPORT_SYMBOL(netmap_total_buffers);	// index check
1877 EXPORT_SYMBOL(netmap_buffer_base);
1878 EXPORT_SYMBOL(netmap_reset);		// ring init routines
1879 EXPORT_SYMBOL(netmap_buf_size);
1880 EXPORT_SYMBOL(netmap_rx_irq);		// default irq handler
1881 EXPORT_SYMBOL(netmap_no_pendintr);	// XXX mitigation - should go away
1882 
1883 
1884 MODULE_AUTHOR("http://info.iet.unipi.it/~luigi/netmap/");
1885 MODULE_DESCRIPTION("The netmap packet I/O framework");
1886 MODULE_LICENSE("Dual BSD/GPL"); /* the code here is all BSD. */
1887 
1888 #else /* __FreeBSD__ */
1889 
1890 static struct cdevsw netmap_cdevsw = {
1891 	.d_version = D_VERSION,
1892 	.d_name = "netmap",
1893 	.d_open = netmap_open,
1894 	.d_mmap = netmap_mmap,
1895 	.d_mmap_single = netmap_mmap_single,
1896 	.d_ioctl = netmap_ioctl,
1897 	.d_poll = netmap_poll,
1898 	.d_close = netmap_close,
1899 };
1900 #endif /* __FreeBSD__ */
1901 
1902 #ifdef NM_BRIDGE
1903 /*
1904  *---- support for virtual bridge -----
1905  */
1906 
1907 /* ----- FreeBSD if_bridge hash function ------- */
1908 
1909 /*
1910  * The following hash function is adapted from "Hash Functions" by Bob Jenkins
1911  * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
1912  *
1913  * http://www.burtleburtle.net/bob/hash/spooky.html
1914  */
1915 #define mix(a, b, c)                                                    \
1916 do {                                                                    \
1917         a -= b; a -= c; a ^= (c >> 13);                                 \
1918         b -= c; b -= a; b ^= (a << 8);                                  \
1919         c -= a; c -= b; c ^= (b >> 13);                                 \
1920         a -= b; a -= c; a ^= (c >> 12);                                 \
1921         b -= c; b -= a; b ^= (a << 16);                                 \
1922         c -= a; c -= b; c ^= (b >> 5);                                  \
1923         a -= b; a -= c; a ^= (c >> 3);                                  \
1924         b -= c; b -= a; b ^= (a << 10);                                 \
1925         c -= a; c -= b; c ^= (b >> 15);                                 \
1926 } while (/*CONSTCOND*/0)
1927 
1928 static __inline uint32_t
1929 nm_bridge_rthash(const uint8_t *addr)
1930 {
1931         uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = 0; // hash key
1932 
1933         b += addr[5] << 8;
1934         b += addr[4];
1935         a += addr[3] << 24;
1936         a += addr[2] << 16;
1937         a += addr[1] << 8;
1938         a += addr[0];
1939 
1940         mix(a, b, c);
1941 #define BRIDGE_RTHASH_MASK	(NM_BDG_HASH-1)
1942         return (c & BRIDGE_RTHASH_MASK);
1943 }
1944 
1945 #undef mix
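
/*
 * Note: the (c & BRIDGE_RTHASH_MASK) reduction above assumes NM_BDG_HASH
 * is a power of two, so that NM_BDG_HASH-1 is an all-ones mask; e.g.
 * with NM_BDG_HASH == 1024 the hash picks one of 1024 buckets in the
 * forwarding table b->ht[].
 */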
1946 
1947 
1948 static int
1949 bdg_netmap_reg(struct ifnet *ifp, int onoff)
1950 {
1951 	int i, err = 0;
1952 	struct nm_bridge *b = ifp->if_bridge;
1953 
1954 	BDG_LOCK(b);
1955 	if (onoff) {
1956 		/* the interface must already be in the list;
1957 		 * we only need to mark the port as active
1958 		 */
1959 		ND("should attach %s to the bridge", ifp->if_xname);
1960 		for (i=0; i < NM_BDG_MAXPORTS; i++)
1961 			if (b->bdg_ports[i] == ifp)
1962 				break;
1963 		if (i == NM_BDG_MAXPORTS) {
1964 			D("no more ports available");
1965 			err = EINVAL;
1966 			goto done;
1967 		}
1968 		ND("setting %s in netmap mode", ifp->if_xname);
1969 		ifp->if_capenable |= IFCAP_NETMAP;
1970 		NA(ifp)->bdg_port = i;
1971 		b->act_ports |= (1<<i);
1972 		b->bdg_ports[i] = ifp;
1973 	} else {
1974 		/* should be in the list, too -- remove from the mask */
1975 		ND("removing %s from netmap mode", ifp->if_xname);
1976 		ifp->if_capenable &= ~IFCAP_NETMAP;
1977 		i = NA(ifp)->bdg_port;
1978 		b->act_ports &= ~(1<<i);
1979 	}
1980 done:
1981 	BDG_UNLOCK(b);
1982 	return err;
1983 }
1984 
1985 
1986 static int
1987 nm_bdg_flush(struct nm_bdg_fwd *ft, int n, struct ifnet *ifp)
1988 {
1989 	int i, ifn;
1990 	uint64_t all_dst, dst;
1991 	uint32_t sh, dh;
1992 	uint64_t mysrc = 1 << NA(ifp)->bdg_port;
1993 	uint64_t smac, dmac;
1994 	struct netmap_slot *slot;
1995 	struct nm_bridge *b = ifp->if_bridge;
1996 
1997 	ND("prepare to send %d packets, act_ports 0x%x", n, b->act_ports);
1998 	/* only consider valid destinations */
1999 	all_dst = (b->act_ports & ~mysrc);
2000 	/* first pass: hash and find destinations */
2001 	for (i = 0; likely(i < n); i++) {
2002 		uint8_t *buf = ft[i].buf;
2003 		dmac = le64toh(*(uint64_t *)(buf)) & 0xffffffffffff;
2004 		smac = le64toh(*(uint64_t *)(buf + 4));
2005 		smac >>= 16;
2006 		if (unlikely(netmap_verbose)) {
2007 		    uint8_t *s = buf+6, *d = buf;
2008 		    D("%d len %4d %02x:%02x:%02x:%02x:%02x:%02x -> %02x:%02x:%02x:%02x:%02x:%02x",
2009 			i,
2010 			ft[i].len,
2011 			s[0], s[1], s[2], s[3], s[4], s[5],
2012 			d[0], d[1], d[2], d[3], d[4], d[5]);
2013 		}
2014 		/*
2015 		 * The hash is somewhat expensive, there might be some
2016 		 * worthwhile optimizations here.
2017 		 */
2018 		if ((buf[6] & 1) == 0) { /* valid src */
2019 			uint8_t *s = buf+6;
2020 			sh = nm_bridge_rthash(buf+6); // XXX hash of source
2021 			/* update source port forwarding entry */
2022 			b->ht[sh].mac = smac;	/* XXX expire ? */
2023 			b->ht[sh].ports = mysrc;
2024 			if (netmap_verbose)
2025 			    D("src %02x:%02x:%02x:%02x:%02x:%02x on port %d",
2026 				s[0], s[1], s[2], s[3], s[4], s[5], NA(ifp)->bdg_port);
2027 		}
2028 		dst = 0;
2029 		if ( (buf[0] & 1) == 0) { /* unicast */
2030 			uint8_t *d = buf;
2031 			dh = nm_bridge_rthash(buf); // XXX hash of dst
2032 			if (b->ht[dh].mac == dmac) {	/* found dst */
2033 				dst = b->ht[dh].ports;
2034 				if (netmap_verbose)
2035 				    D("dst %02x:%02x:%02x:%02x:%02x:%02x to port %x",
2036 					d[0], d[1], d[2], d[3], d[4], d[5], (uint32_t)(dst >> 16));
2037 			}
2038 		}
2039 		if (dst == 0)
2040 			dst = all_dst;
2041 		dst &= all_dst; /* only consider valid ports */
2042 		if (unlikely(netmap_verbose))
2043 			D("pkt goes to ports 0x%x", (uint32_t)dst);
2044 		ft[i].dst = dst;
2045 	}
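
	/*
	 * Worked example of the bitmask arithmetic above: with ports 0..3
	 * active (act_ports == 0xf) and a frame entering from port 1
	 * (mysrc == 0x2), all_dst == 0xd; a frame whose destination is not
	 * in the hash table floods to ports 0, 2 and 3, while a known
	 * unicast goes only to the ports in b->ht[dh].ports.
	 */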
2046 
2047 	/* second pass, scan interfaces and forward */
2048 	all_dst = (b->act_ports & ~mysrc);
2049 	for (ifn = 0; all_dst; ifn++) {
2050 		struct ifnet *dst_ifp = b->bdg_ports[ifn];
2051 		struct netmap_adapter *na;
2052 		struct netmap_kring *kring;
2053 		struct netmap_ring *ring;
2054 		int j, lim, sent, locked;
2055 
2056 		if (!dst_ifp)
2057 			continue;
2058 		ND("scan port %d %s", ifn, dst_ifp->if_xname);
2059 		dst = 1 << ifn;
2060 		if ((dst & all_dst) == 0)	/* skip if not set */
2061 			continue;
2062 		all_dst &= ~dst;	/* clear current node */
2063 		na = NA(dst_ifp);
2064 
2065 		ring = NULL;
2066 		kring = NULL;
2067 		lim = sent = locked = 0;
2068 		/* inside, scan slots */
2069 		for (i = 0; likely(i < n); i++) {
2070 			if ((ft[i].dst & dst) == 0)
2071 				continue;	/* not here */
2072 			if (!locked) {
2073 				kring = &na->rx_rings[0];
2074 				ring = kring->ring;
2075 				lim = kring->nkr_num_slots - 1;
2076 				na->nm_lock(dst_ifp, NETMAP_RX_LOCK, 0);
2077 				locked = 1;
2078 			}
2079 			if (unlikely(kring->nr_hwavail >= lim)) {
2080 				if (netmap_verbose)
2081 					D("rx ring full on %s", ifp->if_xname);
2082 				break;
2083 			}
2084 			j = kring->nr_hwcur + kring->nr_hwavail;
2085 			if (j > lim)
2086 				j -= kring->nkr_num_slots;
2087 			slot = &ring->slot[j];
2088 			ND("send %d %d bytes at %s:%d", i, ft[i].len, dst_ifp->if_xname, j);
2089 			pkt_copy(ft[i].buf, NMB(slot), ft[i].len);
2090 			slot->len = ft[i].len;
2091 			kring->nr_hwavail++;
2092 			sent++;
2093 		}
2094 		if (locked) {
2095 			ND("sent %d on %s", sent, dst_ifp->if_xname);
2096 			if (sent)
2097 				selwakeuppri(&kring->si, PI_NET);
2098 			na->nm_lock(dst_ifp, NETMAP_RX_UNLOCK, 0);
2099 		}
2100 	}
2101 	return 0;
2102 }
2103 
2104 /*
2105  * Main dispatch routine: collect outgoing frames into the forwarding
 * table ft[] and flush them in batches of at most netmap_bridge slots.
2106  */
2107 static int
2108 bdg_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
2109 {
2110 	struct netmap_adapter *na = NA(ifp);
2111 	struct netmap_kring *kring = &na->tx_rings[ring_nr];
2112 	struct netmap_ring *ring = kring->ring;
2113 	int i, j, k, lim = kring->nkr_num_slots - 1;
2114 	struct nm_bdg_fwd *ft = (struct nm_bdg_fwd *)(ifp + 1);
2115 	int ft_i;	/* position in the forwarding table */
2116 
2117 	k = ring->cur;
2118 	if (k > lim)
2119 		return netmap_ring_reinit(kring);
2120 	if (do_lock)
2121 		na->nm_lock(ifp, NETMAP_TX_LOCK, ring_nr);
2122 
2123 	if (netmap_bridge <= 0) { /* testing only */
2124 		j = k; // used all
2125 		goto done;
2126 	}
2127 	if (netmap_bridge > NM_BDG_BATCH)
2128 		netmap_bridge = NM_BDG_BATCH;
2129 
2130 	ft_i = 0;	/* start from 0 */
2131 	for (j = kring->nr_hwcur; likely(j != k); j = unlikely(j == lim) ? 0 : j+1) {
2132 		struct netmap_slot *slot = &ring->slot[j];
2133 		int len = ft[ft_i].len = slot->len;
2134 		char *buf = ft[ft_i].buf = NMB(slot);
2135 
2136 		prefetch(buf);
2137 		if (unlikely(len < 14))
2138 			continue;
2139 		if (unlikely(++ft_i == netmap_bridge))
2140 			ft_i = nm_bdg_flush(ft, ft_i, ifp);
2141 	}
2142 	if (ft_i)
2143 		ft_i = nm_bdg_flush(ft, ft_i, ifp);
2144 	/* count how many packets we sent */
2145 	i = k - j;
2146 	if (i < 0)
2147 		i += kring->nkr_num_slots;
2148 	kring->nr_hwavail = kring->nkr_num_slots - 1 - i;
2149 	if (j != k)
2150 		D("early break at %d/ %d, avail %d", j, k, kring->nr_hwavail);
2151 
2152 done:
2153 	kring->nr_hwcur = j;
2154 	ring->avail = kring->nr_hwavail;
2155 	if (do_lock)
2156 		na->nm_lock(ifp, NETMAP_TX_UNLOCK, ring_nr);
2157 
2158 	if (netmap_verbose)
2159 		D("%s ring %d lock %d", ifp->if_xname, ring_nr, do_lock);
2160 	return 0;
2161 }
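
/*
 * Tuning sketch: the batch size used above (netmap_bridge, capped at
 * NM_BDG_BATCH) is exposed as a sysctl elsewhere in this module; on a
 * FreeBSD build the knob would be set along the lines of
 *
 *	sysctl dev.netmap.bridge=64	# flush at most 64 frames at a time
 *
 * (the exact OID is an assumption here), trading per-frame overhead
 * for latency.
 */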
2162 
2163 static int
2164 bdg_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
2165 {
2166 	struct netmap_adapter *na = NA(ifp);
2167 	struct netmap_kring *kring = &na->rx_rings[ring_nr];
2168 	struct netmap_ring *ring = kring->ring;
2169 	u_int j, n, lim = kring->nkr_num_slots - 1;
2170 	u_int k = ring->cur, resvd = ring->reserved;
2171 
2172 	ND("%s ring %d lock %d avail %d",
2173 		ifp->if_xname, ring_nr, do_lock, kring->nr_hwavail);
2174 
2175 	if (k > lim)
2176 		return netmap_ring_reinit(kring);
2177 	if (do_lock)
2178 		na->nm_lock(ifp, NETMAP_RX_LOCK, ring_nr);
2179 
2180 	/* skip past packets that userspace has released */
2181 	j = kring->nr_hwcur;    /* netmap ring index */
2182 	if (resvd > 0) {
2183 		if (resvd + ring->avail >= lim + 1) {
2184 			D("XXX invalid reserve/avail %d %d", resvd, ring->avail);
2185 			ring->reserved = resvd = 0; // XXX panic...
2186 		}
2187 		k = (k >= resvd) ? k - resvd : k + lim + 1 - resvd;
2188 	}
2189 
2190 	if (j != k) { /* userspace has released some packets. */
2191 		/* n is a u_int, so "if (n < 0)" could never fire;
2192 		 * compute the ring wraparound explicitly instead. */
2193 		n = (k >= j) ? k - j : k + kring->nkr_num_slots - j;
2194 		ND("userspace releases %d packets", n);
2195 		for (n = 0; likely(j != k); n++) {
2196 			struct netmap_slot *slot = &ring->slot[j];
2197 			void *addr = NMB(slot);
2198 
2199 			if (addr == netmap_buffer_base) { /* bad buf */
2200 				if (do_lock)
2201 					na->nm_lock(ifp, NETMAP_RX_UNLOCK, ring_nr);
2202 				return netmap_ring_reinit(kring);
2203 			}
2204 			/* XXX decrease refcount for buffer -- not implemented here */
2205 
2206 			slot->flags &= ~NS_BUF_CHANGED;
2207 			j = unlikely(j == lim) ? 0 : j + 1;
2208 		}
2209 		kring->nr_hwavail -= n;
2210 		kring->nr_hwcur = k;
2211 	}
2212 	/* tell userspace that there are new packets */
2213 	ring->avail = kring->nr_hwavail - resvd;
2214 
2215 	if (do_lock)
2216 		na->nm_lock(ifp, NETMAP_RX_UNLOCK, ring_nr);
2217 	return 0;
2218 }
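
/*
 * Worked example of the index arithmetic in bdg_netmap_rxsync(), for an
 * 8-slot ring (lim == 7): with nr_hwcur == 2, ring->cur == 5 and
 * ring->reserved == 1, k is rolled back to 4, so n == 2 slots are
 * returned to the kernel; afterwards nr_hwcur == 4, nr_hwavail drops
 * by 2, and the avail reported to userspace is nr_hwavail - 1 (the
 * reserved slot stays hidden).
 */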
2219 
2220 static void
2221 bdg_netmap_attach(struct ifnet *ifp)
2222 {
2223 	struct netmap_adapter na;
2224 
2225 	ND("attaching virtual bridge");
2226 	bzero(&na, sizeof(na));
2227 
2228 	na.ifp = ifp;
2229 	na.separate_locks = 1;
2230 	na.num_tx_desc = NM_BRIDGE_RINGSIZE;
2231 	na.num_rx_desc = NM_BRIDGE_RINGSIZE;
2232 	na.nm_txsync = bdg_netmap_txsync;
2233 	na.nm_rxsync = bdg_netmap_rxsync;
2234 	na.nm_register = bdg_netmap_reg;
2235 	netmap_attach(&na, 1);
2236 }
2237 
2238 #endif /* NM_BRIDGE */
2239 
2240 static struct cdev *netmap_dev; /* /dev/netmap character device. */
2241 
2242 
2243 /*
2244  * Module loader.
2245  *
2246  * Create the /dev/netmap device and initialize all global
2247  * variables.
2248  *
2249  * Return 0 on success, errno on failure.
2250  */
2251 static int
2252 netmap_init(void)
2253 {
2254 	int error;
2255 
2256 	error = netmap_memory_init();
2257 	if (error != 0) {
2258 		printf("netmap: unable to initialize the memory allocator.\n");
2259 		return (error);
2260 	}
2261 	printf("netmap: loaded module\n");
2262 	netmap_dev = make_dev(&netmap_cdevsw, 0, UID_ROOT, GID_WHEEL, 0660,
2263 			      "netmap");
2264 
2265 #ifdef NM_BRIDGE
2266 	{
2267 	int i;
2268 	for (i = 0; i < NM_BRIDGES; i++)
2269 		mtx_init(&nm_bridges[i].bdg_lock, "bdg lock", "bdg_lock", MTX_DEF);
2270 	}
2271 #endif
2272 	return (error);
2273 }
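
/*
 * Usage sketch (FreeBSD): when netmap is built as a module, the
 * sequence above runs at
 *	kldload netmap
 * time; on success the console reports "netmap: loaded module" and
 * /dev/netmap appears with mode 0660, owner root:wheel, as set by the
 * make_dev() call above.
 */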
2274 
2275 
2276 /*
2277  * Module unloader.
2278  *
2279  * Free all the memory and destroy the /dev/netmap device.
2280  */
2281 static void
2282 netmap_fini(void)
2283 {
2284 	destroy_dev(netmap_dev);
2285 	netmap_memory_fini();
2286 	printf("netmap: unloaded module.\n");
2287 }
2288 
2289 
2290 #ifdef __FreeBSD__
2291 /*
2292  * Kernel entry point.
2293  *
2294  * Initialize/finalize the module and return.
2295  *
2296  * Return 0 on success, errno on failure.
2297  */
2298 static int
2299 netmap_loader(__unused struct module *module, int event, __unused void *arg)
2300 {
2301 	int error = 0;
2302 
2303 	switch (event) {
2304 	case MOD_LOAD:
2305 		error = netmap_init();
2306 		break;
2307 
2308 	case MOD_UNLOAD:
2309 		netmap_fini();
2310 		break;
2311 
2312 	default:
2313 		error = EOPNOTSUPP;
2314 		break;
2315 	}
2316 
2317 	return (error);
2318 }
2319 
2320 
2321 DEV_MODULE(netmap, netmap_loader, NULL);
2322 #endif /* __FreeBSD__ */
2323