xref: /freebsd/sys/dev/netmap/netmap.c (revision b7c60aadbbd5c846a250c05791fe7406d6d78bf4)
1 /*
2  * Copyright (C) 2011 Matteo Landi, Luigi Rizzo. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  *   1. Redistributions of source code must retain the above copyright
8  *      notice, this list of conditions and the following disclaimer.
9  *   2. Redistributions in binary form must reproduce the above copyright
10  *      notice, this list of conditions and the following disclaimer in the
11  *      documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  */
25 
26 /*
27  * $FreeBSD$
28  * $Id: netmap.c 9795 2011-12-02 11:39:08Z luigi $
29  *
30  * This module supports memory mapped access to network devices,
31  * see netmap(4).
32  *
33  * The module uses a large memory pool allocated by the kernel
34  * and accessible as mmapped memory by multiple userspace threads/processes.
35  * The memory pool contains packet buffers and "netmap rings",
36  * i.e. user-accessible copies of the interface's queues.
37  *
38  * Access to the network card works like this:
39  * 1. a process/thread issues one or more open() on /dev/netmap, to create
40  *    a select()able file descriptor on which events are reported.
41  * 2. on each descriptor, the process issues an ioctl() to identify
42  *    the interface that should report events to the file descriptor.
43  * 3. on each descriptor, the process issues an mmap() request to
44  *    map the shared memory region within the process' address space.
45  *    The list of interesting queues is indicated by a location in
46  *    the shared memory region.
47  * 4. using the functions in the netmap(4) userspace API, a process
48  *    can look up the occupation state of a queue, access memory buffers,
49  *    and retrieve received packets or enqueue packets to transmit.
50  * 5. using some ioctl()s the process can synchronize the userspace view
51  *    of the queue with the actual status in the kernel. This includes both
52  *    receiving the notification of new packets, and transmitting new
53  *    packets on the output interface.
54  * 6. select() or poll() can be used to wait for events on individual
55  *    transmit or receive queues (or all queues for a given interface).
56  */
57 
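/*
 * To make the six steps above concrete, a minimal userspace sketch
 * (illustrative only: the NETMAP_* accessor macros come from
 * net/netmap_user.h, "em0" is an arbitrary example interface, and
 * all error handling is omitted; see netmap(4) for the real API):
 *
 *	struct nmreq nmr;
 *	struct netmap_if *nifp;
 *	struct netmap_ring *txring;
 *	struct pollfd pfd;
 *	void *mem;
 *	int fd;
 *
 *	fd = open("/dev/netmap", O_RDWR);		// step 1
 *	bzero(&nmr, sizeof(nmr));
 *	strncpy(nmr.nr_name, "em0", sizeof(nmr.nr_name));
 *	ioctl(fd, NIOCREGIF, &nmr);			// step 2
 *	mem = mmap(0, nmr.nr_memsize, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, fd, 0);				// step 3
 *	nifp = NETMAP_IF(mem, nmr.nr_offset);
 *	txring = NETMAP_TXRING(nifp, 0);		// step 4
 *	// fill txring->slot[txring->cur], then advance cur
 *	// and decrease avail
 *	ioctl(fd, NIOCTXSYNC, NULL);			// step 5
 *	pfd.fd = fd;
 *	pfd.events = POLLOUT;
 *	poll(&pfd, 1, -1);				// step 6
 */
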
58 #include <sys/cdefs.h> /* prerequisite */
59 __FBSDID("$FreeBSD$");
60 
61 #include <sys/types.h>
62 #include <sys/module.h>
63 #include <sys/errno.h>
64 #include <sys/param.h>	/* defines used in kernel.h */
65 #include <sys/jail.h>
66 #include <sys/kernel.h>	/* types used in module initialization */
67 #include <sys/conf.h>	/* cdevsw struct */
68 #include <sys/uio.h>	/* uio struct */
69 #include <sys/sockio.h>
70 #include <sys/socketvar.h>	/* struct socket */
71 #include <sys/malloc.h>
72 #include <sys/mman.h>	/* PROT_EXEC */
73 #include <sys/poll.h>
74 #include <sys/proc.h>
75 #include <vm/vm.h>	/* vtophys */
76 #include <vm/pmap.h>	/* vtophys */
77 #include <sys/socket.h> /* sockaddrs */
78 #include <machine/bus.h>
79 #include <sys/selinfo.h>
80 #include <sys/sysctl.h>
81 #include <net/if.h>
82 #include <net/bpf.h>		/* BIOCIMMEDIATE */
83 #include <net/vnet.h>
84 #include <net/netmap.h>
85 #include <dev/netmap/netmap_kern.h>
86 #include <machine/bus.h>	/* bus_dmamap_* */
87 
88 MALLOC_DEFINE(M_NETMAP, "netmap", "Network memory map");
89 
90 /*
91  * lock and unlock for the netmap memory allocator
92  */
93 #define NMA_LOCK()	mtx_lock(&netmap_mem_d->nm_mtx)
94 #define NMA_UNLOCK()	mtx_unlock(&netmap_mem_d->nm_mtx)
95 
96 /*
97  * Default amount of memory pre-allocated by the module.
98  * We start with a large size and then shrink our demand
99  * according to what is available when the module is loaded.
100  * At the moment the block is contiguous, but we can easily
101  * restrict our demand to smaller units (16..64k)
102  */
103 #define NETMAP_MEMORY_SIZE (64 * 1024 * PAGE_SIZE)
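/* with 4 KB pages the default above is 256 MB; netmap_memory_init()
 * halves the request, down to a 1 MB floor, until contigmalloc()
 * succeeds */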
104 static void * netmap_malloc(size_t size, const char *msg);
105 static void netmap_free(void *addr, const char *msg);
106 
107 #define netmap_if_malloc(len)   netmap_malloc(len, "nifp")
108 #define netmap_if_free(v)	netmap_free((v), "nifp")
109 
110 #define netmap_ring_malloc(len) netmap_malloc(len, "ring")
111 #define netmap_free_rings(na)		\
112 	netmap_free((na)->tx_rings[0].ring, "shadow rings")
113 
114 /*
115  * Allocator for a pool of packet buffers. For each buffer we have
116  * one entry in the bitmap to signal the state. Allocation scans
117  * the bitmap, but since this is done only on attach, we are not
118  * too worried about performance.
119  * XXX if we need to allocate small blocks, a translation
120  * table is used both for kernel virtual addresses and physical
121  * addresses.
122  */
123 struct netmap_buf_pool {
124 	u_int total_buffers;	/* total buffers. */
125 	u_int free;
126 	u_int bufsize;
127 	char *base;		/* buffer base address */
128 	uint32_t *bitmap;	/* one bit per buffer, 1 means free */
129 };
130 struct netmap_buf_pool nm_buf_pool;
131 /* XXX move these two vars back into netmap_buf_pool */
132 u_int netmap_total_buffers;
133 char *netmap_buffer_base;	/* address of an invalid buffer */
134 
135 /* user-controlled variables */
136 int netmap_verbose;
137 
138 static int no_timestamp; /* don't timestamp on rxsync */
139 
140 SYSCTL_NODE(_dev, OID_AUTO, netmap, CTLFLAG_RW, 0, "Netmap args");
141 SYSCTL_INT(_dev_netmap, OID_AUTO, verbose,
142     CTLFLAG_RW, &netmap_verbose, 0, "Verbose mode");
143 SYSCTL_INT(_dev_netmap, OID_AUTO, no_timestamp,
144     CTLFLAG_RW, &no_timestamp, 0, "no_timestamp");
145 SYSCTL_INT(_dev_netmap, OID_AUTO, total_buffers,
146     CTLFLAG_RD, &nm_buf_pool.total_buffers, 0, "total_buffers");
147 SYSCTL_INT(_dev_netmap, OID_AUTO, free_buffers,
148     CTLFLAG_RD, &nm_buf_pool.free, 0, "free_buffers");
149 int netmap_mitigate = 1;
150 SYSCTL_INT(_dev_netmap, OID_AUTO, mitigate, CTLFLAG_RW, &netmap_mitigate, 0, "");
151 int netmap_skip_txsync;
152 SYSCTL_INT(_dev_netmap, OID_AUTO, skip_txsync, CTLFLAG_RW, &netmap_skip_txsync, 0, "");
153 int netmap_skip_rxsync;
154 SYSCTL_INT(_dev_netmap, OID_AUTO, skip_rxsync, CTLFLAG_RW, &netmap_skip_rxsync, 0, "");
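/* the knobs above live under the dev.netmap sysctl tree, e.g.
 * "sysctl dev.netmap.verbose=1" enables most D() diagnostics below */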
155 
156 /*
157  * Allocate n buffers from the pool and fill the slots.
158  * Buffers 0 and 1 are reserved (see netmap_memory_init()).
159  */
160 static void
161 netmap_new_bufs(struct netmap_if *nifp __unused,
162 		struct netmap_slot *slot, u_int n)
163 {
164 	struct netmap_buf_pool *p = &nm_buf_pool;
165 	uint32_t bi = 0;		/* index in the bitmap */
166 	uint32_t mask, j, i = 0;	/* slot counter */
167 
168 	if (n > p->free) {
169 		D("only %d out of %d buffers available", p->free, n);
170 		return;
171 	}
172 	/* termination is guaranteed by p->free */
173 	while (i < n && p->free > 0) {
174 		uint32_t cur = p->bitmap[bi];
175 		if (cur == 0) { /* bitmask is fully used */
176 			bi++;
177 			continue;
178 		}
179 		/* locate a slot */
180 		for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1) ;
181 		p->bitmap[bi] &= ~mask;		/* slot in use */
182 		p->free--;
183 		slot[i].buf_idx = bi*32+j;
184 		slot[i].len = p->bufsize;
185 		slot[i].flags = NS_BUF_CHANGED;
186 		i++;
187 	}
188 	ND("allocated %d buffers, %d available", n, p->free);
189 }
190 
191 
192 static void
193 netmap_free_buf(struct netmap_if *nifp __unused, uint32_t i)
194 {
195 	struct netmap_buf_pool *p = &nm_buf_pool;
196 
197 	uint32_t pos, mask;
198 	if (i >= p->total_buffers) {
199 		D("invalid free index %d", i);
200 		return;
201 	}
202 	pos = i / 32;
203 	mask = 1 << (i % 32);
204 	if (p->bitmap[pos] & mask) {
205 		D("slot %d already free", i);
206 		return;
207 	}
208 	p->bitmap[pos] |= mask;
209 	p->free++;
210 }
211 
212 
213 /* Descriptor of the memory objects handled by our memory allocator. */
214 struct netmap_mem_obj {
215 	TAILQ_ENTRY(netmap_mem_obj) nmo_next; /* next object in the
216 						 chain. */
217 	int nmo_used; /* flag set on used memory objects. */
218 	size_t nmo_size; /* size of the memory area reserved for the
219 			    object. */
220 	void *nmo_data; /* pointer to the memory area. */
221 };
222 
223 /* Wrap our memory objects to make them ``chainable``. */
224 TAILQ_HEAD(netmap_mem_obj_h, netmap_mem_obj);
225 
226 
227 /* Descriptor of our custom memory allocator. */
228 struct netmap_mem_d {
229 	struct mtx nm_mtx; /* lock used to handle the chain of memory
230 			      objects. */
231 	struct netmap_mem_obj_h nm_molist; /* list of memory objects */
232 	size_t nm_size; /* total amount of memory used for rings etc. */
233 	size_t nm_totalsize; /* total amount of allocated memory
234 		(the difference is used for buffers) */
235 	size_t nm_buf_start; /* offset of packet buffers.
236 			This is page-aligned. */
237 	size_t nm_buf_len; /* total memory for buffers */
238 	void *nm_buffer; /* pointer to the whole pre-allocated memory
239 			    area. */
240 };
241 
242 
243 /* Structure associated with each thread that has registered an interface. */
244 struct netmap_priv_d {
245 	struct netmap_if *np_nifp;	/* netmap interface descriptor. */
246 
247 	struct ifnet	*np_ifp;	/* device for which we hold a reference */
248 	int		np_ringid;	/* from the ioctl */
249 	u_int		np_qfirst, np_qlast;	/* range of rings to scan */
250 	uint16_t	np_txpoll;
251 };
252 
253 /* Shorthand to compute a netmap interface offset. */
254 #define netmap_if_offset(v)                                     \
255     ((char *) (v) - (char *) netmap_mem_d->nm_buffer)
256 /* .. and get a physical address given a memory offset */
257 #define netmap_ofstophys(o)                                     \
258     (vtophys(netmap_mem_d->nm_buffer) + (o))
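
/*
 * Example of the mapping implied by the two macros above: the whole
 * pool comes from a single contigmalloc() (physically contiguous,
 * see netmap_memory_init()), so an mmap offset o, as returned in
 * nr_offset, corresponds to kernel address nm_buffer + o and to
 * physical address vtophys(nm_buffer) + o; this is what
 * netmap_mmap() below hands back one page at a time.
 */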
259 
260 static struct cdev *netmap_dev; /* /dev/netmap character device. */
261 static struct netmap_mem_d *netmap_mem_d; /* Our memory allocator. */
262 
263 
264 static d_mmap_t netmap_mmap;
265 static d_ioctl_t netmap_ioctl;
266 static d_poll_t netmap_poll;
267 
268 #ifdef NETMAP_KEVENT
269 static d_kqfilter_t netmap_kqfilter;
270 #endif
271 
272 static struct cdevsw netmap_cdevsw = {
273 	.d_version = D_VERSION,
274 	.d_name = "netmap",
275 	.d_mmap = netmap_mmap,
276 	.d_ioctl = netmap_ioctl,
277 	.d_poll = netmap_poll,
278 #ifdef NETMAP_KEVENT
279 	.d_kqfilter = netmap_kqfilter,
280 #endif
281 };
282 
283 #ifdef NETMAP_KEVENT
284 static int              netmap_kqread(struct knote *, long);
285 static int              netmap_kqwrite(struct knote *, long);
286 static void             netmap_kqdetach(struct knote *);
287 
288 static struct filterops netmap_read_filterops = {
289 	.f_isfd =       1,
290 	.f_attach =     NULL,
291 	.f_detach =     netmap_kqdetach,
292 	.f_event =      netmap_kqread,
293 };
294 
295 static struct filterops netmap_write_filterops = {
296 	.f_isfd =       1,
297 	.f_attach =     NULL,
298 	.f_detach =     netmap_kqdetach,
299 	.f_event =      netmap_kqwrite,
300 };
301 
302 /*
303  * support for the kevent() system call.
304  *
305  * This is the kevent filter, and is executed each time a new event
306  * is triggered on the device. This function executes some operation
307  * depending on the received filter.
308  *
309  * The implementation should test the filters and should implement
310  * filter operations we are interested in (a full list is in /sys/event.h).
311  *
312  * On a match we should:
313  * - set kn->kn_fop
314  * - set kn->kn_hook
315  * - call knlist_add() to deliver the event to the application.
316  *
317  * Return 0 if the event should be delivered to the application.
318  */
319 static int
320 netmap_kqfilter(struct cdev *dev, struct knote *kn)
321 {
322 	/* declare variables needed to read/write */
323 
324 	switch(kn->kn_filter) {
325 	case EVFILT_READ:
326 		if (netmap_verbose)
327 			D("%s kqfilter: EVFILT_READ", ifp->if_xname);
328 
329 		/* read operations */
330 		kn->kn_fop = &netmap_read_filterops;
331 		break;
332 
333 	case EVFILT_WRITE:
334 		if (netmap_verbose)
335 			D("%s kqfilter: EVFILT_WRITE", ifp->if_xname);
336 
337 		/* write operations */
338 		kn->kn_fop = &netmap_write_filterops;
339 		break;
340 
341 	default:
342 		if (netmap_verbose)
343 			D("%s kqfilter: invalid filter", ifp->if_xname);
344 		return(EINVAL);
345 	}
346 
347 	kn->kn_hook = 0;
348 	knlist_add(&netmap_sc->tun_rsel.si_note, kn, 0);
349 
350 	return (0);
351 }
352 #endif /* NETMAP_KEVENT */
353 
354 /*
355  * File descriptor's private data destructor.
356  *
357  * Call nm_register(ifp,0) to stop netmap mode on the interface and
358  * revert to normal operation. We expect that np_ifp has not gone away.
359  */
360 static void
361 netmap_dtor(void *data)
362 {
363 	struct netmap_priv_d *priv = data;
364 	struct ifnet *ifp = priv->np_ifp;
365 	struct netmap_adapter *na = NA(ifp);
366 	struct netmap_if *nifp = priv->np_nifp;
367 
368 	if (0)
369 	    printf("%s starting for %p ifp %p\n", __FUNCTION__, priv,
370 		priv ? priv->np_ifp : NULL);
371 
372 	na->nm_lock(ifp->if_softc, NETMAP_CORE_LOCK, 0);
373 
374 	na->refcount--;
375 	if (na->refcount <= 0) {	/* last instance */
376 		u_int i;
377 
378 		D("deleting last netmap instance for %s", ifp->if_xname);
379 		/*
380 		 * there is a race here with *_netmap_task() and
381 		 * netmap_poll(), which don't run under NETMAP_CORE_LOCK.
382 		 * na->refcount == 0 && na->ifp->if_capenable & IFCAP_NETMAP
383 		 * (aka NETMAP_DELETING(na)) are a unique marker that the
384 		 * device is dying.
385 		 * Before destroying stuff we sleep a bit, and then complete
386 		 * the job. NIOCREGIF should realize the condition and
387 		 * loop until they can continue; the other routines
388 		 * should check the condition at entry and quit if
389 		 * they cannot run.
390 		 */
391 		na->nm_lock(ifp->if_softc, NETMAP_CORE_UNLOCK, 0);
392 		tsleep(na, 0, "NIOCUNREG", 4);
393 		na->nm_lock(ifp->if_softc, NETMAP_CORE_LOCK, 0);
394 		na->nm_register(ifp, 0); /* off, clear IFCAP_NETMAP */
395 		/* Wake up any sleeping threads. netmap_poll will
396 		 * then return POLLERR
397 		 */
398 		for (i = 0; i < na->num_queues + 2; i++) {
399 			selwakeuppri(&na->tx_rings[i].si, PI_NET);
400 			selwakeuppri(&na->rx_rings[i].si, PI_NET);
401 		}
402 		/* release all buffers */
403 		NMA_LOCK();
404 		for (i = 0; i < na->num_queues + 1; i++) {
405 			int j, lim;
406 			struct netmap_ring *ring;
407 
408 			ND("tx queue %d", i);
409 			ring = na->tx_rings[i].ring;
410 			lim = na->tx_rings[i].nkr_num_slots;
411 			for (j = 0; j < lim; j++)
412 				netmap_free_buf(nifp, ring->slot[j].buf_idx);
413 
414 			ND("rx queue %d", i);
415 			ring = na->rx_rings[i].ring;
416 			lim = na->rx_rings[i].nkr_num_slots;
417 			for (j = 0; j < lim; j++)
418 				netmap_free_buf(nifp, ring->slot[j].buf_idx);
419 		}
420 		NMA_UNLOCK();
421 		netmap_free_rings(na);
422 		wakeup(na);
423 	}
424 	netmap_if_free(nifp);
425 
426 	na->nm_lock(ifp->if_softc, NETMAP_CORE_UNLOCK, 0);
427 
428 	if_rele(ifp);
429 
430 	bzero(priv, sizeof(*priv));	/* XXX for safety */
431 	free(priv, M_DEVBUF);
432 }
433 
434 
435 /*
436  * Create and return a new ``netmap_if`` object, and possibly also
437  * rings and packet buffers.
438  *
439  * Return NULL on failure.
440  */
441 static void *
442 netmap_if_new(const char *ifname, struct netmap_adapter *na)
443 {
444 	struct netmap_if *nifp;
445 	struct netmap_ring *ring;
446 	char *buff;
447 	u_int i, len, ofs;
448 	u_int n = na->num_queues + 1; /* shorthand, include stack queue */
449 
450 	/*
451 	 * the descriptor is followed inline by an array of offsets
452 	 * to the tx and rx rings in the shared memory region.
453 	 */
454 	len = sizeof(struct netmap_if) + 2 * n * sizeof(ssize_t);
455 	nifp = netmap_if_malloc(len);
456 	if (nifp == NULL)
457 		return (NULL);
458 
459 	/* initialize base fields */
460 	*(int *)(uintptr_t)&nifp->ni_num_queues = na->num_queues;
461 	strncpy(nifp->ni_name, ifname, IFNAMSIZ);
462 
463 	(na->refcount)++;	/* XXX atomic ? we are under lock */
464 	if (na->refcount > 1)
465 		goto final;
466 
467 	/*
468 	 * If this is the first instance, allocate the shadow rings and
469 	 * buffers for this card (one for each hw queue, one for the host).
470 	 * The rings are contiguous, but have variable size.
471 	 * The entire block is reachable at
472 	 *	na->tx_rings[0].ring
473 	 */
474 
475 	len = n * (2 * sizeof(struct netmap_ring) +
476 		  (na->num_tx_desc + na->num_rx_desc) *
477 		   sizeof(struct netmap_slot) );
478 	buff = netmap_ring_malloc(len);
479 	if (buff == NULL) {
480 		D("failed to allocate %d bytes for %s shadow ring",
481 			len, ifname);
482 error:
483 		(na->refcount)--;
484 		netmap_if_free(nifp);
485 		return (NULL);
486 	}
487 	/* do we have the buffers? we need num_tx_desc buffers for
488 	 * each tx ring and num_rx_desc buffers for each rx ring. */
489 	len = n * (na->num_tx_desc + na->num_rx_desc);
490 	NMA_LOCK();
491 	if (nm_buf_pool.free < len) {
492 		NMA_UNLOCK();
493 		netmap_free(buff, "not enough bufs");
494 		goto error;
495 	}
496 	/*
497 	 * in the kring, store the pointers to the shared rings
498 	 * and initialize the rings. We are under NMA_LOCK().
499 	 */
500 	ofs = 0;
501 	for (i = 0; i < n; i++) {
502 		struct netmap_kring *kring;
503 		int numdesc;
504 
505 		/* Transmit rings */
506 		kring = &na->tx_rings[i];
507 		numdesc = na->num_tx_desc;
508 		bzero(kring, sizeof(*kring));
509 		kring->na = na;
510 
511 		ring = kring->ring = (struct netmap_ring *)(buff + ofs);
512 		*(ssize_t *)(uintptr_t)&ring->buf_ofs =
513 			nm_buf_pool.base - (char *)ring;
514 		ND("txring[%d] at %p ofs %d", i, ring, ring->buf_ofs);
515 		*(int *)(uintptr_t)&ring->num_slots =
516 			kring->nkr_num_slots = numdesc;
517 
518 		/*
519 		 * IMPORTANT:
520 		 * Always keep one slot empty, so we can detect new
521 		 * transmissions comparing cur and nr_hwcur (they are
522 		 * the same only if there are no new transmissions).
523 		 */
524 		ring->avail = kring->nr_hwavail = numdesc - 1;
525 		ring->cur = kring->nr_hwcur = 0;
526 		netmap_new_bufs(nifp, ring->slot, numdesc);
527 
528 		ofs += sizeof(struct netmap_ring) +
529 			numdesc * sizeof(struct netmap_slot);
530 
531 		/* Receive rings */
532 		kring = &na->rx_rings[i];
533 		numdesc = na->num_rx_desc;
534 		bzero(kring, sizeof(*kring));
535 		kring->na = na;
536 
537 		ring = kring->ring = (struct netmap_ring *)(buff + ofs);
538 		*(ssize_t *)(uintptr_t)&ring->buf_ofs =
539 			nm_buf_pool.base - (char *)ring;
540 		ND("rxring[%d] at %p offset %d", i, ring, ring->buf_ofs);
541 		*(int *)(uintptr_t)&ring->num_slots =
542 			kring->nkr_num_slots = numdesc;
543 		ring->cur = kring->nr_hwcur = 0;
544 		ring->avail = kring->nr_hwavail = 0; /* empty */
545 		netmap_new_bufs(nifp, ring->slot, numdesc);
546 		ofs += sizeof(struct netmap_ring) +
547 			numdesc * sizeof(struct netmap_slot);
548 	}
549 	NMA_UNLOCK();
550 	for (i = 0; i < n+1; i++) {
551 		// XXX initialize the selrecord structs.
552 	}
553 final:
554 	/*
555 	 * fill the ring offsets for the tx and rx queues. They contain
556 	 * the offset between each ring and nifp, so the information is
557 	 * usable in userspace to reach a ring from the nifp.
558 	 */
559 	for (i = 0; i < n; i++) {
560 		char *base = (char *)nifp;
561 		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] =
562 			(char *)na->tx_rings[i].ring - base;
563 		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+n] =
564 			(char *)na->rx_rings[i].ring - base;
565 	}
566 	return (nifp);
567 }
568 
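/*
 * Userspace turns the ring_ofs[] entries written above back into
 * pointers; this is roughly what the NETMAP_TXRING()/NETMAP_RXRING()
 * macros in net/netmap_user.h expand to (a sketch, assuming the
 * index layout of this version):
 *
 *	txring = (struct netmap_ring *)
 *	    ((char *)nifp + nifp->ring_ofs[i]);
 *	rxring = (struct netmap_ring *)
 *	    ((char *)nifp + nifp->ring_ofs[i + nifp->ni_num_queues + 1]);
 */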
569 
570 /*
571  * mmap(2) support for the "netmap" device.
572  *
573  * Expose all the memory previously allocated by our custom memory
574  * allocator: this way the user needs to issue only a single mmap(2)
575  * to access all the data structures.
576  *
577  * Return 0 on success, -1 otherwise.
578  */
579 static int
580 #if __FreeBSD_version < 900000
581 netmap_mmap(__unused struct cdev *dev, vm_offset_t offset, vm_paddr_t *paddr,
582 	    int nprot)
583 #else
584 netmap_mmap(__unused struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
585 	    int nprot, __unused vm_memattr_t *memattr)
586 #endif
587 {
588 	if (nprot & PROT_EXEC)
589 		return (-1);	// XXX -1 or EINVAL ?
590 
591 	ND("request for offset 0x%x", (uint32_t)offset);
592 	*paddr = netmap_ofstophys(offset);
593 
594 	return (0);
595 }
596 
597 
598 /*
599  * Handlers for synchronization of the queues from/to the host.
600  *
601  * netmap_sync_to_host() passes packets up. We are called from a
602  * system call in user process context, and the only contention
603  * can be among multiple user threads erroneously calling
604  * this routine concurrently. In principle we should not even
605  * need to lock.
606  */
607 static void
608 netmap_sync_to_host(struct netmap_adapter *na)
609 {
610 	struct netmap_kring *kring = &na->tx_rings[na->num_queues];
611 	struct netmap_ring *ring = kring->ring;
612 	struct mbuf *head = NULL, *tail = NULL, *m;
613 	u_int k, n, lim = kring->nkr_num_slots - 1;
614 
615 	k = ring->cur;
616 	if (k > lim) {
617 		netmap_ring_reinit(kring);
618 		return;
619 	}
620 	// na->nm_lock(na->ifp->if_softc, NETMAP_CORE_LOCK, 0);
621 
622 	/* Take packets from hwcur to cur and pass them up.
623 	 * In case of no buffers we give up. At the end of the loop,
624 	 * the queue is drained in all cases.
625 	 */
626 	for (n = kring->nr_hwcur; n != k;) {
627 		struct netmap_slot *slot = &ring->slot[n];
628 
629 		n = (n == lim) ? 0 : n + 1;
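		/* a valid frame needs at least an Ethernet header (14 bytes) */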
630 		if (slot->len < 14 || slot->len > NETMAP_BUF_SIZE) {
631 			D("bad pkt at %d len %d", n, slot->len);
632 			continue;
633 		}
634 		m = m_devget(NMB(slot), slot->len, 0, na->ifp, NULL);
635 
636 		if (m == NULL)
637 			break;
638 		if (tail)
639 			tail->m_nextpkt = m;
640 		else
641 			head = m;
642 		tail = m;
643 		m->m_nextpkt = NULL;
644 	}
645 	kring->nr_hwcur = k;
646 	kring->nr_hwavail = ring->avail = lim;
647 	// na->nm_lock(na->ifp->if_softc, NETMAP_CORE_UNLOCK, 0);
648 
649 	/* send packets up, outside the lock */
650 	while ((m = head) != NULL) {
651 		head = head->m_nextpkt;
652 		m->m_nextpkt = NULL;
653 		m->m_pkthdr.rcvif = na->ifp;
654 		if (netmap_verbose & NM_VERB_HOST)
655 			D("sending up pkt %p size %d", m, m->m_pkthdr.len);
656 		(na->ifp->if_input)(na->ifp, m);
657 	}
658 }
659 
660 /*
661  * rxsync backend for packets coming from the host stack.
662  * They have been put in the queue by netmap_start() so we
663  * need to protect access to the kring using a lock.
664  *
665  * This routine also does the selrecord if called from the poll handler
666  * (we know because td != NULL).
667  */
668 static void
669 netmap_sync_from_host(struct netmap_adapter *na, struct thread *td)
670 {
671 	struct netmap_kring *kring = &na->rx_rings[na->num_queues];
672 	struct netmap_ring *ring = kring->ring;
673 	int error = 1, delta;
674 	u_int k = ring->cur, lim = kring->nkr_num_slots;
675 
676 	na->nm_lock(na->ifp->if_softc, NETMAP_CORE_LOCK, 0);
677 	if (k >= lim) /* bad value */
678 		goto done;
679 	delta = k - kring->nr_hwcur;
680 	if (delta < 0)
681 		delta += lim;
682 	kring->nr_hwavail -= delta;
683 	if (kring->nr_hwavail < 0)	/* error */
684 		goto done;
685 	kring->nr_hwcur = k;
686 	error = 0;
687 	k = ring->avail = kring->nr_hwavail;
688 	if (k == 0 && td)
689 		selrecord(td, &kring->si);
690 	if (k && (netmap_verbose & NM_VERB_HOST))
691 		D("%d pkts from stack", k);
692 done:
693 	na->nm_lock(na->ifp->if_softc, NETMAP_CORE_UNLOCK, 0);
694 	if (error)
695 		netmap_ring_reinit(kring);
696 }
697 
698 
699 /*
700  * get a refcounted reference to an interface.
701  * Return ENXIO if the interface does not exist, EINVAL if netmap
702  * is not supported by the interface.
703  * If successful, hold a reference.
704  */
705 static int
706 get_ifp(const char *name, struct ifnet **ifp)
707 {
708 	*ifp = ifunit_ref(name);
709 	if (*ifp == NULL)
710 		return (ENXIO);
711 	/* can do this if the capability exists and if_pspare[0]
712 	 * points to the netmap descriptor.
713 	 */
714 	if ((*ifp)->if_capabilities & IFCAP_NETMAP && NA(*ifp))
715 		return 0;	/* valid pointer, we hold the refcount */
716 	if_rele(*ifp);
717 	return EINVAL;	// not NETMAP capable
718 }
719 
720 
721 /*
722  * Error routine called when txsync/rxsync detects an error.
723  * Can't do much more than resetting cur = hwcur, avail = hwavail.
724  * Return 1 on reinit.
725  *
726  * This routine is only called by the upper half of the kernel.
727  * It only reads hwcur (which is changed only by the upper half, too)
728  * and hwavail (which may be changed by the lower half, but only on
729  * a tx ring and only to increase it, so any error will be recovered
730  * on the next call). For the above, we don't strictly need to call
731  * it under lock.
732  */
733 int
734 netmap_ring_reinit(struct netmap_kring *kring)
735 {
736 	struct netmap_ring *ring = kring->ring;
737 	u_int i, lim = kring->nkr_num_slots - 1;
738 	int errors = 0;
739 
740 	D("called for %s", kring->na->ifp->if_xname);
741 	if (ring->cur > lim)
742 		errors++;
743 	for (i = 0; i <= lim; i++) {
744 		u_int idx = ring->slot[i].buf_idx;
745 		u_int len = ring->slot[i].len;
746 		if (idx < 2 || idx >= netmap_total_buffers) {
747 			if (!errors++)
748 				D("bad buffer at slot %d idx %d len %d ", i, idx, len);
749 			ring->slot[i].buf_idx = 0;
750 			ring->slot[i].len = 0;
751 		} else if (len > NETMAP_BUF_SIZE) {
752 			ring->slot[i].len = 0;
753 			if (!errors++)
754 				D("bad len %d at slot %d idx %d",
755 					len, i, idx);
756 		}
757 	}
758 	if (errors) {
759 		int pos = kring - kring->na->tx_rings;
760 		int n = kring->na->num_queues + 2;
761 
762 		D("total %d errors", errors);
763 		errors++;
764 		D("%s %s[%d] reinit, cur %d -> %d avail %d -> %d",
765 			kring->na->ifp->if_xname,
766 			pos < n ?  "TX" : "RX", pos < n ? pos : pos - n,
767 			ring->cur, kring->nr_hwcur,
768 			ring->avail, kring->nr_hwavail);
769 		ring->cur = kring->nr_hwcur;
770 		ring->avail = kring->nr_hwavail;
771 	}
772 	return (errors ? 1 : 0);
773 }
774 
775 
776 /*
777  * Set the ring ID. For devices with a single queue, a request
778  * for all rings is the same as a single ring.
779  */
780 static int
781 netmap_set_ringid(struct netmap_priv_d *priv, u_int ringid)
782 {
783 	struct ifnet *ifp = priv->np_ifp;
784 	struct netmap_adapter *na = NA(ifp);
785 	void *adapter = na->ifp->if_softc;	/* shorthand */
786 	u_int i = ringid & NETMAP_RING_MASK;
787 	/* first time we don't lock */
788 	int need_lock = (priv->np_qfirst != priv->np_qlast);
789 
790 	if ( (ringid & NETMAP_HW_RING) && i >= na->num_queues) {
791 		D("invalid ring id %d", i);
792 		return (EINVAL);
793 	}
794 	if (need_lock)
795 		na->nm_lock(adapter, NETMAP_CORE_LOCK, 0);
796 	priv->np_ringid = ringid;
797 	if (ringid & NETMAP_SW_RING) {
798 		priv->np_qfirst = na->num_queues;
799 		priv->np_qlast = na->num_queues + 1;
800 	} else if (ringid & NETMAP_HW_RING) {
801 		priv->np_qfirst = i;
802 		priv->np_qlast = i + 1;
803 	} else {
804 		priv->np_qfirst = 0;
805 		priv->np_qlast = na->num_queues;
806 	}
807 	priv->np_txpoll = (ringid & NETMAP_NO_TX_POLL) ? 0 : 1;
808 	if (need_lock)
809 		na->nm_lock(adapter, NETMAP_CORE_UNLOCK, 0);
810 	if (ringid & NETMAP_SW_RING)
811 		D("ringid %s set to SW RING", ifp->if_xname);
812 	else if (ringid & NETMAP_HW_RING)
813 		D("ringid %s set to HW RING %d", ifp->if_xname,
814 			priv->np_qfirst);
815 	else
816 		D("ringid %s set to all %d HW RINGS", ifp->if_xname,
817 			priv->np_qlast);
818 	return 0;
819 }
820 
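/*
 * Examples of nr_ringid values accepted above (flags from
 * net/netmap.h):
 *	0			poll all hardware rings
 *	NETMAP_HW_RING | 2	poll hardware ring 2 only
 *	NETMAP_SW_RING		poll the host ("software") ring
 * NETMAP_NO_TX_POLL can be or-ed in to disable the implicit txsync
 * on poll (see netmap_poll() below).
 */
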
821 /*
822  * ioctl(2) support for the "netmap" device.
823  *
824  * The following commands are accepted:
825  * - NIOCGINFO
826  * - SIOCGIFADDR	just for convenience
827  * - NIOCREGIF
828  * - NIOCUNREGIF
829  * - NIOCTXSYNC
830  * - NIOCRXSYNC
831  *
832  * Return 0 on success, errno otherwise.
833  */
834 static int
835 netmap_ioctl(__unused struct cdev *dev, u_long cmd, caddr_t data,
836 	__unused int fflag, struct thread *td)
837 {
838 	struct netmap_priv_d *priv = NULL;
839 	struct ifnet *ifp;
840 	struct nmreq *nmr = (struct nmreq *) data;
841 	struct netmap_adapter *na;
842 	void *adapter;
843 	int error;
844 	u_int i;
845 	struct netmap_if *nifp;
846 
847 	CURVNET_SET(TD_TO_VNET(td));
848 
849 	error = devfs_get_cdevpriv((void **)&priv);
850 	if (error != ENOENT && error != 0) {
851 		CURVNET_RESTORE();
852 		return (error);
853 	}
854 
855 	error = 0;	/* Could be ENOENT */
856 	switch (cmd) {
857 	case NIOCGINFO:		/* return capabilities etc */
858 		/* memsize is always valid */
859 		nmr->nr_memsize = netmap_mem_d->nm_totalsize;
860 		nmr->nr_offset = 0;
861 		nmr->nr_numrings = 0;
862 		nmr->nr_numslots = 0;
863 		if (nmr->nr_name[0] == '\0')	/* just get memory info */
864 			break;
865 		error = get_ifp(nmr->nr_name, &ifp); /* get a refcount */
866 		if (error)
867 			break;
868 		na = NA(ifp); /* retrieve netmap_adapter */
869 		nmr->nr_numrings = na->num_queues;
870 		nmr->nr_numslots = na->num_tx_desc;
871 		if_rele(ifp);	/* return the refcount */
872 		break;
873 
874 	case NIOCREGIF:
875 		if (priv != NULL) {	/* thread already registered */
876 			error = netmap_set_ringid(priv, nmr->nr_ringid);
877 			break;
878 		}
879 		/* find the interface and a reference */
880 		error = get_ifp(nmr->nr_name, &ifp); /* keep reference */
881 		if (error)
882 			break;
883 		na = NA(ifp); /* retrieve netmap adapter */
884 		adapter = na->ifp->if_softc;	/* shorthand */
885 		/*
886 		 * Allocate the private per-thread structure.
887 		 * XXX perhaps we can use a blocking malloc ?
888 		 */
889 		priv = malloc(sizeof(struct netmap_priv_d), M_DEVBUF,
890 			      M_NOWAIT | M_ZERO);
891 		if (priv == NULL) {
892 			error = ENOMEM;
893 			if_rele(ifp);   /* return the refcount */
894 			break;
895 		}
896 
897 
898 		for (i = 10; i > 0; i--) {
899 			na->nm_lock(adapter, NETMAP_CORE_LOCK, 0);
900 			if (!NETMAP_DELETING(na))
901 				break;
902 			na->nm_lock(adapter, NETMAP_CORE_UNLOCK, 0);
903 			tsleep(na, 0, "NIOCREGIF", hz/10);
904 		}
905 		if (i == 0) {
906 			D("too many NIOCREGIF attempts, give up");
907 			error = EINVAL;
908 			free(priv, M_DEVBUF);
909 			if_rele(ifp);	/* return the refcount */
910 			break;
911 		}
912 
913 		priv->np_ifp = ifp;	/* store the reference */
914 		error = netmap_set_ringid(priv, nmr->nr_ringid);
915 		if (error)
916 			goto error;
917 		priv->np_nifp = nifp = netmap_if_new(nmr->nr_name, na);
918 		if (nifp == NULL) { /* allocation failed */
919 			error = ENOMEM;
920 		} else if (ifp->if_capenable & IFCAP_NETMAP) {
921 			/* was already set */
922 		} else {
923 			/* Otherwise set the card in netmap mode
924 			 * and make it use the shared buffers.
925 			 */
926 			error = na->nm_register(ifp, 1); /* mode on */
927 			if (error) {
928 				/*
929 				 * do something similar to netmap_dtor().
930 				 */
931 				netmap_free_rings(na);
932 				// XXX tx_rings is inline, must not be freed.
933 				// free(na->tx_rings, M_DEVBUF); // XXX wrong ?
934 				na->tx_rings = na->rx_rings = NULL;
935 				na->refcount--;
936 				netmap_if_free(nifp);
937 				nifp = NULL;
938 			}
939 		}
940 
941 		if (error) {	/* reg. failed, release priv and ref */
942 error:
943 			na->nm_lock(adapter, NETMAP_CORE_UNLOCK, 0);
944 			free(priv, M_DEVBUF);
945 			if_rele(ifp);	/* return the refcount */
946 			break;
947 		}
948 
949 		na->nm_lock(adapter, NETMAP_CORE_UNLOCK, 0);
950 		error = devfs_set_cdevpriv(priv, netmap_dtor);
951 
952 		if (error != 0) {
953 			/* could not assign the private storage for the
954 			 * thread, call the destructor explicitly.
955 			 */
956 			netmap_dtor(priv);
957 			break;
958 		}
959 
960 		/* return the offset of the netmap_if object */
961 		nmr->nr_numrings = na->num_queues;
962 		nmr->nr_numslots = na->num_tx_desc;
963 		nmr->nr_memsize = netmap_mem_d->nm_totalsize;
964 		nmr->nr_offset = netmap_if_offset(nifp);
965 		break;
966 
967 	case NIOCUNREGIF:
968 		if (priv == NULL) {
969 			error = ENXIO;
970 			break;
971 		}
972 
973 		/* the interface is unregistered inside the
974 		   destructor of the private data. */
975 		devfs_clear_cdevpriv();
976 		break;
977 
978 	case NIOCTXSYNC:
979 	case NIOCRXSYNC:
980 		if (priv == NULL) {
981 			error = ENXIO;
982 			break;
983 		}
984 		ifp = priv->np_ifp;	/* we have a reference */
985 		na = NA(ifp); /* retrieve netmap adapter */
986 		adapter = ifp->if_softc;	/* shorthand */
987 
988 		if (priv->np_qfirst == na->num_queues) {
989 			/* queues to/from host */
990 			if (cmd == NIOCTXSYNC)
991 				netmap_sync_to_host(na);
992 			else
993 				netmap_sync_from_host(na, NULL);
994 			break;
995 		}
996 
997 		for (i = priv->np_qfirst; i < priv->np_qlast; i++) {
998 		    if (cmd == NIOCTXSYNC) {
999 			struct netmap_kring *kring = &na->tx_rings[i];
1000 			if (netmap_verbose & NM_VERB_TXSYNC)
1001 				D("sync tx ring %d cur %d hwcur %d",
1002 					i, kring->ring->cur,
1003 					kring->nr_hwcur);
1004 			na->nm_txsync(adapter, i, 1 /* do lock */);
1005 			if (netmap_verbose & NM_VERB_TXSYNC)
1006 				D("after sync tx ring %d cur %d hwcur %d",
1007 					i, kring->ring->cur,
1008 					kring->nr_hwcur);
1009 		    } else {
1010 			na->nm_rxsync(adapter, i, 1 /* do lock */);
1011 			microtime(&na->rx_rings[i].ring->ts);
1012 		    }
1013 		}
1014 
1015 		break;
1016 
1017 	case BIOCIMMEDIATE:
1018 	case BIOCGHDRCMPLT:
1019 	case BIOCSHDRCMPLT:
1020 	case BIOCSSEESENT:
1021 		D("ignore BIOCIMMEDIATE/BIOCGHDRCMPLT/BIOCSHDRCMPLT/BIOCSSEESENT");
1022 		break;
1023 
1024 	default:
1025 	    {
1026 		/*
1027 		 * allow device calls
1028 		 */
1029 		struct socket so;
1030 		bzero(&so, sizeof(so));
1031 		error = get_ifp(nmr->nr_name, &ifp); /* keep reference */
1032 		if (error)
1033 			break;
1034 		so.so_vnet = ifp->if_vnet;
1035 		// so->so_proto not null.
1036 		error = ifioctl(&so, cmd, data, td);
1037 		if_rele(ifp);
1038 	    }
1039 	}
1040 
1041 	CURVNET_RESTORE();
1042 	return (error);
1043 }
1044 
1045 
1046 /*
1047  * select(2) and poll(2) handlers for the "netmap" device.
1048  *
1049  * Can be called for one or more queues.
1050  * Return the event mask corresponding to ready events.
1051  * If there are no ready events, do a selrecord on either individual
1052  * selfd or on the global one.
1053  * Device-dependent parts (locking and sync of tx/rx rings)
1054  * are done through callbacks.
1055  */
1056 static int
1057 netmap_poll(__unused struct cdev *dev, int events, struct thread *td)
1058 {
1059 	struct netmap_priv_d *priv = NULL;
1060 	struct netmap_adapter *na;
1061 	struct ifnet *ifp;
1062 	struct netmap_kring *kring;
1063 	u_int core_lock, i, check_all, want_tx, want_rx, revents = 0;
1064 	void *adapter;
1065 	enum { NO_CL, NEED_CL, LOCKED_CL };	/* see below */
1066 
1067 	if (devfs_get_cdevpriv((void **)&priv) != 0 || priv == NULL)
1068 		return POLLERR;
1069 
1070 	ifp = priv->np_ifp;
1071 	// XXX check for deleting() ?
1072 	if ( (ifp->if_capenable & IFCAP_NETMAP) == 0)
1073 		return POLLERR;
1074 
1075 	if (netmap_verbose & 0x8000)
1076 		D("device %s events 0x%x", ifp->if_xname, events);
1077 	want_tx = events & (POLLOUT | POLLWRNORM);
1078 	want_rx = events & (POLLIN | POLLRDNORM);
1079 
1080 	adapter = ifp->if_softc;
1081 	na = NA(ifp); /* retrieve netmap adapter */
1082 
1083 	/* how many queues we are scanning */
1084 	i = priv->np_qfirst;
1085 	if (i == na->num_queues) { /* from/to host */
1086 		if (priv->np_txpoll || want_tx) {
1087 			/* push any packets up, then we are always ready */
1088 			kring = &na->tx_rings[i];
1089 			netmap_sync_to_host(na);
1090 			revents |= want_tx;
1091 		}
1092 		if (want_rx) {
1093 			kring = &na->rx_rings[i];
1094 			if (kring->ring->avail == 0)
1095 				netmap_sync_from_host(na, td);
1096 			if (kring->ring->avail > 0) {
1097 				revents |= want_rx;
1098 			}
1099 		}
1100 		return (revents);
1101 	}
1102 
1103 	/*
1104 	 * check_all is set if the card has more than one queue and
1105 	 * the client is polling all of them. If true, we sleep on
1106 	 * the "global" selfd, otherwise we sleep on individual selfd
1107 	 * (we can only sleep on one of them per direction).
1108 	 * The interrupt routine in the driver should always wake on
1109 	 * the individual selfd, and also on the global one if the card
1110 	 * has more than one ring.
1111 	 *
1112 	 * If the card has only one lock, we just use that.
1113 	 * If the card has separate ring locks, we just use those
1114 	 * unless we are doing check_all, in which case the whole
1115 	 * loop is wrapped by the global lock.
1116 	 * We acquire locks only when necessary: if poll is called
1117 	 * when buffers are available, we can just return without locks.
1118 	 *
1119 	 * rxsync() is only called if we run out of buffers on a POLLIN.
1120 	 * txsync() is called if we run out of buffers on POLLOUT, or
1121 	 * there are pending packets to send. The latter can be disabled
1122 	 * passing NETMAP_NO_TX_POLL in the NIOCREGIF call.
1123 	 */
1124 	check_all = (i + 1 != priv->np_qlast);
1125 
1126 	/*
1127 	 * core_lock indicates what to do with the core lock.
1128 	 * The core lock is used when either the card has no individual
1129 	 * locks, or it has individual locks but we are checking all
1130 	 * rings so we need the core lock to avoid missing wakeup events.
1131 	 *
1132 	 * It has three possible states:
1133 	 * NO_CL	we don't need to use the core lock, e.g.
1134 	 *		because we are protected by individual locks.
1135 	 * NEED_CL	we need the core lock. In this case, when we
1136 	 *		call the lock routine, move to LOCKED_CL
1137 	 *		to remember to release the lock once done.
1138 	 * LOCKED_CL	core lock is set, so we need to release it.
1139 	 */
1140 	core_lock = (check_all || !na->separate_locks) ? NEED_CL : NO_CL;
1141 	/*
1142 	 * We start with a lock free round which is good if we have
1143 	 * data available. If this fails, then lock and call the sync
1144 	 * routines.
1145 	 */
1146 	for (i = priv->np_qfirst; want_rx && i < priv->np_qlast; i++) {
1147 		kring = &na->rx_rings[i];
1148 		if (kring->ring->avail > 0) {
1149 			revents |= want_rx;
1150 			want_rx = 0;	/* also breaks the loop */
1151 		}
1152 	}
1153 	for (i = priv->np_qfirst; want_tx && i < priv->np_qlast; i++) {
1154 		kring = &na->tx_rings[i];
1155 		if (kring->ring->avail > 0) {
1156 			revents |= want_tx;
1157 			want_tx = 0;	/* also breaks the loop */
1158 		}
1159 	}
1160 
1161 	/*
1162 	 * If we need to push packets out (priv->np_txpoll) or want_tx is
1163 	 * still set, we do need to run the txsync calls (on all rings,
1164 	 * to avoid stalling the tx rings).
1165 	 */
1166 	if (priv->np_txpoll || want_tx) {
1167 		for (i = priv->np_qfirst; i < priv->np_qlast; i++) {
1168 			kring = &na->tx_rings[i];
1169 			if (!want_tx && kring->ring->cur == kring->nr_hwcur)
1170 				continue;
1171 			if (core_lock == NEED_CL) {
1172 				na->nm_lock(adapter, NETMAP_CORE_LOCK, 0);
1173 				core_lock = LOCKED_CL;
1174 			}
1175 			if (na->separate_locks)
1176 				na->nm_lock(adapter, NETMAP_TX_LOCK, i);
1177 			if (netmap_verbose & NM_VERB_TXSYNC)
1178 				D("send %d on %s %d",
1179 					kring->ring->cur,
1180 					ifp->if_xname, i);
1181 			if (na->nm_txsync(adapter, i, 0 /* no lock */))
1182 				revents |= POLLERR;
1183 
1184 			if (want_tx) {
1185 				if (kring->ring->avail > 0) {
1186 					/* stop at the first ring. We don't risk
1187 					 * starvation.
1188 					 */
1189 					revents |= want_tx;
1190 					want_tx = 0;
1191 				} else if (!check_all)
1192 					selrecord(td, &kring->si);
1193 			}
1194 			if (na->separate_locks)
1195 				na->nm_lock(adapter, NETMAP_TX_UNLOCK, i);
1196 		}
1197 	}
1198 
1199 	/*
1200 	 * now if want_rx is still set we need to lock and rxsync.
1201 	 * Do it on all rings because otherwise we starve.
1202 	 */
1203 	if (want_rx) {
1204 		for (i = priv->np_qfirst; i < priv->np_qlast; i++) {
1205 			kring = &na->rx_rings[i];
1206 			if (core_lock == NEED_CL) {
1207 				na->nm_lock(adapter, NETMAP_CORE_LOCK, 0);
1208 				core_lock = LOCKED_CL;
1209 			}
1210 			if (na->separate_locks)
1211 				na->nm_lock(adapter, NETMAP_RX_LOCK, i);
1212 
1213 			if (na->nm_rxsync(adapter, i, 0 /* no lock */))
1214 				revents |= POLLERR;
1215 			if (no_timestamp == 0 ||
1216 					kring->ring->flags & NR_TIMESTAMP)
1217 				microtime(&kring->ring->ts);
1218 
1219 			if (kring->ring->avail > 0)
1220 				revents |= want_rx;
1221 			else if (!check_all)
1222 				selrecord(td, &kring->si);
1223 			if (na->separate_locks)
1224 				na->nm_lock(adapter, NETMAP_RX_UNLOCK, i);
1225 		}
1226 	}
1227 	if (check_all && revents == 0) {
1228 		i = na->num_queues + 1; /* the global queue */
1229 		if (want_tx)
1230 			selrecord(td, &na->tx_rings[i].si);
1231 		if (want_rx)
1232 			selrecord(td, &na->rx_rings[i].si);
1233 	}
1234 	if (core_lock == LOCKED_CL)
1235 		na->nm_lock(adapter, NETMAP_CORE_UNLOCK, 0);
1236 
1237 	return (revents);
1238 }
1239 
1240 /*------- driver support routines ------*/
1241 
1242 /*
1243  * Initialize a ``netmap_adapter`` object created by a driver on attach.
1244  * We allocate a block of memory with room for a struct netmap_adapter
1245  * plus two sets of N+2 struct netmap_kring (where N is the number
1246  * of hardware rings):
1247  * krings	0..N-1	are for the hardware queues.
1248  * kring	N	is for the host stack queue
1249  * kring	N+1	is only used for the selinfo for all queues.
1250  * Return 0 on success, ENOMEM otherwise.
1251  */
1252 int
1253 netmap_attach(struct netmap_adapter *na, int num_queues)
1254 {
1255 	int n = num_queues + 2;
1256 	int size = sizeof(*na) + 2 * n * sizeof(struct netmap_kring);
1257 	void *buf;
1258 	struct ifnet *ifp = na->ifp;
1259 
1260 	if (ifp == NULL) {
1261 		D("ifp not set, giving up");
1262 		return EINVAL;
1263 	}
1264 	na->refcount = 0;
1265 	na->num_queues = num_queues;
1266 
1267 	buf = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
1268 	if (buf) {
1269 		WNA(ifp) = buf;
1270 		na->tx_rings = (void *)((char *)buf + sizeof(*na));
1271 		na->rx_rings = na->tx_rings + n;
1272 		bcopy(na, buf, sizeof(*na));
1273 		ifp->if_capabilities |= IFCAP_NETMAP;
1274 	}
1275 	D("%s for %s", buf ? "ok" : "failed", ifp->if_xname);
1276 
1277 	return (buf ? 0 : ENOMEM);
1278 }
1279 
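/*
 * Typical driver-side usage, e.g. from a NIC attach routine (a
 * sketch: the foo_* callbacks and the sc softc fields are
 * hypothetical, the na fields are from netmap_kern.h):
 *
 *	struct netmap_adapter na;
 *
 *	bzero(&na, sizeof(na));
 *	na.ifp = sc->ifp;
 *	na.separate_locks = 0;
 *	na.num_tx_desc = sc->num_tx_desc;
 *	na.num_rx_desc = sc->num_rx_desc;
 *	na.buff_size = NETMAP_BUF_SIZE;
 *	na.nm_register = foo_netmap_reg;
 *	na.nm_txsync = foo_netmap_txsync;
 *	na.nm_rxsync = foo_netmap_rxsync;
 *	na.nm_lock = foo_netmap_lock;
 *	netmap_attach(&na, 1);		// one hardware queue
 */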
1280 
1281 /*
1282  * Free the allocated memory linked to the given ``netmap_adapter``
1283  * object.
1284  */
1285 void
1286 netmap_detach(struct ifnet *ifp)
1287 {
1288 	u_int i;
1289 	struct netmap_adapter *na = NA(ifp);
1290 
1291 	if (!na)
1292 		return;
1293 
1294 	for (i = 0; i < na->num_queues + 2; i++) {
1295 		knlist_destroy(&na->tx_rings[i].si.si_note);
1296 		knlist_destroy(&na->rx_rings[i].si.si_note);
1297 	}
1298 	bzero(na, sizeof(*na));
1299 	WNA(ifp) = NULL;
1300 	free(na, M_DEVBUF);
1301 }
1302 
1303 
1304 /*
1305  * Intercept packets from the network stack and pass them
1306  * to netmap as incoming packets on the 'software' ring.
1307  * We are not locked when called.
1308  */
1309 int
1310 netmap_start(struct ifnet *ifp, struct mbuf *m)
1311 {
1312 	struct netmap_adapter *na = NA(ifp);
1313 	struct netmap_kring *kring = &na->rx_rings[na->num_queues];
1314 	u_int i, len = m->m_pkthdr.len;
1315 	int error = EBUSY, lim = kring->nkr_num_slots - 1;
1316 	struct netmap_slot *slot;
1317 
1318 	if (netmap_verbose & NM_VERB_HOST)
1319 		D("%s packet %d len %d from the stack", ifp->if_xname,
1320 			kring->nr_hwcur + kring->nr_hwavail, len);
1321 	na->nm_lock(ifp->if_softc, NETMAP_CORE_LOCK, 0);
1322 	if (kring->nr_hwavail >= lim) {
1323 		D("stack ring %s full", ifp->if_xname);
1324 		goto done;	/* no space */
1325 	}
1326 	if (len > na->buff_size) {
1327 		D("drop packet size %d > %d", len, na->buff_size);
1328 		goto done;	/* too long for us */
1329 	}
1330 
1331 	/* compute the insert position */
1332 	i = kring->nr_hwcur + kring->nr_hwavail;
1333 	if (i > lim)
1334 		i -= lim + 1;
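	/* e.g. with 256 slots (lim == 255), nr_hwcur == 200 and
	 * nr_hwavail == 100, the position wraps to 300 - 256 == 44 */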
1335 	slot = &kring->ring->slot[i];
1336 	m_copydata(m, 0, len, NMB(slot));
1337 	slot->len = len;
1338 	kring->nr_hwavail++;
1339 	if (netmap_verbose  & NM_VERB_HOST)
1340 		D("wake up host ring %s %d", na->ifp->if_xname, na->num_queues);
1341 	selwakeuppri(&kring->si, PI_NET);
1342 	error = 0;
1343 done:
1344 	na->nm_lock(ifp->if_softc, NETMAP_CORE_UNLOCK, 0);
1345 
1346 	/* release the mbuf in both cases, success and failure. As an
1347 	 * alternative, put the mbuf in a free list and free the list
1348 	 * only when really necessary.
1349 	 */
1350 	m_freem(m);
1351 
1352 	return (error);
1353 }
1354 
1355 
1356 /*
1357  * netmap_reset() is called by the driver routines when reinitializing
1358  * a ring. The driver is in charge of locking to protect the kring.
1359  * If netmap mode is not set just return NULL.
1360  */
1361 struct netmap_slot *
1362 netmap_reset(struct netmap_adapter *na, enum txrx tx, int n,
1363 	u_int new_cur)
1364 {
1365 	struct netmap_kring *kring;
1366 	struct netmap_ring *ring;
1367 	int new_hwofs, lim;
1368 
1369 	if (na == NULL)
1370 		return NULL;	/* no netmap support here */
1371 	if (!(na->ifp->if_capenable & IFCAP_NETMAP))
1372 		return NULL;	/* nothing to reinitialize */
1373 	kring = tx == NR_TX ?  na->tx_rings + n : na->rx_rings + n;
1374 	ring = kring->ring;
1375 	lim = kring->nkr_num_slots - 1;
1376 
1377 	if (tx == NR_TX)
1378 		new_hwofs = kring->nr_hwcur - new_cur;
1379 	else
1380 		new_hwofs = kring->nr_hwcur + kring->nr_hwavail - new_cur;
1381 	if (new_hwofs > lim)
1382 		new_hwofs -= lim + 1;
1383 
1384 	/* Always set the new offset value and realign the ring. */
1385 	kring->nkr_hwofs = new_hwofs;
1386 	if (tx == NR_TX)
1387 		kring->nr_hwavail = kring->nkr_num_slots - 1;
1388 	D("new hwofs %d on %s %s[%d]",
1389 			kring->nkr_hwofs, na->ifp->if_xname,
1390 			tx == NR_TX ? "TX" : "RX", n);
1391 
1392 	/*
1393 	 * We do the wakeup here, but the ring is not yet reconfigured.
1394 	 * However, we are under lock so there are no races.
1395 	 */
1396 	selwakeuppri(&kring->si, PI_NET);
1397 	selwakeuppri(&kring[na->num_queues + 1 - n].si, PI_NET);
1398 	return kring->ring->slot;
1399 }
1400 
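/*
 * A driver calls netmap_reset() from its ring-initialization path,
 * e.g. (a sketch; "ring_nr" is the hypothetical index of the ring
 * being reinitialized):
 *
 *	slot = netmap_reset(NA(ifp), NR_TX, ring_nr, 0);
 *	if (slot != NULL) {
 *		// netmap mode: program the NIC descriptors from
 *		// slot[0..lim] instead of allocating new buffers
 *	}
 */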
1401 
1402 /*------ netmap memory allocator -------*/
1403 /*
1404  * Request for a chunk of memory.
1405  *
1406  * Memory objects are arranged into a list, hence we need to walk this
1407  * list until we find an object with the needed amount of data free.
1408  * This sounds like a completely inefficient implementation, but
1409  * since allocations are done only once (at attach time), we can
1410  * afford the linear scan.
1411  *
1412  * Return NULL on failure.
1413  */
1414 static void *
1415 netmap_malloc(size_t size, __unused const char *msg)
1416 {
1417 	struct netmap_mem_obj *mem_obj, *new_mem_obj;
1418 	void *ret = NULL;
1419 
1420 	NMA_LOCK();
1421 	TAILQ_FOREACH(mem_obj, &netmap_mem_d->nm_molist, nmo_next) {
1422 		if (mem_obj->nmo_used != 0 || mem_obj->nmo_size < size)
1423 			continue;
1424 
1425 		new_mem_obj = malloc(sizeof(struct netmap_mem_obj), M_NETMAP,
1426 				     M_WAITOK | M_ZERO);
1427 		TAILQ_INSERT_BEFORE(mem_obj, new_mem_obj, nmo_next);
1428 
1429 		new_mem_obj->nmo_used = 1;
1430 		new_mem_obj->nmo_size = size;
1431 		new_mem_obj->nmo_data = mem_obj->nmo_data;
1432 		memset(new_mem_obj->nmo_data, 0, new_mem_obj->nmo_size);
1433 
1434 		mem_obj->nmo_size -= size;
1435 		mem_obj->nmo_data = (char *) mem_obj->nmo_data + size;
1436 		if (mem_obj->nmo_size == 0) {
1437 			TAILQ_REMOVE(&netmap_mem_d->nm_molist, mem_obj,
1438 				     nmo_next);
1439 			free(mem_obj, M_NETMAP);
1440 		}
1441 
1442 		ret = new_mem_obj->nmo_data;
1443 
1444 		break;
1445 	}
1446 	NMA_UNLOCK();
1447 	ND("%s: %d bytes at %p", msg, size, ret);
1448 
1449 	return (ret);
1450 }
1451 
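/*
 * Worked example of the first-fit split above: with a single free
 * chunk of 1000 bytes, netmap_malloc(100, ...) inserts a new
 * 100-byte object (marked used) in front of it and shrinks the free
 * chunk to 900 bytes, advancing its data pointer. netmap_free()
 * below undoes the split by merging free neighbors.
 */
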
1452 /*
1453  * Return the memory to the allocator.
1454  *
1455  * While freeing a memory object, we try to merge adjacent chunks in
1456  * order to reduce memory fragmentation.
1457  */
1458 static void
1459 netmap_free(void *addr, const char *msg)
1460 {
1461 	size_t size;
1462 	struct netmap_mem_obj *cur, *prev, *next;
1463 
1464 	if (addr == NULL) {
1465 		D("NULL addr for %s", msg);
1466 		return;
1467 	}
1468 
1469 	NMA_LOCK();
1470 	TAILQ_FOREACH(cur, &netmap_mem_d->nm_molist, nmo_next) {
1471 		if (cur->nmo_data == addr && cur->nmo_used)
1472 			break;
1473 	}
1474 	if (cur == NULL) {
1475 		NMA_UNLOCK();
1476 		D("invalid addr %s %p", msg, addr);
1477 		return;
1478 	}
1479 
1480 	size = cur->nmo_size;
1481 	cur->nmo_used = 0;
1482 
1483 	/* merge current chunk of memory with the previous one,
1484 	   if present. */
1485 	prev = TAILQ_PREV(cur, netmap_mem_obj_h, nmo_next);
1486 	if (prev && prev->nmo_used == 0) {
1487 		TAILQ_REMOVE(&netmap_mem_d->nm_molist, cur, nmo_next);
1488 		prev->nmo_size += cur->nmo_size;
1489 		free(cur, M_NETMAP);
1490 		cur = prev;
1491 	}
1492 
1493 	/* merge with the next one */
1494 	next = TAILQ_NEXT(cur, nmo_next);
1495 	if (next && next->nmo_used == 0) {
1496 		TAILQ_REMOVE(&netmap_mem_d->nm_molist, next, nmo_next);
1497 		cur->nmo_size += next->nmo_size;
1498 		free(next, M_NETMAP);
1499 	}
1500 	NMA_UNLOCK();
1501 	ND("freed %s %d bytes at %p", msg, size, addr);
1502 }
1503 
1504 
1505 /*
1506  * Initialize the memory allocator.
1507  *
1508  * Create the descriptor for the memory, allocate the pool of memory
1509  * and initialize the list of memory objects with a single chunk
1510  * containing the whole pre-allocated memory marked as free.
1511  *
1512  * Start with a large size, then halve as needed if we fail to
1513  * allocate the block. While halving, always add one extra page
1514  * because buffers 0 and 1 are used for special purposes.
1515  * Return 0 on success, errno otherwise.
1516  */
1517 static int
1518 netmap_memory_init(void)
1519 {
1520 	struct netmap_mem_obj *mem_obj;
1521 	void *buf = NULL;
1522 	int i, n, sz = NETMAP_MEMORY_SIZE;
1523 	int extra_sz = 0; // space for rings and two spare buffers
1524 
1525 	for (; sz >= 1<<20; sz >>=1) {
1526 		extra_sz = sz/200;
1527 		extra_sz = (extra_sz + 2*PAGE_SIZE - 1) & ~(PAGE_SIZE-1);
1528 		buf = contigmalloc(sz + extra_sz,
1529 			     M_NETMAP,
1530 			     M_WAITOK | M_ZERO,
1531 			     0, /* low address */
1532 			     -1UL, /* high address */
1533 			     PAGE_SIZE, /* alignment */
1534 			     0 /* boundary */
1535 			    );
1536 		if (buf)
1537 			break;
1538 	}
1539 	if (buf == NULL)
1540 		return (ENOMEM);
1541 	sz += extra_sz;
1542 	netmap_mem_d = malloc(sizeof(struct netmap_mem_d), M_NETMAP,
1543 			      M_WAITOK | M_ZERO);
1544 	mtx_init(&netmap_mem_d->nm_mtx, "netmap memory allocator lock", NULL,
1545 		 MTX_DEF);
1546 	TAILQ_INIT(&netmap_mem_d->nm_molist);
1547 	netmap_mem_d->nm_buffer = buf;
1548 	netmap_mem_d->nm_totalsize = sz;
1549 
1550 	/*
1551 	 * A buffer takes 2k and a slot takes 8 bytes + ring overhead,
1552 	 * so the buffer:slot ratio is better than 256:1 (2048/8).
1553 	 * In other words, we can use 1/200 of the memory for the rings,
1554 	 * and the rest for the buffers, and be sure we never run out.
1555 	 */
1556 	netmap_mem_d->nm_size = sz/200;
1557 	netmap_mem_d->nm_buf_start =
1558 		(netmap_mem_d->nm_size + PAGE_SIZE - 1) & ~(PAGE_SIZE-1);
1559 	netmap_mem_d->nm_buf_len = sz - netmap_mem_d->nm_buf_start;
1560 
1561 	nm_buf_pool.base = netmap_mem_d->nm_buffer;
1562 	nm_buf_pool.base += netmap_mem_d->nm_buf_start;
1563 	netmap_buffer_base = nm_buf_pool.base;
1564 	D("netmap_buffer_base %p (offset %d)",
1565 		netmap_buffer_base, (int)netmap_mem_d->nm_buf_start);
1566 	/* number of buffers, they all start as free */
1567 
1568 	netmap_total_buffers = nm_buf_pool.total_buffers =
1569 		netmap_mem_d->nm_buf_len / NETMAP_BUF_SIZE;
1570 	nm_buf_pool.bufsize = NETMAP_BUF_SIZE;
1571 
1572 	D("Have %d MB, use %dKB for rings, %d buffers at %p",
1573 		(sz >> 20), (int)(netmap_mem_d->nm_size >> 10),
1574 		nm_buf_pool.total_buffers, nm_buf_pool.base);
1575 
1576 	/* allocate and initialize the bitmap. Entry 0 is considered
1577 	 * always busy (used as default when there are no buffers left).
1578 	 */
1579 	n = (nm_buf_pool.total_buffers + 31) / 32;
1580 	nm_buf_pool.bitmap = malloc(sizeof(uint32_t) * n, M_NETMAP,
1581 			 M_WAITOK | M_ZERO);
1582 	nm_buf_pool.bitmap[0] = ~3; /* slots 0 and 1 always busy */
1583 	for (i = 1; i < n; i++)
1584 		nm_buf_pool.bitmap[i] = ~0;
1585 	nm_buf_pool.free = nm_buf_pool.total_buffers - 2;
1586 
1587 	mem_obj = malloc(sizeof(struct netmap_mem_obj), M_NETMAP,
1588 			 M_WAITOK | M_ZERO);
1589 	TAILQ_INSERT_HEAD(&netmap_mem_d->nm_molist, mem_obj, nmo_next);
1590 	mem_obj->nmo_used = 0;
1591 	mem_obj->nmo_size = netmap_mem_d->nm_size;
1592 	mem_obj->nmo_data = netmap_mem_d->nm_buffer;
1593 
1594 	return (0);
1595 }
1596 
1597 
1598 /*
1599  * Finalize the memory allocator.
1600  *
1601  * Free all the memory objects contained inside the list, and deallocate
1602  * the pool of memory; finally free the memory allocator descriptor.
1603  */
1604 static void
1605 netmap_memory_fini(void)
1606 {
1607 	struct netmap_mem_obj *mem_obj;
1608 
1609 	while (!TAILQ_EMPTY(&netmap_mem_d->nm_molist)) {
1610 		mem_obj = TAILQ_FIRST(&netmap_mem_d->nm_molist);
1611 		TAILQ_REMOVE(&netmap_mem_d->nm_molist, mem_obj, nmo_next);
1612 		if (mem_obj->nmo_used == 1) {
1613 			printf("netmap: leaked %d bytes at %p\n",
1614 			       (int)mem_obj->nmo_size,
1615 			       mem_obj->nmo_data);
1616 		}
1617 		free(mem_obj, M_NETMAP);
1618 	}
1619 	contigfree(netmap_mem_d->nm_buffer, netmap_mem_d->nm_totalsize, M_NETMAP);
1620 	// XXX mutex_destroy(nm_mtx);
1621 	free(netmap_mem_d, M_NETMAP);
1622 }
1623 
1624 
1625 /*
1626  * Module loader.
1627  *
1628  * Create the /dev/netmap device and initialize all global
1629  * variables.
1630  *
1631  * Return 0 on success, errno on failure.
1632  */
1633 static int
1634 netmap_init(void)
1635 {
1636 	int error;
1637 
1638 
1639 	error = netmap_memory_init();
1640 	if (error != 0) {
1641 		printf("netmap: unable to initialize the memory allocator.\n");
1642 		return (error);
1643 	}
1644 	printf("netmap: loaded module with %d Mbytes\n",
1645 		(int)(netmap_mem_d->nm_totalsize >> 20));
1646 
1647 	netmap_dev = make_dev(&netmap_cdevsw, 0, UID_ROOT, GID_WHEEL, 0660,
1648 			      "netmap");
1649 
1650 	return (0);
1651 }
1652 
1653 
1654 /*
1655  * Module unloader.
1656  *
1657  * Free all the memory, and destroy the ``/dev/netmap`` device.
1658  */
1659 static void
1660 netmap_fini(void)
1661 {
1662 	destroy_dev(netmap_dev);
1663 
1664 	netmap_memory_fini();
1665 
1666 	printf("netmap: unloaded module.\n");
1667 }
1668 
1669 
1670 /*
1671  * Kernel entry point.
1672  *
1673  * Initialize/finalize the module and return.
1674  *
1675  * Return 0 on success, errno on failure.
1676  */
1677 static int
1678 netmap_loader(__unused struct module *module, int event, __unused void *arg)
1679 {
1680 	int error = 0;
1681 
1682 	switch (event) {
1683 	case MOD_LOAD:
1684 		error = netmap_init();
1685 		break;
1686 
1687 	case MOD_UNLOAD:
1688 		netmap_fini();
1689 		break;
1690 
1691 	default:
1692 		error = EOPNOTSUPP;
1693 		break;
1694 	}
1695 
1696 	return (error);
1697 }
1698 
1699 
1700 DEV_MODULE(netmap, netmap_loader, NULL);
1701