/*
 * Copyright (C) 2013-2014 Universita` di Pisa. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *   1. Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module implements netmap support on top of standard,
 * unmodified device drivers.
 *
 * A NIOCREGIF request is handled here if the device does not
 * have native support. TX and RX rings are emulated as follows:
 *
 * NIOCREGIF
 *	We preallocate a block of TX mbufs (roughly as many as
 *	tx descriptors; the number is not critical) to speed up
 *	operation during transmissions. The refcount on most of
 *	these buffers is artificially bumped up so we can recycle
 *	them more easily. Also, the destructor is intercepted
 *	so we use it as an interrupt notification to wake up
 *	processes blocked on a poll().
 *
 *	For each receive ring we allocate one "struct mbq"
 *	(an mbuf tailq plus a spinlock). We intercept packets
 *	(through if_input) on the receive path and put them in
 *	the mbq, from which the netmap receive routines can grab them.
 *
 * TX:
 *	in the generic_netmap_txsync() routine, netmap buffers are copied
 *	(or, in the future, linked) to the preallocated mbufs
 *	and pushed to the transmit queue. Some of these mbufs
 *	(those with NS_REPORT, or otherwise every half ring)
 *	have refcount=1, the others have refcount=2.
 *	When the destructor is invoked, we take that as
 *	a notification that all mbufs up to that one in
 *	the specific ring have been completed, and generate
 *	the equivalent of a transmit interrupt.
 *
 * RX:
 *	packets stolen from the host stack by generic_rx_handler()
 *	are queued into the per-ring "struct mbq"; generic_netmap_rxsync()
 *	then dequeues them, copies their payload into the netmap buffers
 *	and frees them, generating the equivalent of a receive interrupt
 *	(possibly mitigated by a timer).
 */
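
/*
 * For reference, the emulated adapter is driven by exactly the same
 * userspace sequence as a native netmap port. A minimal sketch follows
 * (illustrative only, not compiled here; it assumes the standard
 * <net/netmap_user.h> definitions and uses "em0" as an example name):
 *
 *	struct nmreq req;
 *	int fd = open("/dev/netmap", O_RDWR);
 *
 *	bzero(&req, sizeof(req));
 *	strncpy(req.nr_name, "em0", sizeof(req.nr_name));
 *	req.nr_version = NETMAP_API;
 *	ioctl(fd, NIOCREGIF, &req);	// handled by this module if em0
 *					// lacks native netmap support
 *	void *mem = mmap(NULL, req.nr_memsize, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, fd, 0);
 *	struct netmap_if *nifp = NETMAP_IF(mem, req.nr_offset);
 *	struct netmap_ring *txring = NETMAP_TXRING(nifp, 0);
 *	// fill slots, advance txring->head and txring->cur, then use
 *	// poll() or ioctl(fd, NIOCTXSYNC, NULL) to push the packets out.
 */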

#ifdef __FreeBSD__

#include <sys/cdefs.h> /* prerequisite */
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/lock.h>   /* PROT_EXEC */
#include <sys/rwlock.h>
#include <sys/socket.h> /* sockaddrs */
#include <sys/selinfo.h>
#include <net/if.h>
#include <net/if_var.h>
#include <machine/bus.h>        /* bus_dmamap_* in netmap_kern.h */

// XXX temporary - D() defined here
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <dev/netmap/netmap_mem2.h>

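/*
 * Glue for code shared with the Linux version: on FreeBSD there is no
 * rtnl lock, so rtnl_lock()/rtnl_unlock() only log a message, smp_mb()
 * is a no-op, and MBUF_TXQ() maps an mbuf to its transmit queue through
 * the flowid field.
 */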
#define rtnl_lock() D("rtnl_lock called");
#define rtnl_unlock() D("rtnl_unlock called");
#define MBUF_TXQ(m)	((m)->m_pkthdr.flowid)
#define smp_mb()

/*
 * mbuf wrappers
 */

/*
 * we allocate an EXT_PACKET
 */
#define netmap_get_mbuf(len) m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR|M_NOFREE)

/* mbuf destructor, also need to change the type to EXT_EXTREF,
 * add an M_NOFREE flag, and then clear the flag and
 * chain into uma_zfree(zone_pack, mf)
 * (or reinstall the buffer ?)
 */
#define SET_MBUF_DESTRUCTOR(m, fn)	do {		\
	(m)->m_ext.ext_free = (void *)fn;	\
	(m)->m_ext.ext_type = EXT_EXTREF;	\
} while (0)


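/* Read the external refcount of an mbuf cluster; -1 if no refcount pointer is attached. */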
#define GET_MBUF_REFCNT(m)	((m)->m_ext.ref_cnt ? *(m)->m_ext.ref_cnt : -1)



#else /* linux */

#include "bsd_glue.h"

#include <linux/rtnetlink.h>    /* rtnl_[un]lock() */
#include <linux/ethtool.h>      /* struct ethtool_ops, get_ringparam */
#include <linux/hrtimer.h>

//#define RATE  /* Enables communication statistics. */

//#define REG_RESET

#endif /* linux */


/* Common headers. */
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <dev/netmap/netmap_mem2.h>



/* ======================== usage stats =========================== */

#ifdef RATE
#define IFRATE(x) x
struct rate_stats {
	unsigned long txpkt;
	unsigned long txsync;
	unsigned long txirq;
	unsigned long rxpkt;
	unsigned long rxirq;
	unsigned long rxsync;
};

struct rate_context {
	unsigned refcount;
	struct timer_list timer;
	struct rate_stats new;
	struct rate_stats old;
};

#define RATE_PRINTK(_NAME_) \
	printk( #_NAME_ " = %lu Hz\n", (cur._NAME_ - ctx->old._NAME_)/RATE_PERIOD);
#define RATE_PERIOD  2
static void rate_callback(unsigned long arg)
{
	struct rate_context * ctx = (struct rate_context *)arg;
	struct rate_stats cur = ctx->new;
	int r;

	RATE_PRINTK(txpkt);
	RATE_PRINTK(txsync);
	RATE_PRINTK(txirq);
	RATE_PRINTK(rxpkt);
	RATE_PRINTK(rxsync);
	RATE_PRINTK(rxirq);
	printk("\n");

	ctx->old = cur;
	r = mod_timer(&ctx->timer, jiffies +
			msecs_to_jiffies(RATE_PERIOD * 1000));
	if (unlikely(r))
		D("[v1000] Error: mod_timer()");
}

static struct rate_context rate_ctx;

#else /* !RATE */
#define IFRATE(x)
#endif /* !RATE */


/* =============== GENERIC NETMAP ADAPTER SUPPORT ================= */
#define GENERIC_BUF_SIZE        netmap_buf_size    /* Size of the mbufs in the Tx pool. */

/*
 * Wrapper used by the generic adapter layer to notify
 * the poller threads. Unlike netmap_rx_irq(), we check
 * only IFCAP_NETMAP (instead of NAF_NATIVE_ON) to decide
 * whether to deliver the notification.
 */
static void
netmap_generic_irq(struct ifnet *ifp, u_int q, u_int *work_done)
{
	if (unlikely(!(ifp->if_capenable & IFCAP_NETMAP)))
		return;

	netmap_common_irq(ifp, q, work_done);
}


/* Enable/disable netmap mode for a generic network interface. */
static int
generic_netmap_register(struct netmap_adapter *na, int enable)
{
	struct ifnet *ifp;
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
	struct mbuf *m;
	int error;
	int i, r;

	if (!na)
		return EINVAL;
	ifp = na->ifp;

#ifdef REG_RESET
	error = ifp->netdev_ops->ndo_stop(ifp);
	if (error) {
		return error;
	}
#endif /* REG_RESET */

	if (enable) { /* Enable netmap mode. */
		/* Initialize the rx queue, as generic_rx_handler() can
		 * be called as soon as netmap_catch_rx() returns.
		 */
		for (r=0; r<na->num_rx_rings; r++) {
			mbq_safe_init(&na->rx_rings[r].rx_queue);
		}

		/* Init the mitigation timer. */
		netmap_mitigation_init(gna);

		/*
		 * Preallocate packet buffers for the tx rings.
		 */
		for (r=0; r<na->num_tx_rings; r++)
			na->tx_rings[r].tx_pool = NULL;
		for (r=0; r<na->num_tx_rings; r++) {
			na->tx_rings[r].tx_pool = malloc(na->num_tx_desc * sizeof(struct mbuf *),
					M_DEVBUF, M_NOWAIT | M_ZERO);
			if (!na->tx_rings[r].tx_pool) {
				D("tx_pool allocation failed");
				error = ENOMEM;
				goto free_tx_pools;
			}
			for (i=0; i<na->num_tx_desc; i++)
				na->tx_rings[r].tx_pool[i] = NULL;
			for (i=0; i<na->num_tx_desc; i++) {
				m = netmap_get_mbuf(GENERIC_BUF_SIZE);
				if (!m) {
					D("tx_pool[%d] allocation failed", i);
					error = ENOMEM;
					goto free_tx_pools;
				}
				na->tx_rings[r].tx_pool[i] = m;
			}
		}
		rtnl_lock();
		/* Prepare to intercept incoming traffic. */
		error = netmap_catch_rx(na, 1);
		if (error) {
			D("netdev_rx_handler_register() failed");
			goto register_handler;
		}
		ifp->if_capenable |= IFCAP_NETMAP;

		/* Make netmap control the packet steering. */
		netmap_catch_tx(gna, 1);

		rtnl_unlock();

#ifdef RATE
		if (rate_ctx.refcount == 0) {
			D("setup_timer()");
			memset(&rate_ctx, 0, sizeof(rate_ctx));
			setup_timer(&rate_ctx.timer, &rate_callback, (unsigned long)&rate_ctx);
			if (mod_timer(&rate_ctx.timer, jiffies + msecs_to_jiffies(1500))) {
				D("Error: mod_timer()");
			}
		}
		rate_ctx.refcount++;
#endif /* RATE */

	} else { /* Disable netmap mode. */
		rtnl_lock();

		ifp->if_capenable &= ~IFCAP_NETMAP;

		/* Release packet steering control. */
		netmap_catch_tx(gna, 0);

		/* Do not intercept packets on the rx path. */
		netmap_catch_rx(na, 0);

		rtnl_unlock();

		/* Free the mbufs going to the netmap rings */
		for (r=0; r<na->num_rx_rings; r++) {
			mbq_safe_purge(&na->rx_rings[r].rx_queue);
			mbq_safe_destroy(&na->rx_rings[r].rx_queue);
		}

		netmap_mitigation_cleanup(gna);

		for (r=0; r<na->num_tx_rings; r++) {
			for (i=0; i<na->num_tx_desc; i++) {
				m_freem(na->tx_rings[r].tx_pool[i]);
			}
			free(na->tx_rings[r].tx_pool, M_DEVBUF);
		}

#ifdef RATE
		if (--rate_ctx.refcount == 0) {
			D("del_timer()");
			del_timer(&rate_ctx.timer);
		}
#endif
	}

#ifdef REG_RESET
	error = ifp->netdev_ops->ndo_open(ifp);
	if (error) {
		goto alloc_tx_pool;
	}
#endif

	return 0;

register_handler:
	rtnl_unlock();
free_tx_pools:
	for (r=0; r<na->num_tx_rings; r++) {
		if (na->tx_rings[r].tx_pool == NULL)
			continue;
		for (i=0; i<na->num_tx_desc; i++)
			if (na->tx_rings[r].tx_pool[i])
				m_freem(na->tx_rings[r].tx_pool[i]);
		free(na->tx_rings[r].tx_pool, M_DEVBUF);
	}

	return error;
}

/*
 * Callback invoked when the device driver frees an mbuf used
 * by netmap to transmit a packet. This usually happens when
 * the NIC notifies the driver that transmission is completed.
 */
static void
generic_mbuf_destructor(struct mbuf *m)
{
	if (netmap_verbose)
		D("Tx irq (%p) queue %d", m, MBUF_TXQ(m));
	netmap_generic_irq(MBUF_IFP(m), MBUF_TXQ(m), NULL);
#ifdef __FreeBSD__
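	/*
	 * Restore the mbuf to a regular EXT_PACKET cluster (undoing the
	 * SET_MBUF_DESTRUCTOR changes), make sure the refcount is nonzero,
	 * and return it to the packet zone.
	 */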
	m->m_ext.ext_type = EXT_PACKET;
	m->m_ext.ext_free = NULL;
	if (*(m->m_ext.ref_cnt) == 0)
		*(m->m_ext.ref_cnt) = 1;
	uma_zfree(zone_pack, m);
#endif /* __FreeBSD__ */
	IFRATE(rate_ctx.new.txirq++);
}

/* Record completed transmissions and update hwtail.
 *
 * The oldest tx buffer not yet completed is at nr_hwtail + 1,
 * nr_hwcur is the first unsent buffer.
 */
static u_int
generic_netmap_tx_clean(struct netmap_kring *kring)
{
	u_int const lim = kring->nkr_num_slots - 1;
	u_int nm_i = nm_next(kring->nr_hwtail, lim);
	u_int hwcur = kring->nr_hwcur;
	u_int n = 0;
	struct mbuf **tx_pool = kring->tx_pool;

	while (nm_i != hwcur) { /* buffers not completed */
		struct mbuf *m = tx_pool[nm_i];

		if (unlikely(m == NULL)) {
			/* this is done, try to replenish the entry */
			tx_pool[nm_i] = m = netmap_get_mbuf(GENERIC_BUF_SIZE);
			if (unlikely(m == NULL)) {
				D("mbuf allocation failed, XXX error");
				// XXX how do we proceed ? break ?
				return -ENOMEM;
			}
		} else if (GET_MBUF_REFCNT(m) != 1) {
			break; /* This mbuf is still busy: its refcnt is 2. */
		}
		n++;
		nm_i = nm_next(nm_i, lim);
	}
	kring->nr_hwtail = nm_prev(nm_i, lim);
	ND("tx completed [%d] -> hwtail %d", n, kring->nr_hwtail);

	return n;
}


/*
 * We have pending packets in the driver between nr_hwtail + 1 and hwcur.
 * Compute a position in the middle, to be used to generate
 * a notification.
 */
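/*
 * Worked example (illustrative values): with nkr_num_slots = 256,
 * nr_hwtail = 200 and hwcur = 40, ntc is 201; since hwcur < ntc we
 * wrap: e = (40 + 256 + 201) / 2 = 248, which falls halfway along the
 * pending region 201..255,0..39.
 */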
static inline u_int
generic_tx_event_middle(struct netmap_kring *kring, u_int hwcur)
{
	u_int n = kring->nkr_num_slots;
	u_int ntc = nm_next(kring->nr_hwtail, n-1);
	u_int e;

	if (hwcur >= ntc) {
		e = (hwcur + ntc) / 2;
	} else { /* wrap around */
		e = (hwcur + n + ntc) / 2;
		if (e >= n) {
			e -= n;
		}
	}

	if (unlikely(e >= n)) {
		D("This cannot happen");
		e = 0;
	}

	return e;
}

/*
 * We have pending packets in the driver between nr_hwtail+1 and hwcur.
 * Schedule a notification approximately in the middle of the two.
 * There is a race but this is only called within txsync which does
 * a double check.
 */
static void
generic_set_tx_event(struct netmap_kring *kring, u_int hwcur)
{
	struct mbuf *m;
	u_int e;

	if (nm_next(kring->nr_hwtail, kring->nkr_num_slots -1) == hwcur) {
		return; /* all buffers are free */
	}
	e = generic_tx_event_middle(kring, hwcur);

	m = kring->tx_pool[e];
	if (m == NULL) {
		/* This can happen if there is already an event on the netmap
		   slot 'e': There is nothing to do. */
		return;
	}
	ND("Event at %d mbuf %p refcnt %d", e, m, GET_MBUF_REFCNT(m));
	kring->tx_pool[e] = NULL;
	SET_MBUF_DESTRUCTOR(m, generic_mbuf_destructor);

	// XXX wmb() ?
	/* Decrement the refcount and free it if we have the last one. */
	m_freem(m);
	smp_mb();
}


/*
 * generic_netmap_txsync() transforms netmap buffers into mbufs
 * and passes them to the standard device driver
 * (ndo_start_xmit() or ifp->if_transmit()).
 * On Linux this is not done directly, but through dev_queue_xmit(),
 * since it implements the TX flow control (and takes some locks).
 */
static int
generic_netmap_txsync(struct netmap_adapter *na, u_int ring_nr, int flags)
{
	struct ifnet *ifp = na->ifp;
	struct netmap_kring *kring = &na->tx_rings[ring_nr];
	struct netmap_ring *ring = kring->ring;
	u_int nm_i;	/* index into the netmap ring */
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	IFRATE(u_int ntx = 0);	/* packets passed to the driver in this call */

	IFRATE(rate_ctx.new.txsync++);

	// TODO: handle the case of mbuf allocation failure

	rmb();

	/*
	 * First part: process new packets to send.
	 */
	nm_i = kring->nr_hwcur;
	if (nm_i != head) {	/* we have new packets to send */
		while (nm_i != head) {
			struct netmap_slot *slot = &ring->slot[nm_i];
			u_int len = slot->len;
			void *addr = NMB(slot);

			/* device-specific */
			struct mbuf *m;
			int tx_ret;

			NM_CHECK_ADDR_LEN(addr, len);

			/* Take an mbuf from the tx pool and copy in the user packet. */
			m = kring->tx_pool[nm_i];
			if (unlikely(!m)) {
				RD(5, "This should never happen");
				kring->tx_pool[nm_i] = m = netmap_get_mbuf(GENERIC_BUF_SIZE);
				if (unlikely(m == NULL)) {
					D("mbuf allocation failed");
					break;
				}
			}
			/* XXX we should ask notifications when NS_REPORT is set,
			 * or roughly every half frame. We can optimize this
			 * by lazily requesting notifications only when a
			 * transmission fails. Probably the best way is to
			 * break on failures and set notifications when
			 * ring->cur == ring->tail || nm_i != cur
			 */
			tx_ret = generic_xmit_frame(ifp, m, addr, len, ring_nr);
			if (unlikely(tx_ret)) {
				RD(5, "start_xmit failed: err %d [nm_i %u, head %u, hwtail %u]",
						tx_ret, nm_i, head, kring->nr_hwtail);
				/*
				 * No room for this mbuf in the device driver.
				 * Request a notification FOR A PREVIOUS MBUF,
				 * then call generic_netmap_tx_clean(kring) to do the
				 * double check and see if we can free more buffers.
				 * If there is space continue, else break;
				 * NOTE: the double check is necessary if the problem
				 * occurs in the txsync call after selrecord().
				 * Also, we need some way to tell the caller that not
				 * all buffers were queued onto the device (this was
				 * not a problem with native netmap driver where space
				 * is preallocated). The bridge has a similar problem
				 * and we solve it there by dropping the excess packets.
				 */
				generic_set_tx_event(kring, nm_i);
				if (generic_netmap_tx_clean(kring)) { /* space now available */
					continue;
				} else {
					break;
				}
			}
			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
			nm_i = nm_next(nm_i, lim);
			IFRATE(ntx++);
		}

		/* Update hwcur to the next slot to transmit. */
		kring->nr_hwcur = nm_i; /* not head, we could break early */

		IFRATE(rate_ctx.new.txpkt += ntx);
	}

	/*
	 * Second, reclaim completed buffers
	 */
	if (flags & NAF_FORCE_RECLAIM || nm_kr_txempty(kring)) {
		/* No more available slots? Set a notification event
		 * on a netmap slot that will be cleaned in the future.
		 * No doublecheck is performed, since txsync() will be
		 * called twice by netmap_poll().
		 */
		generic_set_tx_event(kring, nm_i);
	}
	ND("tx #%d, hwtail = %d", n, kring->nr_hwtail);

	generic_netmap_tx_clean(kring);

	nm_txsync_finalize(kring);

	return 0;
}


/*
 * This handler is registered (through netmap_catch_rx())
 * within the attached network interface
 * in the RX subsystem, so that every mbuf passed up by
 * the driver can be stolen before it reaches the network stack.
 * Stolen packets are put in a queue from which the
 * generic_netmap_rxsync() callback can extract them.
 */
void
generic_rx_handler(struct ifnet *ifp, struct mbuf *m)
{
	struct netmap_adapter *na = NA(ifp);
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
	u_int work_done;
	u_int rr = 0; // receive ring number

	/* limit the size of the queue */
	if (unlikely(mbq_len(&na->rx_rings[rr].rx_queue) > 1024)) {
		m_freem(m);
	} else {
		mbq_safe_enqueue(&na->rx_rings[rr].rx_queue, m);
	}

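	/* netmap_generic_mit is a global tunable holding the rx mitigation
	 * period (in nanoseconds); values below ~32us, as checked here,
	 * effectively disable mitigation and every intercepted mbuf
	 * generates a notification.
	 */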
	if (netmap_generic_mit < 32768) {
		/* no rx mitigation, pass notification up */
		netmap_generic_irq(na->ifp, rr, &work_done);
		IFRATE(rate_ctx.new.rxirq++);
	} else {
		/* same as send combining, filter notification if there is a
		 * pending timer, otherwise pass it up and start a timer.
		 */
		if (likely(netmap_mitigation_active(gna))) {
			/* Record that there is some pending work. */
			gna->mit_pending = 1;
		} else {
			netmap_generic_irq(na->ifp, rr, &work_done);
			IFRATE(rate_ctx.new.rxirq++);
			netmap_mitigation_start(gna);
		}
	}
}

/*
 * generic_netmap_rxsync() extracts mbufs from the queue filled by
 * generic_rx_handler() and puts their content in the netmap
 * receive ring.
 * Access must be protected because the rx handler is asynchronous.
 */
static int
generic_netmap_rxsync(struct netmap_adapter *na, u_int ring_nr, int flags)
{
	struct netmap_kring *kring = &na->rx_rings[ring_nr];
	struct netmap_ring *ring = kring->ring;
	u_int nm_i;	/* index into the netmap ring */
	u_int n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = nm_rxsync_prologue(kring);
	int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;

	if (head > lim)
		return netmap_ring_reinit(kring);

	/*
	 * First part: import newly received packets.
	 */
	if (netmap_no_pendintr || force_update) {
		/* extract buffers from the rx queue, stop at most one
		 * slot before nr_hwcur (stop_i)
		 */
		uint16_t slot_flags = kring->nkr_slot_flags;
		u_int stop_i = nm_prev(kring->nr_hwcur, lim);

		nm_i = kring->nr_hwtail; /* first empty slot in the receive ring */
		for (n = 0; nm_i != stop_i; n++) {
			int len;
			void *addr = NMB(&ring->slot[nm_i]);
			struct mbuf *m;

			/* we only check the address here on generic rx rings */
			if (addr == netmap_buffer_base) { /* Bad buffer */
				return netmap_ring_reinit(kring);
			}
			/*
			 * Call the locked version of the function.
			 * XXX Ideally we could grab a batch of mbufs at once
			 * and save some locking overhead.
			 */
			m = mbq_safe_dequeue(&kring->rx_queue);
			if (!m)	/* no more data */
				break;
			len = MBUF_LEN(m);
			m_copydata(m, 0, len, addr);
			ring->slot[nm_i].len = len;
			ring->slot[nm_i].flags = slot_flags;
			m_freem(m);
			nm_i = nm_next(nm_i, lim);
		}
		if (n) {
			kring->nr_hwtail = nm_i;
			IFRATE(rate_ctx.new.rxpkt += n);
		}
		kring->nr_kflags &= ~NKR_PENDINTR;
	}

	// XXX should we invert the order ?
	/*
	 * Second part: skip past packets that userspace has released.
	 */
	nm_i = kring->nr_hwcur;
	if (nm_i != head) {
		/* Userspace has released some packets. */
		for (n = 0; nm_i != head; n++) {
			struct netmap_slot *slot = &ring->slot[nm_i];

			slot->flags &= ~NS_BUF_CHANGED;
			nm_i = nm_next(nm_i, lim);
		}
		kring->nr_hwcur = head;
	}
	/* tell userspace that there might be new packets. */
	nm_rxsync_finalize(kring);
	IFRATE(rate_ctx.new.rxsync++);

	return 0;
}

static void
generic_netmap_dtor(struct netmap_adapter *na)
{
	struct ifnet *ifp = na->ifp;
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter*)na;
	struct netmap_adapter *prev_na = gna->prev;

	if (prev_na != NULL) {
		D("Released generic NA %p", gna);
		if_rele(na->ifp);
		netmap_adapter_put(prev_na);
	}
	if (ifp != NULL) {
		WNA(ifp) = prev_na;
		D("Restored native NA %p", prev_na);
		na->ifp = NULL;
	}
}

/*
 * generic_netmap_attach() makes it possible to use netmap on
 * a device without native netmap support.
 * This is less performant than native support but potentially
 * faster than raw sockets or similar schemes.
 *
 * In this "emulated" mode, netmap rings do not necessarily
 * have the same size as those in the NIC. We use a default
 * value and possibly override it if the OS has ways to fetch the
 * actual configuration.
 */
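/*
 * The default ring size comes from the netmap_generic_ringsize global
 * (a netmap tunable, typically exposed as a sysctl or module parameter);
 * generic_find_num_desc() may replace it with the actual NIC ring sizes
 * where the OS provides them (on Linux, through the ethtool
 * get_ringparam hook included above).
 */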
int
generic_netmap_attach(struct ifnet *ifp)
{
	struct netmap_adapter *na;
	struct netmap_generic_adapter *gna;
	int retval;
	u_int num_tx_desc, num_rx_desc;

	num_tx_desc = num_rx_desc = netmap_generic_ringsize; /* starting point */

	generic_find_num_desc(ifp, &num_tx_desc, &num_rx_desc);
	ND("Netmap ring size: TX = %d, RX = %d", num_tx_desc, num_rx_desc);

	gna = malloc(sizeof(*gna), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (gna == NULL) {
		D("no memory on attach, give up");
		return ENOMEM;
	}
	na = (struct netmap_adapter *)gna;
	na->ifp = ifp;
	na->num_tx_desc = num_tx_desc;
	na->num_rx_desc = num_rx_desc;
	na->nm_register = &generic_netmap_register;
	na->nm_txsync = &generic_netmap_txsync;
	na->nm_rxsync = &generic_netmap_rxsync;
	na->nm_dtor = &generic_netmap_dtor;
	/* when using generic, IFCAP_NETMAP is set so we force
	 * NAF_SKIP_INTR to use the regular interrupt handler
	 */
	na->na_flags = NAF_SKIP_INTR;

	ND("[GNA] num_tx_queues(%d), real_num_tx_queues(%d), len(%lu)",
			ifp->num_tx_queues, ifp->real_num_tx_queues,
			ifp->tx_queue_len);
	ND("[GNA] num_rx_queues(%d), real_num_rx_queues(%d)",
			ifp->num_rx_queues, ifp->real_num_rx_queues);

	generic_find_num_queues(ifp, &na->num_tx_rings, &na->num_rx_rings);

	retval = netmap_attach_common(na);
	if (retval) {
		free(gna, M_DEVBUF);
	}

	return retval;
}
783