/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2011-2014 Matteo Landi
 * Copyright (C) 2011-2016 Luigi Rizzo
 * Copyright (C) 2011-2016 Giuseppe Lettieri
 * Copyright (C) 2011-2016 Vincenzo Maffione
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *   1. Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */


/*
 * $FreeBSD$
 *
 * This module supports memory mapped access to network devices,
 * see netmap(4).
 *
 * The module uses a large memory pool allocated by the kernel
 * and accessible as mmapped memory by multiple userspace threads/processes.
 * The memory pool contains packet buffers and "netmap rings",
 * i.e. user-accessible copies of the interface's queues.
 *
 * Access to the network card works like this (a minimal usage sketch
 * follows the list):
 * 1. a process/thread issues one or more open() on /dev/netmap, to create
 *    a select()able file descriptor on which events are reported.
 * 2. on each descriptor, the process issues an ioctl() to identify
 *    the interface that should report events to the file descriptor.
 * 3. on each descriptor, the process issues an mmap() request to
 *    map the shared memory region within the process' address space.
 *    The list of interesting queues is indicated by a location in
 *    the shared memory region.
 * 4. using the functions in the netmap(4) userspace API, a process
 *    can look up the occupation state of a queue, access memory buffers,
 *    and retrieve received packets or enqueue packets to transmit.
 * 5. using some ioctl()s the process can synchronize the userspace view
 *    of the queue with the actual status in the kernel. This includes both
 *    receiving the notification of new packets, and transmitting new
 *    packets on the output interface.
 * 6. select() or poll() can be used to wait for events on individual
 *    transmit or receive queues (or all queues for a given interface).
 *
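 * A minimal sketch of steps 1-6, using the legacy nmreq ioctl interface
 * (error handling omitted; "em0" is just a placeholder interface name):
 *
 *	struct nmreq req;
 *	struct pollfd pfd;
 *	int fd = open("/dev/netmap", O_RDWR);			// step 1
 *	bzero(&req, sizeof(req));
 *	strlcpy(req.nr_name, "em0", sizeof(req.nr_name));
 *	req.nr_version = NETMAP_API;
 *	ioctl(fd, NIOCREGIF, &req);				// step 2
 *	void *mem = mmap(NULL, req.nr_memsize,
 *	    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);		// step 3
 *	struct netmap_if *nifp = NETMAP_IF(mem, req.nr_offset);
 *	struct netmap_ring *txring = NETMAP_TXRING(nifp, 0);	// step 4
 *	// ... fill slots between txring->head and txring->tail ...
 *	ioctl(fd, NIOCTXSYNC, NULL);				// step 5
 *	pfd.fd = fd; pfd.events = POLLOUT;
 *	poll(&pfd, 1, -1);					// step 6
 *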

		SYNCHRONIZATION (USER)

The netmap rings and data structures may be shared among multiple
user threads or even independent processes.
Any synchronization among those threads/processes is delegated
to the threads themselves. Only one thread at a time can be in
a system call on the same netmap ring. The OS does not enforce
this; it only guarantees that invalid usage will not cause a
system crash.

		LOCKING (INTERNAL)

Within the kernel, access to the netmap rings is protected as follows:

- a spinlock on each ring, to handle producer/consumer races on
  RX rings attached to the host stack (against multiple host
  threads writing from the host stack to the same ring),
  and on 'destination' rings attached to a VALE switch
  (i.e. RX rings in VALE ports, and TX rings in NIC/host ports),
  to protect multiple active senders for the same destination

- an atomic variable to guarantee that there is at most one
  instance of *_*xsync() on the ring at any time.
  For rings connected to user file
  descriptors, an atomic_test_and_set() protects this, and the
  lock on the ring is not actually used.
  For NIC RX rings connected to a VALE switch, an atomic_test_and_set()
  is also used to prevent multiple executions (the driver might indeed
  already guarantee this).
  For NIC TX rings connected to a VALE switch, the lock arbitrates
  access to the queue (both when allocating buffers and when pushing
  them out).

- *xsync() should be protected against initializations of the card.
  On FreeBSD most devices have the reset routine protected by
  a RING lock (ixgbe, igb, em) or core lock (re). lem is missing
  the RING protection on rx_reset(); this should be added.

  On linux there is an external lock on the tx path, which probably
  also arbitrates access to the reset routine. XXX to be revised

- a per-interface core_lock protecting access from the host stack
  while interfaces may be detached from netmap mode.
  XXX there should be no need for this lock if we detach the interfaces
  only while they are down.


--- VALE SWITCH ---

NMG_LOCK() serializes all modifications to switches and ports.
A switch cannot be deleted until all ports are gone.

For each switch, an SX lock (RWlock on linux) protects
deletion of ports. When configuring a new port or deleting an
existing one, the lock is acquired in exclusive mode (after holding
NMG_LOCK). When forwarding, the lock is acquired in shared mode
(without NMG_LOCK). The lock is held throughout the entire
forwarding cycle, during which the thread may incur a page fault.
Hence it is important that sleepable shared locks are used.

On the rx ring, the per-port lock is grabbed initially to reserve
a number of slots in the ring, then the lock is released,
packets are copied from source to destination, and then
the lock is acquired again and the receive ring is updated.
(A similar thing is done on the tx ring for NIC and host stack
ports attached to the switch; a sketch of the cycle follows.)
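
In pseudocode, the rx-ring cycle just described is:

	lock(port)
	reserve N slots, advancing the lease pointer
	unlock(port)
	copy packets into the reserved slots (lock not held)
	lock(port)
	advance the ring pointers past the completed leases
	unlock(port)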

 */


/* --- internals ----
 *
 * Roadmap to the code that implements the above.
 *
 * > 1. a process/thread issues one or more open() on /dev/netmap, to create
 * >    a select()able file descriptor on which events are reported.
 *
 *  	Internally, we allocate a netmap_priv_d structure, which will be
 *  	initialized on ioctl(NIOCREGIF). There is one netmap_priv_d
 *  	structure for each open().
 *
 *      os-specific:
 *  	    FreeBSD: see netmap_open() (netmap_freebsd.c)
 *  	    linux:   see linux_netmap_open() (netmap_linux.c)
 *
 * > 2. on each descriptor, the process issues an ioctl() to identify
 * >    the interface that should report events to the file descriptor.
 *
 * 	Implemented by netmap_ioctl(), NIOCREGIF case, with nmr->nr_cmd==0.
 * 	Most important things happen in netmap_get_na() and
 * 	netmap_do_regif(), called from there. Additional details can be
 * 	found in the comments above those functions.
 *
 * 	In all cases, this action creates/takes-a-reference-to a
 * 	netmap_*_adapter describing the port, and allocates a netmap_if
 * 	and all necessary netmap rings, filling them with netmap buffers.
 *
 *      In this phase, the sync callbacks for each ring are set (these are used
 *      in steps 5 and 6 below).  The callbacks depend on the type of adapter.
 *      The adapter creation/initialization code puts them in the
 * 	netmap_adapter (fields na->nm_txsync and na->nm_rxsync).  Then, they
 * 	are copied from there to the netmap_kring's during netmap_do_regif(), by
 * 	the nm_krings_create() callback.  All the nm_krings_create callbacks
 * 	actually call netmap_krings_create() to perform this and the other
 * 	common stuff. netmap_krings_create() also takes care of the host rings,
 * 	if needed, by setting their sync callbacks appropriately.
 *
 * 	Additional actions depend on the kind of netmap_adapter that has been
 * 	registered:
 *
 * 	- netmap_hw_adapter:  	     [netmap.c]
 * 	     This is a system netdev/ifp with native netmap support.
 * 	     The ifp is detached from the host stack by redirecting:
 * 	       - transmissions (from the network stack) to netmap_transmit()
 * 	       - receive notifications to the nm_notify() callback for
 * 	         this adapter. The callback is normally netmap_notify(), unless
 * 	         the ifp is attached to a bridge using bwrap, in which case it
 * 	         is netmap_bwrap_intr_notify().
 *
 * 	- netmap_generic_adapter:      [netmap_generic.c]
 * 	      A system netdev/ifp without native netmap support.
 *
 * 	(the decision about native/non-native support is taken in
 * 	 netmap_get_hw_na(), called by netmap_get_na())
 *
 * 	- netmap_vp_adapter 		[netmap_vale.c]
 * 	      Returned by netmap_get_bdg_na().
 * 	      This is a persistent or ephemeral VALE port. Ephemeral ports
 * 	      are created on the fly if they don't already exist, and are
 * 	      always attached to a bridge.
 * 	      Persistent VALE ports must be created separately, and are
 * 	      then attached like normal NICs. The NIOCREGIF we are examining
 * 	      will find them only if they had previously been created and
 * 	      attached (see VALE_CTL below).
 *
 * 	- netmap_pipe_adapter 	      [netmap_pipe.c]
 * 	      Returned by netmap_get_pipe_na().
 * 	      Both pipe ends are created, if they didn't already exist.
 *
 * 	- netmap_monitor_adapter      [netmap_monitor.c]
 * 	      Returned by netmap_get_monitor_na().
 * 	      If successful, the nm_sync callbacks of the monitored adapter
 * 	      will be intercepted by the returned monitor.
 *
 * 	- netmap_bwrap_adapter	      [netmap_vale.c]
 * 	      Cannot be obtained in this way, see VALE_CTL below
 *
 *
 * 	os-specific:
 * 	    linux: we first go through linux_netmap_ioctl() to
 * 	           adapt the FreeBSD interface to the linux one.
 *
 *
 * > 3. on each descriptor, the process issues an mmap() request to
 * >    map the shared memory region within the process' address space.
 * >    The list of interesting queues is indicated by a location in
 * >    the shared memory region.
 *
 *      os-specific:
 *  	    FreeBSD: netmap_mmap_single (netmap_freebsd.c).
 *  	    linux:   linux_netmap_mmap (netmap_linux.c).
 *
 * > 4. using the functions in the netmap(4) userspace API, a process
 * >    can look up the occupation state of a queue, access memory buffers,
 * >    and retrieve received packets or enqueue packets to transmit.
 *
 * 	These actions do not involve the kernel.
 *
 * > 5. using some ioctl()s the process can synchronize the userspace view
 * >    of the queue with the actual status in the kernel. This includes both
 * >    receiving the notification of new packets, and transmitting new
 * >    packets on the output interface.
 *
 * 	These are implemented in netmap_ioctl(), NIOCTXSYNC and NIOCRXSYNC
 * 	cases. They invoke the nm_sync callbacks on the netmap_kring
 * 	structures, as initialized in step 2 and maybe later modified
 * 	by a monitor. Monitors, however, will always call the original
 * 	callback before doing anything else.
 *
 *
 * > 6. select() or poll() can be used to wait for events on individual
 * >    transmit or receive queues (or all queues for a given interface).
 *
 * 	Implemented in netmap_poll(). This will call the same nm_sync()
 * 	callbacks as in step 5 above.
 *
 * 	os-specific:
 * 		linux: we first go through linux_netmap_poll() to adapt
 * 		       the FreeBSD interface to the linux one.
 *
 *
 *  ----  VALE_CTL -----
 *
 *  VALE switches are controlled by issuing a NIOCREGIF with a non-null
 *  nr_cmd in the nmreq structure. These subcommands are handled by
 *  netmap_bdg_ctl() in netmap_vale.c. Persistent VALE ports are created
 *  and destroyed by issuing the NETMAP_BDG_NEWIF and NETMAP_BDG_DELIF
 *  subcommands, respectively.
 *
 *  Any network interface known to the system (including a persistent VALE
 *  port) can be attached to a VALE switch by issuing the
 *  NETMAP_REQ_VALE_ATTACH command. After the attachment, persistent VALE ports
 *  look exactly like ephemeral VALE ports (as created in step 2 above).  The
 *  attachment of other interfaces, instead, requires the creation of a
 *  netmap_bwrap_adapter.  Moreover, the attached interface must be put in
 *  netmap mode. This may require the creation of a netmap_generic_adapter if
 *  we have no native support for the interface, or if generic adapters have
 *  been forced by sysctl.
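 *
 *  For instance, with the vale-ctl tool shipped with netmap
 *  distributions, the following commands (a sketch; "v0" and "em0" are
 *  placeholder names) create a persistent port and attach it, together
 *  with a NIC, to the switch "vale0":
 *
 *      vale-ctl -n v0              # NETMAP_BDG_NEWIF: create port v0
 *      vale-ctl -a vale0:v0        # attach v0 to switch vale0
 *      vale-ctl -a vale0:em0       # attach NIC em0 (via a bwrap)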
 *
 *  Both persistent VALE ports and bwraps are handled by netmap_get_bdg_na(),
 *  called by nm_bdg_ctl_attach(), and discriminated by the nm_bdg_attach()
 *  callback.  In the case of the bwrap, the callback creates the
 *  netmap_bwrap_adapter.  The initialization of the bwrap is then
 *  completed by calling netmap_do_regif() on it, in the nm_bdg_ctl()
 *  callback (netmap_bwrap_bdg_ctl in netmap_vale.c).
 *  A generic adapter for the wrapped ifp will be created if needed, when
 *  netmap_get_bdg_na() calls netmap_get_hw_na().
 *
 *
 *  ---- DATAPATHS -----
 *
 *              -= SYSTEM DEVICE WITH NATIVE SUPPORT =-
 *
 *    na == NA(ifp) == netmap_hw_adapter created in DEVICE_netmap_attach()
 *
 *    - tx from netmap userspace:
 *	 concurrently:
 *           1) ioctl(NIOCTXSYNC)/netmap_poll() in process context
 *                kring->nm_sync() == DEVICE_netmap_txsync()
 *           2) device interrupt handler
 *                na->nm_notify()  == netmap_notify()
 *    - rx from netmap userspace:
 *       concurrently:
 *           1) ioctl(NIOCRXSYNC)/netmap_poll() in process context
 *                kring->nm_sync() == DEVICE_netmap_rxsync()
 *           2) device interrupt handler
 *                na->nm_notify()  == netmap_notify()
 *    - rx from host stack
 *       concurrently:
 *           1) host stack
 *                netmap_transmit()
 *                  na->nm_notify  == netmap_notify()
 *           2) ioctl(NIOCRXSYNC)/netmap_poll() in process context
 *                kring->nm_sync() == netmap_rxsync_from_host
 *                  netmap_rxsync_from_host(na, NULL, NULL)
 *    - tx to host stack
 *           ioctl(NIOCTXSYNC)/netmap_poll() in process context
 *             kring->nm_sync() == netmap_txsync_to_host
 *               netmap_txsync_to_host(na)
 *                 nm_os_send_up()
 *                   FreeBSD: na->if_input() == ether_input()
 *                   linux: netif_rx() with NM_MAGIC_PRIORITY_RX
 *
 *
 *               -= SYSTEM DEVICE WITH GENERIC SUPPORT =-
 *
 *    na == NA(ifp) == generic_netmap_adapter created in generic_netmap_attach()
 *
 *    - tx from netmap userspace:
 *       concurrently:
 *           1) ioctl(NIOCTXSYNC)/netmap_poll() in process context
 *               kring->nm_sync() == generic_netmap_txsync()
 *                   nm_os_generic_xmit_frame()
 *                       linux:   dev_queue_xmit() with NM_MAGIC_PRIORITY_TX
 *                           ifp->ndo_start_xmit == generic_ndo_start_xmit()
 *                               gna->save_start_xmit == orig. dev. start_xmit
 *                       FreeBSD: na->if_transmit() == orig. dev if_transmit
 *           2) generic_mbuf_destructor()
 *                   na->nm_notify() == netmap_notify()
 *    - rx from netmap userspace:
 *           1) ioctl(NIOCRXSYNC)/netmap_poll() in process context
 *               kring->nm_sync() == generic_netmap_rxsync()
 *                   mbq_safe_dequeue()
 *           2) device driver
 *               generic_rx_handler()
 *                   mbq_safe_enqueue()
 *                   na->nm_notify() == netmap_notify()
 *    - rx from host stack
 *        FreeBSD: same as native
 *        Linux: same as native except:
 *           1) host stack
 *               dev_queue_xmit() without NM_MAGIC_PRIORITY_TX
 *                   ifp->ndo_start_xmit == generic_ndo_start_xmit()
 *                       netmap_transmit()
 *                           na->nm_notify() == netmap_notify()
 *    - tx to host stack (same as native):
 *
 *
 *                           -= VALE =-
 *
 *   INCOMING:
 *
 *      - VALE ports:
 *          ioctl(NIOCTXSYNC)/netmap_poll() in process context
 *              kring->nm_sync() == netmap_vp_txsync()
 *
 *      - system device with native support:
 *         from cable:
 *             interrupt
 *                na->nm_notify() == netmap_bwrap_intr_notify(ring_nr != host ring)
 *                     kring->nm_sync() == DEVICE_netmap_rxsync()
 *                     netmap_vp_txsync()
 *                     kring->nm_sync() == DEVICE_netmap_rxsync()
 *         from host stack:
 *             netmap_transmit()
 *                na->nm_notify() == netmap_bwrap_intr_notify(ring_nr == host ring)
 *                     kring->nm_sync() == netmap_rxsync_from_host()
 *                     netmap_vp_txsync()
 *
 *      - system device with generic support:
 *         from device driver:
 *            generic_rx_handler()
 *                na->nm_notify() == netmap_bwrap_intr_notify(ring_nr != host ring)
 *                     kring->nm_sync() == generic_netmap_rxsync()
 *                     netmap_vp_txsync()
 *                     kring->nm_sync() == generic_netmap_rxsync()
 *         from host stack:
 *            netmap_transmit()
 *                na->nm_notify() == netmap_bwrap_intr_notify(ring_nr == host ring)
 *                     kring->nm_sync() == netmap_rxsync_from_host()
 *                     netmap_vp_txsync()
 *
 *   (all cases) --> nm_bdg_flush()
 *                      dest_na->nm_notify() == (see below)
 *
 *   OUTGOING:
 *
 *      - VALE ports:
 *         concurrently:
 *             1) ioctl(NIOCRXSYNC)/netmap_poll() in process context
 *                    kring->nm_sync() == netmap_vp_rxsync()
 *             2) from nm_bdg_flush()
 *                    na->nm_notify() == netmap_notify()
 *
 *      - system device with native support:
 *          to cable:
 *             na->nm_notify() == netmap_bwrap_notify()
 *                 netmap_vp_rxsync()
 *                 kring->nm_sync() == DEVICE_netmap_txsync()
 *                 netmap_vp_rxsync()
 *          to host stack:
 *                 netmap_vp_rxsync()
 *                 kring->nm_sync() == netmap_txsync_to_host
 *                 netmap_vp_rxsync_locked()
 *
 *      - system device with generic adapter:
 *          to device driver:
 *             na->nm_notify() == netmap_bwrap_notify()
 *                 netmap_vp_rxsync()
 *                 kring->nm_sync() == generic_netmap_txsync()
 *                 netmap_vp_rxsync()
 *          to host stack:
 *                 netmap_vp_rxsync()
 *                 kring->nm_sync() == netmap_txsync_to_host
 *                 netmap_vp_rxsync()
 *
 */

/*
 * OS-specific code that is used only within this file.
 * Other OS-specific code that must be accessed by drivers
 * is present in netmap_kern.h
 */

#if defined(__FreeBSD__)
#include <sys/cdefs.h> /* prerequisite */
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/param.h>	/* defines used in kernel.h */
#include <sys/kernel.h>	/* types used in module initialization */
#include <sys/conf.h>	/* cdevsw struct, UID, GID */
#include <sys/filio.h>	/* FIONBIO */
#include <sys/sockio.h>
#include <sys/socketvar.h>	/* struct socket */
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/socket.h> /* sockaddrs */
#include <sys/selinfo.h>
#include <sys/sysctl.h>
#include <sys/jail.h>
#include <sys/epoch.h>
#include <net/vnet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/bpf.h>		/* BIOCIMMEDIATE */
#include <machine/bus.h>	/* bus_dmamap_* */
#include <sys/endian.h>
#include <sys/refcount.h>
#include <net/ethernet.h>	/* ETHER_BPF_MTAP */


#elif defined(linux)

#include "bsd_glue.h"

#elif defined(__APPLE__)

#warning OSX support is only partial
#include "osx_glue.h"

#elif defined (_WIN32)

#include "win_glue.h"

#else

#error	Unsupported platform

#endif /* unsupported */

/*
 * common headers
 */
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <dev/netmap/netmap_mem2.h>


/* user-controlled variables */
int netmap_verbose;
#ifdef CONFIG_NETMAP_DEBUG
int netmap_debug;
#endif /* CONFIG_NETMAP_DEBUG */

static int netmap_no_timestamp; /* don't timestamp on rxsync */
int netmap_no_pendintr = 1;
int netmap_txsync_retry = 2;
static int netmap_fwd = 0;	/* force transparent forwarding */

/*
 * netmap_admode selects the netmap mode to use.
 * Invalid values are reset to NETMAP_ADMODE_BEST
 */
enum {	NETMAP_ADMODE_BEST = 0,	/* use native, fallback to generic */
	NETMAP_ADMODE_NATIVE,	/* either native or none */
	NETMAP_ADMODE_GENERIC,	/* force generic */
	NETMAP_ADMODE_LAST };
static int netmap_admode = NETMAP_ADMODE_BEST;
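/*
 * For example, on FreeBSD the mode can be changed at runtime through the
 * sysctl exported below (2 == NETMAP_ADMODE_GENERIC):
 *
 *	# sysctl dev.netmap.admode=2	# force emulated adapters
 */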

/* netmap_generic_mit controls mitigation of RX notifications for
 * the generic netmap adapter. The value is a time interval in
 * nanoseconds. */
int netmap_generic_mit = 100*1000;

/* We use by default netmap-aware qdiscs with generic netmap adapters,
 * even if there may be a small performance hit with hardware NICs.
 * However, using the qdisc is the safer approach, for two reasons:
 * 1) it prevents non-fifo qdiscs from breaking the TX notification
 *    scheme, which is based on mbuf destructors when txqdisc is
 *    not used.
 * 2) it makes it possible to transmit over software devices that
 *    change skb->dev, like bridge, veth, ...
 *
 * In any case, users looking for the best performance should
 * use native adapters.
 */
#ifdef linux
int netmap_generic_txqdisc = 1;
#endif

/* Default number of slots and queues for generic adapters. */
int netmap_generic_ringsize = 1024;
int netmap_generic_rings = 1;

/* Non-zero to enable checksum offloading in NIC drivers */
int netmap_generic_hwcsum = 0;

/* Non-zero if ptnet devices are allowed to use virtio-net headers. */
int ptnet_vnet_hdr = 1;

/*
 * SYSCTL calls are grouped between SYSBEGIN and SYSEND so that they
 * can be emulated on other operating systems
 */
SYSBEGIN(main_init);

SYSCTL_DECL(_dev_netmap);
SYSCTL_NODE(_dev, OID_AUTO, netmap, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Netmap args");
SYSCTL_INT(_dev_netmap, OID_AUTO, verbose,
		CTLFLAG_RW, &netmap_verbose, 0, "Verbose mode");
#ifdef CONFIG_NETMAP_DEBUG
SYSCTL_INT(_dev_netmap, OID_AUTO, debug,
		CTLFLAG_RW, &netmap_debug, 0, "Debug messages");
#endif /* CONFIG_NETMAP_DEBUG */
SYSCTL_INT(_dev_netmap, OID_AUTO, no_timestamp,
		CTLFLAG_RW, &netmap_no_timestamp, 0, "no_timestamp");
SYSCTL_INT(_dev_netmap, OID_AUTO, no_pendintr, CTLFLAG_RW, &netmap_no_pendintr,
		0, "Always look for new received packets.");
SYSCTL_INT(_dev_netmap, OID_AUTO, txsync_retry, CTLFLAG_RW,
		&netmap_txsync_retry, 0, "Number of txsync loops in bridge's flush.");

SYSCTL_INT(_dev_netmap, OID_AUTO, fwd, CTLFLAG_RW, &netmap_fwd, 0,
		"Force NR_FORWARD mode");
SYSCTL_INT(_dev_netmap, OID_AUTO, admode, CTLFLAG_RW, &netmap_admode, 0,
		"Adapter mode. 0 selects the best option available, "
		"1 forces native adapter, 2 forces emulated adapter");
SYSCTL_INT(_dev_netmap, OID_AUTO, generic_hwcsum, CTLFLAG_RW, &netmap_generic_hwcsum,
		0, "Hardware checksums. 0 to disable checksum generation by the NIC (default), "
		"1 to enable checksum generation by the NIC");
SYSCTL_INT(_dev_netmap, OID_AUTO, generic_mit, CTLFLAG_RW, &netmap_generic_mit,
		0, "RX notification interval in nanoseconds");
SYSCTL_INT(_dev_netmap, OID_AUTO, generic_ringsize, CTLFLAG_RW,
		&netmap_generic_ringsize, 0,
		"Number of per-ring slots for emulated netmap mode");
SYSCTL_INT(_dev_netmap, OID_AUTO, generic_rings, CTLFLAG_RW,
		&netmap_generic_rings, 0,
		"Number of TX/RX queues for emulated netmap adapters");
#ifdef linux
SYSCTL_INT(_dev_netmap, OID_AUTO, generic_txqdisc, CTLFLAG_RW,
		&netmap_generic_txqdisc, 0, "Use qdisc for generic adapters");
#endif
SYSCTL_INT(_dev_netmap, OID_AUTO, ptnet_vnet_hdr, CTLFLAG_RW, &ptnet_vnet_hdr,
		0, "Allow ptnet devices to use virtio-net headers");

SYSEND;

NMG_LOCK_T	netmap_global_lock;

/*
 * mark the ring as stopped, and run through the locks
 * to make sure other users get to see it.
 * stopped must be either NM_KR_STOPPED (for an unbounded stop)
 * or NM_KR_LOCKED (a brief stop for mutual exclusion purposes)
 */
static void
netmap_disable_ring(struct netmap_kring *kr, int stopped)
{
	nm_kr_stop(kr, stopped);
	// XXX check if nm_kr_stop is sufficient
	mtx_lock(&kr->q_lock);
	mtx_unlock(&kr->q_lock);
	nm_kr_put(kr);
}

/* stop or enable a single ring */
void
netmap_set_ring(struct netmap_adapter *na, u_int ring_id, enum txrx t, int stopped)
{
	if (stopped)
		netmap_disable_ring(NMR(na, t)[ring_id], stopped);
	else
		NMR(na, t)[ring_id]->nkr_stopped = 0;
}


/* stop or enable all the rings of na */
void
netmap_set_all_rings(struct netmap_adapter *na, int stopped)
{
	int i;
	enum txrx t;

	if (!nm_netmap_on(na))
		return;

	if (netmap_verbose) {
		nm_prinf("%s: %sable all rings", na->name,
		    (stopped ? "dis" : "en"));
	}
	for_rx_tx(t) {
		for (i = 0; i < netmap_real_rings(na, t); i++) {
			netmap_set_ring(na, i, t, stopped);
		}
	}
}

/*
 * Convenience function used in drivers.  Waits for current txsync()s/rxsync()s
 * to finish and prevents any new one from starting.  Call this before turning
 * netmap mode off, or before removing the hardware rings (e.g., on module
 * unload).
 */
void
netmap_disable_all_rings(struct ifnet *ifp)
{
	if (NM_NA_VALID(ifp)) {
		netmap_set_all_rings(NA(ifp), NM_KR_LOCKED);
	}
}

/*
 * Convenience function used in drivers.  Re-enables rxsync and txsync on the
 * adapter's rings.  In linux drivers, this should be placed near each
 * napi_enable().
 */
void
netmap_enable_all_rings(struct ifnet *ifp)
{
	if (NM_NA_VALID(ifp)) {
		netmap_set_all_rings(NA(ifp), 0 /* enabled */);
	}
}
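
/*
 * Typical driver-side usage of the two helpers above, when the NIC must
 * be reset while in netmap mode (a sketch; DEVICE_reinit and the softc
 * layout are hypothetical):
 *
 *	static void
 *	DEVICE_reinit(struct DEVICE_softc *sc)
 *	{
 *		netmap_disable_all_rings(sc->ifp);
 *		// ... reinitialize the hardware rings ...
 *		netmap_enable_all_rings(sc->ifp);
 *	}
 */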

void
netmap_make_zombie(struct ifnet *ifp)
{
	if (NM_NA_VALID(ifp)) {
		struct netmap_adapter *na = NA(ifp);
		netmap_set_all_rings(na, NM_KR_LOCKED);
		na->na_flags |= NAF_ZOMBIE;
		netmap_set_all_rings(na, 0);
	}
}

void
netmap_undo_zombie(struct ifnet *ifp)
{
	if (NM_NA_VALID(ifp)) {
		struct netmap_adapter *na = NA(ifp);
		if (na->na_flags & NAF_ZOMBIE) {
			netmap_set_all_rings(na, NM_KR_LOCKED);
			na->na_flags &= ~NAF_ZOMBIE;
			netmap_set_all_rings(na, 0);
		}
	}
}

/*
 * generic bound-checking function
 */
u_int
nm_bound_var(u_int *v, u_int dflt, u_int lo, u_int hi, const char *msg)
{
	u_int oldv = *v;
	const char *op = NULL;

	if (dflt < lo)
		dflt = lo;
	if (dflt > hi)
		dflt = hi;
	if (oldv < lo) {
		*v = dflt;
		op = "Bump";
	} else if (oldv > hi) {
		*v = hi;
		op = "Clamp";
	}
	if (op && msg)
		nm_prinf("%s %s to %d (was %d)", op, msg, *v, oldv);
	return *v;
}
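
/*
 * Example use (hypothetical values): validate a user-supplied number of
 * slots against an adapter's limits, bumping too-small values to a
 * default and clamping too-large ones:
 *
 *	u_int slots = requested_slots;
 *	nm_bound_var(&slots, 1024, 64, 4096, "number of slots");
 *	// slots is now in [64, 4096]; values below 64 became 1024
 */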


/*
 * packet-dump function, user-supplied or static buffer.
 * The destination buffer must be at least 30+4*len
 */
const char *
nm_dump_buf(char *p, int len, int lim, char *dst)
{
	static char _dst[8192];
	int i, j, i0;
	static char hex[] ="0123456789abcdef";
	char *o;	/* output position */

#define P_HI(x)	hex[((x) & 0xf0)>>4]
#define P_LO(x)	hex[((x) & 0xf)]
#define P_C(x)	((x) >= 0x20 && (x) <= 0x7e ? (x) : '.')
	if (!dst)
		dst = _dst;
	if (lim <= 0 || lim > len)
		lim = len;
	o = dst;
	sprintf(o, "buf 0x%p len %d lim %d\n", p, len, lim);
	o += strlen(o);
	/* hexdump routine */
	for (i = 0; i < lim; ) {
		sprintf(o, "%5d: ", i);
		o += strlen(o);
		memset(o, ' ', 48);
		i0 = i;
		for (j=0; j < 16 && i < lim; i++, j++) {
			o[j*3] = P_HI(p[i]);
			o[j*3+1] = P_LO(p[i]);
		}
		i = i0;
		for (j=0; j < 16 && i < lim; i++, j++)
			o[j + 48] = P_C(p[i]);
		o[j+48] = '\n';
		o += j+49;
	}
	*o = '\0';
#undef P_HI
#undef P_LO
#undef P_C
	return dst;
}
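
/*
 * For example, this is how the host rxsync path below dumps the first
 * 128 bytes of a received buffer when host debugging is enabled:
 *
 *	nm_prinf("%s", nm_dump_buf(NMB(na, slot), len, 128, NULL));
 *
 * (passing NULL as dst selects the static internal buffer)
 */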


/*
 * Fetch configuration from the device, to cope with dynamic
 * reconfigurations after loading the module.
 */
/* call with NMG_LOCK held */
int
netmap_update_config(struct netmap_adapter *na)
{
	struct nm_config_info info;

	bzero(&info, sizeof(info));
	if (na->nm_config == NULL ||
	    na->nm_config(na, &info)) {
		/* take whatever we had at init time */
		info.num_tx_rings = na->num_tx_rings;
		info.num_tx_descs = na->num_tx_desc;
		info.num_rx_rings = na->num_rx_rings;
		info.num_rx_descs = na->num_rx_desc;
		info.rx_buf_maxsize = na->rx_buf_maxsize;
	}

	if (na->num_tx_rings == info.num_tx_rings &&
	    na->num_tx_desc == info.num_tx_descs &&
	    na->num_rx_rings == info.num_rx_rings &&
	    na->num_rx_desc == info.num_rx_descs &&
	    na->rx_buf_maxsize == info.rx_buf_maxsize)
		return 0; /* nothing changed */
	if (na->active_fds == 0) {
		na->num_tx_rings = info.num_tx_rings;
		na->num_tx_desc = info.num_tx_descs;
		na->num_rx_rings = info.num_rx_rings;
		na->num_rx_desc = info.num_rx_descs;
		na->rx_buf_maxsize = info.rx_buf_maxsize;
		if (netmap_verbose)
			nm_prinf("configuration changed for %s: txring %d x %d, "
				"rxring %d x %d, rxbufsz %d",
				na->name, na->num_tx_rings, na->num_tx_desc,
				na->num_rx_rings, na->num_rx_desc, na->rx_buf_maxsize);
		return 0;
	}
	nm_prerr("WARNING: configuration changed for %s while active: "
		"txring %d x %d, rxring %d x %d, rxbufsz %d",
		na->name, info.num_tx_rings, info.num_tx_descs,
		info.num_rx_rings, info.num_rx_descs,
		info.rx_buf_maxsize);
	return 1;
}

/* nm_sync callbacks for the host rings */
static int netmap_txsync_to_host(struct netmap_kring *kring, int flags);
static int netmap_rxsync_from_host(struct netmap_kring *kring, int flags);

/* create the krings array and initialize the fields common to all adapters.
 * The array layout is this:
 *
 *                    +----------+
 * na->tx_rings ----->|          | \
 *                    |          |  } na->num_tx_rings
 *                    |          | /
 *                    +----------+
 *                    |          |    host tx kring
 * na->rx_rings ----> +----------+
 *                    |          | \
 *                    |          |  } na->num_rx_rings
 *                    |          | /
 *                    +----------+
 *                    |          |    host rx kring
 *                    +----------+
 * na->tailroom ----->|          | \
 *                    |          |  } tailroom bytes
 *                    |          | /
 *                    +----------+
 *
 * Note: for compatibility, host krings are created even when not needed.
 * The tailroom space is currently used by vale ports for allocating leases.
 */
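/*
 * Given the layout above, for example, the i-th hardware RX kring is
 * na->rx_rings[i], and the first host RX kring (if any) is
 * na->rx_rings[nma_get_nrings(na, NR_RX)], i.e. it sits right past the
 * hardware krings in the same pointer array.
 */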
/* call with NMG_LOCK held */
int
netmap_krings_create(struct netmap_adapter *na, u_int tailroom)
{
	u_int i, len, ndesc;
	struct netmap_kring *kring;
	u_int n[NR_TXRX];
	enum txrx t;
	int err = 0;

	if (na->tx_rings != NULL) {
		if (netmap_debug & NM_DEBUG_ON)
			nm_prerr("warning: krings were already created");
		return 0;
	}

	/* account for the (possibly fake) host rings */
	n[NR_TX] = netmap_all_rings(na, NR_TX);
	n[NR_RX] = netmap_all_rings(na, NR_RX);

	len = (n[NR_TX] + n[NR_RX]) *
		(sizeof(struct netmap_kring) + sizeof(struct netmap_kring *))
		+ tailroom;

	na->tx_rings = nm_os_malloc((size_t)len);
	if (na->tx_rings == NULL) {
		nm_prerr("Cannot allocate krings");
		return ENOMEM;
	}
	na->rx_rings = na->tx_rings + n[NR_TX];
	na->tailroom = na->rx_rings + n[NR_RX];

	/* link the krings in the krings array */
	kring = (struct netmap_kring *)((char *)na->tailroom + tailroom);
	for (i = 0; i < n[NR_TX] + n[NR_RX]; i++) {
		na->tx_rings[i] = kring;
		kring++;
	}

	/*
	 * All fields in krings are 0 except the ones initialized below,
	 * but better be explicit on important kring fields.
	 */
	for_rx_tx(t) {
		ndesc = nma_get_ndesc(na, t);
		for (i = 0; i < n[t]; i++) {
			kring = NMR(na, t)[i];
			bzero(kring, sizeof(*kring));
			kring->notify_na = na;
			kring->ring_id = i;
			kring->tx = t;
			kring->nkr_num_slots = ndesc;
			kring->nr_mode = NKR_NETMAP_OFF;
			kring->nr_pending_mode = NKR_NETMAP_OFF;
			if (i < nma_get_nrings(na, t)) {
				kring->nm_sync = (t == NR_TX ? na->nm_txsync : na->nm_rxsync);
			} else {
				if (!(na->na_flags & NAF_HOST_RINGS))
					kring->nr_kflags |= NKR_FAKERING;
				kring->nm_sync = (t == NR_TX ?
						netmap_txsync_to_host:
						netmap_rxsync_from_host);
			}
			kring->nm_notify = na->nm_notify;
			kring->rhead = kring->rcur = kring->nr_hwcur = 0;
			/*
			 * IMPORTANT: Always keep one slot empty.
			 */
			kring->rtail = kring->nr_hwtail = (t == NR_TX ? ndesc - 1 : 0);
			snprintf(kring->name, sizeof(kring->name) - 1, "%s %s%d", na->name,
					nm_txrx2str(t), i);
			nm_prdis("ktx %s h %d c %d t %d",
				kring->name, kring->rhead, kring->rcur, kring->rtail);
			err = nm_os_selinfo_init(&kring->si, kring->name);
			if (err) {
				netmap_krings_delete(na);
				return err;
			}
			mtx_init(&kring->q_lock, (t == NR_TX ? "nm_txq_lock" : "nm_rxq_lock"), NULL, MTX_DEF);
			kring->na = na;	/* setting this field marks the mutex as initialized */
		}
		err = nm_os_selinfo_init(&na->si[t], na->name);
		if (err) {
			netmap_krings_delete(na);
			return err;
		}
	}

	return 0;
}


/* undo the actions performed by netmap_krings_create */
/* call with NMG_LOCK held */
void
netmap_krings_delete(struct netmap_adapter *na)
{
	struct netmap_kring **kring = na->tx_rings;
	enum txrx t;

	if (na->tx_rings == NULL) {
		if (netmap_debug & NM_DEBUG_ON)
			nm_prerr("warning: krings were already deleted");
		return;
	}

	for_rx_tx(t)
		nm_os_selinfo_uninit(&na->si[t]);

	/* we rely on the krings layout described above */
	for ( ; kring != na->tailroom; kring++) {
		if ((*kring)->na != NULL)
			mtx_destroy(&(*kring)->q_lock);
		nm_os_selinfo_uninit(&(*kring)->si);
	}
	nm_os_free(na->tx_rings);
	na->tx_rings = na->rx_rings = na->tailroom = NULL;
}


/*
 * Destructor for NIC ports. They also have an mbuf queue
 * on the rings connected to the host so we need to purge
 * them first.
 */
/* call with NMG_LOCK held */
void
netmap_hw_krings_delete(struct netmap_adapter *na)
{
	u_int lim = netmap_real_rings(na, NR_RX), i;

	for (i = nma_get_nrings(na, NR_RX); i < lim; i++) {
		struct mbq *q = &NMR(na, NR_RX)[i]->rx_queue;
		nm_prdis("destroy sw mbq with len %d", mbq_len(q));
		mbq_purge(q);
		mbq_safe_fini(q);
	}
	netmap_krings_delete(na);
}

static void
netmap_mem_drop(struct netmap_adapter *na)
{
	int last = netmap_mem_deref(na->nm_mem, na);
	/* if the native allocator had been overridden on regif,
	 * restore it now and drop the temporary one
	 */
	if (last && na->nm_mem_prev) {
		netmap_mem_put(na->nm_mem);
		na->nm_mem = na->nm_mem_prev;
		na->nm_mem_prev = NULL;
	}
}

/*
 * Undo everything that was done in netmap_do_regif(). In particular,
 * call nm_register(ifp,0) to stop netmap mode on the interface and
 * revert to normal operation.
 */
/* call with NMG_LOCK held */
static void netmap_unset_ringid(struct netmap_priv_d *);
static void netmap_krings_put(struct netmap_priv_d *);
void
netmap_do_unregif(struct netmap_priv_d *priv)
{
	struct netmap_adapter *na = priv->np_na;

	NMG_LOCK_ASSERT();
	na->active_fds--;
	/* unset nr_pending_mode and possibly release exclusive mode */
	netmap_krings_put(priv);

#ifdef	WITH_MONITOR
	/* XXX check whether we have to do something with monitor
	 * when rings change nr_mode. */
	if (na->active_fds <= 0) {
		/* walk through all the rings and tell any monitor
		 * that the port is going to exit netmap mode
		 */
		netmap_monitor_stop(na);
	}
#endif

	if (na->active_fds <= 0 || nm_kring_pending(priv)) {
		na->nm_register(na, 0);
	}

	/* delete rings and buffers that are no longer needed */
	netmap_mem_rings_delete(na);

	if (na->active_fds <= 0) {	/* last instance */
		/*
		 * (TO CHECK) We enter here
		 * when the last reference to this file descriptor goes
		 * away. This means we cannot have any pending poll()
		 * or interrupt routine operating on the structure.
		 * XXX The file may be closed in a thread while
		 * another thread is using it.
		 * Linux keeps the file opened until the last reference
		 * by any outstanding ioctl/poll or mmap is gone.
		 * FreeBSD does not track mmap()s (but we do) and
		 * wakes up any sleeping poll(). Need to check what
		 * happens if the close() occurs while a concurrent
		 * syscall is running.
		 */
		if (netmap_debug & NM_DEBUG_ON)
			nm_prinf("deleting last instance for %s", na->name);

		if (nm_netmap_on(na)) {
			nm_prerr("BUG: netmap on while going to delete the krings");
		}

		na->nm_krings_delete(na);

		/* restore the default number of host tx and rx rings */
		if (na->na_flags & NAF_HOST_RINGS) {
			na->num_host_tx_rings = 1;
			na->num_host_rx_rings = 1;
		} else {
			na->num_host_tx_rings = 0;
			na->num_host_rx_rings = 0;
		}
	}

	/* possibly decrement counter of tx_si/rx_si users */
	netmap_unset_ringid(priv);
	/* delete the nifp */
	netmap_mem_if_delete(na, priv->np_nifp);
	/* drop the allocator */
	netmap_mem_drop(na);
	/* mark the priv as unregistered */
	priv->np_na = NULL;
	priv->np_nifp = NULL;
}

struct netmap_priv_d*
netmap_priv_new(void)
{
	struct netmap_priv_d *priv;

	priv = nm_os_malloc(sizeof(struct netmap_priv_d));
	if (priv == NULL)
		return NULL;
	priv->np_refs = 1;
	nm_os_get_module();
	return priv;
}

/*
 * Destructor of the netmap_priv_d, called when the fd is closed.
 * Action: undo all the things done by NIOCREGIF.
 * On FreeBSD we need to track whether there are active mmap()s,
 * and we use np_active_mmaps for that. On linux, the field is always 0.
 */
/* call with NMG_LOCK held */
void
netmap_priv_delete(struct netmap_priv_d *priv)
{
	struct netmap_adapter *na = priv->np_na;

	/* number of active references to this fd */
	if (--priv->np_refs > 0) {
		return;
	}
	nm_os_put_module();
	if (na) {
		netmap_do_unregif(priv);
	}
	netmap_unget_na(na, priv->np_ifp);
	bzero(priv, sizeof(*priv));	/* for safety */
	nm_os_free(priv);
}


/* call with NMG_LOCK *not* held */
void
netmap_dtor(void *data)
{
	struct netmap_priv_d *priv = data;

	NMG_LOCK();
	netmap_priv_delete(priv);
	NMG_UNLOCK();
}


/*
 * Handlers for synchronization of the rings from/to the host stack.
 * These are associated to a network interface and are just another
 * ring pair managed by userspace.
 *
 * Netmap also supports transparent forwarding (NS_FORWARD and NR_FORWARD
 * flags):
 *
 * - Before releasing buffers on hw RX rings, the application can mark
 *   them with the NS_FORWARD flag. During the next RXSYNC or poll(), they
 *   will be forwarded to the host stack, similarly to what would happen if
 *   the application moved them to the host TX ring.
 *
 * - Before releasing buffers on the host RX ring, the application can
 *   mark them with the NS_FORWARD flag. During the next RXSYNC or poll(),
 *   they will be forwarded to the hw TX rings, saving the application
 *   from doing the same task in user-space.
 *
 * Transparent forwarding can be enabled per-ring, by setting the NR_FORWARD
 * flag, or globally with the netmap_fwd sysctl.
 *
 * The transfer NIC --> host is relatively easy, just encapsulate
 * into mbufs and we are done. The host --> NIC side is slightly
 * harder because there might not be room in the tx ring so it
 * might take a while before releasing the buffer.
 */
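
/*
 * For example, a userspace application can ask for a received packet to
 * be passed up to the host stack with something like (a sketch; fd and
 * i identify the descriptor and the slot being released):
 *
 *	ring->slot[i].flags |= NS_FORWARD;
 *	ring->head = ring->cur = nm_ring_next(ring, i);
 *	ioctl(fd, NIOCRXSYNC, NULL);
 *
 * provided NR_FORWARD is set in ring->flags or the netmap_fwd sysctl
 * is enabled.
 */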


/*
 * Pass a whole queue of mbufs to the host stack as coming from 'dst'.
 * We do not need to lock because the queue is private.
 * After this call the queue is empty.
 */
static void
netmap_send_up(struct ifnet *dst, struct mbq *q)
{
	struct mbuf *m;
	struct mbuf *head = NULL, *prev = NULL;
#ifdef __FreeBSD__
	struct epoch_tracker et;

	NET_EPOCH_ENTER(et);
#endif /* __FreeBSD__ */
	/* Send packets up, outside the lock; head/prev machinery
	 * is only useful for Windows. */
	while ((m = mbq_dequeue(q)) != NULL) {
		if (netmap_debug & NM_DEBUG_HOST)
			nm_prinf("sending up pkt %p size %d", m, MBUF_LEN(m));
		prev = nm_os_send_up(dst, m, prev);
		if (head == NULL)
			head = prev;
	}
	if (head)
		nm_os_send_up(dst, NULL, head);
#ifdef __FreeBSD__
	NET_EPOCH_EXIT(et);
#endif /* __FreeBSD__ */
	mbq_fini(q);
}


/*
 * Scan the buffers from hwcur to ring->head, and put a copy of those
 * marked NS_FORWARD (or all of them if forced) into a queue of mbufs.
 * Drop remaining packets in the unlikely event
 * of an mbuf shortage.
 */
static void
netmap_grab_packets(struct netmap_kring *kring, struct mbq *q, int force)
{
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	u_int n;
	struct netmap_adapter *na = kring->na;

	for (n = kring->nr_hwcur; n != head; n = nm_next(n, lim)) {
		struct mbuf *m;
		struct netmap_slot *slot = &kring->ring->slot[n];

		if ((slot->flags & NS_FORWARD) == 0 && !force)
			continue;
		if (slot->len < 14 || slot->len > NETMAP_BUF_SIZE(na)) {
			nm_prlim(5, "bad pkt at %d len %d", n, slot->len);
			continue;
		}
		slot->flags &= ~NS_FORWARD; // XXX needed ?
		/* XXX TODO: adapt to the case of a multisegment packet */
		m = m_devget(NMB(na, slot), slot->len, 0, na->ifp, NULL);

		if (m == NULL)
			break;
		mbq_enqueue(q, m);
	}
}

static inline int
_nm_may_forward(struct netmap_kring *kring)
{
	return	((netmap_fwd || kring->ring->flags & NR_FORWARD) &&
		 kring->na->na_flags & NAF_HOST_RINGS &&
		 kring->tx == NR_RX);
}

static inline int
nm_may_forward_up(struct netmap_kring *kring)
{
	return	_nm_may_forward(kring) &&
		 kring->ring_id != kring->na->num_rx_rings;
}

static inline int
nm_may_forward_down(struct netmap_kring *kring, int sync_flags)
{
	return	_nm_may_forward(kring) &&
		 (sync_flags & NAF_CAN_FORWARD_DOWN) &&
		 kring->ring_id == kring->na->num_rx_rings;
}

/*
 * Send to the NIC rings packets marked NS_FORWARD between
 * kring->nr_hwcur and kring->rhead.
 * Called under kring->rx_queue.lock on the sw rx ring.
 *
 * It can only be called if the user opened all the TX hw rings,
 * see NAF_CAN_FORWARD_DOWN flag.
 * We can touch the TX netmap rings (slots, head and cur) since
 * we are in poll/ioctl system call context, and the application
 * is not supposed to touch the ring (using a different thread)
 * during the execution of the system call.
 */
static u_int
netmap_sw_to_nic(struct netmap_adapter *na)
{
	struct netmap_kring *kring = na->rx_rings[na->num_rx_rings];
	struct netmap_slot *rxslot = kring->ring->slot;
	u_int i, rxcur = kring->nr_hwcur;
	u_int const head = kring->rhead;
	u_int const src_lim = kring->nkr_num_slots - 1;
	u_int sent = 0;

	/* scan rings to find space, then fill as much as possible */
	for (i = 0; i < na->num_tx_rings; i++) {
		struct netmap_kring *kdst = na->tx_rings[i];
		struct netmap_ring *rdst = kdst->ring;
		u_int const dst_lim = kdst->nkr_num_slots - 1;

		/* XXX do we trust ring or kring->rcur,rtail ? */
		for (; rxcur != head && !nm_ring_empty(rdst);
		     rxcur = nm_next(rxcur, src_lim) ) {
			struct netmap_slot *src, *dst, tmp;
			u_int dst_head = rdst->head;

			src = &rxslot[rxcur];
			if ((src->flags & NS_FORWARD) == 0 && !netmap_fwd)
				continue;

			sent++;

			dst = &rdst->slot[dst_head];

			tmp = *src;

			src->buf_idx = dst->buf_idx;
			src->flags = NS_BUF_CHANGED;

			dst->buf_idx = tmp.buf_idx;
			dst->len = tmp.len;
			dst->flags = NS_BUF_CHANGED;

			rdst->head = rdst->cur = nm_next(dst_head, dst_lim);
		}
		/* if (sent) XXX txsync ? it would be just an optimization */
	}
	return sent;
}


/*
 * netmap_txsync_to_host() passes packets up. We are called from a
 * system call in user process context, and the only contention
 * can be among multiple user threads erroneously calling
 * this routine concurrently.
 */
static int
netmap_txsync_to_host(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	struct mbq q;

	/* Take packets from hwcur to head and pass them up.
	 * Force hwcur = head since netmap_grab_packets() stops at head
	 */
	mbq_init(&q);
	netmap_grab_packets(kring, &q, 1 /* force */);
	nm_prdis("have %d pkts in queue", mbq_len(&q));
	kring->nr_hwcur = head;
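	/* hwtail becomes head + lim modulo the ring size, i.e. the slot
	 * just before head: the whole ring is again available to
	 * userspace (one slot is always kept empty) */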
	kring->nr_hwtail = head + lim;
	if (kring->nr_hwtail > lim)
		kring->nr_hwtail -= lim + 1;

	netmap_send_up(na->ifp, &q);
	return 0;
}


/*
 * rxsync backend for packets coming from the host stack.
 * They have been put in kring->rx_queue by netmap_transmit().
 * We protect access to the kring using kring->rx_queue.lock
 *
 * also moves to the nic hw rings any packet the user has marked
 * for transparent-mode forwarding, then sets the NR_FORWARD
 * flag in the kring to let the caller push them out
 */
static int
netmap_rxsync_from_host(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct netmap_ring *ring = kring->ring;
	u_int nm_i, n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	int ret = 0;
	struct mbq *q = &kring->rx_queue, fq;

	mbq_init(&fq); /* fq holds packets to be freed */

	mbq_lock(q);

	/* First part: import newly received packets */
	n = mbq_len(q);
	if (n) { /* grab packets from the queue */
		struct mbuf *m;
		uint32_t stop_i;

		nm_i = kring->nr_hwtail;
		stop_i = nm_prev(kring->nr_hwcur, lim);
		while ( nm_i != stop_i && (m = mbq_dequeue(q)) != NULL ) {
			int len = MBUF_LEN(m);
			struct netmap_slot *slot = &ring->slot[nm_i];

			m_copydata(m, 0, len, NMB(na, slot));
			nm_prdis("nm %d len %d", nm_i, len);
			if (netmap_debug & NM_DEBUG_HOST)
				nm_prinf("%s", nm_dump_buf(NMB(na, slot),len, 128, NULL));

			slot->len = len;
			slot->flags = 0;
			nm_i = nm_next(nm_i, lim);
			mbq_enqueue(&fq, m);
		}
		kring->nr_hwtail = nm_i;
	}

	/*
	 * Second part: skip past packets that userspace has released.
	 */
	nm_i = kring->nr_hwcur;
	if (nm_i != head) { /* something was released */
		if (nm_may_forward_down(kring, flags)) {
			ret = netmap_sw_to_nic(na);
			if (ret > 0) {
				kring->nr_kflags |= NR_FORWARD;
				ret = 0;
			}
		}
		kring->nr_hwcur = head;
	}

	mbq_unlock(q);

	mbq_purge(&fq);
	mbq_fini(&fq);

	return ret;
}


/* Get a netmap adapter for the port.
 *
 * If it is possible to satisfy the request, return 0
 * with *na containing the netmap adapter found.
 * Otherwise return an error code, with *na containing NULL.
 *
 * When the port is attached to a bridge, we always return
 * EBUSY.
 * Otherwise, if the port is already bound to a file descriptor,
 * then we unconditionally return the existing adapter into *na.
 * In all the other cases, we return (into *na) either native,
 * generic or NULL, according to the following table:
 *
 *					native_support
 * active_fds   dev.netmap.admode         YES     NO
 * -------------------------------------------------------
 *    >0              *                 NA(ifp) NA(ifp)
 *
 *     0        NETMAP_ADMODE_BEST      NATIVE  GENERIC
 *     0        NETMAP_ADMODE_NATIVE    NATIVE   NULL
 *     0        NETMAP_ADMODE_GENERIC   GENERIC GENERIC
 *
 */
static void netmap_hw_dtor(struct netmap_adapter *); /* needed by NM_IS_NATIVE() */
int
netmap_get_hw_na(struct ifnet *ifp, struct netmap_mem_d *nmd, struct netmap_adapter **na)
{
	/* generic support */
	int i = netmap_admode;	/* Take a snapshot. */
	struct netmap_adapter *prev_na;
	int error = 0;

	*na = NULL; /* default */

	/* reset in case of invalid value */
	if (i < NETMAP_ADMODE_BEST || i >= NETMAP_ADMODE_LAST)
		i = netmap_admode = NETMAP_ADMODE_BEST;

	if (NM_NA_VALID(ifp)) {
		prev_na = NA(ifp);
		/* If an adapter already exists, return it if
		 * there are active file descriptors or if
		 * netmap is not forced to use generic
		 * adapters.
		 */
		if (NETMAP_OWNED_BY_ANY(prev_na)
			|| i != NETMAP_ADMODE_GENERIC
			|| prev_na->na_flags & NAF_FORCE_NATIVE
#ifdef WITH_PIPES
			/* ugly, but we cannot allow an adapter switch
			 * if some pipe is referring to this one
			 */
			|| prev_na->na_next_pipe > 0
#endif
		) {
			*na = prev_na;
			goto assign_mem;
		}
	}

	/* If there isn't native support and netmap is not allowed
	 * to use generic adapters, we cannot satisfy the request.
	 */
	if (!NM_IS_NATIVE(ifp) && i == NETMAP_ADMODE_NATIVE)
		return EOPNOTSUPP;

	/* Otherwise, create a generic adapter and return it,
	 * saving the previously used netmap adapter, if any.
	 *
	 * Note that here 'prev_na', if not NULL, MUST be a
	 * native adapter, and CANNOT be a generic one. This is
	 * true because generic adapters are created on demand, and
	 * destroyed when not used anymore. Therefore, if the adapter
	 * currently attached to an interface 'ifp' is generic, it
	 * must be that
	 * (NA(ifp)->active_fds > 0 || NETMAP_OWNED_BY_KERN(NA(ifp))).
	 * Consequently, if NA(ifp) is generic, we will enter one of
	 * the branches above. This ensures that we never override
	 * a generic adapter with another generic adapter.
	 */
	error = generic_netmap_attach(ifp);
	if (error)
		return error;

	*na = NA(ifp);

assign_mem:
	if (nmd != NULL && !((*na)->na_flags & NAF_MEM_OWNER) &&
	    (*na)->active_fds == 0 && ((*na)->nm_mem != nmd)) {
		(*na)->nm_mem_prev = (*na)->nm_mem;
		(*na)->nm_mem = netmap_mem_get(nmd);
	}

	return 0;
}
1495 
1496 /*
1497  * MUST BE CALLED UNDER NMG_LOCK()
1498  *
1499  * Get a refcounted reference to a netmap adapter attached
1500  * to the interface specified by req.
1501  * This is always called in the execution of an ioctl().
1502  *
1503  * Return ENXIO if the interface specified by the request does
1504  * not exist, ENOTSUP if netmap is not supported by the interface,
1505  * EBUSY if the interface is already attached to a bridge,
1506  * EINVAL if parameters are invalid, ENOMEM if needed resources
1507  * could not be allocated.
1508  * If successful, hold a reference to the netmap adapter.
1509  *
1510  * If the interface specified by req is a system one, also keep
1511  * a reference to it and return a valid *ifp.
1512  */
1513 int
1514 netmap_get_na(struct nmreq_header *hdr,
1515 	      struct netmap_adapter **na, struct ifnet **ifp,
1516 	      struct netmap_mem_d *nmd, int create)
1517 {
1518 	struct nmreq_register *req = (struct nmreq_register *)(uintptr_t)hdr->nr_body;
1519 	int error = 0;
1520 	struct netmap_adapter *ret = NULL;
1521 	int nmd_ref = 0;
1522 
1523 	*na = NULL;     /* default return value */
1524 	*ifp = NULL;
1525 
1526 	if (hdr->nr_reqtype != NETMAP_REQ_REGISTER) {
1527 		return EINVAL;
1528 	}
1529 
1530 	if (req->nr_mode == NR_REG_PIPE_MASTER ||
1531 			req->nr_mode == NR_REG_PIPE_SLAVE) {
1532 		/* Do not accept deprecated pipe modes. */
1533 		nm_prerr("Deprecated pipe nr_mode, use xx{yy or xx}yy syntax");
1534 		return EINVAL;
1535 	}
1536 
1537 	NMG_LOCK_ASSERT();
1538 
1539 	/* if the request contain a memid, try to find the
1540 	 * corresponding memory region
1541 	 */
1542 	if (nmd == NULL && req->nr_mem_id) {
1543 		nmd = netmap_mem_find(req->nr_mem_id);
1544 		if (nmd == NULL)
1545 			return EINVAL;
1546 		/* keep the reference */
1547 		nmd_ref = 1;
1548 	}
1549 
1550 	/* We cascade through all possible types of netmap adapter.
1551 	 * All netmap_get_*_na() functions return an error and an na,
1552 	 * with the following combinations:
1553 	 *
1554 	 * error    na
1555 	 *   0	   NULL		type doesn't match
1556 	 *  !0	   NULL		type matches, but na creation/lookup failed
1557 	 *   0	  !NULL		type matches and na created/found
1558 	 *  !0    !NULL		impossible
1559 	 */
1560 	error = netmap_get_null_na(hdr, na, nmd, create);
1561 	if (error || *na != NULL)
1562 		goto out;
1563 
1564 	/* try to see if this is a monitor port */
1565 	error = netmap_get_monitor_na(hdr, na, nmd, create);
1566 	if (error || *na != NULL)
1567 		goto out;
1568 
1569 	/* try to see if this is a pipe port */
1570 	error = netmap_get_pipe_na(hdr, na, nmd, create);
1571 	if (error || *na != NULL)
1572 		goto out;
1573 
1574 	/* try to see if this is a bridge port */
1575 	error = netmap_get_vale_na(hdr, na, nmd, create);
1576 	if (error)
1577 		goto out;
1578 
1579 	if (*na != NULL) /* valid match in netmap_get_vale_na() */
1580 		goto out;
1581 
1582 	/*
1583 	 * This must be a hardware na, lookup the name in the system.
1584 	 * Note that by hardware we actually mean "it shows up in ifconfig".
1585 	 * This may still be a tap, a veth/epair, or even a
1586 	 * persistent VALE port.
1587 	 */
1588 	*ifp = ifunit_ref(hdr->nr_name);
1589 	if (*ifp == NULL) {
1590 		error = ENXIO;
1591 		goto out;
1592 	}
1593 
1594 	error = netmap_get_hw_na(*ifp, nmd, &ret);
1595 	if (error)
1596 		goto out;
1597 
1598 	*na = ret;
1599 	netmap_adapter_get(ret);
1600 
1601 	/*
1602 	 * if the adapter supports the host rings and it is not already open,
1603 	 * try to set the number of host rings as requested by the user
1604 	 */
1605 	if (((*na)->na_flags & NAF_HOST_RINGS) && (*na)->active_fds == 0) {
1606 		if (req->nr_host_tx_rings)
1607 			(*na)->num_host_tx_rings = req->nr_host_tx_rings;
1608 		if (req->nr_host_rx_rings)
1609 			(*na)->num_host_rx_rings = req->nr_host_rx_rings;
1610 	}
1611 	nm_prdis("%s: host tx %d rx %u", (*na)->name, (*na)->num_host_tx_rings,
1612 			(*na)->num_host_rx_rings);
1613 
1614 out:
1615 	if (error) {
1616 		if (ret)
1617 			netmap_adapter_put(ret);
1618 		if (*ifp) {
1619 			if_rele(*ifp);
1620 			*ifp = NULL;
1621 		}
1622 	}
1623 	if (nmd_ref)
1624 		netmap_mem_put(nmd);
1625 
1626 	return error;
1627 }
1628 
1629 /* undo netmap_get_na() */
1630 void
1631 netmap_unget_na(struct netmap_adapter *na, struct ifnet *ifp)
1632 {
1633 	if (ifp)
1634 		if_rele(ifp);
1635 	if (na)
1636 		netmap_adapter_put(na);
1637 }
1638 
1639 
1640 #define NM_FAIL_ON(t) do {						\
1641 	if (unlikely(t)) {						\
1642 		nm_prlim(5, "%s: fail '" #t "' "				\
1643 			"h %d c %d t %d "				\
1644 			"rh %d rc %d rt %d "				\
1645 			"hc %d ht %d",					\
1646 			kring->name,					\
1647 			head, cur, ring->tail,				\
1648 			kring->rhead, kring->rcur, kring->rtail,	\
1649 			kring->nr_hwcur, kring->nr_hwtail);		\
1650 		return kring->nkr_num_slots;				\
1651 	}								\
1652 } while (0)
1653 
1654 /*
1655  * validate parameters on entry for *_txsync()
1656  * Returns ring->cur if ok, or something >= kring->nkr_num_slots
1657  * in case of error.
1658  *
1659  * rhead, rcur and rtail=hwtail are stored from previous round.
1660  * hwcur is the next packet to send to the ring.
1661  *
1662  * We want
1663  *    hwcur <= *rhead <= head <= cur <= tail = *rtail <= hwtail
1664  *
1665  * hwcur, rhead, rtail and hwtail are reliable
1666  */
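/* Worked example of the checks below: with nkr_num_slots = 8 and a
 * previous round that left rhead = 6, rtail = 2 (the wrapped case),
 * head = 7 or head = 1 is accepted (outside rtail..rhead), while
 * head = 4 fails and makes the prologue return 8, which callers
 * treat as a request to reinit the ring.
 */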
1667 u_int
1668 nm_txsync_prologue(struct netmap_kring *kring, struct netmap_ring *ring)
1669 {
1670 	u_int head = ring->head; /* read only once */
1671 	u_int cur = ring->cur; /* read only once */
1672 	u_int n = kring->nkr_num_slots;
1673 
1674 	nm_prdis(5, "%s kcur %d ktail %d head %d cur %d tail %d",
1675 		kring->name,
1676 		kring->nr_hwcur, kring->nr_hwtail,
1677 		ring->head, ring->cur, ring->tail);
1678 #if 1 /* kernel sanity checks; but we can trust the kring. */
1679 	NM_FAIL_ON(kring->nr_hwcur >= n || kring->rhead >= n ||
1680 	    kring->rtail >= n ||  kring->nr_hwtail >= n);
1681 #endif /* kernel sanity checks */
1682 	/*
1683 	 * user sanity checks. We only use head.
1684 	 * A, B, ... are possible positions for head:
1685 	 *
1686 	 *  0    A  rhead   B  rtail   C  n-1
1687 	 *  0    D  rtail   E  rhead   F  n-1
1688 	 *
1689 	 * B, F, D are valid. A, C, E are wrong
1690 	 */
1691 	if (kring->rtail >= kring->rhead) {
1692 		/* want rhead <= head <= rtail */
1693 		NM_FAIL_ON(head < kring->rhead || head > kring->rtail);
1694 		/* and also head <= cur <= rtail */
1695 		NM_FAIL_ON(cur < head || cur > kring->rtail);
1696 	} else { /* here rtail < rhead */
1697 		/* we need head outside rtail .. rhead */
1698 		NM_FAIL_ON(head > kring->rtail && head < kring->rhead);
1699 
1700 		/* two cases now: head <= rtail or head >= rhead  */
1701 		if (head <= kring->rtail) {
1702 			/* want head <= cur <= rtail */
1703 			NM_FAIL_ON(cur < head || cur > kring->rtail);
1704 		} else { /* head >= rhead */
1705 			/* cur must be outside rtail..head */
1706 			NM_FAIL_ON(cur > kring->rtail && cur < head);
1707 		}
1708 	}
1709 	if (ring->tail != kring->rtail) {
1710 		nm_prlim(5, "%s tail overwritten was %d need %d", kring->name,
1711 			ring->tail, kring->rtail);
1712 		ring->tail = kring->rtail;
1713 	}
1714 	kring->rhead = head;
1715 	kring->rcur = cur;
1716 	return head;
1717 }
1718 
1719 
1720 /*
1721  * validate parameters on entry for *_rxsync()
1722  * Returns ring->head if ok, kring->nkr_num_slots on error.
1723  *
1724  * For a valid configuration,
1725  * hwcur <= head <= cur <= tail <= hwtail
1726  *
1727  * We only consider head and cur.
1728  * hwcur and hwtail are reliable.
1729  *
1730  */
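/* The checks mirror nm_txsync_prologue() above, with nr_hwcur and
 * nr_hwtail taking the place of rhead and rtail: e.g. with 8 slots,
 * hwcur = 6 and hwtail = 2, head = 1 is accepted while head = 4 is
 * rejected.
 */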
1731 u_int
1732 nm_rxsync_prologue(struct netmap_kring *kring, struct netmap_ring *ring)
1733 {
1734 	uint32_t const n = kring->nkr_num_slots;
1735 	uint32_t head, cur;
1736 
1737 	nm_prdis(5,"%s kc %d kt %d h %d c %d t %d",
1738 		kring->name,
1739 		kring->nr_hwcur, kring->nr_hwtail,
1740 		ring->head, ring->cur, ring->tail);
1741 	/*
1742 	 * Before storing the new values, we should check they do not
1743 	 * move backwards. However:
1744 	 * - head is not an issue because the previous value is hwcur;
1745 	 * - cur could in principle go back, however it does not matter
1746 	 *   because we are processing a brand new rxsync()
1747 	 */
1748 	cur = kring->rcur = ring->cur;	/* read only once */
1749 	head = kring->rhead = ring->head;	/* read only once */
1750 #if 1 /* kernel sanity checks */
1751 	NM_FAIL_ON(kring->nr_hwcur >= n || kring->nr_hwtail >= n);
1752 #endif /* kernel sanity checks */
1753 	/* user sanity checks */
1754 	if (kring->nr_hwtail >= kring->nr_hwcur) {
1755 		/* want hwcur <= rhead <= hwtail */
1756 		NM_FAIL_ON(head < kring->nr_hwcur || head > kring->nr_hwtail);
1757 		/* and also rhead <= rcur <= hwtail */
1758 		NM_FAIL_ON(cur < head || cur > kring->nr_hwtail);
1759 	} else {
1760 		/* we need rhead outside hwtail..hwcur */
1761 		NM_FAIL_ON(head < kring->nr_hwcur && head > kring->nr_hwtail);
1762 		/* two cases now: head <= hwtail or head >= hwcur  */
1763 		if (head <= kring->nr_hwtail) {
1764 			/* want head <= cur <= hwtail */
1765 			NM_FAIL_ON(cur < head || cur > kring->nr_hwtail);
1766 		} else {
1767 			/* cur must be outside hwtail..head */
1768 			NM_FAIL_ON(cur < head && cur > kring->nr_hwtail);
1769 		}
1770 	}
1771 	if (ring->tail != kring->rtail) {
1772 		nm_prlim(5, "%s tail overwritten was %d need %d",
1773 			kring->name,
1774 			ring->tail, kring->rtail);
1775 		ring->tail = kring->rtail;
1776 	}
1777 	return head;
1778 }
1779 
1780 
1781 /*
1782  * Error routine called when txsync/rxsync detects an error.
1783  * Can't do much more than resetting head = cur = hwcur, tail = hwtail
1784  * Return 1 on reinit.
1785  *
1786  * This routine is only called by the upper half of the kernel.
1787  * It only reads hwcur (which is changed only by the upper half, too)
1788  * and hwtail (which may be changed by the lower half, but only on
1789  * a tx ring and only to increase it, so any error will be recovered
1790  * on the next call). For the above, we don't strictly need to call
1791  * it under lock.
1792  */
1793 int
1794 netmap_ring_reinit(struct netmap_kring *kring)
1795 {
1796 	struct netmap_ring *ring = kring->ring;
1797 	u_int i, lim = kring->nkr_num_slots - 1;
1798 	int errors = 0;
1799 
1800 	// XXX KASSERT nm_kr_tryget
1801 	nm_prlim(10, "called for %s", kring->name);
1802 	// XXX probably wrong to trust userspace
1803 	kring->rhead = ring->head;
1804 	kring->rcur  = ring->cur;
1805 	kring->rtail = ring->tail;
1806 
1807 	if (ring->cur > lim)
1808 		errors++;
1809 	if (ring->head > lim)
1810 		errors++;
1811 	if (ring->tail > lim)
1812 		errors++;
1813 	for (i = 0; i <= lim; i++) {
1814 		u_int idx = ring->slot[i].buf_idx;
1815 		u_int len = ring->slot[i].len;
1816 		if (idx < 2 || idx >= kring->na->na_lut.objtotal) {
1817 			nm_prlim(5, "bad index at slot %d idx %d len %d ", i, idx, len);
1818 			ring->slot[i].buf_idx = 0;
1819 			ring->slot[i].len = 0;
1820 		} else if (len > NETMAP_BUF_SIZE(kring->na)) {
1821 			ring->slot[i].len = 0;
1822 			nm_prlim(5, "bad len at slot %d idx %d len %d", i, idx, len);
1823 		}
1824 	}
1825 	if (errors) {
1826 		nm_prlim(10, "total %d errors", errors);
1827 		nm_prlim(10, "%s reinit, cur %d -> %d tail %d -> %d",
1828 			kring->name,
1829 			ring->cur, kring->nr_hwcur,
1830 			ring->tail, kring->nr_hwtail);
1831 		ring->head = kring->rhead = kring->nr_hwcur;
1832 		ring->cur  = kring->rcur  = kring->nr_hwcur;
1833 		ring->tail = kring->rtail = kring->nr_hwtail;
1834 	}
1835 	return (errors ? 1 : 0);
1836 }
1837 
1838 /* interpret the ringid and flags fields of an nmreq, by translating them
1839  * into a pair of intervals of ring indices:
1840  *
1841  * [priv->np_txqfirst, priv->np_txqlast) and
1842  * [priv->np_rxqfirst, priv->np_rxqlast)
1843  *
1844  */
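/* Illustrative mapping, assuming an adapter with 4 tx/rx rings plus
 * one host ring per direction:
 *
 *	NR_REG_ALL_NIC			-> tx [0,4)  rx [0,4)
 *	NR_REG_SW			-> tx [4,5)  rx [4,5)
 *	NR_REG_NIC_SW			-> tx [0,5)  rx [0,5)
 *	NR_REG_ONE_NIC, nr_ringid=2	-> tx [2,3)  rx [2,3)
 *	NR_REG_ONE_SW, nr_ringid=0	-> tx [4,5)  rx [4,5)
 *
 * A direction excluded through NR_TX_RINGS_ONLY/NR_RX_RINGS_ONLY gets
 * the empty interval [0,0).
 */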
1845 int
1846 netmap_interp_ringid(struct netmap_priv_d *priv, struct nmreq_header *hdr)
1847 {
1848 	struct netmap_adapter *na = priv->np_na;
1849 	struct nmreq_register *reg = (struct nmreq_register *)hdr->nr_body;
1850 	int excluded_direction[] = { NR_TX_RINGS_ONLY, NR_RX_RINGS_ONLY };
1851 	enum txrx t;
1852 	u_int j;
1853 	u_int nr_flags = reg->nr_flags, nr_mode = reg->nr_mode,
1854 	      nr_ringid = reg->nr_ringid;
1855 
1856 	for_rx_tx(t) {
1857 		if (nr_flags & excluded_direction[t]) {
1858 			priv->np_qfirst[t] = priv->np_qlast[t] = 0;
1859 			continue;
1860 		}
1861 		switch (nr_mode) {
1862 		case NR_REG_ALL_NIC:
1863 		case NR_REG_NULL:
1864 			priv->np_qfirst[t] = 0;
1865 			priv->np_qlast[t] = nma_get_nrings(na, t);
1866 			nm_prdis("ALL/PIPE: %s %d %d", nm_txrx2str(t),
1867 				priv->np_qfirst[t], priv->np_qlast[t]);
1868 			break;
1869 		case NR_REG_SW:
1870 		case NR_REG_NIC_SW:
1871 			if (!(na->na_flags & NAF_HOST_RINGS)) {
1872 				nm_prerr("host rings not supported");
1873 				return EINVAL;
1874 			}
1875 			priv->np_qfirst[t] = (nr_mode == NR_REG_SW ?
1876 				nma_get_nrings(na, t) : 0);
1877 			priv->np_qlast[t] = netmap_all_rings(na, t);
1878 			nm_prdis("%s: %s %d %d", nr_mode == NR_REG_SW ? "SW" : "NIC+SW",
1879 				nm_txrx2str(t),
1880 				priv->np_qfirst[t], priv->np_qlast[t]);
1881 			break;
1882 		case NR_REG_ONE_NIC:
1883 			if (nr_ringid >= na->num_tx_rings &&
1884 					nr_ringid >= na->num_rx_rings) {
1885 				nm_prerr("invalid ring id %d", nr_ringid);
1886 				return EINVAL;
1887 			}
1888 			/* if not enough rings, use the first one */
1889 			j = nr_ringid;
1890 			if (j >= nma_get_nrings(na, t))
1891 				j = 0;
1892 			priv->np_qfirst[t] = j;
1893 			priv->np_qlast[t] = j + 1;
1894 			nm_prdis("ONE_NIC: %s %d %d", nm_txrx2str(t),
1895 				priv->np_qfirst[t], priv->np_qlast[t]);
1896 			break;
1897 		case NR_REG_ONE_SW:
1898 			if (!(na->na_flags & NAF_HOST_RINGS)) {
1899 				nm_prerr("host rings not supported");
1900 				return EINVAL;
1901 			}
1902 			if (nr_ringid >= na->num_host_tx_rings &&
1903 					nr_ringid >= na->num_host_rx_rings) {
1904 				nm_prerr("invalid ring id %d", nr_ringid);
1905 				return EINVAL;
1906 			}
1907 			/* if not enough rings, use the first one */
1908 			j = nr_ringid;
1909 			if (j >= nma_get_host_nrings(na, t))
1910 				j = 0;
1911 			priv->np_qfirst[t] = nma_get_nrings(na, t) + j;
1912 			priv->np_qlast[t] = nma_get_nrings(na, t) + j + 1;
1913 			nm_prdis("ONE_SW: %s %d %d", nm_txrx2str(t),
1914 				priv->np_qfirst[t], priv->np_qlast[t]);
1915 			break;
1916 		default:
1917 			nm_prerr("invalid regif type %d", nr_mode);
1918 			return EINVAL;
1919 		}
1920 	}
1921 	priv->np_flags = nr_flags;
1922 
1923 	/* Allow transparent forwarding mode in the host --> nic
1924 	 * direction only if all the TX hw rings have been opened. */
1925 	if (priv->np_qfirst[NR_TX] == 0 &&
1926 			priv->np_qlast[NR_TX] >= na->num_tx_rings) {
1927 		priv->np_sync_flags |= NAF_CAN_FORWARD_DOWN;
1928 	}
1929 
1930 	if (netmap_verbose) {
1931 		nm_prinf("%s: tx [%d,%d) rx [%d,%d) id %d",
1932 			na->name,
1933 			priv->np_qfirst[NR_TX],
1934 			priv->np_qlast[NR_TX],
1935 			priv->np_qfirst[NR_RX],
1936 			priv->np_qlast[NR_RX],
1937 			nr_ringid);
1938 	}
1939 	return 0;
1940 }
1941 
1942 
1943 /*
1944  * Set the ring ID. For devices with a single queue, a request
1945  * for all rings is the same as a single ring.
1946  */
1947 static int
1948 netmap_set_ringid(struct netmap_priv_d *priv, struct nmreq_header *hdr)
1949 {
1950 	struct netmap_adapter *na = priv->np_na;
1951 	struct nmreq_register *reg = (struct nmreq_register *)hdr->nr_body;
1952 	int error;
1953 	enum txrx t;
1954 
1955 	error = netmap_interp_ringid(priv, hdr);
1956 	if (error) {
1957 		return error;
1958 	}
1959 
1960 	priv->np_txpoll = (reg->nr_flags & NR_NO_TX_POLL) ? 0 : 1;
1961 
1962 	/* optimization: count the users registered for more than
1963 	 * one ring, which are the ones sleeping on the global queue.
1964 	 * The default netmap_notify() callback will then
1965 	 * avoid signaling the global queue if nobody is using it
1966 	 */
1967 	for_rx_tx(t) {
1968 		if (nm_si_user(priv, t))
1969 			na->si_users[t]++;
1970 	}
1971 	return 0;
1972 }
1973 
1974 static void
1975 netmap_unset_ringid(struct netmap_priv_d *priv)
1976 {
1977 	struct netmap_adapter *na = priv->np_na;
1978 	enum txrx t;
1979 
1980 	for_rx_tx(t) {
1981 		if (nm_si_user(priv, t))
1982 			na->si_users[t]--;
1983 		priv->np_qfirst[t] = priv->np_qlast[t] = 0;
1984 	}
1985 	priv->np_flags = 0;
1986 	priv->np_txpoll = 0;
1987 	priv->np_kloop_state = 0;
1988 }
1989 
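/* Helpers to iterate over the rings selected in priv:
 * foreach_selected_ring() visits the selected RX interval first and
 * then the selected TX interval, binding t_/i_ to the current
 * direction/index and kring_ to the corresponding kring.
 */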
1990 #define within_sel(p_, t_, i_)					  	  \
1991 	((i_) < (p_)->np_qlast[(t_)])
1992 #define nonempty_sel(p_, t_)						  \
1993 	(within_sel((p_), (t_), (p_)->np_qfirst[(t_)]))
1994 #define foreach_selected_ring(p_, t_, i_, kring_)			  \
1995 	for ((t_) = nonempty_sel((p_), NR_RX) ? NR_RX : NR_TX,		  \
1996 	     (i_) = (p_)->np_qfirst[(t_)];				  \
1997 	     ((t_) == NR_RX ||						  \
1998 	      ((t_) == NR_TX && within_sel((p_), (t_), (i_)))) &&	  \
1999 	      ((kring_) = NMR((p_)->np_na, (t_))[(i_)]); 		  \
2000 	     (i_) = within_sel((p_), (t_), (i_) + 1) ? (i_) + 1 :         \
2001 		(++(t_) < NR_TXRX ? (p_)->np_qfirst[(t_)] : (i_)))
2002 
2003 
2004 /* Set the nr_pending_mode for the requested rings.
2005  * If requested, also try to get exclusive access to the rings, provided
2006  * the rings we want to bind are not exclusively owned by a previous bind.
2007  */
2008 static int
2009 netmap_krings_get(struct netmap_priv_d *priv)
2010 {
2011 	struct netmap_adapter *na = priv->np_na;
2012 	u_int i;
2013 	struct netmap_kring *kring;
2014 	int excl = (priv->np_flags & NR_EXCLUSIVE);
2015 	enum txrx t;
2016 
2017 	if (netmap_debug & NM_DEBUG_ON)
2018 		nm_prinf("%s: grabbing tx [%d, %d) rx [%d, %d)",
2019 			na->name,
2020 			priv->np_qfirst[NR_TX],
2021 			priv->np_qlast[NR_TX],
2022 			priv->np_qfirst[NR_RX],
2023 			priv->np_qlast[NR_RX]);
2024 
2025 	/* first round: check that none of the requested rings
2026 	 * is already exclusively owned, and that we are not
2027 	 * asking for exclusive ownership of rings already in use
2028 	 */
2029 	foreach_selected_ring(priv, t, i, kring) {
2030 		if ((kring->nr_kflags & NKR_EXCLUSIVE) ||
2031 		    (kring->users && excl))
2032 		{
2033 			nm_prdis("ring %s busy", kring->name);
2034 			return EBUSY;
2035 		}
2036 	}
2037 
2038 	/* second round: increment usage count (possibly marking them
2039 	 * as exclusive) and set the nr_pending_mode
2040 	 */
2041 	foreach_selected_ring(priv, t, i, kring) {
2042 		kring->users++;
2043 		if (excl)
2044 			kring->nr_kflags |= NKR_EXCLUSIVE;
2045 		kring->nr_pending_mode = NKR_NETMAP_ON;
2046 	}
2047 
2048 	return 0;
2049 
2050 }
2051 
2052 /* Undo netmap_krings_get(). This is done by clearing the exclusive mode
2053  * if it was requested at regif time, and by unsetting nr_pending_mode
2054  * if we are the last users of the involved rings. */
2055 static void
2056 netmap_krings_put(struct netmap_priv_d *priv)
2057 {
2058 	u_int i;
2059 	struct netmap_kring *kring;
2060 	int excl = (priv->np_flags & NR_EXCLUSIVE);
2061 	enum txrx t;
2062 
2063 	nm_prdis("%s: releasing tx [%d, %d) rx [%d, %d)",
2064 			priv->np_na->name,
2065 			priv->np_qfirst[NR_TX],
2066 			priv->np_qlast[NR_TX],
2067 			priv->np_qfirst[NR_RX],
2068 			priv->np_qlast[NR_RX]);
2069 
2070 	foreach_selected_ring(priv, t, i, kring) {
2071 		if (excl)
2072 			kring->nr_kflags &= ~NKR_EXCLUSIVE;
2073 		kring->users--;
2074 		if (kring->users == 0)
2075 			kring->nr_pending_mode = NKR_NETMAP_OFF;
2076 	}
2077 }
2078 
2079 static int
2080 nm_priv_rx_enabled(struct netmap_priv_d *priv)
2081 {
2082 	return (priv->np_qfirst[NR_RX] != priv->np_qlast[NR_RX]);
2083 }
2084 
2085 /* Validate the CSB entries for both directions (atok and ktoa).
2086  * To be called under NMG_LOCK(). */
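/* For reference, the layout validated below: one atok and one ktoa
 * entry per bound ring, TX rings first. E.g. when binding 2 TX and
 * 2 RX rings:
 *
 *	csb_atok[0..1]	application->kernel blocks for TX rings 0,1
 *	csb_atok[2..3]	application->kernel blocks for RX rings 0,1
 *	csb_ktoa[0..1]	kernel->application blocks for TX rings 0,1
 *	csb_ktoa[2..3]	kernel->application blocks for RX rings 0,1
 *
 * Both arrays must be aligned to their entry size and entirely
 * contained in valid user memory.
 */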
2087 static int
2088 netmap_csb_validate(struct netmap_priv_d *priv, struct nmreq_opt_csb *csbo)
2089 {
2090 	struct nm_csb_atok *csb_atok_base =
2091 		(struct nm_csb_atok *)(uintptr_t)csbo->csb_atok;
2092 	struct nm_csb_ktoa *csb_ktoa_base =
2093 		(struct nm_csb_ktoa *)(uintptr_t)csbo->csb_ktoa;
2094 	enum txrx t;
2095 	int num_rings[NR_TXRX], tot_rings;
2096 	size_t entry_size[2];
2097 	void *csb_start[2];
2098 	int i;
2099 
2100 	if (priv->np_kloop_state & NM_SYNC_KLOOP_RUNNING) {
2101 		nm_prerr("Cannot update CSB while kloop is running");
2102 		return EBUSY;
2103 	}
2104 
2105 	tot_rings = 0;
2106 	for_rx_tx(t) {
2107 		num_rings[t] = priv->np_qlast[t] - priv->np_qfirst[t];
2108 		tot_rings += num_rings[t];
2109 	}
2110 	if (tot_rings <= 0)
2111 		return 0;
2112 
2113 	if (!(priv->np_flags & NR_EXCLUSIVE)) {
2114 		nm_prerr("CSB mode requires NR_EXCLUSIVE");
2115 		return EINVAL;
2116 	}
2117 
2118 	entry_size[0] = sizeof(*csb_atok_base);
2119 	entry_size[1] = sizeof(*csb_ktoa_base);
2120 	csb_start[0] = (void *)csb_atok_base;
2121 	csb_start[1] = (void *)csb_ktoa_base;
2122 
2123 	for (i = 0; i < 2; i++) {
2124 		/* On Linux we could use access_ok() to simplify
2125 		 * the validation. However, the advantage of
2126 		 * this approach is that it works also on
2127 		 * FreeBSD. */
2128 		size_t csb_size = tot_rings * entry_size[i];
2129 		void *tmp;
2130 		int err;
2131 
2132 		if ((uintptr_t)csb_start[i] & (entry_size[i]-1)) {
2133 			nm_prerr("Unaligned CSB address");
2134 			return EINVAL;
2135 		}
2136 
2137 		tmp = nm_os_malloc(csb_size);
2138 		if (!tmp)
2139 			return ENOMEM;
2140 		if (i == 0) {
2141 			/* Application --> kernel direction. */
2142 			err = copyin(csb_start[i], tmp, csb_size);
2143 		} else {
2144 			/* Kernel --> application direction. */
2145 			memset(tmp, 0, csb_size);
2146 			err = copyout(tmp, csb_start[i], csb_size);
2147 		}
2148 		nm_os_free(tmp);
2149 		if (err) {
2150 			nm_prerr("Invalid CSB address");
2151 			return err;
2152 		}
2153 	}
2154 
2155 	priv->np_csb_atok_base = csb_atok_base;
2156 	priv->np_csb_ktoa_base = csb_ktoa_base;
2157 
2158 	/* Initialize the CSB. */
2159 	for_rx_tx(t) {
2160 		for (i = 0; i < num_rings[t]; i++) {
2161 			struct netmap_kring *kring =
2162 				NMR(priv->np_na, t)[i + priv->np_qfirst[t]];
2163 			struct nm_csb_atok *csb_atok = csb_atok_base + i;
2164 			struct nm_csb_ktoa *csb_ktoa = csb_ktoa_base + i;
2165 
2166 			if (t == NR_RX) {
2167 				csb_atok += num_rings[NR_TX];
2168 				csb_ktoa += num_rings[NR_TX];
2169 			}
2170 
2171 			CSB_WRITE(csb_atok, head, kring->rhead);
2172 			CSB_WRITE(csb_atok, cur, kring->rcur);
2173 			CSB_WRITE(csb_atok, appl_need_kick, 1);
2174 			CSB_WRITE(csb_atok, sync_flags, 1);
2175 			CSB_WRITE(csb_ktoa, hwcur, kring->nr_hwcur);
2176 			CSB_WRITE(csb_ktoa, hwtail, kring->nr_hwtail);
2177 			CSB_WRITE(csb_ktoa, kern_need_kick, 1);
2178 
2179 			nm_prinf("csb_init for kring %s: head %u, cur %u, "
2180 				"hwcur %u, hwtail %u", kring->name,
2181 				kring->rhead, kring->rcur, kring->nr_hwcur,
2182 				kring->nr_hwtail);
2183 		}
2184 	}
2185 
2186 	return 0;
2187 }
2188 
2189 /* Ensure that the netmap adapter can support the given MTU.
2190  * @return EINVAL if the na cannot be set to mtu, 0 otherwise.
2191  */
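/* Example: with the common 2048-byte netmap buffers, a 1500-byte MTU
 * fits a single slot and only requires nbs >= 1500, while a 9000-byte
 * MTU requires the adapter to support NS_MOREFRAG, so that a packet
 * may span several slots of up to rx_buf_maxsize bytes each.
 */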
2192 int
2193 netmap_buf_size_validate(const struct netmap_adapter *na, unsigned mtu) {
2194 	unsigned nbs = NETMAP_BUF_SIZE(na);
2195 
2196 	if (mtu <= na->rx_buf_maxsize) {
2197 		/* The MTU fits a single NIC slot. We only
2198 		 * need to check that netmap buffers are
2199 		 * large enough to hold an MTU. NS_MOREFRAG
2200 		 * cannot be used in this case. */
2201 		if (nbs < mtu) {
2202 			nm_prerr("error: netmap buf size (%u) "
2203 				 "< device MTU (%u)", nbs, mtu);
2204 			return EINVAL;
2205 		}
2206 	} else {
2207 		/* More NIC slots may be needed to receive
2208 		 * or transmit a single packet. Check that
2209 		 * the adapter supports NS_MOREFRAG and that
2210 		 * netmap buffers are large enough to hold
2211 		 * the maximum per-slot size. */
2212 		if (!(na->na_flags & NAF_MOREFRAG)) {
2213 			nm_prerr("error: large MTU (%u) needed "
2214 				 "but %s does not support "
2215 				 "NS_MOREFRAG", mtu,
2216 				 na->ifp->if_xname);
2217 			return EINVAL;
2218 		} else if (nbs < na->rx_buf_maxsize) {
2219 			nm_prerr("error: using NS_MOREFRAG on "
2220 				 "%s requires netmap buf size "
2221 				 ">= %u", na->ifp->if_xname,
2222 				 na->rx_buf_maxsize);
2223 			return EINVAL;
2224 		} else {
2225 			nm_prinf("info: netmap application on "
2226 				 "%s needs to support "
2227 				 "NS_MOREFRAG "
2228 				 "(MTU=%u,netmap_buf_size=%u)",
2229 				 na->ifp->if_xname, mtu, nbs);
2230 		}
2231 	}
2232 	return 0;
2233 }
2234 
2235 
2236 /*
2237  * possibly move the interface to netmap-mode.
2238  * On success it returns 0 and sets priv->np_nifp, otherwise an error code.
2239  * This must be called with NMG_LOCK held.
2240  *
2241  * The following na callbacks are called in the process:
2242  *
2243  * na->nm_config()			[by netmap_update_config]
2244  * (get current number and size of rings)
2245  *
2246  *  	We have a generic one for linux (netmap_linux_config).
2247  *  	The bwrap has to override this, since it has to forward
2248  *  	the request to the wrapped adapter (netmap_bwrap_config).
2249  *
2250  *
2251  * na->nm_krings_create()
2252  * (create and init the krings array)
2253  *
2254  * 	One of the following:
2255  *
2256  *	* netmap_hw_krings_create, 			(hw ports)
2257  *		creates the standard layout for the krings
2258  * 		and adds the mbq (used for the host rings).
2259  *
2260  * 	* netmap_vp_krings_create			(VALE ports)
2261  * 		add leases and scratchpads
2262  *
2263  * 	* netmap_pipe_krings_create			(pipes)
2264  * 		create the krings and rings of both ends and
2265  * 		cross-link them
2266  *
2267  *      * netmap_monitor_krings_create 			(monitors)
2268  *      	avoid allocating the mbq
2269  *
2270  *      * netmap_bwrap_krings_create			(bwraps)
2271  *      	create the bwrap krings array,
2272  *      	the krings array of the wrapped adapter, and
2273  *      	(if needed) the fake array for the host adapter
2274  *
2275  * na->nm_register(, 1)
2276  * (put the adapter in netmap mode)
2277  *
2278  * 	This may be one of the following:
2279  *
2280  * 	* netmap_hw_reg				        (hw ports)
2281  * 		checks that the ifp is still there, then calls
2282  * 		the hardware specific callback;
2283  *
2284  * 	* netmap_vp_reg					(VALE ports)
2285  *		If the port is connected to a bridge,
2286  *		set the NAF_NETMAP_ON flag under the
2287  *		bridge write lock.
2288  *
2289  *	* netmap_pipe_reg				(pipes)
2290  *		inform the other pipe end that it is no
2291  *		longer responsible for the lifetime of this
2292  *		pipe end
2293  *
2294  *	* netmap_monitor_reg				(monitors)
2295  *		intercept the sync callbacks of the monitored
2296  *		rings
2297  *
2298  *	* netmap_bwrap_reg				(bwraps)
2299  *		cross-link the bwrap and hwna rings,
2300  *		forward the request to the hwna, override
2301  *		the hwna notify callback (to get the frames
2302  *		coming from outside go through the bridge).
2303  *
2304  *
2305  */
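/* In short, a successful first-time registration runs, in order:
 *
 *	netmap_mem_finalize()		allocator ready
 *	netmap_update_config()		calls na->nm_config()
 *	netmap_set_ringid()		select the rings to bind
 *	na->nm_krings_create()		kernel view of the rings
 *	netmap_krings_get()		ring ownership and pending mode
 *	netmap_mem_rings_create()	netmap rings, if still missing
 *	netmap_mem_if_new()		the netmap_if exported to userspace
 *	na->nm_register(na, 1)		only if some kring switches mode
 */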
2306 int
2307 netmap_do_regif(struct netmap_priv_d *priv, struct netmap_adapter *na,
2308 	struct nmreq_header *hdr)
2309 {
2310 	struct netmap_if *nifp = NULL;
2311 	int error;
2312 
2313 	NMG_LOCK_ASSERT();
2314 	priv->np_na = na;     /* store the reference */
2315 	error = netmap_mem_finalize(na->nm_mem, na);
2316 	if (error)
2317 		goto err;
2318 
2319 	if (na->active_fds == 0) {
2320 
2321 		/* cache the allocator info in the na */
2322 		error = netmap_mem_get_lut(na->nm_mem, &na->na_lut);
2323 		if (error)
2324 			goto err_drop_mem;
2325 		nm_prdis("lut %p bufs %u size %u", na->na_lut.lut, na->na_lut.objtotal,
2326 					    na->na_lut.objsize);
2327 
2328 		/* ring configuration may have changed, fetch from the card */
2329 		netmap_update_config(na);
2330 	}
2331 
2332 	/* compute the range of tx and rx rings to monitor */
2333 	error = netmap_set_ringid(priv, hdr);
2334 	if (error)
2335 		goto err_put_lut;
2336 
2337 	if (na->active_fds == 0) {
2338 		/*
2339 		 * If this is the first registration of the adapter,
2340 		 * perform sanity checks and create the in-kernel view
2341 		 * of the netmap rings (the netmap krings).
2342 		 */
2343 		if (na->ifp && nm_priv_rx_enabled(priv)) {
2344 			/* This netmap adapter is attached to an ifnet. */
2345 			unsigned mtu = nm_os_ifnet_mtu(na->ifp);
2346 
2347 			nm_prdis("%s: mtu %d rx_buf_maxsize %d netmap_buf_size %d",
2348 				na->name, mtu, na->rx_buf_maxsize, NETMAP_BUF_SIZE(na));
2349 
2350 			if (na->rx_buf_maxsize == 0) {
2351 				nm_prerr("%s: error: rx_buf_maxsize == 0", na->name);
2352 				error = EIO;
2353 				goto err_drop_mem;
2354 			}
2355 
2356 			error = netmap_buf_size_validate(na, mtu);
2357 			if (error)
2358 				goto err_drop_mem;
2359 		}
2360 
2361 		/*
2362 		 * Depending on the adapter, this may also create
2363 		 * the netmap rings themselves
2364 		 */
2365 		error = na->nm_krings_create(na);
2366 		if (error)
2367 			goto err_put_lut;
2368 
2369 	}
2370 
2371 	/* now the krings must exist and we can check whether some
2372 	 * previous bind has exclusive ownership on them, and set
2373 	 * nr_pending_mode
2374 	 */
2375 	error = netmap_krings_get(priv);
2376 	if (error)
2377 		goto err_del_krings;
2378 
2379 	/* create all needed missing netmap rings */
2380 	error = netmap_mem_rings_create(na);
2381 	if (error)
2382 		goto err_rel_excl;
2383 
2384 	/* in all cases, create a new netmap if */
2385 	nifp = netmap_mem_if_new(na, priv);
2386 	if (nifp == NULL) {
2387 		error = ENOMEM;
2388 		goto err_rel_excl;
2389 	}
2390 
2391 	if (nm_kring_pending(priv)) {
2392 		/* Some kring is switching mode, tell the adapter to
2393 		 * react on this. */
2394 		error = na->nm_register(na, 1);
2395 		if (error)
2396 			goto err_del_if;
2397 	}
2398 
2399 	/* Commit the reference. */
2400 	na->active_fds++;
2401 
2402 	/*
2403 	 * advertise that the interface is ready by setting np_nifp.
2404 	 * The barrier is needed because readers (poll, *SYNC and mmap)
2405 	 * check for priv->np_nifp != NULL without locking
2406 	 */
2407 	mb(); /* make sure previous writes are visible to all CPUs */
2408 	priv->np_nifp = nifp;
2409 
2410 	return 0;
2411 
2412 err_del_if:
2413 	netmap_mem_if_delete(na, nifp);
2414 err_rel_excl:
2415 	netmap_krings_put(priv);
2416 	netmap_mem_rings_delete(na);
2417 err_del_krings:
2418 	if (na->active_fds == 0)
2419 		na->nm_krings_delete(na);
2420 err_put_lut:
2421 	if (na->active_fds == 0)
2422 		memset(&na->na_lut, 0, sizeof(na->na_lut));
2423 err_drop_mem:
2424 	netmap_mem_drop(na);
2425 err:
2426 	priv->np_na = NULL;
2427 	return error;
2428 }
2429 
2430 
2431 /*
2432  * update kring and ring at the end of rxsync/txsync.
2433  */
2434 static inline void
2435 nm_sync_finalize(struct netmap_kring *kring)
2436 {
2437 	/*
2438 	 * Update ring tail to what the kernel knows
2439 	 * After txsync: head/rhead/hwcur might be behind cur/rcur
2440 	 * if no carrier.
2441 	 */
2442 	kring->ring->tail = kring->rtail = kring->nr_hwtail;
2443 
2444 	nm_prdis(5, "%s now hwcur %d hwtail %d head %d cur %d tail %d",
2445 		kring->name, kring->nr_hwcur, kring->nr_hwtail,
2446 		kring->rhead, kring->rcur, kring->rtail);
2447 }
2448 
2449 /* set ring timestamp */
2450 static inline void
2451 ring_timestamp_set(struct netmap_ring *ring)
2452 {
2453 	if (netmap_no_timestamp == 0 || ring->flags & NR_TIMESTAMP) {
2454 		microtime(&ring->ts);
2455 	}
2456 }
2457 
2458 static int nmreq_copyin(struct nmreq_header *, int);
2459 static int nmreq_copyout(struct nmreq_header *, int);
2460 static int nmreq_checkoptions(struct nmreq_header *);
2461 
2462 /*
2463  * ioctl(2) support for the "netmap" device.
2464  *
2465  * The following commands are accepted:
2466  * - NIOCCTRL		device control API
2467  * - NIOCTXSYNC		sync TX rings
2468  * - NIOCRXSYNC		sync RX rings
2469  * - SIOCGIFADDR	just for convenience
2470  * - NIOCGINFO		deprecated (legacy API)
2471  * - NIOCREGIF		deprecated (legacy API)
2472  *
2473  * Return 0 on success, errno otherwise.
2474  */
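/* Sketch of the userspace side of a registration, for illustration
 * only ("em0" is a placeholder name, error handling omitted):
 *
 *	struct nmreq_register reg;
 *	struct nmreq_header hdr;
 *	int fd;
 *
 *	memset(&hdr, 0, sizeof(hdr));
 *	memset(&reg, 0, sizeof(reg));
 *	hdr.nr_version = NETMAP_API;
 *	hdr.nr_reqtype = NETMAP_REQ_REGISTER;
 *	strlcpy(hdr.nr_name, "em0", sizeof(hdr.nr_name));
 *	hdr.nr_body = (uintptr_t)&reg;
 *	reg.nr_mode = NR_REG_ALL_NIC;
 *
 *	fd = open("/dev/netmap", O_RDWR);
 *	ioctl(fd, NIOCCTRL, &hdr);	// register all rings of em0
 *	ioctl(fd, NIOCTXSYNC, NULL);	// flush pending transmissions
 */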
2475 int
2476 netmap_ioctl(struct netmap_priv_d *priv, u_long cmd, caddr_t data,
2477 		struct thread *td, int nr_body_is_user)
2478 {
2479 	struct mbq q;	/* packets from RX hw queues to host stack */
2480 	struct netmap_adapter *na = NULL;
2481 	struct netmap_mem_d *nmd = NULL;
2482 	struct ifnet *ifp = NULL;
2483 	int error = 0;
2484 	u_int i, qfirst, qlast;
2485 	struct netmap_kring **krings;
2486 	int sync_flags;
2487 	enum txrx t;
2488 
2489 	switch (cmd) {
2490 	case NIOCCTRL: {
2491 		struct nmreq_header *hdr = (struct nmreq_header *)data;
2492 
2493 		if (hdr->nr_version < NETMAP_MIN_API ||
2494 		    hdr->nr_version > NETMAP_MAX_API) {
2495 			nm_prerr("API mismatch: got %d need %d",
2496 				hdr->nr_version, NETMAP_API);
2497 			return EINVAL;
2498 		}
2499 
2500 		/* Make a kernel-space copy of the user-space nr_body.
2501 		 * For convenience, the nr_body pointer and the pointers
2502 		 * in the options list will be replaced with their
2503 		 * kernel-space counterparts. The original pointers are
2504 		 * saved internally and later restored by nmreq_copyout().
2505 		 */
2506 		error = nmreq_copyin(hdr, nr_body_is_user);
2507 		if (error) {
2508 			return error;
2509 		}
2510 
2511 		/* Sanitize hdr->nr_name. */
2512 		hdr->nr_name[sizeof(hdr->nr_name) - 1] = '\0';
2513 
2514 		switch (hdr->nr_reqtype) {
2515 		case NETMAP_REQ_REGISTER: {
2516 			struct nmreq_register *req =
2517 				(struct nmreq_register *)(uintptr_t)hdr->nr_body;
2518 			struct netmap_if *nifp;
2519 
2520 			/* Protect access to priv from concurrent requests. */
2521 			NMG_LOCK();
2522 			do {
2523 				struct nmreq_option *opt;
2524 				u_int memflags;
2525 
2526 				if (priv->np_nifp != NULL) {	/* thread already registered */
2527 					error = EBUSY;
2528 					break;
2529 				}
2530 
2531 #ifdef WITH_EXTMEM
2532 				opt = nmreq_getoption(hdr, NETMAP_REQ_OPT_EXTMEM);
2533 				if (opt != NULL) {
2534 					struct nmreq_opt_extmem *e =
2535 						(struct nmreq_opt_extmem *)opt;
2536 
2537 					nmd = netmap_mem_ext_create(e->nro_usrptr,
2538 							&e->nro_info, &error);
2539 					opt->nro_status = error;
2540 					if (nmd == NULL)
2541 						break;
2542 				}
2543 #endif /* WITH_EXTMEM */
2544 
2545 				if (nmd == NULL && req->nr_mem_id) {
2546 					/* find the allocator and get a reference */
2547 					nmd = netmap_mem_find(req->nr_mem_id);
2548 					if (nmd == NULL) {
2549 						if (netmap_verbose) {
2550 							nm_prerr("%s: failed to find mem_id %u",
2551 									hdr->nr_name, req->nr_mem_id);
2552 						}
2553 						error = EINVAL;
2554 						break;
2555 					}
2556 				}
2557 				/* find the interface and a reference */
2558 				error = netmap_get_na(hdr, &na, &ifp, nmd,
2559 						      1 /* create */); /* keep reference */
2560 				if (error)
2561 					break;
2562 				if (NETMAP_OWNED_BY_KERN(na)) {
2563 					error = EBUSY;
2564 					break;
2565 				}
2566 
2567 				if (na->virt_hdr_len && !(req->nr_flags & NR_ACCEPT_VNET_HDR)) {
2568 					nm_prerr("virt_hdr_len=%d, but application does "
2569 						"not accept it", na->virt_hdr_len);
2570 					error = EIO;
2571 					break;
2572 				}
2573 
2574 				error = netmap_do_regif(priv, na, hdr);
2575 				if (error) {    /* reg. failed, release priv and ref */
2576 					break;
2577 				}
2578 
2579 				opt = nmreq_getoption(hdr, NETMAP_REQ_OPT_CSB);
2580 				if (opt != NULL) {
2581 					struct nmreq_opt_csb *csbo =
2582 						(struct nmreq_opt_csb *)opt;
2583 					error = netmap_csb_validate(priv, csbo);
2584 					opt->nro_status = error;
2585 					if (error) {
2586 						netmap_do_unregif(priv);
2587 						break;
2588 					}
2589 				}
2590 
2591 				nifp = priv->np_nifp;
2592 
2593 				/* return the offset of the netmap_if object */
2594 				req->nr_rx_rings = na->num_rx_rings;
2595 				req->nr_tx_rings = na->num_tx_rings;
2596 				req->nr_rx_slots = na->num_rx_desc;
2597 				req->nr_tx_slots = na->num_tx_desc;
2598 				req->nr_host_tx_rings = na->num_host_tx_rings;
2599 				req->nr_host_rx_rings = na->num_host_rx_rings;
2600 				error = netmap_mem_get_info(na->nm_mem, &req->nr_memsize, &memflags,
2601 					&req->nr_mem_id);
2602 				if (error) {
2603 					netmap_do_unregif(priv);
2604 					break;
2605 				}
2606 				if (memflags & NETMAP_MEM_PRIVATE) {
2607 					*(uint32_t *)(uintptr_t)&nifp->ni_flags |= NI_PRIV_MEM;
2608 				}
2609 				for_rx_tx(t) {
2610 					priv->np_si[t] = nm_si_user(priv, t) ?
2611 						&na->si[t] : &NMR(na, t)[priv->np_qfirst[t]]->si;
2612 				}
2613 
2614 				if (req->nr_extra_bufs) {
2615 					if (netmap_verbose)
2616 						nm_prinf("requested %d extra buffers",
2617 							req->nr_extra_bufs);
2618 					req->nr_extra_bufs = netmap_extra_alloc(na,
2619 						&nifp->ni_bufs_head, req->nr_extra_bufs);
2620 					if (netmap_verbose)
2621 						nm_prinf("got %d extra buffers", req->nr_extra_bufs);
2622 				}
2623 				req->nr_offset = netmap_mem_if_offset(na->nm_mem, nifp);
2624 
2625 				error = nmreq_checkoptions(hdr);
2626 				if (error) {
2627 					netmap_do_unregif(priv);
2628 					break;
2629 				}
2630 
2631 				/* store ifp reference so that priv destructor may release it */
2632 				priv->np_ifp = ifp;
2633 			} while (0);
2634 			if (error) {
2635 				netmap_unget_na(na, ifp);
2636 			}
2637 			/* release the reference from netmap_mem_find() or
2638 			 * netmap_mem_ext_create()
2639 			 */
2640 			if (nmd)
2641 				netmap_mem_put(nmd);
2642 			NMG_UNLOCK();
2643 			break;
2644 		}
2645 
2646 		case NETMAP_REQ_PORT_INFO_GET: {
2647 			struct nmreq_port_info_get *req =
2648 				(struct nmreq_port_info_get *)(uintptr_t)hdr->nr_body;
2649 
2650 			NMG_LOCK();
2651 			do {
2652 				u_int memflags;
2653 
2654 				if (hdr->nr_name[0] != '\0') {
2655 					/* Build a nmreq_register out of the nmreq_port_info_get,
2656 					 * so that we can call netmap_get_na(). */
2657 					struct nmreq_register regreq;
2658 					bzero(&regreq, sizeof(regreq));
2659 					regreq.nr_mode = NR_REG_ALL_NIC;
2660 					regreq.nr_tx_slots = req->nr_tx_slots;
2661 					regreq.nr_rx_slots = req->nr_rx_slots;
2662 					regreq.nr_tx_rings = req->nr_tx_rings;
2663 					regreq.nr_rx_rings = req->nr_rx_rings;
2664 					regreq.nr_host_tx_rings = req->nr_host_tx_rings;
2665 					regreq.nr_host_rx_rings = req->nr_host_rx_rings;
2666 					regreq.nr_mem_id = req->nr_mem_id;
2667 
2668 					/* get a refcount */
2669 					hdr->nr_reqtype = NETMAP_REQ_REGISTER;
2670 					hdr->nr_body = (uintptr_t)&regreq;
2671 					error = netmap_get_na(hdr, &na, &ifp, NULL, 1 /* create */);
2672 					hdr->nr_reqtype = NETMAP_REQ_PORT_INFO_GET; /* reset type */
2673 					hdr->nr_body = (uintptr_t)req; /* reset nr_body */
2674 					if (error) {
2675 						na = NULL;
2676 						ifp = NULL;
2677 						break;
2678 					}
2679 					nmd = na->nm_mem; /* get memory allocator */
2680 				} else {
2681 					nmd = netmap_mem_find(req->nr_mem_id ? req->nr_mem_id : 1);
2682 					if (nmd == NULL) {
2683 						if (netmap_verbose)
2684 							nm_prerr("%s: failed to find mem_id %u",
2685 									hdr->nr_name,
2686 									req->nr_mem_id ? req->nr_mem_id : 1);
2687 						error = EINVAL;
2688 						break;
2689 					}
2690 				}
2691 
2692 				error = netmap_mem_get_info(nmd, &req->nr_memsize, &memflags,
2693 					&req->nr_mem_id);
2694 				if (error)
2695 					break;
2696 				if (na == NULL) /* only memory info */
2697 					break;
2698 				netmap_update_config(na);
2699 				req->nr_rx_rings = na->num_rx_rings;
2700 				req->nr_tx_rings = na->num_tx_rings;
2701 				req->nr_rx_slots = na->num_rx_desc;
2702 				req->nr_tx_slots = na->num_tx_desc;
2703 				req->nr_host_tx_rings = na->num_host_tx_rings;
2704 				req->nr_host_rx_rings = na->num_host_rx_rings;
2705 			} while (0);
2706 			netmap_unget_na(na, ifp);
2707 			NMG_UNLOCK();
2708 			break;
2709 		}
2710 #ifdef WITH_VALE
2711 		case NETMAP_REQ_VALE_ATTACH: {
2712 			error = netmap_vale_attach(hdr, NULL /* userspace request */);
2713 			break;
2714 		}
2715 
2716 		case NETMAP_REQ_VALE_DETACH: {
2717 			error = netmap_vale_detach(hdr, NULL /* userspace request */);
2718 			break;
2719 		}
2720 
2721 		case NETMAP_REQ_VALE_LIST: {
2722 			error = netmap_vale_list(hdr);
2723 			break;
2724 		}
2725 
2726 		case NETMAP_REQ_PORT_HDR_SET: {
2727 			struct nmreq_port_hdr *req =
2728 				(struct nmreq_port_hdr *)(uintptr_t)hdr->nr_body;
2729 			/* Build a nmreq_register out of the nmreq_port_hdr,
2730 			 * so that we can call netmap_get_vale_na(). */
2731 			struct nmreq_register regreq;
2732 			bzero(&regreq, sizeof(regreq));
2733 			regreq.nr_mode = NR_REG_ALL_NIC;
2734 
2735 			/* For now we only support virtio-net headers, and only for
2736 			 * VALE ports, but this may change in the future. Valid lengths
2737 			 * for the virtio-net header are 0 (no header), 10 and 12. */
2738 			if (req->nr_hdr_len != 0 &&
2739 				req->nr_hdr_len != sizeof(struct nm_vnet_hdr) &&
2740 					req->nr_hdr_len != 12) {
2741 				if (netmap_verbose)
2742 					nm_prerr("invalid hdr_len %u", req->nr_hdr_len);
2743 				error = EINVAL;
2744 				break;
2745 			}
2746 			NMG_LOCK();
2747 			hdr->nr_reqtype = NETMAP_REQ_REGISTER;
2748 			hdr->nr_body = (uintptr_t)&regreq;
2749 			error = netmap_get_vale_na(hdr, &na, NULL, 0);
2750 			hdr->nr_reqtype = NETMAP_REQ_PORT_HDR_SET;
2751 			hdr->nr_body = (uintptr_t)req;
2752 			if (na && !error) {
2753 				struct netmap_vp_adapter *vpna =
2754 					(struct netmap_vp_adapter *)na;
2755 				na->virt_hdr_len = req->nr_hdr_len;
2756 				if (na->virt_hdr_len) {
2757 					vpna->mfs = NETMAP_BUF_SIZE(na);
2758 				}
2759 				if (netmap_verbose)
2760 					nm_prinf("Using vnet_hdr_len %d for %p", na->virt_hdr_len, na);
2761 				netmap_adapter_put(na);
2762 			} else if (!na) {
2763 				error = ENXIO;
2764 			}
2765 			NMG_UNLOCK();
2766 			break;
2767 		}
2768 
2769 		case NETMAP_REQ_PORT_HDR_GET: {
2770 			/* Get vnet-header length for this netmap port */
2771 			struct nmreq_port_hdr *req =
2772 				(struct nmreq_port_hdr *)(uintptr_t)hdr->nr_body;
2773 			/* Build a nmreq_register out of the nmreq_port_hdr,
2774 			 * so that we can call netmap_get_na(). */
2775 			struct nmreq_register regreq;
2776 			struct ifnet *ifp;
2777 
2778 			bzero(&regreq, sizeof(regreq));
2779 			regreq.nr_mode = NR_REG_ALL_NIC;
2780 			NMG_LOCK();
2781 			hdr->nr_reqtype = NETMAP_REQ_REGISTER;
2782 			hdr->nr_body = (uintptr_t)&regreq;
2783 			error = netmap_get_na(hdr, &na, &ifp, NULL, 0);
2784 			hdr->nr_reqtype = NETMAP_REQ_PORT_HDR_GET;
2785 			hdr->nr_body = (uintptr_t)req;
2786 			if (na && !error) {
2787 				req->nr_hdr_len = na->virt_hdr_len;
2788 			}
2789 			netmap_unget_na(na, ifp);
2790 			NMG_UNLOCK();
2791 			break;
2792 		}
2793 
2794 		case NETMAP_REQ_VALE_NEWIF: {
2795 			error = nm_vi_create(hdr);
2796 			break;
2797 		}
2798 
2799 		case NETMAP_REQ_VALE_DELIF: {
2800 			error = nm_vi_destroy(hdr->nr_name);
2801 			break;
2802 		}
2803 
2804 		case NETMAP_REQ_VALE_POLLING_ENABLE:
2805 		case NETMAP_REQ_VALE_POLLING_DISABLE: {
2806 			error = nm_bdg_polling(hdr);
2807 			break;
2808 		}
2809 #endif  /* WITH_VALE */
2810 		case NETMAP_REQ_POOLS_INFO_GET: {
2811 			/* Get information from the memory allocator used for
2812 			 * hdr->nr_name. */
2813 			struct nmreq_pools_info *req =
2814 				(struct nmreq_pools_info *)(uintptr_t)hdr->nr_body;
2815 			NMG_LOCK();
2816 			do {
2817 				/* Build a nmreq_register out of the nmreq_pools_info,
2818 				 * so that we can call netmap_get_na(). */
2819 				struct nmreq_register regreq;
2820 				bzero(&regreq, sizeof(regreq));
2821 				regreq.nr_mem_id = req->nr_mem_id;
2822 				regreq.nr_mode = NR_REG_ALL_NIC;
2823 
2824 				hdr->nr_reqtype = NETMAP_REQ_REGISTER;
2825 				hdr->nr_body = (uintptr_t)&regreq;
2826 				error = netmap_get_na(hdr, &na, &ifp, NULL, 1 /* create */);
2827 				hdr->nr_reqtype = NETMAP_REQ_POOLS_INFO_GET; /* reset type */
2828 				hdr->nr_body = (uintptr_t)req; /* reset nr_body */
2829 				if (error) {
2830 					na = NULL;
2831 					ifp = NULL;
2832 					break;
2833 				}
2834 				nmd = na->nm_mem; /* grab the memory allocator */
2835 				if (nmd == NULL) {
2836 					error = EINVAL;
2837 					break;
2838 				}
2839 
2840 				/* Finalize the memory allocator, get the pools
2841 				 * information and release the allocator. */
2842 				error = netmap_mem_finalize(nmd, na);
2843 				if (error) {
2844 					break;
2845 				}
2846 				error = netmap_mem_pools_info_get(req, nmd);
2847 				netmap_mem_drop(na);
2848 			} while (0);
2849 			netmap_unget_na(na, ifp);
2850 			NMG_UNLOCK();
2851 			break;
2852 		}
2853 
2854 		case NETMAP_REQ_CSB_ENABLE: {
2855 			struct nmreq_option *opt;
2856 
2857 			opt = nmreq_getoption(hdr, NETMAP_REQ_OPT_CSB);
2858 			if (opt == NULL) {
2859 				error = EINVAL;
2860 			} else {
2861 				struct nmreq_opt_csb *csbo =
2862 					(struct nmreq_opt_csb *)opt;
2863 				NMG_LOCK();
2864 				error = netmap_csb_validate(priv, csbo);
2865 				NMG_UNLOCK();
2866 				opt->nro_status = error;
2867 			}
2868 			break;
2869 		}
2870 
2871 		case NETMAP_REQ_SYNC_KLOOP_START: {
2872 			error = netmap_sync_kloop(priv, hdr);
2873 			break;
2874 		}
2875 
2876 		case NETMAP_REQ_SYNC_KLOOP_STOP: {
2877 			error = netmap_sync_kloop_stop(priv);
2878 			break;
2879 		}
2880 
2881 		default: {
2882 			error = EINVAL;
2883 			break;
2884 		}
2885 		}
2886 		/* Write back request body to userspace and reset the
2887 		 * user-space pointer. */
2888 		error = nmreq_copyout(hdr, error);
2889 		break;
2890 	}
2891 
2892 	case NIOCTXSYNC:
2893 	case NIOCRXSYNC: {
2894 		if (unlikely(priv->np_nifp == NULL)) {
2895 			error = ENXIO;
2896 			break;
2897 		}
2898 		mb(); /* make sure following reads are not from cache */
2899 
2900 		if (unlikely(priv->np_csb_atok_base)) {
2901 			nm_prerr("Invalid sync in CSB mode");
2902 			error = EBUSY;
2903 			break;
2904 		}
2905 
2906 		na = priv->np_na;      /* we have a reference */
2907 
2908 		mbq_init(&q);
2909 		t = (cmd == NIOCTXSYNC ? NR_TX : NR_RX);
2910 		krings = NMR(na, t);
2911 		qfirst = priv->np_qfirst[t];
2912 		qlast = priv->np_qlast[t];
2913 		sync_flags = priv->np_sync_flags;
2914 
2915 		for (i = qfirst; i < qlast; i++) {
2916 			struct netmap_kring *kring = krings[i];
2917 			struct netmap_ring *ring = kring->ring;
2918 
2919 			if (unlikely(nm_kr_tryget(kring, 1, &error))) {
2920 				error = (error ? EIO : 0);
2921 				continue;
2922 			}
2923 
2924 			if (cmd == NIOCTXSYNC) {
2925 				if (netmap_debug & NM_DEBUG_TXSYNC)
2926 					nm_prinf("pre txsync ring %d cur %d hwcur %d",
2927 					    i, ring->cur,
2928 					    kring->nr_hwcur);
2929 				if (nm_txsync_prologue(kring, ring) >= kring->nkr_num_slots) {
2930 					netmap_ring_reinit(kring);
2931 				} else if (kring->nm_sync(kring, sync_flags | NAF_FORCE_RECLAIM) == 0) {
2932 					nm_sync_finalize(kring);
2933 				}
2934 				if (netmap_debug & NM_DEBUG_TXSYNC)
2935 					nm_prinf("post txsync ring %d cur %d hwcur %d",
2936 					    i, ring->cur,
2937 					    kring->nr_hwcur);
2938 			} else {
2939 				if (nm_rxsync_prologue(kring, ring) >= kring->nkr_num_slots) {
2940 					netmap_ring_reinit(kring);
2941 				}
2942 				if (nm_may_forward_up(kring)) {
2943 					/* transparent forwarding, see netmap_poll() */
2944 					netmap_grab_packets(kring, &q, netmap_fwd);
2945 				}
2946 				if (kring->nm_sync(kring, sync_flags | NAF_FORCE_READ) == 0) {
2947 					nm_sync_finalize(kring);
2948 				}
2949 				ring_timestamp_set(ring);
2950 			}
2951 			nm_kr_put(kring);
2952 		}
2953 
2954 		if (mbq_peek(&q)) {
2955 			netmap_send_up(na->ifp, &q);
2956 		}
2957 
2958 		break;
2959 	}
2960 
2961 	default: {
2962 		return netmap_ioctl_legacy(priv, cmd, data, td);
2963 		break;
2964 	}
2965 	}
2966 
2967 	return (error);
2968 }
2969 
2970 size_t
2971 nmreq_size_by_type(uint16_t nr_reqtype)
2972 {
2973 	switch (nr_reqtype) {
2974 	case NETMAP_REQ_REGISTER:
2975 		return sizeof(struct nmreq_register);
2976 	case NETMAP_REQ_PORT_INFO_GET:
2977 		return sizeof(struct nmreq_port_info_get);
2978 	case NETMAP_REQ_VALE_ATTACH:
2979 		return sizeof(struct nmreq_vale_attach);
2980 	case NETMAP_REQ_VALE_DETACH:
2981 		return sizeof(struct nmreq_vale_detach);
2982 	case NETMAP_REQ_VALE_LIST:
2983 		return sizeof(struct nmreq_vale_list);
2984 	case NETMAP_REQ_PORT_HDR_SET:
2985 	case NETMAP_REQ_PORT_HDR_GET:
2986 		return sizeof(struct nmreq_port_hdr);
2987 	case NETMAP_REQ_VALE_NEWIF:
2988 		return sizeof(struct nmreq_vale_newif);
2989 	case NETMAP_REQ_VALE_DELIF:
2990 	case NETMAP_REQ_SYNC_KLOOP_STOP:
2991 	case NETMAP_REQ_CSB_ENABLE:
2992 		return 0;
2993 	case NETMAP_REQ_VALE_POLLING_ENABLE:
2994 	case NETMAP_REQ_VALE_POLLING_DISABLE:
2995 		return sizeof(struct nmreq_vale_polling);
2996 	case NETMAP_REQ_POOLS_INFO_GET:
2997 		return sizeof(struct nmreq_pools_info);
2998 	case NETMAP_REQ_SYNC_KLOOP_START:
2999 		return sizeof(struct nmreq_sync_kloop_start);
3000 	}
3001 	return 0;
3002 }
3003 
3004 static size_t
3005 nmreq_opt_size_by_type(uint32_t nro_reqtype, uint64_t nro_size)
3006 {
3007 	size_t rv = sizeof(struct nmreq_option);
3008 #ifdef NETMAP_REQ_OPT_DEBUG
3009 	if (nro_reqtype & NETMAP_REQ_OPT_DEBUG)
3010 		return (nro_reqtype & ~NETMAP_REQ_OPT_DEBUG);
3011 #endif /* NETMAP_REQ_OPT_DEBUG */
3012 	switch (nro_reqtype) {
3013 #ifdef WITH_EXTMEM
3014 	case NETMAP_REQ_OPT_EXTMEM:
3015 		rv = sizeof(struct nmreq_opt_extmem);
3016 		break;
3017 #endif /* WITH_EXTMEM */
3018 	case NETMAP_REQ_OPT_SYNC_KLOOP_EVENTFDS:
3019 		if (nro_size >= rv)
3020 			rv = nro_size;
3021 		break;
3022 	case NETMAP_REQ_OPT_CSB:
3023 		rv = sizeof(struct nmreq_opt_csb);
3024 		break;
3025 	case NETMAP_REQ_OPT_SYNC_KLOOP_MODE:
3026 		rv = sizeof(struct nmreq_opt_sync_kloop_mode);
3027 		break;
3028 	}
3029 	/* subtract the common header */
3030 	return rv - sizeof(struct nmreq_option);
3031 }
3032 
3033 /*
3034  * nmreq_copyin: create an in-kernel version of the request.
3035  *
3036  * We build the following data structure:
3037  *
3038  * hdr -> +-------+                buf
3039  *        |       |          +---------------+
3040  *        +-------+          |usr body ptr   |
3041  *        |options|-.        +---------------+
3042  *        +-------+ |        |usr options ptr|
3043  *        |body   |--------->+---------------+
3044  *        +-------+ |        |               |
3045  *                  |        |  copy of body |
3046  *                  |        |               |
3047  *                  |        +---------------+
3048  *                  |        |    NULL       |
3049  *                  |        +---------------+
3050  *                  |    .---|               |\
3051  *                  |    |   +---------------+ |
3052  *                  | .------|               | |
3053  *                  | |  |   +---------------+  \ option table
3054  *                  | |  |   |      ...      |  / indexed by option
3055  *                  | |  |   +---------------+ |  type
3056  *                  | |  |   |               | |
3057  *                  | |  |   +---------------+/
3058  *                  | |  |   |usr next ptr 1 |
3059  *                  `-|----->+---------------+
3060  *                    |  |   | copy of opt 1 |
3061  *                    |  |   |               |
3062  *                    |  | .-| nro_next      |
3063  *                    |  | | +---------------+
3064  *                    |  | | |usr next ptr 2 |
3065  *                    |  `-`>+---------------+
3066  *                    |      | copy of opt 2 |
3067  *                    |      |               |
3068  *                    |    .-| nro_next      |
3069  *                    |    | +---------------+
3070  *                    |    | |               |
3071  *                    ~    ~ ~      ...      ~
3072  *                    |    .-|               |
3073  *                    `----->+---------------+
3074  *                         | |usr next ptr n |
3075  *                         `>+---------------+
3076  *                           | copy of opt n |
3077  *                           |               |
3078  *                           | nro_next(NULL)|
3079  *                           +---------------+
3080  *
3081  * The options and body fields of the hdr structure are overwritten
3082  * with in-kernel valid pointers inside the buf. The original user
3083  * pointers are saved in the buf and restored on copyout.
3084  * The list of options is copied and the pointers adjusted. Each
3085  * original pointer is saved right before the option it belongs to.
3086  *
3087  * The option table has an entry for every available option.  Entries
3088  * for options that have not been passed contain NULL.
3089  *
3090  */
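/* As a concrete instance of the layout above, a NETMAP_REQ_REGISTER
 * request carrying a single NETMAP_REQ_OPT_CSB option needs
 *
 *	bufsz = 2 * sizeof(void *)			saved user pointers
 *	      + sizeof(struct nmreq_register)		copy of the body
 *	      + NETMAP_REQ_OPT_MAX * sizeof(void *)	option table
 *	      + sizeof(void *)				saved user next pointer
 *	      + sizeof(struct nmreq_opt_csb)		copy of the option
 *
 * mirroring the accounting in nmreq_copyin() below.
 */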
3091 
3092 int
3093 nmreq_copyin(struct nmreq_header *hdr, int nr_body_is_user)
3094 {
3095 	size_t rqsz, optsz, bufsz;
3096 	int error = 0;
3097 	char *ker = NULL, *p;
3098 	struct nmreq_option **next, *src, **opt_tab;
3099 	struct nmreq_option buf;
3100 	uint64_t *ptrs;
3101 
3102 	if (hdr->nr_reserved) {
3103 		if (netmap_verbose)
3104 			nm_prerr("nr_reserved must be zero");
3105 		return EINVAL;
3106 	}
3107 
3108 	if (!nr_body_is_user)
3109 		return 0;
3110 
3111 	hdr->nr_reserved = nr_body_is_user;
3112 
3113 	/* compute the total size of the buffer */
3114 	rqsz = nmreq_size_by_type(hdr->nr_reqtype);
3115 	if (rqsz > NETMAP_REQ_MAXSIZE) {
3116 		error = EMSGSIZE;
3117 		goto out_err;
3118 	}
3119 	if ((rqsz && hdr->nr_body == (uintptr_t)NULL) ||
3120 		(!rqsz && hdr->nr_body != (uintptr_t)NULL)) {
3121 		/* Request body expected, but not found; or
3122 		 * request body found but unexpected. */
3123 		if (netmap_verbose)
3124 			nm_prerr("nr_body expected but not found, or vice versa");
3125 		error = EINVAL;
3126 		goto out_err;
3127 	}
3128 
3129 	bufsz = 2 * sizeof(void *) + rqsz +
3130 		NETMAP_REQ_OPT_MAX * sizeof(opt_tab);
3131 	/* compute the size of the buf below the option table.
3132 	 * It must contain a copy of every received option structure.
3133 	 * For every option we also need to store a copy of the user
3134 	 * list pointer.
3135 	 */
3136 	optsz = 0;
3137 	for (src = (struct nmreq_option *)(uintptr_t)hdr->nr_options; src;
3138 	     src = (struct nmreq_option *)(uintptr_t)buf.nro_next)
3139 	{
3140 		error = copyin(src, &buf, sizeof(*src));
3141 		if (error)
3142 			goto out_err;
3143 		optsz += sizeof(*src);
3144 		optsz += nmreq_opt_size_by_type(buf.nro_reqtype, buf.nro_size);
3145 		if (rqsz + optsz > NETMAP_REQ_MAXSIZE) {
3146 			error = EMSGSIZE;
3147 			goto out_err;
3148 		}
3149 		bufsz += sizeof(void *);
3150 	}
3151 	bufsz += optsz;
3152 
3153 	ker = nm_os_malloc(bufsz);
3154 	if (ker == NULL) {
3155 		error = ENOMEM;
3156 		goto out_err;
3157 	}
3158 	p = ker;	/* write pointer into the buffer */
3159 
3160 	/* make a copy of the user pointers */
3161 	ptrs = (uint64_t*)p;
3162 	*ptrs++ = hdr->nr_body;
3163 	*ptrs++ = hdr->nr_options;
3164 	p = (char *)ptrs;
3165 
3166 	/* copy the body */
3167 	error = copyin((void *)(uintptr_t)hdr->nr_body, p, rqsz);
3168 	if (error)
3169 		goto out_restore;
3170 	/* overwrite the user pointer with the in-kernel one */
3171 	hdr->nr_body = (uintptr_t)p;
3172 	p += rqsz;
3173 	/* start of the options table */
3174 	opt_tab = (struct nmreq_option **)p;
3175 	p += sizeof(opt_tab) * NETMAP_REQ_OPT_MAX;
3176 
3177 	/* copy the options */
3178 	next = (struct nmreq_option **)&hdr->nr_options;
3179 	src = *next;
3180 	while (src) {
3181 		struct nmreq_option *opt;
3182 
3183 		/* copy the option header */
3184 		ptrs = (uint64_t *)p;
3185 		opt = (struct nmreq_option *)(ptrs + 1);
3186 		error = copyin(src, opt, sizeof(*src));
3187 		if (error)
3188 			goto out_restore;
3189 		/* make a copy of the user next pointer */
3190 		*ptrs = opt->nro_next;
3191 		/* overwrite the user pointer with the in-kernel one */
3192 		*next = opt;
3193 
3194 		/* initialize the option as not supported.
3195 		 * Recognized options will update this field.
3196 		 */
3197 		opt->nro_status = EOPNOTSUPP;
3198 
3199 		/* check for invalid types */
3200 		if (opt->nro_reqtype < 1) {
3201 			if (netmap_verbose)
3202 				nm_prinf("invalid option type: %u", opt->nro_reqtype);
3203 			opt->nro_status = EINVAL;
3204 			error = EINVAL;
3205 			goto next;
3206 		}
3207 
3208 		if (opt->nro_reqtype >= NETMAP_REQ_OPT_MAX) {
3209 			/* opt->nro_status is already EOPNOTSUPP */
3210 			error = EOPNOTSUPP;
3211 			goto next;
3212 		}
3213 
3214 		/* if the type is valid, index the option in the table
3215 		 * unless it is a duplicate.
3216 		 */
3217 		if (opt_tab[opt->nro_reqtype] != NULL) {
3218 			if (netmap_verbose)
3219 				nm_prinf("duplicate option: %u", opt->nro_reqtype);
3220 			opt->nro_status = EINVAL;
3221 			opt_tab[opt->nro_reqtype]->nro_status = EINVAL;
3222 			error = EINVAL;
3223 			goto next;
3224 		}
3225 		opt_tab[opt->nro_reqtype] = opt;
3226 
3227 		p = (char *)(opt + 1);
3228 
3229 		/* copy the option body */
3230 		optsz = nmreq_opt_size_by_type(opt->nro_reqtype,
3231 						opt->nro_size);
3232 		if (optsz) {
3233 			/* the option body follows the option header */
3234 			error = copyin(src + 1, p, optsz);
3235 			if (error)
3236 				goto out_restore;
3237 			p += optsz;
3238 		}
3239 
3240 	next:
3241 		/* move to next option */
3242 		next = (struct nmreq_option **)&opt->nro_next;
3243 		src = *next;
3244 	}
3245 	if (error)
3246 		nmreq_copyout(hdr, error);
3247 	return error;
3248 
3249 out_restore:
3250 	ptrs = (uint64_t *)ker;
3251 	hdr->nr_body = *ptrs++;
3252 	hdr->nr_options = *ptrs++;
3253 	hdr->nr_reserved = 0;
3254 	nm_os_free(ker);
3255 out_err:
3256 	return error;
3257 }
3258 
3259 static int
3260 nmreq_copyout(struct nmreq_header *hdr, int rerror)
3261 {
3262 	struct nmreq_option *src, *dst;
3263 	void *ker = (void *)(uintptr_t)hdr->nr_body, *bufstart;
3264 	uint64_t *ptrs;
3265 	size_t bodysz;
3266 	int error;
3267 
3268 	if (!hdr->nr_reserved)
3269 		return rerror;
3270 
3271 	/* restore the user pointers in the header */
3272 	ptrs = (uint64_t *)ker - 2;
3273 	bufstart = ptrs;
3274 	hdr->nr_body = *ptrs++;
3275 	src = (struct nmreq_option *)(uintptr_t)hdr->nr_options;
3276 	hdr->nr_options = *ptrs;
3277 
3278 	if (!rerror) {
3279 		/* copy the body */
3280 		bodysz = nmreq_size_by_type(hdr->nr_reqtype);
3281 		error = copyout(ker, (void *)(uintptr_t)hdr->nr_body, bodysz);
3282 		if (error) {
3283 			rerror = error;
3284 			goto out;
3285 		}
3286 	}
3287 
3288 	/* copy the options */
3289 	dst = (struct nmreq_option *)(uintptr_t)hdr->nr_options;
3290 	while (src) {
3291 		size_t optsz;
3292 		uint64_t next;
3293 
3294 		/* restore the user pointer */
3295 		next = src->nro_next;
3296 		ptrs = (uint64_t *)src - 1;
3297 		src->nro_next = *ptrs;
3298 
3299 		/* always copy the option header */
3300 		error = copyout(src, dst, sizeof(*src));
3301 		if (error) {
3302 			rerror = error;
3303 			goto out;
3304 		}
3305 
3306 		/* copy the option body only if there was no error */
3307 		if (!rerror && !src->nro_status) {
3308 			optsz = nmreq_opt_size_by_type(src->nro_reqtype,
3309 							src->nro_size);
3310 			if (optsz) {
3311 				error = copyout(src + 1, dst + 1, optsz);
3312 				if (error) {
3313 					rerror = error;
3314 					goto out;
3315 				}
3316 			}
3317 		}
3318 		src = (struct nmreq_option *)(uintptr_t)next;
3319 		dst = (struct nmreq_option *)(uintptr_t)*ptrs;
3320 	}
3321 
3322 
3323 out:
3324 	hdr->nr_reserved = 0;
3325 	nm_os_free(bufstart);
3326 	return rerror;
3327 }
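
/*
 * For reference, the kernel-side buffer built by nmreq_copyin() and
 * unwound by nmreq_copyout() has the layout sketched below. This is
 * an illustration reconstructed from the two functions above, not a
 * normative description:
 *
 *	bufstart -> +-----------------------------+
 *	            | saved user nr_body pointer  |
 *	            | saved user nr_options ptr   |
 *	     ker -> +-----------------------------+
 *	            | copy of the request body    |
 *	            +-----------------------------+
 *	 opt_tab -> | NETMAP_REQ_OPT_MAX option   |
 *	            | pointers, indexed by        |
 *	            | nro_reqtype                 |
 *	            +-----------------------------+
 *	            | saved user nro_next pointer |  \ one group for
 *	            | option header               |  | each option in
 *	            | option body (if any)        |  / the request
 *	            +-----------------------------+
 */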
3328 
3329 struct nmreq_option *
3330 nmreq_getoption(struct nmreq_header *hdr, uint16_t reqtype)
3331 {
3332 	struct nmreq_option **opt_tab;
3333 
3334 	if (!hdr->nr_options)
3335 		return NULL;
3336 
3337 	opt_tab = (struct nmreq_option **)((uintptr_t)hdr->nr_options) -
3338 	    (NETMAP_REQ_OPT_MAX + 1);
3339 	return opt_tab[reqtype];
3340 }
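
/*
 * Illustrative use from a request handler (a sketch only: the option
 * type shown is just an example and the helper is hypothetical):
 *
 *	struct nmreq_option *opt;
 *
 *	opt = nmreq_getoption(hdr, NETMAP_REQ_OPT_CSB);
 *	if (opt != NULL) {
 *		error = my_handle_csb_option(opt); // hypothetical helper
 *		opt->nro_status = error; // replace the default EOPNOTSUPP
 *	}
 *
 * Any option left with nro_status == EOPNOTSUPP makes
 * nmreq_checkoptions() below fail with EOPNOTSUPP.
 */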
3341 
3342 static int
3343 nmreq_checkoptions(struct nmreq_header *hdr)
3344 {
3345 	struct nmreq_option *opt;
3346 	/* return an error if any option is still
3347 	 * marked as not supported
3348 	 */
3349 
3350 	for (opt = (struct nmreq_option *)(uintptr_t)hdr->nr_options; opt;
3351 	     opt = (struct nmreq_option *)(uintptr_t)opt->nro_next)
3352 		if (opt->nro_status == EOPNOTSUPP)
3353 			return EOPNOTSUPP;
3354 
3355 	return 0;
3356 }
3357 
3358 /*
3359  * select(2) and poll(2) handlers for the "netmap" device.
3360  *
3361  * Can be called for one or more queues.
3362  * Returns the event mask corresponding to ready events.
3363  * If there are no ready events (and 'sr' is not NULL), do a
3364  * selrecord on either the individual selinfo or on the global one.
3365  * Device-dependent parts (locking and sync of tx/rx rings)
3366  * are done through callbacks.
3367  *
3368  * On linux, arguments are really pwait, the poll table, and 'td' is a struct file *.
3369  * The first one is remapped to pwait, as selrecord() uses the name as a
3370  * hidden argument.
3371  */
3372 int
3373 netmap_poll(struct netmap_priv_d *priv, int events, NM_SELRECORD_T *sr)
3374 {
3375 	struct netmap_adapter *na;
3376 	struct netmap_kring *kring;
3377 	struct netmap_ring *ring;
3378 	u_int i, want[NR_TXRX], revents = 0;
3379 	NM_SELINFO_T *si[NR_TXRX];
3380 #define want_tx want[NR_TX]
3381 #define want_rx want[NR_RX]
3382 	struct mbq q;	/* packets from RX hw queues to host stack */
3383 
3384 	/*
3385 	 * In order to avoid nested locks, we need to "double check"
3386 	 * txsync and rxsync if we decide to do a selrecord().
3387 	 * retry_tx (and retry_rx, later) prevent looping forever.
3388 	 */
3389 	int retry_tx = 1, retry_rx = 1;
3390 
3391 	/* Transparent mode: send_down is 1 if we have found some
3392 	 * packets to forward (host RX ring --> NIC) during the rx
3393 	 * scan and we have not sent them down to the NIC yet.
3394 	 * Transparent mode requires binding all rings to a single
3395 	 * file descriptor.
3396 	 */
3397 	int send_down = 0;
3398 	int sync_flags = priv->np_sync_flags;
3399 
3400 	mbq_init(&q);
3401 
3402 	if (unlikely(priv->np_nifp == NULL)) {
3403 		return POLLERR;
3404 	}
3405 	mb(); /* make sure following reads are not from cache */
3406 
3407 	na = priv->np_na;
3408 
3409 	if (unlikely(!nm_netmap_on(na)))
3410 		return POLLERR;
3411 
3412 	if (unlikely(priv->np_csb_atok_base)) {
3413 		nm_prerr("Invalid poll in CSB mode");
3414 		return POLLERR;
3415 	}
3416 
3417 	if (netmap_debug & NM_DEBUG_ON)
3418 		nm_prinf("device %s events 0x%x", na->name, events);
3419 	want_tx = events & (POLLOUT | POLLWRNORM);
3420 	want_rx = events & (POLLIN | POLLRDNORM);
3421 
3422 	/*
3423 	 * If the card has more than one queue AND the file descriptor is
3424 	 * bound to all of them, we sleep on the "global" selinfo, otherwise
3425  * we sleep on the individual selinfos (FreeBSD only allows two
3426  * selinfos per file descriptor).
3427  * The interrupt routine in the driver wakes one or the other
3428  * (or both), depending on which clients are active.
3429 	 *
3430 	 * rxsync() is only called if we run out of buffers on a POLLIN.
3431 	 * txsync() is called if we run out of buffers on POLLOUT, or
3432 	 * there are pending packets to send. The latter can be disabled
3433 	 * by passing NETMAP_NO_TX_POLL in the NIOCREGIF call.
3434 	 */
3435 	si[NR_RX] = priv->np_si[NR_RX];
3436 	si[NR_TX] = priv->np_si[NR_TX];
3437 
3438 #ifdef __FreeBSD__
3439 	/*
3440 	 * We start with a lock free round which is cheap if we have
3441 	 * slots available. If this fails, then lock and call the sync
3442 	 * routines. We can't do this on Linux, as the contract says
3443 	 * that we must call nm_os_selrecord() unconditionally.
3444 	 */
3445 	if (want_tx) {
3446 		const enum txrx t = NR_TX;
3447 		for (i = priv->np_qfirst[t]; i < priv->np_qlast[t]; i++) {
3448 			kring = NMR(na, t)[i];
3449 			if (kring->ring->cur != kring->ring->tail) {
3450 				/* Some unseen TX space is available, so
3451 				 * we don't need to run txsync. */
3452 				revents |= want[t];
3453 				want[t] = 0;
3454 				break;
3455 			}
3456 		}
3457 	}
3458 	if (want_rx) {
3459 		const enum txrx t = NR_RX;
3460 		int rxsync_needed = 0;
3461 
3462 		for (i = priv->np_qfirst[t]; i < priv->np_qlast[t]; i++) {
3463 			kring = NMR(na, t)[i];
3464 			if (kring->ring->cur == kring->ring->tail
3465 				|| kring->rhead != kring->ring->head) {
3466 				/* There are no unseen packets on this ring,
3467 				 * or there are some buffers to be returned
3468 				 * to the netmap port. We therefore go ahead
3469 				 * and run rxsync. */
3470 				rxsync_needed = 1;
3471 				break;
3472 			}
3473 		}
3474 		if (!rxsync_needed) {
3475 			revents |= want_rx;
3476 			want_rx = 0;
3477 		}
3478 	}
3479 #endif
3480 
3481 #ifdef linux
3482 	/* The selrecord must be unconditional on linux. */
3483 	nm_os_selrecord(sr, si[NR_RX]);
3484 	nm_os_selrecord(sr, si[NR_TX]);
3485 #endif /* linux */
3486 
3487 	/*
3488 	 * If we want to push packets out (priv->np_txpoll) or
3489 	 * want_tx is still set, we must issue txsync calls
3490 	 * (on all rings, so that the tx rings do not stall).
3491 	 * Fortunately, normal tx mode has np_txpoll set.
3492 	 */
3493 	if (priv->np_txpoll || want_tx) {
3494 		/*
3495 		 * The first round checks if anyone is ready; if not,
3496 		 * do a selrecord and another round to handle races.
3497 		 * want_tx goes to 0 if any space is found, and is
3498 		 * used to skip rings with no pending transmissions.
3499 		 */
3500 flush_tx:
3501 		for (i = priv->np_qfirst[NR_TX]; i < priv->np_qlast[NR_TX]; i++) {
3502 			int found = 0;
3503 
3504 			kring = na->tx_rings[i];
3505 			ring = kring->ring;
3506 
3507 			/*
3508 			 * Don't try to txsync this TX ring if we already found some
3509 			 * space in some of the TX rings (want_tx == 0) and there are no
3510 			 * TX slots in this ring that need to be flushed to the NIC
3511 			 * (head == hwcur).
3512 			 */
3513 			if (!send_down && !want_tx && ring->head == kring->nr_hwcur)
3514 				continue;
3515 
3516 			if (nm_kr_tryget(kring, 1, &revents))
3517 				continue;
3518 
3519 			if (nm_txsync_prologue(kring, ring) >= kring->nkr_num_slots) {
3520 				netmap_ring_reinit(kring);
3521 				revents |= POLLERR;
3522 			} else {
3523 				if (kring->nm_sync(kring, sync_flags))
3524 					revents |= POLLERR;
3525 				else
3526 					nm_sync_finalize(kring);
3527 			}
3528 
3529 			/*
3530 			 * If we found new slots, notify potential
3531 			 * listeners on the same ring.
3532 			 * Since we just did a txsync, look at the copies
3533 			 * of cur,tail in the kring.
3534 			 */
3535 			found = kring->rcur != kring->rtail;
3536 			nm_kr_put(kring);
3537 			if (found) { /* notify other listeners */
3538 				revents |= want_tx;
3539 				want_tx = 0;
3540 #ifndef linux
3541 				kring->nm_notify(kring, 0);
3542 #endif /* !linux */
3543 			}
3544 		}
3545 		/* if there were any packets to forward, we must have handled them by now */
3546 		send_down = 0;
3547 		if (want_tx && retry_tx && sr) {
3548 #ifndef linux
3549 			nm_os_selrecord(sr, si[NR_TX]);
3550 #endif /* !linux */
3551 			retry_tx = 0;
3552 			goto flush_tx;
3553 		}
3554 	}
3555 
3556 	/*
3557 	 * If want_rx is still set, scan the receive rings.
3558 	 * Do it on all rings, because otherwise some rings could starve.
3559 	 */
3560 	if (want_rx) {
3561 		/* two rounds here for race avoidance */
3562 do_retry_rx:
3563 		for (i = priv->np_qfirst[NR_RX]; i < priv->np_qlast[NR_RX]; i++) {
3564 			int found = 0;
3565 
3566 			kring = na->rx_rings[i];
3567 			ring = kring->ring;
3568 
3569 			if (unlikely(nm_kr_tryget(kring, 1, &revents)))
3570 				continue;
3571 
3572 			if (nm_rxsync_prologue(kring, ring) >= kring->nkr_num_slots) {
3573 				netmap_ring_reinit(kring);
3574 				revents |= POLLERR;
3575 			}
3576 			/* now we can use kring->rcur, rtail */
3577 
3578 			/*
3579 			 * transparent mode support: collect packets from
3580 			 * hw rxring(s) that have been released by the user
3581 			 */
3582 			if (nm_may_forward_up(kring)) {
3583 				netmap_grab_packets(kring, &q, netmap_fwd);
3584 			}
3585 
3586 			/* Clear the NR_FORWARD flag anyway; it may be set
3587 			 * by the nm_sync() below only for the host RX ring (see
3588 			 * netmap_rxsync_from_host()). */
3589 			kring->nr_kflags &= ~NR_FORWARD;
3590 			if (kring->nm_sync(kring, sync_flags))
3591 				revents |= POLLERR;
3592 			else
3593 				nm_sync_finalize(kring);
3594 			send_down |= (kring->nr_kflags & NR_FORWARD);
3595 			ring_timestamp_set(ring);
3596 			found = kring->rcur != kring->rtail;
3597 			nm_kr_put(kring);
3598 			if (found) {
3599 				revents |= want_rx;
3600 				retry_rx = 0;
3601 #ifndef linux
3602 				kring->nm_notify(kring, 0);
3603 #endif /* !linux */
3604 			}
3605 		}
3606 
3607 #ifndef linux
3608 		if (retry_rx && sr) {
3609 			nm_os_selrecord(sr, si[NR_RX]);
3610 		}
3611 #endif /* !linux */
3612 		if (send_down || retry_rx) {
3613 			retry_rx = 0;
3614 			if (send_down)
3615 				goto flush_tx; /* and retry_rx */
3616 			else
3617 				goto do_retry_rx;
3618 		}
3619 	}
3620 
3621 	/*
3622 	 * Transparent mode: released bufs (i.e. between kring->nr_hwcur and
3623 	 * ring->head) marked with NS_FORWARD on hw rx rings are passed up
3624 	 * to the host stack.
3625 	 */
3626 
3627 	if (mbq_peek(&q)) {
3628 		netmap_send_up(na->ifp, &q);
3629 	}
3630 
3631 	return (revents);
3632 #undef want_tx
3633 #undef want_rx
3634 }
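
/*
 * Illustrative userspace counterpart (a sketch: error handling and
 * ring manipulation omitted; 'fd' is assumed to be a /dev/netmap
 * descriptor already bound with NIOCREGIF and mmap()ed):
 *
 *	struct pollfd pfd;
 *
 *	pfd.fd = fd;
 *	pfd.events = POLLIN;
 *	if (poll(&pfd, 1, 1000) > 0 && (pfd.revents & POLLIN)) {
 *		// the RX rings bound to fd have new packets: consume
 *		// the slots, advance head/cur, then poll() again to
 *		// trigger the next rxsync.
 *	}
 */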
3635 
3636 int
3637 nma_intr_enable(struct netmap_adapter *na, int onoff)
3638 {
3639 	bool changed = false;
3640 	enum txrx t;
3641 	int i;
3642 
3643 	for_rx_tx(t) {
3644 		for (i = 0; i < nma_get_nrings(na, t); i++) {
3645 			struct netmap_kring *kring = NMR(na, t)[i];
3646 			int on = !(kring->nr_kflags & NKR_NOINTR);
3647 
3648 			if (!!onoff != !!on) {
3649 				changed = true;
3650 			}
3651 			if (onoff) {
3652 				kring->nr_kflags &= ~NKR_NOINTR;
3653 			} else {
3654 				kring->nr_kflags |= NKR_NOINTR;
3655 			}
3656 		}
3657 	}
3658 
3659 	if (!changed) {
3660 		return 0; /* nothing to do */
3661 	}
3662 
3663 	if (!na->nm_intr) {
3664 		nm_prerr("Cannot %s interrupts for %s", onoff ? "enable" : "disable",
3665 		  na->name);
3666 		return -1;
3667 	}
3668 
3669 	na->nm_intr(na, onoff);
3670 
3671 	return 0;
3672 }
3673 
3674 
3675 /*-------------------- driver support routines -------------------*/
3676 
3677 /* default notify callback */
3678 static int
3679 netmap_notify(struct netmap_kring *kring, int flags)
3680 {
3681 	struct netmap_adapter *na = kring->notify_na;
3682 	enum txrx t = kring->tx;
3683 
3684 	nm_os_selwakeup(&kring->si);
3685 	/* optimization: avoid a wake up on the global
3686 	 * queue if nobody has registered for more
3687 	 * than one ring
3688 	 */
3689 	if (na->si_users[t] > 0)
3690 		nm_os_selwakeup(&na->si[t]);
3691 
3692 	return NM_IRQ_COMPLETED;
3693 }
3694 
3695 /* Called by all routines that create netmap_adapters.
3696  * Provide some defaults and get a reference to the
3697  * memory allocator.
3698  */
3699 int
3700 netmap_attach_common(struct netmap_adapter *na)
3701 {
3702 	if (!na->rx_buf_maxsize) {
3703 		/* Set a conservative default (larger is safer). */
3704 		na->rx_buf_maxsize = PAGE_SIZE;
3705 	}
3706 
3707 #ifdef __FreeBSD__
3708 	if (na->na_flags & NAF_HOST_RINGS && na->ifp) {
3709 		na->if_input = na->ifp->if_input; /* for netmap_send_up */
3710 	}
3711 	na->pdev = na; /* make sure netmap_mem_map() is called */
3712 #endif /* __FreeBSD__ */
3713 	if (na->na_flags & NAF_HOST_RINGS) {
3714 		if (na->num_host_rx_rings == 0)
3715 			na->num_host_rx_rings = 1;
3716 		if (na->num_host_tx_rings == 0)
3717 			na->num_host_tx_rings = 1;
3718 	}
3719 	if (na->nm_krings_create == NULL) {
3720 		/* we assume that we have been called by a driver,
3721 		 * since other port types all provide their own
3722 		 * nm_krings_create
3723 		 */
3724 		na->nm_krings_create = netmap_hw_krings_create;
3725 		na->nm_krings_delete = netmap_hw_krings_delete;
3726 	}
3727 	if (na->nm_notify == NULL)
3728 		na->nm_notify = netmap_notify;
3729 	na->active_fds = 0;
3730 
3731 	if (na->nm_mem == NULL) {
3732 		/* use the global allocator */
3733 		na->nm_mem = netmap_mem_get(&nm_mem);
3734 	}
3735 #ifdef WITH_VALE
3736 	if (na->nm_bdg_attach == NULL)
3737 		/* no special nm_bdg_attach callback. On VALE
3738 		 * attach, we need to interpose a bwrap
3739 		 */
3740 		na->nm_bdg_attach = netmap_default_bdg_attach;
3741 #endif
3742 
3743 	return 0;
3744 }
3745 
3746 /* Wrapper for the register callback provided by netmap-enabled
3747  * hardware drivers.
3748  * nm_iszombie(na) means that the driver module has been
3749  * unloaded, so we cannot call into it.
3750  * nm_os_ifnet_lock() must guarantee mutual exclusion with
3751  * module unloading.
3752  */
3753 static int
3754 netmap_hw_reg(struct netmap_adapter *na, int onoff)
3755 {
3756 	struct netmap_hw_adapter *hwna =
3757 		(struct netmap_hw_adapter*)na;
3758 	int error = 0;
3759 
3760 	nm_os_ifnet_lock();
3761 
3762 	if (nm_iszombie(na)) {
3763 		if (onoff) {
3764 			error = ENXIO;
3765 		} else if (na != NULL) {
3766 			na->na_flags &= ~NAF_NETMAP_ON;
3767 		}
3768 		goto out;
3769 	}
3770 
3771 	error = hwna->nm_hw_register(na, onoff);
3772 
3773 out:
3774 	nm_os_ifnet_unlock();
3775 
3776 	return error;
3777 }
3778 
3779 static void
3780 netmap_hw_dtor(struct netmap_adapter *na)
3781 {
3782 	if (na->ifp == NULL)
3783 		return;
3784 
3785 	NM_DETACH_NA(na->ifp);
3786 }
3787 
3788 
3789 /*
3790  * Allocate a netmap_adapter object, and initialize it from the
3791  * 'arg' passed by the driver on attach.
3792  * We allocate a block of memory of 'size' bytes, which has room
3793  * for struct netmap_adapter plus additional room private to
3794  * the caller.
3795  * Return 0 on success, ENOMEM otherwise.
3796  */
3797 int
3798 netmap_attach_ext(struct netmap_adapter *arg, size_t size, int override_reg)
3799 {
3800 	struct netmap_hw_adapter *hwna = NULL;
3801 	struct ifnet *ifp = NULL;
3802 
3803 	if (size < sizeof(struct netmap_hw_adapter)) {
3804 		if (netmap_debug & NM_DEBUG_ON)
3805 			nm_prerr("Invalid netmap adapter size %d", (int)size);
3806 		return EINVAL;
3807 	}
3808 
3809 	if (arg == NULL || arg->ifp == NULL) {
3810 		if (netmap_debug & NM_DEBUG_ON)
3811 			nm_prerr("either arg or arg->ifp is NULL");
3812 		return EINVAL;
3813 	}
3814 
3815 	if (arg->num_tx_rings == 0 || arg->num_rx_rings == 0) {
3816 		if (netmap_debug & NM_DEBUG_ON)
3817 			nm_prerr("%s: invalid rings tx %d rx %d",
3818 				arg->name, arg->num_tx_rings, arg->num_rx_rings);
3819 		return EINVAL;
3820 	}
3821 
3822 	ifp = arg->ifp;
3823 	if (NM_NA_CLASH(ifp)) {
3824 		/* If NA(ifp) is not null but there is no valid netmap
3825 		 * adapter it means that someone else is using the same
3826 		 * pointer (e.g. ax25_ptr on linux). This happens for
3827 		 * instance when also PF_RING is in use. */
3828 		nm_prerr("Error: netmap adapter hook is busy");
3829 		return EBUSY;
3830 	}
3831 
3832 	hwna = nm_os_malloc(size);
3833 	if (hwna == NULL)
3834 		goto fail;
3835 	hwna->up = *arg;
3836 	hwna->up.na_flags |= NAF_HOST_RINGS | NAF_NATIVE;
3837 	strlcpy(hwna->up.name, ifp->if_xname, sizeof(hwna->up.name));
3838 	if (override_reg) {
3839 		hwna->nm_hw_register = hwna->up.nm_register;
3840 		hwna->up.nm_register = netmap_hw_reg;
3841 	}
3842 	if (netmap_attach_common(&hwna->up)) {
3843 		nm_os_free(hwna);
3844 		goto fail;
3845 	}
3846 	netmap_adapter_get(&hwna->up);
3847 
3848 	NM_ATTACH_NA(ifp, &hwna->up);
3849 
3850 	nm_os_onattach(ifp);
3851 
3852 	if (arg->nm_dtor == NULL) {
3853 		hwna->up.nm_dtor = netmap_hw_dtor;
3854 	}
3855 
3856 	if_printf(ifp, "netmap queues/slots: TX %d/%d, RX %d/%d\n",
3857 	    hwna->up.num_tx_rings, hwna->up.num_tx_desc,
3858 	    hwna->up.num_rx_rings, hwna->up.num_rx_desc);
3859 	return 0;
3860 
3861 fail:
3862 	nm_prerr("fail, arg %p ifp %p na %p", arg, ifp, hwna);
3863 	return (hwna ? EINVAL : ENOMEM);
3864 }
3865 
3866 
3867 int
3868 netmap_attach(struct netmap_adapter *arg)
3869 {
3870 	return netmap_attach_ext(arg, sizeof(struct netmap_hw_adapter),
3871 			1 /* override nm_reg */);
3872 }
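
/*
 * Illustrative driver-side attach (a sketch; the 'mydrv_*' names are
 * hypothetical placeholders for the driver's own state and callbacks):
 *
 *	static int
 *	mydrv_netmap_attach(struct mydrv_softc *sc)
 *	{
 *		struct netmap_adapter na;
 *
 *		bzero(&na, sizeof(na));
 *		na.ifp = sc->ifp;
 *		na.num_tx_desc = sc->num_tx_desc;
 *		na.num_rx_desc = sc->num_rx_desc;
 *		na.num_tx_rings = na.num_rx_rings = sc->num_queues;
 *		na.nm_txsync = mydrv_netmap_txsync;
 *		na.nm_rxsync = mydrv_netmap_rxsync;
 *		na.nm_register = mydrv_netmap_reg;
 *		return (netmap_attach(&na));
 *	}
 */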
3873 
3874 
3875 void
3876 NM_DBG(netmap_adapter_get)(struct netmap_adapter *na)
3877 {
3878 	if (!na) {
3879 		return;
3880 	}
3881 
3882 	refcount_acquire(&na->na_refcount);
3883 }
3884 
3885 
3886 /* returns 1 iff the netmap_adapter is destroyed */
3887 int
3888 NM_DBG(netmap_adapter_put)(struct netmap_adapter *na)
3889 {
3890 	if (!na)
3891 		return 1;
3892 
3893 	if (!refcount_release(&na->na_refcount))
3894 		return 0;
3895 
3896 	if (na->nm_dtor)
3897 		na->nm_dtor(na);
3898 
3899 	if (na->tx_rings) { /* XXX should not happen */
3900 		if (netmap_debug & NM_DEBUG_ON)
3901 			nm_prerr("freeing leftover tx_rings");
3902 		na->nm_krings_delete(na);
3903 	}
3904 	netmap_pipe_dealloc(na);
3905 	if (na->nm_mem)
3906 		netmap_mem_put(na->nm_mem);
3907 	bzero(na, sizeof(*na));
3908 	nm_os_free(na);
3909 
3910 	return 1;
3911 }
3912 
3913 /* nm_krings_create callback for all hardware native adapters */
3914 int
3915 netmap_hw_krings_create(struct netmap_adapter *na)
3916 {
3917 	int ret = netmap_krings_create(na, 0);
3918 	if (ret == 0) {
3919 		/* initialize the mbq for the sw rx ring */
3920 		u_int lim = netmap_real_rings(na, NR_RX), i;
3921 		for (i = na->num_rx_rings; i < lim; i++) {
3922 			mbq_safe_init(&NMR(na, NR_RX)[i]->rx_queue);
3923 		}
3924 		nm_prdis("initialized sw rx queue %d", na->num_rx_rings);
3925 	}
3926 	return ret;
3927 }
3928 
3929 
3930 
3931 /*
3932  * Called on module unload by netmap-enabled drivers.
3933  */
3934 void
3935 netmap_detach(struct ifnet *ifp)
3936 {
3937 	struct netmap_adapter *na = NA(ifp);
3938 
3939 	if (!na)
3940 		return;
3941 
3942 	NMG_LOCK();
3943 	netmap_set_all_rings(na, NM_KR_LOCKED);
3944 	/*
3945 	 * if the netmap adapter is not native, somebody
3946 	 * changed it, so we cannot release it here.
3947 	 * The NAF_ZOMBIE flag will notify the new owner that
3948 	 * the driver is gone.
3949 	 */
3950 	if (!(na->na_flags & NAF_NATIVE) || !netmap_adapter_put(na)) {
3951 		na->na_flags |= NAF_ZOMBIE;
3952 	}
3953 	/* give active users a chance to notice that NAF_ZOMBIE has been
3954 	 * turned on, so that they can stop and return an error to userspace.
3955 	 * Note that this becomes a NOP if there are no active users and,
3956 	 * therefore, the put() above has deleted the na, since now NA(ifp) is
3957 	 * NULL.
3958 	 */
3959 	netmap_enable_all_rings(ifp);
3960 	NMG_UNLOCK();
3961 }
3962 
3963 
3964 /*
3965  * Intercept packets from the network stack and pass them
3966  * to netmap as incoming packets on the 'software' ring.
3967  *
3968  * We only store packets in a bounded mbq and then copy them
3969  * in the relevant rxsync routine.
3970  *
3971  * We rely on the OS to make sure that the ifp and na do not go
3972  * away (typically the caller checks for IFF_DRV_RUNNING or the like).
3973  * In nm_register() or whenever there is a reinitialization,
3974  * we take care to make the mode change visible here.
3975  */
3976 int
3977 netmap_transmit(struct ifnet *ifp, struct mbuf *m)
3978 {
3979 	struct netmap_adapter *na = NA(ifp);
3980 	struct netmap_kring *kring, *tx_kring;
3981 	u_int len = MBUF_LEN(m);
3982 	u_int error = ENOBUFS;
3983 	unsigned int txr;
3984 	struct mbq *q;
3985 	int busy;
3986 	u_int i;
3987 
3988 	i = MBUF_TXQ(m);
3989 	if (i >= na->num_host_rx_rings) {
3990 		i = i % na->num_host_rx_rings;
3991 	}
3992 	kring = NMR(na, NR_RX)[nma_get_nrings(na, NR_RX) + i];
3993 
3994 	// XXX [Linux] we do not need this lock
3995 	// if we follow the down/configure/up protocol -gl
3996 	// mtx_lock(&na->core_lock);
3997 
3998 	if (!nm_netmap_on(na)) {
3999 		nm_prerr("%s not in netmap mode anymore", na->name);
4000 		error = ENXIO;
4001 		goto done;
4002 	}
4003 
4004 	txr = MBUF_TXQ(m);
4005 	if (txr >= na->num_tx_rings) {
4006 		txr %= na->num_tx_rings;
4007 	}
4008 	tx_kring = NMR(na, NR_TX)[txr];
4009 
4010 	if (tx_kring->nr_mode == NKR_NETMAP_OFF) {
4011 		return MBUF_TRANSMIT(na, ifp, m);
4012 	}
4013 
4014 	q = &kring->rx_queue;
4015 
4016 	// XXX reconsider long packets if we handle fragments
4017 	if (len > NETMAP_BUF_SIZE(na)) { /* too long for us */
4018 		nm_prerr("%s from_host, drop packet size %d > %d", na->name,
4019 			len, NETMAP_BUF_SIZE(na));
4020 		goto done;
4021 	}
4022 
4023 	if (!netmap_generic_hwcsum) {
4024 		if (nm_os_mbuf_has_csum_offld(m)) {
4025 			nm_prlim(1, "%s drop mbuf that needs checksum offload", na->name);
4026 			goto done;
4027 		}
4028 	}
4029 
4030 	if (nm_os_mbuf_has_seg_offld(m)) {
4031 		nm_prlim(1, "%s drop mbuf that needs generic segmentation offload", na->name);
4032 		goto done;
4033 	}
4034 
4035 #ifdef __FreeBSD__
4036 	ETHER_BPF_MTAP(ifp, m);
4037 #endif /* __FreeBSD__ */
4038 
4039 	/* protect against netmap_rxsync_from_host(), netmap_sw_to_nic()
4040 	 * and maybe other instances of netmap_transmit (the latter
4041 	 * not possible on Linux).
4042 	 * We enqueue the mbuf only if we are sure there is going to be
4043  * enough room in the host RX ring; otherwise we drop it.
4044 	 */
4045 	mbq_lock(q);
4046 
4047 	busy = kring->nr_hwtail - kring->nr_hwcur;
4048 	if (busy < 0)
4049 		busy += kring->nkr_num_slots;
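	/* e.g. with 1024 slots, hwcur = 1000 and hwtail = 10 give
	 * busy = 10 - 1000 = -990, corrected to -990 + 1024 = 34
	 * slots currently in use (an illustrative example). */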
4050 	if (busy + mbq_len(q) >= kring->nkr_num_slots - 1) {
4051 		nm_prlim(2, "%s full hwcur %d hwtail %d qlen %d", na->name,
4052 			kring->nr_hwcur, kring->nr_hwtail, mbq_len(q));
4053 	} else {
4054 		mbq_enqueue(q, m);
4055 		nm_prdis(2, "%s %d bufs in queue", na->name, mbq_len(q));
4056 		/* notify outside the lock */
4057 		m = NULL;
4058 		error = 0;
4059 	}
4060 	mbq_unlock(q);
4061 
4062 done:
4063 	if (m)
4064 		m_freem(m);
4065 	/* unconditionally wake up listeners */
4066 	kring->nm_notify(kring, 0);
4067 	/* this is normally netmap_notify(), but for NICs
4068 	 * connected to a bridge it is netmap_bwrap_intr_notify(),
4069 	 * which possibly forwards the frames through the switch
4070 	 */
4071 
4072 	return (error);
4073 }
4074 
4075 
4076 /*
4077  * Reset function to be called by the driver routines when reinitializing
4078  * a hardware ring. The driver is in charge of locking to protect the kring
4079  * while this operation is being performed. This is normally achieved by
4080  * calling netmap_disable_all_rings() before triggering a reset.
4081  * If the kring is not in netmap mode, return NULL to inform the caller
4082  * that this is the case.
4083  * If the kring is in netmap mode, set hwofs so that the netmap indices
4084  * seen by userspace (head/cur/tail) do not change, although the internal
4085  * NIC indices have been reset to 0.
4086  * In any case, adjust kring->nr_mode.
4087  */
4088 struct netmap_slot *
4089 netmap_reset(struct netmap_adapter *na, enum txrx tx, u_int n,
4090 	u_int new_cur)
4091 {
4092 	struct netmap_kring *kring;
4093 	u_int new_hwtail, new_hwofs;
4094 
4095 	if (!nm_native_on(na)) {
4096 		nm_prdis("interface not in native netmap mode");
4097 		return NULL;	/* nothing to reinitialize */
4098 	}
4099 
4100 	if (tx == NR_TX) {
4101 		if (n >= na->num_tx_rings)
4102 			return NULL;
4103 		kring = na->tx_rings[n];
4104 		/*
4105 		 * Set hwofs to rhead, so that slots[rhead] is mapped to
4106 		 * the NIC internal slot 0, and thus the netmap buffer
4107 		 * at rhead is the next to be transmitted. Transmissions
4108 		 * that were pending before the reset are considered as
4109 		 * sent, so that we can have hwcur = rhead. All the slots
4110 		 * are now owned by the user, so we can also reinit hwtail.
4111 		 */
4112 		new_hwofs = kring->rhead;
4113 		new_hwtail = nm_prev(kring->rhead, kring->nkr_num_slots - 1);
4114 	} else {
4115 		if (n >= na->num_rx_rings)
4116 			return NULL;
4117 		kring = na->rx_rings[n];
4118 		/*
4119 		 * Set hwofs to hwtail, so that slots[hwtail] is mapped to
4120 		 * the NIC internal slot 0, and thus the netmap buffer
4121 		 * at hwtail is the next to be given to the NIC.
4122 		 * Unread slots (the ones in [rhead,hwtail[) are owned by
4123 		 * the user, and thus the caller cannot give them
4124 		 * to the NIC right now.
4125 		 */
4126 		new_hwofs = kring->nr_hwtail;
4127 		new_hwtail = kring->nr_hwtail;
4128 	}
4129 	if (kring->nr_pending_mode == NKR_NETMAP_OFF) {
4130 		kring->nr_mode = NKR_NETMAP_OFF;
4131 		return NULL;
4132 	}
4133 	if (netmap_verbose) {
4134 	    nm_prinf("%s, hc %u->%u, ht %u->%u, ho %u->%u", kring->name,
4135 	        kring->nr_hwcur, kring->rhead,
4136 	        kring->nr_hwtail, new_hwtail,
4137 		kring->nkr_hwofs, new_hwofs);
4138 	}
4139 	kring->nr_hwcur = kring->rhead;
4140 	kring->nr_hwtail = new_hwtail;
4141 	kring->nkr_hwofs = new_hwofs;
4142 
4143 	/*
4144 	 * Wake up on the individual and global selwait.
4145 	 * We do the wakeup here, but the ring is not yet reconfigured.
4146 	 * However, we are under lock so there are no races.
4147 	 */
4148 	kring->nr_mode = NKR_NETMAP_ON;
4149 	kring->nm_notify(kring, 0);
4150 	return kring->ring->slot;
4151 }
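
/*
 * Illustrative call site in a driver ring-init path (a sketch;
 * 'ring_nr' and 'ntxd' are hypothetical driver variables):
 *
 *	struct netmap_slot *slot = netmap_reset(na, NR_TX, ring_nr, 0);
 *
 *	if (slot != NULL) { // the ring is in netmap mode
 *		for (l = 0; l < ntxd; l++) {
 *			uint64_t paddr;
 *			int j = netmap_idx_n2k(na->tx_rings[ring_nr], l);
 *			void *vaddr = PNMB(na, slot + j, &paddr);
 *			// attach the netmap buffer: program NIC
 *			// descriptor l with paddr (vaddr may be used
 *			// to prefill or inspect the buffer)
 *		}
 *	}
 */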
4152 
4153 
4154 /*
4155  * Dispatch rx/tx interrupts to the netmap rings.
4156  *
4157  * "work_done" is non-null on the RX path, NULL for the TX path.
4158  * We rely on the OS to make sure that there is only one active
4159  * instance per queue, and that there is appropriate locking.
4160  *
4161  * The 'notify' routine depends on what the ring is attached to.
4162  * - for a netmap file descriptor, do a selwakeup on the individual
4163  *   waitqueue, plus one on the global one if needed
4164  *   (see netmap_notify)
4165  * - for a nic connected to a switch, call the proper forwarding routine
4166  *   (see netmap_bwrap_intr_notify)
4167  */
4168 int
4169 netmap_common_irq(struct netmap_adapter *na, u_int q, u_int *work_done)
4170 {
4171 	struct netmap_kring *kring;
4172 	enum txrx t = (work_done ? NR_RX : NR_TX);
4173 
4174 	q &= NETMAP_RING_MASK;
4175 
4176 	if (netmap_debug & (NM_DEBUG_RXINTR|NM_DEBUG_TXINTR)) {
4177 	        nm_prlim(5, "received %s queue %d", work_done ? "RX" : "TX", q);
4178 	}
4179 
4180 	if (q >= nma_get_nrings(na, t))
4181 		return NM_IRQ_PASS; // not a physical queue
4182 
4183 	kring = NMR(na, t)[q];
4184 
4185 	if (kring->nr_mode == NKR_NETMAP_OFF) {
4186 		return NM_IRQ_PASS;
4187 	}
4188 
4189 	if (t == NR_RX) {
4190 		kring->nr_kflags |= NKR_PENDINTR;	// XXX atomic ?
4191 		*work_done = 1; /* do not fire napi again */
4192 	}
4193 
4194 	return kring->nm_notify(kring, 0);
4195 }
4196 
4197 
4198 /*
4199  * Default functions to handle rx/tx interrupts from a physical device.
4200  * "work_done" is non-null on the RX path, NULL for the TX path.
4201  *
4202  * If the card is not in netmap mode, simply return NM_IRQ_PASS,
4203  * so that the caller proceeds with regular processing.
4204  * Otherwise call netmap_common_irq().
4205  *
4206  * If the card is connected to a netmap file descriptor,
4207  * do a selwakeup on the individual queue, plus one on the global one
4208  * if needed (multiqueue card _and_ there are multiqueue listeners),
4209  * and return NM_IRQ_COMPLETED.
4210  *
4211  * Finally, if called on rx from an interface connected to a switch,
4212  * calls the proper forwarding routine.
4213  */
4214 int
4215 netmap_rx_irq(struct ifnet *ifp, u_int q, u_int *work_done)
4216 {
4217 	struct netmap_adapter *na = NA(ifp);
4218 
4219 	/*
4220 	 * XXX emulated netmap mode sets NAF_SKIP_INTR so
4221 	 * we still use the regular driver even though the previous
4222 	 * check fails. It is unclear whether we should use
4223 	 * nm_native_on() here.
4224 	 */
4225 	if (!nm_netmap_on(na))
4226 		return NM_IRQ_PASS;
4227 
4228 	if (na->na_flags & NAF_SKIP_INTR) {
4229 		nm_prdis("use regular interrupt");
4230 		return NM_IRQ_PASS;
4231 	}
4232 
4233 	return netmap_common_irq(na, q, work_done);
4234 }
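
/*
 * Illustrative call site at the top of a driver RX interrupt handler
 * (a sketch; 'sc' and 'rxr' are hypothetical driver variables):
 *
 *	u_int work_done = 0;
 *
 *	if (netmap_rx_irq(sc->ifp, rxr->me, &work_done) != NM_IRQ_PASS)
 *		return; // netmap consumed the interrupt
 *	// ... regular mbuf-based receive processing follows ...
 */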
4235 
4236 /* set/clear native flags and if_transmit/netdev_ops */
4237 void
4238 nm_set_native_flags(struct netmap_adapter *na)
4239 {
4240 	struct ifnet *ifp = na->ifp;
4241 
4242 	/* We do the setup for intercepting packets only if we are the
4243 	 * first user of this adapter. */
4244 	if (na->active_fds > 0) {
4245 		return;
4246 	}
4247 
4248 	na->na_flags |= NAF_NETMAP_ON;
4249 	nm_os_onenter(ifp);
4250 	nm_update_hostrings_mode(na);
4251 }
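
/*
 * On FreeBSD the interception itself is done by the OS-specific glue:
 * nm_os_onenter() is expected to save the ifnet transmit method and
 * redirect it to netmap_transmit(), roughly (a sketch of the idea,
 * not the actual implementation):
 *
 *	na->if_transmit = ifp->if_transmit;
 *	ifp->if_transmit = netmap_transmit;
 *
 * and nm_os_onexit() restores it.
 */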
4252 
4253 void
4254 nm_clear_native_flags(struct netmap_adapter *na)
4255 {
4256 	struct ifnet *ifp = na->ifp;
4257 
4258 	/* We undo the setup for intercepting packets only if we are the
4259 	 * last user of this adapter. */
4260 	if (na->active_fds > 0) {
4261 		return;
4262 	}
4263 
4264 	nm_update_hostrings_mode(na);
4265 	nm_os_onexit(ifp);
4266 
4267 	na->na_flags &= ~NAF_NETMAP_ON;
4268 }
4269 
4270 void
4271 netmap_krings_mode_commit(struct netmap_adapter *na, int onoff)
4272 {
4273 	enum txrx t;
4274 
4275 	for_rx_tx(t) {
4276 		int i;
4277 
4278 		for (i = 0; i < netmap_real_rings(na, t); i++) {
4279 			struct netmap_kring *kring = NMR(na, t)[i];
4280 
4281 			if (onoff && nm_kring_pending_on(kring))
4282 				kring->nr_mode = NKR_NETMAP_ON;
4283 			else if (!onoff && nm_kring_pending_off(kring))
4284 				kring->nr_mode = NKR_NETMAP_OFF;
4285 		}
4286 	}
4287 }
4288 
4289 /*
4290  * Module loader and unloader
4291  *
4292  * netmap_init() creates the /dev/netmap device and initializes
4293  * all global variables. Returns 0 on success, errno on failure
4294  * (failure is not expected in practice).
4295  *
4296  * netmap_fini() destroys everything.
4297  */
4298 
4299 static struct cdev *netmap_dev; /* /dev/netmap character device. */
4300 extern struct cdevsw netmap_cdevsw;
4301 
4302 
4303 void
4304 netmap_fini(void)
4305 {
4306 	if (netmap_dev)
4307 		destroy_dev(netmap_dev);
4308 	/* we assume that there are no longer netmap users */
4309 	nm_os_ifnet_fini();
4310 	netmap_uninit_bridges();
4311 	netmap_mem_fini();
4312 	NMG_LOCK_DESTROY();
4313 	nm_prinf("netmap: unloaded module.");
4314 }
4315 
4316 
4317 int
4318 netmap_init(void)
4319 {
4320 	int error;
4321 
4322 	NMG_LOCK_INIT();
4323 
4324 	error = netmap_mem_init();
4325 	if (error != 0)
4326 		goto fail;
4327 	/*
4328 	 * MAKEDEV_ETERNAL_KLD avoids an expensive check on syscalls
4329 	 * when the module is compiled in.
4330 	 * XXX could use make_dev_credv() to get error number
4331 	 */
4332 	netmap_dev = make_dev_credf(MAKEDEV_ETERNAL_KLD,
4333 		&netmap_cdevsw, 0, NULL, UID_ROOT, GID_WHEEL, 0600,
4334 			      "netmap");
4335 	if (!netmap_dev)
4336 		goto fail;
4337 
4338 	error = netmap_init_bridges();
4339 	if (error)
4340 		goto fail;
4341 
4342 #ifdef __FreeBSD__
4343 	nm_os_vi_init_index();
4344 #endif
4345 
4346 	error = nm_os_ifnet_init();
4347 	if (error)
4348 		goto fail;
4349 
4350 	nm_prinf("netmap: loaded module");
4351 	return (0);
4352 fail:
4353 	netmap_fini();
4354 	return (EINVAL); /* may be incorrect */
4355 }
4356