xref: /freebsd/sys/dev/netmap/netmap.c (revision 550cb4ab85c7e514629c8bacbbb07085b81d916b)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (C) 2011-2014 Matteo Landi
5  * Copyright (C) 2011-2016 Luigi Rizzo
6  * Copyright (C) 2011-2016 Giuseppe Lettieri
7  * Copyright (C) 2011-2016 Vincenzo Maffione
8  * All rights reserved.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  *   1. Redistributions of source code must retain the above copyright
14  *      notice, this list of conditions and the following disclaimer.
15  *   2. Redistributions in binary form must reproduce the above copyright
16  *      notice, this list of conditions and the following disclaimer in the
17  *      documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31 
32 
33 /*
34  * $FreeBSD$
35  *
36  * This module supports memory mapped access to network devices,
37  * see netmap(4).
38  *
39  * The module uses a large memory pool allocated by the kernel
40  * and accessible as mmapped memory by multiple userspace threads/processes.
41  * The memory pool contains packet buffers and "netmap rings",
42  * i.e. user-accessible copies of the interface's queues.
43  *
44  * Access to the network card works like this:
45  * 1. a process/thread issues one or more open() on /dev/netmap, to create
46  *    select()able file descriptors on which events are reported.
47  * 2. on each descriptor, the process issues an ioctl() to identify
48  *    the interface that should report events to the file descriptor.
49  * 3. on each descriptor, the process issues an mmap() request to
50  *    map the shared memory region within the process' address space.
51  *    The list of interesting queues is indicated by a location in
52  *    the shared memory region.
53  * 4. using the functions in the netmap(4) userspace API, a process
54  *    can look up the occupation state of a queue, access memory buffers,
55  *    and retrieve received packets or enqueue packets to transmit.
56  * 5. using some ioctl()s the process can synchronize the userspace view
57  *    of the queue with the actual status in the kernel. This includes both
58  *    receiving the notification of new packets, and transmitting new
59  *    packets on the output interface.
60  * 6. select() or poll() can be used to wait for events on individual
61  *    transmit or receive queues (or all queues for a given interface).
62  *
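 * As a concrete illustration of steps 1-6, here is a minimal userspace
 * sketch (error handling omitted; it uses the legacy nmreq/NIOCREGIF
 * binding and the netmap_user.h helpers, and "em0" is just an example
 * interface name):
 *
 *	int fd = open("/dev/netmap", O_RDWR);
 *	struct nmreq nmr = { .nr_version = NETMAP_API };
 *	strncpy(nmr.nr_name, "em0", sizeof(nmr.nr_name) - 1);
 *	ioctl(fd, NIOCREGIF, &nmr);			// step 2
 *	void *mem = mmap(NULL, nmr.nr_memsize,		// step 3
 *	    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	struct netmap_if *nifp = NETMAP_IF(mem, nmr.nr_offset);
 *	struct netmap_ring *rxr = NETMAP_RXRING(nifp, 0);
 *	for (;;) {
 *		struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *		poll(&pfd, 1, -1);			// steps 5-6
 *		while (!nm_ring_empty(rxr)) {		// step 4
 *			struct netmap_slot *slot = &rxr->slot[rxr->cur];
 *			char *buf = NETMAP_BUF(rxr, slot->buf_idx);
 *			// ... process slot->len bytes at buf ...
 *			rxr->head = rxr->cur = nm_ring_next(rxr, rxr->cur);
 *		}
 *	}
 *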
63 
64 		SYNCHRONIZATION (USER)
65 
66 The netmap rings and data structures may be shared among multiple
67 user threads or even independent processes.
68 Any synchronization among those threads/processes is delegated
69 to the threads themselves. Only one thread at a time can be in
70 a system call on the same netmap ring. The OS does not enforce
71 this and only guarantees against system crashes in case of
72 invalid usage.
73 
74 		LOCKING (INTERNAL)
75 
76 Within the kernel, access to the netmap rings is protected as follows:
77 
78 - a spinlock on each ring, to handle producer/consumer races on
79   RX rings attached to the host stack (against multiple host
80   threads writing from the host stack to the same ring),
81   and on 'destination' rings attached to a VALE switch
82   (i.e. RX rings in VALE ports, and TX rings in NIC/host ports)
83   protecting multiple active senders for the same destination.
84 
85 - an atomic variable to guarantee that there is at most one
86   instance of *_*xsync() on the ring at any time.
87   For rings connected to user file
88   descriptors, an atomic_test_and_set() protects this, and the
89   lock on the ring is not actually used.
90   For NIC RX rings connected to a VALE switch, an atomic_test_and_set()
91   is also used to prevent multiple executions (the driver might indeed
92   already guarantee this).
93   For NIC TX rings connected to a VALE switch, the lock arbitrates
94   access to the queue (both when allocating buffers and when pushing
95   them out).
96 
97 - *xsync() should be protected against initialization of the card.
98   On FreeBSD most devices have the reset routine protected by
99   a RING lock (ixgbe, igb, em) or core lock (re). lem is missing
100   the RING protection on rx_reset(); this should be added.
101 
102   On linux there is an external lock on the tx path, which probably
103   also arbitrates access to the reset routine. XXX to be revised
104 
105 - a per-interface core_lock protecting access from the host stack
106   while interfaces may be detached from netmap mode.
107   XXX there should be no need for this lock if we detach the interfaces
108   only while they are down.
109 
110 
111 --- VALE SWITCH ---
112 
113 NMG_LOCK() serializes all modifications to switches and ports.
114 A switch cannot be deleted until all ports are gone.
115 
116 For each switch, an SX lock (RWlock on linux) protects
117 deletion of ports. When configuring or deleting a port, the
118 lock is acquired in exclusive mode (after holding NMG_LOCK).
119 When forwarding, the lock is acquired in shared mode (without NMG_LOCK).
120 The lock is held throughout the entire forwarding cycle,
121 during which the thread may incur a page fault.
122 Hence it is important that sleepable shared locks are used.
123 
124 On the rx ring, the per-port lock is grabbed initially to reserve
125 a number of slots in the ring, then the lock is released,
126 packets are copied from source to destination, and then
127 the lock is acquired again and the receive ring is updated.
128 (A similar thing is done on the tx ring for NIC and host stack
129 ports attached to the switch)
130 
131  */
132 
133 
134 /* --- internals ----
135  *
136  * Roadmap to the code that implements the above.
137  *
138  * > 1. a process/thread issues one or more open() on /dev/netmap, to create
139  * >    select()able file descriptors on which events are reported.
140  *
141  *  	Internally, we allocate a netmap_priv_d structure, which will be
142  *  	initialized on ioctl(NIOCREGIF). There is one netmap_priv_d
143  *  	structure for each open().
144  *
145  *      os-specific:
146  *  	    FreeBSD: see netmap_open() (netmap_freebsd.c)
147  *  	    linux:   see linux_netmap_open() (netmap_linux.c)
148  *
149  * > 2. on each descriptor, the process issues an ioctl() to identify
150  * >    the interface that should report events to the file descriptor.
151  *
152  * 	Implemented by netmap_ioctl(), NIOCREGIF case, with nmr->nr_cmd==0.
153  * 	Most important things happen in netmap_get_na() and
154  * 	netmap_do_regif(), called from there. Additional details can be
155  * 	found in the comments above those functions.
156  *
157  * 	In all cases, this action creates/takes-a-reference-to a
158  * 	netmap_*_adapter describing the port, and allocates a netmap_if
159  * 	and all necessary netmap rings, filling them with netmap buffers.
160  *
161  *      In this phase, the sync callbacks for each ring are set (these are used
162  *      in steps 5 and 6 below).  The callbacks depend on the type of adapter.
163  *      The adapter creation/initialization code puts them in the
164  * 	netmap_adapter (fields na->nm_txsync and na->nm_rxsync).  Then, they
165  * 	are copied from there to the netmap_kring's during netmap_do_regif(), by
166  * 	the nm_krings_create() callback.  All the nm_krings_create callbacks
167  * 	actually call netmap_krings_create() to perform this and the other
168  * 	common stuff. netmap_krings_create() also takes care of the host rings,
169  * 	if needed, by setting their sync callbacks appropriately.
170  *
171  * 	Additional actions depend on the kind of netmap_adapter that has been
172  * 	registered:
173  *
174  * 	- netmap_hw_adapter:  	     [netmap.c]
175  * 	     This is a system netdev/ifp with native netmap support.
176  * 	     The ifp is detached from the host stack by redirecting:
177  * 	       - transmissions (from the network stack) to netmap_transmit()
178  * 	       - receive notifications to the nm_notify() callback for
179  * 	         this adapter. The callback is normally netmap_notify(), unless
180  * 	         the ifp is attached to a bridge using bwrap, in which case it
181  * 	         is netmap_bwrap_intr_notify().
182  *
183  * 	- netmap_generic_adapter:      [netmap_generic.c]
184  * 	      A system netdev/ifp without native netmap support.
185  *
186  * 	(the decision about native/non native support is taken in
187  * 	 netmap_get_hw_na(), called by netmap_get_na())
188  *
189  * 	- netmap_vp_adapter 		[netmap_vale.c]
190  * 	      Returned by netmap_get_bdg_na().
191  * 	      This is a persistent or ephemeral VALE port. Ephemeral ports
192  * 	      are created on the fly if they don't already exist, and are
193  * 	      always attached to a bridge.
194  * 	      Persistent VALE ports must be created separately, and
195  * 	      then attached like normal NICs. The NIOCREGIF we are examining
196  * 	      will find them only if they had previously been created and
197  * 	      attached (see VALE_CTL below).
198  *
199  * 	- netmap_pipe_adapter 	      [netmap_pipe.c]
200  * 	      Returned by netmap_get_pipe_na().
201  * 	      Both pipe ends are created, if they didn't already exist.
202  *
203  * 	- netmap_monitor_adapter      [netmap_monitor.c]
204  * 	      Returned by netmap_get_monitor_na().
205  * 	      If successful, the nm_sync callbacks of the monitored adapter
206  * 	      will be intercepted by the returned monitor.
207  *
208  * 	- netmap_bwrap_adapter	      [netmap_vale.c]
209  * 	      Cannot be obtained in this way, see VALE_CTL below
210  *
211  *
212  * 	os-specific:
213  * 	    linux: we first go through linux_netmap_ioctl() to
214  * 	           adapt the FreeBSD interface to the linux one.
215  *
216  *
217  * > 3. on each descriptor, the process issues an mmap() request to
218  * >    map the shared memory region within the process' address space.
219  * >    The list of interesting queues is indicated by a location in
220  * >    the shared memory region.
221  *
222  *      os-specific:
223  *  	    FreeBSD: netmap_mmap_single (netmap_freebsd.c).
224  *  	    linux:   linux_netmap_mmap (netmap_linux.c).
225  *
226  * > 4. using the functions in the netmap(4) userspace API, a process
227  * >    can look up the occupation state of a queue, access memory buffers,
228  * >    and retrieve received packets or enqueue packets to transmit.
229  *
230  * 	These actions do not involve the kernel.
231  *
232  * > 5. using some ioctl()s the process can synchronize the userspace view
233  * >    of the queue with the actual status in the kernel. This includes both
234  * >    receiving the notification of new packets, and transmitting new
235  * >    packets on the output interface.
236  *
237  * 	These are implemented in netmap_ioctl(), NIOCTXSYNC and NIOCRXSYNC
238  * 	cases. They invoke the nm_sync callbacks on the netmap_kring
239  * 	structures, as initialized in step 2 and maybe later modified
240  * 	by a monitor. Monitors, however, will always call the original
241  * 	callback before doing anything else.
242  *
243  *
244  * > 6. select() or poll() can be used to wait for events on individual
245  * >    transmit or receive queues (or all queues for a given interface).
246  *
247  * 	Implemented in netmap_poll(). This will call the same nm_sync()
248  * 	callbacks as in step 5 above.
249  *
250  * 	os-specific:
251  * 		linux: we first go through linux_netmap_poll() to adapt
252  * 		       the FreeBSD interface to the linux one.
253  *
254  *
255  *  ----  VALE_CTL -----
256  *
257  *  VALE switches are controlled by issuing a NIOCREGIF with a non-null
258  *  nr_cmd in the nmreq structure. These subcommands are handled by
259  *  netmap_bdg_ctl() in netmap_vale.c. Persistent VALE ports are created
260  *  and destroyed by issuing the NETMAP_BDG_NEWIF and NETMAP_BDG_DELIF
261  *  subcommands, respectively.
262  *
263  *  Any network interface known to the system (including a persistent VALE
264  *  port) can be attached to a VALE switch by issuing the
265  *  NETMAP_REQ_VALE_ATTACH command. After the attachment, persistent VALE ports
266  *  look exactly like ephemeral VALE ports (as created in step 2 above).  The
267  *  attachment of other interfaces, instead, requires the creation of a
268  *  netmap_bwrap_adapter.  Moreover, the attached interface must be put in
269  *  netmap mode. This may require the creation of a netmap_generic_adapter if
270  *  we have no native support for the interface, or if generic adapters have
271  *  been forced by sysctl.
272  *
273  *  Both persistent VALE ports and bwraps are handled by netmap_get_bdg_na(),
274  *  called by nm_bdg_ctl_attach(), and discriminated by the nm_bdg_attach()
275  *  callback.  In the case of the bwrap, the callback creates the
276  *  netmap_bwrap_adapter.  The initialization of the bwrap is then
277  *  completed by calling netmap_do_regif() on it, in the nm_bdg_ctl()
278  *  callback (netmap_bwrap_bdg_ctl in netmap_vale.c).
279  *  A generic adapter for the wrapped ifp will be created if needed, when
280  *  netmap_get_bdg_na() calls netmap_get_hw_na().
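 *
 *  As a hedged sketch (struct and request names as in net/netmap.h;
 *  "vale0" and "em0" are illustrative), attaching an interface to a
 *  switch with the newer nmreq_header API looks roughly like:
 *
 *	struct nmreq_header hdr;
 *	struct nmreq_vale_attach att;
 *
 *	memset(&hdr, 0, sizeof(hdr));
 *	memset(&att, 0, sizeof(att));
 *	hdr.nr_version = NETMAP_API;
 *	hdr.nr_reqtype = NETMAP_REQ_VALE_ATTACH;
 *	strncpy(hdr.nr_name, "vale0:em0", sizeof(hdr.nr_name) - 1);
 *	hdr.nr_body = (uintptr_t)&att;
 *	att.reg.nr_mode = NR_REG_ALL_NIC;
 *	ioctl(fd, NIOCCTRL, &hdr);	// fd is an open /dev/netmap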
281  *
282  *
283  *  ---- DATAPATHS -----
284  *
285  *              -= SYSTEM DEVICE WITH NATIVE SUPPORT =-
286  *
287  *    na == NA(ifp) == netmap_hw_adapter created in DEVICE_netmap_attach()
288  *
289  *    - tx from netmap userspace:
290  *	 concurrently:
291  *           1) ioctl(NIOCTXSYNC)/netmap_poll() in process context
292  *                kring->nm_sync() == DEVICE_netmap_txsync()
293  *           2) device interrupt handler
294  *                na->nm_notify()  == netmap_notify()
295  *    - rx from netmap userspace:
296  *       concurrently:
297  *           1) ioctl(NIOCRXSYNC)/netmap_poll() in process context
298  *                kring->nm_sync() == DEVICE_netmap_rxsync()
299  *           2) device interrupt handler
300  *                na->nm_notify()  == netmap_notify()
301  *    - rx from host stack
302  *       concurrently:
303  *           1) host stack
304  *                netmap_transmit()
305  *                  na->nm_notify  == netmap_notify()
306  *           2) ioctl(NIOCRXSYNC)/netmap_poll() in process context
307  *                kring->nm_sync() == netmap_rxsync_from_host
308  *                  netmap_rxsync_from_host(na, NULL, NULL)
309  *    - tx to host stack
310  *           ioctl(NIOCTXSYNC)/netmap_poll() in process context
311  *             kring->nm_sync() == netmap_txsync_to_host
312  *               netmap_txsync_to_host(na)
313  *                 nm_os_send_up()
314  *                   FreeBSD: na->if_input() == ether_input()
315  *                   linux: netif_rx() with NM_MAGIC_PRIORITY_RX
316  *
317  *
318  *               -= SYSTEM DEVICE WITH GENERIC SUPPORT =-
319  *
320  *    na == NA(ifp) == generic_netmap_adapter created in generic_netmap_attach()
321  *
322  *    - tx from netmap userspace:
323  *       concurrently:
324  *           1) ioctl(NIOCTXSYNC)/netmap_poll() in process context
325  *               kring->nm_sync() == generic_netmap_txsync()
326  *                   nm_os_generic_xmit_frame()
327  *                       linux:   dev_queue_xmit() with NM_MAGIC_PRIORITY_TX
328  *                           ifp->ndo_start_xmit == generic_ndo_start_xmit()
329  *                               gna->save_start_xmit == orig. dev. start_xmit
330  *                       FreeBSD: na->if_transmit() == orig. dev if_transmit
331  *           2) generic_mbuf_destructor()
332  *                   na->nm_notify() == netmap_notify()
333  *    - rx from netmap userspace:
334  *           1) ioctl(NIOCRXSYNC)/netmap_poll() in process context
335  *               kring->nm_sync() == generic_netmap_rxsync()
336  *                   mbq_safe_dequeue()
337  *           2) device driver
338  *               generic_rx_handler()
339  *                   mbq_safe_enqueue()
340  *                   na->nm_notify() == netmap_notify()
341  *    - rx from host stack
342  *        FreeBSD: same as native
343  *        Linux: same as native except:
344  *           1) host stack
345  *               dev_queue_xmit() without NM_MAGIC_PRIORITY_TX
346  *                   ifp->ndo_start_xmit == generic_ndo_start_xmit()
347  *                       netmap_transmit()
348  *                           na->nm_notify() == netmap_notify()
349  *    - tx to host stack (same as native):
350  *
351  *
352  *                           -= VALE =-
353  *
354  *   INCOMING:
355  *
356  *      - VALE ports:
357  *          ioctl(NIOCTXSYNC)/netmap_poll() in process context
358  *              kring->nm_sync() == netmap_vp_txsync()
359  *
360  *      - system device with native support:
361  *         from cable:
362  *             interrupt
363  *                na->nm_notify() == netmap_bwrap_intr_notify(ring_nr != host ring)
364  *                     kring->nm_sync() == DEVICE_netmap_rxsync()
365  *                     netmap_vp_txsync()
366  *                     kring->nm_sync() == DEVICE_netmap_rxsync()
367  *         from host stack:
368  *             netmap_transmit()
369  *                na->nm_notify() == netmap_bwrap_intr_notify(ring_nr == host ring)
370  *                     kring->nm_sync() == netmap_rxsync_from_host()
371  *                     netmap_vp_txsync()
372  *
373  *      - system device with generic support:
374  *         from device driver:
375  *            generic_rx_handler()
376  *                na->nm_notify() == netmap_bwrap_intr_notify(ring_nr != host ring)
377  *                     kring->nm_sync() == generic_netmap_rxsync()
378  *                     netmap_vp_txsync()
379  *                     kring->nm_sync() == generic_netmap_rxsync()
380  *         from host stack:
381  *            netmap_transmit()
382  *                na->nm_notify() == netmap_bwrap_intr_notify(ring_nr == host ring)
383  *                     kring->nm_sync() == netmap_rxsync_from_host()
384  *                     netmap_vp_txsync()
385  *
386  *   (all cases) --> nm_bdg_flush()
387  *                      dest_na->nm_notify() == (see below)
388  *
389  *   OUTGOING:
390  *
391  *      - VALE ports:
392  *         concurrently:
393  *             1) ioctl(NIOCRXSYNC)/netmap_poll() in process context
394  *                    kring->nm_sync() == netmap_vp_rxsync()
395  *             2) from nm_bdg_flush()
396  *                    na->nm_notify() == netmap_notify()
397  *
398  *      - system device with native support:
399  *          to cable:
400  *             na->nm_notify() == netmap_bwrap_notify()
401  *                 netmap_vp_rxsync()
402  *                 kring->nm_sync() == DEVICE_netmap_txsync()
403  *                 netmap_vp_rxsync()
404  *          to host stack:
405  *                 netmap_vp_rxsync()
406  *                 kring->nm_sync() == netmap_txsync_to_host
407  *                 netmap_vp_rxsync_locked()
408  *
409  *      - system device with generic adapter:
410  *          to device driver:
411  *             na->nm_notify() == netmap_bwrap_notify()
412  *                 netmap_vp_rxsync()
413  *                 kring->nm_sync() == generic_netmap_txsync()
414  *                 netmap_vp_rxsync()
415  *          to host stack:
416  *                 netmap_vp_rxsync()
417  *                 kring->nm_sync() == netmap_txsync_to_host
418  *                 netmap_vp_rxsync()
419  *
420  */
421 
422 /*
423  * OS-specific code that is used only within this file.
424  * Other OS-specific code that must be accessed by drivers
425  * is present in netmap_kern.h
426  */
427 
428 #if defined(__FreeBSD__)
429 #include <sys/cdefs.h> /* prerequisite */
430 #include <sys/types.h>
431 #include <sys/errno.h>
432 #include <sys/param.h>	/* defines used in kernel.h */
433 #include <sys/kernel.h>	/* types used in module initialization */
434 #include <sys/conf.h>	/* cdevsw struct, UID, GID */
435 #include <sys/filio.h>	/* FIONBIO */
436 #include <sys/sockio.h>
437 #include <sys/socketvar.h>	/* struct socket */
438 #include <sys/malloc.h>
439 #include <sys/poll.h>
440 #include <sys/proc.h>
441 #include <sys/rwlock.h>
442 #include <sys/socket.h> /* sockaddrs */
443 #include <sys/selinfo.h>
444 #include <sys/sysctl.h>
445 #include <sys/jail.h>
446 #include <sys/epoch.h>
447 #include <net/vnet.h>
448 #include <net/if.h>
449 #include <net/if_var.h>
450 #include <net/bpf.h>		/* BIOCIMMEDIATE */
451 #include <machine/bus.h>	/* bus_dmamap_* */
452 #include <sys/endian.h>
453 #include <sys/refcount.h>
454 #include <net/ethernet.h>	/* ETHER_BPF_MTAP */
455 
456 
457 #elif defined(linux)
458 
459 #include "bsd_glue.h"
460 
461 #elif defined(__APPLE__)
462 
463 #warning OSX support is only partial
464 #include "osx_glue.h"
465 
466 #elif defined (_WIN32)
467 
468 #include "win_glue.h"
469 
470 #else
471 
472 #error	Unsupported platform
473 
474 #endif /* unsupported */
475 
476 /*
477  * common headers
478  */
479 #include <net/netmap.h>
480 #include <dev/netmap/netmap_kern.h>
481 #include <dev/netmap/netmap_mem2.h>
482 
483 
484 /* user-controlled variables */
485 int netmap_verbose;
486 #ifdef CONFIG_NETMAP_DEBUG
487 int netmap_debug;
488 #endif /* CONFIG_NETMAP_DEBUG */
489 
490 static int netmap_no_timestamp; /* don't timestamp on rxsync */
491 int netmap_no_pendintr = 1;
492 int netmap_txsync_retry = 2;
493 static int netmap_fwd = 0;	/* force transparent forwarding */
494 
495 /*
496  * netmap_admode selects the netmap mode to use.
497  * Invalid values are reset to NETMAP_ADMODE_BEST
498  */
499 enum {	NETMAP_ADMODE_BEST = 0,	/* use native, fallback to generic */
500 	NETMAP_ADMODE_NATIVE,	/* either native or none */
501 	NETMAP_ADMODE_GENERIC,	/* force generic */
502 	NETMAP_ADMODE_LAST };
503 static int netmap_admode = NETMAP_ADMODE_BEST;
504 
505 /* netmap_generic_mit controls mitigation of RX notifications for
506  * the generic netmap adapter. The value is a time interval in
507  * nanoseconds. */
508 int netmap_generic_mit = 100*1000;
509 
510 /* We use by default netmap-aware qdiscs with generic netmap adapters,
511  * even if there can be a little performance hit with hardware NICs.
512  * However, using the qdisc is the safer approach, for two reasons:
513  * 1) it prevents non-fifo qdiscs from breaking the TX notification
514  *    scheme, which is based on mbuf destructors when txqdisc is
515  *    not used.
516  * 2) it makes it possible to transmit over software devices that
517  *    change skb->dev, like bridge, veth, ...
518  *
519  * In any case, users looking for the best performance should
520  * use native adapters.
521  */
522 #ifdef linux
523 int netmap_generic_txqdisc = 1;
524 #endif
525 
526 /* Default number of slots and queues for generic adapters. */
527 int netmap_generic_ringsize = 1024;
528 int netmap_generic_rings = 1;
529 
530 /* Non-zero to enable checksum offloading in NIC drivers */
531 int netmap_generic_hwcsum = 0;
532 
533 /* Non-zero if ptnet devices are allowed to use virtio-net headers. */
534 int ptnet_vnet_hdr = 1;
535 
536 /*
537  * SYSCTL calls are grouped between SYSBEGIN and SYSEND so that they
538  * can be emulated in other operating systems
539  */
540 SYSBEGIN(main_init);
541 
542 SYSCTL_DECL(_dev_netmap);
543 SYSCTL_NODE(_dev, OID_AUTO, netmap, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
544     "Netmap args");
545 SYSCTL_INT(_dev_netmap, OID_AUTO, verbose,
546 		CTLFLAG_RW, &netmap_verbose, 0, "Verbose mode");
547 #ifdef CONFIG_NETMAP_DEBUG
548 SYSCTL_INT(_dev_netmap, OID_AUTO, debug,
549 		CTLFLAG_RW, &netmap_debug, 0, "Debug messages");
550 #endif /* CONFIG_NETMAP_DEBUG */
551 SYSCTL_INT(_dev_netmap, OID_AUTO, no_timestamp,
552 		CTLFLAG_RW, &netmap_no_timestamp, 0, "no_timestamp");
553 SYSCTL_INT(_dev_netmap, OID_AUTO, no_pendintr, CTLFLAG_RW, &netmap_no_pendintr,
554 		0, "Always look for new received packets.");
555 SYSCTL_INT(_dev_netmap, OID_AUTO, txsync_retry, CTLFLAG_RW,
556 		&netmap_txsync_retry, 0, "Number of txsync loops in bridge's flush.");
557 
558 SYSCTL_INT(_dev_netmap, OID_AUTO, fwd, CTLFLAG_RW, &netmap_fwd, 0,
559 		"Force NR_FORWARD mode");
560 SYSCTL_INT(_dev_netmap, OID_AUTO, admode, CTLFLAG_RW, &netmap_admode, 0,
561 		"Adapter mode. 0 selects the best option available, "
562 		"1 forces native adapter, 2 forces emulated adapter");
563 SYSCTL_INT(_dev_netmap, OID_AUTO, generic_hwcsum, CTLFLAG_RW, &netmap_generic_hwcsum,
564 		0, "Hardware checksums. 0 to disable checksum generation by the NIC (default), "
565 		"1 to enable checksum generation by the NIC");
566 SYSCTL_INT(_dev_netmap, OID_AUTO, generic_mit, CTLFLAG_RW, &netmap_generic_mit,
567 		0, "RX notification interval in nanoseconds");
568 SYSCTL_INT(_dev_netmap, OID_AUTO, generic_ringsize, CTLFLAG_RW,
569 		&netmap_generic_ringsize, 0,
570 		"Number of per-ring slots for emulated netmap mode");
571 SYSCTL_INT(_dev_netmap, OID_AUTO, generic_rings, CTLFLAG_RW,
572 		&netmap_generic_rings, 0,
573 		"Number of TX/RX queues for emulated netmap adapters");
574 #ifdef linux
575 SYSCTL_INT(_dev_netmap, OID_AUTO, generic_txqdisc, CTLFLAG_RW,
576 		&netmap_generic_txqdisc, 0, "Use qdisc for generic adapters");
577 #endif
578 SYSCTL_INT(_dev_netmap, OID_AUTO, ptnet_vnet_hdr, CTLFLAG_RW, &ptnet_vnet_hdr,
579 		0, "Allow ptnet devices to use virtio-net headers");
580 
581 SYSEND;
582 
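/*
 * Illustrative tuning of the knobs above from the FreeBSD command line
 * (the values are examples only):
 *
 *	# sysctl dev.netmap.verbose=1
 *	# sysctl dev.netmap.admode=1	(force native adapters)
 */
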
583 NMG_LOCK_T	netmap_global_lock;
584 
585 /*
586  * mark the ring as stopped, and run through the locks
587  * to make sure other users get to see it.
588  * stopped must be either NM_KR_STOPPED (for an unbounded stop)
589  * or NM_KR_LOCKED (a brief stop for mutual exclusion purposes)
590  */
591 static void
592 netmap_disable_ring(struct netmap_kring *kr, int stopped)
593 {
594 	nm_kr_stop(kr, stopped);
595 	// XXX check if nm_kr_stop is sufficient
596 	mtx_lock(&kr->q_lock);
597 	mtx_unlock(&kr->q_lock);
598 	nm_kr_put(kr);
599 }
600 
601 /* stop or enable a single ring */
602 void
603 netmap_set_ring(struct netmap_adapter *na, u_int ring_id, enum txrx t, int stopped)
604 {
605 	if (stopped)
606 		netmap_disable_ring(NMR(na, t)[ring_id], stopped);
607 	else
608 		NMR(na, t)[ring_id]->nkr_stopped = 0;
609 }
610 
611 
612 /* stop or enable all the rings of na */
613 void
614 netmap_set_all_rings(struct netmap_adapter *na, int stopped)
615 {
616 	int i;
617 	enum txrx t;
618 
619 	if (!nm_netmap_on(na))
620 		return;
621 
622 	if (netmap_verbose) {
623 		nm_prinf("%s: %sable all rings", na->name,
624 		    (stopped ? "dis" : "en"));
625 	}
626 	for_rx_tx(t) {
627 		for (i = 0; i < netmap_real_rings(na, t); i++) {
628 			netmap_set_ring(na, i, t, stopped);
629 		}
630 	}
631 }
632 
633 /*
634  * Convenience function used in drivers.  Waits for current txsync()s/rxsync()s
635  * to finish and prevents any new one from starting.  Call this before turning
636  * netmap mode off, or before removing the hardware rings (e.g., on module
637  * unload).
638  */
639 void
640 netmap_disable_all_rings(struct ifnet *ifp)
641 {
642 	if (NM_NA_VALID(ifp)) {
643 		netmap_set_all_rings(NA(ifp), NM_KR_LOCKED);
644 	}
645 }
646 
647 /*
648  * Convenience function used in drivers.  Re-enables rxsync and txsync on the
649  * adapter's rings. In linux drivers, this should be placed near each
650  * napi_enable().
651  */
652 void
653 netmap_enable_all_rings(struct ifnet *ifp)
654 {
655 	if (NM_NA_VALID(ifp)) {
656 		netmap_set_all_rings(NA(ifp), 0 /* enabled */);
657 	}
658 }
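
/*
 * A hedged sketch of how a driver reinit path might use the two helpers
 * above (DEVICE_reinit_hw() and 'sc' are illustrative placeholders, not
 * real names):
 *
 *	netmap_disable_all_rings(ifp);	// drain current *xsync() calls
 *	DEVICE_reinit_hw(sc);		// driver-specific reset
 *	netmap_enable_all_rings(ifp);	// allow syncs to run again
 */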
659 
660 void
661 netmap_make_zombie(struct ifnet *ifp)
662 {
663 	if (NM_NA_VALID(ifp)) {
664 		struct netmap_adapter *na = NA(ifp);
665 		netmap_set_all_rings(na, NM_KR_LOCKED);
666 		na->na_flags |= NAF_ZOMBIE;
667 		netmap_set_all_rings(na, 0);
668 	}
669 }
670 
671 void
672 netmap_undo_zombie(struct ifnet *ifp)
673 {
674 	if (NM_NA_VALID(ifp)) {
675 		struct netmap_adapter *na = NA(ifp);
676 		if (na->na_flags & NAF_ZOMBIE) {
677 			netmap_set_all_rings(na, NM_KR_LOCKED);
678 			na->na_flags &= ~NAF_ZOMBIE;
679 			netmap_set_all_rings(na, 0);
680 		}
681 	}
682 }
683 
684 /*
685  * generic bound-checking function
686  */
687 u_int
688 nm_bound_var(u_int *v, u_int dflt, u_int lo, u_int hi, const char *msg)
689 {
690 	u_int oldv = *v;
691 	const char *op = NULL;
692 
693 	if (dflt < lo)
694 		dflt = lo;
695 	if (dflt > hi)
696 		dflt = hi;
697 	if (oldv < lo) {
698 		*v = dflt;
699 		op = "Bump";
700 	} else if (oldv > hi) {
701 		*v = hi;
702 		op = "Clamp";
703 	}
704 	if (op && msg)
705 		nm_prinf("%s %s to %d (was %d)", op, msg, *v, oldv);
706 	return *v;
707 }
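
/*
 * Typical usage is a one-liner that clamps a user tunable into a sane
 * range; a hedged example (the bounds shown are illustrative):
 *
 *	nm_bound_var(&netmap_generic_ringsize, 1024, 64, 16384,
 *	    "generic_ringsize");
 */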
708 
709 
710 /*
711  * packet-dump function, using a user-supplied or a static buffer.
712  * The destination buffer must be at least 30+4*len bytes.
713  */
714 const char *
715 nm_dump_buf(char *p, int len, int lim, char *dst)
716 {
717 	static char _dst[8192];
718 	int i, j, i0;
719 	static char hex[] ="0123456789abcdef";
720 	char *o;	/* output position */
721 
722 #define P_HI(x)	hex[((x) & 0xf0)>>4]
723 #define P_LO(x)	hex[((x) & 0xf)]
724 #define P_C(x)	((x) >= 0x20 && (x) <= 0x7e ? (x) : '.')
725 	if (!dst)
726 		dst = _dst;
727 	if (lim <= 0 || lim > len)
728 		lim = len;
729 	o = dst;
730 	sprintf(o, "buf 0x%p len %d lim %d\n", p, len, lim);
731 	o += strlen(o);
732 	/* hexdump routine */
733 	for (i = 0; i < lim; ) {
734 		sprintf(o, "%5d: ", i);
735 		o += strlen(o);
736 		memset(o, ' ', 48);
737 		i0 = i;
738 		for (j=0; j < 16 && i < lim; i++, j++) {
739 			o[j*3] = P_HI(p[i]);
740 			o[j*3+1] = P_LO(p[i]);
741 		}
742 		i = i0;
743 		for (j=0; j < 16 && i < lim; i++, j++)
744 			o[j + 48] = P_C(p[i]);
745 		o[j+48] = '\n';
746 		o += j+49;
747 	}
748 	*o = '\0';
749 #undef P_HI
750 #undef P_LO
751 #undef P_C
752 	return dst;
753 }
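
/*
 * Example usage, as in netmap_rxsync_from_host() below (a NULL dst
 * selects the static buffer):
 *
 *	nm_prinf("%s", nm_dump_buf(NMB(na, slot), len, 128, NULL));
 */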
754 
755 
756 /*
757  * Fetch configuration from the device, to cope with dynamic
758  * reconfigurations after loading the module.
759  */
760 /* call with NMG_LOCK held */
761 int
762 netmap_update_config(struct netmap_adapter *na)
763 {
764 	struct nm_config_info info;
765 
766 	bzero(&info, sizeof(info));
767 	if (na->nm_config == NULL ||
768 	    na->nm_config(na, &info)) {
769 		/* take whatever we had at init time */
770 		info.num_tx_rings = na->num_tx_rings;
771 		info.num_tx_descs = na->num_tx_desc;
772 		info.num_rx_rings = na->num_rx_rings;
773 		info.num_rx_descs = na->num_rx_desc;
774 		info.rx_buf_maxsize = na->rx_buf_maxsize;
775 	}
776 
777 	if (na->num_tx_rings == info.num_tx_rings &&
778 	    na->num_tx_desc == info.num_tx_descs &&
779 	    na->num_rx_rings == info.num_rx_rings &&
780 	    na->num_rx_desc == info.num_rx_descs &&
781 	    na->rx_buf_maxsize == info.rx_buf_maxsize)
782 		return 0; /* nothing changed */
783 	if (na->active_fds == 0) {
784 		na->num_tx_rings = info.num_tx_rings;
785 		na->num_tx_desc = info.num_tx_descs;
786 		na->num_rx_rings = info.num_rx_rings;
787 		na->num_rx_desc = info.num_rx_descs;
788 		na->rx_buf_maxsize = info.rx_buf_maxsize;
789 		if (netmap_verbose)
790 			nm_prinf("configuration changed for %s: txring %d x %d, "
791 				"rxring %d x %d, rxbufsz %d",
792 				na->name, na->num_tx_rings, na->num_tx_desc,
793 				na->num_rx_rings, na->num_rx_desc, na->rx_buf_maxsize);
794 		return 0;
795 	}
796 	nm_prerr("WARNING: configuration changed for %s while active: "
797 		"txring %d x %d, rxring %d x %d, rxbufsz %d",
798 		na->name, info.num_tx_rings, info.num_tx_descs,
799 		info.num_rx_rings, info.num_rx_descs,
800 		info.rx_buf_maxsize);
801 	return 1;
802 }
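
/*
 * A minimal sketch of the driver-side nm_config callback consumed by
 * netmap_update_config() above (DEVICE_* and the softc fields are
 * illustrative, not taken from a real driver):
 *
 *	static int
 *	DEVICE_netmap_config(struct netmap_adapter *na,
 *	    struct nm_config_info *info)
 *	{
 *		struct DEVICE_softc *sc = na->ifp->if_softc;
 *
 *		info->num_tx_rings = sc->num_queues;
 *		info->num_rx_rings = sc->num_queues;
 *		info->num_tx_descs = sc->num_tx_desc;
 *		info->num_rx_descs = sc->num_rx_desc;
 *		info->rx_buf_maxsize = sc->rx_buf_size;
 *		return 0;	// 0: info is valid, use it
 *	}
 */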
803 
804 /* nm_sync callbacks for the host rings */
805 static int netmap_txsync_to_host(struct netmap_kring *kring, int flags);
806 static int netmap_rxsync_from_host(struct netmap_kring *kring, int flags);
807 
808 static int
809 netmap_default_bufcfg(struct netmap_kring *kring, uint64_t target)
810 {
811 	kring->hwbuf_len = target;
812 	kring->buf_align = 0; /* no alignment */
813 	return 0;
814 }
815 
816 /* create the krings array and initialize the fields common to all adapters.
817  * The array layout is this:
818  *
819  *                    +----------+
820  * na->tx_rings ----->|          | \
821  *                    |          |  } na->num_tx_rings
822  *                    |          | /
823  *                    +----------+
824  *                    |          |    host tx kring
825  * na->rx_rings ----> +----------+
826  *                    |          | \
827  *                    |          |  } na->num_rx_rings
828  *                    |          | /
829  *                    +----------+
830  *                    |          |    host rx kring
831  *                    +----------+
832  * na->tailroom ----->|          | \
833  *                    |          |  } tailroom bytes
834  *                    |          | /
835  *                    +----------+
836  *
837  * Note: for compatibility, host krings are created even when not needed.
838  * The tailroom space is currently used by vale ports for allocating leases.
839  */
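
/*
 * Worked example with illustrative numbers: a NIC with 4 tx and 4 rx
 * hardware rings plus one host ring per direction gives
 * n[NR_TX] = n[NR_RX] = 5. The single allocation below then holds
 * 10 kring pointers (na->tx_rings[0..4] and na->rx_rings[0..4], with
 * na->rx_rings = na->tx_rings + 5), followed by 'tailroom' bytes,
 * followed by the 10 struct netmap_kring themselves.
 */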
840 /* call with NMG_LOCK held */
841 int
842 netmap_krings_create(struct netmap_adapter *na, u_int tailroom)
843 {
844 	u_int i, len, ndesc;
845 	struct netmap_kring *kring;
846 	u_int n[NR_TXRX];
847 	enum txrx t;
848 	int err = 0;
849 
850 	if (na->tx_rings != NULL) {
851 		if (netmap_debug & NM_DEBUG_ON)
852 			nm_prerr("warning: krings were already created");
853 		return 0;
854 	}
855 
856 	/* account for the (possibly fake) host rings */
857 	n[NR_TX] = netmap_all_rings(na, NR_TX);
858 	n[NR_RX] = netmap_all_rings(na, NR_RX);
859 
860 	len = (n[NR_TX] + n[NR_RX]) *
861 		(sizeof(struct netmap_kring) + sizeof(struct netmap_kring *))
862 		+ tailroom;
863 
864 	na->tx_rings = nm_os_malloc((size_t)len);
865 	if (na->tx_rings == NULL) {
866 		nm_prerr("Cannot allocate krings");
867 		return ENOMEM;
868 	}
869 	na->rx_rings = na->tx_rings + n[NR_TX];
870 	na->tailroom = na->rx_rings + n[NR_RX];
871 
872 	/* link the krings in the krings array */
873 	kring = (struct netmap_kring *)((char *)na->tailroom + tailroom);
874 	for (i = 0; i < n[NR_TX] + n[NR_RX]; i++) {
875 		na->tx_rings[i] = kring;
876 		kring++;
877 	}
878 
879 	/*
880 	 * All fields in krings are 0 except the ones initialized below,
881 	 * but better be explicit on important kring fields.
882 	 */
883 	for_rx_tx(t) {
884 		ndesc = nma_get_ndesc(na, t);
885 		for (i = 0; i < n[t]; i++) {
886 			kring = NMR(na, t)[i];
887 			bzero(kring, sizeof(*kring));
888 			kring->notify_na = na;
889 			kring->ring_id = i;
890 			kring->tx = t;
891 			kring->nkr_num_slots = ndesc;
892 			kring->nr_mode = NKR_NETMAP_OFF;
893 			kring->nr_pending_mode = NKR_NETMAP_OFF;
894 			if (i < nma_get_nrings(na, t)) {
895 				kring->nm_sync = (t == NR_TX ? na->nm_txsync : na->nm_rxsync);
896 				kring->nm_bufcfg = na->nm_bufcfg;
897 				if (kring->nm_bufcfg == NULL)
898 					kring->nm_bufcfg = netmap_default_bufcfg;
899 			} else {
900 				if (!(na->na_flags & NAF_HOST_RINGS))
901 					kring->nr_kflags |= NKR_FAKERING;
902 				kring->nm_sync = (t == NR_TX ?
903 						netmap_txsync_to_host:
904 						netmap_rxsync_from_host);
905 				kring->nm_bufcfg = netmap_default_bufcfg;
906 			}
907 			kring->nm_notify = na->nm_notify;
908 			kring->rhead = kring->rcur = kring->nr_hwcur = 0;
909 			/*
910 			 * IMPORTANT: Always keep one slot empty.
911 			 */
912 			kring->rtail = kring->nr_hwtail = (t == NR_TX ? ndesc - 1 : 0);
913 			snprintf(kring->name, sizeof(kring->name) - 1, "%s %s%d", na->name,
914 					nm_txrx2str(t), i);
915 			nm_prdis("ktx %s h %d c %d t %d",
916 				kring->name, kring->rhead, kring->rcur, kring->rtail);
917 			err = nm_os_selinfo_init(&kring->si, kring->name);
918 			if (err) {
919 				netmap_krings_delete(na);
920 				return err;
921 			}
922 			mtx_init(&kring->q_lock, (t == NR_TX ? "nm_txq_lock" : "nm_rxq_lock"), NULL, MTX_DEF);
923 			kring->na = na;	/* setting this field marks the mutex as initialized */
924 		}
925 		err = nm_os_selinfo_init(&na->si[t], na->name);
926 		if (err) {
927 			netmap_krings_delete(na);
928 			return err;
929 		}
930 	}
931 
932 	return 0;
933 }
934 
935 
936 /* undo the actions performed by netmap_krings_create */
937 /* call with NMG_LOCK held */
938 void
939 netmap_krings_delete(struct netmap_adapter *na)
940 {
941 	struct netmap_kring **kring = na->tx_rings;
942 	enum txrx t;
943 
944 	if (na->tx_rings == NULL) {
945 		if (netmap_debug & NM_DEBUG_ON)
946 			nm_prerr("warning: krings were already deleted");
947 		return;
948 	}
949 
950 	for_rx_tx(t)
951 		nm_os_selinfo_uninit(&na->si[t]);
952 
953 	/* we rely on the krings layout described above */
954 	for ( ; kring != na->tailroom; kring++) {
955 		if ((*kring)->na != NULL)
956 			mtx_destroy(&(*kring)->q_lock);
957 		nm_os_selinfo_uninit(&(*kring)->si);
958 	}
959 	nm_os_free(na->tx_rings);
960 	na->tx_rings = na->rx_rings = na->tailroom = NULL;
961 }
962 
963 
964 /*
965  * Destructor for NIC ports. They also have an mbuf queue
966  * on the rings connected to the host so we need to purge
967  * them first.
968  */
969 /* call with NMG_LOCK held */
970 void
971 netmap_hw_krings_delete(struct netmap_adapter *na)
972 {
973 	u_int lim = netmap_real_rings(na, NR_RX), i;
974 
975 	for (i = nma_get_nrings(na, NR_RX); i < lim; i++) {
976 		struct mbq *q = &NMR(na, NR_RX)[i]->rx_queue;
977 		nm_prdis("destroy sw mbq with len %d", mbq_len(q));
978 		mbq_purge(q);
979 		mbq_safe_fini(q);
980 	}
981 	netmap_krings_delete(na);
982 }
983 
984 void
985 netmap_mem_restore(struct netmap_adapter *na)
986 {
987 	if (na->nm_mem_prev) {
988 		netmap_mem_put(na->nm_mem);
989 		na->nm_mem = na->nm_mem_prev;
990 		na->nm_mem_prev = NULL;
991 	}
992 }
993 
994 static void
995 netmap_mem_drop(struct netmap_adapter *na)
996 {
997 	/* if the native allocator had been overridden on regif,
998 	 * restore it now and drop the temporary one
999 	 */
1000 	if (netmap_mem_deref(na->nm_mem, na)) {
1001 		netmap_mem_restore(na);
1002 	}
1003 }
1004 
1005 /*
1006  * Undo everything that was done in netmap_do_regif(). In particular,
1007  * call nm_register(ifp,0) to stop netmap mode on the interface and
1008  * revert to normal operation.
1009  */
1010 /* call with NMG_LOCK held */
1011 static void netmap_unset_ringid(struct netmap_priv_d *);
1012 static void netmap_krings_put(struct netmap_priv_d *);
1013 void
1014 netmap_do_unregif(struct netmap_priv_d *priv)
1015 {
1016 	struct netmap_adapter *na = priv->np_na;
1017 
1018 	NMG_LOCK_ASSERT();
1019 	na->active_fds--;
1020 	/* unset nr_pending_mode and possibly release exclusive mode */
1021 	netmap_krings_put(priv);
1022 
1023 #ifdef	WITH_MONITOR
1024 	/* XXX check whether we have to do something with monitor
1025 	 * when rings change nr_mode. */
1026 	if (na->active_fds <= 0) {
1027 		/* walk through all the rings and tell any monitor
1028 		 * that the port is going to exit netmap mode
1029 		 */
1030 		netmap_monitor_stop(na);
1031 	}
1032 #endif
1033 
1034 	if (na->active_fds <= 0 || nm_kring_pending(priv)) {
1035 		na->nm_register(na, 0);
1036 	}
1037 
1038 	/* delete rings and buffers that are no longer needed */
1039 	netmap_mem_rings_delete(na);
1040 
1041 	if (na->active_fds <= 0) {	/* last instance */
1042 		/*
1043 		 * (TO CHECK) We enter here
1044 		 * when the last reference to this file descriptor goes
1045 		 * away. This means we cannot have any pending poll()
1046 		 * or interrupt routine operating on the structure.
1047 		 * XXX The file may be closed in a thread while
1048 		 * another thread is using it.
1049 		 * Linux keeps the file opened until the last reference
1050 		 * by any outstanding ioctl/poll or mmap is gone.
1051 		 * FreeBSD does not track mmap()s (but we do) and
1052 		 * wakes up any sleeping poll(). Need to check what
1053 		 * happens if the close() occurs while a concurrent
1054 		 * syscall is running.
1055 		 */
1056 		if (netmap_debug & NM_DEBUG_ON)
1057 			nm_prinf("deleting last instance for %s", na->name);
1058 
1059 		if (nm_netmap_on(na)) {
1060 			nm_prerr("BUG: netmap on while going to delete the krings");
1061 		}
1062 
1063 		na->nm_krings_delete(na);
1064 
1065 		/* restore the default number of host tx and rx rings */
1066 		if (na->na_flags & NAF_HOST_RINGS) {
1067 			na->num_host_tx_rings = 1;
1068 			na->num_host_rx_rings = 1;
1069 		} else {
1070 			na->num_host_tx_rings = 0;
1071 			na->num_host_rx_rings = 0;
1072 		}
1073 	}
1074 
1075 	/* possibly decrement counter of tx_si/rx_si users */
1076 	netmap_unset_ringid(priv);
1077 	/* delete the nifp */
1078 	netmap_mem_if_delete(na, priv->np_nifp);
1079 	/* drop the allocator */
1080 	netmap_mem_drop(na);
1081 	/* mark the priv as unregistered */
1082 	priv->np_na = NULL;
1083 	priv->np_nifp = NULL;
1084 }
1085 
1086 struct netmap_priv_d*
1087 netmap_priv_new(void)
1088 {
1089 	struct netmap_priv_d *priv;
1090 
1091 	priv = nm_os_malloc(sizeof(struct netmap_priv_d));
1092 	if (priv == NULL)
1093 		return NULL;
1094 	priv->np_refs = 1;
1095 	nm_os_get_module();
1096 	return priv;
1097 }
1098 
1099 /*
1100  * Destructor of the netmap_priv_d, called when the fd is closed.
1101  * Action: undo all the things done by NIOCREGIF.
1102  * On FreeBSD we need to track whether there are active mmap()s,
1103  * and we use np_active_mmaps for that. On linux, the field is always 0.
1104  * The priv is freed only when the last reference (np_refs) is gone.
1105  *
1106  */
1107 /* call with NMG_LOCK held */
1108 void
1109 netmap_priv_delete(struct netmap_priv_d *priv)
1110 {
1111 	struct netmap_adapter *na = priv->np_na;
1112 
1113 	/* number of active references to this fd */
1114 	if (--priv->np_refs > 0) {
1115 		return;
1116 	}
1117 	nm_os_put_module();
1118 	if (na) {
1119 		netmap_do_unregif(priv);
1120 	}
1121 	netmap_unget_na(na, priv->np_ifp);
1122 	bzero(priv, sizeof(*priv));	/* for safety */
1123 	nm_os_free(priv);
1124 }
1125 
1126 
1127 /* call with NMG_LOCK *not* held */
1128 void
1129 netmap_dtor(void *data)
1130 {
1131 	struct netmap_priv_d *priv = data;
1132 
1133 	NMG_LOCK();
1134 	netmap_priv_delete(priv);
1135 	NMG_UNLOCK();
1136 }
1137 
1138 
1139 /*
1140  * Handlers for synchronization of the rings from/to the host stack.
1141  * These are associated to a network interface and are just another
1142  * ring pair managed by userspace.
1143  *
1144  * Netmap also supports transparent forwarding (NS_FORWARD and NR_FORWARD
1145  * flags):
1146  *
1147  * - Before releasing buffers on hw RX rings, the application can mark
1148  *   them with the NS_FORWARD flag. During the next RXSYNC or poll(), they
1149  *   will be forwarded to the host stack, similarly to what happened if
1150  *   the application moved them to the host TX ring.
1151  *
1152  * - Before releasing buffers on the host RX ring, the application can
1153  *   mark them with the NS_FORWARD flag. During the next RXSYNC or poll(),
1154  *   they will be forwarded to the hw TX rings, saving the application
1155  *   from doing the same task in user-space.
1156  *
1157  * Transparent forwarding can be enabled per-ring, by setting the NR_FORWARD
1158  * flag, or globally with the netmap_fwd sysctl.
1159  *
1160  * The transfer NIC --> host is relatively easy, just encapsulate
1161  * into mbufs and we are done. The host --> NIC side is slightly
1162  * harder because there might not be room in the tx ring so it
1163  * might take a while before releasing the buffer.
1164  */
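
/*
 * A hedged userspace sketch of requesting transparent forwarding on a
 * hw RX ring (per-ring enable shown; the dev.netmap.fwd sysctl enables
 * it globally):
 *
 *	ring->flags |= NR_FORWARD;	// once, after binding the ring
 *	...
 *	slot = &ring->slot[ring->cur];
 *	slot->flags |= NS_FORWARD;	// pass this buffer to the host
 *	ring->head = ring->cur = nm_ring_next(ring, ring->cur);
 */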
1165 
1166 
1167 /*
1168  * Pass a whole queue of mbufs to the host stack as coming from 'dst'.
1169  * We do not need to lock because the queue is private.
1170  * After this call the queue is empty.
1171  */
1172 static void
1173 netmap_send_up(struct ifnet *dst, struct mbq *q)
1174 {
1175 	struct mbuf *m;
1176 	struct mbuf *head = NULL, *prev = NULL;
1177 #ifdef __FreeBSD__
1178 	struct epoch_tracker et;
1179 
1180 	NET_EPOCH_ENTER(et);
1181 #endif /* __FreeBSD__ */
1182 	/* Send packets up, outside the lock; head/prev machinery
1183 	 * is only useful for Windows. */
1184 	while ((m = mbq_dequeue(q)) != NULL) {
1185 		if (netmap_debug & NM_DEBUG_HOST)
1186 			nm_prinf("sending up pkt %p size %d", m, MBUF_LEN(m));
1187 		prev = nm_os_send_up(dst, m, prev);
1188 		if (head == NULL)
1189 			head = prev;
1190 	}
1191 	if (head)
1192 		nm_os_send_up(dst, NULL, head);
1193 #ifdef __FreeBSD__
1194 	NET_EPOCH_EXIT(et);
1195 #endif /* __FreeBSD__ */
1196 	mbq_fini(q);
1197 }
1198 
1199 
1200 /*
1201  * Scan the buffers from hwcur to ring->head, and put a copy of those
1202  * marked NS_FORWARD (or all of them if forced) into a queue of mbufs.
1203  * Drop remaining packets in the unlikely event
1204  * of an mbuf shortage.
1205  */
1206 static void
1207 netmap_grab_packets(struct netmap_kring *kring, struct mbq *q, int force)
1208 {
1209 	u_int const lim = kring->nkr_num_slots - 1;
1210 	u_int const head = kring->rhead;
1211 	u_int n;
1212 	struct netmap_adapter *na = kring->na;
1213 
1214 	for (n = kring->nr_hwcur; n != head; n = nm_next(n, lim)) {
1215 		struct mbuf *m;
1216 		struct netmap_slot *slot = &kring->ring->slot[n];
1217 
1218 		if ((slot->flags & NS_FORWARD) == 0 && !force)
1219 			continue;
1220 		if (slot->len < 14 || slot->len > NETMAP_BUF_SIZE(na)) {
1221 			nm_prlim(5, "bad pkt at %d len %d", n, slot->len);
1222 			continue;
1223 		}
1224 		slot->flags &= ~NS_FORWARD; // XXX needed ?
1225 		/* XXX TODO: adapt to the case of a multisegment packet */
1226 		m = m_devget(NMB(na, slot), slot->len, 0, na->ifp, NULL);
1227 
1228 		if (m == NULL)
1229 			break;
1230 		mbq_enqueue(q, m);
1231 	}
1232 }
1233 
1234 static inline int
1235 _nm_may_forward(struct netmap_kring *kring)
1236 {
1237 	return	((netmap_fwd || kring->ring->flags & NR_FORWARD) &&
1238 		 kring->na->na_flags & NAF_HOST_RINGS &&
1239 		 kring->tx == NR_RX);
1240 }
1241 
1242 static inline int
1243 nm_may_forward_up(struct netmap_kring *kring)
1244 {
1245 	return	_nm_may_forward(kring) &&
1246 		 kring->ring_id != kring->na->num_rx_rings;
1247 }
1248 
1249 static inline int
1250 nm_may_forward_down(struct netmap_kring *kring, int sync_flags)
1251 {
1252 	return	_nm_may_forward(kring) &&
1253 		 (sync_flags & NAF_CAN_FORWARD_DOWN) &&
1254 		 kring->ring_id == kring->na->num_rx_rings;
1255 }
1256 
1257 /*
1258  * Send to the NIC rings packets marked NS_FORWARD between
1259  * kring->nr_hwcur and kring->rhead.
1260  * Called under kring->rx_queue.lock on the sw rx ring.
1261  *
1262  * It can only be called if the user opened all the TX hw rings,
1263  * see NAF_CAN_FORWARD_DOWN flag.
1264  * We can touch the TX netmap rings (slots, head and cur) since
1265  * we are in poll/ioctl system call context, and the application
1266  * is not supposed to touch the ring (using a different thread)
1267  * during the execution of the system call.
1268  */
1269 static u_int
1270 netmap_sw_to_nic(struct netmap_adapter *na)
1271 {
1272 	struct netmap_kring *kring = na->rx_rings[na->num_rx_rings];
1273 	struct netmap_slot *rxslot = kring->ring->slot;
1274 	u_int i, rxcur = kring->nr_hwcur;
1275 	u_int const head = kring->rhead;
1276 	u_int const src_lim = kring->nkr_num_slots - 1;
1277 	u_int sent = 0;
1278 
1279 	/* scan rings to find space, then fill as much as possible */
1280 	for (i = 0; i < na->num_tx_rings; i++) {
1281 		struct netmap_kring *kdst = na->tx_rings[i];
1282 		struct netmap_ring *rdst = kdst->ring;
1283 		u_int const dst_lim = kdst->nkr_num_slots - 1;
1284 
1285 		/* XXX do we trust ring or kring->rcur,rtail ? */
1286 		for (; rxcur != head && !nm_ring_empty(rdst);
1287 		     rxcur = nm_next(rxcur, src_lim) ) {
1288 			struct netmap_slot *src, *dst, tmp;
1289 			u_int dst_head = rdst->head;
1290 
1291 			src = &rxslot[rxcur];
1292 			if ((src->flags & NS_FORWARD) == 0 && !netmap_fwd)
1293 				continue;
1294 
1295 			sent++;
1296 
1297 			dst = &rdst->slot[dst_head];
1298 
1299 			tmp = *src;
1300 
1301 			src->buf_idx = dst->buf_idx;
1302 			src->flags = NS_BUF_CHANGED;
1303 
1304 			dst->buf_idx = tmp.buf_idx;
1305 			dst->len = tmp.len;
1306 			dst->flags = NS_BUF_CHANGED;
1307 
1308 			rdst->head = rdst->cur = nm_next(dst_head, dst_lim);
1309 		}
1310 		/* if (sent) XXX txsync ? it would be just an optimization */
1311 	}
1312 	return sent;
1313 }
1314 
1315 
1316 /*
1317  * netmap_txsync_to_host() passes packets up. We are called from a
1318  * system call in user process context, and the only contention
1319  * can be among multiple user threads erroneously calling
1320  * this routine concurrently.
1321  */
1322 static int
1323 netmap_txsync_to_host(struct netmap_kring *kring, int flags)
1324 {
1325 	struct netmap_adapter *na = kring->na;
1326 	u_int const lim = kring->nkr_num_slots - 1;
1327 	u_int const head = kring->rhead;
1328 	struct mbq q;
1329 
1330 	/* Take packets from hwcur to head and pass them up.
1331 	 * Force hwcur = head since netmap_grab_packets() stops at head
1332 	 */
1333 	mbq_init(&q);
1334 	netmap_grab_packets(kring, &q, 1 /* force */);
1335 	nm_prdis("have %d pkts in queue", mbq_len(&q));
1336 	kring->nr_hwcur = head;
1337 	kring->nr_hwtail = head + lim;
1338 	if (kring->nr_hwtail > lim)
1339 		kring->nr_hwtail -= lim + 1;
1340 
1341 	netmap_send_up(na->ifp, &q);
1342 	return 0;
1343 }
1344 
1345 
1346 /*
1347  * rxsync backend for packets coming from the host stack.
1348  * They have been put in kring->rx_queue by netmap_transmit().
1349  * We protect access to the kring using kring->rx_queue.lock.
1350  *
1351  * This routine also moves to the nic hw rings any packet the user has marked
1352  * for transparent-mode forwarding, then sets the NR_FORWARD
1353  * flag in the kring to let the caller push them out
1354  */
1355 static int
1356 netmap_rxsync_from_host(struct netmap_kring *kring, int flags)
1357 {
1358 	struct netmap_adapter *na = kring->na;
1359 	struct netmap_ring *ring = kring->ring;
1360 	u_int nm_i, n;
1361 	u_int const lim = kring->nkr_num_slots - 1;
1362 	u_int const head = kring->rhead;
1363 	int ret = 0;
1364 	struct mbq *q = &kring->rx_queue, fq;
1365 
1366 	mbq_init(&fq); /* fq holds packets to be freed */
1367 
1368 	mbq_lock(q);
1369 
1370 	/* First part: import newly received packets */
1371 	n = mbq_len(q);
1372 	if (n) { /* grab packets from the queue */
1373 		struct mbuf *m;
1374 		uint32_t stop_i;
1375 
1376 		nm_i = kring->nr_hwtail;
1377 		stop_i = nm_prev(kring->nr_hwcur, lim);
1378 		while ( nm_i != stop_i && (m = mbq_dequeue(q)) != NULL ) {
1379 			int len = MBUF_LEN(m);
1380 			struct netmap_slot *slot = &ring->slot[nm_i];
1381 
1382 			m_copydata(m, 0, len, NMB(na, slot));
1383 			nm_prdis("nm %d len %d", nm_i, len);
1384 			if (netmap_debug & NM_DEBUG_HOST)
1385 				nm_prinf("%s", nm_dump_buf(NMB(na, slot),len, 128, NULL));
1386 
1387 			slot->len = len;
1388 			slot->flags = 0;
1389 			nm_i = nm_next(nm_i, lim);
1390 			mbq_enqueue(&fq, m);
1391 		}
1392 		kring->nr_hwtail = nm_i;
1393 	}
1394 
1395 	/*
1396 	 * Second part: skip past packets that userspace has released.
1397 	 */
1398 	nm_i = kring->nr_hwcur;
1399 	if (nm_i != head) { /* something was released */
1400 		if (nm_may_forward_down(kring, flags)) {
1401 			ret = netmap_sw_to_nic(na);
1402 			if (ret > 0) {
1403 				kring->nr_kflags |= NR_FORWARD;
1404 				ret = 0;
1405 			}
1406 		}
1407 		kring->nr_hwcur = head;
1408 	}
1409 
1410 	mbq_unlock(q);
1411 
1412 	mbq_purge(&fq);
1413 	mbq_fini(&fq);
1414 
1415 	return ret;
1416 }
1417 
1418 
1419 /* Get a netmap adapter for the port.
1420  *
1421  * If it is possible to satisfy the request, return 0
1422  * with *na containing the netmap adapter found.
1423  * Otherwise return an error code, with *na containing NULL.
1424  *
1425  * When the port is attached to a bridge, we always return
1426  * EBUSY.
1427  * Otherwise, if the port is already bound to a file descriptor,
1428  * then we unconditionally return the existing adapter into *na.
1429  * In all the other cases, we return (into *na) either native,
1430  * generic or NULL, according to the following table:
1431  *
1432  *					native_support
1433  * active_fds   dev.netmap.admode         YES     NO
1434  * -------------------------------------------------------
1435  *    >0              *                 NA(ifp) NA(ifp)
1436  *
1437  *     0        NETMAP_ADMODE_BEST      NATIVE  GENERIC
1438  *     0        NETMAP_ADMODE_NATIVE    NATIVE   NULL
1439  *     0        NETMAP_ADMODE_GENERIC   GENERIC GENERIC
1440  *
1441  */
1442 static void netmap_hw_dtor(struct netmap_adapter *); /* needed by NM_IS_NATIVE() */
1443 int
1444 netmap_get_hw_na(struct ifnet *ifp, struct netmap_mem_d *nmd, struct netmap_adapter **na)
1445 {
1446 	/* generic support */
1447 	int i = netmap_admode;	/* Take a snapshot. */
1448 	struct netmap_adapter *prev_na;
1449 	int error = 0;
1450 
1451 	*na = NULL; /* default */
1452 
1453 	/* reset in case of invalid value */
1454 	if (i < NETMAP_ADMODE_BEST || i >= NETMAP_ADMODE_LAST)
1455 		i = netmap_admode = NETMAP_ADMODE_BEST;
1456 
1457 	if (NM_NA_VALID(ifp)) {
1458 		prev_na = NA(ifp);
1459 		/* If an adapter already exists, return it if
1460 		 * there are active file descriptors or if
1461 		 * netmap is not forced to use generic
1462 		 * adapters.
1463 		 */
1464 		if (NETMAP_OWNED_BY_ANY(prev_na)
1465 			|| i != NETMAP_ADMODE_GENERIC
1466 			|| prev_na->na_flags & NAF_FORCE_NATIVE
1467 #ifdef WITH_PIPES
1468 			/* ugly, but we cannot allow an adapter switch
1469 			 * if some pipe is referring to this one
1470 			 */
1471 			|| prev_na->na_next_pipe > 0
1472 #endif
1473 		) {
1474 			*na = prev_na;
1475 			goto assign_mem;
1476 		}
1477 	}
1478 
1479 	/* If there isn't native support and netmap is not allowed
1480 	 * to use generic adapters, we cannot satisfy the request.
1481 	 */
1482 	if (!NM_IS_NATIVE(ifp) && i == NETMAP_ADMODE_NATIVE)
1483 		return EOPNOTSUPP;
1484 
1485 	/* Otherwise, create a generic adapter and return it,
1486 	 * saving the previously used netmap adapter, if any.
1487 	 *
1488 	 * Note that here 'prev_na', if not NULL, MUST be a
1489 	 * native adapter, and CANNOT be a generic one. This is
1490 	 * true because generic adapters are created on demand, and
1491 	 * destroyed when not used anymore. Therefore, if the adapter
1492 	 * currently attached to an interface 'ifp' is generic, it
1493 	 * must be that
1494 	 * (NA(ifp)->active_fds > 0 || NETMAP_OWNED_BY_KERN(NA(ifp))).
1495 	 * Consequently, if NA(ifp) is generic, we will enter one of
1496 	 * the branches above. This ensures that we never override
1497 	 * a generic adapter with another generic adapter.
1498 	 */
1499 	error = generic_netmap_attach(ifp);
1500 	if (error)
1501 		return error;
1502 
1503 	*na = NA(ifp);
1504 
1505 assign_mem:
1506 	if (nmd != NULL && !((*na)->na_flags & NAF_MEM_OWNER) &&
1507 	    (*na)->active_fds == 0 && ((*na)->nm_mem != nmd)) {
1508 		(*na)->nm_mem_prev = (*na)->nm_mem;
1509 		(*na)->nm_mem = netmap_mem_get(nmd);
1510 	}
1511 
1512 	return 0;
1513 }
1514 
1515 /*
1516  * MUST BE CALLED UNDER NMG_LOCK()
1517  *
1518  * Get a refcounted reference to a netmap adapter attached
1519  * to the interface specified by req.
1520  * This is always called in the execution of an ioctl().
1521  *
1522  * Return ENXIO if the interface specified by the request does
1523  * not exist, ENOTSUP if netmap is not supported by the interface,
1524  * EBUSY if the interface is already attached to a bridge,
1525  * EINVAL if parameters are invalid, ENOMEM if needed resources
1526  * could not be allocated.
1527  * If successful, hold a reference to the netmap adapter.
1528  *
1529  * If the interface specified by req is a system one, also keep
1530  * a reference to it and return a valid *ifp.
1531  */
1532 int
1533 netmap_get_na(struct nmreq_header *hdr,
1534 	      struct netmap_adapter **na, struct ifnet **ifp,
1535 	      struct netmap_mem_d *nmd, int create)
1536 {
1537 	struct nmreq_register *req = (struct nmreq_register *)(uintptr_t)hdr->nr_body;
1538 	int error = 0;
1539 	struct netmap_adapter *ret = NULL;
1540 	int nmd_ref = 0;
1541 
1542 	*na = NULL;     /* default return value */
1543 	*ifp = NULL;
1544 
1545 	if (hdr->nr_reqtype != NETMAP_REQ_REGISTER) {
1546 		return EINVAL;
1547 	}
1548 
1549 	if (req->nr_mode == NR_REG_PIPE_MASTER ||
1550 			req->nr_mode == NR_REG_PIPE_SLAVE) {
1551 		/* Do not accept deprecated pipe modes. */
1552 		nm_prerr("Deprecated pipe nr_mode, use xx{yy or xx}yy syntax");
1553 		return EINVAL;
1554 	}
1555 
1556 	NMG_LOCK_ASSERT();
1557 
	/* if the request contains a memid, try to find the
1559 	 * corresponding memory region
1560 	 */
1561 	if (nmd == NULL && req->nr_mem_id) {
1562 		nmd = netmap_mem_find(req->nr_mem_id);
1563 		if (nmd == NULL)
1564 			return EINVAL;
		/* keep the reference */
1566 		nmd_ref = 1;
1567 	}
1568 
1569 	/* We cascade through all possible types of netmap adapter.
1570 	 * All netmap_get_*_na() functions return an error and an na,
1571 	 * with the following combinations:
1572 	 *
1573 	 * error    na
1574 	 *   0	   NULL		type doesn't match
1575 	 *  !0	   NULL		type matches, but na creation/lookup failed
1576 	 *   0	  !NULL		type matches and na created/found
1577 	 *  !0    !NULL		impossible
1578 	 */
1579 	error = netmap_get_null_na(hdr, na, nmd, create);
1580 	if (error || *na != NULL)
1581 		goto out;
1582 
1583 	/* try to see if this is a monitor port */
1584 	error = netmap_get_monitor_na(hdr, na, nmd, create);
1585 	if (error || *na != NULL)
1586 		goto out;
1587 
1588 	/* try to see if this is a pipe port */
1589 	error = netmap_get_pipe_na(hdr, na, nmd, create);
1590 	if (error || *na != NULL)
1591 		goto out;
1592 
1593 	/* try to see if this is a vale port */
1594 	error = netmap_get_vale_na(hdr, na, nmd, create);
1595 	if (error)
1596 		goto out;
1597 
	if (*na != NULL) /* valid match in netmap_get_vale_na() */
1599 		goto out;
1600 
1601 	/*
1602 	 * This must be a hardware na, lookup the name in the system.
1603 	 * Note that by hardware we actually mean "it shows up in ifconfig".
1604 	 * This may still be a tap, a veth/epair, or even a
1605 	 * persistent VALE port.
1606 	 */
1607 	*ifp = ifunit_ref(hdr->nr_name);
1608 	if (*ifp == NULL) {
1609 		error = ENXIO;
1610 		goto out;
1611 	}
1612 
1613 	error = netmap_get_hw_na(*ifp, nmd, &ret);
1614 	if (error)
1615 		goto out;
1616 
1617 	*na = ret;
1618 	netmap_adapter_get(ret);
1619 
1620 	/*
1621 	 * if the adapter supports the host rings and it is not already open,
1622 	 * try to set the number of host rings as requested by the user
1623 	 */
1624 	if (((*na)->na_flags & NAF_HOST_RINGS) && (*na)->active_fds == 0) {
1625 		if (req->nr_host_tx_rings)
1626 			(*na)->num_host_tx_rings = req->nr_host_tx_rings;
1627 		if (req->nr_host_rx_rings)
1628 			(*na)->num_host_rx_rings = req->nr_host_rx_rings;
1629 	}
1630 	nm_prdis("%s: host tx %d rx %u", (*na)->name, (*na)->num_host_tx_rings,
1631 			(*na)->num_host_rx_rings);
1632 
1633 out:
1634 	if (error) {
1635 		if (ret)
1636 			netmap_adapter_put(ret);
1637 		if (*ifp) {
1638 			if_rele(*ifp);
1639 			*ifp = NULL;
1640 		}
1641 	}
1642 	if (nmd_ref)
1643 		netmap_mem_put(nmd);
1644 
1645 	return error;
1646 }
1647 
1648 /* undo netmap_get_na() */
1649 void
1650 netmap_unget_na(struct netmap_adapter *na, struct ifnet *ifp)
1651 {
1652 	if (ifp)
1653 		if_rele(ifp);
1654 	if (na)
1655 		netmap_adapter_put(na);
1656 }
1657 
1658 
1659 #define NM_FAIL_ON(t) do {						\
1660 	if (unlikely(t)) {						\
1661 		nm_prlim(5, "%s: fail '" #t "' "				\
1662 			"h %d c %d t %d "				\
1663 			"rh %d rc %d rt %d "				\
1664 			"hc %d ht %d",					\
1665 			kring->name,					\
1666 			head, cur, ring->tail,				\
1667 			kring->rhead, kring->rcur, kring->rtail,	\
1668 			kring->nr_hwcur, kring->nr_hwtail);		\
1669 		return kring->nkr_num_slots;				\
1670 	}								\
1671 } while (0)
1672 
1673 /*
1674  * validate parameters on entry for *_txsync()
 * Returns ring->head if ok, or something >= kring->nkr_num_slots
1676  * in case of error.
1677  *
1678  * rhead, rcur and rtail=hwtail are stored from previous round.
1679  * hwcur is the next packet to send to the ring.
1680  *
1681  * We want
1682  *    hwcur <= *rhead <= head <= cur <= tail = *rtail <= hwtail
1683  *
1684  * hwcur, rhead, rtail and hwtail are reliable
1685  */
1686 u_int
1687 nm_txsync_prologue(struct netmap_kring *kring, struct netmap_ring *ring)
1688 {
1689 	u_int head = ring->head; /* read only once */
1690 	u_int cur = ring->cur; /* read only once */
1691 	u_int n = kring->nkr_num_slots;
1692 
1693 	nm_prdis(5, "%s kcur %d ktail %d head %d cur %d tail %d",
1694 		kring->name,
1695 		kring->nr_hwcur, kring->nr_hwtail,
1696 		ring->head, ring->cur, ring->tail);
1697 #if 1 /* kernel sanity checks; but we can trust the kring. */
1698 	NM_FAIL_ON(kring->nr_hwcur >= n || kring->rhead >= n ||
1699 	    kring->rtail >= n ||  kring->nr_hwtail >= n);
1700 #endif /* kernel sanity checks */
1701 	/*
1702 	 * user sanity checks. We only use head,
1703 	 * A, B, ... are possible positions for head:
1704 	 *
1705 	 *  0    A  rhead   B  rtail   C  n-1
1706 	 *  0    D  rtail   E  rhead   F  n-1
1707 	 *
1708 	 * B, F, D are valid. A, C, E are wrong
1709 	 */
1710 	if (kring->rtail >= kring->rhead) {
1711 		/* want rhead <= head <= rtail */
1712 		NM_FAIL_ON(head < kring->rhead || head > kring->rtail);
1713 		/* and also head <= cur <= rtail */
1714 		NM_FAIL_ON(cur < head || cur > kring->rtail);
1715 	} else { /* here rtail < rhead */
1716 		/* we need head outside rtail .. rhead */
1717 		NM_FAIL_ON(head > kring->rtail && head < kring->rhead);
1718 
1719 		/* two cases now: head <= rtail or head >= rhead  */
1720 		if (head <= kring->rtail) {
1721 			/* want head <= cur <= rtail */
1722 			NM_FAIL_ON(cur < head || cur > kring->rtail);
1723 		} else { /* head >= rhead */
1724 			/* cur must be outside rtail..head */
1725 			NM_FAIL_ON(cur > kring->rtail && cur < head);
1726 		}
1727 	}
1728 	if (ring->tail != kring->rtail) {
1729 		nm_prlim(5, "%s tail overwritten was %d need %d", kring->name,
1730 			ring->tail, kring->rtail);
1731 		ring->tail = kring->rtail;
1732 	}
1733 	kring->rhead = head;
1734 	kring->rcur = cur;
1735 	return head;
1736 }
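
/*
 * Illustrative fragment (not called from anywhere): how the core uses
 * the prologue, mirroring the NIOCTXSYNC path in netmap_ioctl() below.
 *
 *	if (nm_txsync_prologue(kring, ring) >= kring->nkr_num_slots) {
 *		netmap_ring_reinit(kring);	// invalid user state
 *	} else if (kring->nm_sync(kring, flags) == 0) {
 *		nm_sync_finalize(kring);	// publish the new hwtail
 *	}
 */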
1737 
1738 
1739 /*
1740  * validate parameters on entry for *_rxsync()
1741  * Returns ring->head if ok, kring->nkr_num_slots on error.
1742  *
1743  * For a valid configuration,
1744  * hwcur <= head <= cur <= tail <= hwtail
1745  *
1746  * We only consider head and cur.
1747  * hwcur and hwtail are reliable.
1748  *
1749  */
1750 u_int
1751 nm_rxsync_prologue(struct netmap_kring *kring, struct netmap_ring *ring)
1752 {
1753 	uint32_t const n = kring->nkr_num_slots;
1754 	uint32_t head, cur;
1755 
1756 	nm_prdis(5,"%s kc %d kt %d h %d c %d t %d",
1757 		kring->name,
1758 		kring->nr_hwcur, kring->nr_hwtail,
1759 		ring->head, ring->cur, ring->tail);
1760 	/*
1761 	 * Before storing the new values, we should check they do not
1762 	 * move backwards. However:
1763 	 * - head is not an issue because the previous value is hwcur;
1764 	 * - cur could in principle go back, however it does not matter
1765 	 *   because we are processing a brand new rxsync()
1766 	 */
1767 	cur = kring->rcur = ring->cur;	/* read only once */
1768 	head = kring->rhead = ring->head;	/* read only once */
1769 #if 1 /* kernel sanity checks */
1770 	NM_FAIL_ON(kring->nr_hwcur >= n || kring->nr_hwtail >= n);
1771 #endif /* kernel sanity checks */
1772 	/* user sanity checks */
1773 	if (kring->nr_hwtail >= kring->nr_hwcur) {
1774 		/* want hwcur <= rhead <= hwtail */
1775 		NM_FAIL_ON(head < kring->nr_hwcur || head > kring->nr_hwtail);
1776 		/* and also rhead <= rcur <= hwtail */
1777 		NM_FAIL_ON(cur < head || cur > kring->nr_hwtail);
1778 	} else {
1779 		/* we need rhead outside hwtail..hwcur */
1780 		NM_FAIL_ON(head < kring->nr_hwcur && head > kring->nr_hwtail);
1781 		/* two cases now: head <= hwtail or head >= hwcur  */
1782 		if (head <= kring->nr_hwtail) {
1783 			/* want head <= cur <= hwtail */
1784 			NM_FAIL_ON(cur < head || cur > kring->nr_hwtail);
1785 		} else {
1786 			/* cur must be outside hwtail..head */
1787 			NM_FAIL_ON(cur < head && cur > kring->nr_hwtail);
1788 		}
1789 	}
1790 	if (ring->tail != kring->rtail) {
1791 		nm_prlim(5, "%s tail overwritten was %d need %d",
1792 			kring->name,
1793 			ring->tail, kring->rtail);
1794 		ring->tail = kring->rtail;
1795 	}
1796 	return head;
1797 }
1798 
1799 
1800 /*
1801  * Error routine called when txsync/rxsync detects an error.
1802  * Can't do much more than resetting head = cur = hwcur, tail = hwtail
1803  * Return 1 on reinit.
1804  *
1805  * This routine is only called by the upper half of the kernel.
1806  * It only reads hwcur (which is changed only by the upper half, too)
1807  * and hwtail (which may be changed by the lower half, but only on
1808  * a tx ring and only to increase it, so any error will be recovered
1809  * on the next call). For the above, we don't strictly need to call
1810  * it under lock.
1811  */
1812 int
1813 netmap_ring_reinit(struct netmap_kring *kring)
1814 {
1815 	struct netmap_ring *ring = kring->ring;
1816 	u_int i, lim = kring->nkr_num_slots - 1;
1817 	int errors = 0;
1818 
1819 	// XXX KASSERT nm_kr_tryget
1820 	nm_prlim(10, "called for %s", kring->name);
1821 	// XXX probably wrong to trust userspace
1822 	kring->rhead = ring->head;
1823 	kring->rcur  = ring->cur;
1824 	kring->rtail = ring->tail;
1825 
1826 	if (ring->cur > lim)
1827 		errors++;
1828 	if (ring->head > lim)
1829 		errors++;
1830 	if (ring->tail > lim)
1831 		errors++;
1832 	for (i = 0; i <= lim; i++) {
1833 		u_int idx = ring->slot[i].buf_idx;
1834 		u_int len = ring->slot[i].len;
1835 		if (idx < 2 || idx >= kring->na->na_lut.objtotal) {
1836 			nm_prlim(5, "bad index at slot %d idx %d len %d ", i, idx, len);
1837 			ring->slot[i].buf_idx = 0;
1838 			ring->slot[i].len = 0;
1839 		} else if (len > NETMAP_BUF_SIZE(kring->na)) {
1840 			ring->slot[i].len = 0;
1841 			nm_prlim(5, "bad len at slot %d idx %d len %d", i, idx, len);
1842 		}
1843 	}
1844 	if (errors) {
1845 		nm_prlim(10, "total %d errors", errors);
1846 		nm_prlim(10, "%s reinit, cur %d -> %d tail %d -> %d",
1847 			kring->name,
1848 			ring->cur, kring->nr_hwcur,
1849 			ring->tail, kring->nr_hwtail);
1850 		ring->head = kring->rhead = kring->nr_hwcur;
1851 		ring->cur  = kring->rcur  = kring->nr_hwcur;
1852 		ring->tail = kring->rtail = kring->nr_hwtail;
1853 	}
1854 	return (errors ? 1 : 0);
1855 }
1856 
1857 /* interpret the ringid and flags fields of an nmreq, by translating them
1858  * into a pair of intervals of ring indices:
1859  *
1860  * [priv->np_txqfirst, priv->np_txqlast) and
1861  * [priv->np_rxqfirst, priv->np_rxqlast)
1862  *
1863  */
1864 int
1865 netmap_interp_ringid(struct netmap_priv_d *priv, struct nmreq_header *hdr)
1866 {
1867 	struct netmap_adapter *na = priv->np_na;
1868 	struct nmreq_register *reg = (struct nmreq_register *)hdr->nr_body;
1869 	int excluded_direction[] = { NR_TX_RINGS_ONLY, NR_RX_RINGS_ONLY };
1870 	enum txrx t;
1871 	u_int j;
1872 	u_int nr_flags = reg->nr_flags, nr_mode = reg->nr_mode,
1873 	      nr_ringid = reg->nr_ringid;
1874 
1875 	for_rx_tx(t) {
1876 		if (nr_flags & excluded_direction[t]) {
1877 			priv->np_qfirst[t] = priv->np_qlast[t] = 0;
1878 			continue;
1879 		}
1880 		switch (nr_mode) {
1881 		case NR_REG_ALL_NIC:
1882 		case NR_REG_NULL:
1883 			priv->np_qfirst[t] = 0;
1884 			priv->np_qlast[t] = nma_get_nrings(na, t);
1885 			nm_prdis("ALL/PIPE: %s %d %d", nm_txrx2str(t),
1886 				priv->np_qfirst[t], priv->np_qlast[t]);
1887 			break;
1888 		case NR_REG_SW:
1889 		case NR_REG_NIC_SW:
1890 			if (!(na->na_flags & NAF_HOST_RINGS)) {
1891 				nm_prerr("host rings not supported");
1892 				return EINVAL;
1893 			}
1894 			priv->np_qfirst[t] = (nr_mode == NR_REG_SW ?
1895 				nma_get_nrings(na, t) : 0);
1896 			priv->np_qlast[t] = netmap_all_rings(na, t);
1897 			nm_prdis("%s: %s %d %d", nr_mode == NR_REG_SW ? "SW" : "NIC+SW",
1898 				nm_txrx2str(t),
1899 				priv->np_qfirst[t], priv->np_qlast[t]);
1900 			break;
1901 		case NR_REG_ONE_NIC:
1902 			if (nr_ringid >= na->num_tx_rings &&
1903 					nr_ringid >= na->num_rx_rings) {
1904 				nm_prerr("invalid ring id %d", nr_ringid);
1905 				return EINVAL;
1906 			}
1907 			/* if not enough rings, use the first one */
1908 			j = nr_ringid;
1909 			if (j >= nma_get_nrings(na, t))
1910 				j = 0;
1911 			priv->np_qfirst[t] = j;
1912 			priv->np_qlast[t] = j + 1;
1913 			nm_prdis("ONE_NIC: %s %d %d", nm_txrx2str(t),
1914 				priv->np_qfirst[t], priv->np_qlast[t]);
1915 			break;
1916 		case NR_REG_ONE_SW:
1917 			if (!(na->na_flags & NAF_HOST_RINGS)) {
1918 				nm_prerr("host rings not supported");
1919 				return EINVAL;
1920 			}
1921 			if (nr_ringid >= na->num_host_tx_rings &&
1922 					nr_ringid >= na->num_host_rx_rings) {
1923 				nm_prerr("invalid ring id %d", nr_ringid);
1924 				return EINVAL;
1925 			}
1926 			/* if not enough rings, use the first one */
1927 			j = nr_ringid;
1928 			if (j >= nma_get_host_nrings(na, t))
1929 				j = 0;
1930 			priv->np_qfirst[t] = nma_get_nrings(na, t) + j;
1931 			priv->np_qlast[t] = nma_get_nrings(na, t) + j + 1;
1932 			nm_prdis("ONE_SW: %s %d %d", nm_txrx2str(t),
1933 				priv->np_qfirst[t], priv->np_qlast[t]);
1934 			break;
1935 		default:
1936 			nm_prerr("invalid regif type %d", nr_mode);
1937 			return EINVAL;
1938 		}
1939 	}
1940 	priv->np_flags = nr_flags;
1941 
1942 	/* Allow transparent forwarding mode in the host --> nic
1943 	 * direction only if all the TX hw rings have been opened. */
1944 	if (priv->np_qfirst[NR_TX] == 0 &&
1945 			priv->np_qlast[NR_TX] >= na->num_tx_rings) {
1946 		priv->np_sync_flags |= NAF_CAN_FORWARD_DOWN;
1947 	}
1948 
1949 	if (netmap_verbose) {
1950 		nm_prinf("%s: tx [%d,%d) rx [%d,%d) id %d",
1951 			na->name,
1952 			priv->np_qfirst[NR_TX],
1953 			priv->np_qlast[NR_TX],
1954 			priv->np_qfirst[NR_RX],
1955 			priv->np_qlast[NR_RX],
1956 			nr_ringid);
1957 	}
1958 	return 0;
1959 }
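
/*
 * Worked example (illustrative): on an adapter with 4 tx and 4 rx
 * hardware rings plus one host ring per direction,
 *
 *	NR_REG_ALL_NIC		-> tx [0,4) rx [0,4)
 *	NR_REG_ONE_NIC, id 2	-> tx [2,3) rx [2,3)
 *	NR_REG_SW		-> tx [4,5) rx [4,5)
 *	NR_REG_NIC_SW		-> tx [0,5) rx [0,5)
 */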
1960 
1961 
1962 /*
1963  * Set the ring ID. For devices with a single queue, a request
1964  * for all rings is the same as a single ring.
1965  */
1966 static int
1967 netmap_set_ringid(struct netmap_priv_d *priv, struct nmreq_header *hdr)
1968 {
1969 	struct netmap_adapter *na = priv->np_na;
1970 	struct nmreq_register *reg = (struct nmreq_register *)hdr->nr_body;
1971 	int error;
1972 	enum txrx t;
1973 
1974 	error = netmap_interp_ringid(priv, hdr);
1975 	if (error) {
1976 		return error;
1977 	}
1978 
1979 	priv->np_txpoll = (reg->nr_flags & NR_NO_TX_POLL) ? 0 : 1;
1980 
1981 	/* optimization: count the users registered for more than
1982 	 * one ring, which are the ones sleeping on the global queue.
1983 	 * The default netmap_notify() callback will then
1984 	 * avoid signaling the global queue if nobody is using it
1985 	 */
1986 	for_rx_tx(t) {
1987 		if (nm_si_user(priv, t))
1988 			na->si_users[t]++;
1989 	}
1990 	return 0;
1991 }
1992 
1993 static void
1994 netmap_unset_ringid(struct netmap_priv_d *priv)
1995 {
1996 	struct netmap_adapter *na = priv->np_na;
1997 	enum txrx t;
1998 
1999 	for_rx_tx(t) {
2000 		if (nm_si_user(priv, t))
2001 			na->si_users[t]--;
2002 		priv->np_qfirst[t] = priv->np_qlast[t] = 0;
2003 	}
2004 	priv->np_flags = 0;
2005 	priv->np_txpoll = 0;
2006 	priv->np_kloop_state = 0;
2007 }
2008 
2009 #define within_sel(p_, t_, i_)					  	  \
2010 	((i_) < (p_)->np_qlast[(t_)])
2011 #define nonempty_sel(p_, t_)						  \
2012 	(within_sel((p_), (t_), (p_)->np_qfirst[(t_)]))
2013 #define foreach_selected_ring(p_, t_, i_, kring_)			  \
2014 	for ((t_) = nonempty_sel((p_), NR_RX) ? NR_RX : NR_TX,		  \
2015 	     (i_) = (p_)->np_qfirst[(t_)];				  \
	     ((t_) == NR_RX ||						  \
	      ((t_) == NR_TX && within_sel((p_), (t_), (i_)))) &&	  \
2018 	      ((kring_) = NMR((p_)->np_na, (t_))[(i_)]); 		  \
2019 	     (i_) = within_sel((p_), (t_), (i_) + 1) ? (i_) + 1 :         \
2020 		(++(t_) < NR_TXRX ? (p_)->np_qfirst[(t_)] : (i_)))
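
/*
 * Example (illustrative): with np_qfirst/np_qlast = [1,3) in both
 * directions, foreach_selected_ring() visits the krings in the order
 * RX1, RX2, TX1, TX2: the RX interval first (if non-empty), then the
 * TX interval.
 */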
2021 
2022 
2023 /* Set the nr_pending_mode for the requested rings.
2024  * If requested, also try to get exclusive access to the rings, provided
2025  * the rings we want to bind are not exclusively owned by a previous bind.
2026  */
2027 static int
2028 netmap_krings_get(struct netmap_priv_d *priv)
2029 {
2030 	struct netmap_adapter *na = priv->np_na;
2031 	u_int i;
2032 	struct netmap_kring *kring;
2033 	int excl = (priv->np_flags & NR_EXCLUSIVE);
2034 	enum txrx t;
2035 
2036 	if (netmap_debug & NM_DEBUG_ON)
2037 		nm_prinf("%s: grabbing tx [%d, %d) rx [%d, %d)",
2038 			na->name,
2039 			priv->np_qfirst[NR_TX],
2040 			priv->np_qlast[NR_TX],
2041 			priv->np_qfirst[NR_RX],
2042 			priv->np_qlast[NR_RX]);
2043 
	/* first round: check that none of the requested rings
	 * is already exclusively owned, and that we are not
	 * asking for exclusive ownership of rings already in use
	 */
2048 	foreach_selected_ring(priv, t, i, kring) {
2049 		if ((kring->nr_kflags & NKR_EXCLUSIVE) ||
2050 		    (kring->users && excl))
2051 		{
2052 			nm_prdis("ring %s busy", kring->name);
2053 			return EBUSY;
2054 		}
2055 	}
2056 
2057 	/* second round: increment usage count (possibly marking them
2058 	 * as exclusive) and set the nr_pending_mode
2059 	 */
2060 	foreach_selected_ring(priv, t, i, kring) {
2061 		kring->users++;
2062 		if (excl)
2063 			kring->nr_kflags |= NKR_EXCLUSIVE;
2064 		kring->nr_pending_mode = NKR_NETMAP_ON;
2065 	}
2066 
2067 	return 0;
2068 
2069 }
2070 
/* Undo netmap_krings_get(). This is done by clearing the exclusive mode
 * if it was requested at regif time, and by unsetting nr_pending_mode
 * if we are the last user of the involved rings. */
2074 static void
2075 netmap_krings_put(struct netmap_priv_d *priv)
2076 {
2077 	u_int i;
2078 	struct netmap_kring *kring;
2079 	int excl = (priv->np_flags & NR_EXCLUSIVE);
2080 	enum txrx t;
2081 
	nm_prdis("%s: releasing tx [%d, %d) rx [%d, %d)",
			priv->np_na->name,
			priv->np_qfirst[NR_TX],
			priv->np_qlast[NR_TX],
			priv->np_qfirst[NR_RX],
			priv->np_qlast[NR_RX]);
2088 
2089 	foreach_selected_ring(priv, t, i, kring) {
2090 		if (excl)
2091 			kring->nr_kflags &= ~NKR_EXCLUSIVE;
2092 		kring->users--;
2093 		if (kring->users == 0)
2094 			kring->nr_pending_mode = NKR_NETMAP_OFF;
2095 	}
2096 }
2097 
2098 static int
2099 nm_priv_rx_enabled(struct netmap_priv_d *priv)
2100 {
2101 	return (priv->np_qfirst[NR_RX] != priv->np_qlast[NR_RX]);
2102 }
2103 
2104 /* Validate the CSB entries for both directions (atok and ktoa).
2105  * To be called under NMG_LOCK(). */
2106 static int
2107 netmap_csb_validate(struct netmap_priv_d *priv, struct nmreq_opt_csb *csbo)
2108 {
2109 	struct nm_csb_atok *csb_atok_base =
2110 		(struct nm_csb_atok *)(uintptr_t)csbo->csb_atok;
2111 	struct nm_csb_ktoa *csb_ktoa_base =
2112 		(struct nm_csb_ktoa *)(uintptr_t)csbo->csb_ktoa;
2113 	enum txrx t;
2114 	int num_rings[NR_TXRX], tot_rings;
2115 	size_t entry_size[2];
2116 	void *csb_start[2];
2117 	int i;
2118 
2119 	if (priv->np_kloop_state & NM_SYNC_KLOOP_RUNNING) {
2120 		nm_prerr("Cannot update CSB while kloop is running");
2121 		return EBUSY;
2122 	}
2123 
2124 	tot_rings = 0;
2125 	for_rx_tx(t) {
2126 		num_rings[t] = priv->np_qlast[t] - priv->np_qfirst[t];
2127 		tot_rings += num_rings[t];
2128 	}
2129 	if (tot_rings <= 0)
2130 		return 0;
2131 
2132 	if (!(priv->np_flags & NR_EXCLUSIVE)) {
2133 		nm_prerr("CSB mode requires NR_EXCLUSIVE");
2134 		return EINVAL;
2135 	}
2136 
2137 	entry_size[0] = sizeof(*csb_atok_base);
2138 	entry_size[1] = sizeof(*csb_ktoa_base);
2139 	csb_start[0] = (void *)csb_atok_base;
2140 	csb_start[1] = (void *)csb_ktoa_base;
2141 
2142 	for (i = 0; i < 2; i++) {
2143 		/* On Linux we could use access_ok() to simplify
		 * the validation. However, the advantage of the
		 * approach used here is that it also works on
		 * FreeBSD. */
2147 		size_t csb_size = tot_rings * entry_size[i];
2148 		void *tmp;
2149 		int err;
2150 
2151 		if ((uintptr_t)csb_start[i] & (entry_size[i]-1)) {
2152 			nm_prerr("Unaligned CSB address");
2153 			return EINVAL;
2154 		}
2155 
2156 		tmp = nm_os_malloc(csb_size);
2157 		if (!tmp)
2158 			return ENOMEM;
2159 		if (i == 0) {
2160 			/* Application --> kernel direction. */
2161 			err = copyin(csb_start[i], tmp, csb_size);
2162 		} else {
2163 			/* Kernel --> application direction. */
2164 			memset(tmp, 0, csb_size);
2165 			err = copyout(tmp, csb_start[i], csb_size);
2166 		}
2167 		nm_os_free(tmp);
2168 		if (err) {
2169 			nm_prerr("Invalid CSB address");
2170 			return err;
2171 		}
2172 	}
2173 
2174 	priv->np_csb_atok_base = csb_atok_base;
2175 	priv->np_csb_ktoa_base = csb_ktoa_base;
2176 
2177 	/* Initialize the CSB. */
2178 	for_rx_tx(t) {
2179 		for (i = 0; i < num_rings[t]; i++) {
2180 			struct netmap_kring *kring =
2181 				NMR(priv->np_na, t)[i + priv->np_qfirst[t]];
2182 			struct nm_csb_atok *csb_atok = csb_atok_base + i;
2183 			struct nm_csb_ktoa *csb_ktoa = csb_ktoa_base + i;
2184 
2185 			if (t == NR_RX) {
2186 				csb_atok += num_rings[NR_TX];
2187 				csb_ktoa += num_rings[NR_TX];
2188 			}
2189 
2190 			CSB_WRITE(csb_atok, head, kring->rhead);
2191 			CSB_WRITE(csb_atok, cur, kring->rcur);
2192 			CSB_WRITE(csb_atok, appl_need_kick, 1);
2193 			CSB_WRITE(csb_atok, sync_flags, 1);
2194 			CSB_WRITE(csb_ktoa, hwcur, kring->nr_hwcur);
2195 			CSB_WRITE(csb_ktoa, hwtail, kring->nr_hwtail);
2196 			CSB_WRITE(csb_ktoa, kern_need_kick, 1);
2197 
2198 			nm_prinf("csb_init for kring %s: head %u, cur %u, "
2199 				"hwcur %u, hwtail %u", kring->name,
2200 				kring->rhead, kring->rcur, kring->nr_hwcur,
2201 				kring->nr_hwtail);
2202 		}
2203 	}
2204 
2205 	return 0;
2206 }
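
/*
 * Illustrative userspace sketch (fields not shown are zero): the
 * application allocates one atok/ktoa entry per bound ring, TX rings
 * first and RX rings second, and chains a nmreq_opt_csb to the
 * request options:
 *
 *	struct nm_csb_atok atok[NTX + NRX];	// zeroed, suitably aligned
 *	struct nm_csb_ktoa ktoa[NTX + NRX];
 *	struct nmreq_opt_csb opt = {
 *		.nro_opt.nro_reqtype = NETMAP_REQ_OPT_CSB,
 *		.csb_atok = (uintptr_t)atok,
 *		.csb_ktoa = (uintptr_t)ktoa,
 *	};
 *	opt.nro_opt.nro_next = hdr.nr_options;
 *	hdr.nr_options = (uintptr_t)&opt;
 */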
2207 
2208 /* Ensure that the netmap adapter can support the given MTU.
2209  * @return EINVAL if the na cannot be set to mtu, 0 otherwise.
2210  */
2211 int
netmap_buf_size_validate(const struct netmap_adapter *na, unsigned mtu)
{
2213 	unsigned nbs = NETMAP_BUF_SIZE(na);
2214 
2215 	if (mtu <= na->rx_buf_maxsize) {
2216 		/* The MTU fits a single NIC slot. We only
		 * need to check that netmap buffers are
2218 		 * large enough to hold an MTU. NS_MOREFRAG
2219 		 * cannot be used in this case. */
2220 		if (nbs < mtu) {
2221 			nm_prerr("error: netmap buf size (%u) "
2222 				 "< device MTU (%u)", nbs, mtu);
2223 			return EINVAL;
2224 		}
2225 	} else {
2226 		/* More NIC slots may be needed to receive
2227 		 * or transmit a single packet. Check that
2228 		 * the adapter supports NS_MOREFRAG and that
2229 		 * netmap buffers are large enough to hold
2230 		 * the maximum per-slot size. */
2231 		if (!(na->na_flags & NAF_MOREFRAG)) {
2232 			nm_prerr("error: large MTU (%d) needed "
2233 				 "but %s does not support "
2234 				 "NS_MOREFRAG", mtu,
2235 				 na->ifp->if_xname);
2236 			return EINVAL;
2237 		} else if (nbs < na->rx_buf_maxsize) {
2238 			nm_prerr("error: using NS_MOREFRAG on "
2239 				 "%s requires netmap buf size "
2240 				 ">= %u", na->ifp->if_xname,
2241 				 na->rx_buf_maxsize);
2242 			return EINVAL;
2243 		} else {
2244 			nm_prinf("info: netmap application on "
2245 				 "%s needs to support "
2246 				 "NS_MOREFRAG "
2247 				 "(MTU=%u,netmap_buf_size=%u)",
2248 				 na->ifp->if_xname, mtu, nbs);
2249 		}
2250 	}
2251 	return 0;
2252 }
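
/*
 * Worked example (illustrative numbers): with mtu = 9000,
 * rx_buf_maxsize = 2048 and 2048-byte netmap buffers, a packet spans
 * several slots, so the adapter must advertise NAF_MOREFRAG and the
 * application must handle NS_MOREFRAG; with mtu = 1500 the same
 * buffers hold a full frame and no fragmentation support is needed.
 */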
2253 
2254 /* Handle the offset option, if present in the hdr.
2255  * Returns 0 on success, or an error.
2256  */
2257 static int
2258 netmap_offsets_init(struct netmap_priv_d *priv, struct nmreq_header *hdr)
2259 {
2260 	struct nmreq_opt_offsets *opt;
2261 	struct netmap_adapter *na = priv->np_na;
2262 	struct netmap_kring *kring;
2263 	uint64_t mask = 0, bits = 0, maxbits = sizeof(uint64_t) * 8,
2264 		 max_offset = 0, initial_offset = 0, min_gap = 0;
2265 	u_int i;
2266 	enum txrx t;
2267 	int error = 0;
2268 
2269 	opt = (struct nmreq_opt_offsets *)
2270 		nmreq_getoption(hdr, NETMAP_REQ_OPT_OFFSETS);
2271 	if (opt == NULL)
2272 		return 0;
2273 
2274 	if (!(na->na_flags & NAF_OFFSETS)) {
2275 		if (netmap_verbose)
2276 			nm_prerr("%s does not support offsets",
2277 				na->name);
2278 		error = EOPNOTSUPP;
2279 		goto out;
2280 	}
2281 
2282 	/* check sanity of the opt values */
2283 	max_offset = opt->nro_max_offset;
2284 	min_gap = opt->nro_min_gap;
2285 	initial_offset = opt->nro_initial_offset;
2286 	bits = opt->nro_offset_bits;
2287 
2288 	if (bits > maxbits) {
2289 		if (netmap_verbose)
2290 			nm_prerr("bits: %llu too large (max %llu)",
2291 				(unsigned long long)bits,
2292 				(unsigned long long)maxbits);
2293 		error = EINVAL;
2294 		goto out;
2295 	}
2296 	/* we take bits == 0 as a request to use the entire field */
2297 	if (bits == 0 || bits == maxbits) {
2298 		/* shifting a type by sizeof(type) is undefined */
2299 		bits = maxbits;
2300 		mask = 0xffffffffffffffff;
2301 	} else {
2302 		mask = (1ULL << bits) - 1;
2303 	}
2304 	if (max_offset > NETMAP_BUF_SIZE(na)) {
2305 		if (netmap_verbose)
2306 			nm_prerr("max offset %llu > buf size %u",
2307 				(unsigned long long)max_offset, NETMAP_BUF_SIZE(na));
2308 		error = EINVAL;
2309 		goto out;
2310 	}
2311 	if ((max_offset & mask) != max_offset) {
2312 		if (netmap_verbose)
			nm_prerr("max offset %llu too large for %llu bits",
2314 				(unsigned long long)max_offset,
2315 				(unsigned long long)bits);
2316 		error = EINVAL;
2317 		goto out;
2318 	}
2319 	if (initial_offset > max_offset) {
2320 		if (netmap_verbose)
2321 			nm_prerr("initial offset %llu > max offset %llu",
2322 				(unsigned long long)initial_offset,
2323 				(unsigned long long)max_offset);
2324 		error = EINVAL;
2325 		goto out;
2326 	}
2327 
2328 	/* initialize the kring and ring fields. */
2329 	foreach_selected_ring(priv, t, i, kring) {
		struct netmap_ring *ring = kring->ring;
2332 		u_int j;
2333 
		/* if the ring is already in use, we check that the
2335 		 * new request is compatible with the existing one
2336 		 */
2337 		if (kring->offset_mask) {
2338 			if ((kring->offset_mask & mask) != mask ||
2339 			     kring->offset_max < max_offset) {
2340 				if (netmap_verbose)
					nm_prinf("%s: cannot increase "
						 "offset mask and/or max "
						 "(current: mask=%llx, max=%llu)",
2344 							kring->name,
2345 							(unsigned long long)kring->offset_mask,
2346 							(unsigned long long)kring->offset_max);
2347 				error = EBUSY;
2348 				goto out;
2349 			}
2350 			mask = kring->offset_mask;
2351 			max_offset = kring->offset_max;
2352 		} else {
2353 			kring->offset_mask = mask;
2354 			*(uint64_t *)(uintptr_t)&ring->offset_mask = mask;
2355 			kring->offset_max = max_offset;
2356 			kring->offset_gap = min_gap;
2357 		}
2358 
2359 		/* if there is an initial offset, put it into
2360 		 * all the slots
2361 		 *
2362 		 * Note: we cannot change the offsets if the
2363 		 * ring is already in use.
2364 		 */
2365 		if (!initial_offset || kring->users > 1)
2366 			continue;
2367 
2368 		for (j = 0; j < kring->nkr_num_slots; j++) {
2369 			struct netmap_slot *slot = ring->slot + j;
2370 
2371 			nm_write_offset(kring, slot, initial_offset);
2372 		}
2373 	}
2374 
2375 out:
2376 	opt->nro_opt.nro_status = error;
2377 	if (!error) {
2378 		opt->nro_max_offset = max_offset;
2379 	}
2380 	return error;
2381 
2382 }
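
/*
 * Illustrative userspace sketch (values are arbitrary): reserving room
 * for a 64-byte header in front of each packet payload:
 *
 *	struct nmreq_opt_offsets off = {
 *		.nro_opt.nro_reqtype = NETMAP_REQ_OPT_OFFSETS,
 *		.nro_offset_bits = 0,	// use the whole offset field
 *		.nro_initial_offset = 64,
 *		.nro_max_offset = 64,
 *	};
 *	off.nro_opt.nro_next = hdr.nr_options;
 *	hdr.nr_options = (uintptr_t)&off;
 */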
2383 
2384 static int
2385 netmap_compute_buf_len(struct netmap_priv_d *priv)
2386 {
2387 	enum txrx t;
2388 	u_int i;
2389 	struct netmap_kring *kring;
2390 	int error = 0;
2391 	unsigned mtu = 0;
2392 	struct netmap_adapter *na = priv->np_na;
2393 	uint64_t target, maxframe;
2394 
2395 	if (na->ifp != NULL)
2396 		mtu = nm_os_ifnet_mtu(na->ifp);
2397 
2398 	foreach_selected_ring(priv, t, i, kring) {
2399 
2400 		if (kring->users > 1)
2401 			continue;
2402 
2403 		target = NETMAP_BUF_SIZE(kring->na) -
2404 			kring->offset_max;
2405 		if (!kring->offset_gap)
2406 			kring->offset_gap =
2407 				NETMAP_BUF_SIZE(kring->na);
2408 		if (kring->offset_gap < target)
2409 			target = kring->offset_gap;
2410 
2411 		if (mtu) {
2412 			maxframe = mtu + ETH_HLEN +
2413 				ETH_FCS_LEN + VLAN_HLEN;
2414 			if (maxframe < target) {
2415 				target = maxframe;
2416 			}
2417 		}
2418 
2419 		error = kring->nm_bufcfg(kring, target);
2420 		if (error)
2421 			goto out;
2422 
2423 		*(uint64_t *)(uintptr_t)&kring->ring->buf_align = kring->buf_align;
2424 
2425 		if (mtu && t == NR_RX && kring->hwbuf_len < mtu) {
2426 			if (!(na->na_flags & NAF_MOREFRAG)) {
2427 				nm_prerr("error: large MTU (%d) needed "
2428 					 "but %s does not support "
2429 					 "NS_MOREFRAG", mtu,
2430 					 na->name);
2431 				error = EINVAL;
2432 				goto out;
2433 			} else {
2434 				nm_prinf("info: netmap application on "
2435 					 "%s needs to support "
2436 					 "NS_MOREFRAG "
2437 					 "(MTU=%u,buf_size=%llu)",
2438 					 kring->name, mtu,
2439 					 (unsigned long long)kring->hwbuf_len);
2440 			}
2441 		}
2442 	}
2443 out:
2444 	return error;
2445 }
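
/*
 * Worked example (illustrative): with 2048-byte netmap buffers,
 * offset_max = 128, no offset gap and mtu = 1500, the target is
 * min(2048 - 128, 1500 + 14 + 4 + 4) = 1522 bytes, which is then
 * passed to the adapter's nm_bufcfg() callback.
 */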
2446 
2447 /*
 * Possibly move the interface to netmap mode.
 * On success it returns a pointer to the netmap_if, otherwise NULL.
2450  * This must be called with NMG_LOCK held.
2451  *
2452  * The following na callbacks are called in the process:
2453  *
2454  * na->nm_config()			[by netmap_update_config]
2455  * (get current number and size of rings)
2456  *
 *  	We have a generic one for Linux (netmap_linux_config).
2458  *  	The bwrap has to override this, since it has to forward
2459  *  	the request to the wrapped adapter (netmap_bwrap_config).
2460  *
2461  *
2462  * na->nm_krings_create()
2463  * (create and init the krings array)
2464  *
2465  * 	One of the following:
2466  *
2467  *	* netmap_hw_krings_create, 			(hw ports)
2468  *		creates the standard layout for the krings
2469  * 		and adds the mbq (used for the host rings).
2470  *
2471  * 	* netmap_vp_krings_create			(VALE ports)
2472  * 		add leases and scratchpads
2473  *
2474  * 	* netmap_pipe_krings_create			(pipes)
2475  * 		create the krings and rings of both ends and
2476  * 		cross-link them
2477  *
2478  *      * netmap_monitor_krings_create 			(monitors)
2479  *      	avoid allocating the mbq
2480  *
2481  *      * netmap_bwrap_krings_create			(bwraps)
 *      	create both the bwrap krings array,
2483  *      	the krings array of the wrapped adapter, and
2484  *      	(if needed) the fake array for the host adapter
2485  *
2486  * na->nm_register(, 1)
2487  * (put the adapter in netmap mode)
2488  *
2489  * 	This may be one of the following:
2490  *
2491  * 	* netmap_hw_reg				        (hw ports)
2492  * 		checks that the ifp is still there, then calls
2493  * 		the hardware specific callback;
2494  *
2495  * 	* netmap_vp_reg					(VALE ports)
2496  *		If the port is connected to a bridge,
2497  *		set the NAF_NETMAP_ON flag under the
2498  *		bridge write lock.
2499  *
2500  *	* netmap_pipe_reg				(pipes)
2501  *		inform the other pipe end that it is no
2502  *		longer responsible for the lifetime of this
2503  *		pipe end
2504  *
2505  *	* netmap_monitor_reg				(monitors)
2506  *		intercept the sync callbacks of the monitored
2507  *		rings
2508  *
2509  *	* netmap_bwrap_reg				(bwraps)
2510  *		cross-link the bwrap and hwna rings,
2511  *		forward the request to the hwna, override
2512  *		the hwna notify callback (to get the frames
2513  *		coming from outside go through the bridge).
2514  *
2515  *
2516  */
2517 int
2518 netmap_do_regif(struct netmap_priv_d *priv, struct netmap_adapter *na,
2519 	struct nmreq_header *hdr)
2520 {
2521 	struct netmap_if *nifp = NULL;
2522 	int error;
2523 
2524 	NMG_LOCK_ASSERT();
2525 	priv->np_na = na;     /* store the reference */
2526 	error = netmap_mem_finalize(na->nm_mem, na);
2527 	if (error)
2528 		goto err;
2529 
2530 	if (na->active_fds == 0) {
2531 
2532 		/* cache the allocator info in the na */
2533 		error = netmap_mem_get_lut(na->nm_mem, &na->na_lut);
2534 		if (error)
2535 			goto err_drop_mem;
2536 		nm_prdis("lut %p bufs %u size %u", na->na_lut.lut, na->na_lut.objtotal,
2537 					    na->na_lut.objsize);
2538 
2539 		/* ring configuration may have changed, fetch from the card */
2540 		netmap_update_config(na);
2541 	}
2542 
2543 	/* compute the range of tx and rx rings to monitor */
2544 	error = netmap_set_ringid(priv, hdr);
2545 	if (error)
2546 		goto err_put_lut;
2547 
2548 	if (na->active_fds == 0) {
2549 		/*
2550 		 * If this is the first registration of the adapter,
2551 		 * perform sanity checks and create the in-kernel view
2552 		 * of the netmap rings (the netmap krings).
2553 		 */
2554 		if (na->ifp && nm_priv_rx_enabled(priv)) {
2555 			/* This netmap adapter is attached to an ifnet. */
2556 			unsigned mtu = nm_os_ifnet_mtu(na->ifp);
2557 
2558 			nm_prdis("%s: mtu %d rx_buf_maxsize %d netmap_buf_size %d",
2559 				na->name, mtu, na->rx_buf_maxsize, NETMAP_BUF_SIZE(na));
2560 
2561 			if (na->rx_buf_maxsize == 0) {
2562 				nm_prerr("%s: error: rx_buf_maxsize == 0", na->name);
2563 				error = EIO;
2564 				goto err_drop_mem;
2565 			}
2566 
2567 			error = netmap_buf_size_validate(na, mtu);
2568 			if (error)
2569 				goto err_drop_mem;
2570 		}
2571 
2572 		/*
2573 		 * Depending on the adapter, this may also create
2574 		 * the netmap rings themselves
2575 		 */
2576 		error = na->nm_krings_create(na);
2577 		if (error)
2578 			goto err_put_lut;
2579 
2580 	}
2581 
2582 	/* now the krings must exist and we can check whether some
2583 	 * previous bind has exclusive ownership on them, and set
2584 	 * nr_pending_mode
2585 	 */
2586 	error = netmap_krings_get(priv);
2587 	if (error)
2588 		goto err_del_krings;
2589 
	/* create all the netmap rings that are still missing */
2591 	error = netmap_mem_rings_create(na);
2592 	if (error)
2593 		goto err_rel_excl;
2594 
2595 	/* initialize offsets if requested */
2596 	error = netmap_offsets_init(priv, hdr);
2597 	if (error)
2598 		goto err_rel_excl;
2599 
2600 	/* compute and validate the buf lengths */
2601 	error = netmap_compute_buf_len(priv);
2602 	if (error)
2603 		goto err_rel_excl;
2604 
2605 	/* in all cases, create a new netmap if */
2606 	nifp = netmap_mem_if_new(na, priv);
2607 	if (nifp == NULL) {
2608 		error = ENOMEM;
2609 		goto err_rel_excl;
2610 	}
2611 
2612 	if (nm_kring_pending(priv)) {
2613 		/* Some kring is switching mode, tell the adapter to
		 * react to this. */
2615 		error = na->nm_register(na, 1);
2616 		if (error)
2617 			goto err_del_if;
2618 	}
2619 
2620 	/* Commit the reference. */
2621 	na->active_fds++;
2622 
2623 	/*
2624 	 * advertise that the interface is ready by setting np_nifp.
2625 	 * The barrier is needed because readers (poll, *SYNC and mmap)
2626 	 * check for priv->np_nifp != NULL without locking
2627 	 */
2628 	mb(); /* make sure previous writes are visible to all CPUs */
2629 	priv->np_nifp = nifp;
2630 
2631 	return 0;
2632 
2633 err_del_if:
2634 	netmap_mem_if_delete(na, nifp);
2635 err_rel_excl:
2636 	netmap_krings_put(priv);
2637 	netmap_mem_rings_delete(na);
2638 err_del_krings:
2639 	if (na->active_fds == 0)
2640 		na->nm_krings_delete(na);
2641 err_put_lut:
2642 	if (na->active_fds == 0)
2643 		memset(&na->na_lut, 0, sizeof(na->na_lut));
2644 err_drop_mem:
2645 	netmap_mem_drop(na);
2646 err:
2647 	priv->np_na = NULL;
2648 	return error;
2649 }
2650 
2651 
2652 /*
2653  * update kring and ring at the end of rxsync/txsync.
2654  */
2655 static inline void
2656 nm_sync_finalize(struct netmap_kring *kring)
2657 {
2658 	/*
	 * Update ring tail to what the kernel knows.
	 * After txsync: head/rhead/hwcur might be behind cur/rcur
	 * if there is no carrier.
2662 	 */
2663 	kring->ring->tail = kring->rtail = kring->nr_hwtail;
2664 
2665 	nm_prdis(5, "%s now hwcur %d hwtail %d head %d cur %d tail %d",
2666 		kring->name, kring->nr_hwcur, kring->nr_hwtail,
2667 		kring->rhead, kring->rcur, kring->rtail);
2668 }
2669 
2670 /* set ring timestamp */
2671 static inline void
2672 ring_timestamp_set(struct netmap_ring *ring)
2673 {
2674 	if (netmap_no_timestamp == 0 || ring->flags & NR_TIMESTAMP) {
2675 		microtime(&ring->ts);
2676 	}
2677 }
2678 
2679 static int nmreq_copyin(struct nmreq_header *, int);
2680 static int nmreq_copyout(struct nmreq_header *, int);
2681 static int nmreq_checkoptions(struct nmreq_header *);
2682 
2683 /*
2684  * ioctl(2) support for the "netmap" device.
2685  *
 * The following is the list of accepted commands:
2687  * - NIOCCTRL		device control API
2688  * - NIOCTXSYNC		sync TX rings
2689  * - NIOCRXSYNC		sync RX rings
2690  * - SIOCGIFADDR	just for convenience
2691  * - NIOCGINFO		deprecated (legacy API)
2692  * - NIOCREGIF		deprecated (legacy API)
2693  *
2694  * Return 0 on success, errno otherwise.
2695  */
2696 int
2697 netmap_ioctl(struct netmap_priv_d *priv, u_long cmd, caddr_t data,
2698 		struct thread *td, int nr_body_is_user)
2699 {
2700 	struct mbq q;	/* packets from RX hw queues to host stack */
2701 	struct netmap_adapter *na = NULL;
2702 	struct netmap_mem_d *nmd = NULL;
2703 	struct ifnet *ifp = NULL;
2704 	int error = 0;
2705 	u_int i, qfirst, qlast;
2706 	struct netmap_kring **krings;
2707 	int sync_flags;
2708 	enum txrx t;
2709 
2710 	switch (cmd) {
2711 	case NIOCCTRL: {
2712 		struct nmreq_header *hdr = (struct nmreq_header *)data;
2713 
2714 		if (hdr->nr_version < NETMAP_MIN_API ||
2715 		    hdr->nr_version > NETMAP_MAX_API) {
2716 			nm_prerr("API mismatch: got %d need %d",
2717 				hdr->nr_version, NETMAP_API);
2718 			return EINVAL;
2719 		}
2720 
2721 		/* Make a kernel-space copy of the user-space nr_body.
2722 		 * For convenience, the nr_body pointer and the pointers
2723 		 * in the options list will be replaced with their
2724 		 * kernel-space counterparts. The original pointers are
2725 		 * saved internally and later restored by nmreq_copyout
2726 		 */
2727 		error = nmreq_copyin(hdr, nr_body_is_user);
2728 		if (error) {
2729 			return error;
2730 		}
2731 
2732 		/* Sanitize hdr->nr_name. */
2733 		hdr->nr_name[sizeof(hdr->nr_name) - 1] = '\0';
2734 
2735 		switch (hdr->nr_reqtype) {
2736 		case NETMAP_REQ_REGISTER: {
2737 			struct nmreq_register *req =
2738 				(struct nmreq_register *)(uintptr_t)hdr->nr_body;
2739 			struct netmap_if *nifp;
2740 
2741 			/* Protect access to priv from concurrent requests. */
2742 			NMG_LOCK();
2743 			do {
2744 				struct nmreq_option *opt;
2745 				u_int memflags;
2746 
2747 				if (priv->np_nifp != NULL) {	/* thread already registered */
2748 					error = EBUSY;
2749 					break;
2750 				}
2751 
2752 #ifdef WITH_EXTMEM
2753 				opt = nmreq_getoption(hdr, NETMAP_REQ_OPT_EXTMEM);
2754 				if (opt != NULL) {
2755 					struct nmreq_opt_extmem *e =
2756 						(struct nmreq_opt_extmem *)opt;
2757 
2758 					nmd = netmap_mem_ext_create(e->nro_usrptr,
2759 							&e->nro_info, &error);
2760 					opt->nro_status = error;
2761 					if (nmd == NULL)
2762 						break;
2763 				}
2764 #endif /* WITH_EXTMEM */
2765 
2766 				if (nmd == NULL && req->nr_mem_id) {
2767 					/* find the allocator and get a reference */
2768 					nmd = netmap_mem_find(req->nr_mem_id);
2769 					if (nmd == NULL) {
2770 						if (netmap_verbose) {
2771 							nm_prerr("%s: failed to find mem_id %u",
2772 									hdr->nr_name, req->nr_mem_id);
2773 						}
2774 						error = EINVAL;
2775 						break;
2776 					}
2777 				}
2778 				/* find the interface and a reference */
2779 				error = netmap_get_na(hdr, &na, &ifp, nmd,
2780 						      1 /* create */); /* keep reference */
2781 				if (error)
2782 					break;
2783 				if (NETMAP_OWNED_BY_KERN(na)) {
2784 					error = EBUSY;
2785 					break;
2786 				}
2787 
2788 				if (na->virt_hdr_len && !(req->nr_flags & NR_ACCEPT_VNET_HDR)) {
2789 					nm_prerr("virt_hdr_len=%d, but application does "
2790 						"not accept it", na->virt_hdr_len);
2791 					error = EIO;
2792 					break;
2793 				}
2794 
2795 				error = netmap_do_regif(priv, na, hdr);
2796 				if (error) {    /* reg. failed, release priv and ref */
2797 					break;
2798 				}
2799 
2800 				opt = nmreq_getoption(hdr, NETMAP_REQ_OPT_CSB);
2801 				if (opt != NULL) {
2802 					struct nmreq_opt_csb *csbo =
2803 						(struct nmreq_opt_csb *)opt;
2804 					error = netmap_csb_validate(priv, csbo);
2805 					opt->nro_status = error;
2806 					if (error) {
2807 						netmap_do_unregif(priv);
2808 						break;
2809 					}
2810 				}
2811 
2812 				nifp = priv->np_nifp;
2813 
2814 				/* return the offset of the netmap_if object */
2815 				req->nr_rx_rings = na->num_rx_rings;
2816 				req->nr_tx_rings = na->num_tx_rings;
2817 				req->nr_rx_slots = na->num_rx_desc;
2818 				req->nr_tx_slots = na->num_tx_desc;
2819 				req->nr_host_tx_rings = na->num_host_tx_rings;
2820 				req->nr_host_rx_rings = na->num_host_rx_rings;
2821 				error = netmap_mem_get_info(na->nm_mem, &req->nr_memsize, &memflags,
2822 					&req->nr_mem_id);
2823 				if (error) {
2824 					netmap_do_unregif(priv);
2825 					break;
2826 				}
2827 				if (memflags & NETMAP_MEM_PRIVATE) {
2828 					*(uint32_t *)(uintptr_t)&nifp->ni_flags |= NI_PRIV_MEM;
2829 				}
2830 				for_rx_tx(t) {
2831 					priv->np_si[t] = nm_si_user(priv, t) ?
2832 						&na->si[t] : &NMR(na, t)[priv->np_qfirst[t]]->si;
2833 				}
2834 
2835 				if (req->nr_extra_bufs) {
2836 					if (netmap_verbose)
2837 						nm_prinf("requested %d extra buffers",
2838 							req->nr_extra_bufs);
2839 					req->nr_extra_bufs = netmap_extra_alloc(na,
2840 						&nifp->ni_bufs_head, req->nr_extra_bufs);
2841 					if (netmap_verbose)
2842 						nm_prinf("got %d extra buffers", req->nr_extra_bufs);
2843 				}
2844 				req->nr_offset = netmap_mem_if_offset(na->nm_mem, nifp);
2845 
2846 				error = nmreq_checkoptions(hdr);
2847 				if (error) {
2848 					netmap_do_unregif(priv);
2849 					break;
2850 				}
2851 
2852 				/* store ifp reference so that priv destructor may release it */
2853 				priv->np_ifp = ifp;
2854 			} while (0);
2855 			if (error) {
2856 				netmap_unget_na(na, ifp);
2857 			}
2858 			/* release the reference from netmap_mem_find() or
2859 			 * netmap_mem_ext_create()
2860 			 */
2861 			if (nmd)
2862 				netmap_mem_put(nmd);
2863 			NMG_UNLOCK();
2864 			break;
2865 		}
2866 
2867 		case NETMAP_REQ_PORT_INFO_GET: {
2868 			struct nmreq_port_info_get *req =
2869 				(struct nmreq_port_info_get *)(uintptr_t)hdr->nr_body;
2870 			int nmd_ref = 0;
2871 
2872 			NMG_LOCK();
2873 			do {
2874 				u_int memflags;
2875 
2876 				if (hdr->nr_name[0] != '\0') {
2877 					/* Build a nmreq_register out of the nmreq_port_info_get,
2878 					 * so that we can call netmap_get_na(). */
2879 					struct nmreq_register regreq;
2880 					bzero(&regreq, sizeof(regreq));
2881 					regreq.nr_mode = NR_REG_ALL_NIC;
2882 					regreq.nr_tx_slots = req->nr_tx_slots;
2883 					regreq.nr_rx_slots = req->nr_rx_slots;
2884 					regreq.nr_tx_rings = req->nr_tx_rings;
2885 					regreq.nr_rx_rings = req->nr_rx_rings;
2886 					regreq.nr_host_tx_rings = req->nr_host_tx_rings;
2887 					regreq.nr_host_rx_rings = req->nr_host_rx_rings;
2888 					regreq.nr_mem_id = req->nr_mem_id;
2889 
2890 					/* get a refcount */
2891 					hdr->nr_reqtype = NETMAP_REQ_REGISTER;
2892 					hdr->nr_body = (uintptr_t)&regreq;
2893 					error = netmap_get_na(hdr, &na, &ifp, NULL, 1 /* create */);
2894 					hdr->nr_reqtype = NETMAP_REQ_PORT_INFO_GET; /* reset type */
2895 					hdr->nr_body = (uintptr_t)req; /* reset nr_body */
2896 					if (error) {
2897 						na = NULL;
2898 						ifp = NULL;
2899 						break;
2900 					}
2901 					nmd = na->nm_mem; /* get memory allocator */
2902 				} else {
2903 					nmd = netmap_mem_find(req->nr_mem_id ? req->nr_mem_id : 1);
2904 					if (nmd == NULL) {
2905 						if (netmap_verbose)
2906 							nm_prerr("%s: failed to find mem_id %u",
2907 									hdr->nr_name,
2908 									req->nr_mem_id ? req->nr_mem_id : 1);
2909 						error = EINVAL;
2910 						break;
2911 					}
2912 					nmd_ref = 1;
2913 				}
2914 
2915 				error = netmap_mem_get_info(nmd, &req->nr_memsize, &memflags,
2916 					&req->nr_mem_id);
2917 				if (error)
2918 					break;
2919 				if (na == NULL) /* only memory info */
2920 					break;
2921 				netmap_update_config(na);
2922 				req->nr_rx_rings = na->num_rx_rings;
2923 				req->nr_tx_rings = na->num_tx_rings;
2924 				req->nr_rx_slots = na->num_rx_desc;
2925 				req->nr_tx_slots = na->num_tx_desc;
2926 				req->nr_host_tx_rings = na->num_host_tx_rings;
2927 				req->nr_host_rx_rings = na->num_host_rx_rings;
2928 			} while (0);
2929 			netmap_unget_na(na, ifp);
2930 			if (nmd_ref)
2931 				netmap_mem_put(nmd);
2932 			NMG_UNLOCK();
2933 			break;
2934 		}
2935 #ifdef WITH_VALE
2936 		case NETMAP_REQ_VALE_ATTACH: {
2937 			error = netmap_bdg_attach(hdr, NULL /* userspace request */);
2938 			break;
2939 		}
2940 
2941 		case NETMAP_REQ_VALE_DETACH: {
2942 			error = netmap_bdg_detach(hdr, NULL /* userspace request */);
2943 			break;
2944 		}
2945 
2946 		case NETMAP_REQ_PORT_HDR_SET: {
2947 			struct nmreq_port_hdr *req =
2948 				(struct nmreq_port_hdr *)(uintptr_t)hdr->nr_body;
2949 			/* Build a nmreq_register out of the nmreq_port_hdr,
			 * so that we can call netmap_get_vale_na(). */
2951 			struct nmreq_register regreq;
2952 			bzero(&regreq, sizeof(regreq));
2953 			regreq.nr_mode = NR_REG_ALL_NIC;
2954 
2955 			/* For now we only support virtio-net headers, and only for
2956 			 * VALE ports, but this may change in future. Valid lengths
2957 			 * for the virtio-net header are 0 (no header), 10 and 12. */
2958 			if (req->nr_hdr_len != 0 &&
2959 				req->nr_hdr_len != sizeof(struct nm_vnet_hdr) &&
2960 					req->nr_hdr_len != 12) {
2961 				if (netmap_verbose)
2962 					nm_prerr("invalid hdr_len %u", req->nr_hdr_len);
2963 				error = EINVAL;
2964 				break;
2965 			}
2966 			NMG_LOCK();
2967 			hdr->nr_reqtype = NETMAP_REQ_REGISTER;
2968 			hdr->nr_body = (uintptr_t)&regreq;
2969 			error = netmap_get_vale_na(hdr, &na, NULL, 0);
2970 			hdr->nr_reqtype = NETMAP_REQ_PORT_HDR_SET;
2971 			hdr->nr_body = (uintptr_t)req;
2972 			if (na && !error) {
2973 				struct netmap_vp_adapter *vpna =
2974 					(struct netmap_vp_adapter *)na;
2975 				na->virt_hdr_len = req->nr_hdr_len;
2976 				if (na->virt_hdr_len) {
2977 					vpna->mfs = NETMAP_BUF_SIZE(na);
2978 				}
2979 				if (netmap_verbose)
2980 					nm_prinf("Using vnet_hdr_len %d for %p", na->virt_hdr_len, na);
2981 				netmap_adapter_put(na);
2982 			} else if (!na) {
2983 				error = ENXIO;
2984 			}
2985 			NMG_UNLOCK();
2986 			break;
2987 		}
2988 
2989 		case NETMAP_REQ_PORT_HDR_GET: {
2990 			/* Get vnet-header length for this netmap port */
2991 			struct nmreq_port_hdr *req =
2992 				(struct nmreq_port_hdr *)(uintptr_t)hdr->nr_body;
2993 			/* Build a nmreq_register out of the nmreq_port_hdr,
			 * so that we can call netmap_get_na(). */
2995 			struct nmreq_register regreq;
2996 			struct ifnet *ifp;
2997 
2998 			bzero(&regreq, sizeof(regreq));
2999 			regreq.nr_mode = NR_REG_ALL_NIC;
3000 			NMG_LOCK();
3001 			hdr->nr_reqtype = NETMAP_REQ_REGISTER;
3002 			hdr->nr_body = (uintptr_t)&regreq;
3003 			error = netmap_get_na(hdr, &na, &ifp, NULL, 0);
3004 			hdr->nr_reqtype = NETMAP_REQ_PORT_HDR_GET;
3005 			hdr->nr_body = (uintptr_t)req;
3006 			if (na && !error) {
3007 				req->nr_hdr_len = na->virt_hdr_len;
3008 			}
3009 			netmap_unget_na(na, ifp);
3010 			NMG_UNLOCK();
3011 			break;
3012 		}
3013 
3014 		case NETMAP_REQ_VALE_LIST: {
3015 			error = netmap_vale_list(hdr);
3016 			break;
3017 		}
3018 
3019 		case NETMAP_REQ_VALE_NEWIF: {
3020 			error = nm_vi_create(hdr);
3021 			break;
3022 		}
3023 
3024 		case NETMAP_REQ_VALE_DELIF: {
3025 			error = nm_vi_destroy(hdr->nr_name);
3026 			break;
3027 		}
3028 #endif  /* WITH_VALE */
3029 
3030 		case NETMAP_REQ_VALE_POLLING_ENABLE:
3031 		case NETMAP_REQ_VALE_POLLING_DISABLE: {
3032 			error = nm_bdg_polling(hdr);
3033 			break;
3034 		}
3035 		case NETMAP_REQ_POOLS_INFO_GET: {
3036 			/* Get information from the memory allocator used for
3037 			 * hdr->nr_name. */
3038 			struct nmreq_pools_info *req =
3039 				(struct nmreq_pools_info *)(uintptr_t)hdr->nr_body;
3040 			NMG_LOCK();
3041 			do {
3042 				/* Build a nmreq_register out of the nmreq_pools_info,
3043 				 * so that we can call netmap_get_na(). */
3044 				struct nmreq_register regreq;
3045 				bzero(&regreq, sizeof(regreq));
3046 				regreq.nr_mem_id = req->nr_mem_id;
3047 				regreq.nr_mode = NR_REG_ALL_NIC;
3048 
3049 				hdr->nr_reqtype = NETMAP_REQ_REGISTER;
3050 				hdr->nr_body = (uintptr_t)&regreq;
3051 				error = netmap_get_na(hdr, &na, &ifp, NULL, 1 /* create */);
3052 				hdr->nr_reqtype = NETMAP_REQ_POOLS_INFO_GET; /* reset type */
3053 				hdr->nr_body = (uintptr_t)req; /* reset nr_body */
3054 				if (error) {
3055 					na = NULL;
3056 					ifp = NULL;
3057 					break;
3058 				}
3059 				nmd = na->nm_mem; /* grab the memory allocator */
3060 				if (nmd == NULL) {
3061 					error = EINVAL;
3062 					break;
3063 				}
3064 
3065 				/* Finalize the memory allocator, get the pools
3066 				 * information and release the allocator. */
3067 				error = netmap_mem_finalize(nmd, na);
3068 				if (error) {
3069 					break;
3070 				}
3071 				error = netmap_mem_pools_info_get(req, nmd);
3072 				netmap_mem_drop(na);
3073 			} while (0);
3074 			netmap_unget_na(na, ifp);
3075 			NMG_UNLOCK();
3076 			break;
3077 		}
3078 
3079 		case NETMAP_REQ_CSB_ENABLE: {
3080 			struct nmreq_option *opt;
3081 
3082 			opt = nmreq_getoption(hdr, NETMAP_REQ_OPT_CSB);
3083 			if (opt == NULL) {
3084 				error = EINVAL;
3085 			} else {
3086 				struct nmreq_opt_csb *csbo =
3087 					(struct nmreq_opt_csb *)opt;
3088 				NMG_LOCK();
3089 				error = netmap_csb_validate(priv, csbo);
3090 				NMG_UNLOCK();
3091 				opt->nro_status = error;
3092 			}
3093 			break;
3094 		}
3095 
3096 		case NETMAP_REQ_SYNC_KLOOP_START: {
3097 			error = netmap_sync_kloop(priv, hdr);
3098 			break;
3099 		}
3100 
3101 		case NETMAP_REQ_SYNC_KLOOP_STOP: {
3102 			error = netmap_sync_kloop_stop(priv);
3103 			break;
3104 		}
3105 
3106 		default: {
3107 			error = EINVAL;
3108 			break;
3109 		}
3110 		}
3111 		/* Write back request body to userspace and reset the
3112 		 * user-space pointer. */
3113 		error = nmreq_copyout(hdr, error);
3114 		break;
3115 	}
3116 
3117 	case NIOCTXSYNC:
3118 	case NIOCRXSYNC: {
3119 		if (unlikely(priv->np_nifp == NULL)) {
3120 			error = ENXIO;
3121 			break;
3122 		}
3123 		mb(); /* make sure following reads are not from cache */
3124 
3125 		if (unlikely(priv->np_csb_atok_base)) {
3126 			nm_prerr("Invalid sync in CSB mode");
3127 			error = EBUSY;
3128 			break;
3129 		}
3130 
3131 		na = priv->np_na;      /* we have a reference */
3132 
3133 		mbq_init(&q);
3134 		t = (cmd == NIOCTXSYNC ? NR_TX : NR_RX);
3135 		krings = NMR(na, t);
3136 		qfirst = priv->np_qfirst[t];
3137 		qlast = priv->np_qlast[t];
3138 		sync_flags = priv->np_sync_flags;
3139 
3140 		for (i = qfirst; i < qlast; i++) {
3141 			struct netmap_kring *kring = krings[i];
3142 			struct netmap_ring *ring = kring->ring;
3143 
3144 			if (unlikely(nm_kr_tryget(kring, 1, &error))) {
3145 				error = (error ? EIO : 0);
3146 				continue;
3147 			}
3148 
3149 			if (cmd == NIOCTXSYNC) {
3150 				if (netmap_debug & NM_DEBUG_TXSYNC)
3151 					nm_prinf("pre txsync ring %d cur %d hwcur %d",
3152 					    i, ring->cur,
3153 					    kring->nr_hwcur);
3154 				if (nm_txsync_prologue(kring, ring) >= kring->nkr_num_slots) {
3155 					netmap_ring_reinit(kring);
3156 				} else if (kring->nm_sync(kring, sync_flags | NAF_FORCE_RECLAIM) == 0) {
3157 					nm_sync_finalize(kring);
3158 				}
3159 				if (netmap_debug & NM_DEBUG_TXSYNC)
3160 					nm_prinf("post txsync ring %d cur %d hwcur %d",
3161 					    i, ring->cur,
3162 					    kring->nr_hwcur);
3163 			} else {
3164 				if (nm_rxsync_prologue(kring, ring) >= kring->nkr_num_slots) {
3165 					netmap_ring_reinit(kring);
3166 				}
3167 				if (nm_may_forward_up(kring)) {
3168 					/* transparent forwarding, see netmap_poll() */
3169 					netmap_grab_packets(kring, &q, netmap_fwd);
3170 				}
3171 				if (kring->nm_sync(kring, sync_flags | NAF_FORCE_READ) == 0) {
3172 					nm_sync_finalize(kring);
3173 				}
3174 				ring_timestamp_set(ring);
3175 			}
3176 			nm_kr_put(kring);
3177 		}
3178 
3179 		if (mbq_peek(&q)) {
3180 			netmap_send_up(na->ifp, &q);
3181 		}
3182 
3183 		break;
3184 	}
3185 
3186 	default: {
		return netmap_ioctl_legacy(priv, cmd, data, td);
3189 	}
3190 	}
3191 
3192 	return (error);
3193 }
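
/*
 * Illustrative userspace sketch of the control path handled above
 * (includes and error handling omitted; see netmap(4)):
 *
 *	struct nmreq_register reg = { .nr_mode = NR_REG_ALL_NIC };
 *	struct nmreq_header hdr = {
 *		.nr_version = NETMAP_API,
 *		.nr_reqtype = NETMAP_REQ_REGISTER,
 *		.nr_body = (uintptr_t)&reg,
 *	};
 *	int fd = open("/dev/netmap", O_RDWR);
 *
 *	strlcpy(hdr.nr_name, "em0", sizeof(hdr.nr_name));
 *	ioctl(fd, NIOCCTRL, &hdr);	// register the port
 *	...
 *	ioctl(fd, NIOCTXSYNC, NULL);	// flush pending transmissions
 */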
3194 
3195 size_t
3196 nmreq_size_by_type(uint16_t nr_reqtype)
3197 {
3198 	switch (nr_reqtype) {
3199 	case NETMAP_REQ_REGISTER:
3200 		return sizeof(struct nmreq_register);
3201 	case NETMAP_REQ_PORT_INFO_GET:
3202 		return sizeof(struct nmreq_port_info_get);
3203 	case NETMAP_REQ_VALE_ATTACH:
3204 		return sizeof(struct nmreq_vale_attach);
3205 	case NETMAP_REQ_VALE_DETACH:
3206 		return sizeof(struct nmreq_vale_detach);
3207 	case NETMAP_REQ_VALE_LIST:
3208 		return sizeof(struct nmreq_vale_list);
3209 	case NETMAP_REQ_PORT_HDR_SET:
3210 	case NETMAP_REQ_PORT_HDR_GET:
3211 		return sizeof(struct nmreq_port_hdr);
3212 	case NETMAP_REQ_VALE_NEWIF:
3213 		return sizeof(struct nmreq_vale_newif);
3214 	case NETMAP_REQ_VALE_DELIF:
3215 	case NETMAP_REQ_SYNC_KLOOP_STOP:
3216 	case NETMAP_REQ_CSB_ENABLE:
3217 		return 0;
3218 	case NETMAP_REQ_VALE_POLLING_ENABLE:
3219 	case NETMAP_REQ_VALE_POLLING_DISABLE:
3220 		return sizeof(struct nmreq_vale_polling);
3221 	case NETMAP_REQ_POOLS_INFO_GET:
3222 		return sizeof(struct nmreq_pools_info);
3223 	case NETMAP_REQ_SYNC_KLOOP_START:
3224 		return sizeof(struct nmreq_sync_kloop_start);
3225 	}
3226 	return 0;
3227 }
3228 
3229 static size_t
3230 nmreq_opt_size_by_type(uint32_t nro_reqtype, uint64_t nro_size)
3231 {
3232 	size_t rv = sizeof(struct nmreq_option);
3233 #ifdef NETMAP_REQ_OPT_DEBUG
3234 	if (nro_reqtype & NETMAP_REQ_OPT_DEBUG)
3235 		return (nro_reqtype & ~NETMAP_REQ_OPT_DEBUG);
3236 #endif /* NETMAP_REQ_OPT_DEBUG */
3237 	switch (nro_reqtype) {
3238 #ifdef WITH_EXTMEM
3239 	case NETMAP_REQ_OPT_EXTMEM:
3240 		rv = sizeof(struct nmreq_opt_extmem);
3241 		break;
3242 #endif /* WITH_EXTMEM */
3243 	case NETMAP_REQ_OPT_SYNC_KLOOP_EVENTFDS:
3244 		if (nro_size >= rv)
3245 			rv = nro_size;
3246 		break;
3247 	case NETMAP_REQ_OPT_CSB:
3248 		rv = sizeof(struct nmreq_opt_csb);
3249 		break;
3250 	case NETMAP_REQ_OPT_SYNC_KLOOP_MODE:
3251 		rv = sizeof(struct nmreq_opt_sync_kloop_mode);
3252 		break;
3253 	case NETMAP_REQ_OPT_OFFSETS:
3254 		rv = sizeof(struct nmreq_opt_offsets);
3255 		break;
3256 	}
3257 	/* subtract the common header */
3258 	return rv - sizeof(struct nmreq_option);
3259 }
3260 
3261 /*
3262  * nmreq_copyin: create an in-kernel version of the request.
3263  *
3264  * We build the following data structure:
3265  *
3266  * hdr -> +-------+                buf
3267  *        |       |          +---------------+
3268  *        +-------+          |usr body ptr   |
3269  *        |options|-.        +---------------+
3270  *        +-------+ |        |usr options ptr|
3271  *        |body   |--------->+---------------+
3272  *        +-------+ |        |               |
3273  *                  |        |  copy of body |
3274  *                  |        |               |
3275  *                  |        +---------------+
3276  *                  |        |    NULL       |
3277  *                  |        +---------------+
3278  *                  |    .---|               |\
3279  *                  |    |   +---------------+ |
3280  *                  | .------|               | |
3281  *                  | |  |   +---------------+  \ option table
3282  *                  | |  |   |      ...      |  / indexed by option
3283  *                  | |  |   +---------------+ |  type
3284  *                  | |  |   |               | |
3285  *                  | |  |   +---------------+/
3286  *                  | |  |   |usr next ptr 1 |
3287  *                  `-|----->+---------------+
3288  *                    |  |   | copy of opt 1 |
3289  *                    |  |   |               |
3290  *                    |  | .-| nro_next      |
3291  *                    |  | | +---------------+
3292  *                    |  | | |usr next ptr 2 |
3293  *                    |  `-`>+---------------+
3294  *                    |      | copy of opt 2 |
3295  *                    |      |               |
3296  *                    |    .-| nro_next      |
3297  *                    |    | +---------------+
3298  *                    |    | |               |
3299  *                    ~    ~ ~      ...      ~
3300  *                    |    .-|               |
3301  *                    `----->+---------------+
3302  *                         | |usr next ptr n |
3303  *                         `>+---------------+
3304  *                           | copy of opt n |
3305  *                           |               |
3306  *                           | nro_next(NULL)|
3307  *                           +---------------+
3308  *
3309  * The options and body fields of the hdr structure are overwritten
3310  * with in-kernel valid pointers inside the buf. The original user
3311  * pointers are saved in the buf and restored on copyout.
3312  * The list of options is copied and the pointers adjusted. The
3313  * original pointers are saved just before the option they belonged to.
3314  *
3315  * The option table has an entry for every available option.  Entries
3316  * for options that have not been passed contain NULL.
3317  *
3318  */
3319 
3320 int
3321 nmreq_copyin(struct nmreq_header *hdr, int nr_body_is_user)
3322 {
3323 	size_t rqsz, optsz, bufsz;
3324 	int error = 0;
3325 	char *ker = NULL, *p;
3326 	struct nmreq_option **next, *src, **opt_tab;
3327 	struct nmreq_option buf;
3328 	uint64_t *ptrs;
3329 
3330 	if (hdr->nr_reserved) {
3331 		if (netmap_verbose)
3332 			nm_prerr("nr_reserved must be zero");
3333 		return EINVAL;
3334 	}
3335 
3336 	if (!nr_body_is_user)
3337 		return 0;
3338 
3339 	hdr->nr_reserved = nr_body_is_user;
3340 
3341 	/* compute the total size of the buffer */
3342 	rqsz = nmreq_size_by_type(hdr->nr_reqtype);
3343 	if (rqsz > NETMAP_REQ_MAXSIZE) {
3344 		error = EMSGSIZE;
3345 		goto out_err;
3346 	}
3347 	if ((rqsz && hdr->nr_body == (uintptr_t)NULL) ||
3348 		(!rqsz && hdr->nr_body != (uintptr_t)NULL)) {
3349 		/* Request body expected, but not found; or
3350 		 * request body found but unexpected. */
3351 		if (netmap_verbose)
3352 			nm_prerr("nr_body expected but not found, or vice versa");
3353 		error = EINVAL;
3354 		goto out_err;
3355 	}
3356 
3357 	bufsz = 2 * sizeof(void *) + rqsz +
3358 		NETMAP_REQ_OPT_MAX * sizeof(opt_tab);
3359 	/* compute the size of the buf below the option table.
3360 	 * It must contain a copy of every received option structure.
3361 	 * For every option we also need to store a copy of the user
3362 	 * list pointer.
3363 	 */
3364 	optsz = 0;
3365 	for (src = (struct nmreq_option *)(uintptr_t)hdr->nr_options; src;
3366 	     src = (struct nmreq_option *)(uintptr_t)buf.nro_next)
3367 	{
3368 		error = copyin(src, &buf, sizeof(*src));
3369 		if (error)
3370 			goto out_err;
3371 		optsz += sizeof(*src);
3372 		optsz += nmreq_opt_size_by_type(buf.nro_reqtype, buf.nro_size);
3373 		if (rqsz + optsz > NETMAP_REQ_MAXSIZE) {
3374 			error = EMSGSIZE;
3375 			goto out_err;
3376 		}
3377 		bufsz += sizeof(void *);
3378 	}
3379 	bufsz += optsz;
3380 
3381 	ker = nm_os_malloc(bufsz);
3382 	if (ker == NULL) {
3383 		error = ENOMEM;
3384 		goto out_err;
3385 	}
3386 	p = ker;	/* write pointer into the buffer */
3387 
3388 	/* make a copy of the user pointers */
3389 	ptrs = (uint64_t*)p;
3390 	*ptrs++ = hdr->nr_body;
3391 	*ptrs++ = hdr->nr_options;
3392 	p = (char *)ptrs;
3393 
3394 	/* copy the body */
3395 	error = copyin((void *)(uintptr_t)hdr->nr_body, p, rqsz);
3396 	if (error)
3397 		goto out_restore;
3398 	/* overwrite the user pointer with the in-kernel one */
3399 	hdr->nr_body = (uintptr_t)p;
3400 	p += rqsz;
3401 	/* start of the options table */
3402 	opt_tab = (struct nmreq_option **)p;
3403 	p += sizeof(opt_tab) * NETMAP_REQ_OPT_MAX;
3404 
3405 	/* copy the options */
3406 	next = (struct nmreq_option **)&hdr->nr_options;
3407 	src = *next;
3408 	while (src) {
3409 		struct nmreq_option *opt;
3410 
3411 		/* copy the option header */
3412 		ptrs = (uint64_t *)p;
3413 		opt = (struct nmreq_option *)(ptrs + 1);
3414 		error = copyin(src, opt, sizeof(*src));
3415 		if (error)
3416 			goto out_restore;
3417 		/* make a copy of the user next pointer */
3418 		*ptrs = opt->nro_next;
3419 		/* overwrite the user pointer with the in-kernel one */
3420 		*next = opt;
3421 
3422 		/* initialize the option as not supported.
3423 		 * Recognized options will update this field.
3424 		 */
3425 		opt->nro_status = EOPNOTSUPP;
3426 
3427 		/* check for invalid types */
3428 		if (opt->nro_reqtype < 1) {
3429 			if (netmap_verbose)
3430 				nm_prinf("invalid option type: %u", opt->nro_reqtype);
3431 			opt->nro_status = EINVAL;
3432 			error = EINVAL;
3433 			goto next;
3434 		}
3435 
3436 		if (opt->nro_reqtype >= NETMAP_REQ_OPT_MAX) {
3437 			/* opt->nro_status is already EOPNOTSUPP */
3438 			error = EOPNOTSUPP;
3439 			goto next;
3440 		}
3441 
3442 		/* if the type is valid, index the option in the table
3443 		 * unless it is a duplicate.
3444 		 */
3445 		if (opt_tab[opt->nro_reqtype] != NULL) {
3446 			if (netmap_verbose)
3447 				nm_prinf("duplicate option: %u", opt->nro_reqtype);
3448 			opt->nro_status = EINVAL;
3449 			opt_tab[opt->nro_reqtype]->nro_status = EINVAL;
3450 			error = EINVAL;
3451 			goto next;
3452 		}
3453 		opt_tab[opt->nro_reqtype] = opt;
3454 
3455 		p = (char *)(opt + 1);
3456 
3457 		/* copy the option body */
3458 		optsz = nmreq_opt_size_by_type(opt->nro_reqtype,
3459 						opt->nro_size);
3460 		if (optsz) {
3461 			/* the option body follows the option header */
3462 			error = copyin(src + 1, p, optsz);
3463 			if (error)
3464 				goto out_restore;
3465 			p += optsz;
3466 		}
3467 
3468 	next:
3469 		/* move to next option */
3470 		next = (struct nmreq_option **)&opt->nro_next;
3471 		src = *next;
3472 	}
3473 	if (error)
3474 		nmreq_copyout(hdr, error);
3475 	return error;
3476 
3477 out_restore:
3478 	ptrs = (uint64_t *)ker;
3479 	hdr->nr_body = *ptrs++;
3480 	hdr->nr_options = *ptrs++;
3481 	hdr->nr_reserved = 0;
3482 	nm_os_free(ker);
3483 out_err:
3484 	return error;
3485 }
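
/*
 * Example (userspace sketch, using the public API from net/netmap.h;
 * error handling and the CSB pointers are omitted): a request with a
 * body and one option, as it looks just before nmreq_copyin() runs.
 *
 *	struct nmreq_header hdr;
 *	struct nmreq_register reg;
 *	struct nmreq_opt_csb csbo;
 *
 *	memset(&hdr, 0, sizeof(hdr));
 *	hdr.nr_version = NETMAP_API;
 *	hdr.nr_reqtype = NETMAP_REQ_REGISTER;
 *	strlcpy(hdr.nr_name, "em0", sizeof(hdr.nr_name));
 *
 *	memset(&reg, 0, sizeof(reg));
 *	reg.nr_mode = NR_REG_ALL_NIC;
 *	hdr.nr_body = (uintptr_t)&reg;
 *
 *	memset(&csbo, 0, sizeof(csbo));
 *	csbo.nro_opt.nro_reqtype = NETMAP_REQ_OPT_CSB;
 *	hdr.nr_options = (uintptr_t)&csbo;	// nro_next stays NULL
 *
 *	ioctl(fd, NIOCCTRL, &hdr);	// the kernel runs nmreq_copyin()
 */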
3486 
3487 static int
3488 nmreq_copyout(struct nmreq_header *hdr, int rerror)
3489 {
3490 	struct nmreq_option *src, *dst;
3491 	void *ker = (void *)(uintptr_t)hdr->nr_body, *bufstart;
3492 	uint64_t *ptrs;
3493 	size_t bodysz;
3494 	int error;
3495 
3496 	if (!hdr->nr_reserved)
3497 		return rerror;
3498 
3499 	/* restore the user pointers in the header */
3500 	ptrs = (uint64_t *)ker - 2;
3501 	bufstart = ptrs;
3502 	hdr->nr_body = *ptrs++;
3503 	src = (struct nmreq_option *)(uintptr_t)hdr->nr_options;
3504 	hdr->nr_options = *ptrs;
3505 
3506 	if (!rerror) {
3507 		/* copy the body */
3508 		bodysz = nmreq_size_by_type(hdr->nr_reqtype);
3509 		error = copyout(ker, (void *)(uintptr_t)hdr->nr_body, bodysz);
3510 		if (error) {
3511 			rerror = error;
3512 			goto out;
3513 		}
3514 	}
3515 
3516 	/* copy the options */
3517 	dst = (struct nmreq_option *)(uintptr_t)hdr->nr_options;
3518 	while (src) {
3519 		size_t optsz;
3520 		uint64_t next;
3521 
3522 		/* restore the user pointer */
3523 		next = src->nro_next;
3524 		ptrs = (uint64_t *)src - 1;
3525 		src->nro_next = *ptrs;
3526 
3527 		/* always copy the option header */
3528 		error = copyout(src, dst, sizeof(*src));
3529 		if (error) {
3530 			rerror = error;
3531 			goto out;
3532 		}
3533 
3534 		/* copy the option body only if there was no error */
3535 		if (!rerror && !src->nro_status) {
3536 			optsz = nmreq_opt_size_by_type(src->nro_reqtype,
3537 							src->nro_size);
3538 			if (optsz) {
3539 				error = copyout(src + 1, dst + 1, optsz);
3540 				if (error) {
3541 					rerror = error;
3542 					goto out;
3543 				}
3544 			}
3545 		}
3546 		src = (struct nmreq_option *)(uintptr_t)next;
3547 		dst = (struct nmreq_option *)(uintptr_t)*ptrs;
3548 	}
3549 
3550 
3551 out:
3552 	hdr->nr_reserved = 0;
3553 	nm_os_free(bufstart);
3554 	return rerror;
3555 }
3556 
3557 struct nmreq_option *
3558 nmreq_getoption(struct nmreq_header *hdr, uint16_t reqtype)
3559 {
3560 	struct nmreq_option **opt_tab;
3561 
3562 	if (!hdr->nr_options)
3563 		return NULL;
3564 
3565 	opt_tab = (struct nmreq_option **)((uintptr_t)hdr->nr_options) -
3566 	    (NETMAP_REQ_OPT_MAX + 1);
3567 	return opt_tab[reqtype];
3568 }
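
/*
 * Example (sketch): how a request handler typically consumes an option.
 * nmreq_copyin() pre-sets nro_status to EOPNOTSUPP, so a handler that
 * recognizes the option must clear it, or the final
 * nmreq_checkoptions() pass will fail the whole request.
 *
 *	struct nmreq_opt_csb *csbo = (struct nmreq_opt_csb *)
 *		nmreq_getoption(hdr, NETMAP_REQ_OPT_CSB);
 *	if (csbo != NULL) {
 *		csbo->nro_opt.nro_status = 0;	// option handled
 *		// ... use csbo->csb_atok and csbo->csb_ktoa ...
 *	}
 */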
3569 
3570 static int
3571 nmreq_checkoptions(struct nmreq_header *hdr)
3572 {
3573 	struct nmreq_option *opt;
3574 	/* return error if there is still any option
3575 	 * marked as not supported
3576 	 */
3577 
3578 	for (opt = (struct nmreq_option *)(uintptr_t)hdr->nr_options; opt;
3579 	     opt = (struct nmreq_option *)(uintptr_t)opt->nro_next)
3580 		if (opt->nro_status == EOPNOTSUPP)
3581 			return EOPNOTSUPP;
3582 
3583 	return 0;
3584 }
3585 
3586 /*
3587  * select(2) and poll(2) handlers for the "netmap" device.
3588  *
3589  * Can be called for one or more queues.
3590  * Return the event mask corresponding to ready events.
3591  * If there are no ready events (and 'sr' is not NULL), do a
3592  * selrecord on either the individual selinfo or the global one.
3593  * Device-dependent parts (locking and sync of tx/rx rings)
3594  * are done through callbacks.
3595  *
3596  * On Linux, the arguments are really pwait, the poll table, and 'td' is a
3597  * struct file *. The first one is remapped to pwait, since selrecord()
3598  * uses the name as a hidden argument.
3599  */
3600 int
3601 netmap_poll(struct netmap_priv_d *priv, int events, NM_SELRECORD_T *sr)
3602 {
3603 	struct netmap_adapter *na;
3604 	struct netmap_kring *kring;
3605 	struct netmap_ring *ring;
3606 	u_int i, want[NR_TXRX], revents = 0;
3607 	NM_SELINFO_T *si[NR_TXRX];
3608 #define want_tx want[NR_TX]
3609 #define want_rx want[NR_RX]
3610 	struct mbq q;	/* packets from RX hw queues to host stack */
3611 
3612 	/*
3613 	 * In order to avoid nested locks, we need to "double check"
3614 	 * txsync and rxsync if we decide to do a selrecord().
3615 	 * retry_tx (and retry_rx, later) prevent looping forever.
3616 	 */
3617 	int retry_tx = 1, retry_rx = 1;
3618 
3619 	/* Transparent mode: send_down is 1 if we have found some
3620 	 * packets to forward (host RX ring --> NIC) during the rx
3621 	 * scan and we have not sent them down to the NIC yet.
3622 	 * Transparent mode requires all rings to be bound to a single
3623 	 * file descriptor.
3624 	 */
3625 	int send_down = 0;
3626 	int sync_flags = priv->np_sync_flags;
3627 
3628 	mbq_init(&q);
3629 
3630 	if (unlikely(priv->np_nifp == NULL)) {
3631 		return POLLERR;
3632 	}
3633 	mb(); /* make sure following reads are not from cache */
3634 
3635 	na = priv->np_na;
3636 
3637 	if (unlikely(!nm_netmap_on(na)))
3638 		return POLLERR;
3639 
3640 	if (unlikely(priv->np_csb_atok_base)) {
3641 		nm_prerr("Invalid poll in CSB mode");
3642 		return POLLERR;
3643 	}
3644 
3645 	if (netmap_debug & NM_DEBUG_ON)
3646 		nm_prinf("device %s events 0x%x", na->name, events);
3647 	want_tx = events & (POLLOUT | POLLWRNORM);
3648 	want_rx = events & (POLLIN | POLLRDNORM);
3649 
3650 	/*
3651 	 * If the card has more than one queue AND the file descriptor is
3652 	 * bound to all of them, we sleep on the "global" selinfo, otherwise
3653 	 * we sleep on individual selinfo (FreeBSD only allows two selinfo's
3654 	 * per file descriptor).
3655 	 * The interrupt routine in the driver wakes one or the other
3656 	 * (or both) depending on which clients are active.
3657 	 *
3658 	 * rxsync() is only called if we run out of buffers on a POLLIN.
3659 	 * txsync() is called if we run out of buffers on POLLOUT, or
3660 	 * there are pending packets to send. The latter can be disabled
3661 	 * by passing NETMAP_NO_TX_POLL in the NIOCREGIF call.
3662 	 */
3663 	si[NR_RX] = priv->np_si[NR_RX];
3664 	si[NR_TX] = priv->np_si[NR_TX];
3665 
3666 #ifdef __FreeBSD__
3667 	/*
3668 	 * We start with a lock free round which is cheap if we have
3669 	 * slots available. If this fails, then lock and call the sync
3670 	 * routines. We can't do this on Linux, as the contract says
3671 	 * that we must call nm_os_selrecord() unconditionally.
3672 	 */
3673 	if (want_tx) {
3674 		const enum txrx t = NR_TX;
3675 		for (i = priv->np_qfirst[t]; i < priv->np_qlast[t]; i++) {
3676 			kring = NMR(na, t)[i];
3677 			if (kring->ring->cur != kring->ring->tail) {
3678 				/* Some unseen TX space is available, so
3679 				 * we don't need to run txsync. */
3680 				revents |= want[t];
3681 				want[t] = 0;
3682 				break;
3683 			}
3684 		}
3685 	}
3686 	if (want_rx) {
3687 		const enum txrx t = NR_RX;
3688 		int rxsync_needed = 0;
3689 
3690 		for (i = priv->np_qfirst[t]; i < priv->np_qlast[t]; i++) {
3691 			kring = NMR(na, t)[i];
3692 			if (kring->ring->cur == kring->ring->tail
3693 				|| kring->rhead != kring->ring->head) {
3694 				/* There are no unseen packets on this ring,
3695 				 * or there are some buffers to be returned
3696 				 * to the netmap port. We therefore go ahead
3697 				 * and run rxsync. */
3698 				rxsync_needed = 1;
3699 				break;
3700 			}
3701 		}
3702 		if (!rxsync_needed) {
3703 			revents |= want_rx;
3704 			want_rx = 0;
3705 		}
3706 	}
3707 #endif
3708 
3709 #ifdef linux
3710 	/* The selrecord must be unconditional on linux. */
3711 	nm_os_selrecord(sr, si[NR_RX]);
3712 	nm_os_selrecord(sr, si[NR_TX]);
3713 #endif /* linux */
3714 
3715 	/*
3716 	 * If we want to push packets out (priv->np_txpoll) or
3717 	 * want_tx is still set, we must issue txsync calls
3718 	 * (on all rings, to avoid that the tx rings stall).
3719 	 * Fortunately, normal tx mode has np_txpoll set.
3720 	 */
3721 	if (priv->np_txpoll || want_tx) {
3722 		/*
3723 		 * The first round checks if anyone is ready, if not
3724 		 * do a selrecord and another round to handle races.
3725 		 * want_tx goes to 0 if any space is found, and is
3726 		 * used to skip rings with no pending transmissions.
3727 		 */
3728 flush_tx:
3729 		for (i = priv->np_qfirst[NR_TX]; i < priv->np_qlast[NR_TX]; i++) {
3730 			int found = 0;
3731 
3732 			kring = na->tx_rings[i];
3733 			ring = kring->ring;
3734 
3735 			/*
3736 			 * Don't try to txsync this TX ring if we already found some
3737 			 * space in some of the TX rings (want_tx == 0) and there are no
3738 			 * TX slots in this ring that need to be flushed to the NIC
3739 			 * (head == hwcur).
3740 			 */
3741 			if (!send_down && !want_tx && ring->head == kring->nr_hwcur)
3742 				continue;
3743 
3744 			if (nm_kr_tryget(kring, 1, &revents))
3745 				continue;
3746 
3747 			if (nm_txsync_prologue(kring, ring) >= kring->nkr_num_slots) {
3748 				netmap_ring_reinit(kring);
3749 				revents |= POLLERR;
3750 			} else {
3751 				if (kring->nm_sync(kring, sync_flags))
3752 					revents |= POLLERR;
3753 				else
3754 					nm_sync_finalize(kring);
3755 			}
3756 
3757 			/*
3758 			 * If we found new slots, notify potential
3759 			 * listeners on the same ring.
3760 			 * Since we just did a txsync, look at the copies
3761 			 * of cur,tail in the kring.
3762 			 */
3763 			found = kring->rcur != kring->rtail;
3764 			nm_kr_put(kring);
3765 			if (found) { /* notify other listeners */
3766 				revents |= want_tx;
3767 				want_tx = 0;
3768 #ifndef linux
3769 				kring->nm_notify(kring, 0);
3770 #endif /* !linux */
3771 			}
3772 		}
3773 		/* if there were any packets to forward, we must have handled them by now */
3774 		send_down = 0;
3775 		if (want_tx && retry_tx && sr) {
3776 #ifndef linux
3777 			nm_os_selrecord(sr, si[NR_TX]);
3778 #endif /* !linux */
3779 			retry_tx = 0;
3780 			goto flush_tx;
3781 		}
3782 	}
3783 
3784 	/*
3785 	 * If want_rx is still set scan receive rings.
3786 	 * Do it on all rings because otherwise we starve.
3787 	 */
3788 	if (want_rx) {
3789 		/* two rounds here for race avoidance */
3790 do_retry_rx:
3791 		for (i = priv->np_qfirst[NR_RX]; i < priv->np_qlast[NR_RX]; i++) {
3792 			int found = 0;
3793 
3794 			kring = na->rx_rings[i];
3795 			ring = kring->ring;
3796 
3797 			if (unlikely(nm_kr_tryget(kring, 1, &revents)))
3798 				continue;
3799 
3800 			if (nm_rxsync_prologue(kring, ring) >= kring->nkr_num_slots) {
3801 				netmap_ring_reinit(kring);
3802 				revents |= POLLERR;
3803 			}
3804 			/* now we can use kring->rcur, rtail */
3805 
3806 			/*
3807 			 * transparent mode support: collect packets from
3808 			 * hw rxring(s) that have been released by the user
3809 			 */
3810 			if (nm_may_forward_up(kring)) {
3811 				netmap_grab_packets(kring, &q, netmap_fwd);
3812 			}
3813 
3814 			/* Clear the NR_FORWARD flag anyway, it may be set by
3815 			 * the nm_sync() below, and only for the host RX ring (see
3816 			 * netmap_rxsync_from_host()). */
3817 			kring->nr_kflags &= ~NR_FORWARD;
3818 			if (kring->nm_sync(kring, sync_flags))
3819 				revents |= POLLERR;
3820 			else
3821 				nm_sync_finalize(kring);
3822 			send_down |= (kring->nr_kflags & NR_FORWARD);
3823 			ring_timestamp_set(ring);
3824 			found = kring->rcur != kring->rtail;
3825 			nm_kr_put(kring);
3826 			if (found) {
3827 				revents |= want_rx;
3828 				retry_rx = 0;
3829 #ifndef linux
3830 				kring->nm_notify(kring, 0);
3831 #endif /* !linux */
3832 			}
3833 		}
3834 
3835 #ifndef linux
3836 		if (retry_rx && sr) {
3837 			nm_os_selrecord(sr, si[NR_RX]);
3838 		}
3839 #endif /* !linux */
3840 		if (send_down || retry_rx) {
3841 			retry_rx = 0;
3842 			if (send_down)
3843 				goto flush_tx; /* and retry_rx */
3844 			else
3845 				goto do_retry_rx;
3846 		}
3847 	}
3848 
3849 	/*
3850 	 * Transparent mode: released bufs (i.e. between kring->nr_hwcur and
3851 	 * ring->head) marked with NS_FORWARD on hw rx rings are passed up
3852 	 * to the host stack.
3853 	 */
3854 
3855 	if (mbq_peek(&q)) {
3856 		netmap_send_up(na->ifp, &q);
3857 	}
3858 
3859 	return (revents);
3860 #undef want_tx
3861 #undef want_rx
3862 }
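
/*
 * Example (userspace sketch, assuming 'fd' is bound to a port and
 * 'nifp' was obtained from the mmap()ed region): the poll(2) loop that
 * exercises the code above.  A POLLIN wakeup means at least one bound
 * RX ring has unseen slots between head and tail.
 *
 *	struct netmap_ring *ring = NETMAP_RXRING(nifp, 0);
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	for (;;) {
 *		poll(&pfd, 1, -1);		// ends up in netmap_poll()
 *		while (!nm_ring_empty(ring)) {
 *			// ... process the slot at ring->head ...
 *			ring->head = ring->cur =
 *				nm_ring_next(ring, ring->head);
 *		}
 *	}
 */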
3863 
3864 int
3865 nma_intr_enable(struct netmap_adapter *na, int onoff)
3866 {
3867 	bool changed = false;
3868 	enum txrx t;
3869 	int i;
3870 
3871 	for_rx_tx(t) {
3872 		for (i = 0; i < nma_get_nrings(na, t); i++) {
3873 			struct netmap_kring *kring = NMR(na, t)[i];
3874 			int on = !(kring->nr_kflags & NKR_NOINTR);
3875 
3876 			if (!!onoff != !!on) {
3877 				changed = true;
3878 			}
3879 			if (onoff) {
3880 				kring->nr_kflags &= ~NKR_NOINTR;
3881 			} else {
3882 				kring->nr_kflags |= NKR_NOINTR;
3883 			}
3884 		}
3885 	}
3886 
3887 	if (!changed) {
3888 		return 0; /* nothing to do */
3889 	}
3890 
3891 	if (!na->nm_intr) {
3892 		nm_prerr("Cannot %s interrupts for %s", onoff ? "enable" : "disable",
3893 		  na->name);
3894 		return -1;
3895 	}
3896 
3897 	na->nm_intr(na, onoff);
3898 
3899 	return 0;
3900 }
3901 
3902 
3903 /*-------------------- driver support routines -------------------*/
3904 
3905 /* default notify callback */
3906 static int
3907 netmap_notify(struct netmap_kring *kring, int flags)
3908 {
3909 	struct netmap_adapter *na = kring->notify_na;
3910 	enum txrx t = kring->tx;
3911 
3912 	nm_os_selwakeup(&kring->si);
3913 	/* optimization: avoid a wake up on the global
3914 	 * queue if nobody has registered for more
3915 	 * than one ring
3916 	 */
3917 	if (na->si_users[t] > 0)
3918 		nm_os_selwakeup(&na->si[t]);
3919 
3920 	return NM_IRQ_COMPLETED;
3921 }
3922 
3923 /* called by all routines that create netmap_adapters.
3924  * provide some defaults and get a reference to the
3925  * memory allocator
3926  */
3927 int
3928 netmap_attach_common(struct netmap_adapter *na)
3929 {
3930 	if (!na->rx_buf_maxsize) {
3931 		/* Set a conservative default (larger is safer). */
3932 		na->rx_buf_maxsize = PAGE_SIZE;
3933 	}
3934 
3935 #ifdef __FreeBSD__
3936 	if (na->na_flags & NAF_HOST_RINGS && na->ifp) {
3937 		na->if_input = na->ifp->if_input; /* for netmap_send_up */
3938 	}
3939 	na->pdev = na; /* make sure netmap_mem_map() is called */
3940 #endif /* __FreeBSD__ */
3941 	if (na->na_flags & NAF_HOST_RINGS) {
3942 		if (na->num_host_rx_rings == 0)
3943 			na->num_host_rx_rings = 1;
3944 		if (na->num_host_tx_rings == 0)
3945 			na->num_host_tx_rings = 1;
3946 	}
3947 	if (na->nm_krings_create == NULL) {
3948 		/* we assume that we have been called by a driver,
3949 		 * since other port types all provide their own
3950 		 * nm_krings_create
3951 		 */
3952 		na->nm_krings_create = netmap_hw_krings_create;
3953 		na->nm_krings_delete = netmap_hw_krings_delete;
3954 	}
3955 	if (na->nm_notify == NULL)
3956 		na->nm_notify = netmap_notify;
3957 	na->active_fds = 0;
3958 
3959 	if (na->nm_mem == NULL) {
3960 		/* use iommu or global allocator */
3961 		na->nm_mem = netmap_mem_get_iommu(na);
3962 	}
3963 	if (na->nm_bdg_attach == NULL)
3964 		/* no special nm_bdg_attach callback. On VALE
3965 		 * attach, we need to interpose a bwrap
3966 		 */
3967 		na->nm_bdg_attach = netmap_default_bdg_attach;
3968 
3969 	return 0;
3970 }
3971 
3972 /* Wrapper for the register callback provided by netmap-enabled
3973  * hardware drivers.
3974  * nm_iszombie(na) means that the driver module has been
3975  * unloaded, so we cannot call into it.
3976  * nm_os_ifnet_lock() must guarantee mutual exclusion with
3977  * module unloading.
3978  */
3979 static int
3980 netmap_hw_reg(struct netmap_adapter *na, int onoff)
3981 {
3982 	struct netmap_hw_adapter *hwna =
3983 		(struct netmap_hw_adapter*)na;
3984 	int error = 0;
3985 
3986 	nm_os_ifnet_lock();
3987 
3988 	if (nm_iszombie(na)) {
3989 		if (onoff) {
3990 			error = ENXIO;
3991 		} else if (na != NULL) {
3992 			na->na_flags &= ~NAF_NETMAP_ON;
3993 		}
3994 		goto out;
3995 	}
3996 
3997 	error = hwna->nm_hw_register(na, onoff);
3998 
3999 out:
4000 	nm_os_ifnet_unlock();
4001 
4002 	return error;
4003 }
4004 
4005 static void
4006 netmap_hw_dtor(struct netmap_adapter *na)
4007 {
4008 	if (na->ifp == NULL)
4009 		return;
4010 
4011 	NM_DETACH_NA(na->ifp);
4012 }
4013 
4014 
4015 /*
4016  * Allocate a netmap_adapter object, and initialize it from the
4017  * 'arg' passed by the driver on attach.
4018  * We allocate a block of memory of 'size' bytes, which has room
4019  * for struct netmap_adapter plus additional room private to
4020  * the caller.
4021  * Return 0 on success, ENOMEM otherwise.
4022  */
4023 int
4024 netmap_attach_ext(struct netmap_adapter *arg, size_t size, int override_reg)
4025 {
4026 	struct netmap_hw_adapter *hwna = NULL;
4027 	struct ifnet *ifp = NULL;
4028 
4029 	if (size < sizeof(struct netmap_hw_adapter)) {
4030 		if (netmap_debug & NM_DEBUG_ON)
4031 			nm_prerr("Invalid netmap adapter size %d", (int)size);
4032 		return EINVAL;
4033 	}
4034 
4035 	if (arg == NULL || arg->ifp == NULL) {
4036 		if (netmap_debug & NM_DEBUG_ON)
4037 			nm_prerr("either arg or arg->ifp is NULL");
4038 		return EINVAL;
4039 	}
4040 
4041 	if (arg->num_tx_rings == 0 || arg->num_rx_rings == 0) {
4042 		if (netmap_debug & NM_DEBUG_ON)
4043 			nm_prerr("%s: invalid rings tx %d rx %d",
4044 				arg->name, arg->num_tx_rings, arg->num_rx_rings);
4045 		return EINVAL;
4046 	}
4047 
4048 	ifp = arg->ifp;
4049 	if (NM_NA_CLASH(ifp)) {
4050 		/* If NA(ifp) is not null but there is no valid netmap
4051 		 * adapter it means that someone else is using the same
4052 		 * pointer (e.g. ax25_ptr on linux). This happens for
4053 		 * instance when also PF_RING is in use. */
4054 		nm_prerr("Error: netmap adapter hook is busy");
4055 		return EBUSY;
4056 	}
4057 
4058 	hwna = nm_os_malloc(size);
4059 	if (hwna == NULL)
4060 		goto fail;
4061 	hwna->up = *arg;
4062 	hwna->up.na_flags |= NAF_HOST_RINGS | NAF_NATIVE;
4063 	strlcpy(hwna->up.name, ifp->if_xname, sizeof(hwna->up.name));
4064 	if (override_reg) {
4065 		hwna->nm_hw_register = hwna->up.nm_register;
4066 		hwna->up.nm_register = netmap_hw_reg;
4067 	}
4068 	if (netmap_attach_common(&hwna->up)) {
4069 		nm_os_free(hwna);
4070 		goto fail;
4071 	}
4072 	netmap_adapter_get(&hwna->up);
4073 
4074 	NM_ATTACH_NA(ifp, &hwna->up);
4075 
4076 	nm_os_onattach(ifp);
4077 
4078 	if (arg->nm_dtor == NULL) {
4079 		hwna->up.nm_dtor = netmap_hw_dtor;
4080 	}
4081 
4082 	if_printf(ifp, "netmap queues/slots: TX %d/%d, RX %d/%d\n",
4083 	    hwna->up.num_tx_rings, hwna->up.num_tx_desc,
4084 	    hwna->up.num_rx_rings, hwna->up.num_rx_desc);
4085 	return 0;
4086 
4087 fail:
4088 	nm_prerr("fail, arg %p ifp %p na %p", arg, ifp, hwna);
4089 	return (hwna ? EINVAL : ENOMEM);
4090 }
4091 
4092 
4093 int
4094 netmap_attach(struct netmap_adapter *arg)
4095 {
4096 	return netmap_attach_ext(arg, sizeof(struct netmap_hw_adapter),
4097 			1 /* override nm_reg */);
4098 }
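
/*
 * Example (driver-side sketch; 'sc' and the foo_* callbacks are
 * hypothetical): the typical sequence at the end of a NIC driver's
 * attach routine.
 *
 *	struct netmap_adapter na;
 *
 *	bzero(&na, sizeof(na));
 *	na.ifp = sc->ifp;
 *	na.num_tx_desc = sc->num_tx_desc;
 *	na.num_rx_desc = sc->num_rx_desc;
 *	na.num_tx_rings = na.num_rx_rings = sc->num_queues;
 *	na.nm_register = foo_netmap_reg;
 *	na.nm_txsync = foo_netmap_txsync;
 *	na.nm_rxsync = foo_netmap_rxsync;
 *	netmap_attach(&na);	// 'na' is copied, so it can live on the stack
 */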
4099 
4100 
4101 void
4102 NM_DBG(netmap_adapter_get)(struct netmap_adapter *na)
4103 {
4104 	if (!na) {
4105 		return;
4106 	}
4107 
4108 	refcount_acquire(&na->na_refcount);
4109 }
4110 
4111 
4112 /* returns 1 iff the netmap_adapter is destroyed */
4113 int
4114 NM_DBG(netmap_adapter_put)(struct netmap_adapter *na)
4115 {
4116 	if (!na)
4117 		return 1;
4118 
4119 	if (!refcount_release(&na->na_refcount))
4120 		return 0;
4121 
4122 	if (na->nm_dtor)
4123 		na->nm_dtor(na);
4124 
4125 	if (na->tx_rings) { /* XXX should not happen */
4126 		if (netmap_debug & NM_DEBUG_ON)
4127 			nm_prerr("freeing leftover tx_rings");
4128 		na->nm_krings_delete(na);
4129 	}
4130 	netmap_pipe_dealloc(na);
4131 	if (na->nm_mem)
4132 		netmap_mem_put(na->nm_mem);
4133 	bzero(na, sizeof(*na));
4134 	nm_os_free(na);
4135 
4136 	return 1;
4137 }
4138 
4139 /* nm_krings_create callback for all hardware native adapters */
4140 int
4141 netmap_hw_krings_create(struct netmap_adapter *na)
4142 {
4143 	int ret = netmap_krings_create(na, 0);
4144 	if (ret == 0) {
4145 		/* initialize the mbq for the sw rx ring */
4146 		u_int lim = netmap_real_rings(na, NR_RX), i;
4147 		for (i = na->num_rx_rings; i < lim; i++) {
4148 			mbq_safe_init(&NMR(na, NR_RX)[i]->rx_queue);
4149 		}
4150 		nm_prdis("initialized sw rx queue %d", na->num_rx_rings);
4151 	}
4152 	return ret;
4153 }
4154 
4155 
4156 
4157 /*
4158  * Called on module unload by the netmap-enabled drivers
4159  */
4160 void
4161 netmap_detach(struct ifnet *ifp)
4162 {
4163 	struct netmap_adapter *na = NA(ifp);
4164 
4165 	if (!na)
4166 		return;
4167 
4168 	NMG_LOCK();
4169 	netmap_set_all_rings(na, NM_KR_LOCKED);
4170 	/*
4171 	 * if the netmap adapter is not native, somebody
4172 	 * changed it, so we can not release it here.
4173 	 * The NAF_ZOMBIE flag will notify the new owner that
4174 	 * the driver is gone.
4175 	 */
4176 	if (!(na->na_flags & NAF_NATIVE) || !netmap_adapter_put(na)) {
4177 		na->na_flags |= NAF_ZOMBIE;
4178 	}
4179 	/* give active users a chance to notice that NAF_ZOMBIE has been
4180 	 * turned on, so that they can stop and return an error to userspace.
4181 	 * Note that this becomes a NOP if there are no active users and,
4182 	 * therefore, the put() above has deleted the na, since now NA(ifp) is
4183 	 * NULL.
4184 	 */
4185 	netmap_enable_all_rings(ifp);
4186 	NMG_UNLOCK();
4187 }
4188 
4189 
4190 /*
4191  * Intercept packets from the network stack and pass them
4192  * to netmap as incoming packets on the 'software' ring.
4193  *
4194  * We only store packets in a bounded mbq and then copy them
4195  * in the relevant rxsync routine.
4196  *
4197  * We rely on the OS to make sure that the ifp and na do not go
4198  * away (typically the caller checks for IFF_DRV_RUNNING or the like).
4199  * In nm_register() or whenever there is a reinitialization,
4200  * we make sure to make the mode change visible here.
4201  */
4202 int
4203 netmap_transmit(struct ifnet *ifp, struct mbuf *m)
4204 {
4205 	struct netmap_adapter *na = NA(ifp);
4206 	struct netmap_kring *kring, *tx_kring;
4207 	u_int len = MBUF_LEN(m);
4208 	u_int error = ENOBUFS;
4209 	unsigned int txr;
4210 	struct mbq *q;
4211 	int busy;
4212 	u_int i;
4213 
4214 	i = MBUF_TXQ(m);
4215 	if (i >= na->num_host_rx_rings) {
4216 		i = i % na->num_host_rx_rings;
4217 	}
4218 	kring = NMR(na, NR_RX)[nma_get_nrings(na, NR_RX) + i];
4219 
4220 	// XXX [Linux] we do not need this lock
4221 	// if we follow the down/configure/up protocol -gl
4222 	// mtx_lock(&na->core_lock);
4223 
4224 	if (!nm_netmap_on(na)) {
4225 		nm_prerr("%s not in netmap mode anymore", na->name);
4226 		error = ENXIO;
4227 		goto done;
4228 	}
4229 
4230 	txr = MBUF_TXQ(m);
4231 	if (txr >= na->num_tx_rings) {
4232 		txr %= na->num_tx_rings;
4233 	}
4234 	tx_kring = NMR(na, NR_TX)[txr];
4235 
4236 	if (tx_kring->nr_mode == NKR_NETMAP_OFF) {
4237 		return MBUF_TRANSMIT(na, ifp, m);
4238 	}
4239 
4240 	q = &kring->rx_queue;
4241 
4242 	// XXX reconsider long packets if we handle fragments
4243 	if (len > NETMAP_BUF_SIZE(na)) { /* too long for us */
4244 		nm_prerr("%s from_host, drop packet size %d > %d", na->name,
4245 			len, NETMAP_BUF_SIZE(na));
4246 		goto done;
4247 	}
4248 
4249 	if (!netmap_generic_hwcsum) {
4250 		if (nm_os_mbuf_has_csum_offld(m)) {
4251 			nm_prlim(1, "%s drop mbuf that needs checksum offload", na->name);
4252 			goto done;
4253 		}
4254 	}
4255 
4256 	if (nm_os_mbuf_has_seg_offld(m)) {
4257 		nm_prlim(1, "%s drop mbuf that needs generic segmentation offload", na->name);
4258 		goto done;
4259 	}
4260 
4261 #ifdef __FreeBSD__
4262 	ETHER_BPF_MTAP(ifp, m);
4263 #endif /* __FreeBSD__ */
4264 
4265 	/* protect against netmap_rxsync_from_host(), netmap_sw_to_nic()
4266 	 * and maybe other instances of netmap_transmit (the latter
4267 	 * not possible on Linux).
4268 	 * We enqueue the mbuf only if we are sure there is going to be
4269 	 * enough room in the host RX ring, otherwise we drop it.
4270 	 */
4271 	mbq_lock(q);
4272 
4273 	busy = kring->nr_hwtail - kring->nr_hwcur;
4274 	if (busy < 0)
4275 		busy += kring->nkr_num_slots;
4276 	if (busy + mbq_len(q) >= kring->nkr_num_slots - 1) {
4277 		nm_prlim(2, "%s full hwcur %d hwtail %d qlen %d", na->name,
4278 			kring->nr_hwcur, kring->nr_hwtail, mbq_len(q));
4279 	} else {
4280 		mbq_enqueue(q, m);
4281 		nm_prdis(2, "%s %d bufs in queue", na->name, mbq_len(q));
4282 		/* notify outside the lock */
4283 		m = NULL;
4284 		error = 0;
4285 	}
4286 	mbq_unlock(q);
4287 
4288 done:
4289 	if (m)
4290 		m_freem(m);
4291 	/* unconditionally wake up listeners */
4292 	kring->nm_notify(kring, 0);
4293 	/* this is normally netmap_notify(), but for nics
4294 	 * connected to a bridge it is netmap_bwrap_intr_notify(),
4295 	 * that possibly forwards the frames through the switch
4296 	 */
4297 
4298 	return (error);
4299 }
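
/*
 * Example (userspace sketch, using the nm_open() helper from
 * netmap_user.h): packets enqueued above appear on the host RX ring,
 * which a process binds with the "^" suffix (nr_mode NR_REG_SW).
 *
 *	struct nm_desc *d = nm_open("netmap:em0^", NULL, 0, NULL);
 *	// poll(d->fd, ...) now reports host-stack packets as POLLIN
 */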
4300 
4301 
4302 /*
4303  * Reset function to be called by the driver routines when reinitializing
4304  * a hardware ring. The driver is in charge of locking to protect the kring
4305  * while this operation is being performed. This is normally achieved by
4306  * calling netmap_disable_all_rings() before triggering a reset.
4307  * If the kring is not in netmap mode, return NULL to inform the caller
4308  * that this is the case.
4309  * If the kring is in netmap mode, set hwofs so that the netmap indices
4310  * seen by userspace (head/cur/tail) do not change, although the internal
4311  * NIC indices have been reset to 0.
4312  * In any case, adjust kring->nr_mode.
4313  */
4314 struct netmap_slot *
4315 netmap_reset(struct netmap_adapter *na, enum txrx tx, u_int n,
4316 	u_int new_cur)
4317 {
4318 	struct netmap_kring *kring;
4319 	u_int new_hwtail, new_hwofs;
4320 
4321 	if (!nm_native_on(na)) {
4322 		nm_prdis("interface not in native netmap mode");
4323 		return NULL;	/* nothing to reinitialize */
4324 	}
4325 
4326 	if (tx == NR_TX) {
4327 		if (n >= na->num_tx_rings)
4328 			return NULL;
4329 		kring = na->tx_rings[n];
4330 		/*
4331 		 * Set hwofs to rhead, so that slots[rhead] is mapped to
4332 		 * the NIC internal slot 0, and thus the netmap buffer
4333 		 * at rhead is the next to be transmitted. Transmissions
4334 		 * that were pending before the reset are considered as
4335 		 * sent, so that we can have hwcur = rhead. All the slots
4336 		 * are now owned by the user, so we can also reinit hwtail.
4337 		 */
4338 		new_hwofs = kring->rhead;
4339 		new_hwtail = nm_prev(kring->rhead, kring->nkr_num_slots - 1);
4340 	} else {
4341 		if (n >= na->num_rx_rings)
4342 			return NULL;
4343 		kring = na->rx_rings[n];
4344 		/*
4345 		 * Set hwofs to hwtail, so that slots[hwtail] is mapped to
4346 		 * the NIC internal slot 0, and thus the netmap buffer
4347 		 * at hwtail is the next to be given to the NIC.
4348 		 * Unread slots (the ones in [rhead,hwtail[) are owned by
4349 		 * the user, and thus the caller cannot give them
4350 		 * to the NIC right now.
4351 		 */
4352 		new_hwofs = kring->nr_hwtail;
4353 		new_hwtail = kring->nr_hwtail;
4354 	}
4355 	if (kring->nr_pending_mode == NKR_NETMAP_OFF) {
4356 		kring->nr_mode = NKR_NETMAP_OFF;
4357 		return NULL;
4358 	}
4359 	if (netmap_verbose) {
4360 	    nm_prinf("%s, hc %u->%u, ht %u->%u, ho %u->%u", kring->name,
4361 	        kring->nr_hwcur, kring->rhead,
4362 	        kring->nr_hwtail, new_hwtail,
4363 		kring->nkr_hwofs, new_hwofs);
4364 	}
4365 	kring->nr_hwcur = kring->rhead;
4366 	kring->nr_hwtail = new_hwtail;
4367 	kring->nkr_hwofs = new_hwofs;
4368 
4369 	/*
4370 	 * Wakeup on the individual and global selwait
4371 	 * We do the wakeup here, but the ring is not yet reconfigured.
4372 	 * However, we are under lock so there are no races.
4373 	 */
4374 	kring->nr_mode = NKR_NETMAP_ON;
4375 	kring->nm_notify(kring, 0);
4376 	return kring->ring->slot;
4377 }
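
/*
 * Example (driver-side sketch, hypothetical 'ring_nr'): how a driver's
 * ring (re)initialization code uses netmap_reset().  A non-NULL return
 * means the ring is in netmap mode, so the NIC descriptors must be
 * pointed at netmap buffers.
 *
 *	struct netmap_slot *slot = netmap_reset(na, NR_TX, ring_nr, 0);
 *
 *	if (slot != NULL) {	// ring is in netmap mode
 *		for (j = 0; j < kring->nkr_num_slots; j++) {
 *			uint64_t paddr;
 *			int sj = netmap_idx_n2k(kring, j);
 *
 *			PNMB(na, slot + sj, &paddr);
 *			// ... point NIC descriptor j at paddr ...
 *		}
 *	}
 */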
4378 
4379 
4380 /*
4381  * Dispatch rx/tx interrupts to the netmap rings.
4382  *
4383  * "work_done" is non-null on the RX path, NULL for the TX path.
4384  * We rely on the OS to make sure that there is only one active
4385  * instance per queue, and that there is appropriate locking.
4386  *
4387  * The 'notify' routine depends on what the ring is attached to.
4388  * - for a netmap file descriptor, do a selwakeup on the individual
4389  *   waitqueue, plus one on the global one if needed
4390  *   (see netmap_notify)
4391  * - for a nic connected to a switch, call the proper forwarding routine
4392  *   (see netmap_bwrap_intr_notify)
4393  */
4394 int
4395 netmap_common_irq(struct netmap_adapter *na, u_int q, u_int *work_done)
4396 {
4397 	struct netmap_kring *kring;
4398 	enum txrx t = (work_done ? NR_RX : NR_TX);
4399 
4400 	q &= NETMAP_RING_MASK;
4401 
4402 	if (netmap_debug & (NM_DEBUG_RXINTR|NM_DEBUG_TXINTR)) {
4403 	        nm_prlim(5, "received %s queue %d", work_done ? "RX" : "TX" , q);
4404 	}
4405 
4406 	if (q >= nma_get_nrings(na, t))
4407 		return NM_IRQ_PASS; // not a physical queue
4408 
4409 	kring = NMR(na, t)[q];
4410 
4411 	if (kring->nr_mode == NKR_NETMAP_OFF) {
4412 		return NM_IRQ_PASS;
4413 	}
4414 
4415 	if (t == NR_RX) {
4416 		kring->nr_kflags |= NKR_PENDINTR;	// XXX atomic ?
4417 		*work_done = 1; /* do not fire napi again */
4418 	}
4419 
4420 	return kring->nm_notify(kring, 0);
4421 }
4422 
4423 
4424 /*
4425  * Default functions to handle rx/tx interrupts from a physical device.
4426  * "work_done" is non-null on the RX path, NULL for the TX path.
4427  *
4428  * If the card is not in netmap mode, simply return NM_IRQ_PASS,
4429  * so that the caller proceeds with regular processing.
4430  * Otherwise call netmap_common_irq().
4431  *
4432  * If the card is connected to a netmap file descriptor,
4433  * do a selwakeup on the individual queue, plus one on the global one
4434  * if needed (multiqueue card _and_ there are multiqueue listeners),
4435  * and return NR_IRQ_COMPLETED.
4436  *
4437  * Finally, if called on rx from an interface connected to a switch,
4438  * calls the proper forwarding routine.
4439  */
4440 int
4441 netmap_rx_irq(struct ifnet *ifp, u_int q, u_int *work_done)
4442 {
4443 	struct netmap_adapter *na = NA(ifp);
4444 
4445 	/*
4446 	 * XXX emulated netmap mode sets NAF_SKIP_INTR so
4447 	 * we still use the regular driver even though the previous
4448 	 * check fails. It is unclear whether we should use
4449 	 * nm_native_on() here.
4450 	 */
4451 	if (!nm_netmap_on(na))
4452 		return NM_IRQ_PASS;
4453 
4454 	if (na->na_flags & NAF_SKIP_INTR) {
4455 		nm_prdis("use regular interrupt");
4456 		return NM_IRQ_PASS;
4457 	}
4458 
4459 	return netmap_common_irq(na, q, work_done);
4460 }
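
/*
 * Example (driver-side sketch; foo_rxeof() and its softc are
 * hypothetical): the usual hook at the top of a driver's RX interrupt
 * handler.
 *
 *	void
 *	foo_rxeof(struct foo_rx_ring *rxr)
 *	{
 *		u_int work_done;
 *
 *		if (netmap_rx_irq(rxr->ifp, rxr->me, &work_done) !=
 *		    NM_IRQ_PASS)
 *			return;	// netmap consumed the interrupt
 *		// ... regular mbuf processing ...
 *	}
 */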
4461 
4462 /* set/clear native flags and if_transmit/netdev_ops */
4463 void
4464 nm_set_native_flags(struct netmap_adapter *na)
4465 {
4466 	struct ifnet *ifp = na->ifp;
4467 
4468 	/* We do the setup for intercepting packets only if we are the
4469 	 * first user of this adapter. */
4470 	if (na->active_fds > 0) {
4471 		return;
4472 	}
4473 
4474 	na->na_flags |= NAF_NETMAP_ON;
4475 	nm_os_onenter(ifp);
4476 	nm_update_hostrings_mode(na);
4477 }
4478 
4479 void
4480 nm_clear_native_flags(struct netmap_adapter *na)
4481 {
4482 	struct ifnet *ifp = na->ifp;
4483 
4484 	/* We undo the setup for intercepting packets only if we are the
4485 	 * last user of this adapter. */
4486 	if (na->active_fds > 0) {
4487 		return;
4488 	}
4489 
4490 	nm_update_hostrings_mode(na);
4491 	nm_os_onexit(ifp);
4492 
4493 	na->na_flags &= ~NAF_NETMAP_ON;
4494 }
4495 
4496 void
4497 netmap_krings_mode_commit(struct netmap_adapter *na, int onoff)
4498 {
4499 	enum txrx t;
4500 
4501 	for_rx_tx(t) {
4502 		int i;
4503 
4504 		for (i = 0; i < netmap_real_rings(na, t); i++) {
4505 			struct netmap_kring *kring = NMR(na, t)[i];
4506 
4507 			if (onoff && nm_kring_pending_on(kring))
4508 				kring->nr_mode = NKR_NETMAP_ON;
4509 			else if (!onoff && nm_kring_pending_off(kring))
4510 				kring->nr_mode = NKR_NETMAP_OFF;
4511 		}
4512 	}
4513 }
4514 
4515 /*
4516  * Module loader and unloader
4517  *
4518  * netmap_init() creates the /dev/netmap device and initializes
4519  * all global variables. Returns 0 on success, errno on failure
4520  * (though in practice this should not happen).
4521  *
4522  * netmap_fini() destroys everything.
4523  */
4524 
4525 static struct cdev *netmap_dev; /* /dev/netmap character device. */
4526 extern struct cdevsw netmap_cdevsw;
4527 
4528 
4529 void
4530 netmap_fini(void)
4531 {
4532 	if (netmap_dev)
4533 		destroy_dev(netmap_dev);
4534 	/* we assume that there are no longer netmap users */
4535 	nm_os_ifnet_fini();
4536 	netmap_uninit_bridges();
4537 	netmap_mem_fini();
4538 	NMG_LOCK_DESTROY();
4539 	nm_prinf("netmap: unloaded module.");
4540 }
4541 
4542 
4543 int
4544 netmap_init(void)
4545 {
4546 	int error;
4547 
4548 	NMG_LOCK_INIT();
4549 
4550 	error = netmap_mem_init();
4551 	if (error != 0)
4552 		goto fail;
4553 	/*
4554 	 * MAKEDEV_ETERNAL_KLD avoids an expensive check on syscalls
4555 	 * when the module is compiled in.
4556 	 * XXX could use make_dev_credv() to get error number
4557 	 */
4558 	netmap_dev = make_dev_credf(MAKEDEV_ETERNAL_KLD,
4559 		&netmap_cdevsw, 0, NULL, UID_ROOT, GID_WHEEL, 0600,
4560 			      "netmap");
4561 	if (!netmap_dev)
4562 		goto fail;
4563 
4564 	error = netmap_init_bridges();
4565 	if (error)
4566 		goto fail;
4567 
4568 #ifdef __FreeBSD__
4569 	nm_os_vi_init_index();
4570 #endif
4571 
4572 	error = nm_os_ifnet_init();
4573 	if (error)
4574 		goto fail;
4575 
4576 #if !defined(__FreeBSD__) || defined(KLD_MODULE)
4577 	nm_prinf("netmap: loaded module");
4578 #endif
4579 	return (0);
4580 fail:
4581 	netmap_fini();
4582 	return (EINVAL); /* may be incorrect */
4583 }
4584