xref: /freebsd/sys/dev/netmap/netmap.c (revision ec0ea6efa1ad229d75c394c1a9b9cac33af2b1d3)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (C) 2011-2014 Matteo Landi
5  * Copyright (C) 2011-2016 Luigi Rizzo
6  * Copyright (C) 2011-2016 Giuseppe Lettieri
7  * Copyright (C) 2011-2016 Vincenzo Maffione
8  * All rights reserved.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  *   1. Redistributions of source code must retain the above copyright
14  *      notice, this list of conditions and the following disclaimer.
15  *   2. Redistributions in binary form must reproduce the above copyright
16  *      notice, this list of conditions and the following disclaimer in the
17  *      documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31 
32 
33 /*
34  * $FreeBSD$
35  *
36  * This module supports memory mapped access to network devices,
37  * see netmap(4).
38  *
39  * The module uses a large memory pool allocated by the kernel
40  * and accessible as mmapped memory by multiple userspace threads/processes.
41  * The memory pool contains packet buffers and "netmap rings",
42  * i.e. user-accessible copies of the interface's queues.
43  *
44  * Access to the network card works like this:
45  * 1. a process/thread issues one or more open() on /dev/netmap, to create
46  *    a select()able file descriptor on which events are reported.
47  * 2. on each descriptor, the process issues an ioctl() to identify
48  *    the interface that should report events to the file descriptor.
49  * 3. on each descriptor, the process issues an mmap() request to
50  *    map the shared memory region within the process' address space.
51  *    The list of interesting queues is indicated by a location in
52  *    the shared memory region.
53  * 4. using the functions in the netmap(4) userspace API, a process
54  *    can look up the occupation state of a queue, access memory buffers,
55  *    and retrieve received packets or enqueue packets to transmit.
56  * 5. using some ioctl()s the process can synchronize the userspace view
57  *    of the queue with the actual status in the kernel. This includes both
58  *    receiving the notification of new packets, and transmitting new
59  *    packets on the output interface.
60  * 6. select() or poll() can be used to wait for events on individual
61  *    transmit or receive queues (or all queues for a given interface).
62  *
63 
64 		SYNCHRONIZATION (USER)
65 
66 The netmap rings and data structures may be shared among multiple
67 user threads or even independent processes.
68 Any synchronization among those threads/processes is delegated
69 to the threads themselves. Only one thread at a time can be in
70 a system call on the same netmap ring. The OS does not enforce
71 this; it only guarantees that invalid usage will not crash
72 the system.
73 
74 		LOCKING (INTERNAL)
75 
76 Within the kernel, access to the netmap rings is protected as follows:
77 
78 - a spinlock on each ring, to handle producer/consumer races on
79   RX rings attached to the host stack (against multiple host
80   threads writing from the host stack to the same ring),
81   and on 'destination' rings attached to a VALE switch
82   (i.e. RX rings in VALE ports, and TX rings in NIC/host ports)
83   protecting against multiple active senders for the same destination.
84 
85 - an atomic variable to guarantee that there is at most one
86   instance of *_*xsync() on the ring at any time.
87   For rings connected to user file
88   descriptors, an atomic_test_and_set() protects this, and the
89   lock on the ring is not actually used.
90   For NIC RX rings connected to a VALE switch, an atomic_test_and_set()
91   is also used to prevent multiple executions (the driver might indeed
92   already guarantee this).
93   For NIC TX rings connected to a VALE switch, the lock arbitrates
94   access to the queue (both when allocating buffers and when pushing
95   them out).
96 
97 - *xsync() should be protected against initialization of the card.
98   On FreeBSD most devices have the reset routine protected by
99   a RING lock (ixgbe, igb, em) or core lock (re). lem is missing
100   the RING protection on rx_reset(); this should be added.
101 
102   On linux there is an external lock on the tx path, which probably
103   also arbitrates access to the reset routine. XXX to be revised
104 
105 - a per-interface core_lock protecting access from the host stack
106   while interfaces may be detached from netmap mode.
107   XXX there should be no need for this lock if we detach the interfaces
108   only while they are down.
109 
110 
111 --- VALE SWITCH ---
112 
113 NMG_LOCK() serializes all modifications to switches and ports.
114 A switch cannot be deleted until all ports are gone.
115 
116 For each switch, an SX lock (RWlock on linux) protects
117 deletion of ports. When adding or deleting a port, the
118 lock is acquired in exclusive mode (after holding NMG_LOCK).
119 When forwarding, the lock is acquired in shared mode (without NMG_LOCK).
120 The lock is held throughout the entire forwarding cycle,
121 during which the thread may incur a page fault.
122 Hence it is important that sleepable shared locks are used.
123 
124 On the rx ring, the per-port lock is grabbed initially to reserve
125 a number of slots in the ring, then the lock is released,
126 packets are copied from source to destination, and then
127 the lock is acquired again and the receive ring is updated.
128 (A similar thing is done on the tx ring for NIC and host stack
129 ports attached to the switch)
130 
131  */
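/*
 * Illustrative sketch (not part of this module): the six steps above as
 * seen from userspace, using the legacy struct nmreq API exported by
 * net/netmap_user.h. Error handling is omitted; the interface name "em0"
 * and the build_packet() helper are only examples.
 *
 *	#include <net/netmap_user.h>
 *	#include <fcntl.h>
 *	#include <poll.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *
 *	struct nmreq req;
 *	struct pollfd pfd;
 *	int fd = open("/dev/netmap", O_RDWR);			// step 1
 *
 *	memset(&req, 0, sizeof(req));
 *	strlcpy(req.nr_name, "em0", sizeof(req.nr_name));
 *	req.nr_version = NETMAP_API;
 *	ioctl(fd, NIOCREGIF, &req);				// step 2
 *
 *	void *mem = mmap(NULL, req.nr_memsize,			// step 3
 *	    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	struct netmap_if *nifp = NETMAP_IF(mem, req.nr_offset);
 *	struct netmap_ring *txr = NETMAP_TXRING(nifp, 0);	// step 4
 *
 *	while (!nm_ring_empty(txr)) {
 *		struct netmap_slot *slot = &txr->slot[txr->cur];
 *		char *buf = NETMAP_BUF(txr, slot->buf_idx);
 *		slot->len = build_packet(buf);			// fill a frame
 *		txr->head = txr->cur = nm_ring_next(txr, txr->cur);
 *	}
 *	ioctl(fd, NIOCTXSYNC, NULL);				// step 5
 *
 *	pfd.fd = fd;
 *	pfd.events = POLLIN;
 *	poll(&pfd, 1, -1);					// step 6
 */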
132 
133 
134 /* --- internals ----
135  *
136  * Roadmap to the code that implements the above.
137  *
138  * > 1. a process/thread issues one or more open() on /dev/netmap, to create
139  * >    a select()able file descriptor on which events are reported.
140  *
141  *  	Internally, we allocate a netmap_priv_d structure, which will be
142  *  	initialized on ioctl(NIOCREGIF). There is one netmap_priv_d
143  *  	structure for each open().
144  *
145  *      os-specific:
146  *  	    FreeBSD: see netmap_open() (netmap_freebsd.c)
147  *  	    linux:   see linux_netmap_open() (netmap_linux.c)
148  *
149  * > 2. on each descriptor, the process issues an ioctl() to identify
150  * >    the interface that should report events to the file descriptor.
151  *
152  * 	Implemented by netmap_ioctl(), NIOCREGIF case, with nmr->nr_cmd==0.
153  * 	Most important things happen in netmap_get_na() and
154  * 	netmap_do_regif(), called from there. Additional details can be
155  * 	found in the comments above those functions.
156  *
157  * 	In all cases, this action creates/takes-a-reference-to a
158  * 	netmap_*_adapter describing the port, and allocates a netmap_if
159  * 	and all necessary netmap rings, filling them with netmap buffers.
160  *
161  *      In this phase, the sync callbacks for each ring are set (these are used
162  *      in steps 5 and 6 below).  The callbacks depend on the type of adapter.
163  *      The adapter creation/initialization code puts them in the
164  * 	netmap_adapter (fields na->nm_txsync and na->nm_rxsync).  Then, they
165  * 	are copied from there to the netmap_kring's during netmap_do_regif(), by
166  * 	the nm_krings_create() callback.  All the nm_krings_create callbacks
167  * 	actually call netmap_krings_create() to perform this and the other
168  * 	common stuff. netmap_krings_create() also takes care of the host rings,
169  * 	if needed, by setting their sync callbacks appropriately.
170  *
171  * 	Additional actions depend on the kind of netmap_adapter that has been
172  * 	registered:
173  *
174  * 	- netmap_hw_adapter:  	     [netmap.c]
175  * 	     This is a system netdev/ifp with native netmap support.
176  * 	     The ifp is detached from the host stack by redirecting:
177  * 	       - transmissions (from the network stack) to netmap_transmit()
178  * 	       - receive notifications to the nm_notify() callback for
179  * 	         this adapter. The callback is normally netmap_notify(), unless
180  * 	         the ifp is attached to a bridge using bwrap, in which case it
181  * 	         is netmap_bwrap_intr_notify().
182  *
183  * 	- netmap_generic_adapter:      [netmap_generic.c]
184  * 	      A system netdev/ifp without native netmap support.
185  *
186  * 	(the decision about native/non native support is taken in
187  * 	 netmap_get_hw_na(), called by netmap_get_na())
188  *
189  * 	- netmap_vp_adapter 		[netmap_vale.c]
190  * 	      Returned by netmap_get_bdg_na().
191  * 	      This is a persistent or ephemeral VALE port. Ephemeral ports
192  * 	      are created on the fly if they don't already exist, and are
193  * 	      always attached to a bridge.
194  * 	      Persistent VALE ports must be created separately, and then
195  * 	      attached like normal NICs. The NIOCREGIF we are examining
196  * 	      will find them only if they had previously been created and
197  * 	      attached (see VALE_CTL below).
198  *
199  * 	- netmap_pipe_adapter 	      [netmap_pipe.c]
200  * 	      Returned by netmap_get_pipe_na().
201  * 	      Both pipe ends are created, if they didn't already exist.
202  *
203  * 	- netmap_monitor_adapter      [netmap_monitor.c]
204  * 	      Returned by netmap_get_monitor_na().
205  * 	      If successful, the nm_sync callbacks of the monitored adapter
206  * 	      will be intercepted by the returned monitor.
207  *
208  * 	- netmap_bwrap_adapter	      [netmap_vale.c]
209  * 	      Cannot be obtained in this way, see VALE_CTL below
210  *
211  *
212  * 	os-specific:
213  * 	    linux: we first go through linux_netmap_ioctl() to
214  * 	           adapt the FreeBSD interface to the linux one.
215  *
216  *
217  * > 3. on each descriptor, the process issues an mmap() request to
218  * >    map the shared memory region within the process' address space.
219  * >    The list of interesting queues is indicated by a location in
220  * >    the shared memory region.
221  *
222  *      os-specific:
223  *  	    FreeBSD: netmap_mmap_single (netmap_freebsd.c).
224  *  	    linux:   linux_netmap_mmap (netmap_linux.c).
225  *
226  * > 4. using the functions in the netmap(4) userspace API, a process
227  * >    can look up the occupation state of a queue, access memory buffers,
228  * >    and retrieve received packets or enqueue packets to transmit.
229  *
230  * 	these actions do not involve the kernel.
231  *
232  * > 5. using some ioctl()s the process can synchronize the userspace view
233  * >    of the queue with the actual status in the kernel. This includes both
234  * >    receiving the notification of new packets, and transmitting new
235  * >    packets on the output interface.
236  *
237  * 	These are implemented in netmap_ioctl(), NIOCTXSYNC and NIOCRXSYNC
238  * 	cases. They invoke the nm_sync callbacks on the netmap_kring
239  * 	structures, as initialized in step 2 and maybe later modified
240  * 	by a monitor. Monitors, however, will always call the original
241  * 	callback before doing anything else.
242  *
243  *
244  * > 6. select() or poll() can be used to wait for events on individual
245  * >    transmit or receive queues (or all queues for a given interface).
246  *
247  * 	Implemented in netmap_poll(). This will call the same nm_sync()
248  * 	callbacks as in step 5 above.
249  *
250  * 	os-specific:
251  * 		linux: we first go through linux_netmap_poll() to adapt
252  * 		       the FreeBSD interface to the linux one.
253  *
254  *
255  *  ----  VALE_CTL -----
256  *
257  *  VALE switches are controlled by issuing a NIOCREGIF with a non-null
258  *  nr_cmd in the nmreq structure. These subcommands are handled by
259  *  netmap_bdg_ctl() in netmap_vale.c. Persistent VALE ports are created
260  *  and destroyed by issuing the NETMAP_BDG_NEWIF and NETMAP_BDG_DELIF
261  *  subcommands, respectively.
262  *
263  *  Any network interface known to the system (including a persistent VALE
264  *  port) can be attached to a VALE switch by issuing the
265  *  NETMAP_REQ_VALE_ATTACH command. After the attachment, persistent VALE ports
266  *  look exactly like ephemeral VALE ports (as created in step 2 above).  The
267  *  attachment of other interfaces, instead, requires the creation of a
268  *  netmap_bwrap_adapter.  Moreover, the attached interface must be put in
269  *  netmap mode. This may require the creation of a netmap_generic_adapter if
270  *  we have no native support for the interface, or if generic adapters have
271  *  been forced by sysctl.
272  *
273  *  Both persistent VALE ports and bwraps are handled by netmap_get_bdg_na(),
274  *  called by nm_bdg_ctl_attach(), and discriminated by the nm_bdg_attach()
275  *  callback.  In the case of the bwrap, the callback creates the
276  *  netmap_bwrap_adapter.  The initialization of the bwrap is then
277  *  completed by calling netmap_do_regif() on it, in the nm_bdg_ctl()
278  *  callback (netmap_bwrap_bdg_ctl in netmap_vale.c).
279  *  A generic adapter for the wrapped ifp will be created if needed, when
280  *  netmap_get_bdg_na() calls netmap_get_hw_na().
281  *
282  *
283  *  ---- DATAPATHS -----
284  *
285  *              -= SYSTEM DEVICE WITH NATIVE SUPPORT =-
286  *
287  *    na == NA(ifp) == netmap_hw_adapter created in DEVICE_netmap_attach()
288  *
289  *    - tx from netmap userspace:
290  *	 concurrently:
291  *           1) ioctl(NIOCTXSYNC)/netmap_poll() in process context
292  *                kring->nm_sync() == DEVICE_netmap_txsync()
293  *           2) device interrupt handler
294  *                na->nm_notify()  == netmap_notify()
295  *    - rx from netmap userspace:
296  *       concurrently:
297  *           1) ioctl(NIOCRXSYNC)/netmap_poll() in process context
298  *                kring->nm_sync() == DEVICE_netmap_rxsync()
299  *           2) device interrupt handler
300  *                na->nm_notify()  == netmap_notify()
301  *    - rx from host stack
302  *       concurrently:
303  *           1) host stack
304  *                netmap_transmit()
305  *                  na->nm_notify  == netmap_notify()
306  *           2) ioctl(NIOCRXSYNC)/netmap_poll() in process context
307  *                kring->nm_sync() == netmap_rxsync_from_host
308  *                  netmap_rxsync_from_host(na, NULL, NULL)
309  *    - tx to host stack
310  *           ioctl(NIOCTXSYNC)/netmap_poll() in process context
311  *             kring->nm_sync() == netmap_txsync_to_host
312  *               netmap_txsync_to_host(na)
313  *                 nm_os_send_up()
314  *                   FreeBSD: na->if_input() == ether_input()
315  *                   linux: netif_rx() with NM_MAGIC_PRIORITY_RX
316  *
317  *
318  *               -= SYSTEM DEVICE WITH GENERIC SUPPORT =-
319  *
320  *    na == NA(ifp) == generic_netmap_adapter created in generic_netmap_attach()
321  *
322  *    - tx from netmap userspace:
323  *       concurrently:
324  *           1) ioctl(NIOCTXSYNC)/netmap_poll() in process context
325  *               kring->nm_sync() == generic_netmap_txsync()
326  *                   nm_os_generic_xmit_frame()
327  *                       linux:   dev_queue_xmit() with NM_MAGIC_PRIORITY_TX
328  *                           ifp->ndo_start_xmit == generic_ndo_start_xmit()
329  *                               gna->save_start_xmit == orig. dev. start_xmit
330  *                       FreeBSD: na->if_transmit() == orig. dev if_transmit
331  *           2) generic_mbuf_destructor()
332  *                   na->nm_notify() == netmap_notify()
333  *    - rx from netmap userspace:
334  *           1) ioctl(NIOCRXSYNC)/netmap_poll() in process context
335  *               kring->nm_sync() == generic_netmap_rxsync()
336  *                   mbq_safe_dequeue()
337  *           2) device driver
338  *               generic_rx_handler()
339  *                   mbq_safe_enqueue()
340  *                   na->nm_notify() == netmap_notify()
341  *    - rx from host stack
342  *        FreeBSD: same as native
343  *        Linux: same as native except:
344  *           1) host stack
345  *               dev_queue_xmit() without NM_MAGIC_PRIORITY_TX
346  *                   ifp->ndo_start_xmit == generic_ndo_start_xmit()
347  *                       netmap_transmit()
348  *                           na->nm_notify() == netmap_notify()
349  *    - tx to host stack (same as native):
350  *
351  *
352  *                           -= VALE =-
353  *
354  *   INCOMING:
355  *
356  *      - VALE ports:
357  *          ioctl(NIOCTXSYNC)/netmap_poll() in process context
358  *              kring->nm_sync() == netmap_vp_txsync()
359  *
360  *      - system device with native support:
361  *         from cable:
362  *             interrupt
363  *                na->nm_notify() == netmap_bwrap_intr_notify(ring_nr != host ring)
364  *                     kring->nm_sync() == DEVICE_netmap_rxsync()
365  *                     netmap_vp_txsync()
366  *                     kring->nm_sync() == DEVICE_netmap_rxsync()
367  *         from host stack:
368  *             netmap_transmit()
369  *                na->nm_notify() == netmap_bwrap_intr_notify(ring_nr == host ring)
370  *                     kring->nm_sync() == netmap_rxsync_from_host()
371  *                     netmap_vp_txsync()
372  *
373  *      - system device with generic support:
374  *         from device driver:
375  *            generic_rx_handler()
376  *                na->nm_notify() == netmap_bwrap_intr_notify(ring_nr != host ring)
377  *                     kring->nm_sync() == generic_netmap_rxsync()
378  *                     netmap_vp_txsync()
379  *                     kring->nm_sync() == generic_netmap_rxsync()
380  *         from host stack:
381  *            netmap_transmit()
382  *                na->nm_notify() == netmap_bwrap_intr_notify(ring_nr == host ring)
383  *                     kring->nm_sync() == netmap_rxsync_from_host()
384  *                     netmap_vp_txsync()
385  *
386  *   (all cases) --> nm_bdg_flush()
387  *                      dest_na->nm_notify() == (see below)
388  *
389  *   OUTGOING:
390  *
391  *      - VALE ports:
392  *         concurrently:
393  *             1) ioctl(NIOCRXSYNC)/netmap_poll() in process context
394  *                    kring->nm_sync() == netmap_vp_rxsync()
395  *             2) from nm_bdg_flush()
396  *                    na->nm_notify() == netmap_notify()
397  *
398  *      - system device with native support:
399  *          to cable:
400  *             na->nm_notify() == netmap_bwrap_notify()
401  *                 netmap_vp_rxsync()
402  *                 kring->nm_sync() == DEVICE_netmap_txsync()
403  *                 netmap_vp_rxsync()
404  *          to host stack:
405  *                 netmap_vp_rxsync()
406  *                 kring->nm_sync() == netmap_txsync_to_host
407  *                 netmap_vp_rxsync_locked()
408  *
409  *      - system device with generic adapter:
410  *          to device driver:
411  *             na->nm_notify() == netmap_bwrap_notify()
412  *                 netmap_vp_rxsync()
413  *                 kring->nm_sync() == generic_netmap_txsync()
414  *                 netmap_vp_rxsync()
415  *          to host stack:
416  *                 netmap_vp_rxsync()
417  *                 kring->nm_sync() == netmap_txsync_to_host
418  *                 netmap_vp_rxsync()
419  *
420  */
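/*
 * Illustrative sketch (hypothetical "foo" driver, not part of this file):
 * how a NIC with native support typically creates the netmap_hw_adapter
 * mentioned above, by filling a netmap_adapter template and calling
 * netmap_attach() at the end of its attach routine. The callback names
 * and softc fields are assumptions.
 *
 *	static void
 *	foo_netmap_attach(struct foo_softc *sc)
 *	{
 *		struct netmap_adapter na;
 *
 *		bzero(&na, sizeof(na));
 *		na.ifp = sc->ifp;
 *		na.num_tx_desc = sc->num_tx_desc;
 *		na.num_rx_desc = sc->num_rx_desc;
 *		na.num_tx_rings = na.num_rx_rings = sc->num_queues;
 *		na.nm_txsync = foo_netmap_txsync;	// used by steps 5 and 6
 *		na.nm_rxsync = foo_netmap_rxsync;
 *		na.nm_register = foo_netmap_reg;	// enter/exit netmap mode
 *		netmap_attach(&na);
 *	}
 */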
421 
422 /*
423  * OS-specific code that is used only within this file.
424  * Other OS-specific code that must be accessed by drivers
425  * is present in netmap_kern.h
426  */
427 
428 #if defined(__FreeBSD__)
429 #include <sys/cdefs.h> /* prerequisite */
430 #include <sys/types.h>
431 #include <sys/errno.h>
432 #include <sys/param.h>	/* defines used in kernel.h */
433 #include <sys/kernel.h>	/* types used in module initialization */
434 #include <sys/conf.h>	/* cdevsw struct, UID, GID */
435 #include <sys/filio.h>	/* FIONBIO */
436 #include <sys/sockio.h>
437 #include <sys/socketvar.h>	/* struct socket */
438 #include <sys/malloc.h>
439 #include <sys/poll.h>
440 #include <sys/proc.h>
441 #include <sys/rwlock.h>
442 #include <sys/socket.h> /* sockaddrs */
443 #include <sys/selinfo.h>
444 #include <sys/sysctl.h>
445 #include <sys/jail.h>
446 #include <sys/epoch.h>
447 #include <net/vnet.h>
448 #include <net/if.h>
449 #include <net/if_var.h>
450 #include <net/bpf.h>		/* BIOCIMMEDIATE */
451 #include <machine/bus.h>	/* bus_dmamap_* */
452 #include <sys/endian.h>
453 #include <sys/refcount.h>
454 #include <net/ethernet.h>	/* ETHER_BPF_MTAP */
455 
456 
457 #elif defined(linux)
458 
459 #include "bsd_glue.h"
460 
461 #elif defined(__APPLE__)
462 
463 #warning OSX support is only partial
464 #include "osx_glue.h"
465 
466 #elif defined (_WIN32)
467 
468 #include "win_glue.h"
469 
470 #else
471 
472 #error	Unsupported platform
473 
474 #endif /* unsupported */
475 
476 /*
477  * common headers
478  */
479 #include <net/netmap.h>
480 #include <dev/netmap/netmap_kern.h>
481 #include <dev/netmap/netmap_mem2.h>
482 
483 
484 /* user-controlled variables */
485 int netmap_verbose;
486 #ifdef CONFIG_NETMAP_DEBUG
487 int netmap_debug;
488 #endif /* CONFIG_NETMAP_DEBUG */
489 
490 static int netmap_no_timestamp; /* don't timestamp on rxsync */
491 int netmap_no_pendintr = 1;
492 int netmap_txsync_retry = 2;
493 static int netmap_fwd = 0;	/* force transparent forwarding */
494 
495 /*
496  * netmap_admode selects the netmap mode to use.
497  * Invalid values are reset to NETMAP_ADMODE_BEST
498  */
499 enum {	NETMAP_ADMODE_BEST = 0,	/* use native, fallback to generic */
500 	NETMAP_ADMODE_NATIVE,	/* either native or none */
501 	NETMAP_ADMODE_GENERIC,	/* force generic */
502 	NETMAP_ADMODE_LAST };
503 static int netmap_admode = NETMAP_ADMODE_BEST;
504 
505 /* netmap_generic_mit controls mitigation of RX notifications for
506  * the generic netmap adapter. The value is a time interval in
507  * nanoseconds. */
508 int netmap_generic_mit = 100*1000;
509 
510 /* By default we use netmap-aware qdiscs with generic netmap adapters,
511  * even if this can cause a small performance hit with hardware NICs.
512  * However, using the qdisc is the safer approach, for two reasons:
513  * 1) it prevents non-fifo qdiscs from breaking the TX notification
514  *    scheme, which is based on mbuf destructors when txqdisc is
515  *    not used.
516  * 2) it makes it possible to transmit over software devices that
517  *    change skb->dev, like bridge, veth, ...
518  *
519  * Anyway, users looking for the best performance should
520  * use native adapters.
521  */
522 #ifdef linux
523 int netmap_generic_txqdisc = 1;
524 #endif
525 
526 /* Default number of slots and queues for generic adapters. */
527 int netmap_generic_ringsize = 1024;
528 int netmap_generic_rings = 1;
529 
530 /* Non-zero to enable checksum offloading in NIC drivers */
531 int netmap_generic_hwcsum = 0;
532 
533 /* Non-zero if ptnet devices are allowed to use virtio-net headers. */
534 int ptnet_vnet_hdr = 1;
535 
536 /*
537  * SYSCTL calls are grouped between SYSBEGIN and SYSEND so that they
538  * can be emulated on other operating systems
539  */
540 SYSBEGIN(main_init);
541 
542 SYSCTL_DECL(_dev_netmap);
543 SYSCTL_NODE(_dev, OID_AUTO, netmap, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
544     "Netmap args");
545 SYSCTL_INT(_dev_netmap, OID_AUTO, verbose,
546 		CTLFLAG_RW, &netmap_verbose, 0, "Verbose mode");
547 #ifdef CONFIG_NETMAP_DEBUG
548 SYSCTL_INT(_dev_netmap, OID_AUTO, debug,
549 		CTLFLAG_RW, &netmap_debug, 0, "Debug messages");
550 #endif /* CONFIG_NETMAP_DEBUG */
551 SYSCTL_INT(_dev_netmap, OID_AUTO, no_timestamp,
552 		CTLFLAG_RW, &netmap_no_timestamp, 0, "no_timestamp");
553 SYSCTL_INT(_dev_netmap, OID_AUTO, no_pendintr, CTLFLAG_RW, &netmap_no_pendintr,
554 		0, "Always look for new received packets.");
555 SYSCTL_INT(_dev_netmap, OID_AUTO, txsync_retry, CTLFLAG_RW,
556 		&netmap_txsync_retry, 0, "Number of txsync loops in bridge's flush.");
557 
558 SYSCTL_INT(_dev_netmap, OID_AUTO, fwd, CTLFLAG_RW, &netmap_fwd, 0,
559 		"Force NR_FORWARD mode");
560 SYSCTL_INT(_dev_netmap, OID_AUTO, admode, CTLFLAG_RW, &netmap_admode, 0,
561 		"Adapter mode. 0 selects the best option available,"
562 		"1 forces native adapter, 2 forces emulated adapter");
563 SYSCTL_INT(_dev_netmap, OID_AUTO, generic_hwcsum, CTLFLAG_RW, &netmap_generic_hwcsum,
564 		0, "Hardware checksums. 0 to disable checksum generation by the NIC (default),"
565 		"1 to enable checksum generation by the NIC");
566 SYSCTL_INT(_dev_netmap, OID_AUTO, generic_mit, CTLFLAG_RW, &netmap_generic_mit,
567 		0, "RX notification interval in nanoseconds");
568 SYSCTL_INT(_dev_netmap, OID_AUTO, generic_ringsize, CTLFLAG_RW,
569 		&netmap_generic_ringsize, 0,
570 		"Number of per-ring slots for emulated netmap mode");
571 SYSCTL_INT(_dev_netmap, OID_AUTO, generic_rings, CTLFLAG_RW,
572 		&netmap_generic_rings, 0,
573 		"Number of TX/RX queues for emulated netmap adapters");
574 #ifdef linux
575 SYSCTL_INT(_dev_netmap, OID_AUTO, generic_txqdisc, CTLFLAG_RW,
576 		&netmap_generic_txqdisc, 0, "Use qdisc for generic adapters");
577 #endif
578 SYSCTL_INT(_dev_netmap, OID_AUTO, ptnet_vnet_hdr, CTLFLAG_RW, &ptnet_vnet_hdr,
579 		0, "Allow ptnet devices to use virtio-net headers");
580 
581 SYSEND;
582 
583 NMG_LOCK_T	netmap_global_lock;
584 
585 /*
586  * mark the ring as stopped, and run through the locks
587  * to make sure other users get to see it.
588  * stopped must be either NM_KR_STOPPED (for unbounded stop)
589  * or NM_KR_LOCKED (brief stop for mutual exclusion purposes)
590  */
591 static void
592 netmap_disable_ring(struct netmap_kring *kr, int stopped)
593 {
594 	nm_kr_stop(kr, stopped);
595 	// XXX check if nm_kr_stop is sufficient
596 	mtx_lock(&kr->q_lock);
597 	mtx_unlock(&kr->q_lock);
598 	nm_kr_put(kr);
599 }
600 
601 /* stop or enable a single ring */
602 void
603 netmap_set_ring(struct netmap_adapter *na, u_int ring_id, enum txrx t, int stopped)
604 {
605 	if (stopped)
606 		netmap_disable_ring(NMR(na, t)[ring_id], stopped);
607 	else
608 		NMR(na, t)[ring_id]->nkr_stopped = 0;
609 }
610 
611 
612 /* stop or enable all the rings of na */
613 void
614 netmap_set_all_rings(struct netmap_adapter *na, int stopped)
615 {
616 	int i;
617 	enum txrx t;
618 
619 	if (!nm_netmap_on(na))
620 		return;
621 
622 	if (netmap_verbose) {
623 		nm_prinf("%s: %sable all rings", na->name,
624 		    (stopped ? "dis" : "en"));
625 	}
626 	for_rx_tx(t) {
627 		for (i = 0; i < netmap_real_rings(na, t); i++) {
628 			netmap_set_ring(na, i, t, stopped);
629 		}
630 	}
631 }
632 
633 /*
634  * Convenience function used in drivers.  Waits for current txsync()s/rxsync()s
635  * to finish and prevents any new one from starting.  Call this before turning
636  * netmap mode off, or before removing the hardware rings (e.g., on module
637  * unload).
638  */
639 void
640 netmap_disable_all_rings(struct ifnet *ifp)
641 {
642 	if (NM_NA_VALID(ifp)) {
643 		netmap_set_all_rings(NA(ifp), NM_KR_LOCKED);
644 	}
645 }
646 
647 /*
648  * Convenience function used in drivers.  Re-enables rxsync and txsync on the
649  * adapter's rings.  In linux drivers, this should be placed near each
650  * napi_enable().
651  */
652 void
653 netmap_enable_all_rings(struct ifnet *ifp)
654 {
655 	if (NM_NA_VALID(ifp)) {
656 		netmap_set_all_rings(NA(ifp), 0 /* enabled */);
657 	}
658 }
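/*
 * Illustrative sketch (hypothetical "foo" driver): the intended pairing of
 * the two helpers above around a hardware reinitialization. The driver
 * lock and the init/stop routine names are assumptions.
 *
 *	netmap_disable_all_rings(sc->ifp);	// wait for running *xsync()s
 *	FOO_LOCK(sc);
 *	foo_stop(sc);
 *	foo_init_locked(sc);			// reprogram the hardware rings
 *	FOO_UNLOCK(sc);
 *	netmap_enable_all_rings(sc->ifp);	// allow new *xsync()s
 */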
659 
660 void
661 netmap_make_zombie(struct ifnet *ifp)
662 {
663 	if (NM_NA_VALID(ifp)) {
664 		struct netmap_adapter *na = NA(ifp);
665 		netmap_set_all_rings(na, NM_KR_LOCKED);
666 		na->na_flags |= NAF_ZOMBIE;
667 		netmap_set_all_rings(na, 0);
668 	}
669 }
670 
671 void
672 netmap_undo_zombie(struct ifnet *ifp)
673 {
674 	if (NM_NA_VALID(ifp)) {
675 		struct netmap_adapter *na = NA(ifp);
676 		if (na->na_flags & NAF_ZOMBIE) {
677 			netmap_set_all_rings(na, NM_KR_LOCKED);
678 			na->na_flags &= ~NAF_ZOMBIE;
679 			netmap_set_all_rings(na, 0);
680 		}
681 	}
682 }
683 
684 /*
685  * generic bounds-checking function
686  */
687 u_int
688 nm_bound_var(u_int *v, u_int dflt, u_int lo, u_int hi, const char *msg)
689 {
690 	u_int oldv = *v;
691 	const char *op = NULL;
692 
693 	if (dflt < lo)
694 		dflt = lo;
695 	if (dflt > hi)
696 		dflt = hi;
697 	if (oldv < lo) {
698 		*v = dflt;
699 		op = "Bump";
700 	} else if (oldv > hi) {
701 		*v = hi;
702 		op = "Clamp";
703 	}
704 	if (op && msg)
705 		nm_prinf("%s %s to %d (was %d)", op, msg, *v, oldv);
706 	return *v;
707 }
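/*
 * Typical use (bounds are illustrative only): clamp a tunable into a sane
 * range before using it.
 *
 *	u_int ringsize = netmap_generic_ringsize;
 *
 *	nm_bound_var(&ringsize, 1024, 64, 16384, "generic ringsize");
 */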
708 
709 
710 /*
711  * packet-dump function, using a user-supplied or static buffer.
712  * The destination buffer must be at least 30+4*len bytes.
713  */
714 const char *
715 nm_dump_buf(char *p, int len, int lim, char *dst)
716 {
717 	static char _dst[8192];
718 	int i, j, i0;
719 	static char hex[] ="0123456789abcdef";
720 	char *o;	/* output position */
721 
722 #define P_HI(x)	hex[((x) & 0xf0)>>4]
723 #define P_LO(x)	hex[((x) & 0xf)]
724 #define P_C(x)	((x) >= 0x20 && (x) <= 0x7e ? (x) : '.')
725 	if (!dst)
726 		dst = _dst;
727 	if (lim <= 0 || lim > len)
728 		lim = len;
729 	o = dst;
730 	sprintf(o, "buf 0x%p len %d lim %d\n", p, len, lim);
731 	o += strlen(o);
732 	/* hexdump routine */
733 	for (i = 0; i < lim; ) {
734 		sprintf(o, "%5d: ", i);
735 		o += strlen(o);
736 		memset(o, ' ', 48);
737 		i0 = i;
738 		for (j=0; j < 16 && i < lim; i++, j++) {
739 			o[j*3] = P_HI(p[i]);
740 			o[j*3+1] = P_LO(p[i]);
741 		}
742 		i = i0;
743 		for (j=0; j < 16 && i < lim; i++, j++)
744 			o[j + 48] = P_C(p[i]);
745 		o[j+48] = '\n';
746 		o += j+49;
747 	}
748 	*o = '\0';
749 #undef P_HI
750 #undef P_LO
751 #undef P_C
752 	return dst;
753 }
754 
755 
756 /*
757  * Fetch configuration from the device, to cope with dynamic
758  * reconfigurations after loading the module.
759  */
760 /* call with NMG_LOCK held */
761 int
762 netmap_update_config(struct netmap_adapter *na)
763 {
764 	struct nm_config_info info;
765 
766 	bzero(&info, sizeof(info));
767 	if (na->nm_config == NULL ||
768 	    na->nm_config(na, &info)) {
769 		/* take whatever we had at init time */
770 		info.num_tx_rings = na->num_tx_rings;
771 		info.num_tx_descs = na->num_tx_desc;
772 		info.num_rx_rings = na->num_rx_rings;
773 		info.num_rx_descs = na->num_rx_desc;
774 		info.rx_buf_maxsize = na->rx_buf_maxsize;
775 	}
776 
777 	if (na->num_tx_rings == info.num_tx_rings &&
778 	    na->num_tx_desc == info.num_tx_descs &&
779 	    na->num_rx_rings == info.num_rx_rings &&
780 	    na->num_rx_desc == info.num_rx_descs &&
781 	    na->rx_buf_maxsize == info.rx_buf_maxsize)
782 		return 0; /* nothing changed */
783 	if (na->active_fds == 0) {
784 		na->num_tx_rings = info.num_tx_rings;
785 		na->num_tx_desc = info.num_tx_descs;
786 		na->num_rx_rings = info.num_rx_rings;
787 		na->num_rx_desc = info.num_rx_descs;
788 		na->rx_buf_maxsize = info.rx_buf_maxsize;
789 		if (netmap_verbose)
790 			nm_prinf("configuration changed for %s: txring %d x %d, "
791 				"rxring %d x %d, rxbufsz %d",
792 				na->name, na->num_tx_rings, na->num_tx_desc,
793 				na->num_rx_rings, na->num_rx_desc, na->rx_buf_maxsize);
794 		return 0;
795 	}
796 	nm_prerr("WARNING: configuration changed for %s while active: "
797 		"txring %d x %d, rxring %d x %d, rxbufsz %d",
798 		na->name, info.num_tx_rings, info.num_tx_descs,
799 		info.num_rx_rings, info.num_rx_descs,
800 		info.rx_buf_maxsize);
801 	return 1;
802 }
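/*
 * Illustrative sketch (hypothetical "foo" driver): an nm_config callback
 * that reports the current hardware configuration, so that the function
 * above can detect reconfigurations. The softc fields are assumptions.
 *
 *	static int
 *	foo_netmap_config(struct netmap_adapter *na, struct nm_config_info *info)
 *	{
 *		struct foo_softc *sc = if_getsoftc(na->ifp);
 *
 *		info->num_tx_rings = sc->num_queues;
 *		info->num_rx_rings = sc->num_queues;
 *		info->num_tx_descs = sc->num_tx_desc;
 *		info->num_rx_descs = sc->num_rx_desc;
 *		info->rx_buf_maxsize = sc->rx_buf_size;
 *		return 0;
 *	}
 */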
803 
804 /* nm_sync callbacks for the host rings */
805 static int netmap_txsync_to_host(struct netmap_kring *kring, int flags);
806 static int netmap_rxsync_from_host(struct netmap_kring *kring, int flags);
807 
808 static int
809 netmap_default_bufcfg(struct netmap_kring *kring, uint64_t target)
810 {
811 	kring->hwbuf_len = target;
812 	kring->buf_align = 0; /* no alignment */
813 	return 0;
814 }
815 
816 /* create the krings array and initialize the fields common to all adapters.
817  * The array layout is this:
818  *
819  *                    +----------+
820  * na->tx_rings ----->|          | \
821  *                    |          |  } na->num_tx_rings
822  *                    |          | /
823  *                    +----------+
824  *                    |          |    host tx kring
825  * na->rx_rings ----> +----------+
826  *                    |          | \
827  *                    |          |  } na->num_rx_rings
828  *                    |          | /
829  *                    +----------+
830  *                    |          |    host rx kring
831  *                    +----------+
832  * na->tailroom ----->|          | \
833  *                    |          |  } tailroom bytes
834  *                    |          | /
835  *                    +----------+
836  *
837  * Note: for compatibility, host krings are created even when not needed.
838  * The tailroom space is currently used by vale ports for allocating leases.
839  */
840 /* call with NMG_LOCK held */
841 int
842 netmap_krings_create(struct netmap_adapter *na, u_int tailroom)
843 {
844 	u_int i, len, ndesc;
845 	struct netmap_kring *kring;
846 	u_int n[NR_TXRX];
847 	enum txrx t;
848 	int err = 0;
849 
850 	if (na->tx_rings != NULL) {
851 		if (netmap_debug & NM_DEBUG_ON)
852 			nm_prerr("warning: krings were already created");
853 		return 0;
854 	}
855 
856 	/* account for the (possibly fake) host rings */
857 	n[NR_TX] = netmap_all_rings(na, NR_TX);
858 	n[NR_RX] = netmap_all_rings(na, NR_RX);
859 
860 	len = (n[NR_TX] + n[NR_RX]) *
861 		(sizeof(struct netmap_kring) + sizeof(struct netmap_kring *))
862 		+ tailroom;
863 
864 	na->tx_rings = nm_os_malloc((size_t)len);
865 	if (na->tx_rings == NULL) {
866 		nm_prerr("Cannot allocate krings");
867 		return ENOMEM;
868 	}
869 	na->rx_rings = na->tx_rings + n[NR_TX];
870 	na->tailroom = na->rx_rings + n[NR_RX];
871 
872 	/* link the krings in the krings array */
873 	kring = (struct netmap_kring *)((char *)na->tailroom + tailroom);
874 	for (i = 0; i < n[NR_TX] + n[NR_RX]; i++) {
875 		na->tx_rings[i] = kring;
876 		kring++;
877 	}
878 
879 	/*
880 	 * All fields in krings are 0 except the ones initialized below,
881 	 * but it is better to be explicit on important kring fields.
882 	 */
883 	for_rx_tx(t) {
884 		ndesc = nma_get_ndesc(na, t);
885 		for (i = 0; i < n[t]; i++) {
886 			kring = NMR(na, t)[i];
887 			bzero(kring, sizeof(*kring));
888 			kring->notify_na = na;
889 			kring->ring_id = i;
890 			kring->tx = t;
891 			kring->nkr_num_slots = ndesc;
892 			kring->nr_mode = NKR_NETMAP_OFF;
893 			kring->nr_pending_mode = NKR_NETMAP_OFF;
894 			if (i < nma_get_nrings(na, t)) {
895 				kring->nm_sync = (t == NR_TX ? na->nm_txsync : na->nm_rxsync);
896 				kring->nm_bufcfg = na->nm_bufcfg;
897 				if (kring->nm_bufcfg == NULL)
898 					kring->nm_bufcfg = netmap_default_bufcfg;
899 			} else {
900 				if (!(na->na_flags & NAF_HOST_RINGS))
901 					kring->nr_kflags |= NKR_FAKERING;
902 				kring->nm_sync = (t == NR_TX ?
903 						netmap_txsync_to_host:
904 						netmap_rxsync_from_host);
905 				kring->nm_bufcfg = netmap_default_bufcfg;
906 			}
907 			kring->nm_notify = na->nm_notify;
908 			kring->rhead = kring->rcur = kring->nr_hwcur = 0;
909 			/*
910 			 * IMPORTANT: Always keep one slot empty.
911 			 */
912 			kring->rtail = kring->nr_hwtail = (t == NR_TX ? ndesc - 1 : 0);
913 			snprintf(kring->name, sizeof(kring->name) - 1, "%s %s%d", na->name,
914 					nm_txrx2str(t), i);
915 			nm_prdis("ktx %s h %d c %d t %d",
916 				kring->name, kring->rhead, kring->rcur, kring->rtail);
917 			err = nm_os_selinfo_init(&kring->si, kring->name);
918 			if (err) {
919 				netmap_krings_delete(na);
920 				return err;
921 			}
922 			mtx_init(&kring->q_lock, (t == NR_TX ? "nm_txq_lock" : "nm_rxq_lock"), NULL, MTX_DEF);
923 			kring->na = na;	/* setting this field marks the mutex as initialized */
924 		}
925 		err = nm_os_selinfo_init(&na->si[t], na->name);
926 		if (err) {
927 			netmap_krings_delete(na);
928 			return err;
929 		}
930 	}
931 
932 	return 0;
933 }
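/*
 * Illustrative sketch: most nm_krings_create callbacks are thin wrappers
 * around the function above. A hypothetical hardware driver that needs no
 * tailroom could simply do:
 *
 *	static int
 *	foo_netmap_krings_create(struct netmap_adapter *na)
 *	{
 *		return netmap_krings_create(na, 0);	// no extra tailroom
 *	}
 */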
934 
935 
936 /* undo the actions performed by netmap_krings_create */
937 /* call with NMG_LOCK held */
938 void
939 netmap_krings_delete(struct netmap_adapter *na)
940 {
941 	struct netmap_kring **kring = na->tx_rings;
942 	enum txrx t;
943 
944 	if (na->tx_rings == NULL) {
945 		if (netmap_debug & NM_DEBUG_ON)
946 			nm_prerr("warning: krings were already deleted");
947 		return;
948 	}
949 
950 	for_rx_tx(t)
951 		nm_os_selinfo_uninit(&na->si[t]);
952 
953 	/* we rely on the krings layout described above */
954 	for ( ; kring != na->tailroom; kring++) {
955 		if ((*kring)->na != NULL)
956 			mtx_destroy(&(*kring)->q_lock);
957 		nm_os_selinfo_uninit(&(*kring)->si);
958 	}
959 	nm_os_free(na->tx_rings);
960 	na->tx_rings = na->rx_rings = na->tailroom = NULL;
961 }
962 
963 
964 /*
965  * Destructor for NIC ports. They also have an mbuf queue
966  * on the rings connected to the host so we need to purge
967  * them first.
968  */
969 /* call with NMG_LOCK held */
970 void
971 netmap_hw_krings_delete(struct netmap_adapter *na)
972 {
973 	u_int lim = netmap_real_rings(na, NR_RX), i;
974 
975 	for (i = nma_get_nrings(na, NR_RX); i < lim; i++) {
976 		struct mbq *q = &NMR(na, NR_RX)[i]->rx_queue;
977 		nm_prdis("destroy sw mbq with len %d", mbq_len(q));
978 		mbq_purge(q);
979 		mbq_safe_fini(q);
980 	}
981 	netmap_krings_delete(na);
982 }
983 
984 void
985 netmap_mem_restore(struct netmap_adapter *na)
986 {
987 	if (na->nm_mem_prev) {
988 		netmap_mem_put(na->nm_mem);
989 		na->nm_mem = na->nm_mem_prev;
990 		na->nm_mem_prev = NULL;
991 	}
992 }
993 
994 static void
995 netmap_mem_drop(struct netmap_adapter *na)
996 {
997 	/* if the native allocator had been overridden on regif,
998 	 * restore it now and drop the temporary one
999 	 */
1000 	if (netmap_mem_deref(na->nm_mem, na)) {
1001 		netmap_mem_restore(na);
1002 	}
1003 }
1004 
1005 static void
1006 netmap_update_hostrings_mode(struct netmap_adapter *na)
1007 {
1008 	enum txrx t;
1009 	struct netmap_kring *kring;
1010 	int i;
1011 
1012 	for_rx_tx(t) {
1013 		for (i = nma_get_nrings(na, t);
1014 		     i < netmap_real_rings(na, t); i++) {
1015 			kring = NMR(na, t)[i];
1016 			kring->nr_mode = kring->nr_pending_mode;
1017 		}
1018 	}
1019 }
1020 
1021 /*
1022  * Undo everything that was done in netmap_do_regif(). In particular,
1023  * call nm_register(ifp,0) to stop netmap mode on the interface and
1024  * revert to normal operation.
1025  */
1026 /* call with NMG_LOCK held */
1027 static void netmap_unset_ringid(struct netmap_priv_d *);
1028 static void netmap_krings_put(struct netmap_priv_d *);
1029 void
1030 netmap_do_unregif(struct netmap_priv_d *priv)
1031 {
1032 	struct netmap_adapter *na = priv->np_na;
1033 
1034 	NMG_LOCK_ASSERT();
1035 	na->active_fds--;
1036 	/* unset nr_pending_mode and possibly release exclusive mode */
1037 	netmap_krings_put(priv);
1038 
1039 #ifdef	WITH_MONITOR
1040 	/* XXX check whether we have to do something with monitor
1041 	 * when rings change nr_mode. */
1042 	if (na->active_fds <= 0) {
1043 		/* walk through all the rings and tell any monitor
1044 		 * that the port is going to exit netmap mode
1045 		 */
1046 		netmap_monitor_stop(na);
1047 	}
1048 #endif
1049 
1050 	if (na->active_fds <= 0 || nm_kring_pending(priv)) {
1051 		netmap_set_all_rings(na, NM_KR_LOCKED);
1052 		na->nm_register(na, 0);
1053 		netmap_set_all_rings(na, 0);
1054 	}
1055 
1056 	/* delete rings and buffers that are no longer needed */
1057 	netmap_mem_rings_delete(na);
1058 
1059 	if (na->active_fds <= 0) {	/* last instance */
1060 		/*
1061 		 * (TO CHECK) We enter here
1062 		 * when the last reference to this file descriptor goes
1063 		 * away. This means we cannot have any pending poll()
1064 		 * or interrupt routine operating on the structure.
1065 		 * XXX The file may be closed in a thread while
1066 		 * another thread is using it.
1067 		 * Linux keeps the file opened until the last reference
1068 		 * by any outstanding ioctl/poll or mmap is gone.
1069 		 * FreeBSD does not track mmap()s (but we do) and
1070 		 * wakes up any sleeping poll(). Need to check what
1071 		 * happens if the close() occurs while a concurrent
1072 		 * syscall is running.
1073 		 */
1074 		if (netmap_debug & NM_DEBUG_ON)
1075 			nm_prinf("deleting last instance for %s", na->name);
1076 
1077 		if (nm_netmap_on(na)) {
1078 			nm_prerr("BUG: netmap on while going to delete the krings");
1079 		}
1080 
1081 		na->nm_krings_delete(na);
1082 
1083 		/* restore the default number of host tx and rx rings */
1084 		if (na->na_flags & NAF_HOST_RINGS) {
1085 			na->num_host_tx_rings = 1;
1086 			na->num_host_rx_rings = 1;
1087 		} else {
1088 			na->num_host_tx_rings = 0;
1089 			na->num_host_rx_rings = 0;
1090 		}
1091 	}
1092 
1093 	/* possibly decrement counter of tx_si/rx_si users */
1094 	netmap_unset_ringid(priv);
1095 	/* delete the nifp */
1096 	netmap_mem_if_delete(na, priv->np_nifp);
1097 	/* drop the allocator */
1098 	netmap_mem_drop(na);
1099 	/* mark the priv as unregistered */
1100 	priv->np_na = NULL;
1101 	priv->np_nifp = NULL;
1102 }
1103 
1104 struct netmap_priv_d*
1105 netmap_priv_new(void)
1106 {
1107 	struct netmap_priv_d *priv;
1108 
1109 	priv = nm_os_malloc(sizeof(struct netmap_priv_d));
1110 	if (priv == NULL)
1111 		return NULL;
1112 	priv->np_refs = 1;
1113 	nm_os_get_module();
1114 	return priv;
1115 }
1116 
1117 /*
1118  * Destructor of the netmap_priv_d, called when the fd is closed.
1119  * Action: undo all the things done by NIOCREGIF.
1120  * On FreeBSD we need to track whether there are active mmap()s,
1121  * and we use np_active_mmaps for that. On linux, the field is always 0.
1122  * Return: 1 if we can free priv, 0 otherwise.
1123  *
1124  */
1125 /* call with NMG_LOCK held */
1126 void
1127 netmap_priv_delete(struct netmap_priv_d *priv)
1128 {
1129 	struct netmap_adapter *na = priv->np_na;
1130 
1131 	/* number of active references to this fd */
1132 	if (--priv->np_refs > 0) {
1133 		return;
1134 	}
1135 	nm_os_put_module();
1136 	if (na) {
1137 		netmap_do_unregif(priv);
1138 	}
1139 	netmap_unget_na(na, priv->np_ifp);
1140 	bzero(priv, sizeof(*priv));	/* for safety */
1141 	nm_os_free(priv);
1142 }
1143 
1144 
1145 /* call with NMG_LOCK *not* held */
1146 void
1147 netmap_dtor(void *data)
1148 {
1149 	struct netmap_priv_d *priv = data;
1150 
1151 	NMG_LOCK();
1152 	netmap_priv_delete(priv);
1153 	NMG_UNLOCK();
1154 }
1155 
1156 
1157 /*
1158  * Handlers for synchronization of the rings from/to the host stack.
1159  * These are associated to a network interface and are just another
1160  * ring pair managed by userspace.
1161  *
1162  * Netmap also supports transparent forwarding (NS_FORWARD and NR_FORWARD
1163  * flags):
1164  *
1165  * - Before releasing buffers on hw RX rings, the application can mark
1166  *   them with the NS_FORWARD flag. During the next RXSYNC or poll(), they
1167  *   will be forwarded to the host stack, similarly to what would happen
1168  *   if the application had moved them to the host TX ring.
1169  *
1170  * - Before releasing buffers on the host RX ring, the application can
1171  *   mark them with the NS_FORWARD flag. During the next RXSYNC or poll(),
1172  *   they will be forwarded to the hw TX rings, saving the application
1173  *   from doing the same task in user-space.
1174  *
1175  * Transparent forwarding can be enabled per-ring, by setting the NR_FORWARD
1176  * flag, or globally with the netmap_fwd sysctl.
1177  *
1178  * The transfer NIC --> host is relatively easy, just encapsulate
1179  * into mbufs and we are done. The host --> NIC side is slightly
1180  * harder because there might not be room in the tx ring so it
1181  * might take a while before releasing the buffer.
1182  */
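/*
 * Illustrative sketch (userspace, not part of this file): marking packets
 * on a hw RX ring for transparent forwarding to the host stack. The
 * consumed_locally() predicate is a hypothetical application function;
 * alternatively to setting NR_FORWARD in ring->flags, the dev.netmap.fwd
 * sysctl forwards everything.
 *
 *	struct netmap_ring *ring = NETMAP_RXRING(nifp, 0);
 *
 *	ring->flags |= NR_FORWARD;
 *	while (!nm_ring_empty(ring)) {
 *		struct netmap_slot *slot = &ring->slot[ring->cur];
 *
 *		if (!consumed_locally(NETMAP_BUF(ring, slot->buf_idx), slot->len))
 *			slot->flags |= NS_FORWARD;
 *		ring->head = ring->cur = nm_ring_next(ring, ring->cur);
 *	}
 *	ioctl(fd, NIOCRXSYNC, NULL);	// forwarding happens during this sync
 */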
1183 
1184 
1185 /*
1186  * Pass a whole queue of mbufs to the host stack as coming from 'dst'.
1187  * We do not need to lock because the queue is private.
1188  * After this call the queue is empty.
1189  */
1190 static void
1191 netmap_send_up(struct ifnet *dst, struct mbq *q)
1192 {
1193 	struct mbuf *m;
1194 	struct mbuf *head = NULL, *prev = NULL;
1195 #ifdef __FreeBSD__
1196 	struct epoch_tracker et;
1197 
1198 	NET_EPOCH_ENTER(et);
1199 #endif /* __FreeBSD__ */
1200 	/* Send packets up, outside the lock; head/prev machinery
1201 	 * is only useful for Windows. */
1202 	while ((m = mbq_dequeue(q)) != NULL) {
1203 		if (netmap_debug & NM_DEBUG_HOST)
1204 			nm_prinf("sending up pkt %p size %d", m, MBUF_LEN(m));
1205 		prev = nm_os_send_up(dst, m, prev);
1206 		if (head == NULL)
1207 			head = prev;
1208 	}
1209 	if (head)
1210 		nm_os_send_up(dst, NULL, head);
1211 #ifdef __FreeBSD__
1212 	NET_EPOCH_EXIT(et);
1213 #endif /* __FreeBSD__ */
1214 	mbq_fini(q);
1215 }
1216 
1217 
1218 /*
1219  * Scan the buffers from hwcur to ring->head, and put a copy of those
1220  * marked NS_FORWARD (or all of them if forced) into a queue of mbufs.
1221  * Drop remaining packets in the unlikely event
1222  * of an mbuf shortage.
1223  */
1224 static void
1225 netmap_grab_packets(struct netmap_kring *kring, struct mbq *q, int force)
1226 {
1227 	u_int const lim = kring->nkr_num_slots - 1;
1228 	u_int const head = kring->rhead;
1229 	u_int n;
1230 	struct netmap_adapter *na = kring->na;
1231 
1232 	for (n = kring->nr_hwcur; n != head; n = nm_next(n, lim)) {
1233 		struct mbuf *m;
1234 		struct netmap_slot *slot = &kring->ring->slot[n];
1235 
1236 		if ((slot->flags & NS_FORWARD) == 0 && !force)
1237 			continue;
1238 		if (slot->len < 14 || slot->len > NETMAP_BUF_SIZE(na)) {
1239 			nm_prlim(5, "bad pkt at %d len %d", n, slot->len);
1240 			continue;
1241 		}
1242 		slot->flags &= ~NS_FORWARD; // XXX needed ?
1243 		/* XXX TODO: adapt to the case of a multisegment packet */
1244 		m = m_devget(NMB(na, slot), slot->len, 0, na->ifp, NULL);
1245 
1246 		if (m == NULL)
1247 			break;
1248 		mbq_enqueue(q, m);
1249 	}
1250 }
1251 
1252 static inline int
1253 _nm_may_forward(struct netmap_kring *kring)
1254 {
1255 	return	((netmap_fwd || kring->ring->flags & NR_FORWARD) &&
1256 		 kring->na->na_flags & NAF_HOST_RINGS &&
1257 		 kring->tx == NR_RX);
1258 }
1259 
1260 static inline int
1261 nm_may_forward_up(struct netmap_kring *kring)
1262 {
1263 	return	_nm_may_forward(kring) &&
1264 		 kring->ring_id != kring->na->num_rx_rings;
1265 }
1266 
1267 static inline int
1268 nm_may_forward_down(struct netmap_kring *kring, int sync_flags)
1269 {
1270 	return	_nm_may_forward(kring) &&
1271 		 (sync_flags & NAF_CAN_FORWARD_DOWN) &&
1272 		 kring->ring_id == kring->na->num_rx_rings;
1273 }
1274 
1275 /*
1276  * Send to the NIC rings packets marked NS_FORWARD between
1277  * kring->nr_hwcur and kring->rhead.
1278  * Called under kring->rx_queue.lock on the sw rx ring.
1279  *
1280  * It can only be called if the user opened all the TX hw rings,
1281  * see NAF_CAN_FORWARD_DOWN flag.
1282  * We can touch the TX netmap rings (slots, head and cur) since
1283  * we are in poll/ioctl system call context, and the application
1284  * is not supposed to touch the ring (using a different thread)
1285  * during the execution of the system call.
1286  */
1287 static u_int
1288 netmap_sw_to_nic(struct netmap_adapter *na)
1289 {
1290 	struct netmap_kring *kring = na->rx_rings[na->num_rx_rings];
1291 	struct netmap_slot *rxslot = kring->ring->slot;
1292 	u_int i, rxcur = kring->nr_hwcur;
1293 	u_int const head = kring->rhead;
1294 	u_int const src_lim = kring->nkr_num_slots - 1;
1295 	u_int sent = 0;
1296 
1297 	/* scan rings to find space, then fill as much as possible */
1298 	for (i = 0; i < na->num_tx_rings; i++) {
1299 		struct netmap_kring *kdst = na->tx_rings[i];
1300 		struct netmap_ring *rdst = kdst->ring;
1301 		u_int const dst_lim = kdst->nkr_num_slots - 1;
1302 
1303 		/* XXX do we trust ring or kring->rcur,rtail ? */
1304 		for (; rxcur != head && !nm_ring_empty(rdst);
1305 		     rxcur = nm_next(rxcur, src_lim) ) {
1306 			struct netmap_slot *src, *dst, tmp;
1307 			u_int dst_head = rdst->head;
1308 
1309 			src = &rxslot[rxcur];
1310 			if ((src->flags & NS_FORWARD) == 0 && !netmap_fwd)
1311 				continue;
1312 
1313 			sent++;
1314 
1315 			dst = &rdst->slot[dst_head];
1316 
1317 			tmp = *src;
1318 
1319 			src->buf_idx = dst->buf_idx;
1320 			src->flags = NS_BUF_CHANGED;
1321 
1322 			dst->buf_idx = tmp.buf_idx;
1323 			dst->len = tmp.len;
1324 			dst->flags = NS_BUF_CHANGED;
1325 
1326 			rdst->head = rdst->cur = nm_next(dst_head, dst_lim);
1327 		}
1328 		/* if (sent) XXX txsync ? it would be just an optimization */
1329 	}
1330 	return sent;
1331 }
1332 
1333 
1334 /*
1335  * netmap_txsync_to_host() passes packets up. We are called from a
1336  * system call in user process context, and the only contention
1337  * can be among multiple user threads erroneously calling
1338  * this routine concurrently.
1339  */
1340 static int
1341 netmap_txsync_to_host(struct netmap_kring *kring, int flags)
1342 {
1343 	struct netmap_adapter *na = kring->na;
1344 	u_int const lim = kring->nkr_num_slots - 1;
1345 	u_int const head = kring->rhead;
1346 	struct mbq q;
1347 
1348 	/* Take packets from hwcur to head and pass them up.
1349 	 * Force hwcur = head since netmap_grab_packets() stops at head
1350 	 */
1351 	mbq_init(&q);
1352 	netmap_grab_packets(kring, &q, 1 /* force */);
1353 	nm_prdis("have %d pkts in queue", mbq_len(&q));
1354 	kring->nr_hwcur = head;
1355 	kring->nr_hwtail = head + lim;
1356 	if (kring->nr_hwtail > lim)
1357 		kring->nr_hwtail -= lim + 1;
1358 
1359 	netmap_send_up(na->ifp, &q);
1360 	return 0;
1361 }
1362 
1363 
1364 /*
1365  * rxsync backend for packets coming from the host stack.
1366  * They have been put in kring->rx_queue by netmap_transmit().
1367  * We protect access to the kring using kring->rx_queue.lock
1368  *
1369  * also moves to the nic hw rings any packet the user has marked
1370  * for transparent-mode forwarding, then sets the NR_FORWARD
1371  * flag in the kring to let the caller push them out
1372  */
1373 static int
1374 netmap_rxsync_from_host(struct netmap_kring *kring, int flags)
1375 {
1376 	struct netmap_adapter *na = kring->na;
1377 	struct netmap_ring *ring = kring->ring;
1378 	u_int nm_i, n;
1379 	u_int const lim = kring->nkr_num_slots - 1;
1380 	u_int const head = kring->rhead;
1381 	int ret = 0;
1382 	struct mbq *q = &kring->rx_queue, fq;
1383 
1384 	mbq_init(&fq); /* fq holds packets to be freed */
1385 
1386 	mbq_lock(q);
1387 
1388 	/* First part: import newly received packets */
1389 	n = mbq_len(q);
1390 	if (n) { /* grab packets from the queue */
1391 		struct mbuf *m;
1392 		uint32_t stop_i;
1393 
1394 		nm_i = kring->nr_hwtail;
1395 		stop_i = nm_prev(kring->nr_hwcur, lim);
1396 		while ( nm_i != stop_i && (m = mbq_dequeue(q)) != NULL ) {
1397 			int len = MBUF_LEN(m);
1398 			struct netmap_slot *slot = &ring->slot[nm_i];
1399 
1400 			m_copydata(m, 0, len, NMB(na, slot));
1401 			nm_prdis("nm %d len %d", nm_i, len);
1402 			if (netmap_debug & NM_DEBUG_HOST)
1403 				nm_prinf("%s", nm_dump_buf(NMB(na, slot),len, 128, NULL));
1404 
1405 			slot->len = len;
1406 			slot->flags = 0;
1407 			nm_i = nm_next(nm_i, lim);
1408 			mbq_enqueue(&fq, m);
1409 		}
1410 		kring->nr_hwtail = nm_i;
1411 	}
1412 
1413 	/*
1414 	 * Second part: skip past packets that userspace has released.
1415 	 */
1416 	nm_i = kring->nr_hwcur;
1417 	if (nm_i != head) { /* something was released */
1418 		if (nm_may_forward_down(kring, flags)) {
1419 			ret = netmap_sw_to_nic(na);
1420 			if (ret > 0) {
1421 				kring->nr_kflags |= NR_FORWARD;
1422 				ret = 0;
1423 			}
1424 		}
1425 		kring->nr_hwcur = head;
1426 	}
1427 
1428 	mbq_unlock(q);
1429 
1430 	mbq_purge(&fq);
1431 	mbq_fini(&fq);
1432 
1433 	return ret;
1434 }
1435 
1436 
1437 /* Get a netmap adapter for the port.
1438  *
1439  * If it is possible to satisfy the request, return 0
1440  * with *na containing the netmap adapter found.
1441  * Otherwise return an error code, with *na containing NULL.
1442  *
1443  * When the port is attached to a bridge, we always return
1444  * EBUSY.
1445  * Otherwise, if the port is already bound to a file descriptor,
1446  * then we unconditionally return the existing adapter into *na.
1447  * In all the other cases, we return (into *na) either native,
1448  * generic or NULL, according to the following table:
1449  *
1450  *					native_support
1451  * active_fds   dev.netmap.admode         YES     NO
1452  * -------------------------------------------------------
1453  *    >0              *                 NA(ifp) NA(ifp)
1454  *
1455  *     0        NETMAP_ADMODE_BEST      NATIVE  GENERIC
1456  *     0        NETMAP_ADMODE_NATIVE    NATIVE   NULL
1457  *     0        NETMAP_ADMODE_GENERIC   GENERIC GENERIC
1458  *
1459  */
1460 static void netmap_hw_dtor(struct netmap_adapter *); /* needed by NM_IS_NATIVE() */
1461 int
1462 netmap_get_hw_na(struct ifnet *ifp, struct netmap_mem_d *nmd, struct netmap_adapter **na)
1463 {
1464 	/* generic support */
1465 	int i = netmap_admode;	/* Take a snapshot. */
1466 	struct netmap_adapter *prev_na;
1467 	int error = 0;
1468 
1469 	*na = NULL; /* default */
1470 
1471 	/* reset in case of invalid value */
1472 	if (i < NETMAP_ADMODE_BEST || i >= NETMAP_ADMODE_LAST)
1473 		i = netmap_admode = NETMAP_ADMODE_BEST;
1474 
1475 	if (NM_NA_VALID(ifp)) {
1476 		prev_na = NA(ifp);
1477 		/* If an adapter already exists, return it if
1478 		 * there are active file descriptors or if
1479 		 * netmap is not forced to use generic
1480 		 * adapters.
1481 		 */
1482 		if (NETMAP_OWNED_BY_ANY(prev_na)
1483 			|| i != NETMAP_ADMODE_GENERIC
1484 			|| prev_na->na_flags & NAF_FORCE_NATIVE
1485 #ifdef WITH_PIPES
1486 			/* ugly, but we cannot allow an adapter switch
1487 			 * if some pipe is referring to this one
1488 			 */
1489 			|| prev_na->na_next_pipe > 0
1490 #endif
1491 		) {
1492 			*na = prev_na;
1493 			goto assign_mem;
1494 		}
1495 	}
1496 
1497 	/* If there isn't native support and netmap is not allowed
1498 	 * to use generic adapters, we cannot satisfy the request.
1499 	 */
1500 	if (!NM_IS_NATIVE(ifp) && i == NETMAP_ADMODE_NATIVE)
1501 		return EOPNOTSUPP;
1502 
1503 	/* Otherwise, create a generic adapter and return it,
1504 	 * saving the previously used netmap adapter, if any.
1505 	 *
1506 	 * Note that here 'prev_na', if not NULL, MUST be a
1507 	 * native adapter, and CANNOT be a generic one. This is
1508 	 * true because generic adapters are created on demand, and
1509 	 * destroyed when not used anymore. Therefore, if the adapter
1510 	 * currently attached to an interface 'ifp' is generic, it
1511 	 * must be that
1512 	 * (NA(ifp)->active_fds > 0 || NETMAP_OWNED_BY_KERN(NA(ifp))).
1513 	 * Consequently, if NA(ifp) is generic, we will enter one of
1514 	 * the branches above. This ensures that we never override
1515 	 * a generic adapter with another generic adapter.
1516 	 */
1517 	error = generic_netmap_attach(ifp);
1518 	if (error)
1519 		return error;
1520 
1521 	*na = NA(ifp);
1522 
1523 assign_mem:
1524 	if (nmd != NULL && !((*na)->na_flags & NAF_MEM_OWNER) &&
1525 	    (*na)->active_fds == 0 && ((*na)->nm_mem != nmd)) {
1526 		(*na)->nm_mem_prev = (*na)->nm_mem;
1527 		(*na)->nm_mem = netmap_mem_get(nmd);
1528 	}
1529 
1530 	return 0;
1531 }
1532 
1533 /*
1534  * MUST BE CALLED UNDER NMG_LOCK()
1535  *
1536  * Get a refcounted reference to a netmap adapter attached
1537  * to the interface specified by req.
1538  * This is always called in the execution of an ioctl().
1539  *
1540  * Return ENXIO if the interface specified by the request does
1541  * not exist, ENOTSUP if netmap is not supported by the interface,
1542  * EBUSY if the interface is already attached to a bridge,
1543  * EINVAL if parameters are invalid, ENOMEM if needed resources
1544  * could not be allocated.
1545  * If successful, hold a reference to the netmap adapter.
1546  *
1547  * If the interface specified by req is a system one, also keep
1548  * a reference to it and return a valid *ifp.
1549  */
1550 int
1551 netmap_get_na(struct nmreq_header *hdr,
1552 	      struct netmap_adapter **na, struct ifnet **ifp,
1553 	      struct netmap_mem_d *nmd, int create)
1554 {
1555 	struct nmreq_register *req = (struct nmreq_register *)(uintptr_t)hdr->nr_body;
1556 	int error = 0;
1557 	struct netmap_adapter *ret = NULL;
1558 	int nmd_ref = 0;
1559 
1560 	*na = NULL;     /* default return value */
1561 	*ifp = NULL;
1562 
1563 	if (hdr->nr_reqtype != NETMAP_REQ_REGISTER) {
1564 		return EINVAL;
1565 	}
1566 
1567 	if (req->nr_mode == NR_REG_PIPE_MASTER ||
1568 			req->nr_mode == NR_REG_PIPE_SLAVE) {
1569 		/* Do not accept deprecated pipe modes. */
1570 		nm_prerr("Deprecated pipe nr_mode, use xx{yy or xx}yy syntax");
1571 		return EINVAL;
1572 	}
1573 
1574 	NMG_LOCK_ASSERT();
1575 
1576 	/* if the request contains a memid, try to find the
1577 	 * corresponding memory region
1578 	 */
1579 	if (nmd == NULL && req->nr_mem_id) {
1580 		nmd = netmap_mem_find(req->nr_mem_id);
1581 		if (nmd == NULL)
1582 			return EINVAL;
1583 		/* keep the reference */
1584 		nmd_ref = 1;
1585 	}
1586 
1587 	/* We cascade through all possible types of netmap adapter.
1588 	 * All netmap_get_*_na() functions return an error and an na,
1589 	 * with the following combinations:
1590 	 *
1591 	 * error    na
1592 	 *   0	   NULL		type doesn't match
1593 	 *  !0	   NULL		type matches, but na creation/lookup failed
1594 	 *   0	  !NULL		type matches and na created/found
1595 	 *  !0    !NULL		impossible
1596 	 */
1597 	error = netmap_get_null_na(hdr, na, nmd, create);
1598 	if (error || *na != NULL)
1599 		goto out;
1600 
1601 	/* try to see if this is a monitor port */
1602 	error = netmap_get_monitor_na(hdr, na, nmd, create);
1603 	if (error || *na != NULL)
1604 		goto out;
1605 
1606 	/* try to see if this is a pipe port */
1607 	error = netmap_get_pipe_na(hdr, na, nmd, create);
1608 	if (error || *na != NULL)
1609 		goto out;
1610 
1611 	/* try to see if this is a vale port */
1612 	error = netmap_get_vale_na(hdr, na, nmd, create);
1613 	if (error)
1614 		goto out;
1615 
1616 	if (*na != NULL) /* valid match in netmap_get_bdg_na() */
1617 		goto out;
1618 
1619 	/*
1620 	 * This must be a hardware na, lookup the name in the system.
1621 	 * Note that by hardware we actually mean "it shows up in ifconfig".
1622 	 * This may still be a tap, a veth/epair, or even a
1623 	 * persistent VALE port.
1624 	 */
1625 	*ifp = ifunit_ref(hdr->nr_name);
1626 	if (*ifp == NULL) {
1627 		error = ENXIO;
1628 		goto out;
1629 	}
1630 
1631 	error = netmap_get_hw_na(*ifp, nmd, &ret);
1632 	if (error)
1633 		goto out;
1634 
1635 	*na = ret;
1636 	netmap_adapter_get(ret);
1637 
1638 	/*
1639 	 * if the adapter supports the host rings and it is not already open,
1640 	 * try to set the number of host rings as requested by the user
1641 	 */
1642 	if (((*na)->na_flags & NAF_HOST_RINGS) && (*na)->active_fds == 0) {
1643 		if (req->nr_host_tx_rings)
1644 			(*na)->num_host_tx_rings = req->nr_host_tx_rings;
1645 		if (req->nr_host_rx_rings)
1646 			(*na)->num_host_rx_rings = req->nr_host_rx_rings;
1647 	}
1648 	nm_prdis("%s: host tx %d rx %u", (*na)->name, (*na)->num_host_tx_rings,
1649 			(*na)->num_host_rx_rings);
1650 
1651 out:
1652 	if (error) {
1653 		if (ret)
1654 			netmap_adapter_put(ret);
1655 		if (*ifp) {
1656 			if_rele(*ifp);
1657 			*ifp = NULL;
1658 		}
1659 	}
1660 	if (nmd_ref)
1661 		netmap_mem_put(nmd);
1662 
1663 	return error;
1664 }
1665 
1666 /* undo netmap_get_na() */
1667 void
1668 netmap_unget_na(struct netmap_adapter *na, struct ifnet *ifp)
1669 {
1670 	if (ifp)
1671 		if_rele(ifp);
1672 	if (na)
1673 		netmap_adapter_put(na);
1674 }
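
/*
 * Illustrative sketch (documentation only, not compiled): how a caller
 * typically pairs netmap_get_na() with netmap_unget_na().  The pattern
 * mirrors the NIOCCTRL handling in netmap_ioctl() below; 'hdr' is
 * assumed to be an already validated request header.
 */
#if 0
	struct netmap_adapter *na = NULL;
	struct ifnet *ifp = NULL;
	int error;

	NMG_LOCK();
	/* hdr carries a struct nmreq_register body (NETMAP_REQ_REGISTER) */
	error = netmap_get_na(hdr, &na, &ifp, NULL /* nmd */, 1 /* create */);
	if (error == 0) {
		/* ... use na while holding the reference ... */
		netmap_unget_na(na, ifp);	/* drop both references */
	}
	NMG_UNLOCK();
#endif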
1675 
1676 
1677 #define NM_FAIL_ON(t) do {						\
1678 	if (unlikely(t)) {						\
1679 		nm_prlim(5, "%s: fail '" #t "' "				\
1680 			"h %d c %d t %d "				\
1681 			"rh %d rc %d rt %d "				\
1682 			"hc %d ht %d",					\
1683 			kring->name,					\
1684 			head, cur, ring->tail,				\
1685 			kring->rhead, kring->rcur, kring->rtail,	\
1686 			kring->nr_hwcur, kring->nr_hwtail);		\
1687 		return kring->nkr_num_slots;				\
1688 	}								\
1689 } while (0)
1690 
1691 /*
1692  * validate parameters on entry for *_txsync()
1693  * Returns ring->cur if ok, or something >= kring->nkr_num_slots
1694  * in case of error.
1695  *
1696  * rhead, rcur and rtail=hwtail are stored from previous round.
1697  * hwcur is the next packet to send to the ring.
1698  *
1699  * We want
1700  *    hwcur <= *rhead <= head <= cur <= tail = *rtail <= hwtail
1701  *
1702  * hwcur, rhead, rtail and hwtail are reliable
1703  */
1704 u_int
1705 nm_txsync_prologue(struct netmap_kring *kring, struct netmap_ring *ring)
1706 {
1707 	u_int head = ring->head; /* read only once */
1708 	u_int cur = ring->cur; /* read only once */
1709 	u_int n = kring->nkr_num_slots;
1710 
1711 	nm_prdis(5, "%s kcur %d ktail %d head %d cur %d tail %d",
1712 		kring->name,
1713 		kring->nr_hwcur, kring->nr_hwtail,
1714 		ring->head, ring->cur, ring->tail);
1715 #if 1 /* kernel sanity checks; but we can trust the kring. */
1716 	NM_FAIL_ON(kring->nr_hwcur >= n || kring->rhead >= n ||
1717 	    kring->rtail >= n ||  kring->nr_hwtail >= n);
1718 #endif /* kernel sanity checks */
1719 	/*
1720 	 * user sanity checks. We only use head,
1721 	 * A, B, ... are possible positions for head:
1722 	 *
1723 	 *  0    A  rhead   B  rtail   C  n-1
1724 	 *  0    D  rtail   E  rhead   F  n-1
1725 	 *
1726 	 * B, F, D are valid. A, C, E are wrong
1727 	 */
1728 	if (kring->rtail >= kring->rhead) {
1729 		/* want rhead <= head <= rtail */
1730 		NM_FAIL_ON(head < kring->rhead || head > kring->rtail);
1731 		/* and also head <= cur <= rtail */
1732 		NM_FAIL_ON(cur < head || cur > kring->rtail);
1733 	} else { /* here rtail < rhead */
1734 		/* we need head outside rtail .. rhead */
1735 		NM_FAIL_ON(head > kring->rtail && head < kring->rhead);
1736 
1737 		/* two cases now: head <= rtail or head >= rhead  */
1738 		if (head <= kring->rtail) {
1739 			/* want head <= cur <= rtail */
1740 			NM_FAIL_ON(cur < head || cur > kring->rtail);
1741 		} else { /* head >= rhead */
1742 			/* cur must be outside rtail..head */
1743 			NM_FAIL_ON(cur > kring->rtail && cur < head);
1744 		}
1745 	}
1746 	if (ring->tail != kring->rtail) {
1747 		nm_prlim(5, "%s tail overwritten was %d need %d", kring->name,
1748 			ring->tail, kring->rtail);
1749 		ring->tail = kring->rtail;
1750 	}
1751 	kring->rhead = head;
1752 	kring->rcur = cur;
1753 	return head;
1754 }
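
/*
 * Illustrative sketch (documentation only, not compiled): a minimal
 * userspace transmit loop that keeps the invariants checked above.
 * The application only advances head/cur up to tail and then asks the
 * kernel to sync.  It assumes the userspace helpers from
 * <net/netmap_user.h>; 'fd' is a registered /dev/netmap descriptor,
 * 'ring' the mmapped TX netmap_ring, and build_packet() a hypothetical
 * helper that fills a buffer and returns its length.
 */
#if 0
	while (!nm_ring_empty(ring)) {
		u_int i = ring->cur;
		struct netmap_slot *slot = &ring->slot[i];
		char *buf = NETMAP_BUF(ring, slot->buf_idx);

		slot->len = build_packet(buf);
		ring->head = ring->cur = nm_ring_next(ring, i);
	}
	ioctl(fd, NIOCTXSYNC, NULL);	/* kernel runs nm_txsync_prologue() */
#endif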
1755 
1756 
1757 /*
1758  * validate parameters on entry for *_rxsync()
1759  * Returns ring->head if ok, kring->nkr_num_slots on error.
1760  *
1761  * For a valid configuration,
1762  * hwcur <= head <= cur <= tail <= hwtail
1763  *
1764  * We only consider head and cur.
1765  * hwcur and hwtail are reliable.
1766  *
1767  */
1768 u_int
1769 nm_rxsync_prologue(struct netmap_kring *kring, struct netmap_ring *ring)
1770 {
1771 	uint32_t const n = kring->nkr_num_slots;
1772 	uint32_t head, cur;
1773 
1774 	nm_prdis(5,"%s kc %d kt %d h %d c %d t %d",
1775 		kring->name,
1776 		kring->nr_hwcur, kring->nr_hwtail,
1777 		ring->head, ring->cur, ring->tail);
1778 	/*
1779 	 * Before storing the new values, we should check they do not
1780 	 * move backwards. However:
1781 	 * - head is not an issue because the previous value is hwcur;
1782 	 * - cur could in principle go back, however it does not matter
1783 	 *   because we are processing a brand new rxsync()
1784 	 */
1785 	cur = kring->rcur = ring->cur;	/* read only once */
1786 	head = kring->rhead = ring->head;	/* read only once */
1787 #if 1 /* kernel sanity checks */
1788 	NM_FAIL_ON(kring->nr_hwcur >= n || kring->nr_hwtail >= n);
1789 #endif /* kernel sanity checks */
1790 	/* user sanity checks */
1791 	if (kring->nr_hwtail >= kring->nr_hwcur) {
1792 		/* want hwcur <= rhead <= hwtail */
1793 		NM_FAIL_ON(head < kring->nr_hwcur || head > kring->nr_hwtail);
1794 		/* and also rhead <= rcur <= hwtail */
1795 		NM_FAIL_ON(cur < head || cur > kring->nr_hwtail);
1796 	} else {
1797 		/* we need rhead outside hwtail..hwcur */
1798 		NM_FAIL_ON(head < kring->nr_hwcur && head > kring->nr_hwtail);
1799 		/* two cases now: head <= hwtail or head >= hwcur  */
1800 		if (head <= kring->nr_hwtail) {
1801 			/* want head <= cur <= hwtail */
1802 			NM_FAIL_ON(cur < head || cur > kring->nr_hwtail);
1803 		} else {
1804 			/* cur must be outside hwtail..head */
1805 			NM_FAIL_ON(cur < head && cur > kring->nr_hwtail);
1806 		}
1807 	}
1808 	if (ring->tail != kring->rtail) {
1809 		nm_prlim(5, "%s tail overwritten was %d need %d",
1810 			kring->name,
1811 			ring->tail, kring->rtail);
1812 		ring->tail = kring->rtail;
1813 	}
1814 	return head;
1815 }
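
/*
 * Illustrative sketch (documentation only, not compiled): the matching
 * userspace receive loop.  The application consumes the slots between
 * head and tail, then advances head/cur, so that the next rxsync sees
 * them inside the valid region checked above.  consume_packet() is a
 * hypothetical helper; the other names are as in the TX sketch.
 */
#if 0
	ioctl(fd, NIOCRXSYNC, NULL);		/* refresh ring->tail */
	while (!nm_ring_empty(ring)) {
		u_int i = ring->cur;
		struct netmap_slot *slot = &ring->slot[i];

		consume_packet(NETMAP_BUF(ring, slot->buf_idx), slot->len);
		ring->head = ring->cur = nm_ring_next(ring, i);
	}
#endif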
1816 
1817 
1818 /*
1819  * Error routine called when txsync/rxsync detects an error.
1820  * Can't do much more than resetting head = cur = hwcur, tail = hwtail
1821  * Return 1 on reinit.
1822  *
1823  * This routine is only called by the upper half of the kernel.
1824  * It only reads hwcur (which is changed only by the upper half, too)
1825  * and hwtail (which may be changed by the lower half, but only on
1826  * a tx ring and only to increase it, so any error will be recovered
1827  * on the next call). For the above, we don't strictly need to call
1828  * it under lock.
1829  */
1830 int
1831 netmap_ring_reinit(struct netmap_kring *kring)
1832 {
1833 	struct netmap_ring *ring = kring->ring;
1834 	u_int i, lim = kring->nkr_num_slots - 1;
1835 	int errors = 0;
1836 
1837 	// XXX KASSERT nm_kr_tryget
1838 	nm_prlim(10, "called for %s", kring->name);
1839 	// XXX probably wrong to trust userspace
1840 	kring->rhead = ring->head;
1841 	kring->rcur  = ring->cur;
1842 	kring->rtail = ring->tail;
1843 
1844 	if (ring->cur > lim)
1845 		errors++;
1846 	if (ring->head > lim)
1847 		errors++;
1848 	if (ring->tail > lim)
1849 		errors++;
1850 	for (i = 0; i <= lim; i++) {
1851 		u_int idx = ring->slot[i].buf_idx;
1852 		u_int len = ring->slot[i].len;
1853 		if (idx < 2 || idx >= kring->na->na_lut.objtotal) {
1854 			nm_prlim(5, "bad index at slot %d idx %d len %d ", i, idx, len);
1855 			ring->slot[i].buf_idx = 0;
1856 			ring->slot[i].len = 0;
1857 		} else if (len > NETMAP_BUF_SIZE(kring->na)) {
1858 			ring->slot[i].len = 0;
1859 			nm_prlim(5, "bad len at slot %d idx %d len %d", i, idx, len);
1860 		}
1861 	}
1862 	if (errors) {
1863 		nm_prlim(10, "total %d errors", errors);
1864 		nm_prlim(10, "%s reinit, cur %d -> %d tail %d -> %d",
1865 			kring->name,
1866 			ring->cur, kring->nr_hwcur,
1867 			ring->tail, kring->nr_hwtail);
1868 		ring->head = kring->rhead = kring->nr_hwcur;
1869 		ring->cur  = kring->rcur  = kring->nr_hwcur;
1870 		ring->tail = kring->rtail = kring->nr_hwtail;
1871 	}
1872 	return (errors ? 1 : 0);
1873 }
1874 
1875 /* interpret the ringid and flags fields of an nmreq, by translating them
1876  * into a pair of intervals of ring indices:
1877  *
1878  * [priv->np_txqfirst, priv->np_txqlast) and
1879  * [priv->np_rxqfirst, priv->np_rxqlast)
1880  *
1881  */
1882 int
1883 netmap_interp_ringid(struct netmap_priv_d *priv, struct nmreq_header *hdr)
1884 {
1885 	struct netmap_adapter *na = priv->np_na;
1886 	struct nmreq_register *reg = (struct nmreq_register *)hdr->nr_body;
1887 	int excluded_direction[] = { NR_TX_RINGS_ONLY, NR_RX_RINGS_ONLY };
1888 	enum txrx t;
1889 	u_int j;
1890 	u_int nr_flags = reg->nr_flags, nr_mode = reg->nr_mode,
1891 	      nr_ringid = reg->nr_ringid;
1892 
1893 	for_rx_tx(t) {
1894 		if (nr_flags & excluded_direction[t]) {
1895 			priv->np_qfirst[t] = priv->np_qlast[t] = 0;
1896 			continue;
1897 		}
1898 		switch (nr_mode) {
1899 		case NR_REG_ALL_NIC:
1900 		case NR_REG_NULL:
1901 			priv->np_qfirst[t] = 0;
1902 			priv->np_qlast[t] = nma_get_nrings(na, t);
1903 			nm_prdis("ALL/PIPE: %s %d %d", nm_txrx2str(t),
1904 				priv->np_qfirst[t], priv->np_qlast[t]);
1905 			break;
1906 		case NR_REG_SW:
1907 		case NR_REG_NIC_SW:
1908 			if (!(na->na_flags & NAF_HOST_RINGS)) {
1909 				nm_prerr("host rings not supported");
1910 				return EINVAL;
1911 			}
1912 			priv->np_qfirst[t] = (nr_mode == NR_REG_SW ?
1913 				nma_get_nrings(na, t) : 0);
1914 			priv->np_qlast[t] = netmap_all_rings(na, t);
1915 			nm_prdis("%s: %s %d %d", nr_mode == NR_REG_SW ? "SW" : "NIC+SW",
1916 				nm_txrx2str(t),
1917 				priv->np_qfirst[t], priv->np_qlast[t]);
1918 			break;
1919 		case NR_REG_ONE_NIC:
1920 			if (nr_ringid >= na->num_tx_rings &&
1921 					nr_ringid >= na->num_rx_rings) {
1922 				nm_prerr("invalid ring id %d", nr_ringid);
1923 				return EINVAL;
1924 			}
1925 			/* if not enough rings, use the first one */
1926 			j = nr_ringid;
1927 			if (j >= nma_get_nrings(na, t))
1928 				j = 0;
1929 			priv->np_qfirst[t] = j;
1930 			priv->np_qlast[t] = j + 1;
1931 			nm_prdis("ONE_NIC: %s %d %d", nm_txrx2str(t),
1932 				priv->np_qfirst[t], priv->np_qlast[t]);
1933 			break;
1934 		case NR_REG_ONE_SW:
1935 			if (!(na->na_flags & NAF_HOST_RINGS)) {
1936 				nm_prerr("host rings not supported");
1937 				return EINVAL;
1938 			}
1939 			if (nr_ringid >= na->num_host_tx_rings &&
1940 					nr_ringid >= na->num_host_rx_rings) {
1941 				nm_prerr("invalid ring id %d", nr_ringid);
1942 				return EINVAL;
1943 			}
1944 			/* if not enough rings, use the first one */
1945 			j = nr_ringid;
1946 			if (j >= nma_get_host_nrings(na, t))
1947 				j = 0;
1948 			priv->np_qfirst[t] = nma_get_nrings(na, t) + j;
1949 			priv->np_qlast[t] = nma_get_nrings(na, t) + j + 1;
1950 			nm_prdis("ONE_SW: %s %d %d", nm_txrx2str(t),
1951 				priv->np_qfirst[t], priv->np_qlast[t]);
1952 			break;
1953 		default:
1954 			nm_prerr("invalid regif type %d", nr_mode);
1955 			return EINVAL;
1956 		}
1957 	}
1958 	priv->np_flags = nr_flags;
1959 
1960 	/* Allow transparent forwarding mode in the host --> nic
1961 	 * direction only if all the TX hw rings have been opened. */
1962 	if (priv->np_qfirst[NR_TX] == 0 &&
1963 			priv->np_qlast[NR_TX] >= na->num_tx_rings) {
1964 		priv->np_sync_flags |= NAF_CAN_FORWARD_DOWN;
1965 	}
1966 
1967 	if (netmap_verbose) {
1968 		nm_prinf("%s: tx [%d,%d) rx [%d,%d) id %d",
1969 			na->name,
1970 			priv->np_qfirst[NR_TX],
1971 			priv->np_qlast[NR_TX],
1972 			priv->np_qfirst[NR_RX],
1973 			priv->np_qlast[NR_RX],
1974 			nr_ringid);
1975 	}
1976 	return 0;
1977 }
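
/*
 * Illustrative sketch (documentation only, not compiled): how the
 * nr_mode/nr_ringid fields of a registration request map to the
 * intervals computed above.  On a NIC with 4 tx and 4 rx rings,
 * NR_REG_ONE_NIC with nr_ringid = 2 yields np_qfirst = 2 and
 * np_qlast = 3 for both directions, while NR_REG_ALL_NIC yields [0, 4).
 */
#if 0
	struct nmreq_register req;

	memset(&req, 0, sizeof(req));
	req.nr_mode = NR_REG_ONE_NIC;	/* bind a single hw ring pair */
	req.nr_ringid = 2;		/* ring index, validated above */
	/* NR_TX_RINGS_ONLY or NR_RX_RINGS_ONLY may be added to
	 * req.nr_flags to exclude one direction, as handled above. */
#endif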
1978 
1979 
1980 /*
1981  * Set the ring ID. For devices with a single queue, a request
1982  * for all rings is the same as a single ring.
1983  */
1984 static int
1985 netmap_set_ringid(struct netmap_priv_d *priv, struct nmreq_header *hdr)
1986 {
1987 	struct netmap_adapter *na = priv->np_na;
1988 	struct nmreq_register *reg = (struct nmreq_register *)hdr->nr_body;
1989 	int error;
1990 	enum txrx t;
1991 
1992 	error = netmap_interp_ringid(priv, hdr);
1993 	if (error) {
1994 		return error;
1995 	}
1996 
1997 	priv->np_txpoll = (reg->nr_flags & NR_NO_TX_POLL) ? 0 : 1;
1998 
1999 	/* optimization: count the users registered for more than
2000 	 * one ring, which are the ones sleeping on the global queue.
2001 	 * The default netmap_notify() callback will then
2002 	 * avoid signaling the global queue if nobody is using it
2003 	 */
2004 	for_rx_tx(t) {
2005 		if (nm_si_user(priv, t))
2006 			na->si_users[t]++;
2007 	}
2008 	return 0;
2009 }
2010 
2011 static void
2012 netmap_unset_ringid(struct netmap_priv_d *priv)
2013 {
2014 	struct netmap_adapter *na = priv->np_na;
2015 	enum txrx t;
2016 
2017 	for_rx_tx(t) {
2018 		if (nm_si_user(priv, t))
2019 			na->si_users[t]--;
2020 		priv->np_qfirst[t] = priv->np_qlast[t] = 0;
2021 	}
2022 	priv->np_flags = 0;
2023 	priv->np_txpoll = 0;
2024 	priv->np_kloop_state = 0;
2025 }
2026 
2027 #define within_sel(p_, t_, i_)					  	  \
2028 	((i_) < (p_)->np_qlast[(t_)])
2029 #define nonempty_sel(p_, t_)						  \
2030 	(within_sel((p_), (t_), (p_)->np_qfirst[(t_)]))
2031 #define foreach_selected_ring(p_, t_, i_, kring_)			  \
2032 	for ((t_) = nonempty_sel((p_), NR_RX) ? NR_RX : NR_TX,		  \
2033 	     (i_) = (p_)->np_qfirst[(t_)];				  \
2034 	     ((t_) == NR_RX ||						  \
2035 	      ((t_) == NR_TX && within_sel((p_), (t_), (i_)))) &&     	  \
2036 	      ((kring_) = NMR((p_)->np_na, (t_))[(i_)]); 		  \
2037 	     (i_) = within_sel((p_), (t_), (i_) + 1) ? (i_) + 1 :         \
2038 		(++(t_) < NR_TXRX ? (p_)->np_qfirst[(t_)] : (i_)))
2039 
2040 
2041 /* Set the nr_pending_mode for the requested rings.
2042  * If requested, also try to get exclusive access to the rings, provided
2043  * the rings we want to bind are not exclusively owned by a previous bind.
2044  */
2045 static int
2046 netmap_krings_get(struct netmap_priv_d *priv)
2047 {
2048 	struct netmap_adapter *na = priv->np_na;
2049 	u_int i;
2050 	struct netmap_kring *kring;
2051 	int excl = (priv->np_flags & NR_EXCLUSIVE);
2052 	enum txrx t;
2053 
2054 	if (netmap_debug & NM_DEBUG_ON)
2055 		nm_prinf("%s: grabbing tx [%d, %d) rx [%d, %d)",
2056 			na->name,
2057 			priv->np_qfirst[NR_TX],
2058 			priv->np_qlast[NR_TX],
2059 			priv->np_qfirst[NR_RX],
2060 			priv->np_qlast[NR_RX]);
2061 
2062 	/* first round: check that none of the requested rings
2063 	 * is already exclusively owned, and that we are not asking
2064 	 * for exclusive ownership of rings that are already in use
2065 	 */
2066 	foreach_selected_ring(priv, t, i, kring) {
2067 		if ((kring->nr_kflags & NKR_EXCLUSIVE) ||
2068 		    (kring->users && excl))
2069 		{
2070 			nm_prdis("ring %s busy", kring->name);
2071 			return EBUSY;
2072 		}
2073 	}
2074 
2075 	/* second round: increment usage count (possibly marking them
2076 	 * as exclusive) and set the nr_pending_mode
2077 	 */
2078 	foreach_selected_ring(priv, t, i, kring) {
2079 		kring->users++;
2080 		if (excl)
2081 			kring->nr_kflags |= NKR_EXCLUSIVE;
2082 		kring->nr_pending_mode = NKR_NETMAP_ON;
2083 	}
2084 
2085 	return 0;
2086 
2087 }
2088 
2089 /* Undo netmap_krings_get(). This is done by clearing the exclusive mode
2090  * if it was asked at regif time, and by unsetting the nr_pending_mode if
2091  * we are the last user of the involved rings. */
2092 static void
2093 netmap_krings_put(struct netmap_priv_d *priv)
2094 {
2095 	u_int i;
2096 	struct netmap_kring *kring;
2097 	int excl = (priv->np_flags & NR_EXCLUSIVE);
2098 	enum txrx t;
2099 
2100 	nm_prdis("%s: releasing tx [%d, %d) rx [%d, %d)",
2101 			priv->np_na->name,
2102 			priv->np_qfirst[NR_TX],
2103 			priv->np_qlast[NR_TX],
2104 			priv->np_qfirst[NR_RX],
2105 			priv->np_qlast[NR_RX]);
2106 
2107 	foreach_selected_ring(priv, t, i, kring) {
2108 		if (excl)
2109 			kring->nr_kflags &= ~NKR_EXCLUSIVE;
2110 		kring->users--;
2111 		if (kring->users == 0)
2112 			kring->nr_pending_mode = NKR_NETMAP_OFF;
2113 	}
2114 }
2115 
2116 static int
2117 nm_priv_rx_enabled(struct netmap_priv_d *priv)
2118 {
2119 	return (priv->np_qfirst[NR_RX] != priv->np_qlast[NR_RX]);
2120 }
2121 
2122 /* Validate the CSB entries for both directions (atok and ktoa).
2123  * To be called under NMG_LOCK(). */
2124 static int
2125 netmap_csb_validate(struct netmap_priv_d *priv, struct nmreq_opt_csb *csbo)
2126 {
2127 	struct nm_csb_atok *csb_atok_base =
2128 		(struct nm_csb_atok *)(uintptr_t)csbo->csb_atok;
2129 	struct nm_csb_ktoa *csb_ktoa_base =
2130 		(struct nm_csb_ktoa *)(uintptr_t)csbo->csb_ktoa;
2131 	enum txrx t;
2132 	int num_rings[NR_TXRX], tot_rings;
2133 	size_t entry_size[2];
2134 	void *csb_start[2];
2135 	int i;
2136 
2137 	if (priv->np_kloop_state & NM_SYNC_KLOOP_RUNNING) {
2138 		nm_prerr("Cannot update CSB while kloop is running");
2139 		return EBUSY;
2140 	}
2141 
2142 	tot_rings = 0;
2143 	for_rx_tx(t) {
2144 		num_rings[t] = priv->np_qlast[t] - priv->np_qfirst[t];
2145 		tot_rings += num_rings[t];
2146 	}
2147 	if (tot_rings <= 0)
2148 		return 0;
2149 
2150 	if (!(priv->np_flags & NR_EXCLUSIVE)) {
2151 		nm_prerr("CSB mode requires NR_EXCLUSIVE");
2152 		return EINVAL;
2153 	}
2154 
2155 	entry_size[0] = sizeof(*csb_atok_base);
2156 	entry_size[1] = sizeof(*csb_ktoa_base);
2157 	csb_start[0] = (void *)csb_atok_base;
2158 	csb_start[1] = (void *)csb_ktoa_base;
2159 
2160 	for (i = 0; i < 2; i++) {
2161 		/* On Linux we could use access_ok() to simplify
2162 		 * the validation. However, the advantage of
2163 		 * this approach is that it works also on
2164 		 * this approach is that it also works on
2165 		size_t csb_size = tot_rings * entry_size[i];
2166 		void *tmp;
2167 		int err;
2168 
2169 		if ((uintptr_t)csb_start[i] & (entry_size[i]-1)) {
2170 			nm_prerr("Unaligned CSB address");
2171 			return EINVAL;
2172 		}
2173 
2174 		tmp = nm_os_malloc(csb_size);
2175 		if (!tmp)
2176 			return ENOMEM;
2177 		if (i == 0) {
2178 			/* Application --> kernel direction. */
2179 			err = copyin(csb_start[i], tmp, csb_size);
2180 		} else {
2181 			/* Kernel --> application direction. */
2182 			memset(tmp, 0, csb_size);
2183 			err = copyout(tmp, csb_start[i], csb_size);
2184 		}
2185 		nm_os_free(tmp);
2186 		if (err) {
2187 			nm_prerr("Invalid CSB address");
2188 			return err;
2189 		}
2190 	}
2191 
2192 	priv->np_csb_atok_base = csb_atok_base;
2193 	priv->np_csb_ktoa_base = csb_ktoa_base;
2194 
2195 	/* Initialize the CSB. */
2196 	for_rx_tx(t) {
2197 		for (i = 0; i < num_rings[t]; i++) {
2198 			struct netmap_kring *kring =
2199 				NMR(priv->np_na, t)[i + priv->np_qfirst[t]];
2200 			struct nm_csb_atok *csb_atok = csb_atok_base + i;
2201 			struct nm_csb_ktoa *csb_ktoa = csb_ktoa_base + i;
2202 
2203 			if (t == NR_RX) {
2204 				csb_atok += num_rings[NR_TX];
2205 				csb_ktoa += num_rings[NR_TX];
2206 			}
2207 
2208 			CSB_WRITE(csb_atok, head, kring->rhead);
2209 			CSB_WRITE(csb_atok, cur, kring->rcur);
2210 			CSB_WRITE(csb_atok, appl_need_kick, 1);
2211 			CSB_WRITE(csb_atok, sync_flags, 1);
2212 			CSB_WRITE(csb_ktoa, hwcur, kring->nr_hwcur);
2213 			CSB_WRITE(csb_ktoa, hwtail, kring->nr_hwtail);
2214 			CSB_WRITE(csb_ktoa, kern_need_kick, 1);
2215 
2216 			nm_prinf("csb_init for kring %s: head %u, cur %u, "
2217 				"hwcur %u, hwtail %u", kring->name,
2218 				kring->rhead, kring->rcur, kring->nr_hwcur,
2219 				kring->nr_hwtail);
2220 		}
2221 	}
2222 
2223 	return 0;
2224 }
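
/*
 * Illustrative sketch (documentation only, not compiled): how an
 * application provides the CSB arrays validated above.  One atok and
 * one ktoa entry are allocated per bound ring (tx entries first, then
 * rx, matching the initialization loop above) and passed through a
 * NETMAP_REQ_OPT_CSB option chained to the request header 'hdr'.
 * The registration must also ask for NR_EXCLUSIVE.
 */
#if 0
	struct nm_csb_atok *atok = calloc(tot_rings, sizeof(*atok));
	struct nm_csb_ktoa *ktoa = calloc(tot_rings, sizeof(*ktoa));
	struct nmreq_opt_csb csbo;

	memset(&csbo, 0, sizeof(csbo));
	csbo.nro_opt.nro_reqtype = NETMAP_REQ_OPT_CSB;
	csbo.csb_atok = (uintptr_t)atok;
	csbo.csb_ktoa = (uintptr_t)ktoa;
	csbo.nro_opt.nro_next = hdr.nr_options;	/* chain the option */
	hdr.nr_options = (uintptr_t)&csbo;
#endif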
2225 
2226 /* Ensure that the netmap adapter can support the given MTU.
2227  * @return EINVAL if the na cannot be set to mtu, 0 otherwise.
2228  */
2229 int
2230 netmap_buf_size_validate(const struct netmap_adapter *na, unsigned mtu) {
2231 	unsigned nbs = NETMAP_BUF_SIZE(na);
2232 
2233 	if (mtu <= na->rx_buf_maxsize) {
2234 		/* The MTU fits a single NIC slot. We only
2235 		 * need to check that netmap buffers are
2236 		 * large enough to hold an MTU. NS_MOREFRAG
2237 		 * cannot be used in this case. */
2238 		if (nbs < mtu) {
2239 			nm_prerr("error: netmap buf size (%u) "
2240 				 "< device MTU (%u)", nbs, mtu);
2241 			return EINVAL;
2242 		}
2243 	} else {
2244 		/* More NIC slots may be needed to receive
2245 		 * or transmit a single packet. Check that
2246 		 * the adapter supports NS_MOREFRAG and that
2247 		 * netmap buffers are large enough to hold
2248 		 * the maximum per-slot size. */
2249 		if (!(na->na_flags & NAF_MOREFRAG)) {
2250 			nm_prerr("error: large MTU (%d) needed "
2251 				 "but %s does not support "
2252 				 "NS_MOREFRAG", mtu,
2253 				 na->ifp->if_xname);
2254 			return EINVAL;
2255 		} else if (nbs < na->rx_buf_maxsize) {
2256 			nm_prerr("error: using NS_MOREFRAG on "
2257 				 "%s requires netmap buf size "
2258 				 ">= %u", na->ifp->if_xname,
2259 				 na->rx_buf_maxsize);
2260 			return EINVAL;
2261 		} else {
2262 			nm_prinf("info: netmap application on "
2263 				 "%s needs to support "
2264 				 "NS_MOREFRAG "
2265 				 "(MTU=%u,netmap_buf_size=%u)",
2266 				 na->ifp->if_xname, mtu, nbs);
2267 		}
2268 	}
2269 	return 0;
2270 }
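
/*
 * Worked example for the checks above (illustrative numbers): with the
 * default 2048-byte netmap buffers, a 1500-byte MTU on a NIC whose
 * rx_buf_maxsize covers a full frame takes the first branch and passes
 * (2048 >= 1500).  A 9000-byte MTU on a NIC that receives at most
 * 2048 bytes per slot takes the second branch: the adapter must
 * advertise NAF_MOREFRAG and the application must reassemble packets
 * split across slots via NS_MOREFRAG.
 */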
2271 
2272 /* Handle the offset option, if present in the hdr.
2273  * Returns 0 on success, or an error.
2274  */
2275 static int
2276 netmap_offsets_init(struct netmap_priv_d *priv, struct nmreq_header *hdr)
2277 {
2278 	struct nmreq_opt_offsets *opt;
2279 	struct netmap_adapter *na = priv->np_na;
2280 	struct netmap_kring *kring;
2281 	uint64_t mask = 0, bits = 0, maxbits = sizeof(uint64_t) * 8,
2282 		 max_offset = 0, initial_offset = 0, min_gap = 0;
2283 	u_int i;
2284 	enum txrx t;
2285 	int error = 0;
2286 
2287 	opt = (struct nmreq_opt_offsets *)
2288 		nmreq_getoption(hdr, NETMAP_REQ_OPT_OFFSETS);
2289 	if (opt == NULL)
2290 		return 0;
2291 
2292 	if (!(na->na_flags & NAF_OFFSETS)) {
2293 		if (netmap_verbose)
2294 			nm_prerr("%s does not support offsets",
2295 				na->name);
2296 		error = EOPNOTSUPP;
2297 		goto out;
2298 	}
2299 
2300 	/* check sanity of the opt values */
2301 	max_offset = opt->nro_max_offset;
2302 	min_gap = opt->nro_min_gap;
2303 	initial_offset = opt->nro_initial_offset;
2304 	bits = opt->nro_offset_bits;
2305 
2306 	if (bits > maxbits) {
2307 		if (netmap_verbose)
2308 			nm_prerr("bits: %llu too large (max %llu)",
2309 				(unsigned long long)bits,
2310 				(unsigned long long)maxbits);
2311 		error = EINVAL;
2312 		goto out;
2313 	}
2314 	/* we take bits == 0 as a request to use the entire field */
2315 	if (bits == 0 || bits == maxbits) {
2316 		/* shifting a type by sizeof(type) is undefined */
2317 		bits = maxbits;
2318 		mask = 0xffffffffffffffff;
2319 	} else {
2320 		mask = (1ULL << bits) - 1;
2321 	}
2322 	if (max_offset > NETMAP_BUF_SIZE(na)) {
2323 		if (netmap_verbose)
2324 			nm_prerr("max offset %llu > buf size %u",
2325 				(unsigned long long)max_offset, NETMAP_BUF_SIZE(na));
2326 		error = EINVAL;
2327 		goto out;
2328 	}
2329 	if ((max_offset & mask) != max_offset) {
2330 		if (netmap_verbose)
2331 			nm_prerr("max offset %llu too large for %llu bits",
2332 				(unsigned long long)max_offset,
2333 				(unsigned long long)bits);
2334 		error = EINVAL;
2335 		goto out;
2336 	}
2337 	if (initial_offset > max_offset) {
2338 		if (netmap_verbose)
2339 			nm_prerr("initial offset %llu > max offset %llu",
2340 				(unsigned long long)initial_offset,
2341 				(unsigned long long)max_offset);
2342 		error = EINVAL;
2343 		goto out;
2344 	}
2345 
2346 	/* initialize the kring and ring fields. */
2347 	foreach_selected_ring(priv, t, i, kring) {
2348 		struct netmap_kring *kring = NMR(na, t)[i];
2349 		struct netmap_ring *ring = kring->ring;
2350 		u_int j;
2351 
2352 		/* if the ring is already in use, we check that the
2353 		 * new request is compatible with the existing one
2354 		 */
2355 		if (kring->offset_mask) {
2356 			if ((kring->offset_mask & mask) != mask ||
2357 			     kring->offset_max < max_offset) {
2358 				if (netmap_verbose)
2359 					nm_prinf("%s: cannot increase "
2360 						 "offset mask and/or max "
2361 						 "(current: mask=%llx, max=%llu)",
2362 							kring->name,
2363 							(unsigned long long)kring->offset_mask,
2364 							(unsigned long long)kring->offset_max);
2365 				error = EBUSY;
2366 				goto out;
2367 			}
2368 			mask = kring->offset_mask;
2369 			max_offset = kring->offset_max;
2370 		} else {
2371 			kring->offset_mask = mask;
2372 			*(uint64_t *)(uintptr_t)&ring->offset_mask = mask;
2373 			kring->offset_max = max_offset;
2374 			kring->offset_gap = min_gap;
2375 		}
2376 
2377 		/* if there is an initial offset, put it into
2378 		 * all the slots
2379 		 *
2380 		 * Note: we cannot change the offsets if the
2381 		 * ring is already in use.
2382 		 */
2383 		if (!initial_offset || kring->users > 1)
2384 			continue;
2385 
2386 		for (j = 0; j < kring->nkr_num_slots; j++) {
2387 			struct netmap_slot *slot = ring->slot + j;
2388 
2389 			nm_write_offset(kring, slot, initial_offset);
2390 		}
2391 	}
2392 
2393 out:
2394 	opt->nro_opt.nro_status = error;
2395 	if (!error) {
2396 		opt->nro_max_offset = max_offset;
2397 	}
2398 	return error;
2399 
2400 }
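
/*
 * Illustrative sketch (documentation only, not compiled): requesting
 * per-slot offsets from userspace.  The values are arbitrary examples
 * and must satisfy the sanity checks performed above (initial <= max
 * <= buffer size, max representable in nro_offset_bits); 'hdr' is the
 * request header being prepared.
 */
#if 0
	struct nmreq_opt_offsets offs;

	memset(&offs, 0, sizeof(offs));
	offs.nro_opt.nro_reqtype = NETMAP_REQ_OPT_OFFSETS;
	offs.nro_offset_bits = 0;	/* 0 means "use the whole field" */
	offs.nro_initial_offset = 64;	/* written into every slot */
	offs.nro_max_offset = 128;	/* upper bound enforced above */
	offs.nro_min_gap = 0;		/* buffers are not shared */
	offs.nro_opt.nro_next = hdr.nr_options;
	hdr.nr_options = (uintptr_t)&offs;
#endif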
2401 
2402 
2403 /* set the hardware buffer length in each one of the newly opened rings
2404  * (hwbuf_len field in the kring struct). The purpose is to select
2405  * the maximum supported input buffer length that will not cause writes
2406  * outside of the available space, even when offsets are in use.
2407  */
2408 static int
2409 netmap_compute_buf_len(struct netmap_priv_d *priv)
2410 {
2411 	enum txrx t;
2412 	u_int i;
2413 	struct netmap_kring *kring;
2414 	int error = 0;
2415 	unsigned mtu = 0;
2416 	struct netmap_adapter *na = priv->np_na;
2417 	uint64_t target;
2418 
2419 	foreach_selected_ring(priv, t, i, kring) {
2420 		/* rings that are already active have their hwbuf_len
2421 		 * already set and we cannot change it.
2422 		 */
2423 		if (kring->users > 1)
2424 			continue;
2425 
2426 		/* For netmap buffers which are not shared among several ring
2427 		 * slots (the normal case), the available space is the buf size
2428 		 * minus the max offset declared by the user at open time.  If
2429 		 * the user plans to have several slots pointing to different
2430 		 * offsets into the same large buffer, she must also declare a
2431 		 * "minimum gap" between two such consecutive offsets. In this
2432 		 * case the user-declared 'offset_gap' is taken as the
2433 		 * available space and offset_max is ignored.
2434 		 */
2435 
2436 		/* start with the normal case (unshared buffers) */
2437 		target = NETMAP_BUF_SIZE(kring->na) -
2438 			kring->offset_max;
2439 		/* if offset_gap is zero, the user does not intend to use
2440 		 * shared buffers. In this case the minimum gap between
2441 		 * two consecutive offsets into the same buffer can be
2442 		 * assumed to be equal to the buffer size. In this way
2443 		 * offset_gap always contains the available space ignoring
2444 		 * offset_max. This may be used by drivers of NICs that
2445 		 * are guaranteed to never write more than MTU bytes, even
2446 		 * if the input buffer is larger: if the MTU is less
2447 		 * than the target they can set hwbuf_len to offset_gap.
2448 		 */
2449 		if (!kring->offset_gap)
2450 			kring->offset_gap =
2451 				NETMAP_BUF_SIZE(kring->na);
2452 
2453 		if (kring->offset_gap < target)
2454 			target = kring->offset_gap;
2455 		error = kring->nm_bufcfg(kring, target);
2456 		if (error)
2457 			goto out;
2458 
2459 		*(uint64_t *)(uintptr_t)&kring->ring->buf_align = kring->buf_align;
2460 
2461 		if (mtu && t == NR_RX && kring->hwbuf_len < mtu) {
2462 			if (!(na->na_flags & NAF_MOREFRAG)) {
2463 				nm_prerr("error: large MTU (%d) needed "
2464 					 "but %s does not support "
2465 					 "NS_MOREFRAG", mtu,
2466 					 na->name);
2467 				error = EINVAL;
2468 				goto out;
2469 			} else {
2470 				nm_prinf("info: netmap application on "
2471 					 "%s needs to support "
2472 					 "NS_MOREFRAG "
2473 					 "(MTU=%u,buf_size=%llu)",
2474 					 kring->name, mtu,
2475 					 (unsigned long long)kring->hwbuf_len);
2476 			}
2477 		}
2478 	}
2479 out:
2480 	return error;
2481 }
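
/*
 * Worked example for the computation above (illustrative numbers):
 * with 2048-byte netmap buffers and offset_max = 128, the unshared
 * case gives target = 2048 - 128 = 1920.  If the application instead
 * declared offset_gap = 512 (several slots pointing into one large
 * buffer), the gap is smaller than 1920 and becomes the target, so
 * nm_bufcfg() is asked for an hwbuf_len of at most 512 bytes.
 */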
2482 
2483 /*
2484  * possibly move the interface to netmap-mode.
2485  * On success it returns 0 and stores the new netmap_if in priv->np_nifp,
2485  * otherwise it returns an error code.
2486  * This must be called with NMG_LOCK held.
2487  *
2488  * The following na callbacks are called in the process:
2489  *
2490  * na->nm_config()			[by netmap_update_config]
2491  * (get current number and size of rings)
2492  *
2493  *  	We have a generic one for linux (netmap_linux_config).
2494  *  	The bwrap has to override this, since it has to forward
2495  *  	the request to the wrapped adapter (netmap_bwrap_config).
2496  *
2497  *
2498  * na->nm_krings_create()
2499  * (create and init the krings array)
2500  *
2501  * 	One of the following:
2502  *
2503  *	* netmap_hw_krings_create, 			(hw ports)
2504  *		creates the standard layout for the krings
2505  * 		and adds the mbq (used for the host rings).
2506  *
2507  * 	* netmap_vp_krings_create			(VALE ports)
2508  * 		add leases and scratchpads
2509  *
2510  * 	* netmap_pipe_krings_create			(pipes)
2511  * 		create the krings and rings of both ends and
2512  * 		cross-link them
2513  *
2514  *      * netmap_monitor_krings_create 			(monitors)
2515  *      	avoid allocating the mbq
2516  *
2517  *      * netmap_bwrap_krings_create			(bwraps)
2518  *      	create both the bwrap krings array,
2519  *      	the krings array of the wrapped adapter, and
2520  *      	(if needed) the fake array for the host adapter
2521  *
2522  * na->nm_register(, 1)
2523  * (put the adapter in netmap mode)
2524  *
2525  * 	This may be one of the following:
2526  *
2527  * 	* netmap_hw_reg				        (hw ports)
2528  * 		checks that the ifp is still there, then calls
2529  * 		the hardware specific callback;
2530  *
2531  * 	* netmap_vp_reg					(VALE ports)
2532  *		If the port is connected to a bridge,
2533  *		set the NAF_NETMAP_ON flag under the
2534  *		bridge write lock.
2535  *
2536  *	* netmap_pipe_reg				(pipes)
2537  *		inform the other pipe end that it is no
2538  *		longer responsible for the lifetime of this
2539  *		pipe end
2540  *
2541  *	* netmap_monitor_reg				(monitors)
2542  *		intercept the sync callbacks of the monitored
2543  *		rings
2544  *
2545  *	* netmap_bwrap_reg				(bwraps)
2546  *		cross-link the bwrap and hwna rings,
2547  *		forward the request to the hwna, override
2548  *		the hwna notify callback (to get the frames
2549  *		coming from outside go through the bridge).
2550  *
2551  *
2552  */
2553 int
2554 netmap_do_regif(struct netmap_priv_d *priv, struct netmap_adapter *na,
2555 	struct nmreq_header *hdr)
2556 {
2557 	struct netmap_if *nifp = NULL;
2558 	int error;
2559 
2560 	NMG_LOCK_ASSERT();
2561 	priv->np_na = na;     /* store the reference */
2562 	error = netmap_mem_finalize(na->nm_mem, na);
2563 	if (error)
2564 		goto err;
2565 
2566 	if (na->active_fds == 0) {
2567 
2568 		/* cache the allocator info in the na */
2569 		error = netmap_mem_get_lut(na->nm_mem, &na->na_lut);
2570 		if (error)
2571 			goto err_drop_mem;
2572 		nm_prdis("lut %p bufs %u size %u", na->na_lut.lut, na->na_lut.objtotal,
2573 					    na->na_lut.objsize);
2574 
2575 		/* ring configuration may have changed, fetch from the card */
2576 		netmap_update_config(na);
2577 	}
2578 
2579 	/* compute the range of tx and rx rings to monitor */
2580 	error = netmap_set_ringid(priv, hdr);
2581 	if (error)
2582 		goto err_put_lut;
2583 
2584 	if (na->active_fds == 0) {
2585 		/*
2586 		 * If this is the first registration of the adapter,
2587 		 * perform sanity checks and create the in-kernel view
2588 		 * of the netmap rings (the netmap krings).
2589 		 */
2590 		if (na->ifp && nm_priv_rx_enabled(priv)) {
2591 			/* This netmap adapter is attached to an ifnet. */
2592 			unsigned mtu = nm_os_ifnet_mtu(na->ifp);
2593 
2594 			nm_prdis("%s: mtu %d rx_buf_maxsize %d netmap_buf_size %d",
2595 				na->name, mtu, na->rx_buf_maxsize, NETMAP_BUF_SIZE(na));
2596 
2597 			if (na->rx_buf_maxsize == 0) {
2598 				nm_prerr("%s: error: rx_buf_maxsize == 0", na->name);
2599 				error = EIO;
2600 				goto err_drop_mem;
2601 			}
2602 
2603 			error = netmap_buf_size_validate(na, mtu);
2604 			if (error)
2605 				goto err_drop_mem;
2606 		}
2607 
2608 		/*
2609 		 * Depending on the adapter, this may also create
2610 		 * the netmap rings themselves
2611 		 */
2612 		error = na->nm_krings_create(na);
2613 		if (error)
2614 			goto err_put_lut;
2615 
2616 	}
2617 
2618 	/* now the krings must exist and we can check whether some
2619 	 * previous bind has exclusive ownership on them, and set
2620 	 * nr_pending_mode
2621 	 */
2622 	error = netmap_krings_get(priv);
2623 	if (error)
2624 		goto err_del_krings;
2625 
2626 	/* create all needed missing netmap rings */
2627 	error = netmap_mem_rings_create(na);
2628 	if (error)
2629 		goto err_rel_excl;
2630 
2631 	/* initialize offsets if requested */
2632 	error = netmap_offsets_init(priv, hdr);
2633 	if (error)
2634 		goto err_rel_excl;
2635 
2636 	/* compute and validate the buf lengths */
2637 	error = netmap_compute_buf_len(priv);
2638 	if (error)
2639 		goto err_rel_excl;
2640 
2641 	/* in all cases, create a new netmap if */
2642 	nifp = netmap_mem_if_new(na, priv);
2643 	if (nifp == NULL) {
2644 		error = ENOMEM;
2645 		goto err_rel_excl;
2646 	}
2647 
2648 	if (nm_kring_pending(priv)) {
2649 		/* Some kring is switching mode, tell the adapter to
2650 		 * react on this. */
2651 		netmap_set_all_rings(na, NM_KR_LOCKED);
2652 		error = na->nm_register(na, 1);
2653 		netmap_set_all_rings(na, 0);
2654 		if (error)
2655 			goto err_del_if;
2656 	}
2657 
2658 	/* Commit the reference. */
2659 	na->active_fds++;
2660 
2661 	/*
2662 	 * advertise that the interface is ready by setting np_nifp.
2663 	 * The barrier is needed because readers (poll, *SYNC and mmap)
2664 	 * check for priv->np_nifp != NULL without locking
2665 	 */
2666 	mb(); /* make sure previous writes are visible to all CPUs */
2667 	priv->np_nifp = nifp;
2668 
2669 	return 0;
2670 
2671 err_del_if:
2672 	netmap_mem_if_delete(na, nifp);
2673 err_rel_excl:
2674 	netmap_krings_put(priv);
2675 	netmap_mem_rings_delete(na);
2676 err_del_krings:
2677 	if (na->active_fds == 0)
2678 		na->nm_krings_delete(na);
2679 err_put_lut:
2680 	if (na->active_fds == 0)
2681 		memset(&na->na_lut, 0, sizeof(na->na_lut));
2682 err_drop_mem:
2683 	netmap_mem_drop(na);
2684 err:
2685 	priv->np_na = NULL;
2686 	return error;
2687 }
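
/*
 * Illustrative sketch (documentation only, not compiled): the userspace
 * side of the registration performed by netmap_do_regif().  Error
 * handling is omitted and "em0" is just an example port name.
 */
#if 0
	struct nmreq_header hdr;
	struct nmreq_register reg;
	struct netmap_if *nifp;
	void *mem;
	int fd = open("/dev/netmap", O_RDWR);

	memset(&hdr, 0, sizeof(hdr));
	memset(&reg, 0, sizeof(reg));
	hdr.nr_version = NETMAP_API;
	hdr.nr_reqtype = NETMAP_REQ_REGISTER;
	strlcpy(hdr.nr_name, "em0", sizeof(hdr.nr_name));
	hdr.nr_body = (uintptr_t)&reg;
	reg.nr_mode = NR_REG_ALL_NIC;
	ioctl(fd, NIOCCTRL, &hdr);	/* ends up in netmap_do_regif() */

	mem = mmap(NULL, reg.nr_memsize, PROT_READ | PROT_WRITE,
	    MAP_SHARED, fd, 0);
	nifp = NETMAP_IF(mem, reg.nr_offset);	/* rings hang off nifp */
#endif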
2688 
2689 
2690 /*
2691  * update kring and ring at the end of rxsync/txsync.
2692  */
2693 static inline void
2694 nm_sync_finalize(struct netmap_kring *kring)
2695 {
2696 	/*
2697 	 * Update ring tail to what the kernel knows
2698 	 * After txsync: head/rhead/hwcur might be behind cur/rcur
2699 	 * if no carrier.
2700 	 */
2701 	kring->ring->tail = kring->rtail = kring->nr_hwtail;
2702 
2703 	nm_prdis(5, "%s now hwcur %d hwtail %d head %d cur %d tail %d",
2704 		kring->name, kring->nr_hwcur, kring->nr_hwtail,
2705 		kring->rhead, kring->rcur, kring->rtail);
2706 }
2707 
2708 /* set ring timestamp */
2709 static inline void
2710 ring_timestamp_set(struct netmap_ring *ring)
2711 {
2712 	if (netmap_no_timestamp == 0 || ring->flags & NR_TIMESTAMP) {
2713 		microtime(&ring->ts);
2714 	}
2715 }
2716 
2717 static int nmreq_copyin(struct nmreq_header *, int);
2718 static int nmreq_copyout(struct nmreq_header *, int);
2719 static int nmreq_checkoptions(struct nmreq_header *);
2720 
2721 /*
2722  * ioctl(2) support for the "netmap" device.
2723  *
2724  * Following a list of accepted commands:
2725  * - NIOCCTRL		device control API
2726  * - NIOCTXSYNC		sync TX rings
2727  * - NIOCRXSYNC		sync RX rings
2728  * - SIOCGIFADDR	just for convenience
2729  * - NIOCGINFO		deprecated (legacy API)
2730  * - NIOCREGIF		deprecated (legacy API)
2731  *
2732  * Return 0 on success, errno otherwise.
2733  */
2734 int
2735 netmap_ioctl(struct netmap_priv_d *priv, u_long cmd, caddr_t data,
2736 		struct thread *td, int nr_body_is_user)
2737 {
2738 	struct mbq q;	/* packets from RX hw queues to host stack */
2739 	struct netmap_adapter *na = NULL;
2740 	struct netmap_mem_d *nmd = NULL;
2741 	struct ifnet *ifp = NULL;
2742 	int error = 0;
2743 	u_int i, qfirst, qlast;
2744 	struct netmap_kring **krings;
2745 	int sync_flags;
2746 	enum txrx t;
2747 
2748 	switch (cmd) {
2749 	case NIOCCTRL: {
2750 		struct nmreq_header *hdr = (struct nmreq_header *)data;
2751 
2752 		if (hdr->nr_version < NETMAP_MIN_API ||
2753 		    hdr->nr_version > NETMAP_MAX_API) {
2754 			nm_prerr("API mismatch: got %d need %d",
2755 				hdr->nr_version, NETMAP_API);
2756 			return EINVAL;
2757 		}
2758 
2759 		/* Make a kernel-space copy of the user-space nr_body.
2760 		 * For convenience, the nr_body pointer and the pointers
2761 		 * in the options list will be replaced with their
2762 		 * kernel-space counterparts. The original pointers are
2763 		 * saved internally and later restored by nmreq_copyout
2764 		 */
2765 		error = nmreq_copyin(hdr, nr_body_is_user);
2766 		if (error) {
2767 			return error;
2768 		}
2769 
2770 		/* Sanitize hdr->nr_name. */
2771 		hdr->nr_name[sizeof(hdr->nr_name) - 1] = '\0';
2772 
2773 		switch (hdr->nr_reqtype) {
2774 		case NETMAP_REQ_REGISTER: {
2775 			struct nmreq_register *req =
2776 				(struct nmreq_register *)(uintptr_t)hdr->nr_body;
2777 			struct netmap_if *nifp;
2778 
2779 			/* Protect access to priv from concurrent requests. */
2780 			NMG_LOCK();
2781 			do {
2782 				struct nmreq_option *opt;
2783 				u_int memflags;
2784 
2785 				if (priv->np_nifp != NULL) {	/* thread already registered */
2786 					error = EBUSY;
2787 					break;
2788 				}
2789 
2790 #ifdef WITH_EXTMEM
2791 				opt = nmreq_getoption(hdr, NETMAP_REQ_OPT_EXTMEM);
2792 				if (opt != NULL) {
2793 					struct nmreq_opt_extmem *e =
2794 						(struct nmreq_opt_extmem *)opt;
2795 
2796 					nmd = netmap_mem_ext_create(e->nro_usrptr,
2797 							&e->nro_info, &error);
2798 					opt->nro_status = error;
2799 					if (nmd == NULL)
2800 						break;
2801 				}
2802 #endif /* WITH_EXTMEM */
2803 
2804 				if (nmd == NULL && req->nr_mem_id) {
2805 					/* find the allocator and get a reference */
2806 					nmd = netmap_mem_find(req->nr_mem_id);
2807 					if (nmd == NULL) {
2808 						if (netmap_verbose) {
2809 							nm_prerr("%s: failed to find mem_id %u",
2810 									hdr->nr_name, req->nr_mem_id);
2811 						}
2812 						error = EINVAL;
2813 						break;
2814 					}
2815 				}
2816 				/* find the interface and a reference */
2817 				error = netmap_get_na(hdr, &na, &ifp, nmd,
2818 						      1 /* create */); /* keep reference */
2819 				if (error)
2820 					break;
2821 				if (NETMAP_OWNED_BY_KERN(na)) {
2822 					error = EBUSY;
2823 					break;
2824 				}
2825 
2826 				if (na->virt_hdr_len && !(req->nr_flags & NR_ACCEPT_VNET_HDR)) {
2827 					nm_prerr("virt_hdr_len=%d, but application does "
2828 						"not accept it", na->virt_hdr_len);
2829 					error = EIO;
2830 					break;
2831 				}
2832 
2833 				error = netmap_do_regif(priv, na, hdr);
2834 				if (error) {    /* reg. failed, release priv and ref */
2835 					break;
2836 				}
2837 
2838 				opt = nmreq_getoption(hdr, NETMAP_REQ_OPT_CSB);
2839 				if (opt != NULL) {
2840 					struct nmreq_opt_csb *csbo =
2841 						(struct nmreq_opt_csb *)opt;
2842 					error = netmap_csb_validate(priv, csbo);
2843 					opt->nro_status = error;
2844 					if (error) {
2845 						netmap_do_unregif(priv);
2846 						break;
2847 					}
2848 				}
2849 
2850 				nifp = priv->np_nifp;
2851 
2852 				/* return the offset of the netmap_if object */
2853 				req->nr_rx_rings = na->num_rx_rings;
2854 				req->nr_tx_rings = na->num_tx_rings;
2855 				req->nr_rx_slots = na->num_rx_desc;
2856 				req->nr_tx_slots = na->num_tx_desc;
2857 				req->nr_host_tx_rings = na->num_host_tx_rings;
2858 				req->nr_host_rx_rings = na->num_host_rx_rings;
2859 				error = netmap_mem_get_info(na->nm_mem, &req->nr_memsize, &memflags,
2860 					&req->nr_mem_id);
2861 				if (error) {
2862 					netmap_do_unregif(priv);
2863 					break;
2864 				}
2865 				if (memflags & NETMAP_MEM_PRIVATE) {
2866 					*(uint32_t *)(uintptr_t)&nifp->ni_flags |= NI_PRIV_MEM;
2867 				}
2868 				for_rx_tx(t) {
2869 					priv->np_si[t] = nm_si_user(priv, t) ?
2870 						&na->si[t] : &NMR(na, t)[priv->np_qfirst[t]]->si;
2871 				}
2872 
2873 				if (req->nr_extra_bufs) {
2874 					if (netmap_verbose)
2875 						nm_prinf("requested %d extra buffers",
2876 							req->nr_extra_bufs);
2877 					req->nr_extra_bufs = netmap_extra_alloc(na,
2878 						&nifp->ni_bufs_head, req->nr_extra_bufs);
2879 					if (netmap_verbose)
2880 						nm_prinf("got %d extra buffers", req->nr_extra_bufs);
2881 				} else {
2882 					nifp->ni_bufs_head = 0;
2883 				}
2884 				req->nr_offset = netmap_mem_if_offset(na->nm_mem, nifp);
2885 
2886 				error = nmreq_checkoptions(hdr);
2887 				if (error) {
2888 					netmap_do_unregif(priv);
2889 					break;
2890 				}
2891 
2892 				/* store ifp reference so that priv destructor may release it */
2893 				priv->np_ifp = ifp;
2894 			} while (0);
2895 			if (error) {
2896 				netmap_unget_na(na, ifp);
2897 			}
2898 			/* release the reference from netmap_mem_find() or
2899 			 * netmap_mem_ext_create()
2900 			 */
2901 			if (nmd)
2902 				netmap_mem_put(nmd);
2903 			NMG_UNLOCK();
2904 			break;
2905 		}
2906 
2907 		case NETMAP_REQ_PORT_INFO_GET: {
2908 			struct nmreq_port_info_get *req =
2909 				(struct nmreq_port_info_get *)(uintptr_t)hdr->nr_body;
2910 			int nmd_ref = 0;
2911 
2912 			NMG_LOCK();
2913 			do {
2914 				u_int memflags;
2915 
2916 				if (hdr->nr_name[0] != '\0') {
2917 					/* Build a nmreq_register out of the nmreq_port_info_get,
2918 					 * so that we can call netmap_get_na(). */
2919 					struct nmreq_register regreq;
2920 					bzero(&regreq, sizeof(regreq));
2921 					regreq.nr_mode = NR_REG_ALL_NIC;
2922 					regreq.nr_tx_slots = req->nr_tx_slots;
2923 					regreq.nr_rx_slots = req->nr_rx_slots;
2924 					regreq.nr_tx_rings = req->nr_tx_rings;
2925 					regreq.nr_rx_rings = req->nr_rx_rings;
2926 					regreq.nr_host_tx_rings = req->nr_host_tx_rings;
2927 					regreq.nr_host_rx_rings = req->nr_host_rx_rings;
2928 					regreq.nr_mem_id = req->nr_mem_id;
2929 
2930 					/* get a refcount */
2931 					hdr->nr_reqtype = NETMAP_REQ_REGISTER;
2932 					hdr->nr_body = (uintptr_t)&regreq;
2933 					error = netmap_get_na(hdr, &na, &ifp, NULL, 1 /* create */);
2934 					hdr->nr_reqtype = NETMAP_REQ_PORT_INFO_GET; /* reset type */
2935 					hdr->nr_body = (uintptr_t)req; /* reset nr_body */
2936 					if (error) {
2937 						na = NULL;
2938 						ifp = NULL;
2939 						break;
2940 					}
2941 					nmd = na->nm_mem; /* get memory allocator */
2942 				} else {
2943 					nmd = netmap_mem_find(req->nr_mem_id ? req->nr_mem_id : 1);
2944 					if (nmd == NULL) {
2945 						if (netmap_verbose)
2946 							nm_prerr("%s: failed to find mem_id %u",
2947 									hdr->nr_name,
2948 									req->nr_mem_id ? req->nr_mem_id : 1);
2949 						error = EINVAL;
2950 						break;
2951 					}
2952 					nmd_ref = 1;
2953 				}
2954 
2955 				error = netmap_mem_get_info(nmd, &req->nr_memsize, &memflags,
2956 					&req->nr_mem_id);
2957 				if (error)
2958 					break;
2959 				if (na == NULL) /* only memory info */
2960 					break;
2961 				netmap_update_config(na);
2962 				req->nr_rx_rings = na->num_rx_rings;
2963 				req->nr_tx_rings = na->num_tx_rings;
2964 				req->nr_rx_slots = na->num_rx_desc;
2965 				req->nr_tx_slots = na->num_tx_desc;
2966 				req->nr_host_tx_rings = na->num_host_tx_rings;
2967 				req->nr_host_rx_rings = na->num_host_rx_rings;
2968 			} while (0);
2969 			netmap_unget_na(na, ifp);
2970 			if (nmd_ref)
2971 				netmap_mem_put(nmd);
2972 			NMG_UNLOCK();
2973 			break;
2974 		}
2975 #ifdef WITH_VALE
2976 		case NETMAP_REQ_VALE_ATTACH: {
2977 			error = netmap_bdg_attach(hdr, NULL /* userspace request */);
2978 			break;
2979 		}
2980 
2981 		case NETMAP_REQ_VALE_DETACH: {
2982 			error = netmap_bdg_detach(hdr, NULL /* userspace request */);
2983 			break;
2984 		}
2985 
2986 		case NETMAP_REQ_PORT_HDR_SET: {
2987 			struct nmreq_port_hdr *req =
2988 				(struct nmreq_port_hdr *)(uintptr_t)hdr->nr_body;
2989 			/* Build a nmreq_register out of the nmreq_port_hdr,
2990 			 * so that we can call netmap_get_bdg_na(). */
2991 			struct nmreq_register regreq;
2992 			bzero(&regreq, sizeof(regreq));
2993 			regreq.nr_mode = NR_REG_ALL_NIC;
2994 
2995 			/* For now we only support virtio-net headers, and only for
2996 			 * VALE ports, but this may change in the future. Valid lengths
2997 			 * for the virtio-net header are 0 (no header), 10 and 12. */
2998 			if (req->nr_hdr_len != 0 &&
2999 				req->nr_hdr_len != sizeof(struct nm_vnet_hdr) &&
3000 					req->nr_hdr_len != 12) {
3001 				if (netmap_verbose)
3002 					nm_prerr("invalid hdr_len %u", req->nr_hdr_len);
3003 				error = EINVAL;
3004 				break;
3005 			}
3006 			NMG_LOCK();
3007 			hdr->nr_reqtype = NETMAP_REQ_REGISTER;
3008 			hdr->nr_body = (uintptr_t)&regreq;
3009 			error = netmap_get_vale_na(hdr, &na, NULL, 0);
3010 			hdr->nr_reqtype = NETMAP_REQ_PORT_HDR_SET;
3011 			hdr->nr_body = (uintptr_t)req;
3012 			if (na && !error) {
3013 				struct netmap_vp_adapter *vpna =
3014 					(struct netmap_vp_adapter *)na;
3015 				na->virt_hdr_len = req->nr_hdr_len;
3016 				if (na->virt_hdr_len) {
3017 					vpna->mfs = NETMAP_BUF_SIZE(na);
3018 				}
3019 				if (netmap_verbose)
3020 					nm_prinf("Using vnet_hdr_len %d for %p", na->virt_hdr_len, na);
3021 				netmap_adapter_put(na);
3022 			} else if (!na) {
3023 				error = ENXIO;
3024 			}
3025 			NMG_UNLOCK();
3026 			break;
3027 		}
3028 
3029 		case NETMAP_REQ_PORT_HDR_GET: {
3030 			/* Get vnet-header length for this netmap port */
3031 			struct nmreq_port_hdr *req =
3032 				(struct nmreq_port_hdr *)(uintptr_t)hdr->nr_body;
3033 			/* Build a nmreq_register out of the nmreq_port_hdr,
3034 			 * so that we can call netmap_get_bdg_na(). */
3035 			struct nmreq_register regreq;
3036 			struct ifnet *ifp;
3037 
3038 			bzero(&regreq, sizeof(regreq));
3039 			regreq.nr_mode = NR_REG_ALL_NIC;
3040 			NMG_LOCK();
3041 			hdr->nr_reqtype = NETMAP_REQ_REGISTER;
3042 			hdr->nr_body = (uintptr_t)&regreq;
3043 			error = netmap_get_na(hdr, &na, &ifp, NULL, 0);
3044 			hdr->nr_reqtype = NETMAP_REQ_PORT_HDR_GET;
3045 			hdr->nr_body = (uintptr_t)req;
3046 			if (na && !error) {
3047 				req->nr_hdr_len = na->virt_hdr_len;
3048 			}
3049 			netmap_unget_na(na, ifp);
3050 			NMG_UNLOCK();
3051 			break;
3052 		}
3053 
3054 		case NETMAP_REQ_VALE_LIST: {
3055 			error = netmap_vale_list(hdr);
3056 			break;
3057 		}
3058 
3059 		case NETMAP_REQ_VALE_NEWIF: {
3060 			error = nm_vi_create(hdr);
3061 			break;
3062 		}
3063 
3064 		case NETMAP_REQ_VALE_DELIF: {
3065 			error = nm_vi_destroy(hdr->nr_name);
3066 			break;
3067 		}
3068 #endif  /* WITH_VALE */
3069 
3070 		case NETMAP_REQ_VALE_POLLING_ENABLE:
3071 		case NETMAP_REQ_VALE_POLLING_DISABLE: {
3072 			error = nm_bdg_polling(hdr);
3073 			break;
3074 		}
3075 		case NETMAP_REQ_POOLS_INFO_GET: {
3076 			/* Get information from the memory allocator used for
3077 			 * hdr->nr_name. */
3078 			struct nmreq_pools_info *req =
3079 				(struct nmreq_pools_info *)(uintptr_t)hdr->nr_body;
3080 			NMG_LOCK();
3081 			do {
3082 				/* Build a nmreq_register out of the nmreq_pools_info,
3083 				 * so that we can call netmap_get_na(). */
3084 				struct nmreq_register regreq;
3085 				bzero(&regreq, sizeof(regreq));
3086 				regreq.nr_mem_id = req->nr_mem_id;
3087 				regreq.nr_mode = NR_REG_ALL_NIC;
3088 
3089 				hdr->nr_reqtype = NETMAP_REQ_REGISTER;
3090 				hdr->nr_body = (uintptr_t)&regreq;
3091 				error = netmap_get_na(hdr, &na, &ifp, NULL, 1 /* create */);
3092 				hdr->nr_reqtype = NETMAP_REQ_POOLS_INFO_GET; /* reset type */
3093 				hdr->nr_body = (uintptr_t)req; /* reset nr_body */
3094 				if (error) {
3095 					na = NULL;
3096 					ifp = NULL;
3097 					break;
3098 				}
3099 				nmd = na->nm_mem; /* grab the memory allocator */
3100 				if (nmd == NULL) {
3101 					error = EINVAL;
3102 					break;
3103 				}
3104 
3105 				/* Finalize the memory allocator, get the pools
3106 				 * information and release the allocator. */
3107 				error = netmap_mem_finalize(nmd, na);
3108 				if (error) {
3109 					break;
3110 				}
3111 				error = netmap_mem_pools_info_get(req, nmd);
3112 				netmap_mem_drop(na);
3113 			} while (0);
3114 			netmap_unget_na(na, ifp);
3115 			NMG_UNLOCK();
3116 			break;
3117 		}
3118 
3119 		case NETMAP_REQ_CSB_ENABLE: {
3120 			struct nmreq_option *opt;
3121 
3122 			opt = nmreq_getoption(hdr, NETMAP_REQ_OPT_CSB);
3123 			if (opt == NULL) {
3124 				error = EINVAL;
3125 			} else {
3126 				struct nmreq_opt_csb *csbo =
3127 					(struct nmreq_opt_csb *)opt;
3128 				NMG_LOCK();
3129 				error = netmap_csb_validate(priv, csbo);
3130 				NMG_UNLOCK();
3131 				opt->nro_status = error;
3132 			}
3133 			break;
3134 		}
3135 
3136 		case NETMAP_REQ_SYNC_KLOOP_START: {
3137 			error = netmap_sync_kloop(priv, hdr);
3138 			break;
3139 		}
3140 
3141 		case NETMAP_REQ_SYNC_KLOOP_STOP: {
3142 			error = netmap_sync_kloop_stop(priv);
3143 			break;
3144 		}
3145 
3146 		default: {
3147 			error = EINVAL;
3148 			break;
3149 		}
3150 		}
3151 		/* Write back request body to userspace and reset the
3152 		 * user-space pointer. */
3153 		error = nmreq_copyout(hdr, error);
3154 		break;
3155 	}
3156 
3157 	case NIOCTXSYNC:
3158 	case NIOCRXSYNC: {
3159 		if (unlikely(priv->np_nifp == NULL)) {
3160 			error = ENXIO;
3161 			break;
3162 		}
3163 		mb(); /* make sure following reads are not from cache */
3164 
3165 		if (unlikely(priv->np_csb_atok_base)) {
3166 			nm_prerr("Invalid sync in CSB mode");
3167 			error = EBUSY;
3168 			break;
3169 		}
3170 
3171 		na = priv->np_na;      /* we have a reference */
3172 
3173 		mbq_init(&q);
3174 		t = (cmd == NIOCTXSYNC ? NR_TX : NR_RX);
3175 		krings = NMR(na, t);
3176 		qfirst = priv->np_qfirst[t];
3177 		qlast = priv->np_qlast[t];
3178 		sync_flags = priv->np_sync_flags;
3179 
3180 		for (i = qfirst; i < qlast; i++) {
3181 			struct netmap_kring *kring = krings[i];
3182 			struct netmap_ring *ring = kring->ring;
3183 
3184 			if (unlikely(nm_kr_tryget(kring, 1, &error))) {
3185 				error = (error ? EIO : 0);
3186 				continue;
3187 			}
3188 
3189 			if (cmd == NIOCTXSYNC) {
3190 				if (netmap_debug & NM_DEBUG_TXSYNC)
3191 					nm_prinf("pre txsync ring %d cur %d hwcur %d",
3192 					    i, ring->cur,
3193 					    kring->nr_hwcur);
3194 				if (nm_txsync_prologue(kring, ring) >= kring->nkr_num_slots) {
3195 					netmap_ring_reinit(kring);
3196 				} else if (kring->nm_sync(kring, sync_flags | NAF_FORCE_RECLAIM) == 0) {
3197 					nm_sync_finalize(kring);
3198 				}
3199 				if (netmap_debug & NM_DEBUG_TXSYNC)
3200 					nm_prinf("post txsync ring %d cur %d hwcur %d",
3201 					    i, ring->cur,
3202 					    kring->nr_hwcur);
3203 			} else {
3204 				if (nm_rxsync_prologue(kring, ring) >= kring->nkr_num_slots) {
3205 					netmap_ring_reinit(kring);
3206 				}
3207 				if (nm_may_forward_up(kring)) {
3208 					/* transparent forwarding, see netmap_poll() */
3209 					netmap_grab_packets(kring, &q, netmap_fwd);
3210 				}
3211 				if (kring->nm_sync(kring, sync_flags | NAF_FORCE_READ) == 0) {
3212 					nm_sync_finalize(kring);
3213 				}
3214 				ring_timestamp_set(ring);
3215 			}
3216 			nm_kr_put(kring);
3217 		}
3218 
3219 		if (mbq_peek(&q)) {
3220 			netmap_send_up(na->ifp, &q);
3221 		}
3222 
3223 		break;
3224 	}
3225 
3226 	default: {
3227 		return netmap_ioctl_legacy(priv, cmd, data, td);
3228 		break;
3229 	}
3230 	}
3231 
3232 	return (error);
3233 }
3234 
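/*
 * Illustrative sketch (documentation only, not compiled): querying ring
 * and memory geometry through the NETMAP_REQ_PORT_INFO_GET branch
 * handled above, without registering the port.  "em0" is an example
 * name; error handling is omitted.
 */
#if 0
	struct nmreq_header hdr;
	struct nmreq_port_info_get info;
	int fd = open("/dev/netmap", O_RDWR);

	memset(&hdr, 0, sizeof(hdr));
	memset(&info, 0, sizeof(info));
	hdr.nr_version = NETMAP_API;
	hdr.nr_reqtype = NETMAP_REQ_PORT_INFO_GET;
	strlcpy(hdr.nr_name, "em0", sizeof(hdr.nr_name));
	hdr.nr_body = (uintptr_t)&info;
	ioctl(fd, NIOCCTRL, &hdr);
	/* info.nr_tx_rings, nr_rx_rings, nr_memsize etc. are now filled */
#endif
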
3235 size_t
3236 nmreq_size_by_type(uint16_t nr_reqtype)
3237 {
3238 	switch (nr_reqtype) {
3239 	case NETMAP_REQ_REGISTER:
3240 		return sizeof(struct nmreq_register);
3241 	case NETMAP_REQ_PORT_INFO_GET:
3242 		return sizeof(struct nmreq_port_info_get);
3243 	case NETMAP_REQ_VALE_ATTACH:
3244 		return sizeof(struct nmreq_vale_attach);
3245 	case NETMAP_REQ_VALE_DETACH:
3246 		return sizeof(struct nmreq_vale_detach);
3247 	case NETMAP_REQ_VALE_LIST:
3248 		return sizeof(struct nmreq_vale_list);
3249 	case NETMAP_REQ_PORT_HDR_SET:
3250 	case NETMAP_REQ_PORT_HDR_GET:
3251 		return sizeof(struct nmreq_port_hdr);
3252 	case NETMAP_REQ_VALE_NEWIF:
3253 		return sizeof(struct nmreq_vale_newif);
3254 	case NETMAP_REQ_VALE_DELIF:
3255 	case NETMAP_REQ_SYNC_KLOOP_STOP:
3256 	case NETMAP_REQ_CSB_ENABLE:
3257 		return 0;
3258 	case NETMAP_REQ_VALE_POLLING_ENABLE:
3259 	case NETMAP_REQ_VALE_POLLING_DISABLE:
3260 		return sizeof(struct nmreq_vale_polling);
3261 	case NETMAP_REQ_POOLS_INFO_GET:
3262 		return sizeof(struct nmreq_pools_info);
3263 	case NETMAP_REQ_SYNC_KLOOP_START:
3264 		return sizeof(struct nmreq_sync_kloop_start);
3265 	}
3266 	return 0;
3267 }
3268 
3269 static size_t
3270 nmreq_opt_size_by_type(uint32_t nro_reqtype, uint64_t nro_size)
3271 {
3272 	size_t rv = sizeof(struct nmreq_option);
3273 #ifdef NETMAP_REQ_OPT_DEBUG
3274 	if (nro_reqtype & NETMAP_REQ_OPT_DEBUG)
3275 		return (nro_reqtype & ~NETMAP_REQ_OPT_DEBUG);
3276 #endif /* NETMAP_REQ_OPT_DEBUG */
3277 	switch (nro_reqtype) {
3278 #ifdef WITH_EXTMEM
3279 	case NETMAP_REQ_OPT_EXTMEM:
3280 		rv = sizeof(struct nmreq_opt_extmem);
3281 		break;
3282 #endif /* WITH_EXTMEM */
3283 	case NETMAP_REQ_OPT_SYNC_KLOOP_EVENTFDS:
3284 		if (nro_size >= rv)
3285 			rv = nro_size;
3286 		break;
3287 	case NETMAP_REQ_OPT_CSB:
3288 		rv = sizeof(struct nmreq_opt_csb);
3289 		break;
3290 	case NETMAP_REQ_OPT_SYNC_KLOOP_MODE:
3291 		rv = sizeof(struct nmreq_opt_sync_kloop_mode);
3292 		break;
3293 	case NETMAP_REQ_OPT_OFFSETS:
3294 		rv = sizeof(struct nmreq_opt_offsets);
3295 		break;
3296 	}
3297 	/* subtract the common header */
3298 	return rv - sizeof(struct nmreq_option);
3299 }
3300 
3301 /*
3302  * nmreq_copyin: create an in-kernel version of the request.
3303  *
3304  * We build the following data structure:
3305  *
3306  * hdr -> +-------+                buf
3307  *        |       |          +---------------+
3308  *        +-------+          |usr body ptr   |
3309  *        |options|-.        +---------------+
3310  *        +-------+ |        |usr options ptr|
3311  *        |body   |--------->+---------------+
3312  *        +-------+ |        |               |
3313  *                  |        |  copy of body |
3314  *                  |        |               |
3315  *                  |        +---------------+
3316  *                  |        |    NULL       |
3317  *                  |        +---------------+
3318  *                  |    .---|               |\
3319  *                  |    |   +---------------+ |
3320  *                  | .------|               | |
3321  *                  | |  |   +---------------+  \ option table
3322  *                  | |  |   |      ...      |  / indexed by option
3323  *                  | |  |   +---------------+ |  type
3324  *                  | |  |   |               | |
3325  *                  | |  |   +---------------+/
3326  *                  | |  |   |usr next ptr 1 |
3327  *                  `-|----->+---------------+
3328  *                    |  |   | copy of opt 1 |
3329  *                    |  |   |               |
3330  *                    |  | .-| nro_next      |
3331  *                    |  | | +---------------+
3332  *                    |  | | |usr next ptr 2 |
3333  *                    |  `-`>+---------------+
3334  *                    |      | copy of opt 2 |
3335  *                    |      |               |
3336  *                    |    .-| nro_next      |
3337  *                    |    | +---------------+
3338  *                    |    | |               |
3339  *                    ~    ~ ~      ...      ~
3340  *                    |    .-|               |
3341  *                    `----->+---------------+
3342  *                         | |usr next ptr n |
3343  *                         `>+---------------+
3344  *                           | copy of opt n |
3345  *                           |               |
3346  *                           | nro_next(NULL)|
3347  *                           +---------------+
3348  *
3349  * The options and body fields of the hdr structure are overwritten
3350  * with in-kernel valid pointers inside the buf. The original user
3351  * pointers are saved in the buf and restored on copyout.
3352  * The list of options is copied and the pointers adjusted. The
3353  * original pointers are saved immediately before the option they
 * belong to.
3354  *
3355  * The option table has an entry for every available option.  Entries
3356  * for options that have not been passed contain NULL.
3357  *
3358  */
3359 
3360 int
3361 nmreq_copyin(struct nmreq_header *hdr, int nr_body_is_user)
3362 {
3363 	size_t rqsz, optsz, bufsz;
3364 	int error = 0;
3365 	char *ker = NULL, *p;
3366 	struct nmreq_option **next, *src, **opt_tab;
3367 	struct nmreq_option buf;
3368 	uint64_t *ptrs;
3369 
3370 	if (hdr->nr_reserved) {
3371 		if (netmap_verbose)
3372 			nm_prerr("nr_reserved must be zero");
3373 		return EINVAL;
3374 	}
3375 
3376 	if (!nr_body_is_user)
3377 		return 0;
3378 
3379 	hdr->nr_reserved = nr_body_is_user;
3380 
3381 	/* compute the total size of the buffer */
3382 	rqsz = nmreq_size_by_type(hdr->nr_reqtype);
3383 	if (rqsz > NETMAP_REQ_MAXSIZE) {
3384 		error = EMSGSIZE;
3385 		goto out_err;
3386 	}
3387 	if ((rqsz && hdr->nr_body == (uintptr_t)NULL) ||
3388 		(!rqsz && hdr->nr_body != (uintptr_t)NULL)) {
3389 		/* Request body expected, but not found; or
3390 		 * request body found but unexpected. */
3391 		if (netmap_verbose)
3392 			nm_prerr("nr_body expected but not found, or vice versa");
3393 		error = EINVAL;
3394 		goto out_err;
3395 	}
3396 
3397 	bufsz = 2 * sizeof(void *) + rqsz +
3398 		NETMAP_REQ_OPT_MAX * sizeof(opt_tab);
3399 	/* compute the size of the buf below the option table.
3400 	 * It must contain a copy of every received option structure.
3401 	 * For every option we also need to store a copy of the user
3402 	 * list pointer.
3403 	 */
3404 	optsz = 0;
3405 	for (src = (struct nmreq_option *)(uintptr_t)hdr->nr_options; src;
3406 	     src = (struct nmreq_option *)(uintptr_t)buf.nro_next)
3407 	{
3408 		error = copyin(src, &buf, sizeof(*src));
3409 		if (error)
3410 			goto out_err;
3411 		optsz += sizeof(*src);
3412 		optsz += nmreq_opt_size_by_type(buf.nro_reqtype, buf.nro_size);
3413 		if (rqsz + optsz > NETMAP_REQ_MAXSIZE) {
3414 			error = EMSGSIZE;
3415 			goto out_err;
3416 		}
3417 		bufsz += sizeof(void *);
3418 	}
3419 	bufsz += optsz;
3420 
3421 	ker = nm_os_malloc(bufsz);
3422 	if (ker == NULL) {
3423 		error = ENOMEM;
3424 		goto out_err;
3425 	}
3426 	p = ker;	/* write pointer into the buffer */
3427 
3428 	/* make a copy of the user pointers */
3429 	ptrs = (uint64_t*)p;
3430 	*ptrs++ = hdr->nr_body;
3431 	*ptrs++ = hdr->nr_options;
3432 	p = (char *)ptrs;
3433 
3434 	/* copy the body */
3435 	error = copyin((void *)(uintptr_t)hdr->nr_body, p, rqsz);
3436 	if (error)
3437 		goto out_restore;
3438 	/* overwrite the user pointer with the in-kernel one */
3439 	hdr->nr_body = (uintptr_t)p;
3440 	p += rqsz;
3441 	/* start of the options table */
3442 	opt_tab = (struct nmreq_option **)p;
3443 	p += sizeof(opt_tab) * NETMAP_REQ_OPT_MAX;
3444 
3445 	/* copy the options */
3446 	next = (struct nmreq_option **)&hdr->nr_options;
3447 	src = *next;
3448 	while (src) {
3449 		struct nmreq_option *opt;
3450 
3451 		/* copy the option header */
3452 		ptrs = (uint64_t *)p;
3453 		opt = (struct nmreq_option *)(ptrs + 1);
3454 		error = copyin(src, opt, sizeof(*src));
3455 		if (error)
3456 			goto out_restore;
3457 		/* make a copy of the user next pointer */
3458 		*ptrs = opt->nro_next;
3459 		/* overwrite the user pointer with the in-kernel one */
3460 		*next = opt;
3461 
3462 		/* initialize the option as not supported.
3463 		 * Recognized options will update this field.
3464 		 */
3465 		opt->nro_status = EOPNOTSUPP;
3466 
3467 		/* check for invalid types */
3468 		if (opt->nro_reqtype < 1) {
3469 			if (netmap_verbose)
3470 				nm_prinf("invalid option type: %u", opt->nro_reqtype);
3471 			opt->nro_status = EINVAL;
3472 			error = EINVAL;
3473 			goto next;
3474 		}
3475 
3476 		if (opt->nro_reqtype >= NETMAP_REQ_OPT_MAX) {
3477 			/* opt->nro_status is already EOPNOTSUPP */
3478 			error = EOPNOTSUPP;
3479 			goto next;
3480 		}
3481 
3482 		/* if the type is valid, index the option in the table
3483 		 * unless it is a duplicate.
3484 		 */
3485 		if (opt_tab[opt->nro_reqtype] != NULL) {
3486 			if (netmap_verbose)
3487 				nm_prinf("duplicate option: %u", opt->nro_reqtype);
3488 			opt->nro_status = EINVAL;
3489 			opt_tab[opt->nro_reqtype]->nro_status = EINVAL;
3490 			error = EINVAL;
3491 			goto next;
3492 		}
3493 		opt_tab[opt->nro_reqtype] = opt;
3494 
3495 		p = (char *)(opt + 1);
3496 
3497 		/* copy the option body */
3498 		optsz = nmreq_opt_size_by_type(opt->nro_reqtype,
3499 						opt->nro_size);
3500 		if (optsz) {
3501 			/* the option body follows the option header */
3502 			error = copyin(src + 1, p, optsz);
3503 			if (error)
3504 				goto out_restore;
3505 			p += optsz;
3506 		}
3507 
3508 	next:
3509 		/* move to next option */
3510 		next = (struct nmreq_option **)&opt->nro_next;
3511 		src = *next;
3512 	}
3513 	if (error)
3514 		nmreq_copyout(hdr, error);
3515 	return error;
3516 
3517 out_restore:
3518 	ptrs = (uint64_t *)ker;
3519 	hdr->nr_body = *ptrs++;
3520 	hdr->nr_options = *ptrs++;
3521 	hdr->nr_reserved = 0;
3522 	nm_os_free(ker);
3523 out_err:
3524 	return error;
3525 }
3526 
3527 static int
3528 nmreq_copyout(struct nmreq_header *hdr, int rerror)
3529 {
3530 	struct nmreq_option *src, *dst;
3531 	void *ker = (void *)(uintptr_t)hdr->nr_body, *bufstart;
3532 	uint64_t *ptrs;
3533 	size_t bodysz;
3534 	int error;
3535 
3536 	if (!hdr->nr_reserved)
3537 		return rerror;
3538 
3539 	/* restore the user pointers in the header */
3540 	ptrs = (uint64_t *)ker - 2;
3541 	bufstart = ptrs;
3542 	hdr->nr_body = *ptrs++;
3543 	src = (struct nmreq_option *)(uintptr_t)hdr->nr_options;
3544 	hdr->nr_options = *ptrs;
3545 
3546 	if (!rerror) {
3547 		/* copy the body */
3548 		bodysz = nmreq_size_by_type(hdr->nr_reqtype);
3549 		error = copyout(ker, (void *)(uintptr_t)hdr->nr_body, bodysz);
3550 		if (error) {
3551 			rerror = error;
3552 			goto out;
3553 		}
3554 	}
3555 
3556 	/* copy the options */
3557 	dst = (struct nmreq_option *)(uintptr_t)hdr->nr_options;
3558 	while (src) {
3559 		size_t optsz;
3560 		uint64_t next;
3561 
3562 		/* restore the user pointer */
3563 		next = src->nro_next;
3564 		ptrs = (uint64_t *)src - 1;
3565 		src->nro_next = *ptrs;
3566 
3567 		/* always copy the option header */
3568 		error = copyout(src, dst, sizeof(*src));
3569 		if (error) {
3570 			rerror = error;
3571 			goto out;
3572 		}
3573 
3574 		/* copy the option body only if there was no error */
3575 		if (!rerror && !src->nro_status) {
3576 			optsz = nmreq_opt_size_by_type(src->nro_reqtype,
3577 							src->nro_size);
3578 			if (optsz) {
3579 				error = copyout(src + 1, dst + 1, optsz);
3580 				if (error) {
3581 					rerror = error;
3582 					goto out;
3583 				}
3584 			}
3585 		}
3586 		src = (struct nmreq_option *)(uintptr_t)next;
3587 		dst = (struct nmreq_option *)(uintptr_t)*ptrs;
3588 	}
3589 
3590 
3591 out:
3592 	hdr->nr_reserved = 0;
3593 	nm_os_free(bufstart);
3594 	return rerror;
3595 }
3596 
3597 struct nmreq_option *
3598 nmreq_getoption(struct nmreq_header *hdr, uint16_t reqtype)
3599 {
3600 	struct nmreq_option **opt_tab;
3601 
3602 	if (!hdr->nr_options)
3603 		return NULL;
3604 
3605 	opt_tab = (struct nmreq_option **)((uintptr_t)hdr->nr_options) -
3606 	    (NETMAP_REQ_OPT_MAX + 1);
3607 	return opt_tab[reqtype];
3608 }
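
/*
 * Illustrative in-kernel sketch (not compiled here): after
 * nmreq_copyin() a request handler can look up an option and mark it
 * as recognized by clearing nro_status, so that nmreq_checkoptions()
 * below does not fail with EOPNOTSUPP.
 *
 *	struct nmreq_option *opt;
 *
 *	opt = nmreq_getoption(hdr, NETMAP_REQ_OPT_CSB);
 *	if (opt != NULL) {
 *		struct nmreq_opt_csb *csbo = (struct nmreq_opt_csb *)opt;
 *
 *		opt->nro_status = 0;
 *		... use csbo->csb_atok and csbo->csb_ktoa ...
 *	}
 */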
3609 
3610 static int
3611 nmreq_checkoptions(struct nmreq_header *hdr)
3612 {
3613 	struct nmreq_option *opt;
3614 	/* return error if there is still any option
3615 	 * marked as not supported
3616 	 */
3617 
3618 	for (opt = (struct nmreq_option *)(uintptr_t)hdr->nr_options; opt;
3619 	     opt = (struct nmreq_option *)(uintptr_t)opt->nro_next)
3620 		if (opt->nro_status == EOPNOTSUPP)
3621 			return EOPNOTSUPP;
3622 
3623 	return 0;
3624 }
3625 
3626 /*
3627  * select(2) and poll(2) handlers for the "netmap" device.
3628  *
3629  * Can be called for one or more queues.
3630  * Return the event mask corresponding to ready events.
3631  * If there are no ready events (and 'sr' is not NULL), do a
3632  * selrecord on either individual selinfo or on the global one.
3633  * Device-dependent parts (locking and sync of tx/rx rings)
3634  * are done through callbacks.
3635  *
3636  * On linux, arguments are really pwait, the poll table, and 'td' is struct file *
3637  * The first one is remapped to pwait as selrecord() uses the name as a
3638  * hidden argument.
3639  */
3640 int
3641 netmap_poll(struct netmap_priv_d *priv, int events, NM_SELRECORD_T *sr)
3642 {
3643 	struct netmap_adapter *na;
3644 	struct netmap_kring *kring;
3645 	struct netmap_ring *ring;
3646 	u_int i, want[NR_TXRX], revents = 0;
3647 	NM_SELINFO_T *si[NR_TXRX];
3648 #define want_tx want[NR_TX]
3649 #define want_rx want[NR_RX]
3650 	struct mbq q;	/* packets from RX hw queues to host stack */
3651 
3652 	/*
3653 	 * In order to avoid nested locks, we need to "double check"
3654 	 * txsync and rxsync if we decide to do a selrecord().
3655 	 * retry_tx (and retry_rx, later) prevent looping forever.
3656 	 */
3657 	int retry_tx = 1, retry_rx = 1;
3658 
3659 	/* Transparent mode: send_down is 1 if we have found some
3660 	 * packets to forward (host RX ring --> NIC) during the rx
3661 	 * scan and we have not sent them down to the NIC yet.
3662 	 * Transparent mode requires binding all rings to a single
3663 	 * file descriptor.
3664 	 */
3665 	int send_down = 0;
3666 	int sync_flags = priv->np_sync_flags;
3667 
3668 	mbq_init(&q);
3669 
3670 	if (unlikely(priv->np_nifp == NULL)) {
3671 		return POLLERR;
3672 	}
3673 	mb(); /* make sure following reads are not from cache */
3674 
3675 	na = priv->np_na;
3676 
3677 	if (unlikely(!nm_netmap_on(na)))
3678 		return POLLERR;
3679 
3680 	if (unlikely(priv->np_csb_atok_base)) {
3681 		nm_prerr("Invalid poll in CSB mode");
3682 		return POLLERR;
3683 	}
3684 
3685 	if (netmap_debug & NM_DEBUG_ON)
3686 		nm_prinf("device %s events 0x%x", na->name, events);
3687 	want_tx = events & (POLLOUT | POLLWRNORM);
3688 	want_rx = events & (POLLIN | POLLRDNORM);
3689 
3690 	/*
3691 	 * If the card has more than one queue AND the file descriptor is
3692 	 * bound to all of them, we sleep on the "global" selinfo, otherwise
3693 	 * we sleep on individual selinfo (FreeBSD only allows two selinfo's
3694 	 * per file descriptor).
3695 	 * The interrupt routine in the driver wakes one or the other
3696 	 * (or both) depending on which clients are active.
3697 	 *
3698 	 * rxsync() is only called if we run out of buffers on a POLLIN.
3699 	 * txsync() is called if we run out of buffers on POLLOUT, or
3700 	 * there are pending packets to send. The latter can be disabled
3701 	 * by passing NETMAP_NO_TX_POLL in the NIOCREGIF call.
3702 	 */
3703 	si[NR_RX] = priv->np_si[NR_RX];
3704 	si[NR_TX] = priv->np_si[NR_TX];
3705 
3706 #ifdef __FreeBSD__
3707 	/*
3708 	 * We start with a lock free round which is cheap if we have
3709 	 * slots available. If this fails, then lock and call the sync
3710 	 * routines. We can't do this on Linux, as the contract says
3711 	 * that we must call nm_os_selrecord() unconditionally.
3712 	 */
3713 	if (want_tx) {
3714 		const enum txrx t = NR_TX;
3715 		for (i = priv->np_qfirst[t]; i < priv->np_qlast[t]; i++) {
3716 			kring = NMR(na, t)[i];
3717 			if (kring->ring->cur != kring->ring->tail) {
3718 				/* Some unseen TX space is available, so we
3719 				 * don't need to run txsync. */
3720 				revents |= want[t];
3721 				want[t] = 0;
3722 				break;
3723 			}
3724 		}
3725 	}
3726 	if (want_rx) {
3727 		const enum txrx t = NR_RX;
3728 		int rxsync_needed = 0;
3729 
3730 		for (i = priv->np_qfirst[t]; i < priv->np_qlast[t]; i++) {
3731 			kring = NMR(na, t)[i];
3732 			if (kring->ring->cur == kring->ring->tail
3733 				|| kring->rhead != kring->ring->head) {
3734 				/* There are no unseen packets on this ring,
3735 				 * or there are some buffers to be returned
3736 				 * to the netmap port. We therefore go ahead
3737 				 * and run rxsync. */
3738 				rxsync_needed = 1;
3739 				break;
3740 			}
3741 		}
3742 		if (!rxsync_needed) {
3743 			revents |= want_rx;
3744 			want_rx = 0;
3745 		}
3746 	}
3747 #endif
3748 
3749 #ifdef linux
3750 	/* The selrecord must be unconditional on linux. */
3751 	nm_os_selrecord(sr, si[NR_RX]);
3752 	nm_os_selrecord(sr, si[NR_TX]);
3753 #endif /* linux */
3754 
3755 	/*
3756 	 * If we want to push packets out (priv->np_txpoll) or
3757 	 * want_tx is still set, we must issue txsync calls
3758 	 * (on all rings, to avoid stalling the tx rings).
3759 	 * Fortunately, normal tx mode has np_txpoll set.
3760 	 */
3761 	if (priv->np_txpoll || want_tx) {
3762 		/*
3763 		 * The first round checks if anyone is ready, if not
3764 		 * do a selrecord and another round to handle races.
3765 		 * want_tx goes to 0 if any space is found, and is
3766 		 * used to skip rings with no pending transmissions.
3767 		 */
3768 flush_tx:
3769 		for (i = priv->np_qfirst[NR_TX]; i < priv->np_qlast[NR_TX]; i++) {
3770 			int found = 0;
3771 
3772 			kring = na->tx_rings[i];
3773 			ring = kring->ring;
3774 
3775 			/*
3776 			 * Don't try to txsync this TX ring if we already found some
3777 			 * space in some of the TX rings (want_tx == 0) and there are no
3778 			 * TX slots in this ring that need to be flushed to the NIC
3779 			 * (head == hwcur).
3780 			 */
3781 			if (!send_down && !want_tx && ring->head == kring->nr_hwcur)
3782 				continue;
3783 
3784 			if (nm_kr_tryget(kring, 1, &revents))
3785 				continue;
3786 
3787 			if (nm_txsync_prologue(kring, ring) >= kring->nkr_num_slots) {
3788 				netmap_ring_reinit(kring);
3789 				revents |= POLLERR;
3790 			} else {
3791 				if (kring->nm_sync(kring, sync_flags))
3792 					revents |= POLLERR;
3793 				else
3794 					nm_sync_finalize(kring);
3795 			}
3796 
3797 			/*
3798 			 * If we found new slots, notify potential
3799 			 * listeners on the same ring.
3800 			 * Since we just did a txsync, look at the copies
3801 			 * of cur,tail in the kring.
3802 			 */
3803 			found = kring->rcur != kring->rtail;
3804 			nm_kr_put(kring);
3805 			if (found) { /* notify other listeners */
3806 				revents |= want_tx;
3807 				want_tx = 0;
3808 #ifndef linux
3809 				kring->nm_notify(kring, 0);
3810 #endif /* !linux */
3811 			}
3812 		}
3813 		/* if there were any packets to forward we must have handled them by now */
3814 		send_down = 0;
3815 		if (want_tx && retry_tx && sr) {
3816 #ifndef linux
3817 			nm_os_selrecord(sr, si[NR_TX]);
3818 #endif /* !linux */
3819 			retry_tx = 0;
3820 			goto flush_tx;
3821 		}
3822 	}
3823 
3824 	/*
3825 	 * If want_rx is still set, scan the receive rings.
3826 	 * Do it on all rings because otherwise we starve.
3827 	 */
3828 	if (want_rx) {
3829 		/* two rounds here for race avoidance */
3830 do_retry_rx:
3831 		for (i = priv->np_qfirst[NR_RX]; i < priv->np_qlast[NR_RX]; i++) {
3832 			int found = 0;
3833 
3834 			kring = na->rx_rings[i];
3835 			ring = kring->ring;
3836 
3837 			if (unlikely(nm_kr_tryget(kring, 1, &revents)))
3838 				continue;
3839 
3840 			if (nm_rxsync_prologue(kring, ring) >= kring->nkr_num_slots) {
3841 				netmap_ring_reinit(kring);
3842 				revents |= POLLERR;
3843 			}
3844 			/* now we can use kring->rcur, rtail */
3845 
3846 			/*
3847 			 * transparent mode support: collect packets from
3848 			 * hw rxring(s) that have been released by the user
3849 			 */
3850 			if (nm_may_forward_up(kring)) {
3851 				netmap_grab_packets(kring, &q, netmap_fwd);
3852 			}
3853 
3854 			/* Clear the NR_FORWARD flag anyway, it may be set by
3855 			 * the nm_sync() below only for the host RX ring (see
3856 			 * netmap_rxsync_from_host()). */
3857 			kring->nr_kflags &= ~NR_FORWARD;
3858 			if (kring->nm_sync(kring, sync_flags))
3859 				revents |= POLLERR;
3860 			else
3861 				nm_sync_finalize(kring);
3862 			send_down |= (kring->nr_kflags & NR_FORWARD);
3863 			ring_timestamp_set(ring);
3864 			found = kring->rcur != kring->rtail;
3865 			nm_kr_put(kring);
3866 			if (found) {
3867 				revents |= want_rx;
3868 				retry_rx = 0;
3869 #ifndef linux
3870 				kring->nm_notify(kring, 0);
3871 #endif /* !linux */
3872 			}
3873 		}
3874 
3875 #ifndef linux
3876 		if (retry_rx && sr) {
3877 			nm_os_selrecord(sr, si[NR_RX]);
3878 		}
3879 #endif /* !linux */
3880 		if (send_down || retry_rx) {
3881 			retry_rx = 0;
3882 			if (send_down)
3883 				goto flush_tx; /* and retry_rx */
3884 			else
3885 				goto do_retry_rx;
3886 		}
3887 	}
3888 
3889 	/*
3890 	 * Transparent mode: released bufs (i.e. between kring->nr_hwcur and
3891 	 * ring->head) marked with NS_FORWARD on hw rx rings are passed up
3892 	 * to the host stack.
3893 	 */
3894 
3895 	if (mbq_peek(&q)) {
3896 		netmap_send_up(na->ifp, &q);
3897 	}
3898 
3899 	return (revents);
3900 #undef want_tx
3901 #undef want_rx
3902 }
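
/*
 * Illustrative userspace sketch (not compiled here): a receive loop
 * built on the poll() semantics implemented above, using the helpers
 * from net/netmap_user.h.  'fd' is an open and registered /dev/netmap
 * descriptor, 'nifp' the corresponding mmapped netmap_if, and
 * 'first_rx_ring'/'last_rx_ring' the bound ring range; all of them are
 * placeholders.
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	u_int i;
 *
 *	for (;;) {
 *		poll(&pfd, 1, -1);
 *		for (i = first_rx_ring; i <= last_rx_ring; i++) {
 *			struct netmap_ring *ring = NETMAP_RXRING(nifp, i);
 *
 *			while (!nm_ring_empty(ring)) {
 *				struct netmap_slot *slot = &ring->slot[ring->cur];
 *				char *buf = NETMAP_BUF(ring, slot->buf_idx);
 *
 *				... consume slot->len bytes at buf ...
 *				ring->head = ring->cur = nm_ring_next(ring, ring->cur);
 *			}
 *		}
 *	}
 */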
3903 
3904 int
3905 nma_intr_enable(struct netmap_adapter *na, int onoff)
3906 {
3907 	bool changed = false;
3908 	enum txrx t;
3909 	int i;
3910 
3911 	for_rx_tx(t) {
3912 		for (i = 0; i < nma_get_nrings(na, t); i++) {
3913 			struct netmap_kring *kring = NMR(na, t)[i];
3914 			int on = !(kring->nr_kflags & NKR_NOINTR);
3915 
3916 			if (!!onoff != !!on) {
3917 				changed = true;
3918 			}
3919 			if (onoff) {
3920 				kring->nr_kflags &= ~NKR_NOINTR;
3921 			} else {
3922 				kring->nr_kflags |= NKR_NOINTR;
3923 			}
3924 		}
3925 	}
3926 
3927 	if (!changed) {
3928 		return 0; /* nothing to do */
3929 	}
3930 
3931 	if (!na->nm_intr) {
3932 		nm_prerr("Cannot %s interrupts for %s", onoff ? "enable" : "disable",
3933 		  na->name);
3934 		return -1;
3935 	}
3936 
3937 	na->nm_intr(na, onoff);
3938 
3939 	return 0;
3940 }
3941 
3942 
3943 /*-------------------- driver support routines -------------------*/
3944 
3945 /* default notify callback */
3946 static int
3947 netmap_notify(struct netmap_kring *kring, int flags)
3948 {
3949 	struct netmap_adapter *na = kring->notify_na;
3950 	enum txrx t = kring->tx;
3951 
3952 	nm_os_selwakeup(&kring->si);
3953 	/* optimization: avoid a wake up on the global
3954 	 * queue if nobody has registered for more
3955 	 * than one ring
3956 	 */
3957 	if (na->si_users[t] > 0)
3958 		nm_os_selwakeup(&na->si[t]);
3959 
3960 	return NM_IRQ_COMPLETED;
3961 }
3962 
3963 /* called by all routines that create netmap_adapters.
3964  * provide some defaults and get a reference to the
3965  * memory allocator
3966  */
3967 int
3968 netmap_attach_common(struct netmap_adapter *na)
3969 {
3970 	if (!na->rx_buf_maxsize) {
3971 		/* Set a conservative default (larger is safer). */
3972 		na->rx_buf_maxsize = PAGE_SIZE;
3973 	}
3974 
3975 #ifdef __FreeBSD__
3976 	if (na->na_flags & NAF_HOST_RINGS && na->ifp) {
3977 		na->if_input = na->ifp->if_input; /* for netmap_send_up */
3978 	}
3979 	na->pdev = na; /* make sure netmap_mem_map() is called */
3980 #endif /* __FreeBSD__ */
3981 	if (na->na_flags & NAF_HOST_RINGS) {
3982 		if (na->num_host_rx_rings == 0)
3983 			na->num_host_rx_rings = 1;
3984 		if (na->num_host_tx_rings == 0)
3985 			na->num_host_tx_rings = 1;
3986 	}
3987 	if (na->nm_krings_create == NULL) {
3988 		/* we assume that we have been called by a driver,
3989 		 * since other port types all provide their own
3990 		 * nm_krings_create
3991 		 */
3992 		na->nm_krings_create = netmap_hw_krings_create;
3993 		na->nm_krings_delete = netmap_hw_krings_delete;
3994 	}
3995 	if (na->nm_notify == NULL)
3996 		na->nm_notify = netmap_notify;
3997 	na->active_fds = 0;
3998 
3999 	if (na->nm_mem == NULL) {
4000 		/* use iommu or global allocator */
4001 		na->nm_mem = netmap_mem_get_iommu(na);
4002 	}
4003 	if (na->nm_bdg_attach == NULL)
4004 		/* no special nm_bdg_attach callback. On VALE
4005 		 * attach, we need to interpose a bwrap
4006 		 */
4007 		na->nm_bdg_attach = netmap_default_bdg_attach;
4008 
4009 	return 0;
4010 }
4011 
4012 /* Wrapper for the register callback provided by netmap-enabled
4013  * hardware drivers.
4014  * nm_iszombie(na) means that the driver module has been
4015  * unloaded, so we cannot call into it.
4016  * nm_os_ifnet_lock() must guarantee mutual exclusion with
4017  * module unloading.
4018  */
4019 static int
4020 netmap_hw_reg(struct netmap_adapter *na, int onoff)
4021 {
4022 	struct netmap_hw_adapter *hwna =
4023 		(struct netmap_hw_adapter*)na;
4024 	int error = 0;
4025 
4026 	nm_os_ifnet_lock();
4027 
4028 	if (nm_iszombie(na)) {
4029 		if (onoff) {
4030 			error = ENXIO;
4031 		} else if (na != NULL) {
4032 			na->na_flags &= ~NAF_NETMAP_ON;
4033 		}
4034 		goto out;
4035 	}
4036 
4037 	error = hwna->nm_hw_register(na, onoff);
4038 
4039 out:
4040 	nm_os_ifnet_unlock();
4041 
4042 	return error;
4043 }
4044 
4045 static void
4046 netmap_hw_dtor(struct netmap_adapter *na)
4047 {
4048 	if (na->ifp == NULL)
4049 		return;
4050 
4051 	NM_DETACH_NA(na->ifp);
4052 }
4053 
4054 
4055 /*
4056  * Allocate a netmap_adapter object, and initialize it from the
4057  * 'arg' passed by the driver on attach.
4058  * We allocate a block of memory of 'size' bytes, which has room
4059  * for struct netmap_adapter plus additional room private to
4060  * the caller.
4061  * Return 0 on success, ENOMEM otherwise.
4062  */
4063 int
4064 netmap_attach_ext(struct netmap_adapter *arg, size_t size, int override_reg)
4065 {
4066 	struct netmap_hw_adapter *hwna = NULL;
4067 	struct ifnet *ifp = NULL;
4068 
4069 	if (size < sizeof(struct netmap_hw_adapter)) {
4070 		if (netmap_debug & NM_DEBUG_ON)
4071 			nm_prerr("Invalid netmap adapter size %d", (int)size);
4072 		return EINVAL;
4073 	}
4074 
4075 	if (arg == NULL || arg->ifp == NULL) {
4076 		if (netmap_debug & NM_DEBUG_ON)
4077 			nm_prerr("either arg or arg->ifp is NULL");
4078 		return EINVAL;
4079 	}
4080 
4081 	if (arg->num_tx_rings == 0 || arg->num_rx_rings == 0) {
4082 		if (netmap_debug & NM_DEBUG_ON)
4083 			nm_prerr("%s: invalid rings tx %d rx %d",
4084 				arg->name, arg->num_tx_rings, arg->num_rx_rings);
4085 		return EINVAL;
4086 	}
4087 
4088 	ifp = arg->ifp;
4089 	if (NM_NA_CLASH(ifp)) {
4090 		/* If NA(ifp) is not null but there is no valid netmap
4091 		 * adapter it means that someone else is using the same
4092 		 * pointer (e.g. ax25_ptr on linux). This happens for
4093 		 * instance when also PF_RING is in use. */
4094 		nm_prerr("Error: netmap adapter hook is busy");
4095 		return EBUSY;
4096 	}
4097 
4098 	hwna = nm_os_malloc(size);
4099 	if (hwna == NULL)
4100 		goto fail;
4101 	hwna->up = *arg;
4102 	hwna->up.na_flags |= NAF_HOST_RINGS | NAF_NATIVE;
4103 	strlcpy(hwna->up.name, ifp->if_xname, sizeof(hwna->up.name));
4104 	if (override_reg) {
4105 		hwna->nm_hw_register = hwna->up.nm_register;
4106 		hwna->up.nm_register = netmap_hw_reg;
4107 	}
4108 	if (netmap_attach_common(&hwna->up)) {
4109 		nm_os_free(hwna);
4110 		goto fail;
4111 	}
4112 	netmap_adapter_get(&hwna->up);
4113 
4114 	NM_ATTACH_NA(ifp, &hwna->up);
4115 
4116 	nm_os_onattach(ifp);
4117 
4118 	if (arg->nm_dtor == NULL) {
4119 		hwna->up.nm_dtor = netmap_hw_dtor;
4120 	}
4121 
4122 	if_printf(ifp, "netmap queues/slots: TX %d/%d, RX %d/%d\n",
4123 	    hwna->up.num_tx_rings, hwna->up.num_tx_desc,
4124 	    hwna->up.num_rx_rings, hwna->up.num_rx_desc);
4125 	return 0;
4126 
4127 fail:
4128 	nm_prerr("fail, arg %p ifp %p na %p", arg, ifp, hwna);
4129 	return (hwna ? EINVAL : ENOMEM);
4130 }
4131 
4132 
4133 int
4134 netmap_attach(struct netmap_adapter *arg)
4135 {
4136 	return netmap_attach_ext(arg, sizeof(struct netmap_hw_adapter),
4137 			1 /* override nm_reg */);
4138 }
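
/*
 * Illustrative driver-side sketch (not compiled here): a hardware
 * driver typically fills a struct netmap_adapter on the stack and
 * calls netmap_attach() at the end of its attach routine.  The
 * foo_netmap_* callbacks and the 'sc' softc fields are hypothetical
 * driver names used only for this example.
 *
 *	struct netmap_adapter na;
 *
 *	bzero(&na, sizeof(na));
 *	na.ifp = sc->ifp;
 *	na.num_tx_desc = sc->num_tx_desc;
 *	na.num_rx_desc = sc->num_rx_desc;
 *	na.num_tx_rings = na.num_rx_rings = sc->num_queues;
 *	na.nm_register = foo_netmap_reg;
 *	na.nm_txsync = foo_netmap_txsync;
 *	na.nm_rxsync = foo_netmap_rxsync;
 *	netmap_attach(&na);
 */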
4139 
4140 
4141 void
4142 NM_DBG(netmap_adapter_get)(struct netmap_adapter *na)
4143 {
4144 	if (!na) {
4145 		return;
4146 	}
4147 
4148 	refcount_acquire(&na->na_refcount);
4149 }
4150 
4151 
4152 /* returns 1 iff the netmap_adapter is destroyed */
4153 int
4154 NM_DBG(netmap_adapter_put)(struct netmap_adapter *na)
4155 {
4156 	if (!na)
4157 		return 1;
4158 
4159 	if (!refcount_release(&na->na_refcount))
4160 		return 0;
4161 
4162 	if (na->nm_dtor)
4163 		na->nm_dtor(na);
4164 
4165 	if (na->tx_rings) { /* XXX should not happen */
4166 		if (netmap_debug & NM_DEBUG_ON)
4167 			nm_prerr("freeing leftover tx_rings");
4168 		na->nm_krings_delete(na);
4169 	}
4170 	netmap_pipe_dealloc(na);
4171 	if (na->nm_mem)
4172 		netmap_mem_put(na->nm_mem);
4173 	bzero(na, sizeof(*na));
4174 	nm_os_free(na);
4175 
4176 	return 1;
4177 }
4178 
4179 /* nm_krings_create callback for all hardware native adapters */
4180 int
4181 netmap_hw_krings_create(struct netmap_adapter *na)
4182 {
4183 	int ret = netmap_krings_create(na, 0);
4184 	if (ret == 0) {
4185 		/* initialize the mbq for the sw rx ring */
4186 		u_int lim = netmap_real_rings(na, NR_RX), i;
4187 		for (i = na->num_rx_rings; i < lim; i++) {
4188 			mbq_safe_init(&NMR(na, NR_RX)[i]->rx_queue);
4189 		}
4190 		nm_prdis("initialized sw rx queue %d", na->num_rx_rings);
4191 	}
4192 	return ret;
4193 }
4194 
4195 
4196 
4197 /*
4198  * Called on module unload by the netmap-enabled drivers
4199  */
4200 void
4201 netmap_detach(struct ifnet *ifp)
4202 {
4203 	struct netmap_adapter *na;
4204 
4205 	NMG_LOCK();
4206 
4207 	if (!NM_NA_VALID(ifp)) {
4208 		NMG_UNLOCK();
4209 		return;
4210 	}
4211 
4212 	na = NA(ifp);
4213 	netmap_set_all_rings(na, NM_KR_LOCKED);
4214 	/*
4215 	 * if the netmap adapter is not native, somebody
4216 	 * changed it, so we cannot release it here.
4217 	 * The NAF_ZOMBIE flag will notify the new owner that
4218 	 * the driver is gone.
4219 	 */
4220 	if (!(na->na_flags & NAF_NATIVE) || !netmap_adapter_put(na)) {
4221 		na->na_flags |= NAF_ZOMBIE;
4222 	}
4223 	/* give active users a chance to notice that NAF_ZOMBIE has been
4224 	 * turned on, so that they can stop and return an error to userspace.
4225 	 * Note that this becomes a NOP if there are no active users and,
4226 	 * therefore, the put() above has deleted the na, since now NA(ifp) is
4227 	 * NULL.
4228 	 */
4229 	netmap_enable_all_rings(ifp);
4230 	NMG_UNLOCK();
4231 }
4232 
4233 
4234 /*
4235  * Intercept packets from the network stack and pass them
4236  * to netmap as incoming packets on the 'software' ring.
4237  *
4238  * We only store packets in a bounded mbq and then copy them
4239  * in the relevant rxsync routine.
4240  *
4241  * We rely on the OS to make sure that the ifp and na do not go
4242  * away (typically the caller checks for IFF_DRV_RUNNING or the like).
4243  * In nm_register() or whenever there is a reinitialization,
4244  * we make sure to make the mode change visible here.
4245  */
4246 int
4247 netmap_transmit(struct ifnet *ifp, struct mbuf *m)
4248 {
4249 	struct netmap_adapter *na = NA(ifp);
4250 	struct netmap_kring *kring, *tx_kring;
4251 	u_int len = MBUF_LEN(m);
4252 	u_int error = ENOBUFS;
4253 	unsigned int txr;
4254 	struct mbq *q;
4255 	int busy;
4256 	u_int i;
4257 
4258 	i = MBUF_TXQ(m);
4259 	if (i >= na->num_host_rx_rings) {
4260 		i = i % na->num_host_rx_rings;
4261 	}
4262 	kring = NMR(na, NR_RX)[nma_get_nrings(na, NR_RX) + i];
4263 
4264 	// XXX [Linux] we do not need this lock
4265 	// if we follow the down/configure/up protocol -gl
4266 	// mtx_lock(&na->core_lock);
4267 
4268 	if (!nm_netmap_on(na)) {
4269 		nm_prerr("%s not in netmap mode anymore", na->name);
4270 		error = ENXIO;
4271 		goto done;
4272 	}
4273 
4274 	txr = MBUF_TXQ(m);
4275 	if (txr >= na->num_tx_rings) {
4276 		txr %= na->num_tx_rings;
4277 	}
4278 	tx_kring = NMR(na, NR_TX)[txr];
4279 
4280 	if (tx_kring->nr_mode == NKR_NETMAP_OFF) {
4281 		return MBUF_TRANSMIT(na, ifp, m);
4282 	}
4283 
4284 	q = &kring->rx_queue;
4285 
4286 	// XXX reconsider long packets if we handle fragments
4287 	if (len > NETMAP_BUF_SIZE(na)) { /* too long for us */
4288 		nm_prerr("%s from_host, drop packet size %d > %d", na->name,
4289 			len, NETMAP_BUF_SIZE(na));
4290 		goto done;
4291 	}
4292 
4293 	if (!netmap_generic_hwcsum) {
4294 		if (nm_os_mbuf_has_csum_offld(m)) {
4295 			nm_prlim(1, "%s drop mbuf that needs checksum offload", na->name);
4296 			goto done;
4297 		}
4298 	}
4299 
4300 	if (nm_os_mbuf_has_seg_offld(m)) {
4301 		nm_prlim(1, "%s drop mbuf that needs generic segmentation offload", na->name);
4302 		goto done;
4303 	}
4304 
4305 #ifdef __FreeBSD__
4306 	ETHER_BPF_MTAP(ifp, m);
4307 #endif /* __FreeBSD__ */
4308 
4309 	/* protect against netmap_rxsync_from_host(), netmap_sw_to_nic()
4310 	 * and maybe other instances of netmap_transmit (the latter
4311 	 * not possible on Linux).
4312 	 * We enqueue the mbuf only if we are sure there is going to be
4313 	 * enough room in the host RX ring, otherwise we drop it.
4314 	 */
4315 	mbq_lock(q);
4316 
4317 	busy = kring->nr_hwtail - kring->nr_hwcur;
4318 	if (busy < 0)
4319 		busy += kring->nkr_num_slots;
4320 	if (busy + mbq_len(q) >= kring->nkr_num_slots - 1) {
4321 		nm_prlim(2, "%s full hwcur %d hwtail %d qlen %d", na->name,
4322 			kring->nr_hwcur, kring->nr_hwtail, mbq_len(q));
4323 	} else {
4324 		mbq_enqueue(q, m);
4325 		nm_prdis(2, "%s %d bufs in queue", na->name, mbq_len(q));
4326 		/* notify outside the lock */
4327 		m = NULL;
4328 		error = 0;
4329 	}
4330 	mbq_unlock(q);
4331 
4332 done:
4333 	if (m)
4334 		m_freem(m);
4335 	/* unconditionally wake up listeners */
4336 	kring->nm_notify(kring, 0);
4337 	/* this is normally netmap_notify(), but for nics
4338 	 * connected to a bridge it is netmap_bwrap_intr_notify(),
4339 	 * that possibly forwards the frames through the switch
4340 	 */
4341 
4342 	return (error);
4343 }
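
/*
 * Illustrative userspace sketch (not compiled here): the packets queued
 * above land on the host RX ring(s), which a process can bind by
 * requesting NR_REG_SW in the registration body (or by appending '^'
 * to the port name when using the nm_open() helper).  The sketch
 * reuses the 'hdr' and 'fd' placeholders from the earlier examples;
 * "em0" is a placeholder interface name.
 *
 *	struct nmreq_register reg;
 *
 *	memset(&reg, 0, sizeof(reg));
 *	reg.nr_mode = NR_REG_SW;
 *	hdr.nr_reqtype = NETMAP_REQ_REGISTER;
 *	hdr.nr_body = (uintptr_t)&reg;
 *	strlcpy(hdr.nr_name, "em0", sizeof(hdr.nr_name));
 *	ioctl(fd, NIOCCTRL, &hdr);
 */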
4344 
4345 
4346 /*
4347  * Reset function to be called by the driver routines when reinitializing
4348  * a hardware ring. The driver is in charge of locking to protect the kring
4349  * while this operation is being performed. This is normally achieved by
4350  * calling netmap_disable_all_rings() before triggering a reset.
4351  * If the kring is not in netmap mode, return NULL to inform the caller
4352  * that this is the case.
4353  * If the kring is in netmap mode, set hwofs so that the netmap indices
4354  * seen by userspace (head/cur/tail) do not change, although the internal
4355  * NIC indices have been reset to 0.
4356  * In any case, adjust kring->nr_mode.
4357  */
4358 struct netmap_slot *
4359 netmap_reset(struct netmap_adapter *na, enum txrx tx, u_int n,
4360 	u_int new_cur)
4361 {
4362 	struct netmap_kring *kring;
4363 	u_int new_hwtail, new_hwofs;
4364 
4365 	if (!nm_native_on(na)) {
4366 		nm_prdis("interface not in native netmap mode");
4367 		return NULL;	/* nothing to reinitialize */
4368 	}
4369 
4370 	if (tx == NR_TX) {
4371 		if (n >= na->num_tx_rings)
4372 			return NULL;
4373 		kring = na->tx_rings[n];
4374 		/*
4375 		 * Set hwofs to rhead, so that slots[rhead] is mapped to
4376 		 * the NIC internal slot 0, and thus the netmap buffer
4377 		 * at rhead is the next to be transmitted. Transmissions
4378 		 * that were pending before the reset are considered as
4379 		 * sent, so that we can have hwcur = rhead. All the slots
4380 		 * are now owned by the user, so we can also reinit hwtail.
4381 		 */
4382 		new_hwofs = kring->rhead;
4383 		new_hwtail = nm_prev(kring->rhead, kring->nkr_num_slots - 1);
4384 	} else {
4385 		if (n >= na->num_rx_rings)
4386 			return NULL;
4387 		kring = na->rx_rings[n];
4388 		/*
4389 		 * Set hwofs to hwtail, so that slots[hwtail] is mapped to
4390 		 * the NIC internal slot 0, and thus the netmap buffer
4391 		 * at hwtail is the next to be given to the NIC.
4392 		 * Unread slots (the ones in [rhead,hwtail[) are owned by
4393 		 * the user, and thus the caller cannot give them
4394 		 * to the NIC right now.
4395 		 */
4396 		new_hwofs = kring->nr_hwtail;
4397 		new_hwtail = kring->nr_hwtail;
4398 	}
4399 	if (kring->nr_pending_mode == NKR_NETMAP_OFF) {
4400 		kring->nr_mode = NKR_NETMAP_OFF;
4401 		return NULL;
4402 	}
4403 	if (netmap_verbose) {
4404 	    nm_prinf("%s, hc %u->%u, ht %u->%u, ho %u->%u", kring->name,
4405 	        kring->nr_hwcur, kring->rhead,
4406 	        kring->nr_hwtail, new_hwtail,
4407 		kring->nkr_hwofs, new_hwofs);
4408 	}
4409 	kring->nr_hwcur = kring->rhead;
4410 	kring->nr_hwtail = new_hwtail;
4411 	kring->nkr_hwofs = new_hwofs;
4412 
4413 	/*
4414 	 * Wakeup on the individual and global selwait
4415 	 * We do the wakeup here, but the ring is not yet reconfigured.
4416 	 * However, we are under lock so there are no races.
4417 	 */
4418 	kring->nr_mode = NKR_NETMAP_ON;
4419 	kring->nm_notify(kring, 0);
4420 	return kring->ring->slot;
4421 }
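
/*
 * Illustrative driver-side sketch (not compiled here): a typical use of
 * netmap_reset() in a TX ring (re)initialization path.  'ring_nr' and
 * the descriptor-programming step are hypothetical driver details.
 *
 *	struct netmap_slot *slot;
 *	u_int l;
 *
 *	slot = netmap_reset(na, NR_TX, ring_nr, 0);
 *	if (slot != NULL) {
 *		for (l = 0; l < na->num_tx_desc; l++) {
 *			int si = netmap_idx_n2k(na->tx_rings[ring_nr], l);
 *			uint64_t paddr;
 *			void *addr = PNMB(na, slot + si, &paddr);
 *
 *			... program NIC descriptor 'l' with paddr/addr ...
 *		}
 *	}
 */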
4422 
4423 
4424 /*
4425  * Dispatch rx/tx interrupts to the netmap rings.
4426  *
4427  * "work_done" is non-null on the RX path, NULL for the TX path.
4428  * We rely on the OS to make sure that there is only one active
4429  * instance per queue, and that there is appropriate locking.
4430  *
4431  * The 'notify' routine depends on what the ring is attached to.
4432  * - for a netmap file descriptor, do a selwakeup on the individual
4433  *   waitqueue, plus one on the global one if needed
4434  *   (see netmap_notify)
4435  * - for a nic connected to a switch, call the proper forwarding routine
4436  *   (see netmap_bwrap_intr_notify)
4437  */
4438 int
4439 netmap_common_irq(struct netmap_adapter *na, u_int q, u_int *work_done)
4440 {
4441 	struct netmap_kring *kring;
4442 	enum txrx t = (work_done ? NR_RX : NR_TX);
4443 
4444 	q &= NETMAP_RING_MASK;
4445 
4446 	if (netmap_debug & (NM_DEBUG_RXINTR|NM_DEBUG_TXINTR)) {
4447 	        nm_prlim(5, "received %s queue %d", work_done ? "RX" : "TX", q);
4448 	}
4449 
4450 	if (q >= nma_get_nrings(na, t))
4451 		return NM_IRQ_PASS; // not a physical queue
4452 
4453 	kring = NMR(na, t)[q];
4454 
4455 	if (kring->nr_mode == NKR_NETMAP_OFF) {
4456 		return NM_IRQ_PASS;
4457 	}
4458 
4459 	if (t == NR_RX) {
4460 		kring->nr_kflags |= NKR_PENDINTR;	// XXX atomic ?
4461 		*work_done = 1; /* do not fire napi again */
4462 	}
4463 
4464 	return kring->nm_notify(kring, 0);
4465 }
4466 
4467 
4468 /*
4469  * Default functions to handle rx/tx interrupts from a physical device.
4470  * "work_done" is non-null on the RX path, NULL for the TX path.
4471  *
4472  * If the card is not in netmap mode, simply return NM_IRQ_PASS,
4473  * so that the caller proceeds with regular processing.
4474  * Otherwise call netmap_common_irq().
4475  *
4476  * If the card is connected to a netmap file descriptor,
4477  * do a selwakeup on the individual queue, plus one on the global one
4478  * if needed (multiqueue card _and_ there are multiqueue listeners),
4479  * and return NR_IRQ_COMPLETED.
4480  *
4481  * Finally, if called on rx from an interface connected to a switch,
4482  * calls the proper forwarding routine.
4483  */
4484 int
4485 netmap_rx_irq(struct ifnet *ifp, u_int q, u_int *work_done)
4486 {
4487 	struct netmap_adapter *na = NA(ifp);
4488 
4489 	/*
4490 	 * XXX emulated netmap mode sets NAF_SKIP_INTR so
4491 	 * we still use the regular driver even though the previous
4492 	 * check fails. It is unclear whether we should use
4493 	 * nm_native_on() here.
4494 	 */
4495 	if (!nm_netmap_on(na))
4496 		return NM_IRQ_PASS;
4497 
4498 	if (na->na_flags & NAF_SKIP_INTR) {
4499 		nm_prdis("use regular interrupt");
4500 		return NM_IRQ_PASS;
4501 	}
4502 
4503 	return netmap_common_irq(na, q, work_done);
4504 }
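
/*
 * Illustrative driver-side sketch (not compiled here): an RX interrupt
 * handler gives netmap the first chance to serve the queue and skips
 * the normal datapath unless NM_IRQ_PASS is returned.  'rxr->me' and
 * 'work_done' are hypothetical driver-local names.
 *
 *	if (netmap_rx_irq(ifp, rxr->me, &work_done) != NM_IRQ_PASS)
 *		return;
 */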
4505 
4506 /* set/clear native flags and if_transmit/netdev_ops */
4507 void
4508 nm_set_native_flags(struct netmap_adapter *na)
4509 {
4510 	struct ifnet *ifp = na->ifp;
4511 
4512 	/* We do the setup for intercepting packets only if we are the
4513 	 * first user of this adapter. */
4514 	if (na->active_fds > 0) {
4515 		return;
4516 	}
4517 
4518 	na->na_flags |= NAF_NETMAP_ON;
4519 	nm_os_onenter(ifp);
4520 	netmap_update_hostrings_mode(na);
4521 }
4522 
4523 void
4524 nm_clear_native_flags(struct netmap_adapter *na)
4525 {
4526 	struct ifnet *ifp = na->ifp;
4527 
4528 	/* We undo the setup for intercepting packets only if we are the
4529 	 * last user of this adapter. */
4530 	if (na->active_fds > 0) {
4531 		return;
4532 	}
4533 
4534 	netmap_update_hostrings_mode(na);
4535 	nm_os_onexit(ifp);
4536 
4537 	na->na_flags &= ~NAF_NETMAP_ON;
4538 }
4539 
4540 void
4541 netmap_krings_mode_commit(struct netmap_adapter *na, int onoff)
4542 {
4543 	enum txrx t;
4544 
4545 	for_rx_tx(t) {
4546 		int i;
4547 
4548 		for (i = 0; i < netmap_real_rings(na, t); i++) {
4549 			struct netmap_kring *kring = NMR(na, t)[i];
4550 
4551 			if (onoff && nm_kring_pending_on(kring))
4552 				kring->nr_mode = NKR_NETMAP_ON;
4553 			else if (!onoff && nm_kring_pending_off(kring))
4554 				kring->nr_mode = NKR_NETMAP_OFF;
4555 		}
4556 	}
4557 }
4558 
4559 /*
4560  * Module loader and unloader
4561  *
4562  * netmap_init() creates the /dev/netmap device and initializes
4563  * all global variables. Returns 0 on success, errno on failure
4564  * (though failure is not expected in practice)
4565  *
4566  * netmap_fini() destroys everything.
4567  */
4568 
4569 static struct cdev *netmap_dev; /* /dev/netmap character device. */
4570 extern struct cdevsw netmap_cdevsw;
4571 
4572 
4573 void
4574 netmap_fini(void)
4575 {
4576 	if (netmap_dev)
4577 		destroy_dev(netmap_dev);
4578 	/* we assume that there are no longer netmap users */
4579 	nm_os_ifnet_fini();
4580 	netmap_uninit_bridges();
4581 	netmap_mem_fini();
4582 	NMG_LOCK_DESTROY();
4583 	nm_prinf("netmap: unloaded module.");
4584 }
4585 
4586 
4587 int
4588 netmap_init(void)
4589 {
4590 	int error;
4591 
4592 	NMG_LOCK_INIT();
4593 
4594 	error = netmap_mem_init();
4595 	if (error != 0)
4596 		goto fail;
4597 	/*
4598 	 * MAKEDEV_ETERNAL_KLD avoids an expensive check on syscalls
4599 	 * when the module is compiled in.
4600 	 * XXX could use make_dev_credv() to get error number
4601 	 */
4602 	netmap_dev = make_dev_credf(MAKEDEV_ETERNAL_KLD,
4603 		&netmap_cdevsw, 0, NULL, UID_ROOT, GID_WHEEL, 0600,
4604 			      "netmap");
4605 	if (!netmap_dev)
4606 		goto fail;
4607 
4608 	error = netmap_init_bridges();
4609 	if (error)
4610 		goto fail;
4611 
4612 #ifdef __FreeBSD__
4613 	nm_os_vi_init_index();
4614 #endif
4615 
4616 	error = nm_os_ifnet_init();
4617 	if (error)
4618 		goto fail;
4619 
4620 #if !defined(__FreeBSD__) || defined(KLD_MODULE)
4621 	nm_prinf("netmap: loaded module");
4622 #endif
4623 	return (0);
4624 fail:
4625 	netmap_fini();
4626 	return (EINVAL); /* may be incorrect */
4627 }
4628