/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2011-2014 Matteo Landi
 * Copyright (C) 2011-2016 Luigi Rizzo
 * Copyright (C) 2011-2016 Giuseppe Lettieri
 * Copyright (C) 2011-2016 Vincenzo Maffione
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */


/*
 * $FreeBSD$
 *
 * This module supports memory mapped access to network devices,
 * see netmap(4).
 *
 * The module uses a large memory pool allocated by the kernel
 * and accessible as mmapped memory by multiple userspace threads/processes.
 * The memory pool contains packet buffers and "netmap rings",
 * i.e. user-accessible copies of the interface's queues.
 *
 * Access to the network card works like this:
 * 1. a process/thread issues one or more open() on /dev/netmap, to create
 *    a select()able file descriptor on which events are reported.
 * 2. on each descriptor, the process issues an ioctl() to identify
 *    the interface that should report events to the file descriptor.
 * 3. on each descriptor, the process issues an mmap() request to
 *    map the shared memory region within the process' address space.
 *    The list of interesting queues is indicated by a location in
 *    the shared memory region.
 * 4. using the functions in the netmap(4) userspace API, a process
 *    can look up the occupation state of a queue, access memory buffers,
 *    and retrieve received packets or enqueue packets to transmit.
 * 5. using some ioctl()s the process can synchronize the userspace view
 *    of the queue with the actual status in the kernel. This includes both
 *    receiving the notification of new packets, and transmitting new
 *    packets on the output interface.
 * 6. select() or poll() can be used to wait for events on individual
 *    transmit or receive queues (or all queues for a given interface).
 *
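 * A minimal userspace sketch of steps 1-6 above, using the legacy
 * struct nmreq interface (this file also supports the newer
 * nmreq_header-based API); "em0" is a placeholder interface name,
 * error handling is omitted, and the NETMAP_* macros and nm_ring_*
 * helpers come from <net/netmap_user.h>:
 *
 *	int fd = open("/dev/netmap", O_RDWR);		// step 1
 *	struct nmreq req;
 *
 *	memset(&req, 0, sizeof(req));
 *	req.nr_version = NETMAP_API;
 *	strlcpy(req.nr_name, "em0", sizeof(req.nr_name));
 *	ioctl(fd, NIOCREGIF, &req);			// step 2
 *	char *mem = mmap(NULL, req.nr_memsize,
 *	    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);	// step 3
 *	struct netmap_if *nifp = NETMAP_IF(mem, req.nr_offset);
 *	struct netmap_ring *rxr = NETMAP_RXRING(nifp, 0); // step 4
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	for (;;) {
 *		poll(&pfd, 1, -1);			// steps 5-6
 *		while (!nm_ring_empty(rxr)) {
 *			struct netmap_slot *slot = &rxr->slot[rxr->cur];
 *			char *buf = NETMAP_BUF(rxr, slot->buf_idx);
 *
 *			// ... consume slot->len bytes at buf ...
 *			rxr->head = rxr->cur = nm_ring_next(rxr, rxr->cur);
 *		}
 *	}
 *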

		SYNCHRONIZATION (USER)

The netmap rings and data structures may be shared among multiple
user threads or even independent processes.
Any synchronization among those threads/processes is delegated
to the threads themselves. Only one thread at a time can be in
a system call on the same netmap ring. The OS does not enforce
this and only guarantees against system crashes in case of
invalid usage.

		LOCKING (INTERNAL)

Within the kernel, access to the netmap rings is protected as follows:

- a spinlock on each ring, to handle producer/consumer races on
  RX rings attached to the host stack (against multiple host
  threads writing from the host stack to the same ring),
  and on 'destination' rings attached to a VALE switch
  (i.e. RX rings in VALE ports, and TX rings in NIC/host ports),
  protecting multiple active senders for the same destination

- an atomic variable to guarantee that there is at most one
  instance of *_*xsync() on the ring at any time.
  For rings connected to user file
  descriptors, an atomic_test_and_set() protects this, and the
  lock on the ring is not actually used.
  For NIC RX rings connected to a VALE switch, an atomic_test_and_set()
  is also used to prevent multiple executions (the driver might indeed
  already guarantee this).
  For NIC TX rings connected to a VALE switch, the lock arbitrates
  access to the queue (both when allocating buffers and when pushing
  them out).

- *xsync() should be protected against initializations of the card.
  On FreeBSD most devices have the reset routine protected by
  a RING lock (ixgbe, igb, em) or core lock (re). lem is missing
  the RING protection on rx_reset(); this should be added.

  On linux there is an external lock on the tx path, which probably
  also arbitrates access to the reset routine. XXX to be revised

- a per-interface core_lock protecting access from the host stack
  while interfaces may be detached from netmap mode.
  XXX there should be no need for this lock if we detach the interfaces
  only while they are down.


--- VALE SWITCH ---

NMG_LOCK() serializes all modifications to switches and ports.
A switch cannot be deleted until all ports are gone.

For each switch, an SX lock (RWlock on linux) protects
deletion of ports. When a port is added or deleted, the
lock is acquired in exclusive mode (after holding NMG_LOCK).
When forwarding, the lock is acquired in shared mode (without NMG_LOCK).
The lock is held throughout the entire forwarding cycle,
during which the thread may incur a page fault.
Hence it is important that sleepable shared locks are used.

On the rx ring, the per-port lock is grabbed initially to reserve
a number of slots in the ring, then the lock is released,
packets are copied from source to destination, and then
the lock is acquired again and the receive ring is updated.
(A similar thing is done on the tx ring for NIC and host stack
ports attached to the switch)
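
A sketch of this reserve/copy/publish pattern (illustrative
pseudo-code with made-up helper names, not the actual
nm_bdg_flush() code):

	mtx_lock(&kring->q_lock);
	j = reserve_slots(kring, n);	// advance a private cursor
	mtx_unlock(&kring->q_lock);
	copy_packets(src, kring, j, n);	// may sleep on a page fault
	mtx_lock(&kring->q_lock);
	publish_slots(kring, j, n);	// make the new slots visible
	mtx_unlock(&kring->q_lock);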

 */


/* --- internals ----
 *
 * Roadmap to the code that implements the above.
 *
 * > 1. a process/thread issues one or more open() on /dev/netmap, to create
 * >    a select()able file descriptor on which events are reported.
 *
 * Internally, we allocate a netmap_priv_d structure, that will be
 * initialized on ioctl(NIOCREGIF). There is one netmap_priv_d
 * structure for each open().
 *
 *     os-specific:
 *	   FreeBSD: see netmap_open() (netmap_freebsd.c)
 *	   linux:   see linux_netmap_open() (netmap_linux.c)
 *
 * > 2. on each descriptor, the process issues an ioctl() to identify
 * >    the interface that should report events to the file descriptor.
 *
 * Implemented by netmap_ioctl(), NIOCREGIF case, with nmr->nr_cmd==0.
 * Most important things happen in netmap_get_na() and
 * netmap_do_regif(), called from there. Additional details can be
 * found in the comments above those functions.
 *
 * In all cases, this action creates/takes-a-reference-to a
 * netmap_*_adapter describing the port, and allocates a netmap_if
 * and all necessary netmap rings, filling them with netmap buffers.
 *
 * In this phase, the sync callbacks for each ring are set (these are used
 * in steps 5 and 6 below). The callbacks depend on the type of adapter.
 * The adapter creation/initialization code puts them in the
 * netmap_adapter (fields na->nm_txsync and na->nm_rxsync). Then, they
 * are copied from there to the netmap_kring's during netmap_do_regif(), by
 * the nm_krings_create() callback. All the nm_krings_create callbacks
 * actually call netmap_krings_create() to perform this and the other
 * common stuff. netmap_krings_create() also takes care of the host rings,
 * if needed, by setting their sync callbacks appropriately.
 *
 * Additional actions depend on the kind of netmap_adapter that has been
 * registered:
 *
 * - netmap_hw_adapter:		[netmap.c]
 *	This is a system netdev/ifp with native netmap support.
 *	The ifp is detached from the host stack by redirecting:
 *	  - transmissions (from the network stack) to netmap_transmit()
 *	  - receive notifications to the nm_notify() callback for
 *	    this adapter. The callback is normally netmap_notify(), unless
 *	    the ifp is attached to a bridge using bwrap, in which case it
 *	    is netmap_bwrap_intr_notify().
 *
 * - netmap_generic_adapter:	[netmap_generic.c]
 *	A system netdev/ifp without native netmap support.
 *
 * (the decision about native/non native support is taken in
 *  netmap_get_hw_na(), called by netmap_get_na())
 *
 * - netmap_vp_adapter		[netmap_vale.c]
 *	Returned by netmap_get_bdg_na().
 *	This is a persistent or ephemeral VALE port. Ephemeral ports
 *	are created on the fly if they don't already exist, and are
 *	always attached to a bridge.
 *	Persistent VALE ports must be created separately, and then
 *	attached like normal NICs. The NIOCREGIF we are examining
 *	will find them only if they had previously been created and
 *	attached (see VALE_CTL below).
 *
 * - netmap_pipe_adapter	[netmap_pipe.c]
 *	Returned by netmap_get_pipe_na().
 *	Both pipe ends are created, if they didn't already exist.
 *
 * - netmap_monitor_adapter	[netmap_monitor.c]
 *	Returned by netmap_get_monitor_na().
 *	If successful, the nm_sync callbacks of the monitored adapter
 *	will be intercepted by the returned monitor.
 *
 * - netmap_bwrap_adapter	[netmap_vale.c]
 *	Cannot be obtained in this way, see VALE_CTL below
 *
 *
 *     os-specific:
 *	   linux: we first go through linux_netmap_ioctl() to
 *	       adapt the FreeBSD interface to the linux one.
 *
 *
 * > 3. on each descriptor, the process issues an mmap() request to
 * >    map the shared memory region within the process' address space.
 * >    The list of interesting queues is indicated by a location in
 * >    the shared memory region.
 *
 *     os-specific:
 *	   FreeBSD: netmap_mmap_single (netmap_freebsd.c).
 *	   linux:   linux_netmap_mmap (netmap_linux.c).
 *
 * > 4. using the functions in the netmap(4) userspace API, a process
 * >    can look up the occupation state of a queue, access memory buffers,
 * >    and retrieve received packets or enqueue packets to transmit.
 *
 * these actions do not involve the kernel.
 *
 * > 5. using some ioctl()s the process can synchronize the userspace view
 * >    of the queue with the actual status in the kernel. This includes both
 * >    receiving the notification of new packets, and transmitting new
 * >    packets on the output interface.
 *
 * These are implemented in netmap_ioctl(), NIOCTXSYNC and NIOCRXSYNC
 * cases. They invoke the nm_sync callbacks on the netmap_kring
 * structures, as initialized in step 2 and maybe later modified
 * by a monitor. Monitors, however, will always call the original
 * callback before doing anything else.
 *
 * > 6. select() or poll() can be used to wait for events on individual
 * >    transmit or receive queues (or all queues for a given interface).
 *
 * Implemented in netmap_poll(). This will call the same nm_sync()
 * callbacks as in step 5 above.
 *
 *     os-specific:
 *	   linux: we first go through linux_netmap_poll() to adapt
 *	       the FreeBSD interface to the linux one.
 *
 *
 *  ---- VALE_CTL -----
 *
 * VALE switches are controlled by issuing a NIOCREGIF with a non-null
 * nr_cmd in the nmreq structure. These subcommands are handled by
 * netmap_bdg_ctl() in netmap_vale.c. Persistent VALE ports are created
 * and destroyed by issuing the NETMAP_BDG_NEWIF and NETMAP_BDG_DELIF
 * subcommands, respectively.
 *
 * Any network interface known to the system (including a persistent VALE
 * port) can be attached to a VALE switch by issuing the
 * NETMAP_REQ_VALE_ATTACH command. After the attachment, persistent VALE ports
 * look exactly like ephemeral VALE ports (as created in step 2 above). The
 * attachment of other interfaces, instead, requires the creation of a
 * netmap_bwrap_adapter. Moreover, the attached interface must be put in
 * netmap mode. This may require the creation of a netmap_generic_adapter if
 * we have no native support for the interface, or if generic adapters have
 * been forced by sysctl.
 *
 * Both persistent VALE ports and bwraps are handled by netmap_get_bdg_na(),
 * called by nm_bdg_ctl_attach(), and discriminated by the nm_bdg_attach()
 * callback. In the case of the bwrap, the callback creates the
 * netmap_bwrap_adapter. The initialization of the bwrap is then
 * completed by calling netmap_do_regif() on it, in the nm_bdg_ctl()
 * callback (netmap_bwrap_bdg_ctl in netmap_vale.c).
 * A generic adapter for the wrapped ifp will be created if needed, when
 * netmap_get_bdg_na() calls netmap_get_hw_na().
 *
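 * A sketch of attaching an existing interface to switch "vale0" via
 * the legacy control path mentioned above (error handling omitted;
 * "em0" is a placeholder name, fd is an open /dev/netmap descriptor):
 *
 *	struct nmreq req;
 *
 *	memset(&req, 0, sizeof(req));
 *	req.nr_version = NETMAP_API;
 *	strlcpy(req.nr_name, "vale0:em0", sizeof(req.nr_name));
 *	req.nr_cmd = NETMAP_BDG_ATTACH;
 *	ioctl(fd, NIOCREGIF, &req);
 *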
 *
 *  ---- DATAPATHS -----
 *
 *              -= SYSTEM DEVICE WITH NATIVE SUPPORT =-
 *
 *    na == NA(ifp) == netmap_hw_adapter created in DEVICE_netmap_attach()
 *
 *    - tx from netmap userspace:
 *	 concurrently:
 *           1) ioctl(NIOCTXSYNC)/netmap_poll() in process context
 *                kring->nm_sync() == DEVICE_netmap_txsync()
 *           2) device interrupt handler
 *                na->nm_notify()  == netmap_notify()
 *    - rx from netmap userspace:
 *       concurrently:
 *           1) ioctl(NIOCRXSYNC)/netmap_poll() in process context
 *                kring->nm_sync() == DEVICE_netmap_rxsync()
 *           2) device interrupt handler
 *                na->nm_notify()  == netmap_notify()
 *    - rx from host stack
 *       concurrently:
 *           1) host stack
 *                netmap_transmit()
 *                  na->nm_notify  == netmap_notify()
 *           2) ioctl(NIOCRXSYNC)/netmap_poll() in process context
 *                kring->nm_sync() == netmap_rxsync_from_host
 *                  netmap_rxsync_from_host(na, NULL, NULL)
 *    - tx to host stack
 *           ioctl(NIOCTXSYNC)/netmap_poll() in process context
 *             kring->nm_sync() == netmap_txsync_to_host
 *               netmap_txsync_to_host(na)
 *                 nm_os_send_up()
 *                   FreeBSD: na->if_input() == ether_input()
 *                   linux: netif_rx() with NM_MAGIC_PRIORITY_RX
 *
 *
 *               -= SYSTEM DEVICE WITH GENERIC SUPPORT =-
 *
 *    na == NA(ifp) == generic_netmap_adapter created in generic_netmap_attach()
 *
 *    - tx from netmap userspace:
 *       concurrently:
 *           1) ioctl(NIOCTXSYNC)/netmap_poll() in process context
 *               kring->nm_sync() == generic_netmap_txsync()
 *                   nm_os_generic_xmit_frame()
 *                       linux:   dev_queue_xmit() with NM_MAGIC_PRIORITY_TX
 *                           ifp->ndo_start_xmit == generic_ndo_start_xmit()
 *                               gna->save_start_xmit == orig. dev. start_xmit
 *                       FreeBSD: na->if_transmit() == orig. dev if_transmit
 *           2) generic_mbuf_destructor()
 *                   na->nm_notify() == netmap_notify()
 *    - rx from netmap userspace:
 *           1) ioctl(NIOCRXSYNC)/netmap_poll() in process context
 *               kring->nm_sync() == generic_netmap_rxsync()
 *                   mbq_safe_dequeue()
 *           2) device driver
 *               generic_rx_handler()
 *                   mbq_safe_enqueue()
 *                   na->nm_notify() == netmap_notify()
 *    - rx from host stack
 *        FreeBSD: same as native
 *        Linux: same as native except:
 *           1) host stack
 *               dev_queue_xmit() without NM_MAGIC_PRIORITY_TX
 *                   ifp->ndo_start_xmit == generic_ndo_start_xmit()
 *                       netmap_transmit()
 *                           na->nm_notify() == netmap_notify()
 *    - tx to host stack (same as native):
 *
 *
 *                           -= VALE =-
 *
 *   INCOMING:
 *
 *      - VALE ports:
 *          ioctl(NIOCTXSYNC)/netmap_poll() in process context
 *              kring->nm_sync() == netmap_vp_txsync()
 *
 *      - system device with native support:
 *         from cable:
 *             interrupt
 *                na->nm_notify() == netmap_bwrap_intr_notify(ring_nr != host ring)
 *                     kring->nm_sync() == DEVICE_netmap_rxsync()
 *                     netmap_vp_txsync()
 *                     kring->nm_sync() == DEVICE_netmap_rxsync()
 *         from host stack:
 *             netmap_transmit()
 *                na->nm_notify() == netmap_bwrap_intr_notify(ring_nr == host ring)
 *                     kring->nm_sync() == netmap_rxsync_from_host()
 *                     netmap_vp_txsync()
 *
 *      - system device with generic support:
 *         from device driver:
 *            generic_rx_handler()
 *                na->nm_notify() == netmap_bwrap_intr_notify(ring_nr != host ring)
 *                     kring->nm_sync() == generic_netmap_rxsync()
 *                     netmap_vp_txsync()
 *                     kring->nm_sync() == generic_netmap_rxsync()
 *         from host stack:
 *            netmap_transmit()
 *                na->nm_notify() == netmap_bwrap_intr_notify(ring_nr == host ring)
 *                     kring->nm_sync() == netmap_rxsync_from_host()
 *                     netmap_vp_txsync()
 *
 *   (all cases) --> nm_bdg_flush()
 *                      dest_na->nm_notify() == (see below)
 *
 *   OUTGOING:
 *
 *      - VALE ports:
 *         concurrently:
 *             1) ioctl(NIOCRXSYNC)/netmap_poll() in process context
 *                    kring->nm_sync() == netmap_vp_rxsync()
 *             2) from nm_bdg_flush()
 *                    na->nm_notify() == netmap_notify()
 *
 *      - system device with native support:
 *          to cable:
 *             na->nm_notify() == netmap_bwrap_notify()
 *                 netmap_vp_rxsync()
 *                 kring->nm_sync() == DEVICE_netmap_txsync()
 *                 netmap_vp_rxsync()
 *          to host stack:
 *                 netmap_vp_rxsync()
 *                 kring->nm_sync() == netmap_txsync_to_host
 *                 netmap_vp_rxsync_locked()
 *
 *      - system device with generic adapter:
 *          to device driver:
 *             na->nm_notify() == netmap_bwrap_notify()
 *                 netmap_vp_rxsync()
 *                 kring->nm_sync() == generic_netmap_txsync()
 *                 netmap_vp_rxsync()
 *          to host stack:
 *                 netmap_vp_rxsync()
 *                 kring->nm_sync() == netmap_txsync_to_host
 *                 netmap_vp_rxsync()
 *
 */

/*
 * OS-specific code that is used only within this file.
 * Other OS-specific code that must be accessed by drivers
 * is present in netmap_kern.h
 */

#if defined(__FreeBSD__)
#include <sys/cdefs.h>		/* prerequisite */
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/param.h>		/* defines used in kernel.h */
#include <sys/kernel.h>		/* types used in module initialization */
#include <sys/conf.h>		/* cdevsw struct, UID, GID */
#include <sys/filio.h>		/* FIONBIO */
#include <sys/sockio.h>
#include <sys/socketvar.h>	/* struct socket */
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/rwlock.h>
#include <sys/socket.h>		/* sockaddrs */
#include <sys/selinfo.h>
#include <sys/sysctl.h>
#include <sys/jail.h>
#include <net/vnet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/bpf.h>		/* BIOCIMMEDIATE */
#include <machine/bus.h>	/* bus_dmamap_* */
#include <sys/endian.h>
#include <sys/refcount.h>


#elif defined(linux)

#include "bsd_glue.h"

#elif defined(__APPLE__)

#warning OSX support is only partial
#include "osx_glue.h"

#elif defined (_WIN32)

#include "win_glue.h"

#else

#error	Unsupported platform

#endif /* unsupported */

/*
 * common headers
 */
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <dev/netmap/netmap_mem2.h>


/* user-controlled variables */
int netmap_verbose;

static int netmap_no_timestamp; /* don't timestamp on rxsync */
int netmap_no_pendintr = 1;
int netmap_txsync_retry = 2;
static int netmap_fwd = 0;	/* force transparent forwarding */

/*
 * netmap_admode selects the netmap mode to use.
 * Invalid values are reset to NETMAP_ADMODE_BEST
 */
enum {	NETMAP_ADMODE_BEST = 0,	/* use native, fallback to generic */
	NETMAP_ADMODE_NATIVE,	/* either native or none */
	NETMAP_ADMODE_GENERIC,	/* force generic */
	NETMAP_ADMODE_LAST };
static int netmap_admode = NETMAP_ADMODE_BEST;

/* netmap_generic_mit controls mitigation of RX notifications for
 * the generic netmap adapter. The value is a time interval in
 * nanoseconds.
 */
int netmap_generic_mit = 100*1000;

/* We use by default netmap-aware qdiscs with generic netmap adapters,
 * even though this may cause a small performance hit with hardware NICs.
 * However, using the qdisc is the safer approach, for two reasons:
 * 1) it prevents non-fifo qdiscs from breaking the TX notification
 *    scheme, which is based on mbuf destructors when txqdisc is
 *    not used.
 * 2) it makes it possible to transmit over software devices that
 *    change skb->dev, like bridge, veth, ...
 *
 * In any case, users looking for the best performance should
 * use native adapters.
 */
#ifdef linux
int netmap_generic_txqdisc = 1;
#endif

/* Default number of slots and queues for generic adapters. */
int netmap_generic_ringsize = 1024;
int netmap_generic_rings = 1;

/* Non-zero if ptnet devices are allowed to use virtio-net headers. */
int ptnet_vnet_hdr = 1;

/* 0 if ptnetmap should not use worker threads for TX processing */
int ptnetmap_tx_workers = 1;

/*
 * SYSCTL calls are grouped between SYSBEGIN and SYSEND to be emulated
 * in some other operating systems
 */
SYSBEGIN(main_init);

SYSCTL_DECL(_dev_netmap);
SYSCTL_NODE(_dev, OID_AUTO, netmap, CTLFLAG_RW, 0, "Netmap args");
SYSCTL_INT(_dev_netmap, OID_AUTO, verbose,
		CTLFLAG_RW, &netmap_verbose, 0, "Verbose mode");
SYSCTL_INT(_dev_netmap, OID_AUTO, no_timestamp,
		CTLFLAG_RW, &netmap_no_timestamp, 0, "no_timestamp");
SYSCTL_INT(_dev_netmap, OID_AUTO, no_pendintr, CTLFLAG_RW, &netmap_no_pendintr,
		0, "Always look for new received packets.");
SYSCTL_INT(_dev_netmap, OID_AUTO, txsync_retry, CTLFLAG_RW,
		&netmap_txsync_retry, 0, "Number of txsync loops in bridge's flush.");

SYSCTL_INT(_dev_netmap, OID_AUTO, fwd, CTLFLAG_RW, &netmap_fwd, 0,
		"Force NR_FORWARD mode");
SYSCTL_INT(_dev_netmap, OID_AUTO, admode, CTLFLAG_RW, &netmap_admode, 0,
		"Adapter mode. 0 selects the best option available, "
		"1 forces native adapter, 2 forces emulated adapter");
SYSCTL_INT(_dev_netmap, OID_AUTO, generic_mit, CTLFLAG_RW, &netmap_generic_mit,
		0, "RX notification interval in nanoseconds");
SYSCTL_INT(_dev_netmap, OID_AUTO, generic_ringsize, CTLFLAG_RW,
		&netmap_generic_ringsize, 0,
		"Number of per-ring slots for emulated netmap mode");
SYSCTL_INT(_dev_netmap, OID_AUTO, generic_rings, CTLFLAG_RW,
		&netmap_generic_rings, 0,
		"Number of TX/RX queues for emulated netmap adapters");
#ifdef linux
SYSCTL_INT(_dev_netmap, OID_AUTO, generic_txqdisc, CTLFLAG_RW,
		&netmap_generic_txqdisc, 0, "Use qdisc for generic adapters");
#endif
SYSCTL_INT(_dev_netmap, OID_AUTO, ptnet_vnet_hdr, CTLFLAG_RW, &ptnet_vnet_hdr,
		0, "Allow ptnet devices to use virtio-net headers");
SYSCTL_INT(_dev_netmap, OID_AUTO, ptnetmap_tx_workers, CTLFLAG_RW,
		&ptnetmap_tx_workers, 0, "Use worker threads for ptnetmap TX processing");

SYSEND;

NMG_LOCK_T	netmap_global_lock;
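
/*
 * Example: the knobs above can be changed at runtime from userspace;
 * a hedged sketch using sysctl(3) to force emulated adapters
 * (NETMAP_ADMODE_GENERIC is kernel-internal, so userspace writes the
 * raw value 2):
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *
 *	int mode = 2;	// NETMAP_ADMODE_GENERIC
 *	sysctlbyname("dev.netmap.admode", NULL, NULL, &mode, sizeof(mode));
 */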

/*
 * mark the ring as stopped, and run through the locks
 * to make sure other users get to see it.
 * stopped must be either NM_KR_STOPPED (for unbounded stop)
 * or NM_KR_LOCKED (brief stop for mutual exclusion purposes)
 */
static void
netmap_disable_ring(struct netmap_kring *kr, int stopped)
{
	nm_kr_stop(kr, stopped);
	// XXX check if nm_kr_stop is sufficient
	mtx_lock(&kr->q_lock);
	mtx_unlock(&kr->q_lock);
	nm_kr_put(kr);
}

/* stop or enable a single ring */
void
netmap_set_ring(struct netmap_adapter *na, u_int ring_id, enum txrx t, int stopped)
{
	if (stopped)
		netmap_disable_ring(NMR(na, t)[ring_id], stopped);
	else
		NMR(na, t)[ring_id]->nkr_stopped = 0;
}


/* stop or enable all the rings of na */
void
netmap_set_all_rings(struct netmap_adapter *na, int stopped)
{
	int i;
	enum txrx t;

	if (!nm_netmap_on(na))
		return;

	for_rx_tx(t) {
		for (i = 0; i < netmap_real_rings(na, t); i++) {
			netmap_set_ring(na, i, t, stopped);
		}
	}
}

/*
 * Convenience function used in drivers. Waits for current txsync()s/rxsync()s
 * to finish and prevents any new one from starting. Call this before turning
 * netmap mode off, or before removing the hardware rings (e.g., on module
 * unload).
 */
void
netmap_disable_all_rings(struct ifnet *ifp)
{
	if (NM_NA_VALID(ifp)) {
		netmap_set_all_rings(NA(ifp), NM_KR_STOPPED);
	}
}

/*
 * Convenience function used in drivers. Re-enables rxsync and txsync on the
 * adapter's rings. In linux drivers, this should be placed near each
 * napi_enable().
 */
void
netmap_enable_all_rings(struct ifnet *ifp)
{
	if (NM_NA_VALID(ifp)) {
		netmap_set_all_rings(NA(ifp), 0 /* enabled */);
	}
}

void
netmap_make_zombie(struct ifnet *ifp)
{
	if (NM_NA_VALID(ifp)) {
		struct netmap_adapter *na = NA(ifp);
		netmap_set_all_rings(na, NM_KR_LOCKED);
		na->na_flags |= NAF_ZOMBIE;
		netmap_set_all_rings(na, 0);
	}
}

void
netmap_undo_zombie(struct ifnet *ifp)
{
	if (NM_NA_VALID(ifp)) {
		struct netmap_adapter *na = NA(ifp);
		if (na->na_flags & NAF_ZOMBIE) {
			netmap_set_all_rings(na, NM_KR_LOCKED);
			na->na_flags &= ~NAF_ZOMBIE;
			netmap_set_all_rings(na, 0);
		}
	}
}

/*
 * generic bound_checking function
 */
u_int
nm_bound_var(u_int *v, u_int dflt, u_int lo, u_int hi, const char *msg)
{
	u_int oldv = *v;
	const char *op = NULL;

	if (dflt < lo)
		dflt = lo;
	if (dflt > hi)
		dflt = hi;
	if (oldv < lo) {
		*v = dflt;
		op = "Bump";
	} else if (oldv > hi) {
		*v = hi;
		op = "Clamp";
	}
	if (op && msg)
		nm_prinf("%s %s to %d (was %d)\n", op, msg, *v, oldv);
	return *v;
}
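
/*
 * Example use of nm_bound_var(), as a driver might call it on a
 * user-supplied tunable (the variable name and bounds below are
 * illustrative):
 *
 *	u_int num_descs = 100000;	// possibly out of range
 *
 *	nm_bound_var(&num_descs, 1024, 64, 4096, "number of descriptors");
 *	// num_descs is now clamped to 4096 and a message is logged
 */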

/*
 * packet-dump function, user-supplied or static buffer.
 * The destination buffer must be at least 30+4*len
 */
const char *
nm_dump_buf(char *p, int len, int lim, char *dst)
{
	static char _dst[8192];
	int i, j, i0;
	static char hex[] = "0123456789abcdef";
	char *o;	/* output position */

#define P_HI(x)	hex[((x) & 0xf0)>>4]
#define P_LO(x)	hex[((x) & 0xf)]
#define P_C(x)	((x) >= 0x20 && (x) <= 0x7e ? (x) : '.')
	if (!dst)
		dst = _dst;
	if (lim <= 0 || lim > len)
		lim = len;
	o = dst;
	sprintf(o, "buf 0x%p len %d lim %d\n", p, len, lim);
	o += strlen(o);
	/* hexdump routine */
	for (i = 0; i < lim; ) {
		sprintf(o, "%5d: ", i);
		o += strlen(o);
		memset(o, ' ', 48);
		i0 = i;
		for (j = 0; j < 16 && i < lim; i++, j++) {
			o[j*3] = P_HI(p[i]);
			o[j*3+1] = P_LO(p[i]);
		}
		i = i0;
		for (j = 0; j < 16 && i < lim; i++, j++)
			o[j + 48] = P_C(p[i]);
		o[j+48] = '\n';
		o += j+49;
	}
	*o = '\0';
#undef P_HI
#undef P_LO
#undef P_C
	return dst;
}


/*
 * Fetch configuration from the device, to cope with dynamic
 * reconfigurations after loading the module.
 */
/* call with NMG_LOCK held */
int
netmap_update_config(struct netmap_adapter *na)
{
	struct nm_config_info info;

	bzero(&info, sizeof(info));
	if (na->nm_config == NULL ||
	    na->nm_config(na, &info)) {
		/* take whatever we had at init time */
		info.num_tx_rings = na->num_tx_rings;
		info.num_tx_descs = na->num_tx_desc;
		info.num_rx_rings = na->num_rx_rings;
		info.num_rx_descs = na->num_rx_desc;
		info.rx_buf_maxsize = na->rx_buf_maxsize;
	}

	if (na->num_tx_rings == info.num_tx_rings &&
	    na->num_tx_desc == info.num_tx_descs &&
	    na->num_rx_rings == info.num_rx_rings &&
	    na->num_rx_desc == info.num_rx_descs &&
	    na->rx_buf_maxsize == info.rx_buf_maxsize)
		return 0; /* nothing changed */
	if (na->active_fds == 0) {
		na->num_tx_rings = info.num_tx_rings;
		na->num_tx_desc = info.num_tx_descs;
		na->num_rx_rings = info.num_rx_rings;
		na->num_rx_desc = info.num_rx_descs;
		na->rx_buf_maxsize = info.rx_buf_maxsize;
		D("configuration changed for %s: txring %d x %d, "
			"rxring %d x %d, rxbufsz %d",
			na->name, na->num_tx_rings, na->num_tx_desc,
			na->num_rx_rings, na->num_rx_desc, na->rx_buf_maxsize);
		return 0;
	}
	D("WARNING: configuration changed for %s while active: "
		"txring %d x %d, rxring %d x %d, rxbufsz %d",
		na->name, info.num_tx_rings, info.num_tx_descs,
		info.num_rx_rings, info.num_rx_descs,
		info.rx_buf_maxsize);
	return 1;
}

/* nm_sync callbacks for the host rings */
static int netmap_txsync_to_host(struct netmap_kring *kring, int flags);
static int netmap_rxsync_from_host(struct netmap_kring *kring, int flags);

/* create the krings array and initialize the fields common to all adapters.
 * The array layout is this:
 *
 *                    +----------+
 * na->tx_rings ----->|          | \
 *                    |          |  } na->num_tx_rings
 *                    |          | /
 *                    +----------+
 *                    |          |    host tx kring
 * na->rx_rings ----> +----------+
 *                    |          | \
 *                    |          |  } na->num_rx_rings
 *                    |          | /
 *                    +----------+
 *                    |          |    host rx kring
 *                    +----------+
 * na->tailroom ----->|          | \
 *                    |          |  } tailroom bytes
 *                    |          | /
 *                    +----------+
 *
 * Note: for compatibility, host krings are created even when not needed.
 * The tailroom space is currently used by vale ports for allocating leases.
 */
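/*
 * With the layout above, the host krings (when present) sit right
 * after the hardware ones, so for example:
 *
 *	struct netmap_kring *host_tx_kr = na->tx_rings[na->num_tx_rings];
 *	struct netmap_kring *host_rx_kr = na->rx_rings[na->num_rx_rings];
 */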
/* call with NMG_LOCK held */
int
netmap_krings_create(struct netmap_adapter *na, u_int tailroom)
{
	u_int i, len, ndesc;
	struct netmap_kring *kring;
	u_int n[NR_TXRX];
	enum txrx t;

	if (na->tx_rings != NULL) {
		D("warning: krings were already created");
		return 0;
	}

	/* account for the (possibly fake) host rings */
	n[NR_TX] = na->num_tx_rings + 1;
	n[NR_RX] = na->num_rx_rings + 1;

	len = (n[NR_TX] + n[NR_RX]) *
		(sizeof(struct netmap_kring) + sizeof(struct netmap_kring *))
		+ tailroom;

	na->tx_rings = nm_os_malloc((size_t)len);
	if (na->tx_rings == NULL) {
		D("Cannot allocate krings");
		return ENOMEM;
	}
	na->rx_rings = na->tx_rings + n[NR_TX];
	na->tailroom = na->rx_rings + n[NR_RX];

	/* link the krings in the krings array */
	kring = (struct netmap_kring *)((char *)na->tailroom + tailroom);
	for (i = 0; i < n[NR_TX] + n[NR_RX]; i++) {
		na->tx_rings[i] = kring;
		kring++;
	}

	/*
	 * All kring fields are zeroed below; we are explicit only
	 * for the important ones.
	 */
	for_rx_tx(t) {
		ndesc = nma_get_ndesc(na, t);
		for (i = 0; i < n[t]; i++) {
			kring = NMR(na, t)[i];
			bzero(kring, sizeof(*kring));
			kring->na = na;
			kring->notify_na = na;
			kring->ring_id = i;
			kring->tx = t;
			kring->nkr_num_slots = ndesc;
			kring->nr_mode = NKR_NETMAP_OFF;
			kring->nr_pending_mode = NKR_NETMAP_OFF;
			if (i < nma_get_nrings(na, t)) {
				kring->nm_sync = (t == NR_TX ? na->nm_txsync : na->nm_rxsync);
			} else {
				if (!(na->na_flags & NAF_HOST_RINGS))
					kring->nr_kflags |= NKR_FAKERING;
				kring->nm_sync = (t == NR_TX ?
						netmap_txsync_to_host:
						netmap_rxsync_from_host);
			}
			kring->nm_notify = na->nm_notify;
			kring->rhead = kring->rcur = kring->nr_hwcur = 0;
			/*
			 * IMPORTANT: Always keep one slot empty.
			 */
			kring->rtail = kring->nr_hwtail = (t == NR_TX ? ndesc - 1 : 0);
			snprintf(kring->name, sizeof(kring->name) - 1, "%s %s%d", na->name,
					nm_txrx2str(t), i);
			ND("ktx %s h %d c %d t %d",
				kring->name, kring->rhead, kring->rcur, kring->rtail);
			mtx_init(&kring->q_lock, (t == NR_TX ? "nm_txq_lock" : "nm_rxq_lock"), NULL, MTX_DEF);
			nm_os_selinfo_init(&kring->si);
		}
		nm_os_selinfo_init(&na->si[t]);
	}


	return 0;
}


/* undo the actions performed by netmap_krings_create */
/* call with NMG_LOCK held */
void
netmap_krings_delete(struct netmap_adapter *na)
{
	struct netmap_kring **kring = na->tx_rings;
	enum txrx t;

	if (na->tx_rings == NULL) {
		D("warning: krings were already deleted");
		return;
	}

	for_rx_tx(t)
		nm_os_selinfo_uninit(&na->si[t]);

	/* we rely on the krings layout described above */
	for ( ; kring != na->tailroom; kring++) {
		mtx_destroy(&(*kring)->q_lock);
		nm_os_selinfo_uninit(&(*kring)->si);
	}
	nm_os_free(na->tx_rings);
	na->tx_rings = na->rx_rings = na->tailroom = NULL;
}
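
/*
 * Example: for an adapter with 4 TX and 4 RX hardware rings,
 * netmap_krings_create() accounts for n[NR_TX] = n[NR_RX] = 5 krings
 * per direction (4 hardware + 1 host), so a single buffer is
 * allocated holding the 10 kring pointers, the tailroom and the
 * 10 krings themselves:
 *
 *	len = 10 * (sizeof(struct netmap_kring) +
 *	    sizeof(struct netmap_kring *)) + tailroom;
 */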

/*
 * Destructor for NIC ports. They also have an mbuf queue
 * on the rings connected to the host so we need to purge
 * them first.
 */
/* call with NMG_LOCK held */
void
netmap_hw_krings_delete(struct netmap_adapter *na)
{
	struct mbq *q = &na->rx_rings[na->num_rx_rings]->rx_queue;

	ND("destroy sw mbq with len %d", mbq_len(q));
	mbq_purge(q);
	mbq_safe_fini(q);
	netmap_krings_delete(na);
}

static void
netmap_mem_drop(struct netmap_adapter *na)
{
	int last = netmap_mem_deref(na->nm_mem, na);
	/* if the native allocator had been overridden on regif,
	 * restore it now and drop the temporary one
	 */
	if (last && na->nm_mem_prev) {
		netmap_mem_put(na->nm_mem);
		na->nm_mem = na->nm_mem_prev;
		na->nm_mem_prev = NULL;
	}
}

/*
 * Undo everything that was done in netmap_do_regif(). In particular,
 * call nm_register(ifp,0) to stop netmap mode on the interface and
 * revert to normal operation.
 */
/* call with NMG_LOCK held */
static void netmap_unset_ringid(struct netmap_priv_d *);
static void netmap_krings_put(struct netmap_priv_d *);
void
netmap_do_unregif(struct netmap_priv_d *priv)
{
	struct netmap_adapter *na = priv->np_na;

	NMG_LOCK_ASSERT();
	na->active_fds--;
	/* unset nr_pending_mode and possibly release exclusive mode */
	netmap_krings_put(priv);

#ifdef	WITH_MONITOR
	/* XXX check whether we have to do something with monitor
	 * when rings change nr_mode. */
	if (na->active_fds <= 0) {
		/* walk through all the rings and tell any monitor
		 * that the port is going to exit netmap mode
		 */
		netmap_monitor_stop(na);
	}
#endif

	if (na->active_fds <= 0 || nm_kring_pending(priv)) {
		na->nm_register(na, 0);
	}

	/* delete rings and buffers that are no longer needed */
	netmap_mem_rings_delete(na);

	if (na->active_fds <= 0) {	/* last instance */
		/*
		 * (TO CHECK) We enter here
		 * when the last reference to this file descriptor goes
		 * away. This means we cannot have any pending poll()
		 * or interrupt routine operating on the structure.
		 * XXX The file may be closed in a thread while
		 * another thread is using it.
		 * Linux keeps the file opened until the last reference
		 * by any outstanding ioctl/poll or mmap is gone.
		 * FreeBSD does not track mmap()s (but we do) and
		 * wakes up any sleeping poll(). Need to check what
		 * happens if the close() occurs while a concurrent
		 * syscall is running.
		 */
		if (netmap_verbose)
			D("deleting last instance for %s", na->name);

		if (nm_netmap_on(na)) {
			D("BUG: netmap on while going to delete the krings");
		}

		na->nm_krings_delete(na);
	}

	/* possibly decrement counter of tx_si/rx_si users */
	netmap_unset_ringid(priv);
	/* delete the nifp */
	netmap_mem_if_delete(na, priv->np_nifp);
	/* drop the allocator */
	netmap_mem_drop(na);
	/* mark the priv as unregistered */
	priv->np_na = NULL;
	priv->np_nifp = NULL;
}

/* call with NMG_LOCK held */
static __inline int
nm_si_user(struct netmap_priv_d *priv, enum txrx t)
{
	return (priv->np_na != NULL &&
		(priv->np_qlast[t] - priv->np_qfirst[t] > 1));
}

struct netmap_priv_d*
netmap_priv_new(void)
{
	struct netmap_priv_d *priv;

	priv = nm_os_malloc(sizeof(struct netmap_priv_d));
	if (priv == NULL)
		return NULL;
	priv->np_refs = 1;
	nm_os_get_module();
	return priv;
}

/*
 * Destructor of the netmap_priv_d, called when the fd is closed.
 * Action: undo all the things done by NIOCREGIF.
 * On FreeBSD we need to track whether there are active mmap()s,
 * and we use np_active_mmaps for that. On linux, the field is always 0.
 * Return: 1 if we can free priv, 0 otherwise.
 *
 */
/* call with NMG_LOCK held */
void
netmap_priv_delete(struct netmap_priv_d *priv)
{
	struct netmap_adapter *na = priv->np_na;

	/* number of active references to this fd */
	if (--priv->np_refs > 0) {
		return;
	}
	nm_os_put_module();
	if (na) {
		netmap_do_unregif(priv);
	}
	netmap_unget_na(na, priv->np_ifp);
	bzero(priv, sizeof(*priv));	/* for safety */
	nm_os_free(priv);
}


/* call with NMG_LOCK *not* held */
void
netmap_dtor(void *data)
{
	struct netmap_priv_d *priv = data;

	NMG_LOCK();
	netmap_priv_delete(priv);
	NMG_UNLOCK();
}


/*
 * Handlers for synchronization of the rings from/to the host stack.
 * These are associated to a network interface and are just another
 * ring pair managed by userspace.
 *
 * Netmap also supports transparent forwarding (NS_FORWARD and NR_FORWARD
 * flags):
 *
 * - Before releasing buffers on hw RX rings, the application can mark
 *   them with the NS_FORWARD flag. During the next RXSYNC or poll(), they
 *   will be forwarded to the host stack, just as if the application had
 *   moved them to the host TX ring.
 *
 * - Before releasing buffers on the host RX ring, the application can
 *   mark them with the NS_FORWARD flag. During the next RXSYNC or poll(),
 *   they will be forwarded to the hw TX rings, saving the application
 *   from doing the same task in user-space.
 *
 * Transparent forwarding can be enabled per-ring, by setting the NR_FORWARD
 * flag, or globally with the netmap_fwd sysctl.
 *
 * The transfer NIC --> host is relatively easy, just encapsulate
 * into mbufs and we are done. The host --> NIC side is slightly
 * harder because there might not be room in the tx ring so it
 * might take a while before releasing the buffer.
 */
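
/*
 * Userspace sketch of transparent forwarding: before releasing a
 * received slot, mark it so that the next rxsync passes it on
 * (assumes the ring was opened with NR_FORWARD, or that the
 * netmap_fwd sysctl is set; "ring" is an RX netmap ring):
 *
 *	struct netmap_slot *slot = &ring->slot[ring->cur];
 *
 *	slot->flags |= NS_FORWARD;
 *	ring->head = ring->cur = nm_ring_next(ring, ring->cur);
 */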

/*
 * Pass a whole queue of mbufs to the host stack as coming from 'dst'.
 * We do not need to lock because the queue is private.
 * After this call the queue is empty.
 */
static void
netmap_send_up(struct ifnet *dst, struct mbq *q)
{
	struct mbuf *m;
	struct mbuf *head = NULL, *prev = NULL;

	/* Send packets up, outside the lock; head/prev machinery
	 * is only useful for Windows. */
	while ((m = mbq_dequeue(q)) != NULL) {
		if (netmap_verbose & NM_VERB_HOST)
			D("sending up pkt %p size %d", m, MBUF_LEN(m));
		prev = nm_os_send_up(dst, m, prev);
		if (head == NULL)
			head = prev;
	}
	if (head)
		nm_os_send_up(dst, NULL, head);
	mbq_fini(q);
}


/*
 * Scan the buffers from hwcur to ring->head, and put a copy of those
 * marked NS_FORWARD (or all of them if forced) into a queue of mbufs.
 * Drop remaining packets in the unlikely event
 * of an mbuf shortage.
 */
static void
netmap_grab_packets(struct netmap_kring *kring, struct mbq *q, int force)
{
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	u_int n;
	struct netmap_adapter *na = kring->na;

	for (n = kring->nr_hwcur; n != head; n = nm_next(n, lim)) {
		struct mbuf *m;
		struct netmap_slot *slot = &kring->ring->slot[n];

		if ((slot->flags & NS_FORWARD) == 0 && !force)
			continue;
		if (slot->len < 14 || slot->len > NETMAP_BUF_SIZE(na)) {
			RD(5, "bad pkt at %d len %d", n, slot->len);
			continue;
		}
		slot->flags &= ~NS_FORWARD; // XXX needed ?
		/* XXX TODO: adapt to the case of a multisegment packet */
		m = m_devget(NMB(na, slot), slot->len, 0, na->ifp, NULL);

		if (m == NULL)
			break;
		mbq_enqueue(q, m);
	}
}

static inline int
_nm_may_forward(struct netmap_kring *kring)
{
	return	((netmap_fwd || kring->ring->flags & NR_FORWARD) &&
		 kring->na->na_flags & NAF_HOST_RINGS &&
		 kring->tx == NR_RX);
}

static inline int
nm_may_forward_up(struct netmap_kring *kring)
{
	return	_nm_may_forward(kring) &&
		 kring->ring_id != kring->na->num_rx_rings;
}

static inline int
nm_may_forward_down(struct netmap_kring *kring, int sync_flags)
{
	return	_nm_may_forward(kring) &&
		 (sync_flags & NAF_CAN_FORWARD_DOWN) &&
		 kring->ring_id == kring->na->num_rx_rings;
}

/*
 * Send to the NIC rings packets marked NS_FORWARD between
 * kring->nr_hwcur and kring->rhead.
 * Called under kring->rx_queue.lock on the sw rx ring.
 *
 * It can only be called if the user opened all the TX hw rings,
 * see NAF_CAN_FORWARD_DOWN flag.
 * We can touch the TX netmap rings (slots, head and cur) since
 * we are in poll/ioctl system call context, and the application
 * is not supposed to touch the ring (using a different thread)
 * during the execution of the system call.
 */
static u_int
netmap_sw_to_nic(struct netmap_adapter *na)
{
	struct netmap_kring *kring = na->rx_rings[na->num_rx_rings];
	struct netmap_slot *rxslot = kring->ring->slot;
	u_int i, rxcur = kring->nr_hwcur;
	u_int const head = kring->rhead;
	u_int const src_lim = kring->nkr_num_slots - 1;
	u_int sent = 0;

	/* scan rings to find space, then fill as much as possible */
	for (i = 0; i < na->num_tx_rings; i++) {
		struct netmap_kring *kdst = na->tx_rings[i];
		struct netmap_ring *rdst = kdst->ring;
		u_int const dst_lim = kdst->nkr_num_slots - 1;

		/* XXX do we trust ring or kring->rcur,rtail ? */
		for (; rxcur != head && !nm_ring_empty(rdst);
		     rxcur = nm_next(rxcur, src_lim) ) {
			struct netmap_slot *src, *dst, tmp;
			u_int dst_head = rdst->head;

			src = &rxslot[rxcur];
			if ((src->flags & NS_FORWARD) == 0 && !netmap_fwd)
				continue;

			sent++;

			dst = &rdst->slot[dst_head];

			tmp = *src;

			src->buf_idx = dst->buf_idx;
			src->flags = NS_BUF_CHANGED;

			dst->buf_idx = tmp.buf_idx;
			dst->len = tmp.len;
			dst->flags = NS_BUF_CHANGED;

			rdst->head = rdst->cur = nm_next(dst_head, dst_lim);
		}
		/* if (sent) XXX txsync ? it would be just an optimization */
	}
	return sent;
}


/*
 * netmap_txsync_to_host() passes packets up. We are called from a
 * system call in user process context, and the only contention
 * can be among multiple user threads erroneously calling
 * this routine concurrently.
 */
static int
netmap_txsync_to_host(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	struct mbq q;

	/* Take packets from hwcur to head and pass them up.
	 * Force hwcur = head since netmap_grab_packets() stops at head
	 */
	mbq_init(&q);
	netmap_grab_packets(kring, &q, 1 /* force */);
	ND("have %d pkts in queue", mbq_len(&q));
	kring->nr_hwcur = head;
	kring->nr_hwtail = head + lim;
	if (kring->nr_hwtail > lim)
		kring->nr_hwtail -= lim + 1;

	netmap_send_up(na->ifp, &q);
	return 0;
}


/*
 * rxsync backend for packets coming from the host stack.
 * They have been put in kring->rx_queue by netmap_transmit().
 * We protect access to the kring using kring->rx_queue.lock
 *
 * also moves to the nic hw rings any packet the user has marked
 * for transparent-mode forwarding, then sets the NR_FORWARD
 * flag in the kring to let the caller push them out
 */
static int
netmap_rxsync_from_host(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct netmap_ring *ring = kring->ring;
	u_int nm_i, n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	int ret = 0;
	struct mbq *q = &kring->rx_queue, fq;

	mbq_init(&fq); /* fq holds packets to be freed */

	mbq_lock(q);

	/* First part: import newly received packets */
	n = mbq_len(q);
	if (n) { /* grab packets from the queue */
		struct mbuf *m;
		uint32_t stop_i;

		nm_i = kring->nr_hwtail;
		stop_i = nm_prev(kring->nr_hwcur, lim);
		while ( nm_i != stop_i && (m = mbq_dequeue(q)) != NULL ) {
			int len = MBUF_LEN(m);
			struct netmap_slot *slot = &ring->slot[nm_i];

			m_copydata(m, 0, len, NMB(na, slot));
			ND("nm %d len %d", nm_i, len);
			if (netmap_verbose)
				D("%s", nm_dump_buf(NMB(na, slot), len, 128, NULL));

			slot->len = len;
			slot->flags = 0;
			nm_i = nm_next(nm_i, lim);
			mbq_enqueue(&fq, m);
		}
		kring->nr_hwtail = nm_i;
	}

	/*
	 * Second part: skip past packets that userspace has released.
	 */
	nm_i = kring->nr_hwcur;
	if (nm_i != head) { /* something was released */
		if (nm_may_forward_down(kring, flags)) {
			ret = netmap_sw_to_nic(na);
			if (ret > 0) {
				kring->nr_kflags |= NR_FORWARD;
				ret = 0;
			}
		}
		kring->nr_hwcur = head;
	}

	mbq_unlock(q);

	mbq_purge(&fq);
	mbq_fini(&fq);

	return ret;
}


/* Get a netmap adapter for the port.
 *
 * If it is possible to satisfy the request, return 0
 * with *na containing the netmap adapter found.
 * Otherwise return an error code, with *na containing NULL.
 *
 * When the port is attached to a bridge, we always return
 * EBUSY.
 * Otherwise, if the port is already bound to a file descriptor,
 * then we unconditionally return the existing adapter into *na.
 * In all the other cases, we return (into *na) either native,
 * generic or NULL, according to the following table:
 *
 *					native_support
 * active_fds   dev.netmap.admode         YES     NO
 * -------------------------------------------------------
 *    >0              *                 NA(ifp) NA(ifp)
 *
 *     0        NETMAP_ADMODE_BEST      NATIVE  GENERIC
 *     0        NETMAP_ADMODE_NATIVE    NATIVE   NULL
 *     0        NETMAP_ADMODE_GENERIC   GENERIC GENERIC
 *
 */
static void netmap_hw_dtor(struct netmap_adapter *); /* needed by NM_IS_NATIVE() */
int
netmap_get_hw_na(struct ifnet *ifp, struct netmap_mem_d *nmd, struct netmap_adapter **na)
{
	/* generic support */
	int i = netmap_admode;	/* Take a snapshot. */
	struct netmap_adapter *prev_na;
	int error = 0;

	*na = NULL; /* default */

	/* reset in case of invalid value */
	if (i < NETMAP_ADMODE_BEST || i >= NETMAP_ADMODE_LAST)
		i = netmap_admode = NETMAP_ADMODE_BEST;

	if (NM_NA_VALID(ifp)) {
		prev_na = NA(ifp);
		/* If an adapter already exists, return it if
		 * there are active file descriptors or if
		 * netmap is not forced to use generic
		 * adapters.
		 */
		if (NETMAP_OWNED_BY_ANY(prev_na)
			|| i != NETMAP_ADMODE_GENERIC
			|| prev_na->na_flags & NAF_FORCE_NATIVE
#ifdef WITH_PIPES
			/* ugly, but we cannot allow an adapter switch
			 * if some pipe is referring to this one
			 */
			|| prev_na->na_next_pipe > 0
#endif
		) {
			*na = prev_na;
			goto assign_mem;
		}
	}

	/* If there isn't native support and netmap is not allowed
	 * to use generic adapters, we cannot satisfy the request.
	 */
	if (!NM_IS_NATIVE(ifp) && i == NETMAP_ADMODE_NATIVE)
		return EOPNOTSUPP;

	/* Otherwise, create a generic adapter and return it,
	 * saving the previously used netmap adapter, if any.
	 *
	 * Note that here 'prev_na', if not NULL, MUST be a
	 * native adapter, and CANNOT be a generic one. This is
	 * true because generic adapters are created on demand, and
	 * destroyed when not used anymore. Therefore, if the adapter
	 * currently attached to an interface 'ifp' is generic, it
	 * must be that
	 * (NA(ifp)->active_fds > 0 || NETMAP_OWNED_BY_KERN(NA(ifp))).
	 * Consequently, if NA(ifp) is generic, we will enter one of
	 * the branches above. This ensures that we never override
	 * a generic adapter with another generic adapter.
	 */
	error = generic_netmap_attach(ifp);
	if (error)
		return error;

	*na = NA(ifp);

assign_mem:
	if (nmd != NULL && !((*na)->na_flags & NAF_MEM_OWNER) &&
	    (*na)->active_fds == 0 && ((*na)->nm_mem != nmd)) {
		(*na)->nm_mem_prev = (*na)->nm_mem;
		(*na)->nm_mem = netmap_mem_get(nmd);
	}

	return 0;
}

/*
 * MUST BE CALLED UNDER NMG_LOCK()
 *
 * Get a refcounted reference to a netmap adapter attached
 * to the interface specified by req.
 * This is always called in the execution of an ioctl().
 *
 * Return ENXIO if the interface specified by the request does
 * not exist, ENOTSUP if netmap is not supported by the interface,
 * EBUSY if the interface is already attached to a bridge,
 * EINVAL if parameters are invalid, ENOMEM if needed resources
 * could not be allocated.
 * If successful, hold a reference to the netmap adapter.
 *
 * If the interface specified by req is a system one, also keep
 * a reference to it and return a valid *ifp.
 */
int
netmap_get_na(struct nmreq_header *hdr,
	      struct netmap_adapter **na, struct ifnet **ifp,
	      struct netmap_mem_d *nmd, int create)
{
	struct nmreq_register *req = (struct nmreq_register *)(uintptr_t)hdr->nr_body;
	int error = 0;
	struct netmap_adapter *ret = NULL;
	int nmd_ref = 0;

	*na = NULL;     /* default return value */
	*ifp = NULL;

	if (hdr->nr_reqtype != NETMAP_REQ_REGISTER) {
		return EINVAL;
	}

	if (req->nr_mode == NR_REG_PIPE_MASTER ||
			req->nr_mode == NR_REG_PIPE_SLAVE) {
		/* Do not accept deprecated pipe modes. */
		D("Deprecated pipe nr_mode, use xx{yy or xx}yy syntax");
		return EINVAL;
	}

	NMG_LOCK_ASSERT();

	/* if the request contains a memid, try to find the
	 * corresponding memory region
	 */
	if (nmd == NULL && req->nr_mem_id) {
		nmd = netmap_mem_find(req->nr_mem_id);
		if (nmd == NULL)
			return EINVAL;
		/* keep the reference */
		nmd_ref = 1;
	}

	/* We cascade through all possible types of netmap adapter.
	 * All netmap_get_*_na() functions return an error and an na,
	 * with the following combinations:
	 *
	 * error    na
	 *   0	   NULL		type doesn't match
	 *  !0	   NULL		type matches, but na creation/lookup failed
	 *   0	  !NULL		type matches and na created/found
	 *  !0    !NULL		impossible
	 */

	/* try to see if this is a ptnetmap port */
	error = netmap_get_pt_host_na(hdr, na, nmd, create);
	if (error || *na != NULL)
		goto out;

	/* try to see if this is a monitor port */
	error = netmap_get_monitor_na(hdr, na, nmd, create);
	if (error || *na != NULL)
		goto out;

	/* try to see if this is a pipe port */
	error = netmap_get_pipe_na(hdr, na, nmd, create);
	if (error || *na != NULL)
		goto out;

	/* try to see if this is a bridge port */
	error = netmap_get_bdg_na(hdr, na, nmd, create);
	if (error)
		goto out;

	if (*na != NULL) /* valid match in netmap_get_bdg_na() */
		goto out;

	/*
	 * This must be a hardware na, lookup the name in the system.
	 * Note that by hardware we actually mean "it shows up in ifconfig".
	 * This may still be a tap, a veth/epair, or even a
	 * persistent VALE port.
	 */
	*ifp = ifunit_ref(hdr->nr_name);
	if (*ifp == NULL) {
		error = ENXIO;
		goto out;
	}

	error = netmap_get_hw_na(*ifp, nmd, &ret);
	if (error)
		goto out;

	*na = ret;
	netmap_adapter_get(ret);

out:
	if (error) {
		if (ret)
			netmap_adapter_put(ret);
		if (*ifp) {
			if_rele(*ifp);
			*ifp = NULL;
		}
	}
	if (nmd_ref)
		netmap_mem_put(nmd);

	return error;
}

/* undo netmap_get_na() */
void
netmap_unget_na(struct netmap_adapter *na, struct ifnet *ifp)
{
	if (ifp)
		if_rele(ifp);
	if (na)
		netmap_adapter_put(na);
}


#define NM_FAIL_ON(t) do {						\
	if (unlikely(t)) {						\
		RD(5, "%s: fail '" #t "' "				\
			"h %d c %d t %d "				\
			"rh %d rc %d rt %d "				\
			"hc %d ht %d",					\
			kring->name,					\
			head, cur, ring->tail,				\
			kring->rhead, kring->rcur, kring->rtail,	\
			kring->nr_hwcur, kring->nr_hwtail);		\
		return kring->nkr_num_slots;				\
	}								\
} while (0)

/*
 * validate parameters on entry for *_txsync()
 * Returns ring->head if ok, or something >= kring->nkr_num_slots
 * in case of error.
 *
 * rhead, rcur and rtail=hwtail are stored from previous round.
 * hwcur is the next packet to send to the ring.
 *
 * We want
 *    hwcur <= *rhead <= head <= cur <= tail = *rtail <= hwtail
 *
 * hwcur, rhead, rtail and hwtail are reliable
 */
u_int
nm_txsync_prologue(struct netmap_kring *kring, struct netmap_ring *ring)
{
	u_int head = ring->head; /* read only once */
	u_int cur = ring->cur; /* read only once */
	u_int n = kring->nkr_num_slots;

	ND(5, "%s kcur %d ktail %d head %d cur %d tail %d",
		kring->name,
		kring->nr_hwcur, kring->nr_hwtail,
		ring->head, ring->cur, ring->tail);
#if 1 /* kernel sanity checks; but we can trust the kring. */
	NM_FAIL_ON(kring->nr_hwcur >= n || kring->rhead >= n ||
	    kring->rtail >= n ||  kring->nr_hwtail >= n);
#endif /* kernel sanity checks */
	/*
	 * user sanity checks. We only use head,
	 * A, B, ... are possible positions for head:
	 *
	 *  0    A  rhead   B  rtail   C  n-1
	 *  0    D  rtail   E  rhead   F  n-1
	 *
	 * B, F, D are valid. A, C, E are wrong
	 */
	if (kring->rtail >= kring->rhead) {
		/* want rhead <= head <= rtail */
		NM_FAIL_ON(head < kring->rhead || head > kring->rtail);
		/* and also head <= cur <= rtail */
		NM_FAIL_ON(cur < head || cur > kring->rtail);
	} else { /* here rtail < rhead */
		/* we need head outside rtail .. rhead */
		NM_FAIL_ON(head > kring->rtail && head < kring->rhead);

		/* two cases now: head <= rtail or head >= rhead  */
		if (head <= kring->rtail) {
			/* want head <= cur <= rtail */
			NM_FAIL_ON(cur < head || cur > kring->rtail);
		} else { /* head >= rhead */
			/* cur must be outside rtail..head */
			NM_FAIL_ON(cur > kring->rtail && cur < head);
		}
	}
	if (ring->tail != kring->rtail) {
		RD(5, "%s tail overwritten was %d need %d", kring->name,
			ring->tail, kring->rtail);
		ring->tail = kring->rtail;
	}
	kring->rhead = head;
	kring->rcur = cur;
	return head;
}


/*
 * validate parameters on entry for *_rxsync()
 * Returns ring->head if ok, kring->nkr_num_slots on error.
 *
 * For a valid configuration,
 * hwcur <= head <= cur <= tail <= hwtail
 *
 * We only consider head and cur.
 * hwcur and hwtail are reliable.
 *
 */
u_int
nm_rxsync_prologue(struct netmap_kring *kring, struct netmap_ring *ring)
{
	uint32_t const n = kring->nkr_num_slots;
	uint32_t head, cur;

	ND(5,"%s kc %d kt %d h %d c %d t %d",
		kring->name,
		kring->nr_hwcur, kring->nr_hwtail,
		ring->head, ring->cur, ring->tail);
	/*
	 * Before storing the new values, we should check they do not
	 * move backwards. However:
	 * - head is not an issue because the previous value is hwcur;
	 * - cur could in principle go back, however it does not matter
	 *   because we are processing a brand new rxsync()
	 */
	cur = kring->rcur = ring->cur;	/* read only once */
	head = kring->rhead = ring->head;	/* read only once */
#if 1 /* kernel sanity checks */
	NM_FAIL_ON(kring->nr_hwcur >= n || kring->nr_hwtail >= n);
#endif /* kernel sanity checks */
	/* user sanity checks */
	if (kring->nr_hwtail >= kring->nr_hwcur) {
		/* want hwcur <= rhead <= hwtail */
		NM_FAIL_ON(head < kring->nr_hwcur || head > kring->nr_hwtail);
		/* and also rhead <= rcur <= hwtail */
		NM_FAIL_ON(cur < head || cur > kring->nr_hwtail);
	} else {
		/* we need rhead outside hwtail..hwcur */
		NM_FAIL_ON(head < kring->nr_hwcur && head > kring->nr_hwtail);
		/* two cases now: head <= hwtail or head >= hwcur  */
		if (head <= kring->nr_hwtail) {
			/* want head <= cur <= hwtail */
			NM_FAIL_ON(cur < head || cur > kring->nr_hwtail);
		} else {
			/* cur must be outside hwtail..head */
			NM_FAIL_ON(cur < head && cur > kring->nr_hwtail);
		}
	}
	if (ring->tail != kring->rtail) {
		RD(5, "%s tail overwritten was %d need %d",
			kring->name,
			ring->tail, kring->rtail);
		ring->tail = kring->rtail;
	}
	return head;
}


/*
 * Error routine called when txsync/rxsync detects an error.
 * Can't do much more than resetting head = cur = hwcur, tail = hwtail
 * Return 1 on reinit.
 *
 * This routine is only called by the upper half of the kernel.
 * It only reads hwcur (which is changed only by the upper half, too)
 * and hwtail (which may be changed by the lower half, but only on
 * a tx ring and only to increase it, so any error will be recovered
 * on the next call). For the above, we don't strictly need to call
 * it under lock.
1742 */ 1743 int 1744 netmap_ring_reinit(struct netmap_kring *kring) 1745 { 1746 struct netmap_ring *ring = kring->ring; 1747 u_int i, lim = kring->nkr_num_slots - 1; 1748 int errors = 0; 1749 1750 // XXX KASSERT nm_kr_tryget 1751 RD(10, "called for %s", kring->name); 1752 // XXX probably wrong to trust userspace 1753 kring->rhead = ring->head; 1754 kring->rcur = ring->cur; 1755 kring->rtail = ring->tail; 1756 1757 if (ring->cur > lim) 1758 errors++; 1759 if (ring->head > lim) 1760 errors++; 1761 if (ring->tail > lim) 1762 errors++; 1763 for (i = 0; i <= lim; i++) { 1764 u_int idx = ring->slot[i].buf_idx; 1765 u_int len = ring->slot[i].len; 1766 if (idx < 2 || idx >= kring->na->na_lut.objtotal) { 1767 RD(5, "bad index at slot %d idx %d len %d ", i, idx, len); 1768 ring->slot[i].buf_idx = 0; 1769 ring->slot[i].len = 0; 1770 } else if (len > NETMAP_BUF_SIZE(kring->na)) { 1771 ring->slot[i].len = 0; 1772 RD(5, "bad len at slot %d idx %d len %d", i, idx, len); 1773 } 1774 } 1775 if (errors) { 1776 RD(10, "total %d errors", errors); 1777 RD(10, "%s reinit, cur %d -> %d tail %d -> %d", 1778 kring->name, 1779 ring->cur, kring->nr_hwcur, 1780 ring->tail, kring->nr_hwtail); 1781 ring->head = kring->rhead = kring->nr_hwcur; 1782 ring->cur = kring->rcur = kring->nr_hwcur; 1783 ring->tail = kring->rtail = kring->nr_hwtail; 1784 } 1785 return (errors ? 1 : 0); 1786 } 1787 1788 /* interpret the ringid and flags fields of an nmreq, by translating them 1789 * into a pair of intervals of ring indices: 1790 * 1791 * [priv->np_txqfirst, priv->np_txqlast) and 1792 * [priv->np_rxqfirst, priv->np_rxqlast) 1793 * 1794 */ 1795 int 1796 netmap_interp_ringid(struct netmap_priv_d *priv, uint32_t nr_mode, 1797 uint16_t nr_ringid, uint64_t nr_flags) 1798 { 1799 struct netmap_adapter *na = priv->np_na; 1800 int excluded_direction[] = { NR_TX_RINGS_ONLY, NR_RX_RINGS_ONLY }; 1801 enum txrx t; 1802 u_int j; 1803 1804 if ((nr_flags & NR_PTNETMAP_HOST) && ((nr_mode != NR_REG_ALL_NIC) || 1805 nr_flags & (NR_RX_RINGS_ONLY|NR_TX_RINGS_ONLY))) { 1806 D("Error: only NR_REG_ALL_NIC supported with netmap passthrough"); 1807 return EINVAL; 1808 } 1809 1810 for_rx_tx(t) { 1811 if (nr_flags & excluded_direction[t]) { 1812 priv->np_qfirst[t] = priv->np_qlast[t] = 0; 1813 continue; 1814 } 1815 switch (nr_mode) { 1816 case NR_REG_ALL_NIC: 1817 priv->np_qfirst[t] = 0; 1818 priv->np_qlast[t] = nma_get_nrings(na, t); 1819 ND("ALL/PIPE: %s %d %d", nm_txrx2str(t), 1820 priv->np_qfirst[t], priv->np_qlast[t]); 1821 break; 1822 case NR_REG_SW: 1823 case NR_REG_NIC_SW: 1824 if (!(na->na_flags & NAF_HOST_RINGS)) { 1825 D("host rings not supported"); 1826 return EINVAL; 1827 } 1828 priv->np_qfirst[t] = (nr_mode == NR_REG_SW ? 1829 nma_get_nrings(na, t) : 0); 1830 priv->np_qlast[t] = nma_get_nrings(na, t) + 1; 1831 ND("%s: %s %d %d", nr_mode == NR_REG_SW ? 
"SW" : "NIC+SW", 1832 nm_txrx2str(t), 1833 priv->np_qfirst[t], priv->np_qlast[t]); 1834 break; 1835 case NR_REG_ONE_NIC: 1836 if (nr_ringid >= na->num_tx_rings && 1837 nr_ringid >= na->num_rx_rings) { 1838 D("invalid ring id %d", nr_ringid); 1839 return EINVAL; 1840 } 1841 /* if not enough rings, use the first one */ 1842 j = nr_ringid; 1843 if (j >= nma_get_nrings(na, t)) 1844 j = 0; 1845 priv->np_qfirst[t] = j; 1846 priv->np_qlast[t] = j + 1; 1847 ND("ONE_NIC: %s %d %d", nm_txrx2str(t), 1848 priv->np_qfirst[t], priv->np_qlast[t]); 1849 break; 1850 default: 1851 D("invalid regif type %d", nr_mode); 1852 return EINVAL; 1853 } 1854 } 1855 priv->np_flags = nr_flags | nr_mode; // TODO 1856 1857 /* Allow transparent forwarding mode in the host --> nic 1858 * direction only if all the TX hw rings have been opened. */ 1859 if (priv->np_qfirst[NR_TX] == 0 && 1860 priv->np_qlast[NR_TX] >= na->num_tx_rings) { 1861 priv->np_sync_flags |= NAF_CAN_FORWARD_DOWN; 1862 } 1863 1864 if (netmap_verbose) { 1865 D("%s: tx [%d,%d) rx [%d,%d) id %d", 1866 na->name, 1867 priv->np_qfirst[NR_TX], 1868 priv->np_qlast[NR_TX], 1869 priv->np_qfirst[NR_RX], 1870 priv->np_qlast[NR_RX], 1871 nr_ringid); 1872 } 1873 return 0; 1874 } 1875 1876 1877 /* 1878 * Set the ring ID. For devices with a single queue, a request 1879 * for all rings is the same as a single ring. 1880 */ 1881 static int 1882 netmap_set_ringid(struct netmap_priv_d *priv, uint32_t nr_mode, 1883 uint16_t nr_ringid, uint64_t nr_flags) 1884 { 1885 struct netmap_adapter *na = priv->np_na; 1886 int error; 1887 enum txrx t; 1888 1889 error = netmap_interp_ringid(priv, nr_mode, nr_ringid, nr_flags); 1890 if (error) { 1891 return error; 1892 } 1893 1894 priv->np_txpoll = (nr_flags & NR_NO_TX_POLL) ? 0 : 1; 1895 1896 /* optimization: count the users registered for more than 1897 * one ring, which are the ones sleeping on the global queue. 1898 * The default netmap_notify() callback will then 1899 * avoid signaling the global queue if nobody is using it 1900 */ 1901 for_rx_tx(t) { 1902 if (nm_si_user(priv, t)) 1903 na->si_users[t]++; 1904 } 1905 return 0; 1906 } 1907 1908 static void 1909 netmap_unset_ringid(struct netmap_priv_d *priv) 1910 { 1911 struct netmap_adapter *na = priv->np_na; 1912 enum txrx t; 1913 1914 for_rx_tx(t) { 1915 if (nm_si_user(priv, t)) 1916 na->si_users[t]--; 1917 priv->np_qfirst[t] = priv->np_qlast[t] = 0; 1918 } 1919 priv->np_flags = 0; 1920 priv->np_txpoll = 0; 1921 } 1922 1923 1924 /* Set the nr_pending_mode for the requested rings. 1925 * If requested, also try to get exclusive access to the rings, provided 1926 * the rings we want to bind are not exclusively owned by a previous bind. 
1927 */
1928 static int
1929 netmap_krings_get(struct netmap_priv_d *priv)
1930 {
1931 struct netmap_adapter *na = priv->np_na;
1932 u_int i;
1933 struct netmap_kring *kring;
1934 int excl = (priv->np_flags & NR_EXCLUSIVE);
1935 enum txrx t;
1936
1937 if (netmap_verbose)
1938 D("%s: grabbing tx [%d, %d) rx [%d, %d)",
1939 na->name,
1940 priv->np_qfirst[NR_TX],
1941 priv->np_qlast[NR_TX],
1942 priv->np_qfirst[NR_RX],
1943 priv->np_qlast[NR_RX]);
1944
1945 /* first round: check that none of the requested rings
1946 * is already exclusively owned, and that we are not
1947 * asking for exclusive ownership of rings already in use
1948 */
1949 for_rx_tx(t) {
1950 for (i = priv->np_qfirst[t]; i < priv->np_qlast[t]; i++) {
1951 kring = NMR(na, t)[i];
1952 if ((kring->nr_kflags & NKR_EXCLUSIVE) ||
1953 (kring->users && excl))
1954 {
1955 ND("ring %s busy", kring->name);
1956 return EBUSY;
1957 }
1958 }
1959 }
1960
1961 /* second round: increment usage count (possibly marking them
1962 * as exclusive) and set the nr_pending_mode
1963 */
1964 for_rx_tx(t) {
1965 for (i = priv->np_qfirst[t]; i < priv->np_qlast[t]; i++) {
1966 kring = NMR(na, t)[i];
1967 kring->users++;
1968 if (excl)
1969 kring->nr_kflags |= NKR_EXCLUSIVE;
1970 kring->nr_pending_mode = NKR_NETMAP_ON;
1971 }
1972 }
1973
1974 return 0;
1975
1976 }
1977
1978 /* Undo netmap_krings_get(). This is done by clearing the exclusive mode
1979 * if it was requested at regif time, and by clearing nr_pending_mode
1980 * if we are the last user of the involved rings. */
1981 static void
1982 netmap_krings_put(struct netmap_priv_d *priv)
1983 {
1984 struct netmap_adapter *na = priv->np_na;
1985 u_int i;
1986 struct netmap_kring *kring;
1987 int excl = (priv->np_flags & NR_EXCLUSIVE);
1988 enum txrx t;
1989
1990 ND("%s: releasing tx [%d, %d) rx [%d, %d)",
1991 na->name,
1992 priv->np_qfirst[NR_TX],
1993 priv->np_qlast[NR_TX],
1994 priv->np_qfirst[NR_RX],
1995 priv->np_qlast[NR_RX]);
1996
1997 for_rx_tx(t) {
1998 for (i = priv->np_qfirst[t]; i < priv->np_qlast[t]; i++) {
1999 kring = NMR(na, t)[i];
2000 if (excl)
2001 kring->nr_kflags &= ~NKR_EXCLUSIVE;
2002 kring->users--;
2003 if (kring->users == 0)
2004 kring->nr_pending_mode = NKR_NETMAP_OFF;
2005 }
2006 }
2007 }
2008
2009 static int
2010 nm_priv_rx_enabled(struct netmap_priv_d *priv)
2011 {
2012 return (priv->np_qfirst[NR_RX] != priv->np_qlast[NR_RX]);
2013 }
2014
2015 /*
2016 * possibly move the interface to netmap mode.
2017 * On success, 0 is returned and priv->np_nifp points to the new netmap_if;
2017 * otherwise an error code is returned.
2018 * This must be called with NMG_LOCK held.
2019 *
2020 * The following na callbacks are called in the process:
2021 *
2022 * na->nm_config() [by netmap_update_config]
2023 * (get current number and size of rings)
2024 *
2025 * We have a generic one for linux (netmap_linux_config).
2026 * The bwrap has to override this, since it has to forward
2027 * the request to the wrapped adapter (netmap_bwrap_config).
2028 *
2029 *
2030 * na->nm_krings_create()
2031 * (create and init the krings array)
2032 *
2033 * One of the following:
2034 *
2035 * * netmap_hw_krings_create, (hw ports)
2036 * creates the standard layout for the krings
2037 * and adds the mbq (used for the host rings).
2038 *
2039 * * netmap_vp_krings_create (VALE ports)
2040 * add leases and scratchpads
2041 *
2042 * * netmap_pipe_krings_create (pipes)
2043 * create the krings and rings of both ends and
2044 * cross-link them
2045 *
2046 * * netmap_monitor_krings_create (monitors)
2047 * avoid allocating the mbq
2048 *
2049 * * netmap_bwrap_krings_create (bwraps)
2050 * create both the bwrap krings array,
2051 * the krings array of the wrapped adapter, and
2052 * (if needed) the fake array for the host adapter
2053 *
2054 * na->nm_register(, 1)
2055 * (put the adapter in netmap mode)
2056 *
2057 * This may be one of the following:
2058 *
2059 * * netmap_hw_reg (hw ports)
2060 * checks that the ifp is still there, then calls
2061 * the hardware specific callback;
2062 *
2063 * * netmap_vp_reg (VALE ports)
2064 * If the port is connected to a bridge,
2065 * set the NAF_NETMAP_ON flag under the
2066 * bridge write lock.
2067 *
2068 * * netmap_pipe_reg (pipes)
2069 * inform the other pipe end that it is no
2070 * longer responsible for the lifetime of this
2071 * pipe end
2072 *
2073 * * netmap_monitor_reg (monitors)
2074 * intercept the sync callbacks of the monitored
2075 * rings
2076 *
2077 * * netmap_bwrap_reg (bwraps)
2078 * cross-link the bwrap and hwna rings,
2079 * forward the request to the hwna, override
2080 * the hwna notify callback (so that frames
2081 * coming from outside go through the bridge).
2082 *
2083 *
2084 */
2085 int
2086 netmap_do_regif(struct netmap_priv_d *priv, struct netmap_adapter *na,
2087 uint32_t nr_mode, uint16_t nr_ringid, uint64_t nr_flags)
2088 {
2089 struct netmap_if *nifp = NULL;
2090 int error;
2091
2092 NMG_LOCK_ASSERT();
2093 priv->np_na = na; /* store the reference */
2094 error = netmap_mem_finalize(na->nm_mem, na);
2095 if (error)
2096 goto err;
2097
2098 if (na->active_fds == 0) {
2099
2100 /* cache the allocator info in the na */
2101 error = netmap_mem_get_lut(na->nm_mem, &na->na_lut);
2102 if (error)
2103 goto err_drop_mem;
2104 ND("lut %p bufs %u size %u", na->na_lut.lut, na->na_lut.objtotal,
2105 na->na_lut.objsize);
2106
2107 /* ring configuration may have changed, fetch from the card */
2108 netmap_update_config(na);
2109 }
2110
2111 /* compute the range of tx and rx rings to monitor */
2112 error = netmap_set_ringid(priv, nr_mode, nr_ringid, nr_flags);
2113 if (error)
2114 goto err_put_lut;
2115
2116 if (na->active_fds == 0) {
2117 /*
2118 * If this is the first registration of the adapter,
2119 * perform sanity checks and create the in-kernel view
2120 * of the netmap rings (the netmap krings).
2121 */
2122 if (na->ifp && nm_priv_rx_enabled(priv)) {
2123 /* This netmap adapter is attached to an ifnet. */
2124 unsigned nbs = NETMAP_BUF_SIZE(na);
2125 unsigned mtu = nm_os_ifnet_mtu(na->ifp);
2126
2127 ND("%s: mtu %d rx_buf_maxsize %d netmap_buf_size %d",
2128 na->name, mtu, na->rx_buf_maxsize, nbs);
2129
2130 if (na->rx_buf_maxsize == 0) {
2131 D("%s: error: rx_buf_maxsize == 0", na->name);
2132 error = EIO;
2133 goto err_drop_mem;
2134 }
2135
2136 if (mtu <= na->rx_buf_maxsize) {
2137 /* The MTU fits a single NIC slot. We only
2138 * need to check that netmap buffers are
2139 * large enough to hold an MTU. NS_MOREFRAG
2140 * cannot be used in this case. */
2141 if (nbs < mtu) {
2142 nm_prerr("error: netmap buf size (%u) "
2143 "< device MTU (%u)\n", nbs, mtu);
2144 error = EINVAL;
2145 goto err_drop_mem;
2146 }
2147 } else {
2148 /* More NIC slots may be needed to receive
2149 * or transmit a single packet.
Check that
2150 * the adapter supports NS_MOREFRAG and that
2151 * netmap buffers are large enough to hold
2152 * the maximum per-slot size. */
2153 if (!(na->na_flags & NAF_MOREFRAG)) {
2154 nm_prerr("error: large MTU (%d) needed "
2155 "but %s does not support "
2156 "NS_MOREFRAG\n", mtu,
2157 na->ifp->if_xname);
2158 error = EINVAL;
2159 goto err_drop_mem;
2160 } else if (nbs < na->rx_buf_maxsize) {
2161 nm_prerr("error: using NS_MOREFRAG on "
2162 "%s requires netmap buf size "
2163 ">= %u\n", na->ifp->if_xname,
2164 na->rx_buf_maxsize);
2165 error = EINVAL;
2166 goto err_drop_mem;
2167 } else {
2168 nm_prinf("info: netmap application on "
2169 "%s needs to support "
2170 "NS_MOREFRAG "
2171 "(MTU=%u,netmap_buf_size=%u)\n",
2172 na->ifp->if_xname, mtu, nbs);
2173 }
2174 }
2175 }
2176
2177 /*
2178 * Depending on the adapter, this may also create
2179 * the netmap rings themselves
2180 */
2181 error = na->nm_krings_create(na);
2182 if (error)
2183 goto err_put_lut;
2184
2185 }
2186
2187 /* now the krings must exist and we can check whether some
2188 * previous bind has exclusive ownership on them, and set
2189 * nr_pending_mode
2190 */
2191 error = netmap_krings_get(priv);
2192 if (error)
2193 goto err_del_krings;
2194
2195 /* create any missing netmap rings */
2196 error = netmap_mem_rings_create(na);
2197 if (error)
2198 goto err_rel_excl;
2199
2200 /* in all cases, create a new netmap_if */
2201 nifp = netmap_mem_if_new(na, priv);
2202 if (nifp == NULL) {
2203 error = ENOMEM;
2204 goto err_rel_excl;
2205 }
2206
2207 if (nm_kring_pending(priv)) {
2208 /* Some kring is switching mode, tell the adapter to
2209 * react to this. */
2210 error = na->nm_register(na, 1);
2211 if (error)
2212 goto err_del_if;
2213 }
2214
2215 /* Commit the reference. */
2216 na->active_fds++;
2217
2218 /*
2219 * advertise that the interface is ready by setting np_nifp.
2220 * The barrier is needed because readers (poll, *SYNC and mmap)
2221 * check for priv->np_nifp != NULL without locking
2222 */
2223 mb(); /* make sure previous writes are visible to all CPUs */
2224 priv->np_nifp = nifp;
2225
2226 return 0;
2227
2228 err_del_if:
2229 netmap_mem_if_delete(na, nifp);
2230 err_rel_excl:
2231 netmap_krings_put(priv);
2232 netmap_mem_rings_delete(na);
2233 err_del_krings:
2234 if (na->active_fds == 0)
2235 na->nm_krings_delete(na);
2236 err_put_lut:
2237 if (na->active_fds == 0)
2238 memset(&na->na_lut, 0, sizeof(na->na_lut));
2239 err_drop_mem:
2240 netmap_mem_drop(na);
2241 err:
2242 priv->np_na = NULL;
2243 return error;
2244 }
2245
2246
2247 /*
2248 * update kring and ring at the end of rxsync/txsync.
2249 */
2250 static inline void
2251 nm_sync_finalize(struct netmap_kring *kring)
2252 {
2253 /*
2254 * Update ring tail to what the kernel knows
2255 * After txsync: head/rhead/hwcur might be behind cur/rcur
2256 * if no carrier.
2257 */
2258 kring->ring->tail = kring->rtail = kring->nr_hwtail;
2259
2260 ND(5, "%s now hwcur %d hwtail %d head %d cur %d tail %d",
2261 kring->name, kring->nr_hwcur, kring->nr_hwtail,
2262 kring->rhead, kring->rcur, kring->rtail);
2263 }
2264
2265 /* set ring timestamp */
2266 static inline void
2267 ring_timestamp_set(struct netmap_ring *ring)
2268 {
2269 if (netmap_no_timestamp == 0 || ring->flags & NR_TIMESTAMP) {
2270 microtime(&ring->ts);
2271 }
2272 }
2273
2274 static int nmreq_copyin(struct nmreq_header *, int);
2275 static int nmreq_copyout(struct nmreq_header *, int);
2276 static int nmreq_checkoptions(struct nmreq_header *);
2277
2278 /*
2279 * ioctl(2) support for the "netmap" device.
2280 *
2281 * The following commands are accepted:
2282 * - NIOCCTRL device control API
2283 * - NIOCTXSYNC sync TX rings
2284 * - NIOCRXSYNC sync RX rings
2285 * - SIOCGIFADDR just for convenience
2286 * - NIOCGINFO deprecated (legacy API)
2287 * - NIOCREGIF deprecated (legacy API)
2288 *
2289 * Return 0 on success, errno otherwise.
2290 */
2291 int
2292 netmap_ioctl(struct netmap_priv_d *priv, u_long cmd, caddr_t data,
2293 struct thread *td, int nr_body_is_user)
2294 {
2295 struct mbq q; /* packets from RX hw queues to host stack */
2296 struct netmap_adapter *na = NULL;
2297 struct netmap_mem_d *nmd = NULL;
2298 struct ifnet *ifp = NULL;
2299 int error = 0;
2300 u_int i, qfirst, qlast;
2301 struct netmap_if *nifp;
2302 struct netmap_kring **krings;
2303 int sync_flags;
2304 enum txrx t;
2305
2306 switch (cmd) {
2307 case NIOCCTRL: {
2308 struct nmreq_header *hdr = (struct nmreq_header *)data;
2309
2310 if (hdr->nr_version != NETMAP_API) {
2311 D("API mismatch for reqtype %d: got %d need %d",
2312 hdr->nr_reqtype,
2313 hdr->nr_version, NETMAP_API);
2314 hdr->nr_version = NETMAP_API;
2315 }
2316 if (hdr->nr_version < NETMAP_MIN_API ||
2317 hdr->nr_version > NETMAP_MAX_API) {
2318 return EINVAL;
2319 }
2320
2321 /* Make a kernel-space copy of the user-space nr_body.
2322 * For convenience, the nr_body pointer and the pointers
2323 * in the options list will be replaced with their
2324 * kernel-space counterparts. The original pointers are
2325 * saved internally and later restored by nmreq_copyout.
2326 */
2327 error = nmreq_copyin(hdr, nr_body_is_user);
2328 if (error) {
2329 return error;
2330 }
2331
2332 /* Sanitize hdr->nr_name. */
2333 hdr->nr_name[sizeof(hdr->nr_name) - 1] = '\0';
2334
2335 switch (hdr->nr_reqtype) {
2336 case NETMAP_REQ_REGISTER: {
2337 struct nmreq_register *req =
2338 (struct nmreq_register *)(uintptr_t)hdr->nr_body;
2339 /* Protect access to priv from concurrent requests.
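 *
 * Userspace reaches this case with something like the following
 * sketch (illustrative only, error handling omitted; "em0" is just
 * an example port name):
 *
 *	struct nmreq_header hdr;
 *	struct nmreq_register req;
 *
 *	memset(&hdr, 0, sizeof(hdr));
 *	memset(&req, 0, sizeof(req));
 *	hdr.nr_version = NETMAP_API;
 *	hdr.nr_reqtype = NETMAP_REQ_REGISTER;
 *	hdr.nr_body = (uintptr_t)&req;
 *	strncpy(hdr.nr_name, "em0", sizeof(hdr.nr_name) - 1);
 *	req.nr_mode = NR_REG_ALL_NIC;
 *	ioctl(fd, NIOCCTRL, &hdr);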
*/ 2340 NMG_LOCK(); 2341 do { 2342 u_int memflags; 2343 #ifdef WITH_EXTMEM 2344 struct nmreq_option *opt; 2345 #endif /* WITH_EXTMEM */ 2346 2347 if (priv->np_nifp != NULL) { /* thread already registered */ 2348 error = EBUSY; 2349 break; 2350 } 2351 2352 #ifdef WITH_EXTMEM 2353 opt = nmreq_findoption((struct nmreq_option *)(uintptr_t)hdr->nr_options, 2354 NETMAP_REQ_OPT_EXTMEM); 2355 if (opt != NULL) { 2356 struct nmreq_opt_extmem *e = 2357 (struct nmreq_opt_extmem *)opt; 2358 2359 error = nmreq_checkduplicate(opt); 2360 if (error) { 2361 opt->nro_status = error; 2362 break; 2363 } 2364 nmd = netmap_mem_ext_create(e->nro_usrptr, 2365 &e->nro_info, &error); 2366 opt->nro_status = error; 2367 if (nmd == NULL) 2368 break; 2369 } 2370 #endif /* WITH_EXTMEM */ 2371 2372 if (nmd == NULL && req->nr_mem_id) { 2373 /* find the allocator and get a reference */ 2374 nmd = netmap_mem_find(req->nr_mem_id); 2375 if (nmd == NULL) { 2376 error = EINVAL; 2377 break; 2378 } 2379 } 2380 /* find the interface and a reference */ 2381 error = netmap_get_na(hdr, &na, &ifp, nmd, 2382 1 /* create */); /* keep reference */ 2383 if (error) 2384 break; 2385 if (NETMAP_OWNED_BY_KERN(na)) { 2386 error = EBUSY; 2387 break; 2388 } 2389 2390 if (na->virt_hdr_len && !(req->nr_flags & NR_ACCEPT_VNET_HDR)) { 2391 error = EIO; 2392 break; 2393 } 2394 2395 error = netmap_do_regif(priv, na, req->nr_mode, 2396 req->nr_ringid, req->nr_flags); 2397 if (error) { /* reg. failed, release priv and ref */ 2398 break; 2399 } 2400 nifp = priv->np_nifp; 2401 priv->np_td = td; /* for debugging purposes */ 2402 2403 /* return the offset of the netmap_if object */ 2404 req->nr_rx_rings = na->num_rx_rings; 2405 req->nr_tx_rings = na->num_tx_rings; 2406 req->nr_rx_slots = na->num_rx_desc; 2407 req->nr_tx_slots = na->num_tx_desc; 2408 error = netmap_mem_get_info(na->nm_mem, &req->nr_memsize, &memflags, 2409 &req->nr_mem_id); 2410 if (error) { 2411 netmap_do_unregif(priv); 2412 break; 2413 } 2414 if (memflags & NETMAP_MEM_PRIVATE) { 2415 *(uint32_t *)(uintptr_t)&nifp->ni_flags |= NI_PRIV_MEM; 2416 } 2417 for_rx_tx(t) { 2418 priv->np_si[t] = nm_si_user(priv, t) ? 2419 &na->si[t] : &NMR(na, t)[priv->np_qfirst[t]]->si; 2420 } 2421 2422 if (req->nr_extra_bufs) { 2423 if (netmap_verbose) 2424 D("requested %d extra buffers", 2425 req->nr_extra_bufs); 2426 req->nr_extra_bufs = netmap_extra_alloc(na, 2427 &nifp->ni_bufs_head, req->nr_extra_bufs); 2428 if (netmap_verbose) 2429 D("got %d extra buffers", req->nr_extra_bufs); 2430 } 2431 req->nr_offset = netmap_mem_if_offset(na->nm_mem, nifp); 2432 2433 error = nmreq_checkoptions(hdr); 2434 if (error) { 2435 netmap_do_unregif(priv); 2436 break; 2437 } 2438 2439 /* store ifp reference so that priv destructor may release it */ 2440 priv->np_ifp = ifp; 2441 } while (0); 2442 if (error) { 2443 netmap_unget_na(na, ifp); 2444 } 2445 /* release the reference from netmap_mem_find() or 2446 * netmap_mem_ext_create() 2447 */ 2448 if (nmd) 2449 netmap_mem_put(nmd); 2450 NMG_UNLOCK(); 2451 break; 2452 } 2453 2454 case NETMAP_REQ_PORT_INFO_GET: { 2455 struct nmreq_port_info_get *req = 2456 (struct nmreq_port_info_get *)(uintptr_t)hdr->nr_body; 2457 2458 NMG_LOCK(); 2459 do { 2460 u_int memflags; 2461 2462 if (hdr->nr_name[0] != '\0') { 2463 /* Build a nmreq_register out of the nmreq_port_info_get, 2464 * so that we can call netmap_get_na(). 
*/
2465 struct nmreq_register regreq;
2466 bzero(&regreq, sizeof(regreq));
2467 regreq.nr_tx_slots = req->nr_tx_slots;
2468 regreq.nr_rx_slots = req->nr_rx_slots;
2469 regreq.nr_tx_rings = req->nr_tx_rings;
2470 regreq.nr_rx_rings = req->nr_rx_rings;
2471 regreq.nr_mem_id = req->nr_mem_id;
2472
2473 /* get a refcount */
2474 hdr->nr_reqtype = NETMAP_REQ_REGISTER;
2475 hdr->nr_body = (uintptr_t)&regreq;
2476 error = netmap_get_na(hdr, &na, &ifp, NULL, 1 /* create */);
2477 hdr->nr_reqtype = NETMAP_REQ_PORT_INFO_GET; /* reset type */
2478 hdr->nr_body = (uintptr_t)req; /* reset nr_body */
2479 if (error) {
2480 na = NULL;
2481 ifp = NULL;
2482 break;
2483 }
2484 nmd = na->nm_mem; /* get memory allocator */
2485 } else {
2486 nmd = netmap_mem_find(req->nr_mem_id ? req->nr_mem_id : 1);
2487 if (nmd == NULL) {
2488 error = EINVAL;
2489 break;
2490 }
2491 }
2492
2493 error = netmap_mem_get_info(nmd, &req->nr_memsize, &memflags,
2494 &req->nr_mem_id);
2495 if (error)
2496 break;
2497 if (na == NULL) /* only memory info */
2498 break;
2499 req->nr_offset = 0;
2500 req->nr_rx_slots = req->nr_tx_slots = 0;
2501 netmap_update_config(na);
2502 req->nr_rx_rings = na->num_rx_rings;
2503 req->nr_tx_rings = na->num_tx_rings;
2504 req->nr_rx_slots = na->num_rx_desc;
2505 req->nr_tx_slots = na->num_tx_desc;
2506 } while (0);
2507 netmap_unget_na(na, ifp);
2508 NMG_UNLOCK();
2509 break;
2510 }
2511 #ifdef WITH_VALE
2512 case NETMAP_REQ_VALE_ATTACH: {
2513 error = nm_bdg_ctl_attach(hdr, NULL /* userspace request */);
2514 break;
2515 }
2516
2517 case NETMAP_REQ_VALE_DETACH: {
2518 error = nm_bdg_ctl_detach(hdr, NULL /* userspace request */);
2519 break;
2520 }
2521
2522 case NETMAP_REQ_VALE_LIST: {
2523 error = netmap_bdg_list(hdr);
2524 break;
2525 }
2526
2527 case NETMAP_REQ_PORT_HDR_SET: {
2528 struct nmreq_port_hdr *req =
2529 (struct nmreq_port_hdr *)(uintptr_t)hdr->nr_body;
2530 /* Build a nmreq_register out of the nmreq_port_hdr,
2531 * so that we can call netmap_get_bdg_na(). */
2532 struct nmreq_register regreq;
2533 bzero(&regreq, sizeof(regreq));
2534 /* For now we only support virtio-net headers, and only for
2535 * VALE ports, but this may change in future. Valid lengths
2536 * for the virtio-net header are 0 (no header), 10 and 12. */
2537 if (req->nr_hdr_len != 0 &&
2538 req->nr_hdr_len != sizeof(struct nm_vnet_hdr) &&
2539 req->nr_hdr_len != 12) {
2540 error = EINVAL;
2541 break;
2542 }
2543 NMG_LOCK();
2544 hdr->nr_reqtype = NETMAP_REQ_REGISTER;
2545 hdr->nr_body = (uintptr_t)&regreq;
2546 error = netmap_get_bdg_na(hdr, &na, NULL, 0);
2547 hdr->nr_reqtype = NETMAP_REQ_PORT_HDR_SET;
2548 hdr->nr_body = (uintptr_t)req;
2549 if (na && !error) {
2550 struct netmap_vp_adapter *vpna =
2551 (struct netmap_vp_adapter *)na;
2552 na->virt_hdr_len = req->nr_hdr_len;
2553 if (na->virt_hdr_len) {
2554 vpna->mfs = NETMAP_BUF_SIZE(na);
2555 }
2556 D("Using vnet_hdr_len %d for %p", na->virt_hdr_len, na);
2557 netmap_adapter_put(na);
2558 } else if (!na) {
2559 error = ENXIO;
2560 }
2561 NMG_UNLOCK();
2562 break;
2563 }
2564
2565 case NETMAP_REQ_PORT_HDR_GET: {
2566 /* Get vnet-header length for this netmap port */
2567 struct nmreq_port_hdr *req =
2568 (struct nmreq_port_hdr *)(uintptr_t)hdr->nr_body;
2569 /* Build a nmreq_register out of the nmreq_port_hdr,
2570 * so that we can call netmap_get_bdg_na().
*/
2571 struct nmreq_register regreq;
2572 struct ifnet *ifp;
2573
2574 bzero(&regreq, sizeof(regreq));
2575 NMG_LOCK();
2576 hdr->nr_reqtype = NETMAP_REQ_REGISTER;
2577 hdr->nr_body = (uintptr_t)&regreq;
2578 error = netmap_get_na(hdr, &na, &ifp, NULL, 0);
2579 hdr->nr_reqtype = NETMAP_REQ_PORT_HDR_GET;
2580 hdr->nr_body = (uintptr_t)req;
2581 if (na && !error) {
2582 req->nr_hdr_len = na->virt_hdr_len;
2583 }
2584 netmap_unget_na(na, ifp);
2585 NMG_UNLOCK();
2586 break;
2587 }
2588
2589 case NETMAP_REQ_VALE_NEWIF: {
2590 error = nm_vi_create(hdr);
2591 break;
2592 }
2593
2594 case NETMAP_REQ_VALE_DELIF: {
2595 error = nm_vi_destroy(hdr->nr_name);
2596 break;
2597 }
2598
2599 case NETMAP_REQ_VALE_POLLING_ENABLE:
2600 case NETMAP_REQ_VALE_POLLING_DISABLE: {
2601 error = nm_bdg_polling(hdr);
2602 break;
2603 }
2604 #endif /* WITH_VALE */
2605 case NETMAP_REQ_POOLS_INFO_GET: {
2606 struct nmreq_pools_info *req =
2607 (struct nmreq_pools_info *)(uintptr_t)hdr->nr_body;
2608 /* Get information from the memory allocator. This
2609 * netmap device must already be bound to a port.
2610 * Note that hdr->nr_name is ignored. */
2611 NMG_LOCK();
2612 if (priv->np_na && priv->np_na->nm_mem) {
2613 struct netmap_mem_d *nmd = priv->np_na->nm_mem;
2614 error = netmap_mem_pools_info_get(req, nmd);
2615 } else {
2616 error = EINVAL;
2617 }
2618 NMG_UNLOCK();
2619 break;
2620 }
2621
2622 default: {
2623 error = EINVAL;
2624 break;
2625 }
2626 }
2627 /* Write back request body to userspace and reset the
2628 * user-space pointer. */
2629 error = nmreq_copyout(hdr, error);
2630 break;
2631 }
2632
2633 case NIOCTXSYNC:
2634 case NIOCRXSYNC: {
2635 nifp = priv->np_nifp;
2636
2637 if (nifp == NULL) {
2638 error = ENXIO;
2639 break;
2640 }
2641 mb(); /* make sure following reads are not from cache */
2642
2643 na = priv->np_na; /* we have a reference */
2644
2645 if (na == NULL) {
2646 D("Internal error: nifp != NULL && na == NULL");
2647 error = ENXIO;
2648 break;
2649 }
2650
2651 mbq_init(&q);
2652 t = (cmd == NIOCTXSYNC ? NR_TX : NR_RX);
2653 krings = NMR(na, t);
2654 qfirst = priv->np_qfirst[t];
2655 qlast = priv->np_qlast[t];
2656 sync_flags = priv->np_sync_flags;
2657
2658 for (i = qfirst; i < qlast; i++) {
2659 struct netmap_kring *kring = krings[i];
2660 struct netmap_ring *ring = kring->ring;
2661
2662 if (unlikely(nm_kr_tryget(kring, 1, &error))) {
2663 error = (error ?
EIO : 0); 2664 continue; 2665 } 2666 2667 if (cmd == NIOCTXSYNC) { 2668 if (netmap_verbose & NM_VERB_TXSYNC) 2669 D("pre txsync ring %d cur %d hwcur %d", 2670 i, ring->cur, 2671 kring->nr_hwcur); 2672 if (nm_txsync_prologue(kring, ring) >= kring->nkr_num_slots) { 2673 netmap_ring_reinit(kring); 2674 } else if (kring->nm_sync(kring, sync_flags | NAF_FORCE_RECLAIM) == 0) { 2675 nm_sync_finalize(kring); 2676 } 2677 if (netmap_verbose & NM_VERB_TXSYNC) 2678 D("post txsync ring %d cur %d hwcur %d", 2679 i, ring->cur, 2680 kring->nr_hwcur); 2681 } else { 2682 if (nm_rxsync_prologue(kring, ring) >= kring->nkr_num_slots) { 2683 netmap_ring_reinit(kring); 2684 } 2685 if (nm_may_forward_up(kring)) { 2686 /* transparent forwarding, see netmap_poll() */ 2687 netmap_grab_packets(kring, &q, netmap_fwd); 2688 } 2689 if (kring->nm_sync(kring, sync_flags | NAF_FORCE_READ) == 0) { 2690 nm_sync_finalize(kring); 2691 } 2692 ring_timestamp_set(ring); 2693 } 2694 nm_kr_put(kring); 2695 } 2696 2697 if (mbq_peek(&q)) { 2698 netmap_send_up(na->ifp, &q); 2699 } 2700 2701 break; 2702 } 2703 2704 default: { 2705 return netmap_ioctl_legacy(priv, cmd, data, td); 2706 break; 2707 } 2708 } 2709 2710 return (error); 2711 } 2712 2713 size_t 2714 nmreq_size_by_type(uint16_t nr_reqtype) 2715 { 2716 switch (nr_reqtype) { 2717 case NETMAP_REQ_REGISTER: 2718 return sizeof(struct nmreq_register); 2719 case NETMAP_REQ_PORT_INFO_GET: 2720 return sizeof(struct nmreq_port_info_get); 2721 case NETMAP_REQ_VALE_ATTACH: 2722 return sizeof(struct nmreq_vale_attach); 2723 case NETMAP_REQ_VALE_DETACH: 2724 return sizeof(struct nmreq_vale_detach); 2725 case NETMAP_REQ_VALE_LIST: 2726 return sizeof(struct nmreq_vale_list); 2727 case NETMAP_REQ_PORT_HDR_SET: 2728 case NETMAP_REQ_PORT_HDR_GET: 2729 return sizeof(struct nmreq_port_hdr); 2730 case NETMAP_REQ_VALE_NEWIF: 2731 return sizeof(struct nmreq_vale_newif); 2732 case NETMAP_REQ_VALE_DELIF: 2733 return 0; 2734 case NETMAP_REQ_VALE_POLLING_ENABLE: 2735 case NETMAP_REQ_VALE_POLLING_DISABLE: 2736 return sizeof(struct nmreq_vale_polling); 2737 case NETMAP_REQ_POOLS_INFO_GET: 2738 return sizeof(struct nmreq_pools_info); 2739 } 2740 return 0; 2741 } 2742 2743 static size_t 2744 nmreq_opt_size_by_type(uint16_t nro_reqtype) 2745 { 2746 size_t rv = sizeof(struct nmreq_option); 2747 #ifdef NETMAP_REQ_OPT_DEBUG 2748 if (nro_reqtype & NETMAP_REQ_OPT_DEBUG) 2749 return (nro_reqtype & ~NETMAP_REQ_OPT_DEBUG); 2750 #endif /* NETMAP_REQ_OPT_DEBUG */ 2751 switch (nro_reqtype) { 2752 #ifdef WITH_EXTMEM 2753 case NETMAP_REQ_OPT_EXTMEM: 2754 rv = sizeof(struct nmreq_opt_extmem); 2755 break; 2756 #endif /* WITH_EXTMEM */ 2757 } 2758 /* subtract the common header */ 2759 return rv - sizeof(struct nmreq_option); 2760 } 2761 2762 int 2763 nmreq_copyin(struct nmreq_header *hdr, int nr_body_is_user) 2764 { 2765 size_t rqsz, optsz, bufsz; 2766 int error; 2767 char *ker = NULL, *p; 2768 struct nmreq_option **next, *src; 2769 struct nmreq_option buf; 2770 uint64_t *ptrs; 2771 2772 if (hdr->nr_reserved) 2773 return EINVAL; 2774 2775 if (!nr_body_is_user) 2776 return 0; 2777 2778 hdr->nr_reserved = nr_body_is_user; 2779 2780 /* compute the total size of the buffer */ 2781 rqsz = nmreq_size_by_type(hdr->nr_reqtype); 2782 if (rqsz > NETMAP_REQ_MAXSIZE) { 2783 error = EMSGSIZE; 2784 goto out_err; 2785 } 2786 if ((rqsz && hdr->nr_body == (uintptr_t)NULL) || 2787 (!rqsz && hdr->nr_body != (uintptr_t)NULL)) { 2788 /* Request body expected, but not found; or 2789 * request body found but unexpected. 
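 * (For example, NETMAP_REQ_REGISTER requires a struct nmreq_register
 * body, while NETMAP_REQ_VALE_DELIF takes no body at all, so for the
 * latter nr_body must be 0; see nmreq_size_by_type() above.)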
*/ 2790 error = EINVAL; 2791 goto out_err; 2792 } 2793 2794 bufsz = 2 * sizeof(void *) + rqsz; 2795 optsz = 0; 2796 for (src = (struct nmreq_option *)(uintptr_t)hdr->nr_options; src; 2797 src = (struct nmreq_option *)(uintptr_t)buf.nro_next) 2798 { 2799 error = copyin(src, &buf, sizeof(*src)); 2800 if (error) 2801 goto out_err; 2802 optsz += sizeof(*src); 2803 optsz += nmreq_opt_size_by_type(buf.nro_reqtype); 2804 if (rqsz + optsz > NETMAP_REQ_MAXSIZE) { 2805 error = EMSGSIZE; 2806 goto out_err; 2807 } 2808 bufsz += optsz + sizeof(void *); 2809 } 2810 2811 ker = nm_os_malloc(bufsz); 2812 if (ker == NULL) { 2813 error = ENOMEM; 2814 goto out_err; 2815 } 2816 p = ker; 2817 2818 /* make a copy of the user pointers */ 2819 ptrs = (uint64_t*)p; 2820 *ptrs++ = hdr->nr_body; 2821 *ptrs++ = hdr->nr_options; 2822 p = (char *)ptrs; 2823 2824 /* copy the body */ 2825 error = copyin((void *)(uintptr_t)hdr->nr_body, p, rqsz); 2826 if (error) 2827 goto out_restore; 2828 /* overwrite the user pointer with the in-kernel one */ 2829 hdr->nr_body = (uintptr_t)p; 2830 p += rqsz; 2831 2832 /* copy the options */ 2833 next = (struct nmreq_option **)&hdr->nr_options; 2834 src = *next; 2835 while (src) { 2836 struct nmreq_option *opt; 2837 2838 /* copy the option header */ 2839 ptrs = (uint64_t *)p; 2840 opt = (struct nmreq_option *)(ptrs + 1); 2841 error = copyin(src, opt, sizeof(*src)); 2842 if (error) 2843 goto out_restore; 2844 /* make a copy of the user next pointer */ 2845 *ptrs = opt->nro_next; 2846 /* overwrite the user pointer with the in-kernel one */ 2847 *next = opt; 2848 2849 /* initialize the option as not supported. 2850 * Recognized options will update this field. 2851 */ 2852 opt->nro_status = EOPNOTSUPP; 2853 2854 p = (char *)(opt + 1); 2855 2856 /* copy the option body */ 2857 optsz = nmreq_opt_size_by_type(opt->nro_reqtype); 2858 if (optsz) { 2859 /* the option body follows the option header */ 2860 error = copyin(src + 1, p, optsz); 2861 if (error) 2862 goto out_restore; 2863 p += optsz; 2864 } 2865 2866 /* move to next option */ 2867 next = (struct nmreq_option **)&opt->nro_next; 2868 src = *next; 2869 } 2870 return 0; 2871 2872 out_restore: 2873 ptrs = (uint64_t *)ker; 2874 hdr->nr_body = *ptrs++; 2875 hdr->nr_options = *ptrs++; 2876 hdr->nr_reserved = 0; 2877 nm_os_free(ker); 2878 out_err: 2879 return error; 2880 } 2881 2882 static int 2883 nmreq_copyout(struct nmreq_header *hdr, int rerror) 2884 { 2885 struct nmreq_option *src, *dst; 2886 void *ker = (void *)(uintptr_t)hdr->nr_body, *bufstart; 2887 uint64_t *ptrs; 2888 size_t bodysz; 2889 int error; 2890 2891 if (!hdr->nr_reserved) 2892 return rerror; 2893 2894 /* restore the user pointers in the header */ 2895 ptrs = (uint64_t *)ker - 2; 2896 bufstart = ptrs; 2897 hdr->nr_body = *ptrs++; 2898 src = (struct nmreq_option *)(uintptr_t)hdr->nr_options; 2899 hdr->nr_options = *ptrs; 2900 2901 if (!rerror) { 2902 /* copy the body */ 2903 bodysz = nmreq_size_by_type(hdr->nr_reqtype); 2904 error = copyout(ker, (void *)(uintptr_t)hdr->nr_body, bodysz); 2905 if (error) { 2906 rerror = error; 2907 goto out; 2908 } 2909 } 2910 2911 /* copy the options */ 2912 dst = (struct nmreq_option *)(uintptr_t)hdr->nr_options; 2913 while (src) { 2914 size_t optsz; 2915 uint64_t next; 2916 2917 /* restore the user pointer */ 2918 next = src->nro_next; 2919 ptrs = (uint64_t *)src - 1; 2920 src->nro_next = *ptrs; 2921 2922 /* always copy the option header */ 2923 error = copyout(src, dst, sizeof(*src)); 2924 if (error) { 2925 rerror = error; 2926 goto out; 2927 } 
2928
2929 /* copy the option body only if there was no error */
2930 if (!rerror && !src->nro_status) {
2931 optsz = nmreq_opt_size_by_type(src->nro_reqtype);
2932 if (optsz) {
2933 error = copyout(src + 1, dst + 1, optsz);
2934 if (error) {
2935 rerror = error;
2936 goto out;
2937 }
2938 }
2939 }
2940 src = (struct nmreq_option *)(uintptr_t)next;
2941 dst = (struct nmreq_option *)(uintptr_t)*ptrs;
2942 }
2943
2944
2945 out:
2946 hdr->nr_reserved = 0;
2947 nm_os_free(bufstart);
2948 return rerror;
2949 }
2950
2951 struct nmreq_option *
2952 nmreq_findoption(struct nmreq_option *opt, uint16_t reqtype)
2953 {
2954 for ( ; opt; opt = (struct nmreq_option *)(uintptr_t)opt->nro_next)
2955 if (opt->nro_reqtype == reqtype)
2956 return opt;
2957 return NULL;
2958 }
2959
2960 int
2961 nmreq_checkduplicate(struct nmreq_option *opt) {
2962 uint16_t type = opt->nro_reqtype;
2963 int dup = 0;
2964
2965 while ((opt = nmreq_findoption((struct nmreq_option *)(uintptr_t)opt->nro_next,
2966 type))) {
2967 dup++;
2968 opt->nro_status = EINVAL;
2969 }
2970 return (dup ? EINVAL : 0);
2971 }
2972
2973 static int
2974 nmreq_checkoptions(struct nmreq_header *hdr)
2975 {
2976 struct nmreq_option *opt;
2977 /* return error if there is still any option
2978 * marked as not supported
2979 */
2980
2981 for (opt = (struct nmreq_option *)(uintptr_t)hdr->nr_options; opt;
2982 opt = (struct nmreq_option *)(uintptr_t)opt->nro_next)
2983 if (opt->nro_status == EOPNOTSUPP)
2984 return EOPNOTSUPP;
2985
2986 return 0;
2987 }
2988
2989 /*
2990 * select(2) and poll(2) handlers for the "netmap" device.
2991 *
2992 * Can be called for one or more queues.
2993 * Return the event mask corresponding to ready events.
2994 * If there are no ready events, do a selrecord on either individual
2995 * selinfo or on the global one.
2996 * Device-dependent parts (locking and sync of tx/rx rings)
2997 * are done through callbacks.
2998 *
2999 * On linux, the arguments are really pwait, the poll table, and 'td' is a struct file *;
3000 * the first one is remapped to pwait because selrecord() uses the name as a
3001 * hidden argument.
3002 */
3003 int
3004 netmap_poll(struct netmap_priv_d *priv, int events, NM_SELRECORD_T *sr)
3005 {
3006 struct netmap_adapter *na;
3007 struct netmap_kring *kring;
3008 struct netmap_ring *ring;
3009 u_int i, check_all_tx, check_all_rx, want[NR_TXRX], revents = 0;
3010 #define want_tx want[NR_TX]
3011 #define want_rx want[NR_RX]
3012 struct mbq q; /* packets from RX hw queues to host stack */
3013
3014 /*
3015 * In order to avoid nested locks, we need to "double check"
3016 * txsync and rxsync if we decide to do a selrecord().
3017 * retry_tx (and retry_rx, later) prevent looping forever.
3018 */
3019 int retry_tx = 1, retry_rx = 1;
3020
3021 /* Transparent mode: send_down is 1 if we have found some
3022 * packets to forward (host RX ring --> NIC) during the rx
3023 * scan and we have not sent them down to the NIC yet.
3024 * Transparent mode requires binding all rings to a single
3025 * file descriptor.
3026 */
3027 int send_down = 0;
3028 int sync_flags = priv->np_sync_flags;
3029
3030 mbq_init(&q);
3031
3032 if (priv->np_nifp == NULL) {
3033 D("No if registered");
3034 return POLLERR;
3035 }
3036 mb(); /* make sure following reads are not from cache */
3037
3038 na = priv->np_na;
3039
3040 if (!nm_netmap_on(na))
3041 return POLLERR;
3042
3043 if (netmap_verbose & 0x8000)
3044 D("device %s events 0x%x", na->name, events);
3045 want_tx = events & (POLLOUT | POLLWRNORM);
3046 want_rx = events & (POLLIN | POLLRDNORM);
3047
3048 /*
3049 * check_all_{tx|rx} are set if the card has more than one queue AND
3050 * the file descriptor is bound to all of them. If so, we sleep on
3051 * the "global" selinfo, otherwise we sleep on individual selinfo
3052 * (FreeBSD only allows two selinfo's per file descriptor).
3053 * The interrupt routine in the driver wakes one or the other
3054 * (or both) depending on which clients are active.
3055 *
3056 * rxsync() is only called if we run out of buffers on a POLLIN.
3057 * txsync() is called if we run out of buffers on POLLOUT, or
3058 * there are pending packets to send. The latter can be disabled
3059 * by passing NETMAP_NO_TX_POLL in the NIOCREGIF call.
3060 */
3061 check_all_tx = nm_si_user(priv, NR_TX);
3062 check_all_rx = nm_si_user(priv, NR_RX);
3063
3064 #ifdef __FreeBSD__
3065 /*
3066 * We start with a lock free round which is cheap if we have
3067 * slots available. If this fails, then lock and call the sync
3068 * routines. We can't do this on Linux, as the contract says
3069 * that we must call nm_os_selrecord() unconditionally.
3070 */
3071 if (want_tx) {
3072 enum txrx t = NR_TX;
3073 for (i = priv->np_qfirst[t]; want[t] && i < priv->np_qlast[t]; i++) {
3074 kring = NMR(na, t)[i];
3075 /* XXX compare ring->cur and kring->tail */
3076 if (!nm_ring_empty(kring->ring)) {
3077 revents |= want[t];
3078 want[t] = 0; /* also breaks the loop */
3079 }
3080 }
3081 }
3082 if (want_rx) {
3083 enum txrx t = NR_RX;
3084 want_rx = 0; /* look for a reason to run the handlers */
3085 for (i = priv->np_qfirst[t]; i < priv->np_qlast[t]; i++) {
3086 kring = NMR(na, t)[i];
3087 if (kring->ring->cur == kring->ring->tail /* try fetch new buffers */
3088 || kring->rhead != kring->ring->head /* release buffers */) {
3089 want_rx = 1;
3090 }
3091 }
3092 if (!want_rx)
3093 revents |= events & (POLLIN | POLLRDNORM); /* we have data */
3094 }
3095 #endif
3096
3097 #ifdef linux
3098 /* The selrecord must be unconditional on linux. */
3099 nm_os_selrecord(sr, check_all_tx ?
3100 &na->si[NR_TX] : &na->tx_rings[priv->np_qfirst[NR_TX]]->si);
3101 nm_os_selrecord(sr, check_all_rx ?
3102 &na->si[NR_RX] : &na->rx_rings[priv->np_qfirst[NR_RX]]->si);
3103 #endif /* linux */
3104
3105 /*
3106 * If we want to push packets out (priv->np_txpoll) or
3107 * want_tx is still set, we must issue txsync calls
3108 * (on all rings, so that the tx rings do not stall).
3109 * Fortunately, normal tx mode has np_txpoll set.
3110 */
3111 if (priv->np_txpoll || want_tx) {
3112 /*
3113 * The first round checks if anyone is ready, if not
3114 * do a selrecord and another round to handle races.
3115 * want_tx goes to 0 if any space is found, and is
3116 * used to skip rings with no pending transmissions.
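 * For example (illustrative): with TX rings 0..3 bound, if ring 1
 * reports free slots after its txsync, want_tx is cleared and rings
 * 2..3 are then only synced if they have slots to flush to the NIC
 * (head != hwcur).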
3117 */
3118 flush_tx:
3119 for (i = priv->np_qfirst[NR_TX]; i < priv->np_qlast[NR_TX]; i++) {
3120 int found = 0;
3121
3122 kring = na->tx_rings[i];
3123 ring = kring->ring;
3124
3125 /*
3126 * Don't try to txsync this TX ring if we already found some
3127 * space in some of the TX rings (want_tx == 0) and there are no
3128 * TX slots in this ring that need to be flushed to the NIC
3129 * (head == hwcur).
3130 */
3131 if (!send_down && !want_tx && ring->head == kring->nr_hwcur)
3132 continue;
3133
3134 if (nm_kr_tryget(kring, 1, &revents))
3135 continue;
3136
3137 if (nm_txsync_prologue(kring, ring) >= kring->nkr_num_slots) {
3138 netmap_ring_reinit(kring);
3139 revents |= POLLERR;
3140 } else {
3141 if (kring->nm_sync(kring, sync_flags))
3142 revents |= POLLERR;
3143 else
3144 nm_sync_finalize(kring);
3145 }
3146
3147 /*
3148 * If we found new slots, notify potential
3149 * listeners on the same ring.
3150 * Since we just did a txsync, look at the copies
3151 * of cur,tail in the kring.
3152 */
3153 found = kring->rcur != kring->rtail;
3154 nm_kr_put(kring);
3155 if (found) { /* notify other listeners */
3156 revents |= want_tx;
3157 want_tx = 0;
3158 #ifndef linux
3159 kring->nm_notify(kring, 0);
3160 #endif /* !linux */
3161 }
3162 }
3163 /* if there were any packets to forward we must have handled them by now */
3164 send_down = 0;
3165 if (want_tx && retry_tx && sr) {
3166 #ifndef linux
3167 nm_os_selrecord(sr, check_all_tx ?
3168 &na->si[NR_TX] : &na->tx_rings[priv->np_qfirst[NR_TX]]->si);
3169 #endif /* !linux */
3170 retry_tx = 0;
3171 goto flush_tx;
3172 }
3173 }
3174
3175 /*
3176 * If want_rx is still set scan receive rings.
3177 * Do it on all rings because otherwise we starve.
3178 */
3179 if (want_rx) {
3180 /* two rounds here for race avoidance */
3181 do_retry_rx:
3182 for (i = priv->np_qfirst[NR_RX]; i < priv->np_qlast[NR_RX]; i++) {
3183 int found = 0;
3184
3185 kring = na->rx_rings[i];
3186 ring = kring->ring;
3187
3188 if (unlikely(nm_kr_tryget(kring, 1, &revents)))
3189 continue;
3190
3191 if (nm_rxsync_prologue(kring, ring) >= kring->nkr_num_slots) {
3192 netmap_ring_reinit(kring);
3193 revents |= POLLERR;
3194 }
3195 /* now we can use kring->rcur, rtail */
3196
3197 /*
3198 * transparent mode support: collect packets from
3199 * hw rxring(s) that have been released by the user
3200 */
3201 if (nm_may_forward_up(kring)) {
3202 netmap_grab_packets(kring, &q, netmap_fwd);
3203 }
3204
3205 /* Clear the NR_FORWARD flag anyway, it may be set by
3206 * the nm_sync() below only for the host RX ring (see
3207 * netmap_rxsync_from_host()). */
3208 kring->nr_kflags &= ~NR_FORWARD;
3209 if (kring->nm_sync(kring, sync_flags))
3210 revents |= POLLERR;
3211 else
3212 nm_sync_finalize(kring);
3213 send_down |= (kring->nr_kflags & NR_FORWARD);
3214 ring_timestamp_set(ring);
3215 found = kring->rcur != kring->rtail;
3216 nm_kr_put(kring);
3217 if (found) {
3218 revents |= want_rx;
3219 retry_rx = 0;
3220 #ifndef linux
3221 kring->nm_notify(kring, 0);
3222 #endif /* !linux */
3223 }
3224 }
3225
3226 #ifndef linux
3227 if (retry_rx && sr) {
3228 nm_os_selrecord(sr, check_all_rx ?
3229 &na->si[NR_RX] : &na->rx_rings[priv->np_qfirst[NR_RX]]->si);
3230 }
3231 #endif /* !linux */
3232 if (send_down || retry_rx) {
3233 retry_rx = 0;
3234 if (send_down)
3235 goto flush_tx; /* and retry_rx */
3236 else
3237 goto do_retry_rx;
3238 }
3239 }
3240
3241 /*
3242 * Transparent mode: released bufs (i.e.
between kring->nr_hwcur and
3243 * ring->head) marked with NS_FORWARD on hw rx rings are passed up
3244 * to the host stack.
3245 */
3246
3247 if (mbq_peek(&q)) {
3248 netmap_send_up(na->ifp, &q);
3249 }
3250
3251 return (revents);
3252 #undef want_tx
3253 #undef want_rx
3254 }
3255
3256 int
3257 nma_intr_enable(struct netmap_adapter *na, int onoff)
3258 {
3259 bool changed = false;
3260 enum txrx t;
3261 int i;
3262
3263 for_rx_tx(t) {
3264 for (i = 0; i < nma_get_nrings(na, t); i++) {
3265 struct netmap_kring *kring = NMR(na, t)[i];
3266 int on = !(kring->nr_kflags & NKR_NOINTR);
3267
3268 if (!!onoff != !!on) {
3269 changed = true;
3270 }
3271 if (onoff) {
3272 kring->nr_kflags &= ~NKR_NOINTR;
3273 } else {
3274 kring->nr_kflags |= NKR_NOINTR;
3275 }
3276 }
3277 }
3278
3279 if (!changed) {
3280 return 0; /* nothing to do */
3281 }
3282
3283 if (!na->nm_intr) {
3284 D("Cannot %s interrupts for %s", onoff ? "enable" : "disable",
3285 na->name);
3286 return -1;
3287 }
3288
3289 na->nm_intr(na, onoff);
3290
3291 return 0;
3292 }
3293
3294
3295 /*-------------------- driver support routines -------------------*/
3296
3297 /* default notify callback */
3298 static int
3299 netmap_notify(struct netmap_kring *kring, int flags)
3300 {
3301 struct netmap_adapter *na = kring->notify_na;
3302 enum txrx t = kring->tx;
3303
3304 nm_os_selwakeup(&kring->si);
3305 /* optimization: avoid a wake up on the global
3306 * queue if nobody has registered for more
3307 * than one ring
3308 */
3309 if (na->si_users[t] > 0)
3310 nm_os_selwakeup(&na->si[t]);
3311
3312 return NM_IRQ_COMPLETED;
3313 }
3314
3315 /* Called by all routines that create netmap_adapters:
3316 * provide some defaults and get a reference to the
3317 * memory allocator
3318 */
3319 int
3320 netmap_attach_common(struct netmap_adapter *na)
3321 {
3322 if (na->num_tx_rings == 0 || na->num_rx_rings == 0) {
3323 D("%s: invalid rings tx %d rx %d",
3324 na->name, na->num_tx_rings, na->num_rx_rings);
3325 return EINVAL;
3326 }
3327
3328 if (!na->rx_buf_maxsize) {
3329 /* Set a conservative default (larger is safer). */
3330 na->rx_buf_maxsize = PAGE_SIZE;
3331 }
3332
3333 #ifdef __FreeBSD__
3334 if (na->na_flags & NAF_HOST_RINGS && na->ifp) {
3335 na->if_input = na->ifp->if_input; /* for netmap_send_up */
3336 }
3337 na->pdev = na; /* make sure netmap_mem_map() is called */
3338 #endif /* __FreeBSD__ */
3339 if (na->nm_krings_create == NULL) {
3340 /* we assume that we have been called by a driver,
3341 * since other port types all provide their own
3342 * nm_krings_create
3343 */
3344 na->nm_krings_create = netmap_hw_krings_create;
3345 na->nm_krings_delete = netmap_hw_krings_delete;
3346 }
3347 if (na->nm_notify == NULL)
3348 na->nm_notify = netmap_notify;
3349 na->active_fds = 0;
3350
3351 if (na->nm_mem == NULL) {
3352 /* use the global allocator */
3353 na->nm_mem = netmap_mem_get(&nm_mem);
3354 }
3355 #ifdef WITH_VALE
3356 if (na->nm_bdg_attach == NULL)
3357 /* no special nm_bdg_attach callback. On VALE
3358 * attach, we need to interpose a bwrap
3359 */
3360 na->nm_bdg_attach = netmap_bwrap_attach;
3361 #endif
3362
3363 return 0;
3364 }
3365
3366 /* Wrapper for the register callback provided by netmap-enabled
3367 * hardware drivers.
3368 * nm_iszombie(na) means that the driver module has been
3369 * unloaded, so we cannot call into it.
3370 * nm_os_ifnet_lock() must guarantee mutual exclusion with
3371 * module unloading.
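 *
 * A native driver typically lands here via netmap_attach(); e.g.
 * (driver-side sketch, the foo_* names are illustrative):
 *
 *	struct netmap_adapter na;
 *
 *	bzero(&na, sizeof(na));
 *	na.ifp = ifp;
 *	na.num_tx_rings = na.num_rx_rings = 4;
 *	na.num_tx_desc = na.num_rx_desc = 1024;
 *	na.nm_register = foo_netmap_reg;	// wrapped by netmap_hw_reg
 *	na.nm_txsync = foo_netmap_txsync;
 *	na.nm_rxsync = foo_netmap_rxsync;
 *	netmap_attach(&na);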
3372 */ 3373 static int 3374 netmap_hw_reg(struct netmap_adapter *na, int onoff) 3375 { 3376 struct netmap_hw_adapter *hwna = 3377 (struct netmap_hw_adapter*)na; 3378 int error = 0; 3379 3380 nm_os_ifnet_lock(); 3381 3382 if (nm_iszombie(na)) { 3383 if (onoff) { 3384 error = ENXIO; 3385 } else if (na != NULL) { 3386 na->na_flags &= ~NAF_NETMAP_ON; 3387 } 3388 goto out; 3389 } 3390 3391 error = hwna->nm_hw_register(na, onoff); 3392 3393 out: 3394 nm_os_ifnet_unlock(); 3395 3396 return error; 3397 } 3398 3399 static void 3400 netmap_hw_dtor(struct netmap_adapter *na) 3401 { 3402 if (nm_iszombie(na) || na->ifp == NULL) 3403 return; 3404 3405 WNA(na->ifp) = NULL; 3406 } 3407 3408 3409 /* 3410 * Allocate a netmap_adapter object, and initialize it from the 3411 * 'arg' passed by the driver on attach. 3412 * We allocate a block of memory of 'size' bytes, which has room 3413 * for struct netmap_adapter plus additional room private to 3414 * the caller. 3415 * Return 0 on success, ENOMEM otherwise. 3416 */ 3417 int 3418 netmap_attach_ext(struct netmap_adapter *arg, size_t size, int override_reg) 3419 { 3420 struct netmap_hw_adapter *hwna = NULL; 3421 struct ifnet *ifp = NULL; 3422 3423 if (size < sizeof(struct netmap_hw_adapter)) { 3424 D("Invalid netmap adapter size %d", (int)size); 3425 return EINVAL; 3426 } 3427 3428 if (arg == NULL || arg->ifp == NULL) 3429 goto fail; 3430 3431 ifp = arg->ifp; 3432 if (NA(ifp) && !NM_NA_VALID(ifp)) { 3433 /* If NA(ifp) is not null but there is no valid netmap 3434 * adapter it means that someone else is using the same 3435 * pointer (e.g. ax25_ptr on linux). This happens for 3436 * instance when also PF_RING is in use. */ 3437 D("Error: netmap adapter hook is busy"); 3438 return EBUSY; 3439 } 3440 3441 hwna = nm_os_malloc(size); 3442 if (hwna == NULL) 3443 goto fail; 3444 hwna->up = *arg; 3445 hwna->up.na_flags |= NAF_HOST_RINGS | NAF_NATIVE; 3446 strncpy(hwna->up.name, ifp->if_xname, sizeof(hwna->up.name)); 3447 if (override_reg) { 3448 hwna->nm_hw_register = hwna->up.nm_register; 3449 hwna->up.nm_register = netmap_hw_reg; 3450 } 3451 if (netmap_attach_common(&hwna->up)) { 3452 nm_os_free(hwna); 3453 goto fail; 3454 } 3455 netmap_adapter_get(&hwna->up); 3456 3457 NM_ATTACH_NA(ifp, &hwna->up); 3458 3459 #ifdef linux 3460 if (ifp->netdev_ops) { 3461 /* prepare a clone of the netdev ops */ 3462 #ifndef NETMAP_LINUX_HAVE_NETDEV_OPS 3463 hwna->nm_ndo.ndo_start_xmit = ifp->netdev_ops; 3464 #else 3465 hwna->nm_ndo = *ifp->netdev_ops; 3466 #endif /* NETMAP_LINUX_HAVE_NETDEV_OPS */ 3467 } 3468 hwna->nm_ndo.ndo_start_xmit = linux_netmap_start_xmit; 3469 hwna->nm_ndo.ndo_change_mtu = linux_netmap_change_mtu; 3470 if (ifp->ethtool_ops) { 3471 hwna->nm_eto = *ifp->ethtool_ops; 3472 } 3473 hwna->nm_eto.set_ringparam = linux_netmap_set_ringparam; 3474 #ifdef NETMAP_LINUX_HAVE_SET_CHANNELS 3475 hwna->nm_eto.set_channels = linux_netmap_set_channels; 3476 #endif /* NETMAP_LINUX_HAVE_SET_CHANNELS */ 3477 if (arg->nm_config == NULL) { 3478 hwna->up.nm_config = netmap_linux_config; 3479 } 3480 #endif /* linux */ 3481 if (arg->nm_dtor == NULL) { 3482 hwna->up.nm_dtor = netmap_hw_dtor; 3483 } 3484 3485 if_printf(ifp, "netmap queues/slots: TX %d/%d, RX %d/%d\n", 3486 hwna->up.num_tx_rings, hwna->up.num_tx_desc, 3487 hwna->up.num_rx_rings, hwna->up.num_rx_desc); 3488 return 0; 3489 3490 fail: 3491 D("fail, arg %p ifp %p na %p", arg, ifp, hwna); 3492 return (hwna ? 
EINVAL : ENOMEM); 3493 } 3494 3495 3496 int 3497 netmap_attach(struct netmap_adapter *arg) 3498 { 3499 return netmap_attach_ext(arg, sizeof(struct netmap_hw_adapter), 3500 1 /* override nm_reg */); 3501 } 3502 3503 3504 void 3505 NM_DBG(netmap_adapter_get)(struct netmap_adapter *na) 3506 { 3507 if (!na) { 3508 return; 3509 } 3510 3511 refcount_acquire(&na->na_refcount); 3512 } 3513 3514 3515 /* returns 1 iff the netmap_adapter is destroyed */ 3516 int 3517 NM_DBG(netmap_adapter_put)(struct netmap_adapter *na) 3518 { 3519 if (!na) 3520 return 1; 3521 3522 if (!refcount_release(&na->na_refcount)) 3523 return 0; 3524 3525 if (na->nm_dtor) 3526 na->nm_dtor(na); 3527 3528 if (na->tx_rings) { /* XXX should not happen */ 3529 D("freeing leftover tx_rings"); 3530 na->nm_krings_delete(na); 3531 } 3532 netmap_pipe_dealloc(na); 3533 if (na->nm_mem) 3534 netmap_mem_put(na->nm_mem); 3535 bzero(na, sizeof(*na)); 3536 nm_os_free(na); 3537 3538 return 1; 3539 } 3540 3541 /* nm_krings_create callback for all hardware native adapters */ 3542 int 3543 netmap_hw_krings_create(struct netmap_adapter *na) 3544 { 3545 int ret = netmap_krings_create(na, 0); 3546 if (ret == 0) { 3547 /* initialize the mbq for the sw rx ring */ 3548 mbq_safe_init(&na->rx_rings[na->num_rx_rings]->rx_queue); 3549 ND("initialized sw rx queue %d", na->num_rx_rings); 3550 } 3551 return ret; 3552 } 3553 3554 3555 3556 /* 3557 * Called on module unload by the netmap-enabled drivers 3558 */ 3559 void 3560 netmap_detach(struct ifnet *ifp) 3561 { 3562 struct netmap_adapter *na = NA(ifp); 3563 3564 if (!na) 3565 return; 3566 3567 NMG_LOCK(); 3568 netmap_set_all_rings(na, NM_KR_LOCKED); 3569 /* 3570 * if the netmap adapter is not native, somebody 3571 * changed it, so we can not release it here. 3572 * The NAF_ZOMBIE flag will notify the new owner that 3573 * the driver is gone. 3574 */ 3575 if (!(na->na_flags & NAF_NATIVE) || !netmap_adapter_put(na)) { 3576 na->na_flags |= NAF_ZOMBIE; 3577 } 3578 /* give active users a chance to notice that NAF_ZOMBIE has been 3579 * turned on, so that they can stop and return an error to userspace. 3580 * Note that this becomes a NOP if there are no active users and, 3581 * therefore, the put() above has deleted the na, since now NA(ifp) is 3582 * NULL. 3583 */ 3584 netmap_enable_all_rings(ifp); 3585 NMG_UNLOCK(); 3586 } 3587 3588 3589 /* 3590 * Intercept packets from the network stack and pass them 3591 * to netmap as incoming packets on the 'software' ring. 3592 * 3593 * We only store packets in a bounded mbq and then copy them 3594 * in the relevant rxsync routine. 3595 * 3596 * We rely on the OS to make sure that the ifp and na do not go 3597 * away (typically the caller checks for IFF_DRV_RUNNING or the like). 3598 * In nm_register() or whenever there is a reinitialization, 3599 * we make sure to make the mode change visible here. 
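 *
 * The capacity check below bounds the queue. For example
 * (illustrative numbers): with nkr_num_slots = 1024, nr_hwcur = 100
 * and nr_hwtail = 90, busy = 90 - 100 + 1024 = 1014 slots are still
 * in use, so at most 1024 - 1 - 1014 = 9 mbufs may sit in the queue
 * before further packets are dropped.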
3600 */
3601 int
3602 netmap_transmit(struct ifnet *ifp, struct mbuf *m)
3603 {
3604 struct netmap_adapter *na = NA(ifp);
3605 struct netmap_kring *kring, *tx_kring;
3606 u_int len = MBUF_LEN(m);
3607 u_int error = ENOBUFS;
3608 unsigned int txr;
3609 struct mbq *q;
3610 int busy;
3611
3612 kring = na->rx_rings[na->num_rx_rings];
3613 // XXX [Linux] we do not need this lock
3614 // if we follow the down/configure/up protocol -gl
3615 // mtx_lock(&na->core_lock);
3616
3617 if (!nm_netmap_on(na)) {
3618 D("%s not in netmap mode anymore", na->name);
3619 error = ENXIO;
3620 goto done;
3621 }
3622
3623 txr = MBUF_TXQ(m);
3624 if (txr >= na->num_tx_rings) {
3625 txr %= na->num_tx_rings;
3626 }
3627 tx_kring = NMR(na, NR_TX)[txr];
3628
3629 if (tx_kring->nr_mode == NKR_NETMAP_OFF) {
3630 return MBUF_TRANSMIT(na, ifp, m);
3631 }
3632
3633 q = &kring->rx_queue;
3634
3635 // XXX reconsider long packets if we handle fragments
3636 if (len > NETMAP_BUF_SIZE(na)) { /* too long for us */
3637 D("%s from_host, drop packet size %d > %d", na->name,
3638 len, NETMAP_BUF_SIZE(na));
3639 goto done;
3640 }
3641
3642 if (nm_os_mbuf_has_offld(m)) {
3643 RD(1, "%s drop mbuf that needs offloading", na->name);
3644 goto done;
3645 }
3646
3647 /* protect against netmap_rxsync_from_host(), netmap_sw_to_nic()
3648 * and maybe other instances of netmap_transmit (the latter
3649 * not possible on Linux).
3650 * We enqueue the mbuf only if we are sure there is going to be
3651 * enough room in the host RX ring, otherwise we drop it.
3652 */
3653 mbq_lock(q);
3654
3655 busy = kring->nr_hwtail - kring->nr_hwcur;
3656 if (busy < 0)
3657 busy += kring->nkr_num_slots;
3658 if (busy + mbq_len(q) >= kring->nkr_num_slots - 1) {
3659 RD(2, "%s full hwcur %d hwtail %d qlen %d", na->name,
3660 kring->nr_hwcur, kring->nr_hwtail, mbq_len(q));
3661 } else {
3662 mbq_enqueue(q, m);
3663 ND(2, "%s %d bufs in queue", na->name, mbq_len(q));
3664 /* notify outside the lock */
3665 m = NULL;
3666 error = 0;
3667 }
3668 mbq_unlock(q);
3669
3670 done:
3671 if (m)
3672 m_freem(m);
3673 /* unconditionally wake up listeners */
3674 kring->nm_notify(kring, 0);
3675 /* this is normally netmap_notify(), but for nics
3676 * connected to a bridge it is netmap_bwrap_intr_notify(),
3677 * which possibly forwards the frames through the switch
3678 */
3679
3680 return (error);
3681 }
3682
3683
3684 /*
3685 * netmap_reset() is called by the driver routines when reinitializing
3686 * a ring. The driver is in charge of locking to protect the kring.
3687 * If native netmap mode is not set, just return NULL.
3688 * If native netmap mode is set, we must in particular set nr_mode to
3689 * NKR_NETMAP_ON.
3690 */
3691 struct netmap_slot *
3692 netmap_reset(struct netmap_adapter *na, enum txrx tx, u_int n,
3693 u_int new_cur)
3694 {
3695 struct netmap_kring *kring;
3696 int new_hwofs, lim;
3697
3698 if (!nm_native_on(na)) {
3699 ND("interface not in native netmap mode");
3700 return NULL; /* nothing to reinitialize */
3701 }
3702
3703 /* XXX note- in the new scheme, we are not guaranteed to be
3704 * under lock (e.g. when called on a device reset).
3705 * In this case, we should set a flag and not trust the
3706 * values too much. In practice: TODO
3707 * - set a RESET flag somewhere in the kring
3708 * - do the processing in a conservative way
3709 * - let the *sync() routines fix things up at the end.
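 *
 * Offset example (illustrative numbers): on a TX ring with 512 slots
 * (lim = 511), nr_hwcur = 37 and new_cur = 0 give new_hwofs = 37;
 * nr_hwtail is then realigned to 37 + 511 - 512 = 36, i.e. one slot
 * behind hwcur, so the whole ring is again available for transmission.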
3710 */ 3711 if (tx == NR_TX) { 3712 if (n >= na->num_tx_rings) 3713 return NULL; 3714 3715 kring = na->tx_rings[n]; 3716 3717 if (kring->nr_pending_mode == NKR_NETMAP_OFF) { 3718 kring->nr_mode = NKR_NETMAP_OFF; 3719 return NULL; 3720 } 3721 3722 // XXX check whether we should use hwcur or rcur 3723 new_hwofs = kring->nr_hwcur - new_cur; 3724 } else { 3725 if (n >= na->num_rx_rings) 3726 return NULL; 3727 kring = na->rx_rings[n]; 3728 3729 if (kring->nr_pending_mode == NKR_NETMAP_OFF) { 3730 kring->nr_mode = NKR_NETMAP_OFF; 3731 return NULL; 3732 } 3733 3734 new_hwofs = kring->nr_hwtail - new_cur; 3735 } 3736 lim = kring->nkr_num_slots - 1; 3737 if (new_hwofs > lim) 3738 new_hwofs -= lim + 1; 3739 3740 /* Always set the new offset value and realign the ring. */ 3741 if (netmap_verbose) 3742 D("%s %s%d hwofs %d -> %d, hwtail %d -> %d", 3743 na->name, 3744 tx == NR_TX ? "TX" : "RX", n, 3745 kring->nkr_hwofs, new_hwofs, 3746 kring->nr_hwtail, 3747 tx == NR_TX ? lim : kring->nr_hwtail); 3748 kring->nkr_hwofs = new_hwofs; 3749 if (tx == NR_TX) { 3750 kring->nr_hwtail = kring->nr_hwcur + lim; 3751 if (kring->nr_hwtail > lim) 3752 kring->nr_hwtail -= lim + 1; 3753 } 3754 3755 /* 3756 * Wakeup on the individual and global selwait 3757 * We do the wakeup here, but the ring is not yet reconfigured. 3758 * However, we are under lock so there are no races. 3759 */ 3760 kring->nr_mode = NKR_NETMAP_ON; 3761 kring->nm_notify(kring, 0); 3762 return kring->ring->slot; 3763 } 3764 3765 3766 /* 3767 * Dispatch rx/tx interrupts to the netmap rings. 3768 * 3769 * "work_done" is non-null on the RX path, NULL for the TX path. 3770 * We rely on the OS to make sure that there is only one active 3771 * instance per queue, and that there is appropriate locking. 3772 * 3773 * The 'notify' routine depends on what the ring is attached to. 3774 * - for a netmap file descriptor, do a selwakeup on the individual 3775 * waitqueue, plus one on the global one if needed 3776 * (see netmap_notify) 3777 * - for a nic connected to a switch, call the proper forwarding routine 3778 * (see netmap_bwrap_intr_notify) 3779 */ 3780 int 3781 netmap_common_irq(struct netmap_adapter *na, u_int q, u_int *work_done) 3782 { 3783 struct netmap_kring *kring; 3784 enum txrx t = (work_done ? NR_RX : NR_TX); 3785 3786 q &= NETMAP_RING_MASK; 3787 3788 if (netmap_verbose) { 3789 RD(5, "received %s queue %d", work_done ? "RX" : "TX" , q); 3790 } 3791 3792 if (q >= nma_get_nrings(na, t)) 3793 return NM_IRQ_PASS; // not a physical queue 3794 3795 kring = NMR(na, t)[q]; 3796 3797 if (kring->nr_mode == NKR_NETMAP_OFF) { 3798 return NM_IRQ_PASS; 3799 } 3800 3801 if (t == NR_RX) { 3802 kring->nr_kflags |= NKR_PENDINTR; // XXX atomic ? 3803 *work_done = 1; /* do not fire napi again */ 3804 } 3805 3806 return kring->nm_notify(kring, 0); 3807 } 3808 3809 3810 /* 3811 * Default functions to handle rx/tx interrupts from a physical device. 3812 * "work_done" is non-null on the RX path, NULL for the TX path. 3813 * 3814 * If the card is not in netmap mode, simply return NM_IRQ_PASS, 3815 * so that the caller proceeds with regular processing. 3816 * Otherwise call netmap_common_irq(). 3817 * 3818 * If the card is connected to a netmap file descriptor, 3819 * do a selwakeup on the individual queue, plus one on the global one 3820 * if needed (multiqueue card _and_ there are multiqueue listeners), 3821 * and return NR_IRQ_COMPLETED. 3822 * 3823 * Finally, if called on rx from an interface connected to a switch, 3824 * calls the proper forwarding routine. 
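 *
 * Driver-side sketch (illustrative; 'rxq_id' is whatever queue index
 * the driver uses): an RX interrupt handler typically starts with
 *
 *	u_int work_done;
 *
 *	if (netmap_rx_irq(ifp, rxq_id, &work_done) != NM_IRQ_PASS)
 *		return;		// handled by netmap
 *	// ... regular mbuf processing otherwise ...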
3825 */ 3826 int 3827 netmap_rx_irq(struct ifnet *ifp, u_int q, u_int *work_done) 3828 { 3829 struct netmap_adapter *na = NA(ifp); 3830 3831 /* 3832 * XXX emulated netmap mode sets NAF_SKIP_INTR so 3833 * we still use the regular driver even though the previous 3834 * check fails. It is unclear whether we should use 3835 * nm_native_on() here. 3836 */ 3837 if (!nm_netmap_on(na)) 3838 return NM_IRQ_PASS; 3839 3840 if (na->na_flags & NAF_SKIP_INTR) { 3841 ND("use regular interrupt"); 3842 return NM_IRQ_PASS; 3843 } 3844 3845 return netmap_common_irq(na, q, work_done); 3846 } 3847 3848 3849 /* 3850 * Module loader and unloader 3851 * 3852 * netmap_init() creates the /dev/netmap device and initializes 3853 * all global variables. Returns 0 on success, errno on failure 3854 * (but there is no chance) 3855 * 3856 * netmap_fini() destroys everything. 3857 */ 3858 3859 static struct cdev *netmap_dev; /* /dev/netmap character device. */ 3860 extern struct cdevsw netmap_cdevsw; 3861 3862 3863 void 3864 netmap_fini(void) 3865 { 3866 if (netmap_dev) 3867 destroy_dev(netmap_dev); 3868 /* we assume that there are no longer netmap users */ 3869 nm_os_ifnet_fini(); 3870 netmap_uninit_bridges(); 3871 netmap_mem_fini(); 3872 NMG_LOCK_DESTROY(); 3873 nm_prinf("netmap: unloaded module.\n"); 3874 } 3875 3876 3877 int 3878 netmap_init(void) 3879 { 3880 int error; 3881 3882 NMG_LOCK_INIT(); 3883 3884 error = netmap_mem_init(); 3885 if (error != 0) 3886 goto fail; 3887 /* 3888 * MAKEDEV_ETERNAL_KLD avoids an expensive check on syscalls 3889 * when the module is compiled in. 3890 * XXX could use make_dev_credv() to get error number 3891 */ 3892 netmap_dev = make_dev_credf(MAKEDEV_ETERNAL_KLD, 3893 &netmap_cdevsw, 0, NULL, UID_ROOT, GID_WHEEL, 0600, 3894 "netmap"); 3895 if (!netmap_dev) 3896 goto fail; 3897 3898 error = netmap_init_bridges(); 3899 if (error) 3900 goto fail; 3901 3902 #ifdef __FreeBSD__ 3903 nm_os_vi_init_index(); 3904 #endif 3905 3906 error = nm_os_ifnet_init(); 3907 if (error) 3908 goto fail; 3909 3910 nm_prinf("netmap: loaded module\n"); 3911 return (0); 3912 fail: 3913 netmap_fini(); 3914 return (EINVAL); /* may be incorrect */ 3915 } 3916