/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2011-2014 Matteo Landi, Luigi Rizzo. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   1. Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * $FreeBSD$
 *
 * Definitions of constants and the structures used by the netmap
 * framework, for the part visible to both kernel and userspace.
 * Detailed info on netmap is available with "man netmap" or at
 *
 *	http://info.iet.unipi.it/~luigi/netmap/
 *
 * This API is also used to communicate with the VALE software switch
 */

#ifndef _NET_NETMAP_H_
#define _NET_NETMAP_H_

#define	NETMAP_API	14		/* current API version */

#define	NETMAP_MIN_API	14		/* min and max versions accepted */
#define	NETMAP_MAX_API	15
/*
 * Some fields should be cache-aligned to reduce contention.
 * The alignment is architecture and OS dependent, but rather than
 * digging into OS headers to find the exact value we use an estimate
 * that should cover most architectures.
 */
#define NM_CACHE_ALIGN	128

/*
 * --- Netmap data structures ---
 *
 * The userspace data structures used by netmap are shown below.
 * They are allocated by the kernel and mmap()ed by userspace threads.
 * Pointers are implemented as memory offsets or indexes,
 * so that they can be easily dereferenced in kernel and userspace.

   KERNEL (opaque, obviously)

  ====================================================================
                                           |
     USERSPACE                             |                 struct netmap_ring
                                           +---->+---------------+
                                               / | head,cur,tail |
     struct netmap_if (nifp, 1 per fd)        /  | buf_ofs       |
      +----------------+                     /   | other fields  |
      | ni_tx_rings    |                    /    +===============+
      | ni_rx_rings    |                   /     | buf_idx, len  | slot[0]
      |                |                  /      | flags, ptr    |
      |                |                 /       +---------------+
      +================+                /        | buf_idx, len  | slot[1]
      | txring_ofs[0]  | (rel.to nifp)--'        | flags, ptr    |
      | txring_ofs[1]  |                         +---------------+
       (tx+htx entries)                           (num_slots entries)
      | txring_ofs[t]  |                         | buf_idx, len  | slot[n-1]
      +----------------+                         | flags, ptr    |
      | rxring_ofs[0]  |                         +---------------+
      | rxring_ofs[1]  |
       (rx+hrx entries)
      | rxring_ofs[r]  |
      +----------------+

 * For each "interface" (NIC, host stack, PIPE, VALE switch port) bound to
 * a file descriptor, the mmap()ed region contains a (logically readonly)
 * struct netmap_if pointing to struct netmap_ring's.
 *
 * There is one netmap_ring per physical NIC ring, plus at least one tx/rx ring
 * pair attached to the host stack (these pairs are unused for non-NIC ports).
 *
 * All physical/host stack ports share the same memory region,
 * so that zero-copy can be implemented between them.
 * VALE switch ports instead have separate memory regions.
 *
 * The netmap_ring is the userspace-visible replica of the NIC ring.
 * Each slot has the index of a buffer (MTU-sized and residing in the
 * mmapped region), its length and some flags. An extra 64-bit pointer
 * is provided for user-supplied buffers in the tx path.
 *
 * In user space, the buffer address is computed as
 *	(char *)ring + buf_ofs + index * NETMAP_BUF_SIZE
 * (an illustrative sketch is given further below in this comment).
 *
 * Added in NETMAP_API 11:
 *
 * + NIOCREGIF can request the allocation of extra spare buffers from
 *   the same memory pool. The desired number of buffers must be in
 *   nr_arg3. The ioctl may return fewer buffers, depending on memory
 *   availability. nr_arg3 will return the actual value, and, once
 *   mapped, nifp->ni_bufs_head will be the index of the first buffer.
 *
 *   The buffers are linked to each other using the first uint32_t
 *   as the index. On close, ni_bufs_head must point to the list of
 *   buffers to be released.
 *
 * + NIOCREGIF can attach to PIPE rings sharing the same memory
 *   space with a parent device. The ifname indicates the parent device,
 *   which must already exist. Flags in nr_flags indicate if we want to
 *   bind the master or slave side, the index (from nr_ringid)
 *   is just a cookie and does not need to be sequential.
 *
 * + NIOCREGIF can also attach to 'monitor' rings that replicate
 *   the content of specific rings, also from the same memory space.
 *
 * Extra flags in nr_flags support the above functions.
 * Application libraries may use the following naming scheme:
 *	netmap:foo	all NIC ring pairs
 *	netmap:foo^	only host ring pairs
 *	netmap:foo^k	the k-th host ring pair
 *	netmap:foo+	all NIC ring pairs + host ring pairs
 *	netmap:foo-k	the k-th NIC ring pair
 *	netmap:foo{k	PIPE ring pair k, master side
 *	netmap:foo}k	PIPE ring pair k, slave side
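 *
 * As an illustration (an editor's sketch; netmap_user.h provides the
 * NETMAP_BUF() helper for this computation), given a struct netmap_ring
 * pointer 'ring' obtained through the offsets in struct netmap_if, the
 * payload of slot 'i' can be located as:
 *
 *	struct netmap_slot *slot = &ring->slot[i];
 *	char *buf = (char *)ring + ring->buf_ofs +
 *		(size_t)slot->buf_idx * ring->nr_buf_size;
 *
 * after which 'slot->len' bytes of packet data are available at 'buf'.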
 *
 * Some notes about host rings:
 *
 * + The RX host rings are used to store those packets that the host network
 *   stack is trying to transmit through a NIC queue, but only if that queue
 *   is currently in netmap mode. Netmap will not intercept host stack mbufs
 *   designated to NIC queues that are not in netmap mode. As a consequence,
 *   registering a netmap port with netmap:foo^ is not enough to intercept
 *   mbufs in the RX host rings; the netmap port should be registered with
 *   netmap:foo*, or another registration should be done to open at least a
 *   NIC TX queue in netmap mode.
 *
 * + Netmap is not currently able to deal with intercepted transmit mbufs which
 *   require offloads like TSO, UFO, checksum offload, etc. It is the
 *   responsibility of the user to disable those offloads (e.g. using
 *   ifconfig on FreeBSD or ethtool -K on Linux) for an interface that is being
 *   used in netmap mode. If the offloads are not disabled, GSO and/or
 *   unchecksummed packets may be dropped immediately or end up in the host RX
 *   rings, and will be dropped as soon as the packet reaches another netmap
 *   adapter.
 */

/*
 * struct netmap_slot is a buffer descriptor
 */
struct netmap_slot {
	uint32_t buf_idx;	/* buffer index */
	uint16_t len;		/* length for this slot */
	uint16_t flags;		/* buf changed, etc. */
	uint64_t ptr;		/* pointer for indirect buffers */
};

/*
 * The following flags control how the slot is used
 */

#define	NS_BUF_CHANGED	0x0001	/* buf_idx changed */
	/*
	 * must be set whenever buf_idx is changed (as it might be
	 * necessary to recompute the physical address and mapping)
	 *
	 * It is also set by the kernel whenever the buf_idx is
	 * changed internally (e.g., by pipes). Applications may
	 * use this information to know when they can reuse the
	 * contents of previously prepared buffers.
	 */

#define	NS_REPORT	0x0002	/* ask the hardware to report results */
	/*
	 * Request notification when slot is used by the hardware.
	 * Normally transmit completions are handled lazily and
	 * may be unreported. This flag lets us know when a slot
	 * has been sent (e.g. to terminate the sender).
	 */

#define	NS_FORWARD	0x0004	/* pass packet 'forward' */
	/*
	 * (Only for physical ports, rx rings with NR_FORWARD set).
	 * Slots released to the kernel (i.e. before ring->head) with
	 * this flag set are passed to the peer ring (host/NIC),
	 * thus restoring the host-NIC connection for these slots.
	 * This supports efficient traffic monitoring or firewalling.
	 */

#define	NS_NO_LEARN	0x0008	/* disable bridge learning */
	/*
	 * On a VALE switch, do not 'learn' the source port for
	 * this buffer.
	 */

#define	NS_INDIRECT	0x0010	/* userspace buffer */
	/*
	 * (VALE tx rings only) data is in a userspace buffer,
	 * whose address is in the 'ptr' field in the slot.
	 */

#define	NS_MOREFRAG	0x0020	/* packet has more fragments */
	/*
	 * (VALE ports, ptnetmap ports and some NIC ports, e.g.
	 * ixgbe and i40e on Linux)
	 * Set on all but the last slot of a multi-segment packet.
	 * The 'len' field refers to the individual fragment.
	 */

#define	NS_PORT_SHIFT	8
#define	NS_PORT_MASK	(0xff << NS_PORT_SHIFT)
	/*
	 * The high 8 bits of the flag, if not zero, indicate the
	 * destination port for the VALE switch, overriding
	 * the lookup table.
	 */

#define	NS_RFRAGS(_slot)	( ((_slot)->flags >> 8) & 0xff)
	/*
	 * (VALE rx rings only) the high 8 bits
	 * are the number of fragments.
	 */
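
/*
 * As an illustration of NS_BUF_CHANGED (an editor's sketch, not part of
 * the API): when two rings live in the same memory region, zero-copy
 * forwarding is done by swapping buffer indexes between an rx and a tx
 * slot and flagging both slots, e.g. with a hypothetical helper such as:
 *
 *	static void
 *	zcopy_swap(struct netmap_slot *rx, struct netmap_slot *tx)
 *	{
 *		uint32_t idx = tx->buf_idx;
 *
 *		tx->buf_idx = rx->buf_idx;
 *		tx->len = rx->len;
 *		tx->flags |= NS_BUF_CHANGED;
 *		rx->buf_idx = idx;
 *		rx->flags |= NS_BUF_CHANGED;
 *	}
 */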

#define	NETMAP_MAX_FRAGS	64	/* max number of fragments */


/*
 * struct netmap_ring
 *
 * Netmap representation of a TX or RX ring (also known as "queue").
 * This is a queue implemented as a fixed-size circular array.
 * At the software level the important fields are: head, cur, tail.
 *
 * In TX rings:
 *
 *	head	first slot available for transmission.
 *	cur	wakeup point. select() and poll() will unblock
 *		when 'tail' moves past 'cur'
 *	tail	(readonly) first slot reserved to the kernel
 *
 * [head .. tail-1] can be used for new packets to send;
 * 'head' and 'cur' must be incremented as slots are filled
 * with new packets to be sent;
 * 'cur' can be moved further ahead if we need more space
 * for new transmissions. XXX todo (2014-03-12)
 *
 * In RX rings:
 *
 *	head	first valid received packet
 *	cur	wakeup point. select() and poll() will unblock
 *		when 'tail' moves past 'cur'
 *	tail	(readonly) first slot reserved to the kernel
 *
 * [head .. tail-1] contain received packets;
 * 'head' and 'cur' must be incremented as slots are consumed
 * and can be returned to the kernel;
 * 'cur' can be moved further ahead if we want to wait for
 * new packets without returning the previous ones.
 *
 * DATA OWNERSHIP/LOCKING:
 *	The netmap_ring, and all slots and buffers in the range
 *	[head .. tail-1] are owned by the user program;
 *	the kernel only accesses them during a netmap system call
 *	and in the user thread context.
 *
 *	Other slots and buffers are reserved for use by the kernel
 */
struct netmap_ring {
	/*
	 * buf_ofs is meant to be used through macros.
	 * It contains the offset of the buffer region from this
	 * descriptor.
	 */
	const int64_t	buf_ofs;
	const uint32_t	num_slots;	/* number of slots in the ring. */
	const uint32_t	nr_buf_size;
	const uint16_t	ringid;
	const uint16_t	dir;		/* 0: tx, 1: rx */

	uint32_t	head;		/* (u) first user slot */
	uint32_t	cur;		/* (u) wakeup point */
	uint32_t	tail;		/* (k) first kernel slot */

	uint32_t	flags;

	struct timeval	ts;		/* (k) time of last *sync() */

	/* opaque room for a mutex or similar object */
#if !defined(_WIN32) || defined(__CYGWIN__)
	uint8_t	__attribute__((__aligned__(NM_CACHE_ALIGN))) sem[128];
#else
	uint8_t	__declspec(align(NM_CACHE_ALIGN)) sem[128];
#endif

	/* the slots follow. This struct has variable size */
	struct netmap_slot slot[0];	/* array of slots. */
};


/*
 * RING FLAGS
 */
#define	NR_TIMESTAMP	0x0002		/* set timestamp on *sync() */
	/*
	 * updates the 'ts' field on each netmap syscall. This saves
	 * a separate gettimeofday(), and is not much worse than
	 * software timestamps generated in the interrupt handler.
	 */

#define	NR_FORWARD	0x0004		/* enable NS_FORWARD for ring */
	/*
	 * Enables the NS_FORWARD slot flag for the ring.
	 */

/*
 * Helper functions for kernel and userspace
 */

/*
 * Check if space is available in the ring. We use ring->head, which
 * points to the next netmap slot to be published to netmap. It is
 * possible that the application moves ring->cur ahead of ring->tail
 * (e.g., by moving ring->cur past ring->tail), if it wants more slots
 * than the ones currently available, and it wants to be notified when
 * more arrive. See netmap(4) for more details and examples.
 */
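
/*
 * For example, a transmitter typically works as follows (an editor's
 * sketch; more_to_send() and build_packet() are hypothetical application
 * routines, and netmap_user.h provides NETMAP_BUF()/nm_ring_next()
 * helpers for the arithmetic spelled out here):
 *
 *	uint32_t i = ring->head;
 *
 *	while (i != ring->tail && more_to_send()) {
 *		struct netmap_slot *slot = &ring->slot[i];
 *		char *buf = (char *)ring + ring->buf_ofs +
 *			(size_t)slot->buf_idx * ring->nr_buf_size;
 *
 *		slot->len = build_packet(buf, ring->nr_buf_size);
 *		i = (i + 1 == ring->num_slots) ? 0 : i + 1;
 *	}
 *	ring->head = ring->cur = i;
 *
 * after which a NIOCTXSYNC ioctl or a poll()/select() on the file
 * descriptor pushes the new packets out.
 */
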
static inline int
nm_ring_empty(struct netmap_ring *ring)
{
	return (ring->head == ring->tail);
}

/*
 * Netmap representation of an interface and its queue(s).
 * This is initialized by the kernel when binding a file
 * descriptor to a port, and should be considered as readonly
 * by user programs. The kernel never uses it.
 *
 * There is one netmap_if for each file descriptor on which we want
 * to select/poll.
 * select/poll operates on one or all pairs depending on the value of
 * nmr_queueid passed on the ioctl.
 */
struct netmap_if {
	char		ni_name[IFNAMSIZ];	/* name of the interface. */
	const uint32_t	ni_version;	/* API version, currently unused */
	const uint32_t	ni_flags;	/* properties */
#define	NI_PRIV_MEM	0x1		/* private memory region */

	/*
	 * The number of packet rings available in netmap mode.
	 * Physical NICs can have different numbers of tx and rx rings.
	 * Physical NICs also have at least a 'host' ring pair.
	 * Additionally, clients can request additional ring pairs to
	 * be used for internal communication.
	 */
	const uint32_t	ni_tx_rings;		/* number of HW tx rings */
	const uint32_t	ni_rx_rings;		/* number of HW rx rings */

	uint32_t	ni_bufs_head;		/* head index for extra bufs */
	const uint32_t	ni_host_tx_rings;	/* number of SW tx rings */
	const uint32_t	ni_host_rx_rings;	/* number of SW rx rings */
	uint32_t	ni_spare1[3];
	/*
	 * The following array contains the offset of each netmap ring
	 * from this structure, in the following order:
	 *     - NIC tx rings (ni_tx_rings);
	 *     - host tx rings (ni_host_tx_rings);
	 *     - NIC rx rings (ni_rx_rings);
	 *     - host rx rings (ni_host_rx_rings);
	 *
	 * The area is filled up by the kernel on NETMAP_REQ_REGISTER,
	 * and then only read by userspace code.
	 */
	const ssize_t	ring_ofs[0];
};

/* Legacy interface to interact with a netmap control device.
 * Included for backward compatibility. The user should not include this
 * file directly. */
#include "netmap_legacy.h"

/*
 * New API to control netmap control devices. New applications should only use
 * nmreq_xyz structs with the NIOCCTRL ioctl() command.
 *
 * NIOCCTRL takes a nmreq_header struct, which contains the required
 * API version, the name of a netmap port, a command type, and pointers
 * to request body and options.
 *
 *	nr_name (in)
 *		The name of the port (em0, valeXXX:YYY, eth0{pn1 etc.)
 *
 *	nr_version (in/out)
 *		Must match NETMAP_API as used in the kernel, error otherwise.
 *		On output it always contains the value supported by the kernel.
 *
 *	nr_reqtype (in)
 *		One of the NETMAP_REQ_* command types below
 *
 *	nr_body (in)
 *		Pointer to a command-specific struct, described by one
 *		of the struct nmreq_xyz below.
 *
 *	nr_options (in)
 *		Command specific options, if any.
 *
 * A NETMAP_REQ_REGISTER command activates netmap mode on the netmap
 * port (e.g. physical interface) specified by nmreq_header.nr_name.
 * The request body (struct nmreq_register) has several arguments to
 * specify how the port is to be registered.
 *
 *	nr_tx_slots, nr_rx_slots, nr_tx_rings, nr_rx_rings,
 *	nr_host_tx_rings, nr_host_rx_rings (in/out)
 *		On input, non-zero values may be used to reconfigure the port
 *		according to the requested values, but this is not guaranteed.
 *		On output the actual values in use are reported.
 *
 *	nr_mode (in)
 *		Indicate what set of rings must be bound to the netmap
 *		device (e.g. all NIC rings, host rings only, NIC and
 *		host rings, ...). Values are in NR_REG_*.
 *
 *	nr_ringid (in)
 *		If nr_mode == NR_REG_ONE_NIC (only a single pair of TX/RX
 *		rings), indicate which NIC TX and/or RX ring is to be bound
 *		(0..nr_*x_rings-1).
 *
 *	nr_flags (in)
 *		Indicate special options for how to open the port.
 *
 *		NR_NO_TX_POLL can be OR-ed to make select()/poll() push
 *		packets on tx rings only if POLLOUT is set.
 *		The default is to push any pending packet.
 *
 *		NR_DO_RX_POLL can be OR-ed to make select()/poll() release
 *		packets on rx rings also when POLLIN is NOT set.
 *		The default is to touch the rx ring only with POLLIN.
 *		Note that this is the opposite of TX because it
 *		reflects the common usage.
 *
 *		Other options are NR_MONITOR_TX, NR_MONITOR_RX, NR_ZCOPY_MON,
 *		NR_EXCLUSIVE, NR_RX_RINGS_ONLY, NR_TX_RINGS_ONLY and
 *		NR_ACCEPT_VNET_HDR.
 *
 *	nr_mem_id (in/out)
 *		The identity of the memory region used.
 *		On input, 0 means the system decides autonomously,
 *		other values may try to select a specific region.
 *		On return the actual value is reported.
 *		Region '1' is the global allocator, normally shared
 *		by all interfaces. Other values are private regions.
 *		If two ports use the same region, zero-copy is possible
 *		between them.
 *
 *	nr_extra_bufs (in/out)
 *		Number of extra buffers to be allocated.
 *
 * The other NETMAP_REQ_* commands are described below.
 * An illustrative registration sequence is sketched after the definition
 * of struct nmreq_header below.
 *
 */

/* maximum size of a request, including all options */
#define NETMAP_REQ_MAXSIZE	4096

/* Header common to all request options. */
struct nmreq_option {
	/* Pointer to the next option. */
	uint64_t		nro_next;
	/* Option type. */
	uint32_t		nro_reqtype;
	/* (out) status of the option:
	 * 0: recognized and processed
	 * !=0: errno value
	 */
	uint32_t		nro_status;
	/* Option size, used only for options that can have variable size
	 * (e.g. because they contain arrays). For fixed-size options this
	 * field should be set to zero. */
	uint64_t		nro_size;
};

/* Header common to all requests. Do not reorder these fields, as we need
 * the second one (nr_reqtype) to know how much to copy from/to userspace. */
struct nmreq_header {
	uint16_t		nr_version;	/* API version */
	uint16_t		nr_reqtype;	/* nmreq type (NETMAP_REQ_*) */
	uint32_t		nr_reserved;	/* must be zero */
#define NETMAP_REQ_IFNAMSIZ	64
	char			nr_name[NETMAP_REQ_IFNAMSIZ]; /* port name */
	uint64_t		nr_options;	/* command-specific options */
	uint64_t		nr_body;	/* ptr to nmreq_xyz struct */
};
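
/*
 * Illustrative registration sequence (an editor's sketch with error
 * handling omitted; 'fd' is a descriptor obtained by opening /dev/netmap,
 * "em0" is just an example port name, and NETMAP_REQ_REGISTER,
 * NR_REG_ALL_NIC, struct nmreq_register and NIOCCTRL are all defined
 * further below in this file):
 *
 *	struct nmreq_register body = { .nr_mode = NR_REG_ALL_NIC };
 *	struct nmreq_header hdr = {
 *		.nr_version = NETMAP_API,
 *		.nr_reqtype = NETMAP_REQ_REGISTER,
 *		.nr_body    = (uint64_t)(uintptr_t)&body,
 *	};
 *	struct netmap_if *nifp;
 *	void *mem;
 *
 *	strncpy(hdr.nr_name, "em0", sizeof(hdr.nr_name) - 1);
 *	ioctl(fd, NIOCCTRL, &hdr);
 *	mem = mmap(NULL, body.nr_memsize, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, fd, 0);
 *	nifp = (struct netmap_if *)((char *)mem + body.nr_offset);
 */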

enum {
	/* Register a netmap port with the device. */
	NETMAP_REQ_REGISTER = 1,
	/* Get information from a netmap port. */
	NETMAP_REQ_PORT_INFO_GET,
	/* Attach a netmap port to a VALE switch. */
	NETMAP_REQ_VALE_ATTACH,
	/* Detach a netmap port from a VALE switch. */
	NETMAP_REQ_VALE_DETACH,
	/* List the ports attached to a VALE switch. */
	NETMAP_REQ_VALE_LIST,
	/* Set the port header length (was virtio-net header length). */
	NETMAP_REQ_PORT_HDR_SET,
	/* Get the port header length (was virtio-net header length). */
	NETMAP_REQ_PORT_HDR_GET,
	/* Create a new persistent VALE port. */
	NETMAP_REQ_VALE_NEWIF,
	/* Delete a persistent VALE port. */
	NETMAP_REQ_VALE_DELIF,
	/* Enable polling kernel thread(s) on an attached VALE port. */
	NETMAP_REQ_VALE_POLLING_ENABLE,
	/* Disable polling kernel thread(s) on an attached VALE port. */
	NETMAP_REQ_VALE_POLLING_DISABLE,
	/* Get info about the pools of a memory allocator. */
	NETMAP_REQ_POOLS_INFO_GET,
	/* Start an in-kernel loop that syncs the rings periodically or
	 * on notifications. The loop runs in the context of the ioctl
	 * syscall, and only stops on NETMAP_REQ_SYNC_KLOOP_STOP. */
	NETMAP_REQ_SYNC_KLOOP_START,
	/* Stop the thread executing the in-kernel loop. The thread
	 * returns from the ioctl syscall. */
	NETMAP_REQ_SYNC_KLOOP_STOP,
	/* Enable CSB mode on a registered netmap control device. */
	NETMAP_REQ_CSB_ENABLE,
};

enum {
	/* On NETMAP_REQ_REGISTER, ask netmap to use memory allocated
	 * from user-space allocated memory pools (e.g. hugepages).
	 */
	NETMAP_REQ_OPT_EXTMEM = 1,

	/* On NETMAP_REQ_SYNC_KLOOP_START, ask netmap to use eventfd-based
	 * notifications to synchronize the kernel loop with the application.
	 */
	NETMAP_REQ_OPT_SYNC_KLOOP_EVENTFDS,

	/* On NETMAP_REQ_REGISTER, ask netmap to work in CSB mode, where
	 * head, cur and tail pointers are not exchanged through the
	 * struct netmap_ring header, but rather using a user-provided
	 * memory area (see struct nm_csb_atok and struct nm_csb_ktoa).
	 */
	NETMAP_REQ_OPT_CSB,

	/* An extension to NETMAP_REQ_OPT_SYNC_KLOOP_EVENTFDS, which specifies
	 * if the TX and/or RX rings are synced in the context of the VM exit.
	 * This requires the 'ioeventfd' fields to be valid (cannot be < 0).
	 */
	NETMAP_REQ_OPT_SYNC_KLOOP_MODE,

	/* This is a marker to count the number of available options.
	 * New options must be added above it. */
	NETMAP_REQ_OPT_MAX,
};

/*
 * nr_reqtype:	NETMAP_REQ_REGISTER
 * Bind (register) a netmap port to this control device.
 */
struct nmreq_register {
	uint64_t	nr_offset;	/* nifp offset in the shared region */
	uint64_t	nr_memsize;	/* size of the shared region */
	uint32_t	nr_tx_slots;	/* slots in tx rings */
	uint32_t	nr_rx_slots;	/* slots in rx rings */
	uint16_t	nr_tx_rings;	/* number of tx rings */
	uint16_t	nr_rx_rings;	/* number of rx rings */
	uint16_t	nr_host_tx_rings; /* number of host tx rings */
	uint16_t	nr_host_rx_rings; /* number of host rx rings */

	uint16_t	nr_mem_id;	/* id of the memory allocator */
	uint16_t	nr_ringid;	/* ring(s) we care about */
	uint32_t	nr_mode;	/* specify NR_REG_* modes */
	uint32_t	nr_extra_bufs;	/* number of requested extra buffers */

	uint64_t	nr_flags;	/* additional flags (see below) */
/* monitors use nr_ringid and nr_mode to select the rings to monitor */
#define NR_MONITOR_TX	0x100
#define NR_MONITOR_RX	0x200
#define NR_ZCOPY_MON	0x400
/* request exclusive access to the selected rings */
#define NR_EXCLUSIVE	0x800
/* 0x1000 unused */
#define NR_RX_RINGS_ONLY	0x2000
#define NR_TX_RINGS_ONLY	0x4000
/* Applications set this flag if they are able to deal with virtio-net headers,
 * that is, send/receive frames that start with a virtio-net header.
 * If not set, NETMAP_REQ_REGISTER will fail with netmap ports that require
 * applications to use those headers. If the flag is set, the application can
 * use the NETMAP_REQ_PORT_HDR_GET command to figure out the header length. */
#define NR_ACCEPT_VNET_HDR	0x8000
/* The following two have the same meaning as NETMAP_NO_TX_POLL and
 * NETMAP_DO_RX_POLL. */
#define NR_DO_RX_POLL		0x10000
#define NR_NO_TX_POLL		0x20000
};

/* Valid values for nmreq_register.nr_mode (see above). */
enum {	NR_REG_DEFAULT		= 0,	/* backward compat, should not be used. */
	NR_REG_ALL_NIC		= 1,
	NR_REG_SW		= 2,
	NR_REG_NIC_SW		= 3,
	NR_REG_ONE_NIC		= 4,
	NR_REG_PIPE_MASTER	= 5, /* deprecated, use "x{y" port name syntax */
	NR_REG_PIPE_SLAVE	= 6, /* deprecated, use "x}y" port name syntax */
	NR_REG_NULL		= 7,
	NR_REG_ONE_SW		= 8,
};

/* A single ioctl number is shared by all the new API commands.
 * Demultiplexing is done using the hdr.nr_reqtype field.
 * FreeBSD uses the size value embedded in the _IOWR to determine
 * how much to copy in/out, so we define the ioctl() command
 * specifying only nmreq_header, and copyin/copyout the rest. */
#define NIOCCTRL	_IOWR('i', 151, struct nmreq_header)

/* The ioctl commands to sync TX/RX netmap rings.
 * NIOCTXSYNC, NIOCRXSYNC synchronize tx or rx queues,
 * whose identity is set in NETMAP_REQ_REGISTER through nr_ringid.
 * These are non-blocking and take no argument. */
#define NIOCTXSYNC	_IO('i', 148) /* sync tx queues */
#define NIOCRXSYNC	_IO('i', 149) /* sync rx queues */

/*
 * nr_reqtype:	NETMAP_REQ_PORT_INFO_GET
 * Get information about a netmap port, including the number of rings,
 * slots per ring, id of the memory allocator, etc. The netmap
 * control device used for this operation does not need to be bound
 * to a netmap port.
 */
struct nmreq_port_info_get {
	uint64_t	nr_memsize;	/* size of the shared region */
	uint32_t	nr_tx_slots;	/* slots in tx rings */
	uint32_t	nr_rx_slots;	/* slots in rx rings */
	uint16_t	nr_tx_rings;	/* number of tx rings */
	uint16_t	nr_rx_rings;	/* number of rx rings */
	uint16_t	nr_host_tx_rings; /* number of host tx rings */
	uint16_t	nr_host_rx_rings; /* number of host rx rings */
	uint16_t	nr_mem_id;	/* memory allocator id (in/out) */
	uint16_t	pad[3];
};

#define	NM_BDG_NAME		"vale"	/* prefix for bridge port name */

/*
 * nr_reqtype:	NETMAP_REQ_VALE_ATTACH
 * Attach a netmap port to a VALE switch. Both the name of the netmap
 * port and the VALE switch are specified through the nr_name argument.
 * The attach operation may need to register the port, so the same
 * arguments as NETMAP_REQ_REGISTER are available.
 * port_index will contain the index where the port has been attached.
 */
struct nmreq_vale_attach {
	struct nmreq_register reg;
	uint32_t port_index;
	uint32_t pad1;
};

/*
 * nr_reqtype:	NETMAP_REQ_VALE_DETACH
 * Detach a netmap port from a VALE switch. Both the name of the netmap
 * port and the VALE switch are specified through the nr_name argument.
 * port_index will contain the index where the port was attached.
 */
struct nmreq_vale_detach {
	uint32_t port_index;
	uint32_t pad1;
};

/*
 * nr_reqtype:	NETMAP_REQ_VALE_LIST
 * List the ports of a VALE switch.
 */
struct nmreq_vale_list {
	/* Name of the VALE port (valeXXX:YYY) or empty. */
	uint16_t	nr_bridge_idx;
	uint16_t	pad1;
	uint32_t	nr_port_idx;
};
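
/*
 * Illustrative receive loop (an editor's sketch; 'fd' and 'ring' are the
 * control descriptor and an rx ring obtained as sketched earlier, and
 * consume_packet() is a hypothetical application routine; a blocking
 * variant would use poll() with POLLIN instead of NIOCRXSYNC):
 *
 *	for (;;) {
 *		uint32_t i;
 *
 *		ioctl(fd, NIOCRXSYNC, NULL);
 *		for (i = ring->head; i != ring->tail;
 *		     i = (i + 1 == ring->num_slots) ? 0 : i + 1)
 *			consume_packet(ring, &ring->slot[i]);
 *		ring->head = ring->cur = i;
 *	}
 */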

/*
 * nr_reqtype:	NETMAP_REQ_PORT_HDR_SET or NETMAP_REQ_PORT_HDR_GET
 * Set or get the port header length of the port identified by hdr.nr_name.
 * The control device does not need to be bound to a netmap port.
 */
struct nmreq_port_hdr {
	uint32_t	nr_hdr_len;
	uint32_t	pad1;
};

/*
 * nr_reqtype:	NETMAP_REQ_VALE_NEWIF
 * Create a new persistent VALE port.
 */
struct nmreq_vale_newif {
	uint32_t	nr_tx_slots;	/* slots in tx rings */
	uint32_t	nr_rx_slots;	/* slots in rx rings */
	uint16_t	nr_tx_rings;	/* number of tx rings */
	uint16_t	nr_rx_rings;	/* number of rx rings */
	uint16_t	nr_mem_id;	/* id of the memory allocator */
	uint16_t	pad1;
};

/*
 * nr_reqtype:	NETMAP_REQ_VALE_POLLING_ENABLE or NETMAP_REQ_VALE_POLLING_DISABLE
 * Enable or disable polling kthreads on a VALE port.
 */
struct nmreq_vale_polling {
	uint32_t	nr_mode;
#define NETMAP_POLLING_MODE_SINGLE_CPU 1
#define NETMAP_POLLING_MODE_MULTI_CPU 2
	uint32_t	nr_first_cpu_id;
	uint32_t	nr_num_polling_cpus;
	uint32_t	pad1;
};

/*
 * nr_reqtype:	NETMAP_REQ_POOLS_INFO_GET
 * Get info about the pools of the memory allocator of the netmap
 * port specified by hdr.nr_name and nr_mem_id. The netmap control
 * device used for this operation does not need to be bound to a netmap
 * port.
 */
struct nmreq_pools_info {
	uint64_t	nr_memsize;
	uint16_t	nr_mem_id;	/* in/out argument */
	uint16_t	pad1[3];
	uint64_t	nr_if_pool_offset;
	uint32_t	nr_if_pool_objtotal;
	uint32_t	nr_if_pool_objsize;
	uint64_t	nr_ring_pool_offset;
	uint32_t	nr_ring_pool_objtotal;
	uint32_t	nr_ring_pool_objsize;
	uint64_t	nr_buf_pool_offset;
	uint32_t	nr_buf_pool_objtotal;
	uint32_t	nr_buf_pool_objsize;
};

/*
 * nr_reqtype:	NETMAP_REQ_SYNC_KLOOP_START
 * Start an in-kernel loop that syncs the rings periodically or on
 * notifications. The loop runs in the context of the ioctl syscall,
 * and only stops on NETMAP_REQ_SYNC_KLOOP_STOP.
 * The registered netmap port must be open in CSB mode.
 */
struct nmreq_sync_kloop_start {
	/* Sleeping is the default synchronization method for the kloop.
	 * The 'sleep_us' field specifies how many microseconds to sleep for
	 * when there is no work to do, before doing another kloop iteration.
	 */
	uint32_t	sleep_us;
	uint32_t	pad1;
};

/* A CSB entry for the application --> kernel direction. */
struct nm_csb_atok {
	uint32_t head;		  /* AW+ KR+ the head of the appl netmap_ring */
	uint32_t cur;		  /* AW+ KR+ the cur of the appl netmap_ring */
	uint32_t appl_need_kick;  /* AW+ KR+ kern --> appl notification enable */
	uint32_t sync_flags;	  /* AW+ KR+ the flags of the appl [tx|rx]sync() */
	uint32_t pad[12];	  /* pad to a 64 bytes cacheline */
};

/* A CSB entry for the application <-- kernel direction. */
struct nm_csb_ktoa {
	uint32_t hwcur;		  /* AR+ KW+ the hwcur of the kern netmap_kring */
	uint32_t hwtail;	  /* AR+ KW+ the hwtail of the kern netmap_kring */
	uint32_t kern_need_kick;  /* AR+ KW+ appl-->kern notification enable */
	uint32_t pad[13];
};

#ifdef __linux__

#ifdef __KERNEL__
#define nm_stst_barrier smp_wmb
#define nm_ldld_barrier smp_rmb
#define nm_stld_barrier smp_mb
#else  /* !__KERNEL__ */
static inline void nm_stst_barrier(void)
{
	/* A memory barrier with release semantic has the combined
	 * effect of a store-store barrier and a load-store barrier,
	 * which is fine for us. */
	__atomic_thread_fence(__ATOMIC_RELEASE);
}
static inline void nm_ldld_barrier(void)
{
	/* A memory barrier with acquire semantic has the combined
	 * effect of a load-load barrier and a store-load barrier,
	 * which is fine for us. */
	__atomic_thread_fence(__ATOMIC_ACQUIRE);
}
#endif /* !__KERNEL__ */

#elif defined(__FreeBSD__)

#ifdef _KERNEL
#define nm_stst_barrier	atomic_thread_fence_rel
#define nm_ldld_barrier	atomic_thread_fence_acq
#define nm_stld_barrier	atomic_thread_fence_seq_cst
#else  /* !_KERNEL */
#include <stdatomic.h>
static inline void nm_stst_barrier(void)
{
	atomic_thread_fence(memory_order_release);
}
static inline void nm_ldld_barrier(void)
{
	atomic_thread_fence(memory_order_acquire);
}
#endif /* !_KERNEL */

#else  /* !__linux__ && !__FreeBSD__ */
#error "OS not supported"
#endif /* !__linux__ && !__FreeBSD__ */

/* Application side of sync-kloop: Write ring pointers (cur, head) to the CSB.
 * This routine is coupled with sync_kloop_kernel_read(). */
static inline void
nm_sync_kloop_appl_write(struct nm_csb_atok *atok, uint32_t cur,
			 uint32_t head)
{
	/* Issue a first store-store barrier to make sure that writes to the
	 * netmap ring are not delayed after the updates of atok->cur and
	 * atok->head. */
	nm_stst_barrier();

	/*
	 * We need to write cur and head to the CSB but we cannot do it atomically.
	 * There is no way we can prevent the host from reading the updated value
	 * of one of the two and the old value of the other. However, if we make
	 * sure that the host never reads a value of head more recent than the
	 * value of cur we are safe. We can allow the host to read a value of cur
	 * more recent than the value of head, since in the netmap ring cur can be
	 * ahead of head and cur cannot wrap around head because it must be behind
	 * tail. Inverting the order of writes below could instead lead the host
	 * to think that head went ahead of cur, which would cause the sync
	 * prologue to fail.
	 *
	 * The following memory barrier scheme is used to make this happen:
	 *
	 *          Guest                Host
	 *
	 *          STORE(cur)           LOAD(head)
	 *          wmb() <----------->  rmb()
	 *          STORE(head)          LOAD(cur)
	 *
	 */
	atok->cur = cur;
	nm_stst_barrier();
	atok->head = head;
}

/* Application side of sync-kloop: Read kring pointers (hwcur, hwtail) from
 * the CSB. This routine is coupled with sync_kloop_kernel_write(). */
static inline void
nm_sync_kloop_appl_read(struct nm_csb_ktoa *ktoa, uint32_t *hwtail,
			uint32_t *hwcur)
{
	/*
	 * We place a memory barrier to make sure that the update of hwtail never
	 * overtakes the update of hwcur.
	 * (see explanation in sync_kloop_kernel_write).
	 */
	*hwtail = ktoa->hwtail;
	nm_ldld_barrier();
	*hwcur = ktoa->hwcur;

	/* Make sure that loads from ktoa->hwtail and ktoa->hwcur are not delayed
	 * after the loads from the netmap ring. */
	nm_ldld_barrier();
}
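
/*
 * Illustrative application-side TX kick in CSB mode (an editor's sketch;
 * 'ring', 'atok', 'ktoa' and 'new_head' are assumed to be set up by the
 * application, 'ioeventfd' is the notifier registered through the
 * NETMAP_REQ_OPT_SYNC_KLOOP_EVENTFDS option below, and a complete
 * implementation also needs a store-load barrier before checking
 * kern_need_kick):
 *
 *	ring->head = ring->cur = new_head;
 *	nm_sync_kloop_appl_write(atok, ring->cur, ring->head);
 *	if (ktoa->kern_need_kick) {
 *		uint64_t one = 1;
 *		write(ioeventfd, &one, sizeof(one));
 *	}
 */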

/*
 * data for NETMAP_REQ_OPT_* options
 */

struct nmreq_opt_sync_kloop_eventfds {
	struct nmreq_option	nro_opt;	/* common header */
	/* An array of N entries for bidirectional notifications between
	 * the kernel loop and the application. The number of entries and
	 * their order must agree with the CSB arrays passed in the
	 * NETMAP_REQ_OPT_CSB option. Each entry contains a file descriptor
	 * backed by an eventfd.
	 *
	 * If any of the 'ioeventfd' entries is < 0, the event loop uses
	 * the sleeping synchronization strategy (according to sleep_us),
	 * and keeps kern_need_kick always disabled.
	 * Each 'irqfd' can be < 0, and in that case the corresponding queue
	 * is never notified.
	 */
	struct {
		/* Notifier for the application --> kernel loop direction. */
		int32_t ioeventfd;
		/* Notifier for the kernel loop --> application direction. */
		int32_t irqfd;
	} eventfds[0];
};

struct nmreq_opt_sync_kloop_mode {
	struct nmreq_option	nro_opt;	/* common header */
#define NM_OPT_SYNC_KLOOP_DIRECT_TX (1 << 0)
#define NM_OPT_SYNC_KLOOP_DIRECT_RX (1 << 1)
	uint32_t mode;
};

struct nmreq_opt_extmem {
	struct nmreq_option	nro_opt;	/* common header */
	uint64_t		nro_usrptr;	/* (in) ptr to usr memory */
	struct nmreq_pools_info	nro_info;	/* (in/out) */
};

struct nmreq_opt_csb {
	struct nmreq_option	nro_opt;

	/* Array of CSB entries for application --> kernel communication
	 * (N entries). */
	uint64_t		csb_atok;

	/* Array of CSB entries for kernel --> application communication
	 * (N entries). */
	uint64_t		csb_ktoa;
};

#endif /* _NET_NETMAP_H_ */