/*
 * Copyright (C) 2011 Matteo Landi, Luigi Rizzo. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module supports memory mapped access to network devices,
 * see netmap(4).
 *
 * The module uses a large memory pool allocated by the kernel
 * and accessible as mmapped memory by multiple userspace threads/processes.
 * The memory pool contains packet buffers and "netmap rings",
 * i.e. user-accessible copies of the interface's queues.
 *
 * Access to the network card works like this:
 * 1. a process/thread issues one or more open() on /dev/netmap, to create
 *    select()able file descriptors on which events are reported.
 * 2. on each descriptor, the process issues an ioctl() to identify
 *    the interface that should report events to the file descriptor.
 * 3. on each descriptor, the process issues an mmap() request to
 *    map the shared memory region within the process' address space.
 *    The list of interesting queues is indicated by a location in
 *    the shared memory region.
 * 4. using the functions in the netmap(4) userspace API, a process
 *    can look up the occupation state of a queue, access memory buffers,
 *    and retrieve received packets or enqueue packets to transmit.
 * 5. using some ioctl()s the process can synchronize the userspace view
 *    of the queue with the actual status in the kernel. This includes both
 *    receiving the notification of new packets, and transmitting new
 *    packets on the output interface.
 * 6. select() or poll() can be used to wait for events on individual
 *    transmit or receive queues (or all queues for a given interface).
 */
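
/*
 * As an illustration of the flow above, a minimal userspace transmit
 * loop might look like the sketch below. It is not compiled here; the
 * NETMAP_IF/NETMAP_TXRING/NETMAP_BUF accessors are assumed to come from
 * the netmap(4) userspace headers, and error handling is omitted.
 *
 *	fd = open("/dev/netmap", O_RDWR);
 *	bzero(&req, sizeof(req));
 *	strcpy(req.nr_name, "em0");
 *	req.nr_version = NETMAP_API;
 *	ioctl(fd, NIOCREGIF, &req);			// step 2
 *	mem = mmap(0, req.nr_memsize, PROT_WRITE | PROT_READ,
 *		MAP_SHARED, fd, 0);			// step 3
 *	nifp = NETMAP_IF(mem, req.nr_offset);
 *	ring = NETMAP_TXRING(nifp, 0);			// step 4
 *	while (ring->avail > 0) {
 *		struct netmap_slot *slot = &ring->slot[ring->cur];
 *		char *buf = NETMAP_BUF(ring, slot->buf_idx);
 *		// ... build a frame in buf, set slot->len ...
 *		ring->cur = (ring->cur + 1 == ring->num_slots) ?
 *			0 : ring->cur + 1;
 *		ring->avail--;
 *	}
 *	ioctl(fd, NIOCTXSYNC, NULL);			// step 5
 */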

#include <sys/cdefs.h>		/* prerequisite */
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/module.h>
#include <sys/errno.h>
#include <sys/param.h>	/* defines used in kernel.h */
#include <sys/jail.h>
#include <sys/kernel.h>	/* types used in module initialization */
#include <sys/conf.h>	/* cdevsw struct */
#include <sys/uio.h>	/* uio struct */
#include <sys/sockio.h>
#include <sys/socketvar.h>	/* struct socket */
#include <sys/malloc.h>
#include <sys/mman.h>	/* PROT_EXEC */
#include <sys/poll.h>
#include <sys/proc.h>
#include <vm/vm.h>	/* vtophys */
#include <vm/pmap.h>	/* vtophys */
#include <sys/socket.h>	/* sockaddrs */
#include <machine/bus.h>
#include <sys/selinfo.h>
#include <sys/sysctl.h>
#include <net/if.h>
#include <net/bpf.h>	/* BIOCIMMEDIATE */
#include <net/vnet.h>
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <machine/bus.h>	/* bus_dmamap_* */

MALLOC_DEFINE(M_NETMAP, "netmap", "Network memory map");

/*
 * lock and unlock for the netmap memory allocator
 */
#define NMA_LOCK()	mtx_lock(&nm_mem->nm_mtx);
#define NMA_UNLOCK()	mtx_unlock(&nm_mem->nm_mtx);
struct netmap_mem_d;
static struct netmap_mem_d *nm_mem;	/* Our memory allocator. */

u_int netmap_total_buffers;
char *netmap_buffer_base;	/* address of an invalid buffer */

/* user-controlled variables */
int netmap_verbose;

static int netmap_no_timestamp; /* don't timestamp on rxsync */

SYSCTL_NODE(_dev, OID_AUTO, netmap, CTLFLAG_RW, 0, "Netmap args");
SYSCTL_INT(_dev_netmap, OID_AUTO, verbose,
    CTLFLAG_RW, &netmap_verbose, 0, "Verbose mode");
SYSCTL_INT(_dev_netmap, OID_AUTO, no_timestamp,
    CTLFLAG_RW, &netmap_no_timestamp, 0, "no_timestamp");
int netmap_buf_size = 2048;
TUNABLE_INT("hw.netmap.buf_size", &netmap_buf_size);
SYSCTL_INT(_dev_netmap, OID_AUTO, buf_size,
    CTLFLAG_RD, &netmap_buf_size, 0, "Size of packet buffers");
int netmap_mitigate = 1;
SYSCTL_INT(_dev_netmap, OID_AUTO, mitigate, CTLFLAG_RW, &netmap_mitigate, 0, "");
int netmap_no_pendintr;
SYSCTL_INT(_dev_netmap, OID_AUTO, no_pendintr,
    CTLFLAG_RW, &netmap_no_pendintr, 0, "Always look for new received packets.");


/*----- memory allocator -----------------*/
/*
 * Here we have the low level routines for the memory allocator
 * and its primary users.
 */

/*
 * Default amount of memory pre-allocated by the module.
 * We start with a large size and then shrink our demand
 * according to what is available when the module is loaded.
 * At the moment the block is contiguous, but we can easily
 * restrict our demand to smaller units (16..64k)
 */
#define NETMAP_MEMORY_SIZE (64 * 1024 * PAGE_SIZE)
static void * netmap_malloc(size_t size, const char *msg);
static void netmap_free(void *addr, const char *msg);

#define netmap_if_malloc(len)	netmap_malloc(len, "nifp")
#define netmap_if_free(v)	netmap_free((v), "nifp")

#define netmap_ring_malloc(len)	netmap_malloc(len, "ring")
#define netmap_free_rings(na)	\
	netmap_free((na)->tx_rings[0].ring, "shadow rings");

/*
 * Allocator for a pool of packet buffers. For each buffer we have
 * one entry in the bitmap to signal the state.
 * Allocation scans the bitmap, but since this is done only on attach,
 * we are not too worried about performance.
 * XXX if we need to allocate small blocks, a translation
 * table is used both for kernel virtual address and physical
 * addresses.
 */
struct netmap_buf_pool {
	u_int total_buffers;	/* total buffers. */
	u_int free;
	u_int bufsize;
	char *base;		/* buffer base address */
	uint32_t *bitmap;	/* one bit per buffer, 1 means free */
};
struct netmap_buf_pool nm_buf_pool;
SYSCTL_INT(_dev_netmap, OID_AUTO, total_buffers,
    CTLFLAG_RD, &nm_buf_pool.total_buffers, 0, "total_buffers");
SYSCTL_INT(_dev_netmap, OID_AUTO, free_buffers,
    CTLFLAG_RD, &nm_buf_pool.free, 0, "free_buffers");


/*
 * Allocate n buffers from the ring, and fill the slot.
 * Buffer 0 is the 'junk' buffer.
 */
static void
netmap_new_bufs(struct netmap_if *nifp __unused,
		struct netmap_slot *slot, u_int n)
{
	struct netmap_buf_pool *p = &nm_buf_pool;
	uint32_t bi = 0;		/* index in the bitmap */
	uint32_t mask, j, i = 0;	/* slot counter */

	if (n > p->free) {
		D("only %d out of %d buffers available", i, n);
		return;
	}
	/* termination is guaranteed by p->free */
	while (i < n && p->free > 0) {
		uint32_t cur = p->bitmap[bi];
		if (cur == 0) { /* bitmask is fully used */
			bi++;
			continue;
		}
		/* locate a slot */
		for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1) ;
		p->bitmap[bi] &= ~mask;	/* slot in use */
		p->free--;
		slot[i].buf_idx = bi*32+j;
		slot[i].len = p->bufsize;
		slot[i].flags = NS_BUF_CHANGED;
		i++;
	}
	ND("allocated %d buffers, %d available", n, p->free);
}


static void
netmap_free_buf(struct netmap_if *nifp __unused, uint32_t i)
{
	struct netmap_buf_pool *p = &nm_buf_pool;

	uint32_t pos, mask;
	if (i >= p->total_buffers) {
		D("invalid free index %d", i);
		return;
	}
	pos = i / 32;
	mask = 1 << (i % 32);
	if (p->bitmap[pos] & mask) {
		D("slot %d already free", i);
		return;
	}
	p->bitmap[pos] |= mask;
	p->free++;
}


/* Descriptor of the memory objects handled by our memory allocator. */
struct netmap_mem_obj {
	TAILQ_ENTRY(netmap_mem_obj) nmo_next;	/* next object in the chain. */
	int nmo_used;		/* flag set on used memory objects. */
	size_t nmo_size;	/* size of the memory area reserved for the
				   object. */
	void *nmo_data;		/* pointer to the memory area. */
};

/* Wrap our memory objects to make them ``chainable``. */
TAILQ_HEAD(netmap_mem_obj_h, netmap_mem_obj);


/* Descriptor of our custom memory allocator. */
struct netmap_mem_d {
	struct mtx nm_mtx;	/* lock used to handle the chain of memory
				   objects. */
	struct netmap_mem_obj_h nm_molist;	/* list of memory objects */
	size_t nm_size;		/* total amount of memory used for rings etc. */
	size_t nm_totalsize;	/* total amount of allocated memory
				   (the difference is used for buffers) */
	size_t nm_buf_start;	/* offset of packet buffers.
				   This is page-aligned. */
	size_t nm_buf_len;	/* total memory for buffers */
	void *nm_buffer;	/* pointer to the whole pre-allocated memory
				   area. */
};

/* Shorthand to compute a netmap interface offset. */
#define netmap_if_offset(v)					\
	((char *) (v) - (char *) nm_mem->nm_buffer)
/* .. and get a physical address given a memory offset */
#define netmap_ofstophys(o)					\
	(vtophys(nm_mem->nm_buffer) + (o))


/*------ netmap memory allocator -------*/
/*
 * Request for a chunk of memory.
 *
 * Memory objects are arranged into a list, hence we need to walk this
 * list until we find an object with the needed amount of data free.
 * This sounds like a completely inefficient implementation, but given
 * the fact that data allocation is done once, we can handle it
 * flawlessly.
 *
 * Return NULL on failure.
 */
static void *
netmap_malloc(size_t size, __unused const char *msg)
{
	struct netmap_mem_obj *mem_obj, *new_mem_obj;
	void *ret = NULL;

	NMA_LOCK();
	TAILQ_FOREACH(mem_obj, &nm_mem->nm_molist, nmo_next) {
		if (mem_obj->nmo_used != 0 || mem_obj->nmo_size < size)
			continue;

		new_mem_obj = malloc(sizeof(struct netmap_mem_obj), M_NETMAP,
				     M_WAITOK | M_ZERO);
		TAILQ_INSERT_BEFORE(mem_obj, new_mem_obj, nmo_next);

		new_mem_obj->nmo_used = 1;
		new_mem_obj->nmo_size = size;
		new_mem_obj->nmo_data = mem_obj->nmo_data;
		memset(new_mem_obj->nmo_data, 0, new_mem_obj->nmo_size);

		mem_obj->nmo_size -= size;
		mem_obj->nmo_data = (char *) mem_obj->nmo_data + size;
		if (mem_obj->nmo_size == 0) {
			TAILQ_REMOVE(&nm_mem->nm_molist, mem_obj,
				     nmo_next);
			free(mem_obj, M_NETMAP);
		}

		ret = new_mem_obj->nmo_data;

		break;
	}
	NMA_UNLOCK();
	ND("%s: %d bytes at %p", msg, size, ret);

	return (ret);
}

/*
 * Return the memory to the allocator.
 *
 * While freeing a memory object, we try to merge adjacent chunks in
 * order to reduce memory fragmentation.
 */
static void
netmap_free(void *addr, const char *msg)
{
	size_t size;
	struct netmap_mem_obj *cur, *prev, *next;

	if (addr == NULL) {
		D("NULL addr for %s", msg);
		return;
	}

	NMA_LOCK();
	TAILQ_FOREACH(cur, &nm_mem->nm_molist, nmo_next) {
		if (cur->nmo_data == addr && cur->nmo_used)
			break;
	}
	if (cur == NULL) {
		NMA_UNLOCK();
		D("invalid addr %s %p", msg, addr);
		return;
	}

	size = cur->nmo_size;
	cur->nmo_used = 0;

	/* merge current chunk of memory with the previous one,
	   if present. */
	prev = TAILQ_PREV(cur, netmap_mem_obj_h, nmo_next);
	if (prev && prev->nmo_used == 0) {
		TAILQ_REMOVE(&nm_mem->nm_molist, cur, nmo_next);
		prev->nmo_size += cur->nmo_size;
		free(cur, M_NETMAP);
		cur = prev;
	}

	/* merge with the next one */
	next = TAILQ_NEXT(cur, nmo_next);
	if (next && next->nmo_used == 0) {
		TAILQ_REMOVE(&nm_mem->nm_molist, next, nmo_next);
		cur->nmo_size += next->nmo_size;
		free(next, M_NETMAP);
	}
	NMA_UNLOCK();
	ND("freed %s %d bytes at %p", msg, size, addr);
}

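
/*
 * Sketch of how a process reaches the objects created below, using only
 * the offsets exported in the shared memory region (illustrative):
 *
 *	nifp   = (struct netmap_if *)((char *)mem + req.nr_offset);
 *	ring   = (struct netmap_ring *)((char *)nifp + nifp->ring_ofs[i]);
 *	buffer = (char *)ring + ring->buf_ofs + idx * ring->nr_buf_size;
 *
 * nr_offset comes from the NIOCREGIF reply, ring_ofs[] is filled in the
 * "final:" section of netmap_if_new() below, and buf_ofs is set when
 * each shadow ring is created.
 */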
/*
 * Create and return a new ``netmap_if`` object, and possibly also
 * rings and packet buffers.
 *
 * Return NULL on failure.
 */
static void *
netmap_if_new(const char *ifname, struct netmap_adapter *na)
{
	struct netmap_if *nifp;
	struct netmap_ring *ring;
	struct netmap_kring *kring;
	char *buff;
	u_int i, len, ofs, numdesc;
	u_int nrx = na->num_rx_queues + 1; /* shorthand, include stack queue */
	u_int ntx = na->num_tx_queues + 1; /* shorthand, include stack queue */

	/*
	 * the descriptor is followed inline by an array of offsets
	 * to the tx and rx rings in the shared memory region.
	 */
	len = sizeof(struct netmap_if) + (nrx + ntx) * sizeof(ssize_t);
	nifp = netmap_if_malloc(len);
	if (nifp == NULL)
		return (NULL);

	/* initialize base fields */
	*(int *)(uintptr_t)&nifp->ni_rx_queues = na->num_rx_queues;
	*(int *)(uintptr_t)&nifp->ni_tx_queues = na->num_tx_queues;
	strncpy(nifp->ni_name, ifname, IFNAMSIZ);

	(na->refcount)++;	/* XXX atomic ? we are under lock */
	if (na->refcount > 1)
		goto final;

	/*
	 * First instance. Allocate the netmap rings
	 * (one for each hw queue, one pair for the host).
	 * The rings are contiguous, but have variable size.
	 * The entire block is reachable at
	 *	na->tx_rings[0]
	 */
	len = (ntx + nrx) * sizeof(struct netmap_ring) +
	      (ntx * na->num_tx_desc + nrx * na->num_rx_desc) *
		sizeof(struct netmap_slot);
	buff = netmap_ring_malloc(len);
	if (buff == NULL) {
		D("failed to allocate %d bytes for %s shadow ring",
			len, ifname);
error:
		(na->refcount)--;
		netmap_if_free(nifp);
		return (NULL);
	}
	/* Check whether we have enough buffers */
	len = ntx * na->num_tx_desc + nrx * na->num_rx_desc;
	NMA_LOCK();
	if (nm_buf_pool.free < len) {
		NMA_UNLOCK();
		netmap_free(buff, "not enough bufs");
		goto error;
	}
	/*
	 * in the kring, store the pointers to the shared rings
	 * and initialize the rings. We are under NMA_LOCK().
	 */
	ofs = 0;
	for (i = 0; i < ntx; i++) { /* Transmit rings */
		kring = &na->tx_rings[i];
		numdesc = na->num_tx_desc;
		bzero(kring, sizeof(*kring));
		kring->na = na;

		ring = kring->ring = (struct netmap_ring *)(buff + ofs);
		*(ssize_t *)(uintptr_t)&ring->buf_ofs =
			nm_buf_pool.base - (char *)ring;
		ND("txring[%d] at %p ofs %d", i, ring, ring->buf_ofs);
		*(uint32_t *)(uintptr_t)&ring->num_slots =
			kring->nkr_num_slots = numdesc;

		/*
		 * IMPORTANT:
		 * Always keep one slot empty, so we can detect new
		 * transmissions comparing cur and nr_hwcur (they are
		 * the same only if there are no new transmissions).
		 */
		ring->avail = kring->nr_hwavail = numdesc - 1;
		ring->cur = kring->nr_hwcur = 0;
		*(uint16_t *)(uintptr_t)&ring->nr_buf_size = NETMAP_BUF_SIZE;
		netmap_new_bufs(nifp, ring->slot, numdesc);

		ofs += sizeof(struct netmap_ring) +
			numdesc * sizeof(struct netmap_slot);
	}

	for (i = 0; i < nrx; i++) { /* Receive rings */
		kring = &na->rx_rings[i];
		numdesc = na->num_rx_desc;
		bzero(kring, sizeof(*kring));
		kring->na = na;

		ring = kring->ring = (struct netmap_ring *)(buff + ofs);
		*(ssize_t *)(uintptr_t)&ring->buf_ofs =
			nm_buf_pool.base - (char *)ring;
		ND("rxring[%d] at %p offset %d", i, ring, ring->buf_ofs);
		*(uint32_t *)(uintptr_t)&ring->num_slots =
			kring->nkr_num_slots = numdesc;
		ring->cur = kring->nr_hwcur = 0;
		ring->avail = kring->nr_hwavail = 0; /* empty */
		*(uint16_t *)(uintptr_t)&ring->nr_buf_size = NETMAP_BUF_SIZE;
		netmap_new_bufs(nifp, ring->slot, numdesc);
		ofs += sizeof(struct netmap_ring) +
			numdesc * sizeof(struct netmap_slot);
	}
	NMA_UNLOCK();
	// XXX initialize the selrecord structs.

final:
	/*
	 * fill the slots for the rx and tx queues. They contain the offset
	 * between the ring and nifp, so the information is usable in
	 * userspace to reach the ring from the nifp.
	 */
	for (i = 0; i < ntx; i++) {
		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] =
			(char *)na->tx_rings[i].ring - (char *)nifp;
	}
	for (i = 0; i < nrx; i++) {
		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+ntx] =
			(char *)na->rx_rings[i].ring - (char *)nifp;
	}
	return (nifp);
}

/*
 * Initialize the memory allocator.
 *
 * Create the descriptor for the memory, allocate the pool of memory
 * and initialize the list of memory objects with a single chunk
 * containing the whole pre-allocated memory marked as free.
 *
 * Start with a large size, then halve as needed if we fail to
 * allocate the block. While halving, always add one extra page
 * because buffers 0 and 1 are used for special purposes.
 * Return 0 on success, errno otherwise.
 */
static int
netmap_memory_init(void)
{
	struct netmap_mem_obj *mem_obj;
	void *buf = NULL;
	int i, n, sz = NETMAP_MEMORY_SIZE;
	int extra_sz = 0; // space for rings and two spare buffers

	for (; sz >= 1<<20; sz >>=1) {
		extra_sz = sz/200;
		extra_sz = (extra_sz + 2*PAGE_SIZE - 1) & ~(PAGE_SIZE-1);
		buf = contigmalloc(sz + extra_sz,
			     M_NETMAP,
			     M_WAITOK | M_ZERO,
			     0, /* low address */
			     -1UL, /* high address */
			     PAGE_SIZE, /* alignment */
			     0 /* boundary */
			    );
		if (buf)
			break;
	}
	if (buf == NULL)
		return (ENOMEM);
	sz += extra_sz;
	nm_mem = malloc(sizeof(struct netmap_mem_d), M_NETMAP,
			M_WAITOK | M_ZERO);
	mtx_init(&nm_mem->nm_mtx, "netmap memory allocator lock", NULL,
		 MTX_DEF);
	TAILQ_INIT(&nm_mem->nm_molist);
	nm_mem->nm_buffer = buf;
	nm_mem->nm_totalsize = sz;

	/*
	 * A buffer takes 2k, a slot takes 8 bytes + ring overhead,
	 * so the ratio is 200:1. In other words, we can use 1/200 of
	 * the memory for the rings, and the rest for the buffers,
	 * and be sure we never run out.
	 */
	nm_mem->nm_size = sz/200;
	nm_mem->nm_buf_start =
		(nm_mem->nm_size + PAGE_SIZE - 1) & ~(PAGE_SIZE-1);
	nm_mem->nm_buf_len = sz - nm_mem->nm_buf_start;

	nm_buf_pool.base = nm_mem->nm_buffer;
	nm_buf_pool.base += nm_mem->nm_buf_start;
	netmap_buffer_base = nm_buf_pool.base;
	D("netmap_buffer_base %p (offset %d)",
		netmap_buffer_base, (int)nm_mem->nm_buf_start);
	/* number of buffers, they all start as free */

	netmap_total_buffers = nm_buf_pool.total_buffers =
		nm_mem->nm_buf_len / NETMAP_BUF_SIZE;
	nm_buf_pool.bufsize = NETMAP_BUF_SIZE;

	D("Have %d MB, use %dKB for rings, %d buffers at %p",
		(sz >> 20), (int)(nm_mem->nm_size >> 10),
		nm_buf_pool.total_buffers, nm_buf_pool.base);

	/* allocate and initialize the bitmap. Entry 0 is considered
	 * always busy (used as default when there are no buffers left).
	 */
	n = (nm_buf_pool.total_buffers + 31) / 32;
	nm_buf_pool.bitmap = malloc(sizeof(uint32_t) * n, M_NETMAP,
			 M_WAITOK | M_ZERO);
	nm_buf_pool.bitmap[0] = ~3; /* slot 0 and 1 always busy */
	for (i = 1; i < n; i++)
		nm_buf_pool.bitmap[i] = ~0;
	nm_buf_pool.free = nm_buf_pool.total_buffers - 2;

	mem_obj = malloc(sizeof(struct netmap_mem_obj), M_NETMAP,
			 M_WAITOK | M_ZERO);
	TAILQ_INSERT_HEAD(&nm_mem->nm_molist, mem_obj, nmo_next);
	mem_obj->nmo_used = 0;
	mem_obj->nmo_size = nm_mem->nm_size;
	mem_obj->nmo_data = nm_mem->nm_buffer;

	return (0);
}


/*
 * Finalize the memory allocator.
 *
 * Free all the memory objects contained inside the list, and deallocate
 * the pool of memory; finally free the memory allocator descriptor.
 */
static void
netmap_memory_fini(void)
{
	struct netmap_mem_obj *mem_obj;

	while (!TAILQ_EMPTY(&nm_mem->nm_molist)) {
		mem_obj = TAILQ_FIRST(&nm_mem->nm_molist);
		TAILQ_REMOVE(&nm_mem->nm_molist, mem_obj, nmo_next);
		if (mem_obj->nmo_used == 1) {
			printf("netmap: leaked %d bytes at %p\n",
			       (int)mem_obj->nmo_size,
			       mem_obj->nmo_data);
		}
		free(mem_obj, M_NETMAP);
	}
	contigfree(nm_mem->nm_buffer, nm_mem->nm_totalsize, M_NETMAP);
	// XXX mutex_destroy(nm_mtx);
	free(nm_mem, M_NETMAP);
}
/*------------- end of memory allocator -----------------*/


/* Structure associated to each thread which registered an interface. */
struct netmap_priv_d {
	struct netmap_if *np_nifp;	/* netmap interface descriptor. */

	struct ifnet	*np_ifp;	/* device for which we hold a reference */
	int		np_ringid;	/* from the ioctl */
	u_int		np_qfirst, np_qlast;	/* range of rings to scan */
	uint16_t	np_txpoll;
};


/*
 * File descriptor's private data destructor.
 *
 * Call nm_register(ifp,0) to stop netmap mode on the interface and
 * revert to normal operation. We expect that np_ifp has not gone away.
 */
static void
netmap_dtor_locked(void *data)
{
	struct netmap_priv_d *priv = data;
	struct ifnet *ifp = priv->np_ifp;
	struct netmap_adapter *na = NA(ifp);
	struct netmap_if *nifp = priv->np_nifp;

	na->refcount--;
	if (na->refcount <= 0) {	/* last instance */
		u_int i, j, lim;

		D("deleting last netmap instance for %s", ifp->if_xname);
		/*
		 * there is a race here with *_netmap_task() and
		 * netmap_poll(), which don't run under NETMAP_REG_LOCK.
		 * na->refcount == 0 && na->ifp->if_capenable & IFCAP_NETMAP
		 * (aka NETMAP_DELETING(na)) are a unique marker that the
		 * device is dying.
		 * Before destroying stuff we sleep a bit, and then complete
		 * the job. NIOCREG should realize the condition and
		 * loop until they can continue; the other routines
		 * should check the condition at entry and quit if
		 * they cannot run.
		 */
		na->nm_lock(ifp, NETMAP_REG_UNLOCK, 0);
		tsleep(na, 0, "NIOCUNREG", 4);
		na->nm_lock(ifp, NETMAP_REG_LOCK, 0);
		na->nm_register(ifp, 0); /* off, clear IFCAP_NETMAP */
		/* Wake up any sleeping threads. netmap_poll will
		 * then return POLLERR
		 */
		for (i = 0; i < na->num_tx_queues + 1; i++)
			selwakeuppri(&na->tx_rings[i].si, PI_NET);
		for (i = 0; i < na->num_rx_queues + 1; i++)
			selwakeuppri(&na->rx_rings[i].si, PI_NET);
		selwakeuppri(&na->tx_si, PI_NET);
		selwakeuppri(&na->rx_si, PI_NET);
		/* release all buffers */
		NMA_LOCK();
		for (i = 0; i < na->num_tx_queues + 1; i++) {
			struct netmap_ring *ring = na->tx_rings[i].ring;
			lim = na->tx_rings[i].nkr_num_slots;
			for (j = 0; j < lim; j++)
				netmap_free_buf(nifp, ring->slot[j].buf_idx);
		}
		for (i = 0; i < na->num_rx_queues + 1; i++) {
			struct netmap_ring *ring = na->rx_rings[i].ring;
			lim = na->rx_rings[i].nkr_num_slots;
			for (j = 0; j < lim; j++)
				netmap_free_buf(nifp, ring->slot[j].buf_idx);
		}
		NMA_UNLOCK();
		netmap_free_rings(na);
		wakeup(na);
	}
	netmap_if_free(nifp);
}


static void
netmap_dtor(void *data)
{
	struct netmap_priv_d *priv = data;
	struct ifnet *ifp = priv->np_ifp;
	struct netmap_adapter *na = NA(ifp);

	na->nm_lock(ifp, NETMAP_REG_LOCK, 0);
	netmap_dtor_locked(data);
	na->nm_lock(ifp, NETMAP_REG_UNLOCK, 0);

	if_rele(ifp);
	bzero(priv, sizeof(*priv));	/* XXX for safety */
	free(priv, M_DEVBUF);
}


/*
 * mmap(2) support for the "netmap" device.
 *
 * Expose all the memory previously allocated by our custom memory
 * allocator: this way the user has only to issue a single mmap(2), and
 * can work on all the data structures flawlessly.
 *
 * Return 0 on success, -1 otherwise.
 */

static int
netmap_mmap(__unused struct cdev *dev,
#if __FreeBSD_version < 900000
	vm_offset_t offset, vm_paddr_t *paddr, int nprot
#else
	vm_ooffset_t offset, vm_paddr_t *paddr, int nprot,
	__unused vm_memattr_t *memattr
#endif
	)
{
	if (nprot & PROT_EXEC)
		return (-1);	// XXX -1 or EINVAL ?

	ND("request for offset 0x%x", (uint32_t)offset);
	*paddr = netmap_ofstophys(offset);

	return (0);
}


/*
 * Handlers for synchronization of the queues from/to the host.
 *
 * netmap_sync_to_host() passes packets up. We are called from a
 * system call in user process context, and the only contention
 * can be among multiple user threads erroneously calling
 * this routine concurrently. In principle we should not even
 * need to lock.
 */
static void
netmap_sync_to_host(struct netmap_adapter *na)
{
	struct netmap_kring *kring = &na->tx_rings[na->num_tx_queues];
	struct netmap_ring *ring = kring->ring;
	struct mbuf *head = NULL, *tail = NULL, *m;
	u_int k, n, lim = kring->nkr_num_slots - 1;

	k = ring->cur;
	if (k > lim) {
		netmap_ring_reinit(kring);
		return;
	}
	// na->nm_lock(na->ifp, NETMAP_CORE_LOCK, 0);

	/* Take packets from hwcur to cur and pass them up.
	 * In case of no buffers we give up. At the end of the loop,
	 * the queue is drained in all cases.
	 */
	for (n = kring->nr_hwcur; n != k;) {
		struct netmap_slot *slot = &ring->slot[n];

		n = (n == lim) ? 0 : n + 1;
		if (slot->len < 14 || slot->len > NETMAP_BUF_SIZE) {
			D("bad pkt at %d len %d", n, slot->len);
			continue;
		}
		m = m_devget(NMB(slot), slot->len, 0, na->ifp, NULL);

		if (m == NULL)
			break;
		if (tail)
			tail->m_nextpkt = m;
		else
			head = m;
		tail = m;
		m->m_nextpkt = NULL;
	}
	kring->nr_hwcur = k;
	kring->nr_hwavail = ring->avail = lim;
	// na->nm_lock(na->ifp, NETMAP_CORE_UNLOCK, 0);

	/* send packets up, outside the lock */
	while ((m = head) != NULL) {
		head = head->m_nextpkt;
		m->m_nextpkt = NULL;
		if (netmap_verbose & NM_VERB_HOST)
			D("sending up pkt %p size %d", m, MBUF_LEN(m));
		NM_SEND_UP(na->ifp, m);
	}
}

/*
 * rxsync backend for packets coming from the host stack.
 * They have been put in the queue by netmap_start() so we
 * need to protect access to the kring using a lock.
 *
 * This routine also does the selrecord if called from the poll handler
 * (we know because td != NULL).
 */
static void
netmap_sync_from_host(struct netmap_adapter *na, struct thread *td)
{
	struct netmap_kring *kring = &na->rx_rings[na->num_rx_queues];
	struct netmap_ring *ring = kring->ring;
	u_int j, n, lim = kring->nkr_num_slots;
	u_int k = ring->cur, resvd = ring->reserved;

	na->nm_lock(na->ifp, NETMAP_CORE_LOCK, 0);
	if (k >= lim) {
		netmap_ring_reinit(kring);
		return;
	}
	/* new packets are already set in nr_hwavail */
	/* skip past packets that userspace has released */
	j = kring->nr_hwcur;
	if (resvd > 0) {
		if (resvd + ring->avail >= lim + 1) {
			D("XXX invalid reserve/avail %d %d", resvd, ring->avail);
			ring->reserved = resvd = 0; // XXX panic...
		}
		k = (k >= resvd) ? k - resvd : k + lim - resvd;
	}
	if (j != k) {
		n = k >= j ? k - j : k + lim - j;
		kring->nr_hwavail -= n;
		kring->nr_hwcur = k;
	}
	k = ring->avail = kring->nr_hwavail - resvd;
	if (k == 0 && td)
		selrecord(td, &kring->si);
	if (k && (netmap_verbose & NM_VERB_HOST))
		D("%d pkts from stack", k);
	na->nm_lock(na->ifp, NETMAP_CORE_UNLOCK, 0);
}


/*
 * get a refcounted reference to an interface.
 * Return ENXIO if the interface does not exist, EINVAL if netmap
 * is not supported by the interface.
 * If successful, hold a reference.
 */
static int
get_ifp(const char *name, struct ifnet **ifp)
{
	*ifp = ifunit_ref(name);
	if (*ifp == NULL)
		return (ENXIO);
	/* can do this if the capability exists and if_pspare[0]
	 * points to the netmap descriptor.
	 */
	if ((*ifp)->if_capabilities & IFCAP_NETMAP && NA(*ifp))
		return 0;	/* valid pointer, we hold the refcount */
	if_rele(*ifp);
	return EINVAL;	// not NETMAP capable
}


/*
 * Error routine called when txsync/rxsync detects an error.
 * Can't do much more than resetting cur = hwcur, avail = hwavail.
 * Return 1 on reinit.
 *
 * This routine is only called by the upper half of the kernel.
 * It only reads hwcur (which is changed only by the upper half, too)
 * and hwavail (which may be changed by the lower half, but only on
 * a tx ring and only to increase it, so any error will be recovered
 * on the next call). For the above, we don't strictly need to call
 * it under lock.
 */
int
netmap_ring_reinit(struct netmap_kring *kring)
{
	struct netmap_ring *ring = kring->ring;
	u_int i, lim = kring->nkr_num_slots - 1;
	int errors = 0;

	D("called for %s", kring->na->ifp->if_xname);
	if (ring->cur > lim)
		errors++;
	for (i = 0; i <= lim; i++) {
		u_int idx = ring->slot[i].buf_idx;
		u_int len = ring->slot[i].len;
		if (idx < 2 || idx >= netmap_total_buffers) {
			if (!errors++)
				D("bad buffer at slot %d idx %d len %d ", i, idx, len);
			ring->slot[i].buf_idx = 0;
			ring->slot[i].len = 0;
		} else if (len > NETMAP_BUF_SIZE) {
			ring->slot[i].len = 0;
			if (!errors++)
				D("bad len %d at slot %d idx %d",
					len, i, idx);
		}
	}
	if (errors) {
		int pos = kring - kring->na->tx_rings;
		int n = kring->na->num_tx_queues + 1;

		D("total %d errors", errors);
		errors++;
		D("%s %s[%d] reinit, cur %d -> %d avail %d -> %d",
			kring->na->ifp->if_xname,
			pos < n ? "TX" : "RX", pos < n ? pos : pos - n,
			ring->cur, kring->nr_hwcur,
			ring->avail, kring->nr_hwavail);
		ring->cur = kring->nr_hwcur;
		ring->avail = kring->nr_hwavail;
	}
	return (errors ? 1 : 0);
}


/*
 * Set the ring ID. For devices with a single queue, a request
 * for all rings is the same as a single ring.
 */
static int
netmap_set_ringid(struct netmap_priv_d *priv, u_int ringid)
{
	struct ifnet *ifp = priv->np_ifp;
	struct netmap_adapter *na = NA(ifp);
	u_int i = ringid & NETMAP_RING_MASK;
	/* initially (np_qfirst == np_qlast) we don't want to lock */
	int need_lock = (priv->np_qfirst != priv->np_qlast);
	int lim = na->num_rx_queues;

	if (na->num_tx_queues > lim)
		lim = na->num_tx_queues;
	if ( (ringid & NETMAP_HW_RING) && i >= lim) {
		D("invalid ring id %d", i);
		return (EINVAL);
	}
	if (need_lock)
		na->nm_lock(ifp, NETMAP_CORE_LOCK, 0);
	priv->np_ringid = ringid;
	if (ringid & NETMAP_SW_RING) {
		priv->np_qfirst = NETMAP_SW_RING;
		priv->np_qlast = 0;
	} else if (ringid & NETMAP_HW_RING) {
		priv->np_qfirst = i;
		priv->np_qlast = i + 1;
	} else {
		priv->np_qfirst = 0;
		priv->np_qlast = NETMAP_HW_RING;
	}
	priv->np_txpoll = (ringid & NETMAP_NO_TX_POLL) ? 0 : 1;
	if (need_lock)
		na->nm_lock(ifp, NETMAP_CORE_UNLOCK, 0);
	if (ringid & NETMAP_SW_RING)
		D("ringid %s set to SW RING", ifp->if_xname);
	else if (ringid & NETMAP_HW_RING)
		D("ringid %s set to HW RING %d", ifp->if_xname,
			priv->np_qfirst);
	else
		D("ringid %s set to all %d HW RINGS", ifp->if_xname, lim);
	return 0;
}

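
/*
 * Examples of how nr_ringid values are interpreted by the code above
 * (illustrative):
 *	0				all hardware rings
 *	NETMAP_HW_RING | 2		hardware ring 2 only
 *	NETMAP_SW_RING			the host (software) rings only
 *	... | NETMAP_NO_TX_POLL		do not push out pending
 *					transmissions on poll()
 */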
/*
 * ioctl(2) support for the "netmap" device.
 *
 * The following commands are accepted:
 * - NIOCGINFO
 * - SIOCGIFADDR	just for convenience
 * - NIOCREGIF
 * - NIOCUNREGIF
 * - NIOCTXSYNC
 * - NIOCRXSYNC
 *
 * Return 0 on success, errno otherwise.
 */
static int
netmap_ioctl(__unused struct cdev *dev, u_long cmd, caddr_t data,
	__unused int fflag, struct thread *td)
{
	struct netmap_priv_d *priv = NULL;
	struct ifnet *ifp;
	struct nmreq *nmr = (struct nmreq *) data;
	struct netmap_adapter *na;
	int error;
	u_int i, lim;
	struct netmap_if *nifp;

	CURVNET_SET(TD_TO_VNET(td));

	error = devfs_get_cdevpriv((void **)&priv);
	if (error != ENOENT && error != 0) {
		CURVNET_RESTORE();
		return (error);
	}

	error = 0;	/* Could be ENOENT */
	switch (cmd) {
	case NIOCGINFO:		/* return capabilities etc */
		/* memsize is always valid */
		nmr->nr_memsize = nm_mem->nm_totalsize;
		nmr->nr_offset = 0;
		nmr->nr_rx_rings = nmr->nr_tx_rings = 0;
		nmr->nr_rx_slots = nmr->nr_tx_slots = 0;
		if (nmr->nr_version != NETMAP_API) {
			D("API mismatch got %d have %d",
				nmr->nr_version, NETMAP_API);
			nmr->nr_version = NETMAP_API;
			error = EINVAL;
			break;
		}
		if (nmr->nr_name[0] == '\0')	/* just get memory info */
			break;
		error = get_ifp(nmr->nr_name, &ifp); /* get a refcount */
		if (error)
			break;
		na = NA(ifp); /* retrieve netmap_adapter */
		nmr->nr_rx_rings = na->num_rx_queues;
		nmr->nr_tx_rings = na->num_tx_queues;
		nmr->nr_rx_slots = na->num_rx_desc;
		nmr->nr_tx_slots = na->num_tx_desc;
		if_rele(ifp);	/* return the refcount */
		break;

	case NIOCREGIF:
		if (nmr->nr_version != NETMAP_API) {
			nmr->nr_version = NETMAP_API;
			error = EINVAL;
			break;
		}
		if (priv != NULL) {	/* thread already registered */
			error = netmap_set_ringid(priv, nmr->nr_ringid);
			break;
		}
		/* find the interface and a reference */
		error = get_ifp(nmr->nr_name, &ifp); /* keep reference */
		if (error)
			break;
		na = NA(ifp); /* retrieve netmap adapter */
		/*
		 * Allocate the private per-thread structure.
		 * XXX perhaps we can use a blocking malloc ?
		 */
		priv = malloc(sizeof(struct netmap_priv_d), M_DEVBUF,
			      M_NOWAIT | M_ZERO);
		if (priv == NULL) {
			error = ENOMEM;
			if_rele(ifp);	/* return the refcount */
			break;
		}

		for (i = 10; i > 0; i--) {
			na->nm_lock(ifp, NETMAP_REG_LOCK, 0);
			if (!NETMAP_DELETING(na))
				break;
			na->nm_lock(ifp, NETMAP_REG_UNLOCK, 0);
			tsleep(na, 0, "NIOCREGIF", hz/10);
		}
		if (i == 0) {
			D("too many NIOCREGIF attempts, give up");
			error = EINVAL;
			free(priv, M_DEVBUF);
			if_rele(ifp);	/* return the refcount */
			break;
		}

		priv->np_ifp = ifp;	/* store the reference */
		error = netmap_set_ringid(priv, nmr->nr_ringid);
		if (error)
			goto error;
		priv->np_nifp = nifp = netmap_if_new(nmr->nr_name, na);
		if (nifp == NULL) { /* allocation failed */
			error = ENOMEM;
		} else if (ifp->if_capenable & IFCAP_NETMAP) {
			/* was already set */
		} else {
			/* Otherwise set the card in netmap mode
			 * and make it use the shared buffers.
			 */
			error = na->nm_register(ifp, 1); /* mode on */
			if (error)
				netmap_dtor_locked(priv);
		}

		if (error) {	/* reg. failed, release priv and ref */
error:
			na->nm_lock(ifp, NETMAP_REG_UNLOCK, 0);
			if_rele(ifp);	/* return the refcount */
			bzero(priv, sizeof(*priv));
			free(priv, M_DEVBUF);
			break;
		}

		na->nm_lock(ifp, NETMAP_REG_UNLOCK, 0);
		error = devfs_set_cdevpriv(priv, netmap_dtor);

		if (error != 0) {
			/* could not assign the private storage for the
			 * thread, call the destructor explicitly.
			 */
			netmap_dtor(priv);
			break;
		}

		/* return the offset of the netmap_if object */
		nmr->nr_rx_rings = na->num_rx_queues;
		nmr->nr_tx_rings = na->num_tx_queues;
		nmr->nr_rx_slots = na->num_rx_desc;
		nmr->nr_tx_slots = na->num_tx_desc;
		nmr->nr_memsize = nm_mem->nm_totalsize;
		nmr->nr_offset = netmap_if_offset(nifp);
		break;

	case NIOCUNREGIF:
		if (priv == NULL) {
			error = ENXIO;
			break;
		}

		/* the interface is unregistered inside the
		   destructor of the private data. */
		devfs_clear_cdevpriv();
		break;

	case NIOCTXSYNC:
	case NIOCRXSYNC:
		if (priv == NULL) {
			error = ENXIO;
			break;
		}
		ifp = priv->np_ifp;	/* we have a reference */
		na = NA(ifp); /* retrieve netmap adapter */
		if (priv->np_qfirst == NETMAP_SW_RING) { /* host rings */
			if (cmd == NIOCTXSYNC)
				netmap_sync_to_host(na);
			else
				netmap_sync_from_host(na, NULL);
			break;
		}
		/* find the last ring to scan */
		lim = priv->np_qlast;
		if (lim == NETMAP_HW_RING)
			lim = (cmd == NIOCTXSYNC) ?
			    na->num_tx_queues : na->num_rx_queues;

		for (i = priv->np_qfirst; i < lim; i++) {
			if (cmd == NIOCTXSYNC) {
				struct netmap_kring *kring = &na->tx_rings[i];
				if (netmap_verbose & NM_VERB_TXSYNC)
					D("sync tx ring %d cur %d hwcur %d",
					    i, kring->ring->cur,
					    kring->nr_hwcur);
				na->nm_txsync(ifp, i, 1 /* do lock */);
				if (netmap_verbose & NM_VERB_TXSYNC)
					D("after sync tx ring %d cur %d hwcur %d",
					    i, kring->ring->cur,
					    kring->nr_hwcur);
			} else {
				na->nm_rxsync(ifp, i, 1 /* do lock */);
				microtime(&na->rx_rings[i].ring->ts);
			}
		}

		break;

	case BIOCIMMEDIATE:
	case BIOCGHDRCMPLT:
	case BIOCSHDRCMPLT:
	case BIOCSSEESENT:
		D("ignore BIOCIMMEDIATE/BIOCGHDRCMPLT/BIOCSHDRCMPLT/BIOCSSEESENT");
		break;

	default:	/* allow device-specific ioctls */
	    {
		struct socket so;
		bzero(&so, sizeof(so));
		error = get_ifp(nmr->nr_name, &ifp); /* keep reference */
		if (error)
			break;
		so.so_vnet = ifp->if_vnet;
		// so->so_proto not null.
		error = ifioctl(&so, cmd, data, td);
		if_rele(ifp);
		break;
	    }
	}

	CURVNET_RESTORE();
	return (error);
}

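
/*
 * Illustrative userspace counterpart of the poll handler below (a
 * sketch, not compiled here; error handling omitted):
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	poll(&pfd, 1, 1000);
 *	for (i = 0; i < nifp->ni_rx_queues; i++) {
 *		struct netmap_ring *ring = NETMAP_RXRING(nifp, i);
 *		while (ring->avail > 0) {
 *			struct netmap_slot *slot = &ring->slot[ring->cur];
 *			// ... process NETMAP_BUF(ring, slot->buf_idx) ...
 *			ring->cur = (ring->cur + 1 == ring->num_slots) ?
 *				0 : ring->cur + 1;
 *			ring->avail--;
 *		}
 *	}
 */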
/*
 * select(2) and poll(2) handlers for the "netmap" device.
 *
 * Can be called for one or more queues.
 * Return the event mask corresponding to ready events.
 * If there are no ready events, do a selrecord on either individual
 * selfd or on the global one.
 * Device-dependent parts (locking and sync of tx/rx rings)
 * are done through callbacks.
 */
static int
netmap_poll(__unused struct cdev *dev, int events, struct thread *td)
{
	struct netmap_priv_d *priv = NULL;
	struct netmap_adapter *na;
	struct ifnet *ifp;
	struct netmap_kring *kring;
	u_int core_lock, i, check_all, want_tx, want_rx, revents = 0;
	u_int lim_tx, lim_rx;
	enum {NO_CL, NEED_CL, LOCKED_CL }; /* see below */

	if (devfs_get_cdevpriv((void **)&priv) != 0 || priv == NULL)
		return POLLERR;

	ifp = priv->np_ifp;
	// XXX check for deleting() ?
	if ( (ifp->if_capenable & IFCAP_NETMAP) == 0)
		return POLLERR;

	if (netmap_verbose & 0x8000)
		D("device %s events 0x%x", ifp->if_xname, events);
	want_tx = events & (POLLOUT | POLLWRNORM);
	want_rx = events & (POLLIN | POLLRDNORM);

	na = NA(ifp); /* retrieve netmap adapter */

	lim_tx = na->num_tx_queues;
	lim_rx = na->num_rx_queues;
	/* how many queues we are scanning */
	if (priv->np_qfirst == NETMAP_SW_RING) {
		if (priv->np_txpoll || want_tx) {
			/* push any packets up, then we are always ready */
			kring = &na->tx_rings[lim_tx];
			netmap_sync_to_host(na);
			revents |= want_tx;
		}
		if (want_rx) {
			kring = &na->rx_rings[lim_rx];
			if (kring->ring->avail == 0)
				netmap_sync_from_host(na, td);
			if (kring->ring->avail > 0) {
				revents |= want_rx;
			}
		}
		return (revents);
	}

	/*
	 * check_all is set if the card has more than one queue and
	 * the client is polling all of them. If true, we sleep on
	 * the "global" selfd, otherwise we sleep on individual selfd
	 * (we can only sleep on one of them per direction).
	 * The interrupt routine in the driver should always wake on
	 * the individual selfd, and also on the global one if the card
	 * has more than one ring.
	 *
	 * If the card has only one lock, we just use that.
	 * If the card has separate ring locks, we just use those
	 * unless we are doing check_all, in which case the whole
	 * loop is wrapped by the global lock.
	 * We acquire locks only when necessary: if poll is called
	 * when buffers are available, we can just return without locks.
	 *
	 * rxsync() is only called if we run out of buffers on a POLLIN.
	 * txsync() is called if we run out of buffers on POLLOUT, or
	 * there are pending packets to send. The latter can be disabled
	 * passing NETMAP_NO_TX_POLL in the NIOCREG call.
	 */
	check_all = (priv->np_qlast == NETMAP_HW_RING) && (lim_tx > 1 || lim_rx > 1);

	/*
	 * core_lock indicates what to do with the core lock.
	 * The core lock is used when either the card has no individual
	 * locks, or it has individual locks but we are checking all
	 * rings so we need the core lock to avoid missing wakeup events.
	 *
	 * It has three possible states:
	 * NO_CL	we don't need to use the core lock, e.g.
	 *		because we are protected by individual locks.
	 * NEED_CL	we need the core lock. In this case, when we
	 *		call the lock routine, move to LOCKED_CL
	 *		to remember to release the lock once done.
	 * LOCKED_CL	core lock is set, so we need to release it.
	 */
	core_lock = (check_all || !na->separate_locks) ? NEED_CL : NO_CL;
	if (priv->np_qlast != NETMAP_HW_RING) {
		lim_tx = lim_rx = priv->np_qlast;
	}

	/*
	 * We start with a lock free round which is good if we have
	 * data available.
	 * If this fails, then lock and call the sync routines.
	 */
	for (i = priv->np_qfirst; want_rx && i < lim_rx; i++) {
		kring = &na->rx_rings[i];
		if (kring->ring->avail > 0) {
			revents |= want_rx;
			want_rx = 0;	/* also breaks the loop */
		}
	}
	for (i = priv->np_qfirst; want_tx && i < lim_tx; i++) {
		kring = &na->tx_rings[i];
		if (kring->ring->avail > 0) {
			revents |= want_tx;
			want_tx = 0;	/* also breaks the loop */
		}
	}

	/*
	 * If we need to push packets out (priv->np_txpoll) or want_tx is
	 * still set, we do need to run the txsync calls (on all rings,
	 * to avoid that the tx rings stall).
	 */
	if (priv->np_txpoll || want_tx) {
		for (i = priv->np_qfirst; i < lim_tx; i++) {
			kring = &na->tx_rings[i];
			/*
			 * Skip the current ring if want_tx == 0
			 * (we have already done a successful sync on
			 * a previous ring) AND kring->cur == kring->hwcur
			 * (there are no pending transmissions for this ring).
			 */
			if (!want_tx && kring->ring->cur == kring->nr_hwcur)
				continue;
			if (core_lock == NEED_CL) {
				na->nm_lock(ifp, NETMAP_CORE_LOCK, 0);
				core_lock = LOCKED_CL;
			}
			if (na->separate_locks)
				na->nm_lock(ifp, NETMAP_TX_LOCK, i);
			if (netmap_verbose & NM_VERB_TXSYNC)
				D("send %d on %s %d",
					kring->ring->cur,
					ifp->if_xname, i);
			if (na->nm_txsync(ifp, i, 0 /* no lock */))
				revents |= POLLERR;

			/* Check avail/call selrecord only if called with POLLOUT */
			if (want_tx) {
				if (kring->ring->avail > 0) {
					/* stop at the first ring. We don't risk
					 * starvation.
					 */
					revents |= want_tx;
					want_tx = 0;
				} else if (!check_all)
					selrecord(td, &kring->si);
			}
			if (na->separate_locks)
				na->nm_lock(ifp, NETMAP_TX_UNLOCK, i);
		}
	}

	/*
	 * now if want_rx is still set we need to lock and rxsync.
	 * Do it on all rings because otherwise we starve.
	 */
	if (want_rx) {
		for (i = priv->np_qfirst; i < lim_rx; i++) {
			kring = &na->rx_rings[i];
			if (core_lock == NEED_CL) {
				na->nm_lock(ifp, NETMAP_CORE_LOCK, 0);
				core_lock = LOCKED_CL;
			}
			if (na->separate_locks)
				na->nm_lock(ifp, NETMAP_RX_LOCK, i);

			if (na->nm_rxsync(ifp, i, 0 /* no lock */))
				revents |= POLLERR;
			if (netmap_no_timestamp == 0 ||
			    kring->ring->flags & NR_TIMESTAMP) {
				microtime(&kring->ring->ts);
			}

			if (kring->ring->avail > 0)
				revents |= want_rx;
			else if (!check_all)
				selrecord(td, &kring->si);
			if (na->separate_locks)
				na->nm_lock(ifp, NETMAP_RX_UNLOCK, i);
		}
	}
	if (check_all && revents == 0) { /* signal on the global queue */
		if (want_tx)
			selrecord(td, &na->tx_si);
		if (want_rx)
			selrecord(td, &na->rx_si);
	}
	if (core_lock == LOCKED_CL)
		na->nm_lock(ifp, NETMAP_CORE_UNLOCK, 0);

	return (revents);
}

/*------- driver support routines ------*/

/*
 * default lock wrapper.
 */
static void
netmap_lock_wrapper(struct ifnet *dev, int what, u_int queueid)
{
	struct netmap_adapter *na = NA(dev);

	switch (what) {
#ifdef linux	/* some systems do not need a lock on register */
	case NETMAP_REG_LOCK:
	case NETMAP_REG_UNLOCK:
		break;
#endif /* linux */

	case NETMAP_CORE_LOCK:
		mtx_lock(&na->core_lock);
		break;

	case NETMAP_CORE_UNLOCK:
		mtx_unlock(&na->core_lock);
		break;

	case NETMAP_TX_LOCK:
		mtx_lock(&na->tx_rings[queueid].q_lock);
		break;

	case NETMAP_TX_UNLOCK:
		mtx_unlock(&na->tx_rings[queueid].q_lock);
		break;

	case NETMAP_RX_LOCK:
		mtx_lock(&na->rx_rings[queueid].q_lock);
		break;

	case NETMAP_RX_UNLOCK:
		mtx_unlock(&na->rx_rings[queueid].q_lock);
		break;
	}
}


/*
 * Initialize a ``netmap_adapter`` object created by a driver on attach.
 * We allocate a block of memory with room for a struct netmap_adapter
 * plus two sets of N+2 struct netmap_kring (where N is the number
 * of hardware rings):
 * krings	0..N-1	are for the hardware queues.
 * kring	N	is for the host stack queue
 * kring	N+1	is only used for the selinfo for all queues.
 * Return 0 on success, ENOMEM otherwise.
 *
 * na->num_tx_queues can be set for cards with different tx/rx setups
 */
int
netmap_attach(struct netmap_adapter *na, int num_queues)
{
	int i, n, size;
	void *buf;
	struct ifnet *ifp = na->ifp;

	if (ifp == NULL) {
		D("ifp not set, giving up");
		return EINVAL;
	}
	/* clear other fields ? */
	na->refcount = 0;
	if (na->num_tx_queues == 0)
		na->num_tx_queues = num_queues;
	na->num_rx_queues = num_queues;
	/* on each direction we have N+1 resources
	 * 0..n-1	are the hardware rings
	 * n		is the ring attached to the stack.
	 */
	n = na->num_rx_queues + na->num_tx_queues + 2;
	size = sizeof(*na) + n * sizeof(struct netmap_kring);

	buf = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (buf) {
		WNA(ifp) = buf;
		na->tx_rings = (void *)((char *)buf + sizeof(*na));
		na->rx_rings = na->tx_rings + na->num_tx_queues + 1;
		na->buff_size = NETMAP_BUF_SIZE;
		bcopy(na, buf, sizeof(*na));
		ifp->if_capabilities |= IFCAP_NETMAP;

		na = buf;
		if (na->nm_lock == NULL)
			na->nm_lock = netmap_lock_wrapper;
		mtx_init(&na->core_lock, "netmap core lock", NULL, MTX_DEF);
		for (i = 0 ; i < na->num_tx_queues + 1; i++)
			mtx_init(&na->tx_rings[i].q_lock, "netmap txq lock", NULL, MTX_DEF);
		for (i = 0 ; i < na->num_rx_queues + 1; i++)
			mtx_init(&na->rx_rings[i].q_lock, "netmap rxq lock", NULL, MTX_DEF);
	}
#ifdef linux
	D("netdev_ops %p", ifp->netdev_ops);
	/* prepare a clone of the netdev ops */
	na->nm_ndo = *ifp->netdev_ops;
	na->nm_ndo.ndo_start_xmit = netmap_start_linux;
#endif
	D("%s for %s", buf ? "ok" : "failed", ifp->if_xname);

	return (buf ? 0 : ENOMEM);
}

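
/*
 * Illustrative driver-side usage of netmap_attach() (a sketch with
 * hypothetical "foo" driver names; the actual per-driver glue supplies
 * the nm_register/nm_txsync/nm_rxsync callbacks):
 *
 *	static void
 *	foo_netmap_attach(struct foo_softc *sc)
 *	{
 *		struct netmap_adapter na;
 *
 *		bzero(&na, sizeof(na));
 *		na.ifp = sc->ifp;
 *		na.separate_locks = 0;
 *		na.num_tx_desc = sc->num_tx_desc;
 *		na.num_rx_desc = sc->num_rx_desc;
 *		na.nm_register = foo_netmap_reg;
 *		na.nm_txsync = foo_netmap_txsync;
 *		na.nm_rxsync = foo_netmap_rxsync;
 *		netmap_attach(&na, sc->num_queues);
 *	}
 */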
/*
 * Free the allocated memory linked to the given ``netmap_adapter``
 * object.
 */
void
netmap_detach(struct ifnet *ifp)
{
	u_int i;
	struct netmap_adapter *na = NA(ifp);

	if (!na)
		return;

	for (i = 0; i < na->num_tx_queues + 1; i++) {
		knlist_destroy(&na->tx_rings[i].si.si_note);
		mtx_destroy(&na->tx_rings[i].q_lock);
	}
	for (i = 0; i < na->num_rx_queues + 1; i++) {
		knlist_destroy(&na->rx_rings[i].si.si_note);
		mtx_destroy(&na->rx_rings[i].q_lock);
	}
	knlist_destroy(&na->tx_si.si_note);
	knlist_destroy(&na->rx_si.si_note);
	bzero(na, sizeof(*na));
	WNA(ifp) = NULL;
	free(na, M_DEVBUF);
}


/*
 * Intercept packets from the network stack and pass them
 * to netmap as incoming packets on the 'software' ring.
 * We are not locked when called.
 */
int
netmap_start(struct ifnet *ifp, struct mbuf *m)
{
	struct netmap_adapter *na = NA(ifp);
	struct netmap_kring *kring = &na->rx_rings[na->num_rx_queues];
	u_int i, len = MBUF_LEN(m);
	int error = EBUSY, lim = kring->nkr_num_slots - 1;
	struct netmap_slot *slot;

	if (netmap_verbose & NM_VERB_HOST)
		D("%s packet %d len %d from the stack", ifp->if_xname,
			kring->nr_hwcur + kring->nr_hwavail, len);
	na->nm_lock(ifp, NETMAP_CORE_LOCK, 0);
	if (kring->nr_hwavail >= lim) {
		D("stack ring %s full\n", ifp->if_xname);
		goto done;	/* no space */
	}
	if (len > NETMAP_BUF_SIZE) {
		D("drop packet size %d > %d", len, NETMAP_BUF_SIZE);
		goto done;	/* too long for us */
	}

	/* compute the insert position */
	i = kring->nr_hwcur + kring->nr_hwavail;
	if (i > lim)
		i -= lim + 1;
	slot = &kring->ring->slot[i];
	m_copydata(m, 0, len, NMB(slot));
	slot->len = len;
	kring->nr_hwavail++;
	if (netmap_verbose & NM_VERB_HOST)
		D("wake up host ring %s %d", na->ifp->if_xname, na->num_rx_queues);
	selwakeuppri(&kring->si, PI_NET);
	error = 0;
done:
	na->nm_lock(ifp, NETMAP_CORE_UNLOCK, 0);

	/* release the mbuf in either case of success or failure. As an
	 * alternative, put the mbuf in a free list and free the list
	 * only when really necessary.
	 */
	m_freem(m);

	return (error);
}


/*
 * netmap_reset() is called by the driver routines when reinitializing
 * a ring. The driver is in charge of locking to protect the kring.
 * If netmap mode is not set just return NULL.
 */
struct netmap_slot *
netmap_reset(struct netmap_adapter *na, enum txrx tx, int n,
	u_int new_cur)
{
	struct netmap_kring *kring;
	int new_hwofs, lim;

	if (na == NULL)
		return NULL;	/* no netmap support here */
	if (!(na->ifp->if_capenable & IFCAP_NETMAP))
		return NULL;	/* nothing to reinitialize */

	if (tx == NR_TX) {
		kring = na->tx_rings + n;
		new_hwofs = kring->nr_hwcur - new_cur;
	} else {
		kring = na->rx_rings + n;
		new_hwofs = kring->nr_hwcur + kring->nr_hwavail - new_cur;
	}
	lim = kring->nkr_num_slots - 1;
	if (new_hwofs > lim)
		new_hwofs -= lim + 1;

	/* Always set the new offset value and realign the ring. */
	kring->nkr_hwofs = new_hwofs;
	if (tx == NR_TX)
		kring->nr_hwavail = kring->nkr_num_slots - 1;
	D("new hwofs %d on %s %s[%d]",
		kring->nkr_hwofs, na->ifp->if_xname,
		tx == NR_TX ? "TX" : "RX", n);
"TX" : "RX", n); 1632 1633 /* 1634 * Wakeup on the individual and global lock 1635 * We do the wakeup here, but the ring is not yet reconfigured. 1636 * However, we are under lock so there are no races. 1637 */ 1638 selwakeuppri(&kring->si, PI_NET); 1639 selwakeuppri(tx == NR_TX ? &na->tx_si : &na->rx_si, PI_NET); 1640 return kring->ring->slot; 1641 } 1642 1643 1644 /* 1645 * Default functions to handle rx/tx interrupts 1646 * we have 4 cases: 1647 * 1 ring, single lock: 1648 * lock(core); wake(i=0); unlock(core) 1649 * N rings, single lock: 1650 * lock(core); wake(i); wake(N+1) unlock(core) 1651 * 1 ring, separate locks: (i=0) 1652 * lock(i); wake(i); unlock(i) 1653 * N rings, separate locks: 1654 * lock(i); wake(i); unlock(i); lock(core) wake(N+1) unlock(core) 1655 * work_done is non-null on the RX path. 1656 */ 1657 int 1658 netmap_rx_irq(struct ifnet *ifp, int q, int *work_done) 1659 { 1660 struct netmap_adapter *na; 1661 struct netmap_kring *r; 1662 NM_SELINFO_T *main_wq; 1663 1664 if (!(ifp->if_capenable & IFCAP_NETMAP)) 1665 return 0; 1666 na = NA(ifp); 1667 if (work_done) { /* RX path */ 1668 r = na->rx_rings + q; 1669 r->nr_kflags |= NKR_PENDINTR; 1670 main_wq = (na->num_rx_queues > 1) ? &na->tx_si : NULL; 1671 } else { /* tx path */ 1672 r = na->tx_rings + q; 1673 main_wq = (na->num_tx_queues > 1) ? &na->rx_si : NULL; 1674 work_done = &q; /* dummy */ 1675 } 1676 if (na->separate_locks) { 1677 mtx_lock(&r->q_lock); 1678 selwakeuppri(&r->si, PI_NET); 1679 mtx_unlock(&r->q_lock); 1680 if (main_wq) { 1681 mtx_lock(&na->core_lock); 1682 selwakeuppri(main_wq, PI_NET); 1683 mtx_unlock(&na->core_lock); 1684 } 1685 } else { 1686 mtx_lock(&na->core_lock); 1687 selwakeuppri(&r->si, PI_NET); 1688 if (main_wq) 1689 selwakeuppri(main_wq, PI_NET); 1690 mtx_unlock(&na->core_lock); 1691 } 1692 *work_done = 1; /* do not fire napi again */ 1693 return 1; 1694 } 1695 1696 1697 static struct cdevsw netmap_cdevsw = { 1698 .d_version = D_VERSION, 1699 .d_name = "netmap", 1700 .d_mmap = netmap_mmap, 1701 .d_ioctl = netmap_ioctl, 1702 .d_poll = netmap_poll, 1703 }; 1704 1705 1706 static struct cdev *netmap_dev; /* /dev/netmap character device. */ 1707 1708 1709 /* 1710 * Module loader. 1711 * 1712 * Create the /dev/netmap device and initialize all global 1713 * variables. 1714 * 1715 * Return 0 on success, errno on failure. 1716 */ 1717 static int 1718 netmap_init(void) 1719 { 1720 int error; 1721 1722 error = netmap_memory_init(); 1723 if (error != 0) { 1724 printf("netmap: unable to initialize the memory allocator."); 1725 return (error); 1726 } 1727 printf("netmap: loaded module with %d Mbytes\n", 1728 (int)(nm_mem->nm_totalsize >> 20)); 1729 netmap_dev = make_dev(&netmap_cdevsw, 0, UID_ROOT, GID_WHEEL, 0660, 1730 "netmap"); 1731 return (error); 1732 } 1733 1734 1735 /* 1736 * Module unloader. 1737 * 1738 * Free all the memory, and destroy the ``/dev/netmap`` device. 1739 */ 1740 static void 1741 netmap_fini(void) 1742 { 1743 destroy_dev(netmap_dev); 1744 netmap_memory_fini(); 1745 printf("netmap: unloaded module.\n"); 1746 } 1747 1748 1749 /* 1750 * Kernel entry point. 1751 * 1752 * Initialize/finalize the module and return. 1753 * 1754 * Return 0 on success, errno on failure. 
static int
netmap_loader(__unused struct module *module, int event, __unused void *arg)
{
	int error = 0;

	switch (event) {
	case MOD_LOAD:
		error = netmap_init();
		break;

	case MOD_UNLOAD:
		netmap_fini();
		break;

	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}


DEV_MODULE(netmap, netmap_loader, NULL);