/*
 * Copyright (C) 2011 Matteo Landi, Luigi Rizzo. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * $FreeBSD$
 * $Id: netmap.c 9795 2011-12-02 11:39:08Z luigi $
 *
 * This module supports memory mapped access to network devices,
 * see netmap(4).
 *
 * The module uses a large memory pool allocated by the kernel
 * and accessible as mmapped memory by multiple userspace threads/processes.
 * The memory pool contains packet buffers and "netmap rings",
 * i.e. user-accessible copies of the interface's queues.
 *
 * Access to the network card works like this:
 * 1. a process/thread issues one or more open() on /dev/netmap, to create
 *    select()able file descriptors on which events are reported.
 * 2. on each descriptor, the process issues an ioctl() to identify
 *    the interface that should report events to the file descriptor.
 * 3. on each descriptor, the process issues an mmap() request to
 *    map the shared memory region within the process' address space.
 *    The list of interesting queues is indicated by a location in
 *    the shared memory region.
 * 4. using the functions in the netmap(4) userspace API, a process
 *    can look up the occupation state of a queue, access memory buffers,
 *    and retrieve received packets or enqueue packets to transmit.
 * 5. using some ioctl()s the process can synchronize the userspace view
 *    of the queue with the actual status in the kernel. This includes both
 *    receiving the notification of new packets, and transmitting new
 *    packets on the output interface.
 * 6. select() or poll() can be used to wait for events on individual
 *    transmit or receive queues (or all queues for a given interface).
 */
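/*
 * A minimal userspace sketch of steps 1-4 above, for orientation only.
 * Error handling is omitted; struct nmreq is defined in net/netmap.h and
 * the NETMAP_IF() accessor in net/netmap_user.h:
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <net/netmap.h>
 *	#include <net/netmap_user.h>
 *
 *	struct nmreq nmr;
 *	int fd = open("/dev/netmap", O_RDWR);		// step 1
 *	bzero(&nmr, sizeof(nmr));
 *	strlcpy(nmr.nr_name, "em0", sizeof(nmr.nr_name));
 *	ioctl(fd, NIOCREGIF, &nmr);			// step 2
 *	char *mem = mmap(0, nmr.nr_memsize,		// step 3
 *	    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	struct netmap_if *nifp = NETMAP_IF(mem, nmr.nr_offset); // step 4
 */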
#include <sys/cdefs.h> /* prerequisite */
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/module.h>
#include <sys/errno.h>
#include <sys/param.h>	/* defines used in kernel.h */
#include <sys/jail.h>
#include <sys/kernel.h>	/* types used in module initialization */
#include <sys/conf.h>	/* cdevsw struct */
#include <sys/uio.h>	/* uio struct */
#include <sys/sockio.h>
#include <sys/socketvar.h>	/* struct socket */
#include <sys/malloc.h>
#include <sys/mman.h>	/* PROT_EXEC */
#include <sys/poll.h>
#include <sys/proc.h>
#include <vm/vm.h>	/* vtophys */
#include <vm/pmap.h>	/* vtophys */
#include <sys/socket.h> /* sockaddrs */
#include <sys/selinfo.h>
#include <sys/sysctl.h>
#include <net/if.h>
#include <net/bpf.h>	/* BIOCIMMEDIATE */
#include <net/vnet.h>
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <machine/bus.h>	/* bus_dmamap_* */

MALLOC_DEFINE(M_NETMAP, "netmap", "Network memory map");

/*
 * lock and unlock for the netmap memory allocator
 */
#define NMA_LOCK()	mtx_lock(&netmap_mem_d->nm_mtx)
#define NMA_UNLOCK()	mtx_unlock(&netmap_mem_d->nm_mtx)

/*
 * Default amount of memory pre-allocated by the module.
 * We start with a large size and then shrink our demand
 * according to what is available when the module is loaded.
 * At the moment the block is contiguous, but we can easily
 * restrict our demand to smaller units (16..64k).
 */
#define NETMAP_MEMORY_SIZE (64 * 1024 * PAGE_SIZE)
static void * netmap_malloc(size_t size, const char *msg);
static void netmap_free(void *addr, const char *msg);

#define netmap_if_malloc(len)	netmap_malloc(len, "nifp")
#define netmap_if_free(v)	netmap_free((v), "nifp")

#define netmap_ring_malloc(len)	netmap_malloc(len, "ring")
#define netmap_free_rings(na)	\
	netmap_free((na)->tx_rings[0].ring, "shadow rings")

/*
 * Allocator for a pool of packet buffers. For each buffer we have
 * one entry in the bitmap to signal the state. Allocation scans
 * the bitmap, but since this is done only on attach, we are not
 * too worried about performance.
 * XXX if we need to allocate small blocks, a translation
 * table must be used both for kernel virtual addresses and
 * physical addresses.
 */
struct netmap_buf_pool {
	u_int total_buffers;	/* total buffers. */
	u_int free;
	u_int bufsize;
	char *base;		/* buffer base address */
	uint32_t *bitmap;	/* one bit per buffer, 1 means free */
};
struct netmap_buf_pool nm_buf_pool;
/* XXX move these two vars back into netmap_buf_pool */
u_int netmap_total_buffers;
char *netmap_buffer_base;	/* address of an invalid buffer */

/* user-controlled variables */
int netmap_verbose;

static int no_timestamp; /* don't timestamp on rxsync */

SYSCTL_NODE(_dev, OID_AUTO, netmap, CTLFLAG_RW, 0, "Netmap args");
SYSCTL_INT(_dev_netmap, OID_AUTO, verbose,
    CTLFLAG_RW, &netmap_verbose, 0, "Verbose mode");
SYSCTL_INT(_dev_netmap, OID_AUTO, no_timestamp,
    CTLFLAG_RW, &no_timestamp, 0, "no_timestamp");
SYSCTL_INT(_dev_netmap, OID_AUTO, total_buffers,
    CTLFLAG_RD, &nm_buf_pool.total_buffers, 0, "total_buffers");
SYSCTL_INT(_dev_netmap, OID_AUTO, free_buffers,
    CTLFLAG_RD, &nm_buf_pool.free, 0, "free_buffers");
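/*
 * How a buffer index maps to an address, shown as an illustrative
 * sketch (the accessor actually used by the rest of this file is the
 * NMB() macro from netmap_kern.h; the arithmetic below just mirrors
 * the pool layout):
 *
 *	char *
 *	example_buf_address(uint32_t buf_idx)	// hypothetical helper
 *	{
 *		// buffers are contiguous and fixed-size, so the index
 *		// selects the buffer directly; bit (buf_idx % 32) of
 *		// bitmap[buf_idx / 32] says whether it is free.
 *		return (nm_buf_pool.base + buf_idx * nm_buf_pool.bufsize);
 *	}
 */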
/*
 * Allocate n buffers from the ring, and fill the slot.
 * Buffer 0 is the 'junk' buffer.
 */
static void
netmap_new_bufs(struct netmap_if *nifp __unused,
		struct netmap_slot *slot, u_int n)
{
	struct netmap_buf_pool *p = &nm_buf_pool;
	uint32_t bi = 0;		/* index in the bitmap */
	uint32_t mask, j, i = 0;	/* slot counter */

	if (n > p->free) {
		D("only %d out of %d buffers available", p->free, n);
		return;
	}
	/* termination is guaranteed by p->free */
	while (i < n && p->free > 0) {
		uint32_t cur = p->bitmap[bi];
		if (cur == 0) {	/* bitmask is fully used */
			bi++;
			continue;
		}
		/* locate a slot */
		for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1)
			;
		p->bitmap[bi] &= ~mask;	/* slot in use */
		p->free--;
		slot[i].buf_idx = bi * 32 + j;
		slot[i].len = p->bufsize;
		slot[i].flags = NS_BUF_CHANGED;
		i++;
	}
	ND("allocated %d buffers, %d available", n, p->free);
}


static void
netmap_free_buf(struct netmap_if *nifp __unused, uint32_t i)
{
	struct netmap_buf_pool *p = &nm_buf_pool;

	uint32_t pos, mask;
	if (i >= p->total_buffers) {
		D("invalid free index %d", i);
		return;
	}
	pos = i / 32;
	mask = 1 << (i % 32);
	if (p->bitmap[pos] & mask) {
		D("slot %d already free", i);
		return;
	}
	p->bitmap[pos] |= mask;
	p->free++;
}


/* Descriptor of the memory objects handled by our memory allocator. */
struct netmap_mem_obj {
	TAILQ_ENTRY(netmap_mem_obj) nmo_next;	/* next object in the
						   chain. */
	int nmo_used;		/* flag set on used memory objects. */
	size_t nmo_size;	/* size of the memory area reserved for
				   the object. */
	void *nmo_data;		/* pointer to the memory area. */
};

/* Wrap our memory objects to make them ``chainable``. */
TAILQ_HEAD(netmap_mem_obj_h, netmap_mem_obj);


/* Descriptor of our custom memory allocator. */
struct netmap_mem_d {
	struct mtx nm_mtx;	/* lock used to handle the chain of memory
				   objects. */
	struct netmap_mem_obj_h nm_molist;	/* list of memory objects */
	size_t nm_size;		/* total amount of memory used for rings etc. */
	size_t nm_totalsize;	/* total amount of allocated memory
				   (the difference is used for buffers) */
	size_t nm_buf_start;	/* offset of packet buffers.
				   This is page-aligned. */
	size_t nm_buf_len;	/* total memory for buffers */
	void *nm_buffer;	/* pointer to the whole pre-allocated memory
				   area. */
};


/* Structure associated with each thread which registered an interface. */
struct netmap_priv_d {
	struct netmap_if *np_nifp;	/* netmap interface descriptor. */

	struct ifnet *np_ifp;	/* device for which we hold a reference */
	int np_ringid;		/* from the ioctl */
	u_int np_qfirst, np_qlast;	/* range of rings to scan */
	uint16_t np_txpoll;
};

/* Shorthand to compute a netmap interface offset. */
#define netmap_if_offset(v)				\
	((char *) (v) - (char *) netmap_mem_d->nm_buffer)
/* .. and get a physical address given a memory offset */
#define netmap_ofstophys(o)				\
	(vtophys(netmap_mem_d->nm_buffer) + (o))

static struct cdev *netmap_dev; /* /dev/netmap character device. */
static struct netmap_mem_d *netmap_mem_d;	/* Our memory allocator. */
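/*
 * netmap_if_offset() and netmap_ofstophys() work because the whole
 * region comes from a single, physically contiguous chunk (see
 * netmap_memory_init() below), so one base-plus-offset translation is
 * enough. A worked example with made-up numbers: if nm_buffer sits at
 * physical address 0x10000 and an object lives 0x2000 bytes into the
 * region, netmap_if_offset() returns 0x2000 and
 * netmap_ofstophys(0x2000) returns 0x12000.
 */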
*/ 256 257 258 static d_mmap_t netmap_mmap; 259 static d_ioctl_t netmap_ioctl; 260 static d_poll_t netmap_poll; 261 262 #ifdef NETMAP_KEVENT 263 static d_kqfilter_t netmap_kqfilter; 264 #endif 265 266 static struct cdevsw netmap_cdevsw = { 267 .d_version = D_VERSION, 268 .d_name = "netmap", 269 .d_mmap = netmap_mmap, 270 .d_ioctl = netmap_ioctl, 271 .d_poll = netmap_poll, 272 #ifdef NETMAP_KEVENT 273 .d_kqfilter = netmap_kqfilter, 274 #endif 275 }; 276 277 #ifdef NETMAP_KEVENT 278 static int netmap_kqread(struct knote *, long); 279 static int netmap_kqwrite(struct knote *, long); 280 static void netmap_kqdetach(struct knote *); 281 282 static struct filterops netmap_read_filterops = { 283 .f_isfd = 1, 284 .f_attach = NULL, 285 .f_detach = netmap_kqdetach, 286 .f_event = netmap_kqread, 287 }; 288 289 static struct filterops netmap_write_filterops = { 290 .f_isfd = 1, 291 .f_attach = NULL, 292 .f_detach = netmap_kqdetach, 293 .f_event = netmap_kqwrite, 294 }; 295 296 /* 297 * support for the kevent() system call. 298 * 299 * This is the kevent filter, and is executed each time a new event 300 * is triggered on the device. This function execute some operation 301 * depending on the received filter. 302 * 303 * The implementation should test the filters and should implement 304 * filter operations we are interested on (a full list in /sys/event.h). 305 * 306 * On a match we should: 307 * - set kn->kn_fop 308 * - set kn->kn_hook 309 * - call knlist_add() to deliver the event to the application. 310 * 311 * Return 0 if the event should be delivered to the application. 312 */ 313 static int 314 netmap_kqfilter(struct cdev *dev, struct knote *kn) 315 { 316 /* declare variables needed to read/write */ 317 318 switch(kn->kn_filter) { 319 case EVFILT_READ: 320 if (netmap_verbose) 321 D("%s kqfilter: EVFILT_READ" ifp->if_xname); 322 323 /* read operations */ 324 kn->kn_fop = &netmap_read_filterops; 325 break; 326 327 case EVFILT_WRITE: 328 if (netmap_verbose) 329 D("%s kqfilter: EVFILT_WRITE" ifp->if_xname); 330 331 /* write operations */ 332 kn->kn_fop = &netmap_write_filterops; 333 break; 334 335 default: 336 if (netmap_verbose) 337 D("%s kqfilter: invalid filter" ifp->if_xname); 338 return(EINVAL); 339 } 340 341 kn->kn_hook = 0;// 342 knlist_add(&netmap_sc->tun_rsel.si_note, kn, 0); 343 344 return (0); 345 } 346 #endif /* NETMAP_KEVENT */ 347 348 /* 349 * File descriptor's private data destructor. 350 * 351 * Call nm_register(ifp,0) to stop netmap mode on the interface and 352 * revert to normal operation. We expect that np_ifp has not gone. 353 */ 354 static void 355 netmap_dtor(void *data) 356 { 357 struct netmap_priv_d *priv = data; 358 struct ifnet *ifp = priv->np_ifp; 359 struct netmap_adapter *na = NA(ifp); 360 struct netmap_if *nifp = priv->np_nifp; 361 362 if (0) 363 printf("%s starting for %p ifp %p\n", __FUNCTION__, priv, 364 priv ? priv->np_ifp : NULL); 365 366 na->nm_lock(ifp->if_softc, NETMAP_CORE_LOCK, 0); 367 368 na->refcount--; 369 if (na->refcount <= 0) { /* last instance */ 370 u_int i; 371 372 D("deleting last netmap instance for %s", ifp->if_xname); 373 /* 374 * there is a race here with *_netmap_task() and 375 * netmap_poll(), which don't run under NETMAP_CORE_LOCK. 376 * na->refcount == 0 && na->ifp->if_capenable & IFCAP_NETMAP 377 * (aka NETMAP_DELETING(na)) are a unique marker that the 378 * device is dying. 379 * Before destroying stuff we sleep a bit, and then complete 380 * the job. 
		 * loop until they can continue; the other routines
		 * should check the condition at entry and quit if
		 * they cannot run.
		 */
		na->nm_lock(ifp->if_softc, NETMAP_CORE_UNLOCK, 0);
		tsleep(na, 0, "NIOCUNREG", 4);
		na->nm_lock(ifp->if_softc, NETMAP_CORE_LOCK, 0);
		na->nm_register(ifp, 0); /* off, clear IFCAP_NETMAP */
		/* Wake up any sleeping threads. netmap_poll will
		 * then return POLLERR
		 */
		for (i = 0; i < na->num_queues + 2; i++) {
			selwakeuppri(&na->tx_rings[i].si, PI_NET);
			selwakeuppri(&na->rx_rings[i].si, PI_NET);
		}
		/* release all buffers */
		NMA_LOCK();
		for (i = 0; i < na->num_queues + 1; i++) {
			int j, lim;
			struct netmap_ring *ring;

			ND("tx queue %d", i);
			ring = na->tx_rings[i].ring;
			lim = na->tx_rings[i].nkr_num_slots;
			for (j = 0; j < lim; j++)
				netmap_free_buf(nifp, ring->slot[j].buf_idx);

			ND("rx queue %d", i);
			ring = na->rx_rings[i].ring;
			lim = na->rx_rings[i].nkr_num_slots;
			for (j = 0; j < lim; j++)
				netmap_free_buf(nifp, ring->slot[j].buf_idx);
		}
		NMA_UNLOCK();
		netmap_free_rings(na);
		wakeup(na);
	}
	netmap_if_free(nifp);

	na->nm_lock(ifp->if_softc, NETMAP_CORE_UNLOCK, 0);

	if_rele(ifp);

	bzero(priv, sizeof(*priv));	/* XXX for safety */
	free(priv, M_DEVBUF);
}



/*
 * Create and return a new ``netmap_if`` object, and possibly also
 * rings and packet buffers.
 *
 * Return NULL on failure.
 */
static void *
netmap_if_new(const char *ifname, struct netmap_adapter *na)
{
	struct netmap_if *nifp;
	struct netmap_ring *ring;
	char *buff;
	u_int i, len, ofs;
	u_int n = na->num_queues + 1; /* shorthand, include stack queue */

	/*
	 * the descriptor is followed inline by an array of offsets
	 * to the tx and rx rings in the shared memory region.
	 */
	len = sizeof(struct netmap_if) + 2 * n * sizeof(ssize_t);
	nifp = netmap_if_malloc(len);
	if (nifp == NULL)
		return (NULL);

	/* initialize base fields */
	*(int *)(uintptr_t)&nifp->ni_num_queues = na->num_queues;
	strncpy(nifp->ni_name, ifname, IFNAMSIZ);

	(na->refcount)++;	/* XXX atomic ? we are under lock */
	if (na->refcount > 1)
		goto final;

	/*
	 * If this is the first instance, allocate the shadow rings and
	 * buffers for this card (one for each hw queue, one for the host).
	 * The rings are contiguous, but have variable size.
	 * The entire block is reachable at
	 *	na->tx_rings[0].ring
	 */
	len = n * (2 * sizeof(struct netmap_ring) +
		  (na->num_tx_desc + na->num_rx_desc) *
			sizeof(struct netmap_slot) );
	buff = netmap_ring_malloc(len);
	if (buff == NULL) {
		D("failed to allocate %d bytes for %s shadow ring",
			len, ifname);
error:
		(na->refcount)--;
		netmap_if_free(nifp);
		return (NULL);
	}
	/* do we have the buffers ? we need num_tx_desc buffers for
	 * each tx ring and num_rx_desc buffers for each rx ring. */
	len = n * (na->num_tx_desc + na->num_rx_desc);
	NMA_LOCK();
	if (nm_buf_pool.free < len) {
		NMA_UNLOCK();
		netmap_free(buff, "not enough bufs");
		goto error;
	}
	/*
	 * in the kring, store the pointers to the shared rings
	 * and initialize the rings. We are under NMA_LOCK().
	 */
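	/*
	 * Layout of the block just allocated, with the sizes computed
	 * above (illustrative): for each of the n queues there is a tx
	 * netmap_ring immediately followed by its num_tx_desc slots,
	 * then an rx netmap_ring followed by its num_rx_desc slots.
	 * "ofs" below walks this sequence.
	 */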
494 */ 495 ofs = 0; 496 for (i = 0; i < n; i++) { 497 struct netmap_kring *kring; 498 int numdesc; 499 500 /* Transmit rings */ 501 kring = &na->tx_rings[i]; 502 numdesc = na->num_tx_desc; 503 bzero(kring, sizeof(*kring)); 504 kring->na = na; 505 506 ring = kring->ring = (struct netmap_ring *)(buff + ofs); 507 *(ssize_t *)(uintptr_t)&ring->buf_ofs = 508 nm_buf_pool.base - (char *)ring; 509 ND("txring[%d] at %p ofs %d", i, ring, ring->buf_ofs); 510 *(int *)(int *)(uintptr_t)&ring->num_slots = 511 kring->nkr_num_slots = numdesc; 512 513 /* 514 * IMPORTANT: 515 * Always keep one slot empty, so we can detect new 516 * transmissions comparing cur and nr_hwcur (they are 517 * the same only if there are no new transmissions). 518 */ 519 ring->avail = kring->nr_hwavail = numdesc - 1; 520 ring->cur = kring->nr_hwcur = 0; 521 netmap_new_bufs(nifp, ring->slot, numdesc); 522 523 ofs += sizeof(struct netmap_ring) + 524 numdesc * sizeof(struct netmap_slot); 525 526 /* Receive rings */ 527 kring = &na->rx_rings[i]; 528 numdesc = na->num_rx_desc; 529 bzero(kring, sizeof(*kring)); 530 kring->na = na; 531 532 ring = kring->ring = (struct netmap_ring *)(buff + ofs); 533 *(ssize_t *)(uintptr_t)&ring->buf_ofs = 534 nm_buf_pool.base - (char *)ring; 535 ND("rxring[%d] at %p offset %d", i, ring, ring->buf_ofs); 536 *(int *)(int *)(uintptr_t)&ring->num_slots = 537 kring->nkr_num_slots = numdesc; 538 ring->cur = kring->nr_hwcur = 0; 539 ring->avail = kring->nr_hwavail = 0; /* empty */ 540 netmap_new_bufs(nifp, ring->slot, numdesc); 541 ofs += sizeof(struct netmap_ring) + 542 numdesc * sizeof(struct netmap_slot); 543 } 544 NMA_UNLOCK(); 545 for (i = 0; i < n+1; i++) { 546 // XXX initialize the selrecord structs. 547 } 548 final: 549 /* 550 * fill the slots for the rx and tx queues. They contain the offset 551 * between the ring and nifp, so the information is usable in 552 * userspace to reach the ring from the nifp. 553 */ 554 for (i = 0; i < n; i++) { 555 char *base = (char *)nifp; 556 *(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] = 557 (char *)na->tx_rings[i].ring - base; 558 *(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+n] = 559 (char *)na->rx_rings[i].ring - base; 560 } 561 return (nifp); 562 } 563 564 565 /* 566 * mmap(2) support for the "netmap" device. 567 * 568 * Expose all the memory previously allocated by our custom memory 569 * allocator: this way the user has only to issue a single mmap(2), and 570 * can work on all the data structures flawlessly. 571 * 572 * Return 0 on success, -1 otherwise. 573 */ 574 static int 575 #if __FreeBSD_version < 900000 576 netmap_mmap(__unused struct cdev *dev, vm_offset_t offset, vm_paddr_t *paddr, 577 int nprot) 578 #else 579 netmap_mmap(__unused struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr, 580 int nprot, __unused vm_memattr_t *memattr) 581 #endif 582 { 583 if (nprot & PROT_EXEC) 584 return (-1); // XXX -1 or EINVAL ? 585 586 ND("request for offset 0x%x", (uint32_t)offset); 587 *paddr = netmap_ofstophys(offset); 588 589 return (0); 590 } 591 592 593 /* 594 * Handlers for synchronization of the queues from/to the host. 595 * 596 * netmap_sync_to_host() passes packets up. We are called from a 597 * system call in user process context, and the only contention 598 * can be among multiple user threads erroneously calling 599 * this routine concurrently. In principle we should not even 600 * need to lock. 
601 */ 602 static void 603 netmap_sync_to_host(struct netmap_adapter *na) 604 { 605 struct netmap_kring *kring = &na->tx_rings[na->num_queues]; 606 struct netmap_ring *ring = kring->ring; 607 struct mbuf *head = NULL, *tail = NULL, *m; 608 u_int k, n, lim = kring->nkr_num_slots - 1; 609 610 k = ring->cur; 611 if (k > lim) { 612 netmap_ring_reinit(kring); 613 return; 614 } 615 // na->nm_lock(na->ifp->if_softc, NETMAP_CORE_LOCK, 0); 616 617 /* Take packets from hwcur to cur and pass them up. 618 * In case of no buffers we give up. At the end of the loop, 619 * the queue is drained in all cases. 620 */ 621 for (n = kring->nr_hwcur; n != k;) { 622 struct netmap_slot *slot = &ring->slot[n]; 623 624 n = (n == lim) ? 0 : n + 1; 625 if (slot->len < 14 || slot->len > NETMAP_BUF_SIZE) { 626 D("bad pkt at %d len %d", n, slot->len); 627 continue; 628 } 629 m = m_devget(NMB(slot), slot->len, 0, na->ifp, NULL); 630 631 if (m == NULL) 632 break; 633 if (tail) 634 tail->m_nextpkt = m; 635 else 636 head = m; 637 tail = m; 638 m->m_nextpkt = NULL; 639 } 640 kring->nr_hwcur = k; 641 kring->nr_hwavail = ring->avail = lim; 642 // na->nm_lock(na->ifp->if_softc, NETMAP_CORE_UNLOCK, 0); 643 644 /* send packets up, outside the lock */ 645 while ((m = head) != NULL) { 646 head = head->m_nextpkt; 647 m->m_nextpkt = NULL; 648 m->m_pkthdr.rcvif = na->ifp; 649 if (netmap_verbose & NM_VERB_HOST) 650 D("sending up pkt %p size %d", m, m->m_pkthdr.len); 651 (na->ifp->if_input)(na->ifp, m); 652 } 653 } 654 655 /* 656 * rxsync backend for packets coming from the host stack. 657 * They have been put in the queue by netmap_start() so we 658 * need to protect access to the kring using a lock. 659 * 660 * This routine also does the selrecord if called from the poll handler 661 * (we know because td != NULL). 662 */ 663 static void 664 netmap_sync_from_host(struct netmap_adapter *na, struct thread *td) 665 { 666 struct netmap_kring *kring = &na->rx_rings[na->num_queues]; 667 struct netmap_ring *ring = kring->ring; 668 int error = 1, delta; 669 u_int k = ring->cur, lim = kring->nkr_num_slots; 670 671 na->nm_lock(na->ifp->if_softc, NETMAP_CORE_LOCK, 0); 672 if (k >= lim) /* bad value */ 673 goto done; 674 delta = k - kring->nr_hwcur; 675 if (delta < 0) 676 delta += lim; 677 kring->nr_hwavail -= delta; 678 if (kring->nr_hwavail < 0) /* error */ 679 goto done; 680 kring->nr_hwcur = k; 681 error = 0; 682 k = ring->avail = kring->nr_hwavail; 683 if (k == 0 && td) 684 selrecord(td, &kring->si); 685 if (k && (netmap_verbose & NM_VERB_HOST)) 686 D("%d pkts from stack", k); 687 done: 688 na->nm_lock(na->ifp->if_softc, NETMAP_CORE_UNLOCK, 0); 689 if (error) 690 netmap_ring_reinit(kring); 691 } 692 693 694 /* 695 * get a refcounted reference to an interface. 696 * Return ENXIO if the interface does not exist, EINVAL if netmap 697 * is not supported by the interface. 698 * If successful, hold a reference. 699 */ 700 static int 701 get_ifp(const char *name, struct ifnet **ifp) 702 { 703 *ifp = ifunit_ref(name); 704 if (*ifp == NULL) 705 return (ENXIO); 706 /* can do this if the capability exists and if_pspare[0] 707 * points to the netmap descriptor. 708 */ 709 if ((*ifp)->if_capabilities & IFCAP_NETMAP && NA(*ifp)) 710 return 0; /* valid pointer, we hold the refcount */ 711 if_rele(*ifp); 712 return EINVAL; // not NETMAP capable 713 } 714 715 716 /* 717 * Error routine called when txsync/rxsync detects an error. 718 * Can't do much more than resetting cur = hwcur, avail = hwavail. 719 * Return 1 on reinit. 
720 * 721 * This routine is only called by the upper half of the kernel. 722 * It only reads hwcur (which is changed only by the upper half, too) 723 * and hwavail (which may be changed by the lower half, but only on 724 * a tx ring and only to increase it, so any error will be recovered 725 * on the next call). For the above, we don't strictly need to call 726 * it under lock. 727 */ 728 int 729 netmap_ring_reinit(struct netmap_kring *kring) 730 { 731 struct netmap_ring *ring = kring->ring; 732 u_int i, lim = kring->nkr_num_slots - 1; 733 int errors = 0; 734 735 D("called for %s", kring->na->ifp->if_xname); 736 if (ring->cur > lim) 737 errors++; 738 for (i = 0; i <= lim; i++) { 739 u_int idx = ring->slot[i].buf_idx; 740 u_int len = ring->slot[i].len; 741 if (idx < 2 || idx >= netmap_total_buffers) { 742 if (!errors++) 743 D("bad buffer at slot %d idx %d len %d ", i, idx, len); 744 ring->slot[i].buf_idx = 0; 745 ring->slot[i].len = 0; 746 } else if (len > NETMAP_BUF_SIZE) { 747 ring->slot[i].len = 0; 748 if (!errors++) 749 D("bad len %d at slot %d idx %d", 750 len, i, idx); 751 } 752 } 753 if (errors) { 754 int pos = kring - kring->na->tx_rings; 755 int n = kring->na->num_queues + 2; 756 757 D("total %d errors", errors); 758 errors++; 759 D("%s %s[%d] reinit, cur %d -> %d avail %d -> %d", 760 kring->na->ifp->if_xname, 761 pos < n ? "TX" : "RX", pos < n ? pos : pos - n, 762 ring->cur, kring->nr_hwcur, 763 ring->avail, kring->nr_hwavail); 764 ring->cur = kring->nr_hwcur; 765 ring->avail = kring->nr_hwavail; 766 } 767 return (errors ? 1 : 0); 768 } 769 770 771 /* 772 * Set the ring ID. For devices with a single queue, a request 773 * for all rings is the same as a single ring. 774 */ 775 static int 776 netmap_set_ringid(struct netmap_priv_d *priv, u_int ringid) 777 { 778 struct ifnet *ifp = priv->np_ifp; 779 struct netmap_adapter *na = NA(ifp); 780 void *adapter = na->ifp->if_softc; /* shorthand */ 781 u_int i = ringid & NETMAP_RING_MASK; 782 /* first time we don't lock */ 783 int need_lock = (priv->np_qfirst != priv->np_qlast); 784 785 if ( (ringid & NETMAP_HW_RING) && i >= na->num_queues) { 786 D("invalid ring id %d", i); 787 return (EINVAL); 788 } 789 if (need_lock) 790 na->nm_lock(adapter, NETMAP_CORE_LOCK, 0); 791 priv->np_ringid = ringid; 792 if (ringid & NETMAP_SW_RING) { 793 priv->np_qfirst = na->num_queues; 794 priv->np_qlast = na->num_queues + 1; 795 } else if (ringid & NETMAP_HW_RING) { 796 priv->np_qfirst = i; 797 priv->np_qlast = i + 1; 798 } else { 799 priv->np_qfirst = 0; 800 priv->np_qlast = na->num_queues; 801 } 802 priv->np_txpoll = (ringid & NETMAP_NO_TX_POLL) ? 0 : 1; 803 if (need_lock) 804 na->nm_lock(adapter, NETMAP_CORE_UNLOCK, 0); 805 if (ringid & NETMAP_SW_RING) 806 D("ringid %s set to SW RING", ifp->if_xname); 807 else if (ringid & NETMAP_HW_RING) 808 D("ringid %s set to HW RING %d", ifp->if_xname, 809 priv->np_qfirst); 810 else 811 D("ringid %s set to all %d HW RINGS", ifp->if_xname, 812 priv->np_qlast); 813 return 0; 814 } 815 816 /* 817 * ioctl(2) support for the "netmap" device. 818 * 819 * Following a list of accepted commands: 820 * - NIOCGINFO 821 * - SIOCGIFADDR just for convenience 822 * - NIOCREGIF 823 * - NIOCUNREGIF 824 * - NIOCTXSYNC 825 * - NIOCRXSYNC 826 * 827 * Return 0 on success, errno otherwise. 
828 */ 829 static int 830 netmap_ioctl(__unused struct cdev *dev, u_long cmd, caddr_t data, 831 __unused int fflag, struct thread *td) 832 { 833 struct netmap_priv_d *priv = NULL; 834 struct ifnet *ifp; 835 struct nmreq *nmr = (struct nmreq *) data; 836 struct netmap_adapter *na; 837 void *adapter; 838 int error; 839 u_int i; 840 struct netmap_if *nifp; 841 842 CURVNET_SET(TD_TO_VNET(td)); 843 844 error = devfs_get_cdevpriv((void **)&priv); 845 if (error != ENOENT && error != 0) { 846 CURVNET_RESTORE(); 847 return (error); 848 } 849 850 error = 0; /* Could be ENOENT */ 851 switch (cmd) { 852 case NIOCGINFO: /* return capabilities etc */ 853 /* memsize is always valid */ 854 nmr->nr_memsize = netmap_mem_d->nm_totalsize; 855 nmr->nr_offset = 0; 856 nmr->nr_numrings = 0; 857 nmr->nr_numslots = 0; 858 if (nmr->nr_name[0] == '\0') /* just get memory info */ 859 break; 860 error = get_ifp(nmr->nr_name, &ifp); /* get a refcount */ 861 if (error) 862 break; 863 na = NA(ifp); /* retrieve netmap_adapter */ 864 nmr->nr_numrings = na->num_queues; 865 nmr->nr_numslots = na->num_tx_desc; 866 if_rele(ifp); /* return the refcount */ 867 break; 868 869 case NIOCREGIF: 870 if (priv != NULL) { /* thread already registered */ 871 error = netmap_set_ringid(priv, nmr->nr_ringid); 872 break; 873 } 874 /* find the interface and a reference */ 875 error = get_ifp(nmr->nr_name, &ifp); /* keep reference */ 876 if (error) 877 break; 878 na = NA(ifp); /* retrieve netmap adapter */ 879 adapter = na->ifp->if_softc; /* shorthand */ 880 /* 881 * Allocate the private per-thread structure. 882 * XXX perhaps we can use a blocking malloc ? 883 */ 884 priv = malloc(sizeof(struct netmap_priv_d), M_DEVBUF, 885 M_NOWAIT | M_ZERO); 886 if (priv == NULL) { 887 error = ENOMEM; 888 if_rele(ifp); /* return the refcount */ 889 break; 890 } 891 892 893 for (i = 10; i > 0; i--) { 894 na->nm_lock(adapter, NETMAP_CORE_LOCK, 0); 895 if (!NETMAP_DELETING(na)) 896 break; 897 na->nm_lock(adapter, NETMAP_CORE_UNLOCK, 0); 898 tsleep(na, 0, "NIOCREGIF", hz/10); 899 } 900 if (i == 0) { 901 D("too many NIOCREGIF attempts, give up"); 902 error = EINVAL; 903 free(priv, M_DEVBUF); 904 if_rele(ifp); /* return the refcount */ 905 break; 906 } 907 908 priv->np_ifp = ifp; /* store the reference */ 909 error = netmap_set_ringid(priv, nmr->nr_ringid); 910 if (error) 911 goto error; 912 priv->np_nifp = nifp = netmap_if_new(nmr->nr_name, na); 913 if (nifp == NULL) { /* allocation failed */ 914 error = ENOMEM; 915 } else if (ifp->if_capenable & IFCAP_NETMAP) { 916 /* was already set */ 917 } else { 918 /* Otherwise set the card in netmap mode 919 * and make it use the shared buffers. 920 */ 921 error = na->nm_register(ifp, 1); /* mode on */ 922 if (error) { 923 /* 924 * do something similar to netmap_dtor(). 925 */ 926 netmap_free_rings(na); 927 // XXX tx_rings is inline, must not be freed. 928 // free(na->tx_rings, M_DEVBUF); // XXX wrong ? 929 na->tx_rings = na->rx_rings = NULL; 930 na->refcount--; 931 netmap_if_free(nifp); 932 nifp = NULL; 933 } 934 } 935 936 if (error) { /* reg. failed, release priv and ref */ 937 error: 938 na->nm_lock(adapter, NETMAP_CORE_UNLOCK, 0); 939 free(priv, M_DEVBUF); 940 if_rele(ifp); /* return the refcount */ 941 break; 942 } 943 944 na->nm_lock(adapter, NETMAP_CORE_UNLOCK, 0); 945 error = devfs_set_cdevpriv(priv, netmap_dtor); 946 947 if (error != 0) { 948 /* could not assign the private storage for the 949 * thread, call the destructor explicitly. 
950 */ 951 netmap_dtor(priv); 952 break; 953 } 954 955 /* return the offset of the netmap_if object */ 956 nmr->nr_numrings = na->num_queues; 957 nmr->nr_numslots = na->num_tx_desc; 958 nmr->nr_memsize = netmap_mem_d->nm_totalsize; 959 nmr->nr_offset = netmap_if_offset(nifp); 960 break; 961 962 case NIOCUNREGIF: 963 if (priv == NULL) { 964 error = ENXIO; 965 break; 966 } 967 968 /* the interface is unregistered inside the 969 destructor of the private data. */ 970 devfs_clear_cdevpriv(); 971 break; 972 973 case NIOCTXSYNC: 974 case NIOCRXSYNC: 975 if (priv == NULL) { 976 error = ENXIO; 977 break; 978 } 979 ifp = priv->np_ifp; /* we have a reference */ 980 na = NA(ifp); /* retrieve netmap adapter */ 981 adapter = ifp->if_softc; /* shorthand */ 982 983 if (priv->np_qfirst == na->num_queues) { 984 /* queues to/from host */ 985 if (cmd == NIOCTXSYNC) 986 netmap_sync_to_host(na); 987 else 988 netmap_sync_from_host(na, NULL); 989 break; 990 } 991 992 for (i = priv->np_qfirst; i < priv->np_qlast; i++) { 993 if (cmd == NIOCTXSYNC) { 994 struct netmap_kring *kring = &na->tx_rings[i]; 995 if (netmap_verbose & NM_VERB_TXSYNC) 996 D("sync tx ring %d cur %d hwcur %d", 997 i, kring->ring->cur, 998 kring->nr_hwcur); 999 na->nm_txsync(adapter, i, 1 /* do lock */); 1000 if (netmap_verbose & NM_VERB_TXSYNC) 1001 D("after sync tx ring %d cur %d hwcur %d", 1002 i, kring->ring->cur, 1003 kring->nr_hwcur); 1004 } else { 1005 na->nm_rxsync(adapter, i, 1 /* do lock */); 1006 microtime(&na->rx_rings[i].ring->ts); 1007 } 1008 } 1009 1010 break; 1011 1012 case BIOCIMMEDIATE: 1013 case BIOCGHDRCMPLT: 1014 case BIOCSHDRCMPLT: 1015 case BIOCSSEESENT: 1016 D("ignore BIOCIMMEDIATE/BIOCSHDRCMPLT/BIOCSHDRCMPLT/BIOCSSEESENT"); 1017 break; 1018 1019 default: 1020 { 1021 /* 1022 * allow device calls 1023 */ 1024 struct socket so; 1025 bzero(&so, sizeof(so)); 1026 error = get_ifp(nmr->nr_name, &ifp); /* keep reference */ 1027 if (error) 1028 break; 1029 so.so_vnet = ifp->if_vnet; 1030 // so->so_proto not null. 1031 error = ifioctl(&so, cmd, data, td); 1032 if_rele(ifp); 1033 } 1034 } 1035 1036 CURVNET_RESTORE(); 1037 return (error); 1038 } 1039 1040 1041 /* 1042 * select(2) and poll(2) handlers for the "netmap" device. 1043 * 1044 * Can be called for one or more queues. 1045 * Return true the event mask corresponding to ready events. 1046 * If there are no ready events, do a selrecord on either individual 1047 * selfd or on the global one. 1048 * Device-dependent parts (locking and sync of tx/rx rings) 1049 * are done through callbacks. 1050 */ 1051 static int 1052 netmap_poll(__unused struct cdev *dev, int events, struct thread *td) 1053 { 1054 struct netmap_priv_d *priv = NULL; 1055 struct netmap_adapter *na; 1056 struct ifnet *ifp; 1057 struct netmap_kring *kring; 1058 u_int core_lock, i, check_all, want_tx, want_rx, revents = 0; 1059 void *adapter; 1060 1061 if (devfs_get_cdevpriv((void **)&priv) != 0 || priv == NULL) 1062 return POLLERR; 1063 1064 ifp = priv->np_ifp; 1065 // XXX check for deleting() ? 
	if ( (ifp->if_capenable & IFCAP_NETMAP) == 0)
		return POLLERR;

	if (netmap_verbose & 0x8000)
		D("device %s events 0x%x", ifp->if_xname, events);
	want_tx = events & (POLLOUT | POLLWRNORM);
	want_rx = events & (POLLIN | POLLRDNORM);

	adapter = ifp->if_softc;
	na = NA(ifp); /* retrieve netmap adapter */

	/* how many queues we are scanning */
	i = priv->np_qfirst;
	if (i == na->num_queues) { /* from/to host */
		if (priv->np_txpoll || want_tx) {
			/* push any packets up, then we are always ready */
			kring = &na->tx_rings[i];
			netmap_sync_to_host(na);
			revents |= want_tx;
		}
		if (want_rx) {
			kring = &na->rx_rings[i];
			if (kring->ring->avail == 0)
				netmap_sync_from_host(na, td);
			if (kring->ring->avail > 0) {
				revents |= want_rx;
			}
		}
		return (revents);
	}

	/*
	 * check_all is set if the card has more than one queue and
	 * the client is polling all of them. If true, we sleep on
	 * the "global" selfd, otherwise we sleep on individual selfd
	 * (we can only sleep on one of them per direction).
	 * The interrupt routine in the driver should always wake on
	 * the individual selfd, and also on the global one if the card
	 * has more than one ring.
	 *
	 * If the card has only one lock, we just use that.
	 * If the card has separate ring locks, we just use those
	 * unless we are doing check_all, in which case the whole
	 * loop is wrapped by the global lock.
	 * We acquire locks only when necessary: if poll is called
	 * when buffers are available, we can just return without locks.
	 *
	 * rxsync() is only called if we run out of buffers on a POLLIN.
	 * txsync() is called if we run out of buffers on POLLOUT, or
	 * there are pending packets to send. The latter can be disabled
	 * passing NETMAP_NO_TX_POLL in the NIOCREG call.
	 */
	check_all = (i + 1 != priv->np_qlast);

	/*
	 * core_lock indicates what to do with the core lock.
	 * The core lock is used when either the card has no individual
	 * locks, or it has individual locks but we are checking all
	 * rings so we need the core lock to avoid missing wakeup events.
	 *
	 * It has three possible states:
	 * NO_CL	we don't need to use the core lock, e.g.
	 *		because we are protected by individual locks.
	 * NEED_CL	we need the core lock. In this case, when we
	 *		call the lock routine, move to LOCKED_CL
	 *		to remember to release the lock once done.
	 * LOCKED_CL	core lock is set, so we need to release it.
	 */
	enum {NO_CL, NEED_CL, LOCKED_CL };
	core_lock = (check_all || !na->separate_locks) ? NEED_CL : NO_CL;
	/*
	 * We start with a lock free round which is good if we have
	 * data available. If this fails, then lock and call the sync
	 * routines.
	 */
	for (i = priv->np_qfirst; want_rx && i < priv->np_qlast; i++) {
		kring = &na->rx_rings[i];
		if (kring->ring->avail > 0) {
			revents |= want_rx;
			want_rx = 0;	/* also breaks the loop */
		}
	}
	for (i = priv->np_qfirst; want_tx && i < priv->np_qlast; i++) {
		kring = &na->tx_rings[i];
		if (kring->ring->avail > 0) {
			revents |= want_tx;
			want_tx = 0;	/* also breaks the loop */
		}
	}

	/*
	 * If we need to push packets out (priv->np_txpoll) or want_tx is
	 * still set, we do need to run the txsync calls (on all rings,
	 * to avoid that the tx rings stall).
	 */
1160 */ 1161 if (priv->np_txpoll || want_tx) { 1162 for (i = priv->np_qfirst; i < priv->np_qlast; i++) { 1163 kring = &na->tx_rings[i]; 1164 if (!want_tx && kring->ring->cur == kring->nr_hwcur) 1165 continue; 1166 if (core_lock == NEED_CL) { 1167 na->nm_lock(adapter, NETMAP_CORE_LOCK, 0); 1168 core_lock = LOCKED_CL; 1169 } 1170 if (na->separate_locks) 1171 na->nm_lock(adapter, NETMAP_TX_LOCK, i); 1172 if (netmap_verbose & NM_VERB_TXSYNC) 1173 D("send %d on %s %d", 1174 kring->ring->cur, 1175 ifp->if_xname, i); 1176 if (na->nm_txsync(adapter, i, 0 /* no lock */)) 1177 revents |= POLLERR; 1178 1179 if (want_tx) { 1180 if (kring->ring->avail > 0) { 1181 /* stop at the first ring. We don't risk 1182 * starvation. 1183 */ 1184 revents |= want_tx; 1185 want_tx = 0; 1186 } else if (!check_all) 1187 selrecord(td, &kring->si); 1188 } 1189 if (na->separate_locks) 1190 na->nm_lock(adapter, NETMAP_TX_UNLOCK, i); 1191 } 1192 } 1193 1194 /* 1195 * now if want_rx is still set we need to lock and rxsync. 1196 * Do it on all rings because otherwise we starve. 1197 */ 1198 if (want_rx) { 1199 for (i = priv->np_qfirst; i < priv->np_qlast; i++) { 1200 kring = &na->rx_rings[i]; 1201 if (core_lock == NEED_CL) { 1202 na->nm_lock(adapter, NETMAP_CORE_LOCK, 0); 1203 core_lock = LOCKED_CL; 1204 } 1205 if (na->separate_locks) 1206 na->nm_lock(adapter, NETMAP_RX_LOCK, i); 1207 1208 if (na->nm_rxsync(adapter, i, 0 /* no lock */)) 1209 revents |= POLLERR; 1210 if (no_timestamp == 0 || 1211 kring->ring->flags & NR_TIMESTAMP) 1212 microtime(&kring->ring->ts); 1213 1214 if (kring->ring->avail > 0) 1215 revents |= want_rx; 1216 else if (!check_all) 1217 selrecord(td, &kring->si); 1218 if (na->separate_locks) 1219 na->nm_lock(adapter, NETMAP_RX_UNLOCK, i); 1220 } 1221 } 1222 if (check_all && revents == 0) { 1223 i = na->num_queues + 1; /* the global queue */ 1224 if (want_tx) 1225 selrecord(td, &na->tx_rings[i].si); 1226 if (want_rx) 1227 selrecord(td, &na->rx_rings[i].si); 1228 } 1229 if (core_lock == LOCKED_CL) 1230 na->nm_lock(adapter, NETMAP_CORE_UNLOCK, 0); 1231 1232 return (revents); 1233 } 1234 1235 /*------- driver support routines ------*/ 1236 1237 /* 1238 * Initialize a ``netmap_adapter`` object created by driver on attach. 1239 * We allocate a block of memory with room for a struct netmap_adapter 1240 * plus two sets of N+2 struct netmap_kring (where N is the number 1241 * of hardware rings): 1242 * krings 0..N-1 are for the hardware queues. 1243 * kring N is for the host stack queue 1244 * kring N+1 is only used for the selinfo for all queues. 1245 * Return 0 on success, ENOMEM otherwise. 1246 */ 1247 int 1248 netmap_attach(struct netmap_adapter *na, int num_queues) 1249 { 1250 int n = num_queues + 2; 1251 int size = sizeof(*na) + 2 * n * sizeof(struct netmap_kring); 1252 void *buf; 1253 struct ifnet *ifp = na->ifp; 1254 1255 if (ifp == NULL) { 1256 D("ifp not set, giving up"); 1257 return EINVAL; 1258 } 1259 na->refcount = 0; 1260 na->num_queues = num_queues; 1261 1262 buf = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO); 1263 if (buf) { 1264 WNA(ifp) = buf; 1265 na->tx_rings = (void *)((char *)buf + sizeof(*na)); 1266 na->rx_rings = na->tx_rings + n; 1267 bcopy(na, buf, sizeof(*na)); 1268 ifp->if_capabilities |= IFCAP_NETMAP; 1269 } 1270 D("%s for %s", buf ? "ok" : "failed", ifp->if_xname); 1271 1272 return (buf ? 0 : ENOMEM); 1273 } 1274 1275 1276 /* 1277 * Free the allocated memory linked to the given ``netmap_adapter`` 1278 * object. 
1279 */ 1280 void 1281 netmap_detach(struct ifnet *ifp) 1282 { 1283 u_int i; 1284 struct netmap_adapter *na = NA(ifp); 1285 1286 if (!na) 1287 return; 1288 1289 for (i = 0; i < na->num_queues + 2; i++) { 1290 knlist_destroy(&na->tx_rings[i].si.si_note); 1291 knlist_destroy(&na->rx_rings[i].si.si_note); 1292 } 1293 bzero(na, sizeof(*na)); 1294 WNA(ifp) = NULL; 1295 free(na, M_DEVBUF); 1296 } 1297 1298 1299 /* 1300 * Intercept packets from the network stack and pass them 1301 * to netmap as incoming packets on the 'software' ring. 1302 * We are not locked when called. 1303 */ 1304 int 1305 netmap_start(struct ifnet *ifp, struct mbuf *m) 1306 { 1307 struct netmap_adapter *na = NA(ifp); 1308 struct netmap_kring *kring = &na->rx_rings[na->num_queues]; 1309 u_int i, len = m->m_pkthdr.len; 1310 int error = EBUSY, lim = kring->nkr_num_slots - 1; 1311 struct netmap_slot *slot; 1312 1313 if (netmap_verbose & NM_VERB_HOST) 1314 D("%s packet %d len %d from the stack", ifp->if_xname, 1315 kring->nr_hwcur + kring->nr_hwavail, len); 1316 na->nm_lock(ifp->if_softc, NETMAP_CORE_LOCK, 0); 1317 if (kring->nr_hwavail >= lim) { 1318 D("stack ring %s full\n", ifp->if_xname); 1319 goto done; /* no space */ 1320 } 1321 if (len > na->buff_size) { 1322 D("drop packet size %d > %d", len, na->buff_size); 1323 goto done; /* too long for us */ 1324 } 1325 1326 /* compute the insert position */ 1327 i = kring->nr_hwcur + kring->nr_hwavail; 1328 if (i > lim) 1329 i -= lim + 1; 1330 slot = &kring->ring->slot[i]; 1331 m_copydata(m, 0, len, NMB(slot)); 1332 slot->len = len; 1333 kring->nr_hwavail++; 1334 if (netmap_verbose & NM_VERB_HOST) 1335 D("wake up host ring %s %d", na->ifp->if_xname, na->num_queues); 1336 selwakeuppri(&kring->si, PI_NET); 1337 error = 0; 1338 done: 1339 na->nm_lock(ifp->if_softc, NETMAP_CORE_UNLOCK, 0); 1340 1341 /* release the mbuf in either cases of success or failure. As an 1342 * alternative, put the mbuf in a free list and free the list 1343 * only when really necessary. 1344 */ 1345 m_freem(m); 1346 1347 return (error); 1348 } 1349 1350 1351 /* 1352 * netmap_reset() is called by the driver routines when reinitializing 1353 * a ring. The driver is in charge of locking to protect the kring. 1354 * If netmap mode is not set just return NULL. 1355 */ 1356 struct netmap_slot * 1357 netmap_reset(struct netmap_adapter *na, enum txrx tx, int n, 1358 u_int new_cur) 1359 { 1360 struct netmap_kring *kring; 1361 struct netmap_ring *ring; 1362 int new_hwofs, lim; 1363 1364 if (na == NULL) 1365 return NULL; /* no netmap support here */ 1366 if (!(na->ifp->if_capenable & IFCAP_NETMAP)) 1367 return NULL; /* nothing to reinitialize */ 1368 kring = tx == NR_TX ? na->tx_rings + n : na->rx_rings + n; 1369 ring = kring->ring; 1370 lim = kring->nkr_num_slots - 1; 1371 1372 if (tx == NR_TX) 1373 new_hwofs = kring->nr_hwcur - new_cur; 1374 else 1375 new_hwofs = kring->nr_hwcur + kring->nr_hwavail - new_cur; 1376 if (new_hwofs > lim) 1377 new_hwofs -= lim + 1; 1378 1379 /* Alwayws set the new offset value and realign the ring. */ 1380 kring->nkr_hwofs = new_hwofs; 1381 if (tx == NR_TX) 1382 kring->nr_hwavail = kring->nkr_num_slots - 1; 1383 D("new hwofs %d on %s %s[%d]", 1384 kring->nkr_hwofs, na->ifp->if_xname, 1385 tx == NR_TX ? "TX" : "RX", n); 1386 1387 /* 1388 * We do the wakeup here, but the ring is not yet reconfigured. 1389 * However, we are under lock so there are no races. 
1390 */ 1391 selwakeuppri(&kring->si, PI_NET); 1392 selwakeuppri(&kring[na->num_queues + 1 - n].si, PI_NET); 1393 return kring->ring->slot; 1394 } 1395 1396 static void 1397 ns_dmamap_cb(__unused void *arg, __unused bus_dma_segment_t * segs, 1398 __unused int nseg, __unused int error) 1399 { 1400 } 1401 1402 /* unload a bus_dmamap and create a new one. Used when the 1403 * buffer in the slot is changed. 1404 * XXX buflen is probably not needed, buffers have constant size. 1405 */ 1406 void 1407 netmap_reload_map(bus_dma_tag_t tag, bus_dmamap_t map, void *buf) 1408 { 1409 bus_dmamap_unload(tag, map); 1410 bus_dmamap_load(tag, map, buf, NETMAP_BUF_SIZE, ns_dmamap_cb, 1411 NULL, BUS_DMA_NOWAIT); 1412 } 1413 1414 void 1415 netmap_load_map(bus_dma_tag_t tag, bus_dmamap_t map, void *buf) 1416 { 1417 bus_dmamap_load(tag, map, buf, NETMAP_BUF_SIZE, ns_dmamap_cb, 1418 NULL, BUS_DMA_NOWAIT); 1419 } 1420 1421 /*------ netmap memory allocator -------*/ 1422 /* 1423 * Request for a chunk of memory. 1424 * 1425 * Memory objects are arranged into a list, hence we need to walk this 1426 * list until we find an object with the needed amount of data free. 1427 * This sounds like a completely inefficient implementation, but given 1428 * the fact that data allocation is done once, we can handle it 1429 * flawlessly. 1430 * 1431 * Return NULL on failure. 1432 */ 1433 static void * 1434 netmap_malloc(size_t size, __unused const char *msg) 1435 { 1436 struct netmap_mem_obj *mem_obj, *new_mem_obj; 1437 void *ret = NULL; 1438 1439 NMA_LOCK(); 1440 TAILQ_FOREACH(mem_obj, &netmap_mem_d->nm_molist, nmo_next) { 1441 if (mem_obj->nmo_used != 0 || mem_obj->nmo_size < size) 1442 continue; 1443 1444 new_mem_obj = malloc(sizeof(struct netmap_mem_obj), M_NETMAP, 1445 M_WAITOK | M_ZERO); 1446 TAILQ_INSERT_BEFORE(mem_obj, new_mem_obj, nmo_next); 1447 1448 new_mem_obj->nmo_used = 1; 1449 new_mem_obj->nmo_size = size; 1450 new_mem_obj->nmo_data = mem_obj->nmo_data; 1451 memset(new_mem_obj->nmo_data, 0, new_mem_obj->nmo_size); 1452 1453 mem_obj->nmo_size -= size; 1454 mem_obj->nmo_data = (char *) mem_obj->nmo_data + size; 1455 if (mem_obj->nmo_size == 0) { 1456 TAILQ_REMOVE(&netmap_mem_d->nm_molist, mem_obj, 1457 nmo_next); 1458 free(mem_obj, M_NETMAP); 1459 } 1460 1461 ret = new_mem_obj->nmo_data; 1462 1463 break; 1464 } 1465 NMA_UNLOCK(); 1466 ND("%s: %d bytes at %p", msg, size, ret); 1467 1468 return (ret); 1469 } 1470 1471 /* 1472 * Return the memory to the allocator. 1473 * 1474 * While freeing a memory object, we try to merge adjacent chunks in 1475 * order to reduce memory fragmentation. 1476 */ 1477 static void 1478 netmap_free(void *addr, const char *msg) 1479 { 1480 size_t size; 1481 struct netmap_mem_obj *cur, *prev, *next; 1482 1483 if (addr == NULL) { 1484 D("NULL addr for %s", msg); 1485 return; 1486 } 1487 1488 NMA_LOCK(); 1489 TAILQ_FOREACH(cur, &netmap_mem_d->nm_molist, nmo_next) { 1490 if (cur->nmo_data == addr && cur->nmo_used) 1491 break; 1492 } 1493 if (cur == NULL) { 1494 NMA_UNLOCK(); 1495 D("invalid addr %s %p", msg, addr); 1496 return; 1497 } 1498 1499 size = cur->nmo_size; 1500 cur->nmo_used = 0; 1501 1502 /* merge current chunk of memory with the previous one, 1503 if present. 
*/ 1504 prev = TAILQ_PREV(cur, netmap_mem_obj_h, nmo_next); 1505 if (prev && prev->nmo_used == 0) { 1506 TAILQ_REMOVE(&netmap_mem_d->nm_molist, cur, nmo_next); 1507 prev->nmo_size += cur->nmo_size; 1508 free(cur, M_NETMAP); 1509 cur = prev; 1510 } 1511 1512 /* merge with the next one */ 1513 next = TAILQ_NEXT(cur, nmo_next); 1514 if (next && next->nmo_used == 0) { 1515 TAILQ_REMOVE(&netmap_mem_d->nm_molist, next, nmo_next); 1516 cur->nmo_size += next->nmo_size; 1517 free(next, M_NETMAP); 1518 } 1519 NMA_UNLOCK(); 1520 ND("freed %s %d bytes at %p", msg, size, addr); 1521 } 1522 1523 1524 /* 1525 * Initialize the memory allocator. 1526 * 1527 * Create the descriptor for the memory , allocate the pool of memory 1528 * and initialize the list of memory objects with a single chunk 1529 * containing the whole pre-allocated memory marked as free. 1530 * 1531 * Start with a large size, then halve as needed if we fail to 1532 * allocate the block. While halving, always add one extra page 1533 * because buffers 0 and 1 are used for special purposes. 1534 * Return 0 on success, errno otherwise. 1535 */ 1536 static int 1537 netmap_memory_init(void) 1538 { 1539 struct netmap_mem_obj *mem_obj; 1540 void *buf = NULL; 1541 int i, n, sz = NETMAP_MEMORY_SIZE; 1542 int extra_sz = 0; // space for rings and two spare buffers 1543 1544 for (; !buf && sz >= 1<<20; sz >>=1) { 1545 extra_sz = sz/200; 1546 extra_sz = (extra_sz + 2*PAGE_SIZE - 1) & ~(PAGE_SIZE-1); 1547 buf = contigmalloc(sz + extra_sz, 1548 M_NETMAP, 1549 M_WAITOK | M_ZERO, 1550 0, /* low address */ 1551 -1UL, /* high address */ 1552 PAGE_SIZE, /* alignment */ 1553 0 /* boundary */ 1554 ); 1555 } 1556 if (buf == NULL) 1557 return (ENOMEM); 1558 sz += extra_sz; 1559 netmap_mem_d = malloc(sizeof(struct netmap_mem_d), M_NETMAP, 1560 M_WAITOK | M_ZERO); 1561 mtx_init(&netmap_mem_d->nm_mtx, "netmap memory allocator lock", NULL, 1562 MTX_DEF); 1563 TAILQ_INIT(&netmap_mem_d->nm_molist); 1564 netmap_mem_d->nm_buffer = buf; 1565 netmap_mem_d->nm_totalsize = sz; 1566 1567 /* 1568 * A buffer takes 2k, a slot takes 8 bytes + ring overhead, 1569 * so the ratio is 200:1. In other words, we can use 1/200 of 1570 * the memory for the rings, and the rest for the buffers, 1571 * and be sure we never run out. 1572 */ 1573 netmap_mem_d->nm_size = sz/200; 1574 netmap_mem_d->nm_buf_start = 1575 (netmap_mem_d->nm_size + PAGE_SIZE - 1) & ~(PAGE_SIZE-1); 1576 netmap_mem_d->nm_buf_len = sz - netmap_mem_d->nm_buf_start; 1577 1578 nm_buf_pool.base = netmap_mem_d->nm_buffer; 1579 nm_buf_pool.base += netmap_mem_d->nm_buf_start; 1580 netmap_buffer_base = nm_buf_pool.base; 1581 D("netmap_buffer_base %p (offset %d)", 1582 netmap_buffer_base, (int)netmap_mem_d->nm_buf_start); 1583 /* number of buffers, they all start as free */ 1584 1585 netmap_total_buffers = nm_buf_pool.total_buffers = 1586 netmap_mem_d->nm_buf_len / NETMAP_BUF_SIZE; 1587 nm_buf_pool.bufsize = NETMAP_BUF_SIZE; 1588 1589 D("Have %d MB, use %dKB for rings, %d buffers at %p", 1590 (sz >> 20), (int)(netmap_mem_d->nm_size >> 10), 1591 nm_buf_pool.total_buffers, nm_buf_pool.base); 1592 1593 /* allocate and initialize the bitmap. Entry 0 is considered 1594 * always busy (used as default when there are no buffers left). 
1595 */ 1596 n = (nm_buf_pool.total_buffers + 31) / 32; 1597 nm_buf_pool.bitmap = malloc(sizeof(uint32_t) * n, M_NETMAP, 1598 M_WAITOK | M_ZERO); 1599 nm_buf_pool.bitmap[0] = ~3; /* slot 0 and 1 always busy */ 1600 for (i = 1; i < n; i++) 1601 nm_buf_pool.bitmap[i] = ~0; 1602 nm_buf_pool.free = nm_buf_pool.total_buffers - 2; 1603 1604 mem_obj = malloc(sizeof(struct netmap_mem_obj), M_NETMAP, 1605 M_WAITOK | M_ZERO); 1606 TAILQ_INSERT_HEAD(&netmap_mem_d->nm_molist, mem_obj, nmo_next); 1607 mem_obj->nmo_used = 0; 1608 mem_obj->nmo_size = netmap_mem_d->nm_size; 1609 mem_obj->nmo_data = netmap_mem_d->nm_buffer; 1610 1611 return (0); 1612 } 1613 1614 1615 /* 1616 * Finalize the memory allocator. 1617 * 1618 * Free all the memory objects contained inside the list, and deallocate 1619 * the pool of memory; finally free the memory allocator descriptor. 1620 */ 1621 static void 1622 netmap_memory_fini(void) 1623 { 1624 struct netmap_mem_obj *mem_obj; 1625 1626 while (!TAILQ_EMPTY(&netmap_mem_d->nm_molist)) { 1627 mem_obj = TAILQ_FIRST(&netmap_mem_d->nm_molist); 1628 TAILQ_REMOVE(&netmap_mem_d->nm_molist, mem_obj, nmo_next); 1629 if (mem_obj->nmo_used == 1) { 1630 printf("netmap: leaked %d bytes at %p\n", 1631 (int)mem_obj->nmo_size, 1632 mem_obj->nmo_data); 1633 } 1634 free(mem_obj, M_NETMAP); 1635 } 1636 contigfree(netmap_mem_d->nm_buffer, netmap_mem_d->nm_totalsize, M_NETMAP); 1637 // XXX mutex_destroy(nm_mtx); 1638 free(netmap_mem_d, M_NETMAP); 1639 } 1640 1641 1642 /* 1643 * Module loader. 1644 * 1645 * Create the /dev/netmap device and initialize all global 1646 * variables. 1647 * 1648 * Return 0 on success, errno on failure. 1649 */ 1650 static int 1651 netmap_init(void) 1652 { 1653 int error; 1654 1655 1656 error = netmap_memory_init(); 1657 if (error != 0) { 1658 printf("netmap: unable to initialize the memory allocator."); 1659 return (error); 1660 } 1661 printf("netmap: loaded module with %d Mbytes\n", 1662 (int)(netmap_mem_d->nm_totalsize >> 20)); 1663 1664 netmap_dev = make_dev(&netmap_cdevsw, 0, UID_ROOT, GID_WHEEL, 0660, 1665 "netmap"); 1666 1667 return (0); 1668 } 1669 1670 1671 /* 1672 * Module unloader. 1673 * 1674 * Free all the memory, and destroy the ``/dev/netmap`` device. 1675 */ 1676 static void 1677 netmap_fini(void) 1678 { 1679 destroy_dev(netmap_dev); 1680 1681 netmap_memory_fini(); 1682 1683 printf("netmap: unloaded module.\n"); 1684 } 1685 1686 1687 /* 1688 * Kernel entry point. 1689 * 1690 * Initialize/finalize the module and return. 1691 * 1692 * Return 0 on success, errno on failure. 1693 */ 1694 static int 1695 netmap_loader(__unused struct module *module, int event, __unused void *arg) 1696 { 1697 int error = 0; 1698 1699 switch (event) { 1700 case MOD_LOAD: 1701 error = netmap_init(); 1702 break; 1703 1704 case MOD_UNLOAD: 1705 netmap_fini(); 1706 break; 1707 1708 default: 1709 error = EOPNOTSUPP; 1710 break; 1711 } 1712 1713 return (error); 1714 } 1715 1716 1717 DEV_MODULE(netmap, netmap_loader, NULL); 1718