/*
 * Copyright (C) 2013-2016 Vincenzo Maffione
 * Copyright (C) 2013-2016 Luigi Rizzo
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module implements netmap support on top of standard,
 * unmodified device drivers.
 *
 * A NIOCREGIF request is handled here if the device does not
 * have native support. TX and RX rings are emulated as follows:
 *
 * NIOCREGIF
 *	We preallocate a block of TX mbufs (roughly as many as
 *	tx descriptors; the number is not critical) to speed up
 *	operation during transmissions. The refcount on most of
 *	these buffers is artificially bumped up so we can recycle
 *	them more easily. Also, the destructor is intercepted
 *	so we use it as an interrupt notification to wake up
 *	processes blocked on a poll().
 *
 *	For each receive ring we allocate one "struct mbq"
 *	(an mbuf tailq plus a spinlock). We intercept packets
 *	(through if_input)
 *	on the receive path and put them in the mbq from which
 *	netmap receive routines can grab them.
 *
 * TX:
 *	in the generic_txsync() routine, netmap buffers are copied
 *	(or linked, in the future) to the preallocated mbufs
 *	and pushed to the transmit queue. Some of these mbufs
 *	(those with NS_REPORT, or otherwise every half ring)
 *	have the refcount=1, others have refcount=2.
 *	When the destructor is invoked, we take that as
 *	a notification that all mbufs up to that one in
 *	the specific ring have been completed, and generate
 *	the equivalent of a transmit interrupt.
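 *
 *	(When the adapter runs in txqdisc mode -- see gna->txqdisc
 *	below -- the destructor event is normally not needed:
 *	generic_netmap_tx_clean() detects completed transmissions by
 *	looking at MBUF_QUEUED() and at the mbuf refcount instead.)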
 *
 * RX:
 *	Incoming packets are intercepted by generic_rx_handler() and
 *	queued on the per-ring rx_queue (the mbq described above);
 *	generic_netmap_rxsync() then copies their payload into the
 *	netmap slots and frees the mbufs.
 *
 */

#ifdef __FreeBSD__

#include <sys/cdefs.h> /* prerequisite */
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/lock.h>   /* PROT_EXEC */
#include <sys/rwlock.h>
#include <sys/socket.h> /* sockaddrs */
#include <sys/selinfo.h>
#include <net/if.h>
#include <net/if_var.h>
#include <machine/bus.h>        /* bus_dmamap_* in netmap_kern.h */

// XXX temporary - D() defined here
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <dev/netmap/netmap_mem2.h>

#define rtnl_lock()	ND("rtnl_lock called")
#define rtnl_unlock()	ND("rtnl_unlock called")
#define MBUF_RXQ(m)	((m)->m_pkthdr.flowid)
#define smp_mb()

/*
 * FreeBSD mbuf allocator/deallocator in emulation mode:
 */
#if __FreeBSD_version < 1100000

/*
 * For older versions of FreeBSD:
 *
 * We allocate EXT_PACKET mbuf+clusters, but need to set M_NOFREE
 * so that the destructor, if invoked, will not free the packet.
 * In principle we should set the destructor only on demand,
 * but since there might be a race we had better do it at allocation
 * time. As a consequence, we also need to set the destructor, or we
 * would leak buffers.
 */

/* mbuf destructor: we also need to change the type to EXT_EXTREF,
 * add an M_NOFREE flag, and then clear the flag and
 * chain into uma_zfree(zone_pack, mf)
 * (or reinstall the buffer ?)
 */
#define SET_MBUF_DESTRUCTOR(m, fn)	do {		\
	(m)->m_ext.ext_free = (void *)fn;		\
	(m)->m_ext.ext_type = EXT_EXTREF;		\
} while (0)

static int
void_mbuf_dtor(struct mbuf *m, void *arg1, void *arg2)
{
	/* restore original mbuf */
	m->m_ext.ext_buf = m->m_data = m->m_ext.ext_arg1;
	m->m_ext.ext_arg1 = NULL;
	m->m_ext.ext_type = EXT_PACKET;
	m->m_ext.ext_free = NULL;
	if (MBUF_REFCNT(m) == 0)
		SET_MBUF_REFCNT(m, 1);
	uma_zfree(zone_pack, m);

	return 0;
}

static inline struct mbuf *
nm_os_get_mbuf(struct ifnet *ifp, int len)
{
	struct mbuf *m;

	(void)ifp;
	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m) {
		/* m_getcl() (mb_ctor_mbuf) has an assert that checks that
		 * the M_NOFREE flag is not specified as third argument,
		 * so we have to set M_NOFREE after m_getcl(). */
		m->m_flags |= M_NOFREE;
		m->m_ext.ext_arg1 = m->m_ext.ext_buf; // XXX save
		m->m_ext.ext_free = (void *)void_mbuf_dtor;
		m->m_ext.ext_type = EXT_EXTREF;
		ND(5, "create m %p refcnt %d", m, MBUF_REFCNT(m));
	}
	return m;
}

#else /* __FreeBSD_version >= 1100000 */

/*
 * Newer versions of FreeBSD, using a straightforward scheme.
 *
 * We allocate mbufs with m_gethdr(), since the mbuf header is needed
 * by the driver. We also attach custom external storage, which in
 * this case is a netmap buffer. When calling m_extadd(), however,
 * we pass a NULL address, since the real address (and length) will
 * be filled in by nm_os_generic_xmit_frame() right before calling
 * if_transmit().
 *
 * The dtor function does nothing; however, we need it since
 * mb_free_ext() has a KASSERT() checking that the mbuf dtor function
 * is not NULL.
 */

static void void_mbuf_dtor(struct mbuf *m, void *arg1, void *arg2) { }

#define SET_MBUF_DESTRUCTOR(m, fn)	do {			\
	(m)->m_ext.ext_free = (fn != NULL) ?			\
		(void *)fn : (void *)void_mbuf_dtor;		\
} while (0)

static inline struct mbuf *
nm_os_get_mbuf(struct ifnet *ifp, int len)
{
	struct mbuf *m;

	(void)ifp;
	(void)len;

	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m == NULL) {
		return m;
	}

	m_extadd(m, NULL /* buf */, 0 /* size */, void_mbuf_dtor,
			NULL, NULL, 0, EXT_NET_DRV);

	return m;
}

#endif /* __FreeBSD_version >= 1100000 */

#elif defined _WIN32

#include "win_glue.h"

#define rtnl_lock()	ND("rtnl_lock called")
#define rtnl_unlock()	ND("rtnl_unlock called")
#define MBUF_TXQ(m)	0 //((m)->m_pkthdr.flowid)
#define MBUF_RXQ(m)	0 //((m)->m_pkthdr.flowid)
#define smp_mb()	//XXX: to be correctly defined

#else /* linux */

#include "bsd_glue.h"

#include <linux/rtnetlink.h>	/* rtnl_[un]lock() */
#include <linux/ethtool.h>	/* struct ethtool_ops, get_ringparam */
#include <linux/hrtimer.h>

static inline struct mbuf *
nm_os_get_mbuf(struct ifnet *ifp, int len)
{
	return alloc_skb(ifp->needed_headroom + len +
			 ifp->needed_tailroom, GFP_ATOMIC);
}

#endif /* linux */


/* Common headers. */
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <dev/netmap/netmap_mem2.h>


#define for_each_kring_n(_i, _k, _karr, _n) \
	for (_k = _karr, _i = 0; _i < _n; (_k)++, (_i)++)

#define for_each_tx_kring(_i, _k, _na) \
	for_each_kring_n(_i, _k, (_na)->tx_rings, (_na)->num_tx_rings)
#define for_each_tx_kring_h(_i, _k, _na) \
	for_each_kring_n(_i, _k, (_na)->tx_rings, (_na)->num_tx_rings + 1)

#define for_each_rx_kring(_i, _k, _na) \
	for_each_kring_n(_i, _k, (_na)->rx_rings, (_na)->num_rx_rings)
#define for_each_rx_kring_h(_i, _k, _na) \
	for_each_kring_n(_i, _k, (_na)->rx_rings, (_na)->num_rx_rings + 1)


/* ======================== PERFORMANCE STATISTICS =========================== */

#ifdef RATE_GENERIC
#define IFRATE(x) x
struct rate_stats {
	unsigned long txpkt;
	unsigned long txsync;
	unsigned long txirq;
	unsigned long txrepl;
	unsigned long txdrop;
	unsigned long rxpkt;
	unsigned long rxirq;
	unsigned long rxsync;
};

struct rate_context {
	unsigned refcount;
	struct timer_list timer;
	struct rate_stats new;
	struct rate_stats old;
};

#define RATE_PRINTK(_NAME_) \
	printk( #_NAME_ " = %lu Hz\n", (cur._NAME_ - ctx->old._NAME_)/RATE_PERIOD);
#define RATE_PERIOD  2
static void rate_callback(unsigned long arg)
{
	struct rate_context *ctx = (struct rate_context *)arg;
	struct rate_stats cur = ctx->new;
	int r;

	RATE_PRINTK(txpkt);
	RATE_PRINTK(txsync);
	RATE_PRINTK(txirq);
	RATE_PRINTK(txrepl);
	RATE_PRINTK(txdrop);
	RATE_PRINTK(rxpkt);
	RATE_PRINTK(rxsync);
	RATE_PRINTK(rxirq);
	printk("\n");

	ctx->old = cur;
	r = mod_timer(&ctx->timer, jiffies +
			msecs_to_jiffies(RATE_PERIOD * 1000));
	if (unlikely(r))
		D("[v1000] Error: mod_timer()");
}

static struct rate_context rate_ctx;

void generic_rate(int txp, int txs, int txi, int rxp, int rxs, int rxi)
{
	if (txp) rate_ctx.new.txpkt++;
	if (txs) rate_ctx.new.txsync++;
	if (txi) rate_ctx.new.txirq++;
	if (rxp) rate_ctx.new.rxpkt++;
	if (rxs) rate_ctx.new.rxsync++;
	if (rxi) rate_ctx.new.rxirq++;
}

#else /* !RATE */
#define IFRATE(x)
#endif /* !RATE */


/* ========== GENERIC (EMULATED) NETMAP ADAPTER SUPPORT ============= */

/*
 * Wrapper used by the generic adapter layer to notify
 * the poller threads. Unlike netmap_rx_irq(), we check
 * only NAF_NETMAP_ON instead of NAF_NATIVE_ON to enable the irq.
 */
void
netmap_generic_irq(struct netmap_adapter *na, u_int q, u_int *work_done)
{
	if (unlikely(!nm_netmap_on(na)))
		return;

	netmap_common_irq(na, q, work_done);
#ifdef RATE_GENERIC
	if (work_done)
		rate_ctx.new.rxirq++;
	else
		rate_ctx.new.txirq++;
#endif /* RATE_GENERIC */
}

static int
generic_netmap_unregister(struct netmap_adapter *na)
{
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
	struct netmap_kring *kring = NULL;
	int i, r;

	if (na->active_fds == 0) {
		rtnl_lock();

		na->na_flags &= ~NAF_NETMAP_ON;

		/* Release packet steering control. */
		nm_os_catch_tx(gna, 0);

		/* Stop intercepting packets on the RX path. */
		nm_os_catch_rx(gna, 0);

		rtnl_unlock();
	}

	for_each_rx_kring_h(r, kring, na) {
		if (nm_kring_pending_off(kring)) {
			D("Emulated adapter: ring '%s' deactivated", kring->name);
			kring->nr_mode = NKR_NETMAP_OFF;
		}
	}
	for_each_tx_kring_h(r, kring, na) {
		if (nm_kring_pending_off(kring)) {
			kring->nr_mode = NKR_NETMAP_OFF;
			D("Emulated adapter: ring '%s' deactivated", kring->name);
		}
	}

	for_each_rx_kring(r, kring, na) {
		/* Free the mbufs still pending in the RX queues,
		 * that did not end up in the corresponding netmap
		 * RX rings. */
		mbq_safe_purge(&kring->rx_queue);
		nm_os_mitigation_cleanup(&gna->mit[r]);
	}

	/* Decrement the reference counter for the mbufs in the
	 * TX pools. These mbufs can still be pending in drivers
	 * (e.g. this happens with the virtio-net driver, which
	 * does lazy reclaiming of transmitted mbufs). */
	for_each_tx_kring(r, kring, na) {
		/* We must remove the destructor on the TX event,
		 * because the destructor invokes netmap code, and
		 * the netmap module may disappear before the
		 * TX event is consumed. */
		mtx_lock_spin(&kring->tx_event_lock);
		if (kring->tx_event) {
			SET_MBUF_DESTRUCTOR(kring->tx_event, NULL);
		}
		kring->tx_event = NULL;
		mtx_unlock_spin(&kring->tx_event_lock);
	}

	if (na->active_fds == 0) {
		nm_os_free(gna->mit);

		for_each_rx_kring(r, kring, na) {
			mbq_safe_fini(&kring->rx_queue);
		}

		for_each_tx_kring(r, kring, na) {
			mtx_destroy(&kring->tx_event_lock);
			if (kring->tx_pool == NULL) {
				continue;
			}

			for (i = 0; i < na->num_tx_desc; i++) {
				if (kring->tx_pool[i]) {
					m_freem(kring->tx_pool[i]);
				}
			}
			nm_os_free(kring->tx_pool);
			kring->tx_pool = NULL;
		}

#ifdef RATE_GENERIC
		if (--rate_ctx.refcount == 0) {
			D("del_timer()");
			del_timer(&rate_ctx.timer);
		}
#endif
		D("Emulated adapter for %s deactivated", na->name);
	}

	return 0;
}

/* Enable/disable netmap mode for a generic network interface. */
static int
generic_netmap_register(struct netmap_adapter *na, int enable)
{
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
	struct netmap_kring *kring = NULL;
	int error;
	int i, r;

	if (!na) {
		return EINVAL;
	}

	if (!enable) {
		/* This is actually an unregif. */
		return generic_netmap_unregister(na);
	}

	if (na->active_fds == 0) {
		D("Emulated adapter for %s activated", na->name);
		/* Do all memory allocations when (na->active_fds == 0), to
		 * simplify error management. */

		/* Allocate memory for mitigation support on all the rx queues. */
		gna->mit = nm_os_malloc(na->num_rx_rings * sizeof(struct nm_generic_mit));
		if (!gna->mit) {
			D("mitigation allocation failed");
			error = ENOMEM;
			goto out;
		}

		for_each_rx_kring(r, kring, na) {
			/* Init mitigation support. */
			nm_os_mitigation_init(&gna->mit[r], r, na);

			/* Initialize the rx queue, as generic_rx_handler() can
			 * be called as soon as nm_os_catch_rx() returns.
			 */
			mbq_safe_init(&kring->rx_queue);
		}

		/*
		 * Prepare mbuf pools (parallel to the tx rings), for packet
		 * transmission. Don't preallocate the mbufs here, it's simpler
		 * to leave this task to txsync.
		 */
		for_each_tx_kring(r, kring, na) {
			kring->tx_pool = NULL;
		}
		for_each_tx_kring(r, kring, na) {
			kring->tx_pool =
				nm_os_malloc(na->num_tx_desc * sizeof(struct mbuf *));
			if (!kring->tx_pool) {
				D("tx_pool allocation failed");
				error = ENOMEM;
				goto free_tx_pools;
			}
			mtx_init(&kring->tx_event_lock, "tx_event_lock",
				 NULL, MTX_SPIN);
		}
	}

	for_each_rx_kring_h(r, kring, na) {
		if (nm_kring_pending_on(kring)) {
			D("Emulated adapter: ring '%s' activated", kring->name);
			kring->nr_mode = NKR_NETMAP_ON;
		}
	}
	for_each_tx_kring_h(r, kring, na) {
		if (nm_kring_pending_on(kring)) {
			D("Emulated adapter: ring '%s' activated", kring->name);
			kring->nr_mode = NKR_NETMAP_ON;
		}
	}

	for_each_tx_kring(r, kring, na) {
		/* Initialize tx_pool and tx_event. */
		for (i = 0; i < na->num_tx_desc; i++) {
			kring->tx_pool[i] = NULL;
		}

		kring->tx_event = NULL;
	}

	if (na->active_fds == 0) {
		rtnl_lock();

		/* Prepare to intercept incoming traffic. */
		error = nm_os_catch_rx(gna, 1);
		if (error) {
			D("nm_os_catch_rx(1) failed (%d)", error);
			goto register_handler;
		}

		/* Make netmap control the packet steering. */
		error = nm_os_catch_tx(gna, 1);
		if (error) {
			D("nm_os_catch_tx(1) failed (%d)", error);
			goto catch_rx;
		}

		rtnl_unlock();

		na->na_flags |= NAF_NETMAP_ON;

#ifdef RATE_GENERIC
		if (rate_ctx.refcount == 0) {
			D("setup_timer()");
			memset(&rate_ctx, 0, sizeof(rate_ctx));
			setup_timer(&rate_ctx.timer, &rate_callback, (unsigned long)&rate_ctx);
			if (mod_timer(&rate_ctx.timer, jiffies + msecs_to_jiffies(1500))) {
				D("Error: mod_timer()");
			}
		}
		rate_ctx.refcount++;
#endif /* RATE */
	}

	return 0;

	/* Here (na->active_fds == 0) holds. */
catch_rx:
	nm_os_catch_rx(gna, 0);
register_handler:
	rtnl_unlock();
free_tx_pools:
	for_each_tx_kring(r, kring, na) {
		mtx_destroy(&kring->tx_event_lock);
		if (kring->tx_pool == NULL) {
			continue;
		}
		nm_os_free(kring->tx_pool);
		kring->tx_pool = NULL;
	}
	for_each_rx_kring(r, kring, na) {
		mbq_safe_fini(&kring->rx_queue);
	}
	nm_os_free(gna->mit);
out:

	return error;
}

/*
 * Callback invoked when the device driver frees an mbuf used
 * by netmap to transmit a packet. This usually happens when
 * the NIC notifies the driver that transmission is completed.
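 * The callback may run in interrupt context, so it only takes the
 * per-ring tx_event_lock spinlock and defers the actual ring
 * bookkeeping to the next txsync (see generic_netmap_tx_clean()).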
 */
static void
generic_mbuf_destructor(struct mbuf *m)
{
	struct netmap_adapter *na = NA(GEN_TX_MBUF_IFP(m));
	struct netmap_kring *kring;
	unsigned int r = MBUF_TXQ(m);
	unsigned int r_orig = r;

	if (unlikely(!nm_netmap_on(na) || r >= na->num_tx_rings)) {
		D("Error: no netmap adapter on device %p",
		  GEN_TX_MBUF_IFP(m));
		return;
	}

	/*
	 * First, clear the event mbuf.
	 * In principle, the event 'm' should match the one stored
	 * on ring 'r'. However, we check it explicitly to stay
	 * safe against lower layers (qdisc, driver, etc.) changing
	 * MBUF_TXQ(m) under our feet. If the match is not found
	 * on 'r', we try to see if it belongs to some other ring.
	 */
	for (;;) {
		bool match = false;

		kring = &na->tx_rings[r];
		mtx_lock_spin(&kring->tx_event_lock);
		if (kring->tx_event == m) {
			kring->tx_event = NULL;
			match = true;
		}
		mtx_unlock_spin(&kring->tx_event_lock);

		if (match) {
			if (r != r_orig) {
				RD(1, "event %p migrated: ring %u --> %u",
				   m, r_orig, r);
			}
			break;
		}

		if (++r == na->num_tx_rings) r = 0;

		if (r == r_orig) {
			RD(1, "Cannot match event %p", m);
			return;
		}
	}

	/* Second, wake up clients. They will reclaim the event through
	 * txsync. */
	netmap_generic_irq(na, r, NULL);
#ifdef __FreeBSD__
	void_mbuf_dtor(m, NULL, NULL);
#endif
}

/* Record completed transmissions and update hwtail.
 *
 * The oldest tx buffer not yet completed is at nr_hwtail + 1,
 * nr_hwcur is the first unsent buffer.
 */
static u_int
generic_netmap_tx_clean(struct netmap_kring *kring, int txqdisc)
{
	u_int const lim = kring->nkr_num_slots - 1;
	u_int nm_i = nm_next(kring->nr_hwtail, lim);
	u_int hwcur = kring->nr_hwcur;
	u_int n = 0;
	struct mbuf **tx_pool = kring->tx_pool;

	ND("hwcur = %d, hwtail = %d", kring->nr_hwcur, kring->nr_hwtail);

	while (nm_i != hwcur) { /* buffers not completed */
		struct mbuf *m = tx_pool[nm_i];

		if (txqdisc) {
			if (m == NULL) {
				/* Nothing to do, this is going
				 * to be replenished. */
				RD(3, "Is this happening?");

			} else if (MBUF_QUEUED(m)) {
				break; /* Not dequeued yet. */

			} else if (MBUF_REFCNT(m) != 1) {
				/* This mbuf has been dequeued but is still busy
				 * (refcount is 2).
				 * Leave it to the driver and replenish. */
				m_freem(m);
				tx_pool[nm_i] = NULL;
			}

		} else {
			if (unlikely(m == NULL)) {
				int event_consumed;

				/* This slot was used to place an event. */
				mtx_lock_spin(&kring->tx_event_lock);
				event_consumed = (kring->tx_event == NULL);
				mtx_unlock_spin(&kring->tx_event_lock);
				if (!event_consumed) {
					/* The event has not been consumed yet,
					 * still busy in the driver. */
					break;
				}
				/* The event has been consumed, we can go
				 * ahead. */

			} else if (MBUF_REFCNT(m) != 1) {
				/* This mbuf is still busy: its refcnt is 2. */
				break;
			}
		}

		n++;
		nm_i = nm_next(nm_i, lim);
	}
	kring->nr_hwtail = nm_prev(nm_i, lim);
	ND("tx completed [%d] -> hwtail %d", n, kring->nr_hwtail);

	return n;
}

/* Compute a slot index in the middle between inf and sup.
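 * For example (illustrative only), with lim = 1023 (1024 slots),
 * inf = 1000 and sup = 100 the window wraps around, so
 * e = (100 + 1024 + 1000) / 2 = 1062, which is folded back to
 * 1062 - 1024 = 38, i.e. roughly halfway along the 124-slot span
 * that goes from 1000 forward to 100.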
 */
static inline u_int
ring_middle(u_int inf, u_int sup, u_int lim)
{
	u_int n = lim + 1;
	u_int e;

	if (sup >= inf) {
		e = (sup + inf) / 2;
	} else { /* wrap around */
		e = (sup + n + inf) / 2;
		if (e >= n) {
			e -= n;
		}
	}

	if (unlikely(e >= n)) {
		D("This cannot happen");
		e = 0;
	}

	return e;
}

static void
generic_set_tx_event(struct netmap_kring *kring, u_int hwcur)
{
	u_int lim = kring->nkr_num_slots - 1;
	struct mbuf *m;
	u_int e;
	u_int ntc = nm_next(kring->nr_hwtail, lim); /* next to clean */

	if (ntc == hwcur) {
		return; /* all buffers are free */
	}

	/*
	 * We have pending packets in the driver between hwtail+1
	 * and hwcur, and we have to choose one of these slots to
	 * generate a notification.
	 * There is a race but this is only called within txsync which
	 * does a double check.
	 */
#if 0
	/* Choose a slot in the middle, so that we don't risk ending
	 * up in a situation where the client continuously wakes up,
	 * fills one or a few TX slots and goes to sleep again. */
	e = ring_middle(ntc, hwcur, lim);
#else
	/* Choose the first pending slot, to be safe against driver
	 * reordering mbuf transmissions. */
	e = ntc;
#endif

	m = kring->tx_pool[e];
	if (m == NULL) {
		/* An event is already in place. */
		return;
	}

	mtx_lock_spin(&kring->tx_event_lock);
	if (kring->tx_event) {
		/* An event is already in place. */
		mtx_unlock_spin(&kring->tx_event_lock);
		return;
	}

	SET_MBUF_DESTRUCTOR(m, generic_mbuf_destructor);
	kring->tx_event = m;
	mtx_unlock_spin(&kring->tx_event_lock);

	kring->tx_pool[e] = NULL;

	ND(5, "Request Event at %d mbuf %p refcnt %d", e, m, m ? MBUF_REFCNT(m) : -2);

	/* Decrement the refcount. This will free it if we lose the race
	 * with the driver. */
	m_freem(m);
	smp_mb();
}


/*
 * generic_netmap_txsync() transforms netmap buffers into mbufs
 * and passes them to the standard device driver
 * (ndo_start_xmit() or ifp->if_transmit()).
 * On Linux this is not done directly, but through dev_queue_xmit(),
 * since it implements TX flow control (and takes some locks).
 */
static int
generic_netmap_txsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int nm_i;	/* index into the netmap ring */ // j
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	u_int ring_nr = kring->ring_id;

	IFRATE(rate_ctx.new.txsync++);

	rmb();

	/*
	 * First part: process new packets to send.
	 */
	nm_i = kring->nr_hwcur;
	if (nm_i != head) {	/* we have new packets to send */
		struct nm_os_gen_arg a;
		u_int event = -1;

		if (gna->txqdisc && nm_kr_txempty(kring)) {
			/* In txqdisc mode, we ask for a delayed notification,
			 * but only when cur == hwtail, which means that the
			 * client is going to block.
			 */
			event = ring_middle(nm_i, head, lim);
			ND(3, "Place txqdisc event (hwcur=%u,event=%u,"
			      "head=%u,hwtail=%u)", nm_i, event, head,
			      kring->nr_hwtail);
		}

		a.ifp = ifp;
		a.ring_nr = ring_nr;
		a.head = a.tail = NULL;

		while (nm_i != head) {
			struct netmap_slot *slot = &ring->slot[nm_i];
			u_int len = slot->len;
			void *addr = NMB(na, slot);
			/* device-specific */
			struct mbuf *m;
			int tx_ret;

			NM_CHECK_ADDR_LEN(na, addr, len);

			/* Take an mbuf from the tx pool (replenishing the pool
			 * entry if necessary) and copy in the user packet. */
			m = kring->tx_pool[nm_i];
			if (unlikely(m == NULL)) {
				kring->tx_pool[nm_i] = m =
					nm_os_get_mbuf(ifp, NETMAP_BUF_SIZE(na));
				if (m == NULL) {
					RD(2, "Failed to replenish mbuf");
					/* Here we could schedule a timer which
					 * retries to replenish after a while,
					 * and notifies the client when it
					 * manages to replenish some slots. In
					 * any case we break early to avoid
					 * crashes. */
					break;
				}
				IFRATE(rate_ctx.new.txrepl++);
			}

			a.m = m;
			a.addr = addr;
			a.len = len;
			a.qevent = (nm_i == event);
			/* When not in txqdisc mode, we should ask for
			 * notifications when NS_REPORT is set, or roughly
			 * every half ring. To optimize this, we set a
			 * notification event when the client runs out of
			 * TX ring space, or when transmission fails. In
			 * the latter case we also break early.
			 */
			tx_ret = nm_os_generic_xmit_frame(&a);
			if (unlikely(tx_ret)) {
				if (!gna->txqdisc) {
					/*
					 * No room for this mbuf in the device driver.
					 * Request a notification FOR A PREVIOUS MBUF,
					 * then call generic_netmap_tx_clean(kring) to do the
					 * double check and see if we can free more buffers.
					 * If there is space continue, else break;
					 * NOTE: the double check is necessary if the problem
					 * occurs in the txsync call after selrecord().
					 * Also, we need some way to tell the caller that not
					 * all buffers were queued onto the device (this was
					 * not a problem with the native netmap driver where
					 * space is preallocated). The bridge has a similar
					 * problem and we solve it there by dropping the
					 * excess packets.
					 */
					generic_set_tx_event(kring, nm_i);
					if (generic_netmap_tx_clean(kring, gna->txqdisc)) {
						/* space now available */
						continue;
					} else {
						break;
					}
				}

				/* In txqdisc mode, the netmap-aware qdisc
				 * queue has the same length as the number of
				 * netmap slots (N). Since tail is advanced
				 * only when packets are dequeued, qdisc
				 * queue overrun cannot happen, so
				 * nm_os_generic_xmit_frame() did not fail
				 * because of that.
				 * However, packets can be dropped because
				 * the carrier is off, or because our qdisc is
				 * being deactivated, or possibly for other
				 * reasons. In these cases, we just let the
				 * packet be dropped. */
				IFRATE(rate_ctx.new.txdrop++);
			}

			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
			nm_i = nm_next(nm_i, lim);
			IFRATE(rate_ctx.new.txpkt++);
		}
		if (a.head != NULL) {
			a.addr = NULL;
			nm_os_generic_xmit_frame(&a);
		}
		/* Update hwcur to the next slot to transmit. Here nm_i
		 * is not necessarily head, as we could have broken out early. */
		kring->nr_hwcur = nm_i;
	}

	/*
	 * Second, reclaim completed buffers
	 */
	if (!gna->txqdisc && (flags & NAF_FORCE_RECLAIM || nm_kr_txempty(kring))) {
		/* No more available slots?
		 * Set a notification event
		 * on a netmap slot that will be cleaned in the future.
		 * No doublecheck is performed, since txsync() will be
		 * called twice by netmap_poll().
		 */
		generic_set_tx_event(kring, nm_i);
	}

	generic_netmap_tx_clean(kring, gna->txqdisc);

	return 0;
}


/*
 * This handler is registered (through nm_os_catch_rx())
 * within the attached network interface
 * in the RX subsystem, so that every mbuf passed up by
 * the driver can be stolen before it reaches the network stack.
 * Stolen packets are put in a queue where the
 * generic_netmap_rxsync() callback can extract them.
 * Returns 1 if the packet was stolen, 0 otherwise.
 */
int
generic_rx_handler(struct ifnet *ifp, struct mbuf *m)
{
	struct netmap_adapter *na = NA(ifp);
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
	struct netmap_kring *kring;
	u_int work_done;
	u_int r = MBUF_RXQ(m); /* receive ring number */

	if (r >= na->num_rx_rings) {
		r = r % na->num_rx_rings;
	}

	kring = &na->rx_rings[r];

	if (kring->nr_mode == NKR_NETMAP_OFF) {
		/* We must not intercept this mbuf. */
		return 0;
	}

	/* limit the size of the queue */
	if (unlikely(!gna->rxsg && MBUF_LEN(m) > NETMAP_BUF_SIZE(na))) {
		/* This may happen when GRO/LRO features are enabled for
		 * the NIC driver while the generic adapter does not
		 * support RX scatter-gather. */
		RD(2, "Warning: driver pushed up big packet "
		      "(size=%d)", (int)MBUF_LEN(m));
		m_freem(m);
	} else if (unlikely(mbq_len(&kring->rx_queue) > 1024)) {
		m_freem(m);
	} else {
		mbq_safe_enqueue(&kring->rx_queue, m);
	}

	if (netmap_generic_mit < 32768) {
		/* no rx mitigation, pass notification up */
		netmap_generic_irq(na, r, &work_done);
	} else {
		/* same as send combining: filter the notification if there is
		 * a pending timer, otherwise pass it up and start a timer.
		 */
		if (likely(nm_os_mitigation_active(&gna->mit[r]))) {
			/* Record that there is some pending work. */
			gna->mit[r].mit_pending = 1;
		} else {
			netmap_generic_irq(na, r, &work_done);
			nm_os_mitigation_start(&gna->mit[r]);
		}
	}

	/* We have intercepted the mbuf. */
	return 1;
}

/*
 * generic_netmap_rxsync() extracts mbufs from the queue filled by
 * generic_rx_handler() and puts their content in the netmap
 * receive ring.
 * Access must be protected because the rx handler is asynchronous.
 */
static int
generic_netmap_rxsync(struct netmap_kring *kring, int flags)
{
	struct netmap_ring *ring = kring->ring;
	struct netmap_adapter *na = kring->na;
	u_int nm_i;	/* index into the netmap ring */ //j,
	u_int n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;

	/* Adapter-specific variables. */
	uint16_t slot_flags = kring->nkr_slot_flags;
	u_int nm_buf_len = NETMAP_BUF_SIZE(na);
	struct mbq tmpq;
	struct mbuf *m;
	int avail; /* in bytes */
	int mlen;
	int copy;

	if (head > lim)
		return netmap_ring_reinit(kring);

	IFRATE(rate_ctx.new.rxsync++);

	/*
	 * First part: skip past packets that userspace has released.
	 * This can possibly make room for the second part.
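	 * Here we only clear NS_BUF_CHANGED: the generic adapter copies
	 * payloads out of the mbufs, so released slots do not need to be
	 * handed back to the NIC.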
	 */
	nm_i = kring->nr_hwcur;
	if (nm_i != head) {
		/* Userspace has released some packets. */
		for (n = 0; nm_i != head; n++) {
			struct netmap_slot *slot = &ring->slot[nm_i];

			slot->flags &= ~NS_BUF_CHANGED;
			nm_i = nm_next(nm_i, lim);
		}
		kring->nr_hwcur = head;
	}

	/*
	 * Second part: import newly received packets.
	 */
	if (!netmap_no_pendintr && !force_update) {
		return 0;
	}

	nm_i = kring->nr_hwtail; /* First empty slot in the receive ring. */

	/* Compute the available space (in bytes) in this netmap ring.
	 * The first slot that is not considered is the one before
	 * nr_hwcur. */

	avail = nm_prev(kring->nr_hwcur, lim) - nm_i;
	if (avail < 0)
		avail += lim + 1;
	avail *= nm_buf_len;

	/* First pass: While holding the lock on the RX mbuf queue,
	 * extract as many mbufs as fit in the available space,
	 * and put them in a temporary queue.
	 * To avoid performing a per-mbuf division (mlen / nm_buf_len) to
	 * update avail, we do the update in a while loop that we
	 * also use to set the RX slots, but without performing the copy. */
	mbq_init(&tmpq);
	mbq_lock(&kring->rx_queue);
	for (n = 0;; n++) {
		m = mbq_peek(&kring->rx_queue);
		if (!m) {
			/* No more packets from the driver. */
			break;
		}

		mlen = MBUF_LEN(m);
		if (mlen > avail) {
			/* No more space in the ring. */
			break;
		}

		mbq_dequeue(&kring->rx_queue);

		while (mlen) {
			copy = nm_buf_len;
			if (mlen < copy) {
				copy = mlen;
			}
			mlen -= copy;
			avail -= nm_buf_len;

			ring->slot[nm_i].len = copy;
			ring->slot[nm_i].flags = slot_flags | (mlen ? NS_MOREFRAG : 0);
			nm_i = nm_next(nm_i, lim);
		}

		mbq_enqueue(&tmpq, m);
	}
	mbq_unlock(&kring->rx_queue);

	/* Second pass: Drain the temporary queue, going over the used RX slots,
	 * and perform the copy outside of the RX queue lock. */
	nm_i = kring->nr_hwtail;

	for (;;) {
		void *nmaddr;
		int ofs = 0;
		int morefrag;

		m = mbq_dequeue(&tmpq);
		if (!m) {
			break;
		}

		do {
			nmaddr = NMB(na, &ring->slot[nm_i]);
			/* We only check the address here on generic rx rings. */
			if (nmaddr == NETMAP_BUF_BASE(na)) { /* Bad buffer */
				m_freem(m);
				mbq_purge(&tmpq);
				mbq_fini(&tmpq);
				return netmap_ring_reinit(kring);
			}

			copy = ring->slot[nm_i].len;
			m_copydata(m, ofs, copy, nmaddr);
			ofs += copy;
			morefrag = ring->slot[nm_i].flags & NS_MOREFRAG;
			nm_i = nm_next(nm_i, lim);
		} while (morefrag);

		m_freem(m);
	}

	mbq_fini(&tmpq);

	if (n) {
		kring->nr_hwtail = nm_i;
		IFRATE(rate_ctx.new.rxpkt += n);
	}
	kring->nr_kflags &= ~NKR_PENDINTR;

	return 0;
}

static void
generic_netmap_dtor(struct netmap_adapter *na)
{
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
	struct ifnet *ifp = netmap_generic_getifp(gna);
	struct netmap_adapter *prev_na = gna->prev;

	if (prev_na != NULL) {
		netmap_adapter_put(prev_na);
		if (nm_iszombie(na)) {
			/*
			 * The driver has been removed without releasing
			 * the reference, so we need to do it here.
			 */
			netmap_adapter_put(prev_na);
		}
		D("Native netmap adapter %p restored", prev_na);
	}
	NM_ATTACH_NA(ifp, prev_na);
	/*
	 * netmap_detach_common(), which is called after this function,
	 * overrides WNA(ifp) if na->ifp is not NULL.
	 */
	na->ifp = NULL;
	D("Emulated netmap adapter for %s destroyed", na->name);
}

int
na_is_generic(struct netmap_adapter *na)
{
	return na->nm_register == generic_netmap_register;
}

/*
 * generic_netmap_attach() makes it possible to use netmap on
 * a device without native netmap support.
 * This is less performant than native support but potentially
 * faster than raw sockets or similar schemes.
 *
 * In this "emulated" mode, netmap rings do not necessarily
 * have the same size as those in the NIC. We use a default
 * value and possibly override it if the OS has ways to fetch the
 * actual configuration.
 */
int
generic_netmap_attach(struct ifnet *ifp)
{
	struct netmap_adapter *na;
	struct netmap_generic_adapter *gna;
	int retval;
	u_int num_tx_desc, num_rx_desc;

	num_tx_desc = num_rx_desc = netmap_generic_ringsize; /* starting point */

	nm_os_generic_find_num_desc(ifp, &num_tx_desc, &num_rx_desc); /* ignore errors */
	ND("Netmap ring size: TX = %d, RX = %d", num_tx_desc, num_rx_desc);
	if (num_tx_desc == 0 || num_rx_desc == 0) {
		D("Device has no hw slots (tx %u, rx %u)", num_tx_desc, num_rx_desc);
		return EINVAL;
	}

	gna = nm_os_malloc(sizeof(*gna));
	if (gna == NULL) {
		D("no memory on attach, give up");
		return ENOMEM;
	}
	na = (struct netmap_adapter *)gna;
	strncpy(na->name, ifp->if_xname, sizeof(na->name));
	na->ifp = ifp;
	na->num_tx_desc = num_tx_desc;
	na->num_rx_desc = num_rx_desc;
	na->nm_register = &generic_netmap_register;
	na->nm_txsync = &generic_netmap_txsync;
	na->nm_rxsync = &generic_netmap_rxsync;
	na->nm_dtor = &generic_netmap_dtor;
	/* when using generic, NAF_NETMAP_ON is set so we force
	 * NAF_SKIP_INTR to use the regular interrupt handler
	 */
	na->na_flags = NAF_SKIP_INTR | NAF_HOST_RINGS;

	ND("[GNA] num_tx_queues(%d), real_num_tx_queues(%d), len(%lu)",
			ifp->num_tx_queues, ifp->real_num_tx_queues,
			ifp->tx_queue_len);
	ND("[GNA] num_rx_queues(%d), real_num_rx_queues(%d)",
			ifp->num_rx_queues, ifp->real_num_rx_queues);

	nm_os_generic_find_num_queues(ifp, &na->num_tx_rings, &na->num_rx_rings);

	retval = netmap_attach_common(na);
	if (retval) {
		nm_os_free(gna);
		return retval;
	}

	gna->prev = NA(ifp); /* save old na */
	if (gna->prev != NULL) {
		netmap_adapter_get(gna->prev);
	}
	NM_ATTACH_NA(ifp, na);

	nm_os_generic_set_features(gna);

	D("Emulated adapter for %s created (prev was %p)", na->name, gna->prev);

	return retval;
}
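
/*
 * Usage note (illustrative sketch, not part of this module): a userspace
 * client does not need anything special to use the emulated adapter. If
 * the interface lacks native netmap support, the NIOCREGIF path falls
 * back to generic_netmap_attach() above (the exact fallback policy is
 * controlled by the netmap admode sysctl). Assuming the standard
 * netmap_user.h helpers and a hypothetical interface "em0", opening the
 * emulated adapter might look like:
 *
 *	#define NETMAP_WITH_LIBS
 *	#include <net/netmap_user.h>
 *
 *	struct nm_desc *d = nm_open("netmap:em0", NULL, 0, NULL);
 *	if (d != NULL) {
 *		// d->nifp now points to rings emulated on top of mbufs
 *		nm_close(d);
 *	}
 */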