/*
 * Copyright (C) 2013-2014 Universita` di Pisa. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *   1. Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module implements netmap support on top of standard,
 * unmodified device drivers.
 *
 * A NIOCREGIF request is handled here if the device does not
 * have native support. TX and RX rings are emulated as follows:
 *
 * NIOCREGIF
 *	We preallocate a block of TX mbufs (roughly as many as
 *	tx descriptors; the number is not critical) to speed up
 *	operation during transmissions. The refcount on most of
 *	these buffers is artificially bumped up so we can recycle
 *	them more easily. Also, the destructor is intercepted
 *	so we use it as an interrupt notification to wake up
 *	processes blocked on a poll().
 *
 *	For each receive ring we allocate one "struct mbq"
 *	(an mbuf tailq plus a spinlock). We intercept packets
 *	(through if_input) on the receive path and put them
 *	in the mbq, from which the netmap receive routines
 *	can grab them.
 *
 * TX:
 *	In the generic_txsync() routine, netmap buffers are copied
 *	(or linked, in the future) to the preallocated mbufs
 *	and pushed to the transmit queue. Some of these mbufs
 *	(those with NS_REPORT, or otherwise every half ring)
 *	have their refcount set to 1, the others to 2.
 *	When the destructor is invoked, we take that as
 *	a notification that all mbufs up to that one in
 *	the specific ring have been completed, and generate
 *	the equivalent of a transmit interrupt.
 *
 * RX:
 *	Packets intercepted by generic_rx_handler() are queued on the
 *	per-ring "struct mbq"; generic_netmap_rxsync() then dequeues
 *	them and copies their payload into the netmap receive buffers.
 *
 */

#ifdef __FreeBSD__

#include <sys/cdefs.h> /* prerequisite */
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/lock.h>   /* PROT_EXEC */
#include <sys/rwlock.h>
#include <sys/socket.h> /* sockaddrs */
#include <sys/selinfo.h>
#include <net/if.h>
#include <net/if_var.h>
#include <machine/bus.h>        /* bus_dmamap_* in netmap_kern.h */

// XXX temporary - D() defined here
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <dev/netmap/netmap_mem2.h>

#define rtnl_lock()	ND("rtnl_lock called")
#define rtnl_unlock()	ND("rtnl_unlock called")
#define MBUF_TXQ(m)	((m)->m_pkthdr.flowid)
#define MBUF_RXQ(m)	((m)->m_pkthdr.flowid)
#define smp_mb()

/*
 * FreeBSD mbuf allocator/deallocator in emulation mode:
 *
 * We allocate EXT_PACKET mbuf+clusters, but need to set M_NOFREE
 * so that the destructor, if invoked, will not free the packet.
 * In principle we should set the destructor only on demand,
 * but since there might be a race we better do it on allocation.
 * As a consequence, we also need to set the destructor or we
 * would leak buffers.
 */

/*
 * mbuf wrappers
 */

/*
 * The mbuf destructor also needs to change the type to EXT_EXTREF,
 * add an M_NOFREE flag, and then clear the flag and
 * chain into uma_zfree(zone_pack, mf)
 * (or reinstall the buffer ?)
 *
 * On FreeBSD 9 the destructor is called as ext_free(ext_arg1, ext_arg2),
 * whereas newer versions have ext_free(m, ext_arg1, ext_arg2).
 * For compatibility we set ext_arg1 = m on allocation, so we have
 * the same code on both.
 */
#define SET_MBUF_DESTRUCTOR(m, fn)	do {		\
	(m)->m_ext.ext_free = (void *)fn;		\
	(m)->m_ext.ext_type = EXT_EXTREF;		\
} while (0)

static void
netmap_default_mbuf_destructor(struct mbuf *m)
{
	/* restore original data pointer and type */
	m->m_ext.ext_buf = m->m_data = m->m_ext.ext_arg2;
	m->m_ext.ext_type = EXT_PACKET;
	m->m_ext.ext_free = NULL;
	m->m_ext.ext_arg1 = m->m_ext.ext_arg2 = NULL;
	if (*(m->m_ext.ext_cnt) == 0)
		*(m->m_ext.ext_cnt) = 1;
	uma_zfree(zone_pack, m);
}

static inline struct mbuf *
netmap_get_mbuf(int len)
{
	struct mbuf *m;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR | M_NOFREE);
	if (m) {
		m->m_ext.ext_arg1 = m;			/* FreeBSD 9 compat */
		m->m_ext.ext_arg2 = m->m_ext.ext_buf;	/* save original pointer */
		m->m_ext.ext_free = (void *)netmap_default_mbuf_destructor;
		m->m_ext.ext_type = EXT_EXTREF;
		ND(5, "create m %p refcnt %d", m, *m->m_ext.ext_cnt);
	}
	return m;
}

#define GET_MBUF_REFCNT(m)	((m)->m_ext.ext_cnt ? *(m)->m_ext.ext_cnt : -1)


#else /* linux */

#include "bsd_glue.h"

#include <linux/rtnetlink.h>    /* rtnl_[un]lock() */
#include <linux/ethtool.h>      /* struct ethtool_ops, get_ringparam */
#include <linux/hrtimer.h>

//#define RATE  /* Enables communication statistics. */

//#define REG_RESET

#endif /* linux */
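
/*
 * Illustrative sketch (not compiled): the intended lifecycle of a
 * tx-pool mbuf under the scheme described above. While the buffer sits
 * in the pool its destructor merely recycles it; when a completion
 * notification is wanted, the destructor is swapped for a callback just
 * before the reference is dropped. "my_tx_complete" and the buffer size
 * are placeholders.
 */
#if 0
static void
example_pool_mbuf_lifecycle(void (*my_tx_complete)(struct mbuf *))
{
	struct mbuf *m;

	m = netmap_get_mbuf(2048 /* placeholder size */);
	if (m == NULL)
		return;
	/* ... the mbuf is handed to the driver one or more times ... */

	/* Ask to be notified when the driver releases this mbuf: */
	SET_MBUF_DESTRUCTOR(m, my_tx_complete);
	m_freem(m);	/* drop our reference; the callback runs on the last one */
}
#endif
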
/* Common headers. */
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <dev/netmap/netmap_mem2.h>


/* ======================== usage stats =========================== */

#ifdef RATE
#define IFRATE(x) x
struct rate_stats {
	unsigned long txpkt;
	unsigned long txsync;
	unsigned long txirq;
	unsigned long rxpkt;
	unsigned long rxirq;
	unsigned long rxsync;
};

struct rate_context {
	unsigned refcount;
	struct timer_list timer;
	struct rate_stats new;
	struct rate_stats old;
};

#define RATE_PRINTK(_NAME_) \
	printk( #_NAME_ " = %lu Hz\n", (cur._NAME_ - ctx->old._NAME_)/RATE_PERIOD);
#define RATE_PERIOD  2
static void rate_callback(unsigned long arg)
{
	struct rate_context *ctx = (struct rate_context *)arg;
	struct rate_stats cur = ctx->new;
	int r;

	RATE_PRINTK(txpkt);
	RATE_PRINTK(txsync);
	RATE_PRINTK(txirq);
	RATE_PRINTK(rxpkt);
	RATE_PRINTK(rxsync);
	RATE_PRINTK(rxirq);
	printk("\n");

	ctx->old = cur;
	r = mod_timer(&ctx->timer, jiffies +
			msecs_to_jiffies(RATE_PERIOD * 1000));
	if (unlikely(r))
		D("[v1000] Error: mod_timer()");
}

static struct rate_context rate_ctx;

#else /* !RATE */
#define IFRATE(x)
#endif /* !RATE */


/* =============== GENERIC NETMAP ADAPTER SUPPORT ================= */
#define GENERIC_BUF_SIZE	netmap_buf_size	/* Size of the mbufs in the Tx pool. */

/*
 * Wrapper used by the generic adapter layer to notify
 * the poller threads. Unlike netmap_rx_irq(), we check
 * only IFCAP_NETMAP instead of NAF_NATIVE_ON to enable the irq.
 */
static void
netmap_generic_irq(struct ifnet *ifp, u_int q, u_int *work_done)
{
	if (unlikely(!(ifp->if_capenable & IFCAP_NETMAP)))
		return;

	netmap_common_irq(ifp, q, work_done);
}


/* Enable/disable netmap mode for a generic network interface. */
static int
generic_netmap_register(struct netmap_adapter *na, int enable)
{
	struct ifnet *ifp;
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
	struct mbuf *m;
	int error;
	int i, r;

	if (!na)
		return EINVAL;
	ifp = na->ifp;

#ifdef REG_RESET
	error = ifp->netdev_ops->ndo_stop(ifp);
	if (error) {
		return error;
	}
#endif /* REG_RESET */

	if (enable) { /* Enable netmap mode. */
		/* Initialize the mitigation support on all the rx queues. */
		gna->mit = malloc(na->num_rx_rings * sizeof(struct nm_generic_mit),
				M_DEVBUF, M_NOWAIT | M_ZERO);
		if (!gna->mit) {
			D("mitigation allocation failed");
			error = ENOMEM;
			goto out;
		}
		for (r=0; r<na->num_rx_rings; r++)
			netmap_mitigation_init(&gna->mit[r], na);

		/* Initialize the rx queue, as generic_rx_handler() can
		 * be called as soon as netmap_catch_rx() returns.
		 */
		for (r=0; r<na->num_rx_rings; r++) {
			mbq_safe_init(&na->rx_rings[r].rx_queue);
		}

		/*
		 * Preallocate packet buffers for the tx rings.
		 */
		for (r=0; r<na->num_tx_rings; r++)
			na->tx_rings[r].tx_pool = NULL;
		for (r=0; r<na->num_tx_rings; r++) {
			na->tx_rings[r].tx_pool = malloc(na->num_tx_desc * sizeof(struct mbuf *),
					M_DEVBUF, M_NOWAIT | M_ZERO);
			if (!na->tx_rings[r].tx_pool) {
				D("tx_pool allocation failed");
				error = ENOMEM;
				goto free_tx_pools;
			}
			for (i=0; i<na->num_tx_desc; i++)
				na->tx_rings[r].tx_pool[i] = NULL;
			for (i=0; i<na->num_tx_desc; i++) {
				m = netmap_get_mbuf(GENERIC_BUF_SIZE);
				if (!m) {
					D("tx_pool[%d] allocation failed", i);
					error = ENOMEM;
					goto free_tx_pools;
				}
				na->tx_rings[r].tx_pool[i] = m;
			}
		}
		rtnl_lock();
		/* Prepare to intercept incoming traffic. */
		error = netmap_catch_rx(na, 1);
		if (error) {
			D("netdev_rx_handler_register() failed (%d)", error);
			goto register_handler;
		}
		ifp->if_capenable |= IFCAP_NETMAP;

		/* Make netmap control the packet steering. */
		netmap_catch_tx(gna, 1);

		rtnl_unlock();

#ifdef RATE
		if (rate_ctx.refcount == 0) {
			D("setup_timer()");
			memset(&rate_ctx, 0, sizeof(rate_ctx));
			setup_timer(&rate_ctx.timer, &rate_callback, (unsigned long)&rate_ctx);
			if (mod_timer(&rate_ctx.timer, jiffies + msecs_to_jiffies(1500))) {
				D("Error: mod_timer()");
			}
		}
		rate_ctx.refcount++;
#endif /* RATE */

	} else if (na->tx_rings[0].tx_pool) {
		/* Disable netmap mode. We enter here only if the previous
		 * generic_netmap_register(na, 1) was successful.
		 * If it was not, na->tx_rings[0].tx_pool was set to NULL by the
		 * error handling code below. */
		rtnl_lock();

		ifp->if_capenable &= ~IFCAP_NETMAP;

		/* Release packet steering control. */
		netmap_catch_tx(gna, 0);

		/* Do not intercept packets on the rx path. */
		netmap_catch_rx(na, 0);

		rtnl_unlock();

		/* Free the mbufs going to the netmap rings */
		for (r=0; r<na->num_rx_rings; r++) {
			mbq_safe_purge(&na->rx_rings[r].rx_queue);
			mbq_safe_destroy(&na->rx_rings[r].rx_queue);
		}

		for (r=0; r<na->num_rx_rings; r++)
			netmap_mitigation_cleanup(&gna->mit[r]);
		free(gna->mit, M_DEVBUF);

		for (r=0; r<na->num_tx_rings; r++) {
			for (i=0; i<na->num_tx_desc; i++) {
				m_freem(na->tx_rings[r].tx_pool[i]);
			}
			free(na->tx_rings[r].tx_pool, M_DEVBUF);
		}

#ifdef RATE
		if (--rate_ctx.refcount == 0) {
			D("del_timer()");
			del_timer(&rate_ctx.timer);
		}
#endif
	}

#ifdef REG_RESET
	error = ifp->netdev_ops->ndo_open(ifp);
	if (error) {
		goto free_tx_pools;
	}
#endif

	return 0;

register_handler:
	rtnl_unlock();
free_tx_pools:
	for (r=0; r<na->num_tx_rings; r++) {
		if (na->tx_rings[r].tx_pool == NULL)
			continue;
		for (i=0; i<na->num_tx_desc; i++)
			if (na->tx_rings[r].tx_pool[i])
				m_freem(na->tx_rings[r].tx_pool[i]);
		free(na->tx_rings[r].tx_pool, M_DEVBUF);
		na->tx_rings[r].tx_pool = NULL;
	}
	for (r=0; r<na->num_rx_rings; r++) {
		netmap_mitigation_cleanup(&gna->mit[r]);
		mbq_safe_destroy(&na->rx_rings[r].rx_queue);
	}
	free(gna->mit, M_DEVBUF);
out:

	return error;
}
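
/*
 * Illustrative sketch (not compiled, not part of the driver): the
 * user-space sequence that reaches generic_netmap_register() through a
 * NIOCREGIF ioctl when the interface lacks native netmap support. The
 * interface name and the missing error reporting are placeholders.
 */
#if 0
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <string.h>
#include <net/netmap.h>
#include <net/netmap_user.h>

static struct netmap_if *
example_open_emulated(const char *ifname)
{
	struct nmreq req;
	void *mem;
	int fd;

	fd = open("/dev/netmap", O_RDWR);
	if (fd < 0)
		return NULL;
	memset(&req, 0, sizeof(req));
	req.nr_version = NETMAP_API;
	strncpy(req.nr_name, ifname, sizeof(req.nr_name) - 1);
	if (ioctl(fd, NIOCREGIF, &req) < 0)	/* may fall back to this module */
		return NULL;
	mem = mmap(NULL, req.nr_memsize, PROT_READ | PROT_WRITE,
	    MAP_SHARED, fd, 0);
	if (mem == MAP_FAILED)
		return NULL;
	return NETMAP_IF(mem, req.nr_offset);
}
#endif
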
/*
 * Callback invoked when the device driver frees an mbuf used
 * by netmap to transmit a packet. This usually happens when
 * the NIC notifies the driver that transmission is completed.
 */
static void
generic_mbuf_destructor(struct mbuf *m)
{
	netmap_generic_irq(MBUF_IFP(m), MBUF_TXQ(m), NULL);
#ifdef __FreeBSD__
	if (netmap_verbose)
		RD(5, "Tx irq (%p) queue %d index %d", m, MBUF_TXQ(m),
			(int)(uintptr_t)m->m_ext.ext_arg1);
	netmap_default_mbuf_destructor(m);
#endif /* __FreeBSD__ */
	IFRATE(rate_ctx.new.txirq++);
}

/* Record completed transmissions and update hwtail.
 *
 * The oldest tx buffer not yet completed is at nr_hwtail + 1,
 * nr_hwcur is the first unsent buffer.
 */
static u_int
generic_netmap_tx_clean(struct netmap_kring *kring)
{
	u_int const lim = kring->nkr_num_slots - 1;
	u_int nm_i = nm_next(kring->nr_hwtail, lim);
	u_int hwcur = kring->nr_hwcur;
	u_int n = 0;
	struct mbuf **tx_pool = kring->tx_pool;

	while (nm_i != hwcur) { /* buffers not completed */
		struct mbuf *m = tx_pool[nm_i];

		if (unlikely(m == NULL)) {
			/* this one is done, try to replenish the entry */
			tx_pool[nm_i] = m = netmap_get_mbuf(GENERIC_BUF_SIZE);
			if (unlikely(m == NULL)) {
				D("mbuf allocation failed, XXX error");
				// XXX how do we proceed ? break ?
				return -ENOMEM;
			}
		} else if (GET_MBUF_REFCNT(m) != 1) {
			break; /* This mbuf is still busy: its refcnt is 2. */
		}
		n++;
		nm_i = nm_next(nm_i, lim);
	}
	kring->nr_hwtail = nm_prev(nm_i, lim);
	ND("tx completed [%d] -> hwtail %d", n, kring->nr_hwtail);

	return n;
}


/*
 * We have pending packets in the driver between nr_hwtail + 1 and hwcur.
 * Compute a position in the middle, to be used to generate
 * a notification.
 */
static inline u_int
generic_tx_event_middle(struct netmap_kring *kring, u_int hwcur)
{
	u_int n = kring->nkr_num_slots;
	u_int ntc = nm_next(kring->nr_hwtail, n - 1);
	u_int e;

	if (hwcur >= ntc) {
		e = (hwcur + ntc) / 2;
	} else { /* wrap around */
		e = (hwcur + n + ntc) / 2;
		if (e >= n) {
			e -= n;
		}
	}

	if (unlikely(e >= n)) {
		D("This cannot happen");
		e = 0;
	}

	return e;
}

/*
 * We have pending packets in the driver between nr_hwtail+1 and hwcur.
 * Schedule a notification approximately in the middle of the two.
 * There is a race, but this is only called within txsync, which does
 * a double check.
 */
static void
generic_set_tx_event(struct netmap_kring *kring, u_int hwcur)
{
	struct mbuf *m;
	u_int e;

	if (nm_next(kring->nr_hwtail, kring->nkr_num_slots - 1) == hwcur) {
		return; /* all buffers are free */
	}
	e = generic_tx_event_middle(kring, hwcur);

	m = kring->tx_pool[e];
	ND(5, "Request Event at %d mbuf %p refcnt %d", e, m, m ? GET_MBUF_REFCNT(m) : -2);
	if (m == NULL) {
		/* This can happen if there is already an event on the netmap
		 * slot 'e': there is nothing to do. */
		return;
	}
	kring->tx_pool[e] = NULL;
	SET_MBUF_DESTRUCTOR(m, generic_mbuf_destructor);

	// XXX wmb() ?
	/* Decrement the refcount and free it if we have the last one. */
	m_freem(m);
	smp_mb();
}
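
/*
 * Worked example for generic_tx_event_middle() (illustration only):
 * with nkr_num_slots = 256, nr_hwtail = 200 and hwcur = 40, the pending
 * region covers slots 201..255 and 0..39. Since hwcur (40) is smaller
 * than ntc (201) we take the wrap-around branch:
 * e = (40 + 256 + 201) / 2 = 248, which lies roughly in the middle of
 * the pending region, so the event is requested on slot 248.
 */
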
/*
 * generic_netmap_txsync() transforms netmap buffers into mbufs
 * and passes them to the standard device driver
 * (ndo_start_xmit() or ifp->if_transmit()).
 * On linux this is not done directly, but through dev_queue_xmit(),
 * since it implements the TX flow control (and takes some locks).
 */
static int
generic_netmap_txsync(struct netmap_adapter *na, u_int ring_nr, int flags)
{
	struct ifnet *ifp = na->ifp;
	struct netmap_kring *kring = &na->tx_rings[ring_nr];
	struct netmap_ring *ring = kring->ring;
	u_int nm_i;	/* index into the netmap ring */ // j
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;

	IFRATE(rate_ctx.new.txsync++);

	// TODO: handle the case of mbuf allocation failure

	rmb();

	/*
	 * First part: process new packets to send.
	 */
	nm_i = kring->nr_hwcur;
	if (nm_i != head) {	/* we have new packets to send */
		while (nm_i != head) {
			struct netmap_slot *slot = &ring->slot[nm_i];
			u_int len = slot->len;
			void *addr = NMB(slot);

			/* device-specific */
			struct mbuf *m;
			int tx_ret;

			NM_CHECK_ADDR_LEN(addr, len);

			/* Take an mbuf from the tx pool and copy in the user packet. */
			m = kring->tx_pool[nm_i];
			if (unlikely(!m)) {
				RD(5, "This should never happen");
				kring->tx_pool[nm_i] = m = netmap_get_mbuf(GENERIC_BUF_SIZE);
				if (unlikely(m == NULL)) {
					D("mbuf allocation failed");
					break;
				}
			}
			/* XXX we should ask for notifications when NS_REPORT is set,
			 * or roughly every half frame. We can optimize this
			 * by lazily requesting notifications only when a
			 * transmission fails. Probably the best way is to
			 * break on failures and set notifications when
			 * ring->cur == ring->tail || nm_i != cur
			 */
			tx_ret = generic_xmit_frame(ifp, m, addr, len, ring_nr);
			if (unlikely(tx_ret)) {
				RD(5, "start_xmit failed: err %d [nm_i %u, head %u, hwtail %u]",
						tx_ret, nm_i, head, kring->nr_hwtail);
				/*
				 * No room for this mbuf in the device driver.
				 * Request a notification FOR A PREVIOUS MBUF,
				 * then call generic_netmap_tx_clean(kring) to do the
				 * double check and see if we can free more buffers.
				 * If there is space continue, else break;
				 * NOTE: the double check is necessary if the problem
				 * occurs in the txsync call after selrecord().
				 * Also, we need some way to tell the caller that not
				 * all buffers were queued onto the device (this was
				 * not a problem with the native netmap driver where space
				 * is preallocated). The bridge has a similar problem
				 * and we solve it there by dropping the excess packets.
				 */
				generic_set_tx_event(kring, nm_i);
				if (generic_netmap_tx_clean(kring)) { /* space now available */
					continue;
				} else {
					break;
				}
			}
			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
			nm_i = nm_next(nm_i, lim);
			IFRATE(rate_ctx.new.txpkt++);
		}

		/* Update hwcur to the next slot to transmit. */
		kring->nr_hwcur = nm_i; /* not head, we could break early */
	}

	/*
	 * Second, reclaim completed buffers.
	 */
	if (flags & NAF_FORCE_RECLAIM || nm_kr_txempty(kring)) {
		/* No more available slots? Set a notification event
		 * on a netmap slot that will be cleaned in the future.
		 * No doublecheck is performed, since txsync() will be
		 * called twice by netmap_poll().
		 */
		generic_set_tx_event(kring, nm_i);
	}
	ND("tx #%d, hwtail = %d", n, kring->nr_hwtail);

	generic_netmap_tx_clean(kring);

	nm_txsync_finalize(kring);

	return 0;
}
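
/*
 * Illustrative sketch (not compiled): roughly what the OS-specific
 * generic_xmit_frame() helper is expected to do on FreeBSD: copy the
 * netmap buffer into the preallocated mbuf, tag it with the ring number
 * so that MBUF_TXQ() and the destructor can locate the right kring, and
 * hand it to the driver via if_transmit(). The real helper lives in the
 * OS-specific backend; the exact field usage here is an approximation.
 */
#if 0
static int
example_xmit_frame(struct ifnet *ifp, struct mbuf *m,
	void *addr, u_int len, u_int ring_nr)
{
	m->m_len = m->m_pkthdr.len = 0;
	m_copyback(m, 0, len, addr);		/* copy the payload into the mbuf */
	m->m_pkthdr.flowid = ring_nr;		/* consumed by MBUF_TXQ() */
	m->m_pkthdr.rcvif = ifp;		/* consumed by MBUF_IFP() on completion */
	return (ifp->if_transmit(ifp, m));	/* queue on the driver */
}
#endif
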
/*
 * This handler is registered (through netmap_catch_rx()) with the
 * RX subsystem of the attached network interface, so that every mbuf
 * passed up by the driver can be stolen before it reaches the network
 * stack. Stolen packets are put in a queue from which the
 * generic_netmap_rxsync() callback can extract them.
 */
void
generic_rx_handler(struct ifnet *ifp, struct mbuf *m)
{
	struct netmap_adapter *na = NA(ifp);
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
	u_int work_done;
	u_int rr = MBUF_RXQ(m); // receive ring number

	if (rr >= na->num_rx_rings) {
		rr = rr % na->num_rx_rings; // XXX expensive...
	}

	/* limit the size of the queue */
	if (unlikely(mbq_len(&na->rx_rings[rr].rx_queue) > 1024)) {
		m_freem(m);
	} else {
		mbq_safe_enqueue(&na->rx_rings[rr].rx_queue, m);
	}

	if (netmap_generic_mit < 32768) {
		/* no rx mitigation, pass notification up */
		netmap_generic_irq(na->ifp, rr, &work_done);
		IFRATE(rate_ctx.new.rxirq++);
	} else {
		/* same as send combining: filter the notification if there is a
		 * pending timer, otherwise pass it up and start a timer.
		 */
		if (likely(netmap_mitigation_active(&gna->mit[rr]))) {
			/* Record that there is some pending work. */
			gna->mit[rr].mit_pending = 1;
		} else {
			netmap_generic_irq(na->ifp, rr, &work_done);
			IFRATE(rate_ctx.new.rxirq++);
			netmap_mitigation_start(&gna->mit[rr]);
		}
	}
}
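
/*
 * Illustrative sketch (not compiled): the rx mitigation above mimics
 * "send combining". When the mitigation timer fires, the callback is
 * expected to deliver a deferred notification if generic_rx_handler()
 * recorded pending work in the meantime, and to let the timer lapse
 * otherwise. The helper names and the re-arming call are hypothetical;
 * the real timer handling lives in the OS-specific netmap_mitigation_*
 * backend.
 */
#if 0
static void
example_mitigation_expire(struct nm_generic_mit *mit,
	struct netmap_adapter *na, u_int rr)
{
	u_int work_done;

	if (mit->mit_pending) {
		/* Packets arrived while notifications were held back:
		 * deliver one now and keep mitigating. */
		mit->mit_pending = 0;
		netmap_generic_irq(na->ifp, rr, &work_done);
		netmap_mitigation_restart(mit);	/* hypothetical re-arm */
	}
	/* else: the timer lapses; the next packet restarts it. */
}
#endif
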
/*
 * generic_netmap_rxsync() extracts mbufs from the queue filled by
 * generic_rx_handler() and puts their content in the netmap
 * receive ring.
 * Access must be protected because the rx handler is asynchronous.
 */
static int
generic_netmap_rxsync(struct netmap_adapter *na, u_int ring_nr, int flags)
{
	struct netmap_kring *kring = &na->rx_rings[ring_nr];
	struct netmap_ring *ring = kring->ring;
	u_int nm_i;	/* index into the netmap ring */ //j,
	u_int n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = nm_rxsync_prologue(kring);
	int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;

	if (head > lim)
		return netmap_ring_reinit(kring);

	/*
	 * First part: import newly received packets.
	 */
	if (netmap_no_pendintr || force_update) {
		/* extract buffers from the rx queue, stop at most one
		 * slot before nr_hwcur (stop_i)
		 */
		uint16_t slot_flags = kring->nkr_slot_flags;
		u_int stop_i = nm_prev(kring->nr_hwcur, lim);

		nm_i = kring->nr_hwtail; /* first empty slot in the receive ring */
		for (n = 0; nm_i != stop_i; n++) {
			int len;
			void *addr = NMB(&ring->slot[nm_i]);
			struct mbuf *m;

			/* we only check the address here on generic rx rings */
			if (addr == netmap_buffer_base) { /* Bad buffer */
				return netmap_ring_reinit(kring);
			}
			/*
			 * Call the locked version of the function.
			 * XXX Ideally we could grab a batch of mbufs at once
			 * and save some locking overhead.
			 */
			m = mbq_safe_dequeue(&kring->rx_queue);
			if (!m)	/* no more data */
				break;
			len = MBUF_LEN(m);
			m_copydata(m, 0, len, addr);
			ring->slot[nm_i].len = len;
			ring->slot[nm_i].flags = slot_flags;
			m_freem(m);
			nm_i = nm_next(nm_i, lim);
		}
		if (n) {
			kring->nr_hwtail = nm_i;
			IFRATE(rate_ctx.new.rxpkt += n);
		}
		kring->nr_kflags &= ~NKR_PENDINTR;
	}

	// XXX should we invert the order ?
	/*
	 * Second part: skip past packets that userspace has released.
	 */
	nm_i = kring->nr_hwcur;
	if (nm_i != head) {
		/* Userspace has released some packets. */
		for (n = 0; nm_i != head; n++) {
			struct netmap_slot *slot = &ring->slot[nm_i];

			slot->flags &= ~NS_BUF_CHANGED;
			nm_i = nm_next(nm_i, lim);
		}
		kring->nr_hwcur = head;
	}
	/* tell userspace that there might be new packets. */
	nm_rxsync_finalize(kring);
	IFRATE(rate_ctx.new.rxsync++);

	return 0;
}

static void
generic_netmap_dtor(struct netmap_adapter *na)
{
	struct ifnet *ifp = na->ifp;
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
	struct netmap_adapter *prev_na = gna->prev;

	if (prev_na != NULL) {
		D("Released generic NA %p", gna);
		if_rele(na->ifp);
		netmap_adapter_put(prev_na);
	}
	if (ifp != NULL) {
		WNA(ifp) = prev_na;
		D("Restored native NA %p", prev_na);
		na->ifp = NULL;
	}
}

/*
 * generic_netmap_attach() makes it possible to use netmap on
 * a device without native netmap support.
 * This is less performant than native support but potentially
 * faster than raw sockets or similar schemes.
 *
 * In this "emulated" mode, netmap rings do not necessarily
 * have the same size as those in the NIC. We use a default
 * value and possibly override it if the OS has ways to fetch the
 * actual configuration.
795 */ 796 int 797 generic_netmap_attach(struct ifnet *ifp) 798 { 799 struct netmap_adapter *na; 800 struct netmap_generic_adapter *gna; 801 int retval; 802 u_int num_tx_desc, num_rx_desc; 803 804 num_tx_desc = num_rx_desc = netmap_generic_ringsize; /* starting point */ 805 806 generic_find_num_desc(ifp, &num_tx_desc, &num_rx_desc); 807 ND("Netmap ring size: TX = %d, RX = %d", num_tx_desc, num_rx_desc); 808 if (num_tx_desc == 0 || num_rx_desc == 0) { 809 D("Device has no hw slots (tx %u, rx %u)", num_tx_desc, num_rx_desc); 810 return EINVAL; 811 } 812 813 gna = malloc(sizeof(*gna), M_DEVBUF, M_NOWAIT | M_ZERO); 814 if (gna == NULL) { 815 D("no memory on attach, give up"); 816 return ENOMEM; 817 } 818 na = (struct netmap_adapter *)gna; 819 na->ifp = ifp; 820 na->num_tx_desc = num_tx_desc; 821 na->num_rx_desc = num_rx_desc; 822 na->nm_register = &generic_netmap_register; 823 na->nm_txsync = &generic_netmap_txsync; 824 na->nm_rxsync = &generic_netmap_rxsync; 825 na->nm_dtor = &generic_netmap_dtor; 826 /* when using generic, IFCAP_NETMAP is set so we force 827 * NAF_SKIP_INTR to use the regular interrupt handler 828 */ 829 na->na_flags = NAF_SKIP_INTR | NAF_HOST_RINGS; 830 831 ND("[GNA] num_tx_queues(%d), real_num_tx_queues(%d), len(%lu)", 832 ifp->num_tx_queues, ifp->real_num_tx_queues, 833 ifp->tx_queue_len); 834 ND("[GNA] num_rx_queues(%d), real_num_rx_queues(%d)", 835 ifp->num_rx_queues, ifp->real_num_rx_queues); 836 837 generic_find_num_queues(ifp, &na->num_tx_rings, &na->num_rx_rings); 838 839 retval = netmap_attach_common(na); 840 if (retval) { 841 free(gna, M_DEVBUF); 842 } 843 844 return retval; 845 } 846