/*
 * Copyright (C) 2011-2014 Matteo Landi, Luigi Rizzo. All rights reserved.
 * Copyright (C) 2013-2014 Universita` di Pisa. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * $FreeBSD$
 *
 * The header contains the definitions of constants and function
 * prototypes used only in kernelspace.
 */

#ifndef _NET_NETMAP_KERN_H_
#define _NET_NETMAP_KERN_H_

#define WITH_VALE	// comment out to disable VALE support
#define WITH_PIPES
#define WITH_MONITOR
#define WITH_GENERIC

#if defined(__FreeBSD__)

#define likely(x)	__builtin_expect((long)!!(x), 1L)
#define unlikely(x)	__builtin_expect((long)!!(x), 0L)

#define NM_LOCK_T	struct mtx

/* netmap global lock */
#define NMG_LOCK_T	struct sx
#define NMG_LOCK_INIT()		sx_init(&netmap_global_lock, \
				"netmap global lock")
#define NMG_LOCK_DESTROY()	sx_destroy(&netmap_global_lock)
#define NMG_LOCK()		sx_xlock(&netmap_global_lock)
#define NMG_UNLOCK()		sx_xunlock(&netmap_global_lock)
#define NMG_LOCK_ASSERT()	sx_assert(&netmap_global_lock, SA_XLOCKED)

#define NM_SELINFO_T	struct nm_selinfo
#define MBUF_LEN(m)	((m)->m_pkthdr.len)
#define MBUF_IFP(m)	((m)->m_pkthdr.rcvif)
#define NM_SEND_UP(ifp, m)	((NA(ifp))->if_input)(ifp, m)

#define NM_ATOMIC_T	volatile int	// XXX ?
/* atomic operations */
#include <machine/atomic.h>
#define NM_ATOMIC_TEST_AND_SET(p)	(!atomic_cmpset_acq_int((p), 0, 1))
#define NM_ATOMIC_CLEAR(p)		atomic_store_rel_int((p), 0)

#if __FreeBSD_version >= 1100030
#define WNA(_ifp)	(_ifp)->if_netmap
#else /* older FreeBSD */
#define WNA(_ifp)	(_ifp)->if_pspare[0]
#endif /* older FreeBSD */

#if __FreeBSD_version >= 1100005
struct netmap_adapter *netmap_getna(if_t ifp);
#endif

#if __FreeBSD_version >= 1100027
#define GET_MBUF_REFCNT(m)	((m)->m_ext.ext_cnt ? *((m)->m_ext.ext_cnt) : -1)
#define SET_MBUF_REFCNT(m, x)	*((m)->m_ext.ext_cnt) = x
#define PNT_MBUF_REFCNT(m)	((m)->m_ext.ext_cnt)
#else
#define GET_MBUF_REFCNT(m)	((m)->m_ext.ref_cnt ? *((m)->m_ext.ref_cnt) : -1)
#define SET_MBUF_REFCNT(m, x)	*((m)->m_ext.ref_cnt) = x
#define PNT_MBUF_REFCNT(m)	((m)->m_ext.ref_cnt)
#endif
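/*
 * Usage sketch (illustrative only, not compiled): the NM_ATOMIC_*
 * macros above implement a try-lock. NM_ATOMIC_TEST_AND_SET()
 * returns 0 when the flag was moved 0 -> 1 (acquired) and non-zero
 * when it was already set; see nm_kr_tryget() below for the real user.
 */
#if 0
static void
example_try_lock(NM_ATOMIC_T *busy)	/* hypothetical helper */
{
	if (NM_ATOMIC_TEST_AND_SET(busy))
		return;			/* already held by someone else */
	/* ... critical section ... */
	NM_ATOMIC_CLEAR(busy);		/* release */
}
#endif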
MALLOC_DECLARE(M_NETMAP);

struct nm_selinfo {
	struct selinfo si;
	struct mtx m;
};

void freebsd_selwakeup(struct nm_selinfo *si, int pri);

// XXX linux struct, not used in FreeBSD
struct net_device_ops {
};
struct ethtool_ops {
};
struct hrtimer {
};

#elif defined (linux)

#define NM_LOCK_T	safe_spinlock_t	// see bsd_glue.h
#define NM_SELINFO_T	wait_queue_head_t
#define MBUF_LEN(m)	((m)->len)
#define MBUF_IFP(m)	((m)->dev)
#define NM_SEND_UP(ifp, m) \
	do { \
		m->priority = NM_MAGIC_PRIORITY_RX; \
		netif_rx(m); \
	} while (0)

#define NM_ATOMIC_T	volatile long unsigned int

#define NM_MTX_T	struct mutex
#define NM_MTX_INIT(m, s)	do { (void)s; mutex_init(&(m)); } while (0)
#define NM_MTX_DESTROY(m)	do { (void)m; } while (0)
#define NM_MTX_LOCK(m)		mutex_lock(&(m))
#define NM_MTX_UNLOCK(m)	mutex_unlock(&(m))
#define NM_MTX_LOCK_ASSERT(m)	mutex_is_locked(&(m))

#define NMG_LOCK_T		NM_MTX_T
#define NMG_LOCK_INIT()		NM_MTX_INIT(netmap_global_lock, \
					"netmap_global_lock")
#define NMG_LOCK_DESTROY()	NM_MTX_DESTROY(netmap_global_lock)
#define NMG_LOCK()		NM_MTX_LOCK(netmap_global_lock)
#define NMG_UNLOCK()		NM_MTX_UNLOCK(netmap_global_lock)
#define NMG_LOCK_ASSERT()	NM_MTX_LOCK_ASSERT(netmap_global_lock)

#ifndef DEV_NETMAP
#define DEV_NETMAP
#endif /* DEV_NETMAP */

#elif defined (__APPLE__)

#warning apple support is incomplete.
#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)
#define NM_LOCK_T	IOLock *
#define NM_SELINFO_T	struct selinfo
#define MBUF_LEN(m)	((m)->m_pkthdr.len)
#define NM_SEND_UP(ifp, m)	((ifp)->if_input)(ifp, m)

#else

#error unsupported platform

#endif /* end - platform-specific code */

#define ND(format, ...)
#define D(format, ...)						\
	do {							\
		struct timeval __xxts;				\
		microtime(&__xxts);				\
		printf("%03d.%06d [%4d] %-25s " format "\n",	\
		(int)__xxts.tv_sec % 1000, (int)__xxts.tv_usec,	\
		__LINE__, __FUNCTION__, ##__VA_ARGS__);		\
	} while (0)

/* rate limited, lps indicates how many per second */
#define RD(lps, format, ...)					\
	do {							\
		static int t0, __cnt;				\
		if (t0 != time_second) {			\
			t0 = time_second;			\
			__cnt = 0;				\
		}						\
		if (__cnt++ < lps)				\
			D(format, ##__VA_ARGS__);		\
	} while (0)

struct netmap_adapter;
struct nm_bdg_fwd;
struct nm_bridge;
struct netmap_priv_d;

const char *nm_dump_buf(char *p, int len, int lim, char *dst);

#include "netmap_mbq.h"

extern NMG_LOCK_T	netmap_global_lock;
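/*
 * Usage sketch (illustrative only, not compiled): D() logs
 * unconditionally with a timestamp, ND() compiles to nothing, and
 * RD() rate-limits to 'lps' lines per second, which makes it safe
 * in per-packet paths. The variables below are hypothetical.
 */
#if 0
	D("adapter %s: %d tx rings", na->name, na->num_tx_rings);
	RD(5, "ring %u full, dropping packet", ring_nr); /* max 5 lines/s */
#endif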
/*
 * private, kernel view of a ring. Keeps track of the status of
 * a ring across system calls.
 *
 *	nr_hwcur	index of the next buffer to refill.
 *			It corresponds to ring->head
 *			at the time the system call returns.
 *
 *	nr_hwtail	index of the first buffer owned by the kernel.
 *			On RX, hwcur->hwtail are receive buffers
 *			not yet released. hwcur is advanced following
 *			ring->head, hwtail is advanced on incoming packets,
 *			and a wakeup is generated when hwtail passes ring->cur
 *			On TX, hwcur->rcur have been filled by the sender
 *			but not sent yet to the NIC; rcur->hwtail are available
 *			for new transmissions, and hwtail->hwcur-1 are pending
 *			transmissions not yet acknowledged.
 *
 * The indexes in the NIC and netmap rings are offset by nkr_hwofs slots.
 * This is so that, on a reset, buffers owned by userspace are not
 * modified by the kernel. In particular:
 * RX rings: the next empty buffer (hwtail + hwofs) coincides with
 *	the next empty buffer as known by the hardware (next_to_check or so).
 * TX rings: hwcur + hwofs coincides with next_to_send
 *
 * For received packets, slot->flags is set to nkr_slot_flags
 * so we can provide a proper initial value (e.g. set NS_FORWARD
 * when operating in 'transparent' mode).
 *
 * The following fields are used to implement lock-free copy of packets
 * from input to output ports in VALE switch:
 *	nkr_hwlease	buffer after the last one being copied.
 *			A writer in nm_bdg_flush reserves N buffers
 *			from nkr_hwlease, advances it, then does the
 *			copy outside the lock.
 *			In RX rings (used for VALE ports),
 *			nkr_hwtail <= nkr_hwlease < nkr_hwcur+N-1
 *			In TX rings (used for NIC or host stack ports)
 *			nkr_hwcur <= nkr_hwlease < nkr_hwtail
 *	nkr_leases	array of nkr_num_slots entries where writers can
 *			report completion of their block. NR_NOSLOT (~0)
 *			indicates that the writer has not finished yet
 *	nkr_lease_idx	index of next free slot in nkr_leases, to be assigned
 *
 * The kring is manipulated by txsync/rxsync and generic netmap functions.
 *
 * Concurrent rxsync or txsync on the same ring are prevented
 * by nm_kr_(try)lock() which in turn uses nr_busy. This is all we need
 * for NIC rings, and for TX rings attached to the host stack.
 *
 * RX rings attached to the host stack use an mbq (rx_queue) on both
 * rxsync_from_host() and netmap_transmit(). The mbq is protected
 * by its internal lock.
 *
 * RX rings attached to the VALE switch are accessed by both senders
 * and receivers. They are protected through the q_lock on the RX ring.
 */
struct netmap_kring {
	struct netmap_ring	*ring;

	uint32_t	nr_hwcur;
	uint32_t	nr_hwtail;

	/*
	 * Copies of values in user rings, so we do not need to look
	 * at the ring (which could be modified). These are set in the
	 * *sync_prologue()/finalize() routines.
	 */
	uint32_t	rhead;
	uint32_t	rcur;
	uint32_t	rtail;

	uint32_t	nr_kflags;	/* private driver flags */
#define NKR_PENDINTR	0x1		// Pending interrupt.
	uint32_t	nkr_num_slots;

	/*
	 * On a NIC reset, the NIC ring indexes may be reset but the
	 * indexes in the netmap rings remain the same. nkr_hwofs
	 * keeps track of the offset between the two.
	 */
	int32_t		nkr_hwofs;

	uint16_t	nkr_slot_flags;	/* initial value for flags */

	/* last_reclaim is an opaque marker to help reduce the frequency
	 * of operations such as reclaiming tx buffers. A possible use
	 * is to set it to ticks and do the reclaim only once per tick.
	 */
	uint64_t	last_reclaim;


	NM_SELINFO_T	si;		/* poll/select wait queue */
	NM_LOCK_T	q_lock;		/* protects kring and ring. */
	NM_ATOMIC_T	nr_busy;	/* prevent concurrent syscalls */

	struct netmap_adapter *na;

	/* The following fields are for VALE switch support */
	struct nm_bdg_fwd *nkr_ft;
	uint32_t	*nkr_leases;
#define NR_NOSLOT	((uint32_t)~0)	/* used in nkr_*lease* */
	uint32_t	nkr_hwlease;
	uint32_t	nkr_lease_idx;

	/* while nkr_stopped is set, no new [tr]xsync operations can
	 * be started on this kring.
	 * This is used by netmap_disable_all_rings()
	 * to find a synchronization point where critical data
	 * structures pointed to by the kring can be added or removed
	 */
	volatile int nkr_stopped;

	/* Support for adapters without native netmap support.
	 * On tx rings we preallocate an array of tx buffers
	 * (same size as the netmap ring), on rx rings we
	 * store incoming mbufs in a queue that is drained by
	 * a rxsync.
	 */
	struct mbuf **tx_pool;
	// u_int nr_ntc;	/* Emulation of a next-to-clean RX ring pointer. */
	struct mbq rx_queue;	/* intercepted rx mbufs. */

	uint32_t	ring_id;	/* debugging */
	char name[64];			/* diagnostic */

	/* [tx]sync callback for this kring.
	 * The default nm_kring_create callback (netmap_krings_create)
	 * sets the nm_sync callback of each hardware tx(rx) kring to
	 * the corresponding nm_txsync(nm_rxsync) taken from the
	 * netmap_adapter; moreover, it sets the sync callback
	 * of the host tx(rx) ring to netmap_txsync_to_host
	 * (netmap_rxsync_from_host).
	 *
	 * Overrides: the above configuration is not changed by
	 * any of the nm_krings_create callbacks.
	 */
	int (*nm_sync)(struct netmap_kring *kring, int flags);

#ifdef WITH_PIPES
	struct netmap_kring *pipe;	/* if this is a pipe ring,
					 * pointer to the other end
					 */
	struct netmap_ring *save_ring;	/* pointer to hidden rings
					 * (see netmap_pipe.c for details)
					 */
#endif /* WITH_PIPES */

#ifdef WITH_MONITOR
	/* pointer to the adapter that is monitoring this kring (if any) */
	struct netmap_monitor_adapter *monitor;
	/*
	 * Monitors work by intercepting the txsync and/or rxsync of the
	 * monitored krings. This is implemented by replacing
	 * the nm_sync pointer above and saving the previous
	 * one in save_sync below.
	 */
	int (*save_sync)(struct netmap_kring *kring, int flags);
#endif
} __attribute__((__aligned__(64)));
/* return the next index, with wraparound */
static inline uint32_t
nm_next(uint32_t i, uint32_t lim)
{
	return unlikely(i == lim) ? 0 : i + 1;
}


/* return the previous index, with wraparound */
static inline uint32_t
nm_prev(uint32_t i, uint32_t lim)
{
	return unlikely(i == 0) ? lim : i - 1;
}
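/*
 * Usage sketch (illustrative only, not compiled): walking the slots
 * between hwcur and head with wraparound, as a driver txsync would.
 */
#if 0
	u_int nm_i = kring->nr_hwcur;
	u_int const head = kring->rhead;
	u_int const lim = kring->nkr_num_slots - 1;

	while (nm_i != head) {
		struct netmap_slot *slot = &kring->ring->slot[nm_i];
		/* ... queue this slot's buffer on the NIC ... */
		nm_i = nm_next(nm_i, lim);
	}
	kring->nr_hwcur = head;
#endif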
/*
 *
 * Here is the layout for the Rx and Tx rings.

       RxRING                            TxRING

      +-----------------+            +-----------------+
      |                 |            |                 |
      |XXX free slot XXX|            |XXX free slot XXX|
      +-----------------+            +-----------------+
head->| owned by user   |<-hwcur     | not sent to nic |<-hwcur
      |                 |            | yet             |
      +-----------------+            |                 |
 cur->| available to    |            |                 |
      | user, not read  |            +-----------------+
      | yet             |       cur->| (being          |
      |                 |            |  prepared)      |
      |                 |            |                 |
      +-----------------+            +     ------      +
tail->|                 |<-hwtail    |                 |<-hwlease
      | (being          |    ...     |                 |    ...
      |  prepared)      |    ...     |                 |    ...
      +-----------------+    ...     |                 |    ...
      |                 |<-hwlease   +-----------------+
      |                 |      tail->|                 |<-hwtail
      |                 |            |                 |
      |                 |            |                 |
      |                 |            |                 |
      +-----------------+            +-----------------+

 * The cur/tail (user view) and hwcur/hwtail (kernel view)
 * are used in the normal operation of the card.
 *
 * When a ring is the output of a switch port (Rx ring for
 * a VALE port, Tx ring for the host stack or NIC), slots
 * are reserved in blocks through 'hwlease' which points
 * to the next unused slot.
 * On an Rx ring, hwlease is always after hwtail,
 * and completions cause hwtail to advance.
 * On a Tx ring, hwlease is always between cur and hwtail,
 * and completions cause cur to advance.
 *
 * nm_kr_space() returns the maximum number of slots that
 * can be assigned.
 * nm_kr_lease() reserves the required number of buffers,
 *    advances nkr_hwlease and also returns an entry in
 *    a circular array where completions should be reported.
 */



enum txrx { NR_RX = 0, NR_TX = 1 };

struct netmap_vp_adapter; // forward
/*
 * The "struct netmap_adapter" extends the "struct adapter"
 * (or equivalent) device descriptor.
 * It contains all base fields needed to support netmap operation.
 * There are in fact different types of netmap adapters
 * (native, generic, VALE switch...) so a netmap_adapter is
 * just the first field in the derived type.
 */
struct netmap_adapter {
	/*
	 * On linux we do not have a good way to tell if an interface
	 * is netmap-capable. So we always use the following trick:
	 * NA(ifp) points here, and the first entry (which hopefully
	 * always exists and is at least 32 bits) contains a magic
	 * value which we can use to detect that the interface is good.
	 */
	uint32_t magic;
	uint32_t na_flags;	/* enabled, and other flags */
#define NAF_SKIP_INTR	1	/* use the regular interrupt handler.
				 * useful during initialization
				 */
#define NAF_SW_ONLY	2	/* forward packets only to sw adapter */
#define NAF_BDG_MAYSLEEP 4	/* the bridge is allowed to sleep when
				 * forwarding packets coming from this
				 * interface
				 */
#define NAF_MEM_OWNER	8	/* the adapter is responsible for the
				 * deallocation of the memory allocator
				 */
#define NAF_NATIVE_ON	16	/* the adapter is native and the attached
				 * interface is in netmap mode.
				 * Virtual ports (vale, pipe, monitor...)
				 * should never use this flag.
				 */
#define NAF_NETMAP_ON	32	/* netmap is active (either native or
				 * emulated). Where possible (e.g. FreeBSD)
				 * IFCAP_NETMAP also mirrors this flag.
				 */
#define NAF_HOST_RINGS	64	/* the adapter supports the host rings */
#define NAF_FORCE_NATIVE 128	/* the adapter is always NATIVE */
#define NAF_BUSY	(1U<<31) /* the adapter is used internally and
				  * cannot be registered from userspace
				  */
	int active_fds; /* number of user-space descriptors using this
			 interface, which is equal to the number of
			 struct netmap_if objs in the mapped region. */

	u_int num_rx_rings; /* number of adapter receive rings */
	u_int num_tx_rings; /* number of adapter transmit rings */

	u_int num_tx_desc;  /* number of descriptors in each queue */
	u_int num_rx_desc;

	/* tx_rings and rx_rings are private but allocated
	 * as a contiguous chunk of memory. Each array has
	 * N+1 entries, for the adapter queues and for the host queue.
	 */
	struct netmap_kring *tx_rings; /* array of TX rings. */
	struct netmap_kring *rx_rings; /* array of RX rings. */

	void *tailroom;		       /* space below the rings array */
				       /* (used for leases) */


	NM_SELINFO_T tx_si, rx_si;	/* global wait queues */

	/* count users of the global wait queues */
	int tx_si_users, rx_si_users;

	void *pdev; /* used to store pci device */

	/* copy of if_qflush and if_transmit pointers, to intercept
	 * packets from the network stack when netmap is active.
	 */
	int	(*if_transmit)(struct ifnet *, struct mbuf *);

	/* copy of if_input for netmap_send_up() */
	void	(*if_input)(struct ifnet *, struct mbuf *);

	/* references to the ifnet and device routines, used by
	 * the generic netmap functions.
	 */
	struct ifnet *ifp; /* adapter is ifp->if_softc */

	/*---- callbacks for this netmap adapter -----*/
	/*
	 * nm_dtor() is the cleanup routine called when destroying
	 *	the adapter.
	 *	Called with NMG_LOCK held.
	 *
	 * nm_register() is called on NIOCREGIF and close() to enter
	 *	or exit netmap mode on the NIC
	 *	Called with NMG_LOCK held.
	 *
	 * nm_txsync() pushes packets to the underlying hw/switch
	 *
	 * nm_rxsync() collects packets from the underlying hw/switch
	 *
	 * nm_config() returns configuration information from the OS
	 *	Called with NMG_LOCK held.
	 *
	 * nm_krings_create() create and init the tx_rings and
	 *	rx_rings arrays of kring structures. In particular,
	 *	set the nm_sync callbacks for each ring.
	 *	There is no need to also allocate the corresponding
	 *	netmap_rings, since netmap_mem_rings_create() will always
	 *	be called to provide the missing ones.
	 *	Called with NMG_LOCK held.
	 *
	 * nm_krings_delete() cleanup and delete the tx_rings and rx_rings
	 *	arrays
	 *	Called with NMG_LOCK held.
	 *
	 * nm_notify() is used to act after data have become available
	 *	(or the stopped state of the ring has changed)
	 *	For hw devices this is typically a selwakeup(),
	 *	but for NIC/host ports attached to a switch (or vice-versa)
	 *	we also need to invoke the 'txsync' code downstream.
	 */
	void (*nm_dtor)(struct netmap_adapter *);

	int (*nm_register)(struct netmap_adapter *, int onoff);

	int (*nm_txsync)(struct netmap_kring *kring, int flags);
	int (*nm_rxsync)(struct netmap_kring *kring, int flags);
#define NAF_FORCE_READ    1
#define NAF_FORCE_RECLAIM 2
	/* return configuration information */
	int (*nm_config)(struct netmap_adapter *,
		u_int *txr, u_int *txd, u_int *rxr, u_int *rxd);
	int (*nm_krings_create)(struct netmap_adapter *);
	void (*nm_krings_delete)(struct netmap_adapter *);
	int (*nm_notify)(struct netmap_adapter *,
		u_int ring, enum txrx, int flags);
#define NAF_DISABLE_NOTIFY 8	/* notify that the stopped state of the
				 * ring has changed (kring->nkr_stopped)
				 */

#ifdef WITH_VALE
	/*
	 * nm_bdg_attach() initializes the na_vp field to point
	 *	to an adapter that can be attached to a VALE switch. If the
	 *	current adapter is already a VALE port, na_vp is simply a cast;
	 *	otherwise, na_vp points to a netmap_bwrap_adapter.
	 *	If applicable, this callback also initializes na_hostvp,
	 *	that can be used to connect the adapter host rings to the
	 *	switch.
	 *	Called with NMG_LOCK held.
	 *
	 * nm_bdg_ctl() is called on the actual attach/detach to/from
	 *	the switch, to perform adapter-specific
	 *	initializations
	 *	Called with NMG_LOCK held.
	 */
	int (*nm_bdg_attach)(const char *bdg_name, struct netmap_adapter *);
	int (*nm_bdg_ctl)(struct netmap_adapter *, struct nmreq *, int);

	/* adapter used to attach this adapter to a VALE switch (if any) */
	struct netmap_vp_adapter *na_vp;
	/* adapter used to attach the host rings of this adapter
	 * to a VALE switch (if any) */
	struct netmap_vp_adapter *na_hostvp;
#endif

	/* standard refcount to control the lifetime of the adapter
	 * (it should be equal to the lifetime of the corresponding ifp)
	 */
	int na_refcount;

	/* memory allocator (opaque)
	 * We also cache a pointer to the lut_entry for translating
	 * buffer addresses, and the total number of buffers.
	 */
	struct netmap_mem_d *nm_mem;
	struct lut_entry *na_lut;
	uint32_t na_lut_objtotal;	/* max buffer index */
	uint32_t na_lut_objsize;	/* buffer size */

	/* additional information attached to this adapter
	 * by other netmap subsystems. Currently used by
	 * bwrap and LINUX/v1000.
	 */
	void *na_private;

#ifdef WITH_PIPES
	/* array of pipes that have this adapter as a parent */
	struct netmap_pipe_adapter **na_pipes;
	int na_next_pipe;	/* next free slot in the array */
	int na_max_pipes;	/* size of the array */
#endif /* WITH_PIPES */

	char name[64];
};
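/*
 * Sketch of the derived-adapter pattern described above (illustrative
 * only, not compiled): the netmap_adapter must be the first field, so
 * callbacks receiving a netmap_adapter pointer can cast to the derived
 * type. 'my_foo_adapter' is a hypothetical name.
 */
#if 0
struct my_foo_adapter {
	struct netmap_adapter up;	/* must be first */
	int my_private_state;
};

static void
my_foo_dtor(struct netmap_adapter *na)
{
	struct my_foo_adapter *foo = (struct my_foo_adapter *)na;
	/* ... release resources tracked by foo->my_private_state ... */
}
#endif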
/*
 * If the NIC is owned by the kernel
 * (i.e., bridge), neither another bridge nor user can use it;
 * if the NIC is owned by a user, only users can share it.
 * Evaluation must be done under NMG_LOCK().
 */
#define NETMAP_OWNED_BY_KERN(na)	((na)->na_flags & NAF_BUSY)
#define NETMAP_OWNED_BY_ANY(na) \
	(NETMAP_OWNED_BY_KERN(na) || ((na)->active_fds > 0))


/*
 * derived netmap adapters for various types of ports
 */
struct netmap_vp_adapter {	/* VALE software port */
	struct netmap_adapter up;

	/*
	 * Bridge support:
	 *
	 * bdg_port is the port number used in the bridge;
	 * na_bdg points to the bridge this NA is attached to.
	 */
	int bdg_port;
	struct nm_bridge *na_bdg;
	int retry;

	/* Offset of ethernet header for each packet. */
	u_int virt_hdr_len;
	/* Maximum Frame Size, used in bdg_mismatch_datapath() */
	u_int mfs;
};


struct netmap_hw_adapter {	/* physical device */
	struct netmap_adapter up;

	struct net_device_ops nm_ndo;	// XXX linux only
	struct ethtool_ops    nm_eto;	// XXX linux only
	const struct ethtool_ops*   save_ethtool;

	int (*nm_hw_register)(struct netmap_adapter *, int onoff);
};

#ifdef WITH_GENERIC
/* Mitigation support. */
struct nm_generic_mit {
	struct hrtimer mit_timer;
	int mit_pending;
	int mit_ring_idx;	/* index of the ring being mitigated */
	struct netmap_adapter *mit_na;	/* backpointer */
};

struct netmap_generic_adapter {	/* emulated device */
	struct netmap_hw_adapter up;

	/* Pointer to a previously used netmap adapter. */
	struct netmap_adapter *prev;

	/* generic netmap adapters support:
	 * a net_device_ops struct overrides ndo_select_queue(),
	 * save_if_input saves the if_input hook (FreeBSD),
	 * mit implements rx interrupt mitigation,
	 */
	struct net_device_ops generic_ndo;
	void (*save_if_input)(struct ifnet *, struct mbuf *);

	struct nm_generic_mit *mit;
#ifdef linux
	netdev_tx_t (*save_start_xmit)(struct mbuf *, struct ifnet *);
#endif
};
#endif /* WITH_GENERIC */

static __inline int
netmap_real_tx_rings(struct netmap_adapter *na)
{
	return na->num_tx_rings + !!(na->na_flags & NAF_HOST_RINGS);
}

static __inline int
netmap_real_rx_rings(struct netmap_adapter *na)
{
	return na->num_rx_rings + !!(na->na_flags & NAF_HOST_RINGS);
}
#ifdef WITH_VALE

/*
 * Bridge wrapper for non VALE ports attached to a VALE switch.
 *
 * The real device must already have its own netmap adapter (hwna).
 * The bridge wrapper and the hwna adapter share the same set of
 * netmap rings and buffers, but they have two separate sets of
 * krings descriptors, with tx/rx meanings swapped:
 *
 *                                  netmap
 *           bwrap     krings       rings       krings      hwna
 *         +------+   +------+     +-----+     +------+   +------+
 *         |tx_rings->|      |\   /|     |----|      |<-tx_rings|
 *         |      |   +------+ \ / +-----+    +------+   |      |
 *         |      |             X                        |      |
 *         |      |            / \                       |      |
 *         |      |   +------+/   \+-----+    +------+   |      |
 *         |rx_rings->|      |     |     |----|      |<-rx_rings|
 *         |      |   +------+     +-----+    +------+   |      |
 *         +------+                                      +------+
 *
 * - packets coming from the bridge go to the bwrap rx rings,
 *   which are also the hwna tx rings.  The bwrap notify callback
 *   will then complete the hwna tx (see netmap_bwrap_notify).
 *
 * - packets coming from the outside go to the hwna rx rings,
 *   which are also the bwrap tx rings.  The (overwritten) hwna
 *   notify method will then complete the bridge tx
 *   (see netmap_bwrap_intr_notify).
 *
 * The bridge wrapper may optionally connect the hwna 'host' rings
 * to the bridge. This is done by using a second port in the
 * bridge and connecting it to the 'host' netmap_vp_adapter
 * contained in the netmap_bwrap_adapter. The bwrap host adapter
 * cross-links the hwna host rings in the same way as shown above.
 *
 * - packets coming from the bridge and directed to the host stack
 *   are handled by the bwrap host notify callback
 *   (see netmap_bwrap_host_notify)
 *
 * - packets coming from the host stack are still handled by the
 *   overwritten hwna notify callback (netmap_bwrap_intr_notify),
 *   but are diverted to the host adapter depending on the ring number.
 *
 */
struct netmap_bwrap_adapter {
	struct netmap_vp_adapter up;
	struct netmap_vp_adapter host;	/* for host rings */
	struct netmap_adapter *hwna;	/* the underlying device */

	/* backup of the hwna notify callback */
	int (*save_notify)(struct netmap_adapter *,
			u_int ring, enum txrx, int flags);
	/* backup of the hwna memory allocator */
	struct netmap_mem_d *save_nmd;

	/*
	 * When we attach a physical interface to the bridge, we
	 * allow the controlling process to terminate, so we need
	 * a place to store the netmap_priv_d data structure.
	 * This is only done when physical interfaces
	 * are attached to a bridge.
	 */
	struct netmap_priv_d *na_kpriv;
};
int netmap_bwrap_attach(const char *name, struct netmap_adapter *);


#endif /* WITH_VALE */
#ifdef WITH_PIPES

#define NM_MAXPIPES	64	/* max number of pipes per adapter */

struct netmap_pipe_adapter {
	struct netmap_adapter up;

	u_int id;	/* pipe identifier */
	int role;	/* either NR_REG_PIPE_MASTER or NR_REG_PIPE_SLAVE */

	struct netmap_adapter *parent; /* adapter that owns the memory */
	struct netmap_pipe_adapter *peer; /* the other end of the pipe */
	int peer_ref;	/* 1 iff we are holding a ref to the peer */

	u_int parent_slot; /* index in the parent pipe array */
};

#endif /* WITH_PIPES */


/* return slots reserved to rx clients; used in drivers */
static inline uint32_t
nm_kr_rxspace(struct netmap_kring *k)
{
	int space = k->nr_hwtail - k->nr_hwcur;
	if (space < 0)
		space += k->nkr_num_slots;
	ND("preserving %d rx slots %d -> %d", space, k->nr_hwcur, k->nr_hwtail);

	return space;
}


/* True if no space in the tx ring, only valid after txsync_prologue */
static inline int
nm_kr_txempty(struct netmap_kring *kring)
{
	return kring->rcur == kring->nr_hwtail;
}


/*
 * protect against multiple threads using the same ring.
 * Also check that the ring has not been stopped.
 * We only care for 0 or !=0 as a return code.
 */
#define NM_KR_BUSY	1
#define NM_KR_STOPPED	2


static __inline void nm_kr_put(struct netmap_kring *kr)
{
	NM_ATOMIC_CLEAR(&kr->nr_busy);
}


static __inline int nm_kr_tryget(struct netmap_kring *kr)
{
	/* check a first time without taking the lock
	 * to avoid starvation for nm_kr_get()
	 */
	if (unlikely(kr->nkr_stopped)) {
		ND("ring %p stopped (%d)", kr, kr->nkr_stopped);
		return NM_KR_STOPPED;
	}
	if (unlikely(NM_ATOMIC_TEST_AND_SET(&kr->nr_busy)))
		return NM_KR_BUSY;
	/* check a second time with lock held */
	if (unlikely(kr->nkr_stopped)) {
		ND("ring %p stopped (%d)", kr, kr->nkr_stopped);
		nm_kr_put(kr);
		return NM_KR_STOPPED;
	}
	return 0;
}
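/*
 * Usage sketch (illustrative only, not compiled): serializing access
 * to a kring around a sync operation, as the core rx/tx sync paths do.
 */
#if 0
	if (nm_kr_tryget(kring))	/* NM_KR_BUSY or NM_KR_STOPPED */
		return;			/* skip this ring */
	kring->nm_sync(kring, 0);
	nm_kr_put(kring);
#endif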
/*
 * The following functions are used by individual drivers to
 * support netmap operation.
 *
 * netmap_attach() initializes a struct netmap_adapter, allocating the
 *	struct netmap_ring's and the struct selinfo.
 *
 * netmap_detach() frees the memory allocated by netmap_attach().
 *
 * netmap_transmit() replaces the if_transmit routine of the interface,
 *	and is used to intercept packets coming from the stack.
 *
 * netmap_load_map/netmap_reload_map are helper routines to set/reset
 *	the dmamap for a packet buffer
 *
 * netmap_reset() is a helper routine to be called in the hw driver
 *	when reinitializing a ring. It should not be called by
 *	virtual ports (vale, pipes, monitor)
 */
int netmap_attach(struct netmap_adapter *);
void netmap_detach(struct ifnet *);
int netmap_transmit(struct ifnet *, struct mbuf *);
struct netmap_slot *netmap_reset(struct netmap_adapter *na,
	enum txrx tx, u_int n, u_int new_cur);
int netmap_ring_reinit(struct netmap_kring *);

/* default functions to handle rx/tx interrupts */
int netmap_rx_irq(struct ifnet *, u_int, u_int *);
#define netmap_tx_irq(_n, _q) netmap_rx_irq(_n, _q, NULL)
void netmap_common_irq(struct ifnet *, u_int, u_int *work_done);


#ifdef WITH_VALE
/* functions used by external modules to interface with VALE */
#define netmap_vp_to_ifp(_vp)		((_vp)->up.ifp)
#define netmap_ifp_to_vp(_ifp)		(NA(_ifp)->na_vp)
#define netmap_ifp_to_host_vp(_ifp)	(NA(_ifp)->na_hostvp)
#define netmap_bdg_idx(_vp)		((_vp)->bdg_port)
const char *netmap_bdg_name(struct netmap_vp_adapter *);
#else /* !WITH_VALE */
#define netmap_vp_to_ifp(_vp)		NULL
#define netmap_ifp_to_vp(_ifp)		NULL
#define netmap_ifp_to_host_vp(_ifp)	NULL
#define netmap_bdg_idx(_vp)		-1
#define netmap_bdg_name(_vp)		NULL
#endif /* WITH_VALE */

static inline int
nm_native_on(struct netmap_adapter *na)
{
	return na && na->na_flags & NAF_NATIVE_ON;
}

static inline int
nm_netmap_on(struct netmap_adapter *na)
{
	return na && na->na_flags & NAF_NETMAP_ON;
}

/* set/clear native flags and if_transmit/netdev_ops */
static inline void
nm_set_native_flags(struct netmap_adapter *na)
{
	struct ifnet *ifp = na->ifp;

	na->na_flags |= (NAF_NATIVE_ON | NAF_NETMAP_ON);
#ifdef IFCAP_NETMAP /* or FreeBSD ? */
	ifp->if_capenable |= IFCAP_NETMAP;
#endif
#ifdef __FreeBSD__
	na->if_transmit = ifp->if_transmit;
	ifp->if_transmit = netmap_transmit;
#else
	na->if_transmit = (void *)ifp->netdev_ops;
	ifp->netdev_ops = &((struct netmap_hw_adapter *)na)->nm_ndo;
	((struct netmap_hw_adapter *)na)->save_ethtool = ifp->ethtool_ops;
	ifp->ethtool_ops = &((struct netmap_hw_adapter*)na)->nm_eto;
#endif
}


static inline void
nm_clear_native_flags(struct netmap_adapter *na)
{
	struct ifnet *ifp = na->ifp;

#ifdef __FreeBSD__
	ifp->if_transmit = na->if_transmit;
#else
	ifp->netdev_ops = (void *)na->if_transmit;
	ifp->ethtool_ops = ((struct netmap_hw_adapter*)na)->save_ethtool;
#endif
	na->na_flags &= ~(NAF_NATIVE_ON | NAF_NETMAP_ON);
#ifdef IFCAP_NETMAP /* or FreeBSD ? */
	ifp->if_capenable &= ~IFCAP_NETMAP;
#endif
}


/*
 * validates parameters in the ring/kring, returns a value for head
 * If any error, returns ring_size to force a reinit.
 */
uint32_t nm_txsync_prologue(struct netmap_kring *);


/*
 * validates parameters in the ring/kring, returns a value for head,
 * and the 'reserved' value in the argument.
 * If any error, returns ring_size to force a reinit.
 */
uint32_t nm_rxsync_prologue(struct netmap_kring *);
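/*
 * Sketch of a native driver hooking into the API above (illustrative
 * only, not compiled). All 'my_dev_*' names are hypothetical; error
 * handling is omitted.
 */
#if 0
static int
my_dev_netmap_attach(struct my_dev_softc *sc)
{
	struct netmap_adapter na;

	bzero(&na, sizeof(na));
	na.ifp = sc->ifp;
	na.num_tx_desc = sc->num_tx_desc;
	na.num_rx_desc = sc->num_rx_desc;
	na.num_tx_rings = na.num_rx_rings = sc->num_queues;
	na.nm_register = my_dev_netmap_reg;	/* enter/exit netmap mode */
	na.nm_txsync = my_dev_netmap_txsync;
	na.nm_rxsync = my_dev_netmap_rxsync;
	return netmap_attach(&na);
}
#endif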
/*
 * update kring and ring at the end of txsync.
 */
static inline void
nm_txsync_finalize(struct netmap_kring *kring)
{
	/* update ring tail to what the kernel knows */
	kring->ring->tail = kring->rtail = kring->nr_hwtail;

	/* note, head/rhead/hwcur might be behind cur/rcur
	 * if no carrier
	 */
	ND(5, "%s now hwcur %d hwtail %d head %d cur %d tail %d",
		kring->name, kring->nr_hwcur, kring->nr_hwtail,
		kring->rhead, kring->rcur, kring->rtail);
}


/*
 * update kring and ring at the end of rxsync
 */
static inline void
nm_rxsync_finalize(struct netmap_kring *kring)
{
	/* tell userspace that there might be new packets */
	ND("head %d cur %d tail %d -> %d", kring->ring->head,
		kring->ring->cur, kring->ring->tail, kring->nr_hwtail);
	kring->ring->tail = kring->rtail = kring->nr_hwtail;
	/* make a copy of the state for next round */
	kring->rhead = kring->ring->head;
	kring->rcur = kring->ring->cur;
}


/* check/fix address and len in tx rings */
#if 1 /* debug version */
#define	NM_CHECK_ADDR_LEN(_na, _a, _l)	do {				\
	if (_a == NETMAP_BUF_BASE(_na) || _l > NETMAP_BUF_SIZE(_na)) {	\
		RD(5, "bad addr/len ring %d slot %d idx %d len %d",	\
			kring->ring_id, nm_i, slot->buf_idx, len);	\
		if (_l > NETMAP_BUF_SIZE(_na))				\
			_l = NETMAP_BUF_SIZE(_na);			\
	} } while (0)
#else /* no debug version */
#define	NM_CHECK_ADDR_LEN(_na, _a, _l)	do {				\
		if (_l > NETMAP_BUF_SIZE(_na))				\
			_l = NETMAP_BUF_SIZE(_na);			\
	} while (0)
#endif
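/*
 * Usage sketch (illustrative only, not compiled): NM_CHECK_ADDR_LEN
 * inside a txsync loop. The debug version expects kring, nm_i, slot
 * and len to be in scope, as in the fragment below.
 */
#if 0
	u_int nm_i;

	for (nm_i = kring->nr_hwcur; nm_i != head; nm_i = nm_next(nm_i, lim)) {
		struct netmap_slot *slot = &kring->ring->slot[nm_i];
		u_int len = slot->len;
		uint64_t paddr;
		void *addr = PNMB(na, slot, &paddr);

		NM_CHECK_ADDR_LEN(na, addr, len);	/* clamps bad lengths */
		/* ... write paddr/len into the NIC descriptor ... */
	}
#endif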
/*---------------------------------------------------------------*/
/*
 * Support routines used by netmap subsystems
 * (native drivers, VALE, generic, pipes, monitors, ...)
 */


/* common routine for all functions that create a netmap adapter. It performs
 * two main tasks:
 * - if the na points to an ifp, mark the ifp as netmap capable
 *   using na as its native adapter;
 * - provide defaults for the setup callbacks and the memory allocator
 */
int netmap_attach_common(struct netmap_adapter *);
/* common actions to be performed on netmap adapter destruction */
void netmap_detach_common(struct netmap_adapter *);
/* fill priv->np_[tr]xq{first,last} using the ringid and flags information
 * coming from a struct nmreq
 */
int netmap_interp_ringid(struct netmap_priv_d *priv, uint16_t ringid, uint32_t flags);
/* update the ring parameters (number and size of tx and rx rings).
 * It calls the nm_config callback, if available.
 */
int netmap_update_config(struct netmap_adapter *na);
/* create and initialize the common fields of the krings array.
 * using the information that must be already available in the na.
 * tailroom can be used to request the allocation of additional
 * tailroom bytes after the krings array. This is used by
 * netmap_vp_adapter's (i.e., VALE ports) to make room for
 * leasing-related data structures
 */
int netmap_krings_create(struct netmap_adapter *na, u_int tailroom);
/* deletes the kring array of the adapter. The array must have
 * been created using netmap_krings_create
 */
void netmap_krings_delete(struct netmap_adapter *na);

/* set the stopped/enabled status of a ring.
 * When stopping, they also wait for all current activity on the ring to
 * terminate. The status change is then notified using the na nm_notify
 * callback.
 */
void netmap_set_txring(struct netmap_adapter *, u_int ring_id, int stopped);
void netmap_set_rxring(struct netmap_adapter *, u_int ring_id, int stopped);
/* set the stopped/enabled status of all rings of the adapter. */
void netmap_set_all_rings(struct netmap_adapter *, int stopped);
/* convenience wrappers for netmap_set_all_rings, used in drivers */
void netmap_disable_all_rings(struct ifnet *);
void netmap_enable_all_rings(struct ifnet *);

int netmap_rxsync_from_host(struct netmap_adapter *na, struct thread *td, void *pwait);

struct netmap_if *
netmap_do_regif(struct netmap_priv_d *priv, struct netmap_adapter *na,
	uint16_t ringid, uint32_t flags, int *err);



u_int nm_bound_var(u_int *v, u_int dflt, u_int lo, u_int hi, const char *msg);
int netmap_get_na(struct nmreq *nmr, struct netmap_adapter **na, int create);
int netmap_get_hw_na(struct ifnet *ifp, struct netmap_adapter **na);


#ifdef WITH_VALE
/*
 * The following bridge-related functions are used by other
 * kernel modules.
 *
 * VALE only supports unicast or broadcast. The lookup
 * function can return 0 .. NM_BDG_MAXPORTS-1 for regular ports,
 * NM_BDG_MAXPORTS for broadcast, NM_BDG_MAXPORTS+1 for unknown.
 * XXX in practice "unknown" might be handled same as broadcast.
 */
typedef u_int (*bdg_lookup_fn_t)(struct nm_bdg_fwd *ft, uint8_t *ring_nr,
		const struct netmap_vp_adapter *);
typedef int (*bdg_config_fn_t)(struct nm_ifreq *);
typedef void (*bdg_dtor_fn_t)(const struct netmap_vp_adapter *);
struct netmap_bdg_ops {
	bdg_lookup_fn_t lookup;
	bdg_config_fn_t config;
	bdg_dtor_fn_t	dtor;
};

u_int netmap_bdg_learning(struct nm_bdg_fwd *ft, uint8_t *dst_ring,
		const struct netmap_vp_adapter *);

#define	NM_BDG_MAXPORTS		254	/* up to 254 */
#define	NM_BDG_BROADCAST	NM_BDG_MAXPORTS
#define	NM_BDG_NOPORT		(NM_BDG_MAXPORTS+1)

#define	NM_NAME			"vale"	/* prefix for bridge port name */

/* these are redefined in case of no VALE support */
int netmap_get_bdg_na(struct nmreq *nmr, struct netmap_adapter **na, int create);
void netmap_init_bridges(void);
int netmap_bdg_ctl(struct nmreq *nmr, struct netmap_bdg_ops *bdg_ops);
int netmap_bdg_config(struct nmreq *nmr);
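/*
 * Sketch of a custom lookup function (illustrative only, not
 * compiled): this one floods every packet, i.e. it always returns
 * NM_BDG_BROADCAST. It would be installed on a bridge through
 * netmap_bdg_ctl(); the nmreq setup is omitted.
 */
#if 0
static u_int
my_bdg_lookup(struct nm_bdg_fwd *ft, uint8_t *dst_ring,
	const struct netmap_vp_adapter *vpna)
{
	return NM_BDG_BROADCAST;	/* flood all ports */
}

static struct netmap_bdg_ops my_bdg_ops = { my_bdg_lookup, NULL, NULL };
/* ... error = netmap_bdg_ctl(nmr, &my_bdg_ops); ... */
#endif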
#else /* !WITH_VALE */
#define	netmap_get_bdg_na(_1, _2, _3)	0
#define netmap_init_bridges(_1)
#define	netmap_bdg_ctl(_1, _2)	EINVAL
#endif /* !WITH_VALE */

#ifdef WITH_PIPES
/* max number of pipes per device */
#define NM_MAXPIPES	64	/* XXX how many? */
/* in case of no error, returns the actual number of pipes in nmr->nr_arg1 */
int netmap_pipe_alloc(struct netmap_adapter *, struct nmreq *nmr);
void netmap_pipe_dealloc(struct netmap_adapter *);
int netmap_get_pipe_na(struct nmreq *nmr, struct netmap_adapter **na, int create);
#else /* !WITH_PIPES */
#define NM_MAXPIPES	0
#define netmap_pipe_alloc(_1, _2)	EOPNOTSUPP
#define netmap_pipe_dealloc(_1)
#define netmap_get_pipe_na(_1, _2, _3)	0
#endif

#ifdef WITH_MONITOR
int netmap_get_monitor_na(struct nmreq *nmr, struct netmap_adapter **na, int create);
#else
#define netmap_get_monitor_na(_1, _2, _3) 0
#endif

/* Various prototypes */
int netmap_poll(struct cdev *dev, int events, struct thread *td);
int netmap_init(void);
void netmap_fini(void);
int netmap_get_memory(struct netmap_priv_d* p);
void netmap_dtor(void *data);
int netmap_dtor_locked(struct netmap_priv_d *priv);

int netmap_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td);

/* netmap_adapter creation/destruction */

// #define NM_DEBUG_PUTGET 1

#ifdef NM_DEBUG_PUTGET

#define NM_DBG(f) __##f

void __netmap_adapter_get(struct netmap_adapter *na);

#define netmap_adapter_get(na)				\
	do {						\
		struct netmap_adapter *__na = na;	\
		D("getting %p:%s (%d)", __na, (__na)->name, (__na)->na_refcount);	\
		__netmap_adapter_get(__na);		\
	} while (0)

int __netmap_adapter_put(struct netmap_adapter *na);

#define netmap_adapter_put(na)				\
	({						\
		struct netmap_adapter *__na = na;	\
		D("putting %p:%s (%d)", __na, (__na)->name, (__na)->na_refcount);	\
		__netmap_adapter_put(__na);		\
	})

#else /* !NM_DEBUG_PUTGET */

#define NM_DBG(f) f
void netmap_adapter_get(struct netmap_adapter *na);
int netmap_adapter_put(struct netmap_adapter *na);

#endif /* !NM_DEBUG_PUTGET */


/*
 * module variables
 */
#define NETMAP_BUF_BASE(na)	((na)->na_lut[0].vaddr)
#define NETMAP_BUF_SIZE(na)	((na)->na_lut_objsize)
extern int netmap_mitigate;	// XXX not really used
extern int netmap_no_pendintr;
extern int netmap_verbose;	// XXX debugging
enum {                                  /* verbose flags */
	NM_VERB_ON = 1,                 /* generic verbose */
	NM_VERB_HOST = 0x2,             /* verbose host stack */
	NM_VERB_RXSYNC = 0x10,          /* verbose on rxsync/txsync */
	NM_VERB_TXSYNC = 0x20,
	NM_VERB_RXINTR = 0x100,         /* verbose on rx/tx intr (driver) */
	NM_VERB_TXINTR = 0x200,
	NM_VERB_NIC_RXSYNC = 0x1000,    /* verbose on rx/tx intr (driver) */
	NM_VERB_NIC_TXSYNC = 0x2000,
};

extern int netmap_txsync_retry;
extern int netmap_generic_mit;
extern int netmap_generic_ringsize;
extern int netmap_generic_rings;

/*
 * NA returns a pointer to the struct netmap_adapter from the ifp,
 * WNA is used to write it.
 */
#define	NA(_ifp)	((struct netmap_adapter *)WNA(_ifp))
/*
 * Macros to determine if an interface is netmap capable or netmap enabled.
 * See the magic field in struct netmap_adapter.
 */
#ifdef __FreeBSD__
/*
 * on FreeBSD just use if_capabilities and if_capenable.
 */
#define NETMAP_CAPABLE(ifp)	(NA(ifp) &&		\
	(ifp)->if_capabilities & IFCAP_NETMAP )

#define	NETMAP_SET_CAPABLE(ifp)				\
	(ifp)->if_capabilities |= IFCAP_NETMAP

#else	/* linux */

/*
 * on linux:
 * we check if NA(ifp) is set and its first element has a related
 * magic value. The capenable is within the struct netmap_adapter.
 */
#define	NETMAP_MAGIC	0x52697a7a

#define NETMAP_CAPABLE(ifp)	(NA(ifp) &&		\
	((uint32_t)(uintptr_t)NA(ifp) ^ NA(ifp)->magic) == NETMAP_MAGIC )

#define	NETMAP_SET_CAPABLE(ifp)				\
	NA(ifp)->magic = ((uint32_t)(uintptr_t)NA(ifp)) ^ NETMAP_MAGIC

#endif	/* linux */

#ifdef __FreeBSD__

/* Assigns the device IOMMU domain to an allocator.
 * Returns -ENOMEM in case the domain is different */
#define nm_iommu_group_id(dev) (0)

/* Callback invoked by the dma machinery after a successful dmamap_load */
static void netmap_dmamap_cb(__unused void *arg,
    __unused bus_dma_segment_t * segs, __unused int nseg, __unused int error)
{
}

/* bus_dmamap_load wrapper: call aforementioned function if map != NULL.
 * XXX can we do it without a callback ?
 */
static inline void
netmap_load_map(struct netmap_adapter *na,
	bus_dma_tag_t tag, bus_dmamap_t map, void *buf)
{
	if (map)
		bus_dmamap_load(tag, map, buf, NETMAP_BUF_SIZE(na),
		    netmap_dmamap_cb, NULL, BUS_DMA_NOWAIT);
}

static inline void
netmap_unload_map(struct netmap_adapter *na,
	bus_dma_tag_t tag, bus_dmamap_t map)
{
	if (map)
		bus_dmamap_unload(tag, map);
}

/* update the map when a buffer changes. */
static inline void
netmap_reload_map(struct netmap_adapter *na,
	bus_dma_tag_t tag, bus_dmamap_t map, void *buf)
{
	if (map) {
		bus_dmamap_unload(tag, map);
		bus_dmamap_load(tag, map, buf, NETMAP_BUF_SIZE(na),
		    netmap_dmamap_cb, NULL, BUS_DMA_NOWAIT);
	}
}

#else /* linux */

int nm_iommu_group_id(bus_dma_tag_t dev);
extern size_t netmap_mem_get_bufsize(struct netmap_mem_d *);
#include <linux/dma-mapping.h>

static inline void
netmap_load_map(struct netmap_adapter *na,
	bus_dma_tag_t tag, bus_dmamap_t map, void *buf)
{
	if (map) {
		*map = dma_map_single(na->pdev, buf, netmap_mem_get_bufsize(na->nm_mem),
				DMA_BIDIRECTIONAL);
	}
}

static inline void
netmap_unload_map(struct netmap_adapter *na,
	bus_dma_tag_t tag, bus_dmamap_t map)
{
	u_int sz = netmap_mem_get_bufsize(na->nm_mem);

	if (*map) {
		dma_unmap_single(na->pdev, *map, sz,
				DMA_BIDIRECTIONAL);
	}
}

static inline void
netmap_reload_map(struct netmap_adapter *na,
	bus_dma_tag_t tag, bus_dmamap_t map, void *buf)
{
	u_int sz = netmap_mem_get_bufsize(na->nm_mem);

	if (*map) {
		dma_unmap_single(na->pdev, *map, sz,
				DMA_BIDIRECTIONAL);
	}

	*map = dma_map_single(na->pdev, buf, sz,
				DMA_BIDIRECTIONAL);
}

/*
 * XXX How do we redefine these functions:
 *
 * on linux we need
 *	dma_map_single(&pdev->dev, virt_addr, len, direction)
 *	dma_unmap_single(&adapter->pdev->dev, phys_addr, len, direction)
 * The len can be implicit (on netmap it is NETMAP_BUF_SIZE)
 * unfortunately the direction is not, so we need to change
 * something to have a cross API
 */
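/*
 * Usage sketch (illustrative only, not compiled): drivers call
 * netmap_reload_map() when userspace has swapped the buffer attached
 * to a slot (NS_BUF_CHANGED). 'txr' and 'txbuf' are hypothetical
 * driver state.
 */
#if 0
	if (slot->flags & NS_BUF_CHANGED) {
		/* buffer has changed, reload the DMA map */
		netmap_reload_map(na, txr->txtag, txbuf->map, addr);
		slot->flags &= ~NS_BUF_CHANGED;
	}
#endif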
#if 0
	struct e1000_buffer *buffer_info = &tx_ring->buffer_info[l];
	/* set time_stamp *before* dma to help avoid a possible race */
	buffer_info->time_stamp = jiffies;
	buffer_info->mapped_as_page = false;
	buffer_info->length = len;
	//buffer_info->next_to_watch = l;
	/* reload dma map */
	dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
			NETMAP_BUF_SIZE, DMA_TO_DEVICE);
	buffer_info->dma = dma_map_single(&adapter->pdev->dev,
			addr, NETMAP_BUF_SIZE, DMA_TO_DEVICE);

	if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
		D("dma mapping error");
		/* goto dma_error; See e1000_put_txbuf() */
		/* XXX reset */
	}
	tx_desc->buffer_addr = htole64(buffer_info->dma); //XXX

#endif

/*
 * The bus_dmamap_sync() can be one of wmb() or rmb() depending on direction.
 */
#define bus_dmamap_sync(_a, _b, _c)

#endif /* linux */


/*
 * functions to map NIC to KRING indexes (n2k) and vice versa (k2n)
 */
static inline int
netmap_idx_n2k(struct netmap_kring *kr, int idx)
{
	int n = kr->nkr_num_slots;
	idx += kr->nkr_hwofs;
	if (idx < 0)
		return idx + n;
	else if (idx < n)
		return idx;
	else
		return idx - n;
}


static inline int
netmap_idx_k2n(struct netmap_kring *kr, int idx)
{
	int n = kr->nkr_num_slots;
	idx -= kr->nkr_hwofs;
	if (idx < 0)
		return idx + n;
	else if (idx < n)
		return idx;
	else
		return idx - n;
}


/* Entries of the look-up table. */
struct lut_entry {
	void *vaddr;		/* virtual address. */
	vm_paddr_t paddr;	/* physical address. */
};

struct netmap_obj_pool;

/*
 * NMB returns the virtual address of a buffer (buffer 0 on bad index)
 * PNMB also fills the physical address
 */
static inline void *
NMB(struct netmap_adapter *na, struct netmap_slot *slot)
{
	struct lut_entry *lut = na->na_lut;
	uint32_t i = slot->buf_idx;
	return (unlikely(i >= na->na_lut_objtotal)) ?
		lut[0].vaddr : lut[i].vaddr;
}

static inline void *
PNMB(struct netmap_adapter *na, struct netmap_slot *slot, uint64_t *pp)
{
	uint32_t i = slot->buf_idx;
	struct lut_entry *lut = na->na_lut;
	void *ret = (i >= na->na_lut_objtotal) ? lut[0].vaddr : lut[i].vaddr;

	*pp = (i >= na->na_lut_objtotal) ? lut[0].paddr : lut[i].paddr;
	return ret;
}

/* Generic version of NMB, which uses device-specific memory. */



void netmap_txsync_to_host(struct netmap_adapter *na);
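/*
 * Usage sketch (illustrative only, not compiled): an rxsync
 * implementation translating the NIC index to a kring index and
 * filling in the slot. 'nic_i' and 'rxlen' are hypothetical.
 */
#if 0
	u_int nm_i = netmap_idx_n2k(kring, nic_i);
	struct netmap_slot *slot = &kring->ring->slot[nm_i];
	void *addr = NMB(na, slot);	/* buffer address, if inspection is needed */

	slot->len = rxlen;
	slot->flags = kring->nkr_slot_flags;
#endif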
/*
 * Structure associated to each thread which registered an interface.
 *
 * The first 4 fields of this structure are written by NIOCREGIF and
 * read by poll() and NIOC?XSYNC.
 *
 * There is low contention among writers (a correct user program
 * should have none) and among writers and readers, so we use a
 * single global lock to protect the structure initialization;
 * since initialization involves the allocation of memory,
 * we reuse the memory allocator lock.
 *
 * Read access to the structure is lock free. Readers must check that
 * np_nifp is not NULL before using the other fields.
 * If np_nifp is NULL initialization has not been performed,
 * so they should return an error to userspace.
 *
 * The ref_done field is used to regulate access to the refcount in the
 * memory allocator. The refcount must be incremented at most once for
 * each open("/dev/netmap"). The increment is performed by the first
 * function that calls netmap_get_memory() (currently called by
 * mmap(), NIOCGINFO and NIOCREGIF).
 * If the refcount is incremented, it is then decremented when the
 * private structure is destroyed.
 */
struct netmap_priv_d {
	struct netmap_if * volatile np_nifp;	/* netmap if descriptor. */

	struct netmap_adapter	*np_na;
	uint32_t	np_flags;	/* from the ioctl */
	u_int		np_txqfirst, np_txqlast; /* range of tx rings to scan */
	u_int		np_rxqfirst, np_rxqlast; /* range of rx rings to scan */
	uint16_t	np_txpoll;	/* XXX and also np_rxpoll ? */

	struct netmap_mem_d	*np_mref;	/* use with NMG_LOCK held */
	/* np_refcount is only used on FreeBSD */
	int		np_refcount;	/* use with NMG_LOCK held */

	/* pointers to the selinfo to be used for selrecord.
	 * Either the local or the global one depending on the
	 * number of rings.
	 */
	NM_SELINFO_T *np_rxsi, *np_txsi;
	struct thread	*np_td;		/* kqueue, just debugging */
};

#ifdef WITH_MONITOR

struct netmap_monitor_adapter {
	struct netmap_adapter up;

	struct netmap_priv_d priv;
	uint32_t flags;
};

#endif /* WITH_MONITOR */


#ifdef WITH_GENERIC
/*
 * generic netmap emulation for devices that do not have
 * native netmap support.
 */
int generic_netmap_attach(struct ifnet *ifp);

int netmap_catch_rx(struct netmap_adapter *na, int intercept);
void generic_rx_handler(struct ifnet *ifp, struct mbuf *m);
void netmap_catch_tx(struct netmap_generic_adapter *na, int enable);
int generic_xmit_frame(struct ifnet *ifp, struct mbuf *m, void *addr, u_int len, u_int ring_nr);
int generic_find_num_desc(struct ifnet *ifp, u_int *tx, u_int *rx);
void generic_find_num_queues(struct ifnet *ifp, u_int *txq, u_int *rxq);

//#define RATE_GENERIC  /* Enables communication statistics for generic. */
#ifdef RATE_GENERIC
void generic_rate(int txp, int txs, int txi, int rxp, int rxs, int rxi);
#else
#define generic_rate(txp, txs, txi, rxp, rxs, rxi)
#endif

/*
 * netmap_mitigation API. This is used by the generic adapter
 * to reduce the number of interrupt requests/selwakeup
 * to clients on incoming packets.
 */
void netmap_mitigation_init(struct nm_generic_mit *mit, int idx,
                                struct netmap_adapter *na);
void netmap_mitigation_start(struct nm_generic_mit *mit);
void netmap_mitigation_restart(struct nm_generic_mit *mit);
int netmap_mitigation_active(struct nm_generic_mit *mit);
void netmap_mitigation_cleanup(struct nm_generic_mit *mit);
#endif /* WITH_GENERIC */
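/*
 * Usage sketch (illustrative only, not compiled): a simplified view
 * of rx mitigation. If the timer is not running we notify userspace
 * immediately and arm the timer; otherwise we only record pending
 * work and let the timer callback issue the notification.
 */
#if 0
	if (!netmap_mitigation_active(mit)) {
		netmap_rx_irq(na->ifp, ring_nr, &work_done);	/* notify now */
		netmap_mitigation_start(mit);			/* arm timer */
	} else {
		mit->mit_pending = 1;	/* coalesce with the running timer */
	}
#endif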
/* Shared declarations for the VALE switch. */

/*
 * Each transmit queue accumulates a batch of packets into
 * a structure before forwarding. Packets to the same
 * destination are put in a list using ft_next as a link field.
 * ft_frags and ft_next are valid only on the first fragment.
 */
struct nm_bdg_fwd {	/* forwarding entry for a bridge */
	void *ft_buf;		/* netmap or indirect buffer */
	uint8_t ft_frags;	/* how many fragments (only on 1st frag) */
	uint8_t _ft_port;	/* dst port (unused) */
	uint16_t ft_flags;	/* flags, e.g. indirect */
	uint16_t ft_len;	/* src fragment len */
	uint16_t ft_next;	/* next packet to same destination */
};

/* struct 'virtio_net_hdr' from linux. */
struct nm_vnet_hdr {
#define VIRTIO_NET_HDR_F_NEEDS_CSUM	1	/* Use csum_start, csum_offset */
#define VIRTIO_NET_HDR_F_DATA_VALID	2	/* Csum is valid */
	uint8_t flags;
#define VIRTIO_NET_HDR_GSO_NONE		0	/* Not a GSO frame */
#define VIRTIO_NET_HDR_GSO_TCPV4	1	/* GSO frame, IPv4 TCP (TSO) */
#define VIRTIO_NET_HDR_GSO_UDP		3	/* GSO frame, IPv4 UDP (UFO) */
#define VIRTIO_NET_HDR_GSO_TCPV6	4	/* GSO frame, IPv6 TCP */
#define VIRTIO_NET_HDR_GSO_ECN		0x80	/* TCP has ECN set */
	uint8_t gso_type;
	uint16_t hdr_len;
	uint16_t gso_size;
	uint16_t csum_start;
	uint16_t csum_offset;
};

#define WORST_CASE_GSO_HEADER	(14+40+60)  /* IPv6 + TCP */

/* Private definitions for IPv4, IPv6, UDP and TCP headers. */

struct nm_iphdr {
	uint8_t		version_ihl;
	uint8_t		tos;
	uint16_t	tot_len;
	uint16_t	id;
	uint16_t	frag_off;
	uint8_t		ttl;
	uint8_t		protocol;
	uint16_t	check;
	uint32_t	saddr;
	uint32_t	daddr;
	/* The options start here. */
};

struct nm_tcphdr {
	uint16_t	source;
	uint16_t	dest;
	uint32_t	seq;
	uint32_t	ack_seq;
	uint8_t		doff;	/* Data offset + Reserved */
	uint8_t		flags;
	uint16_t	window;
	uint16_t	check;
	uint16_t	urg_ptr;
};

struct nm_udphdr {
	uint16_t	source;
	uint16_t	dest;
	uint16_t	len;
	uint16_t	check;
};

struct nm_ipv6hdr {
	uint8_t		priority_version;
	uint8_t		flow_lbl[3];

	uint16_t	payload_len;
	uint8_t		nexthdr;
	uint8_t		hop_limit;

	uint8_t		saddr[16];
	uint8_t		daddr[16];
};

/* Type used to store a checksum (in host byte order) that hasn't been
 * folded yet.
 */
#define rawsum_t uint32_t

rawsum_t nm_csum_raw(uint8_t *data, size_t len, rawsum_t cur_sum);
uint16_t nm_csum_ipv4(struct nm_iphdr *iph);
void nm_csum_tcpudp_ipv4(struct nm_iphdr *iph, void *data,
		      size_t datalen, uint16_t *check);
void nm_csum_tcpudp_ipv6(struct nm_ipv6hdr *ip6h, void *data,
		      size_t datalen, uint16_t *check);
uint16_t nm_csum_fold(rawsum_t cur_sum);

void bdg_mismatch_datapath(struct netmap_vp_adapter *na,
			   struct netmap_vp_adapter *dst_na,
			   struct nm_bdg_fwd *ft_p, struct netmap_ring *ring,
			   u_int *j, u_int lim, u_int *howmany);

/* persistent virtual port routines */
int nm_vi_persist(const char *, struct ifnet **);
void nm_vi_detach(struct ifnet *);
void nm_vi_init_index(void);

#endif /* _NET_NETMAP_KERN_H_ */