168b8534bSLuigi Rizzo /* 217885a7bSLuigi Rizzo * Copyright (C) 2011-2014 Matteo Landi, Luigi Rizzo. All rights reserved. 317885a7bSLuigi Rizzo * Copyright (C) 2013-2014 Universita` di Pisa. All rights reserved. 468b8534bSLuigi Rizzo * 568b8534bSLuigi Rizzo * Redistribution and use in source and binary forms, with or without 668b8534bSLuigi Rizzo * modification, are permitted provided that the following conditions 768b8534bSLuigi Rizzo * are met: 868b8534bSLuigi Rizzo * 1. Redistributions of source code must retain the above copyright 968b8534bSLuigi Rizzo * notice, this list of conditions and the following disclaimer. 1068b8534bSLuigi Rizzo * 2. Redistributions in binary form must reproduce the above copyright 1168b8534bSLuigi Rizzo * notice, this list of conditions and the following disclaimer in the 1268b8534bSLuigi Rizzo * documentation and/or other materials provided with the distribution. 1368b8534bSLuigi Rizzo * 1468b8534bSLuigi Rizzo * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 1568b8534bSLuigi Rizzo * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 1668b8534bSLuigi Rizzo * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 1768b8534bSLuigi Rizzo * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 1868b8534bSLuigi Rizzo * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 1968b8534bSLuigi Rizzo * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 2068b8534bSLuigi Rizzo * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 2168b8534bSLuigi Rizzo * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 2268b8534bSLuigi Rizzo * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 2368b8534bSLuigi Rizzo * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 2468b8534bSLuigi Rizzo * SUCH DAMAGE. 
2568b8534bSLuigi Rizzo */ 2668b8534bSLuigi Rizzo 2768b8534bSLuigi Rizzo /* 2868b8534bSLuigi Rizzo * $FreeBSD$ 2968b8534bSLuigi Rizzo * 3068b8534bSLuigi Rizzo * The header contains the definitions of constants and function 3168b8534bSLuigi Rizzo * prototypes used only in kernelspace. 3268b8534bSLuigi Rizzo */ 3368b8534bSLuigi Rizzo 3468b8534bSLuigi Rizzo #ifndef _NET_NETMAP_KERN_H_ 3568b8534bSLuigi Rizzo #define _NET_NETMAP_KERN_H_ 3668b8534bSLuigi Rizzo 37f9790aebSLuigi Rizzo #define WITH_VALE // comment out to disable VALE support 38f0ea3689SLuigi Rizzo #define WITH_PIPES 39*4bf50f18SLuigi Rizzo #define WITH_MONITOR 40f9790aebSLuigi Rizzo 411a26580eSLuigi Rizzo #if defined(__FreeBSD__) 42d4b42e08SLuigi Rizzo 43ce3ee1e7SLuigi Rizzo #define likely(x) __builtin_expect((long)!!(x), 1L) 44ce3ee1e7SLuigi Rizzo #define unlikely(x) __builtin_expect((long)!!(x), 0L) 45f196ce38SLuigi Rizzo 461a26580eSLuigi Rizzo #define NM_LOCK_T struct mtx 47f9790aebSLuigi Rizzo #define NMG_LOCK_T struct mtx 48f9790aebSLuigi Rizzo #define NMG_LOCK_INIT() mtx_init(&netmap_global_lock, \ 49f9790aebSLuigi Rizzo "netmap global lock", NULL, MTX_DEF) 50f9790aebSLuigi Rizzo #define NMG_LOCK_DESTROY() mtx_destroy(&netmap_global_lock) 51f9790aebSLuigi Rizzo #define NMG_LOCK() mtx_lock(&netmap_global_lock) 52f9790aebSLuigi Rizzo #define NMG_UNLOCK() mtx_unlock(&netmap_global_lock) 53f9790aebSLuigi Rizzo #define NMG_LOCK_ASSERT() mtx_assert(&netmap_global_lock, MA_OWNED) 54f9790aebSLuigi Rizzo 551a26580eSLuigi Rizzo #define NM_SELINFO_T struct selinfo 561a26580eSLuigi Rizzo #define MBUF_LEN(m) ((m)->m_pkthdr.len) 57f9790aebSLuigi Rizzo #define MBUF_IFP(m) ((m)->m_pkthdr.rcvif) 5817885a7bSLuigi Rizzo #define NM_SEND_UP(ifp, m) ((NA(ifp))->if_input)(ifp, m) 59d4b42e08SLuigi Rizzo 60f9790aebSLuigi Rizzo #define NM_ATOMIC_T volatile int // XXX ? 
61f9790aebSLuigi Rizzo /* atomic operations */ 62f9790aebSLuigi Rizzo #include <machine/atomic.h> 63f9790aebSLuigi Rizzo #define NM_ATOMIC_TEST_AND_SET(p) (!atomic_cmpset_acq_int((p), 0, 1)) 64f9790aebSLuigi Rizzo #define NM_ATOMIC_CLEAR(p) atomic_store_rel_int((p), 0) 65f9790aebSLuigi Rizzo 6646aa1303SLuigi Rizzo #if __FreeBSD_version >= 1100005 6746aa1303SLuigi Rizzo struct netmap_adapter *netmap_getna(if_t ifp); 6846aa1303SLuigi Rizzo #endif 69f9790aebSLuigi Rizzo 70*4bf50f18SLuigi Rizzo #if __FreeBSD_version >= 1100027 71*4bf50f18SLuigi Rizzo #define GET_MBUF_REFCNT(m) ((m)->m_ext.ext_cnt ? *((m)->m_ext.ext_cnt) : -1) 72*4bf50f18SLuigi Rizzo #define SET_MBUF_REFCNT(m, x) *((m)->m_ext.ext_cnt) = x 73*4bf50f18SLuigi Rizzo #define PNT_MBUF_REFCNT(m) ((m)->m_ext.ext_cnt) 74*4bf50f18SLuigi Rizzo #else 75*4bf50f18SLuigi Rizzo #define GET_MBUF_REFCNT(m) ((m)->m_ext.ref_cnt ? *((m)->m_ext.ref_cnt) : -1) 76*4bf50f18SLuigi Rizzo #define SET_MBUF_REFCNT(m, x) *((m)->m_ext.ref_cnt) = x 77*4bf50f18SLuigi Rizzo #define PNT_MBUF_REFCNT(m) ((m)->m_ext.ref_cnt) 78*4bf50f18SLuigi Rizzo #endif 79*4bf50f18SLuigi Rizzo 80f9790aebSLuigi Rizzo MALLOC_DECLARE(M_NETMAP); 81f9790aebSLuigi Rizzo 82f9790aebSLuigi Rizzo // XXX linux struct, not used in FreeBSD 83f9790aebSLuigi Rizzo struct net_device_ops { 84f9790aebSLuigi Rizzo }; 85*4bf50f18SLuigi Rizzo struct ethtool_ops { 86*4bf50f18SLuigi Rizzo }; 87f9790aebSLuigi Rizzo struct hrtimer { 88f9790aebSLuigi Rizzo }; 89ce3ee1e7SLuigi Rizzo 9064ae02c3SLuigi Rizzo #elif defined (linux) 91d4b42e08SLuigi Rizzo 922579e2d7SLuigi Rizzo #define NM_LOCK_T safe_spinlock_t // see bsd_glue.h 931a26580eSLuigi Rizzo #define NM_SELINFO_T wait_queue_head_t 941a26580eSLuigi Rizzo #define MBUF_LEN(m) ((m)->len) 95f9790aebSLuigi Rizzo #define MBUF_IFP(m) ((m)->dev) 9617885a7bSLuigi Rizzo #define NM_SEND_UP(ifp, m) \ 9717885a7bSLuigi Rizzo do { \ 98*4bf50f18SLuigi Rizzo m->priority = NM_MAGIC_PRIORITY_RX; \ 9917885a7bSLuigi Rizzo netif_rx(m); \ 
10017885a7bSLuigi Rizzo } while (0) 101f196ce38SLuigi Rizzo 102ce3ee1e7SLuigi Rizzo #define NM_ATOMIC_T volatile long unsigned int 103ce3ee1e7SLuigi Rizzo 104f9790aebSLuigi Rizzo // XXX a mtx would suffice here too 20130404 gl 105f9790aebSLuigi Rizzo #define NMG_LOCK_T struct semaphore 106f9790aebSLuigi Rizzo #define NMG_LOCK_INIT() sema_init(&netmap_global_lock, 1) 107f9790aebSLuigi Rizzo #define NMG_LOCK_DESTROY() 108f9790aebSLuigi Rizzo #define NMG_LOCK() down(&netmap_global_lock) 109f9790aebSLuigi Rizzo #define NMG_UNLOCK() up(&netmap_global_lock) 110f9790aebSLuigi Rizzo #define NMG_LOCK_ASSERT() // XXX to be completed 111f9790aebSLuigi Rizzo 112f196ce38SLuigi Rizzo #ifndef DEV_NETMAP 113f196ce38SLuigi Rizzo #define DEV_NETMAP 114ce3ee1e7SLuigi Rizzo #endif /* DEV_NETMAP */ 115f196ce38SLuigi Rizzo 116f196ce38SLuigi Rizzo #elif defined (__APPLE__) 117d4b42e08SLuigi Rizzo 1188241616dSLuigi Rizzo #warning apple support is incomplete. 119f196ce38SLuigi Rizzo #define likely(x) __builtin_expect(!!(x), 1) 120f196ce38SLuigi Rizzo #define unlikely(x) __builtin_expect(!!(x), 0) 121f196ce38SLuigi Rizzo #define NM_LOCK_T IOLock * 122f196ce38SLuigi Rizzo #define NM_SELINFO_T struct selinfo 123f196ce38SLuigi Rizzo #define MBUF_LEN(m) ((m)->m_pkthdr.len) 124f196ce38SLuigi Rizzo #define NM_SEND_UP(ifp, m) ((ifp)->if_input)(ifp, m) 125f196ce38SLuigi Rizzo 1261a26580eSLuigi Rizzo #else 127d4b42e08SLuigi Rizzo 1281a26580eSLuigi Rizzo #error unsupported platform 129d4b42e08SLuigi Rizzo 130d4b42e08SLuigi Rizzo #endif /* end - platform-specific code */ 1311a26580eSLuigi Rizzo 13268b8534bSLuigi Rizzo #define ND(format, ...) 13368b8534bSLuigi Rizzo #define D(format, ...) 
\ 13468b8534bSLuigi Rizzo do { \ 13568b8534bSLuigi Rizzo struct timeval __xxts; \ 13668b8534bSLuigi Rizzo microtime(&__xxts); \ 13717885a7bSLuigi Rizzo printf("%03d.%06d [%4d] %-25s " format "\n", \ 13868b8534bSLuigi Rizzo (int)__xxts.tv_sec % 1000, (int)__xxts.tv_usec, \ 13917885a7bSLuigi Rizzo __LINE__, __FUNCTION__, ##__VA_ARGS__); \ 14068b8534bSLuigi Rizzo } while (0) 14168b8534bSLuigi Rizzo 1428241616dSLuigi Rizzo /* rate limited, lps indicates how many per second */ 1438241616dSLuigi Rizzo #define RD(lps, format, ...) \ 1448241616dSLuigi Rizzo do { \ 1458241616dSLuigi Rizzo static int t0, __cnt; \ 1468241616dSLuigi Rizzo if (t0 != time_second) { \ 1478241616dSLuigi Rizzo t0 = time_second; \ 1488241616dSLuigi Rizzo __cnt = 0; \ 1498241616dSLuigi Rizzo } \ 1508241616dSLuigi Rizzo if (__cnt++ < lps) \ 1518241616dSLuigi Rizzo D(format, ##__VA_ARGS__); \ 1528241616dSLuigi Rizzo } while (0) 1538241616dSLuigi Rizzo 15468b8534bSLuigi Rizzo struct netmap_adapter; 155f18be576SLuigi Rizzo struct nm_bdg_fwd; 156f18be576SLuigi Rizzo struct nm_bridge; 157f18be576SLuigi Rizzo struct netmap_priv_d; 15868b8534bSLuigi Rizzo 159ce3ee1e7SLuigi Rizzo const char *nm_dump_buf(char *p, int len, int lim, char *dst); 160ce3ee1e7SLuigi Rizzo 161f9790aebSLuigi Rizzo #include "netmap_mbq.h" 162f9790aebSLuigi Rizzo 163f9790aebSLuigi Rizzo extern NMG_LOCK_T netmap_global_lock; 164f9790aebSLuigi Rizzo 16568b8534bSLuigi Rizzo /* 16664ae02c3SLuigi Rizzo * private, kernel view of a ring. Keeps track of the status of 16764ae02c3SLuigi Rizzo * a ring across system calls. 16864ae02c3SLuigi Rizzo * 16964ae02c3SLuigi Rizzo * nr_hwcur index of the next buffer to refill. 17017885a7bSLuigi Rizzo * It corresponds to ring->head 17117885a7bSLuigi Rizzo * at the time the system call returns. 17264ae02c3SLuigi Rizzo * 17317885a7bSLuigi Rizzo * nr_hwtail index of the first buffer owned by the kernel. 
17417885a7bSLuigi Rizzo * On RX, hwcur->hwtail are receive buffers 17517885a7bSLuigi Rizzo * not yet released. hwcur is advanced following 17617885a7bSLuigi Rizzo * ring->head, hwtail is advanced on incoming packets, 17717885a7bSLuigi Rizzo * and a wakeup is generated when hwtail passes ring->cur 17817885a7bSLuigi Rizzo * On TX, hwcur->rcur have been filled by the sender 17917885a7bSLuigi Rizzo * but not sent yet to the NIC; rcur->hwtail are available 18017885a7bSLuigi Rizzo * for new transmissions, and hwtail->hwcur-1 are pending 18117885a7bSLuigi Rizzo * transmissions not yet acknowledged. 18268b8534bSLuigi Rizzo * 1831a26580eSLuigi Rizzo * The indexes in the NIC and netmap rings are offset by nkr_hwofs slots. 18468b8534bSLuigi Rizzo * This is so that, on a reset, buffers owned by userspace are not 18568b8534bSLuigi Rizzo * modified by the kernel. In particular: 18617885a7bSLuigi Rizzo * RX rings: the next empty buffer (hwtail + hwofs) coincides with 18768b8534bSLuigi Rizzo * the next empty buffer as known by the hardware (next_to_check or so). 18868b8534bSLuigi Rizzo * TX rings: hwcur + hwofs coincides with next_to_send 1891dce924dSLuigi Rizzo * 1901dce924dSLuigi Rizzo * For received packets, slot->flags is set to nkr_slot_flags 1911dce924dSLuigi Rizzo * so we can provide a proper initial value (e.g. set NS_FORWARD 1921dce924dSLuigi Rizzo * when operating in 'transparent' mode). 193ce3ee1e7SLuigi Rizzo * 194ce3ee1e7SLuigi Rizzo * The following fields are used to implement lock-free copy of packets 195ce3ee1e7SLuigi Rizzo * from input to output ports in VALE switch: 196ce3ee1e7SLuigi Rizzo * nkr_hwlease buffer after the last one being copied. 197ce3ee1e7SLuigi Rizzo * A writer in nm_bdg_flush reserves N buffers 198ce3ee1e7SLuigi Rizzo * from nr_hwlease, advances it, then does the 199ce3ee1e7SLuigi Rizzo * copy outside the lock. 
200ce3ee1e7SLuigi Rizzo 	 *	In RX rings (used for VALE ports), 20117885a7bSLuigi Rizzo 	 *		nkr_hwtail <= nkr_hwlease < nkr_hwcur+N-1 202ce3ee1e7SLuigi Rizzo 	 *	In TX rings (used for NIC or host stack ports) 20317885a7bSLuigi Rizzo 	 *		nkr_hwcur <= nkr_hwlease < nkr_hwtail 204ce3ee1e7SLuigi Rizzo 	 *	nkr_leases	array of nkr_num_slots where writers can report 205ce3ee1e7SLuigi Rizzo 	 *			completion of their block. NR_NOSLOT (~0) indicates 206ce3ee1e7SLuigi Rizzo 	 *			that the writer has not finished yet 207ce3ee1e7SLuigi Rizzo 	 *	nkr_lease_idx	index of next free slot in nr_leases, to be assigned 208ce3ee1e7SLuigi Rizzo 	 * 209ce3ee1e7SLuigi Rizzo 	 * The kring is manipulated by txsync/rxsync and generic netmap function. 21017885a7bSLuigi Rizzo 	 * 21117885a7bSLuigi Rizzo 	 * Concurrent rxsync or txsync on the same ring are prevented 21289cc2556SLuigi Rizzo 	 * by nm_kr_(try)lock() which in turn uses nr_busy. This is all we need 21317885a7bSLuigi Rizzo 	 * for NIC rings, and for TX rings attached to the host stack. 21417885a7bSLuigi Rizzo 	 * 21517885a7bSLuigi Rizzo 	 * RX rings attached to the host stack use an mbq (rx_queue) on both 21617885a7bSLuigi Rizzo 	 * rxsync_from_host() and netmap_transmit(). The mbq is protected 21717885a7bSLuigi Rizzo 	 * by its internal lock. 21817885a7bSLuigi Rizzo 	 * 219*4bf50f18SLuigi Rizzo 	 * RX rings attached to the VALE switch are accessed by both senders 22017885a7bSLuigi Rizzo 	 * and receiver. They are protected through the q_lock on the RX ring. 22168b8534bSLuigi Rizzo 	 */ 22268b8534bSLuigi Rizzo struct netmap_kring { 22368b8534bSLuigi Rizzo 	struct netmap_ring	*ring; 22417885a7bSLuigi Rizzo 225ce3ee1e7SLuigi Rizzo 	uint32_t	nr_hwcur; 22617885a7bSLuigi Rizzo 	uint32_t	nr_hwtail; 22717885a7bSLuigi Rizzo 22817885a7bSLuigi Rizzo 	/* 22917885a7bSLuigi Rizzo 	 * Copies of values in user rings, so we do not need to look 23017885a7bSLuigi Rizzo 	 * at the ring (which could be modified). These are set in the 23117885a7bSLuigi Rizzo 	 * *sync_prologue()/finalize() routines.
23217885a7bSLuigi Rizzo */ 23317885a7bSLuigi Rizzo uint32_t rhead; 23417885a7bSLuigi Rizzo uint32_t rcur; 23517885a7bSLuigi Rizzo uint32_t rtail; 23617885a7bSLuigi Rizzo 237ce3ee1e7SLuigi Rizzo uint32_t nr_kflags; /* private driver flags */ 2382157a17cSLuigi Rizzo #define NKR_PENDINTR 0x1 // Pending interrupt. 239ce3ee1e7SLuigi Rizzo uint32_t nkr_num_slots; 24017885a7bSLuigi Rizzo 24117885a7bSLuigi Rizzo /* 24217885a7bSLuigi Rizzo * On a NIC reset, the NIC ring indexes may be reset but the 24317885a7bSLuigi Rizzo * indexes in the netmap rings remain the same. nkr_hwofs 24417885a7bSLuigi Rizzo * keeps track of the offset between the two. 24517885a7bSLuigi Rizzo */ 24617885a7bSLuigi Rizzo int32_t nkr_hwofs; 24768b8534bSLuigi Rizzo 2481dce924dSLuigi Rizzo uint16_t nkr_slot_flags; /* initial value for flags */ 24917885a7bSLuigi Rizzo 25017885a7bSLuigi Rizzo /* last_reclaim is opaque marker to help reduce the frequency 25117885a7bSLuigi Rizzo * of operations such as reclaiming tx buffers. A possible use 25217885a7bSLuigi Rizzo * is set it to ticks and do the reclaim only once per tick. 25317885a7bSLuigi Rizzo */ 25417885a7bSLuigi Rizzo uint64_t last_reclaim; 25517885a7bSLuigi Rizzo 256ce3ee1e7SLuigi Rizzo 2571a26580eSLuigi Rizzo NM_SELINFO_T si; /* poll/select wait queue */ 258ce3ee1e7SLuigi Rizzo NM_LOCK_T q_lock; /* protects kring and ring. 
*/ 259ce3ee1e7SLuigi Rizzo 	NM_ATOMIC_T	nr_busy;	/* prevent concurrent syscalls */ 260ce3ee1e7SLuigi Rizzo 26117885a7bSLuigi Rizzo 	struct netmap_adapter *na; 26217885a7bSLuigi Rizzo 26317885a7bSLuigi Rizzo 	/* The following fields are for VALE switch support */ 26417885a7bSLuigi Rizzo 	struct nm_bdg_fwd *nkr_ft; 26517885a7bSLuigi Rizzo 	uint32_t	*nkr_leases; 26617885a7bSLuigi Rizzo #define NR_NOSLOT	((uint32_t)~0)	/* used in nkr_*lease* */ 26717885a7bSLuigi Rizzo 	uint32_t	nkr_hwlease; 26817885a7bSLuigi Rizzo 	uint32_t	nkr_lease_idx; 26917885a7bSLuigi Rizzo 270*4bf50f18SLuigi Rizzo 	/* while nkr_stopped is set, no new [tr]xsync operations can 271*4bf50f18SLuigi Rizzo 	 * be started on this kring. 272*4bf50f18SLuigi Rizzo 	 * This is used by netmap_disable_all_rings() 273*4bf50f18SLuigi Rizzo 	 * to find a synchronization point where critical data 274*4bf50f18SLuigi Rizzo 	 * structures pointed to by the kring can be added or removed 275*4bf50f18SLuigi Rizzo 	 */ 276*4bf50f18SLuigi Rizzo 	volatile int nkr_stopped; 277f9790aebSLuigi Rizzo 278f0ea3689SLuigi Rizzo 	/* Support for adapters without native netmap support. 279f9790aebSLuigi Rizzo 	 * On tx rings we preallocate an array of tx buffers 280f9790aebSLuigi Rizzo 	 * (same size as the netmap ring), on rx rings we 281f0ea3689SLuigi Rizzo 	 * store incoming mbufs in a queue that is drained by 282f0ea3689SLuigi Rizzo 	 * a rxsync. 283f9790aebSLuigi Rizzo 	 */ 284f9790aebSLuigi Rizzo 	struct mbuf **tx_pool; 28517885a7bSLuigi Rizzo 	// u_int nr_ntc;		/* Emulation of a next-to-clean RX ring pointer. */ 28617885a7bSLuigi Rizzo 	struct mbq rx_queue;            /* intercepted rx mbufs. */ 28717885a7bSLuigi Rizzo 28817885a7bSLuigi Rizzo 	uint32_t	ring_id;	/* debugging */ 28917885a7bSLuigi Rizzo 	char name[64];			/* diagnostic */ 290f9790aebSLuigi Rizzo 291*4bf50f18SLuigi Rizzo 	/* [tx]sync callback for this kring.
292*4bf50f18SLuigi Rizzo * The default nm_kring_create callback (netmap_krings_create) 293*4bf50f18SLuigi Rizzo * sets the nm_sync callback of each hardware tx(rx) kring to 294*4bf50f18SLuigi Rizzo * the corresponding nm_txsync(nm_rxsync) taken from the 295*4bf50f18SLuigi Rizzo * netmap_adapter; moreover, it sets the sync callback 296*4bf50f18SLuigi Rizzo * of the host tx(rx) ring to netmap_txsync_to_host 297*4bf50f18SLuigi Rizzo * (netmap_rxsync_from_host). 298*4bf50f18SLuigi Rizzo * 299*4bf50f18SLuigi Rizzo * Overrides: the above configuration is not changed by 300*4bf50f18SLuigi Rizzo * any of the nm_krings_create callbacks. 301*4bf50f18SLuigi Rizzo */ 302f0ea3689SLuigi Rizzo int (*nm_sync)(struct netmap_kring *kring, int flags); 303f0ea3689SLuigi Rizzo 304f0ea3689SLuigi Rizzo #ifdef WITH_PIPES 305*4bf50f18SLuigi Rizzo struct netmap_kring *pipe; /* if this is a pipe ring, 306*4bf50f18SLuigi Rizzo * pointer to the other end 307*4bf50f18SLuigi Rizzo */ 308*4bf50f18SLuigi Rizzo struct netmap_ring *save_ring; /* pointer to hidden rings 309*4bf50f18SLuigi Rizzo * (see netmap_pipe.c for details) 310*4bf50f18SLuigi Rizzo */ 311f0ea3689SLuigi Rizzo #endif /* WITH_PIPES */ 312f0ea3689SLuigi Rizzo 313*4bf50f18SLuigi Rizzo #ifdef WITH_MONITOR 314*4bf50f18SLuigi Rizzo /* pointer to the adapter that is monitoring this kring (if any) 315*4bf50f18SLuigi Rizzo */ 316*4bf50f18SLuigi Rizzo struct netmap_monitor_adapter *monitor; 317*4bf50f18SLuigi Rizzo /* 318*4bf50f18SLuigi Rizzo * Monitors work by intercepting the txsync and/or rxsync of the 319*4bf50f18SLuigi Rizzo * monitored krings. This is implemented by replacing 320*4bf50f18SLuigi Rizzo * the nm_sync pointer above and saving the previous 321*4bf50f18SLuigi Rizzo * one in save_sync below. 
322*4bf50f18SLuigi Rizzo */ 323*4bf50f18SLuigi Rizzo int (*save_sync)(struct netmap_kring *kring, int flags); 324*4bf50f18SLuigi Rizzo #endif 3252157a17cSLuigi Rizzo } __attribute__((__aligned__(64))); 32668b8534bSLuigi Rizzo 327ce3ee1e7SLuigi Rizzo 328ce3ee1e7SLuigi Rizzo /* return the next index, with wraparound */ 329ce3ee1e7SLuigi Rizzo static inline uint32_t 330ce3ee1e7SLuigi Rizzo nm_next(uint32_t i, uint32_t lim) 331ce3ee1e7SLuigi Rizzo { 332ce3ee1e7SLuigi Rizzo return unlikely (i == lim) ? 0 : i + 1; 333ce3ee1e7SLuigi Rizzo } 334ce3ee1e7SLuigi Rizzo 33517885a7bSLuigi Rizzo 33617885a7bSLuigi Rizzo /* return the previous index, with wraparound */ 33717885a7bSLuigi Rizzo static inline uint32_t 33817885a7bSLuigi Rizzo nm_prev(uint32_t i, uint32_t lim) 33917885a7bSLuigi Rizzo { 34017885a7bSLuigi Rizzo return unlikely (i == 0) ? lim : i - 1; 34117885a7bSLuigi Rizzo } 34217885a7bSLuigi Rizzo 34317885a7bSLuigi Rizzo 344ce3ee1e7SLuigi Rizzo /* 345ce3ee1e7SLuigi Rizzo * 346ce3ee1e7SLuigi Rizzo * Here is the layout for the Rx and Tx rings. 347ce3ee1e7SLuigi Rizzo 348ce3ee1e7SLuigi Rizzo RxRING TxRING 349ce3ee1e7SLuigi Rizzo 350ce3ee1e7SLuigi Rizzo +-----------------+ +-----------------+ 351ce3ee1e7SLuigi Rizzo | | | | 352ce3ee1e7SLuigi Rizzo |XXX free slot XXX| |XXX free slot XXX| 353ce3ee1e7SLuigi Rizzo +-----------------+ +-----------------+ 35417885a7bSLuigi Rizzo head->| owned by user |<-hwcur | not sent to nic |<-hwcur 35517885a7bSLuigi Rizzo | | | yet | 35617885a7bSLuigi Rizzo +-----------------+ | | 35717885a7bSLuigi Rizzo cur->| available to | | | 35817885a7bSLuigi Rizzo | user, not read | +-----------------+ 35917885a7bSLuigi Rizzo | yet | cur->| (being | 36017885a7bSLuigi Rizzo | | | prepared) | 361ce3ee1e7SLuigi Rizzo | | | | 36217885a7bSLuigi Rizzo +-----------------+ + ------ + 36317885a7bSLuigi Rizzo tail->| |<-hwtail | |<-hwlease 36417885a7bSLuigi Rizzo | (being | ... | | ... 36517885a7bSLuigi Rizzo | prepared) | ... | | ... 
36617885a7bSLuigi Rizzo +-----------------+ ... | | ... 36717885a7bSLuigi Rizzo | |<-hwlease +-----------------+ 36817885a7bSLuigi Rizzo | | tail->| |<-hwtail 369ce3ee1e7SLuigi Rizzo | | | | 370ce3ee1e7SLuigi Rizzo | | | | 371ce3ee1e7SLuigi Rizzo | | | | 372ce3ee1e7SLuigi Rizzo +-----------------+ +-----------------+ 373ce3ee1e7SLuigi Rizzo 37417885a7bSLuigi Rizzo * The cur/tail (user view) and hwcur/hwtail (kernel view) 375ce3ee1e7SLuigi Rizzo * are used in the normal operation of the card. 376ce3ee1e7SLuigi Rizzo * 377ce3ee1e7SLuigi Rizzo * When a ring is the output of a switch port (Rx ring for 378ce3ee1e7SLuigi Rizzo * a VALE port, Tx ring for the host stack or NIC), slots 379ce3ee1e7SLuigi Rizzo * are reserved in blocks through 'hwlease' which points 380ce3ee1e7SLuigi Rizzo * to the next unused slot. 38117885a7bSLuigi Rizzo * On an Rx ring, hwlease is always after hwtail, 38217885a7bSLuigi Rizzo * and completions cause hwtail to advance. 38317885a7bSLuigi Rizzo * On a Tx ring, hwlease is always between cur and hwtail, 384ce3ee1e7SLuigi Rizzo * and completions cause cur to advance. 385ce3ee1e7SLuigi Rizzo * 386ce3ee1e7SLuigi Rizzo * nm_kr_space() returns the maximum number of slots that 387ce3ee1e7SLuigi Rizzo * can be assigned. 388ce3ee1e7SLuigi Rizzo * nm_kr_lease() reserves the required number of buffers, 389ce3ee1e7SLuigi Rizzo * advances nkr_hwlease and also returns an entry in 390ce3ee1e7SLuigi Rizzo * a circular array where completions should be reported. 391ce3ee1e7SLuigi Rizzo */ 392ce3ee1e7SLuigi Rizzo 393ce3ee1e7SLuigi Rizzo 394ce3ee1e7SLuigi Rizzo 395f9790aebSLuigi Rizzo enum txrx { NR_RX = 0, NR_TX = 1 }; 396ce3ee1e7SLuigi Rizzo 397*4bf50f18SLuigi Rizzo struct netmap_vp_adapter; // forward 398*4bf50f18SLuigi Rizzo 39968b8534bSLuigi Rizzo /* 400f9790aebSLuigi Rizzo * The "struct netmap_adapter" extends the "struct adapter" 401f9790aebSLuigi Rizzo * (or equivalent) device descriptor. 
402f9790aebSLuigi Rizzo * It contains all base fields needed to support netmap operation. 403f9790aebSLuigi Rizzo * There are in fact different types of netmap adapters 404f9790aebSLuigi Rizzo * (native, generic, VALE switch...) so a netmap_adapter is 405f9790aebSLuigi Rizzo * just the first field in the derived type. 40668b8534bSLuigi Rizzo */ 40768b8534bSLuigi Rizzo struct netmap_adapter { 4088241616dSLuigi Rizzo /* 4098241616dSLuigi Rizzo * On linux we do not have a good way to tell if an interface 410f9790aebSLuigi Rizzo * is netmap-capable. So we always use the following trick: 4118241616dSLuigi Rizzo * NA(ifp) points here, and the first entry (which hopefully 4128241616dSLuigi Rizzo * always exists and is at least 32 bits) contains a magic 4138241616dSLuigi Rizzo * value which we can use to detect that the interface is good. 4148241616dSLuigi Rizzo */ 4158241616dSLuigi Rizzo uint32_t magic; 416f9790aebSLuigi Rizzo uint32_t na_flags; /* enabled, and other flags */ 4178241616dSLuigi Rizzo #define NAF_SKIP_INTR 1 /* use the regular interrupt handler. 4188241616dSLuigi Rizzo * useful during initialization 4198241616dSLuigi Rizzo */ 420f18be576SLuigi Rizzo #define NAF_SW_ONLY 2 /* forward packets only to sw adapter */ 421ce3ee1e7SLuigi Rizzo #define NAF_BDG_MAYSLEEP 4 /* the bridge is allowed to sleep when 422ce3ee1e7SLuigi Rizzo * forwarding packets coming from this 423ce3ee1e7SLuigi Rizzo * interface 424ce3ee1e7SLuigi Rizzo */ 425ce3ee1e7SLuigi Rizzo #define NAF_MEM_OWNER 8 /* the adapter is responsible for the 426ce3ee1e7SLuigi Rizzo * deallocation of the memory allocator 427ce3ee1e7SLuigi Rizzo */ 428f9790aebSLuigi Rizzo #define NAF_NATIVE_ON 16 /* the adapter is native and the attached 429*4bf50f18SLuigi Rizzo * interface is in netmap mode. 430*4bf50f18SLuigi Rizzo * Virtual ports (vale, pipe, monitor...) 431*4bf50f18SLuigi Rizzo * should never use this flag. 
432f9790aebSLuigi Rizzo */ 433f9790aebSLuigi Rizzo #define NAF_NETMAP_ON 32 /* netmap is active (either native or 434*4bf50f18SLuigi Rizzo * emulated). Where possible (e.g. FreeBSD) 435f9790aebSLuigi Rizzo * IFCAP_NETMAP also mirrors this flag. 436f9790aebSLuigi Rizzo */ 437f0ea3689SLuigi Rizzo #define NAF_HOST_RINGS 64 /* the adapter supports the host rings */ 438*4bf50f18SLuigi Rizzo #define NAF_FORCE_NATIVE 128 /* the adapter is always NATIVE */ 439*4bf50f18SLuigi Rizzo #define NAF_BUSY (1U<<31) /* the adapter is used internally and 440*4bf50f18SLuigi Rizzo * cannot be registered from userspace 441*4bf50f18SLuigi Rizzo */ 442f9790aebSLuigi Rizzo int active_fds; /* number of user-space descriptors using this 44368b8534bSLuigi Rizzo interface, which is equal to the number of 44468b8534bSLuigi Rizzo struct netmap_if objs in the mapped region. */ 44568b8534bSLuigi Rizzo 44624e57ec9SEd Maste u_int num_rx_rings; /* number of adapter receive rings */ 44724e57ec9SEd Maste u_int num_tx_rings; /* number of adapter transmit rings */ 44868b8534bSLuigi Rizzo 44968b8534bSLuigi Rizzo u_int num_tx_desc; /* number of descriptor in each queue */ 45068b8534bSLuigi Rizzo u_int num_rx_desc; 45168b8534bSLuigi Rizzo 45268b8534bSLuigi Rizzo /* tx_rings and rx_rings are private but allocated 45368b8534bSLuigi Rizzo * as a contiguous chunk of memory. Each array has 45468b8534bSLuigi Rizzo * N+1 entries, for the adapter queues and for the host queue. 45568b8534bSLuigi Rizzo */ 45668b8534bSLuigi Rizzo struct netmap_kring *tx_rings; /* array of TX rings. */ 45768b8534bSLuigi Rizzo struct netmap_kring *rx_rings; /* array of RX rings. 
*/ 45817885a7bSLuigi Rizzo 459f9790aebSLuigi Rizzo 	void *tailroom;		       /* space below the rings array */ 460f9790aebSLuigi Rizzo 				       /* (used for leases) */ 461f9790aebSLuigi Rizzo 46268b8534bSLuigi Rizzo 46364ae02c3SLuigi Rizzo 	NM_SELINFO_T tx_si, rx_si;	/* global wait queues */ 46464ae02c3SLuigi Rizzo 465f0ea3689SLuigi Rizzo 	/* count users of the global wait queues */ 466f0ea3689SLuigi Rizzo 	int tx_si_users, rx_si_users; 467f0ea3689SLuigi Rizzo 468*4bf50f18SLuigi Rizzo 	void *pdev; /* used to store pci device */ 469*4bf50f18SLuigi Rizzo 47068b8534bSLuigi Rizzo 	/* copy of if_qflush and if_transmit pointers, to intercept 47168b8534bSLuigi Rizzo 	 * packets from the network stack when netmap is active. 47268b8534bSLuigi Rizzo 	 */ 47368b8534bSLuigi Rizzo 	int     (*if_transmit)(struct ifnet *, struct mbuf *); 47468b8534bSLuigi Rizzo 47517885a7bSLuigi Rizzo 	/* copy of if_input for netmap_send_up() */ 47617885a7bSLuigi Rizzo 	void     (*if_input)(struct ifnet *, struct mbuf *); 47717885a7bSLuigi Rizzo 47868b8534bSLuigi Rizzo 	/* references to the ifnet and device routines, used by 47968b8534bSLuigi Rizzo 	 * the generic netmap functions. 48068b8534bSLuigi Rizzo 	 */ 48168b8534bSLuigi Rizzo 	struct ifnet *ifp; /* adapter is ifp->if_softc */ 48268b8534bSLuigi Rizzo 48317885a7bSLuigi Rizzo 	/*---- callbacks for this netmap adapter -----*/ 48417885a7bSLuigi Rizzo 	/* 48517885a7bSLuigi Rizzo 	 * nm_dtor() is the cleanup routine called when destroying 48617885a7bSLuigi Rizzo 	 * the adapter. 48789cc2556SLuigi Rizzo 	 * Called with NMG_LOCK held. 48817885a7bSLuigi Rizzo 	 * 48917885a7bSLuigi Rizzo 	 * nm_register() is called on NIOCREGIF and close() to enter 49017885a7bSLuigi Rizzo 	 * or exit netmap mode on the NIC 491*4bf50f18SLuigi Rizzo 	 *	Called with NMG_LOCK held.
49217885a7bSLuigi Rizzo 	 * 49317885a7bSLuigi Rizzo 	 * nm_txsync() pushes packets to the underlying hw/switch 49417885a7bSLuigi Rizzo 	 * 49517885a7bSLuigi Rizzo 	 * nm_rxsync() collects packets from the underlying hw/switch 49617885a7bSLuigi Rizzo 	 * 49717885a7bSLuigi Rizzo 	 * nm_config() returns configuration information from the OS 49889cc2556SLuigi Rizzo 	 *	Called with NMG_LOCK held. 49917885a7bSLuigi Rizzo 	 * 500*4bf50f18SLuigi Rizzo 	 * nm_krings_create() create and init the tx_rings and 501*4bf50f18SLuigi Rizzo 	 * 	rx_rings arrays of kring structures. In particular, 502*4bf50f18SLuigi Rizzo 	 * 	set the nm_sync callbacks for each ring. 503*4bf50f18SLuigi Rizzo 	 * 	There is no need to also allocate the corresponding 504*4bf50f18SLuigi Rizzo 	 * 	netmap_rings, since netmap_mem_rings_create() will always 505*4bf50f18SLuigi Rizzo 	 * 	be called to provide the missing ones. 506*4bf50f18SLuigi Rizzo 	 *	Called with NMG_LOCK held. 50717885a7bSLuigi Rizzo 	 * 508*4bf50f18SLuigi Rizzo 	 * nm_krings_delete() cleanup and delete the tx_rings and rx_rings 509*4bf50f18SLuigi Rizzo 	 * 	arrays 510*4bf50f18SLuigi Rizzo 	 *	Called with NMG_LOCK held. 51117885a7bSLuigi Rizzo 	 * 51289cc2556SLuigi Rizzo 	 * nm_notify() is used to act after data have become available 51389cc2556SLuigi Rizzo 	 * 	(or the stopped state of the ring has changed) 51417885a7bSLuigi Rizzo 	 *	For hw devices this is typically a selwakeup(), 51517885a7bSLuigi Rizzo 	 *	but for NIC/host ports attached to a switch (or vice-versa) 51617885a7bSLuigi Rizzo 	 *	we also need to invoke the 'txsync' code downstream.
	 */
	/* nm_dtor: destructor callback, invoked when the adapter is freed */
	void (*nm_dtor)(struct netmap_adapter *);

	/* nm_register: enable/disable netmap mode on the device (onoff) */
	int (*nm_register)(struct netmap_adapter *, int onoff);

	/* per-ring tx/rx synchronization callbacks */
	int (*nm_txsync)(struct netmap_kring *kring, int flags);
	int (*nm_rxsync)(struct netmap_kring *kring, int flags);
#define NAF_FORCE_READ    1
#define NAF_FORCE_RECLAIM 2
	/* return configuration information */
	int (*nm_config)(struct netmap_adapter *,
		u_int *txr, u_int *txd, u_int *rxr, u_int *rxd);
	int (*nm_krings_create)(struct netmap_adapter *);
	void (*nm_krings_delete)(struct netmap_adapter *);
	int (*nm_notify)(struct netmap_adapter *,
		u_int ring, enum txrx, int flags);
#define NAF_DISABLE_NOTIFY 8	/* notify that the stopped state of the
				 * ring has changed (kring->nkr_stopped)
				 */

#ifdef WITH_VALE
	/*
	 * nm_bdg_attach() initializes the na_vp field to point
	 * to an adapter that can be attached to a VALE switch. If the
	 * current adapter is already a VALE port, na_vp is simply a cast;
	 * otherwise, na_vp points to a netmap_bwrap_adapter.
	 * If applicable, this callback also initializes na_hostvp,
	 * that can be used to connect the adapter host rings to the
	 * switch.
	 * Called with NMG_LOCK held.
	 *
	 * nm_bdg_ctl() is called on the actual attach/detach to/from
	 * the switch, to perform adapter-specific
	 * initializations
	 * Called with NMG_LOCK held.
	 */
	int (*nm_bdg_attach)(const char *bdg_name, struct netmap_adapter *);
	int (*nm_bdg_ctl)(struct netmap_adapter *, struct nmreq *, int);

	/* adapter used to attach this adapter to a VALE switch (if any) */
	struct netmap_vp_adapter *na_vp;
	/* adapter used to attach the host rings of this adapter
	 * to a VALE switch (if any) */
	struct netmap_vp_adapter *na_hostvp;
#endif

	/* standard refcount to control the lifetime of the adapter
	 * (it should be equal to the lifetime of the corresponding ifp)
	 */
	int na_refcount;

	/* memory allocator (opaque)
	 * We also cache a pointer to the lut_entry for translating
	 * buffer addresses, and the total number of buffers.
	 */
	struct netmap_mem_d *nm_mem;
	struct lut_entry *na_lut;
	uint32_t na_lut_objtotal;	/* max buffer index */
	uint32_t na_lut_objsize;	/* buffer size */

	/* additional information attached to this adapter
	 * by other netmap subsystems. Currently used by
	 * bwrap and LINUX/v1000.
	 */
	void *na_private;

#ifdef WITH_PIPES
	/* array of pipes that have this adapter as a parent */
	struct netmap_pipe_adapter **na_pipes;
	int na_next_pipe;	/* next free slot in the array */
	int na_max_pipes;	/* size of the array */
#endif /* WITH_PIPES */

	char name[64];
};


/*
 * If the NIC is owned by the kernel
 * (i.e., bridge), neither another bridge nor user can use it;
 * if the NIC is owned by a user, only users can share it.
 * Evaluation must be done under NMG_LOCK().
 */
#define NETMAP_OWNED_BY_KERN(na)	((na)->na_flags & NAF_BUSY)
#define NETMAP_OWNED_BY_ANY(na) \
	(NETMAP_OWNED_BY_KERN(na) || ((na)->active_fds > 0))


/*
 * derived netmap adapters for various types of ports
 */
struct netmap_vp_adapter {	/* VALE software port */
	struct netmap_adapter up;

	/*
	 * Bridge support:
	 *
	 * bdg_port is the port number used in the bridge;
	 * na_bdg points to the bridge this NA is attached to.
	 */
	int bdg_port;
	struct nm_bridge *na_bdg;
	int retry;		/* nonzero: retry on busy destination ring */

	/* Offset of ethernet header for each packet. */
	u_int virt_hdr_len;
	/* Maximum Frame Size, used in bdg_mismatch_datapath() */
	u_int mfs;
};


struct netmap_hw_adapter {	/* physical device */
	struct netmap_adapter up;

	struct net_device_ops nm_ndo;	// XXX linux only
	struct ethtool_ops nm_eto;	// XXX linux only
	const struct ethtool_ops* save_ethtool;

	int (*nm_hw_register)(struct netmap_adapter *, int onoff);
};

/* Mitigation support. */
struct nm_generic_mit {
	struct hrtimer mit_timer;
	int mit_pending;
	int mit_ring_idx;	/* index of the ring being mitigated */
	struct netmap_adapter *mit_na;	/* backpointer */
};

struct netmap_generic_adapter {	/* emulated device */
	struct netmap_hw_adapter up;

	/* Pointer to a previously used netmap adapter.
	 */
	struct netmap_adapter *prev;

	/* generic netmap adapters support:
	 * a net_device_ops struct overrides ndo_select_queue(),
	 * save_if_input saves the if_input hook (FreeBSD),
	 * mit implements rx interrupt mitigation,
	 */
	struct net_device_ops generic_ndo;
	void (*save_if_input)(struct ifnet *, struct mbuf *);

	struct nm_generic_mit *mit;
#ifdef linux
	netdev_tx_t (*save_start_xmit)(struct mbuf *, struct ifnet *);
#endif
};

/* number of hardware tx rings, plus one for the host tx ring
 * when NAF_HOST_RINGS is set
 */
static __inline int
netmap_real_tx_rings(struct netmap_adapter *na)
{
	return na->num_tx_rings + !!(na->na_flags & NAF_HOST_RINGS);
}

/* number of hardware rx rings, plus one for the host rx ring
 * when NAF_HOST_RINGS is set
 */
static __inline int
netmap_real_rx_rings(struct netmap_adapter *na)
{
	return na->num_rx_rings + !!(na->na_flags & NAF_HOST_RINGS);
}

#ifdef WITH_VALE

/*
 * Bridge wrapper for non VALE ports attached to a VALE switch.
 *
 * The real device must already have its own netmap adapter (hwna).
 * The bridge wrapper and the hwna adapter share the same set of
 * netmap rings and buffers, but they have two separate sets of
 * krings descriptors, with tx/rx meanings swapped:
 *
 *                                  netmap
 *           bwrap     krings       rings       krings      hwna
 *         +------+   +------+     +-----+    +------+   +------+
 *         |tx_rings->|      |\   /|     |----|      |<-tx_rings|
 *         |      |   +------+ \ / +-----+    +------+   |      |
 *         |      |             X                        |      |
 *         |      |            / \                       |      |
 *         |      |   +------+/   \+-----+    +------+   |      |
 *         |rx_rings->|      |     |     |----|      |<-rx_rings|
 *         |      |   +------+     +-----+    +------+   |      |
 *         +------+                                      +------+
 *
 * - packets coming from the bridge go to the bwrap rx rings,
 *   which are also the hwna tx rings.  The bwrap notify callback
 *   will then complete the hwna tx (see netmap_bwrap_notify).
 *
 * - packets coming from the outside go to the hwna rx rings,
 *   which are also the bwrap tx rings.  The (overwritten) hwna
 *   notify method will then complete the bridge tx
 *   (see netmap_bwrap_intr_notify).
 *
 * The bridge wrapper may optionally connect the hwna 'host' rings
 * to the bridge. This is done by using a second port in the
 * bridge and connecting it to the 'host' netmap_vp_adapter
 * contained in the netmap_bwrap_adapter. The bwrap host adapter
 * cross-links the hwna host rings in the same way as shown above.
 *
 * - packets coming from the bridge and directed to the host stack
 *   are handled by the bwrap host notify callback
 *   (see netmap_bwrap_host_notify)
 *
 * - packets coming from the host stack are still handled by the
 *   overwritten hwna notify callback (netmap_bwrap_intr_notify),
 *   but are diverted to the host adapter depending on the ring number.
 *
 */
struct netmap_bwrap_adapter {
	struct netmap_vp_adapter up;
	struct netmap_vp_adapter host;	/* for host rings */
	struct netmap_adapter *hwna;	/* the underlying device */

	/* backup of the hwna notify callback */
	int (*save_notify)(struct netmap_adapter *,
			u_int ring, enum txrx, int flags);
	/* backup of the hwna memory allocator */
	struct netmap_mem_d *save_nmd;

	/*
	 * When we attach a physical interface to the bridge, we
	 * allow the controlling process to terminate, so we need
	 * a place to store the netmap_priv_d data structure.
	 * This is only done when physical interfaces
	 * are attached to a bridge.
	 */
	struct netmap_priv_d *na_kpriv;
};
int netmap_bwrap_attach(const char *name, struct netmap_adapter *);


#endif /* WITH_VALE */

#ifdef WITH_PIPES

#define NM_MAXPIPES	64	/* max number of pipes per adapter */

struct netmap_pipe_adapter {
	struct netmap_adapter up;

	u_int id;	/* pipe identifier */
	int role;	/* either NR_REG_PIPE_MASTER or NR_REG_PIPE_SLAVE */

	struct netmap_adapter *parent; /* adapter that owns the memory */
	struct netmap_pipe_adapter *peer; /* the other end of the pipe */
	int peer_ref;		/* 1 iff we are holding a ref to the peer */

	u_int parent_slot; /* index in the parent pipe array */
};

#endif /* WITH_PIPES */


/* return slots reserved to rx clients; used in drivers */
static inline uint32_t
nm_kr_rxspace(struct netmap_kring *k)
{
	/* hwtail - hwcur, wrapping around the ring size if negative */
	int space = k->nr_hwtail - k->nr_hwcur;
	if (space < 0)
		space += k->nkr_num_slots;
	ND("preserving %d rx slots %d -> %d", space, k->nr_hwcur, k->nr_hwtail);

	return space;
}


/* True if no space in the tx ring.
 * only valid after txsync_prologue */
static inline int
nm_kr_txempty(struct netmap_kring *kring)
{
	return kring->rcur == kring->nr_hwtail;
}


/*
 * protect against multiple threads using the same ring.
 * also check that the ring has not been stopped.
 * We only care for 0 or !=0 as a return code.
 */
#define NM_KR_BUSY	1
#define NM_KR_STOPPED	2


/* release the ring, clearing the busy flag set by nm_kr_tryget() */
static __inline void nm_kr_put(struct netmap_kring *kr)
{
	NM_ATOMIC_CLEAR(&kr->nr_busy);
}


/* try to acquire exclusive use of the ring; returns 0 on success,
 * NM_KR_BUSY if another thread holds it, NM_KR_STOPPED if the ring
 * is stopped. On success the caller must release with nm_kr_put().
 */
static __inline int nm_kr_tryget(struct netmap_kring *kr)
{
	/* check a first time without taking the lock
	 * to avoid starvation for nm_kr_get()
	 */
	if (unlikely(kr->nkr_stopped)) {
		ND("ring %p stopped (%d)", kr, kr->nkr_stopped);
		return NM_KR_STOPPED;
	}
	if (unlikely(NM_ATOMIC_TEST_AND_SET(&kr->nr_busy)))
		return NM_KR_BUSY;
	/* check a second time with lock held */
	if (unlikely(kr->nkr_stopped)) {
		ND("ring %p stopped (%d)", kr, kr->nkr_stopped);
		nm_kr_put(kr);
		return NM_KR_STOPPED;
	}
	return 0;
}


/*
 * The following functions are used by individual drivers to
 * support netmap operation.
 *
 * netmap_attach() initializes a struct netmap_adapter, allocating the
 *	struct netmap_ring's and the struct selinfo.
 *
 * netmap_detach() frees the memory allocated by netmap_attach().
 *
 * netmap_transmit() replaces the if_transmit routine of the interface,
 *	and is used to intercept packets coming from the stack.
 *
 * netmap_load_map/netmap_reload_map are helper routines to set/reset
 *	the dmamap for a packet buffer
 *
 * netmap_reset() is a helper routine to be called in the hw driver
 *	when reinitializing a ring. It should not be called by
 *	virtual ports (vale, pipes, monitor)
 */
int netmap_attach(struct netmap_adapter *);
void netmap_detach(struct ifnet *);
int netmap_transmit(struct ifnet *, struct mbuf *);
struct netmap_slot *netmap_reset(struct netmap_adapter *na,
	enum txrx tx, u_int n, u_int new_cur);
int netmap_ring_reinit(struct netmap_kring *);

/* default functions to handle rx/tx interrupts */
int netmap_rx_irq(struct ifnet *, u_int, u_int *);
#define netmap_tx_irq(_n, _q) netmap_rx_irq(_n, _q, NULL)
void netmap_common_irq(struct ifnet *, u_int, u_int *work_done);


#ifdef WITH_VALE
/* functions used by external modules to interface with VALE */
#define netmap_vp_to_ifp(_vp)		((_vp)->up.ifp)
#define netmap_ifp_to_vp(_ifp)		(NA(_ifp)->na_vp)
#define netmap_ifp_to_host_vp(_ifp)	(NA(_ifp)->na_hostvp)
#define netmap_bdg_idx(_vp)		((_vp)->bdg_port)
const char *netmap_bdg_name(struct netmap_vp_adapter *);
#else /* !WITH_VALE */
#define netmap_vp_to_ifp(_vp)		NULL
#define netmap_ifp_to_vp(_ifp)		NULL
#define netmap_ifp_to_host_vp(_ifp)	NULL
#define netmap_bdg_idx(_vp)		-1
#define netmap_bdg_name(_vp)		NULL
#endif /* WITH_VALE */

/* true if the adapter is in native netmap mode */
static inline int
nm_native_on(struct netmap_adapter *na)
{
	return na && na->na_flags & NAF_NATIVE_ON;
}

/* true if the adapter is in netmap mode (native or emulated) */
static inline int
nm_netmap_on(struct netmap_adapter *na)
{
	return na && na->na_flags & NAF_NETMAP_ON;
}

/* set/clear native flags and if_transmit/netdev_ops */
static inline void
nm_set_native_flags(struct netmap_adapter *na)
{
	struct ifnet *ifp = na->ifp;

	na->na_flags |= (NAF_NATIVE_ON | NAF_NETMAP_ON);
#ifdef IFCAP_NETMAP /* or FreeBSD ? */
	ifp->if_capenable |= IFCAP_NETMAP;
#endif
#ifdef __FreeBSD__
	/* divert stack output into netmap; restored by nm_clear_native_flags */
	na->if_transmit = ifp->if_transmit;
	ifp->if_transmit = netmap_transmit;
#else
	na->if_transmit = (void *)ifp->netdev_ops;
	ifp->netdev_ops = &((struct netmap_hw_adapter *)na)->nm_ndo;
	((struct netmap_hw_adapter *)na)->save_ethtool = ifp->ethtool_ops;
	ifp->ethtool_ops = &((struct netmap_hw_adapter*)na)->nm_eto;
#endif
}


static inline void
nm_clear_native_flags(struct netmap_adapter *na)
{
	struct ifnet *ifp = na->ifp;

#ifdef __FreeBSD__
	ifp->if_transmit = na->if_transmit;
#else
	ifp->netdev_ops = (void *)na->if_transmit;
	ifp->ethtool_ops = ((struct netmap_hw_adapter*)na)->save_ethtool;
#endif
	na->na_flags &= ~(NAF_NATIVE_ON | NAF_NETMAP_ON);
#ifdef IFCAP_NETMAP /* or FreeBSD ? */
	ifp->if_capenable &= ~IFCAP_NETMAP;
#endif
}


/*
 * validates parameters in the ring/kring, returns a value for head
 * If any error, returns ring_size to force a reinit.
 */
uint32_t nm_txsync_prologue(struct netmap_kring *);


/*
 * validates parameters in the ring/kring, returns a value for head,
 * and the 'reserved' value in the argument.
 * If any error, returns ring_size lim to force a reinit.
 */
uint32_t nm_rxsync_prologue(struct netmap_kring *);


/*
 * update kring and ring at the end of txsync.
 */
static inline void
nm_txsync_finalize(struct netmap_kring *kring)
{
	/* update ring tail to what the kernel knows */
	kring->ring->tail = kring->rtail = kring->nr_hwtail;

	/* note, head/rhead/hwcur might be behind cur/rcur
	 * if no carrier
	 */
	ND(5, "%s now hwcur %d hwtail %d head %d cur %d tail %d",
		kring->name, kring->nr_hwcur, kring->nr_hwtail,
		kring->rhead, kring->rcur, kring->rtail);
}


/*
 * update kring and ring at the end of rxsync
 */
static inline void
nm_rxsync_finalize(struct netmap_kring *kring)
{
	/* tell userspace that there might be new packets */
	/* NOTE(review): the ND() below references 'ring', whose declaration
	 * is commented out; harmless while ND is a debugging no-op, but it
	 * would not compile if ND were enabled — confirm before enabling.
	 */
	//struct netmap_ring *ring = kring->ring;
	ND("head %d cur %d tail %d -> %d", ring->head, ring->cur, ring->tail,
		kring->nr_hwtail);
	kring->ring->tail = kring->rtail = kring->nr_hwtail;
	/* make a copy of the state for next round */
	kring->rhead = kring->ring->head;
	kring->rcur = kring->ring->cur;
}


/* check/fix address and len in tx rings */
#if 1 /* debug version */
#define	NM_CHECK_ADDR_LEN(_na, _a, _l)	do {				\
	if (_a == NETMAP_BUF_BASE(_na) || _l > NETMAP_BUF_SIZE(_na)) {	\
		RD(5, "bad addr/len ring %d slot %d idx %d len %d",	\
			kring->ring_id, nm_i, slot->buf_idx, len);	\
		if (_l > NETMAP_BUF_SIZE(_na))				\
			_l = NETMAP_BUF_SIZE(_na);			\
	} } while (0)
#else /* no debug version */
#define	NM_CHECK_ADDR_LEN(_na, _a, _l)	do {				\
		if (_l > NETMAP_BUF_SIZE(_na))				\
			_l = NETMAP_BUF_SIZE(_na);			\
	} while (0)
#endif


/*---------------------------------------------------------------*/
/*
 * Support routines used by netmap subsystems
 * (native drivers, VALE, generic, pipes, monitors, ...)
 */


/* common routine for all functions that create a netmap adapter.
 * It performs two main tasks:
 * - if the na points to an ifp, mark the ifp as netmap capable
 *   using na as its native adapter;
 * - provide defaults for the setup callbacks and the memory allocator
 */
int netmap_attach_common(struct netmap_adapter *);
/* common actions to be performed on netmap adapter destruction */
void netmap_detach_common(struct netmap_adapter *);
/* fill priv->np_[tr]xq{first,last} using the ringid and flags information
 * coming from a struct nmreq
 */
int netmap_interp_ringid(struct netmap_priv_d *priv, uint16_t ringid, uint32_t flags);
/* update the ring parameters (number and size of tx and rx rings).
 * It calls the nm_config callback, if available.
 */
int netmap_update_config(struct netmap_adapter *na);
/* create and initialize the common fields of the krings array.
 * using the information that must be already available in the na.
 * tailroom can be used to request the allocation of additional
 * tailroom bytes after the krings array. This is used by
 * netmap_vp_adapter's (i.e., VALE ports) to make room for
 * leasing-related data structures
 */
int netmap_krings_create(struct netmap_adapter *na, u_int tailroom);
/* deletes the kring array of the adapter. The array must have
 * been created using netmap_krings_create
 */
void netmap_krings_delete(struct netmap_adapter *na);

/* set the stopped/enabled status of ring
 * When stopping, they also wait for all current activity on the ring to
 * terminate. The status change is then notified using the na nm_notify
 * callback.
 */
void netmap_set_txring(struct netmap_adapter *, u_int ring_id, int stopped);
void netmap_set_rxring(struct netmap_adapter *, u_int ring_id, int stopped);
/* set the stopped/enabled status of all rings of the adapter. */
void netmap_set_all_rings(struct netmap_adapter *, int stopped);
/* convenience wrappers for netmap_set_all_rings, used in drivers */
void netmap_disable_all_rings(struct ifnet *);
void netmap_enable_all_rings(struct ifnet *);

int netmap_rxsync_from_host(struct netmap_adapter *na, struct thread *td, void *pwait);

struct netmap_if *
netmap_do_regif(struct netmap_priv_d *priv, struct netmap_adapter *na,
	uint16_t ringid, uint32_t flags, int *err);


/* clamp *v into [lo, hi], using dflt when out of range; msg used for logging */
u_int nm_bound_var(u_int *v, u_int dflt, u_int lo, u_int hi, const char *msg);
int netmap_get_na(struct nmreq *nmr, struct netmap_adapter **na, int create);
int netmap_get_hw_na(struct ifnet *ifp, struct netmap_adapter **na);

#ifdef WITH_VALE
/*
 * The following bridge-related functions are used by other
 * kernel modules.
 *
 * VALE only supports unicast or broadcast. The lookup
 * function can return 0 .. NM_BDG_MAXPORTS-1 for regular ports,
 * NM_BDG_MAXPORTS for broadcast, NM_BDG_MAXPORTS+1 for unknown.
 * XXX in practice "unknown" might be handled same as broadcast.
 */
typedef u_int (*bdg_lookup_fn_t)(struct nm_bdg_fwd *ft, uint8_t *ring_nr,
		const struct netmap_vp_adapter *);
typedef int (*bdg_config_fn_t)(struct nm_ifreq *);
typedef void (*bdg_dtor_fn_t)(const struct netmap_vp_adapter *);
/* per-bridge callbacks: destination lookup, configuration, destructor */
struct netmap_bdg_ops {
	bdg_lookup_fn_t lookup;
	bdg_config_fn_t config;
	bdg_dtor_fn_t	dtor;
};

/* default lookup function: learning ethernet bridge */
u_int netmap_bdg_learning(struct nm_bdg_fwd *ft, uint8_t *dst_ring,
		const struct netmap_vp_adapter *);

#define	NM_BDG_MAXPORTS		254	/* up to 254 */
#define	NM_BDG_BROADCAST	NM_BDG_MAXPORTS
#define	NM_BDG_NOPORT		(NM_BDG_MAXPORTS+1)

#define	NM_NAME			"vale"	/* prefix for bridge port name */

/* these are redefined in case of no VALE support */
int netmap_get_bdg_na(struct nmreq *nmr, struct netmap_adapter **na, int create);
void netmap_init_bridges(void);
int netmap_bdg_ctl(struct nmreq *nmr, struct netmap_bdg_ops *bdg_ops);
int netmap_bdg_config(struct nmreq *nmr);

#else /* !WITH_VALE */
#define	netmap_get_bdg_na(_1, _2, _3)	0
#define	netmap_init_bridges(_1)
#define	netmap_bdg_ctl(_1, _2)	EINVAL
#endif /* !WITH_VALE */

#ifdef WITH_PIPES
/* max number of pipes per device */
/* NOTE(review): NM_MAXPIPES is also defined (same value) in the
 * WITH_PIPES section near netmap_pipe_adapter above.
 */
#define NM_MAXPIPES	64	/* XXX how many? */
/* in case of no error, returns the actual number of pipes in nmr->nr_arg1 */
int netmap_pipe_alloc(struct netmap_adapter *, struct nmreq *nmr);
void netmap_pipe_dealloc(struct netmap_adapter *);
int netmap_get_pipe_na(struct nmreq *nmr, struct netmap_adapter **na, int create);
#else /* !WITH_PIPES */
#define NM_MAXPIPES	0
#define netmap_pipe_alloc(_1, _2)	EOPNOTSUPP
#define netmap_pipe_dealloc(_1)
#define netmap_get_pipe_na(_1, _2, _3)	0
#endif

#ifdef WITH_MONITOR
int netmap_get_monitor_na(struct nmreq *nmr, struct netmap_adapter **na, int create);
#else
#define netmap_get_monitor_na(_1, _2, _3) 0
#endif

/* Various prototypes */
int netmap_poll(struct cdev *dev, int events, struct thread *td);
int netmap_init(void);
void netmap_fini(void);
int netmap_get_memory(struct netmap_priv_d* p);
void
netmap_dtor(void *data);
int netmap_dtor_locked(struct netmap_priv_d *priv);

int netmap_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td);

/* netmap_adapter creation/destruction */

// #define NM_DEBUG_PUTGET 1

#ifdef NM_DEBUG_PUTGET

#define NM_DBG(f) __##f

void __netmap_adapter_get(struct netmap_adapter *na);

/* refcount acquire, with debug trace of adapter name and count */
#define netmap_adapter_get(na) 				\
	do {						\
		struct netmap_adapter *__na = na;	\
		D("getting %p:%s (%d)", __na, (__na)->name, (__na)->na_refcount);	\
		__netmap_adapter_get(__na);		\
	} while (0)

int __netmap_adapter_put(struct netmap_adapter *na);

/* refcount release, with debug trace; evaluates to the put() result */
#define netmap_adapter_put(na)				\
	({						\
		struct netmap_adapter *__na = na;	\
		D("putting %p:%s (%d)", __na, (__na)->name, (__na)->na_refcount);	\
		__netmap_adapter_put(__na);		\
	})

#else /* !NM_DEBUG_PUTGET */

#define NM_DBG(f) f
void netmap_adapter_get(struct netmap_adapter *na);
int netmap_adapter_put(struct netmap_adapter *na);

#endif /* !NM_DEBUG_PUTGET */


/*
 * module variables
 */
#define NETMAP_BUF_BASE(na)	((na)->na_lut[0].vaddr)
#define NETMAP_BUF_SIZE(na)	((na)->na_lut_objsize)
extern int netmap_mitigate;	// XXX not really used
extern int netmap_no_pendintr;
extern int netmap_verbose;	// XXX debugging
enum {                                  /* verbose flags */
	NM_VERB_ON = 1,                 /* generic verbose */
	NM_VERB_HOST = 0x2,             /* verbose host stack */
	NM_VERB_RXSYNC = 0x10,          /* verbose on rxsync/txsync */
	NM_VERB_TXSYNC = 0x20,
	NM_VERB_RXINTR = 0x100,         /* verbose on rx/tx intr (driver) */
	NM_VERB_TXINTR = 0x200,
	NM_VERB_NIC_RXSYNC = 0x1000,    /* verbose on rx/tx intr (driver) */
	NM_VERB_NIC_TXSYNC = 0x2000,
};

extern int netmap_txsync_retry;
extern int netmap_generic_mit;
extern int netmap_generic_ringsize;
extern int netmap_generic_rings;

/*
 * NA returns a pointer to the struct netmap adapter from the ifp,
 * WNA is used to write it.
 */
#ifndef WNA
#define	WNA(_ifp)	(_ifp)->if_pspare[0]
#endif
#define	NA(_ifp)	((struct netmap_adapter *)WNA(_ifp))

/*
 * Macros to determine if an interface is netmap capable or netmap enabled.
 * See the magic field in struct netmap_adapter.
11978241616dSLuigi Rizzo */ 11988241616dSLuigi Rizzo #ifdef __FreeBSD__ 11998241616dSLuigi Rizzo /* 12008241616dSLuigi Rizzo * on FreeBSD just use if_capabilities and if_capenable. 12018241616dSLuigi Rizzo */ 12028241616dSLuigi Rizzo #define NETMAP_CAPABLE(ifp) (NA(ifp) && \ 12038241616dSLuigi Rizzo (ifp)->if_capabilities & IFCAP_NETMAP ) 12048241616dSLuigi Rizzo 12058241616dSLuigi Rizzo #define NETMAP_SET_CAPABLE(ifp) \ 12068241616dSLuigi Rizzo (ifp)->if_capabilities |= IFCAP_NETMAP 12078241616dSLuigi Rizzo 12088241616dSLuigi Rizzo #else /* linux */ 12098241616dSLuigi Rizzo 12108241616dSLuigi Rizzo /* 12118241616dSLuigi Rizzo * on linux: 12128241616dSLuigi Rizzo * we check if NA(ifp) is set and its first element has a related 12138241616dSLuigi Rizzo * magic value. The capenable is within the struct netmap_adapter. 12148241616dSLuigi Rizzo */ 12158241616dSLuigi Rizzo #define NETMAP_MAGIC 0x52697a7a 12168241616dSLuigi Rizzo 12178241616dSLuigi Rizzo #define NETMAP_CAPABLE(ifp) (NA(ifp) && \ 12188241616dSLuigi Rizzo ((uint32_t)(uintptr_t)NA(ifp) ^ NA(ifp)->magic) == NETMAP_MAGIC ) 12198241616dSLuigi Rizzo 12208241616dSLuigi Rizzo #define NETMAP_SET_CAPABLE(ifp) \ 12218241616dSLuigi Rizzo NA(ifp)->magic = ((uint32_t)(uintptr_t)NA(ifp)) ^ NETMAP_MAGIC 12228241616dSLuigi Rizzo 12238241616dSLuigi Rizzo #endif /* linux */ 122468b8534bSLuigi Rizzo 1225f196ce38SLuigi Rizzo #ifdef __FreeBSD__ 1226f9790aebSLuigi Rizzo 1227*4bf50f18SLuigi Rizzo /* Assigns the device IOMMU domain to an allocator. 
1228*4bf50f18SLuigi Rizzo * Returns -ENOMEM in case the domain is different */ 1229*4bf50f18SLuigi Rizzo #define nm_iommu_group_id(dev) (0) 1230*4bf50f18SLuigi Rizzo 123117885a7bSLuigi Rizzo /* Callback invoked by the dma machinery after a successful dmamap_load */ 12326dba29a2SLuigi Rizzo static void netmap_dmamap_cb(__unused void *arg, 12336dba29a2SLuigi Rizzo __unused bus_dma_segment_t * segs, __unused int nseg, __unused int error) 12346dba29a2SLuigi Rizzo { 12356dba29a2SLuigi Rizzo } 12366dba29a2SLuigi Rizzo 12376dba29a2SLuigi Rizzo /* bus_dmamap_load wrapper: call aforementioned function if map != NULL. 12386dba29a2SLuigi Rizzo * XXX can we do it without a callback ? 12396dba29a2SLuigi Rizzo */ 12406dba29a2SLuigi Rizzo static inline void 1241*4bf50f18SLuigi Rizzo netmap_load_map(struct netmap_adapter *na, 1242*4bf50f18SLuigi Rizzo bus_dma_tag_t tag, bus_dmamap_t map, void *buf) 12436dba29a2SLuigi Rizzo { 12446dba29a2SLuigi Rizzo if (map) 1245*4bf50f18SLuigi Rizzo bus_dmamap_load(tag, map, buf, NETMAP_BUF_SIZE(na), 12466dba29a2SLuigi Rizzo netmap_dmamap_cb, NULL, BUS_DMA_NOWAIT); 12476dba29a2SLuigi Rizzo } 12486dba29a2SLuigi Rizzo 1249*4bf50f18SLuigi Rizzo static inline void 1250*4bf50f18SLuigi Rizzo netmap_unload_map(struct netmap_adapter *na, 1251*4bf50f18SLuigi Rizzo bus_dma_tag_t tag, bus_dmamap_t map) 1252*4bf50f18SLuigi Rizzo { 1253*4bf50f18SLuigi Rizzo if (map) 1254*4bf50f18SLuigi Rizzo bus_dmamap_unload(tag, map); 1255*4bf50f18SLuigi Rizzo } 1256*4bf50f18SLuigi Rizzo 12576dba29a2SLuigi Rizzo /* update the map when a buffer changes. 
*/ 12586dba29a2SLuigi Rizzo static inline void 1259*4bf50f18SLuigi Rizzo netmap_reload_map(struct netmap_adapter *na, 1260*4bf50f18SLuigi Rizzo bus_dma_tag_t tag, bus_dmamap_t map, void *buf) 12616dba29a2SLuigi Rizzo { 12626dba29a2SLuigi Rizzo if (map) { 12636dba29a2SLuigi Rizzo bus_dmamap_unload(tag, map); 1264*4bf50f18SLuigi Rizzo bus_dmamap_load(tag, map, buf, NETMAP_BUF_SIZE(na), 12656dba29a2SLuigi Rizzo netmap_dmamap_cb, NULL, BUS_DMA_NOWAIT); 12666dba29a2SLuigi Rizzo } 12676dba29a2SLuigi Rizzo } 1268f9790aebSLuigi Rizzo 1269f196ce38SLuigi Rizzo #else /* linux */ 1270f196ce38SLuigi Rizzo 1271*4bf50f18SLuigi Rizzo int nm_iommu_group_id(bus_dma_tag_t dev); 1272*4bf50f18SLuigi Rizzo extern size_t netmap_mem_get_bufsize(struct netmap_mem_d *); 1273*4bf50f18SLuigi Rizzo #include <linux/dma-mapping.h> 1274*4bf50f18SLuigi Rizzo 1275*4bf50f18SLuigi Rizzo static inline void 1276*4bf50f18SLuigi Rizzo netmap_load_map(struct netmap_adapter *na, 1277*4bf50f18SLuigi Rizzo bus_dma_tag_t tag, bus_dmamap_t map, void *buf) 1278*4bf50f18SLuigi Rizzo { 1279*4bf50f18SLuigi Rizzo if (map) { 1280*4bf50f18SLuigi Rizzo *map = dma_map_single(na->pdev, buf, netmap_mem_get_bufsize(na->nm_mem), 1281*4bf50f18SLuigi Rizzo DMA_BIDIRECTIONAL); 1282*4bf50f18SLuigi Rizzo } 1283*4bf50f18SLuigi Rizzo } 1284*4bf50f18SLuigi Rizzo 1285*4bf50f18SLuigi Rizzo static inline void 1286*4bf50f18SLuigi Rizzo netmap_unload_map(struct netmap_adapter *na, 1287*4bf50f18SLuigi Rizzo bus_dma_tag_t tag, bus_dmamap_t map) 1288*4bf50f18SLuigi Rizzo { 1289*4bf50f18SLuigi Rizzo u_int sz = netmap_mem_get_bufsize(na->nm_mem); 1290*4bf50f18SLuigi Rizzo 1291*4bf50f18SLuigi Rizzo if (*map) { 1292*4bf50f18SLuigi Rizzo dma_unmap_single(na->pdev, *map, sz, 1293*4bf50f18SLuigi Rizzo DMA_BIDIRECTIONAL); 1294*4bf50f18SLuigi Rizzo } 1295*4bf50f18SLuigi Rizzo } 1296*4bf50f18SLuigi Rizzo 1297*4bf50f18SLuigi Rizzo static inline void 1298*4bf50f18SLuigi Rizzo netmap_reload_map(struct netmap_adapter *na, 1299*4bf50f18SLuigi Rizzo 
bus_dma_tag_t tag, bus_dmamap_t map, void *buf) 1300*4bf50f18SLuigi Rizzo { 1301*4bf50f18SLuigi Rizzo u_int sz = netmap_mem_get_bufsize(na->nm_mem); 1302*4bf50f18SLuigi Rizzo 1303*4bf50f18SLuigi Rizzo if (*map) { 1304*4bf50f18SLuigi Rizzo dma_unmap_single(na->pdev, *map, sz, 1305*4bf50f18SLuigi Rizzo DMA_BIDIRECTIONAL); 1306*4bf50f18SLuigi Rizzo } 1307*4bf50f18SLuigi Rizzo 1308*4bf50f18SLuigi Rizzo *map = dma_map_single(na->pdev, buf, sz, 1309*4bf50f18SLuigi Rizzo DMA_BIDIRECTIONAL); 1310*4bf50f18SLuigi Rizzo } 1311*4bf50f18SLuigi Rizzo 1312f196ce38SLuigi Rizzo /* 1313f196ce38SLuigi Rizzo * XXX How do we redefine these functions: 1314f196ce38SLuigi Rizzo * 1315f196ce38SLuigi Rizzo * on linux we need 1316f196ce38SLuigi Rizzo * dma_map_single(&pdev->dev, virt_addr, len, direction) 1317f196ce38SLuigi Rizzo * dma_unmap_single(&adapter->pdev->dev, phys_addr, len, direction 1318f196ce38SLuigi Rizzo * The len can be implicit (on netmap it is NETMAP_BUF_SIZE) 1319f196ce38SLuigi Rizzo * unfortunately the direction is not, so we need to change 1320f196ce38SLuigi Rizzo * something to have a cross API 1321f196ce38SLuigi Rizzo */ 1322*4bf50f18SLuigi Rizzo 1323f196ce38SLuigi Rizzo #if 0 1324f196ce38SLuigi Rizzo struct e1000_buffer *buffer_info = &tx_ring->buffer_info[l]; 1325f196ce38SLuigi Rizzo /* set time_stamp *before* dma to help avoid a possible race */ 1326f196ce38SLuigi Rizzo buffer_info->time_stamp = jiffies; 1327f196ce38SLuigi Rizzo buffer_info->mapped_as_page = false; 1328f196ce38SLuigi Rizzo buffer_info->length = len; 1329f196ce38SLuigi Rizzo //buffer_info->next_to_watch = l; 1330f196ce38SLuigi Rizzo /* reload dma map */ 1331f196ce38SLuigi Rizzo dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, 1332f196ce38SLuigi Rizzo NETMAP_BUF_SIZE, DMA_TO_DEVICE); 1333f196ce38SLuigi Rizzo buffer_info->dma = dma_map_single(&adapter->pdev->dev, 1334f196ce38SLuigi Rizzo addr, NETMAP_BUF_SIZE, DMA_TO_DEVICE); 1335f196ce38SLuigi Rizzo 1336f196ce38SLuigi Rizzo if 
(dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) { 1337f196ce38SLuigi Rizzo D("dma mapping error"); 1338f196ce38SLuigi Rizzo /* goto dma_error; See e1000_put_txbuf() */ 1339f196ce38SLuigi Rizzo /* XXX reset */ 1340f196ce38SLuigi Rizzo } 1341f196ce38SLuigi Rizzo tx_desc->buffer_addr = htole64(buffer_info->dma); //XXX 1342f196ce38SLuigi Rizzo 1343f196ce38SLuigi Rizzo #endif 1344f196ce38SLuigi Rizzo 1345f196ce38SLuigi Rizzo /* 1346f196ce38SLuigi Rizzo * The bus_dmamap_sync() can be one of wmb() or rmb() depending on direction. 1347f196ce38SLuigi Rizzo */ 1348f196ce38SLuigi Rizzo #define bus_dmamap_sync(_a, _b, _c) 1349f196ce38SLuigi Rizzo 1350f196ce38SLuigi Rizzo #endif /* linux */ 13516dba29a2SLuigi Rizzo 1352ce3ee1e7SLuigi Rizzo 13535644ccecSLuigi Rizzo /* 13545644ccecSLuigi Rizzo * functions to map NIC to KRING indexes (n2k) and vice versa (k2n) 13555644ccecSLuigi Rizzo */ 13565644ccecSLuigi Rizzo static inline int 135764ae02c3SLuigi Rizzo netmap_idx_n2k(struct netmap_kring *kr, int idx) 13585644ccecSLuigi Rizzo { 135964ae02c3SLuigi Rizzo int n = kr->nkr_num_slots; 136064ae02c3SLuigi Rizzo idx += kr->nkr_hwofs; 136164ae02c3SLuigi Rizzo if (idx < 0) 136264ae02c3SLuigi Rizzo return idx + n; 136364ae02c3SLuigi Rizzo else if (idx < n) 136464ae02c3SLuigi Rizzo return idx; 13655644ccecSLuigi Rizzo else 136664ae02c3SLuigi Rizzo return idx - n; 13675644ccecSLuigi Rizzo } 13685644ccecSLuigi Rizzo 13695644ccecSLuigi Rizzo 13705644ccecSLuigi Rizzo static inline int 137164ae02c3SLuigi Rizzo netmap_idx_k2n(struct netmap_kring *kr, int idx) 13725644ccecSLuigi Rizzo { 137364ae02c3SLuigi Rizzo int n = kr->nkr_num_slots; 137464ae02c3SLuigi Rizzo idx -= kr->nkr_hwofs; 137564ae02c3SLuigi Rizzo if (idx < 0) 137664ae02c3SLuigi Rizzo return idx + n; 137764ae02c3SLuigi Rizzo else if (idx < n) 137864ae02c3SLuigi Rizzo return idx; 13795644ccecSLuigi Rizzo else 138064ae02c3SLuigi Rizzo return idx - n; 13815644ccecSLuigi Rizzo } 13825644ccecSLuigi Rizzo 13835644ccecSLuigi Rizzo 
1384d76bf4ffSLuigi Rizzo /* Entries of the look-up table. */ 1385d76bf4ffSLuigi Rizzo struct lut_entry { 1386d76bf4ffSLuigi Rizzo void *vaddr; /* virtual address. */ 1387849bec0eSLuigi Rizzo vm_paddr_t paddr; /* physical address. */ 1388d76bf4ffSLuigi Rizzo }; 1389d76bf4ffSLuigi Rizzo 1390d76bf4ffSLuigi Rizzo struct netmap_obj_pool; 1391d76bf4ffSLuigi Rizzo 139268b8534bSLuigi Rizzo /* 13936e10c8b8SLuigi Rizzo * NMB return the virtual address of a buffer (buffer 0 on bad index) 13946e10c8b8SLuigi Rizzo * PNMB also fills the physical address 139568b8534bSLuigi Rizzo */ 13966e10c8b8SLuigi Rizzo static inline void * 1397*4bf50f18SLuigi Rizzo NMB(struct netmap_adapter *na, struct netmap_slot *slot) 1398f9790aebSLuigi Rizzo { 1399f9790aebSLuigi Rizzo struct lut_entry *lut = na->na_lut; 1400f9790aebSLuigi Rizzo uint32_t i = slot->buf_idx; 1401f9790aebSLuigi Rizzo return (unlikely(i >= na->na_lut_objtotal)) ? 1402f9790aebSLuigi Rizzo lut[0].vaddr : lut[i].vaddr; 1403f9790aebSLuigi Rizzo } 1404f9790aebSLuigi Rizzo 1405*4bf50f18SLuigi Rizzo static inline void * 1406*4bf50f18SLuigi Rizzo PNMB(struct netmap_adapter *na, struct netmap_slot *slot, uint64_t *pp) 1407*4bf50f18SLuigi Rizzo { 1408*4bf50f18SLuigi Rizzo uint32_t i = slot->buf_idx; 1409*4bf50f18SLuigi Rizzo struct lut_entry *lut = na->na_lut; 1410*4bf50f18SLuigi Rizzo void *ret = (i >= na->na_lut_objtotal) ? lut[0].vaddr : lut[i].vaddr; 1411*4bf50f18SLuigi Rizzo 1412*4bf50f18SLuigi Rizzo *pp = (i >= na->na_lut_objtotal) ? lut[0].paddr : lut[i].paddr; 1413*4bf50f18SLuigi Rizzo return ret; 1414*4bf50f18SLuigi Rizzo } 1415*4bf50f18SLuigi Rizzo 1416*4bf50f18SLuigi Rizzo /* Generic version of NMB, which uses device-specific memory. 
 */


/* Flush packets queued by the host stack onto the host TX ring. */
void netmap_txsync_to_host(struct netmap_adapter *na);


/*
 * Structure associated to each thread which registered an interface.
 *
 * The first 4 fields of this structure are written by NIOCREGIF and
 * read by poll() and NIOC?XSYNC.
 *
 * There is low contention among writers (a correct user program
 * should have none) and among writers and readers, so we use a
 * single global lock to protect the structure initialization;
 * since initialization involves the allocation of memory,
 * we reuse the memory allocator lock.
 *
 * Read access to the structure is lock free. Readers must check that
 * np_nifp is not NULL before using the other fields.
 * If np_nifp is NULL initialization has not been performed,
 * so they should return an error to userspace.
 *
 * The ref_done field is used to regulate access to the refcount in the
 * memory allocator. The refcount must be incremented at most once for
 * each open("/dev/netmap"). The increment is performed by the first
 * function that calls netmap_get_memory() (currently called by
 * mmap(), NIOCGINFO and NIOCREGIF).
 * If the refcount is incremented, it is then decremented when the
 * private structure is destroyed.
 */
struct netmap_priv_d {
	struct netmap_if * volatile np_nifp;	/* netmap if descriptor. */

	struct netmap_adapter	*np_na;
	uint32_t	np_flags;	/* from the ioctl */
	u_int		np_txqfirst, np_txqlast; /* range of tx rings to scan */
	u_int		np_rxqfirst, np_rxqlast; /* range of rx rings to scan */
	uint16_t	np_txpoll;	/* XXX and also np_rxpoll ? */

	struct netmap_mem_d	*np_mref;	/* use with NMG_LOCK held */
	/* np_refcount is only used on FreeBSD */
	int		np_refcount;	/* use with NMG_LOCK held */

	/* pointers to the selinfo to be used for selrecord.
	 * Either the local or the global one depending on the
	 * number of rings.
	 */
	NM_SELINFO_T *np_rxsi, *np_txsi;
	struct thread	*np_td;		/* kqueue, just debugging */
};

#ifdef WITH_MONITOR

struct netmap_monitor_adapter {
	struct netmap_adapter up;

	/* NOTE(review): presumably an internal priv used to bind to the
	 * monitored port — confirm against netmap_monitor.c */
	struct netmap_priv_d priv;
	uint32_t flags;
};

#endif /* WITH_MONITOR */


/*
 * generic netmap emulation for devices that do not have
 * native netmap support.
1484f9790aebSLuigi Rizzo */ 1485f9790aebSLuigi Rizzo int generic_netmap_attach(struct ifnet *ifp); 1486f9790aebSLuigi Rizzo 1487f9790aebSLuigi Rizzo int netmap_catch_rx(struct netmap_adapter *na, int intercept); 1488f9790aebSLuigi Rizzo void generic_rx_handler(struct ifnet *ifp, struct mbuf *m);; 148917885a7bSLuigi Rizzo void netmap_catch_tx(struct netmap_generic_adapter *na, int enable); 1490f9790aebSLuigi Rizzo int generic_xmit_frame(struct ifnet *ifp, struct mbuf *m, void *addr, u_int len, u_int ring_nr); 1491f9790aebSLuigi Rizzo int generic_find_num_desc(struct ifnet *ifp, u_int *tx, u_int *rx); 1492f9790aebSLuigi Rizzo void generic_find_num_queues(struct ifnet *ifp, u_int *txq, u_int *rxq); 1493f9790aebSLuigi Rizzo 1494*4bf50f18SLuigi Rizzo //#define RATE_GENERIC /* Enables communication statistics for generic. */ 1495*4bf50f18SLuigi Rizzo #ifdef RATE_GENERIC 1496*4bf50f18SLuigi Rizzo void generic_rate(int txp, int txs, int txi, int rxp, int rxs, int rxi); 1497*4bf50f18SLuigi Rizzo #else 1498*4bf50f18SLuigi Rizzo #define generic_rate(txp, txs, txi, rxp, rxs, rxi) 1499*4bf50f18SLuigi Rizzo #endif 1500*4bf50f18SLuigi Rizzo 1501f9790aebSLuigi Rizzo /* 1502f9790aebSLuigi Rizzo * netmap_mitigation API. This is used by the generic adapter 1503f9790aebSLuigi Rizzo * to reduce the number of interrupt requests/selwakeup 1504f9790aebSLuigi Rizzo * to clients on incoming packets. 
 */
void netmap_mitigation_init(struct nm_generic_mit *mit, int idx,
                                struct netmap_adapter *na);
void netmap_mitigation_start(struct nm_generic_mit *mit);
void netmap_mitigation_restart(struct nm_generic_mit *mit);
int netmap_mitigation_active(struct nm_generic_mit *mit);
void netmap_mitigation_cleanup(struct nm_generic_mit *mit);



/* Shared declarations for the VALE switch. */

/*
 * Each transmit queue accumulates a batch of packets into
 * a structure before forwarding. Packets to the same
 * destination are put in a list using ft_next as a link field.
 * ft_frags and ft_next are valid only on the first fragment.
 */
struct nm_bdg_fwd {	/* forwarding entry for a bridge */
	void *ft_buf;		/* netmap or indirect buffer */
	uint8_t ft_frags;	/* how many fragments (only on 1st frag) */
	uint8_t _ft_port;	/* dst port (unused) */
	uint16_t ft_flags;	/* flags, e.g. indirect */
	uint16_t ft_len;	/* src fragment len */
	uint16_t ft_next;	/* next packet to same destination */
};

/* struct 'virtio_net_hdr' from linux. */
struct nm_vnet_hdr {
#define VIRTIO_NET_HDR_F_NEEDS_CSUM	1	/* Use csum_start, csum_offset */
#define VIRTIO_NET_HDR_F_DATA_VALID	2	/* Csum is valid */
	uint8_t flags;
#define VIRTIO_NET_HDR_GSO_NONE		0	/* Not a GSO frame */
#define VIRTIO_NET_HDR_GSO_TCPV4	1	/* GSO frame, IPv4 TCP (TSO) */
#define VIRTIO_NET_HDR_GSO_UDP		3	/* GSO frame, IPv4 UDP (UFO) */
#define VIRTIO_NET_HDR_GSO_TCPV6	4	/* GSO frame, IPv6 TCP */
#define VIRTIO_NET_HDR_GSO_ECN		0x80	/* TCP has ECN set */
	uint8_t gso_type;
	uint16_t hdr_len;
	uint16_t gso_size;
	uint16_t csum_start;
	uint16_t csum_offset;
};

#define WORST_CASE_GSO_HEADER	(14+40+60)  /* IPv6 + TCP */

/* Private definitions for IPv4, IPv6, UDP and TCP headers. */

struct nm_iphdr {
	uint8_t		version_ihl;
	uint8_t		tos;
	uint16_t	tot_len;
	uint16_t	id;
	uint16_t	frag_off;
	uint8_t		ttl;
	uint8_t		protocol;
	uint16_t	check;
	uint32_t	saddr;
	uint32_t	daddr;
	/* The options start here. */
};

struct nm_tcphdr {
	uint16_t	source;
	uint16_t	dest;
	uint32_t	seq;
	uint32_t	ack_seq;
	uint8_t		doff;	/* Data offset + Reserved */
	uint8_t		flags;
	uint16_t	window;
	uint16_t	check;
	uint16_t	urg_ptr;
};

struct nm_udphdr {
	uint16_t	source;
	uint16_t	dest;
	uint16_t	len;
	uint16_t	check;
};

struct nm_ipv6hdr {
	uint8_t		priority_version;
	uint8_t		flow_lbl[3];

	uint16_t	payload_len;
	uint8_t		nexthdr;
	uint8_t		hop_limit;

	uint8_t		saddr[16];
	uint8_t		daddr[16];
};

/* Type used to store a checksum (in host byte order) that hasn't been
 * folded yet.
 */
#define rawsum_t uint32_t	/* XXX a typedef would be cleaner */

rawsum_t nm_csum_raw(uint8_t *data, size_t len, rawsum_t cur_sum);
uint16_t nm_csum_ipv4(struct nm_iphdr *iph);
void nm_csum_tcpudp_ipv4(struct nm_iphdr *iph, void *data,
		      size_t datalen, uint16_t *check);
void nm_csum_tcpudp_ipv6(struct nm_ipv6hdr *ip6h, void *data,
		      size_t datalen, uint16_t *check);
uint16_t nm_csum_fold(rawsum_t cur_sum);

/* Handle VALE forwarding between ports with mismatching capabilities
 * (e.g. different vnet-header support). */
void bdg_mismatch_datapath(struct netmap_vp_adapter *na,
	struct netmap_vp_adapter *dst_na,
	struct nm_bdg_fwd *ft_p, struct netmap_ring *ring,
	u_int *j, u_int lim, u_int *howmany);

/* persistent virtual port routines */
int nm_vi_persist(const char *, struct ifnet **);
void nm_vi_detach(struct ifnet *);
void nm_vi_init_index(void);

#endif /* _NET_NETMAP_KERN_H_ */