/*
 * Copyright (C) 2011-2014 Matteo Landi, Luigi Rizzo. All rights reserved.
 * Copyright (C) 2013-2014 Universita` di Pisa. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *   1. Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * $FreeBSD$
 *
 * The header contains the definitions of constants and function
 * prototypes used only in kernelspace.
 */

#ifndef _NET_NETMAP_KERN_H_
#define _NET_NETMAP_KERN_H_

#define WITH_VALE	// comment out to disable VALE support
#define WITH_PIPES
#define WITH_MONITOR

#if defined(__FreeBSD__)

#define likely(x)	__builtin_expect((long)!!(x), 1L)
#define unlikely(x)	__builtin_expect((long)!!(x), 0L)

#define	NM_LOCK_T	struct mtx
#define	NMG_LOCK_T	struct sx
#define NMG_LOCK_INIT()	sx_init(&netmap_global_lock, \
				"netmap global lock")
#define NMG_LOCK_DESTROY()	sx_destroy(&netmap_global_lock)
#define NMG_LOCK()	sx_xlock(&netmap_global_lock)
#define NMG_UNLOCK()	sx_xunlock(&netmap_global_lock)
#define NMG_LOCK_ASSERT()	sx_assert(&netmap_global_lock, SA_XLOCKED)

#define	NM_SELINFO_T	struct selinfo
#define	MBUF_LEN(m)	((m)->m_pkthdr.len)
#define	MBUF_IFP(m)	((m)->m_pkthdr.rcvif)
#define	NM_SEND_UP(ifp, m)	((NA(ifp))->if_input)(ifp, m)

#define NM_ATOMIC_T	volatile int	// XXX ?
/* atomic operations */
#include <machine/atomic.h>
#define NM_ATOMIC_TEST_AND_SET(p)	(!atomic_cmpset_acq_int((p), 0, 1))
#define NM_ATOMIC_CLEAR(p)		atomic_store_rel_int((p), 0)

#if __FreeBSD_version >= 1100030
#define	WNA(_ifp)	(_ifp)->if_netmap
#else /* older FreeBSD */
#define	WNA(_ifp)	(_ifp)->if_pspare[0]
#endif /* older FreeBSD */

#if __FreeBSD_version >= 1100005
struct netmap_adapter *netmap_getna(if_t ifp);
#endif

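/*
 * Illustrative sketch (not part of this header): configuration paths are
 * expected to bracket their work with the global lock macros defined above.
 * The function name and the body below are hypothetical placeholders.
 */
#if 0	/* example only, never compiled */
static void
example_config_path(void)
{
	NMG_LOCK();
	/* ... create/destroy adapters, walk global lists, etc. ... */
	NMG_LOCK_ASSERT();	/* sanity check while the lock is held */
	NMG_UNLOCK();
}
#endif	/* example */
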
#if __FreeBSD_version >= 1100027
#define GET_MBUF_REFCNT(m)	((m)->m_ext.ext_cnt ? *((m)->m_ext.ext_cnt) : -1)
#define SET_MBUF_REFCNT(m, x)	*((m)->m_ext.ext_cnt) = x
#define PNT_MBUF_REFCNT(m)	((m)->m_ext.ext_cnt)
#else
#define GET_MBUF_REFCNT(m)	((m)->m_ext.ref_cnt ? *((m)->m_ext.ref_cnt) : -1)
#define SET_MBUF_REFCNT(m, x)	*((m)->m_ext.ref_cnt) = x
#define PNT_MBUF_REFCNT(m)	((m)->m_ext.ref_cnt)
#endif

MALLOC_DECLARE(M_NETMAP);

// XXX linux struct, not used in FreeBSD
struct net_device_ops {
};
struct ethtool_ops {
};
struct hrtimer {
};

#elif defined (linux)

#define	NM_LOCK_T	safe_spinlock_t	// see bsd_glue.h
#define	NM_SELINFO_T	wait_queue_head_t
#define	MBUF_LEN(m)	((m)->len)
#define	MBUF_IFP(m)	((m)->dev)
#define	NM_SEND_UP(ifp, m) \
	do { \
		m->priority = NM_MAGIC_PRIORITY_RX; \
		netif_rx(m); \
	} while (0)

#define NM_ATOMIC_T	volatile long unsigned int

// XXX a mtx would suffice here too 20130404 gl
#define NMG_LOCK_T	struct semaphore
#define NMG_LOCK_INIT()	sema_init(&netmap_global_lock, 1)
#define NMG_LOCK_DESTROY()
#define NMG_LOCK()	down(&netmap_global_lock)
#define NMG_UNLOCK()	up(&netmap_global_lock)
#define NMG_LOCK_ASSERT()	// XXX to be completed

#ifndef DEV_NETMAP
#define DEV_NETMAP
#endif /* DEV_NETMAP */

#elif defined (__APPLE__)

#warning apple support is incomplete.
#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)
#define	NM_LOCK_T	IOLock *
#define	NM_SELINFO_T	struct selinfo
#define	MBUF_LEN(m)	((m)->m_pkthdr.len)
#define	NM_SEND_UP(ifp, m)	((ifp)->if_input)(ifp, m)

#else

#error unsupported platform

#endif /* end - platform-specific code */

#define ND(format, ...)
#define D(format, ...)						\
	do {							\
		struct timeval __xxts;				\
		microtime(&__xxts);				\
		printf("%03d.%06d [%4d] %-25s " format "\n",	\
		(int)__xxts.tv_sec % 1000, (int)__xxts.tv_usec,	\
		__LINE__, __FUNCTION__, ##__VA_ARGS__);		\
	} while (0)

/* rate limited, lps indicates how many per second */
#define RD(lps, format, ...)					\
	do {							\
		static int t0, __cnt;				\
		if (t0 != time_second) {			\
			t0 = time_second;			\
			__cnt = 0;				\
		}						\
		if (__cnt++ < lps)				\
			D(format, ##__VA_ARGS__);		\
	} while (0)
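/*
 * Illustrative sketch (not part of this header): how the logging macros
 * above are meant to be used. D() always logs, ND() is compiled out, and
 * RD(lps, ...) logs at most 'lps' times per second. The variables in the
 * example are hypothetical.
 */
#if 0	/* example only, never compiled */
	D("ring %d reconfigured, %d slots", ring_id, num_slots);
	RD(5, "dropped packet on ring %d", ring_id);	/* at most 5 logs/s */
	ND("verbose debug, compiled out");
#endif	/* example */
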
struct netmap_adapter;
struct nm_bdg_fwd;
struct nm_bridge;
struct netmap_priv_d;

const char *nm_dump_buf(char *p, int len, int lim, char *dst);

#include "netmap_mbq.h"

extern NMG_LOCK_T	netmap_global_lock;

/*
 * private, kernel view of a ring. Keeps track of the status of
 * a ring across system calls.
 *
 *	nr_hwcur	index of the next buffer to refill.
 *			It corresponds to ring->head
 *			at the time the system call returns.
 *
 *	nr_hwtail	index of the first buffer owned by the kernel.
 *			On RX, hwcur->hwtail are receive buffers
 *			not yet released. hwcur is advanced following
 *			ring->head, hwtail is advanced on incoming packets,
 *			and a wakeup is generated when hwtail passes ring->cur
 *			On TX, hwcur->rcur have been filled by the sender
 *			but not sent yet to the NIC; rcur->hwtail are available
 *			for new transmissions, and hwtail->hwcur-1 are pending
 *			transmissions not yet acknowledged.
 *
 * The indexes in the NIC and netmap rings are offset by nkr_hwofs slots.
 * This is so that, on a reset, buffers owned by userspace are not
 * modified by the kernel. In particular:
 * RX rings: the next empty buffer (hwtail + hwofs) coincides with
 *	the next empty buffer as known by the hardware (next_to_check or so).
 * TX rings: hwcur + hwofs coincides with next_to_send
 *
 * For received packets, slot->flags is set to nkr_slot_flags
 * so we can provide a proper initial value (e.g. set NS_FORWARD
 * when operating in 'transparent' mode).
 *
 * The following fields are used to implement lock-free copy of packets
 * from input to output ports in VALE switch:
 *	nkr_hwlease	buffer after the last one being copied.
 *			A writer in nm_bdg_flush reserves N buffers
 *			from nkr_hwlease, advances it, then does the
 *			copy outside the lock.
 *			In RX rings (used for VALE ports),
 *			nkr_hwtail <= nkr_hwlease < nkr_hwcur+N-1
 *			In TX rings (used for NIC or host stack ports)
 *			nkr_hwcur <= nkr_hwlease < nkr_hwtail
 *	nkr_leases	array of nkr_num_slots where writers can report
 *			completion of their block. NR_NOSLOT (~0) indicates
 *			that the writer has not finished yet
 *	nkr_lease_idx	index of next free slot in nkr_leases, to be assigned
 *
 * The kring is manipulated by txsync/rxsync and the generic netmap functions.
 *
 * Concurrent rxsync or txsync on the same ring are prevented through
 * nm_kr_(try)lock() which in turn uses nr_busy. This is all we need
 * for NIC rings, and for TX rings attached to the host stack.
 *
 * RX rings attached to the host stack use an mbq (rx_queue) on both
 * rxsync_from_host() and netmap_transmit(). The mbq is protected
 * by its internal lock.
 *
 * RX rings attached to the VALE switch are accessed by both senders
 * and receivers. They are protected through the q_lock on the RX ring.
 */
struct netmap_kring {
	struct netmap_ring *ring;

	uint32_t nr_hwcur;
	uint32_t nr_hwtail;

	/*
	 * Copies of values in user rings, so we do not need to look
	 * at the ring (which could be modified). These are set in the
	 * *sync_prologue()/finalize() routines.
	 */
	uint32_t rhead;
	uint32_t rcur;
	uint32_t rtail;

	uint32_t nr_kflags;	/* private driver flags */
#define NKR_PENDINTR	0x1	// Pending interrupt.
	uint32_t nkr_num_slots;

	/*
	 * On a NIC reset, the NIC ring indexes may be reset but the
	 * indexes in the netmap rings remain the same. nkr_hwofs
	 * keeps track of the offset between the two.
	 */
	int32_t	nkr_hwofs;

	uint16_t nkr_slot_flags;	/* initial value for flags */

	/* last_reclaim is an opaque marker to help reduce the frequency
	 * of operations such as reclaiming tx buffers. A possible use
	 * is to set it to ticks and do the reclaim only once per tick.
	 */
	uint64_t last_reclaim;


	NM_SELINFO_T si;	/* poll/select wait queue */
	NM_LOCK_T q_lock;	/* protects kring and ring. */
	NM_ATOMIC_T nr_busy;	/* prevent concurrent syscalls */

	struct netmap_adapter *na;

	/* The following fields are for VALE switch support */
	struct nm_bdg_fwd *nkr_ft;
	uint32_t *nkr_leases;
#define NR_NOSLOT	((uint32_t)~0)	/* used in nkr_*lease* */
	uint32_t nkr_hwlease;
	uint32_t nkr_lease_idx;

	/* while nkr_stopped is set, no new [tr]xsync operations can
	 * be started on this kring.
	 * This is used by netmap_disable_all_rings()
	 * to find a synchronization point where critical data
	 * structures pointed to by the kring can be added or removed
	 */
	volatile int nkr_stopped;

	/* Support for adapters without native netmap support.
	 * On tx rings we preallocate an array of tx buffers
	 * (same size as the netmap ring), on rx rings we
	 * store incoming mbufs in a queue that is drained by
	 * a rxsync.
	 */
	struct mbuf **tx_pool;
	// u_int nr_ntc;	/* Emulation of a next-to-clean RX ring pointer. */
	struct mbq rx_queue;	/* intercepted rx mbufs. */

	uint32_t ring_id;	/* debugging */
	char name[64];		/* diagnostic */

	/* [tx]sync callback for this kring.
	 * The default nm_kring_create callback (netmap_krings_create)
	 * sets the nm_sync callback of each hardware tx(rx) kring to
	 * the corresponding nm_txsync(nm_rxsync) taken from the
	 * netmap_adapter; moreover, it sets the sync callback
	 * of the host tx(rx) ring to netmap_txsync_to_host
	 * (netmap_rxsync_from_host).
	 *
	 * Overrides: the above configuration is not changed by
	 * any of the nm_krings_create callbacks.
	 */
	int (*nm_sync)(struct netmap_kring *kring, int flags);

#ifdef WITH_PIPES
	struct netmap_kring *pipe;	/* if this is a pipe ring,
					 * pointer to the other end
					 */
	struct netmap_ring *save_ring;	/* pointer to hidden rings
					 * (see netmap_pipe.c for details)
					 */
#endif /* WITH_PIPES */

#ifdef WITH_MONITOR
	/* pointer to the adapter that is monitoring this kring (if any) */
	struct netmap_monitor_adapter *monitor;
	/*
	 * Monitors work by intercepting the txsync and/or rxsync of the
	 * monitored krings. This is implemented by replacing
	 * the nm_sync pointer above and saving the previous
	 * one in save_sync below.
	 */
	int (*save_sync)(struct netmap_kring *kring, int flags);
#endif
} __attribute__((__aligned__(64)));


/* return the next index, with wraparound */
static inline uint32_t
nm_next(uint32_t i, uint32_t lim)
{
	return unlikely (i == lim) ? 0 : i + 1;
}


/* return the previous index, with wraparound */
static inline uint32_t
nm_prev(uint32_t i, uint32_t lim)
{
	return unlikely (i == 0) ? lim : i - 1;
}

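/*
 * Illustrative sketch (not part of this header): ring slots are walked with
 * nm_next()/nm_prev() so the index wraps at nkr_num_slots - 1. The fragment
 * below, processing the slots the user made available (from nr_hwcur up to
 * rhead), is a hypothetical excerpt of a driver sync routine.
 */
#if 0	/* example only, never compiled */
	u_int nm_i = kring->nr_hwcur;
	u_int const head = kring->rhead;
	u_int const lim = kring->nkr_num_slots - 1;

	while (nm_i != head) {
		struct netmap_slot *slot = &kring->ring->slot[nm_i];
		/* ... program the NIC descriptor for slot->buf_idx ... */
		nm_i = nm_next(nm_i, lim);
	}
#endif	/* example */
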
/*
 *
 * Here is the layout for the Rx and Tx rings.
 *
 *       RxRING                            TxRING
 *
 *      +-----------------+            +-----------------+
 *      |                 |            |                 |
 *      |XXX free slot XXX|            |XXX free slot XXX|
 *      +-----------------+            +-----------------+
 *head->| owned by user   |<-hwcur     | not sent to nic |<-hwcur
 *      |                 |            | yet             |
 *      +-----------------+            |                 |
 * cur->| available to    |            |                 |
 *      | user, not read  |            +-----------------+
 *      | yet             |       cur->| (being          |
 *      |                 |            |  prepared)      |
 *      |                 |            |                 |
 *      +-----------------+            +     ------      +
 *tail->|                 |<-hwtail    |                 |<-hwlease
 *      | (being          | ...        |                 | ...
 *      |  prepared)      | ...        |                 | ...
 *      +-----------------+ ...        |                 | ...
 *      |                 |<-hwlease   +-----------------+
 *      |                 |      tail->|                 |<-hwtail
 *      |                 |            |                 |
 *      |                 |            |                 |
 *      |                 |            |                 |
 *      +-----------------+            +-----------------+
 *
 * The cur/tail (user view) and hwcur/hwtail (kernel view)
 * are used in the normal operation of the card.
 *
 * When a ring is the output of a switch port (Rx ring for
 * a VALE port, Tx ring for the host stack or NIC), slots
 * are reserved in blocks through 'hwlease' which points
 * to the next unused slot.
 * On an Rx ring, hwlease is always after hwtail,
 * and completions cause hwtail to advance.
 * On a Tx ring, hwlease is always between cur and hwtail,
 * and completions cause cur to advance.
 *
 * nm_kr_space() returns the maximum number of slots that
 * can be assigned.
 * nm_kr_lease() reserves the required number of buffers,
 *    advances nkr_hwlease and also returns an entry in
 *    a circular array where completions should be reported.
 */


enum txrx { NR_RX = 0, NR_TX = 1 };

struct netmap_vp_adapter; // forward

/*
 * The "struct netmap_adapter" extends the "struct adapter"
 * (or equivalent) device descriptor.
 * It contains all base fields needed to support netmap operation.
 * There are in fact different types of netmap adapters
 * (native, generic, VALE switch...) so a netmap_adapter is
 * just the first field in the derived type.
 */
struct netmap_adapter {
	/*
	 * On linux we do not have a good way to tell if an interface
	 * is netmap-capable. So we always use the following trick:
	 * NA(ifp) points here, and the first entry (which hopefully
	 * always exists and is at least 32 bits) contains a magic
	 * value which we can use to detect that the interface is good.
	 */
	uint32_t magic;
	uint32_t na_flags;	/* enabled, and other flags */
#define NAF_SKIP_INTR	1	/* use the regular interrupt handler.
				 * useful during initialization
				 */
#define NAF_SW_ONLY	2	/* forward packets only to sw adapter */
#define NAF_BDG_MAYSLEEP 4	/* the bridge is allowed to sleep when
				 * forwarding packets coming from this
				 * interface
				 */
#define NAF_MEM_OWNER	8	/* the adapter is responsible for the
				 * deallocation of the memory allocator
				 */
#define NAF_NATIVE_ON	16	/* the adapter is native and the attached
				 * interface is in netmap mode.
				 * Virtual ports (vale, pipe, monitor...)
				 * should never use this flag.
				 */
#define	NAF_NETMAP_ON	32	/* netmap is active (either native or
				 * emulated). Where possible (e.g. FreeBSD)
				 * IFCAP_NETMAP also mirrors this flag.
				 */
#define NAF_HOST_RINGS	64	/* the adapter supports the host rings */
#define NAF_FORCE_NATIVE 128	/* the adapter is always NATIVE */
#define	NAF_BUSY	(1U<<31) /* the adapter is used internally and
				  * cannot be registered from userspace
				  */
	int active_fds; /* number of user-space descriptors using this
			 interface, which is equal to the number of
			 struct netmap_if objs in the mapped region. */

	u_int num_rx_rings; /* number of adapter receive rings */
	u_int num_tx_rings; /* number of adapter transmit rings */

	u_int num_tx_desc; /* number of descriptors in each queue */
	u_int num_rx_desc;

	/* tx_rings and rx_rings are private but allocated
	 * as a contiguous chunk of memory. Each array has
	 * N+1 entries, for the adapter queues and for the host queue.
	 */
	struct netmap_kring *tx_rings; /* array of TX rings. */
	struct netmap_kring *rx_rings; /* array of RX rings. */

	void *tailroom;		       /* space below the rings array */
				       /* (used for leases) */


	NM_SELINFO_T tx_si, rx_si;	/* global wait queues */

	/* count users of the global wait queues */
	int tx_si_users, rx_si_users;

	void *pdev; /* used to store pci device */

	/* copy of if_qflush and if_transmit pointers, to intercept
	 * packets from the network stack when netmap is active.
	 */
	int	(*if_transmit)(struct ifnet *, struct mbuf *);

	/* copy of if_input for netmap_send_up() */
	void	(*if_input)(struct ifnet *, struct mbuf *);

	/* references to the ifnet and device routines, used by
	 * the generic netmap functions.
	 */
	struct ifnet *ifp; /* adapter is ifp->if_softc */

	/*---- callbacks for this netmap adapter -----*/
	/*
	 * nm_dtor() is the cleanup routine called when destroying
	 *	the adapter.
	 *	Called with NMG_LOCK held.
	 *
	 * nm_register() is called on NIOCREGIF and close() to enter
	 *	or exit netmap mode on the NIC
	 *	Called with NMG_LOCK held.
	 *
	 * nm_txsync() pushes packets to the underlying hw/switch
	 *
	 * nm_rxsync() collects packets from the underlying hw/switch
	 *
	 * nm_config() returns configuration information from the OS
	 *	Called with NMG_LOCK held.
	 *
	 * nm_krings_create() create and init the tx_rings and
	 *	rx_rings arrays of kring structures. In particular,
	 *	set the nm_sync callbacks for each ring.
	 *	There is no need to also allocate the corresponding
	 *	netmap_rings, since netmap_mem_rings_create() will always
	 *	be called to provide the missing ones.
	 *	Called with NMG_LOCK held.
	 *
	 * nm_krings_delete() cleanup and delete the tx_rings and rx_rings
	 *	arrays
	 *	Called with NMG_LOCK held.
	 *
	 * nm_notify() is used to act after data have become available
	 *	(or the stopped state of the ring has changed)
	 *	For hw devices this is typically a selwakeup(),
	 *	but for NIC/host ports attached to a switch (or vice-versa)
	 *	we also need to invoke the 'txsync' code downstream.
	 */
	void (*nm_dtor)(struct netmap_adapter *);

	int (*nm_register)(struct netmap_adapter *, int onoff);

	int (*nm_txsync)(struct netmap_kring *kring, int flags);
	int (*nm_rxsync)(struct netmap_kring *kring, int flags);
#define NAF_FORCE_READ    1
#define NAF_FORCE_RECLAIM 2
	/* return configuration information */
	int (*nm_config)(struct netmap_adapter *,
		u_int *txr, u_int *txd, u_int *rxr, u_int *rxd);
	int (*nm_krings_create)(struct netmap_adapter *);
	void (*nm_krings_delete)(struct netmap_adapter *);
	int (*nm_notify)(struct netmap_adapter *,
		u_int ring, enum txrx, int flags);
#define NAF_DISABLE_NOTIFY 8	/* notify that the stopped state of the
				 * ring has changed (kring->nkr_stopped)
				 */

#ifdef WITH_VALE
	/*
	 * nm_bdg_attach() initializes the na_vp field to point
	 *	to an adapter that can be attached to a VALE switch. If the
	 *	current adapter is already a VALE port, na_vp is simply a cast;
	 *	otherwise, na_vp points to a netmap_bwrap_adapter.
	 *	If applicable, this callback also initializes na_hostvp,
	 *	that can be used to connect the adapter host rings to the
	 *	switch.
	 *	Called with NMG_LOCK held.
	 *
	 * nm_bdg_ctl() is called on the actual attach/detach to/from
	 *	the switch, to perform adapter-specific
	 *	initializations
	 *	Called with NMG_LOCK held.
	 */
	int (*nm_bdg_attach)(const char *bdg_name, struct netmap_adapter *);
	int (*nm_bdg_ctl)(struct netmap_adapter *, struct nmreq *, int);

	/* adapter used to attach this adapter to a VALE switch (if any) */
	struct netmap_vp_adapter *na_vp;
	/* adapter used to attach the host rings of this adapter
	 * to a VALE switch (if any) */
	struct netmap_vp_adapter *na_hostvp;
#endif

	/* standard refcount to control the lifetime of the adapter
	 * (it should be equal to the lifetime of the corresponding ifp)
	 */
	int na_refcount;

	/* memory allocator (opaque)
	 * We also cache a pointer to the lut_entry for translating
	 * buffer addresses, and the total number of buffers.
	 */
	struct netmap_mem_d *nm_mem;
	struct lut_entry *na_lut;
	uint32_t na_lut_objtotal;	/* max buffer index */
	uint32_t na_lut_objsize;	/* buffer size */

	/* additional information attached to this adapter
	 * by other netmap subsystems. Currently used by
	 * bwrap and LINUX/v1000.
	 */
	void *na_private;

#ifdef WITH_PIPES
	/* array of pipes that have this adapter as a parent */
	struct netmap_pipe_adapter **na_pipes;
	int na_next_pipe;	/* next free slot in the array */
	int na_max_pipes;	/* size of the array */
#endif /* WITH_PIPES */

	char name[64];
};


/*
 * If the NIC is owned by the kernel
 * (i.e., bridge), neither another bridge nor user can use it;
 * if the NIC is owned by a user, only users can share it.
 * Evaluation must be done under NMG_LOCK().
 */
#define NETMAP_OWNED_BY_KERN(na)	((na)->na_flags & NAF_BUSY)
#define NETMAP_OWNED_BY_ANY(na) \
	(NETMAP_OWNED_BY_KERN(na) || ((na)->active_fds > 0))

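/*
 * Illustrative sketch (not part of this header): a minimal outline of how a
 * native driver could fill in a struct netmap_adapter before registering it
 * with netmap_attach() (declared later in this file). The foo_* names and
 * softc fields are hypothetical placeholders.
 */
#if 0	/* example only, never compiled */
static void
foo_netmap_attach(struct foo_softc *sc)
{
	struct netmap_adapter na;

	bzero(&na, sizeof(na));
	na.ifp = sc->ifp;
	na.num_tx_desc = sc->num_tx_descriptors;
	na.num_rx_desc = sc->num_rx_descriptors;
	na.num_tx_rings = na.num_rx_rings = sc->num_queues;
	na.nm_register = foo_netmap_reg;	/* enter/exit netmap mode */
	na.nm_txsync = foo_netmap_txsync;
	na.nm_rxsync = foo_netmap_rxsync;
	netmap_attach(&na);
}
#endif	/* example */
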
/*
 * derived netmap adapters for various types of ports
 */
struct netmap_vp_adapter {	/* VALE software port */
	struct netmap_adapter up;

	/*
	 * Bridge support:
	 *
	 * bdg_port is the port number used in the bridge;
	 * na_bdg points to the bridge this NA is attached to.
	 */
	int bdg_port;
	struct nm_bridge *na_bdg;
	int retry;

	/* Offset of ethernet header for each packet. */
	u_int virt_hdr_len;
	/* Maximum Frame Size, used in bdg_mismatch_datapath() */
	u_int mfs;
};


struct netmap_hw_adapter {	/* physical device */
	struct netmap_adapter up;

	struct net_device_ops nm_ndo;	// XXX linux only
	struct ethtool_ops    nm_eto;	// XXX linux only
	const struct ethtool_ops*   save_ethtool;

	int (*nm_hw_register)(struct netmap_adapter *, int onoff);
};

/* Mitigation support. */
struct nm_generic_mit {
	struct hrtimer mit_timer;
	int mit_pending;
	int mit_ring_idx;  /* index of the ring being mitigated */
	struct netmap_adapter *mit_na;  /* backpointer */
};

struct netmap_generic_adapter {	/* emulated device */
	struct netmap_hw_adapter up;

	/* Pointer to a previously used netmap adapter. */
	struct netmap_adapter *prev;

	/* generic netmap adapters support:
	 * a net_device_ops struct overrides ndo_select_queue(),
	 * save_if_input saves the if_input hook (FreeBSD),
	 * mit implements rx interrupt mitigation,
	 */
	struct net_device_ops generic_ndo;
	void (*save_if_input)(struct ifnet *, struct mbuf *);

	struct nm_generic_mit *mit;
#ifdef linux
	netdev_tx_t (*save_start_xmit)(struct mbuf *, struct ifnet *);
#endif
};

static __inline int
netmap_real_tx_rings(struct netmap_adapter *na)
{
	return na->num_tx_rings + !!(na->na_flags & NAF_HOST_RINGS);
}

static __inline int
netmap_real_rx_rings(struct netmap_adapter *na)
{
	return na->num_rx_rings + !!(na->na_flags & NAF_HOST_RINGS);
}

#ifdef WITH_VALE

/*
 * Bridge wrapper for non VALE ports attached to a VALE switch.
 *
 * The real device must already have its own netmap adapter (hwna).
 * The bridge wrapper and the hwna adapter share the same set of
 * netmap rings and buffers, but they have two separate sets of
 * krings descriptors, with tx/rx meanings swapped:
 *
 *                                  netmap
 *           bwrap     krings       rings       krings      hwna
 *         +------+   +------+     +-----+     +------+   +------+
 *         |tx_rings->|      |\   /|     |-----|      |<-tx_rings|
 *         |      |   +------+ \ / +-----+     +------+   |      |
 *         |      |             X                         |      |
 *         |      |            / \                        |      |
 *         |      |   +------+/   \+-----+     +------+   |      |
 *         |rx_rings->|      |     |     |-----|      |<-rx_rings|
 *         |      |   +------+     +-----+     +------+   |      |
 *         +------+                                       +------+
 *
 * - packets coming from the bridge go to the bwrap rx rings,
 *   which are also the hwna tx rings. The bwrap notify callback
 *   will then complete the hwna tx (see netmap_bwrap_notify).
 *
 * - packets coming from the outside go to the hwna rx rings,
 *   which are also the bwrap tx rings. The (overwritten) hwna
 *   notify method will then complete the bridge tx
 *   (see netmap_bwrap_intr_notify).
 *
 * The bridge wrapper may optionally connect the hwna 'host' rings
 * to the bridge. This is done by using a second port in the
 * bridge and connecting it to the 'host' netmap_vp_adapter
 * contained in the netmap_bwrap_adapter. The bwrap host adapter
 * cross-links the hwna host rings in the same way as shown above.
 *
 * - packets coming from the bridge and directed to the host stack
 *   are handled by the bwrap host notify callback
 *   (see netmap_bwrap_host_notify)
 *
 * - packets coming from the host stack are still handled by the
 *   overwritten hwna notify callback (netmap_bwrap_intr_notify),
 *   but are diverted to the host adapter depending on the ring number.
 *
 */
struct netmap_bwrap_adapter {
	struct netmap_vp_adapter up;
	struct netmap_vp_adapter host;  /* for host rings */
	struct netmap_adapter *hwna;	/* the underlying device */

	/* backup of the hwna notify callback */
	int (*save_notify)(struct netmap_adapter *,
			u_int ring, enum txrx, int flags);
	/* backup of the hwna memory allocator */
	struct netmap_mem_d *save_nmd;

	/*
	 * When we attach a physical interface to the bridge, we
	 * allow the controlling process to terminate, so we need
	 * a place to store the netmap_priv_d data structure.
	 * This is only done when physical interfaces
	 * are attached to a bridge.
	 */
	struct netmap_priv_d *na_kpriv;
};
int netmap_bwrap_attach(const char *name, struct netmap_adapter *);


#endif /* WITH_VALE */

#ifdef WITH_PIPES

#define NM_MAXPIPES	64	/* max number of pipes per adapter */

struct netmap_pipe_adapter {
	struct netmap_adapter up;

	u_int id;	/* pipe identifier */
	int role;	/* either NR_REG_PIPE_MASTER or NR_REG_PIPE_SLAVE */

	struct netmap_adapter *parent; /* adapter that owns the memory */
	struct netmap_pipe_adapter *peer; /* the other end of the pipe */
	int peer_ref;		/* 1 iff we are holding a ref to the peer */

	u_int parent_slot; /* index in the parent pipe array */
};

#endif /* WITH_PIPES */


/* return slots reserved to rx clients; used in drivers */
static inline uint32_t
nm_kr_rxspace(struct netmap_kring *k)
{
	int space = k->nr_hwtail - k->nr_hwcur;
	if (space < 0)
		space += k->nkr_num_slots;
	ND("preserving %d rx slots %d -> %d", space, k->nr_hwcur, k->nr_hwtail);

	return space;
}


/* True if no space in the tx ring. only valid after txsync_prologue */
static inline int
nm_kr_txempty(struct netmap_kring *kring)
{
	return kring->rcur == kring->nr_hwtail;
}


/*
 * protect against multiple threads using the same ring.
 * also check that the ring has not been stopped.
 * We only care for 0 or !=0 as a return code.
 */
#define NM_KR_BUSY	1
#define NM_KR_STOPPED	2


static __inline void nm_kr_put(struct netmap_kring *kr)
{
	NM_ATOMIC_CLEAR(&kr->nr_busy);
}


static __inline int nm_kr_tryget(struct netmap_kring *kr)
{
	/* check a first time without taking the lock
	 * to avoid starvation for nm_kr_get()
	 */
	if (unlikely(kr->nkr_stopped)) {
		ND("ring %p stopped (%d)", kr, kr->nkr_stopped);
		return NM_KR_STOPPED;
	}
	if (unlikely(NM_ATOMIC_TEST_AND_SET(&kr->nr_busy)))
		return NM_KR_BUSY;
	/* check a second time with lock held */
	if (unlikely(kr->nkr_stopped)) {
		ND("ring %p stopped (%d)", kr, kr->nkr_stopped);
		nm_kr_put(kr);
		return NM_KR_STOPPED;
	}
	return 0;
}

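/*
 * Illustrative sketch (not part of this header): code paths that touch a
 * kring outside of the regular syscall path are expected to bracket the
 * access with nm_kr_tryget()/nm_kr_put(). The function below is a
 * hypothetical placeholder.
 */
#if 0	/* example only, never compiled */
static int
example_kring_access(struct netmap_kring *kr)
{
	int error = nm_kr_tryget(kr);	/* NM_KR_BUSY or NM_KR_STOPPED on failure */

	if (error)
		return error;
	/* ... safe to use kr->ring and the kring indexes here ... */
	nm_kr_put(kr);
	return 0;
}
#endif	/* example */
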
/*
 * The following functions are used by individual drivers to
 * support netmap operation.
 *
 * netmap_attach() initializes a struct netmap_adapter, allocating the
 *	struct netmap_ring's and the struct selinfo.
 *
 * netmap_detach() frees the memory allocated by netmap_attach().
 *
 * netmap_transmit() replaces the if_transmit routine of the interface,
 *	and is used to intercept packets coming from the stack.
 *
 * netmap_load_map/netmap_reload_map are helper routines to set/reset
 *	the dmamap for a packet buffer
 *
 * netmap_reset() is a helper routine to be called in the hw driver
 *	when reinitializing a ring. It should not be called by
 *	virtual ports (vale, pipes, monitor)
 */
int netmap_attach(struct netmap_adapter *);
void netmap_detach(struct ifnet *);
int netmap_transmit(struct ifnet *, struct mbuf *);
struct netmap_slot *netmap_reset(struct netmap_adapter *na,
	enum txrx tx, u_int n, u_int new_cur);
int netmap_ring_reinit(struct netmap_kring *);

/* default functions to handle rx/tx interrupts */
int netmap_rx_irq(struct ifnet *, u_int, u_int *);
#define netmap_tx_irq(_n, _q) netmap_rx_irq(_n, _q, NULL)
void netmap_common_irq(struct ifnet *, u_int, u_int *work_done);

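/*
 * Illustrative sketch (not part of this header): how a driver interrupt
 * handler can hand control to netmap when the interface is in netmap mode.
 * A non-zero return from netmap_rx_irq() means netmap consumed the event.
 * The foo_* names are hypothetical placeholders.
 */
#if 0	/* example only, never compiled */
static void
foo_rxeof(struct foo_rx_queue *rxq)
{
	u_int work_done = 0;

	if (netmap_rx_irq(rxq->ifp, rxq->ring_id, &work_done))
		return;	/* netmap clients were woken up, nothing else to do */
	/* ... normal, non-netmap rx processing ... */
}
#endif	/* example */
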
#ifdef WITH_VALE
/* functions used by external modules to interface with VALE */
#define netmap_vp_to_ifp(_vp)		((_vp)->up.ifp)
#define netmap_ifp_to_vp(_ifp)		(NA(_ifp)->na_vp)
#define netmap_ifp_to_host_vp(_ifp)	(NA(_ifp)->na_hostvp)
#define netmap_bdg_idx(_vp)		((_vp)->bdg_port)
const char *netmap_bdg_name(struct netmap_vp_adapter *);
#else /* !WITH_VALE */
#define netmap_vp_to_ifp(_vp)		NULL
#define netmap_ifp_to_vp(_ifp)		NULL
#define netmap_ifp_to_host_vp(_ifp)	NULL
#define netmap_bdg_idx(_vp)		-1
#define netmap_bdg_name(_vp)		NULL
#endif /* WITH_VALE */

static inline int
nm_native_on(struct netmap_adapter *na)
{
	return na && na->na_flags & NAF_NATIVE_ON;
}

static inline int
nm_netmap_on(struct netmap_adapter *na)
{
	return na && na->na_flags & NAF_NETMAP_ON;
}

/* set/clear native flags and if_transmit/netdev_ops */
static inline void
nm_set_native_flags(struct netmap_adapter *na)
{
	struct ifnet *ifp = na->ifp;

	na->na_flags |= (NAF_NATIVE_ON | NAF_NETMAP_ON);
#ifdef IFCAP_NETMAP /* or FreeBSD ? */
	ifp->if_capenable |= IFCAP_NETMAP;
#endif
#ifdef __FreeBSD__
	na->if_transmit = ifp->if_transmit;
	ifp->if_transmit = netmap_transmit;
#else
	na->if_transmit = (void *)ifp->netdev_ops;
	ifp->netdev_ops = &((struct netmap_hw_adapter *)na)->nm_ndo;
	((struct netmap_hw_adapter *)na)->save_ethtool = ifp->ethtool_ops;
	ifp->ethtool_ops = &((struct netmap_hw_adapter*)na)->nm_eto;
#endif
}


static inline void
nm_clear_native_flags(struct netmap_adapter *na)
{
	struct ifnet *ifp = na->ifp;

#ifdef __FreeBSD__
	ifp->if_transmit = na->if_transmit;
#else
	ifp->netdev_ops = (void *)na->if_transmit;
	ifp->ethtool_ops = ((struct netmap_hw_adapter*)na)->save_ethtool;
#endif
	na->na_flags &= ~(NAF_NATIVE_ON | NAF_NETMAP_ON);
#ifdef IFCAP_NETMAP /* or FreeBSD ? */
	ifp->if_capenable &= ~IFCAP_NETMAP;
#endif
}


/*
 * validates parameters in the ring/kring, returns a value for head.
 * If any error, returns ring_size to force a reinit.
 */
uint32_t nm_txsync_prologue(struct netmap_kring *);


/*
 * validates parameters in the ring/kring, returns a value for head,
 * and the 'reserved' value in the argument.
 * If any error, returns ring_size lim to force a reinit.
 */
uint32_t nm_rxsync_prologue(struct netmap_kring *);


/*
 * update kring and ring at the end of txsync.
 */
static inline void
nm_txsync_finalize(struct netmap_kring *kring)
{
	/* update ring tail to what the kernel knows */
	kring->ring->tail = kring->rtail = kring->nr_hwtail;

	/* note, head/rhead/hwcur might be behind cur/rcur
	 * if no carrier
	 */
	ND(5, "%s now hwcur %d hwtail %d head %d cur %d tail %d",
		kring->name, kring->nr_hwcur, kring->nr_hwtail,
		kring->rhead, kring->rcur, kring->rtail);
}


/*
 * update kring and ring at the end of rxsync
 */
static inline void
nm_rxsync_finalize(struct netmap_kring *kring)
{
	/* tell userspace that there might be new packets */
	//struct netmap_ring *ring = kring->ring;
	ND("head %d cur %d tail %d -> %d", ring->head, ring->cur, ring->tail,
		kring->nr_hwtail);
	kring->ring->tail = kring->rtail = kring->nr_hwtail;
	/* make a copy of the state for next round */
	kring->rhead = kring->ring->head;
	kring->rcur = kring->ring->cur;
}

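/*
 * Illustrative sketch (not part of this header): overall shape of a driver
 * nm_txsync() callback. By the time the callback runs, the core has already
 * validated the user indexes with nm_txsync_prologue(), so the driver uses
 * kring->rhead directly and calls nm_txsync_finalize() at the end. The
 * hardware-specific parts are only hinted at; foo_* is a hypothetical name.
 */
#if 0	/* example only, never compiled */
static int
foo_netmap_txsync(struct netmap_kring *kring, int flags)
{
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	u_int nm_i;

	/* first part: send the packets in [nr_hwcur .. head) */
	for (nm_i = kring->nr_hwcur; nm_i != head; nm_i = nm_next(nm_i, lim)) {
		/* ... map the buffer and fill the NIC tx descriptor ... */
	}
	kring->nr_hwcur = head;

	/* second part: reclaim completed buffers, advancing nr_hwtail */
	/* ... read the NIC completion index and update kring->nr_hwtail ... */

	nm_txsync_finalize(kring);
	return 0;
}
#endif	/* example */
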
/* check/fix address and len in tx rings */
#if 1 /* debug version */
#define	NM_CHECK_ADDR_LEN(_na, _a, _l)	do {				\
	if (_a == NETMAP_BUF_BASE(_na) || _l > NETMAP_BUF_SIZE(_na)) {	\
		RD(5, "bad addr/len ring %d slot %d idx %d len %d",	\
			kring->ring_id, nm_i, slot->buf_idx, len);	\
		if (_l > NETMAP_BUF_SIZE(_na))				\
			_l = NETMAP_BUF_SIZE(_na);			\
	} } while (0)
#else /* no debug version */
#define	NM_CHECK_ADDR_LEN(_na, _a, _l)	do {				\
		if (_l > NETMAP_BUF_SIZE(_na))				\
			_l = NETMAP_BUF_SIZE(_na);			\
	} while (0)
#endif


/*---------------------------------------------------------------*/
/*
 * Support routines used by netmap subsystems
 * (native drivers, VALE, generic, pipes, monitors, ...)
 */


/* common routine for all functions that create a netmap adapter. It performs
 * two main tasks:
 * - if the na points to an ifp, mark the ifp as netmap capable
 *   using na as its native adapter;
 * - provide defaults for the setup callbacks and the memory allocator
 */
int netmap_attach_common(struct netmap_adapter *);
/* common actions to be performed on netmap adapter destruction */
void netmap_detach_common(struct netmap_adapter *);
/* fill priv->np_[tr]xq{first,last} using the ringid and flags information
 * coming from a struct nmreq
 */
int netmap_interp_ringid(struct netmap_priv_d *priv, uint16_t ringid, uint32_t flags);
/* update the ring parameters (number and size of tx and rx rings).
 * It calls the nm_config callback, if available.
 */
int netmap_update_config(struct netmap_adapter *na);
/* create and initialize the common fields of the krings array.
 * using the information that must be already available in the na.
 * tailroom can be used to request the allocation of additional
 * tailroom bytes after the krings array. This is used by
 * netmap_vp_adapter's (i.e., VALE ports) to make room for
 * leasing-related data structures
 */
int netmap_krings_create(struct netmap_adapter *na, u_int tailroom);
/* deletes the kring array of the adapter. The array must have
 * been created using netmap_krings_create
 */
void netmap_krings_delete(struct netmap_adapter *na);

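/*
 * Illustrative sketch (not part of this header): an adapter-specific
 * nm_krings_create callback can often just defer to netmap_krings_create()
 * with no extra tailroom, while VALE ports pass a non-zero tailroom to make
 * room for the leasing structures. The foo_* name is a hypothetical
 * placeholder.
 */
#if 0	/* example only, never compiled */
static int
foo_netmap_krings_create(struct netmap_adapter *na)
{
	return netmap_krings_create(na, 0 /* no extra tailroom */);
}
#endif	/* example */
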
/* set the stopped/enabled status of a ring.
 * When stopping, these functions also wait for all current activity
 * on the ring to terminate. The status change is then notified using
 * the na nm_notify callback.
 */
void netmap_set_txring(struct netmap_adapter *, u_int ring_id, int stopped);
void netmap_set_rxring(struct netmap_adapter *, u_int ring_id, int stopped);
/* set the stopped/enabled status of all rings of the adapter. */
void netmap_set_all_rings(struct netmap_adapter *, int stopped);
/* convenience wrappers for netmap_set_all_rings, used in drivers */
void netmap_disable_all_rings(struct ifnet *);
void netmap_enable_all_rings(struct ifnet *);

int netmap_rxsync_from_host(struct netmap_adapter *na, struct thread *td, void *pwait);

struct netmap_if *
netmap_do_regif(struct netmap_priv_d *priv, struct netmap_adapter *na,
	uint16_t ringid, uint32_t flags, int *err);


u_int nm_bound_var(u_int *v, u_int dflt, u_int lo, u_int hi, const char *msg);
int netmap_get_na(struct nmreq *nmr, struct netmap_adapter **na, int create);
int netmap_get_hw_na(struct ifnet *ifp, struct netmap_adapter **na);


#ifdef WITH_VALE
/*
 * The following bridge-related functions are used by other
 * kernel modules.
 *
 * VALE only supports unicast or broadcast. The lookup
 * function can return 0 .. NM_BDG_MAXPORTS-1 for regular ports,
 * NM_BDG_MAXPORTS for broadcast, NM_BDG_MAXPORTS+1 for unknown.
 * XXX in practice "unknown" might be handled the same as broadcast.
 */
typedef u_int (*bdg_lookup_fn_t)(struct nm_bdg_fwd *ft, uint8_t *ring_nr,
		const struct netmap_vp_adapter *);
typedef int (*bdg_config_fn_t)(struct nm_ifreq *);
typedef void (*bdg_dtor_fn_t)(const struct netmap_vp_adapter *);
struct netmap_bdg_ops {
	bdg_lookup_fn_t lookup;
	bdg_config_fn_t config;
	bdg_dtor_fn_t	dtor;
};

u_int netmap_bdg_learning(struct nm_bdg_fwd *ft, uint8_t *dst_ring,
		const struct netmap_vp_adapter *);

#define	NM_BDG_MAXPORTS		254	/* up to 254 */
#define	NM_BDG_BROADCAST	NM_BDG_MAXPORTS
#define	NM_BDG_NOPORT		(NM_BDG_MAXPORTS+1)

#define	NM_NAME			"vale"	/* prefix for bridge port name */

/* these are redefined in case of no VALE support */
int netmap_get_bdg_na(struct nmreq *nmr, struct netmap_adapter **na, int create);
void netmap_init_bridges(void);
int netmap_bdg_ctl(struct nmreq *nmr, struct netmap_bdg_ops *bdg_ops);
int netmap_bdg_config(struct nmreq *nmr);

#else /* !WITH_VALE */
#define	netmap_get_bdg_na(_1, _2, _3)	0
#define	netmap_init_bridges(_1)
#define	netmap_bdg_ctl(_1, _2)	EINVAL
#endif /* !WITH_VALE */
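/*
 * Illustrative sketch, not part of the header: a hypothetical external
 * module could override the forwarding decision of a VALE bridge by
 * installing its own netmap_bdg_ops through netmap_bdg_ctl().  The lookup
 * callback below is a toy that floods every frame; a real module would
 * inspect ft->ft_buf and return a specific port index in 0..NM_BDG_MAXPORTS-1.
 */
#if 0
static u_int
example_bdg_lookup(struct nm_bdg_fwd *ft, uint8_t *dst_ring,
	const struct netmap_vp_adapter *vpna)
{
	(void)ft; (void)dst_ring; (void)vpna;
	return NM_BDG_BROADCAST;	/* flood to all ports */
}

static struct netmap_bdg_ops example_bdg_ops = {
	.lookup = example_bdg_lookup,
	.config = NULL,
	.dtor = NULL,
};

/* A caller holding a suitable struct nmreq (naming the bridge in nr_name)
 * could then install the ops with:
 *	error = netmap_bdg_ctl(nmr, &example_bdg_ops);
 */
#endif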
1102f0ea3689SLuigi Rizzo #ifdef WITH_PIPES 1103f0ea3689SLuigi Rizzo /* max number of pipes per device */ 1104f0ea3689SLuigi Rizzo #define NM_MAXPIPES 64 /* XXX how many? */ 1105f0ea3689SLuigi Rizzo /* in case of no error, returns the actual number of pipes in nmr->nr_arg1 */ 1106f0ea3689SLuigi Rizzo int netmap_pipe_alloc(struct netmap_adapter *, struct nmreq *nmr); 1107f0ea3689SLuigi Rizzo void netmap_pipe_dealloc(struct netmap_adapter *); 1108f0ea3689SLuigi Rizzo int netmap_get_pipe_na(struct nmreq *nmr, struct netmap_adapter **na, int create); 1109f0ea3689SLuigi Rizzo #else /* !WITH_PIPES */ 1110f0ea3689SLuigi Rizzo #define NM_MAXPIPES 0 1111f0ea3689SLuigi Rizzo #define netmap_pipe_alloc(_1, _2) EOPNOTSUPP 1112f0ea3689SLuigi Rizzo #define netmap_pipe_dealloc(_1) 1113f0ea3689SLuigi Rizzo #define netmap_get_pipe_na(_1, _2, _3) 0 1114f0ea3689SLuigi Rizzo #endif 1115f0ea3689SLuigi Rizzo 11164bf50f18SLuigi Rizzo #ifdef WITH_MONITOR 11174bf50f18SLuigi Rizzo int netmap_get_monitor_na(struct nmreq *nmr, struct netmap_adapter **na, int create); 11184bf50f18SLuigi Rizzo #else 11194bf50f18SLuigi Rizzo #define netmap_get_monitor_na(_1, _2, _3) 0 11204bf50f18SLuigi Rizzo #endif 11214bf50f18SLuigi Rizzo 1122f9790aebSLuigi Rizzo /* Various prototypes */ 1123f9790aebSLuigi Rizzo int netmap_poll(struct cdev *dev, int events, struct thread *td); 1124f9790aebSLuigi Rizzo int netmap_init(void); 1125f9790aebSLuigi Rizzo void netmap_fini(void); 1126f9790aebSLuigi Rizzo int netmap_get_memory(struct netmap_priv_d* p); 1127f9790aebSLuigi Rizzo void netmap_dtor(void *data); 1128f9790aebSLuigi Rizzo int netmap_dtor_locked(struct netmap_priv_d *priv); 1129f9790aebSLuigi Rizzo 1130f9790aebSLuigi Rizzo int netmap_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td); 1131f9790aebSLuigi Rizzo 1132f9790aebSLuigi Rizzo /* netmap_adapter creation/destruction */ 113317885a7bSLuigi Rizzo 113417885a7bSLuigi Rizzo // #define NM_DEBUG_PUTGET 1 1135f9790aebSLuigi Rizzo 1136f9790aebSLuigi Rizzo #ifdef NM_DEBUG_PUTGET 1137f9790aebSLuigi Rizzo 1138f9790aebSLuigi Rizzo #define NM_DBG(f) __##f 1139f9790aebSLuigi Rizzo 1140f9790aebSLuigi Rizzo void __netmap_adapter_get(struct netmap_adapter *na); 1141f9790aebSLuigi Rizzo 1142f9790aebSLuigi Rizzo #define netmap_adapter_get(na) \ 1143f9790aebSLuigi Rizzo do { \ 1144f9790aebSLuigi Rizzo struct netmap_adapter *__na = na; \ 11454bf50f18SLuigi Rizzo D("getting %p:%s (%d)", __na, (__na)->name, (__na)->na_refcount); \ 1146f9790aebSLuigi Rizzo __netmap_adapter_get(__na); \ 1147f9790aebSLuigi Rizzo } while (0) 1148f9790aebSLuigi Rizzo 1149f9790aebSLuigi Rizzo int __netmap_adapter_put(struct netmap_adapter *na); 1150f9790aebSLuigi Rizzo 1151f9790aebSLuigi Rizzo #define netmap_adapter_put(na) \ 1152fb25194fSLuigi Rizzo ({ \ 1153f9790aebSLuigi Rizzo struct netmap_adapter *__na = na; \ 11544bf50f18SLuigi Rizzo D("putting %p:%s (%d)", __na, (__na)->name, (__na)->na_refcount); \ 1155f9790aebSLuigi Rizzo __netmap_adapter_put(__na); \ 1156fb25194fSLuigi Rizzo }) 1157f9790aebSLuigi Rizzo 1158f9790aebSLuigi Rizzo #else /* !NM_DEBUG_PUTGET */ 1159f9790aebSLuigi Rizzo 1160f9790aebSLuigi Rizzo #define NM_DBG(f) f 1161f9790aebSLuigi Rizzo void netmap_adapter_get(struct netmap_adapter *na); 1162f9790aebSLuigi Rizzo int netmap_adapter_put(struct netmap_adapter *na); 1163f9790aebSLuigi Rizzo 1164f9790aebSLuigi Rizzo #endif /* !NM_DEBUG_PUTGET */ 1165f9790aebSLuigi Rizzo 1166f9790aebSLuigi Rizzo 116717885a7bSLuigi Rizzo /* 116817885a7bSLuigi Rizzo * module variables 116917885a7bSLuigi 
Rizzo */ 11704bf50f18SLuigi Rizzo #define NETMAP_BUF_BASE(na) ((na)->na_lut[0].vaddr) 11714bf50f18SLuigi Rizzo #define NETMAP_BUF_SIZE(na) ((na)->na_lut_objsize) 117217885a7bSLuigi Rizzo extern int netmap_mitigate; // XXX not really used 11735819da83SLuigi Rizzo extern int netmap_no_pendintr; 117468b8534bSLuigi Rizzo extern int netmap_verbose; // XXX debugging 117568b8534bSLuigi Rizzo enum { /* verbose flags */ 117668b8534bSLuigi Rizzo NM_VERB_ON = 1, /* generic verbose */ 117768b8534bSLuigi Rizzo NM_VERB_HOST = 0x2, /* verbose host stack */ 117868b8534bSLuigi Rizzo NM_VERB_RXSYNC = 0x10, /* verbose on rxsync/txsync */ 117968b8534bSLuigi Rizzo NM_VERB_TXSYNC = 0x20, 118068b8534bSLuigi Rizzo NM_VERB_RXINTR = 0x100, /* verbose on rx/tx intr (driver) */ 118168b8534bSLuigi Rizzo NM_VERB_TXINTR = 0x200, 118268b8534bSLuigi Rizzo NM_VERB_NIC_RXSYNC = 0x1000, /* verbose on rx/tx intr (driver) */ 118368b8534bSLuigi Rizzo NM_VERB_NIC_TXSYNC = 0x2000, 118468b8534bSLuigi Rizzo }; 118568b8534bSLuigi Rizzo 1186f9790aebSLuigi Rizzo extern int netmap_txsync_retry; 1187f9790aebSLuigi Rizzo extern int netmap_generic_mit; 1188f9790aebSLuigi Rizzo extern int netmap_generic_ringsize; 1189f0ea3689SLuigi Rizzo extern int netmap_generic_rings; 1190f9790aebSLuigi Rizzo 119168b8534bSLuigi Rizzo /* 1192d0c7b075SLuigi Rizzo * NA returns a pointer to the struct netmap adapter from the ifp, 1193d0c7b075SLuigi Rizzo * WNA is used to write it. 119468b8534bSLuigi Rizzo */ 1195d0c7b075SLuigi Rizzo #define NA(_ifp) ((struct netmap_adapter *)WNA(_ifp)) 119668b8534bSLuigi Rizzo 11978241616dSLuigi Rizzo /* 11988241616dSLuigi Rizzo * Macros to determine if an interface is netmap capable or netmap enabled. 11998241616dSLuigi Rizzo * See the magic field in struct netmap_adapter. 12008241616dSLuigi Rizzo */ 12018241616dSLuigi Rizzo #ifdef __FreeBSD__ 12028241616dSLuigi Rizzo /* 12038241616dSLuigi Rizzo * on FreeBSD just use if_capabilities and if_capenable. 12048241616dSLuigi Rizzo */ 12058241616dSLuigi Rizzo #define NETMAP_CAPABLE(ifp) (NA(ifp) && \ 12068241616dSLuigi Rizzo (ifp)->if_capabilities & IFCAP_NETMAP ) 12078241616dSLuigi Rizzo 12088241616dSLuigi Rizzo #define NETMAP_SET_CAPABLE(ifp) \ 12098241616dSLuigi Rizzo (ifp)->if_capabilities |= IFCAP_NETMAP 12108241616dSLuigi Rizzo 12118241616dSLuigi Rizzo #else /* linux */ 12128241616dSLuigi Rizzo 12138241616dSLuigi Rizzo /* 12148241616dSLuigi Rizzo * on linux: 12158241616dSLuigi Rizzo * we check if NA(ifp) is set and its first element has a related 12168241616dSLuigi Rizzo * magic value. The capenable is within the struct netmap_adapter. 12178241616dSLuigi Rizzo */ 12188241616dSLuigi Rizzo #define NETMAP_MAGIC 0x52697a7a 12198241616dSLuigi Rizzo 12208241616dSLuigi Rizzo #define NETMAP_CAPABLE(ifp) (NA(ifp) && \ 12218241616dSLuigi Rizzo ((uint32_t)(uintptr_t)NA(ifp) ^ NA(ifp)->magic) == NETMAP_MAGIC ) 12228241616dSLuigi Rizzo 12238241616dSLuigi Rizzo #define NETMAP_SET_CAPABLE(ifp) \ 12248241616dSLuigi Rizzo NA(ifp)->magic = ((uint32_t)(uintptr_t)NA(ifp)) ^ NETMAP_MAGIC 12258241616dSLuigi Rizzo 12268241616dSLuigi Rizzo #endif /* linux */ 122768b8534bSLuigi Rizzo 1228f196ce38SLuigi Rizzo #ifdef __FreeBSD__ 1229f9790aebSLuigi Rizzo 12304bf50f18SLuigi Rizzo /* Assigns the device IOMMU domain to an allocator. 
12314bf50f18SLuigi Rizzo * Returns -ENOMEM in case the domain is different */ 12324bf50f18SLuigi Rizzo #define nm_iommu_group_id(dev) (0) 12334bf50f18SLuigi Rizzo 123417885a7bSLuigi Rizzo /* Callback invoked by the dma machinery after a successful dmamap_load */ 12356dba29a2SLuigi Rizzo static void netmap_dmamap_cb(__unused void *arg, 12366dba29a2SLuigi Rizzo __unused bus_dma_segment_t * segs, __unused int nseg, __unused int error) 12376dba29a2SLuigi Rizzo { 12386dba29a2SLuigi Rizzo } 12396dba29a2SLuigi Rizzo 12406dba29a2SLuigi Rizzo /* bus_dmamap_load wrapper: call aforementioned function if map != NULL. 12416dba29a2SLuigi Rizzo * XXX can we do it without a callback ? 12426dba29a2SLuigi Rizzo */ 12436dba29a2SLuigi Rizzo static inline void 12444bf50f18SLuigi Rizzo netmap_load_map(struct netmap_adapter *na, 12454bf50f18SLuigi Rizzo bus_dma_tag_t tag, bus_dmamap_t map, void *buf) 12466dba29a2SLuigi Rizzo { 12476dba29a2SLuigi Rizzo if (map) 12484bf50f18SLuigi Rizzo bus_dmamap_load(tag, map, buf, NETMAP_BUF_SIZE(na), 12496dba29a2SLuigi Rizzo netmap_dmamap_cb, NULL, BUS_DMA_NOWAIT); 12506dba29a2SLuigi Rizzo } 12516dba29a2SLuigi Rizzo 12524bf50f18SLuigi Rizzo static inline void 12534bf50f18SLuigi Rizzo netmap_unload_map(struct netmap_adapter *na, 12544bf50f18SLuigi Rizzo bus_dma_tag_t tag, bus_dmamap_t map) 12554bf50f18SLuigi Rizzo { 12564bf50f18SLuigi Rizzo if (map) 12574bf50f18SLuigi Rizzo bus_dmamap_unload(tag, map); 12584bf50f18SLuigi Rizzo } 12594bf50f18SLuigi Rizzo 12606dba29a2SLuigi Rizzo /* update the map when a buffer changes. */ 12616dba29a2SLuigi Rizzo static inline void 12624bf50f18SLuigi Rizzo netmap_reload_map(struct netmap_adapter *na, 12634bf50f18SLuigi Rizzo bus_dma_tag_t tag, bus_dmamap_t map, void *buf) 12646dba29a2SLuigi Rizzo { 12656dba29a2SLuigi Rizzo if (map) { 12666dba29a2SLuigi Rizzo bus_dmamap_unload(tag, map); 12674bf50f18SLuigi Rizzo bus_dmamap_load(tag, map, buf, NETMAP_BUF_SIZE(na), 12686dba29a2SLuigi Rizzo netmap_dmamap_cb, NULL, BUS_DMA_NOWAIT); 12696dba29a2SLuigi Rizzo } 12706dba29a2SLuigi Rizzo } 1271f9790aebSLuigi Rizzo 1272f196ce38SLuigi Rizzo #else /* linux */ 1273f196ce38SLuigi Rizzo 12744bf50f18SLuigi Rizzo int nm_iommu_group_id(bus_dma_tag_t dev); 12754bf50f18SLuigi Rizzo extern size_t netmap_mem_get_bufsize(struct netmap_mem_d *); 12764bf50f18SLuigi Rizzo #include <linux/dma-mapping.h> 12774bf50f18SLuigi Rizzo 12784bf50f18SLuigi Rizzo static inline void 12794bf50f18SLuigi Rizzo netmap_load_map(struct netmap_adapter *na, 12804bf50f18SLuigi Rizzo bus_dma_tag_t tag, bus_dmamap_t map, void *buf) 12814bf50f18SLuigi Rizzo { 12824bf50f18SLuigi Rizzo if (map) { 12834bf50f18SLuigi Rizzo *map = dma_map_single(na->pdev, buf, netmap_mem_get_bufsize(na->nm_mem), 12844bf50f18SLuigi Rizzo DMA_BIDIRECTIONAL); 12854bf50f18SLuigi Rizzo } 12864bf50f18SLuigi Rizzo } 12874bf50f18SLuigi Rizzo 12884bf50f18SLuigi Rizzo static inline void 12894bf50f18SLuigi Rizzo netmap_unload_map(struct netmap_adapter *na, 12904bf50f18SLuigi Rizzo bus_dma_tag_t tag, bus_dmamap_t map) 12914bf50f18SLuigi Rizzo { 12924bf50f18SLuigi Rizzo u_int sz = netmap_mem_get_bufsize(na->nm_mem); 12934bf50f18SLuigi Rizzo 12944bf50f18SLuigi Rizzo if (*map) { 12954bf50f18SLuigi Rizzo dma_unmap_single(na->pdev, *map, sz, 12964bf50f18SLuigi Rizzo DMA_BIDIRECTIONAL); 12974bf50f18SLuigi Rizzo } 12984bf50f18SLuigi Rizzo } 12994bf50f18SLuigi Rizzo 13004bf50f18SLuigi Rizzo static inline void 13014bf50f18SLuigi Rizzo netmap_reload_map(struct netmap_adapter *na, 13024bf50f18SLuigi Rizzo bus_dma_tag_t tag, bus_dmamap_t 
map, void *buf) 13034bf50f18SLuigi Rizzo { 13044bf50f18SLuigi Rizzo u_int sz = netmap_mem_get_bufsize(na->nm_mem); 13054bf50f18SLuigi Rizzo 13064bf50f18SLuigi Rizzo if (*map) { 13074bf50f18SLuigi Rizzo dma_unmap_single(na->pdev, *map, sz, 13084bf50f18SLuigi Rizzo DMA_BIDIRECTIONAL); 13094bf50f18SLuigi Rizzo } 13104bf50f18SLuigi Rizzo 13114bf50f18SLuigi Rizzo *map = dma_map_single(na->pdev, buf, sz, 13124bf50f18SLuigi Rizzo DMA_BIDIRECTIONAL); 13134bf50f18SLuigi Rizzo } 13144bf50f18SLuigi Rizzo 1315f196ce38SLuigi Rizzo /* 1316f196ce38SLuigi Rizzo * XXX How do we redefine these functions: 1317f196ce38SLuigi Rizzo * 1318f196ce38SLuigi Rizzo * on linux we need 1319f196ce38SLuigi Rizzo * dma_map_single(&pdev->dev, virt_addr, len, direction) 1320f196ce38SLuigi Rizzo * dma_unmap_single(&adapter->pdev->dev, phys_addr, len, direction 1321f196ce38SLuigi Rizzo * The len can be implicit (on netmap it is NETMAP_BUF_SIZE) 1322f196ce38SLuigi Rizzo * unfortunately the direction is not, so we need to change 1323f196ce38SLuigi Rizzo * something to have a cross API 1324f196ce38SLuigi Rizzo */ 13254bf50f18SLuigi Rizzo 1326f196ce38SLuigi Rizzo #if 0 1327f196ce38SLuigi Rizzo struct e1000_buffer *buffer_info = &tx_ring->buffer_info[l]; 1328f196ce38SLuigi Rizzo /* set time_stamp *before* dma to help avoid a possible race */ 1329f196ce38SLuigi Rizzo buffer_info->time_stamp = jiffies; 1330f196ce38SLuigi Rizzo buffer_info->mapped_as_page = false; 1331f196ce38SLuigi Rizzo buffer_info->length = len; 1332f196ce38SLuigi Rizzo //buffer_info->next_to_watch = l; 1333f196ce38SLuigi Rizzo /* reload dma map */ 1334f196ce38SLuigi Rizzo dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, 1335f196ce38SLuigi Rizzo NETMAP_BUF_SIZE, DMA_TO_DEVICE); 1336f196ce38SLuigi Rizzo buffer_info->dma = dma_map_single(&adapter->pdev->dev, 1337f196ce38SLuigi Rizzo addr, NETMAP_BUF_SIZE, DMA_TO_DEVICE); 1338f196ce38SLuigi Rizzo 1339f196ce38SLuigi Rizzo if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) { 1340f196ce38SLuigi Rizzo D("dma mapping error"); 1341f196ce38SLuigi Rizzo /* goto dma_error; See e1000_put_txbuf() */ 1342f196ce38SLuigi Rizzo /* XXX reset */ 1343f196ce38SLuigi Rizzo } 1344f196ce38SLuigi Rizzo tx_desc->buffer_addr = htole64(buffer_info->dma); //XXX 1345f196ce38SLuigi Rizzo 1346f196ce38SLuigi Rizzo #endif 1347f196ce38SLuigi Rizzo 1348f196ce38SLuigi Rizzo /* 1349f196ce38SLuigi Rizzo * The bus_dmamap_sync() can be one of wmb() or rmb() depending on direction. 
1350f196ce38SLuigi Rizzo */ 1351f196ce38SLuigi Rizzo #define bus_dmamap_sync(_a, _b, _c) 1352f196ce38SLuigi Rizzo 1353f196ce38SLuigi Rizzo #endif /* linux */ 13546dba29a2SLuigi Rizzo 1355ce3ee1e7SLuigi Rizzo 13565644ccecSLuigi Rizzo /* 13575644ccecSLuigi Rizzo * functions to map NIC to KRING indexes (n2k) and vice versa (k2n) 13585644ccecSLuigi Rizzo */ 13595644ccecSLuigi Rizzo static inline int 136064ae02c3SLuigi Rizzo netmap_idx_n2k(struct netmap_kring *kr, int idx) 13615644ccecSLuigi Rizzo { 136264ae02c3SLuigi Rizzo int n = kr->nkr_num_slots; 136364ae02c3SLuigi Rizzo idx += kr->nkr_hwofs; 136464ae02c3SLuigi Rizzo if (idx < 0) 136564ae02c3SLuigi Rizzo return idx + n; 136664ae02c3SLuigi Rizzo else if (idx < n) 136764ae02c3SLuigi Rizzo return idx; 13685644ccecSLuigi Rizzo else 136964ae02c3SLuigi Rizzo return idx - n; 13705644ccecSLuigi Rizzo } 13715644ccecSLuigi Rizzo 13725644ccecSLuigi Rizzo 13735644ccecSLuigi Rizzo static inline int 137464ae02c3SLuigi Rizzo netmap_idx_k2n(struct netmap_kring *kr, int idx) 13755644ccecSLuigi Rizzo { 137664ae02c3SLuigi Rizzo int n = kr->nkr_num_slots; 137764ae02c3SLuigi Rizzo idx -= kr->nkr_hwofs; 137864ae02c3SLuigi Rizzo if (idx < 0) 137964ae02c3SLuigi Rizzo return idx + n; 138064ae02c3SLuigi Rizzo else if (idx < n) 138164ae02c3SLuigi Rizzo return idx; 13825644ccecSLuigi Rizzo else 138364ae02c3SLuigi Rizzo return idx - n; 13845644ccecSLuigi Rizzo } 13855644ccecSLuigi Rizzo 13865644ccecSLuigi Rizzo 1387d76bf4ffSLuigi Rizzo /* Entries of the look-up table. */ 1388d76bf4ffSLuigi Rizzo struct lut_entry { 1389d76bf4ffSLuigi Rizzo void *vaddr; /* virtual address. */ 1390849bec0eSLuigi Rizzo vm_paddr_t paddr; /* physical address. */ 1391d76bf4ffSLuigi Rizzo }; 1392d76bf4ffSLuigi Rizzo 1393d76bf4ffSLuigi Rizzo struct netmap_obj_pool; 1394d76bf4ffSLuigi Rizzo 139568b8534bSLuigi Rizzo /* 13966e10c8b8SLuigi Rizzo * NMB return the virtual address of a buffer (buffer 0 on bad index) 13976e10c8b8SLuigi Rizzo * PNMB also fills the physical address 139868b8534bSLuigi Rizzo */ 13996e10c8b8SLuigi Rizzo static inline void * 14004bf50f18SLuigi Rizzo NMB(struct netmap_adapter *na, struct netmap_slot *slot) 1401f9790aebSLuigi Rizzo { 1402f9790aebSLuigi Rizzo struct lut_entry *lut = na->na_lut; 1403f9790aebSLuigi Rizzo uint32_t i = slot->buf_idx; 1404f9790aebSLuigi Rizzo return (unlikely(i >= na->na_lut_objtotal)) ? 1405f9790aebSLuigi Rizzo lut[0].vaddr : lut[i].vaddr; 1406f9790aebSLuigi Rizzo } 1407f9790aebSLuigi Rizzo 14084bf50f18SLuigi Rizzo static inline void * 14094bf50f18SLuigi Rizzo PNMB(struct netmap_adapter *na, struct netmap_slot *slot, uint64_t *pp) 14104bf50f18SLuigi Rizzo { 14114bf50f18SLuigi Rizzo uint32_t i = slot->buf_idx; 14124bf50f18SLuigi Rizzo struct lut_entry *lut = na->na_lut; 14134bf50f18SLuigi Rizzo void *ret = (i >= na->na_lut_objtotal) ? lut[0].vaddr : lut[i].vaddr; 14144bf50f18SLuigi Rizzo 14154bf50f18SLuigi Rizzo *pp = (i >= na->na_lut_objtotal) ? lut[0].paddr : lut[i].paddr; 14164bf50f18SLuigi Rizzo return ret; 14174bf50f18SLuigi Rizzo } 14184bf50f18SLuigi Rizzo 14194bf50f18SLuigi Rizzo /* Generic version of NMB, which uses device-specific memory. */ 14204bf50f18SLuigi Rizzo 1421ce3ee1e7SLuigi Rizzo 1422ce3ee1e7SLuigi Rizzo 1423f9790aebSLuigi Rizzo void netmap_txsync_to_host(struct netmap_adapter *na); 1424f9790aebSLuigi Rizzo 1425f9790aebSLuigi Rizzo 142617885a7bSLuigi Rizzo /* 142717885a7bSLuigi Rizzo * Structure associated to each thread which registered an interface. 
1428f9790aebSLuigi Rizzo * 1429f9790aebSLuigi Rizzo * The first 4 fields of this structure are written by NIOCREGIF and 1430f9790aebSLuigi Rizzo * read by poll() and NIOC?XSYNC. 143117885a7bSLuigi Rizzo * 143217885a7bSLuigi Rizzo * There is low contention among writers (a correct user program 143317885a7bSLuigi Rizzo * should have none) and among writers and readers, so we use a 143417885a7bSLuigi Rizzo * single global lock to protect the structure initialization; 143517885a7bSLuigi Rizzo * since initialization involves the allocation of memory, 143617885a7bSLuigi Rizzo * we reuse the memory allocator lock. 143717885a7bSLuigi Rizzo * 1438f9790aebSLuigi Rizzo * Read access to the structure is lock free. Readers must check that 1439f9790aebSLuigi Rizzo * np_nifp is not NULL before using the other fields. 144017885a7bSLuigi Rizzo * If np_nifp is NULL initialization has not been performed, 144117885a7bSLuigi Rizzo * so they should return an error to userspace. 1442f9790aebSLuigi Rizzo * 1443f9790aebSLuigi Rizzo * The ref_done field is used to regulate access to the refcount in the 1444f9790aebSLuigi Rizzo * memory allocator. The refcount must be incremented at most once for 1445f9790aebSLuigi Rizzo * each open("/dev/netmap"). The increment is performed by the first 1446f9790aebSLuigi Rizzo * function that calls netmap_get_memory() (currently called by 1447f9790aebSLuigi Rizzo * mmap(), NIOCGINFO and NIOCREGIF). 1448f9790aebSLuigi Rizzo * If the refcount is incremented, it is then decremented when the 1449f9790aebSLuigi Rizzo * private structure is destroyed. 1450f9790aebSLuigi Rizzo */ 1451f9790aebSLuigi Rizzo struct netmap_priv_d { 1452f9790aebSLuigi Rizzo struct netmap_if * volatile np_nifp; /* netmap if descriptor. */ 1453f9790aebSLuigi Rizzo 1454f9790aebSLuigi Rizzo struct netmap_adapter *np_na; 1455f0ea3689SLuigi Rizzo uint32_t np_flags; /* from the ioctl */ 1456f0ea3689SLuigi Rizzo u_int np_txqfirst, np_txqlast; /* range of tx rings to scan */ 1457f0ea3689SLuigi Rizzo u_int np_rxqfirst, np_rxqlast; /* range of rx rings to scan */ 1458f0ea3689SLuigi Rizzo uint16_t np_txpoll; /* XXX and also np_rxpoll ? */ 1459f9790aebSLuigi Rizzo 1460f9790aebSLuigi Rizzo struct netmap_mem_d *np_mref; /* use with NMG_LOCK held */ 1461f9790aebSLuigi Rizzo /* np_refcount is only used on FreeBSD */ 1462f9790aebSLuigi Rizzo int np_refcount; /* use with NMG_LOCK held */ 1463f0ea3689SLuigi Rizzo 1464f0ea3689SLuigi Rizzo /* pointers to the selinfo to be used for selrecord. 1465f0ea3689SLuigi Rizzo * Either the local or the global one depending on the 1466f0ea3689SLuigi Rizzo * number of rings. 1467f0ea3689SLuigi Rizzo */ 1468f0ea3689SLuigi Rizzo NM_SELINFO_T *np_rxsi, *np_txsi; 1469f0ea3689SLuigi Rizzo struct thread *np_td; /* kqueue, just debugging */ 1470f9790aebSLuigi Rizzo }; 1471f9790aebSLuigi Rizzo 14724bf50f18SLuigi Rizzo #ifdef WITH_MONITOR 14734bf50f18SLuigi Rizzo 14744bf50f18SLuigi Rizzo struct netmap_monitor_adapter { 14754bf50f18SLuigi Rizzo struct netmap_adapter up; 14764bf50f18SLuigi Rizzo 14774bf50f18SLuigi Rizzo struct netmap_priv_d priv; 14784bf50f18SLuigi Rizzo uint32_t flags; 14794bf50f18SLuigi Rizzo }; 14804bf50f18SLuigi Rizzo 14814bf50f18SLuigi Rizzo #endif /* WITH_MONITOR */ 14824bf50f18SLuigi Rizzo 1483f9790aebSLuigi Rizzo 1484f9790aebSLuigi Rizzo /* 1485f9790aebSLuigi Rizzo * generic netmap emulation for devices that do not have 1486f9790aebSLuigi Rizzo * native netmap support. 
 */
int generic_netmap_attach(struct ifnet *ifp);

int netmap_catch_rx(struct netmap_adapter *na, int intercept);
void generic_rx_handler(struct ifnet *ifp, struct mbuf *m);
void netmap_catch_tx(struct netmap_generic_adapter *na, int enable);
int generic_xmit_frame(struct ifnet *ifp, struct mbuf *m, void *addr, u_int len, u_int ring_nr);
int generic_find_num_desc(struct ifnet *ifp, u_int *tx, u_int *rx);
void generic_find_num_queues(struct ifnet *ifp, u_int *txq, u_int *rxq);

//#define RATE_GENERIC  /* Enables communication statistics for generic. */
#ifdef RATE_GENERIC
void generic_rate(int txp, int txs, int txi, int rxp, int rxs, int rxi);
#else
#define generic_rate(txp, txs, txi, rxp, rxs, rxi)
#endif

/*
 * netmap_mitigation API. This is used by the generic adapter
 * to reduce the number of interrupt requests/selwakeup
 * to clients on incoming packets.
 */
void netmap_mitigation_init(struct nm_generic_mit *mit, int idx,
		struct netmap_adapter *na);
void netmap_mitigation_start(struct nm_generic_mit *mit);
void netmap_mitigation_restart(struct nm_generic_mit *mit);
int netmap_mitigation_active(struct nm_generic_mit *mit);
void netmap_mitigation_cleanup(struct nm_generic_mit *mit);
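/*
 * Illustrative sketch, not part of the header: one plausible way the
 * mitigation API above can be used to coalesce rx notifications.  This is
 * not the actual generic adapter code; example_notify() is a hypothetical
 * stand-in for whatever wakeup routine the caller uses, and the timer armed
 * by netmap_mitigation_start() is expected to deliver the deferred wakeup
 * when it fires.
 */
#if 0
static void
example_rx_notify(struct nm_generic_mit *mit)
{
	if (!netmap_mitigation_active(mit)) {
		/* no mitigation timer pending: wake up clients now and
		 * arm the timer so that packets arriving right after
		 * this one are coalesced into a single notification
		 */
		example_notify();
		netmap_mitigation_start(mit);
	} else {
		/* a timer is already pending: just restart its interval */
		netmap_mitigation_restart(mit);
	}
}
#endif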

/* Shared declarations for the VALE switch. */

/*
 * Each transmit queue accumulates a batch of packets into
 * a structure before forwarding. Packets to the same
 * destination are put in a list using ft_next as a link field.
 * ft_frags and ft_next are valid only on the first fragment.
 */
struct nm_bdg_fwd {	/* forwarding entry for a bridge */
	void *ft_buf;		/* netmap or indirect buffer */
	uint8_t ft_frags;	/* how many fragments (only on 1st frag) */
	uint8_t _ft_port;	/* dst port (unused) */
	uint16_t ft_flags;	/* flags, e.g. indirect */
	uint16_t ft_len;	/* src fragment len */
	uint16_t ft_next;	/* next packet to same destination */
};

/* struct 'virtio_net_hdr' from linux. */
struct nm_vnet_hdr {
#define VIRTIO_NET_HDR_F_NEEDS_CSUM	1	/* Use csum_start, csum_offset */
#define VIRTIO_NET_HDR_F_DATA_VALID	2	/* Csum is valid */
	uint8_t flags;
#define VIRTIO_NET_HDR_GSO_NONE		0	/* Not a GSO frame */
#define VIRTIO_NET_HDR_GSO_TCPV4	1	/* GSO frame, IPv4 TCP (TSO) */
#define VIRTIO_NET_HDR_GSO_UDP		3	/* GSO frame, IPv4 UDP (UFO) */
#define VIRTIO_NET_HDR_GSO_TCPV6	4	/* GSO frame, IPv6 TCP */
#define VIRTIO_NET_HDR_GSO_ECN		0x80	/* TCP has ECN set */
	uint8_t gso_type;
	uint16_t hdr_len;
	uint16_t gso_size;
	uint16_t csum_start;
	uint16_t csum_offset;
};

#define WORST_CASE_GSO_HEADER	(14+40+60)  /* IPv6 + TCP */

/* Private definitions for IPv4, IPv6, UDP and TCP headers. */

struct nm_iphdr {
	uint8_t		version_ihl;
	uint8_t		tos;
	uint16_t	tot_len;
	uint16_t	id;
	uint16_t	frag_off;
	uint8_t		ttl;
	uint8_t		protocol;
	uint16_t	check;
	uint32_t	saddr;
	uint32_t	daddr;
	/* The options start here. */
};

struct nm_tcphdr {
	uint16_t	source;
	uint16_t	dest;
	uint32_t	seq;
	uint32_t	ack_seq;
	uint8_t		doff;	/* Data offset + Reserved */
	uint8_t		flags;
	uint16_t	window;
	uint16_t	check;
	uint16_t	urg_ptr;
};

struct nm_udphdr {
	uint16_t	source;
	uint16_t	dest;
	uint16_t	len;
	uint16_t	check;
};

struct nm_ipv6hdr {
	uint8_t		priority_version;
	uint8_t		flow_lbl[3];

	uint16_t	payload_len;
	uint8_t		nexthdr;
	uint8_t		hop_limit;

	uint8_t		saddr[16];
	uint8_t		daddr[16];
};

/* Type used to store a checksum (in host byte order) that hasn't been
 * folded yet.
 */
#define rawsum_t uint32_t

rawsum_t nm_csum_raw(uint8_t *data, size_t len, rawsum_t cur_sum);
uint16_t nm_csum_ipv4(struct nm_iphdr *iph);
void nm_csum_tcpudp_ipv4(struct nm_iphdr *iph, void *data,
		size_t datalen, uint16_t *check);
void nm_csum_tcpudp_ipv6(struct nm_ipv6hdr *ip6h, void *data,
		size_t datalen, uint16_t *check);
uint16_t nm_csum_fold(rawsum_t cur_sum);
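/*
 * Illustrative sketch, not part of the header: how the checksum helpers
 * above fit together when completing an IPv4/UDP frame whose checksums
 * have not been filled in yet.  The frame layout (14-byte Ethernet header,
 * 20-byte IPv4 header with no options, valid and sufficiently long buffer)
 * and the function name are simplifying assumptions of this sketch.
 */
#if 0
static void
example_fix_ipv4_udp_csum(uint8_t *frame, size_t frame_len)
{
	struct nm_iphdr *iph = (struct nm_iphdr *)(frame + 14);
	struct nm_udphdr *udph = (struct nm_udphdr *)(frame + 14 + 20);
	size_t datalen = frame_len - 14 - 20;	/* UDP header + payload */

	/* IPv4 header checksum, computed with the check field zeroed */
	iph->check = 0;
	iph->check = nm_csum_ipv4(iph);

	/* UDP checksum, including the pseudo-header derived from iph */
	udph->check = 0;
	nm_csum_tcpudp_ipv4(iph, udph, datalen, &udph->check);
}
#endif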

void bdg_mismatch_datapath(struct netmap_vp_adapter *na,
		struct netmap_vp_adapter *dst_na,
		struct nm_bdg_fwd *ft_p, struct netmap_ring *ring,
		u_int *j, u_int lim, u_int *howmany);

/* persistent virtual port routines */
int nm_vi_persist(const char *, struct ifnet **);
void nm_vi_detach(struct ifnet *);
void nm_vi_init_index(void);

#endif /* _NET_NETMAP_KERN_H_ */