/*
 * Copyright (C) 2011-2012 Matteo Landi, Luigi Rizzo. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * $FreeBSD$
 * $Id: netmap_kern.h 10602 2012-02-21 16:47:55Z luigi $
 *
 * The header contains the definitions of constants and function
 * prototypes used only in kernelspace.
 */

#ifndef _NET_NETMAP_KERN_H_
#define _NET_NETMAP_KERN_H_

#define NETMAP_MEM2    // use the new memory allocator

#if defined(__FreeBSD__)
#define	NM_LOCK_T	struct mtx
#define	NM_SELINFO_T	struct selinfo
#define	MBUF_LEN(m)	((m)->m_pkthdr.len)
#define	NM_SEND_UP(ifp, m)	((ifp)->if_input)(ifp, m)
#elif defined (linux)
#define	NM_LOCK_T	spinlock_t
#define	NM_SELINFO_T	wait_queue_head_t
#define	MBUF_LEN(m)	((m)->len)
#define	NM_SEND_UP(ifp, m)	netif_rx(m)
#else
#error unsupported platform
#endif

#ifdef MALLOC_DECLARE
MALLOC_DECLARE(M_NETMAP);
#endif

#define ND(format, ...)
#define D(format, ...)						\
	do {							\
		struct timeval __xxts;				\
		microtime(&__xxts);				\
		printf("%03d.%06d %s [%d] " format "\n",	\
		    (int)__xxts.tv_sec % 1000, (int)__xxts.tv_usec,	\
		    __FUNCTION__, __LINE__, ##__VA_ARGS__);	\
	} while (0)
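
/*
 * Usage sketch for the debugging macros above (illustrative only, not
 * compiled): D() prints a timestamped, per-function message, ND() is the
 * disabled variant used to silence a message while keeping it in the
 * source. The variables in the example are placeholders.
 */
#if 0
	D("ring %d: sent %d packets", ring_nr, sent);
	ND("this message is compiled out");
#endif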

struct netmap_adapter;

/*
 * private, kernel view of a ring. Keeps track of the status of
 * a ring across system calls.
 *
 *	nr_hwcur	index of the next buffer to refill.
 *			It corresponds to ring->cur - ring->reserved
 *
 *	nr_hwavail	the number of slots "owned" by userspace.
 *			nr_hwavail =:= ring->avail + ring->reserved
 *
 * The indexes in the NIC and netmap rings are offset by nkr_hwofs slots.
 * This is so that, on a reset, buffers owned by userspace are not
 * modified by the kernel. In particular:
 * RX rings: the next empty buffer (hwcur + hwavail + hwofs) coincides with
 *	the next empty buffer as known by the hardware (next_to_check or so).
 * TX rings: hwcur + hwofs coincides with next_to_send
 */
struct netmap_kring {
	struct netmap_ring *ring;
	u_int nr_hwcur;
	int nr_hwavail;
	u_int nr_kflags;	/* private driver flags */
#define NKR_PENDINTR	0x1	// Pending interrupt.
	u_int nkr_num_slots;

	int	nkr_hwofs;	/* offset between NIC and netmap ring */
	struct netmap_adapter *na;
	NM_SELINFO_T si;	/* poll/select wait queue */
	NM_LOCK_T q_lock;	/* used if no device lock available */
} __attribute__((__aligned__(64)));
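
/*
 * A minimal consistency sketch of the identities documented above,
 * assuming the ring and kring views are in sync and that the definitions
 * from <net/netmap.h> are visible. Illustrative only, not compiled and
 * not part of the API; the helper name is hypothetical.
 */
#if 0
static void
nm_kring_check_sketch(struct netmap_kring *kring)
{
	struct netmap_ring *ring = kring->ring;
	u_int lim = kring->nkr_num_slots;

	/* slots owned by userspace = user-visible avail plus reserved ones */
	KASSERT(kring->nr_hwavail == (int)(ring->avail + ring->reserved),
	    ("nr_hwavail out of sync"));
	/* next buffer to refill = ring->cur minus the reserved slots (mod lim) */
	KASSERT(kring->nr_hwcur == (ring->cur + lim - ring->reserved) % lim,
	    ("nr_hwcur out of sync"));
}
#endif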

/*
 * This struct extends the 'struct adapter' (or
 * equivalent) device descriptor. It contains all fields needed to
 * support netmap operation.
 */
struct netmap_adapter {
	int refcount; /* number of user-space descriptors using this
			 interface, which is equal to the number of
			 struct netmap_if objs in the mapped region. */
	/*
	 * The selwakeup in the interrupt thread can use per-ring
	 * and/or global wait queues. We track how many clients
	 * of each type we have so we can optimize the drivers,
	 * and especially avoid huge contention on the locks.
	 */
	int na_single;	/* threads attached to a single hw queue */
	int na_multi;	/* threads attached to multiple hw queues */

	int separate_locks; /* set if the interface supports different
			       locks for rx, tx and core. */

	u_int num_rx_rings; /* number of tx/rx ring pairs */
	u_int num_tx_rings; // if nonzero, overrides num_rx_rings

	u_int num_tx_desc; /* number of descriptors in each queue */
	u_int num_rx_desc;
	//u_int buff_size;	// XXX deprecate, use NETMAP_BUF_SIZE

	/* tx_rings and rx_rings are private but allocated
	 * as a contiguous chunk of memory. Each array has
	 * N+1 entries, for the adapter queues and for the host queue.
	 */
	struct netmap_kring *tx_rings; /* array of TX rings. */
	struct netmap_kring *rx_rings; /* array of RX rings. */

	NM_SELINFO_T tx_si, rx_si;	/* global wait queues */

	/* copy of if_qflush and if_transmit pointers, to intercept
	 * packets from the network stack when netmap is active.
	 */
	int	(*if_transmit)(struct ifnet *, struct mbuf *);

	/* references to the ifnet and device routines, used by
	 * the generic netmap functions.
	 */
	struct ifnet *ifp; /* adapter is ifp->if_softc */

	NM_LOCK_T core_lock;	/* used if no device lock available */

	int (*nm_register)(struct ifnet *, int onoff);
	void (*nm_lock)(struct ifnet *, int what, u_int ringid);
	int (*nm_txsync)(struct ifnet *, u_int ring, int lock);
	int (*nm_rxsync)(struct ifnet *, u_int ring, int lock);
#ifdef linux
	struct net_device_ops nm_ndo;
#endif /* linux */
};

/*
 * The combination of "enable" (ifp->if_capabilities & IFCAP_NETMAP)
 * and refcount gives the status of the interface, namely:
 *
 *	enable	refcount	Status
 *
 *	FALSE	0		normal operation
 *	FALSE	!= 0		-- (impossible)
 *	TRUE	1		netmap mode
 *	TRUE	0		being deleted.
 */

#define NETMAP_DELETING(_na)  (  ((_na)->refcount == 0) &&	\
	( (_na)->ifp->if_capenable & IFCAP_NETMAP) )

/*
 * parameters for (*nm_lock)(adapter, what, index)
 */
enum {
	NETMAP_NO_LOCK = 0,
	NETMAP_CORE_LOCK, NETMAP_CORE_UNLOCK,
	NETMAP_TX_LOCK, NETMAP_TX_UNLOCK,
	NETMAP_RX_LOCK, NETMAP_RX_UNLOCK,
#ifdef __FreeBSD__
#define	NETMAP_REG_LOCK		NETMAP_CORE_LOCK
#define	NETMAP_REG_UNLOCK	NETMAP_CORE_UNLOCK
#else
	NETMAP_REG_LOCK, NETMAP_REG_UNLOCK
#endif
};
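
/*
 * Sketch of a driver-side nm_lock callback dispatching on the values
 * above. Illustrative only, not compiled; the "foo_*" names and the
 * lock fields of the softc are placeholders for a real driver.
 */
#if 0
static void
foo_netmap_lock(struct ifnet *ifp, int what, u_int queueid)
{
	struct foo_adapter *sc = ifp->if_softc;

	switch (what) {
	case NETMAP_CORE_LOCK:
		mtx_lock(&sc->core_mtx);
		break;
	case NETMAP_CORE_UNLOCK:
		mtx_unlock(&sc->core_mtx);
		break;
	case NETMAP_TX_LOCK:
		mtx_lock(&sc->tx_rings[queueid].mtx);
		break;
	case NETMAP_TX_UNLOCK:
		mtx_unlock(&sc->tx_rings[queueid].mtx);
		break;
	case NETMAP_RX_LOCK:
		mtx_lock(&sc->rx_rings[queueid].mtx);
		break;
	case NETMAP_RX_UNLOCK:
		mtx_unlock(&sc->rx_rings[queueid].mtx);
		break;
	}
}
#endif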

/*
 * The following are support routines used by individual drivers to
 * support netmap operation.
 *
 * netmap_attach() initializes a struct netmap_adapter, allocating the
 *	struct netmap_ring's and the struct selinfo.
 *
 * netmap_detach() frees the memory allocated by netmap_attach().
 *
 * netmap_start() replaces the if_transmit routine of the interface,
 *	and is used to intercept packets coming from the stack.
 *
 * netmap_load_map/netmap_reload_map are helper routines to set/reset
 *	the dmamap for a packet buffer
 *
 * netmap_reset() is a helper routine to be called in the driver
 *	when reinitializing a ring.
 */
int netmap_attach(struct netmap_adapter *, int);
void netmap_detach(struct ifnet *);
int netmap_start(struct ifnet *, struct mbuf *);
enum txrx { NR_RX = 0, NR_TX = 1 };
struct netmap_slot *netmap_reset(struct netmap_adapter *na,
	enum txrx tx, int n, u_int new_cur);
int netmap_ring_reinit(struct netmap_kring *);
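
/*
 * Sketch of how a driver typically calls netmap_attach() at attach time:
 * fill a struct netmap_adapter on the stack and pass it together with the
 * number of hardware queues. Illustrative only, not compiled; the "foo_*"
 * names and softc fields are placeholders.
 */
#if 0
static void
foo_netmap_attach(struct foo_adapter *sc)
{
	struct netmap_adapter na;

	bzero(&na, sizeof(na));
	na.ifp = sc->ifp;
	na.separate_locks = 0;
	na.num_tx_desc = sc->num_tx_desc;
	na.num_rx_desc = sc->num_rx_desc;
	na.nm_register = foo_netmap_reg;	/* switch netmap mode on/off */
	na.nm_txsync = foo_netmap_txsync;
	na.nm_rxsync = foo_netmap_rxsync;
	na.nm_lock = foo_netmap_lock;
	netmap_attach(&na, sc->num_queues);	/* second arg: tx/rx ring pairs */
}
#endif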

extern int netmap_buf_size;
#define NETMAP_BUF_SIZE	netmap_buf_size
extern int netmap_mitigate;
extern int netmap_no_pendintr;
extern u_int netmap_total_buffers;
extern char *netmap_buffer_base;
extern int netmap_verbose;	// XXX debugging
enum {				/* verbose flags */
	NM_VERB_ON = 1,			/* generic verbose */
	NM_VERB_HOST = 0x2,		/* verbose host stack */
	NM_VERB_RXSYNC = 0x10,		/* verbose on rxsync/txsync */
	NM_VERB_TXSYNC = 0x20,
	NM_VERB_RXINTR = 0x100,		/* verbose on rx/tx intr (driver) */
	NM_VERB_TXINTR = 0x200,
	NM_VERB_NIC_RXSYNC = 0x1000,	/* verbose on rx/tx sync at the NIC ring */
	NM_VERB_NIC_TXSYNC = 0x2000,
};

/*
 * NA returns a pointer to the struct netmap_adapter from the ifp,
 * WNA is used to write it.
 */
#ifndef WNA
#define	WNA(_ifp)	(_ifp)->if_pspare[0]
#endif
#define	NA(_ifp)	((struct netmap_adapter *)WNA(_ifp))


/* Callback invoked by the dma machinery after a successful dmamap_load */
static void netmap_dmamap_cb(__unused void *arg,
    __unused bus_dma_segment_t * segs, __unused int nseg, __unused int error)
{
}

/* bus_dmamap_load wrapper: call aforementioned function if map != NULL.
 * XXX can we do it without a callback ?
 */
static inline void
netmap_load_map(bus_dma_tag_t tag, bus_dmamap_t map, void *buf)
{
	if (map)
		bus_dmamap_load(tag, map, buf, NETMAP_BUF_SIZE,
		    netmap_dmamap_cb, NULL, BUS_DMA_NOWAIT);
}

/* update the map when a buffer changes. */
static inline void
netmap_reload_map(bus_dma_tag_t tag, bus_dmamap_t map, void *buf)
{
	if (map) {
		bus_dmamap_unload(tag, map);
		bus_dmamap_load(tag, map, buf, NETMAP_BUF_SIZE,
		    netmap_dmamap_cb, NULL, BUS_DMA_NOWAIT);
	}
}
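
/*
 * Typical use of netmap_reload_map(): when a slot carries the
 * NS_BUF_CHANGED flag (set by userspace after swapping a buffer), the
 * driver remaps the new buffer and clears the flag before programming
 * the descriptor. Illustrative only, not compiled; the dma tag, map and
 * descriptor names are placeholders, and NMB() is defined further below.
 */
#if 0
	struct netmap_slot *slot = &ring->slot[j];
	void *addr = NMB(slot);

	if (slot->flags & NS_BUF_CHANGED) {
		/* the buffer behind this slot was replaced by userspace */
		netmap_reload_map(rxr->ptag, rxbuf->pmap, addr);
		slot->flags &= ~NS_BUF_CHANGED;
	}
#endif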

/*
 * functions to map NIC to KRING indexes (n2k) and vice versa (k2n)
 */
static inline int
netmap_idx_n2k(struct netmap_kring *kr, int idx)
{
	int n = kr->nkr_num_slots;
	idx += kr->nkr_hwofs;
	if (idx < 0)
		return idx + n;
	else if (idx < n)
		return idx;
	else
		return idx - n;
}


static inline int
netmap_idx_k2n(struct netmap_kring *kr, int idx)
{
	int n = kr->nkr_num_slots;
	idx -= kr->nkr_hwofs;
	if (idx < 0)
		return idx + n;
	else if (idx < n)
		return idx;
	else
		return idx - n;
}
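
/*
 * Usage sketch for the index translators: a driver txsync typically walks
 * the netmap ring with index j and the NIC ring with index l, converting
 * once at the start and then advancing both modulo the ring size.
 * Illustrative only, not compiled; ring, kring and the descriptor
 * programming step come from the surrounding driver code.
 */
#if 0
	u_int j, k, l, lim = kring->nkr_num_slots - 1;

	k = ring->cur;			/* netmap index of the first new slot */
	j = kring->nr_hwcur;
	if (j != k) {			/* there are new packets to send */
		l = netmap_idx_k2n(kring, j);	/* corresponding NIC index */
		while (j != k) {
			/* ... program NIC descriptor l from ring->slot[j] ... */
			j = (j == lim) ? 0 : j + 1;
			l = (l == lim) ? 0 : l + 1;
		}
	}
#endif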

#ifdef NETMAP_MEM2
/* Entries of the look-up table. */
struct lut_entry {
	void *vaddr;		/* virtual address. */
	vm_paddr_t paddr;	/* physical address. */
};

struct netmap_obj_pool;
extern struct lut_entry *netmap_buffer_lut;
#define NMB_VA(i)	(netmap_buffer_lut[i].vaddr)
#define NMB_PA(i)	(netmap_buffer_lut[i].paddr)
#else /* NETMAP_MEM1 */
#define NMB_VA(i)	(netmap_buffer_base + (i * NETMAP_BUF_SIZE) )
#endif /* NETMAP_MEM2 */

/*
 * NMB returns the virtual address of a buffer (buffer 0 on bad index)
 * PNMB also fills the physical address
 */
static inline void *
NMB(struct netmap_slot *slot)
{
	uint32_t i = slot->buf_idx;
	return (i >= netmap_total_buffers) ? NMB_VA(0) : NMB_VA(i);
}

static inline void *
PNMB(struct netmap_slot *slot, uint64_t *pp)
{
	uint32_t i = slot->buf_idx;
	void *ret = (i >= netmap_total_buffers) ? NMB_VA(0) : NMB_VA(i);
#ifdef NETMAP_MEM2
	*pp = (i >= netmap_total_buffers) ? NMB_PA(0) : NMB_PA(i);
#else
	*pp = vtophys(ret);
#endif
	return ret;
}

/* default functions to handle rx/tx interrupts */
int netmap_rx_irq(struct ifnet *, int, int *);
#define netmap_tx_irq(_n, _q) netmap_rx_irq(_n, _q, NULL)
#endif /* _NET_NETMAP_KERN_H_ */