/*
 * Copyright (C) 2011-2012 Matteo Landi, Luigi Rizzo. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * $FreeBSD$
 * $Id: netmap_kern.h 11343 2012-07-03 09:08:38Z luigi $
 *
 * The header contains the definitions of constants and function
 * prototypes used only in kernelspace.
 */

#ifndef _NET_NETMAP_KERN_H_
#define _NET_NETMAP_KERN_H_

#define NETMAP_MEM2		// use the new memory allocator

#if defined(__FreeBSD__)
#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

#define	NM_LOCK_T	struct mtx
#define	NM_SELINFO_T	struct selinfo
#define	MBUF_LEN(m)	((m)->m_pkthdr.len)
#define	NM_SEND_UP(ifp, m)	((ifp)->if_input)(ifp, m)

#elif defined(linux)

#define	NM_LOCK_T	spinlock_t
#define	NM_SELINFO_T	wait_queue_head_t
#define	MBUF_LEN(m)	((m)->len)
#define	NM_SEND_UP(ifp, m)	netif_rx(m)

#ifndef DEV_NETMAP
#define DEV_NETMAP
#endif

/*
 * IFCAP_NETMAP goes into net_device's flags (if_capabilities)
 * and priv_flags (if_capenable). The latter used to be 16 bits
 * up to linux 2.6.36, so we need to use a 16 bit value on older
 * platforms and tolerate the clash with IFF_DYNAMIC and IFF_BRIDGE_PORT.
 * For the 32-bit value, 0x100000 (bit 20) has no clashes up to 3.3.1
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37)
#define IFCAP_NETMAP	0x8000
#else
#define IFCAP_NETMAP	0x100000
#endif

#elif defined (__APPLE__)
#warning apple support is experimental
#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)
#define	NM_LOCK_T	IOLock *
#define	NM_SELINFO_T	struct selinfo
#define	MBUF_LEN(m)	((m)->m_pkthdr.len)
#define	NM_SEND_UP(ifp, m)	((ifp)->if_input)(ifp, m)

#else
#error unsupported platform
#endif

#define ND(format, ...)
#define D(format, ...)						\
	do {							\
		struct timeval __xxts;				\
		microtime(&__xxts);				\
		printf("%03d.%06d %s [%d] " format "\n",	\
		    (int)__xxts.tv_sec % 1000, (int)__xxts.tv_usec,	\
		    __FUNCTION__, __LINE__, ##__VA_ARGS__);	\
	} while (0)

struct netmap_adapter;

/*
 * private, kernel view of a ring. Keeps track of the status of
 * a ring across system calls.
 *
 *	nr_hwcur	index of the next buffer to refill.
 *			It corresponds to ring->cur - ring->reserved
 *
 *	nr_hwavail	the number of slots "owned" by userspace.
 *			nr_hwavail =:= ring->avail + ring->reserved
 *
 * The indexes in the NIC and netmap rings are offset by nkr_hwofs slots.
 * This is so that, on a reset, buffers owned by userspace are not
 * modified by the kernel. In particular:
 * RX rings: the next empty buffer (hwcur + hwavail + hwofs) coincides with
 *	the next empty buffer as known by the hardware (next_to_check or so).
 * TX rings: hwcur + hwofs coincides with next_to_send
 */
struct netmap_kring {
	struct netmap_ring *ring;
	u_int nr_hwcur;
	int nr_hwavail;
	u_int nr_kflags;	/* private driver flags */
#define NKR_PENDINTR	0x1	// Pending interrupt.
	u_int nkr_num_slots;

	int nkr_hwofs;		/* offset between NIC and netmap ring */
	struct netmap_adapter *na;
	NM_SELINFO_T si;	/* poll/select wait queue */
	NM_LOCK_T q_lock;	/* used if no device lock available */
} __attribute__((__aligned__(64)));
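/*
 * Worked example of the nkr_hwofs mapping (numbers are purely
 * illustrative): with nkr_num_slots = 256 and nkr_hwofs = 10, NIC slot 0
 * corresponds to netmap slot 10, and NIC slot 250 wraps around to netmap
 * slot (250 + 10) - 256 = 4. The netmap_idx_n2k()/netmap_idx_k2n()
 * helpers defined later in this file implement exactly this wrap-around
 * addition/subtraction.
 */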
/*
 * This struct extends the 'struct adapter' (or
 * equivalent) device descriptor. It contains all fields needed to
 * support netmap operation.
 */
struct netmap_adapter {
	int refcount; /* number of user-space descriptors using this
			 interface, which is equal to the number of
			 struct netmap_if objs in the mapped region. */
	/*
	 * The selwakeup in the interrupt thread can use per-ring
	 * and/or global wait queues. We track how many clients
	 * of each type we have so we can optimize the drivers,
	 * and especially avoid huge contention on the locks.
	 */
	int na_single;	/* threads attached to a single hw queue */
	int na_multi;	/* threads attached to multiple hw queues */

	int separate_locks; /* set if the interface supports different
			       locks for rx, tx and core. */

	u_int num_rx_rings; /* number of adapter receive rings */
	u_int num_tx_rings; /* number of adapter transmit rings */

	u_int num_tx_desc;  /* number of descriptors in each queue */
	u_int num_rx_desc;
	//u_int buff_size;  // XXX deprecate, use NETMAP_BUF_SIZE

	/* tx_rings and rx_rings are private but allocated
	 * as a contiguous chunk of memory. Each array has
	 * N+1 entries, for the adapter queues and for the host queue.
	 */
	struct netmap_kring *tx_rings; /* array of TX rings. */
	struct netmap_kring *rx_rings; /* array of RX rings. */

	NM_SELINFO_T tx_si, rx_si;	/* global wait queues */

	/* copy of if_qflush and if_transmit pointers, to intercept
	 * packets from the network stack when netmap is active.
	 */
	int	(*if_transmit)(struct ifnet *, struct mbuf *);

	/* references to the ifnet and device routines, used by
	 * the generic netmap functions.
	 */
	struct ifnet *ifp; /* adapter is ifp->if_softc */

	NM_LOCK_T core_lock;	/* used if no device lock available */

	int (*nm_register)(struct ifnet *, int onoff);
	void (*nm_lock)(struct ifnet *, int what, u_int ringid);
	int (*nm_txsync)(struct ifnet *, u_int ring, int lock);
	int (*nm_rxsync)(struct ifnet *, u_int ring, int lock);

	int bdg_port;
#ifdef linux
	struct net_device_ops nm_ndo;
	int if_refcount;	// XXX additions for bridge
#endif /* linux */
};

/*
 * The combination of "enable" (ifp->if_capabilities & IFCAP_NETMAP)
 * and refcount gives the status of the interface, namely:
 *
 *	enable	refcount	Status
 *
 *	FALSE	0		normal operation
 *	FALSE	!= 0		-- (impossible)
 *	TRUE	1		netmap mode
 *	TRUE	0		being deleted.
 */

#define NETMAP_DELETING(_na)  (   ((_na)->refcount == 0) &&	\
	( (_na)->ifp->if_capenable & IFCAP_NETMAP) )

/*
 * parameters for (*nm_lock)(adapter, what, index)
 */
enum {
	NETMAP_NO_LOCK = 0,
	NETMAP_CORE_LOCK, NETMAP_CORE_UNLOCK,
	NETMAP_TX_LOCK, NETMAP_TX_UNLOCK,
	NETMAP_RX_LOCK, NETMAP_RX_UNLOCK,
#ifdef __FreeBSD__
#define	NETMAP_REG_LOCK		NETMAP_CORE_LOCK
#define	NETMAP_REG_UNLOCK	NETMAP_CORE_UNLOCK
#else
	NETMAP_REG_LOCK, NETMAP_REG_UNLOCK
#endif
};

/*
 * The following are support routines used by individual drivers to
 * support netmap operation.
 *
 * netmap_attach() initializes a struct netmap_adapter, allocating the
 *	struct netmap_ring's and the struct selinfo.
 *
 * netmap_detach() frees the memory allocated by netmap_attach().
 *
 * netmap_start() replaces the if_transmit routine of the interface,
 *	and is used to intercept packets coming from the stack.
 *
 * netmap_load_map/netmap_reload_map are helper routines to set/reset
 *	the dmamap for a packet buffer
 *
 * netmap_reset() is a helper routine to be called in the driver
 *	when reinitializing a ring.
 */
int netmap_attach(struct netmap_adapter *, int);
void netmap_detach(struct ifnet *);
int netmap_start(struct ifnet *, struct mbuf *);
enum txrx { NR_RX = 0, NR_TX = 1 };
struct netmap_slot *netmap_reset(struct netmap_adapter *na,
	enum txrx tx, int n, u_int new_cur);
int netmap_ring_reinit(struct netmap_kring *);
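/*
 * Illustrative sketch (not part of the API): how a hypothetical driver
 * "foo" might hook into netmap at attach time. The foo_* names and the
 * softc layout are invented for illustration; only the netmap_adapter
 * fields and netmap_attach() come from this header.
 */
#if 0
static void
foo_netmap_attach(struct foo_softc *sc)
{
	struct netmap_adapter na;

	bzero(&na, sizeof(na));
	na.ifp = sc->ifp;			/* back pointer to the interface */
	na.separate_locks = 0;			/* use the single core lock */
	na.num_tx_desc = sc->num_tx_desc;
	na.num_rx_desc = sc->num_rx_desc;
	na.nm_register = foo_netmap_reg;	/* switch netmap mode on/off */
	na.nm_txsync = foo_netmap_txsync;
	na.nm_rxsync = foo_netmap_rxsync;
	na.nm_lock = foo_netmap_lock_wrapper;	/* handle NETMAP_*_LOCK requests */
	netmap_attach(&na, sc->num_queues);	/* second arg: number of hw rings */
}
#endif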
extern u_int netmap_buf_size;
#define NETMAP_BUF_SIZE	netmap_buf_size
extern int netmap_mitigate;
extern int netmap_no_pendintr;
extern u_int netmap_total_buffers;
extern char *netmap_buffer_base;
extern int netmap_verbose;	// XXX debugging
enum {                                  /* verbose flags */
	NM_VERB_ON = 1,                 /* generic verbose */
	NM_VERB_HOST = 0x2,             /* verbose host stack */
	NM_VERB_RXSYNC = 0x10,          /* verbose on rxsync/txsync */
	NM_VERB_TXSYNC = 0x20,
	NM_VERB_RXINTR = 0x100,         /* verbose on rx/tx intr (driver) */
	NM_VERB_TXINTR = 0x200,
	NM_VERB_NIC_RXSYNC = 0x1000,    /* verbose on NIC-ring rx/tx sync */
	NM_VERB_NIC_TXSYNC = 0x2000,
};

/*
 * NA returns a pointer to the struct netmap_adapter from the ifp,
 * WNA is used to write it.
 */
#ifndef WNA
#define	WNA(_ifp)	(_ifp)->if_pspare[0]
#endif
#define	NA(_ifp)	((struct netmap_adapter *)WNA(_ifp))


#ifdef __FreeBSD__
/* Callback invoked by the dma machinery after a successful dmamap_load */
static void netmap_dmamap_cb(__unused void *arg,
    __unused bus_dma_segment_t * segs, __unused int nseg, __unused int error)
{
}

/* bus_dmamap_load wrapper: call aforementioned function if map != NULL.
 * XXX can we do it without a callback ?
 */
static inline void
netmap_load_map(bus_dma_tag_t tag, bus_dmamap_t map, void *buf)
{
	if (map)
		bus_dmamap_load(tag, map, buf, NETMAP_BUF_SIZE,
		    netmap_dmamap_cb, NULL, BUS_DMA_NOWAIT);
}
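/*
 * Illustrative sketch (hypothetical driver "foo", FreeBSD style): how a
 * driver's RX ring (re)initialization might combine netmap_reset() with
 * netmap_load_map(). All foo_*, rxr->* and buf[] names are invented;
 * only the netmap helpers come from this file.
 */
#if 0
static void
foo_init_rx_ring(struct foo_rx_ring *rxr)
{
	struct netmap_adapter *na = NA(rxr->ifp);
	/* netmap_reset() returns NULL if the interface is not in netmap mode */
	struct netmap_slot *slot = netmap_reset(na, NR_RX, rxr->me, 0);
	u_int j;

	for (j = 0; j < na->num_rx_desc; j++) {
		if (slot) {
			/* netmap slot index matching NIC slot j */
			int si = netmap_idx_n2k(&na->rx_rings[rxr->me], j);
			uint64_t paddr;
			void *addr = PNMB(slot + si, &paddr);

			/* attach the netmap buffer to the NIC descriptor */
			netmap_load_map(rxr->tag, rxr->buf[j].map, addr);
			rxr->base[j].addr = htole64(paddr);
		} else {
			/* regular (non-netmap) buffer allocation goes here */
		}
	}
}
#endif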
/* update the map when a buffer changes. */
static inline void
netmap_reload_map(bus_dma_tag_t tag, bus_dmamap_t map, void *buf)
{
	if (map) {
		bus_dmamap_unload(tag, map);
		bus_dmamap_load(tag, map, buf, NETMAP_BUF_SIZE,
		    netmap_dmamap_cb, NULL, BUS_DMA_NOWAIT);
	}
}
#else /* linux */

/*
 * XXX How do we redefine these functions:
 *
 * on linux we need
 *	dma_map_single(&pdev->dev, virt_addr, len, direction)
 *	dma_unmap_single(&adapter->pdev->dev, phys_addr, len, direction)
 * The len can be implicit (on netmap it is NETMAP_BUF_SIZE)
 * unfortunately the direction is not, so we need to change
 * something to have a cross API
 */
#define netmap_load_map(_t, _m, _b)
#define netmap_reload_map(_t, _m, _b)
#if 0
	struct e1000_buffer *buffer_info =  &tx_ring->buffer_info[l];
	/* set time_stamp *before* dma to help avoid a possible race */
	buffer_info->time_stamp = jiffies;
	buffer_info->mapped_as_page = false;
	buffer_info->length = len;
	//buffer_info->next_to_watch = l;
	/* reload dma map */
	dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
			NETMAP_BUF_SIZE, DMA_TO_DEVICE);
	buffer_info->dma = dma_map_single(&adapter->pdev->dev,
			addr, NETMAP_BUF_SIZE, DMA_TO_DEVICE);

	if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
		D("dma mapping error");
		/* goto dma_error; See e1000_put_txbuf() */
		/* XXX reset */
	}
	tx_desc->buffer_addr = htole64(buffer_info->dma); //XXX

#endif

/*
 * The bus_dmamap_sync() can be one of wmb() or rmb() depending on direction.
 */
#define bus_dmamap_sync(_a, _b, _c)

#endif /* linux */

/*
 * functions to map NIC to KRING indexes (n2k) and vice versa (k2n)
 */
static inline int
netmap_idx_n2k(struct netmap_kring *kr, int idx)
{
	int n = kr->nkr_num_slots;
	idx += kr->nkr_hwofs;
	if (idx < 0)
		return idx + n;
	else if (idx < n)
		return idx;
	else
		return idx - n;
}


static inline int
netmap_idx_k2n(struct netmap_kring *kr, int idx)
{
	int n = kr->nkr_num_slots;
	idx -= kr->nkr_hwofs;
	if (idx < 0)
		return idx + n;
	else if (idx < n)
		return idx;
	else
		return idx - n;
}


#ifdef NETMAP_MEM2
/* Entries of the look-up table. */
struct lut_entry {
	void *vaddr;		/* virtual address. */
	vm_paddr_t paddr;	/* physical address. */
};

struct netmap_obj_pool;
extern struct lut_entry *netmap_buffer_lut;
#define NMB_VA(i)	(netmap_buffer_lut[i].vaddr)
#define NMB_PA(i)	(netmap_buffer_lut[i].paddr)
#else /* NETMAP_MEM1 */
#define NMB_VA(i)	(netmap_buffer_base + (i * NETMAP_BUF_SIZE) )
#endif /* NETMAP_MEM2 */

/*
 * NMB returns the virtual address of a buffer (buffer 0 on bad index);
 * PNMB also fills the physical address.
 */
static inline void *
NMB(struct netmap_slot *slot)
{
	uint32_t i = slot->buf_idx;
	return (unlikely(i >= netmap_total_buffers)) ? NMB_VA(0) : NMB_VA(i);
}

static inline void *
PNMB(struct netmap_slot *slot, uint64_t *pp)
{
	uint32_t i = slot->buf_idx;
	void *ret = (i >= netmap_total_buffers) ? NMB_VA(0) : NMB_VA(i);
#ifdef NETMAP_MEM2
	*pp = (i >= netmap_total_buffers) ? NMB_PA(0) : NMB_PA(i);
#else
	*pp = vtophys(ret);
#endif
	return ret;
}
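/*
 * Illustrative sketch (hypothetical driver "foo", FreeBSD style): a
 * fragment of a driver nm_txsync handler showing how netmap_idx_k2n(),
 * PNMB() and netmap_reload_map() fit together. kring, txr and their
 * fields are assumed to be set up by the enclosing function; all foo_*
 * and txr->* names are invented.
 */
#if 0
	/* push slots [nr_hwcur .. ring->cur) to the NIC */
	u_int j = kring->nr_hwcur;			/* netmap ring index */
	while (j != kring->ring->cur) {
		int l = netmap_idx_k2n(kring, j);	/* NIC ring index */
		struct netmap_slot *slot = &kring->ring->slot[j];
		uint64_t paddr;
		void *addr = PNMB(slot, &paddr);

		if (slot->flags & NS_BUF_CHANGED) {
			/* userspace replaced the buffer: rebind the dma map */
			netmap_reload_map(txr->tag, txr->buf[l].map, addr);
			slot->flags &= ~NS_BUF_CHANGED;
		}
		/* program the NIC descriptor (driver specific) */
		txr->base[l].addr = htole64(paddr);
		txr->base[l].len = slot->len;
		bus_dmamap_sync(txr->tag, txr->buf[l].map,
		    BUS_DMASYNC_PREWRITE);

		j = (j + 1 == kring->nkr_num_slots) ? 0 : j + 1;
	}
#endif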

/* default functions to handle rx/tx interrupts */
int netmap_rx_irq(struct ifnet *, int, int *);
#define netmap_tx_irq(_n, _q)	netmap_rx_irq(_n, _q, NULL)

extern int netmap_copy;
#endif /* _NET_NETMAP_KERN_H_ */