/*
 * Copyright (C) 2011-2012 Matteo Landi, Luigi Rizzo. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * $FreeBSD$
 * $Id: netmap_kern.h 10602 2012-02-21 16:47:55Z luigi $
 *
 * The header contains the definitions of constants and function
 * prototypes used only in kernelspace.
 */

#ifndef _NET_NETMAP_KERN_H_
#define _NET_NETMAP_KERN_H_

#if defined(__FreeBSD__)
#define NM_LOCK_T       struct mtx
#define NM_SELINFO_T    struct selinfo
#define MBUF_LEN(m)     ((m)->m_pkthdr.len)
#define NM_SEND_UP(ifp, m)      ((ifp)->if_input)(ifp, m)
#elif defined (linux)
#define NM_LOCK_T       spinlock_t
#define NM_SELINFO_T    wait_queue_head_t
#define MBUF_LEN(m)     ((m)->len)
#define NM_SEND_UP(ifp, m)      netif_rx(m)
#else
#error unsupported platform
#endif

#ifdef MALLOC_DECLARE
MALLOC_DECLARE(M_NETMAP);
#endif

#define ND(format, ...)
#define D(format, ...)                                          \
        do {                                                    \
                struct timeval __xxts;                          \
                microtime(&__xxts);                             \
                printf("%03d.%06d %s [%d] " format "\n",        \
                    (int)__xxts.tv_sec % 1000, (int)__xxts.tv_usec, \
                    __FUNCTION__, __LINE__, ##__VA_ARGS__);     \
        } while (0)

struct netmap_adapter;

/*
 * private, kernel view of a ring. Keeps track of the status of
 * a ring across system calls.
 *
 *      nr_hwcur        index of the next buffer to refill.
 *                      It corresponds to ring->cur - ring->reserved
 *
 *      nr_hwavail      the number of slots "owned" by userspace.
 *                      nr_hwavail =:= ring->avail + ring->reserved
 *
 * The indexes in the NIC and netmap rings are offset by nkr_hwofs slots.
 * This is so that, on a reset, buffers owned by userspace are not
 * modified by the kernel. In particular:
 * RX rings: the next empty buffer (hwcur + hwavail + hwofs) coincides with
 *      the next empty buffer as known by the hardware (next_to_check or so).
 * TX rings: hwcur + hwofs coincides with next_to_send
 */
struct netmap_kring {
        struct netmap_ring *ring;
        u_int nr_hwcur;
        int nr_hwavail;
        u_int nr_kflags;        /* private driver flags */
#define NKR_PENDINTR    0x1     // Pending interrupt.
        u_int nkr_num_slots;

        int nkr_hwofs;          /* offset between NIC and netmap ring */
        struct netmap_adapter *na;
        NM_SELINFO_T si;        /* poll/select wait queue */
        NM_LOCK_T q_lock;       /* used if no device lock available */
} __attribute__((__aligned__(64)));
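
/*
 * Illustrative sketch only (not part of the netmap API): the relations
 * stated in the comment before struct netmap_kring, written as a debug
 * check a driver could run while holding the kring lock.  It assumes the
 * usual cur/avail/reserved fields of the userspace-visible
 * struct netmap_ring; the helper name itself is hypothetical.
 */
static inline int
netmap_kring_in_sync(struct netmap_kring *kr)   /* hypothetical helper */
{
        struct netmap_ring *r = kr->ring;
        u_int n = kr->nkr_num_slots;

        /* nr_hwavail =:= ring->avail + ring->reserved */
        if (kr->nr_hwavail != (int)(r->avail + r->reserved))
                return 0;
        /* nr_hwcur corresponds to ring->cur - ring->reserved (modulo n) */
        return kr->nr_hwcur == (r->cur + n - r->reserved) % n;
}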

/*
 * This struct extends the 'struct adapter' (or
 * equivalent) device descriptor. It contains all fields needed to
 * support netmap operation.
 */
struct netmap_adapter {
        int refcount; /* number of user-space descriptors using this
                         interface, which is equal to the number of
                         struct netmap_if objs in the mapped region. */

        int separate_locks; /* set if the interface supports different
                               locks for rx, tx and core. */

        u_int num_rx_queues; /* number of tx/rx queue pairs: this is
                                a duplicate field needed to simplify the
                                signature of ``netmap_detach``. */
        u_int num_tx_queues; // if nonzero, overrides num_queues XXX

        u_int num_tx_desc; /* number of descriptors in each queue */
        u_int num_rx_desc;
        u_int buff_size; // XXX deprecate, use NETMAP_BUF_SIZE

        //u_int flags; // XXX unused
        /* tx_rings and rx_rings are private but allocated
         * as a contiguous chunk of memory. Each array has
         * N+1 entries, for the adapter queues and for the host queue.
         */
        struct netmap_kring *tx_rings; /* array of TX rings. */
        struct netmap_kring *rx_rings; /* array of RX rings. */

        NM_SELINFO_T tx_si, rx_si; /* global wait queues */

        /* copy of if_qflush and if_transmit pointers, to intercept
         * packets from the network stack when netmap is active.
         * XXX probably if_qflush is not necessary.
         */
        //void (*if_qflush)(struct ifnet *); // XXX unused
        int (*if_transmit)(struct ifnet *, struct mbuf *);

        /* references to the ifnet and device routines, used by
         * the generic netmap functions.
         */
        struct ifnet *ifp; /* adapter is ifp->if_softc */

        NM_LOCK_T core_lock; /* used if no device lock available */

        int (*nm_register)(struct ifnet *, int onoff);
        void (*nm_lock)(struct ifnet *, int what, u_int ringid);
        int (*nm_txsync)(struct ifnet *, u_int ring, int lock);
        int (*nm_rxsync)(struct ifnet *, u_int ring, int lock);
#ifdef linux
        struct net_device_ops nm_ndo;
#endif /* linux */
};

/*
 * The combination of "enable" (ifp->if_capenable & IFCAP_NETMAP)
 * and refcount gives the status of the interface, namely:
 *
 *      enable  refcount        Status
 *
 *      FALSE   0               normal operation
 *      FALSE   != 0            -- (impossible)
 *      TRUE    1               netmap mode
 *      TRUE    0               being deleted.
 */

#define NETMAP_DELETING(_na) ( ((_na)->refcount == 0) &&        \
        ( (_na)->ifp->if_capenable & IFCAP_NETMAP) )

/*
 * parameters for (*nm_lock)(adapter, what, index)
 */
enum {
        NETMAP_NO_LOCK = 0,
        NETMAP_CORE_LOCK, NETMAP_CORE_UNLOCK,
        NETMAP_TX_LOCK, NETMAP_TX_UNLOCK,
        NETMAP_RX_LOCK, NETMAP_RX_UNLOCK,
#ifdef __FreeBSD__
#define NETMAP_REG_LOCK         NETMAP_CORE_LOCK
#define NETMAP_REG_UNLOCK       NETMAP_CORE_UNLOCK
#else
        NETMAP_REG_LOCK, NETMAP_REG_UNLOCK
#endif
};

/*
 * The following are support routines used by individual drivers to
 * support netmap operation.
 *
 * netmap_attach() initializes a struct netmap_adapter, allocating the
 *      struct netmap_ring's and the struct selinfo.
 *
 * netmap_detach() frees the memory allocated by netmap_attach().
 *
 * netmap_start() replaces the if_transmit routine of the interface,
 *      and is used to intercept packets coming from the stack.
 *
 * netmap_load_map/netmap_reload_map are helper routines to set/reset
 *      the dmamap for a packet buffer.
 *
 * netmap_reset() is a helper routine to be called in the driver
 *      when reinitializing a ring.
 */
int netmap_attach(struct netmap_adapter *, int);
void netmap_detach(struct ifnet *);
int netmap_start(struct ifnet *, struct mbuf *);
enum txrx { NR_RX = 0, NR_TX = 1 };
struct netmap_slot *netmap_reset(struct netmap_adapter *na,
        enum txrx tx, int n, u_int new_cur);
int netmap_ring_reinit(struct netmap_kring *);
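
/*
 * Illustrative sketch only (not part of netmap): how a hypothetical "foo"
 * driver would fill in a struct netmap_adapter and register it with
 * netmap_attach() at device attach time.  All foo_* names and softc
 * fields are placeholders; the second argument of netmap_attach() is the
 * number of hardware queue pairs.
 *
 *      static void
 *      foo_netmap_attach(struct foo_softc *sc)
 *      {
 *              struct netmap_adapter na;
 *
 *              bzero(&na, sizeof(na));
 *              na.ifp = sc->ifp;
 *              na.separate_locks = 1;
 *              na.num_tx_desc = sc->num_tx_desc;
 *              na.num_rx_desc = sc->num_rx_desc;
 *              na.nm_txsync = foo_netmap_txsync;
 *              na.nm_rxsync = foo_netmap_rxsync;
 *              na.nm_lock = foo_netmap_lock;
 *              na.nm_register = foo_netmap_reg;
 *              netmap_attach(&na, sc->num_queues);
 *      }
 */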

extern int netmap_buf_size;
#define NETMAP_BUF_SIZE netmap_buf_size
extern int netmap_mitigate;
extern int netmap_no_pendintr;
extern u_int netmap_total_buffers;
extern char *netmap_buffer_base;
extern int netmap_verbose; // XXX debugging
enum {                                  /* verbose flags */
        NM_VERB_ON = 1,                 /* generic verbose */
        NM_VERB_HOST = 0x2,             /* verbose host stack */
        NM_VERB_RXSYNC = 0x10,          /* verbose on rxsync/txsync */
        NM_VERB_TXSYNC = 0x20,
        NM_VERB_RXINTR = 0x100,         /* verbose on rx/tx intr (driver) */
        NM_VERB_TXINTR = 0x200,
        NM_VERB_NIC_RXSYNC = 0x1000,    /* verbose on rx/tx intr (driver) */
        NM_VERB_NIC_TXSYNC = 0x2000,
};

/*
 * NA returns a pointer to the struct netmap_adapter from the ifp,
 * WNA is used to write it.
 */
#ifndef WNA
#define WNA(_ifp)       (_ifp)->if_pspare[0]
#endif
#define NA(_ifp)        ((struct netmap_adapter *)WNA(_ifp))


/* Callback invoked by the dma machinery after a successful dmamap_load */
static void netmap_dmamap_cb(__unused void *arg,
    __unused bus_dma_segment_t * segs, __unused int nseg, __unused int error)
{
}

/* bus_dmamap_load wrapper: call aforementioned function if map != NULL.
 * XXX can we do it without a callback ?
 */
static inline void
netmap_load_map(bus_dma_tag_t tag, bus_dmamap_t map, void *buf)
{
        if (map)
                bus_dmamap_load(tag, map, buf, NETMAP_BUF_SIZE,
                    netmap_dmamap_cb, NULL, BUS_DMA_NOWAIT);
}

/* update the map when a buffer changes. */
static inline void
netmap_reload_map(bus_dma_tag_t tag, bus_dmamap_t map, void *buf)
{
        if (map) {
                bus_dmamap_unload(tag, map);
                bus_dmamap_load(tag, map, buf, NETMAP_BUF_SIZE,
                    netmap_dmamap_cb, NULL, BUS_DMA_NOWAIT);
        }
}

/*
 * functions to map NIC to KRING indexes (n2k) and vice versa (k2n)
 */
static inline int
netmap_idx_n2k(struct netmap_kring *kr, int idx)
{
        int n = kr->nkr_num_slots;
        idx += kr->nkr_hwofs;
        if (idx < 0)
                return idx + n;
        else if (idx < n)
                return idx;
        else
                return idx - n;
}


static inline int
netmap_idx_k2n(struct netmap_kring *kr, int idx)
{
        int n = kr->nkr_num_slots;
        idx -= kr->nkr_hwofs;
        if (idx < 0)
                return idx + n;
        else if (idx < n)
                return idx;
        else
                return idx - n;
}


/*
 * NMB returns the virtual address of a buffer (buffer 0 on bad index);
 * PNMB also fills the physical address.
 */
static inline void *
NMB(struct netmap_slot *slot)
{
        uint32_t i = slot->buf_idx;
        return (i >= netmap_total_buffers) ? netmap_buffer_base :
                netmap_buffer_base + (i * NETMAP_BUF_SIZE);
}

static inline void *
PNMB(struct netmap_slot *slot, uint64_t *pp)
{
        uint32_t i = slot->buf_idx;
        void *ret = (i >= netmap_total_buffers) ? netmap_buffer_base :
                netmap_buffer_base + (i * NETMAP_BUF_SIZE);
        *pp = vtophys(ret);
        return ret;
}
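
/*
 * Illustrative sketch only: a fragment of a hypothetical driver txsync
 * routine, showing how a kring slot index j is translated to the NIC
 * descriptor index l with netmap_idx_k2n(), and how PNMB() resolves the
 * slot to virtual/physical buffer addresses.  The descriptor write and
 * the foo_* and sc names are placeholders.
 *
 *      j = kring->nr_hwcur;
 *      while (j != k) {        // k: new value of ring->cur
 *              struct netmap_slot *slot = &ring->slot[j];
 *              int l = netmap_idx_k2n(kring, j);
 *              uint64_t paddr;
 *              void *addr = PNMB(slot, &paddr);
 *
 *              foo_write_tx_desc(sc, l, paddr, slot->len);
 *              j = (j == lim) ? 0 : j + 1;     // lim == nkr_num_slots - 1
 *      }
 */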

/* default functions to handle rx/tx interrupts */
int netmap_rx_irq(struct ifnet *, int, int *);
#define netmap_tx_irq(_n, _q)   netmap_rx_irq(_n, _q, NULL)
#ifdef __linux__
#define bus_dmamap_sync(_a, _b, _c)     // wmb() or rmb() ?
netdev_tx_t netmap_start_linux(struct sk_buff *skb, struct net_device *dev);
#endif
#endif /* _NET_NETMAP_KERN_H_ */
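
/*
 * Illustrative sketch only: how a hypothetical driver's per-queue
 * interrupt (or taskqueue) handler would hand RX/TX completion to netmap
 * when the interface is in netmap mode.  A non-zero return from
 * netmap_rx_irq() means the ring is serviced by netmap (waiting clients
 * have been woken up) and the driver should skip its normal rxeof/txeof
 * processing.  The ifp, rxr and txr names are placeholders.
 *
 *      int work_done;
 *
 *      if (netmap_rx_irq(ifp, rxr->me, &work_done))
 *              return;         // RX ring in netmap mode
 *      if (netmap_tx_irq(ifp, txr->me))
 *              return;         // TX ring in netmap mode
 *      // ... otherwise fall through to the normal datapath ...
 */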