/*
 * Copyright (C) 2012-2014 Matteo Landi, Luigi Rizzo, Giuseppe Lettieri. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *   1. Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * $FreeBSD$
 *
 * (New) memory allocator for netmap
 */

/*
 * This allocator creates three memory pools:
 *	nm_if_pool	for the struct netmap_if
 *	nm_ring_pool	for the struct netmap_ring
 *	nm_buf_pool	for the packet buffers.
 *
 * that contain netmap objects. Each pool is made of a number of clusters,
 * each a multiple of a page size in length and containing an integer
 * number of objects.
 * The clusters are contiguous in user space but not in the kernel.
 * Only nm_buf_pool needs to be dma-able,
 * but for convenience we use the same type of allocator for all.
 *
 * Once mapped, the three pools are exported to userspace
 * as a contiguous block, starting from nm_if_pool. Each
 * cluster (and pool) is an integral number of pages.
 *	[ . . . ][ . . . . . .][ . . . . . . . . . .]
 *	  nm_if      nm_ring            nm_buf
 *
 * The userspace areas contain offsets of the objects in userspace.
 * When (at init time) we write these offsets, we find out the index
 * of the object, and from there locate the offset from the beginning
 * of the region.
 *
 * The individual allocators manage a pool of memory for objects of
 * the same size.
 * The pool is split into smaller clusters, whose size is a
 * multiple of the page size. The cluster size is chosen
 * to minimize the waste for a given max cluster size
 * (we do it by brute force, as we have relatively few objects
 * per cluster).
 *
 * Objects are aligned to the cache line (64 bytes), rounding up object
 * sizes when needed. A bitmap contains the state of each object.
 * Allocation scans the bitmap; this is done only on attach, so we are not
 * too worried about performance.
 *
 * For each allocator we can define (through sysctl) the size and
 * number of each object. Memory is allocated at the first use of a
 * netmap file descriptor, and can be freed when all such descriptors
 * have been released (including unmapping the memory).
 * If memory is scarce, the system tries to get as much as possible
 * and the sysctl values reflect the actual allocation.
 * Together with the desired values, the sysctls also export the absolute
 * minimum and maximum values that cannot be overridden.
 *
 * struct netmap_if:
 *	variable size, max 16 bytes per ring pair plus some fixed amount.
 *	1024 bytes should be large enough in practice.
 *
 *	In the worst case we have one netmap_if per ring in the system.
 *
 * struct netmap_ring
 *	variable size, 8 bytes per slot plus some fixed amount.
 *	Rings can be large (e.g. 4k slots, or >32Kbytes).
 *	We default to 36 KB (9 pages), and a few hundred rings.
 *
 * struct netmap_buffer
 *	The more the better, both because fast interfaces tend to have
 *	many slots, and because we may want to use buffers to store
 *	packets in userspace avoiding copies.
 *	Must contain a full frame (e.g. 1518 bytes, or more for vlans,
 *	jumbo frames etc.), plus be nicely aligned, plus some NICs restrict
 *	the size to a multiple of 1K or so. Default to 2K.
 */
#ifndef _NET_NETMAP_MEM2_H_
#define _NET_NETMAP_MEM2_H_


#define NETMAP_BUF_MAX_NUM	20*4096*2	/* large machine */

#define NETMAP_POOL_MAX_NAMSZ	32


enum {
	NETMAP_IF_POOL = 0,
	NETMAP_RING_POOL,
	NETMAP_BUF_POOL,
	NETMAP_POOLS_NR
};


struct netmap_obj_params {
	u_int size;
	u_int num;
};

struct netmap_obj_pool {
	char name[NETMAP_POOL_MAX_NAMSZ];	/* name of the allocator */

	/* ---------------------------------------------------*/
	/* these are only meaningful if the pool is finalized  */
	/* (see 'finalized' field in netmap_mem_d)             */
	u_int objtotal;		/* actual total number of objects. */
	u_int memtotal;		/* actual total memory space */
	u_int numclusters;	/* actual number of clusters */

	u_int objfree;		/* number of free objects. */

	struct lut_entry *lut;	/* virt,phys addresses, objtotal entries */
	uint32_t *bitmap;	/* one bit per buffer, 1 means free */
	uint32_t bitmap_slots;	/* number of uint32 entries in bitmap */
	/* ---------------------------------------------------*/

	/* limits */
	u_int objminsize;	/* minimum object size */
	u_int objmaxsize;	/* maximum object size */
	u_int nummin;		/* minimum number of objects */
	u_int nummax;		/* maximum number of objects */

	/* these are changed only by config */
	u_int _objtotal;	/* total number of objects */
	u_int _objsize;		/* object size */
	u_int _clustsize;	/* cluster size */
	u_int _clustentries;	/* objects per cluster */
	u_int _numclusters;	/* number of clusters */

	/* requested values */
	u_int r_objtotal;
	u_int r_objsize;
};
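/*
 * Illustration only (not part of this header): a minimal sketch of the
 * bitmap scan mentioned in the comment above, assuming a set bit means
 * "free". The real allocator lives in netmap_mem2.c and differs in detail
 * (locking, error reporting, etc.); nm_obj_find_free() is a hypothetical
 * name used here just to show how the bitmap, bitmap_slots and objfree
 * fields fit together.
 *
 *	static int
 *	nm_obj_find_free(struct netmap_obj_pool *p)
 *	{
 *		uint32_t i, j;
 *
 *		for (i = 0; i < p->bitmap_slots; i++) {
 *			if (p->bitmap[i] == 0)
 *				continue;	// all 32 objects here are busy
 *			for (j = 0; j < 32; j++) {
 *				if (p->bitmap[i] & (1U << j)) {
 *					u_int idx = i * 32 + j;
 *
 *					if (idx >= p->objtotal)
 *						return -1; // padding bits only
 *					p->bitmap[i] &= ~(1U << j); // mark busy
 *					p->objfree--;
 *					return idx;	// index within the pool
 *				}
 *			}
 *		}
 *		return -1;	// pool exhausted
 *	}
 */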
#ifdef linux
// XXX a mtx would suffice here 20130415 lr
#define NMA_LOCK_T		struct semaphore
#else /* !linux */
#define NMA_LOCK_T		struct mtx
#endif /* linux */

typedef int (*netmap_mem_config_t)(struct netmap_mem_d*);
typedef int (*netmap_mem_finalize_t)(struct netmap_mem_d*);
typedef void (*netmap_mem_deref_t)(struct netmap_mem_d*);

typedef uint16_t nm_memid_t;

/* We implement two kinds of netmap_mem_d structures:
 *
 * - global: used by hardware NICs;
 *
 * - private: used by VALE ports.
 *
 * In both cases, the netmap_mem_d structure has the same lifetime as the
 * netmap_adapter of the corresponding NIC or port. It is the responsibility of
 * the client code to delete the private allocator when the associated
 * netmap_adapter is freed (this is implemented by the NAF_MEM_OWNER flag in
 * netmap.c). The 'refcount' field counts the number of active users of the
 * structure. The global allocator uses this information to prevent/allow
 * reconfiguration. The private allocators release all their memory when there
 * are no active users. By 'active user' we mean an existing netmap_priv
 * structure holding a reference to the allocator.
 */
struct netmap_mem_d {
	NMA_LOCK_T nm_mtx;	/* protect the allocator */
	u_int nm_totalsize;	/* shorthand */

	u_int flags;
#define NETMAP_MEM_FINALIZED	0x1	/* preallocation done */
#define NETMAP_MEM_PRIVATE	0x2	/* uses private address space */
	int lasterr;		/* last error for curr config */
	int refcount;		/* existing priv structures */
	/* the three allocators */
	struct netmap_obj_pool pools[NETMAP_POOLS_NR];

	netmap_mem_config_t   config;
	netmap_mem_finalize_t finalize;
	netmap_mem_deref_t    deref;

	nm_memid_t nm_id;	/* allocator identifier */

	/* list of all existing allocators, sorted by nm_id */
	struct netmap_mem_d *prev, *next;
};

extern struct netmap_mem_d nm_mem;
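/*
 * Illustration only: a sketch of how client code might create and later
 * dispose of a private allocator using the functions declared below.
 * The name and ring/slot counts are made-up values; real callers derive
 * them from the userspace request, and deletion is normally tied to the
 * NAF_MEM_OWNER flag handled in netmap.c.
 *
 *	int error;
 *	struct netmap_mem_d *nmd;
 *
 *	nmd = netmap_mem_private_new("example", 1, 1024, 1, 1024,
 *		0, 0, &error);		// txr, txd, rxr, rxd, extra_bufs, npipes
 *	if (nmd == NULL)
 *		return error;		// configuration or allocation failed
 *	...				// attach nmd to the netmap_adapter
 *	netmap_mem_private_delete(nmd);	// when the adapter goes away
 */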
vm_paddr_t netmap_mem_ofstophys(struct netmap_mem_d *, vm_ooffset_t);
int	   netmap_mem_finalize(struct netmap_mem_d *);
int	   netmap_mem_init(void);
void	   netmap_mem_fini(void);
struct netmap_if *
	   netmap_mem_if_new(const char *, struct netmap_adapter *);
void	   netmap_mem_if_delete(struct netmap_adapter *, struct netmap_if *);
int	   netmap_mem_rings_create(struct netmap_adapter *);
void	   netmap_mem_rings_delete(struct netmap_adapter *);
void	   netmap_mem_deref(struct netmap_mem_d *);
int	   netmap_mem_get_info(struct netmap_mem_d *, u_int *size, u_int *memflags, uint16_t *id);
ssize_t	   netmap_mem_if_offset(struct netmap_mem_d *, const void *vaddr);
struct netmap_mem_d* netmap_mem_private_new(const char *name,
	u_int txr, u_int txd, u_int rxr, u_int rxd, u_int extra_bufs, u_int npipes,
	int* error);
void	   netmap_mem_private_delete(struct netmap_mem_d *);

#define NETMAP_BDG_BUF_SIZE(n)	((n)->pools[NETMAP_BUF_POOL]._objsize)

uint32_t netmap_extra_alloc(struct netmap_adapter *, uint32_t *, uint32_t n);


#endif /* _NET_NETMAP_MEM2_H_ */
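/*
 * Illustration only: querying an allocator with the interface above.
 * netmap_mem_get_info() reports the total mapped size, the memory flags
 * and the allocator id, while NETMAP_BDG_BUF_SIZE() yields the configured
 * buffer object size. 'nmd' is assumed to be a valid allocator
 * (e.g. &nm_mem, or the one attached to a netmap_adapter).
 *
 *	u_int memsize, memflags;
 *	uint16_t id;
 *
 *	if (netmap_mem_get_info(nmd, &memsize, &memflags, &id) == 0) {
 *		// memsize: bytes to mmap(); memflags: NETMAP_MEM_PRIVATE etc.
 *		u_int bufsize = NETMAP_BDG_BUF_SIZE(nmd);
 *		...
 *	}
 */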