/*
 * This module derived from code donated to the FreeBSD Project by
 * Matthew Dillon <dillon@backplane.com>
 *
 * Copyright (c) 1998 The FreeBSD Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>

/*
 * LIB/MEMORY/ZALLOC.C	- self contained low-overhead memory pool/allocation
 *			  subsystem
 *
 *	This subsystem implements memory pools and memory allocation
 *	routines.
 *
 *	Pools are managed via a linked list of 'free' areas.  Allocating
 *	memory creates holes in the freelist, freeing memory fills them.
 *	Since the freelist consists only of free memory areas, it is possible
 *	to allocate the entire pool without incurring any structural overhead.
 *
 *	The system works best when allocating similarly-sized chunks of
 *	memory.  Care must be taken to avoid fragmentation when
 *	allocating/deallocating dissimilar chunks.
 *
 *	When a memory pool is first allocated, the entire pool is marked as
 *	allocated.  This is done mainly because we do not want to modify any
 *	portion of a pool's data area until we are given permission.  The
 *	caller must explicitly deallocate portions of the pool to make them
 *	available.
 *
 *	z[n]xalloc() works like z[n]alloc() but the allocation is made from
 *	within the specified address range.  If the segment could not be
 *	allocated, NULL is returned.  WARNING!  The address range will be
 *	aligned to an 8 or 16 byte boundary depending on the cpu so if you
 *	give an unaligned address range, unexpected results may occur.
 *
 *	If a standard allocation fails, the reclaim function will be called
 *	to recover some space.  This usually causes other portions of the
 *	same pool to be released.  Memory allocations at this low level
 *	should not block but you can do that too in your reclaim function
 *	if you want.  Reclaim does not function when z[n]xalloc() is used,
 *	only for z[n]alloc().
 *
 *	Allocation and frees of 0 bytes are valid operations.
 */

#include "zalloc_defs.h"
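/*
 * Typical use (a minimal sketch; the pool and buffer names and the size
 * below are hypothetical).  A region handed to zextendPool() starts out
 * fully allocated, so the caller seeds the freelist by explicitly
 * freeing it before the first allocation:
 *
 *	static MemPool pool;
 *	static char backing[16384] __aligned(MALLOCALIGN);
 *
 *	zextendPool(&pool, backing, sizeof(backing));
 *	zfree(&pool, backing, sizeof(backing));
 *	p = znalloc(&pool, 128, MALLOCALIGN);	(NULL when the pool is full)
 */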
/*
 * Objects in the pool must be aligned to at least the size of struct
 * MemNode.  They must also be aligned to MALLOCALIGN, which should normally
 * be larger than the struct, so assert that to be so at compile time.
 */
typedef char assert_align[(sizeof (struct MemNode) <= MALLOCALIGN) ? 1 : -1];

#define	MEMNODE_SIZE_MASK	MALLOCALIGN_MASK

/*
 * znalloc() -	allocate memory (without zeroing) from pool.  Call reclaim
 *		and retry if appropriate, return NULL if unable to allocate
 *		memory.
 */

void *
znalloc(MemPool *mp, uintptr_t bytes, size_t align)
{
	MemNode **pmn;
	MemNode *mn;

	/*
	 * align according to pool object size (can be 0).  This is
	 * inclusive of the MEMNODE_SIZE_MASK minimum alignment.
	 */
	bytes = (bytes + MEMNODE_SIZE_MASK) & ~MEMNODE_SIZE_MASK;

	if (bytes == 0)
		return ((void *)-1);

	/*
	 * locate freelist entry big enough to hold the object.  If all objects
	 * are the same size, this is a constant-time function.
	 */

	if (bytes > mp->mp_Size - mp->mp_Used)
		return (NULL);

	for (pmn = &mp->mp_First; (mn = *pmn) != NULL; pmn = &mn->mr_Next) {
		char *ptr = (char *)mn;
		uintptr_t dptr;
		char *aligned;
		size_t extra;

		dptr = (uintptr_t)(ptr + MALLOCALIGN);	/* pointer to data */
		aligned = (char *)(roundup2(dptr, align) - MALLOCALIGN);
		extra = aligned - ptr;

		if (bytes + extra > mn->mr_Bytes)
			continue;

		/*
		 * Cut extra from head and create new memory node from
		 * remainder.
		 */

		if (extra != 0) {
			MemNode *new;

			new = (MemNode *)aligned;
			new->mr_Next = mn->mr_Next;
			new->mr_Bytes = mn->mr_Bytes - extra;

			/* And update current memory node */
			mn->mr_Bytes = extra;
			mn->mr_Next = new;
			/* In next iteration, we will get our aligned address */
			continue;
		}

		/*
		 * Cut a chunk of memory out of the beginning of this
		 * block and fixup the link appropriately.
		 */

		if (mn->mr_Bytes == bytes) {
			*pmn = mn->mr_Next;
		} else {
			mn = (MemNode *)((char *)mn + bytes);
			mn->mr_Next = ((MemNode *)ptr)->mr_Next;
			mn->mr_Bytes = ((MemNode *)ptr)->mr_Bytes - bytes;
			*pmn = mn;
		}
		mp->mp_Used += bytes;
		return (ptr);
	}

	/*
	 * Memory pool is full, return NULL.
	 */

	return (NULL);
}
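/*
 * Worked example of the head-split above (a sketch; MALLOCALIGN is
 * assumed to be 16 and the addresses are hypothetical).  Note that it is
 * the data pointer ptr + MALLOCALIGN, not the returned node address,
 * that gets aligned.  Given a free node at 0x1000 with 0x200 bytes,
 * znalloc(mp, 0x40, 0x80) proceeds as:
 *
 *	pass 1:	dptr = 0x1010, aligned = roundup2(0x1010, 0x80) - 0x10
 *		= 0x1070, extra = 0x70.  The node is split: the head keeps
 *		0x70 bytes and a new free node covers [0x1070, 0x1200).
 *	pass 2:	at the new node, extra is 0, so 0x40 bytes are carved
 *		from its head and 0x1070 is returned; the data address
 *		0x1070 + MALLOCALIGN = 0x1080 is 0x80-aligned.
 */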
/*
 * zfree() - free previously allocated memory
 */

void
zfree(MemPool *mp, void *ptr, uintptr_t bytes)
{
	MemNode **pmn;
	MemNode *mn;

	/*
	 * align according to pool object size (can be 0).  This is
	 * inclusive of the MEMNODE_SIZE_MASK minimum alignment.
	 */
	bytes = (bytes + MEMNODE_SIZE_MASK) & ~MEMNODE_SIZE_MASK;

	if (bytes == 0)
		return;

	/*
	 * panic if illegal pointer
	 */

	if ((char *)ptr < (char *)mp->mp_Base ||
	    (char *)ptr + bytes > (char *)mp->mp_End ||
	    ((uintptr_t)ptr & MEMNODE_SIZE_MASK) != 0)
		panic("zfree(%p,%ju): wild pointer", ptr, (uintmax_t)bytes);

	/*
	 * free the segment
	 */
	mp->mp_Used -= bytes;

	for (pmn = &mp->mp_First; (mn = *pmn) != NULL; pmn = &mn->mr_Next) {
		/*
		 * If area between last node and current node
		 *  - check range
		 *  - check merge with next area
		 *  - check merge with previous area
		 */
		if ((char *)ptr <= (char *)mn) {
			/*
			 * range check
			 */
			if ((char *)ptr + bytes > (char *)mn) {
				panic("zfree(%p,%ju): corrupt memlist1", ptr,
				    (uintmax_t)bytes);
			}

			/*
			 * merge against next area or create independent area
			 */

			if ((char *)ptr + bytes == (char *)mn) {
				((MemNode *)ptr)->mr_Next = mn->mr_Next;
				((MemNode *)ptr)->mr_Bytes =
				    bytes + mn->mr_Bytes;
			} else {
				((MemNode *)ptr)->mr_Next = mn;
				((MemNode *)ptr)->mr_Bytes = bytes;
			}
			*pmn = mn = (MemNode *)ptr;

			/*
			 * merge against previous area (if there is a previous
			 * area).
			 */

			if (pmn != &mp->mp_First) {
				if ((char *)pmn + ((MemNode *)pmn)->mr_Bytes ==
				    (char *)ptr) {
					((MemNode *)pmn)->mr_Next = mn->mr_Next;
					((MemNode *)pmn)->mr_Bytes +=
					    mn->mr_Bytes;
					mn = (MemNode *)pmn;
				}
			}
			return;
		}
		if ((char *)ptr < (char *)mn + mn->mr_Bytes) {
			panic("zfree(%p,%ju): corrupt memlist2", ptr,
			    (uintmax_t)bytes);
		}
	}
	/*
	 * We are beyond the last MemNode, append new MemNode.  Merge against
	 * previous area if possible.
	 */
	if (pmn == &mp->mp_First ||
	    (char *)pmn + ((MemNode *)pmn)->mr_Bytes != (char *)ptr) {
		((MemNode *)ptr)->mr_Next = NULL;
		((MemNode *)ptr)->mr_Bytes = bytes;
		*pmn = (MemNode *)ptr;
		mn = (MemNode *)ptr;
	} else {
		((MemNode *)pmn)->mr_Bytes += bytes;
		mn = (MemNode *)pmn;
	}
}
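/*
 * Note on the merge-against-previous steps in zfree() above: pmn points
 * at the previous node's mr_Next field, and mr_Next is the first member
 * of struct MemNode, so the cast (MemNode *)pmn recovers the previous
 * node itself.  This is why comparing (char *)pmn + mr_Bytes against
 * ptr detects a chunk immediately adjacent to its predecessor.
 */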
/*
 * zextendPool() - extend memory pool to cover additional space.
 *
 *		   Note: the added memory starts out as allocated, you
 *		   must free it to make it available to the memory subsystem.
 *
 *		   Note: mp_Size may not reflect (mp_End - mp_Base) range
 *		   due to other parts of the system doing their own sbrk()
 *		   calls.
 */

void
zextendPool(MemPool *mp, void *base, uintptr_t bytes)
{
	if (mp->mp_Size == 0) {
		mp->mp_Base = base;
		mp->mp_Used = bytes;
		mp->mp_End = (char *)base + bytes;
		mp->mp_Size = bytes;
	} else {
		void *pend = (char *)mp->mp_Base + mp->mp_Size;

		if (base < mp->mp_Base) {
			mp->mp_Size += (char *)mp->mp_Base - (char *)base;
			mp->mp_Used += (char *)mp->mp_Base - (char *)base;
			mp->mp_Base = base;
		}
		base = (char *)base + bytes;
		if (base > pend) {
			mp->mp_Size += (char *)base - (char *)pend;
			mp->mp_Used += (char *)base - (char *)pend;
			mp->mp_End = (char *)base;
		}
	}
}

#ifdef ZALLOCDEBUG

void
zallocstats(MemPool *mp)
{
	int abytes = 0;
	int hbytes = 0;
	int fcount = 0;
	MemNode *mn;

	printf("%d bytes reserved", (int)mp->mp_Size);

	mn = mp->mp_First;

	if ((void *)mn != (void *)mp->mp_Base) {
		abytes += (char *)mn - (char *)mp->mp_Base;
	}

	while (mn != NULL) {
		if ((char *)mn + mn->mr_Bytes != mp->mp_End) {
			hbytes += mn->mr_Bytes;
			++fcount;
		}
		if (mn->mr_Next != NULL) {
			abytes += (char *)mn->mr_Next -
			    ((char *)mn + mn->mr_Bytes);
		}
		mn = mn->mr_Next;
	}
	printf(" %d bytes allocated\n%d fragments (%d bytes fragmented)\n",
	    abytes, fcount, hbytes);
}

#endif