zalloc.c (e57c0c2afbaef051b01022588ed54a6c4ace79da, formerly 55b1c6e7e4a6909004e13c6d2f328f911a8e7b83)
/*
 * This module derived from code donated to the FreeBSD Project by
 * Matthew Dillon <dillon@backplane.com>
 *
 * Copyright (c) 1998 The FreeBSD Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
--- 15 unchanged lines hidden ---
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * LIB/MEMORY/ZALLOC.C - self contained low-overhead memory pool/allocation
 * subsystem
 *
 * This subsystem implements memory pools and memory allocation
 * routines.
 *
 * Pools are managed via a linked list of 'free' areas.  Allocating
 * memory creates holes in the freelist, freeing memory fills them.
 * Since the freelist consists only of free memory areas, it is possible
 * to allocate the entire pool without incurring any structural overhead.
 *
 * The system works best when allocating similarly-sized chunks of
 * memory.  Care must be taken to avoid fragmentation when
 * allocating/deallocating dissimilar chunks.
 *
 * When a memory pool is first allocated, the entire pool is marked as
 * allocated.  This is done mainly because we do not want to modify any
 * portion of a pool's data area until we are given permission.  The
 * caller must explicitly deallocate portions of the pool to make them
 * available.
 *
 * z[n]xalloc() works like z[n]alloc() but the allocation is made from
 * within the specified address range.  If the segment could not be
 * allocated, NULL is returned.  WARNING!  The address range will be
 * aligned to an 8 or 16 byte boundary depending on the cpu, so if you
 * give an unaligned address range, unexpected results may occur.
 *
 * If a standard allocation fails, the reclaim function will be called
 * to recover some space.  This usually causes other portions of the
 * same pool to be released.  Memory allocations at this low level
 * should not block, but you can do that too in your reclaim function
--- 18 unchanged lines hidden ---
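
/*
 * Usage sketch (illustrative only, not part of this file): because a
 * pool starts out fully allocated, the caller seeds it by zfree()ing
 * the backing storage before the first znalloc().  The arena name and
 * sizes below are hypothetical.
 *
 *	static MemPool mp;		(zeroed; zextendPool() initializes it)
 *	static char arena[65536];
 *
 *	zextendPool(&mp, arena, sizeof(arena));
 *	zfree(&mp, arena, sizeof(arena));	(make the space available)
 *	ptr = znalloc(&mp, 128);		(aligned per MEMNODE_SIZE_MASK)
 *	...
 *	zfree(&mp, ptr, 128);			(caller must pass the same size)
 */
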
/*
 * znalloc() - allocate memory (without zeroing) from pool.  Call reclaim
 *             and retry if appropriate, return NULL if unable to allocate
 *             memory.
 */

void *
znalloc(MemPool *mp, uintptr_t bytes)
{
	MemNode **pmn;
	MemNode *mn;

	/*
	 * align according to pool object size (can be 0).  This is
	 * inclusive of the MEMNODE_SIZE_MASK minimum alignment.
	 */
	bytes = (bytes + MEMNODE_SIZE_MASK) & ~MEMNODE_SIZE_MASK;

	if (bytes == 0)
		return ((void *)-1);

	/*
	 * locate freelist entry big enough to hold the object.  If all
	 * objects are the same size, this is a constant-time function.
	 */
	if (bytes > mp->mp_Size - mp->mp_Used)
		return (NULL);

	for (pmn = &mp->mp_First; (mn = *pmn) != NULL; pmn = &mn->mr_Next) {
		char *ptr = (char *)mn;

		if (bytes > mn->mr_Bytes)
			continue;

		/*
		 * Cut a chunk of memory out of the beginning of this
		 * block and fixup the link appropriately.
		 */
		if (mn->mr_Bytes == bytes) {
			*pmn = mn->mr_Next;
		} else {
			mn = (MemNode *)((char *)mn + bytes);
			mn->mr_Next = ((MemNode *)ptr)->mr_Next;
			mn->mr_Bytes = ((MemNode *)ptr)->mr_Bytes - bytes;
			*pmn = mn;
		}
		mp->mp_Used += bytes;
		return (ptr);
	}

	/*
	 * Memory pool is full, return NULL.
	 */
	return (NULL);
}
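
/*
 * Note (illustrative only; names hypothetical): znalloc() has two
 * distinct non-pointer returns.  A zero-sized request yields (void *)-1
 * rather than NULL, so a caller checking only for NULL would mistake it
 * for a usable allocation.
 *
 *	void *p = znalloc(&mp, n);
 *	if (p == NULL)
 *		...				(pool exhausted)
 *	else if (p == (void *)-1)
 *		...				(n was 0; no storage backs p)
 */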

/*
 * zfree() - free previously allocated memory
 */

void
zfree(MemPool *mp, void *ptr, uintptr_t bytes)
{
	MemNode **pmn;
	MemNode *mn;

	/*
	 * align according to pool object size (can be 0).  This is
	 * inclusive of the MEMNODE_SIZE_MASK minimum alignment.
	 */
	bytes = (bytes + MEMNODE_SIZE_MASK) & ~MEMNODE_SIZE_MASK;

	if (bytes == 0)
		return;

	/*
	 * panic if illegal pointer
	 */
	if ((char *)ptr < (char *)mp->mp_Base ||
	    (char *)ptr + bytes > (char *)mp->mp_End ||
	    ((uintptr_t)ptr & MEMNODE_SIZE_MASK) != 0)
		panic("zfree(%p,%ju): wild pointer", ptr, (uintmax_t)bytes);

	/*
	 * free the segment
	 */
	mp->mp_Used -= bytes;

	for (pmn = &mp->mp_First; (mn = *pmn) != NULL; pmn = &mn->mr_Next) {
		/*
		 * If area between last node and current node
		 *  - check range
		 *  - check merge with next area
		 *  - check merge with previous area
		 */
		if ((char *)ptr <= (char *)mn) {
			/*
			 * range check
			 */
			if ((char *)ptr + bytes > (char *)mn) {
				panic("zfree(%p,%ju): corrupt memlist1", ptr,
				    (uintmax_t)bytes);
			}

			/*
			 * merge against next area or create independent area
			 */
			if ((char *)ptr + bytes == (char *)mn) {
				((MemNode *)ptr)->mr_Next = mn->mr_Next;
				((MemNode *)ptr)->mr_Bytes =
				    bytes + mn->mr_Bytes;
			} else {
				((MemNode *)ptr)->mr_Next = mn;
				((MemNode *)ptr)->mr_Bytes = bytes;
			}
			*pmn = mn = (MemNode *)ptr;

			/*
			 * merge against previous area (if there is a previous
			 * area).
			 */
			if (pmn != &mp->mp_First) {
				if ((char *)pmn + ((MemNode *)pmn)->mr_Bytes ==
				    (char *)ptr) {
					((MemNode *)pmn)->mr_Next = mn->mr_Next;
					((MemNode *)pmn)->mr_Bytes +=
					    mn->mr_Bytes;
					mn = (MemNode *)pmn;
				}
			}
			return;
		}
		if ((char *)ptr < (char *)mn + mn->mr_Bytes) {
			panic("zfree(%p,%ju): corrupt memlist2", ptr,
			    (uintmax_t)bytes);
		}
	}
	/*
	 * We are beyond the last MemNode, append new MemNode.  Merge against
	 * previous area if possible.
	 */
	if (pmn == &mp->mp_First ||
	    (char *)pmn + ((MemNode *)pmn)->mr_Bytes != (char *)ptr) {
		((MemNode *)ptr)->mr_Next = NULL;
		((MemNode *)ptr)->mr_Bytes = bytes;
		*pmn = (MemNode *)ptr;
		mn = (MemNode *)ptr;
	} else {
		((MemNode *)pmn)->mr_Bytes += bytes;
		mn = (MemNode *)pmn;
	}
}
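
/*
 * Coalescing sketch (illustrative only; names hypothetical): zfree()
 * keeps the freelist sorted by address and merges a freed chunk with
 * the free areas immediately before and after it, so freeing adjacent
 * chunks in any order rebuilds one large free area.
 *
 *	a = znalloc(&mp, 4096);
 *	b = znalloc(&mp, 4096);		(carved adjacent to a, first-fit)
 *	zfree(&mp, a, 4096);
 *	zfree(&mp, b, 4096);		(merges with the area left by a)
 */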

/*
 * zextendPool() - extend memory pool to cover additional space.
 *
 *	Note: the added memory starts out as allocated, you
 *	must free it to make it available to the memory subsystem.
 *
 *	Note: mp_Size may not reflect (mp_End - mp_Base) range
 *	due to other parts of the system doing their own sbrk()
 *	calls.
 */

void
zextendPool(MemPool *mp, void *base, uintptr_t bytes)
{
	if (mp->mp_Size == 0) {
		mp->mp_Base = base;
		mp->mp_Used = bytes;
		mp->mp_End = (char *)base + bytes;
		mp->mp_Size = bytes;
	} else {
		void *pend = (char *)mp->mp_Base + mp->mp_Size;

		if (base < mp->mp_Base) {
			mp->mp_Size += (char *)mp->mp_Base - (char *)base;
			mp->mp_Used += (char *)mp->mp_Base - (char *)base;
			mp->mp_Base = base;
		}
		base = (char *)base + bytes;
		if (base > pend) {
			mp->mp_Size += (char *)base - (char *)pend;
			mp->mp_Used += (char *)base - (char *)pend;
			mp->mp_End = (char *)base;
		}
	}
}
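
/*
 * Extension sketch (illustrative only; names hypothetical): growing an
 * existing pool.  Per the note above, the added range starts out marked
 * allocated, so it must be zfree()d before znalloc() can hand it out.
 *
 *	zextendPool(&mp, arena2, sizeof(arena2));	(arena2 beyond old end)
 *	zfree(&mp, arena2, sizeof(arena2));		(now allocatable)
 */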

#ifdef ZALLOCDEBUG

void
zallocstats(MemPool *mp)
{
	int abytes = 0;
	int hbytes = 0;
	int fcount = 0;
	MemNode *mn;

	printf("%d bytes reserved", (int)mp->mp_Size);

	mn = mp->mp_First;

	if ((void *)mn != (void *)mp->mp_Base) {
		abytes += (char *)mn - (char *)mp->mp_Base;
	}

	while (mn != NULL) {
		if ((char *)mn + mn->mr_Bytes != mp->mp_End) {
			hbytes += mn->mr_Bytes;
			++fcount;
		}
		if (mn->mr_Next != NULL) {
			abytes += (char *)mn->mr_Next -
			    ((char *)mn + mn->mr_Bytes);
		}
		mn = mn->mr_Next;
	}
	printf(" %d bytes allocated\n%d fragments (%d bytes fragmented)\n",
	    abytes, fcount, hbytes);
}

#endif