/*
 * This module derived from code donated to the FreeBSD Project by
 * Matthew Dillon <dillon@backplane.com>
 *
 * Copyright (c) 1998 The FreeBSD Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>

/*
 * LIB/MEMORY/ZALLOC.C	- self contained low-overhead memory pool/allocation
 *			  subsystem
 *
 * This subsystem implements memory pools and memory allocation
 * routines.
 *
 * Pools are managed via a linked list of 'free' areas.  Allocating
 * memory creates holes in the freelist, freeing memory fills them.
 * Since the freelist consists only of free memory areas, it is possible
 * to allocate the entire pool without incurring any structural overhead.
 *
 * The system works best when allocating similarly-sized chunks of
 * memory.  Care must be taken to avoid fragmentation when
 * allocating/deallocating dissimilar chunks.
 *
 * When a memory pool is first allocated, the entire pool is marked as
 * allocated.  This is done mainly because we do not want to modify any
 * portion of a pool's data area until we are given permission.  The
 * caller must explicitly deallocate portions of the pool to make them
 * available.
 *
 * z[n]xalloc() works like z[n]alloc() but the allocation is made from
 * within the specified address range.  If the segment could not be
 * allocated, NULL is returned.  WARNING!  The address range will be
 * aligned to an 8 or 16 byte boundary depending on the cpu, so if you
 * give an unaligned address range, unexpected results may occur.
 *
 * If a standard allocation fails, the reclaim function will be called
 * to recover some space.  This usually causes other portions of the
 * same pool to be released.  Memory allocations at this low level
 * should not block, though your reclaim function may do so if you
 * choose.  Reclaim is invoked only for z[n]alloc(), never for
 * z[n]xalloc().
 *
 * Allocations and frees of 0 bytes are valid operations.
 */

#include "zalloc_defs.h"

/*
 * Objects in the pool must be aligned to at least the size of struct MemNode.
 * They must also be aligned to MALLOCALIGN, which should normally be larger
 * than the struct, so we assert at compile time that this is so.
 */
typedef char assert_align[(sizeof(struct MemNode) <= MALLOCALIGN) ? 1 : -1];
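/*
 * (The typedef above is a pre-C11 compile-time assertion: when the
 * condition is false the array size becomes -1, which is a constant
 * expression error.  With C11 the equivalent would be
 * _Static_assert(sizeof(struct MemNode) <= MALLOCALIGN, "MemNode too big").)
 */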

#define MEMNODE_SIZE_MASK	MALLOCALIGN_MASK

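/*
 * A minimal usage sketch (illustrative only; the pool, buffer, and sizes
 * below are hypothetical).  Memory handed to a pool starts out marked
 * allocated, so a pool is seeded by extending it and then explicitly
 * freeing the covered region.
 */
#if 0
static MemPool	examplePool;
/* the buffer must satisfy the pool's minimum alignment */
static char	exampleBuf[4096] __aligned(MALLOCALIGN);

static void
example(void)
{
	void *p;

	/* cover the buffer; the added space starts out marked allocated */
	zextendPool(&examplePool, exampleBuf, sizeof(exampleBuf));
	/* explicitly free the region to make it available for allocation */
	zfree(&examplePool, exampleBuf, sizeof(exampleBuf));

	/* allocate 128 bytes aligned to MALLOCALIGN; NULL if it fails */
	p = znalloc(&examplePool, 128, MALLOCALIGN);
	if (p != NULL)
		zfree(&examplePool, p, 128);	/* the caller tracks sizes */
}
#endif
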
/*
 * znalloc() -	allocate memory (without zeroing) from pool.  Call reclaim
 *		and retry if appropriate, return NULL if unable to allocate
 *		memory.
 */

void *
znalloc(MemPool *mp, uintptr_t bytes, size_t align)
{
	MemNode **pmn;
	MemNode *mn;

	/*
	 * Round the request up to the pool's allocation granularity
	 * (a request of 0 bytes is legal).  This is inclusive of the
	 * MEMNODE_SIZE_MASK minimum alignment.
	 */
	bytes = (bytes + MEMNODE_SIZE_MASK) & ~MEMNODE_SIZE_MASK;
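	/*
	 * e.g. (assuming MALLOCALIGN is 16, so the mask is 0xf): a
	 * 10-byte request rounds up to (10 + 15) & ~15 == 16 bytes,
	 * while a 0-byte request stays 0 and gets the distinct non-NULL
	 * sentinel below rather than a failure indication.
	 */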

	if (bytes == 0)
		return ((void *)-1);

	/*
	 * locate freelist entry big enough to hold the object.  If all objects
	 * are the same size, this is a constant-time function.
	 */

	if (bytes > mp->mp_Size - mp->mp_Used)
		return (NULL);

	for (pmn = &mp->mp_First; (mn = *pmn) != NULL; pmn = &mn->mr_Next) {
		char *ptr = (char *)mn;
		uintptr_t dptr;
		char *aligned;
		size_t extra;

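		/*
		 * Compute how many bytes ('extra') must be shaved off the
		 * head of this free block so that the data address lands
		 * on the requested alignment.  The data address is taken
		 * to be MALLOCALIGN bytes into the block because callers
		 * (such as a malloc() wrapper) prepend a header of that
		 * size.
		 */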
		dptr = (uintptr_t)(ptr + MALLOCALIGN);	/* pointer to data */
		aligned = (char *)(roundup2(dptr, align) - MALLOCALIGN);
		extra = aligned - ptr;

		if (bytes + extra > mn->mr_Bytes)
			continue;

		/*
		 * Cut extra from head and create new memory node from
		 * remainder.
		 */

		if (extra != 0) {
			MemNode *new;

			new = (MemNode *)aligned;
			new->mr_Next = mn->mr_Next;
			new->mr_Bytes = mn->mr_Bytes - extra;

			/* And update current memory node */
			mn->mr_Bytes = extra;
			mn->mr_Next = new;
			/* In next iteration, we will get our aligned address */
			continue;
		}

		/*
		 * Cut a chunk of memory out of the beginning of this
		 * block and fixup the link appropriately.
		 */

		if (mn->mr_Bytes == bytes) {
			*pmn = mn->mr_Next;
		} else {
			mn = (MemNode *)((char *)mn + bytes);
			mn->mr_Next = ((MemNode *)ptr)->mr_Next;
			mn->mr_Bytes = ((MemNode *)ptr)->mr_Bytes - bytes;
			*pmn = mn;
		}
		mp->mp_Used += bytes;
		return (ptr);
	}

	/*
	 * No free area was large enough (the pool is full or too
	 * fragmented), return NULL.
	 */

	return (NULL);
}

/*
 * zfree() - free previously allocated memory
 */

void
zfree(MemPool *mp, void *ptr, uintptr_t bytes)
{
	MemNode **pmn;
	MemNode *mn;

	/*
	 * Round the size up exactly as znalloc() does, so the freed
	 * range matches the allocated one.  This is inclusive of the
	 * MEMNODE_SIZE_MASK minimum alignment.
	 */
	bytes = (bytes + MEMNODE_SIZE_MASK) & ~MEMNODE_SIZE_MASK;

	if (bytes == 0)
		return;

	/*
	 * panic if illegal pointer
	 */

	if ((char *)ptr < (char *)mp->mp_Base ||
	    (char *)ptr + bytes > (char *)mp->mp_End ||
	    ((uintptr_t)ptr & MEMNODE_SIZE_MASK) != 0)
		panic("zfree(%p,%ju): wild pointer", ptr, (uintmax_t)bytes);

	/*
	 * free the segment
	 */
	mp->mp_Used -= bytes;

	for (pmn = &mp->mp_First; (mn = *pmn) != NULL; pmn = &mn->mr_Next) {
		/*
		 * If the freed area lies at or before the current node:
		 * - check that it does not overlap the current node
		 * - merge with the next area if adjacent
		 * - merge with the previous area if adjacent
		 */
		if ((char *)ptr <= (char *)mn) {
			/*
			 * range check
			 */
			if ((char *)ptr + bytes > (char *)mn) {
				panic("zfree(%p,%ju): corrupt memlist1", ptr,
				    (uintmax_t)bytes);
			}

			/*
			 * merge against next area or create independent area
			 */

			if ((char *)ptr + bytes == (char *)mn) {
				((MemNode *)ptr)->mr_Next = mn->mr_Next;
				((MemNode *)ptr)->mr_Bytes =
				    bytes + mn->mr_Bytes;
			} else {
				((MemNode *)ptr)->mr_Next = mn;
				((MemNode *)ptr)->mr_Bytes = bytes;
			}
			*pmn = mn = (MemNode *)ptr;

			/*
			 * merge against previous area (if there is a previous
			 * area).
			 */

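			/*
			 * Note: pmn points at the previous node's mr_Next
			 * field; the cast back to a MemNode pointer relies
			 * on mr_Next being the first member of struct
			 * MemNode, so (MemNode *)pmn addresses the previous
			 * node itself.  The same trick is used after the
			 * loop below.
			 */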
			if (pmn != &mp->mp_First) {
				if ((char *)pmn + ((MemNode *)pmn)->mr_Bytes ==
				    (char *)ptr) {
					((MemNode *)pmn)->mr_Next = mn->mr_Next;
					((MemNode *)pmn)->mr_Bytes +=
					    mn->mr_Bytes;
					mn = (MemNode *)pmn;
				}
			}
			return;
		}
		if ((char *)ptr < (char *)mn + mn->mr_Bytes) {
			panic("zfree(%p,%ju): corrupt memlist2", ptr,
			    (uintmax_t)bytes);
		}
	}
	/*
	 * We are beyond the last MemNode, append new MemNode.  Merge against
	 * previous area if possible.
	 */
	if (pmn == &mp->mp_First ||
	    (char *)pmn + ((MemNode *)pmn)->mr_Bytes != (char *)ptr) {
		((MemNode *)ptr)->mr_Next = NULL;
		((MemNode *)ptr)->mr_Bytes = bytes;
		*pmn = (MemNode *)ptr;
		mn = (MemNode *)ptr;
	} else {
		((MemNode *)pmn)->mr_Bytes += bytes;
		mn = (MemNode *)pmn;
	}
}

/*
 * zextendPool() - extend memory pool to cover additional space.
 *
 *		   Note: the added memory starts out as allocated; you
 *		   must free it to make it available to the memory subsystem.
 *
 *		   Note: mp_Size may not reflect the (mp_End - mp_Base) range
 *		   due to other parts of the system doing their own sbrk()
 *		   calls.
 */

void
zextendPool(MemPool *mp, void *base, uintptr_t bytes)
{
	if (mp->mp_Size == 0) {
		mp->mp_Base = base;
		mp->mp_Used = bytes;
		mp->mp_End = (char *)base + bytes;
		mp->mp_Size = bytes;
	} else {
		void *pend = (char *)mp->mp_Base + mp->mp_Size;

		if (base < mp->mp_Base) {
			mp->mp_Size += (char *)mp->mp_Base - (char *)base;
			mp->mp_Used += (char *)mp->mp_Base - (char *)base;
			mp->mp_Base = base;
		}
		base = (char *)base + bytes;
		if (base > pend) {
			mp->mp_Size += (char *)base - (char *)pend;
			mp->mp_Used += (char *)base - (char *)pend;
			mp->mp_End = (char *)base;
		}
	}
}
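
/*
 * A worked example with hypothetical numbers: a pool covering
 * [0x1000, 0x2000) that is extended with base = 0x3000, bytes = 0x1000
 * ends up with mp_Base = 0x1000, mp_End = 0x4000, and both mp_Size and
 * mp_Used grown by 0x2000; the unreachable gap [0x2000, 0x3000) is
 * counted as allocated and must never be freed.
 */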

#ifdef ZALLOCDEBUG

void
zallocstats(MemPool *mp)
{
	int abytes = 0;
	int hbytes = 0;
	int fcount = 0;
	MemNode *mn;

	printf("%d bytes reserved", (int)mp->mp_Size);

	mn = mp->mp_First;

	if ((void *)mn != (void *)mp->mp_Base) {
		abytes += (char *)mn - (char *)mp->mp_Base;
	}

	while (mn != NULL) {
		if ((char *)mn + mn->mr_Bytes != mp->mp_End) {
			hbytes += mn->mr_Bytes;
			++fcount;
		}
		if (mn->mr_Next != NULL) {
			abytes += (char *)mn->mr_Next -
			    ((char *)mn + mn->mr_Bytes);
		}
		mn = mn->mr_Next;
	}
	printf(" %d bytes allocated\n%d fragments (%d bytes fragmented)\n",
	    abytes, fcount, hbytes);
}

#endif