xref: /titanic_51/usr/src/boot/lib/libstand/zalloc.c (revision 4a5d661a82b942b6538acd26209d959ce98b593a)
1*4a5d661aSToomas Soome /*
2*4a5d661aSToomas Soome  * This module derived from code donated to the FreeBSD Project by
3*4a5d661aSToomas Soome  * Matthew Dillon <dillon@backplane.com>
4*4a5d661aSToomas Soome  *
5*4a5d661aSToomas Soome  * Copyright (c) 1998 The FreeBSD Project
6*4a5d661aSToomas Soome  * All rights reserved.
7*4a5d661aSToomas Soome  *
8*4a5d661aSToomas Soome  * Redistribution and use in source and binary forms, with or without
9*4a5d661aSToomas Soome  * modification, are permitted provided that the following conditions
10*4a5d661aSToomas Soome  * are met:
11*4a5d661aSToomas Soome  * 1. Redistributions of source code must retain the above copyright
12*4a5d661aSToomas Soome  *    notice, this list of conditions and the following disclaimer.
13*4a5d661aSToomas Soome  * 2. Redistributions in binary form must reproduce the above copyright
14*4a5d661aSToomas Soome  *    notice, this list of conditions and the following disclaimer in the
15*4a5d661aSToomas Soome  *    documentation and/or other materials provided with the distribution.
16*4a5d661aSToomas Soome  *
17*4a5d661aSToomas Soome  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18*4a5d661aSToomas Soome  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19*4a5d661aSToomas Soome  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20*4a5d661aSToomas Soome  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21*4a5d661aSToomas Soome  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22*4a5d661aSToomas Soome  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23*4a5d661aSToomas Soome  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24*4a5d661aSToomas Soome  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25*4a5d661aSToomas Soome  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26*4a5d661aSToomas Soome  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27*4a5d661aSToomas Soome  * SUCH DAMAGE.
28*4a5d661aSToomas Soome  */
29*4a5d661aSToomas Soome 
30*4a5d661aSToomas Soome #include <sys/cdefs.h>
31*4a5d661aSToomas Soome __FBSDID("$FreeBSD$");
32*4a5d661aSToomas Soome 
33*4a5d661aSToomas Soome /*
34*4a5d661aSToomas Soome  * LIB/MEMORY/ZALLOC.C	- self contained low-overhead memory pool/allocation
35*4a5d661aSToomas Soome  *			  subsystem
36*4a5d661aSToomas Soome  *
37*4a5d661aSToomas Soome  *	This subsystem implements memory pools and memory allocation
38*4a5d661aSToomas Soome  *	routines.
39*4a5d661aSToomas Soome  *
40*4a5d661aSToomas Soome  *	Pools are managed via a linked list of 'free' areas.  Allocating
41*4a5d661aSToomas Soome  *	memory creates holes in the freelist, freeing memory fills them.
42*4a5d661aSToomas Soome  *	Since the freelist consists only of free memory areas, it is possible
 43*4a5d661aSToomas Soome  *	to allocate the entire pool without incurring any structural overhead.
44*4a5d661aSToomas Soome  *
45*4a5d661aSToomas Soome  *	The system works best when allocating similarly-sized chunks of
46*4a5d661aSToomas Soome  *	memory.  Care must be taken to avoid fragmentation when
47*4a5d661aSToomas Soome  *	allocating/deallocating dissimilar chunks.
48*4a5d661aSToomas Soome  *
49*4a5d661aSToomas Soome  *	When a memory pool is first allocated, the entire pool is marked as
50*4a5d661aSToomas Soome  *	allocated.  This is done mainly because we do not want to modify any
51*4a5d661aSToomas Soome  *	portion of a pool's data area until we are given permission.  The
52*4a5d661aSToomas Soome  *	caller must explicitly deallocate portions of the pool to make them
53*4a5d661aSToomas Soome  *	available.
54*4a5d661aSToomas Soome  *
55*4a5d661aSToomas Soome  *	z[n]xalloc() works like z[n]alloc() but the allocation is made from
56*4a5d661aSToomas Soome  *	within the specified address range.  If the segment could not be
57*4a5d661aSToomas Soome  *	allocated, NULL is returned.  WARNING!  The address range will be
 58*4a5d661aSToomas Soome  *	aligned to an 8 or 16 byte boundary depending on the cpu so if you
59*4a5d661aSToomas Soome  *	give an unaligned address range, unexpected results may occur.
60*4a5d661aSToomas Soome  *
61*4a5d661aSToomas Soome  *	If a standard allocation fails, the reclaim function will be called
62*4a5d661aSToomas Soome  *	to recover some space.  This usually causes other portions of the
63*4a5d661aSToomas Soome  *	same pool to be released.  Memory allocations at this low level
64*4a5d661aSToomas Soome  *	should not block but you can do that too in your reclaim function
65*4a5d661aSToomas Soome  *	if you want.  Reclaim does not function when z[n]xalloc() is used,
66*4a5d661aSToomas Soome  *	only for z[n]alloc().
67*4a5d661aSToomas Soome  *
68*4a5d661aSToomas Soome  *	Allocation and frees of 0 bytes are valid operations.
69*4a5d661aSToomas Soome  */
70*4a5d661aSToomas Soome 
71*4a5d661aSToomas Soome #include "zalloc_defs.h"
72*4a5d661aSToomas Soome 
/*
 * Objects in the pool must be aligned to at least the size of struct MemNode.
 * They must also be aligned to MALLOCALIGN, which should normally be larger
 * than the struct, so assert that to be so at compile time.
 *
 * (Pre-C11 compile-time assertion: if the condition is false the array
 * size is -1, which fails to compile.)
 */
typedef char assert_align[(sizeof(struct MemNode) <= MALLOCALIGN) ? 1 : -1];

/* Allocation granularity mask; all request sizes are rounded up via this. */
#define	MEMNODE_SIZE_MASK	MALLOCALIGN_MASK
81*4a5d661aSToomas Soome 
82*4a5d661aSToomas Soome /*
83*4a5d661aSToomas Soome  * znalloc() -	allocate memory (without zeroing) from pool.  Call reclaim
84*4a5d661aSToomas Soome  *		and retry if appropriate, return NULL if unable to allocate
85*4a5d661aSToomas Soome  *		memory.
86*4a5d661aSToomas Soome  */
87*4a5d661aSToomas Soome 
88*4a5d661aSToomas Soome void *
89*4a5d661aSToomas Soome znalloc(MemPool *mp, uintptr_t bytes)
90*4a5d661aSToomas Soome {
91*4a5d661aSToomas Soome     /*
92*4a5d661aSToomas Soome      * align according to pool object size (can be 0).  This is
93*4a5d661aSToomas Soome      * inclusive of the MEMNODE_SIZE_MASK minimum alignment.
94*4a5d661aSToomas Soome      *
95*4a5d661aSToomas Soome      */
96*4a5d661aSToomas Soome     bytes = (bytes + MEMNODE_SIZE_MASK) & ~MEMNODE_SIZE_MASK;
97*4a5d661aSToomas Soome 
98*4a5d661aSToomas Soome     if (bytes == 0)
99*4a5d661aSToomas Soome 	return((void *)-1);
100*4a5d661aSToomas Soome 
101*4a5d661aSToomas Soome     /*
102*4a5d661aSToomas Soome      * locate freelist entry big enough to hold the object.  If all objects
103*4a5d661aSToomas Soome      * are the same size, this is a constant-time function.
104*4a5d661aSToomas Soome      */
105*4a5d661aSToomas Soome 
106*4a5d661aSToomas Soome     if (bytes <= mp->mp_Size - mp->mp_Used) {
107*4a5d661aSToomas Soome 	MemNode **pmn;
108*4a5d661aSToomas Soome 	MemNode *mn;
109*4a5d661aSToomas Soome 
110*4a5d661aSToomas Soome 	for (pmn = &mp->mp_First; (mn=*pmn) != NULL; pmn = &mn->mr_Next) {
111*4a5d661aSToomas Soome 	    if (bytes > mn->mr_Bytes)
112*4a5d661aSToomas Soome 		continue;
113*4a5d661aSToomas Soome 
114*4a5d661aSToomas Soome 	    /*
115*4a5d661aSToomas Soome 	     *  Cut a chunk of memory out of the beginning of this
116*4a5d661aSToomas Soome 	     *  block and fixup the link appropriately.
117*4a5d661aSToomas Soome 	     */
118*4a5d661aSToomas Soome 
119*4a5d661aSToomas Soome 	    {
120*4a5d661aSToomas Soome 		char *ptr = (char *)mn;
121*4a5d661aSToomas Soome 
122*4a5d661aSToomas Soome 		if (mn->mr_Bytes == bytes) {
123*4a5d661aSToomas Soome 		    *pmn = mn->mr_Next;
124*4a5d661aSToomas Soome 		} else {
125*4a5d661aSToomas Soome 		    mn = (MemNode *)((char *)mn + bytes);
126*4a5d661aSToomas Soome 		    mn->mr_Next  = ((MemNode *)ptr)->mr_Next;
127*4a5d661aSToomas Soome 		    mn->mr_Bytes = ((MemNode *)ptr)->mr_Bytes - bytes;
128*4a5d661aSToomas Soome 		    *pmn = mn;
129*4a5d661aSToomas Soome 		}
130*4a5d661aSToomas Soome 		mp->mp_Used += bytes;
131*4a5d661aSToomas Soome 		return(ptr);
132*4a5d661aSToomas Soome 	    }
133*4a5d661aSToomas Soome 	}
134*4a5d661aSToomas Soome     }
135*4a5d661aSToomas Soome 
136*4a5d661aSToomas Soome     /*
137*4a5d661aSToomas Soome      * Memory pool is full, return NULL.
138*4a5d661aSToomas Soome      */
139*4a5d661aSToomas Soome 
140*4a5d661aSToomas Soome     return(NULL);
141*4a5d661aSToomas Soome }
142*4a5d661aSToomas Soome 
/*
 * zfree() - return a previously znalloc()'d segment to the pool.
 *
 *	'bytes' must match the size originally requested.  The freelist
 *	is kept sorted by address; we scan for the insertion point, then
 *	coalesce with the following and/or preceding free areas when
 *	they are physically adjacent.  Panics on out-of-range,
 *	misaligned, or overlapping frees.
 */

void
zfree(MemPool *mp, void *ptr, uintptr_t bytes)
{
    /*
     * align according to pool object size (can be 0).  This is
     * inclusive of the MEMNODE_SIZE_MASK minimum alignment.
     */
    bytes = (bytes + MEMNODE_SIZE_MASK) & ~MEMNODE_SIZE_MASK;

    /* freeing 0 bytes is explicitly a valid no-op */
    if (bytes == 0)
	return;

    /*
     * panic if illegal pointer: outside the pool's [mp_Base, mp_End)
     * range, or not aligned to the allocator's granularity.
     */

    if ((char *)ptr < (char *)mp->mp_Base ||
	(char *)ptr + bytes > (char *)mp->mp_End ||
	((uintptr_t)ptr & MEMNODE_SIZE_MASK) != 0)
	panic("zfree(%p,%ju): wild pointer", ptr, (uintmax_t)bytes);

    /*
     * free the segment
     */

    {
	MemNode **pmn;
	MemNode *mn;

	mp->mp_Used -= bytes;

	/*
	 * Walk the address-ordered freelist.  NOTE(review): the casts of
	 * 'pmn' (a pointer to the previous node's mr_Next field) to
	 * MemNode * below rely on mr_Next being the FIRST member of
	 * struct MemNode — confirm against zalloc_defs.h.
	 */
	for (pmn = &mp->mp_First; (mn = *pmn) != NULL; pmn = &mn->mr_Next) {
	    /*
	     * If area between last node and current node
	     *  - check range
	     *  - check merge with next area
	     *  - check merge with previous area
	     */
	    if ((char *)ptr <= (char *)mn) {
		/*
		 * range check: the freed area must not overlap the free
		 * node that follows it.
		 */
		if ((char *)ptr + bytes > (char *)mn) {
		    panic("zfree(%p,%ju): corrupt memlist1", ptr,
			(uintmax_t)bytes);
		}

		/*
		 * merge against next area or create independent area
		 */

		if ((char *)ptr + bytes == (char *)mn) {
		    /* freed area ends exactly where 'mn' begins: absorb it */
		    ((MemNode *)ptr)->mr_Next = mn->mr_Next;
		    ((MemNode *)ptr)->mr_Bytes= bytes + mn->mr_Bytes;
		} else {
		    /* a gap remains: link the new node ahead of 'mn' */
		    ((MemNode *)ptr)->mr_Next = mn;
		    ((MemNode *)ptr)->mr_Bytes= bytes;
		}
		*pmn = mn = (MemNode *)ptr;

		/*
		 * merge against previous area (if there is a previous
		 * area).
		 */

		if (pmn != &mp->mp_First) {
		    /* (MemNode *)pmn is the previous node; see NOTE above */
		    if ((char*)pmn + ((MemNode*)pmn)->mr_Bytes == (char*)ptr) {
			((MemNode *)pmn)->mr_Next = mn->mr_Next;
			((MemNode *)pmn)->mr_Bytes += mn->mr_Bytes;
			mn = (MemNode *)pmn;
		    }
		}
		return;
		/* NOT REACHED */
	    }
	    /* freed area must not overlap the free node it lies past */
	    if ((char *)ptr < (char *)mn + mn->mr_Bytes) {
		panic("zfree(%p,%ju): corrupt memlist2", ptr,
		    (uintmax_t)bytes);
	    }
	}
	/*
	 * We are beyond the last MemNode, append new MemNode.  Merge against
	 * previous area if possible.
	 */
	if (pmn == &mp->mp_First ||
	    (char *)pmn + ((MemNode *)pmn)->mr_Bytes != (char *)ptr
	) {
	    ((MemNode *)ptr)->mr_Next = NULL;
	    ((MemNode *)ptr)->mr_Bytes = bytes;
	    *pmn = (MemNode *)ptr;
	    mn = (MemNode *)ptr;
	} else {
	    ((MemNode *)pmn)->mr_Bytes += bytes;
	    mn = (MemNode *)pmn;
	}
    }
}
244*4a5d661aSToomas Soome 
245*4a5d661aSToomas Soome /*
246*4a5d661aSToomas Soome  * zextendPool() - extend memory pool to cover additional space.
247*4a5d661aSToomas Soome  *
248*4a5d661aSToomas Soome  *		   Note: the added memory starts out as allocated, you
249*4a5d661aSToomas Soome  *		   must free it to make it available to the memory subsystem.
250*4a5d661aSToomas Soome  *
251*4a5d661aSToomas Soome  *		   Note: mp_Size may not reflect (mp_End - mp_Base) range
252*4a5d661aSToomas Soome  *		   due to other parts of the system doing their own sbrk()
253*4a5d661aSToomas Soome  *		   calls.
254*4a5d661aSToomas Soome  */
255*4a5d661aSToomas Soome 
256*4a5d661aSToomas Soome void
257*4a5d661aSToomas Soome zextendPool(MemPool *mp, void *base, uintptr_t bytes)
258*4a5d661aSToomas Soome {
259*4a5d661aSToomas Soome     if (mp->mp_Size == 0) {
260*4a5d661aSToomas Soome 	mp->mp_Base = base;
261*4a5d661aSToomas Soome 	mp->mp_Used = bytes;
262*4a5d661aSToomas Soome 	mp->mp_End = (char *)base + bytes;
263*4a5d661aSToomas Soome 	mp->mp_Size = bytes;
264*4a5d661aSToomas Soome     } else {
265*4a5d661aSToomas Soome 	void *pend = (char *)mp->mp_Base + mp->mp_Size;
266*4a5d661aSToomas Soome 
267*4a5d661aSToomas Soome 	if (base < mp->mp_Base) {
268*4a5d661aSToomas Soome 	    mp->mp_Size += (char *)mp->mp_Base - (char *)base;
269*4a5d661aSToomas Soome 	    mp->mp_Used += (char *)mp->mp_Base - (char *)base;
270*4a5d661aSToomas Soome 	    mp->mp_Base = base;
271*4a5d661aSToomas Soome 	}
272*4a5d661aSToomas Soome 	base = (char *)base + bytes;
273*4a5d661aSToomas Soome 	if (base > pend) {
274*4a5d661aSToomas Soome 	    mp->mp_Size += (char *)base - (char *)pend;
275*4a5d661aSToomas Soome 	    mp->mp_Used += (char *)base - (char *)pend;
276*4a5d661aSToomas Soome 	    mp->mp_End = (char *)base;
277*4a5d661aSToomas Soome 	}
278*4a5d661aSToomas Soome     }
279*4a5d661aSToomas Soome }
280*4a5d661aSToomas Soome 
281*4a5d661aSToomas Soome #ifdef ZALLOCDEBUG
282*4a5d661aSToomas Soome 
283*4a5d661aSToomas Soome void
284*4a5d661aSToomas Soome zallocstats(MemPool *mp)
285*4a5d661aSToomas Soome {
286*4a5d661aSToomas Soome     int abytes = 0;
287*4a5d661aSToomas Soome     int hbytes = 0;
288*4a5d661aSToomas Soome     int fcount = 0;
289*4a5d661aSToomas Soome     MemNode *mn;
290*4a5d661aSToomas Soome 
291*4a5d661aSToomas Soome     printf("%d bytes reserved", (int) mp->mp_Size);
292*4a5d661aSToomas Soome 
293*4a5d661aSToomas Soome     mn = mp->mp_First;
294*4a5d661aSToomas Soome 
295*4a5d661aSToomas Soome     if ((void *)mn != (void *)mp->mp_Base) {
296*4a5d661aSToomas Soome 	abytes += (char *)mn - (char *)mp->mp_Base;
297*4a5d661aSToomas Soome     }
298*4a5d661aSToomas Soome 
299*4a5d661aSToomas Soome     while (mn) {
300*4a5d661aSToomas Soome 	if ((char *)mn + mn->mr_Bytes != mp->mp_End) {
301*4a5d661aSToomas Soome 	    hbytes += mn->mr_Bytes;
302*4a5d661aSToomas Soome 	    ++fcount;
303*4a5d661aSToomas Soome 	}
304*4a5d661aSToomas Soome 	if (mn->mr_Next)
305*4a5d661aSToomas Soome 	    abytes += (char *)mn->mr_Next - ((char *)mn + mn->mr_Bytes);
306*4a5d661aSToomas Soome 	mn = mn->mr_Next;
307*4a5d661aSToomas Soome     }
308*4a5d661aSToomas Soome     printf(" %d bytes allocated\n%d fragments (%d bytes fragmented)\n",
309*4a5d661aSToomas Soome 	abytes,
310*4a5d661aSToomas Soome 	fcount,
311*4a5d661aSToomas Soome 	hbytes
312*4a5d661aSToomas Soome     );
313*4a5d661aSToomas Soome }
314*4a5d661aSToomas Soome 
315*4a5d661aSToomas Soome #endif
316*4a5d661aSToomas Soome 
317