xref: /freebsd/libexec/rtld-elf/rtld_malloc.c (revision 2e3507c25e42292b45a5482e116d278f5515d04d)
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1983 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * malloc.c (Caltech) 2/21/82
 * Chris Kingsley, kingsley@cit-20.
 *
 * This is a very fast storage allocator.  It allocates blocks of a small
 * number of different sizes, and keeps free lists of each size.  Blocks that
 * don't exactly fit are passed up to the next larger size.  In this
 * implementation, the available sizes are powers of two less the per-block
 * overhead (sizeof(union overhead) bytes).  This is designed for use in a
 * virtual memory environment.
 */
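/*
 * E.g., on LP64, where sizeof(union overhead) is 8, a 100-byte request does
 * not fit the 64-byte bucket (56 usable bytes) and is served from the
 * 128-byte bucket (120 usable bytes).
 */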

#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/mman.h>
#include <errno.h>
#include <stdarg.h>
#include <stddef.h>
#include <string.h>
#include <unistd.h>
#ifdef IN_RTLD
#include "rtld.h"
#include "rtld_printf.h"
#include "rtld_paths.h"
#endif
#include "rtld_malloc.h"

/*
 * Pre-allocate mmap'ed pages
 */
#define	NPOOLPAGES	(128*1024/pagesz)
static caddr_t		pagepool_start, pagepool_end;
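/*
 * pagepool_start and pagepool_end delimit the current pool; morecore()
 * carves bucket-sized chunks out of it and morepages() replaces it when it
 * runs low.
 */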

/*
 * Each block is preceded by sizeof(union overhead) bytes of overhead.  When
 * free, this space contains a pointer to the next free block, and the bottom
 * two bits must be zero.  When in use, the first two bytes hold the bucket
 * index (or, for aligned allocations, the offset back to the underlying
 * block) and the following byte is set to MAGIC or AMAGIC.  The remaining
 * bytes are for alignment.
 */
union	overhead {
	union	overhead *ov_next;	/* when free */
	struct {
		uint16_t ovu_index;	/* bucket # */
		uint8_t ovu_magic;	/* magic number */
	} ovu;
#define	ov_magic	ovu.ovu_magic
#define	ov_index	ovu.ovu_index
};

static void morecore(int bucket);
static int morepages(int n);

#define	MAGIC		0xef		/* magic # on accounting info */
#define	AMAGIC		0xdf		/* magic # for aligned alloc */

/*
 * nextf[i] is the pointer to the next free block of size
 * (FIRST_BUCKET_SIZE << i).  The overhead information precedes the data
 * area returned to the user.
 */
#define	LOW_BITS		3
#define	FIRST_BUCKET_SIZE	(1U << LOW_BITS)
#define	NBUCKETS 30
static	union overhead *nextf[NBUCKETS];
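/*
 * With LOW_BITS == 3 and NBUCKETS == 30, bucket sizes run from 8 bytes up
 * to 4GB (bucket 29) on LP64; requests too large for the biggest bucket
 * fail with NULL.
 */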

static	int pagesz;			/* page size */

/*
 * The array of supported page sizes, pagesizes[], is provided by the caller,
 * i.e., the program that calls this storage allocator.  That program must
 * initialize the array before making its first call to allocate storage.
 * The array must contain at least one page size.  The page sizes must be
 * stored in increasing order.
 */

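/*
 * Step from a client pointer back to the overhead header that precedes it.
 */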
static void *
cp2op(void *cp)
{
	return (((caddr_t)cp - sizeof(union overhead)));
}

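/*
 * Allocate at least nbytes bytes.  The request is rounded up to the
 * smallest bucket that can also hold the overhead header; the bucket's
 * free list is used when possible, otherwise morecore() refills it.
 * Returns NULL if the request is too large or memory cannot be obtained.
 */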
void *
__crt_malloc(size_t nbytes)
{
	union overhead *op;
	int bucket;
	size_t amt;

	/*
	 * First time malloc is called, set up the page size.
	 */
	if (pagesz == 0)
		pagesz = pagesizes[0];
	/*
	 * Convert the amount of memory requested into the smallest
	 * bucket size that satisfies it, accounting for the per-block
	 * overhead.
	 */
	amt = FIRST_BUCKET_SIZE;
	bucket = 0;
	while (nbytes > amt - sizeof(*op)) {
		amt <<= 1;
		bucket++;
		if (amt == 0 || bucket >= NBUCKETS)
			return (NULL);
	}
	/*
	 * If nothing in the hash bucket right now,
	 * request more memory from the system.
	 */
	if ((op = nextf[bucket]) == NULL) {
		morecore(bucket);
		if ((op = nextf[bucket]) == NULL)
			return (NULL);
	}
	/* remove from linked list */
	nextf[bucket] = op->ov_next;
	op->ov_magic = MAGIC;
	op->ov_index = bucket;
	return ((char *)(op + 1));
}

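/*
 * calloc(3)-style allocation: fail on multiplication overflow, otherwise
 * return zeroed storage from __crt_malloc().
 */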
void *
__crt_calloc(size_t num, size_t size)
{
	void *ret;

	if (size != 0 && (num * size) / size != num) {
		/* size_t overflow. */
		return (NULL);
	}

	if ((ret = __crt_malloc(num * size)) != NULL)
		memset(ret, 0, num * size);

	return (ret);
}

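/*
 * Return storage whose address lies 'offset' bytes (reduced modulo 'align')
 * past an 'align'-aligned boundary.  The block is over-allocated with
 * __crt_malloc() and a second overhead header, tagged AMAGIC, is placed
 * immediately before the returned pointer; its ov_index records the distance
 * back to the underlying block's header so that __crt_free() can find it.
 */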
void *
__crt_aligned_alloc_offset(size_t align, size_t size, size_t offset)
{
	void *mem, *ov;
	union overhead ov1;
	uintptr_t x;

	if (align < FIRST_BUCKET_SIZE)
		align = FIRST_BUCKET_SIZE;
	offset &= align - 1;
	mem = __crt_malloc(size + align + offset + sizeof(union overhead));
	if (mem == NULL)
		return (NULL);
	x = roundup2((uintptr_t)mem + sizeof(union overhead), align);
	x += offset;
	ov = cp2op((void *)x);
	ov1.ov_magic = AMAGIC;
	ov1.ov_index = x - (uintptr_t)mem + sizeof(union overhead);
	memcpy(ov, &ov1, sizeof(ov1));
	return ((void *)x);
}

/*
 * Allocate more memory to the indicated bucket.
 */
static void
morecore(int bucket)
{
	union overhead *op;
	int sz;				/* size of desired block */
	int amt;			/* amount to allocate */
	int nblks;			/* how many blocks we get */

	sz = FIRST_BUCKET_SIZE << bucket;
	if (sz < pagesz) {
		amt = pagesz;
		nblks = amt / sz;
	} else {
		amt = sz;
		nblks = 1;
	}
	if (amt > pagepool_end - pagepool_start)
		if (morepages(amt / pagesz + NPOOLPAGES) == 0 &&
		    /* Retry with min required size */
		    morepages(amt / pagesz) == 0)
			return;
	op = (union overhead *)pagepool_start;
	pagepool_start += amt;

	/*
	 * Add new memory allocated to that on
	 * free list for this hash bucket.
	 */
	nextf[bucket] = op;
	while (--nblks > 0) {
		op->ov_next = (union overhead *)((caddr_t)op + sz);
		op = (union overhead *)((caddr_t)op + sz);
	}
}

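/*
 * Return a block to its bucket's free list.  Aligned allocations are
 * recognized by AMAGIC and translated back to the underlying block first;
 * pointers without a valid magic byte are silently ignored.
 */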
void
__crt_free(void *cp)
{
	union overhead *op, op1;
	void *opx;
	int size;

	if (cp == NULL)
		return;
	opx = cp2op(cp);
	memcpy(&op1, opx, sizeof(op1));
	op = op1.ov_magic == AMAGIC ? (void *)((caddr_t)cp - op1.ov_index) :
	    opx;
	if (op->ov_magic != MAGIC)
		return;				/* sanity */
	size = op->ov_index;
	op->ov_next = nextf[size];	/* also clobbers ov_magic */
	nextf[size] = op;
}

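/*
 * Resize an allocation.  The block is reused in place when the new size
 * still fits its current bucket but not the next smaller one; otherwise a
 * new block is allocated, the data is copied, and the old block is freed.
 * Blocks from __crt_aligned_alloc_offset() are not supported and yield NULL.
 */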
void *
__crt_realloc(void *cp, size_t nbytes)
{
	u_int onb;
	int i;
	union overhead *op;
	char *res;

	if (cp == NULL)
		return (__crt_malloc(nbytes));
	op = cp2op(cp);
	if (op->ov_magic != MAGIC)
		return (NULL);	/* Double-free or bad argument */
	i = op->ov_index;
	onb = 1 << (i + 3);
	if (onb < (u_int)pagesz)
		onb -= sizeof(*op);
	else
		onb += pagesz - sizeof(*op);
	/* avoid the copy if the new size still fits the same bucket */
	if (i != 0) {
		i = 1 << (i + 2);
		if (i < pagesz)
			i -= sizeof(*op);
		else
			i += pagesz - sizeof(*op);
	}
	if (nbytes <= onb && nbytes > (size_t)i)
		return (cp);
	if ((res = __crt_malloc(nbytes)) == NULL)
		return (NULL);
	bcopy(cp, res, (nbytes < onb) ? nbytes : onb);
	__crt_free(cp);
	return (res);
}

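/*
 * Replace the page pool: unmap the page-aligned tail of the old pool (if
 * more than a page is left) and mmap a fresh pool of n pages, carrying over
 * the sub-page offset of the old pool start.  Returns n on success and 0 on
 * failure.
 */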
static int
morepages(int n)
{
	caddr_t	addr;
	int offset;

	if (pagepool_end - pagepool_start > pagesz) {
		addr = roundup2(pagepool_start, pagesz);
		if (munmap(addr, pagepool_end - addr) != 0) {
#ifdef IN_RTLD
			rtld_fdprintf(STDERR_FILENO, _BASENAME_RTLD ": "
			    "morepages: cannot munmap %p: %s\n",
			    addr, rtld_strerror(errno));
#endif
		}
	}

	offset = (uintptr_t)pagepool_start - rounddown2(
	    (uintptr_t)pagepool_start, pagesz);

	addr = mmap(0, n * pagesz, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (addr == MAP_FAILED) {
#ifdef IN_RTLD
		rtld_fdprintf(STDERR_FILENO, _BASENAME_RTLD ": morepages: "
		    "cannot mmap anonymous memory: %s\n",
		    rtld_strerror(errno));
#endif
		pagepool_start = pagepool_end = NULL;
		return (0);
	}
	pagepool_start = addr;
	pagepool_end = pagepool_start + n * pagesz;
	pagepool_start += offset;

	return (n);
}
323