/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include "lint.h"
#include "thr_uberdata.h"
#include <sys/syscall.h>

extern int __systemcall6(sysret_t *, int, ...);

/*
 * This is a small and simple power of two memory allocator that is
 * used internally by libc.  Allocations are fast and memory is never
 * returned to the system, except for allocations of 64 Kbytes and larger,
 * which are simply mmap()ed and munmap()ed as needed.  Smaller allocations
 * (minimum size is 64 bytes) are obtained from mmap() of 64K chunks
 * broken up into unit allocations and maintained on free lists.
 * The interface requires the caller to keep track of the size of an
 * allocated block and to pass that size back when freeing a block.
 *
 * This allocator is called during initialization, from code called
 * from the dynamic linker, so it must not call anything that might
 * re-invoke the dynamic linker to resolve a symbol.  That is,
 * it must only call functions that are wholly private to libc.
 *
 * Also, this allocator must be unique across all link maps
 * because pointers returned by lmalloc() are stored in the
 * thread structure, which is constant across all link maps.
 *
 * Memory blocks returned by lmalloc() are initialized to zero.
 */
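
/*
 * For example (illustrative only; foo_t is a hypothetical type),
 * a caller must pass the same size to lfree() that it passed
 * to lmalloc():
 *
 *	foo_t *fp = lmalloc(sizeof (foo_t));
 *	...
 *	lfree(fp, sizeof (foo_t));
 */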

#define	MINSIZE		64	/* (1 << MINSHIFT) */
#define	MINSHIFT	6
#define	CHUNKSIZE	(64 * 1024)

/*
 * bucketnum	allocation size
 * 0		64
 * 1		128
 * 2		256
 * 3		512
 * 4		1024
 * 5		2048
 * 6		4096
 * 7		8192
 * 8		16384
 * 9		32768
 */
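
/*
 * That is, bucketnum n serves allocations of MINSIZE << n bytes;
 * requests larger than 32768 bytes (bucketnum >= NBUCKETS) bypass
 * the buckets and are mmap()ed and munmap()ed directly.
 */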

/*
 * See "thr_uberdata.h" for the definition of bucket_t.
 * The 10 (NBUCKETS) buckets are allocated in uberdata.
 */

/*
 * Performance hack:
 *
 * On the very first lmalloc(), before any memory has been allocated,
 * mmap() a 24K block of memory and carve out six 2K chunks, each
 * of which is subdivided for the initial allocations from buckets
 * 0, 1, 2, 3, 4 and 5, giving them initial numbers of elements
 * 32, 16, 8, 4, 2 and 1, respectively.  The remaining 12K is cut
 * into one 4K buffer for bucket 6 and one 8K buffer for bucket 7.
 *
 * This results in almost all simple single-threaded processes,
 * such as those employed in the kenbus test suite, having to
 * allocate only this one 24K block during their lifetimes.
 */

#define	SUBCHUNKSIZE	2048
#define	BASE_SIZE	(24 * 1024)
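
/*
 * Sanity of the layout above: six SUBCHUNKSIZE (2K) subchunks make
 * 12K, and the 4K and 8K buffers account for the rest of BASE_SIZE.
 */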

static void
initial_allocation(bucket_t *bp)	/* &__uberdata.bucket[0] */
{
	sysret_t rval;
	void *ptr;
	size_t size;
	size_t n;
	int bucketnum;
	void *base;

	/*
	 * We do this seemingly obtuse call to __systemcall6(SYS_mmap)
	 * instead of simply calling mmap() directly because, if the
	 * mmap() system call fails, we must make sure that __cerror()
	 * is not called, because that would call ___errno()
	 * which would dereference curthread and, because we are very
	 * early in libc initialization, curthread is NULL and we would
	 * draw a hard-to-debug SIGSEGV core dump, or worse.
	 * We opt to give a thread panic message instead.
	 */
	if (__systemcall6(&rval, SYS_mmap, CHUNKSIZE, BASE_SIZE,
	    PROT_READ | PROT_WRITE | PROT_EXEC,
	    _MAP_NEW | MAP_PRIVATE | MAP_ANON | MAP_ALIGN, -1L, (off_t)0) != 0)
		thr_panic("initial allocation failed; swap space exhausted?");
	base = (void *)rval.sys_rval1;

	for (bucketnum = 0; bucketnum < 6; bucketnum++, bp++) {
		size = (size_t)MINSIZE << bucketnum;
		n = SUBCHUNKSIZE / size;
		ptr = (void *)((caddr_t)base + bucketnum * SUBCHUNKSIZE);

		ASSERT(bp->free_list == NULL);
		bp->free_list = ptr;
		while (--n != 0) {
			void *next = (void *)((caddr_t)ptr + size);
			*(void **)ptr = next;
			ptr = next;
		}
		*(void **)ptr = NULL;
	}

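	/*
	 * The remaining 12K: one 4K block (a single 4096-byte element)
	 * for bucket 6 at base + 12K and one 8K block (a single
	 * 8192-byte element) for bucket 7 at base + 16K.  Each list's
	 * terminating NULL link is already in place because mmap()ed
	 * memory is zero-filled.
	 */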
	ptr = (void *)((caddr_t)base + bucketnum * SUBCHUNKSIZE);
	ASSERT(bp->free_list == NULL);
	bp->free_list = ptr;

	ptr = (void *)((caddr_t)ptr + 2 * SUBCHUNKSIZE);
	bp++;
	ASSERT(bp->free_list == NULL);
	bp->free_list = ptr;

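	/* ptr - base is 16K here; bucket 7's 8K buffer completes 24K */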
	ASSERT(((caddr_t)ptr - (caddr_t)base + 4 * SUBCHUNKSIZE) == BASE_SIZE);
}

/*
 * This highbit code is the same as the code in fls_impl().
 * We inline it here for speed.
 */
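/*
 * For example, getbucketnum(64) is 0 (the 64-byte bucket) and
 * getbucketnum(65) is 1 (the 128-byte bucket); the pre-decrement
 * of size below keeps exact powers of two in the smaller bucket.
 */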
static int
getbucketnum(size_t size)
{
	int highbit = 1;

	if (size-- <= MINSIZE)
		return (0);

#ifdef _LP64
	if (size & 0xffffffff00000000ul)
		highbit += 32, size >>= 32;
#endif
	if (size & 0xffff0000)
		highbit += 16, size >>= 16;
	if (size & 0xff00)
		highbit += 8, size >>= 8;
	if (size & 0xf0)
		highbit += 4, size >>= 4;
	if (size & 0xc)
		highbit += 2, size >>= 2;
	if (size & 0x2)
		highbit += 1;

	ASSERT(highbit > MINSHIFT);
	return (highbit - MINSHIFT);
}

void *
lmalloc(size_t size)
{
	int bucketnum = getbucketnum(size);
	ulwp_t *self;
	uberdata_t *udp;
	bucket_t *bp;
	void *ptr;

	/*
	 * ulwp_t structures must be allocated from a rwx mapping since it
	 * is a normal data object _and_ it contains instructions that are
	 * executed for user-land DTrace tracing with the fasttrap provider.
	 */
	int prot = PROT_READ | PROT_WRITE | PROT_EXEC;

	/* round size up to the proper power of 2 */
	size = (size_t)MINSIZE << bucketnum;

	if (bucketnum >= NBUCKETS) {
		/* mmap() allocates memory already set to zero */
		ptr = mmap((void *)CHUNKSIZE, size, prot,
		    MAP_PRIVATE|MAP_ANON|MAP_ALIGN, -1, (off_t)0);
		if (ptr == MAP_FAILED)
			ptr = NULL;
		return (ptr);
	}

	if ((self = __curthread()) == NULL)
		udp = &__uberdata;
	else
		udp = self->ul_uberdata;

	if (udp->bucket_init == 0) {
		ASSERT(udp->nthreads == 0);
		initial_allocation(udp->bucket);
		udp->bucket_init = 1;
	}

	bp = &udp->bucket[bucketnum];
	if (self != NULL)
		lmutex_lock(&bp->bucket_lock);

	if ((ptr = bp->free_list) == NULL) {
		size_t bsize;
		size_t n;

		/*
		 * Double the number of chunks mmap()ed each time,
		 * in case of large numbers of allocations.
		 */
		if (bp->chunks == 0)
			bp->chunks = 1;
		else
			bp->chunks <<= 1;
		for (;;) {
			bsize = CHUNKSIZE * bp->chunks;
			n = bsize / size;
			ptr = mmap((void *)CHUNKSIZE, bsize, prot,
			    MAP_PRIVATE|MAP_ANON|MAP_ALIGN, -1, (off_t)0);
			if (ptr != MAP_FAILED)
				break;
			/* try a smaller chunk allocation */
			if ((bp->chunks >>= 1) == 0) {
				if (self != NULL)
					lmutex_unlock(&bp->bucket_lock);
				return (NULL);
			}
		}
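		/*
		 * Thread the new chunk onto the free list: the first
		 * word of each size-byte block points to the next
		 * block, and the last link is explicitly NULL.
		 */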
		bp->free_list = ptr;
		while (--n != 0) {
			void *next = (void *)((caddr_t)ptr + size);
			*(void **)ptr = next;
			ptr = next;
		}
		*(void **)ptr = NULL;
		ptr = bp->free_list;
	}
	bp->free_list = *(void **)ptr;
	if (self != NULL)
		lmutex_unlock(&bp->bucket_lock);
	/*
	 * We maintain the free list already zeroed except for the pointer
	 * stored at the head of the block (mmap() allocates memory already
	 * set to zero), so all we have to do is zero out the pointer.
	 */
	*(void **)ptr = NULL;
	return (ptr);
}

void
lfree(void *ptr, size_t size)
{
	int bucketnum = getbucketnum(size);
	ulwp_t *self;
	bucket_t *bp;

	/* round size up to the proper power of 2 */
	size = (size_t)MINSIZE << bucketnum;

	if (bucketnum >= NBUCKETS) {
		/* see comment below */
		if (((uintptr_t)ptr & (CHUNKSIZE - 1)) != 0)
			goto bad;
		(void) munmap(ptr, size);
		return;
	}

288 	 * If the low order bits are not all zero as expected, then panic.
289 	 * This can be caused by an application calling, for example,
290 	 * pthread_attr_destroy() without having first called
291 	 * pthread_attr_init() (thereby passing uninitialized data
292 	 * to pthread_attr_destroy() who then calls lfree() with
293 	 * the uninitialized data).
294 	 */
	if (((uintptr_t)ptr & (size - 1)) != 0)
		goto bad;

	/*
	 * Zeroing the memory here saves time later when reallocating it.
	 */
	(void) memset(ptr, 0, size);

	if ((self = __curthread()) == NULL)
		bp = &__uberdata.bucket[bucketnum];
	else {
		bp = &self->ul_uberdata->bucket[bucketnum];
		lmutex_lock(&bp->bucket_lock);
	}
	*(void **)ptr = bp->free_list;
	bp->free_list = ptr;
	if (self != NULL)
		lmutex_unlock(&bp->bucket_lock);
	return;

bad:
	thr_panic("lfree() called with a misaligned pointer");
}

/*
 * The following functions can be used internally to libc
 * to make memory allocations in the style of malloc()/free()
 * (where the size of the allocation is not remembered by the caller)
 * but which are safe to use within critical sections, that is,
 * sections of code bounded by enter_critical()/exit_critical(),
 * lmutex_lock()/lmutex_unlock() or lrw_rdlock()/lrw_wrlock()/lrw_unlock().
 *
 * These functions must never be used to allocate memory that is
 * passed out of libc, for example by strdup(), because it is a
 * fatal error to free() an object allocated by libc_malloc().
 * Such objects can only be freed by calling libc_free().
 */
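
/*
 * For example (illustrative only):
 *
 *	char *buf = libc_malloc(len);
 *	...
 *	libc_free(buf);
 *
 * The rounded allocation size is recorded in a hidden header that
 * precedes the returned pointer, so libc_free() needs no size argument.
 */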

#ifdef	_LP64
#define	ALIGNMENT	16
#else
#define	ALIGNMENT	8
#endif

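/*
 * The union pads the size header to ALIGNMENT bytes so that the
 * pointer returned to the caller remains suitably aligned.
 */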
typedef union {
	size_t	private_size;
	char	private_align[ALIGNMENT];
} private_header_t;

void *
libc_malloc(size_t size)
{
	private_header_t *ptr;

	size = (size_t)MINSIZE << getbucketnum(size + sizeof (*ptr));
	if ((ptr = lmalloc(size)) == NULL)
		return (NULL);
	ptr->private_size = size;
	return (ptr + 1);
}

void *
libc_realloc(void *old, size_t size)
{
	private_header_t *ptr;
	void *new;

	size = (size_t)MINSIZE << getbucketnum(size + sizeof (*ptr));
	if ((ptr = lmalloc(size)) == NULL)
		return (NULL);
	ptr->private_size = size;
	new = ptr + 1;
	if (old != NULL) {
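		/* copy no more payload than both old and new blocks hold */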
		ptr = (private_header_t *)old - 1;
		if (size >= ptr->private_size)
			size = ptr->private_size;
		(void) memcpy(new, old, size - sizeof (*ptr));
		lfree(ptr, ptr->private_size);
	}
	return (new);
}

void
libc_free(void *p)
{
	private_header_t *ptr;

	if (p) {
		ptr = (private_header_t *)p - 1;
		lfree(ptr, ptr->private_size);
	}
}

char *
libc_strdup(const char *s1)
{
	char *s2 = libc_malloc(strlen(s1) + 1);

	if (s2)
		(void) strcpy(s2, s1);
	return (s2);
}