xref: /titanic_51/usr/src/lib/libc/port/threads/alloc.c (revision a31148363f598def767ac48c5d82e1572e44b935)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include "lint.h"
30 #include "thr_uberdata.h"
31 #include <sys/syscall.h>
32 
33 extern int __systemcall6(sysret_t *, int, ...);
34 
35 /*
36  * This is a small and simple power of two memory allocator that is
37  * used internally by libc.  Allocations are fast and memory is never
38  * returned to the system, except for allocations of 64 Kbytes and larger,
39  * which are simply mmap()ed and munmap()ed as needed.  Smaller allocations
40  * (minimum size is 64 bytes) are obtained from mmap() of 64K chunks
41  * broken up into unit allocations and maintained on free lists.
42  * The interface requires the caller to keep track of the size of an
43  * allocated block and to pass that size back when freeing a block.
44  *
45  * This allocator is called during initialization, from code called
46  * from the dynamic linker, so it must not call anything that might
47  * re-invoke the dynamic linker to resolve a symbol.  That is,
48  * it must only call functions that are wholly private to libc.
49  *
50  * Also, this allocator must be unique across all link maps
51  * because pointers returned by lmalloc() are stored in the
52  * thread structure, which is constant across all link maps.
53  *
54  * Memory blocks returned by lmalloc() are initialized to zero.
55  */
56 
57 #define	MINSIZE		64	/* (1 << MINSHIFT) */
58 #define	MINSHIFT	6
59 #define	CHUNKSIZE	(64 * 1024)
60 
61 /*
62  * bucketnum	allocation size
63  * 0		64
64  * 1		128
65  * 2		256
66  * 3		512
67  * 4		1024
68  * 5		2048
69  * 6		4096
70  * 7		8192
71  * 8		16384
72  * 9		32768
73  */
74 
75 /*
76  * See "thr_uberdata.h" for the definition of bucket_t.
77  * The 10 (NBUCKETS) buckets are allocated in uberdata.
78  */
79 
80 /*
81  * Performance hack:
82  *
83  * On the very first lmalloc(), before any memory has been allocated,
84  * mmap() a 24K block of memory and carve out six 2K chunks, each
85  * of which is subdivided for the initial allocations from buckets
86  * 0, 1, 2, 3, 4 and 5, giving them initial numbers of elements
87  * 32, 16, 8, 4, 2 and 1, respectively.  The remaining 12K is cut
88  * into one 4K buffer for bucket 6 and one 8K buffer for bucket 7.
89  *
90  * This results in almost all simple single-threaded processes,
91  * such as those employed in the kenbus test suite, having to
92  * allocate only this one 24K block during their lifetimes.
93  */
94 
95 #define	SUBCHUNKSIZE	2048
96 #define	BASE_SIZE	(24 * 1024)
97 
/*
 * Carve the initial BASE_SIZE (24K) mapping into per-bucket free lists,
 * implementing the "Performance hack" described above: buckets 0-5 each
 * get one SUBCHUNKSIZE (2K) slice subdivided into 32, 16, 8, 4, 2 and 1
 * elements respectively; bucket 6 gets a single 4K buffer and bucket 7
 * a single 8K buffer.  Called exactly once, before any threads exist
 * (see the nthreads ASSERT in lmalloc()), so no locking is needed.
 */
static void
initial_allocation(bucket_t *bp)	/* &__uberdata.bucket[0] */
{
	sysret_t rval;
	void *ptr;
	size_t size;
	size_t n;
	int bucketnum;
	void *base;

	/*
	 * We do this seemingly obtuse call to __systemcall6(SYS_mmap)
	 * instead of simply calling mmap() directly because, if the
	 * mmap() system call fails, we must make sure that __cerror()
	 * is not called, because that would call ___errno()
	 * which would dereference curthread and, because we are very
	 * early in libc initialization, curthread is NULL and we would
	 * draw a hard-to-debug SIGSEGV core dump, or worse.
	 * We opt to give a thread panic message instead.
	 */
	/*
	 * With MAP_ALIGN, the address argument (CHUNKSIZE) requests the
	 * alignment of the mapping, not a fixed address.
	 */
	if (__systemcall6(&rval, SYS_mmap, CHUNKSIZE, BASE_SIZE,
	    PROT_READ | PROT_WRITE | PROT_EXEC,
	    _MAP_NEW | MAP_PRIVATE | MAP_ANON | MAP_ALIGN, -1L, (off_t)0) != 0)
		thr_panic("initial allocation failed; swap space exhausted?");
	base = (void *)rval.sys_rval1;

	/*
	 * Buckets 0-5: subdivide one 2K slice each into power-of-two
	 * units and thread the units onto the bucket's free list.
	 */
	for (bucketnum = 0; bucketnum < 6; bucketnum++, bp++) {
		size = (size_t)MINSIZE << bucketnum;
		n = SUBCHUNKSIZE / size;
		ptr = (void *)((caddr_t)base + bucketnum * SUBCHUNKSIZE);

		ASSERT(bp->free_list == NULL);
		bp->free_list = ptr;
		/* link each unit to the next; the mapping is already zero */
		while (--n != 0) {
			void *next = (void *)((caddr_t)ptr + size);
			*(void **)ptr = next;
			ptr = next;
		}
		*(void **)ptr = NULL;
	}

	/*
	 * Bucket 6: one 4K element at base + 12K (bucketnum == 6 here,
	 * so this is the slice just past the six 2K slices above).
	 */
	ptr = (void *)((caddr_t)base + bucketnum * SUBCHUNKSIZE);
	ASSERT(bp->free_list == NULL);
	bp->free_list = ptr;

	/* Bucket 7: one 8K element at base + 16K */
	ptr = (void *)((caddr_t)ptr + 2 * SUBCHUNKSIZE);
	bp++;
	ASSERT(bp->free_list == NULL);
	bp->free_list = ptr;

	/* bucket 7's 8K element must end exactly at base + BASE_SIZE */
	ASSERT(((caddr_t)ptr - (caddr_t)base + 4 * SUBCHUNKSIZE) == BASE_SIZE);
}
150 
151 static int
152 getbucketnum(size_t size)
153 {
154 	int highbit = 0;
155 
156 	if (size-- <= MINSIZE)
157 		return (0);
158 
159 #ifdef _LP64
160 	if (size & 0xffffffff00000000ul)
161 		highbit += 32, size >>= 32;
162 #endif
163 	if (size & 0xffff0000)
164 		highbit += 16, size >>= 16;
165 	if (size & 0xff00)
166 		highbit += 8, size >>= 8;
167 	if (size & 0xf0)
168 		highbit += 4, size >>= 4;
169 	if (size & 0xc)
170 		highbit += 2, size >>= 2;
171 	if (size & 0x2)
172 		highbit += 1;
173 
174 	ASSERT(highbit >= MINSHIFT);
175 	return (highbit - (MINSHIFT - 1));
176 }
177 
/*
 * Allocate a zeroed block of at least 'size' bytes.  The caller must
 * remember the size and pass it back to lfree().  Requests that round
 * up to 64K or more are satisfied directly by mmap(); smaller requests
 * come from the per-bucket free lists, refilled from 64K-multiple
 * mmap() chunks as needed.  Returns NULL on allocation failure.
 */
void *
lmalloc(size_t size)
{
	int bucketnum = getbucketnum(size);
	ulwp_t *self;
	uberdata_t *udp;
	bucket_t *bp;
	void *ptr;

	/*
	 * ulwp_t structures must be allocated from a rwx mapping since it
	 * is a normal data object _and_ it contains instructions that are
	 * executed for user-land DTrace tracing with the fasttrap provider.
	 */
	int prot = PROT_READ | PROT_WRITE | PROT_EXEC;

	/* round size up to the proper power of 2 */
	size = (size_t)MINSIZE << bucketnum;

	if (bucketnum >= NBUCKETS) {
		/* mmap() allocates memory already set to zero */
		/* MAP_ALIGN: the address argument requests 64K alignment */
		ptr = mmap((void *)CHUNKSIZE, size, prot,
		    MAP_PRIVATE|MAP_ANON|MAP_ALIGN, -1, (off_t)0);
		if (ptr == MAP_FAILED)
			ptr = NULL;
		return (ptr);
	}

	/*
	 * Before curthread is set up (early libc initialization) we must
	 * reference __uberdata directly; afterwards, use the thread's
	 * uberdata so this works across all link maps.
	 */
	if ((self = __curthread()) == NULL)
		udp = &__uberdata;
	else
		udp = self->ul_uberdata;

	if (udp->bucket_init == 0) {
		/* first lmalloc() ever: single-threaded by definition */
		ASSERT(udp->nthreads == 0);
		initial_allocation(udp->bucket);
		udp->bucket_init = 1;
	}

	bp = &udp->bucket[bucketnum];
	/* no lock needed before curthread exists: only one thread running */
	if (self != NULL)
		lmutex_lock(&bp->bucket_lock);

	if ((ptr = bp->free_list) == NULL) {
		size_t bsize;
		size_t n;

		/*
		 * Double the number of chunks mmap()ed each time,
		 * in case of large numbers of allocations.
		 */
		if (bp->chunks == 0)
			bp->chunks = 1;
		else
			bp->chunks <<= 1;
		for (;;) {
			bsize = CHUNKSIZE * bp->chunks;
			n = bsize / size;
			ptr = mmap((void *)CHUNKSIZE, bsize, prot,
			    MAP_PRIVATE|MAP_ANON|MAP_ALIGN, -1, (off_t)0);
			if (ptr != MAP_FAILED)
				break;
			/* try a smaller chunk allocation */
			if ((bp->chunks >>= 1) == 0) {
				if (self != NULL)
					lmutex_unlock(&bp->bucket_lock);
				return (NULL);
			}
		}
		/* thread the n fresh units onto the free list */
		bp->free_list = ptr;
		while (--n != 0) {
			void *next = (void *)((caddr_t)ptr + size);
			*(void **)ptr = next;
			ptr = next;
		}
		*(void **)ptr = NULL;
		ptr = bp->free_list;
	}
	/* pop the head of the free list */
	bp->free_list = *(void **)ptr;
	if (self != NULL)
		lmutex_unlock(&bp->bucket_lock);
	/*
	 * We maintain the free list already zeroed except for the pointer
	 * stored at the head of the block (mmap() allocates memory already
	 * set to zero), so all we have to do is zero out the pointer.
	 */
	*(void **)ptr = NULL;
	return (ptr);
}
267 
/*
 * Free a block obtained from lmalloc().  'size' must be the size the
 * caller originally passed to lmalloc() (or anything that rounds up to
 * the same bucket).  Large (>= 64K) blocks are munmap()ed; smaller
 * blocks are zeroed and pushed back onto their bucket's free list.
 * Panics on a pointer whose alignment is inconsistent with 'size'.
 */
void
lfree(void *ptr, size_t size)
{
	int bucketnum = getbucketnum(size);
	ulwp_t *self;
	bucket_t *bp;

	/* round size up to the proper power of 2 */
	size = (size_t)MINSIZE << bucketnum;

	if (bucketnum >= NBUCKETS) {
		/* see comment below */
		if (((uintptr_t)ptr & (CHUNKSIZE - 1)) != 0)
			goto bad;
		(void) munmap(ptr, size);
		return;
	}

	/*
	 * If the low order bits are not all zero as expected, then panic.
	 * This can be caused by an application calling, for example,
	 * pthread_attr_destroy() without having first called
	 * pthread_attr_init() (thereby passing uninitialized data
	 * to pthread_attr_destroy() who then calls lfree() with
	 * the uninitialized data).
	 */
	if (((uintptr_t)ptr & (size - 1)) != 0)
		goto bad;

	/*
	 * Zeroing the memory here saves time later when reallocating it.
	 */
	/* (lmalloc() relies on free-list blocks being zero past the link) */
	(void) memset(ptr, 0, size);

	/* as in lmalloc(): no curthread means single-threaded, no lock */
	if ((self = __curthread()) == NULL)
		bp = &__uberdata.bucket[bucketnum];
	else {
		bp = &self->ul_uberdata->bucket[bucketnum];
		lmutex_lock(&bp->bucket_lock);
	}
	/* push the block onto the head of the bucket's free list */
	*(void **)ptr = bp->free_list;
	bp->free_list = ptr;
	if (self != NULL)
		lmutex_unlock(&bp->bucket_lock);
	return;

bad:
	thr_panic("lfree() called with a misaligned pointer");
}
317 
318 /*
319  * The following functions can be used internally to libc
320  * to make memory allocations in the style of malloc()/free()
321  * (where the size of the allocation is not remembered by the caller)
322  * but which are safe to use within critical sections, that is,
323  * sections of code bounded by enter_critical()/exit_critical(),
324  * lmutex_lock()/lmutex_unlock() or lrw_rdlock()/lrw_wrlock()/lrw_unlock().
325  *
326  * These functions must never be used to allocate memory that is
327  * passed out of libc, for example by strdup(), because it is a
328  * fatal error to free() an object allocated by libc_malloc().
329  * Such objects can only be freed by calling libc_free().
330  */
331 
332 #ifdef	_LP64
333 #define	ALIGNMENT	16
334 #else
335 #define	ALIGNMENT	8
336 #endif
337 
338 typedef union {
339 	size_t	private_size;
340 	char	private_align[ALIGNMENT];
341 } private_header_t;
342 
343 void *
344 libc_malloc(size_t size)
345 {
346 	private_header_t *ptr;
347 
348 	size = (size_t)MINSIZE << getbucketnum(size + sizeof (*ptr));
349 	if ((ptr = lmalloc(size)) == NULL)
350 		return (NULL);
351 	ptr->private_size = size;
352 	return (ptr + 1);
353 }
354 
355 void *
356 libc_realloc(void *old, size_t size)
357 {
358 	private_header_t *ptr;
359 	void *new;
360 
361 	size = (size_t)MINSIZE << getbucketnum(size + sizeof (*ptr));
362 	if ((ptr = lmalloc(size)) == NULL)
363 		return (NULL);
364 	ptr->private_size = size;
365 	new = ptr + 1;
366 	if (old != NULL) {
367 		ptr = (private_header_t *)old - 1;
368 		if (size >= ptr->private_size)
369 			size = ptr->private_size;
370 		(void) memcpy(new, old, size - sizeof (*ptr));
371 		lfree(ptr, ptr->private_size);
372 	}
373 	return (new);
374 }
375 
376 void
377 libc_free(void *p)
378 {
379 	private_header_t *ptr;
380 
381 	if (p) {
382 		ptr = (private_header_t *)p - 1;
383 		lfree(ptr, ptr->private_size);
384 	}
385 }
386 
/*
 * strdup()-style duplicate of 's1' in libc_malloc()ed storage.
 * The result must be released with libc_free(), never free().
 * Returns NULL if the allocation fails.
 */
char *
libc_strdup(const char *s1)
{
	size_t nbytes = strlen(s1) + 1;	/* include the terminating NUL */
	char *s2 = libc_malloc(nbytes);

	if (s2 != NULL)
		(void) memcpy(s2, s1, nbytes);
	return (s2);
}
396