/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include "lint.h"
#include "thr_uberdata.h"
#include <sys/syscall.h>

extern int __systemcall6(sysret_t *, int, ...);

/*
 * This is a small and simple power of two memory allocator that is
 * used internally by libc.  Allocations are fast and memory is never
 * returned to the system, except for allocations of 64 Kbytes and larger,
 * which are simply mmap()ed and munmap()ed as needed.  Smaller allocations
 * (minimum size is 64 bytes) are obtained from mmap() of 64K chunks
 * broken up into unit allocations and maintained on free lists.
 * The interface requires the caller to keep track of the size of an
 * allocated block and to pass that size back when freeing a block.
 *
 * This allocator is called during initialization, from code called
 * from the dynamic linker, so it must not call anything that might
 * re-invoke the dynamic linker to resolve a symbol.  That is,
 * it must only call functions that are wholly private to libc.
 *
 * Also, this allocator must be unique across all link maps
 * because pointers returned by lmalloc() are stored in the
 * thread structure, which is constant across all link maps.
 *
 * Memory blocks returned by lmalloc() are initialized to zero.
 */
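
/*
 * Usage sketch (hypothetical caller, not part of this file): the caller
 * remembers the requested size and passes the same size back to lfree().
 * foo_t is an assumed example type.
 *
 *	foo_t *fp;
 *
 *	if ((fp = lmalloc(sizeof (*fp))) == NULL)
 *		return (ENOMEM);
 *	... use fp; the memory is already zeroed ...
 *	lfree(fp, sizeof (*fp));
 */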

#define	MINSIZE		64	/* (1 << MINSHIFT) */
#define	MINSHIFT	6
#define	CHUNKSIZE	(64 * 1024)

/*
 * bucketnum	allocation size
 * 0		64
 * 1		128
 * 2		256
 * 3		512
 * 4		1024
 * 5		2048
 * 6		4096
 * 7		8192
 * 8		16384
 * 9		32768
 */

/*
 * See "thr_uberdata.h" for the definition of bucket_t.
 * The 10 (NBUCKETS) buckets are allocated in uberdata.
 */
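
/*
 * Worked example of the mapping (illustrative, derived from the table
 * above): a 100-byte request rounds up to bucket 1 (128 bytes) and a
 * 32768-byte request maps to bucket 9, while any larger request yields
 * a bucket number >= NBUCKETS and is simply mmap()ed by lmalloc().
 */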

/*
 * Performance hack:
 *
 * On the very first lmalloc(), before any memory has been allocated,
 * mmap() a 24K block of memory and carve out six 2K chunks, each
 * of which is subdivided for the initial allocations from buckets
 * 0, 1, 2, 3, 4 and 5, giving them initial numbers of elements
 * 32, 16, 8, 4, 2 and 1, respectively.  The remaining 12K is cut
 * into one 4K buffer for bucket 6 and one 8K buffer for bucket 7.
 *
 * This results in almost all simple single-threaded processes,
 * such as those employed in the kenbus test suite, having to
 * allocate only this one 24K block during their lifetimes.
 */
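
/*
 * Layout of the initial 24K block (a sketch of what initial_allocation()
 * below sets up; offsets are from the start of the mmap()ed block):
 *
 *	offset  0K -  2K	bucket 0	32 x 64 bytes
 *	offset  2K -  4K	bucket 1	16 x 128 bytes
 *	offset  4K -  6K	bucket 2	 8 x 256 bytes
 *	offset  6K -  8K	bucket 3	 4 x 512 bytes
 *	offset  8K - 10K	bucket 4	 2 x 1024 bytes
 *	offset 10K - 12K	bucket 5	 1 x 2048 bytes
 *	offset 12K - 16K	bucket 6	 1 x 4096 bytes
 *	offset 16K - 24K	bucket 7	 1 x 8192 bytes
 */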

#define	SUBCHUNKSIZE	2048
#define	BASE_SIZE	(24 * 1024)

static void
initial_allocation(bucket_t *bp)	/* &__uberdata.bucket[0] */
{
	sysret_t rval;
	void *ptr;
	size_t size;
	size_t n;
	int bucketnum;
	void *base;

	/*
	 * We do this seemingly obtuse call to __systemcall6(SYS_mmap)
	 * instead of simply calling mmap() directly because, if the
	 * mmap() system call fails, we must make sure that __cerror()
	 * is not called, because that would call ___errno()
	 * which would dereference curthread and, because we are very
	 * early in libc initialization, curthread is NULL and we would
	 * draw a hard-to-debug SIGSEGV core dump, or worse.
	 * We opt to give a thread panic message instead.
	 */
	if (__systemcall6(&rval, SYS_mmap, CHUNKSIZE, BASE_SIZE,
	    PROT_READ | PROT_WRITE | PROT_EXEC,
	    _MAP_NEW | MAP_PRIVATE | MAP_ANON | MAP_ALIGN, -1L, (off_t)0) != 0)
		thr_panic("initial allocation failed; swap space exhausted?");
	base = (void *)rval.sys_rval1;

	for (bucketnum = 0; bucketnum < 6; bucketnum++, bp++) {
		size = (size_t)MINSIZE << bucketnum;
		n = SUBCHUNKSIZE / size;
		ptr = (void *)((caddr_t)base + bucketnum * SUBCHUNKSIZE);

		ASSERT(bp->free_list == NULL);
		bp->free_list = ptr;
		while (--n != 0) {
			void *next = (void *)((caddr_t)ptr + size);
			*(void **)ptr = next;
			ptr = next;
		}
		*(void **)ptr = NULL;
	}

	ptr = (void *)((caddr_t)base + bucketnum * SUBCHUNKSIZE);
	ASSERT(bp->free_list == NULL);
	bp->free_list = ptr;

	ptr = (void *)((caddr_t)ptr + 2 * SUBCHUNKSIZE);
	bp++;
	ASSERT(bp->free_list == NULL);
	bp->free_list = ptr;

	ASSERT(((caddr_t)ptr - (caddr_t)base + 4 * SUBCHUNKSIZE) == BASE_SIZE);
}

/*
 * This highbit code is the same as the code in fls_impl().
 * We inline it here for speed.
 */
static int
getbucketnum(size_t size)
{
	int highbit = 1;

	if (size-- <= MINSIZE)
		return (0);

#ifdef _LP64
	if (size & 0xffffffff00000000ul)
		highbit += 32, size >>= 32;
#endif
	if (size & 0xffff0000)
		highbit += 16, size >>= 16;
	if (size & 0xff00)
		highbit += 8, size >>= 8;
	if (size & 0xf0)
		highbit += 4, size >>= 4;
	if (size & 0xc)
		highbit += 2, size >>= 2;
	if (size & 0x2)
		highbit += 1;

	ASSERT(highbit > MINSHIFT);
	return (highbit - MINSHIFT);
}
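
/*
 * Worked examples (illustrative only): getbucketnum(64) returns 0 because
 * the initial test compares the undecremented size against MINSIZE.
 * getbucketnum(65) first decrements size to 64 (0x40); the highest set bit
 * is bit 6, so highbit ends up as 7 and the result is 7 - MINSHIFT = 1,
 * i.e. bucket 1 (128 bytes).  The decrement is what makes exact powers of
 * two, such as 128, land in their own bucket rather than the next one up.
 */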

void *
lmalloc(size_t size)
{
	int bucketnum = getbucketnum(size);
	ulwp_t *self;
	uberdata_t *udp;
	bucket_t *bp;
	void *ptr;

	/*
	 * ulwp_t structures must be allocated from an rwx mapping since they
	 * are normal data objects _and_ they contain instructions that are
	 * executed for user-land DTrace tracing with the fasttrap provider.
	 */
	int prot = PROT_READ | PROT_WRITE | PROT_EXEC;

	/* round size up to the proper power of 2 */
	size = (size_t)MINSIZE << bucketnum;

	if (bucketnum >= NBUCKETS) {
		/* mmap() allocates memory already set to zero */
		ptr = mmap((void *)CHUNKSIZE, size, prot,
		    MAP_PRIVATE|MAP_ANON|MAP_ALIGN, -1, (off_t)0);
		if (ptr == MAP_FAILED)
			ptr = NULL;
		return (ptr);
	}

	if ((self = __curthread()) == NULL)
		udp = &__uberdata;
	else
		udp = self->ul_uberdata;

	if (udp->bucket_init == 0) {
		ASSERT(udp->nthreads == 0);
		initial_allocation(udp->bucket);
		udp->bucket_init = 1;
	}

	bp = &udp->bucket[bucketnum];
	if (self != NULL)
		lmutex_lock(&bp->bucket_lock);

	if ((ptr = bp->free_list) == NULL) {
		size_t bsize;
		size_t n;

		/*
		 * Double the number of chunks mmap()ed each time,
		 * in case of large numbers of allocations.
		 */
		if (bp->chunks == 0)
			bp->chunks = 1;
		else
			bp->chunks <<= 1;
		for (;;) {
			bsize = CHUNKSIZE * bp->chunks;
			n = bsize / size;
			ptr = mmap((void *)CHUNKSIZE, bsize, prot,
			    MAP_PRIVATE|MAP_ANON|MAP_ALIGN, -1, (off_t)0);
			if (ptr != MAP_FAILED)
				break;
			/* try a smaller chunk allocation */
			if ((bp->chunks >>= 1) == 0) {
				if (self != NULL)
					lmutex_unlock(&bp->bucket_lock);
				return (NULL);
			}
		}
		bp->free_list = ptr;
		while (--n != 0) {
			void *next = (void *)((caddr_t)ptr + size);
			*(void **)ptr = next;
			ptr = next;
		}
		*(void **)ptr = NULL;
		ptr = bp->free_list;
	}
	bp->free_list = *(void **)ptr;
	if (self != NULL)
		lmutex_unlock(&bp->bucket_lock);
	/*
	 * We maintain the free list already zeroed except for the pointer
	 * stored at the head of the block (mmap() allocates memory already
	 * set to zero), so all we have to do is zero out the pointer.
	 */
	*(void **)ptr = NULL;
	return (ptr);
}
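
/*
 * Growth example (illustrative): the first time bucket 3 (512 bytes) runs
 * out of free blocks, lmalloc() maps one 64K chunk, yielding 128 blocks;
 * the next refill maps 128K for 256 blocks, and so on, doubling the
 * mapping each time the free list is exhausted.
 */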

void
lfree(void *ptr, size_t size)
{
	int bucketnum = getbucketnum(size);
	ulwp_t *self;
	bucket_t *bp;

	/* round size up to the proper power of 2 */
	size = (size_t)MINSIZE << bucketnum;

	if (bucketnum >= NBUCKETS) {
		/* see comment below */
		if (((uintptr_t)ptr & (CHUNKSIZE - 1)) != 0)
			goto bad;
		(void) munmap(ptr, size);
		return;
	}

	/*
	 * If the low order bits are not all zero as expected, then panic.
	 * This can be caused by an application calling, for example,
	 * pthread_attr_destroy() without having first called
	 * pthread_attr_init() (thereby passing uninitialized data
	 * to pthread_attr_destroy(), which then calls lfree() with
	 * the uninitialized data).
	 */
	if (((uintptr_t)ptr & (size - 1)) != 0)
		goto bad;

	/*
	 * Zeroing the memory here saves time later when reallocating it.
	 */
	(void) memset(ptr, 0, size);

	if ((self = __curthread()) == NULL)
		bp = &__uberdata.bucket[bucketnum];
	else {
		bp = &self->ul_uberdata->bucket[bucketnum];
		lmutex_lock(&bp->bucket_lock);
	}
	*(void **)ptr = bp->free_list;
	bp->free_list = ptr;
	if (self != NULL)
		lmutex_unlock(&bp->bucket_lock);
	return;

bad:
	thr_panic("lfree() called with a misaligned pointer");
}

/*
 * The following functions can be used internally to libc
 * to make memory allocations in the style of malloc()/free()
 * (where the size of the allocation is not remembered by the caller)
 * but which are safe to use within critical sections, that is,
 * sections of code bounded by enter_critical()/exit_critical(),
 * lmutex_lock()/lmutex_unlock() or lrw_rdlock()/lrw_wrlock()/lrw_unlock().
 *
 * These functions must never be used to allocate memory that is
 * passed out of libc, for example by strdup(), because it is a
 * fatal error to free() an object allocated by libc_malloc().
 * Such objects can only be freed by calling libc_free().
 */
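
/*
 * Usage sketch (hypothetical libc-internal caller): unlike lmalloc()/
 * lfree(), the caller does not track the allocation size.  'len' and
 * 'buf' are assumed example names.
 *
 *	char *buf;
 *
 *	if ((buf = libc_malloc(len)) == NULL)
 *		return (ENOMEM);
 *	... use buf ...
 *	libc_free(buf);		never free(), and never hand the pointer
 *				to callers outside of libc
 */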

#ifdef	_LP64
#define	ALIGNMENT	16
#else
#define	ALIGNMENT	8
#endif

typedef union {
	size_t	private_size;
	char	private_align[ALIGNMENT];
} private_header_t;

void *
libc_malloc(size_t size)
{
	private_header_t *ptr;

	size = (size_t)MINSIZE << getbucketnum(size + sizeof (*ptr));
	if ((ptr = lmalloc(size)) == NULL)
		return (NULL);
	ptr->private_size = size;
	return (ptr + 1);
}
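
/*
 * Overhead example (illustrative, 64-bit): libc_malloc(100) asks
 * getbucketnum() for 100 + 16 = 116 bytes, which maps to bucket 1, so a
 * 128-byte block is drawn from lmalloc().  The first 16 bytes hold the
 * private_header_t recording the rounded size (128); the caller gets the
 * remaining 112 bytes, and libc_free() steps back over the header to
 * recover the size for lfree().
 */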

void *
libc_realloc(void *old, size_t size)
{
	private_header_t *ptr;
	void *new;

	size = (size_t)MINSIZE << getbucketnum(size + sizeof (*ptr));
	if ((ptr = lmalloc(size)) == NULL)
		return (NULL);
	ptr->private_size = size;
	new = ptr + 1;
	if (old != NULL) {
		ptr = (private_header_t *)old - 1;
		if (size >= ptr->private_size)
			size = ptr->private_size;
		(void) memcpy(new, old, size - sizeof (*ptr));
		lfree(ptr, ptr->private_size);
	}
	return (new);
}

void
libc_free(void *p)
{
	private_header_t *ptr;

	if (p) {
		ptr = (private_header_t *)p - 1;
		lfree(ptr, ptr->private_size);
	}
}

char *
libc_strdup(const char *s1)
{
	char *s2 = libc_malloc(strlen(s1) + 1);

	if (s2)
		(void) strcpy(s2, s1);
	return (s2);
}