/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * The structure of the sbrk backend:
 *
 * +-----------+
 * | sbrk_top  |
 * +-----------+
 *      | (vmem_sbrk_alloc(), vmem_free())
 *      |
 * +-----------+
 * | sbrk_heap |
 * +-----------+
 *   | | ... |  (vmem_alloc(), vmem_free())
 * <other arenas>
 *
 * The sbrk_top arena holds all controlled memory.  vmem_sbrk_alloc() handles
 * allocations from it, including growing the heap when we run low.
 *
 * Growing the heap is complicated by the fact that we have to extend the
 * sbrk_top arena (using _vmem_extend_alloc()), and that can fail.  Since
 * other threads may be actively allocating, we can't return the memory.
 *
 * Instead, we put it on a doubly-linked list, sbrk_fails, which we search
 * before calling sbrk().
 */
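
/*
 * Illustrative sketch (added for clarity, not in the original file): an
 * "other arena" from the diagram above is layered on top of sbrk_heap by
 * importing from it with vmem_alloc()/vmem_free().  Assuming the generic
 * vmem_create() interface, a consumer might do roughly:
 *
 *	vmem_t *heap = vmem_sbrk_arena(NULL, NULL);
 *	vmem_t *my_arena = vmem_create("my_arena", NULL, 0, 8,
 *	    vmem_alloc, vmem_free, heap, 0, VM_SLEEP);
 *
 *	void *buf = vmem_alloc(my_arena, 4096, VM_SLEEP);
 *	...
 *	vmem_free(my_arena, buf, 4096);
 *
 * vmem_create() and its argument list are assumed from the standard vmem
 * interface; this file only provides the sbrk-backed source arena.
 */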

#include "c_synonyms.h"
#include <errno.h>
#include <limits.h>
#include <sys/sysmacros.h>
#include <sys/mman.h>
#include <unistd.h>

#include "vmem_base.h"

#include "misc.h"

size_t vmem_sbrk_pagesize = 0; /* the preferred page size of the heap */

#define	VMEM_SBRK_MINALLOC	(64 * 1024)
size_t vmem_sbrk_minalloc = VMEM_SBRK_MINALLOC; /* minimum allocation */

static size_t real_pagesize;
static vmem_t *sbrk_heap;

typedef struct sbrk_fail {
	struct sbrk_fail *sf_next;
	struct sbrk_fail *sf_prev;
	void *sf_base;			/* == the sbrk_fail's address */
	size_t sf_size;			/* the size of this buffer */
} sbrk_fail_t;

/* circular list head; when the list is empty, it points back at itself */
static sbrk_fail_t sbrk_fails = {
	&sbrk_fails,
	&sbrk_fails,
	NULL,
	0
};

static mutex_t sbrk_faillock = DEFAULTMUTEX;

/*
 * Try to extend src with [pos, pos + size).
 *
 * If it fails, add the block to the sbrk_fails list.
 */
static void *
vmem_sbrk_extend_alloc(vmem_t *src, void *pos, size_t size, size_t alloc,
    int vmflags)
{
	sbrk_fail_t *fnext, *fprev, *fp;
	void *ret;

	ret = _vmem_extend_alloc(src, pos, size, alloc, vmflags);
	if (ret != NULL)
		return (ret);

	fp = (sbrk_fail_t *)pos;

	ASSERT(sizeof (sbrk_fail_t) <= size);

	fp->sf_base = pos;
	fp->sf_size = size;

	/* insert the failed buffer at the tail of the circular list */
	(void) mutex_lock(&sbrk_faillock);
	fp->sf_next = fnext = &sbrk_fails;
	fp->sf_prev = fprev = sbrk_fails.sf_prev;
	fnext->sf_prev = fp;
	fprev->sf_next = fp;
	(void) mutex_unlock(&sbrk_faillock);

	return (NULL);
}
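
/*
 * Illustrative note (added for clarity, not in the original file): the
 * failed buffer doubles as its own bookkeeping.  For example, if a 128K
 * extension at address A fails, the first sizeof (sbrk_fail_t) bytes at A
 * are reused as the list node, with sf_base == A and sf_size == 128K, so
 * remembering the buffer requires no further allocation.
 */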

/*
 * Try to add at least size bytes to src, using the sbrk_fails list
 */
static void *
vmem_sbrk_tryfail(vmem_t *src, size_t size, int vmflags)
{
	sbrk_fail_t *fp;

	(void) mutex_lock(&sbrk_faillock);
	for (fp = sbrk_fails.sf_next; fp != &sbrk_fails; fp = fp->sf_next) {
		if (fp->sf_size >= size) {
			fp->sf_next->sf_prev = fp->sf_prev;
			fp->sf_prev->sf_next = fp->sf_next;
			fp->sf_next = fp->sf_prev = NULL;
			break;
		}
	}
	(void) mutex_unlock(&sbrk_faillock);

	if (fp != &sbrk_fails) {
		ASSERT(fp->sf_base == (void *)fp);
		return (vmem_sbrk_extend_alloc(src, fp, fp->sf_size, size,
		    vmflags));
	}
	/*
	 * nothing of the right size on the freelist
	 */
	return (NULL);
}
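
/*
 * Worked example (added for clarity, not in the original file): if a 1MB
 * extension failed earlier and is queued on sbrk_fails, a later 64K request
 * dequeues that 1MB buffer and re-offers the entire fp->sf_size to
 * vmem_sbrk_extend_alloc(), which allocates only the 64K actually asked
 * for; the remainder becomes free space in sbrk_top, so the memory obtained
 * from the earlier sbrk() is never lost.
 */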

static void *
vmem_sbrk_alloc(vmem_t *src, size_t size, int vmflags)
{
	extern void *_sbrk_grow_aligned(size_t min_size, size_t low_align,
	    size_t high_align, size_t *actual_size);

	void *ret;
	void *buf;
	size_t buf_size;

	int old_errno = errno;

	ret = vmem_alloc(src, size, VM_NOSLEEP);
	if (ret != NULL) {
		errno = old_errno;
		return (ret);
	}

	/*
	 * The allocation failed.  We need to grow the heap.
	 *
	 * First, try to use any buffers which failed earlier.
	 */
	if (sbrk_fails.sf_next != &sbrk_fails &&
	    (ret = vmem_sbrk_tryfail(src, size, vmflags)) != NULL)
		return (ret);

	buf_size = MAX(size, vmem_sbrk_minalloc);

	/*
	 * buf_size gets overwritten with the actual allocated size
	 */
	buf = _sbrk_grow_aligned(buf_size, real_pagesize, vmem_sbrk_pagesize,
	    &buf_size);

	if (buf != MAP_FAILED) {
		ret = vmem_sbrk_extend_alloc(src, buf, buf_size, size, vmflags);
		if (ret != NULL) {
			errno = old_errno;
			return (ret);
		}
	}

	/*
	 * Growing the heap failed. The vmem_alloc() above called umem_reap().
	 */
	ASSERT((vmflags & VM_NOSLEEP) == VM_NOSLEEP);

	errno = old_errno;
	return (NULL);
}
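
/*
 * Worked example (added for clarity, not in the original file): with the
 * default vmem_sbrk_minalloc of 64K (and assuming a normal page size, since
 * vmem_sbrk_arena() rounds the minimum up to the heap page size), a 24K
 * request that misses in sbrk_top grows the heap by at least 64K, while a
 * 1MB request grows it by at least 1MB.  _sbrk_grow_aligned() may return
 * more than was asked for and reports the true amount through buf_size;
 * the entire chunk is handed to vmem_sbrk_extend_alloc(), so any excess
 * remains available in sbrk_top for later allocations.
 */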

/*
 * fork1() support
 */
void
vmem_sbrk_lockup(void)
{
	(void) mutex_lock(&sbrk_faillock);
}

void
vmem_sbrk_release(void)
{
	(void) mutex_unlock(&sbrk_faillock);
}
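
/*
 * Illustrative sketch (added for clarity, not in the original file): these
 * hooks are meant to bracket fork1() so the child does not inherit
 * sbrk_faillock in a locked state.  Within libumem they are driven by the
 * library's own fork handling; a hypothetical standalone consumer could
 * wire them up with pthread_atfork(3C) roughly like this:
 *
 *	#include <pthread.h>
 *
 *	static void
 *	sbrk_prepare(void)
 *	{
 *		vmem_sbrk_lockup();
 *	}
 *
 *	static void
 *	sbrk_resume(void)
 *	{
 *		vmem_sbrk_release();
 *	}
 *
 *	(void) pthread_atfork(sbrk_prepare, sbrk_resume, sbrk_resume);
 */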

vmem_t *
vmem_sbrk_arena(vmem_alloc_t **a_out, vmem_free_t **f_out)
{
	if (sbrk_heap == NULL) {
		size_t heap_size;

		real_pagesize = sysconf(_SC_PAGESIZE);

		heap_size = vmem_sbrk_pagesize;

		if (issetugid()) {
			heap_size = 0;
		} else if (heap_size != 0 && !ISP2(heap_size)) {
			/* log the offending value before discarding it */
			log_message("ignoring bad pagesize: 0x%p\n",
			    (void *)heap_size);
			heap_size = 0;
		}
		if (heap_size <= real_pagesize) {
			heap_size = real_pagesize;
		} else {
			struct memcntl_mha mha;
			mha.mha_cmd = MHA_MAPSIZE_BSSBRK;
			mha.mha_flags = 0;
			mha.mha_pagesize = heap_size;

			if (memcntl(NULL, 0, MC_HAT_ADVISE, (char *)&mha, 0, 0)
			    == -1) {
				log_message("unable to set MAPSIZE_BSSBRK to "
				    "0x%p\n", (void *)heap_size);
				heap_size = real_pagesize;
			}
		}
		vmem_sbrk_pagesize = heap_size;

		/* validate vmem_sbrk_minalloc */
		if (vmem_sbrk_minalloc < VMEM_SBRK_MINALLOC)
			vmem_sbrk_minalloc = VMEM_SBRK_MINALLOC;
		vmem_sbrk_minalloc = P2ROUNDUP(vmem_sbrk_minalloc, heap_size);

		sbrk_heap = vmem_init("sbrk_top", real_pagesize,
		    vmem_sbrk_alloc, vmem_free,
		    "sbrk_heap", NULL, 0, real_pagesize,
		    vmem_alloc, vmem_free);
	}

	if (a_out != NULL)
		*a_out = vmem_alloc;
	if (f_out != NULL)
		*f_out = vmem_free;

	return (sbrk_heap);
}
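
/*
 * Illustrative sketch (added for clarity, not in the original file): the
 * caller is expected to use the returned arena through the alloc/free
 * functions handed back via a_out/f_out, for example:
 *
 *	vmem_alloc_t *allocfn;
 *	vmem_free_t *freefn;
 *	vmem_t *heap = vmem_sbrk_arena(&allocfn, &freefn);
 *
 *	void *p = allocfn(heap, 64 * 1024, VM_SLEEP);
 *	...
 *	freefn(heap, p, 64 * 1024);
 *
 * Within libumem the heap-selection code in vmem_base does this; the
 * snippet above only shows the calling convention.
 */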