xref: /freebsd/lib/libthr/thread/thr_stack.c (revision 22cf89c938886d14f5796fc49f9f020c23ea8eaf)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2001 Daniel Eischen <deischen@freebsd.org>
 * Copyright (c) 2000-2001 Jason Evans <jasone@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/auxv.h>
#include <sys/mman.h>
#include <sys/queue.h>
#include <sys/resource.h>
#include <sys/sysctl.h>
#include <stdlib.h>
#include <pthread.h>
#include <link.h>

#include "thr_private.h"

/* Spare thread stack. */
struct stack {
	LIST_ENTRY(stack)	qe;		/* Stack queue linkage. */
	size_t			stacksize;	/* Stack size (rounded up). */
	size_t			guardsize;	/* Guard size. */
	void			*stackaddr;	/* Stack address. */
};
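
/*
 * There is no separate allocation for these records: _thr_stack_free()
 * carves each struct stack out of the top of the stack memory it
 * describes.
 */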

/*
 * Default sized (stack and guard) spare stack queue.  Stacks are cached
 * to avoid additional complexity managing mmap()ed stack regions.  Spare
 * stacks are used in LIFO order to increase cache locality.
 */
static LIST_HEAD(, stack)	dstackq = LIST_HEAD_INITIALIZER(dstackq);

/*
 * Miscellaneous sized (non-default stack and/or guard) spare stack queue.
 * Stacks are cached to avoid additional complexity managing mmap()ed
 * stack regions.  This list is unordered, since ordering on both stack
 * size and guard size would be more trouble than it's worth.  Stacks are
 * allocated from this cache on a first size match basis.
 */
static LIST_HEAD(, stack)	mstackq = LIST_HEAD_INITIALIZER(mstackq);

/**
 * Base address of the last stack allocated (including its red zone, if
 * there is one).  Stacks are allocated contiguously, starting beyond the
 * top of the main stack.  When a new stack is created, a red zone is
 * typically created (actually, the red zone is mapped with PROT_NONE) above
 * the top of the stack, such that the stack will not be able to grow all
 * the way to the bottom of the next stack.  This isn't fool-proof.  It is
 * possible for a stack to grow by a large amount, such that it grows into
 * the next stack, and as long as the memory within the red zone is never
 * accessed, nothing will prevent one thread stack from trouncing all over
 * the next.
 *
 * low memory
 *     . . . . . . . . . . . . . . . . . .
 *    |                                   |
 *    |             stack 3               | start of 3rd thread stack
 *    +-----------------------------------+
 *    |                                   |
 *    |       Red Zone (guard page)       | red zone for 2nd thread
 *    |                                   |
 *    +-----------------------------------+
 *    |  stack 2 - _thr_stack_default     | top of 2nd thread stack
 *    |                                   |
 *    |                                   |
 *    |                                   |
 *    |                                   |
 *    |             stack 2               |
 *    +-----------------------------------+ <-- start of 2nd thread stack
 *    |                                   |
 *    |       Red Zone                    | red zone for 1st thread
 *    |                                   |
 *    +-----------------------------------+
 *    |  stack 1 - _thr_stack_default     | top of 1st thread stack
 *    |                                   |
 *    |                                   |
 *    |                                   |
 *    |                                   |
 *    |             stack 1               |
 *    +-----------------------------------+ <-- start of 1st thread stack
 *    |                                   |   (initial value of last_stack)
 *    |       Red Zone                    |
 *    |                                   | red zone for main thread
 *    +-----------------------------------+
 *    | USRSTACK - _thr_stack_initial     | top of main thread stack
 *    |                                   | ^
 *    |                                   | |
 *    |                                   | |
 *    |                                   | | stack growth
 *    |                                   |
 *    +-----------------------------------+ <-- start of main thread stack
 *                                              (USRSTACK)
 * high memory
 *
 */
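
/*
 * For illustration only (actual sizes are ABI-dependent): assuming a 2 MB
 * default stack and a 4 KB guard page, the first allocation below maps
 * 2 MB + 4 KB ending at _usrstack - _thr_stack_initial - _thr_guard_default,
 * and each subsequent allocation steps last_stack down by another
 * stacksize + guardsize bytes.
 */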
static char *last_stack = NULL;

/*
 * Round size up to the nearest multiple of _thr_page_size.
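 * For example, with a 4096-byte page size, round_up(5000) returns 8192,
 * while round_up(8192) is returned unchanged.
 */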
static inline size_t
round_up(size_t size)
{
	if (size % _thr_page_size != 0)
		size = ((size / _thr_page_size) + 1) *
		    _thr_page_size;
	return size;
}

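/*
 * Re-apply the stack protection reported by rtld to the given thread's
 * stack pages, leaving its guard region untouched.
 */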
void
_thr_stack_fix_protection(struct pthread *thrd)
{

	mprotect((char *)thrd->attr.stackaddr_attr +
	    round_up(thrd->attr.guardsize_attr),
	    round_up(thrd->attr.stacksize_attr),
	    _rtld_get_stack_prot());
}

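/*
 * Before any threads have been created, only the main (initial) stack
 * exists, so only that region needs its protection updated.
 */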
static void
singlethread_map_stacks_exec(void)
{
	char *usrstack;
	size_t stacksz;

	if (!__thr_get_main_stack_base(&usrstack) ||
	    !__thr_get_main_stack_lim(&stacksz))
		return;
	mprotect(usrstack - stacksz, stacksz, _rtld_get_stack_prot());
}

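/*
 * Walk every stack libthr knows about (both spare-stack caches, threads
 * awaiting gc, and live threads) and re-apply the current rtld stack
 * protection; rtld typically triggers this when a newly loaded object
 * requires executable stacks.
 */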
void
__thr_map_stacks_exec(void)
{
	struct pthread *curthread, *thrd;
	struct stack *st;

	if (!_thr_is_inited()) {
		singlethread_map_stacks_exec();
		return;
	}
	curthread = _get_curthread();
	THREAD_LIST_RDLOCK(curthread);
	LIST_FOREACH(st, &mstackq, qe)
		mprotect((char *)st->stackaddr + st->guardsize, st->stacksize,
		    _rtld_get_stack_prot());
	LIST_FOREACH(st, &dstackq, qe)
		mprotect((char *)st->stackaddr + st->guardsize, st->stacksize,
		    _rtld_get_stack_prot());
	TAILQ_FOREACH(thrd, &_thread_gc_list, gcle)
		_thr_stack_fix_protection(thrd);
	TAILQ_FOREACH(thrd, &_thread_list, tle)
		_thr_stack_fix_protection(thrd);
	THREAD_LIST_UNLOCK(curthread);
}

int
_thr_stack_alloc(struct pthread_attr *attr)
{
	struct pthread *curthread = _get_curthread();
	struct stack *spare_stack;
	size_t stacksize;
	size_t guardsize;
	char *stackaddr;

	/*
	 * Round up stack size to nearest multiple of _thr_page_size so
	 * that mmap() will work.  If the stack size is not an even
	 * multiple, we end up initializing things such that there is
	 * unused space above the beginning of the stack, so the stack
	 * sits snugly against its guard.
	 */
	stacksize = round_up(attr->stacksize_attr);
	guardsize = round_up(attr->guardsize_attr);

	attr->stackaddr_attr = NULL;
	attr->flags &= ~THR_STACK_USER;

	/*
	 * Use the garbage collector lock for synchronization of the
	 * spare stack lists and allocations from usrstack.
	 */
	THREAD_LIST_WRLOCK(curthread);
	/*
	 * If the stack and guard sizes are default, try to allocate a stack
	 * from the default-size stack cache:
	 */
	if ((stacksize == THR_STACK_DEFAULT) &&
	    (guardsize == _thr_guard_default)) {
		if ((spare_stack = LIST_FIRST(&dstackq)) != NULL) {
			/* Use the spare stack. */
			LIST_REMOVE(spare_stack, qe);
			attr->stackaddr_attr = spare_stack->stackaddr;
		}
	} else {
		/*
		 * The user specified a non-default stack and/or guard size,
		 * so try to allocate a stack from the non-default size stack
		 * cache, using the rounded up stack size (stacksize) in the
		 * search:
		 */
		LIST_FOREACH(spare_stack, &mstackq, qe) {
			if (spare_stack->stacksize == stacksize &&
			    spare_stack->guardsize == guardsize) {
				LIST_REMOVE(spare_stack, qe);
				attr->stackaddr_attr = spare_stack->stackaddr;
				break;
			}
		}
	}
	if (attr->stackaddr_attr != NULL) {
		/* A cached stack was found.  Release the lock. */
		THREAD_LIST_UNLOCK(curthread);
	} else {
		/*
		 * Allocate a stack from or below usrstack, depending
		 * on the LIBPTHREAD_BIGSTACK_MAIN env variable.
		 */
		if (last_stack == NULL)
			last_stack = _usrstack - _thr_stack_initial -
			    _thr_guard_default;

		/* Allocate a new stack. */
		stackaddr = last_stack - stacksize - guardsize;

		/*
		 * Even if stack allocation fails, we don't want to try to
		 * use this location again, so unconditionally decrement
		 * last_stack.  Under normal operating conditions, the most
		 * likely reason for an mmap() error is a stack overflow of
		 * the adjacent thread stack.
		 */
		last_stack -= (stacksize + guardsize);

		/* Release the lock before mmap'ing it. */
		THREAD_LIST_UNLOCK(curthread);

		/*
		 * Map the stack and guard page together, and split the
		 * guard page from the allocated space.
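		 * (MAP_STACK requests a grow-down stack mapping from the
		 * kernel; remapping the low end PROT_NONE afterwards turns
		 * a stack overflow into a fault instead of silent corruption
		 * of whatever lies below.)
		 */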
		if ((stackaddr = mmap(stackaddr, stacksize + guardsize,
		     _rtld_get_stack_prot(), MAP_STACK,
		     -1, 0)) != MAP_FAILED &&
		    (guardsize == 0 ||
		     mprotect(stackaddr, guardsize, PROT_NONE) == 0)) {
			stackaddr += guardsize;
		} else {
			if (stackaddr != MAP_FAILED)
				munmap(stackaddr, stacksize + guardsize);
			stackaddr = NULL;
		}
		attr->stackaddr_attr = stackaddr;
	}
	if (attr->stackaddr_attr != NULL)
		return (0);
	else
		return (-1);
}

/* This function must be called with _thread_list_lock held. */
void
_thr_stack_free(struct pthread_attr *attr)
{
	struct stack *spare_stack;

	if ((attr != NULL) && ((attr->flags & THR_STACK_USER) == 0) &&
	    (attr->stackaddr_attr != NULL)) {
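		/*
		 * Carve the cache record out of the top of the stack being
		 * freed; the memory is idle while the stack sits on a spare
		 * queue, so no separate allocation is needed.
		 */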
		spare_stack = (struct stack *)
			((char *)attr->stackaddr_attr +
			attr->stacksize_attr - sizeof(struct stack));
		spare_stack->stacksize = round_up(attr->stacksize_attr);
		spare_stack->guardsize = round_up(attr->guardsize_attr);
		spare_stack->stackaddr = attr->stackaddr_attr;

		if (spare_stack->stacksize == THR_STACK_DEFAULT &&
		    spare_stack->guardsize == _thr_guard_default) {
			/* Default stack/guard size. */
			LIST_INSERT_HEAD(&dstackq, spare_stack, qe);
		} else {
			/* Non-default stack/guard size. */
			LIST_INSERT_HEAD(&mstackq, spare_stack, qe);
		}
		attr->stackaddr_attr = NULL;
	}
}