/*
 * Copyright (c) 2001 Daniel Eischen <deischen@freebsd.org>
 * Copyright (c) 2000-2001 Jason Evans <jasone@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/types.h>
#include <sys/mman.h>
#include <sys/queue.h>
#include <stdlib.h>
#include <pthread.h>

#include "thr_private.h"

/* Spare thread stack. */
struct stack {
	LIST_ENTRY(stack)	qe;		/* Stack queue linkage. */
	size_t			stacksize;	/* Stack size (rounded up). */
	size_t			guardsize;	/* Guard size. */
	void			*stackaddr;	/* Stack address. */
};

/*
 * Default sized (stack and guard) spare stack queue.  Stacks are cached
 * to avoid additional complexity managing mmap()ed stack regions.  Spare
 * stacks are used in LIFO order to increase cache locality.
 */
static LIST_HEAD(, stack)	dstackq = LIST_HEAD_INITIALIZER(dstackq);

/*
 * Miscellaneous sized (non-default stack and/or guard) spare stack queue.
 * Stacks are cached to avoid additional complexity managing mmap()ed
 * stack regions.  This list is unordered, since ordering on both stack
 * size and guard size would be more trouble than it's worth.  Stacks are
 * allocated from this cache on a first size match basis.
 */
static LIST_HEAD(, stack)	mstackq = LIST_HEAD_INITIALIZER(mstackq);

/*
 * Base address of the last stack allocated (including its red zone, if
 * there is one).  Stacks are allocated contiguously, starting beyond the
 * top of the main stack.  When a new stack is created, a red zone is
 * typically created (actually, the red zone is mapped with PROT_NONE) above
 * the top of the stack, such that the stack will not be able to grow all
 * the way to the bottom of the next stack.  This isn't fool-proof.  It is
 * possible for a stack to grow by a large amount, such that it grows into
 * the next stack, and as long as the memory within the red zone is never
 * accessed, nothing will prevent one thread stack from trouncing all over
 * the next.
 *
 * low memory
 *     . . . . . . . . . . . . . . . . . .
 *    |                                   |
 *    |             stack 3               | start of 3rd thread stack
 *    +-----------------------------------+
 *    |                                   |
 *    |       Red Zone (guard page)       | red zone for 2nd thread
 *    |                                   |
 *    +-----------------------------------+
 *    |  stack 2 - _thr_stack_default     | top of 2nd thread stack
 *    |                                   |
 *    |                                   |
 *    |                                   |
 *    |                                   |
 *    |             stack 2               |
 *    +-----------------------------------+ <-- start of 2nd thread stack
 *    |                                   |
 *    |       Red Zone                    | red zone for 1st thread
 *    |                                   |
 *    +-----------------------------------+
 *    |  stack 1 - _thr_stack_default     | top of 1st thread stack
 *    |                                   |
 *    |                                   |
 *    |                                   |
 *    |                                   |
 *    |             stack 1               |
 *    +-----------------------------------+ <-- start of 1st thread stack
 *    |                                   |   (initial value of last_stack)
 *    |       Red Zone                    |
 *    |                                   | red zone for main thread
 *    +-----------------------------------+
 *    | USRSTACK - _thr_stack_initial     | top of main thread stack
 *    |                                   | ^
 *    |                                   | |
 *    |                                   | |
 *    |                                   | | stack growth
 *    |                                   |
 *    +-----------------------------------+ <-- start of main thread stack
 *                                              (USRSTACK)
 * high memory
 *
 */
static char *last_stack = NULL;

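/*
 * Worked example with assumed values (illustrative, not FreeBSD
 * defaults): if _usrstack == 0x800000000000, _thr_stack_initial ==
 * 4 MB (0x400000), and _thr_guard_default == 4 KB (0x1000), the first
 * call to _thr_stack_alloc() initializes last_stack to
 * 0x800000000000 - 0x400000 - 0x1000 == 0x7fffffbff000.  A 2 MB
 * (0x200000) stack with a one-page guard is then carved out below
 * that, at 0x7fffffbff000 - 0x200000 - 0x1000 == 0x7fffff9fe000,
 * with the guard at the low end of the mapping.
 */
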
/*
 * Round size up to the nearest multiple of
 * _thr_page_size.
 */
static inline size_t
round_up(size_t size)
{
	if (size % _thr_page_size != 0)
		size = ((size / _thr_page_size) + 1) *
		    _thr_page_size;
	return size;
}
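
/*
 * For example, with _thr_page_size == 4096: round_up(1) == 4096,
 * round_up(4096) == 4096, and round_up(4097) == 8192.
 */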

int
_thr_stack_alloc(struct pthread_attr *attr)
{
	struct pthread *curthread = _get_curthread();
	struct stack *spare_stack;
	size_t stacksize;
	size_t guardsize;
	char *stackaddr;

	/*
	 * Round up stack size to nearest multiple of _thr_page_size so
	 * that mmap() will work.  If the stack size is not an even
	 * multiple, we end up initializing things such that there is
	 * unused space above the beginning of the stack, so the stack
	 * sits snugly against its guard.
	 */
	stacksize = round_up(attr->stacksize_attr);
	guardsize = round_up(attr->guardsize_attr);

	attr->stackaddr_attr = NULL;
	attr->flags &= ~THR_STACK_USER;

	/*
	 * Use the garbage collector lock for synchronization of the
	 * spare stack lists and allocations from usrstack.
	 */
	THREAD_LIST_LOCK(curthread);
	/*
	 * If the stack and guard sizes are default, try to allocate a stack
	 * from the default-size stack cache:
	 */
	if ((stacksize == THR_STACK_DEFAULT) &&
	    (guardsize == _thr_guard_default)) {
		if ((spare_stack = LIST_FIRST(&dstackq)) != NULL) {
			/* Use the spare stack. */
			LIST_REMOVE(spare_stack, qe);
			attr->stackaddr_attr = spare_stack->stackaddr;
		}
	}
	/*
	 * The user specified a non-default stack and/or guard size, so try to
	 * allocate a stack from the non-default size stack cache, using the
	 * rounded up stack size (stacksize) in the search:
	 */
	else {
		LIST_FOREACH(spare_stack, &mstackq, qe) {
			if (spare_stack->stacksize == stacksize &&
			    spare_stack->guardsize == guardsize) {
				LIST_REMOVE(spare_stack, qe);
				attr->stackaddr_attr = spare_stack->stackaddr;
				break;
			}
		}
	}
	if (attr->stackaddr_attr != NULL) {
		/* A cached stack was found.  Release the lock. */
		THREAD_LIST_UNLOCK(curthread);
	} else {
		/* Allocate a stack from usrstack. */
		if (last_stack == NULL)
			last_stack = _usrstack - _thr_stack_initial -
			    _thr_guard_default;

		/* Allocate a new stack. */
		stackaddr = last_stack - stacksize - guardsize;

		/*
		 * Even if stack allocation fails, we don't want to try to
		 * use this location again, so unconditionally decrement
		 * last_stack.  Under normal operating conditions, the most
		 * likely reason for an mmap() error is a stack overflow of
		 * the adjacent thread stack.
		 */
		last_stack -= (stacksize + guardsize);

		/* Release the lock before mmap'ing it. */
		THREAD_LIST_UNLOCK(curthread);

		/*
		 * Map the stack and guard page together, and split the
		 * guard page from the allocated space:
		 */
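		/*
		 * On success, [stackaddr, stackaddr + guardsize) becomes
		 * the PROT_NONE red zone and [stackaddr + guardsize,
		 * stackaddr + guardsize + stacksize) the usable stack;
		 * stackaddr is then advanced past the guard, so that
		 * stackaddr_attr ends up pointing at the usable region.
		 */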
		if ((stackaddr = mmap(stackaddr, stacksize + guardsize,
		     PROT_READ | PROT_WRITE, MAP_STACK,
		     -1, 0)) != MAP_FAILED &&
		    (guardsize == 0 ||
		     mprotect(stackaddr, guardsize, PROT_NONE) == 0)) {
			stackaddr += guardsize;
		} else {
			if (stackaddr != MAP_FAILED)
				munmap(stackaddr, stacksize + guardsize);
			stackaddr = NULL;
		}
		attr->stackaddr_attr = stackaddr;
	}
	if (attr->stackaddr_attr != NULL)
		return (0);
	else
		return (-1);
}
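
/*
 * Illustrative caller sketch (an assumption about thr_create()-side
 * usage, not verbatim libthr code): a user-supplied stack bypasses
 * this allocator and is marked THR_STACK_USER so that _thr_stack_free()
 * will not cache it.
 *
 *	if (pattr->stackaddr_attr != NULL) {
 *		pattr->guardsize_attr = 0;
 *		pattr->flags |= THR_STACK_USER;
 *	} else if (_thr_stack_alloc(pattr) != 0)
 *		return (EAGAIN);
 */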

/* This function must be called with _thread_list_lock held. */
void
_thr_stack_free(struct pthread_attr *attr)
{
	struct stack *spare_stack;

	if ((attr != NULL) && ((attr->flags & THR_STACK_USER) == 0)
	    && (attr->stackaddr_attr != NULL)) {
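		/*
		 * The bookkeeping header is stored in the highest
		 * sizeof(struct stack) bytes of the stack being cached;
		 * the freed stack is assumed to be no longer in use, so
		 * the write is safe, and the mapping stays intact for
		 * later reuse.
		 */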
		spare_stack = (struct stack *)
		    ((char *)attr->stackaddr_attr +
		    attr->stacksize_attr - sizeof(struct stack));
		spare_stack->stacksize = round_up(attr->stacksize_attr);
		spare_stack->guardsize = round_up(attr->guardsize_attr);
		spare_stack->stackaddr = attr->stackaddr_attr;

		if (spare_stack->stacksize == THR_STACK_DEFAULT &&
		    spare_stack->guardsize == _thr_guard_default) {
			/* Default stack/guard size. */
			LIST_INSERT_HEAD(&dstackq, spare_stack, qe);
		} else {
			/* Non-default stack/guard size. */
			LIST_INSERT_HEAD(&mstackq, spare_stack, qe);
		}
		attr->stackaddr_attr = NULL;
	}
}
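
/*
 * Illustrative usage sketch (assumed, not verbatim libthr code; td is
 * a hypothetical struct pthread pointer).  The thread list lock must
 * be held across the call, per the comment above:
 *
 *	THREAD_LIST_LOCK(curthread);
 *	_thr_stack_free(&td->attr);
 *	THREAD_LIST_UNLOCK(curthread);
 */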
257