/*
 * Copyright (c) 2001 Daniel Eischen <deischen@freebsd.org>
 * Copyright (c) 2000-2001 Jason Evans <jasone@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <stdlib.h>
#include <pthread.h>
#include "thr_private.h"

/* Spare thread stack. */
struct stack {
	LIST_ENTRY(stack)	qe;		/* Stack queue linkage. */
	size_t			stacksize;	/* Stack size (rounded up). */
	size_t			guardsize;	/* Guard size. */
	void			*stackaddr;	/* Stack address. */
};

/*
 * Default sized (stack and guard) spare stack queue.  Stacks are cached to
 * avoid additional complexity managing mmap()ed stack regions.  Spare stacks
 * are used in LIFO order to increase cache locality.
 */
static LIST_HEAD(, stack)	_dstackq = LIST_HEAD_INITIALIZER(_dstackq);

/*
 * Miscellaneous sized (non-default stack and/or guard) spare stack queue.
 * Stacks are cached to avoid additional complexity managing mmap()ed stack
 * regions.  This list is unordered, since ordering on both stack size and
 * guard size would be more trouble than it's worth.  Stacks are allocated
 * from this cache on a first size match basis.
 */
static LIST_HEAD(, stack)	_mstackq = LIST_HEAD_INITIALIZER(_mstackq);

/**
 * Base address of the last stack allocated (including its red zone, if there is
 * one).  Stacks are allocated contiguously, starting beyond the top of the main
 * stack.  When a new stack is created, a red zone is typically created
 * (actually, the red zone is simply left unmapped) above the top of the stack,
 * such that the stack will not be able to grow all the way to the bottom of the
 * next stack.  This isn't foolproof: a stack can grow by a large amount all at
 * once, jumping past the red zone into the next stack, and as long as the
 * memory within the red zone is never actually accessed, nothing prevents one
 * thread stack from trampling all over the next.
 *
 * low memory
 *     . . . . . . . . . . . . . . . . . .
 *    |                                   |
 *    |             stack 3               | start of 3rd thread stack
 *    +-----------------------------------+
 *    |                                   |
 *    |       Red Zone (guard page)       | red zone for 2nd thread
 *    |                                   |
 *    +-----------------------------------+
 *    |  stack 2 - PTHREAD_STACK_DEFAULT  | top of 2nd thread stack
 *    |                                   |
 *    |                                   |
 *    |                                   |
 *    |                                   |
 *    |             stack 2               |
 *    +-----------------------------------+ <-- start of 2nd thread stack
 *    |                                   |
 *    |       Red Zone                    | red zone for 1st thread
 *    |                                   |
 *    +-----------------------------------+
 *    |  stack 1 - PTHREAD_STACK_DEFAULT  | top of 1st thread stack
 *    |                                   |
 *    |                                   |
 *    |                                   |
 *    |                                   |
 *    |             stack 1               |
 *    +-----------------------------------+ <-- start of 1st thread stack
 *    |                                   |   (initial value of last_stack)
 *    |       Red Zone                    |
 *    |                                   | red zone for main thread
 *    +-----------------------------------+
 *    | USRSTACK - PTHREAD_STACK_INITIAL  | top of main thread stack
 *    |                                   | ^
 *    |                                   | |
 *    |                                   | |
 *    |                                   | | stack growth
 *    |                                   |
 *    +-----------------------------------+ <-- start of main thread stack
 *                                              (USRSTACK)
 * high memory
 */
static void *	last_stack;

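/*
 * Worked example (hypothetical numbers, for illustration only): on a
 * 32-bit machine where _usrstack == 0xc0000000, PTHREAD_STACK_INITIAL ==
 * 0x800000 (8MB) and _pthread_guard_default == 0x1000 (one 4KB page), the
 * first call to _thread_stack_alloc() initializes last_stack to
 *
 *	0xc0000000 - 0x800000 - 0x1000 == 0xbf7ff000
 *
 * which is the base of the main thread's red zone; the first thread stack
 * is then carved out of the stack_size bytes just below that address.
 */
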
void *
_thread_stack_alloc(size_t stacksize, size_t guardsize)
{
	void		*stack = NULL;
	struct stack	*spare_stack;
	size_t		stack_size;

	/*
	 * Round up the stack size to the nearest multiple of
	 * _pthread_page_size, so that mmap() will work.  If the stack size is
	 * not an even multiple, we end up initializing things such that there
	 * is unused space above the beginning of the stack, so the stack sits
	 * snugly against its guard.
	 */
	if (stacksize % _pthread_page_size != 0)
		stack_size = ((stacksize / _pthread_page_size) + 1) *
		    _pthread_page_size;
	else
		stack_size = stacksize;
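	/*
	 * Equivalently (a sketch; <sys/param.h> is already included, and its
	 * roundup() macro computes the same value for any positive page
	 * size):
	 *
	 *	stack_size = roundup(stacksize, _pthread_page_size);
	 */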

	/*
	 * If the stack and guard sizes are default, try to allocate a stack
	 * from the default-size stack cache:
	 */
	if (stack_size == PTHREAD_STACK_DEFAULT &&
	    guardsize == _pthread_guard_default) {
		/*
		 * Use the garbage collector mutex for synchronization of the
		 * spare stack list.
		 */
		STACK_LOCK;

		if ((spare_stack = LIST_FIRST(&_dstackq)) != NULL) {
			/* Use the spare stack. */
			LIST_REMOVE(spare_stack, qe);
			stack = spare_stack->stackaddr;
		}

		/* Unlock the garbage collector mutex. */
		STACK_UNLOCK;
	}
	/*
	 * The user specified a non-default stack and/or guard size, so try to
	 * allocate a stack from the non-default size stack cache, using the
	 * rounded up stack size (stack_size) in the search:
	 */
	else {
		/*
		 * Use the garbage collector mutex for synchronization of the
		 * spare stack list.
		 */
		STACK_LOCK;

		LIST_FOREACH(spare_stack, &_mstackq, qe) {
			if (spare_stack->stacksize == stack_size &&
			    spare_stack->guardsize == guardsize) {
				LIST_REMOVE(spare_stack, qe);
				stack = spare_stack->stackaddr;
				break;
			}
		}

		/* Unlock the garbage collector mutex. */
		STACK_UNLOCK;
	}

	/* If the stack was not obtained from a cache, allocate a new one: */
	if (stack == NULL) {
		if (last_stack == NULL)
			last_stack = _usrstack - PTHREAD_STACK_INITIAL -
			    _pthread_guard_default;

		/* Allocate a new stack. */
		stack = last_stack - stack_size;

		/*
		 * Even if stack allocation fails, we don't want to try to use
		 * this location again, so unconditionally decrement
		 * last_stack.  Under normal operating conditions, the most
		 * likely reason for an mmap() error is a stack overflow of the
		 * adjacent thread stack.
		 */
		last_stack -= (stack_size + guardsize);

		/* Map the stack; the red zone below it is simply left unmapped. */
		if (mmap(stack, stack_size, PROT_READ | PROT_WRITE, MAP_STACK,
		    -1, 0) == MAP_FAILED)
			stack = NULL;
	}

	return (stack);
}
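
/*
 * Usage sketch (illustrative only, compiled out; not part of libthr): how
 * a caller such as the pthread_create() path might pair the two functions
 * in this file.  The function name below is hypothetical, and
 * _thread_stack_free() additionally requires the 'dead thread list' lock.
 */
#if 0
static void
example_stack_roundtrip(void)
{
	void	*stack;

	stack = _thread_stack_alloc(PTHREAD_STACK_DEFAULT,
	    _pthread_guard_default);
	if (stack == NULL)
		return;		/* mmap() failed; caller reports EAGAIN. */

	/* ... run a thread on this stack until it exits ... */

	/* Return the region to the default-size cache for reuse. */
	_thread_stack_free(stack, PTHREAD_STACK_DEFAULT,
	    _pthread_guard_default);
}
#endif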

/* This function must be called with the 'dead thread list' lock held. */
void
_thread_stack_free(void *stack, size_t stacksize, size_t guardsize)
{
	struct stack	*spare_stack;

	/*
	 * Store the bookkeeping record inside the stack itself, at the top
	 * of the region being freed, so no separate allocation is needed.
	 */
	spare_stack = (stack + stacksize - sizeof(struct stack));
	/* Round stacksize up to nearest multiple of _pthread_page_size. */
	if (stacksize % _pthread_page_size != 0) {
		spare_stack->stacksize =
		    ((stacksize / _pthread_page_size) + 1) *
		    _pthread_page_size;
	} else
		spare_stack->stacksize = stacksize;
	spare_stack->guardsize = guardsize;
	spare_stack->stackaddr = stack;

	if (spare_stack->stacksize == PTHREAD_STACK_DEFAULT &&
	    spare_stack->guardsize == _pthread_guard_default) {
		/* Default stack/guard size. */
		LIST_INSERT_HEAD(&_dstackq, spare_stack, qe);
	} else {
		/* Non-default stack/guard size. */
		LIST_INSERT_HEAD(&_mstackq, spare_stack, qe);
	}
}
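
/*
 * Behavior sketch (illustrative only, compiled out): because the
 * default-size cache is used in LIFO order, a stack freed with the default
 * sizes and then reallocated comes back at the same address.  Locking
 * requirements are ignored here, and the function name is hypothetical.
 */
#if 0
static void
example_lifo_reuse(void)
{
	void	*a, *b;

	a = _thread_stack_alloc(PTHREAD_STACK_DEFAULT,
	    _pthread_guard_default);
	_thread_stack_free(a, PTHREAD_STACK_DEFAULT, _pthread_guard_default);
	b = _thread_stack_alloc(PTHREAD_STACK_DEFAULT,
	    _pthread_guard_default);
	/* Now a == b: the most recently freed spare stack was reused. */
}
#endif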