/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2001 Daniel Eischen <deischen@freebsd.org>
 * Copyright (c) 2000-2001 Jason Evans <jasone@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/auxv.h>
#include <sys/mman.h>
#include <sys/queue.h>
#include <sys/resource.h>
#include <sys/sysctl.h>
#include <stdlib.h>
#include <pthread.h>
#include <link.h>

#include "thr_private.h"

/* Spare thread stack. */
struct stack {
	LIST_ENTRY(stack)	qe;		/* Stack queue linkage. */
	size_t			stacksize;	/* Stack size (rounded up). */
	size_t			guardsize;	/* Guard size. */
	void			*stackaddr;	/* Stack address. */
};

/*
 * Default sized (stack and guard) spare stack queue.  Stacks are cached
 * to avoid additional complexity managing mmap()ed stack regions.  Spare
 * stacks are used in LIFO order to increase cache locality.
 */
static LIST_HEAD(, stack)	dstackq = LIST_HEAD_INITIALIZER(dstackq);

/*
 * Miscellaneous sized (non-default stack and/or guard) spare stack queue.
 * Stacks are cached to avoid additional complexity managing mmap()ed
 * stack regions.  This list is unordered, since ordering on both stack
 * size and guard size would be more trouble than it's worth.  Stacks are
 * allocated from this cache on a first size match basis.
 */
static LIST_HEAD(, stack)	mstackq = LIST_HEAD_INITIALIZER(mstackq);

/*
 * Base address of the last stack allocated (including its red zone, if
 * there is one).  Stacks are allocated contiguously, starting beyond the
 * top of the main stack.  When a new stack is created, a red zone is
 * typically created (actually, the red zone is mapped with PROT_NONE) above
 * the top of the stack, such that the stack will not be able to grow all
 * the way to the bottom of the next stack.  This isn't fool-proof.  It is
 * possible for a stack to grow by a large amount, such that it grows into
 * the next stack, and as long as the memory within the red zone is never
 * accessed, nothing will prevent one thread stack from trouncing all over
 * the next.
 *
 * low memory
 *     . . . . . . . . . . . . . . . . . .
 *    |                                   |
 *    |             stack 3               | start of 3rd thread stack
 *    +-----------------------------------+
 *    |                                   |
 *    |       Red Zone (guard page)       | red zone for 2nd thread
 *    |                                   |
 *    +-----------------------------------+
 *    |  stack 2 - _thr_stack_default     | top of 2nd thread stack
 *    |                                   |
 *    |                                   |
 *    |                                   |
 *    |                                   |
 *    |             stack 2               |
 *    +-----------------------------------+ <-- start of 2nd thread stack
 *    |                                   |
 *    |       Red Zone                    | red zone for 1st thread
 *    |                                   |
 *    +-----------------------------------+
 *    |  stack 1 - _thr_stack_default     | top of 1st thread stack
 *    |                                   |
 *    |                                   |
 *    |                                   |
 *    |                                   |
 *    |             stack 1               |
 *    +-----------------------------------+ <-- start of 1st thread stack
 *    |                                   | (initial value of last_stack)
 *    |       Red Zone                    |
 *    |                                   | red zone for main thread
 *    +-----------------------------------+
 *    |  USRSTACK - _thr_stack_initial    | top of main thread stack
 *    |                                   | ^
 *    |                                   | |
 *    |                                   | |
 *    |                                   | | stack growth
 *    |                                   |
 *    +-----------------------------------+ <-- start of main thread stack
 *                                              (USRSTACK)
 * high memory
 *
 */
static char *last_stack = NULL;

/*
 * Round size up to the nearest multiple of
 * _thr_page_size.
 */
static inline size_t
round_up(size_t size)
{
	if (size % _thr_page_size != 0)
		size = ((size / _thr_page_size) + 1) *
		    _thr_page_size;
	return (size);
}
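
/*
 * A quick worked example of round_up(), assuming for illustration a
 * _thr_page_size of 4096 (the real value is the system page size,
 * discovered at startup):
 *
 *	round_up(1)    == 4096	(rounded up to one full page)
 *	round_up(4096) == 4096	(already a multiple; unchanged)
 *	round_up(4097) == 8192	(spills into a second page)
 */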

void
_thr_stack_fix_protection(struct pthread *thrd)
{

	mprotect((char *)thrd->attr.stackaddr_attr +
	    round_up(thrd->attr.guardsize_attr),
	    round_up(thrd->attr.stacksize_attr),
	    _rtld_get_stack_prot());
}

static void
singlethread_map_stacks_exec(void)
{
	char *usrstack;
	size_t stacksz;

	if (!__thr_get_main_stack_base(&usrstack) ||
	    !__thr_get_main_stack_lim(&stacksz))
		return;
	mprotect(usrstack - stacksz, stacksz, _rtld_get_stack_prot());
}

void
__thr_map_stacks_exec(void)
{
	struct pthread *curthread, *thrd;
	struct stack *st;

	if (!_thr_is_inited()) {
		singlethread_map_stacks_exec();
		return;
	}
	curthread = _get_curthread();
	THREAD_LIST_RDLOCK(curthread);
	LIST_FOREACH(st, &mstackq, qe)
		mprotect((char *)st->stackaddr + st->guardsize, st->stacksize,
		    _rtld_get_stack_prot());
	LIST_FOREACH(st, &dstackq, qe)
		mprotect((char *)st->stackaddr + st->guardsize, st->stacksize,
		    _rtld_get_stack_prot());
	TAILQ_FOREACH(thrd, &_thread_gc_list, gcle)
		_thr_stack_fix_protection(thrd);
	TAILQ_FOREACH(thrd, &_thread_list, tle)
		_thr_stack_fix_protection(thrd);
	THREAD_LIST_UNLOCK(curthread);
}

int
_thr_stack_alloc(struct pthread_attr *attr)
{
	struct pthread *curthread = _get_curthread();
	struct stack *spare_stack;
	size_t stacksize;
	size_t guardsize;
	char *stackaddr;

	/*
	 * Round up stack size to nearest multiple of _thr_page_size so
	 * that mmap() will work.  If the stack size is not an even
	 * multiple, we end up initializing things such that there is
	 * unused space above the beginning of the stack, so the stack
	 * sits snugly against its guard.
	 */
	stacksize = round_up(attr->stacksize_attr);
	guardsize = round_up(attr->guardsize_attr);

	attr->stackaddr_attr = NULL;
	attr->flags &= ~THR_STACK_USER;

	/*
	 * Use the garbage collector lock for synchronization of the
	 * spare stack lists and allocations from usrstack.
	 */
	THREAD_LIST_WRLOCK(curthread);
	/*
	 * If the stack and guard sizes are default, try to allocate a stack
	 * from the default-size stack cache:
	 */
	if ((stacksize == THR_STACK_DEFAULT) &&
	    (guardsize == _thr_guard_default)) {
		if ((spare_stack = LIST_FIRST(&dstackq)) != NULL) {
			/* Use the spare stack. */
			LIST_REMOVE(spare_stack, qe);
			attr->stackaddr_attr = spare_stack->stackaddr;
		}
	}
	/*
	 * The user specified a non-default stack and/or guard size, so try to
	 * allocate a stack from the non-default size stack cache, using the
	 * rounded up stack size (stacksize) in the search:
	 */
	else {
		LIST_FOREACH(spare_stack, &mstackq, qe) {
			if (spare_stack->stacksize == stacksize &&
			    spare_stack->guardsize == guardsize) {
				LIST_REMOVE(spare_stack, qe);
				attr->stackaddr_attr = spare_stack->stackaddr;
				break;
			}
		}
	}
	if (attr->stackaddr_attr != NULL) {
		/* A cached stack was found.  Release the lock. */
		THREAD_LIST_UNLOCK(curthread);
	}
	else {
		/*
		 * Allocate a stack from or below usrstack, depending
		 * on the LIBPTHREAD_BIGSTACK_MAIN env variable.
		 */
		if (last_stack == NULL)
			last_stack = _usrstack - _thr_stack_initial -
			    _thr_guard_default;

		/* Allocate a new stack. */
		stackaddr = last_stack - stacksize - guardsize;

		/*
		 * Even if stack allocation fails, we don't want to try to
		 * use this location again, so unconditionally decrement
		 * last_stack.  Under normal operating conditions, the most
		 * likely reason for an mmap() error is a stack overflow of
		 * the adjacent thread stack.
		 */
		last_stack -= (stacksize + guardsize);

		/* Release the lock before mmap'ing the stack. */
		THREAD_LIST_UNLOCK(curthread);

		/*
		 * Map the stack and guard page together, then split the
		 * guard page from the allocated space:
		 */
		if ((stackaddr = mmap(stackaddr, stacksize + guardsize,
		    _rtld_get_stack_prot(), MAP_STACK,
		    -1, 0)) != MAP_FAILED &&
		    (guardsize == 0 ||
		    mprotect(stackaddr, guardsize, PROT_NONE) == 0)) {
			stackaddr += guardsize;
		} else {
			if (stackaddr != MAP_FAILED)
				munmap(stackaddr, stacksize + guardsize);
			stackaddr = NULL;
		}
		attr->stackaddr_attr = stackaddr;
	}
	if (attr->stackaddr_attr != NULL)
		return (0);
	else
		return (-1);
}
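
/*
 * A worked example of the address arithmetic above, using made-up
 * numbers: assume _usrstack == 0x800000000000, _thr_stack_initial ==
 * 4 MB, _thr_guard_default == 4 KB, and a request for a 1 MB stack
 * with a 4 KB guard.  The first allocation then computes
 *
 *	last_stack = 0x800000000000 - 0x400000 - 0x1000
 *	           = 0x7fffffbff000
 *	stackaddr  = last_stack - 0x100000 - 0x1000
 *	           = 0x7fffffafe000
 *
 * mmap()s 0x101000 bytes at stackaddr, marks the lowest page
 * PROT_NONE, and hands the caller 0x7fffffaff000, the first usable
 * byte above the guard.  The next allocation starts below the new
 * last_stack of 0x7fffffafe000, so stacks march toward low memory
 * exactly as in the diagram above the declaration of last_stack.
 */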

/* This function must be called with _thread_list_lock held. */
void
_thr_stack_free(struct pthread_attr *attr)
{
	struct stack *spare_stack;

	if ((attr != NULL) && ((attr->flags & THR_STACK_USER) == 0)
	    && (attr->stackaddr_attr != NULL)) {
		spare_stack = (struct stack *)
		    ((char *)attr->stackaddr_attr +
		    attr->stacksize_attr - sizeof(struct stack));
		spare_stack->stacksize = round_up(attr->stacksize_attr);
		spare_stack->guardsize = round_up(attr->guardsize_attr);
		spare_stack->stackaddr = attr->stackaddr_attr;

		if (spare_stack->stacksize == THR_STACK_DEFAULT &&
		    spare_stack->guardsize == _thr_guard_default) {
			/* Default stack/guard size. */
			LIST_INSERT_HEAD(&dstackq, spare_stack, qe);
		} else {
			/* Non-default stack/guard size. */
			LIST_INSERT_HEAD(&mstackq, spare_stack, qe);
		}
		attr->stackaddr_attr = NULL;
	}
}
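
/*
 * A minimal usage sketch (illustrative only, not part of this file):
 * a hypothetical caller inside libthr would pair the two entry points
 * roughly as follows, taking the thread list lock around the free as
 * the comment above _thr_stack_free() requires; _thr_stack_alloc()
 * does its own locking internally.
 *
 *	struct pthread_attr attr;
 *
 *	attr.flags = 0;
 *	attr.stacksize_attr = THR_STACK_DEFAULT;
 *	attr.guardsize_attr = _thr_guard_default;
 *	if (_thr_stack_alloc(&attr) != 0)
 *		return (-1);		/* mmap() failed */
 *	... use attr.stackaddr_attr as the new thread's stack ...
 *	THREAD_LIST_WRLOCK(curthread);
 *	_thr_stack_free(&attr);		/* cache the stack for reuse */
 *	THREAD_LIST_UNLOCK(curthread);
 */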