/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2001 Daniel Eischen <deischen@freebsd.org>
 * Copyright (c) 2000-2001 Jason Evans <jasone@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/mman.h>
#include <sys/queue.h>
#include <sys/resource.h>
#include <sys/sysctl.h>
#include <stdlib.h>
#include <pthread.h>
#include <link.h>

#include "thr_private.h"

/* Spare thread stack. */
struct stack {
        LIST_ENTRY(stack)       qe;             /* Stack queue linkage. */
        size_t                  stacksize;      /* Stack size (rounded up). */
        size_t                  guardsize;      /* Guard size. */
        void                    *stackaddr;     /* Stack address. */
};

/*
 * Default sized (stack and guard) spare stack queue.  Stacks are cached
 * to avoid additional complexity managing mmap()ed stack regions.  Spare
 * stacks are used in LIFO order to increase cache locality.
 */
static LIST_HEAD(, stack)       dstackq = LIST_HEAD_INITIALIZER(dstackq);

/*
 * Miscellaneous sized (non-default stack and/or guard) spare stack queue.
 * Stacks are cached to avoid additional complexity managing mmap()ed
 * stack regions.  This list is unordered, since ordering on both stack
 * size and guard size would be more trouble than it's worth.  Stacks are
 * allocated from this cache on a first size match basis.
 */
static LIST_HEAD(, stack)       mstackq = LIST_HEAD_INITIALIZER(mstackq);

/**
 * Base address of the last stack allocated (including its red zone, if
 * there is one).  Stacks are allocated contiguously, starting beyond the
 * top of the main stack.  When a new stack is created, a red zone is
 * typically created (actually, the red zone is mapped with PROT_NONE) above
 * the top of the stack, such that the stack will not be able to grow all
 * the way to the bottom of the next stack.  This isn't fool-proof.  It is
 * possible for a stack to grow by a large amount, such that it grows into
 * the next stack, and as long as the memory within the red zone is never
 * accessed, nothing will prevent one thread stack from trouncing all over
 * the next.
 *
 *                                              low memory
 *         . . . . . . . . . . . . . . . . . .
 *        |                                   |
 *        |       stack 3                     | start of 3rd thread stack
 *        +-----------------------------------+
 *        |                                   |
 *        |       Red Zone (guard page)       | red zone for 2nd thread
 *        |                                   |
 *        +-----------------------------------+
 *        |  stack 2 - _thr_stack_default     | top of 2nd thread stack
 *        |                                   |
 *        |                                   |
 *        |                                   |
 *        |                                   |
 *        |       stack 2                     |
 *        +-----------------------------------+ <-- start of 2nd thread stack
 *        |                                   |
 *        |       Red Zone                    | red zone for 1st thread
 *        |                                   |
 *        +-----------------------------------+
 *        |  stack 1 - _thr_stack_default     | top of 1st thread stack
 *        |                                   |
 *        |                                   |
 *        |                                   |
 *        |                                   |
 *        |       stack 1                     |
 *        +-----------------------------------+ <-- start of 1st thread stack
 *        |                                   |     (initial value of last_stack)
 *        |       Red Zone                    |
 *        |                                   | red zone for main thread
 *        +-----------------------------------+
 *        | USRSTACK - _thr_stack_initial     | top of main thread stack
 *        |                                   |          ^
 *        |                                   |          |
 *        |                                   |          |
 *        |                                   |          | stack growth
 *        |                                   |
 *        +-----------------------------------+ <-- start of main thread stack
 *                                                  (USRSTACK)
 *                                              high memory
 *
 */
static char *last_stack = NULL;
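
/*
 * Worked example of the placement arithmetic described above and
 * implemented in _thr_stack_alloc() below (illustrative numbers only;
 * the real values come from _stacktop, _thr_stack_initial and
 * _thr_guard_default at run time).  With a stack top of 0x800000000,
 * an 8 MiB initial (main) stack and a 4 KiB default guard, last_stack
 * starts out at 0x800000000 - 0x800000 - 0x1000 = 0x7ff7ff000.  A first
 * thread stack of 2 MiB with a 4 KiB guard is then requested at
 * 0x7ff7ff000 - 0x200000 - 0x1000 = 0x7ff5fe000; the low 4 KiB of that
 * mapping becomes the PROT_NONE red zone and the usable stack sits
 * immediately above it.
 */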

/*
 * Round size up to the nearest multiple of
 * _thr_page_size.
 */
static inline size_t
round_up(size_t size)
{
        if (size % _thr_page_size != 0)
                size = ((size / _thr_page_size) + 1) *
                    _thr_page_size;
        return size;
}
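
/*
 * For example, with a 4 KiB page size (_thr_page_size == 4096),
 * round_up(1) and round_up(4096) both yield 4096, while round_up(4097)
 * yields 8192.  A size that is already a multiple of the page size is
 * returned unchanged.
 */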

void
_thr_stack_fix_protection(struct pthread *thrd)
{

        mprotect((char *)thrd->attr.stackaddr_attr +
            round_up(thrd->attr.guardsize_attr),
            round_up(thrd->attr.stacksize_attr),
            _rtld_get_stack_prot());
}

static void
singlethread_map_stacks_exec(void)
{
        int mib[2];
        struct rlimit rlim;
        u_long stacktop;
        size_t len;

        mib[0] = CTL_KERN;
        mib[1] = KERN_STACKTOP;
        len = sizeof(stacktop);
        if (sysctl(mib, nitems(mib), &stacktop, &len, NULL, 0) == -1) {
                mib[1] = KERN_USRSTACK;
                if (sysctl(mib, nitems(mib), &stacktop, &len, NULL, 0) == -1)
                        return;
        }
        if (getrlimit(RLIMIT_STACK, &rlim) == -1)
                return;
        mprotect((void *)(uintptr_t)(stacktop - rlim.rlim_cur),
            rlim.rlim_cur, _rtld_get_stack_prot());
}

void
__thr_map_stacks_exec(void)
{
        struct pthread *curthread, *thrd;
        struct stack *st;

        if (!_thr_is_inited()) {
                singlethread_map_stacks_exec();
                return;
        }
        curthread = _get_curthread();
        THREAD_LIST_RDLOCK(curthread);
        LIST_FOREACH(st, &mstackq, qe)
                mprotect((char *)st->stackaddr + st->guardsize, st->stacksize,
                    _rtld_get_stack_prot());
        LIST_FOREACH(st, &dstackq, qe)
                mprotect((char *)st->stackaddr + st->guardsize, st->stacksize,
                    _rtld_get_stack_prot());
        TAILQ_FOREACH(thrd, &_thread_gc_list, gcle)
                _thr_stack_fix_protection(thrd);
        TAILQ_FOREACH(thrd, &_thread_list, tle)
                _thr_stack_fix_protection(thrd);
        THREAD_LIST_UNLOCK(curthread);
}

int
_thr_stack_alloc(struct pthread_attr *attr)
{
        struct pthread *curthread = _get_curthread();
        struct stack *spare_stack;
        size_t stacksize;
        size_t guardsize;
        char *stackaddr;

        /*
         * Round up stack size to nearest multiple of _thr_page_size so
         * that mmap() will work.  If the stack size is not an even
         * multiple, we end up initializing things such that there is
         * unused space above the beginning of the stack, so the stack
         * sits snugly against its guard.
         */
        stacksize = round_up(attr->stacksize_attr);
        guardsize = round_up(attr->guardsize_attr);

        attr->stackaddr_attr = NULL;
        attr->flags &= ~THR_STACK_USER;

        /*
         * Use the garbage collector lock for synchronization of the
         * spare stack lists and allocations from stacktop.
         */
        THREAD_LIST_WRLOCK(curthread);
        /*
         * If the stack and guard sizes are default, try to allocate a stack
         * from the default-size stack cache:
         */
        if ((stacksize == THR_STACK_DEFAULT) &&
            (guardsize == _thr_guard_default)) {
                if ((spare_stack = LIST_FIRST(&dstackq)) != NULL) {
                        /* Use the spare stack. */
                        LIST_REMOVE(spare_stack, qe);
                        attr->stackaddr_attr = spare_stack->stackaddr;
                }
        }
        /*
         * The user specified a non-default stack and/or guard size, so try to
         * allocate a stack from the non-default size stack cache, using the
         * rounded up stack size (stacksize) in the search:
         */
        else {
                LIST_FOREACH(spare_stack, &mstackq, qe) {
                        if (spare_stack->stacksize == stacksize &&
                            spare_stack->guardsize == guardsize) {
                                LIST_REMOVE(spare_stack, qe);
                                attr->stackaddr_attr = spare_stack->stackaddr;
                                break;
                        }
                }
        }
        if (attr->stackaddr_attr != NULL) {
                /* A cached stack was found.  Release the lock. */
                THREAD_LIST_UNLOCK(curthread);
        }
        else {
                /*
                 * Allocate a stack from or below stacktop, depending
                 * on the LIBPTHREAD_BIGSTACK_MAIN env variable.
                 */
                if (last_stack == NULL)
                        last_stack = _stacktop - _thr_stack_initial -
                            _thr_guard_default;

                /* Allocate a new stack. */
                stackaddr = last_stack - stacksize - guardsize;

                /*
                 * Even if stack allocation fails, we don't want to try to
                 * use this location again, so unconditionally decrement
                 * last_stack.  Under normal operating conditions, the most
                 * likely reason for an mmap() error is a stack overflow of
                 * the adjacent thread stack.
                 */
                last_stack -= (stacksize + guardsize);

                /* Release the lock before mmap'ing it. */
                THREAD_LIST_UNLOCK(curthread);

                /* Map the stack and guard page together, and split guard
                   page from allocated space: */
                if ((stackaddr = mmap(stackaddr, stacksize + guardsize,
                    _rtld_get_stack_prot(), MAP_STACK,
                    -1, 0)) != MAP_FAILED &&
                    (guardsize == 0 ||
                    mprotect(stackaddr, guardsize, PROT_NONE) == 0)) {
                        stackaddr += guardsize;
                } else {
                        if (stackaddr != MAP_FAILED)
                                munmap(stackaddr, stacksize + guardsize);
                        stackaddr = NULL;
                }
                attr->stackaddr_attr = stackaddr;
        }
        if (attr->stackaddr_attr != NULL)
                return (0);
        else
                return (-1);
}
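
/*
 * Illustrative pairing of _thr_stack_alloc() above and _thr_stack_free()
 * below (a sketch only, not the actual libthr call sites; the real callers
 * live in the thread creation and thread reclamation paths, and the names
 * used here are hypothetical):
 *
 *      struct pthread_attr example_attr;       // no user-supplied stack
 *
 *      if (_thr_stack_alloc(&example_attr) != 0)
 *              return (EAGAIN);                // could not map a new stack
 *      // ... run the thread on example_attr.stackaddr_attr ...
 *      THREAD_LIST_WRLOCK(curthread);
 *      _thr_stack_free(&example_attr);         // recycle into a spare-stack cache
 *      THREAD_LIST_UNLOCK(curthread);
 */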

/* This function must be called with _thread_list_lock held. */
void
_thr_stack_free(struct pthread_attr *attr)
{
        struct stack *spare_stack;

        if ((attr != NULL) && ((attr->flags & THR_STACK_USER) == 0)
            && (attr->stackaddr_attr != NULL)) {
                spare_stack = (struct stack *)
                    ((char *)attr->stackaddr_attr +
                    attr->stacksize_attr - sizeof(struct stack));
                spare_stack->stacksize = round_up(attr->stacksize_attr);
                spare_stack->guardsize = round_up(attr->guardsize_attr);
                spare_stack->stackaddr = attr->stackaddr_attr;

                if (spare_stack->stacksize == THR_STACK_DEFAULT &&
                    spare_stack->guardsize == _thr_guard_default) {
                        /* Default stack/guard size. */
                        LIST_INSERT_HEAD(&dstackq, spare_stack, qe);
                } else {
                        /* Non-default stack/guard size. */
                        LIST_INSERT_HEAD(&mstackq, spare_stack, qe);
                }
                attr->stackaddr_attr = NULL;
        }
}