/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright 2016 Joyent, Inc.
 */

/*	Copyright (c) 1988 AT&T	*/
/*	  All Rights Reserved	*/

#pragma weak _atexit = atexit

#include "lint.h"
#include "thr_uberdata.h"
#include "libc_int.h"
#include "atexit.h"
#include "stdiom.h"

/*
 * Note that memory is managed by lmalloc()/lfree().
 *
 * Among other reasons, this is occasioned by the insistence of our
 * brothers sh(1) and csh(1) that they can do malloc, etc., better than
 * libc can.  Those programs define their own malloc routines, and
 * initialize the underlying mechanism in main().  This means that calls
 * to malloc occurring before main will crash.  The loader calls atexit(3C)
 * before calling main, so we'd better avoid malloc() when it does.
 *
 * Another reason for using lmalloc()/lfree() is that the atexit()
 * list must transcend all link maps.  See the Linker and Libraries
 * Guide for information on alternate link maps.
 *
 * See "thr_uberdata.h" for the definitions of structures used here.
 */

static int in_range(void *, Lc_addr_range_t[], uint_t count);

extern caddr_t _getfp(void);

/*
 * exitfns_lock is declared to be a recursive mutex so that we
 * can hold it while calling out to the registered functions.
 * If they call back to us, we are self-consistent and everything
 * works, even the case of calling exit() from functions called
 * by _exithandle() (recursive exit()).  All that is required is
 * that the registered functions actually return (no longjmp()s).
 *
 * Because exitfns_lock is declared to be a recursive mutex, we
 * cannot use it with lmutex_lock()/lmutex_unlock() and we must
 * use mutex_lock()/mutex_unlock().  This means that atexit()
 * and exit() are not async-signal-safe.  We make them fork1-safe
 * via the atexit_locks()/atexit_unlocks() functions, called from
 * libc_prepare_atfork()/libc_child_atfork()/libc_parent_atfork()
 */

/*
 * atexit_locks() and atexit_unlocks() are called on every link map.
 * Do not use curthread->ul_uberdata->atexit_root for these.
 */
void
atexit_locks()
{
	(void) mutex_lock(&__uberdata.atexit_root.exitfns_lock);
	(void) mutex_lock(&__uberdata.quickexit_root.exitfns_lock);
}

void
atexit_unlocks()
{
	(void) mutex_unlock(&__uberdata.quickexit_root.exitfns_lock);
	(void) mutex_unlock(&__uberdata.atexit_root.exitfns_lock);
}


/*
 * This is called via atexit() before the primordial thread is fully set up.
 * Be careful about dereferencing self->ul_uberdata->atexit_root.
 */
int
__cxa_atexit(void (*hdlr)(void *), void *arg, void *dso)
{
	ulwp_t *self;
	atexit_root_t *arp;
	_exthdlr_t *p;

	if ((p = lmalloc(sizeof (_exthdlr_t))) == NULL)
		return (-1);

	if ((self = __curthread()) == NULL)
		arp = &__uberdata.atexit_root;
	else {
		arp = &self->ul_uberdata->atexit_root;
		(void) mutex_lock(&arp->exitfns_lock);
	}
	p->hdlr = hdlr;
	p->arg = arg;
	p->dso = dso;
	p->next = arp->head;
	arp->head = p;

	if (self != NULL)
		(void) mutex_unlock(&arp->exitfns_lock);
	return (0);
}

int
atexit(void (*func)(void))
{
	return (__cxa_atexit((_exithdlr_func_t)func, NULL, NULL));
}
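
/*
 * Illustrative sketch of the __cxa_atexit() convention: under the Itanium
 * C++ ABI, a compiler lowers the destructor of a C++ object with static
 * storage duration to a __cxa_atexit(dtor, &obj, &__dso_handle) call,
 * which is why a dso tag is recorded on each handler and why
 * __cxa_finalize(dso) below can run only the handlers that belong to one
 * shared object.  The guard macro and the names in the sketch are
 * hypothetical; nothing here is built as part of libc.
 */
#ifdef ATEXIT_CXA_SKETCH
extern void *__dso_handle;	/* per-DSO handle provided by the CRT */

struct foo {
	int refs;
};
static struct foo foo_instance;

/* Matches the void (*)(void *) handler type taken by __cxa_atexit(). */
static void
foo_dtor(void *arg)
{
	struct foo *fp = arg;

	fp->refs = 0;
}

/*
 * Roughly what the compiler emits in the initialization path for
 * "static Foo foo;": register the destructor, tagged with this DSO.
 */
static void
register_foo_dtor(void)
{
	(void) __cxa_atexit(foo_dtor, &foo_instance, &__dso_handle);
}
#endif	/* ATEXIT_CXA_SKETCH */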

/*
 * Note that we may be entered recursively, as we'll call __cxa_finalize(0) at
 * exit, one of our handlers is ld.so.1`atexit_fini, and libraries may call
 * __cxa_finalize(__dso_handle) from their _fini.
 */
void
__cxa_finalize(void *dso)
{
	atexit_root_t *arp = &curthread->ul_uberdata->atexit_root;
	_exthdlr_t *p, *o;
	int cancel_state;

	/* disable cancellation while running atexit handlers */
	(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cancel_state);
	(void) mutex_lock(&arp->exitfns_lock);

	o = NULL;
	p = arp->head;
	while (p != NULL) {
		if ((dso == NULL) || (p->dso == dso)) {
			if (o != NULL)
				o->next = p->next;
			else
				arp->head = p->next;

			p->hdlr(p->arg);
			lfree(p, sizeof (_exthdlr_t));
			o = NULL;
			p = arp->head;
		} else {
			o = p;
			p = p->next;
		}
	}

	(void) mutex_unlock(&arp->exitfns_lock);
	(void) pthread_setcancelstate(cancel_state, NULL);
}
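
/*
 * Illustrative sketch: because __cxa_atexit() pushes each new handler at
 * the head of the list and __cxa_finalize() walks from the head, handlers
 * run in the reverse order of their registration, as atexit(3C) requires.
 * The guard macro is hypothetical; build the sketch standalone.
 */
#ifdef ATEXIT_ORDER_SKETCH
#include <stdio.h>
#include <stdlib.h>

static void
first(void)
{
	(void) puts("first registered, runs last");
}

static void
second(void)
{
	(void) puts("second registered, runs first");
}

int
main(void)
{
	(void) atexit(first);
	(void) atexit(second);
	return (0);	/* prints "second ..." and then "first ..." */
}
#endif	/* ATEXIT_ORDER_SKETCH */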

void
_exithandle(void)
{
	atexit_root_t *arp = &curthread->ul_uberdata->atexit_root;

	arp->exit_frame_monitor = _getfp() + STACK_BIAS;
	__cxa_finalize(NULL);
}

/*
 * _get_exit_frame_monitor is called by the C++ runtimes.
 */
void *
_get_exit_frame_monitor(void)
{
	atexit_root_t *arp = &curthread->ul_uberdata->atexit_root;
	return (&arp->exit_frame_monitor);
}

/*
 * The following is a routine which the loader (ld.so.1) calls when it
 * processes a dlclose call on an object.  It resets all signal handlers
 * which fall within the union of the ranges specified by the elements
 * of the array range to SIG_DFL.
 */
static void
_preexec_sig_unload(Lc_addr_range_t range[], uint_t count)
{
	uberdata_t *udp = curthread->ul_uberdata;
	int sig;
	rwlock_t *rwlp;
	struct sigaction *sap;
	struct sigaction oact;
	void (*handler)();

	for (sig = 1; sig < NSIG; sig++) {
		sap = (struct sigaction *)&udp->siguaction[sig].sig_uaction;
again:
		handler = sap->sa_handler;
		if (handler != SIG_DFL && handler != SIG_IGN &&
		    in_range((void *)handler, range, count)) {
			rwlp = &udp->siguaction[sig].sig_lock;
			lrw_wrlock(rwlp);
			if (handler != sap->sa_handler) {
				lrw_unlock(rwlp);
				goto again;
			}
			sap->sa_handler = SIG_DFL;
			sap->sa_flags = SA_SIGINFO;
			(void) sigemptyset(&sap->sa_mask);
			if (__sigaction(sig, NULL, &oact) == 0 &&
			    oact.sa_handler != SIG_DFL &&
			    oact.sa_handler != SIG_IGN)
				(void) __sigaction(sig, sap, NULL);
			lrw_unlock(rwlp);
		}
	}
}

/*
 * The following is a routine which the loader (ld.so.1) calls when it
 * processes a dlclose call on an object.  It cancels all atfork() entries
 * whose prefork, parent postfork, or child postfork functions fall within
 * the union of the ranges specified by the elements of the array range.
 */
static void
_preexec_atfork_unload(Lc_addr_range_t range[], uint_t count)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	atfork_t *atfork_q;
	atfork_t *atfp;
	atfork_t *next;
	void (*func)(void);
	int start_again;

	(void) mutex_lock(&udp->atfork_lock);
	if ((atfork_q = udp->atforklist) != NULL) {
		atfp = atfork_q;
		do {
			next = atfp->forw;
			start_again = 0;

			if (((func = atfp->prepare) != NULL &&
			    in_range((void *)func, range, count)) ||
			    ((func = atfp->parent) != NULL &&
			    in_range((void *)func, range, count)) ||
			    ((func = atfp->child) != NULL &&
			    in_range((void *)func, range, count))) {
				if (self->ul_fork) {
					/*
					 * dlclose() called from a fork handler.
					 * Deleting the entry would wreak havoc.
					 * Just null out the function pointers
					 * and leave the entry in place.
					 */
					atfp->prepare = NULL;
					atfp->parent = NULL;
					atfp->child = NULL;
					continue;
				}
				if (atfp == atfork_q) {
					/* deleting the list head member */
					udp->atforklist = atfork_q = next;
					start_again = 1;
				}
				atfp->forw->back = atfp->back;
				atfp->back->forw = atfp->forw;
				lfree(atfp, sizeof (atfork_t));
				if (atfp == atfork_q) {
					/* we deleted the whole list */
					udp->atforklist = NULL;
					break;
				}
			}
		} while ((atfp = next) != atfork_q || start_again);
	}
	(void) mutex_unlock(&udp->atfork_lock);
}

/*
 * The following is a routine which the loader (ld.so.1) calls when it
 * processes a dlclose call on an object.  It sets the destructor
 * function pointer to NULL for all keys whose destructors fall within
 * the union of the ranges specified by the elements of the array range.
 * We don't assign TSD_UNALLOCATED (the equivalent of pthread_key_destroy())
 * because the thread may use the key's TSD further on in fini processing.
 */
static void
_preexec_tsd_unload(Lc_addr_range_t range[], uint_t count)
{
	tsd_metadata_t *tsdm = &curthread->ul_uberdata->tsd_metadata;
	void (*func)(void *);
	int key;

	lmutex_lock(&tsdm->tsdm_lock);
	for (key = 1; key < tsdm->tsdm_nused; key++) {
		if ((func = tsdm->tsdm_destro[key]) != NULL &&
		    func != TSD_UNALLOCATED &&
		    in_range((void *)func, range, count))
			tsdm->tsdm_destro[key] = NULL;
	}
	lmutex_unlock(&tsdm->tsdm_lock);
}

/*
 * The following is a routine which the loader (ld.so.1) calls when it
 * processes dlclose calls on objects with atexit registrations.  It
 * executes the exit handlers that fall within the union of the ranges
 * specified by the elements of the array range in the REVERSE ORDER of
 * their registration.  Do not change this characteristic; it is REQUIRED
 * BEHAVIOR.
 */
int
_preexec_exit_handlers(Lc_addr_range_t range[], uint_t count)
{
	atexit_root_t *arp = &curthread->ul_uberdata->atexit_root;
	_exthdlr_t *o;		/* previous node */
	_exthdlr_t *p;		/* this node */
	int cancel_state;

	/* disable cancellation while running atexit handlers */
	(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cancel_state);
	(void) mutex_lock(&arp->exitfns_lock);
	o = NULL;
	p = arp->head;
	while (p != NULL) {
		/*
		 * We call even CXA handlers of functions present in the
		 * library being unloaded.  The specification isn't
		 * particularly clear on this, and this seems the most sane.
		 * This is the behaviour of FreeBSD 9.1 (GNU libc leaves the
		 * handler on the exit list, and crashes at exit time).
		 *
		 * This won't cause handlers to be called twice, because
		 * anything called from a __cxa_finalize call from the
		 * language runtime will have been removed from the list.
		 */
		if (in_range((void *)p->hdlr, range, count)) {
			/* We need to execute this one */
			if (o != NULL)
				o->next = p->next;
			else
				arp->head = p->next;
			p->hdlr(p->arg);
			lfree(p, sizeof (_exthdlr_t));
			o = NULL;
			p = arp->head;
		} else {
			o = p;
			p = p->next;
		}
	}
	(void) mutex_unlock(&arp->exitfns_lock);
	(void) pthread_setcancelstate(cancel_state, NULL);

	_preexec_tsd_unload(range, count);
	_preexec_atfork_unload(range, count);
	_preexec_sig_unload(range, count);

	return (0);
}

static int
in_range(void *addr, Lc_addr_range_t ranges[], uint_t count)
{
	uint_t idx;

	for (idx = 0; idx < count; idx++) {
		if (addr >= ranges[idx].lb &&
		    addr < ranges[idx].ub) {
			return (1);
		}
	}

	return (0);
}
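
/*
 * Illustrative sketch of the dlclose() path above: an atexit() handler
 * registered by a shared object runs when that object is unloaded, via
 * ld.so.1 calling _preexec_exit_handlers() with the object's address
 * ranges, rather than being left behind to crash at exit time.  The guard
 * macro and the library name libdemo.so are hypothetical; build the
 * sketch standalone.
 */
#ifdef ATEXIT_DLCLOSE_SKETCH
/*
 * Library side (a separate source file built into libdemo.so):
 *
 *	static void demo_handler(void) { (void) puts("handler"); }
 *	void demo_register(void) { (void) atexit(demo_handler); }
 */
#include <dlfcn.h>

int
main(void)
{
	void *h = dlopen("libdemo.so", RTLD_LAZY);
	void (*reg)(void);

	if (h == NULL)
		return (1);
	reg = (void (*)(void))dlsym(h, "demo_register");
	if (reg != NULL)
		reg();
	(void) dlclose(h);	/* demo_handler runs here, not at exit */
	return (0);
}
#endif	/* ATEXIT_DLCLOSE_SKETCH */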

int
at_quick_exit(void (*func)(void))
{
	ulwp_t *self;
	quickexit_root_t *arp;
	_qexthdlr_t *p;

	if ((p = lmalloc(sizeof (_qexthdlr_t))) == NULL)
		return (-1);

	if ((self = __curthread()) == NULL) {
		arp = &__uberdata.quickexit_root;
	} else {
		arp = &self->ul_uberdata->quickexit_root;
		(void) mutex_lock(&arp->exitfns_lock);
	}
	p->hdlr = func;
	p->next = arp->head;
	arp->head = p;

	if (self != NULL)
		(void) mutex_unlock(&arp->exitfns_lock);
	return (0);
}

void
quick_exit(int status)
{
	quickexit_root_t *qrp = &curthread->ul_uberdata->quickexit_root;
	_qexthdlr_t *p;
	int cancel_state;

	(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cancel_state);
	(void) mutex_lock(&qrp->exitfns_lock);

	p = qrp->head;
	while (p != NULL) {
		qrp->head = p->next;
		p->hdlr();
		lfree(p, sizeof (_qexthdlr_t));
		p = qrp->head;
	}

	(void) mutex_unlock(&qrp->exitfns_lock);
	(void) pthread_setcancelstate(cancel_state, NULL);
	_Exit(status);
}
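
/*
 * Illustrative sketch of the quick_exit() path above: quick_exit() runs
 * the at_quick_exit() handlers (newest first, since at_quick_exit() also
 * pushes at the head of its list) and then calls _Exit(), so atexit()
 * handlers never run and unflushed stdio buffers are discarded.  The
 * guard macro is hypothetical; build the sketch standalone.
 */
#ifdef QUICK_EXIT_SKETCH
#include <stdio.h>
#include <stdlib.h>

static void
normal_handler(void)
{
	(void) fputs("atexit: not reached\n", stderr);
}

static void
quick_handler(void)
{
	(void) fputs("at_quick_exit: reached\n", stderr);
}

int
main(void)
{
	(void) atexit(normal_handler);
	(void) at_quick_exit(quick_handler);

	/*
	 * Runs quick_handler and then _Exit(0); normal_handler never
	 * runs and buffered stdio output is not flushed.
	 */
	quick_exit(0);
	/* NOTREACHED */
}
#endif	/* QUICK_EXIT_SKETCH */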