/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*	Copyright (c) 1988 AT&T	*/
/*	  All Rights Reserved	*/

#pragma weak atexit = _atexit

#include "synonyms.h"
#include "thr_uberdata.h"
#include "libc_int.h"
#include "atexit.h"
#include "stdiom.h"

/*
 * Note that memory is managed by lmalloc()/lfree().
 *
 * Among other reasons, this is occasioned by the insistence of our
 * brothers sh(1) and csh(1) that they can do malloc, etc., better than
 * libc can.  Those programs define their own malloc routines, and
 * initialize the underlying mechanism in main().  This means that calls
 * to malloc occurring before main will crash.  The loader calls atexit(3C)
 * before calling main, so we'd better avoid malloc() when it does.
 *
 * Another reason for using lmalloc()/lfree() is that the atexit()
 * list must transcend all link maps.  See the Linker and Libraries
 * Guide for information on alternate link maps.
 *
 * See "thr_uberdata.h" for the definitions of structures used here.
 */

static int in_range(_exithdlr_func_t, Lc_addr_range_t[], uint_t count);

extern caddr_t _getfp(void);

/*
 * exitfns_lock is declared to be a recursive mutex so that we
 * can hold it while calling out to the registered functions.
 * If they call back to us, we are self-consistent and everything
 * works, even the case of calling exit() from functions called
 * by _exithandle() (recursive exit()).  All that is required is
 * that the registered functions actually return (no longjmp()s).
 *
 * Because exitfns_lock is declared to be a recursive mutex, we
 * cannot use it with lmutex_lock()/lmutex_unlock() and we must
 * use mutex_lock()/mutex_unlock().  This means that atexit()
 * and exit() are not async-signal-safe.  We make them fork1-safe
 * via the atexit_locks()/atexit_unlocks() functions, called from
 * libc_prepare_atfork()/libc_child_atfork()/libc_parent_atfork()
 */

/*
 * atexit_locks() and atexit_unlocks() are called on every link map.
 * Do not use curthread->ul_uberdata->atexit_root for these.
 */
void
atexit_locks()
{
	(void) _private_mutex_lock(&__uberdata.atexit_root.exitfns_lock);
}

void
atexit_unlocks()
{
	(void) _private_mutex_unlock(&__uberdata.atexit_root.exitfns_lock);
}

/*
 * atexit() is called before the primordial thread is fully set up.
 * Be careful about dereferencing self->ul_uberdata->atexit_root.
 */
int
_atexit(void (*func)(void))
{
	ulwp_t *self;
	atexit_root_t *arp;
	_exthdlr_t *p;

	if ((p = lmalloc(sizeof (_exthdlr_t))) == NULL)
		return (-1);

	if ((self = __curthread()) == NULL)
		arp = &__uberdata.atexit_root;
	else {
		arp = &self->ul_uberdata->atexit_root;
		(void) _private_mutex_lock(&arp->exitfns_lock);
	}
	p->hdlr = func;
	p->next = arp->head;
	arp->head = p;
	if (self != NULL)
		(void) _private_mutex_unlock(&arp->exitfns_lock);
	return (0);
}

void
_exithandle(void)
{
	atexit_root_t *arp = &curthread->ul_uberdata->atexit_root;
	_exthdlr_t *p;

	(void) _private_mutex_lock(&arp->exitfns_lock);
	arp->exit_frame_monitor = _getfp() + STACK_BIAS;
	p = arp->head;
	while (p != NULL) {
		arp->head = p->next;
		p->hdlr();
		lfree(p, sizeof (_exthdlr_t));
		p = arp->head;
	}
	(void) _private_mutex_unlock(&arp->exitfns_lock);
}

/*
 * _get_exit_frame_monitor is called by the C++ runtimes.
 */
void *
_get_exit_frame_monitor(void)
{
	atexit_root_t *arp = &curthread->ul_uberdata->atexit_root;
	return (&arp->exit_frame_monitor);
}

/*
 * The following is a routine which the loader (ld.so.1) calls when it
 * processes a dlclose call on an object.  It resets all signal handlers
 * which fall within the union of the ranges specified by the elements
 * of the array range to SIG_DFL.
 */
static void
_preexec_sig_unload(Lc_addr_range_t range[], uint_t count)
{
	uberdata_t *udp = curthread->ul_uberdata;
	int sig;
	rwlock_t *rwlp;
	struct sigaction *sap;
	struct sigaction oact;
	void (*handler)();

	for (sig = 1; sig < NSIG; sig++) {
		sap = (struct sigaction *)&udp->siguaction[sig].sig_uaction;
again:
		handler = sap->sa_handler;
		if (handler != SIG_DFL && handler != SIG_IGN &&
		    in_range(handler, range, count)) {
			rwlp = &udp->siguaction[sig].sig_lock;
			lrw_wrlock(rwlp);
			if (handler != sap->sa_handler) {
				lrw_unlock(rwlp);
				goto again;
			}
			sap->sa_handler = SIG_DFL;
			sap->sa_flags = SA_SIGINFO;
			(void) sigemptyset(&sap->sa_mask);
			if (__sigaction(sig, NULL, &oact) == 0 &&
			    oact.sa_handler != SIG_DFL &&
			    oact.sa_handler != SIG_IGN)
				(void) __sigaction(sig, sap, NULL);
			lrw_unlock(rwlp);
		}
	}
}

/*
 * The following is a routine which the loader (ld.so.1) calls when it
 * processes a dlclose call on an object.  It cancels all atfork() entries
 * whose prefork, parent postfork, or child postfork functions fall within
 * the union of the ranges specified by the elements of the array range.
 */
static void
_preexec_atfork_unload(Lc_addr_range_t range[], uint_t count)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	atfork_t *atfork_q;
	atfork_t *atfp;
	atfork_t *next;
	void (*func)(void);
	int start_again;

	(void) _private_mutex_lock(&udp->atfork_lock);
	if ((atfork_q = udp->atforklist) != NULL) {
		atfp = atfork_q;
		do {
			next = atfp->forw;
			start_again = 0;

			if (((func = atfp->prepare) != NULL &&
			    in_range(func, range, count)) ||
			    ((func = atfp->parent) != NULL &&
			    in_range(func, range, count)) ||
			    ((func = atfp->child) != NULL &&
			    in_range(func, range, count))) {
				if (self->ul_fork) {
					/*
					 * dlclose() called from a fork handler.
					 * Deleting the entry would wreak havoc.
					 * Just null out the function pointers
					 * and leave the entry in place.
					 */
					atfp->prepare = NULL;
					atfp->parent = NULL;
					atfp->child = NULL;
					continue;
				}
				if (atfp == atfork_q) {
					/* deleting the list head member */
					udp->atforklist = atfork_q = next;
					start_again = 1;
				}
				atfp->forw->back = atfp->back;
				atfp->back->forw = atfp->forw;
				lfree(atfp, sizeof (atfork_t));
				if (atfp == atfork_q) {
					/* we deleted the whole list */
					udp->atforklist = NULL;
					break;
				}
			}
		} while ((atfp = next) != atfork_q || start_again);
	}
	(void) _private_mutex_unlock(&udp->atfork_lock);
}

/*
 * The following is a routine which the loader (ld.so.1) calls when it
 * processes a dlclose call on an object.  It sets the destructor
 * function pointer to NULL for all keys whose destructors fall within
 * the union of the ranges specified by the elements of the array range.
 * We don't assign TSD_UNALLOCATED (the equivalent of pthread_key_destroy())
 * because the thread may use the key's TSD further on in fini processing.
 */
static void
_preexec_tsd_unload(Lc_addr_range_t range[], uint_t count)
{
	tsd_metadata_t *tsdm = &curthread->ul_uberdata->tsd_metadata;
	void (*func)(void *);
	int key;

	lmutex_lock(&tsdm->tsdm_lock);
	for (key = 1; key < tsdm->tsdm_nused; key++) {
		if ((func = tsdm->tsdm_destro[key]) != NULL &&
		    func != TSD_UNALLOCATED &&
		    in_range((_exithdlr_func_t)func, range, count))
			tsdm->tsdm_destro[key] = NULL;
	}
	lmutex_unlock(&tsdm->tsdm_lock);
}

/*
 * The following is a routine which the loader (ld.so.1) calls when it
 * processes dlclose calls on objects with atexit registrations.  It
 * executes the exit handlers that fall within the union of the ranges
 * specified by the elements of the array range in the REVERSE ORDER of
 * their registration.  Do not change this characteristic; it is REQUIRED
 * BEHAVIOR.
 */
int
_preexec_exit_handlers(Lc_addr_range_t range[], uint_t count)
{
	atexit_root_t *arp = &curthread->ul_uberdata->atexit_root;
	_exthdlr_t *o;		/* previous node */
	_exthdlr_t *p;		/* this node */

	(void) _private_mutex_lock(&arp->exitfns_lock);
	o = NULL;
	p = arp->head;
	while (p != NULL) {
		if (in_range(p->hdlr, range, count)) {
			/* We need to execute this one */
			if (o != NULL)
				o->next = p->next;
			else
				arp->head = p->next;
			p->hdlr();
			lfree(p, sizeof (_exthdlr_t));
			o = NULL;
			p = arp->head;
		} else {
			o = p;
			p = p->next;
		}
	}
	(void) _private_mutex_unlock(&arp->exitfns_lock);

	_preexec_tsd_unload(range, count);
	_preexec_atfork_unload(range, count);
	_preexec_sig_unload(range, count);

	return (0);
}

static int
in_range(_exithdlr_func_t addr, Lc_addr_range_t ranges[], uint_t count)
{
	uint_t idx;

	for (idx = 0; idx < count; idx++) {
		if ((void *)addr >= ranges[idx].lb &&
		    (void *)addr < ranges[idx].ub) {
			return (1);
		}
	}

	return (0);
}
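
/*
 * A minimal usage sketch, kept out of the build with #if 0.  It uses only
 * the public atexit(3C) interface and illustrates the LIFO behavior that
 * _atexit()/_exithandle() above implement: the handler list is pushed at
 * the head and drained from the head, so the handler registered last runs
 * first.  The handler names are hypothetical and not part of libc.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>

static void
registered_first(void)
{
	(void) puts("registered first, runs second");
}

static void
registered_second(void)
{
	(void) puts("registered second, runs first");
}

int
main(void)
{
	if (atexit(registered_first) != 0 || atexit(registered_second) != 0)
		return (1);
	/* returning from main() enters exit(), which runs the handlers */
	return (0);
}
#endif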