/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1988 AT&T	*/
/*	  All Rights Reserved  	*/

#pragma weak _atexit = atexit

#include "lint.h"
#include "thr_uberdata.h"
#include "libc_int.h"
#include "atexit.h"
#include "stdiom.h"

/*
 * Note that memory is managed by lmalloc()/lfree().
 *
 * Among other reasons, this is occasioned by the insistence of our
 * brothers sh(1) and csh(1) that they can do malloc, etc., better than
 * libc can.  Those programs define their own malloc routines, and
 * initialize the underlying mechanism in main().  This means that calls
 * to malloc occurring before main will crash.  The loader calls atexit(3C)
 * before calling main, so we'd better avoid malloc() when it does.
 *
 * Another reason for using lmalloc()/lfree() is that the atexit()
 * list must transcend all link maps.  See the Linker and Libraries
 * Guide for information on alternate link maps.
 *
 * See "thr_uberdata.h" for the definitions of structures used here.
 */

static int in_range(void *, Lc_addr_range_t[], uint_t count);

extern	caddr_t	_getfp(void);

/*
 * exitfns_lock is declared to be a recursive mutex so that we
 * can hold it while calling out to the registered functions.
 * If they call back to us, we are self-consistent and everything
 * works, even the case of calling exit() from functions called
 * by _exithandle() (recursive exit()).  All that is required is
 * that the registered functions actually return (no longjmp()s).
 *
 * Because exitfns_lock is declared to be a recursive mutex, we
 * cannot use it with lmutex_lock()/lmutex_unlock() and we must
 * use mutex_lock()/mutex_unlock().  This means that atexit()
 * and exit() are not async-signal-safe.  We make them fork1-safe
 * via the atexit_locks()/atexit_unlocks() functions, called from
 * libc_prepare_atfork()/libc_child_atfork()/libc_parent_atfork().
 */

/*
 * atexit_locks() and atexit_unlocks() are called on every link map.
 * Do not use curthread->ul_uberdata->atexit_root for these.
 */
void
atexit_locks(void)
{
	(void) mutex_lock(&__uberdata.atexit_root.exitfns_lock);
}

void
atexit_unlocks(void)
{
	(void) mutex_unlock(&__uberdata.atexit_root.exitfns_lock);
}


/*
 * This may be called via atexit() before the primordial thread is fully
 * set up.  Be careful about dereferencing self->ul_uberdata->atexit_root.
 */
int
__cxa_atexit(void (*hdlr)(void *), void *arg, void *dso)
{
	ulwp_t *self;
	atexit_root_t *arp;
	_exthdlr_t *p;

	if ((p = lmalloc(sizeof (_exthdlr_t))) == NULL)
		return (-1);

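	/*
	 * If the thread pointer is not yet set up, we are so early in
	 * process startup that only one thread can exist; use the
	 * static __uberdata directly and skip the lock.
	 */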
	if ((self = __curthread()) == NULL)
		arp = &__uberdata.atexit_root;
	else {
		arp = &self->ul_uberdata->atexit_root;
		(void) mutex_lock(&arp->exitfns_lock);
	}
	p->hdlr = hdlr;
	p->arg = arg;
	p->dso = dso;
	p->next = arp->head;
	arp->head = p;

	if (self != NULL)
		(void) mutex_unlock(&arp->exitfns_lock);
	return (0);
}

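/*
 * ISO C atexit() is simply a __cxa_atexit() registration with no
 * argument and no DSO handle, so a __cxa_finalize() call for a
 * specific DSO will not run it.
 */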
int
atexit(void (*func)(void))
{
	return (__cxa_atexit((_exithdlr_func_t)func, NULL, NULL));
}

/*
 * Note that we may be entered recursively: we call __cxa_finalize(0) at
 * exit, one of our handlers is ld.so.1`atexit_fini, and libraries may
 * call __cxa_finalize(__dso_handle) from their _fini routines.
 */
void
__cxa_finalize(void *dso)
{
	atexit_root_t *arp = &curthread->ul_uberdata->atexit_root;
	_exthdlr_t *p, *o;
	int cancel_state;

	/* disable cancellation while running atexit handlers */
	(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cancel_state);
	(void) mutex_lock(&arp->exitfns_lock);

	o = NULL;
	p = arp->head;
	while (p != NULL) {
		if ((dso == NULL) || (p->dso == dso)) {
			if (o != NULL)
				o->next = p->next;
			else
				arp->head = p->next;

			p->hdlr(p->arg);
			lfree(p, sizeof (_exthdlr_t));
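			/*
			 * The handler may have modified the list (for
			 * example by registering another handler or by
			 * calling exit() recursively), so rescan from
			 * the current head.
			 */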
			o = NULL;
			p = arp->head;
		} else {
			o = p;
			p = p->next;
		}
	}

	(void) mutex_unlock(&arp->exitfns_lock);
	(void) pthread_setcancelstate(cancel_state, NULL);
}

void
_exithandle(void)
{
	atexit_root_t *arp = &curthread->ul_uberdata->atexit_root;

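	/*
	 * Record the current (biased) frame pointer so that the C++
	 * runtime can fetch it through _get_exit_frame_monitor().
	 */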
	arp->exit_frame_monitor = _getfp() + STACK_BIAS;
	__cxa_finalize(NULL);
}

/*
 * _get_exit_frame_monitor is called by the C++ runtimes.
 */
void *
_get_exit_frame_monitor(void)
{
	atexit_root_t *arp = &curthread->ul_uberdata->atexit_root;
	return (&arp->exit_frame_monitor);
}

/*
 * The following is a routine which the loader (ld.so.1) calls when it
 * processes a dlclose call on an object.  It resets to SIG_DFL all
 * signal handlers that fall within the union of the ranges specified
 * by the elements of the array range.
 */
static void
_preexec_sig_unload(Lc_addr_range_t range[], uint_t count)
{
	uberdata_t *udp = curthread->ul_uberdata;
	int sig;
	rwlock_t *rwlp;
	struct sigaction *sap;
	struct sigaction oact;
	void (*handler)();

	for (sig = 1; sig < NSIG; sig++) {
		sap = (struct sigaction *)&udp->siguaction[sig].sig_uaction;
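		/*
		 * Sample the handler without the lock; after taking the
		 * write lock below we recheck and start over if it has
		 * changed in the meantime.
		 */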
again:
		handler = sap->sa_handler;
		if (handler != SIG_DFL && handler != SIG_IGN &&
		    in_range((void *)handler, range, count)) {
			rwlp = &udp->siguaction[sig].sig_lock;
			lrw_wrlock(rwlp);
			if (handler != sap->sa_handler) {
				lrw_unlock(rwlp);
				goto again;
			}
			sap->sa_handler = SIG_DFL;
			sap->sa_flags = SA_SIGINFO;
			(void) sigemptyset(&sap->sa_mask);
			if (__sigaction(sig, NULL, &oact) == 0 &&
			    oact.sa_handler != SIG_DFL &&
			    oact.sa_handler != SIG_IGN)
				(void) __sigaction(sig, sap, NULL);
			lrw_unlock(rwlp);
		}
	}
}

/*
 * The following is a routine which the loader (ld.so.1) calls when it
 * processes a dlclose call on an object.  It cancels all atfork() entries
 * whose prefork, parent postfork, or child postfork functions fall within
 * the union of the ranges specified by the elements of the array range.
 */
static void
_preexec_atfork_unload(Lc_addr_range_t range[], uint_t count)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	atfork_t *atfork_q;
	atfork_t *atfp;
	atfork_t *next;
	void (*func)(void);
	int start_again;

	(void) mutex_lock(&udp->atfork_lock);
	if ((atfork_q = udp->atforklist) != NULL) {
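		/*
		 * The atfork list is circular and doubly linked.  The
		 * start_again flag keeps the traversal going when the
		 * entry at the list head has itself been deleted and
		 * replaced by its successor.
		 */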
		atfp = atfork_q;
		do {
			next = atfp->forw;
			start_again = 0;

			if (((func = atfp->prepare) != NULL &&
			    in_range((void *)func, range, count)) ||
			    ((func = atfp->parent) != NULL &&
			    in_range((void *)func, range, count)) ||
			    ((func = atfp->child) != NULL &&
			    in_range((void *)func, range, count))) {
				if (self->ul_fork) {
					/*
					 * dlclose() called from a fork handler.
					 * Deleting the entry would wreak havoc.
					 * Just null out the function pointers
					 * and leave the entry in place.
					 */
					atfp->prepare = NULL;
					atfp->parent = NULL;
					atfp->child = NULL;
					continue;
				}
				if (atfp == atfork_q) {
					/* deleting the list head member */
					udp->atforklist = atfork_q = next;
					start_again = 1;
				}
				atfp->forw->back = atfp->back;
				atfp->back->forw = atfp->forw;
				lfree(atfp, sizeof (atfork_t));
				if (atfp == atfork_q) {
					/* we deleted the whole list */
					udp->atforklist = NULL;
					break;
				}
			}
		} while ((atfp = next) != atfork_q || start_again);
	}
	(void) mutex_unlock(&udp->atfork_lock);
}

/*
 * The following is a routine which the loader (ld.so.1) calls when it
 * processes a dlclose call on an object.  It sets the destructor
 * function pointer to NULL for all keys whose destructors fall within
 * the union of the ranges specified by the elements of the array range.
 * We don't assign TSD_UNALLOCATED (the equivalent of pthread_key_delete())
 * because the thread may use the key's TSD further on in fini processing.
 */
static void
_preexec_tsd_unload(Lc_addr_range_t range[], uint_t count)
{
	tsd_metadata_t *tsdm = &curthread->ul_uberdata->tsd_metadata;
	void (*func)(void *);
	int key;

	lmutex_lock(&tsdm->tsdm_lock);
	for (key = 1; key < tsdm->tsdm_nused; key++) {
		if ((func = tsdm->tsdm_destro[key]) != NULL &&
		    func != TSD_UNALLOCATED &&
		    in_range((void *)func, range, count))
			tsdm->tsdm_destro[key] = NULL;
	}
	lmutex_unlock(&tsdm->tsdm_lock);
}

/*
 * The following is a routine which the loader (ld.so.1) calls when it
 * processes dlclose calls on objects with atexit registrations.  It
 * executes the exit handlers that fall within the union of the ranges
 * specified by the elements of the array range in the REVERSE ORDER of
 * their registration.  Do not change this characteristic; it is REQUIRED
 * BEHAVIOR.
 */
int
_preexec_exit_handlers(Lc_addr_range_t range[], uint_t count)
{
	atexit_root_t *arp = &curthread->ul_uberdata->atexit_root;
	_exthdlr_t *o;		/* previous node */
	_exthdlr_t *p;		/* this node */
	int cancel_state;

	/* disable cancellation while running atexit handlers */
	(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cancel_state);
	(void) mutex_lock(&arp->exitfns_lock);
	o = NULL;
	p = arp->head;
	while (p != NULL) {
		/*
		 * We call even CXA (__cxa_atexit()) handlers whose handler
		 * functions lie in the library being unloaded.  The
		 * specification isn't particularly clear on this, and this
		 * seems the most sane.  This is the behaviour of FreeBSD 9.1
		 * (GNU libc leaves the handler on the exit list, and crashes
		 * at exit time).
		 *
		 * This won't cause handlers to be called twice, because
		 * anything called from a __cxa_finalize call from the
		 * language runtime will have been removed from the list.
		 */
		if (in_range((void *)p->hdlr, range, count)) {
			/* We need to execute this one */
			if (o != NULL)
				o->next = p->next;
			else
				arp->head = p->next;
			p->hdlr(p->arg);
			lfree(p, sizeof (_exthdlr_t));
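			/*
			 * As in __cxa_finalize(), the handler may have
			 * modified the list, so rescan from the current
			 * head.
			 */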
			o = NULL;
			p = arp->head;
		} else {
			o = p;
			p = p->next;
		}
	}
	(void) mutex_unlock(&arp->exitfns_lock);
	(void) pthread_setcancelstate(cancel_state, NULL);

	_preexec_tsd_unload(range, count);
	_preexec_atfork_unload(range, count);
	_preexec_sig_unload(range, count);

	return (0);
}

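/*
 * Return nonzero if addr lies within any of the count half-open
 * [lb, ub) address ranges.
 */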
static int
in_range(void *addr, Lc_addr_range_t ranges[], uint_t count)
{
	uint_t idx;

	for (idx = 0; idx < count; idx++) {
		if (addr >= ranges[idx].lb &&
		    addr < ranges[idx].ub) {
			return (1);
		}
	}

	return (0);
}
385