/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include "lint.h"
#include "thr_uberdata.h"

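/*
 * Initial size of the TLS module information array;
 * tls_modinfo_alloc() doubles it as needed.
 */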
#define	MIN_MOD_SLOTS	8

/*
 * Used to inform libc_init() that we are on the primary link map,
 * and to cause certain functions (like malloc() and sbrk()) to fail
 * (with ENOTSUP) when they are called on an alternate link map.
 */
int primary_link_map = 0;

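/*
 * Required alignment of the static TLS block and of each module's
 * static TLS offset (see the ASSERTs in __tls_static_mods()).
 */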
#if defined(_LP64)
#define	ALIGN	16
#else
#define	ALIGN	8
#endif

/*
 * Grow the TLS module information array as necessary to include the
 * specified module-id.  tls_modinfo->tls_size must be a power of two.
 * Return a pointer to the (possibly reallocated) module information array.
 */
static TLS_modinfo *
tls_modinfo_alloc(tls_metadata_t *tlsm, ulong_t moduleid)
{
	tls_t *tls_modinfo = &tlsm->tls_modinfo;
	TLS_modinfo *modinfo;
	size_t mod_slots;

	if ((modinfo = tls_modinfo->tls_data) == NULL ||
	    tls_modinfo->tls_size <= moduleid) {
		if ((mod_slots = tls_modinfo->tls_size) == 0)
			mod_slots = MIN_MOD_SLOTS;
		while (mod_slots <= moduleid)
			mod_slots *= 2;
		modinfo = lmalloc(mod_slots * sizeof (TLS_modinfo));
		if (tls_modinfo->tls_data != NULL) {
			(void) memcpy(modinfo, tls_modinfo->tls_data,
			    tls_modinfo->tls_size * sizeof (TLS_modinfo));
			lfree(tls_modinfo->tls_data,
			    tls_modinfo->tls_size * sizeof (TLS_modinfo));
		}
		tls_modinfo->tls_data = modinfo;
		tls_modinfo->tls_size = mod_slots;
	}
	return (modinfo);
}
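
/*
 * Example of the growth policy above: starting from an empty array,
 * a request for module-id 20 allocates 32 slots (8 -> 16 -> 32); any
 * existing entries are copied into the new array before the old one
 * is freed with lfree().
 */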

/*
 * This is called from the dynamic linker, before libc_init() is called,
 * to set up all of the TLS blocks that are available at process startup
 * and hence must be included as part of the static TLS block.
 * No locks are needed because we are single-threaded at this point.
 * We must be careful not to call any function that could possibly
 * invoke the dynamic linker.  That is, we must only call functions
 * that are wholly private to libc.
 */
void
__tls_static_mods(TLS_modinfo **tlslist, unsigned long statictlssize)
{
	ulwp_t *oldself = __curthread();
	tls_metadata_t *tlsm;
	TLS_modinfo **tlspp;
	TLS_modinfo *tlsp;
	TLS_modinfo *modinfo;
	caddr_t data;
	caddr_t data_end;
	int max_modid;

	primary_link_map = 1;		/* inform libc_init */
	if (statictlssize == 0)
		return;

	/*
	 * Retrieve whatever dynamic TLS metadata was generated by code
	 * running on alternate link maps prior to now (we must be running
	 * on the primary link map now since __tls_static_mods() is only
	 * called on the primary link map).
	 */
	tlsm = &__uberdata.tls_metadata;
	if (oldself != NULL) {
		(void) memcpy(tlsm,
		    &oldself->ul_uberdata->tls_metadata, sizeof (*tlsm));
		ASSERT(tlsm->static_tls.tls_data == NULL);
	}

	/*
	 * We call lmalloc() to allocate the template even though libc_init()
	 * has not yet been called.  lmalloc() must and does deal with this.
	 */
	ASSERT((statictlssize & (ALIGN - 1)) == 0);
	tlsm->static_tls.tls_data = data = lmalloc(statictlssize);
	data_end = data + statictlssize;
	tlsm->static_tls.tls_size = statictlssize;
	/*
	 * Initialize the static TLS template.
	 * We make no assumptions about the order in memory of the TLS
	 * modules we are processing, only that they fit within the
	 * total size we are given and that they are self-consistent.
	 * We do not assume any order for the module-ids; we only assume
	 * that they are reasonably small integers.
	 */
	for (max_modid = 0, tlspp = tlslist; (tlsp = *tlspp) != NULL; tlspp++) {
		ASSERT(tlsp->tm_flags & TM_FLG_STATICTLS);
		ASSERT(tlsp->tm_stattlsoffset > 0);
		ASSERT(tlsp->tm_stattlsoffset <= statictlssize);
		ASSERT((tlsp->tm_stattlsoffset & (ALIGN - 1)) == 0);
		ASSERT(tlsp->tm_filesz <= tlsp->tm_memsz);
		ASSERT(tlsp->tm_memsz <= tlsp->tm_stattlsoffset);
		if (tlsp->tm_filesz)
			(void) memcpy(data_end - tlsp->tm_stattlsoffset,
			    tlsp->tm_tlsblock, tlsp->tm_filesz);
		if (max_modid < tlsp->tm_modid)
			max_modid = tlsp->tm_modid;
	}
	/*
	 * Record the static TLS_modinfo information.
	 */
	modinfo = tls_modinfo_alloc(tlsm, max_modid);
	for (tlspp = tlslist; (tlsp = *tlspp) != NULL; tlspp++)
		(void) memcpy(&modinfo[tlsp->tm_modid],
		    tlsp, sizeof (*tlsp));

	/*
	 * Copy the new tls_metadata back to the old, if any,
	 * since it will be copied up again in libc_init().
	 */
	if (oldself != NULL)
		(void) memcpy(&oldself->ul_uberdata->tls_metadata,
		    tlsm, sizeof (*tlsm));
}
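
/*
 * A sketch of the static TLS layout implied by the code above and by
 * tls_setup() below: the static TLS block lies immediately below the
 * thread's ulwp_t, and tm_stattlsoffset is measured backwards from the
 * end of that block, so a module's static TLS lives at
 * (caddr_t)self - tm_stattlsoffset.  The template built here is copied
 * into that region for each new thread by tls_setup().
 */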

/*
 * This is called from the dynamic linker for each module not included
 * in the static TLS mod list, after the module has been loaded but
 * before any of the module's init code has been executed.
 */
void
__tls_mod_add(TLS_modinfo *tlsp)
{
	tls_metadata_t *tlsm = &curthread->ul_uberdata->tls_metadata;
	ulong_t moduleid = tlsp->tm_modid;
	TLS_modinfo *modinfo;

	lmutex_lock(&tlsm->tls_lock);
	ASSERT(!(tlsp->tm_flags & TM_FLG_STATICTLS));
	ASSERT(tlsp->tm_filesz <= tlsp->tm_memsz);
	modinfo = tls_modinfo_alloc(tlsm, moduleid);
	(void) memcpy(&modinfo[moduleid], tlsp, sizeof (*tlsp));
	lmutex_unlock(&tlsm->tls_lock);
}

/*
 * Called for each module as it is unloaded from memory by dlclose().
 */
void
__tls_mod_remove(TLS_modinfo *tlsp)
{
	tls_metadata_t *tlsm = &curthread->ul_uberdata->tls_metadata;
	ulong_t moduleid = tlsp->tm_modid;
	TLS_modinfo *modinfo;

	lmutex_lock(&tlsm->tls_lock);
	ASSERT(tlsm->tls_modinfo.tls_data != NULL &&
	    moduleid < tlsm->tls_modinfo.tls_size);
	modinfo = tlsm->tls_modinfo.tls_data;
	(void) memset(&modinfo[moduleid], 0, sizeof (TLS_modinfo));
	lmutex_unlock(&tlsm->tls_lock);
}

extern	int	_preexec_exit_handlers();
extern	void	libc_init();

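/*
 * Interface table made available to the runtime linker.  The CI_TLS_*
 * entries let ld.so.1 notify libc of TLS modules as they are added and
 * removed and hand over the static TLS module list at startup;
 * CI_THRINIT and CI_ATEXIT hook libc initialization and pre-exec exit
 * handling.  (A summary based on the entries below and their handlers.)
 */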
const Lc_interface tls_rtldinfo[] = {
	{CI_VERSION,	(int(*)())CI_V_CURRENT},
	{CI_ATEXIT,	(int(*)())_preexec_exit_handlers},
	{CI_TLS_MODADD,	(int(*)())__tls_mod_add},
	{CI_TLS_MODREM,	(int(*)())__tls_mod_remove},
	{CI_TLS_STATMOD, (int(*)())__tls_static_mods},
	{CI_THRINIT,	(int(*)())libc_init},
	{CI_NULL,	(int(*)())NULL}
};

/*
 * Return the address of a TLS variable for the current thread.
 * Run the constructors for newly-allocated dynamic TLS.
 */
void *
slow_tls_get_addr(TLS_index *tls_index)
{
	ulwp_t *self = curthread;
	tls_metadata_t *tlsm = &self->ul_uberdata->tls_metadata;
	TLS_modinfo *tlsp;
	ulong_t moduleid;
	tls_t *tlsent;
	caddr_t	base;
	void (**initarray)(void);
	ulong_t arraycnt = 0;

	/*
	 * Defer signals until we have finished calling
	 * all of the constructors.
	 */
	sigoff(self);
	lmutex_lock(&tlsm->tls_lock);
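	/*
	 * If this module-id lies beyond the thread's private TLS entry
	 * array, grow that array to match the global module array.
	 */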
	if ((moduleid = tls_index->ti_moduleid) < self->ul_ntlsent)
		tlsent = self->ul_tlsent;
	else {
		ASSERT(moduleid < tlsm->tls_modinfo.tls_size);
		tlsent = lmalloc(tlsm->tls_modinfo.tls_size * sizeof (tls_t));
		if (self->ul_tlsent != NULL) {
			(void) memcpy(tlsent, self->ul_tlsent,
			    self->ul_ntlsent * sizeof (tls_t));
			lfree(self->ul_tlsent,
			    self->ul_ntlsent * sizeof (tls_t));
		}
		self->ul_tlsent = tlsent;
		self->ul_ntlsent = tlsm->tls_modinfo.tls_size;
	}
	tlsent += moduleid;
	if ((base = tlsent->tls_data) == NULL) {
		tlsp = (TLS_modinfo *)tlsm->tls_modinfo.tls_data + moduleid;
		if (tlsp->tm_memsz == 0) {	/* dlclose()d module? */
			base = NULL;
		} else if (tlsp->tm_flags & TM_FLG_STATICTLS) {
			/* static TLS is already allocated/initialized */
			base = (caddr_t)self - tlsp->tm_stattlsoffset;
			tlsent->tls_data = base;
			tlsent->tls_size = 0;	/* don't lfree() this space */
		} else {
			/* allocate/initialize the dynamic TLS */
			base = lmalloc(tlsp->tm_memsz);
			if (tlsp->tm_filesz != 0)
				(void) memcpy(base, tlsp->tm_tlsblock,
				    tlsp->tm_filesz);
			tlsent->tls_data = base;
			tlsent->tls_size = tlsp->tm_memsz;
			/* remember the constructors */
			arraycnt = tlsp->tm_tlsinitarraycnt;
			initarray = tlsp->tm_tlsinitarray;
		}
	}
	lmutex_unlock(&tlsm->tls_lock);

	/*
	 * Call constructors, if any, in ascending order.
	 * We have to do this after dropping tls_lock because
	 * we have no idea what the constructors will do.
	 * At least we have signals deferred until they are done.
	 */
	if (arraycnt) {
		do {
			(**initarray++)();
		} while (--arraycnt != 0);
	}

	if (base == NULL)	/* kludge to get x86/x64 to boot */
		base = (caddr_t)self - 512;

	sigon(self);
	return (base + tls_index->ti_tlsoffset);
}

#ifdef	TLS_GET_ADDR_IS_WRITTEN_IN_ASSEMBLER
/*
 * For speed, we do not make reference to any static data in this function.
 * If necessary to do so, we do a tail call to slow_tls_get_addr().
 */
void *
__tls_get_addr(TLS_index *tls_index)
{
	ulwp_t *self = curthread;
	tls_t *tlsent = self->ul_tlsent;
	ulong_t moduleid;
	caddr_t	base;

	if ((moduleid = tls_index->ti_moduleid) < self->ul_ntlsent &&
	    (base = tlsent[moduleid].tls_data) != NULL)
		return (base + tls_index->ti_tlsoffset);

	return (slow_tls_get_addr(tls_index));
}
#endif	/* TLS_GET_ADDR_IS_WRITTEN_IN_ASSEMBLER */
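
/*
 * Illustrative only: for a general-dynamic reference to a thread-local
 * variable, say
 *	extern __thread int foo;
 * compiler-generated code ends up calling __tls_get_addr() (or an
 * architecture-specific variant of it) with a TLS_index whose
 * ti_moduleid and ti_tlsoffset have been filled in by the runtime
 * linker through TLS relocations, roughly:
 *	int *p = (int *)__tls_get_addr(ti);
 * The fast path above returns the cached per-thread block;
 * slow_tls_get_addr() handles allocation and constructors on the
 * first reference.
 */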

/*
 * This is called by _thrp_setup() to initialize the thread's static TLS.
 * Constructors for initially allocated static TLS are called here.
 */
void
tls_setup()
{
	ulwp_t *self = curthread;
	tls_metadata_t *tlsm = &self->ul_uberdata->tls_metadata;
	TLS_modinfo *tlsp;
	long moduleid;
	ulong_t nmods;

	if (tlsm->static_tls.tls_size == 0)	/* no static TLS */
		return;

	/* static TLS initialization */
	(void) memcpy((caddr_t)self - tlsm->static_tls.tls_size,
	    tlsm->static_tls.tls_data, tlsm->static_tls.tls_size);

	/* call TLS constructors for the static TLS just initialized */
	lmutex_lock(&tlsm->tls_lock);
	nmods = tlsm->tls_modinfo.tls_size;
	for (moduleid = 0; moduleid < nmods; moduleid++) {
		/*
		 * Resume where we left off in the module array.
		 * tls_modinfo.tls_data may have changed since we
		 * dropped and reacquired tls_lock, but TLS modules
		 * retain their positions in the new array.
		 */
		tlsp = (TLS_modinfo *)tlsm->tls_modinfo.tls_data + moduleid;
		/*
		 * Call constructors for this module if there are any
		 * to be called and if it is part of the static TLS.
		 */
		if (tlsp->tm_tlsinitarraycnt != 0 &&
		    (tlsp->tm_flags & TM_FLG_STATICTLS)) {
			ulong_t arraycnt = tlsp->tm_tlsinitarraycnt;
			void (**initarray)(void) = tlsp->tm_tlsinitarray;

			/*
			 * Call the constructors in ascending order.
			 * We must drop tls_lock while doing this because
			 * we have no idea what the constructors will do.
			 */
			lmutex_unlock(&tlsm->tls_lock);
			do {
				(**initarray++)();
			} while (--arraycnt != 0);
			lmutex_lock(&tlsm->tls_lock);
		}
	}
	lmutex_unlock(&tlsm->tls_lock);
}

/*
 * This is called by _thrp_exit() to deallocate the thread's TLS.
 * Destructors for all allocated TLS are called here.
 */
void
tls_exit()
{
	ulwp_t *self = curthread;
	tls_metadata_t *tlsm = &self->ul_uberdata->tls_metadata;
	tls_t *tlsent;
	TLS_modinfo *tlsp;
	long moduleid;
	ulong_t nmods;

	if (tlsm->static_tls.tls_size == 0 && self->ul_ntlsent == 0)
		return;		/* no TLS */

	/*
	 * Call TLS destructors for all TLS allocated for this thread.
	 */
	lmutex_lock(&tlsm->tls_lock);
	nmods = tlsm->tls_modinfo.tls_size;
	for (moduleid = nmods - 1; moduleid >= 0; --moduleid) {
		/*
		 * Resume where we left off in the module array.
		 * tls_modinfo.tls_data may have changed since we
		 * dropped and reacquired tls_lock, but TLS modules
		 * retain their positions in the new array.
		 */
		tlsp = (TLS_modinfo *)tlsm->tls_modinfo.tls_data + moduleid;
		/*
		 * Call destructors for this module if there are any
		 * to be called and if it is part of the static TLS or
		 * if the dynamic TLS for the module has been allocated.
		 */
		if (tlsp->tm_tlsfiniarraycnt != 0 &&
		    ((tlsp->tm_flags & TM_FLG_STATICTLS) ||
		    (moduleid < self->ul_ntlsent &&
		    (tlsent = self->ul_tlsent) != NULL &&
		    tlsent[moduleid].tls_data != NULL))) {
			ulong_t arraycnt = tlsp->tm_tlsfiniarraycnt;
			void (**finiarray)(void) = tlsp->tm_tlsfiniarray;

			/*
			 * Call the destructors in descending order.
			 * We must drop tls_lock while doing this because
			 * we have no idea what the destructors will do.
			 */
			lmutex_unlock(&tlsm->tls_lock);
			finiarray += arraycnt;
			do {
				(**--finiarray)();
			} while (--arraycnt != 0);
			lmutex_lock(&tlsm->tls_lock);
		}
	}
	lmutex_unlock(&tlsm->tls_lock);

	tls_free(self);
}

/*
 * We only free the dynamically allocated TLS; the statically
 * allocated TLS is reused when the ulwp_t is reallocated.
 */
void
tls_free(ulwp_t *ulwp)
{
	ulong_t moduleid;
	tls_t *tlsent;
	size_t ntlsent;
	void *base;
	size_t size;

	if ((tlsent = ulwp->ul_tlsent) == NULL ||
	    (ntlsent = ulwp->ul_ntlsent) == 0)
		return;

	for (moduleid = 0; moduleid < ntlsent; moduleid++, tlsent++) {
		if ((base = tlsent->tls_data) != NULL &&
		    (size = tlsent->tls_size) != 0)
			lfree(base, size);
		tlsent->tls_data = NULL;	/* paranoia */
		tlsent->tls_size = 0;
	}
	lfree(ulwp->ul_tlsent, ntlsent * sizeof (tls_t));
	ulwp->ul_tlsent = NULL;
	ulwp->ul_ntlsent = 0;
}
456