/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil -*- */
/* util/support/threads.c - Portable thread support */
/*
 * Copyright 2004,2005,2006,2007,2008 by the Massachusetts Institute of
 * Technology. All Rights Reserved.
 *
 * Export of this software from the United States of America may
 * require a specific license from the United States Government.
 * It is the responsibility of any person or organization contemplating
 * export to obtain such a license before exporting.
 *
 * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
 * distribute this software and its documentation for any purpose and
 * without fee is hereby granted, provided that the above copyright
 * notice appear in all copies and that both that copyright notice and
 * this permission notice appear in supporting documentation, and that
 * the name of M.I.T. not be used in advertising or publicity pertaining
 * to distribution of the software without specific, written prior
 * permission. Furthermore if you modify this software you must label
 * your software as modified software and not distribute it in such a
 * fashion that it might be confused with the original M.I.T. software.
 * M.I.T. makes no representations about the suitability of
 * this software for any purpose. It is provided "as is" without express
 * or implied warranty.
 */

#define THREAD_SUPPORT_IMPL
#include "k5-platform.h"
#include "k5-thread.h"
#include "supp-int.h"

MAKE_INIT_FUNCTION(krb5int_thread_support_init);
MAKE_FINI_FUNCTION(krb5int_thread_support_fini);

/* This function used to be referenced from elsewhere in the tree, but is now
 * only used internally. Keep it linker-visible for now. */
int krb5int_pthread_loaded(void);

#ifndef ENABLE_THREADS /* no thread support */

static void (*destructors[K5_KEY_MAX])(void *);
struct tsd_block { void *values[K5_KEY_MAX]; };
static struct tsd_block tsd_no_threads;
static unsigned char destructors_set[K5_KEY_MAX];

int krb5int_pthread_loaded (void)
{
    return 0;
}

#elif defined(_WIN32)

static DWORD tls_idx;
static CRITICAL_SECTION key_lock;
struct tsd_block {
    void *values[K5_KEY_MAX];
};
static void (*destructors[K5_KEY_MAX])(void *);
static unsigned char destructors_set[K5_KEY_MAX];

void krb5int_thread_detach_hook (void)
{
    /* XXX Memory leak here!
       Need to destroy all TLS objects we know about for this thread. */
    struct tsd_block *t;
    int i, err;

    err = CALL_INIT_FUNCTION(krb5int_thread_support_init);
    if (err)
        return;

    t = TlsGetValue(tls_idx);
    if (t == NULL)
        return;
    for (i = 0; i < K5_KEY_MAX; i++) {
        if (destructors_set[i] && destructors[i] && t->values[i]) {
            void *v = t->values[i];
            t->values[i] = 0;
            (*destructors[i])(v);
        }
    }
}
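
/*
 * Illustrative sketch only (an assumption about the surrounding Windows
 * build, not code from this file): the hook above is expected to be
 * called when a thread exits, e.g. from the DLL entry point:
 *
 *     BOOL WINAPI DllMain(HINSTANCE inst, DWORD reason, LPVOID reserved)
 *     {
 *         if (reason == DLL_THREAD_DETACH)
 *             krb5int_thread_detach_hook();
 *         return TRUE;
 *     }
 */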

/* Stub function not used on Windows. */
int krb5int_pthread_loaded (void)
{
    return 0;
}
#else /* POSIX threads */

/* Must support register/delete/register sequence, e.g., if krb5 is
   loaded so this support code stays in the process, and gssapi is
   loaded, unloaded, and loaded again. */
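/*
 * For illustration only (K5_KEY_EXAMPLE is a hypothetical key name, not
 * one defined in k5-thread.h), the sequence that has to keep working is
 * roughly:
 *
 *     k5_key_register(K5_KEY_EXAMPLE, free);    -- gssapi loaded
 *     k5_key_delete(K5_KEY_EXAMPLE);            -- gssapi unloaded
 *     k5_key_register(K5_KEY_EXAMPLE, free);    -- gssapi loaded again
 */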

static k5_mutex_t key_lock = K5_MUTEX_PARTIAL_INITIALIZER;
static void (*destructors[K5_KEY_MAX])(void *);
static unsigned char destructors_set[K5_KEY_MAX];

/* This is not safe yet!

   Thread termination concurrent with key deletion can cause two
   threads to interfere. It's a bit tricky, since one of the threads
   will want to remove this structure from the list being walked by
   the other.

   Other cases, like looking up data while the library owning the key
   is in the process of being unloaded, we don't worry about. */

struct tsd_block {
    struct tsd_block *next;
    void *values[K5_KEY_MAX];
};

#ifdef HAVE_PRAGMA_WEAK_REF
# pragma weak pthread_once
# pragma weak pthread_mutex_lock
# pragma weak pthread_mutex_unlock
# pragma weak pthread_mutex_destroy
# pragma weak pthread_mutex_init
# pragma weak pthread_self
# pragma weak pthread_getspecific
# pragma weak pthread_setspecific
# pragma weak pthread_key_create
# pragma weak pthread_key_delete
# pragma weak pthread_create
# pragma weak pthread_join
# define K5_PTHREADS_LOADED (krb5int_pthread_loaded())
static volatile int flag_pthread_loaded = -1;
static void loaded_test_aux(void)
{
    if (flag_pthread_loaded == -1)
        flag_pthread_loaded = 1;
    else
        /* Could we have been called twice? */
        flag_pthread_loaded = 0;
}
static pthread_once_t loaded_test_once = PTHREAD_ONCE_INIT;
int krb5int_pthread_loaded (void)
{
    int x = flag_pthread_loaded;
    if (x != -1)
        return x;
    if (&pthread_getspecific == 0
        || &pthread_setspecific == 0
        || &pthread_key_create == 0
        || &pthread_key_delete == 0
        || &pthread_once == 0
        || &pthread_mutex_lock == 0
        || &pthread_mutex_unlock == 0
        || &pthread_mutex_destroy == 0
        || &pthread_mutex_init == 0
        || &pthread_self == 0
        /* Any program that's really multithreaded will have to be
           able to create threads. */
        || &pthread_create == 0
        || &pthread_join == 0
        /* Okay, all the interesting functions -- or stubs for them --
           seem to be present. If we call pthread_once, does it
           actually seem to cause the indicated function to get called
           exactly one time? */
        || pthread_once(&loaded_test_once, loaded_test_aux) != 0
        || pthread_once(&loaded_test_once, loaded_test_aux) != 0
        /* This catches cases where pthread_once does nothing, and
           never causes the function to get called. That's a pretty
           clear violation of the POSIX spec, but hey, it happens. */
        || flag_pthread_loaded < 0) {
        flag_pthread_loaded = 0;
        return 0;
    }
    /* If we wanted to be super-paranoid, we could try testing whether
       pthread_get/setspecific work, too. I don't know -- so far --
       of any system with non-functional stubs for those. */
    return flag_pthread_loaded;
}
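
/*
 * Illustrative sketch of the weak-reference test used above (not
 * additional code in this file): with #pragma weak, an unsatisfied
 * reference resolves to a null address instead of failing at link time,
 * so availability of libpthread can be checked at run time:
 *
 *     #pragma weak pthread_self
 *
 *     static int have_pthreads(void)
 *     {
 *         return &pthread_self != 0;
 *     }
 */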

static struct tsd_block tsd_if_single;
# define GET_NO_PTHREAD_TSD() (&tsd_if_single)
#else
# define K5_PTHREADS_LOADED (1)
int krb5int_pthread_loaded (void)
{
    return 1;
}

# define GET_NO_PTHREAD_TSD() (abort(),(struct tsd_block *)0)
#endif

static pthread_key_t key;
static void thread_termination(void *);

static void thread_termination (void *tptr)
{
    int i, pass, none_found;
    struct tsd_block *t = tptr;

    k5_mutex_lock(&key_lock);

    /*
     * Make multiple passes in case, for example, a libkrb5 cleanup
     * function wants to print out an error message, which causes
     * com_err to allocate a thread-specific buffer, after we just
     * freed up the old one.
     *
     * Shouldn't actually happen, if we're careful, but check just in
     * case.
     */

    pass = 0;
    none_found = 0;
    while (pass < 4 && !none_found) {
        none_found = 1;
        for (i = 0; i < K5_KEY_MAX; i++) {
            if (destructors_set[i] && destructors[i] && t->values[i]) {
                void *v = t->values[i];
                t->values[i] = 0;
                (*destructors[i])(v);
                none_found = 0;
            }
        }
        pass++;
    }
    free (t);
    k5_mutex_unlock(&key_lock);

    /* remove thread from global linked list */
}

#endif /* no threads vs Win32 vs POSIX */

void *k5_getspecific (k5_key_t keynum)
{
    struct tsd_block *t;
    int err;

    err = CALL_INIT_FUNCTION(krb5int_thread_support_init);
    if (err)
        return NULL;

    assert(destructors_set[keynum] == 1);

#ifndef ENABLE_THREADS

    t = &tsd_no_threads;

#elif defined(_WIN32)

    t = TlsGetValue(tls_idx);

#else /* POSIX */

    if (K5_PTHREADS_LOADED)
        t = pthread_getspecific(key);
    else
        t = GET_NO_PTHREAD_TSD();

#endif

    if (t == NULL)
        return NULL;
    return t->values[keynum];
}

int k5_setspecific (k5_key_t keynum, void *value)
{
    struct tsd_block *t;
    int err;

    err = CALL_INIT_FUNCTION(krb5int_thread_support_init);
    if (err)
        return err;

    assert(destructors_set[keynum] == 1);

#ifndef ENABLE_THREADS

    t = &tsd_no_threads;

#elif defined(_WIN32)

    t = TlsGetValue(tls_idx);
    if (t == NULL) {
        int i;
        t = malloc(sizeof(*t));
        if (t == NULL)
            return ENOMEM;
        for (i = 0; i < K5_KEY_MAX; i++)
            t->values[i] = 0;
        /* add to global linked list */
        /* t->next = 0; */
        err = TlsSetValue(tls_idx, t);
        if (!err) {
            free(t);
            return GetLastError();
        }
    }

#else /* POSIX */

    if (K5_PTHREADS_LOADED) {
        t = pthread_getspecific(key);
        if (t == NULL) {
            int i;
            t = malloc(sizeof(*t));
            if (t == NULL)
                return ENOMEM;
            for (i = 0; i < K5_KEY_MAX; i++)
                t->values[i] = 0;
            /* add to global linked list */
            t->next = 0;
            err = pthread_setspecific(key, t);
            if (err) {
                free(t);
                return err;
            }
        }
    } else {
        t = GET_NO_PTHREAD_TSD();
    }

#endif

    t->values[keynum] = value;
    return 0;
}
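
/*
 * Typical caller-side pattern built on the two functions above
 * (illustrative sketch; K5_KEY_EXAMPLE, make_buffer() and free_buffer()
 * are hypothetical, not part of this library):
 *
 *     struct buffer *b = k5_getspecific(K5_KEY_EXAMPLE);
 *     if (b == NULL) {
 *         b = make_buffer();
 *         if (b != NULL && k5_setspecific(K5_KEY_EXAMPLE, b) != 0) {
 *             free_buffer(b);
 *             b = NULL;
 *         }
 *     }
 *
 * The destructor registered for the key then releases the cached object
 * when the thread terminates (see thread_termination above).
 */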

int k5_key_register (k5_key_t keynum, void (*destructor)(void *))
{
    int err;

    err = CALL_INIT_FUNCTION(krb5int_thread_support_init);
    if (err)
        return err;

#ifndef ENABLE_THREADS

    assert(destructors_set[keynum] == 0);
    destructors[keynum] = destructor;
    destructors_set[keynum] = 1;

#elif defined(_WIN32)

    /* XXX: This can raise EXCEPTION_POSSIBLE_DEADLOCK. */
    EnterCriticalSection(&key_lock);
    assert(destructors_set[keynum] == 0);
    destructors_set[keynum] = 1;
    destructors[keynum] = destructor;
    LeaveCriticalSection(&key_lock);

#else /* POSIX */

    k5_mutex_lock(&key_lock);
    assert(destructors_set[keynum] == 0);
    destructors_set[keynum] = 1;
    destructors[keynum] = destructor;
    k5_mutex_unlock(&key_lock);

#endif
    return 0;
}

int k5_key_delete (k5_key_t keynum)
{
#ifndef ENABLE_THREADS

    assert(destructors_set[keynum] == 1);
    if (destructors[keynum] && tsd_no_threads.values[keynum])
        (*destructors[keynum])(tsd_no_threads.values[keynum]);
    destructors[keynum] = 0;
    tsd_no_threads.values[keynum] = 0;
    destructors_set[keynum] = 0;

#elif defined(_WIN32)

    /* XXX: This can raise EXCEPTION_POSSIBLE_DEADLOCK. */
    EnterCriticalSection(&key_lock);
    /* XXX Memory leak here!
       Need to destroy the associated data for all threads.
       But watch for race conditions in case threads are going away too. */
    assert(destructors_set[keynum] == 1);
    destructors_set[keynum] = 0;
    destructors[keynum] = 0;
    LeaveCriticalSection(&key_lock);

#else /* POSIX */

    /* XXX RESOURCE LEAK: Need to destroy the allocated objects first! */
    k5_mutex_lock(&key_lock);
    assert(destructors_set[keynum] == 1);
    destructors_set[keynum] = 0;
    destructors[keynum] = NULL;
    k5_mutex_unlock(&key_lock);

#endif

    return 0;
}

int krb5int_call_thread_support_init (void)
{
    return CALL_INIT_FUNCTION(krb5int_thread_support_init);
}

#include "cache-addrinfo.h"

int krb5int_thread_support_init (void)
{
    int err;

#ifdef SHOW_INITFINI_FUNCS
    printf("krb5int_thread_support_init\n");
#endif

#ifndef ENABLE_THREADS

    /* Nothing to do for TLS initialization. */

#elif defined(_WIN32)

    tls_idx = TlsAlloc();
    /* XXX This can raise an exception if memory is low! */
    InitializeCriticalSection(&key_lock);

#else /* POSIX */

    err = k5_mutex_finish_init(&key_lock);
    if (err)
        return err;
    if (K5_PTHREADS_LOADED) {
        err = pthread_key_create(&key, thread_termination);
        if (err)
            return err;
    }

#endif

    err = krb5int_init_fac();
    if (err)
        return err;

    err = krb5int_err_init();
    if (err)
        return err;

    return 0;
}

void krb5int_thread_support_fini (void)
{
    if (! INITIALIZER_RAN (krb5int_thread_support_init))
        return;

#ifdef SHOW_INITFINI_FUNCS
    printf("krb5int_thread_support_fini\n");
#endif

#ifndef ENABLE_THREADS

    /* Do nothing. */

#elif defined(_WIN32)

    /* ... free stuff ... */
    TlsFree(tls_idx);
    DeleteCriticalSection(&key_lock);

#else /* POSIX */

    if (! INITIALIZER_RAN(krb5int_thread_support_init))
        return;
    if (K5_PTHREADS_LOADED)
        pthread_key_delete(key);
    /* ... delete stuff ... */
    k5_mutex_destroy(&key_lock);

#endif

    krb5int_fini_fac();
}

/* Mutex allocation functions, for use in plugins that may not know
   what options a given set of libraries was compiled with. */
int KRB5_CALLCONV
krb5int_mutex_alloc (k5_mutex_t **m)
{
    k5_mutex_t *ptr;
    int err;

    ptr = malloc (sizeof (k5_mutex_t));
    if (ptr == NULL)
        return ENOMEM;
    err = k5_mutex_init (ptr);
    if (err) {
        free (ptr);
        return err;
    }
    *m = ptr;
    return 0;
}

void KRB5_CALLCONV
krb5int_mutex_free (k5_mutex_t *m)
{
    (void) k5_mutex_destroy (m);
    free (m);
}

/* Callable versions of the various macros. */
void KRB5_CALLCONV
krb5int_mutex_lock (k5_mutex_t *m)
{
    k5_mutex_lock (m);
}
void KRB5_CALLCONV
krb5int_mutex_unlock (k5_mutex_t *m)
{
    k5_mutex_unlock (m);
}
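
/*
 * Illustrative plugin-side usage of the four functions above (a sketch,
 * not code from any particular plugin):
 *
 *     k5_mutex_t *lock;
 *     int err;
 *
 *     err = krb5int_mutex_alloc(&lock);
 *     if (err)
 *         return err;
 *     krb5int_mutex_lock(lock);
 *     ... touch shared plugin state ...
 *     krb5int_mutex_unlock(lock);
 *     ...
 *     krb5int_mutex_free(lock);
 */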

#ifdef USE_CONDITIONAL_PTHREADS

int
k5_os_mutex_init(k5_os_mutex *m)
{
    if (krb5int_pthread_loaded())
        return pthread_mutex_init(m, 0);
    else
        return 0;
}

int
k5_os_mutex_destroy(k5_os_mutex *m)
{
    if (krb5int_pthread_loaded())
        return pthread_mutex_destroy(m);
    else
        return 0;
}

int
k5_os_mutex_lock(k5_os_mutex *m)
{
    if (krb5int_pthread_loaded())
        return pthread_mutex_lock(m);
    else
        return 0;
}

int
k5_os_mutex_unlock(k5_os_mutex *m)
{
    if (krb5int_pthread_loaded())
        return pthread_mutex_unlock(m);
    else
        return 0;
}

int
k5_once(k5_once_t *once, void (*fn)(void))
{
    if (krb5int_pthread_loaded())
        return pthread_once(&once->o, fn);
    else
        return k5_os_nothread_once(&once->n, fn);
}
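
/*
 * Illustrative caller-side sketch for k5_once (assumes the K5_ONCE_INIT
 * initializer from k5-thread.h; do_init() is a hypothetical one-time
 * setup routine):
 *
 *     static k5_once_t once = K5_ONCE_INIT;
 *     static void do_init(void) { ... one-time setup ... }
 *
 *     err = k5_once(&once, do_init);
 *     if (err)
 *         return err;
 */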

#else /* USE_CONDITIONAL_PTHREADS */

#undef k5_os_mutex_init
#undef k5_os_mutex_destroy
#undef k5_os_mutex_lock
#undef k5_os_mutex_unlock
#undef k5_once

int k5_os_mutex_init(k5_os_mutex *m);
int k5_os_mutex_destroy(k5_os_mutex *m);
int k5_os_mutex_lock(k5_os_mutex *m);
int k5_os_mutex_unlock(k5_os_mutex *m);
int k5_once(k5_once_t *once, void (*fn)(void));

/* Stub functions */
int
k5_os_mutex_init(k5_os_mutex *m)
{
    return 0;
}
int
k5_os_mutex_destroy(k5_os_mutex *m)
{
    return 0;
}
int
k5_os_mutex_lock(k5_os_mutex *m)
{
    return 0;
}
int
k5_os_mutex_unlock(k5_os_mutex *m)
{
    return 0;
}
int
k5_once(k5_once_t *once, void (*fn)(void))
{
    return 0;
}

#endif /* not USE_CONDITIONAL_PTHREADS */