xref: /illumos-gate/usr/src/uts/common/gssapi/mechs/krb5/include/k5-thread.h (revision 6e375c8351497b82ffa4f33cbf61d712999b4605)
1 /*
2  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
3  * Use is subject to license terms.
4  */
5 
6 /*
7  * include/k5-thread.h
8  *
9  * Copyright 2004,2005,2006 by the Massachusetts Institute of Technology.
10  * All Rights Reserved.
11  *
12  * Export of this software from the United States of America may
13  *   require a specific license from the United States Government.
14  *   It is the responsibility of any person or organization contemplating
15  *   export to obtain such a license before exporting.
16  *
17  * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
18  * distribute this software and its documentation for any purpose and
19  * without fee is hereby granted, provided that the above copyright
20  * notice appear in all copies and that both that copyright notice and
21  * this permission notice appear in supporting documentation, and that
22  * the name of M.I.T. not be used in advertising or publicity pertaining
23  * to distribution of the software without specific, written prior
24  * permission.  Furthermore if you modify this software you must label
25  * your software as modified software and not distribute it in such a
26  * fashion that it might be confused with the original M.I.T. software.
27  * M.I.T. makes no representations about the suitability of
28  * this software for any purpose.  It is provided "as is" without express
29  * or implied warranty.
30  *
31  *
32  * Preliminary thread support.
33  */
34 
35 #ifndef K5_THREAD_H
36 #define K5_THREAD_H
37 
38 #ifdef _KERNEL
39 
40 #include <sys/ksynch.h>
41 
42 typedef kmutex_t k5_mutex_t;
43 
44 #define K5_MUTEX_PARTIAL_INITIALIZER {0}
45 
46 /* ARGSUSED */
47 static void k5_mutex_assert_locked(k5_mutex_t *m) { }
48 
49 static int
50 k5_mutex_lock(k5_mutex_t *m)
51 {
52   mutex_enter(m);
53   return (0);
54 }
55 
56 static int
57 k5_mutex_unlock(k5_mutex_t *m)
58 {
59   mutex_exit(m);
60   return (0);
61 }
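
/* Usage sketch (illustrative only): in kernel context a k5_mutex_t is
   just a kmutex_t, so a caller sets one up with the standard kernel
   routines and then goes through the wrappers above, e.g.:

	k5_mutex_t lock;

	mutex_init(&lock, NULL, MUTEX_DEFAULT, NULL);
	(void) k5_mutex_lock(&lock);
	...update the shared state the lock protects...
	(void) k5_mutex_unlock(&lock);
	mutex_destroy(&lock);  */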
62 
63 
64 #else /* _KERNEL */
65 
66 #include "autoconf.h"
67 #ifndef KRB5_CALLCONV
68 # define KRB5_CALLCONV
69 #endif
70 #ifndef KRB5_CALLCONV_C
71 # define KRB5_CALLCONV_C
72 #endif
73 
74 /* Interface (tentative):
75 
76    Mutex support:
77 
78    // Between these two, we should be able to do pure compile-time
79    // and pure run-time initialization.
80    //   POSIX:   partial initializer is PTHREAD_MUTEX_INITIALIZER,
81    //            finish does nothing
82    //   Windows: partial initializer is an invalid handle,
83    //            finish does the real initialization work
84    //   debug:   partial initializer sets one magic value,
85    //            finish verifies and sets a new magic value for
86    //              lock/unlock to check
87    k5_mutex_t foo_mutex = K5_MUTEX_PARTIAL_INITIALIZER;
88    int k5_mutex_finish_init(k5_mutex_t *);
89    // for dynamic allocation
90    int k5_mutex_init(k5_mutex_t *);
91    // Must work for both kinds of alloc, even if it means adding flags.
92    int k5_mutex_destroy(k5_mutex_t *);
93 
94    // As before.
95    int k5_mutex_lock(k5_mutex_t *);
96    int k5_mutex_unlock(k5_mutex_t *);
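
   // A usage sketch, to make the intended pattern concrete; the foo_*
   // names here are invented for illustration, not real library symbols:

   static k5_mutex_t foo_lock = K5_MUTEX_PARTIAL_INITIALIZER;
   int foo_do_something(void)
   {
       int err = k5_mutex_lock(&foo_lock);
       if (err)
           return err;
       ...touch the state foo_lock protects...
       return k5_mutex_unlock(&foo_lock);
   }
   // k5_mutex_finish_init(&foo_lock) must have been run once, by the
   // library's init function, before the first such call.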
97 
98    In each library, one new function to finish the static mutex init,
99    and any other library-wide initialization that might be desired.
100    On POSIX, this function would be called via the second support
101    function (see below).  On Windows, it would be called at library
102    load time.  These functions, or functions they call, should be the
103    only places that k5_mutex_finish_init gets called.
104 
105    A second function or macro called at various possible "first" entry
106    points which either calls pthread_once on the first function
107    (POSIX), or checks some flag set by the first function (Windows,
108    debug support), and possibly returns an error.  (In the
109    non-threaded case, a simple flag can be used to avoid multiple
110    invocations, and the mutexes don't need run-time initialization
111    anyway.)
112 
113    A third function for library termination calls mutex_destroy on
114    each mutex for the library.  This function would be called
115    automatically at library unload time.  If it turns out to be needed
116    at exit time for libraries that don't get unloaded, perhaps we
117    should also use atexit().  Any static mutexes should be cleaned up
118    with k5_mutex_destroy here.
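
   // Sketched out for a library "foo" (names invented for illustration),
   // the three functions look roughly like:

   // the "first" function, run exactly once
   static void foo_lib_init_aux(void)
   {
       k5_mutex_finish_init(&foo_lock);
   }
   // the "second" function, called at possible "first" entry points; it
   // arranges (via the once mechanism below) for foo_lib_init_aux to
   // run exactly once, then returns 0 or an error
   int foo_lib_init(void);
   // the "third" function, run at unload (or atexit) time
   void foo_lib_fini(void)
   {
       k5_mutex_destroy(&foo_lock);
   }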
119 
120    How does that second support function invoke the first support
121    function only once?  Through something modelled on pthread_once
122    that I haven't written up yet.  Probably:
123 
124    k5_once_t foo_once = K5_ONCE_INIT;
125    k5_once(k5_once_t *, void (*)(void));
126 
127    For POSIX: Map onto pthread_once facility.
128    For non-threaded case: A simple flag.
129    For Windows: Not needed; library init code takes care of it.
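
   // So, on POSIX, the "second" function sketched above reduces to
   // something like (illustrative only):

   static k5_once_t foo_once = K5_ONCE_INIT;
   int foo_lib_init(void)
   {
       return k5_once(&foo_once, foo_lib_init_aux);
   }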
130 
131    XXX: A general k5_once mechanism isn't possible for Windows,
132    without faking it through named mutexes or mutexes initialized at
133    startup.  I was only using it in one place outside these headers,
134    so I'm dropping the general scheme.  Eventually the existing uses
135    in k5-thread.h and k5-platform.h will be converted to pthread_once
136    or static variables.
137 
138 
139    Thread-specific data:
140 
141    // TSD keys are limited in number in gssapi/krb5/com_err; enumerate
142    // them all.  This allows support code init to allocate the
143    // necessary storage for pointers all at once, and avoids any
144    // possible error in key creation.
145    enum { ... } k5_key_t;
146    // Register destructor function.  Called in library init code.
147    int k5_key_register(k5_key_t, void (*destructor)(void *));
148    // Returns NULL or data.
149    void *k5_getspecific(k5_key_t);
150    // Returns error if key out of bounds, or the pointer table can't
151    // be allocated.  A call to k5_key_register must have happened first.
152    // This may trigger the calling of pthread_setspecific on POSIX.
153    int k5_setspecific(k5_key_t, void *);
154    // Called in library termination code.
155    // Trashes data in all threads, calling the registered destructor
156    // (but calling it from the current thread).
157    int k5_key_delete(k5_key_t);
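
   // A typical get-or-create use of the above, as a sketch; the key
   // name, default_name, and error handling are illustrative only:

   char *name = k5_getspecific(K5_KEY_FOO);
   if (name == NULL) {
       name = strdup(default_name);
       if (name == NULL || k5_setspecific(K5_KEY_FOO, name) != 0)
           ...report failure...
   }
   // k5_key_register(K5_KEY_FOO, free) is assumed to have been done in
   // library init code, so free() reclaims each thread's copy.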
158 
159    For the non-threaded version, the support code will have a static
160    array indexed by k5_key_t values, and get/setspecific simply access
161    the array elements.
162 
163    The TSD destructor table is global state, protected by a mutex if
164    threads are enabled.
165 
166    Debug support: Not much.  Might check if k5_key_register has been
167    called and abort if not.
168 
169 
170    Any actual external symbols will use the krb5int_ prefix.  The k5_
171    names will be simple macros or inline functions to rename the
172    external symbols, or slightly more complex ones to expand the
173    implementation inline (e.g., map to POSIX versions and/or debug
174    code using __FILE__ and the like).
175 
176 
177    More to be added, perhaps.  */
178 
179 #undef DEBUG_THREADS /* SUNW14resync XXX */
180 #undef DEBUG_THREADS_LOC /* SUNW14resync XXX */
181 #undef DEBUG_THREADS_SLOW /* debugging stuff that'll slow things down? */
182 #undef DEBUG_THREADS_STATS
183 
184 #ifndef _KERNEL
185 #include <assert.h>
186 #include <stdarg.h>
187 #define ASSERT assert
188 #endif
189 
190 /* For tracking locations of, e.g., the last lock or unlock of a mutex.  */
191 #ifdef DEBUG_THREADS_LOC
192 typedef struct {
193     const char *filename;
194     int lineno;
195 } k5_debug_loc;
196 #define K5_DEBUG_LOC_INIT	{ __FILE__, __LINE__ }
197 #if __GNUC__ >= 2
198 #define K5_DEBUG_LOC		(__extension__ (k5_debug_loc)K5_DEBUG_LOC_INIT)
199 #else
200 static inline k5_debug_loc k5_debug_make_loc(const char *file, int line)
201 {
202     k5_debug_loc l;
203     l.filename = file;
204     l.lineno = line;
205     return l;
206 }
207 #define K5_DEBUG_LOC		(k5_debug_make_loc(__FILE__,__LINE__))
208 #endif
209 #else /* ! DEBUG_THREADS_LOC */
210 typedef char k5_debug_loc;
211 #define K5_DEBUG_LOC_INIT	0
212 #define K5_DEBUG_LOC		0
213 #endif
214 
215 #define k5_debug_update_loc(L)	((L) = K5_DEBUG_LOC)
216 
217 
218 
219 /* Statistics gathering:
220 
221    Currently incomplete, don't try enabling it.
222 
223    Eventually: Report number of times locked, total and standard
224    deviation of the time the lock was held, total and std dev time
225    spent waiting for the lock.  "Report" will probably mean "write a
226    line to a file if a magic environment variable is set."  */
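
/* As a sketch of that eventual report (purely illustrative), the
   k5_timediff_stats fields kept below are enough to derive, for
   n = count samples:

	mean   = valsum / n;
	stddev = sqrt(valsqsum / n - mean * mean);  */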
227 
228 #ifdef DEBUG_THREADS_STATS
229 
230 #if HAVE_TIME_H && (!defined(HAVE_SYS_TIME_H) || defined(TIME_WITH_SYS_TIME))
231 # include <time.h>
232 #endif
233 #if HAVE_SYS_TIME_H
234 # include <sys/time.h>
235 #endif
236 #ifdef HAVE_STDINT_H
237 # include <stdint.h>
238 #endif
239 /* for memset */
240 #include <string.h>
241 /* for uint64_t */
242 #include <inttypes.h>
243 typedef uint64_t k5_debug_timediff_t; /* or long double */
244 typedef struct timeval k5_debug_time_t;
245 static inline k5_debug_timediff_t
246 timediff(k5_debug_time_t t2, k5_debug_time_t t1)
247 {
248     return (t2.tv_sec - t1.tv_sec) * 1000000 + (t2.tv_usec - t1.tv_usec);
249 }
250 static inline k5_debug_time_t get_current_time(void)
251 {
252     struct timeval tv;
253     if (gettimeofday(&tv,0) < 0) { tv.tv_sec = tv.tv_usec = 0; }
254     return tv;
255 }
256 struct k5_timediff_stats {
257     k5_debug_timediff_t valmin, valmax, valsum, valsqsum;
258 };
259 typedef struct {
260     int count;
261     k5_debug_time_t time_acquired, time_created;
262     struct k5_timediff_stats lockwait, lockheld;
263 } k5_debug_mutex_stats;
264 #define k5_mutex_init_stats(S)					\
265 	(memset((S), 0, sizeof(k5_debug_mutex_stats)),	\
266 	 (S)->time_created = get_current_time(),		\
267 	 0)
268 #define k5_mutex_finish_init_stats(S) 	(0)
269 #define K5_MUTEX_STATS_INIT	{ 0, {0}, {0}, {0}, {0} }
270 typedef k5_debug_time_t k5_mutex_stats_tmp;
271 #define k5_mutex_stats_start()	get_current_time()
272 void KRB5_CALLCONV krb5int_mutex_lock_update_stats(k5_debug_mutex_stats *m,
273 						   k5_mutex_stats_tmp start);
274 void KRB5_CALLCONV krb5int_mutex_unlock_update_stats(k5_debug_mutex_stats *m);
275 #define k5_mutex_lock_update_stats	krb5int_mutex_lock_update_stats
276 #define k5_mutex_unlock_update_stats	krb5int_mutex_unlock_update_stats
277 void KRB5_CALLCONV krb5int_mutex_report_stats(/* k5_mutex_t *m */);
278 
279 #else
280 
281 typedef char k5_debug_mutex_stats;
282 #define k5_mutex_init_stats(S)		(*(S) = 's', 0)
283 #define k5_mutex_finish_init_stats(S)	(0)
284 #define K5_MUTEX_STATS_INIT		's'
285 typedef int k5_mutex_stats_tmp;
286 #define k5_mutex_stats_start()		(0)
287 #ifdef __GNUC__
288 static void
289 k5_mutex_lock_update_stats(k5_debug_mutex_stats *m, k5_mutex_stats_tmp t)
290 {
291 }
292 #else
293 # define k5_mutex_lock_update_stats(M,S)	(S)
294 #endif
295 #define k5_mutex_unlock_update_stats(M)	(*(M) = 's')
296 
297 /* If statistics tracking isn't enabled, these functions don't actually
298    do anything.  Declare them anyway so we can do type checking etc.  */
299 void KRB5_CALLCONV krb5int_mutex_lock_update_stats(k5_debug_mutex_stats *m,
300 						   k5_mutex_stats_tmp start);
301 void KRB5_CALLCONV krb5int_mutex_unlock_update_stats(k5_debug_mutex_stats *m);
302 void KRB5_CALLCONV krb5int_mutex_report_stats(/* k5_mutex_t *m */);
303 
304 #define krb5int_mutex_report_stats(M)	((M)->stats = 'd')
305 
306 #endif
307 
308 
309 
310 /* Define the OS mutex bit.  */
311 
312 /* First, if we're not actually doing multiple threads, do we
313    want the debug support or not?  */
314 
315 #ifdef DEBUG_THREADS
316 
317 enum k5_mutex_init_states {
318     K5_MUTEX_DEBUG_PARTLY_INITIALIZED = 0x12,
319     K5_MUTEX_DEBUG_INITIALIZED,
320     K5_MUTEX_DEBUG_DESTROYED
321 };
322 enum k5_mutex_flag_states {
323     K5_MUTEX_DEBUG_UNLOCKED = 0x23,
324     K5_MUTEX_DEBUG_LOCKED
325 };
326 
327 typedef struct {
328     enum k5_mutex_init_states initialized;
329     enum k5_mutex_flag_states locked;
330 } k5_os_nothread_mutex;
331 
332 # define K5_OS_NOTHREAD_MUTEX_PARTIAL_INITIALIZER \
333 	{ K5_MUTEX_DEBUG_PARTLY_INITIALIZED, K5_MUTEX_DEBUG_UNLOCKED }
334 
335 # define k5_os_nothread_mutex_finish_init(M)				\
336 	(ASSERT((M)->initialized != K5_MUTEX_DEBUG_INITIALIZED),	\
337 	 ASSERT((M)->initialized == K5_MUTEX_DEBUG_PARTLY_INITIALIZED),	\
338 	 ASSERT((M)->locked == K5_MUTEX_DEBUG_UNLOCKED),		\
339 	 (M)->initialized = K5_MUTEX_DEBUG_INITIALIZED, 0)
340 # define k5_os_nothread_mutex_init(M)			\
341 	((M)->initialized = K5_MUTEX_DEBUG_INITIALIZED,	\
342 	 (M)->locked = K5_MUTEX_DEBUG_UNLOCKED, 0)
343 # define k5_os_nothread_mutex_destroy(M)				\
344 	(ASSERT((M)->initialized == K5_MUTEX_DEBUG_INITIALIZED),	\
345 	 (M)->initialized = K5_MUTEX_DEBUG_DESTROYED, 0)
346 
347 # define k5_os_nothread_mutex_lock(M)			\
348 	(k5_os_nothread_mutex_assert_unlocked(M),	\
349 	 (M)->locked = K5_MUTEX_DEBUG_LOCKED, 0)
350 # define k5_os_nothread_mutex_unlock(M)			\
351 	(k5_os_nothread_mutex_assert_locked(M),		\
352 	 (M)->locked = K5_MUTEX_DEBUG_UNLOCKED, 0)
353 
354 # define k5_os_nothread_mutex_assert_locked(M)				\
355 	(ASSERT((M)->initialized == K5_MUTEX_DEBUG_INITIALIZED),	\
356 	 ASSERT((M)->locked != K5_MUTEX_DEBUG_UNLOCKED),		\
357 	 ASSERT((M)->locked == K5_MUTEX_DEBUG_LOCKED))
358 # define k5_os_nothread_mutex_assert_unlocked(M)			\
359 	(ASSERT((M)->initialized == K5_MUTEX_DEBUG_INITIALIZED),	\
360 	 ASSERT((M)->locked != K5_MUTEX_DEBUG_LOCKED),			\
361 	 ASSERT((M)->locked == K5_MUTEX_DEBUG_UNLOCKED))
362 
363 #else /* threads disabled and not debugging */
364 typedef char k5_os_nothread_mutex;
365 # define K5_OS_NOTHREAD_MUTEX_PARTIAL_INITIALIZER	0
366 /* Empty inline functions avoid the "statement with no effect"
367    warnings, and do better type-checking than functions that don't use
368    their arguments.  */
369 /* SUNW 1.4resync, remove "inline" to avoid warning */
370 /* ARGSUSED */
371 /* LINTED */
372 static int k5_os_nothread_mutex_finish_init(k5_os_nothread_mutex *m) {
373     return 0;
374 }
375 /* ARGSUSED */
376 /* LINTED */
377 static int k5_os_nothread_mutex_init(k5_os_nothread_mutex *m) {
378     return 0;
379 }
380 /* ARGSUSED */
381 /* LINTED */
382 static int k5_os_nothread_mutex_destroy(k5_os_nothread_mutex *m) {
383     return 0;
384 }
385 /* ARGSUSED */
386 /* LINTED */
387 static int k5_os_nothread_mutex_lock(k5_os_nothread_mutex *m) {
388     return 0;
389 }
390 /* ARGSUSED */
391 /* LINTED */
392 static int k5_os_nothread_mutex_unlock(k5_os_nothread_mutex *m) {
393     return 0;
394 }
395 # define k5_os_nothread_mutex_assert_locked(M)		((void)0)
396 # define k5_os_nothread_mutex_assert_unlocked(M)	((void)0)
397 
398 #endif
399 
400 /* Values:
401    2 - function has not been run
402    3 - function has been run
403    4 - function is being run -- deadlock detected */
404 typedef unsigned char k5_os_nothread_once_t;
405 # define K5_OS_NOTHREAD_ONCE_INIT	2
406 # define k5_os_nothread_once(O,F)					\
407 	(*(O) == 3 ? 0							\
408 	 : *(O) == 2 ? (*(O) = 4, (F)(), *(O) = 3, 0)			\
409 	 : (ASSERT(*(O) != 4), ASSERT(*(O) == 2 || *(O) == 3), 0))
410 
411 
412 
413 #ifndef ENABLE_THREADS
414 typedef k5_os_nothread_mutex k5_os_mutex;
415 # define K5_OS_MUTEX_PARTIAL_INITIALIZER	\
416 		K5_OS_NOTHREAD_MUTEX_PARTIAL_INITIALIZER
417 # define k5_os_mutex_finish_init	k5_os_nothread_mutex_finish_init
418 # define k5_os_mutex_init		k5_os_nothread_mutex_init
419 # define k5_os_mutex_destroy		k5_os_nothread_mutex_destroy
420 # define k5_os_mutex_lock		k5_os_nothread_mutex_lock
421 # define k5_os_mutex_unlock		k5_os_nothread_mutex_unlock
422 # define k5_os_mutex_assert_locked	k5_os_nothread_mutex_assert_locked
423 # define k5_os_mutex_assert_unlocked	k5_os_nothread_mutex_assert_unlocked
424 
425 # define k5_once_t			k5_os_nothread_once_t
426 # define K5_ONCE_INIT			K5_OS_NOTHREAD_ONCE_INIT
427 # define k5_once			k5_os_nothread_once
428 
429 #elif HAVE_PTHREAD
430 
431 # include <pthread.h>
432 
433 /* Weak reference support, etc.
434 
435    Linux: Stub mutex routines exist, but pthread_once does not.
436 
437    Solaris: In libc there's a pthread_once that doesn't seem to do
438    anything.  Bleah.  But pthread_mutexattr_setrobust_np is defined
439    only in libpthread.  However, some version of GNU libc (Red Hat's
440    Fedora Core 5, reportedly) seems to have that function, but no
441    declaration, so we'd have to declare it in order to test for its
442    address.  We now have tests to see if pthread_once actually works,
443    so stick with that for now.
444 
445    IRIX 6.5 stub pthread support in libc is really annoying.  The
446    pthread_mutex_lock function returns ENOSYS for a program not linked
447    against -lpthread.  No link-time failure, no weak symbols, etc.
448    The C library doesn't provide pthread_once; we can use weak
449    reference support for that.
450 
451    If weak references are not available, then for now, we assume that
452    the pthread support routines will always be available -- either the
453    real thing, or functional stubs that merely prohibit creating
454    threads.
455 
456    If we find a platform with non-functional stubs and no weak
457    references, we may have to resort to some hack like dlsym on the
458    symbol tables of the current process.  */
459 #ifdef HAVE_PRAGMA_WEAK_REF
460 # pragma weak pthread_once
461 # pragma weak pthread_mutex_lock
462 # pragma weak pthread_mutex_unlock
463 # pragma weak pthread_mutex_destroy
464 # pragma weak pthread_mutex_init
465 # pragma weak pthread_self
466 # pragma weak pthread_equal
467 # ifdef HAVE_PTHREAD_MUTEXATTR_SETROBUST_NP_IN_THREAD_LIB
468 #  pragma weak pthread_mutexattr_setrobust_np
469 # endif
470 # if !defined HAVE_PTHREAD_ONCE
471 #  define K5_PTHREADS_LOADED	(&pthread_once != 0)
472 # elif !defined HAVE_PTHREAD_MUTEXATTR_SETROBUST_NP \
473 	&& defined HAVE_PTHREAD_MUTEXATTR_SETROBUST_NP_IN_THREAD_LIB
474 #  define K5_PTHREADS_LOADED	(&pthread_mutexattr_setrobust_np != 0)
475 # else
476 #  define K5_PTHREADS_LOADED	(1)
477 # endif
478 #else
479 /* no pragma weak support */
480 # define K5_PTHREADS_LOADED	(1)
481 #endif
482 
483 #if defined(__mips) && defined(__sgi) && (defined(_SYSTYPE_SVR4) || defined(__SYSTYPE_SVR4__))
484 /* IRIX 6.5 stub pthread support in libc is really annoying.  The
485    pthread_mutex_lock function returns ENOSYS for a program not linked
486    against -lpthread.  No link-time failure, no weak reference tests,
487    etc.
488 
489    The C library doesn't provide pthread_once; we can use weak
490    reference support for that.  */
491 # ifndef HAVE_PRAGMA_WEAK_REF
492 #  if defined(__GNUC__) && __GNUC__ < 3
493 #   error "Please update to a newer gcc with weak symbol support, or switch to native cc, reconfigure and recompile."
494 #  else
495 #   error "Weak reference support is required"
496 #  endif
497 # endif
498 # define USE_PTHREAD_LOCK_ONLY_IF_LOADED
499 #endif
500 
501 #if !defined(HAVE_PTHREAD_MUTEX_LOCK) && !defined(USE_PTHREAD_LOCK_ONLY_IF_LOADED)
502 # define USE_PTHREAD_LOCK_ONLY_IF_LOADED
503 #endif
504 
505 #ifdef HAVE_PRAGMA_WEAK_REF
506 /* Can't rely on useful stubs -- see above regarding Solaris.  */
507 typedef struct {
508     pthread_once_t o;
509     k5_os_nothread_once_t n;
510 } k5_once_t;
511 # define K5_ONCE_INIT	{ PTHREAD_ONCE_INIT, K5_OS_NOTHREAD_ONCE_INIT }
512 # define k5_once(O,F)	(K5_PTHREADS_LOADED			\
513 			 ? pthread_once(&(O)->o,F)		\
514 			 : k5_os_nothread_once(&(O)->n,F))
515 #else
516 typedef pthread_once_t k5_once_t;
517 # define K5_ONCE_INIT	PTHREAD_ONCE_INIT
518 # define k5_once	pthread_once
519 #endif
520 
521 typedef struct {
522     pthread_mutex_t p;
523 #ifdef DEBUG_THREADS
524     pthread_t owner;
525 #endif
526 #ifdef USE_PTHREAD_LOCK_ONLY_IF_LOADED
527     k5_os_nothread_mutex n;
528 #endif
529 } k5_os_mutex;
530 
531 #ifdef DEBUG_THREADS
532 # ifdef __GNUC__
533 #  define k5_pthread_mutex_lock(M)			\
534 	({						\
535 	    k5_os_mutex *_m2 = (M);			\
536 	    int _r2 = pthread_mutex_lock(&_m2->p);	\
537 	    if (_r2 == 0) _m2->owner = pthread_self();	\
538 	    _r2;					\
539 	})
540 # else
541 static int
542 k5_pthread_mutex_lock(k5_os_mutex *m)
543 {
544     int r = pthread_mutex_lock(&m->p);
545     if (r)
546 	return r;
547     m->owner = pthread_self();
548     return 0;
549 }
550 # endif
551 # define k5_pthread_assert_locked(M)				\
552 	(K5_PTHREADS_LOADED					\
553 	 ? ASSERT(pthread_equal((M)->owner, pthread_self()))	\
554 	 : (void)0)
555 # define k5_pthread_mutex_unlock(M)	\
556 	(k5_pthread_assert_locked(M),	\
557 	 (M)->owner = (pthread_t) 0,	\
558 	 pthread_mutex_unlock(&(M)->p))
559 #else
560 # define k5_pthread_mutex_lock(M) pthread_mutex_lock(&(M)->p)
561 /* LINTED */
562 static void k5_pthread_assert_locked(k5_os_mutex *m) { }
563 # define k5_pthread_mutex_unlock(M) pthread_mutex_unlock(&(M)->p)
564 #endif
565 
566 /* Define as functions to:
567    (1) eliminate "statement with no effect" warnings for "0"
568    (2) encourage type-checking in calling code  */
569 
570 /* LINTED */
571 static void k5_pthread_assert_unlocked(pthread_mutex_t *m) { }
572 
573 #if defined(DEBUG_THREADS_SLOW) && HAVE_SCHED_H && (HAVE_SCHED_YIELD || HAVE_PRAGMA_WEAK_REF)
574 # include <sched.h>
575 # if !HAVE_SCHED_YIELD
576 #  pragma weak sched_yield
577 #  define MAYBE_SCHED_YIELD()	((void)((&sched_yield != NULL) ? sched_yield() : 0))
578 # else
579 #  define MAYBE_SCHED_YIELD()	((void)sched_yield())
580 # endif
581 #else
582 # define MAYBE_SCHED_YIELD()	((void)0)
583 #endif
584 
585 /* It may not be obvious why this function is desirable.
586 
587    I want to call pthread_mutex_lock, then sched_yield, then look at
588    the return code from pthread_mutex_lock.  That can't be implemented
589    in a macro without a temporary variable, or GNU C extensions.
590 
591    There used to be an inline function which did it, with both
592    functions called from the inline function.  But that messes with
593    the debug information on a lot of configurations, and you can't
594    tell where the inline function was called from.  (Typically, gdb
595    gives you the name of the function from which the inline function
596    was called, and a line number within the inline function itself.)
597 
598    With this auxiliary function, pthread_mutex_lock can be called at
599    the invoking site via a macro; once it returns, the inline function
600    is called (with messed-up line-number info for gdb hopefully
601    localized to just that call).  */
602 #ifdef __GNUC__
603 #define return_after_yield(R)			\
604 	__extension__ ({			\
605 	    int _r = (R);			\
606 	    MAYBE_SCHED_YIELD();		\
607 	    _r;					\
608 	})
609 #else
610 static int return_after_yield(int r)
611 {
612     MAYBE_SCHED_YIELD();
613     return r;
614 }
615 #endif
616 
617 #ifdef USE_PTHREAD_LOCK_ONLY_IF_LOADED
618 
619 # if defined(PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP) && defined(DEBUG_THREADS)
620 #  define K5_OS_MUTEX_PARTIAL_INITIALIZER \
621 	{ PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP, (pthread_t) 0, \
622 	  K5_OS_NOTHREAD_MUTEX_PARTIAL_INITIALIZER }
623 # elif defined(DEBUG_THREADS)
624 #  define K5_OS_MUTEX_PARTIAL_INITIALIZER \
625 	{ PTHREAD_MUTEX_INITIALIZER, (pthread_t) 0, \
626 	  K5_OS_NOTHREAD_MUTEX_PARTIAL_INITIALIZER }
627 # else
628 #  define K5_OS_MUTEX_PARTIAL_INITIALIZER \
629 	{ PTHREAD_MUTEX_INITIALIZER, K5_OS_NOTHREAD_MUTEX_PARTIAL_INITIALIZER }
630 # endif
632 # define k5_os_mutex_finish_init(M)		\
633 	k5_os_nothread_mutex_finish_init(&(M)->n)
634 # define k5_os_mutex_init(M)			\
635 	(k5_os_nothread_mutex_init(&(M)->n),	\
636 	 (K5_PTHREADS_LOADED			\
637 	  ? pthread_mutex_init(&(M)->p, 0)	\
638 	  : 0))
639 # define k5_os_mutex_destroy(M)			\
640 	(k5_os_nothread_mutex_destroy(&(M)->n),	\
641 	 (K5_PTHREADS_LOADED			\
642 	  ? pthread_mutex_destroy(&(M)->p)	\
643 	  : 0))
644 
645 # define k5_os_mutex_lock(M)						\
646 	return_after_yield(K5_PTHREADS_LOADED				\
647 			   ? k5_pthread_mutex_lock(M)			\
648 			   : k5_os_nothread_mutex_lock(&(M)->n))
649 # define k5_os_mutex_unlock(M)				\
650 	(MAYBE_SCHED_YIELD(),				\
651 	 (K5_PTHREADS_LOADED				\
652 	  ? k5_pthread_mutex_unlock(M)			\
653 	  : k5_os_nothread_mutex_unlock(&(M)->n)))
654 
655 # define k5_os_mutex_assert_unlocked(M)			\
656 	(K5_PTHREADS_LOADED				\
657 	 ? k5_pthread_assert_unlocked(&(M)->p)		\
658 	 : k5_os_nothread_mutex_assert_unlocked(&(M)->n))
659 # define k5_os_mutex_assert_locked(M)			\
660 	(K5_PTHREADS_LOADED				\
661 	 ? k5_pthread_assert_locked(M)			\
662 	 : k5_os_nothread_mutex_assert_locked(&(M)->n))
663 
664 #else
665 
666 # ifdef DEBUG_THREADS
667 #  ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
668 #   define K5_OS_MUTEX_PARTIAL_INITIALIZER \
669 	{ PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP, (pthread_t) 0 }
670 #  else
671 #   define K5_OS_MUTEX_PARTIAL_INITIALIZER \
672 	{ PTHREAD_MUTEX_INITIALIZER, (pthread_t) 0 }
673 #  endif
674 # else
675 #  define K5_OS_MUTEX_PARTIAL_INITIALIZER \
676 	{ PTHREAD_MUTEX_INITIALIZER }
677 # endif
678 
679 /* LINTED */
680 static int k5_os_mutex_finish_init(k5_os_mutex *m) { return 0; }
681 # define k5_os_mutex_init(M)		pthread_mutex_init(&(M)->p, 0)
682 # define k5_os_mutex_destroy(M)		pthread_mutex_destroy(&(M)->p)
683 # define k5_os_mutex_lock(M)	return_after_yield(k5_pthread_mutex_lock(M))
684 # define k5_os_mutex_unlock(M)		(MAYBE_SCHED_YIELD(),k5_pthread_mutex_unlock(M))
685 
686 # define k5_os_mutex_assert_unlocked(M)	k5_pthread_assert_unlocked(&(M)->p)
687 # define k5_os_mutex_assert_locked(M)	k5_pthread_assert_locked(M)
688 
689 #endif /* is pthreads always available? */
690 
691 #elif defined _WIN32
692 
693 typedef struct {
694     HANDLE h;
695     int is_locked;
696 } k5_os_mutex;
697 
698 # define K5_OS_MUTEX_PARTIAL_INITIALIZER { INVALID_HANDLE_VALUE, 0 }
699 
700 # define k5_os_mutex_finish_init(M)					 \
701 	(ASSERT((M)->h == INVALID_HANDLE_VALUE),			 \
702 	 ((M)->h = CreateMutex(NULL, FALSE, NULL)) ? 0 : GetLastError())
703 # define k5_os_mutex_init(M)						 \
704 	((M)->is_locked = 0,						 \
705 	 ((M)->h = CreateMutex(NULL, FALSE, NULL)) ? 0 : GetLastError())
706 # define k5_os_mutex_destroy(M)		\
707 	(CloseHandle((M)->h) ? ((M)->h = 0, 0) : GetLastError())
708 
709 static int k5_os_mutex_lock(k5_os_mutex *m)
710 {
711     DWORD res;
712     res = WaitForSingleObject(m->h, INFINITE);
713     if (res == WAIT_FAILED)
714 	return GetLastError();
715     /* Eventually these should be turned into some reasonable error
716        code.  */
717     ASSERT(res != WAIT_TIMEOUT);
718     ASSERT(res != WAIT_ABANDONED);
719     ASSERT(res == WAIT_OBJECT_0);
720     /* Avoid locking twice.  */
721     ASSERT(m->is_locked == 0);
722     m->is_locked = 1;
723     return 0;
724 }
725 
726 # define k5_os_mutex_unlock(M)				\
727 	(ASSERT((M)->is_locked == 1),			\
728 	 (M)->is_locked = 0,				\
729 	 ReleaseMutex((M)->h) ? 0 : GetLastError())
730 
731 # define k5_os_mutex_assert_unlocked(M)	((void)0)
732 # define k5_os_mutex_assert_locked(M)	((void)0)
733 
734 #else
735 
736 # error "Thread support enabled, but thread system unknown"
737 
738 #endif
739 
740 
741 
742 
743 typedef struct {
744     k5_debug_loc loc_last, loc_created;
745     k5_os_mutex os;
746     k5_debug_mutex_stats stats;
747 } k5_mutex_t;
748 #define K5_MUTEX_PARTIAL_INITIALIZER		\
749 	{ K5_DEBUG_LOC_INIT, K5_DEBUG_LOC_INIT,	\
750 	  K5_OS_MUTEX_PARTIAL_INITIALIZER, K5_MUTEX_STATS_INIT }
751 /* LINTED */
752 static int k5_mutex_init_1(k5_mutex_t *m, k5_debug_loc l)
753 {
754     int err = k5_os_mutex_init(&m->os);
755     if (err) return err;
756     m->loc_created = m->loc_last = l;
757     err = k5_mutex_init_stats(&m->stats);
758     ASSERT(err == 0);
759     return 0;
760 }
761 #define k5_mutex_init(M)	k5_mutex_init_1((M), K5_DEBUG_LOC)
762 /* LINTED */
763 static int k5_mutex_finish_init_1(k5_mutex_t *m, k5_debug_loc l)
764 {
765     int err = k5_os_mutex_finish_init(&m->os);
766     if (err) return err;
767     m->loc_created = m->loc_last = l;
768     err = k5_mutex_finish_init_stats(&m->stats);
769     ASSERT(err == 0);
770     return 0;
771 }
772 #define k5_mutex_finish_init(M)	k5_mutex_finish_init_1((M), K5_DEBUG_LOC)
773 #define k5_mutex_destroy(M)			\
774 	(k5_os_mutex_assert_unlocked(&(M)->os),	\
775 	 k5_mutex_lock(M), (M)->loc_last = K5_DEBUG_LOC, k5_mutex_unlock(M), \
776 	 k5_os_mutex_destroy(&(M)->os))
777 #ifdef __GNUC__
778 #define k5_mutex_lock(M)				\
779 	__extension__ ({				\
780 	    int _err = 0;				\
781 	    k5_mutex_t *_m = (M);			\
782 	    _err = k5_os_mutex_lock(&_m->os);		\
783 	    if (_err == 0) _m->loc_last = K5_DEBUG_LOC;	\
784 	    _err;					\
785 	})
786 #else
787 /* LINTED */
788 static int k5_mutex_lock_1(k5_mutex_t *m, k5_debug_loc l)
789 {
790     int err = 0;
791     err = k5_os_mutex_lock(&m->os);
792     if (err)
793 	return err;
794     m->loc_last = l;
795     return err;
796 }
797 #define k5_mutex_lock(M)	k5_mutex_lock_1(M, K5_DEBUG_LOC)
798 #endif
799 #define k5_mutex_unlock(M)				\
800 	(k5_mutex_assert_locked(M),			\
801 	 (M)->loc_last = K5_DEBUG_LOC,			\
802 	 k5_os_mutex_unlock(&(M)->os))
803 
804 #define k5_mutex_assert_locked(M)	k5_os_mutex_assert_locked(&(M)->os)
805 #define k5_mutex_assert_unlocked(M)	k5_os_mutex_assert_unlocked(&(M)->os)
806 
807 #define k5_assert_locked	k5_mutex_assert_locked
808 #define k5_assert_unlocked	k5_mutex_assert_unlocked
809 
810 
811 /* Thread-specific data; implemented in a support file, because we'll
812    need to keep track of some global data for cleanup purposes.
813 
814    Note that the callback function type is such that the C library
815    routine free() is a valid callback.  */
816 typedef enum {
817     K5_KEY_COM_ERR,
818     K5_KEY_GSS_KRB5_SET_CCACHE_OLD_NAME,
819     K5_KEY_GSS_KRB5_CCACHE_NAME,
820     K5_KEY_MAX
821 } k5_key_t;
822 /* rename shorthand symbols for export */
823 #define k5_key_register	krb5int_key_register
824 #define k5_getspecific	krb5int_getspecific
825 #define k5_setspecific	krb5int_setspecific
826 #define k5_key_delete	krb5int_key_delete
827 extern int k5_key_register(k5_key_t, void (*)(void *));
828 extern void *k5_getspecific(k5_key_t);
829 extern int k5_setspecific(k5_key_t, void *);
830 extern int k5_key_delete(k5_key_t);
831 
832 extern int  KRB5_CALLCONV krb5int_mutex_alloc  (k5_mutex_t **);
833 extern void KRB5_CALLCONV krb5int_mutex_free   (k5_mutex_t *);
834 extern int  KRB5_CALLCONV krb5int_mutex_lock   (k5_mutex_t *);
835 extern int  KRB5_CALLCONV krb5int_mutex_unlock (k5_mutex_t *);
836 
837 /* In time, many of the definitions above should move into the support
838    library, and this file should be greatly simplified.  For type
839    definitions, that'll take some work, since other data structures
840    incorporate mutexes directly, and our mutex type is dependent on
841    configuration options and system attributes.  For most functions,
842    though, it should be relatively easy.
843 
844    For now, plugins should use the exported functions, and not the
845    above macros, and use krb5int_mutex_alloc for allocations.  */
846 #ifdef PLUGIN
847 #undef k5_mutex_lock
848 #define k5_mutex_lock krb5int_mutex_lock
849 #undef k5_mutex_unlock
850 #define k5_mutex_unlock krb5int_mutex_unlock
851 #endif
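
/* For a plugin, then, mutex handling looks like this sketch
   (illustrative only; error handling omitted):

	k5_mutex_t *lock;

	krb5int_mutex_alloc(&lock);
	krb5int_mutex_lock(lock);
	...critical section...
	krb5int_mutex_unlock(lock);
	krb5int_mutex_free(lock);  */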
852 
853 #endif /* _KERNEL */
854 
855 
856 #endif /* multiple inclusion? */
857