1 /* 2 * Copyright 2007 Sun Microsystems, Inc. All rights reserved. 3 * Use is subject to license terms. 4 */ 5 6 /* 7 * include/k5-thread.h 8 * 9 * Copyright 2004,2005,2006 by the Massachusetts Institute of Technology. 10 * All Rights Reserved. 11 * 12 * Export of this software from the United States of America may 13 * require a specific license from the United States Government. 14 * It is the responsibility of any person or organization contemplating 15 * export to obtain such a license before exporting. 16 * 17 * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and 18 * distribute this software and its documentation for any purpose and 19 * without fee is hereby granted, provided that the above copyright 20 * notice appear in all copies and that both that copyright notice and 21 * this permission notice appear in supporting documentation, and that 22 * the name of M.I.T. not be used in advertising or publicity pertaining 23 * to distribution of the software without specific, written prior 24 * permission. Furthermore if you modify this software you must label 25 * your software as modified software and not distribute it in such a 26 * fashion that it might be confused with the original M.I.T. software. 27 * M.I.T. makes no representations about the suitability of 28 * this software for any purpose. It is provided "as is" without express 29 * or implied warranty. 30 * 31 * 32 * Preliminary thread support. 
 */

#ifndef K5_THREAD_H
#define K5_THREAD_H

#pragma ident	"%Z%%M% %I% %E% SMI"

#ifdef _KERNEL

/*
 * Kernel build: map the k5_mutex_t API directly onto the Solaris
 * kernel mutex primitives from <sys/ksynch.h>.  mutex_enter/mutex_exit
 * cannot fail, so the wrappers always return 0; callers can then share
 * error-checking code with the user-space implementations below.
 */
#include <sys/ksynch.h>

typedef kmutex_t k5_mutex_t;

/* A zeroed kmutex_t is acceptable as a statically initialized mutex. */
#define K5_MUTEX_PARTIAL_INITIALIZER {0}

/* No-op in the kernel; only the user-space debug builds do real
   lock-ownership checking.  */
/* ARGSUSED */
static void k5_mutex_assert_locked(k5_mutex_t *m) { }

/* Acquire m.  Always succeeds; returns 0 for interface parity. */
static int
k5_mutex_lock(k5_mutex_t *m)
{
	mutex_enter(m);
	return (0);
}

/* Release m.  Always succeeds; returns 0 for interface parity. */
static int
k5_mutex_unlock(k5_mutex_t *m)
{
	mutex_exit(m);
	return(0);
}


#else /* _KERNEL */

#include "autoconf.h"

#ifndef KRB5_CALLCONV
# define KRB5_CALLCONV
#endif
#ifndef KRB5_CALLCONV_C
# define KRB5_CALLCONV_C
#endif

/* Interface (tentative):

   Mutex support:

   // Between these two, we should be able to do pure compile-time
   // and pure run-time initialization.
   //   POSIX: partial initializer is PTHREAD_MUTEX_INITIALIZER,
   //     finish does nothing
   //   Windows: partial initializer is an invalid handle,
   //     finish does the real initialization work
   //   debug: partial initializer sets one magic value,
   //     finish verifies and sets a new magic value for
   //     lock/unlock to check
   k5_mutex_t foo_mutex = K5_MUTEX_PARTIAL_INITIALIZER;
   int k5_mutex_finish_init(k5_mutex_t *);
   // for dynamic allocation
   int k5_mutex_init(k5_mutex_t *);
   // Must work for both kinds of alloc, even if it means adding flags.
   int k5_mutex_destroy(k5_mutex_t *);

   // As before.
   int k5_mutex_lock(k5_mutex_t *);
   int k5_mutex_unlock(k5_mutex_t *);

   In each library, one new function to finish the static mutex init,
   and any other library-wide initialization that might be desired.
   On POSIX, this function would be called via the second support
   function (see below).  On Windows, it would be called at library
   load time.  These functions, or functions they call, should be the
   only places that k5_mutex_finish_init gets called.
107 108 A second function or macro called at various possible "first" entry 109 points which either calls pthread_once on the first function 110 (POSIX), or checks some flag set by the first function (Windows, 111 debug support), and possibly returns an error. (In the 112 non-threaded case, a simple flag can be used to avoid multiple 113 invocations, and the mutexes don't need run-time initialization 114 anyways.) 115 116 A third function for library termination calls mutex_destroy on 117 each mutex for the library. This function would be called 118 automatically at library unload time. If it turns out to be needed 119 at exit time for libraries that don't get unloaded, perhaps we 120 should also use atexit(). Any static mutexes should be cleaned up 121 with k5_mutex_destroy here. 122 123 How does that second support function invoke the first support 124 function only once? Through something modelled on pthread_once 125 that I haven't written up yet. Probably: 126 127 k5_once_t foo_once = K5_ONCE_INIT; 128 k5_once(k5_once_t *, void (*)(void)); 129 130 For POSIX: Map onto pthread_once facility. 131 For non-threaded case: A simple flag. 132 For Windows: Not needed; library init code takes care of it. 133 134 XXX: A general k5_once mechanism isn't possible for Windows, 135 without faking it through named mutexes or mutexes initialized at 136 startup. I was only using it in one place outside these headers, 137 so I'm dropping the general scheme. Eventually the existing uses 138 in k5-thread.h and k5-platform.h will be converted to pthread_once 139 or static variables. 140 141 142 Thread-specific data: 143 144 // TSD keys are limited in number in gssapi/krb5/com_err; enumerate 145 // them all. This allows support code init to allocate the 146 // necessary storage for pointers all at once, and avoids any 147 // possible error in key creation. 148 enum { ... } k5_key_t; 149 // Register destructor function. Called in library init code. 
   int k5_key_register(k5_key_t, void (*destructor)(void *));
   // Returns NULL or data.
   void *k5_getspecific(k5_key_t);
   // Returns error if key out of bounds, or the pointer table can't
   // be allocated.  A call to k5_key_register must have happened first.
   // This may trigger the calling of pthread_setspecific on POSIX.
   int k5_setspecific(k5_key_t, void *);
   // Called in library termination code.
   // Trashes data in all threads, calling the registered destructor
   // (but calling it from the current thread).
   int k5_key_delete(k5_key_t);

   For the non-threaded version, the support code will have a static
   array indexed by k5_key_t values, and get/setspecific simply access
   the array elements.

   The TSD destructor table is global state, protected by a mutex if
   threads are enabled.

   Debug support: Not much.  Might check if k5_key_register has been
   called and abort if not.


   Any actual external symbols will use the krb5int_ prefix.  The k5_
   names will be simple macros or inline functions to rename the
   external symbols, or slightly more complex ones to expand the
   implementation inline (e.g., map to POSIX versions and/or debug
   code using __FILE__ and the like).


   More to be added, perhaps.  */

/*
 * All of the debug/statistics machinery below is disabled
 * unconditionally in this (Solaris) resync of the MIT sources; these
 * #undefs guarantee that the lightweight non-debug definitions are
 * the ones actually compiled, regardless of build flags.
 */
#undef DEBUG_THREADS /* SUNW14resync XXX */
#undef DEBUG_THREADS_LOC /* SUNW14resync XXX */
#undef DEBUG_THREADS_SLOW /* debugging stuff that'll slow things down?  */
#undef DEBUG_THREADS_STATS

/* The kernel supplies its own ASSERT; in user space map it to
   assert(3C).  */
#ifndef _KERNEL
#include <assert.h>
#include <stdarg.h>
#define ASSERT assert
#endif

/* For tracking locations, of (e.g.) last lock or unlock of mutex.
*/ 194 #ifdef DEBUG_THREADS_LOC 195 typedef struct { 196 const char *filename; 197 int lineno; 198 } k5_debug_loc; 199 #define K5_DEBUG_LOC_INIT { __FILE__, __LINE__ } 200 #if __GNUC__ >= 2 201 #define K5_DEBUG_LOC (__extension__ (k5_debug_loc)K5_DEBUG_LOC_INIT) 202 #else 203 static inline k5_debug_loc k5_debug_make_loc(const char *file, short line) 204 { 205 k5_debug_loc l; 206 l.filename = file; 207 l.lineno = line; 208 return l; 209 } 210 #define K5_DEBUG_LOC (k5_debug_make_loc(__FILE__,__LINE__)) 211 #endif 212 #else /* ! DEBUG_THREADS_LOC */ 213 typedef char k5_debug_loc; 214 #define K5_DEBUG_LOC_INIT 0 215 #define K5_DEBUG_LOC 0 216 #endif 217 218 #define k5_debug_update_loc(L) ((L) = K5_DEBUG_LOC) 219 220 221 222 /* Statistics gathering: 223 224 Currently incomplete, don't try enabling it. 225 226 Eventually: Report number of times locked, total and standard 227 deviation of the time the lock was held, total and std dev time 228 spent waiting for the lock. "Report" will probably mean "write a 229 line to a file if a magic environment variable is set." 
*/ 230 231 #ifdef DEBUG_THREADS_STATS 232 233 #if HAVE_TIME_H && (!defined(HAVE_SYS_TIME_H) || defined(TIME_WITH_SYS_TIME)) 234 # include <time.h> 235 #endif 236 #if HAVE_SYS_TIME_H 237 # include <sys/time.h> 238 #endif 239 #ifdef HAVE_STDINT_H 240 # include <stdint.h> 241 #endif 242 /* for memset */ 243 #include <string.h> 244 /* for uint64_t */ 245 #include <inttypes.h> 246 typedef uint64_t k5_debug_timediff_t; /* or long double */ 247 typedef struct timeval k5_debug_time_t; 248 static inline k5_debug_timediff_t 249 timediff(k5_debug_time_t t2, k5_debug_time_t t1) 250 { 251 return (t2.tv_sec - t1.tv_sec) * 1000000 + (t2.tv_usec - t1.tv_usec); 252 } 253 struct k5_timediff_stats { 254 k5_debug_timediff_t valmin, valmax, valsum, valsqsum; 255 }; 256 typedef struct { 257 int count; 258 k5_debug_time_t time_acquired, time_created; 259 struct k5_timediff_stats lockwait, lockheld; 260 } k5_debug_mutex_stats; 261 #define k5_mutex_init_stats(S) \ 262 (memset((S), 0, sizeof(struct k5_debug_mutex_stats)), 0) 263 #define k5_mutex_finish_init_stats(S) (0) 264 #define K5_MUTEX_STATS_INIT { 0, {0}, {0}, {0}, {0} } 265 266 #else 267 268 typedef char k5_debug_mutex_stats; 269 #define k5_mutex_init_stats(S) (*(S) = 's', 0) 270 #define k5_mutex_finish_init_stats(S) (0) 271 #define K5_MUTEX_STATS_INIT 's' 272 273 #endif 274 275 276 277 /* Define the OS mutex bit. */ 278 279 /* First, if we're not actually doing multiple threads, do we 280 want the debug support or not? 
 */

#ifdef DEBUG_THREADS

/*
 * Debug, non-threaded mutex: two fields holding magic numbers that
 * encode the lifecycle state (partly initialized / initialized /
 * destroyed) and the lock state.  Every operation ASSERTs the mutex
 * is in the expected state, so misuse aborts immediately instead of
 * silently corrupting data.  The redundant-looking paired ASSERTs
 * (!= wrong, == right) make the failing condition name the actual
 * bad state in the abort message.
 */
enum k5_mutex_init_states {
    K5_MUTEX_DEBUG_PARTLY_INITIALIZED = 0x12,
    K5_MUTEX_DEBUG_INITIALIZED,
    K5_MUTEX_DEBUG_DESTROYED
};
enum k5_mutex_flag_states {
    K5_MUTEX_DEBUG_UNLOCKED = 0x23,
    K5_MUTEX_DEBUG_LOCKED
};

typedef struct {
    enum k5_mutex_init_states initialized;
    enum k5_mutex_flag_states locked;
} k5_os_nothread_mutex;

# define K5_OS_NOTHREAD_MUTEX_PARTIAL_INITIALIZER \
	{ K5_MUTEX_DEBUG_PARTLY_INITIALIZED, K5_MUTEX_DEBUG_UNLOCKED }

# define k5_os_nothread_mutex_finish_init(M) \
	(ASSERT((M)->initialized != K5_MUTEX_DEBUG_INITIALIZED), \
	 ASSERT((M)->initialized == K5_MUTEX_DEBUG_PARTLY_INITIALIZED), \
	 ASSERT((M)->locked == K5_MUTEX_DEBUG_UNLOCKED), \
	 (M)->initialized = K5_MUTEX_DEBUG_INITIALIZED, 0)
# define k5_os_nothread_mutex_init(M) \
	((M)->initialized = K5_MUTEX_DEBUG_INITIALIZED, \
	 (M)->locked = K5_MUTEX_DEBUG_UNLOCKED, 0)
# define k5_os_nothread_mutex_destroy(M) \
	(ASSERT((M)->initialized == K5_MUTEX_DEBUG_INITIALIZED), \
	 (M)->initialized = K5_MUTEX_DEBUG_DESTROYED, 0)

# define k5_os_nothread_mutex_lock(M) \
	(k5_os_nothread_mutex_assert_unlocked(M), \
	 (M)->locked = K5_MUTEX_DEBUG_LOCKED, 0)
# define k5_os_nothread_mutex_unlock(M) \
	(k5_os_nothread_mutex_assert_locked(M), \
	 (M)->locked = K5_MUTEX_DEBUG_UNLOCKED, 0)

# define k5_os_nothread_mutex_assert_locked(M) \
	(ASSERT((M)->initialized == K5_MUTEX_DEBUG_INITIALIZED), \
	 ASSERT((M)->locked != K5_MUTEX_DEBUG_UNLOCKED), \
	 ASSERT((M)->locked == K5_MUTEX_DEBUG_LOCKED))
# define k5_os_nothread_mutex_assert_unlocked(M) \
	(ASSERT((M)->initialized == K5_MUTEX_DEBUG_INITIALIZED), \
	 ASSERT((M)->locked != K5_MUTEX_DEBUG_LOCKED), \
	 ASSERT((M)->locked == K5_MUTEX_DEBUG_UNLOCKED))

#else /* threads disabled and not debugging */

/* Non-debug, non-threaded mutex: a one-byte placeholder; all
   operations are no-ops that report success.  */
typedef char k5_os_nothread_mutex;
# define K5_OS_NOTHREAD_MUTEX_PARTIAL_INITIALIZER 0
/* Empty inline functions avoid the "statement with no effect"
   warnings, and do better type-checking than functions that don't use
   their arguments.  */
/* SUNW 1.4resync, remove "inline" to avoid warning */
/* ARGSUSED */
/* LINTED */
static int k5_os_nothread_mutex_finish_init(k5_os_nothread_mutex *m) {
	return 0;
}
/* ARGSUSED */
/* LINTED */
static int k5_os_nothread_mutex_init(k5_os_nothread_mutex *m) {
	return 0;
}
/* ARGSUSED */
/* LINTED */
static int k5_os_nothread_mutex_destroy(k5_os_nothread_mutex *m) {
	return 0;
}
/* ARGSUSED */
/* LINTED */
static int k5_os_nothread_mutex_lock(k5_os_nothread_mutex *m) {
	return 0;
}
/* ARGSUSED */
/* LINTED */
static int k5_os_nothread_mutex_unlock(k5_os_nothread_mutex *m) {
	return 0;
}
# define k5_os_nothread_mutex_assert_locked(M)		((void)0)
# define k5_os_nothread_mutex_assert_unlocked(M)	((void)0)

#endif

/* Single-threaded "run once" support.  Values:
   2 - function has not been run
   3 - function has been run
   4 - function is being run -- deadlock detected  */
typedef unsigned char k5_os_nothread_once_t;
# define K5_OS_NOTHREAD_ONCE_INIT 2
# define k5_os_nothread_once(O,F) \
	(*(O) == 3 ? 0 \
	 : *(O) == 2 ? (*(O) = 4, (F)(), *(O) = 3, 0) \
	 : (ASSERT(*(O) != 4), ASSERT(*(O) == 2 || *(O) == 3), 0))



#ifndef ENABLE_THREADS

/* Threads disabled at configure time: the OS mutex and once types are
   simply the non-threaded versions defined above.  */
typedef k5_os_nothread_mutex k5_os_mutex;
# define K5_OS_MUTEX_PARTIAL_INITIALIZER \
	K5_OS_NOTHREAD_MUTEX_PARTIAL_INITIALIZER
# define k5_os_mutex_finish_init	k5_os_nothread_mutex_finish_init
# define k5_os_mutex_init		k5_os_nothread_mutex_init
# define k5_os_mutex_destroy		k5_os_nothread_mutex_destroy
# define k5_os_mutex_lock		k5_os_nothread_mutex_lock
# define k5_os_mutex_unlock		k5_os_nothread_mutex_unlock
# define k5_os_mutex_assert_locked	k5_os_nothread_mutex_assert_locked
# define k5_os_mutex_assert_unlocked	k5_os_nothread_mutex_assert_unlocked

# define k5_once_t			k5_os_nothread_once_t
# define K5_ONCE_INIT			K5_OS_NOTHREAD_ONCE_INIT
# define k5_once			k5_os_nothread_once

#elif HAVE_PTHREAD

# include <pthread.h>

/* Weak reference support, etc.

   Linux: Stub mutex routines exist, but pthread_once does not.

   Solaris: In libc there's a pthread_once that doesn't seem
   to do anything.  Bleah.  But pthread_mutexattr_setrobust_np
   is defined only in libpthread.

   IRIX 6.5 stub pthread support in libc is really annoying.  The
   pthread_mutex_lock function returns ENOSYS for a program not linked
   against -lpthread.  No link-time failure, no weak symbols, etc.
   The C library doesn't provide pthread_once; we can use weak
   reference support for that.

   If weak references are not available, then for now, we assume that
   the pthread support routines will always be available -- either the
   real thing, or functional stubs that merely prohibit creating
   threads.

   If we find a platform with non-functional stubs and no weak
   references, we may have to resort to some hack like dlsym on the
   symbol tables of the current process.
 */
#ifdef HAVE_PRAGMA_WEAK_REF
/* Declare the pthread entry points we use as weak, so linking without
   libpthread leaves the symbols null and K5_PTHREADS_LOADED (below)
   can test for the library's presence at run time.  */
# pragma weak pthread_once
# pragma weak pthread_mutex_lock
# pragma weak pthread_mutex_unlock
# pragma weak pthread_mutex_destroy
# pragma weak pthread_mutex_init
# pragma weak pthread_self
# pragma weak pthread_equal
# ifdef HAVE_PTHREAD_MUTEXATTR_SETROBUST_NP_IN_THREAD_LIB
#  pragma weak pthread_mutexattr_setrobust_np
# endif
/* K5_PTHREADS_LOADED tests the address of a weak symbol believed to
   live only in the real threads library; nonnull means the library is
   loaded.  */
# if !defined HAVE_PTHREAD_ONCE
#  define K5_PTHREADS_LOADED	(&pthread_once != 0)
# elif !defined HAVE_PTHREAD_MUTEXATTR_SETROBUST_NP \
	&& defined HAVE_PTHREAD_MUTEXATTR_SETROBUST_NP_IN_THREAD_LIB
#  define K5_PTHREADS_LOADED	(&pthread_mutexattr_setrobust_np != 0)
# else
/* no suitable symbol to probe; assume the library is loaded */
#  define K5_PTHREADS_LOADED	(1)
# endif
#else
/* no pragma weak support */
# define K5_PTHREADS_LOADED	(1)
#endif

#if defined(__mips) && defined(__sgi) && (defined(_SYSTYPE_SVR4) || defined(__SYSTYPE_SVR4__))
/* IRIX 6.5 stub pthread support in libc is really annoying.  The
   pthread_mutex_lock function returns ENOSYS for a program not linked
   against -lpthread.  No link-time failure, no weak reference tests,
   etc.

   The C library doesn't provide pthread_once; we can use weak
   reference support for that.  */
# ifndef HAVE_PRAGMA_WEAK_REF
#  if defined(__GNUC__) && __GNUC__ < 3
#   error "Please update to a newer gcc with weak symbol support, or switch to native cc, reconfigure and recompile."
#  else
#   error "Weak reference support is required"
#  endif
# endif
# define USE_PTHREAD_LOCK_ONLY_IF_LOADED
#endif

/* Also use the dual (pthread + nothread) mutex when configure could
   not find pthread_mutex_lock at all.  */
#if !defined(HAVE_PTHREAD_MUTEX_LOCK) && !defined(USE_PTHREAD_LOCK_ONLY_IF_LOADED)
# define USE_PTHREAD_LOCK_ONLY_IF_LOADED
#endif

#ifdef HAVE_PRAGMA_WEAK_REF
/* Can't rely on useful stubs -- see above regarding Solaris.
 */
/* Dual once control: use the real pthread_once only when libpthread
   is actually loaded, else fall back to the single-threaded flag.  */
typedef struct {
    pthread_once_t o;
    k5_os_nothread_once_t n;
} k5_once_t;
# define K5_ONCE_INIT	{ PTHREAD_ONCE_INIT, K5_OS_NOTHREAD_ONCE_INIT }
# define k5_once(O,F)	(K5_PTHREADS_LOADED \
			 ? pthread_once(&(O)->o,F) \
			 : k5_os_nothread_once(&(O)->n,F))
#else
typedef pthread_once_t k5_once_t;
# define K5_ONCE_INIT	PTHREAD_ONCE_INIT
# define k5_once	pthread_once
#endif

/* The OS mutex for pthread builds.  Extra members exist only in debug
   builds (the owning thread, for ownership asserts) or when pthreads
   may be absent at run time (the fallback non-threaded mutex).  */
typedef struct {
    pthread_mutex_t p;
#ifdef DEBUG_THREADS
    pthread_t owner;
#endif
#ifdef USE_PTHREAD_LOCK_ONLY_IF_LOADED
    k5_os_nothread_mutex n;
#endif
} k5_os_mutex;

#ifdef DEBUG_THREADS
/* Debug lock: record the owning thread on successful acquisition so
   k5_pthread_assert_locked can verify ownership later.  */
# ifdef __GNUC__
#  define k5_pthread_mutex_lock(M) \
	({ \
	    k5_os_mutex *_m2 = (M); \
	    int _r2 = pthread_mutex_lock(&_m2->p); \
	    if (_r2 == 0) _m2->owner = pthread_self(); \
	    _r2; \
	})
# else
static inline int
k5_pthread_mutex_lock(k5_os_mutex *m)
{
    int r = pthread_mutex_lock(&m->p);
    if (r)
	return r;
    m->owner = pthread_self();
    return 0;
}
# endif
/* Ownership can only be checked when the real pthread_self/
   pthread_equal are present.  */
# define k5_pthread_assert_locked(M) \
	(K5_PTHREADS_LOADED \
	 ? ASSERT(pthread_equal((M)->owner, pthread_self())) \
	 : (void)0)
# define k5_pthread_mutex_unlock(M) \
	(k5_pthread_assert_locked(M), \
	 (M)->owner = (pthread_t) 0, \
	 pthread_mutex_unlock(&(M)->p))
#else
# define k5_pthread_mutex_lock(M) pthread_mutex_lock(&(M)->p)
/* ARGSUSED */
/* LINTED */
static void k5_pthread_assert_locked(k5_os_mutex *m) { }
# define k5_pthread_mutex_unlock(M) pthread_mutex_unlock(&(M)->p)
#endif

/* Define as functions to:
   (1) eliminate "statement with no effect" warnings for "0"
   (2) encourage type-checking in calling code  */

/* ARGSUSED */
/* LINTED */
static void k5_pthread_assert_unlocked(pthread_mutex_t *m) { }

/* Optionally call sched_yield around lock operations to shake out
   races in slow debug runs (DEBUG_THREADS_SLOW); otherwise a no-op.  */
#if defined(DEBUG_THREADS_SLOW) && HAVE_SCHED_H && (HAVE_SCHED_YIELD || HAVE_PRAGMA_WEAK_REF)
# include <sched.h>
# if !HAVE_SCHED_YIELD
#  pragma weak sched_yield
#  define MAYBE_SCHED_YIELD()	((void)((&sched_yield != NULL) ? sched_yield() : 0))
# else
#  define MAYBE_SCHED_YIELD()	((void)sched_yield())
# endif
#else
# define MAYBE_SCHED_YIELD()	((void)0)
#endif

/* It may not be obvious why this function is desirable.

   I want to call pthread_mutex_lock, then sched_yield, then look at
   the return code from pthread_mutex_lock.  That can't be implemented
   in a macro without a temporary variable, or GNU C extensions.

   There used to be an inline function which did it, with both
   functions called from the inline function.  But that messes with
   the debug information on a lot of configurations, and you can't
   tell where the inline function was called from.  (Typically, gdb
   gives you the name of the function from which the inline function
   was called, and a line number within the inline function itself.)
   With this auxiliary function, pthread_mutex_lock can be called at
   the invoking site via a macro; once it returns, the inline function
   is called (with messed-up line-number info for gdb hopefully
   localized to just that call).  */
#ifdef __GNUC__
#define return_after_yield(R) \
	__extension__ ({ \
	    int _r = (R); \
	    MAYBE_SCHED_YIELD(); \
	    _r; \
	})
#else
static int return_after_yield(int r)
{
    MAYBE_SCHED_YIELD();
    return r;
}
#endif

#ifdef USE_PTHREAD_LOCK_ONLY_IF_LOADED

/* Pthreads may be absent at run time: initialize both the pthread
   mutex and the fallback non-threaded mutex, and choose between them
   at each call via K5_PTHREADS_LOADED.  */
# if defined(PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP) && defined(DEBUG_THREADS)
#  define K5_OS_MUTEX_PARTIAL_INITIALIZER \
	{ PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP, (pthread_t) 0, \
	  K5_OS_NOTHREAD_MUTEX_PARTIAL_INITIALIZER }
# elif defined(DEBUG_THREADS)
#  define K5_OS_MUTEX_PARTIAL_INITIALIZER \
	{ PTHREAD_MUTEX_INITIALIZER, (pthread_t) 0, \
	  K5_OS_NOTHREAD_MUTEX_PARTIAL_INITIALIZER }
# else
#  define K5_OS_MUTEX_PARTIAL_INITIALIZER \
	{ PTHREAD_MUTEX_INITIALIZER, K5_OS_NOTHREAD_MUTEX_PARTIAL_INITIALIZER }
# endif

# define k5_os_mutex_finish_init(M) \
	k5_os_nothread_mutex_finish_init(&(M)->n)
# define k5_os_mutex_init(M) \
	(k5_os_nothread_mutex_init(&(M)->n), \
	 (K5_PTHREADS_LOADED \
	  ? pthread_mutex_init(&(M)->p, 0) \
	  : 0))
# define k5_os_mutex_destroy(M) \
	(k5_os_nothread_mutex_destroy(&(M)->n), \
	 (K5_PTHREADS_LOADED \
	  ? pthread_mutex_destroy(&(M)->p) \
	  : 0))

# define k5_os_mutex_lock(M) \
	return_after_yield(K5_PTHREADS_LOADED \
			   ? k5_pthread_mutex_lock(M) \
			   : k5_os_nothread_mutex_lock(&(M)->n))
# define k5_os_mutex_unlock(M) \
	(MAYBE_SCHED_YIELD(), \
	 (K5_PTHREADS_LOADED \
	  ? k5_pthread_mutex_unlock(M) \
	  : k5_os_nothread_mutex_unlock(&(M)->n)))

# define k5_os_mutex_assert_unlocked(M) \
	(K5_PTHREADS_LOADED \
	 ? k5_pthread_assert_unlocked(&(M)->p) \
	 : k5_os_nothread_mutex_assert_unlocked(&(M)->n))
# define k5_os_mutex_assert_locked(M) \
	(K5_PTHREADS_LOADED \
	 ? k5_pthread_assert_locked(M) \
	 : k5_os_nothread_mutex_assert_locked(&(M)->n))

#else

/* Pthreads always available: go straight to the pthread calls.  */
# ifdef DEBUG_THREADS
#  ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
#   define K5_OS_MUTEX_PARTIAL_INITIALIZER \
	{ PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP, (pthread_t) 0 }
#  else
#   define K5_OS_MUTEX_PARTIAL_INITIALIZER \
	{ PTHREAD_MUTEX_INITIALIZER, (pthread_t) 0 }
#  endif
# else
#  define K5_OS_MUTEX_PARTIAL_INITIALIZER \
	{ PTHREAD_MUTEX_INITIALIZER }
# endif

/* The static initializer does the whole job here, so finishing is a
   no-op.  */
/* LINTED */
static int k5_os_mutex_finish_init(k5_os_mutex *m) { return 0; }
# define k5_os_mutex_init(M)		pthread_mutex_init(&(M)->p, 0)
# define k5_os_mutex_destroy(M)		pthread_mutex_destroy(&(M)->p)
# define k5_os_mutex_lock(M)	return_after_yield(k5_pthread_mutex_lock(M))
# define k5_os_mutex_unlock(M)	(MAYBE_SCHED_YIELD(),k5_pthread_mutex_unlock(M))

# define k5_os_mutex_assert_unlocked(M)	k5_pthread_assert_unlocked(&(M)->p)
# define k5_os_mutex_assert_locked(M)	k5_pthread_assert_locked(M)

#endif /* is pthreads always available?  */

#elif defined _WIN32

/* Windows: a mutex HANDLE created by init/finish_init, plus an
   is_locked flag used only by the sanity ASSERTs below to catch
   double-lock and unlock-while-unlocked.  */
typedef struct {
    HANDLE h;
    int is_locked;
} k5_os_mutex;

# define K5_OS_MUTEX_PARTIAL_INITIALIZER { INVALID_HANDLE_VALUE, 0 }

# define k5_os_mutex_finish_init(M) \
	(ASSERT((M)->h == INVALID_HANDLE_VALUE), \
	 ((M)->h = CreateMutex(NULL, FALSE, NULL)) ? 0 : GetLastError())
# define k5_os_mutex_init(M) \
	((M)->is_locked = 0, \
	 ((M)->h = CreateMutex(NULL, FALSE, NULL)) ? 0 : GetLastError())
# define k5_os_mutex_destroy(M) \
	(CloseHandle((M)->h) ? ((M)->h = 0, 0) : GetLastError())

/* Block until the mutex is acquired; returns 0 or a Windows error
   code.  */
static inline int k5_os_mutex_lock(k5_os_mutex *m)
{
    DWORD res;
    res = WaitForSingleObject(m->h, INFINITE);
    if (res == WAIT_FAILED)
	return GetLastError();
    /* Eventually these should be turned into some reasonable error
       code.  */
    ASSERT(res != WAIT_TIMEOUT);
    ASSERT(res != WAIT_ABANDONED);
    ASSERT(res == WAIT_OBJECT_0);
    /* Avoid locking twice.  */
    ASSERT(m->is_locked == 0);
    m->is_locked = 1;
    return 0;
}

# define k5_os_mutex_unlock(M) \
	(ASSERT((M)->is_locked == 1), \
	 (M)->is_locked = 0, \
	 ReleaseMutex((M)->h) ? 0 : GetLastError())

# define k5_os_mutex_assert_unlocked(M)	((void)0)
# define k5_os_mutex_assert_locked(M)	((void)0)

#else

# error "Thread support enabled, but thread system unknown"

#endif




/*
 * Public mutex type: the OS mutex plus (in debug builds) the source
 * locations of creation and of the most recent operation, and
 * lock-usage statistics.  In non-debug builds the extra members are
 * one byte each.
 */
typedef struct {
    k5_debug_loc loc_last, loc_created;
    k5_os_mutex os;
    k5_debug_mutex_stats stats;
} k5_mutex_t;
#define K5_MUTEX_PARTIAL_INITIALIZER \
	{ K5_DEBUG_LOC_INIT, K5_DEBUG_LOC_INIT, \
	  K5_OS_MUTEX_PARTIAL_INITIALIZER, K5_MUTEX_STATS_INIT }
/* Full run-time initialization, for dynamically allocated mutexes.
   The _1 variants take the caller's source location, supplied by the
   wrapping macros via K5_DEBUG_LOC.  Returns 0 or an OS error.  */
/* LINTED */
static int k5_mutex_init_1(k5_mutex_t *m, k5_debug_loc l)
{
    int err = k5_os_mutex_init(&m->os);
    if (err) return err;
    m->loc_created = m->loc_last = l;
    err = k5_mutex_init_stats(&m->stats);
    ASSERT(err == 0);
    return 0;
}
#define k5_mutex_init(M)	k5_mutex_init_1((M), K5_DEBUG_LOC)
/* Complete the initialization of a statically (partially) initialized
   mutex.  Returns 0 or an OS error.  */
/* LINTED */
static int k5_mutex_finish_init_1(k5_mutex_t *m, k5_debug_loc l)
{
    int err = k5_os_mutex_finish_init(&m->os);
    if (err) return err;
    m->loc_created = m->loc_last = l;
    err = k5_mutex_finish_init_stats(&m->stats);
    ASSERT(err == 0);
    return 0;
}
#define k5_mutex_finish_init(M)	k5_mutex_finish_init_1((M), K5_DEBUG_LOC)
/* Destroy: lock and unlock once first so destroying a held mutex
   trips the debug checks instead of silently discarding it.  */
#define k5_mutex_destroy(M) \
	(k5_os_mutex_assert_unlocked(&(M)->os), \
	 k5_mutex_lock(M), (M)->loc_last = K5_DEBUG_LOC, \
	 k5_mutex_unlock(M), \
	 k5_os_mutex_destroy(&(M)->os))
#ifdef __GNUC__
/* GCC: statement expression records the caller's location on success
   while still yielding the lock call's return code.  */
#define k5_mutex_lock(M) \
	__extension__ ({ \
	    int _err = 0; \
	    k5_mutex_t *_m = (M); \
	    _err = k5_os_mutex_lock(&_m->os); \
	    if (_err == 0) _m->loc_last = K5_DEBUG_LOC; \
	    _err; \
	})
#else
/* LINTED */
static int k5_mutex_lock_1(k5_mutex_t *m, k5_debug_loc l)
{
    int err = 0;
    err = k5_os_mutex_lock(&m->os);
    if (err)
	return err;
    m->loc_last = l;
    return err;
}
#define k5_mutex_lock(M)	k5_mutex_lock_1(M, K5_DEBUG_LOC)
#endif
#define k5_mutex_unlock(M) \
	(k5_mutex_assert_locked(M), \
	 (M)->loc_last = K5_DEBUG_LOC, \
	 k5_os_mutex_unlock(&(M)->os))

#define k5_mutex_assert_locked(M)	k5_os_mutex_assert_locked(&(M)->os)
#define k5_mutex_assert_unlocked(M)	k5_os_mutex_assert_unlocked(&(M)->os)

#define k5_assert_locked	k5_mutex_assert_locked
#define k5_assert_unlocked	k5_mutex_assert_unlocked


/* Thread-specific data; implemented in a support file, because we'll
   need to keep track of some global data for cleanup purposes.

   Note that the callback function type is such that the C library
   routine free() is a valid callback.
 */
/*
 * Thread-specific data keys, enumerated in full (bounded by K5_KEY_MAX)
 * so the support code can allocate the whole pointer table up front and
 * avoid run-time key-creation failures -- see the interface comment
 * earlier in this file.
 */
typedef enum {
    K5_KEY_COM_ERR,
    K5_KEY_GSS_KRB5_SET_CCACHE_OLD_NAME,
    K5_KEY_GSS_KRB5_CCACHE_NAME,
    K5_KEY_MAX
} k5_key_t;
/* rename shorthand symbols for export */
#define k5_key_register	krb5int_key_register
#define k5_getspecific	krb5int_getspecific
#define k5_setspecific	krb5int_setspecific
#define k5_key_delete	krb5int_key_delete
extern int k5_key_register(k5_key_t, void (*)(void *));
extern void *k5_getspecific(k5_key_t);
extern int k5_setspecific(k5_key_t, void *);
extern int k5_key_delete(k5_key_t);

/* Exported mutex wrappers for code (e.g. plugins) that must not depend
   on the configuration-specific layout of k5_mutex_t.  */
extern int KRB5_CALLCONV krb5int_mutex_alloc (k5_mutex_t **);
extern void KRB5_CALLCONV krb5int_mutex_free (k5_mutex_t *);
extern int KRB5_CALLCONV krb5int_mutex_lock (k5_mutex_t *);
extern int KRB5_CALLCONV krb5int_mutex_unlock (k5_mutex_t *);

/* In time, many of the definitions above should move into the support
   library, and this file should be greatly simplified.  For type
   definitions, that'll take some work, since other data structures
   incorporate mutexes directly, and our mutex type is dependent on
   configuration options and system attributes.  For most functions,
   though, it should be relatively easy.

   For now, plugins should use the exported functions, and not the
   above macros, and use krb5int_mutex_alloc for allocations.  */
#ifdef PLUGIN
#undef k5_mutex_lock
#define k5_mutex_lock krb5int_mutex_lock
#undef k5_mutex_unlock
#define k5_mutex_unlock krb5int_mutex_unlock
#endif

#endif /* _KERNEL */


#endif /* K5_THREAD_H (multiple-inclusion guard) */