/*
 * kmp_lock.h -- lock header file
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef KMP_LOCK_H
#define KMP_LOCK_H

#include <limits.h> // CHAR_BIT
#include <stddef.h> // offsetof

#include "kmp_debug.h"
#include "kmp_os.h"

#ifdef __cplusplus
#include <atomic>

extern "C" {
#endif // __cplusplus

// ----------------------------------------------------------------------------
// Have to copy these definitions from kmp.h because kmp.h cannot be included
// due to circular dependencies. Will undef these at end of file.

#define KMP_PAD(type, sz)                                                      \
  (sizeof(type) + (sz - ((sizeof(type) - 1) % (sz)) - 1))
#define KMP_GTID_DNE (-2)
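// KMP_PAD(type, sz) rounds sizeof(type) up to the next multiple of sz. A
// worked illustration of the arithmetic (assuming a 64-byte pad target):
//   sizeof(type) == 40:  40 + (64 - ((40 - 1) % 64) - 1) == 40 + 24 == 64
//   sizeof(type) == 64:  64 + (64 - ((64 - 1) % 64) - 1) == 64 +  0 == 64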
// Forward declaration of ident and ident_t

struct ident;
typedef struct ident ident_t;

// End of copied code.
// ----------------------------------------------------------------------------

// We need to know the size of the area we can assume that the compiler(s)
// allocated for objects of type omp_lock_t and omp_nest_lock_t. The Intel
// compiler always allocates a pointer-sized area, as does visual studio.
//
// gcc however, only allocates 4 bytes for regular locks, even on 64-bit
// intel archs. It allocates at least 8 bytes for nested locks (more on
// recent versions), but we are bounded by the pointer-sized chunks that
// the Intel compiler allocates.

#if (KMP_OS_LINUX || KMP_OS_AIX) && defined(KMP_GOMP_COMPAT)
#define OMP_LOCK_T_SIZE sizeof(int)
#define OMP_NEST_LOCK_T_SIZE sizeof(void *)
#else
#define OMP_LOCK_T_SIZE sizeof(void *)
#define OMP_NEST_LOCK_T_SIZE sizeof(void *)
#endif

// The Intel compiler allocates a 32-byte chunk for a critical section.
// Both gcc and visual studio only allocate enough space for a pointer.
// Sometimes we know that the space was allocated by the Intel compiler.
#define OMP_CRITICAL_SIZE sizeof(void *)
#define INTEL_CRITICAL_SIZE 32

// lock flags
typedef kmp_uint32 kmp_lock_flags_t;

#define kmp_lf_critical_section 1

// When a lock table is used, the indices are of kmp_lock_index_t
typedef kmp_uint32 kmp_lock_index_t;

// When memory allocated for locks is on the lock pool (free list),
// it is treated as structs of this type.
struct kmp_lock_pool {
  union kmp_user_lock *next;
  kmp_lock_index_t index;
};

typedef struct kmp_lock_pool kmp_lock_pool_t;

extern void __kmp_validate_locks(void);

// ----------------------------------------------------------------------------
// There are 5 lock implementations:
// 1. Test and set locks.
// 2. futex locks (Linux* OS on x86 and
//    Intel(R) Many Integrated Core Architecture)
// 3. Ticket (Lamport bakery) locks.
// 4. Queuing locks (with separate spin fields).
// 5. DRDPA (Dynamically Reconfigurable Distributed Polling Area) locks
//
// and 3 lock purposes:
// 1. Bootstrap locks -- Used for a few locks available at library
//    startup-shutdown time.
//    These do not require non-negative global thread ID's.
// 2. Internal RTL locks -- Used everywhere else in the RTL
// 3. User locks (includes critical sections)
// ----------------------------------------------------------------------------

// ============================================================================
// Lock implementations.
//
// Test and set locks.
//
// Non-nested test and set locks differ from the other lock kinds (except
// futex) in that we use the memory allocated by the compiler for the lock,
// rather than a pointer to it.
//
// On lin_32, lin_32e, and win_32, the space allocated may be as small as 4
// bytes, so we have to use a lock table for nested locks, and avoid accessing
// the depth_locked field for non-nested locks.
//
// Information normally available to the tools, such as lock location, lock
// usage (normal lock vs. critical section), etc. is not available with test
// and set locks.
// ----------------------------------------------------------------------------

struct kmp_base_tas_lock {
  // KMP_LOCK_FREE(tas) => unlocked; locked: (gtid+1) of owning thread
#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) &&    \
    __LP64__
  // Flip the ordering of the high and low 32-bit members to be consistent
  // with the memory layout of the address in 64-bit big-endian.
  kmp_int32 depth_locked; // depth locked, for nested locks only
  std::atomic<kmp_int32> poll;
#else
  std::atomic<kmp_int32> poll;
  kmp_int32 depth_locked; // depth locked, for nested locks only
#endif
};

typedef struct kmp_base_tas_lock kmp_base_tas_lock_t;

union kmp_tas_lock {
  kmp_base_tas_lock_t lk;
  kmp_lock_pool_t pool; // make certain struct is large enough
  double lk_align; // use worst case alignment; no cache line padding
};

typedef union kmp_tas_lock kmp_tas_lock_t;

// Static initializer for test and set lock variables. Usage:
//   kmp_tas_lock_t xlock = KMP_TAS_LOCK_INITIALIZER(xlock);
#define KMP_TAS_LOCK_INITIALIZER(lock)                                         \
  {                                                                            \
    { KMP_LOCK_FREE(tas), 0 }                                                  \
  }

extern int __kmp_acquire_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
extern int __kmp_test_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
extern int __kmp_release_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
extern void __kmp_init_tas_lock(kmp_tas_lock_t *lck);
extern void __kmp_destroy_tas_lock(kmp_tas_lock_t *lck);

extern int __kmp_acquire_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
extern int __kmp_test_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
extern int __kmp_release_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
extern void __kmp_init_nested_tas_lock(kmp_tas_lock_t *lck);
extern void __kmp_destroy_nested_tas_lock(kmp_tas_lock_t *lck);
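// Illustrative use of the simple (non-nested) TAS interface. This is a sketch,
// not code from the runtime; `gtid' is assumed to be a valid global thread id
// (e.g. obtained from __kmp_entry_gtid()):
//   kmp_tas_lock_t xlock = KMP_TAS_LOCK_INITIALIZER(xlock);
//   __kmp_acquire_tas_lock(&xlock, gtid); // spins until poll is free, then
//                                         // stores gtid+1
//   /* ... critical section ... */
//   __kmp_release_tas_lock(&xlock, gtid);
//   __kmp_destroy_tas_lock(&xlock);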
#define KMP_LOCK_RELEASED 1
#define KMP_LOCK_STILL_HELD 0
#define KMP_LOCK_ACQUIRED_FIRST 1
#define KMP_LOCK_ACQUIRED_NEXT 0
#ifndef KMP_USE_FUTEX
#define KMP_USE_FUTEX                                                          \
  (KMP_OS_LINUX &&                                                             \
   (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64))
#endif
#if KMP_USE_FUTEX

// ----------------------------------------------------------------------------
// futex locks. futex locks are only available on Linux* OS.
//
// Like non-nested test and set locks, non-nested futex locks use the memory
// allocated by the compiler for the lock, rather than a pointer to it.
//
// Information normally available to the tools, such as lock location, lock
// usage (normal lock vs. critical section), etc. is not available with futex
// locks. With non-nested futex locks, the lock owner is not even available.
// ----------------------------------------------------------------------------

struct kmp_base_futex_lock {
  volatile kmp_int32 poll; // KMP_LOCK_FREE(futex) => unlocked
                           // locked: 2*(gtid+1) of the owning thread
  kmp_int32 depth_locked; // depth locked, for nested locks only
};

typedef struct kmp_base_futex_lock kmp_base_futex_lock_t;

union kmp_futex_lock {
  kmp_base_futex_lock_t lk;
  kmp_lock_pool_t pool; // make certain struct is large enough
  double lk_align; // use worst case alignment
  // no cache line padding
};

typedef union kmp_futex_lock kmp_futex_lock_t;

// Static initializer for futex lock variables. Usage:
//   kmp_futex_lock_t xlock = KMP_FUTEX_LOCK_INITIALIZER(xlock);
#define KMP_FUTEX_LOCK_INITIALIZER(lock)                                       \
  {                                                                            \
    { KMP_LOCK_FREE(futex), 0 }                                                \
  }

extern int __kmp_acquire_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid);
extern int __kmp_test_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid);
extern int __kmp_release_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid);
extern void __kmp_init_futex_lock(kmp_futex_lock_t *lck);
extern void __kmp_destroy_futex_lock(kmp_futex_lock_t *lck);

extern int __kmp_acquire_nested_futex_lock(kmp_futex_lock_t *lck,
                                           kmp_int32 gtid);
extern int __kmp_test_nested_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid);
extern int __kmp_release_nested_futex_lock(kmp_futex_lock_t *lck,
                                           kmp_int32 gtid);
extern void __kmp_init_nested_futex_lock(kmp_futex_lock_t *lck);
extern void __kmp_destroy_nested_futex_lock(kmp_futex_lock_t *lck);
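// Note on the poll encoding (a summary of the scheme implemented in
// kmp_lock.cpp, not part of this interface): storing 2*(gtid+1) leaves the low
// bit of the owner code free, and the implementation uses it as a "waiters
// present" flag so that a releasing thread only issues the futex-wake syscall
// when another thread may actually be sleeping in futex-wait.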
#endif // KMP_USE_FUTEX

// ----------------------------------------------------------------------------
// Ticket locks.

#ifdef __cplusplus

#ifdef _MSC_VER
// MSVC won't allow use of std::atomic<> in a union since it has a non-trivial
// copy constructor.

struct kmp_base_ticket_lock {
  // `initialized' must be the first entry in the lock data structure!
  std::atomic_bool initialized;
  volatile union kmp_ticket_lock *self; // points to the lock union
  ident_t const *location; // Source code location of omp_init_lock().
  std::atomic_uint
      next_ticket; // ticket number to give to next thread which acquires
  std::atomic_uint now_serving; // ticket number for thread which holds the lock
  std::atomic_int owner_id; // (gtid+1) of owning thread, 0 if unlocked
  std::atomic_int depth_locked; // depth locked, for nested locks only
  kmp_lock_flags_t flags; // lock specifics, e.g. critical section lock
};
#else
struct kmp_base_ticket_lock {
  // `initialized' must be the first entry in the lock data structure!
  std::atomic<bool> initialized;
  volatile union kmp_ticket_lock *self; // points to the lock union
  ident_t const *location; // Source code location of omp_init_lock().
  std::atomic<unsigned>
      next_ticket; // ticket number to give to next thread which acquires
  std::atomic<unsigned>
      now_serving; // ticket number for thread which holds the lock
  std::atomic<int> owner_id; // (gtid+1) of owning thread, 0 if unlocked
  std::atomic<int> depth_locked; // depth locked, for nested locks only
  kmp_lock_flags_t flags; // lock specifics, e.g. critical section lock
};
#endif

#else // __cplusplus

struct kmp_base_ticket_lock;

#endif // !__cplusplus

typedef struct kmp_base_ticket_lock kmp_base_ticket_lock_t;

union KMP_ALIGN_CACHE kmp_ticket_lock {
  kmp_base_ticket_lock_t
      lk; // This field must be first to allow static initializing.
  kmp_lock_pool_t pool;
  double lk_align; // use worst case alignment
  char lk_pad[KMP_PAD(kmp_base_ticket_lock_t, CACHE_LINE)];
};

typedef union kmp_ticket_lock kmp_ticket_lock_t;

// Static initializer for simple ticket lock variables. Usage:
//   kmp_ticket_lock_t xlock = KMP_TICKET_LOCK_INITIALIZER(xlock);
// Note the macro argument. It is important to make var properly initialized.
#define KMP_TICKET_LOCK_INITIALIZER(lock)                                      \
  {                                                                            \
    { true, &(lock), NULL, 0U, 0U, 0, -1 }                                     \
  }

extern int __kmp_acquire_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid);
extern int __kmp_test_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid);
extern int __kmp_test_ticket_lock_with_cheks(kmp_ticket_lock_t *lck,
                                             kmp_int32 gtid);
extern int __kmp_release_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid);
extern void __kmp_init_ticket_lock(kmp_ticket_lock_t *lck);
extern void __kmp_destroy_ticket_lock(kmp_ticket_lock_t *lck);

extern int __kmp_acquire_nested_ticket_lock(kmp_ticket_lock_t *lck,
                                            kmp_int32 gtid);
extern int __kmp_test_nested_ticket_lock(kmp_ticket_lock_t *lck,
                                         kmp_int32 gtid);
extern int __kmp_release_nested_ticket_lock(kmp_ticket_lock_t *lck,
                                            kmp_int32 gtid);
extern void __kmp_init_nested_ticket_lock(kmp_ticket_lock_t *lck);
extern void __kmp_destroy_nested_ticket_lock(kmp_ticket_lock_t *lck);
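// Illustrative use of the simple ticket-lock interface (a sketch; `gtid' is
// assumed to be a valid global thread id):
//   kmp_ticket_lock_t tlock = KMP_TICKET_LOCK_INITIALIZER(tlock);
//   __kmp_acquire_ticket_lock(&tlock, gtid); // draws a ticket from
//                                            // next_ticket, then waits until
//                                            // now_serving reaches it
//   /* ... critical section ... */
//   __kmp_release_ticket_lock(&tlock, gtid); // advances now_serving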
// ----------------------------------------------------------------------------
// Queuing locks.

#if KMP_USE_ADAPTIVE_LOCKS

struct kmp_adaptive_lock_info;

typedef struct kmp_adaptive_lock_info kmp_adaptive_lock_info_t;

#if KMP_DEBUG_ADAPTIVE_LOCKS

struct kmp_adaptive_lock_statistics {
  /* So we can get stats from locks that haven't been destroyed. */
  kmp_adaptive_lock_info_t *next;
  kmp_adaptive_lock_info_t *prev;

  /* Other statistics */
  kmp_uint32 successfulSpeculations;
  kmp_uint32 hardFailedSpeculations;
  kmp_uint32 softFailedSpeculations;
  kmp_uint32 nonSpeculativeAcquires;
  kmp_uint32 nonSpeculativeAcquireAttempts;
  kmp_uint32 lemmingYields;
};

typedef struct kmp_adaptive_lock_statistics kmp_adaptive_lock_statistics_t;

extern void __kmp_print_speculative_stats();
extern void __kmp_init_speculative_stats();

#endif // KMP_DEBUG_ADAPTIVE_LOCKS

struct kmp_adaptive_lock_info {
  /* Values used for adaptivity.
     Although these are accessed from multiple threads we don't access them
     atomically, because if we miss updates it probably doesn't matter much.
     (It just affects our decision about whether to try speculation on the
     lock). */
  kmp_uint32 volatile badness;
  kmp_uint32 volatile acquire_attempts;
  /* Parameters of the lock. */
  kmp_uint32 max_badness;
  kmp_uint32 max_soft_retries;

#if KMP_DEBUG_ADAPTIVE_LOCKS
  kmp_adaptive_lock_statistics_t volatile stats;
#endif
};

#endif // KMP_USE_ADAPTIVE_LOCKS

struct kmp_base_queuing_lock {

  // `initialized' must be the first entry in the lock data structure!
  volatile union kmp_queuing_lock
      *initialized; // Points to the lock union if in initialized state.

  ident_t const *location; // Source code location of omp_init_lock().

  KMP_ALIGN(8) // tail_id must be 8-byte aligned!

  volatile kmp_int32
      tail_id; // (gtid+1) of thread at tail of wait queue, 0 if empty
  // Must be no padding here since head/tail used in 8-byte CAS
  volatile kmp_int32
      head_id; // (gtid+1) of thread at head of wait queue, 0 if empty
  // Decl order assumes little endian
  // bakery-style lock
  volatile kmp_uint32
      next_ticket; // ticket number to give to next thread which acquires
  volatile kmp_uint32
      now_serving; // ticket number for thread which holds the lock
  volatile kmp_int32 owner_id; // (gtid+1) of owning thread, 0 if unlocked
  kmp_int32 depth_locked; // depth locked, for nested locks only

  kmp_lock_flags_t flags; // lock specifics, e.g. critical section lock
};

typedef struct kmp_base_queuing_lock kmp_base_queuing_lock_t;

KMP_BUILD_ASSERT(offsetof(kmp_base_queuing_lock_t, tail_id) % 8 == 0);

union KMP_ALIGN_CACHE kmp_queuing_lock {
  kmp_base_queuing_lock_t
      lk; // This field must be first to allow static initializing.
  kmp_lock_pool_t pool;
  double lk_align; // use worst case alignment
  char lk_pad[KMP_PAD(kmp_base_queuing_lock_t, CACHE_LINE)];
};

typedef union kmp_queuing_lock kmp_queuing_lock_t;

extern int __kmp_acquire_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid);
extern int __kmp_test_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid);
extern int __kmp_release_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid);
extern void __kmp_init_queuing_lock(kmp_queuing_lock_t *lck);
extern void __kmp_destroy_queuing_lock(kmp_queuing_lock_t *lck);

extern int __kmp_acquire_nested_queuing_lock(kmp_queuing_lock_t *lck,
                                             kmp_int32 gtid);
extern int __kmp_test_nested_queuing_lock(kmp_queuing_lock_t *lck,
                                          kmp_int32 gtid);
extern int __kmp_release_nested_queuing_lock(kmp_queuing_lock_t *lck,
                                             kmp_int32 gtid);
extern void __kmp_init_nested_queuing_lock(kmp_queuing_lock_t *lck);
extern void __kmp_destroy_nested_queuing_lock(kmp_queuing_lock_t *lck);
#if KMP_USE_ADAPTIVE_LOCKS

// ----------------------------------------------------------------------------
// Adaptive locks.
struct kmp_base_adaptive_lock {
  kmp_base_queuing_lock qlk;
  KMP_ALIGN(CACHE_LINE)
  kmp_adaptive_lock_info_t
      adaptive; // Information for the speculative adaptive lock
};

typedef struct kmp_base_adaptive_lock kmp_base_adaptive_lock_t;

union KMP_ALIGN_CACHE kmp_adaptive_lock {
  kmp_base_adaptive_lock_t lk;
  kmp_lock_pool_t pool;
  double lk_align;
  char lk_pad[KMP_PAD(kmp_base_adaptive_lock_t, CACHE_LINE)];
};
typedef union kmp_adaptive_lock kmp_adaptive_lock_t;

#define GET_QLK_PTR(l) ((kmp_queuing_lock_t *)&(l)->lk.qlk)

#endif // KMP_USE_ADAPTIVE_LOCKS

// ----------------------------------------------------------------------------
// DRDPA ticket locks.
struct kmp_base_drdpa_lock {
  // All of the fields on the first cache line are only written when
  // initializing or reconfiguring the lock. These are relatively rare
  // operations, so data from the first cache line will usually stay resident
  // in the cache of each thread trying to acquire the lock.
  //
  // initialized must be the first entry in the lock data structure!
  KMP_ALIGN_CACHE

  volatile union kmp_drdpa_lock
      *initialized; // points to the lock union if in initialized state
  ident_t const *location; // Source code location of omp_init_lock().
  std::atomic<std::atomic<kmp_uint64> *> polls;
  std::atomic<kmp_uint64> mask; // is 2**num_polls-1 for mod op
  kmp_uint64 cleanup_ticket; // thread with cleanup ticket
  std::atomic<kmp_uint64> *old_polls; // will deallocate old_polls
  kmp_uint32 num_polls; // must be power of 2

  // next_ticket needs to exist in a separate cache line, as it is
  // invalidated every time a thread takes a new ticket.
  KMP_ALIGN_CACHE

  std::atomic<kmp_uint64> next_ticket;

  // now_serving is used to store our ticket value while we hold the lock. It
  // has a slightly different meaning in the DRDPA ticket locks (where it is
  // written by the acquiring thread) than it does in the simple ticket locks
  // (where it is written by the releasing thread).
  //
  // Since now_serving is only read and written in the critical section,
  // it is non-volatile, but it needs to exist on a separate cache line,
  // as it is invalidated at every lock acquire.
  //
  // Likewise, the vars used for nested locks (owner_id and depth_locked) are
  // only written by the thread owning the lock, so they are put in this cache
  // line. owner_id is read by other threads, so it must be declared volatile.
  KMP_ALIGN_CACHE
  kmp_uint64 now_serving; // doesn't have to be volatile
  volatile kmp_uint32 owner_id; // (gtid+1) of owning thread, 0 if unlocked
  kmp_int32 depth_locked; // depth locked
  kmp_lock_flags_t flags; // lock specifics, e.g. critical section lock
};
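// Worked illustration of the polling scheme above: with num_polls == 4 the
// mask is 3, so a thread that draws ticket 9 spins on polls[9 & 3], i.e.
// polls[1], until the releasing thread publishes ticket 9 there.
// Reconfiguration swaps in a new polls array (num_polls always stays a power
// of 2) and parks the old one in old_polls until it can be deallocated.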
typedef struct kmp_base_drdpa_lock kmp_base_drdpa_lock_t;

union KMP_ALIGN_CACHE kmp_drdpa_lock {
  kmp_base_drdpa_lock_t
      lk; // This field must be first to allow static initializing.
  kmp_lock_pool_t pool;
  double lk_align; // use worst case alignment
  char lk_pad[KMP_PAD(kmp_base_drdpa_lock_t, CACHE_LINE)];
};

typedef union kmp_drdpa_lock kmp_drdpa_lock_t;

extern int __kmp_acquire_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid);
extern int __kmp_test_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid);
extern int __kmp_release_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid);
extern void __kmp_init_drdpa_lock(kmp_drdpa_lock_t *lck);
extern void __kmp_destroy_drdpa_lock(kmp_drdpa_lock_t *lck);

extern int __kmp_acquire_nested_drdpa_lock(kmp_drdpa_lock_t *lck,
                                           kmp_int32 gtid);
extern int __kmp_test_nested_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid);
extern int __kmp_release_nested_drdpa_lock(kmp_drdpa_lock_t *lck,
                                           kmp_int32 gtid);
extern void __kmp_init_nested_drdpa_lock(kmp_drdpa_lock_t *lck);
extern void __kmp_destroy_nested_drdpa_lock(kmp_drdpa_lock_t *lck);

// ============================================================================
// Lock purposes.
// ============================================================================

// Bootstrap locks.
//
// Bootstrap locks -- very few locks used at library initialization time.
// Bootstrap locks are currently implemented as ticket locks.
// They could also be implemented as test and set locks, but cannot be
// implemented with other lock kinds as they require gtids which are not
// available at initialization time.

typedef kmp_ticket_lock_t kmp_bootstrap_lock_t;

#define KMP_BOOTSTRAP_LOCK_INITIALIZER(lock) KMP_TICKET_LOCK_INITIALIZER((lock))
#define KMP_BOOTSTRAP_LOCK_INIT(lock)                                          \
  kmp_bootstrap_lock_t lock = KMP_TICKET_LOCK_INITIALIZER(lock)

static inline int __kmp_acquire_bootstrap_lock(kmp_bootstrap_lock_t *lck) {
  return __kmp_acquire_ticket_lock(lck, KMP_GTID_DNE);
}

static inline int __kmp_test_bootstrap_lock(kmp_bootstrap_lock_t *lck) {
  return __kmp_test_ticket_lock(lck, KMP_GTID_DNE);
}

static inline void __kmp_release_bootstrap_lock(kmp_bootstrap_lock_t *lck) {
  __kmp_release_ticket_lock(lck, KMP_GTID_DNE);
}

static inline void __kmp_init_bootstrap_lock(kmp_bootstrap_lock_t *lck) {
  __kmp_init_ticket_lock(lck);
}

static inline void __kmp_destroy_bootstrap_lock(kmp_bootstrap_lock_t *lck) {
  __kmp_destroy_ticket_lock(lck);
}
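// Illustrative use (a sketch; the lock name is hypothetical): serializing
// startup code that runs before valid gtids exist:
//   static KMP_BOOTSTRAP_LOCK_INIT(init_lock); // static ticket-lock definition
//   __kmp_acquire_bootstrap_lock(&init_lock); // acquires with KMP_GTID_DNE
//   /* ... one-time initialization ... */
//   __kmp_release_bootstrap_lock(&init_lock);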
// Internal RTL locks.
//
// Internal RTL locks are also implemented as ticket locks, for now.
//
// FIXME - We should go through and figure out which lock kind works best for
// each internal lock, and use the type declaration and function calls for
// that explicit lock kind (and get rid of this section).

typedef kmp_ticket_lock_t kmp_lock_t;

#define KMP_LOCK_INIT(lock) kmp_lock_t lock = KMP_TICKET_LOCK_INITIALIZER(lock)

static inline int __kmp_acquire_lock(kmp_lock_t *lck, kmp_int32 gtid) {
  return __kmp_acquire_ticket_lock(lck, gtid);
}

static inline int __kmp_test_lock(kmp_lock_t *lck, kmp_int32 gtid) {
  return __kmp_test_ticket_lock(lck, gtid);
}

static inline void __kmp_release_lock(kmp_lock_t *lck, kmp_int32 gtid) {
  __kmp_release_ticket_lock(lck, gtid);
}

static inline void __kmp_init_lock(kmp_lock_t *lck) {
  __kmp_init_ticket_lock(lck);
}

static inline void __kmp_destroy_lock(kmp_lock_t *lck) {
  __kmp_destroy_ticket_lock(lck);
}

// User locks.
//
// Do not allocate objects of type union kmp_user_lock!!! This will waste space
// unless __kmp_user_lock_kind == lk_drdpa. Instead, check the value of
// __kmp_user_lock_kind and allocate objects of the type of the appropriate
// union member, and cast their addresses to kmp_user_lock_p.

enum kmp_lock_kind {
  lk_default = 0,
  lk_tas,
#if KMP_USE_FUTEX
  lk_futex,
#endif
#if KMP_USE_DYNAMIC_LOCK && KMP_USE_TSX
  lk_hle,
  lk_rtm_queuing,
  lk_rtm_spin,
#endif
  lk_ticket,
  lk_queuing,
  lk_drdpa,
#if KMP_USE_ADAPTIVE_LOCKS
  lk_adaptive
#endif // KMP_USE_ADAPTIVE_LOCKS
};

typedef enum kmp_lock_kind kmp_lock_kind_t;

extern kmp_lock_kind_t __kmp_user_lock_kind;

union kmp_user_lock {
  kmp_tas_lock_t tas;
#if KMP_USE_FUTEX
  kmp_futex_lock_t futex;
#endif
  kmp_ticket_lock_t ticket;
  kmp_queuing_lock_t queuing;
  kmp_drdpa_lock_t drdpa;
#if KMP_USE_ADAPTIVE_LOCKS
  kmp_adaptive_lock_t adaptive;
#endif // KMP_USE_ADAPTIVE_LOCKS
  kmp_lock_pool_t pool;
};

typedef union kmp_user_lock *kmp_user_lock_p;
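// A sketch of the allocation pattern the "Do not allocate" comment above
// prescribes (illustrative only; __kmp_allocate is the runtime's internal
// allocator from kmp.h):
//   kmp_user_lock_p lck;
//   if (__kmp_user_lock_kind == lk_ticket)
//     lck = (kmp_user_lock_p)__kmp_allocate(sizeof(kmp_ticket_lock_t));
//   // ... one case per lock kind; never sizeof(union kmp_user_lock) ...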
#if !KMP_USE_DYNAMIC_LOCK

extern size_t __kmp_base_user_lock_size;
extern size_t __kmp_user_lock_size;

extern kmp_int32 (*__kmp_get_user_lock_owner_)(kmp_user_lock_p lck);

static inline kmp_int32 __kmp_get_user_lock_owner(kmp_user_lock_p lck) {
  KMP_DEBUG_ASSERT(__kmp_get_user_lock_owner_ != NULL);
  return (*__kmp_get_user_lock_owner_)(lck);
}

extern int (*__kmp_acquire_user_lock_with_checks_)(kmp_user_lock_p lck,
                                                   kmp_int32 gtid);

#if KMP_OS_LINUX &&                                                            \
    (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)

#define __kmp_acquire_user_lock_with_checks(lck, gtid)                         \
  if (__kmp_user_lock_kind == lk_tas) {                                        \
    if (__kmp_env_consistency_check) {                                         \
      char const *const func = "omp_set_lock";                                 \
      if ((sizeof(kmp_tas_lock_t) <= OMP_LOCK_T_SIZE) &&                       \
          lck->tas.lk.depth_locked != -1) {                                    \
        KMP_FATAL(LockNestableUsedAsSimple, func);                             \
      }                                                                        \
      if ((gtid >= 0) && (lck->tas.lk.poll - 1 == gtid)) {                     \
        KMP_FATAL(LockIsAlreadyOwned, func);                                   \
      }                                                                        \
    }                                                                          \
    if (lck->tas.lk.poll != 0 ||                                               \
        !__kmp_atomic_compare_store_acq(&lck->tas.lk.poll, 0, gtid + 1)) {     \
      kmp_uint32 spins;                                                        \
      kmp_uint64 time;                                                         \
      KMP_FSYNC_PREPARE(lck);                                                  \
      KMP_INIT_YIELD(spins);                                                   \
      KMP_INIT_BACKOFF(time);                                                  \
      do {                                                                     \
        KMP_YIELD_OVERSUB_ELSE_SPIN(spins, time);                              \
      } while (                                                                \
          lck->tas.lk.poll != 0 ||                                             \
          !__kmp_atomic_compare_store_acq(&lck->tas.lk.poll, 0, gtid + 1));    \
    }                                                                          \
    KMP_FSYNC_ACQUIRED(lck);                                                   \
  } else {                                                                     \
    KMP_DEBUG_ASSERT(__kmp_acquire_user_lock_with_checks_ != NULL);            \
    (*__kmp_acquire_user_lock_with_checks_)(lck, gtid);                        \
  }

#else
static inline int __kmp_acquire_user_lock_with_checks(kmp_user_lock_p lck,
                                                      kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(__kmp_acquire_user_lock_with_checks_ != NULL);
  return (*__kmp_acquire_user_lock_with_checks_)(lck, gtid);
}
#endif

extern int (*__kmp_test_user_lock_with_checks_)(kmp_user_lock_p lck,
                                                kmp_int32 gtid);

#if KMP_OS_LINUX &&                                                            \
    (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)

#include "kmp_i18n.h" /* AC: KMP_FATAL definition */
extern int __kmp_env_consistency_check; /* AC: copy from kmp.h here */
static inline int __kmp_test_user_lock_with_checks(kmp_user_lock_p lck,
                                                   kmp_int32 gtid) {
  if (__kmp_user_lock_kind == lk_tas) {
    if (__kmp_env_consistency_check) {
      char const *const func = "omp_test_lock";
      if ((sizeof(kmp_tas_lock_t) <= OMP_LOCK_T_SIZE) &&
          lck->tas.lk.depth_locked != -1) {
        KMP_FATAL(LockNestableUsedAsSimple, func);
      }
    }
    return ((lck->tas.lk.poll == 0) &&
            __kmp_atomic_compare_store_acq(&lck->tas.lk.poll, 0, gtid + 1));
  } else {
    KMP_DEBUG_ASSERT(__kmp_test_user_lock_with_checks_ != NULL);
    return (*__kmp_test_user_lock_with_checks_)(lck, gtid);
  }
}
#else
static inline int __kmp_test_user_lock_with_checks(kmp_user_lock_p lck,
                                                   kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(__kmp_test_user_lock_with_checks_ != NULL);
  return (*__kmp_test_user_lock_with_checks_)(lck, gtid);
}
#endif

extern int (*__kmp_release_user_lock_with_checks_)(kmp_user_lock_p lck,
                                                   kmp_int32 gtid);

static inline void __kmp_release_user_lock_with_checks(kmp_user_lock_p lck,
                                                       kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(__kmp_release_user_lock_with_checks_ != NULL);
  (*__kmp_release_user_lock_with_checks_)(lck, gtid);
}

extern void (*__kmp_init_user_lock_with_checks_)(kmp_user_lock_p lck);

static inline void __kmp_init_user_lock_with_checks(kmp_user_lock_p lck) {
  KMP_DEBUG_ASSERT(__kmp_init_user_lock_with_checks_ != NULL);
  (*__kmp_init_user_lock_with_checks_)(lck);
}
// We need a non-checking version of destroy lock for when the RTL is doing the
// cleanup as it can't always tell if the lock is nested or not.
extern void (*__kmp_destroy_user_lock_)(kmp_user_lock_p lck);

static inline void __kmp_destroy_user_lock(kmp_user_lock_p lck) {
  KMP_DEBUG_ASSERT(__kmp_destroy_user_lock_ != NULL);
  (*__kmp_destroy_user_lock_)(lck);
}

extern void (*__kmp_destroy_user_lock_with_checks_)(kmp_user_lock_p lck);

static inline void __kmp_destroy_user_lock_with_checks(kmp_user_lock_p lck) {
  KMP_DEBUG_ASSERT(__kmp_destroy_user_lock_with_checks_ != NULL);
  (*__kmp_destroy_user_lock_with_checks_)(lck);
}

extern int (*__kmp_acquire_nested_user_lock_with_checks_)(kmp_user_lock_p lck,
                                                          kmp_int32 gtid);

#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64)

#define __kmp_acquire_nested_user_lock_with_checks(lck, gtid, depth)           \
  if (__kmp_user_lock_kind == lk_tas) {                                        \
    if (__kmp_env_consistency_check) {                                         \
      char const *const func = "omp_set_nest_lock";                            \
      if ((sizeof(kmp_tas_lock_t) <= OMP_NEST_LOCK_T_SIZE) &&                  \
          lck->tas.lk.depth_locked == -1) {                                    \
        KMP_FATAL(LockSimpleUsedAsNestable, func);                             \
      }                                                                        \
    }                                                                          \
    if (lck->tas.lk.poll - 1 == gtid) {                                        \
      lck->tas.lk.depth_locked += 1;                                           \
      *depth = KMP_LOCK_ACQUIRED_NEXT;                                         \
    } else {                                                                   \
      if ((lck->tas.lk.poll != 0) ||                                           \
          !__kmp_atomic_compare_store_acq(&lck->tas.lk.poll, 0, gtid + 1)) {   \
        kmp_uint32 spins;                                                      \
        kmp_uint64 time;                                                       \
        KMP_FSYNC_PREPARE(lck);                                                \
        KMP_INIT_YIELD(spins);                                                 \
        KMP_INIT_BACKOFF(time);                                                \
        do {                                                                   \
          KMP_YIELD_OVERSUB_ELSE_SPIN(spins, time);                            \
        } while (                                                              \
            (lck->tas.lk.poll != 0) ||                                         \
            !__kmp_atomic_compare_store_acq(&lck->tas.lk.poll, 0, gtid + 1));  \
      }                                                                        \
      lck->tas.lk.depth_locked = 1;                                            \
      *depth = KMP_LOCK_ACQUIRED_FIRST;                                        \
    }                                                                          \
    KMP_FSYNC_ACQUIRED(lck);                                                   \
  } else {                                                                     \
    KMP_DEBUG_ASSERT(__kmp_acquire_nested_user_lock_with_checks_ != NULL);     \
    *depth = (*__kmp_acquire_nested_user_lock_with_checks_)(lck, gtid);        \
  }

#else
static inline void
__kmp_acquire_nested_user_lock_with_checks(kmp_user_lock_p lck, kmp_int32 gtid,
                                           int *depth) {
  KMP_DEBUG_ASSERT(__kmp_acquire_nested_user_lock_with_checks_ != NULL);
  *depth = (*__kmp_acquire_nested_user_lock_with_checks_)(lck, gtid);
}
#endif
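// Worked illustration of the nested protocol above: when the owning thread
// (poll == gtid+1) acquires again, only depth_locked is incremented and *depth
// is set to KMP_LOCK_ACQUIRED_NEXT; a first acquire spins for the lock, sets
// depth_locked to 1, and reports KMP_LOCK_ACQUIRED_FIRST. The matching
// releases are expected to return KMP_LOCK_STILL_HELD until the depth drops
// back to zero, and KMP_LOCK_RELEASED on the final one.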
extern int (*__kmp_test_nested_user_lock_with_checks_)(kmp_user_lock_p lck,
                                                       kmp_int32 gtid);

#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64)
static inline int __kmp_test_nested_user_lock_with_checks(kmp_user_lock_p lck,
                                                          kmp_int32 gtid) {
  if (__kmp_user_lock_kind == lk_tas) {
    int retval;
    if (__kmp_env_consistency_check) {
      char const *const func = "omp_test_nest_lock";
      if ((sizeof(kmp_tas_lock_t) <= OMP_NEST_LOCK_T_SIZE) &&
          lck->tas.lk.depth_locked == -1) {
        KMP_FATAL(LockSimpleUsedAsNestable, func);
      }
    }
    KMP_DEBUG_ASSERT(gtid >= 0);
    if (lck->tas.lk.poll - 1 ==
        gtid) { /* __kmp_get_tas_lock_owner( lck ) == gtid */
      return ++lck->tas.lk.depth_locked; /* same owner, depth increased */
    }
    retval = ((lck->tas.lk.poll == 0) &&
              __kmp_atomic_compare_store_acq(&lck->tas.lk.poll, 0, gtid + 1));
    if (retval) {
      KMP_MB();
      lck->tas.lk.depth_locked = 1;
    }
    return retval;
  } else {
    KMP_DEBUG_ASSERT(__kmp_test_nested_user_lock_with_checks_ != NULL);
    return (*__kmp_test_nested_user_lock_with_checks_)(lck, gtid);
  }
}
#else
static inline int __kmp_test_nested_user_lock_with_checks(kmp_user_lock_p lck,
                                                          kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(__kmp_test_nested_user_lock_with_checks_ != NULL);
  return (*__kmp_test_nested_user_lock_with_checks_)(lck, gtid);
}
#endif

extern int (*__kmp_release_nested_user_lock_with_checks_)(kmp_user_lock_p lck,
                                                          kmp_int32 gtid);

static inline int
__kmp_release_nested_user_lock_with_checks(kmp_user_lock_p lck,
                                           kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(__kmp_release_nested_user_lock_with_checks_ != NULL);
  return (*__kmp_release_nested_user_lock_with_checks_)(lck, gtid);
}

extern void (*__kmp_init_nested_user_lock_with_checks_)(kmp_user_lock_p lck);

static inline void
__kmp_init_nested_user_lock_with_checks(kmp_user_lock_p lck) {
  KMP_DEBUG_ASSERT(__kmp_init_nested_user_lock_with_checks_ != NULL);
  (*__kmp_init_nested_user_lock_with_checks_)(lck);
}

extern void (*__kmp_destroy_nested_user_lock_with_checks_)(
    kmp_user_lock_p lck);

static inline void
__kmp_destroy_nested_user_lock_with_checks(kmp_user_lock_p lck) {
  KMP_DEBUG_ASSERT(__kmp_destroy_nested_user_lock_with_checks_ != NULL);
  (*__kmp_destroy_nested_user_lock_with_checks_)(lck);
}

// user lock functions which do not necessarily exist for all lock kinds.
//
// The "set" functions usually have wrapper routines that check for a NULL set
// function pointer and call it if non-NULL.
//
// In some cases, it makes sense to have a "get" wrapper function check for a
// NULL get function pointer and return NULL / invalid value / error code if
// the function pointer is NULL.
//
// In other cases, the calling code really should differentiate between an
// unimplemented function and one that is implemented but returning NULL /
// invalid value. If this is the case, no get function wrapper exists.

extern int (*__kmp_is_user_lock_initialized_)(kmp_user_lock_p lck);

// no set function; fields set during local allocation

extern const ident_t *(*__kmp_get_user_lock_location_)(kmp_user_lock_p lck);

static inline const ident_t *
__kmp_get_user_lock_location(kmp_user_lock_p lck) {
  if (__kmp_get_user_lock_location_ != NULL) {
    return (*__kmp_get_user_lock_location_)(lck);
  } else {
    return NULL;
  }
}

extern void (*__kmp_set_user_lock_location_)(kmp_user_lock_p lck,
                                             const ident_t *loc);

static inline void __kmp_set_user_lock_location(kmp_user_lock_p lck,
                                                const ident_t *loc) {
  if (__kmp_set_user_lock_location_ != NULL) {
    (*__kmp_set_user_lock_location_)(lck, loc);
  }
}

extern kmp_lock_flags_t (*__kmp_get_user_lock_flags_)(kmp_user_lock_p lck);

extern void (*__kmp_set_user_lock_flags_)(kmp_user_lock_p lck,
                                          kmp_lock_flags_t flags);

static inline void __kmp_set_user_lock_flags(kmp_user_lock_p lck,
                                             kmp_lock_flags_t flags) {
  if (__kmp_set_user_lock_flags_ != NULL) {
    (*__kmp_set_user_lock_flags_)(lck, flags);
  }
}

// The function which sets up all of the vtbl pointers for kmp_user_lock_t.
extern void __kmp_set_user_lock_vptrs(kmp_lock_kind_t user_lock_kind);
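// Example (illustrative): the wrappers above degrade gracefully for lock kinds
// that carry no tool information, such as TAS and futex locks:
//   const ident_t *loc = __kmp_get_user_lock_location(lck); // NULL if unsupported
//   __kmp_set_user_lock_flags(lck, kmp_lf_critical_section); // no-op if unsupported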
// Macros for binding user lock functions.
#define KMP_BIND_USER_LOCK_TEMPLATE(nest, kind, suffix)                        \
  {                                                                            \
    __kmp_acquire##nest##user_lock_with_checks_ = (int (*)(                    \
        kmp_user_lock_p, kmp_int32))__kmp_acquire##nest##kind##_##suffix;      \
    __kmp_release##nest##user_lock_with_checks_ = (int (*)(                    \
        kmp_user_lock_p, kmp_int32))__kmp_release##nest##kind##_##suffix;      \
    __kmp_test##nest##user_lock_with_checks_ = (int (*)(                       \
        kmp_user_lock_p, kmp_int32))__kmp_test##nest##kind##_##suffix;         \
    __kmp_init##nest##user_lock_with_checks_ =                                 \
        (void (*)(kmp_user_lock_p))__kmp_init##nest##kind##_##suffix;          \
    __kmp_destroy##nest##user_lock_with_checks_ =                              \
        (void (*)(kmp_user_lock_p))__kmp_destroy##nest##kind##_##suffix;       \
  }

#define KMP_BIND_USER_LOCK(kind) KMP_BIND_USER_LOCK_TEMPLATE(_, kind, lock)
#define KMP_BIND_USER_LOCK_WITH_CHECKS(kind)                                   \
  KMP_BIND_USER_LOCK_TEMPLATE(_, kind, lock_with_checks)
#define KMP_BIND_NESTED_USER_LOCK(kind)                                        \
  KMP_BIND_USER_LOCK_TEMPLATE(_nested_, kind, lock)
#define KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(kind)                            \
  KMP_BIND_USER_LOCK_TEMPLATE(_nested_, kind, lock_with_checks)
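// For example, KMP_BIND_USER_LOCK(ticket) instantiates the template with
// nest = _, kind = ticket, suffix = lock, so the token pasting binds
// __kmp_acquire_user_lock_with_checks_ to __kmp_acquire_ticket_lock (and
// likewise for the release/test/init/destroy entries). This is an illustration
// of the expansion, not an additional binding.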
// User lock table & lock allocation
/* On 64-bit Linux* OS (and OS X*) the GNU compiler allocates only 4 bytes of
   memory for the lock variable, which is not enough to store a pointer, so we
   have to use lock indexes instead of pointers and maintain a lock table to
   map indexes to pointers.

   Note: The first element of the table is not a pointer to a lock! It is a
   pointer to the previously allocated table (or NULL if it is the first
   table).

   Usage:

   if ( OMP_LOCK_T_SIZE < sizeof( <lock> ) ) { // or OMP_NEST_LOCK_T_SIZE
     Lock table is fully utilized. User locks are indexes, so table is used on
     user lock operation.
     Note: it may be the case (lin_32) that we don't need to use a lock
     table for regular locks, but do need the table for nested locks.
   }
   else {
     Lock table initialized but not actually used.
   }
*/

struct kmp_lock_table {
  kmp_lock_index_t used; // Number of used elements
  kmp_lock_index_t allocated; // Number of allocated elements
  kmp_user_lock_p *table; // Lock table.
};

typedef struct kmp_lock_table kmp_lock_table_t;

extern kmp_lock_table_t __kmp_user_lock_table;
extern kmp_user_lock_p __kmp_lock_pool;

struct kmp_block_of_locks {
  struct kmp_block_of_locks *next_block;
  void *locks;
};

typedef struct kmp_block_of_locks kmp_block_of_locks_t;

extern kmp_block_of_locks_t *__kmp_lock_blocks;
extern int __kmp_num_locks_in_block;

extern kmp_user_lock_p __kmp_user_lock_allocate(void **user_lock,
                                                kmp_int32 gtid,
                                                kmp_lock_flags_t flags);
extern void __kmp_user_lock_free(void **user_lock, kmp_int32 gtid,
                                 kmp_user_lock_p lck);
extern kmp_user_lock_p __kmp_lookup_user_lock(void **user_lock,
                                              char const *func);
extern void __kmp_cleanup_user_locks();

#define KMP_CHECK_USER_LOCK_INIT()                                             \
  {                                                                            \
    if (!TCR_4(__kmp_init_user_locks)) {                                       \
      __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);                         \
      if (!TCR_4(__kmp_init_user_locks)) {                                     \
        TCW_4(__kmp_init_user_locks, TRUE);                                    \
      }                                                                        \
      __kmp_release_bootstrap_lock(&__kmp_initz_lock);                         \
    }                                                                          \
  }

#endif // KMP_USE_DYNAMIC_LOCK

#undef KMP_PAD
#undef KMP_GTID_DNE

#if KMP_USE_DYNAMIC_LOCK
// KMP_USE_DYNAMIC_LOCK enables dynamic dispatch of lock functions without
// breaking the current compatibility. Essential functionality of this new code
// is dynamic dispatch, but it also implements (or enables implementation of)
// hinted user locks and critical sections, which will be part of OMP 4.5 soon.
//
// Lock type can be decided at creation time (i.e., lock initialization), and
// subsequent lock function calls on the created lock object require type
// extraction and a call through a jump table using the extracted type. This
// type information is stored in two different ways depending on the size of
// the lock object, and we differentiate lock types by this size requirement -
// direct and indirect locks.
//
// Direct locks:
// A direct lock object fits into the space created by the compiler for an
// omp_lock_t object, and TAS/Futex locks fall into this category. We use the
// low byte of the lock object as the storage for the lock type, and an
// appropriate bit operation is required to access the data meaningful to the
// lock algorithms. Also, to differentiate a direct lock from an indirect
// lock, 1 is written to the LSB of the lock object. The newly introduced
// "hle" lock is also a direct lock.
//
// Indirect locks:
// An indirect lock object requires more space than the compiler-generated
// space, and it should be allocated from the heap. Depending on the size of
// the compiler-generated space for the lock (i.e., size of omp_lock_t), this
// omp_lock_t object stores either the address of the heap-allocated indirect
// lock (void * fits in the object) or an index to the indirect lock table
// entry that holds the address. Ticket/Queuing/DRDPA/Adaptive locks fall into
// this category, and the newly introduced "rtm" lock is also an indirect
// lock, implemented on top of the Queuing lock. When the omp_lock_t object
// holds an index (not a lock address), 0 is written to the LSB to
// differentiate the lock from a direct lock, and the remaining bits hold the
// actual index into the indirect lock table.
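// Worked illustration of the encoding above (using the tag macros defined
// below): a free direct TAS lock stores locktag_tas (odd, LSB == 1) in its
// first word, and one held by gtid g stores ((g + 1) << 8) | locktag_tas,
// while an indirect lock holding table index i stores i << 1 (even, LSB == 0).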
#include <stdint.h> // for uintptr_t

// Shortcuts
#define KMP_USE_INLINED_TAS                                                    \
  (KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM)) && 1
#define KMP_USE_INLINED_FUTEX KMP_USE_FUTEX && 0

// List of lock definitions; all nested locks are indirect lock types.
// hle lock is xchg lock prefixed with XACQUIRE/XRELEASE.
#if KMP_USE_TSX
#if KMP_USE_FUTEX
#define KMP_FOREACH_D_LOCK(m, a) m(tas, a) m(futex, a) m(hle, a) m(rtm_spin, a)
#define KMP_FOREACH_I_LOCK(m, a)                                               \
  m(ticket, a) m(queuing, a) m(adaptive, a) m(drdpa, a) m(rtm_queuing, a)      \
      m(nested_tas, a) m(nested_futex, a) m(nested_ticket, a)                  \
          m(nested_queuing, a) m(nested_drdpa, a)
#else
#define KMP_FOREACH_D_LOCK(m, a) m(tas, a) m(hle, a) m(rtm_spin, a)
#define KMP_FOREACH_I_LOCK(m, a)                                               \
  m(ticket, a) m(queuing, a) m(adaptive, a) m(drdpa, a) m(rtm_queuing, a)      \
      m(nested_tas, a) m(nested_ticket, a) m(nested_queuing, a)                \
          m(nested_drdpa, a)
#endif // KMP_USE_FUTEX
#define KMP_LAST_D_LOCK lockseq_rtm_spin
#else
#if KMP_USE_FUTEX
#define KMP_FOREACH_D_LOCK(m, a) m(tas, a) m(futex, a)
#define KMP_FOREACH_I_LOCK(m, a)                                               \
  m(ticket, a) m(queuing, a) m(drdpa, a) m(nested_tas, a) m(nested_futex, a)   \
      m(nested_ticket, a) m(nested_queuing, a) m(nested_drdpa, a)
#define KMP_LAST_D_LOCK lockseq_futex
#else
#define KMP_FOREACH_D_LOCK(m, a) m(tas, a)
#define KMP_FOREACH_I_LOCK(m, a)                                               \
  m(ticket, a) m(queuing, a) m(drdpa, a) m(nested_tas, a) m(nested_ticket, a)  \
      m(nested_queuing, a) m(nested_drdpa, a)
#define KMP_LAST_D_LOCK lockseq_tas
#endif // KMP_USE_FUTEX
#endif // KMP_USE_TSX

// Information used in dynamic dispatch
#define KMP_LOCK_SHIFT 8 // number of low bits used as the tag for direct locks
#define KMP_FIRST_D_LOCK lockseq_tas
#define KMP_FIRST_I_LOCK lockseq_ticket
#define KMP_LAST_I_LOCK lockseq_nested_drdpa
#define KMP_NUM_I_LOCKS                                                        \
  (locktag_nested_drdpa + 1) // number of indirect lock types

// Base type for dynamic locks.
typedef kmp_uint32 kmp_dyna_lock_t;

// Lock sequence that enumerates all lock kinds. Always make this enumeration
// consistent with kmp_lockseq_t in the include directory.
typedef enum {
  lockseq_indirect = 0,
#define expand_seq(l, a) lockseq_##l,
  KMP_FOREACH_D_LOCK(expand_seq, 0) KMP_FOREACH_I_LOCK(expand_seq, 0)
#undef expand_seq
} kmp_dyna_lockseq_t;

// Enumerates indirect lock tags.
typedef enum {
#define expand_tag(l, a) locktag_##l,
  KMP_FOREACH_I_LOCK(expand_tag, 0)
#undef expand_tag
} kmp_indirect_locktag_t;

// Utility macros that extract information from lock sequences.
#define KMP_IS_D_LOCK(seq)                                                     \
  ((seq) >= KMP_FIRST_D_LOCK && (seq) <= KMP_LAST_D_LOCK)
#define KMP_IS_I_LOCK(seq)                                                     \
  ((seq) >= KMP_FIRST_I_LOCK && (seq) <= KMP_LAST_I_LOCK)
#define KMP_GET_I_TAG(seq) (kmp_indirect_locktag_t)((seq)-KMP_FIRST_I_LOCK)
#define KMP_GET_D_TAG(seq) ((seq) << 1 | 1)

// Enumerates direct lock tags starting from indirect tag.
typedef enum {
#define expand_tag(l, a) locktag_##l = KMP_GET_D_TAG(lockseq_##l),
  KMP_FOREACH_D_LOCK(expand_tag, 0)
#undef expand_tag
} kmp_direct_locktag_t;

// Indirect lock type
typedef struct {
  kmp_user_lock_p lock;
  kmp_indirect_locktag_t type;
} kmp_indirect_lock_t;
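// For example, with KMP_USE_TSX and KMP_USE_FUTEX both enabled,
// KMP_FOREACH_D_LOCK(expand_seq, 0) above expands to
//   lockseq_tas, lockseq_futex, lockseq_hle, lockseq_rtm_spin,
// so lockseq_tas == 1 and locktag_tas == KMP_GET_D_TAG(lockseq_tas) == 3
// (an illustration of the X-macro expansion under one configuration).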
// Function tables for direct locks. Set/unset/test differentiate functions
// with/without consistency checking.
extern void (*__kmp_direct_init[])(kmp_dyna_lock_t *, kmp_dyna_lockseq_t);
extern void (**__kmp_direct_destroy)(kmp_dyna_lock_t *);
extern int (**__kmp_direct_set)(kmp_dyna_lock_t *, kmp_int32);
extern int (**__kmp_direct_unset)(kmp_dyna_lock_t *, kmp_int32);
extern int (**__kmp_direct_test)(kmp_dyna_lock_t *, kmp_int32);

// Function tables for indirect locks. Set/unset/test differentiate functions
// with/without consistency checking.
extern void (*__kmp_indirect_init[])(kmp_user_lock_p);
extern void (**__kmp_indirect_destroy)(kmp_user_lock_p);
extern int (**__kmp_indirect_set)(kmp_user_lock_p, kmp_int32);
extern int (**__kmp_indirect_unset)(kmp_user_lock_p, kmp_int32);
extern int (**__kmp_indirect_test)(kmp_user_lock_p, kmp_int32);

// Extracts direct lock tag from a user lock pointer
#define KMP_EXTRACT_D_TAG(l)                                                   \
  ((kmp_dyna_lock_t)((kmp_base_tas_lock_t *)(l))->poll &                       \
   ((1 << KMP_LOCK_SHIFT) - 1) &                                               \
   -((kmp_dyna_lock_t)((kmp_tas_lock_t *)(l))->lk.poll & 1))

// Extracts indirect lock index from a user lock pointer
#define KMP_EXTRACT_I_INDEX(l)                                                 \
  ((kmp_lock_index_t)((kmp_base_tas_lock_t *)(l))->poll >> 1)

// Returns function pointer to the direct lock function with l
// (kmp_dyna_lock_t *) and op (operation type).
#define KMP_D_LOCK_FUNC(l, op) __kmp_direct_##op[KMP_EXTRACT_D_TAG(l)]

// Returns function pointer to the indirect lock function with l
// (kmp_indirect_lock_t *) and op (operation type).
#define KMP_I_LOCK_FUNC(l, op)                                                 \
  __kmp_indirect_##op[((kmp_indirect_lock_t *)(l))->type]

// Initializes a direct lock with the given lock pointer and lock sequence.
#define KMP_INIT_D_LOCK(l, seq)                                                \
  __kmp_direct_init[KMP_GET_D_TAG(seq)]((kmp_dyna_lock_t *)l, seq)

// Initializes an indirect lock with the given lock pointer and lock sequence.
#define KMP_INIT_I_LOCK(l, seq)                                                \
  __kmp_direct_init[0]((kmp_dyna_lock_t *)(l), seq)

// Returns "free" lock value for the given lock type.
#define KMP_LOCK_FREE(type) (locktag_##type)

// Returns "busy" lock value for the given lock type.
#define KMP_LOCK_BUSY(v, type) ((v) << KMP_LOCK_SHIFT | locktag_##type)

// Returns lock value after removing (shifting) lock tag.
#define KMP_LOCK_STRIP(v) ((v) >> KMP_LOCK_SHIFT)

// Initializes global states and data structures for managing dynamic user
// locks.
extern void __kmp_init_dynamic_user_locks();

// Allocates and returns an indirect lock with the given indirect lock tag.
extern kmp_indirect_lock_t *
__kmp_allocate_indirect_lock(void **, kmp_int32, kmp_indirect_locktag_t);

// Cleans up global states and data structures for managing dynamic user locks.
extern void __kmp_cleanup_indirect_user_locks();

// Default user lock sequence when not using hinted locks.
extern kmp_dyna_lockseq_t __kmp_user_lock_seq;

// Jump table for "set lock location", available only for indirect locks.
extern void (*__kmp_indirect_set_location[KMP_NUM_I_LOCKS])(kmp_user_lock_p,
                                                            const ident_t *);
#define KMP_SET_I_LOCK_LOCATION(lck, loc)                                      \
  {                                                                            \
    if (__kmp_indirect_set_location[(lck)->type] != NULL)                      \
      __kmp_indirect_set_location[(lck)->type]((lck)->lock, loc);              \
  }

// Jump table for "set lock flags", available only for indirect locks.
extern void (*__kmp_indirect_set_flags[KMP_NUM_I_LOCKS])(kmp_user_lock_p,
                                                         kmp_lock_flags_t);
#define KMP_SET_I_LOCK_FLAGS(lck, flag)                                        \
  {                                                                            \
    if (__kmp_indirect_set_flags[(lck)->type] != NULL)                         \
      __kmp_indirect_set_flags[(lck)->type]((lck)->lock, flag);                \
  }

// Jump table for "get lock location", available only for indirect locks.
extern const ident_t *(*__kmp_indirect_get_location[KMP_NUM_I_LOCKS])(
    kmp_user_lock_p);
#define KMP_GET_I_LOCK_LOCATION(lck)                                           \
  (__kmp_indirect_get_location[(lck)->type] != NULL                            \
       ? __kmp_indirect_get_location[(lck)->type]((lck)->lock)                 \
       : NULL)

// Jump table for "get lock flags", available only for indirect locks.
extern kmp_lock_flags_t (*__kmp_indirect_get_flags[KMP_NUM_I_LOCKS])(
    kmp_user_lock_p);
#define KMP_GET_I_LOCK_FLAGS(lck)                                              \
  (__kmp_indirect_get_flags[(lck)->type] != NULL                               \
       ? __kmp_indirect_get_flags[(lck)->type]((lck)->lock)                    \
       : NULL)

// number of kmp_indirect_lock_t objects to be allocated together
#define KMP_I_LOCK_CHUNK 1024
// Keep as a power of 2 since it is used in multiplication & division
KMP_BUILD_ASSERT((KMP_I_LOCK_CHUNK & (KMP_I_LOCK_CHUNK - 1)) == 0);
// number of row entries in the initial lock table
#define KMP_I_LOCK_TABLE_INIT_NROW_PTRS 8

// Lock table for indirect locks.
typedef struct kmp_indirect_lock_table {
  kmp_indirect_lock_t **table; // blocks of indirect locks allocated
  kmp_uint32 nrow_ptrs; // number of *table pointer entries in table
  kmp_lock_index_t next; // index to the next lock to be allocated
  struct kmp_indirect_lock_table *next_table;
} kmp_indirect_lock_table_t;

extern kmp_indirect_lock_table_t __kmp_i_lock_table;

// Returns the indirect lock associated with the given index.
// Returns nullptr if there is no lock at the given index.
static inline kmp_indirect_lock_t *__kmp_get_i_lock(kmp_lock_index_t idx) {
  kmp_indirect_lock_table_t *lock_table = &__kmp_i_lock_table;
  while (lock_table) {
    kmp_lock_index_t max_locks = lock_table->nrow_ptrs * KMP_I_LOCK_CHUNK;
    if (idx < max_locks) {
      kmp_lock_index_t row = idx / KMP_I_LOCK_CHUNK;
      kmp_lock_index_t col = idx % KMP_I_LOCK_CHUNK;
      if (!lock_table->table[row] || idx >= lock_table->next)
        break;
      return &lock_table->table[row][col];
    }
    idx -= max_locks;
    lock_table = lock_table->next_table;
  }
  return nullptr;
}
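// Worked illustration of the lookup above: with KMP_I_LOCK_CHUNK == 1024,
// idx == 2500 falls in the first table large enough to hold it and resolves
// to row 2500 / 1024 == 2 and col 2500 % 1024 == 452; an index beyond a
// table's nrow_ptrs * KMP_I_LOCK_CHUNK capacity subtracts that capacity and
// continues with next_table.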
// Number of locks in a lock block, which is fixed to "1" now.
// TODO: No lock block implementation now. If we do support it, we will need to
// manage a lock block data structure for each indirect lock type.
extern int __kmp_num_locks_in_block;

// Fast lock table lookup without consistency checking
#define KMP_LOOKUP_I_LOCK(l)                                                   \
  ((OMP_LOCK_T_SIZE < sizeof(void *))                                          \
       ? __kmp_get_i_lock(KMP_EXTRACT_I_INDEX(l))                              \
       : *((kmp_indirect_lock_t **)(l)))

// Used once in kmp_error.cpp
extern kmp_int32 __kmp_get_user_lock_owner(kmp_user_lock_p, kmp_uint32);

#else // KMP_USE_DYNAMIC_LOCK

#define KMP_LOCK_BUSY(v, type) (v)
#define KMP_LOCK_FREE(type) 0
#define KMP_LOCK_STRIP(v) (v)

#endif // KMP_USE_DYNAMIC_LOCK

// Data structure for using backoff within spin locks.
typedef struct {
  kmp_uint32 step; // current step
  kmp_uint32 max_backoff; // upper bound of outer delay loop
  kmp_uint32 min_tick; // size of inner delay loop in ticks (machine-dependent)
} kmp_backoff_t;

// Runtime's default backoff parameters
extern kmp_backoff_t __kmp_spin_backoff_params;

// Backoff function
extern void __kmp_spin_backoff(kmp_backoff_t *);
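// Illustrative use (a sketch; try_lock stands in for any CAS-based acquisition
// attempt and is not a runtime function):
//   kmp_backoff_t backoff = __kmp_spin_backoff_params; // start from defaults
//   while (!try_lock(lck))
//     __kmp_spin_backoff(&backoff); // delay grows with each call, bounded
//                                   // by max_backoff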
#ifdef __cplusplus
} // extern "C"
#endif // __cplusplus

#endif /* KMP_LOCK_H */