Lines Matching refs:lk

71 return KMP_LOCK_STRIP(KMP_ATOMIC_LD_RLX(&lck->lk.poll)) - 1;
75 return lck->lk.depth_locked != -1;
83 kmp_uint32 curr = KMP_LOCK_STRIP(lck->lk.poll);
92 if (KMP_ATOMIC_LD_RLX(&lck->lk.poll) == tas_free &&
93 __kmp_atomic_compare_store_acq(&lck->lk.poll, tas_free, tas_busy)) {
112 } while (KMP_ATOMIC_LD_RLX(&lck->lk.poll) != tas_free ||
113 !__kmp_atomic_compare_store_acq(&lck->lk.poll, tas_free, tas_busy));
139 if (KMP_ATOMIC_LD_RLX(&lck->lk.poll) == tas_free &&
140 __kmp_atomic_compare_store_acq(&lck->lk.poll, tas_free, tas_busy)) {
161 KMP_ATOMIC_ST_REL(&lck->lk.poll, KMP_LOCK_FREE(tas));
187 lck->lk.poll = KMP_LOCK_FREE(tas);
190 void __kmp_destroy_tas_lock(kmp_tas_lock_t *lck) { lck->lk.poll = 0; }
210 lck->lk.depth_locked += 1;
214 lck->lk.depth_locked = 1;
234 retval = ++lck->lk.depth_locked;
239 retval = lck->lk.depth_locked = 1;
257 if (--(lck->lk.depth_locked) == 0) {
282 lck->lk.depth_locked = 0; // >= 0 for nestable locks, -1 for simple locks
287 lck->lk.depth_locked = 0;
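
The poll / tas_free / tas_busy references above follow the usual test-and-test-and-set pattern: read the flag with a cheap relaxed load first, and only attempt the acquiring compare-and-swap when the lock looks free. A minimal sketch of that pattern with plain std::atomic, using illustrative names rather than the listing's KMP_ATOMIC_* macros and KMP_LOCK_FREE/KMP_LOCK_BUSY encodings:

#include <atomic>

// Test-and-test-and-set spinlock sketch (illustrative; not the runtime's type).
struct tas_spinlock {
  std::atomic<int> poll{0}; // 0 = free, 1 = busy

  void acquire() {
    for (;;) {
      // Relaxed read first; only CAS when the lock looks free, which keeps
      // the cache line shared while we wait instead of bouncing it around.
      if (poll.load(std::memory_order_relaxed) == 0) {
        int expected = 0;
        if (poll.compare_exchange_strong(expected, 1,
                                         std::memory_order_acquire))
          return;
      }
    }
  }

  bool try_acquire() {
    int expected = 0;
    return poll.load(std::memory_order_relaxed) == 0 &&
           poll.compare_exchange_strong(expected, 1,
                                        std::memory_order_acquire);
  }

  void release() { poll.store(0, std::memory_order_release); }
};

The nested variants in the listing add a depth_locked counter on top of this scheme; per the comments shown, -1 marks a simple lock and values >= 0 a nestable one.
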
312 return KMP_LOCK_STRIP((TCR_4(lck->lk.poll) >> 1)) - 1;
316 return lck->lk.depth_locked != -1;
326 kmp_uint32 curr = KMP_LOCK_STRIP(TCR_4(lck->lk.poll));
334 lck, lck->lk.poll, gtid));
339 &(lck->lk.poll), KMP_LOCK_FREE(futex),
357 if (!KMP_COMPARE_AND_STORE_REL32(&(lck->lk.poll), poll_val,
362 lck, lck->lk.poll, gtid));
369 lck->lk.poll, gtid));
378 if ((rc = syscall(__NR_futex, &(lck->lk.poll), FUTEX_WAIT, poll_val, NULL,
397 lck->lk.poll, gtid));
420 if (KMP_COMPARE_AND_STORE_ACQ32(&(lck->lk.poll), KMP_LOCK_FREE(futex),
442 lck, lck->lk.poll, gtid));
446 kmp_int32 poll_val = KMP_XCHG_FIXED32(&(lck->lk.poll), KMP_LOCK_FREE(futex));
456 syscall(__NR_futex, &(lck->lk.poll), FUTEX_WAKE, KMP_LOCK_BUSY(1, futex),
463 lck->lk.poll, gtid));
488 TCW_4(lck->lk.poll, KMP_LOCK_FREE(futex));
491 void __kmp_destroy_futex_lock(kmp_futex_lock_t *lck) { lck->lk.poll = 0; }
511 lck->lk.depth_locked += 1;
515 lck->lk.depth_locked = 1;
535 retval = ++lck->lk.depth_locked;
540 retval = lck->lk.depth_locked = 1;
558 if (--(lck->lk.depth_locked) == 0) {
583 lck->lk.depth_locked = 0; // >= 0 for nestable locks, -1 for simple locks
588 lck->lk.depth_locked = 0;
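
The futex references above park waiters in the kernel instead of spinning: the poll word is claimed with compare-and-swap, and a loser of that race calls the futex syscall (the listing uses __NR_futex directly) to sleep until the releaser wakes it. A minimal Linux-only sketch in the same spirit, using the classic three-state futex mutex rather than the runtime's exact encoding (which appears to pack the owning thread's gtid into poll):

#include <atomic>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

// 0 = free, 1 = locked, 2 = locked with (possible) waiters.
struct futex_mutex {
  std::atomic<int> poll{0};

  static long futex(std::atomic<int> *addr, int op, int val) {
    return syscall(SYS_futex, addr, op, val, nullptr, nullptr, 0);
  }

  void acquire() {
    int c = 0;
    if (poll.compare_exchange_strong(c, 1, std::memory_order_acquire))
      return;                                  // uncontended fast path
    if (c != 2)
      c = poll.exchange(2, std::memory_order_acquire);
    while (c != 0) {
      futex(&poll, FUTEX_WAIT, 2);             // sleep while the lock looks held
      c = poll.exchange(2, std::memory_order_acquire);
    }
  }

  void release() {
    if (poll.exchange(0, std::memory_order_release) != 1)
      futex(&poll, FUTEX_WAKE, 1);             // a waiter may be parked; wake one
  }
};
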
608 return std::atomic_load_explicit(&lck->lk.owner_id,
614 return std::atomic_load_explicit(&lck->lk.depth_locked,
627 &lck->lk.next_ticket, 1U, std::memory_order_relaxed);
630 if (std::atomic_load_explicit(&lck->lk.now_serving,
636 if (std::atomic_load_explicit(&lck->lk.now_serving,
640 KMP_WAIT_PTR(&lck->lk.now_serving, my_ticket, __kmp_bakery_check, lck);
653 if (!std::atomic_load_explicit(&lck->lk.initialized,
657 if (lck->lk.self != lck) {
669 std::atomic_store_explicit(&lck->lk.owner_id, gtid + 1,
675 kmp_uint32 my_ticket = std::atomic_load_explicit(&lck->lk.next_ticket,
678 if (std::atomic_load_explicit(&lck->lk.now_serving,
682 &lck->lk.next_ticket, &my_ticket, next_ticket,
694 if (!std::atomic_load_explicit(&lck->lk.initialized,
698 if (lck->lk.self != lck) {
708 std::atomic_store_explicit(&lck->lk.owner_id, gtid + 1,
715 kmp_uint32 distance = std::atomic_load_explicit(&lck->lk.next_ticket,
717 std::atomic_load_explicit(&lck->lk.now_serving,
720 std::atomic_fetch_add_explicit(&lck->lk.now_serving, 1U,
732 if (!std::atomic_load_explicit(&lck->lk.initialized,
736 if (lck->lk.self != lck) {
749 std::atomic_store_explicit(&lck->lk.owner_id, 0, std::memory_order_relaxed);
754 lck->lk.location = NULL;
755 lck->lk.self = lck;
756 std::atomic_store_explicit(&lck->lk.next_ticket, 0U,
758 std::atomic_store_explicit(&lck->lk.now_serving, 0U,
761 &lck->lk.owner_id, 0,
764 &lck->lk.depth_locked, -1,
766 std::atomic_store_explicit(&lck->lk.initialized, true,
771 std::atomic_store_explicit(&lck->lk.initialized, false,
773 lck->lk.self = NULL;
774 lck->lk.location = NULL;
775 std::atomic_store_explicit(&lck->lk.next_ticket, 0U,
777 std::atomic_store_explicit(&lck->lk.now_serving, 0U,
779 std::atomic_store_explicit(&lck->lk.owner_id, 0, std::memory_order_relaxed);
780 std::atomic_store_explicit(&lck->lk.depth_locked, -1,
787 if (!std::atomic_load_explicit(&lck->lk.initialized,
791 if (lck->lk.self != lck) {
809 std::atomic_fetch_add_explicit(&lck->lk.depth_locked, 1,
814 std::atomic_store_explicit(&lck->lk.depth_locked, 1,
816 std::atomic_store_explicit(&lck->lk.owner_id, gtid + 1,
826 if (!std::atomic_load_explicit(&lck->lk.initialized,
830 if (lck->lk.self != lck) {
845 retval = std::atomic_fetch_add_explicit(&lck->lk.depth_locked, 1,
851 std::atomic_store_explicit(&lck->lk.depth_locked, 1,
853 std::atomic_store_explicit(&lck->lk.owner_id, gtid + 1,
864 if (!std::atomic_load_explicit(&lck->lk.initialized,
868 if (lck->lk.self != lck) {
880 if ((std::atomic_fetch_add_explicit(&lck->lk.depth_locked, -1,
883 std::atomic_store_explicit(&lck->lk.owner_id, 0, std::memory_order_relaxed);
894 if (!std::atomic_load_explicit(&lck->lk.initialized,
898 if (lck->lk.self != lck) {
915 std::atomic_store_explicit(&lck->lk.depth_locked, 0,
922 std::atomic_store_explicit(&lck->lk.depth_locked, 0,
930 if (!std::atomic_load_explicit(&lck->lk.initialized,
934 if (lck->lk.self != lck) {
949 return lck->lk.location;
954 lck->lk.location = loc;
958 return lck->lk.flags;
963 lck->lk.flags = flags;
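
The next_ticket / now_serving references above are a FIFO ticket lock expressed with std::atomic: acquire takes a ticket with fetch_add and spins until now_serving reaches it, try-acquire only succeeds if it can claim a ticket while the lock is free, and release bumps now_serving. A compact sketch of the same idea, without the listing's initialized/self validity checks and owner_id bookkeeping:

#include <atomic>
#include <thread>

struct ticket_lock {
  std::atomic<unsigned> next_ticket{0};
  std::atomic<unsigned> now_serving{0};

  void acquire() {
    // Take a ticket, then wait until it is the one being served (FIFO, fair).
    unsigned my_ticket = next_ticket.fetch_add(1, std::memory_order_relaxed);
    while (now_serving.load(std::memory_order_acquire) != my_ticket)
      std::this_thread::yield();
  }

  bool try_acquire() {
    unsigned t = next_ticket.load(std::memory_order_relaxed);
    // Succeed only if the lock is currently free and we win the ticket race.
    return now_serving.load(std::memory_order_relaxed) == t &&
           next_ticket.compare_exchange_strong(t, t + 1,
                                               std::memory_order_acquire);
  }

  void release() { now_serving.fetch_add(1, std::memory_order_release); }
};

The difference next_ticket - now_serving, computed in the listing's release path, is the number of threads currently holding or waiting for the lock.
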
1056 __kmp_printf_no_lock("\t\thead: %d ", lck->lk.head_id);
1058 if (lck->lk.head_id >= 1) {
1059 t = __kmp_threads[lck->lk.head_id - 1]->th.th_next_waiting;
1065 __kmp_printf_no_lock("; tail: %d ", lck->lk.tail_id);
1072 return TCR_4(lck->lk.owner_id) - 1;
1076 return lck->lk.depth_locked != -1;
1087 volatile kmp_int32 *head_id_p = &lck->lk.head_id;
1088 volatile kmp_int32 *tail_id_p = &lck->lk.tail_id;
1287 if (lck->lk.initialized != lck) {
1299 lck->lk.owner_id = gtid + 1;
1304 volatile kmp_int32 *head_id_p = &lck->lk.head_id;
1338 if (lck->lk.initialized != lck) {
1348 lck->lk.owner_id = gtid + 1;
1354 volatile kmp_int32 *head_id_p = &lck->lk.head_id;
1355 volatile kmp_int32 *tail_id_p = &lck->lk.tail_id;
1500 if (lck->lk.initialized != lck) {
1512 lck->lk.owner_id = 0;
1517 lck->lk.location = NULL;
1518 lck->lk.head_id = 0;
1519 lck->lk.tail_id = 0;
1520 lck->lk.next_ticket = 0;
1521 lck->lk.now_serving = 0;
1522 lck->lk.owner_id = 0; // no thread owns the lock.
1523 lck->lk.depth_locked = -1; // >= 0 for nestable locks, -1 for simple locks.
1524 lck->lk.initialized = lck;
1530 lck->lk.initialized = NULL;
1531 lck->lk.location = NULL;
1532 lck->lk.head_id = 0;
1533 lck->lk.tail_id = 0;
1534 lck->lk.next_ticket = 0;
1535 lck->lk.now_serving = 0;
1536 lck->lk.owner_id = 0;
1537 lck->lk.depth_locked = -1;
1542 if (lck->lk.initialized != lck) {
1560 lck->lk.depth_locked += 1;
1565 lck->lk.depth_locked = 1;
1567 lck->lk.owner_id = gtid + 1;
1576 if (lck->lk.initialized != lck) {
1591 retval = ++lck->lk.depth_locked;
1596 retval = lck->lk.depth_locked = 1;
1598 lck->lk.owner_id = gtid + 1;
1606 if (lck->lk.initialized != lck) {
1619 if (--(lck->lk.depth_locked) == 0) {
1621 lck->lk.owner_id = 0;
1633 if (lck->lk.initialized != lck) {
1650 lck->lk.depth_locked = 0; // >= 0 for nestable locks, -1 for simple locks
1655 lck->lk.depth_locked = 0;
1661 if (lck->lk.initialized != lck) {
1676 return lck->lk.location;
1681 lck->lk.location = loc;
1685 return lck->lk.flags;
1690 lck->lk.flags = flags;
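
The queuing-lock references above (head_id, tail_id, th_next_waiting) keep an explicit FIFO of waiting threads, identified by gtid + 1 and linked through the thread table, so each waiter spins on its own flag rather than on one shared word. The textbook formulation of the same idea is an MCS queue lock; the sketch below uses per-thread nodes instead of thread-table indices, and its names are illustrative rather than the runtime's:

#include <atomic>

struct mcs_node {
  std::atomic<mcs_node *> next{nullptr};
  std::atomic<bool> locked{false};
};

struct mcs_lock {
  std::atomic<mcs_node *> tail{nullptr};

  void acquire(mcs_node *me) {
    me->next.store(nullptr, std::memory_order_relaxed);
    me->locked.store(true, std::memory_order_relaxed);
    mcs_node *prev = tail.exchange(me, std::memory_order_acq_rel);
    if (prev != nullptr) {
      prev->next.store(me, std::memory_order_release); // link behind predecessor
      while (me->locked.load(std::memory_order_acquire))
        ; // spin on our own node only: no global cache-line ping-pong
    }
  }

  void release(mcs_node *me) {
    mcs_node *succ = me->next.load(std::memory_order_acquire);
    if (succ == nullptr) {
      mcs_node *expected = me;
      if (tail.compare_exchange_strong(expected, nullptr,
                                       std::memory_order_acq_rel))
        return; // queue is empty: nobody to hand the lock to
      while ((succ = me->next.load(std::memory_order_acquire)) == nullptr)
        ; // a successor swapped the tail but has not linked itself yet
    }
    succ->locked.store(false, std::memory_order_release); // hand off the lock
  }
};
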
1944 #define KMP_INC_STAT(lck, stat) (lck->lk.adaptive.stats.stat++)
1953 bool res = lck->lk.head_id == 0;
1970 lck->lk.adaptive.badness = 0;
1976 kmp_uint32 newBadness = (lck->lk.adaptive.badness << 1) | 1;
1977 if (newBadness > lck->lk.adaptive.max_badness) {
1980 lck->lk.adaptive.badness = newBadness;
1988 kmp_uint32 badness = lck->lk.adaptive.badness;
1989 kmp_uint32 attempts = lck->lk.adaptive.acquire_attempts;
1999 int retries = lck->lk.adaptive.max_soft_retries;
2053 lck->lk.adaptive.acquire_attempts++;
2067 if (lck->lk.qlk.initialized != GET_QLK_PTR(lck)) {
2074 lck->lk.qlk.owner_id = gtid + 1;
2115 lck->lk.adaptive.acquire_attempts++;
2125 if (lck->lk.qlk.initialized != GET_QLK_PTR(lck)) {
2134 lck->lk.qlk.owner_id = gtid + 1;
2157 if (lck->lk.qlk.initialized != GET_QLK_PTR(lck)) {
2166 lck->lk.qlk.owner_id = 0;
2173 lck->lk.adaptive.badness = 0;
2174 lck->lk.adaptive.acquire_attempts = 0; // nonSpeculativeAcquireAttempts = 0;
2175 lck->lk.adaptive.max_soft_retries =
2177 lck->lk.adaptive.max_badness = __kmp_adaptive_backoff_params.max_badness;
2179 __kmp_zero_speculative_stats(&lck->lk.adaptive);
2186 __kmp_accumulate_speculative_stats(&lck->lk.adaptive);
2194 if (lck->lk.qlk.initialized != GET_QLK_PTR(lck)) {
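
The adaptive references above layer hardware lock elision over the queuing lock: acquire_attempts and badness implement an exponential backoff on speculation (the (attempts & badness) == 0 gate), max_badness caps it, and failed transactions fall back to really acquiring the underlying qlk. A rough, hypothetical sketch of that control flow using the RTM intrinsics; it needs x86 TSX hardware and -mrtm, the fallback lock here is a plain TAS rather than the runtime's queuing lock, and the statistics and benign-race handling are omitted:

#include <atomic>
#include <immintrin.h> // _xbegin / _xend / _xabort

struct adaptive_lock {
  std::atomic<int> base_lock{0}; // stand-in for the underlying queuing lock
  unsigned badness = 0;          // grows as (badness << 1) | 1 on failed speculation
  unsigned max_badness = 16;
  unsigned acquire_attempts = 0;

  bool base_is_free() const {
    return base_lock.load(std::memory_order_relaxed) == 0;
  }

  void acquire() {
    ++acquire_attempts;
    // Speculate only when the badness mask allows it (exponential backoff).
    if ((acquire_attempts & badness) == 0) {
      if (_xbegin() == _XBEGIN_STARTED) {
        if (base_is_free())
          return;        // running transactionally; the lock word is untouched
        _xabort(0xff);   // a real owner exists: abort and take the slow path
      }
      // Transaction failed or aborted: make speculation less likely next time.
      unsigned next = (badness << 1) | 1;
      badness = next > max_badness ? max_badness : next;
    }
    // Non-speculative fallback: genuinely acquire the base lock.
    int expected = 0;
    while (!base_lock.compare_exchange_weak(expected, 1,
                                            std::memory_order_acquire))
      expected = 0;
  }

  void release() {
    if (base_is_free()) {
      _xend();      // we were speculating: commit the elided critical section
      badness = 0;  // successful speculation resets the backoff
    } else {
      base_lock.store(0, std::memory_order_release);
    }
  }
};
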
2210 return lck->lk.owner_id - 1;
2214 return lck->lk.depth_locked != -1;
2219 kmp_uint64 ticket = KMP_ATOMIC_INC(&lck->lk.next_ticket);
2220 kmp_uint64 mask = lck->lk.mask; // atomic load
2221 std::atomic<kmp_uint64> *polls = lck->lk.polls;
2250 mask = lck->lk.mask; // atomic load
2251 polls = lck->lk.polls; // atomic load
2258 lck->lk.now_serving = ticket; // non-volatile store
2265 if ((lck->lk.old_polls != NULL) && (ticket >= lck->lk.cleanup_ticket)) {
2266 __kmp_free(lck->lk.old_polls);
2267 lck->lk.old_polls = NULL;
2268 lck->lk.cleanup_ticket = 0;
2274 if (lck->lk.old_polls == NULL) {
2277 kmp_uint32 num_polls = TCR_4(lck->lk.num_polls);
2285 num_polls = TCR_4(lck->lk.num_polls);
2296 kmp_uint64 num_waiting = TCR_8(lck->lk.next_ticket) - ticket - 1;
2332 lck->lk.old_polls = old_polls;
2333 lck->lk.polls = polls; // atomic store
2337 lck->lk.num_polls = num_polls;
2338 lck->lk.mask = mask; // atomic store
2346 lck->lk.cleanup_ticket = lck->lk.next_ticket;
2360 if (lck->lk.initialized != lck) {
2372 lck->lk.owner_id = gtid + 1;
2379 kmp_uint64 ticket = lck->lk.next_ticket; // atomic load
2380 std::atomic<kmp_uint64> *polls = lck->lk.polls;
2381 kmp_uint64 mask = lck->lk.mask; // atomic load
2384 if (__kmp_atomic_compare_store_acq(&lck->lk.next_ticket, ticket,
2389 lck->lk.now_serving = ticket; // non-volatile store
2406 if (lck->lk.initialized != lck) {
2416 lck->lk.owner_id = gtid + 1;
2424 kmp_uint64 ticket = lck->lk.now_serving + 1; // non-atomic load
2425 std::atomic<kmp_uint64> *polls = lck->lk.polls; // atomic load
2426 kmp_uint64 mask = lck->lk.mask; // atomic load
2438 if (lck->lk.initialized != lck) {
2451 lck->lk.owner_id = 0;
2456 lck->lk.location = NULL;
2457 lck->lk.mask = 0;
2458 lck->lk.num_polls = 1;
2459 lck->lk.polls = (std::atomic<kmp_uint64> *)__kmp_allocate(
2460 lck->lk.num_polls * sizeof(*(lck->lk.polls)));
2461 lck->lk.cleanup_ticket = 0;
2462 lck->lk.old_polls = NULL;
2463 lck->lk.next_ticket = 0;
2464 lck->lk.now_serving = 0;
2465 lck->lk.owner_id = 0; // no thread owns the lock.
2466 lck->lk.depth_locked = -1; // >= 0 for nestable locks, -1 for simple locks.
2467 lck->lk.initialized = lck;
2473 lck->lk.initialized = NULL;
2474 lck->lk.location = NULL;
2475 if (lck->lk.polls.load() != NULL) {
2476 __kmp_free(lck->lk.polls.load());
2477 lck->lk.polls = NULL;
2479 if (lck->lk.old_polls != NULL) {
2480 __kmp_free(lck->lk.old_polls);
2481 lck->lk.old_polls = NULL;
2483 lck->lk.mask = 0;
2484 lck->lk.num_polls = 0;
2485 lck->lk.cleanup_ticket = 0;
2486 lck->lk.next_ticket = 0;
2487 lck->lk.now_serving = 0;
2488 lck->lk.owner_id = 0;
2489 lck->lk.depth_locked = -1;
2494 if (lck->lk.initialized != lck) {
2512 lck->lk.depth_locked += 1;
2517 lck->lk.depth_locked = 1;
2519 lck->lk.owner_id = gtid + 1;
2527 if (lck->lk.initialized != lck) {
2542 retval = ++lck->lk.depth_locked;
2547 retval = lck->lk.depth_locked = 1;
2549 lck->lk.owner_id = gtid + 1;
2557 if (lck->lk.initialized != lck) {
2570 if (--(lck->lk.depth_locked) == 0) {
2572 lck->lk.owner_id = 0;
2583 if (lck->lk.initialized != lck) {
2600 lck->lk.depth_locked = 0; // >= 0 for nestable locks, -1 for simple locks
2605 lck->lk.depth_locked = 0;
2610 if (lck->lk.initialized != lck) {
2625 return lck->lk.location;
2630 lck->lk.location = loc;
2634 return lck->lk.flags;
2639 lck->lk.flags = flags;
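
The drdpa references above are another ticket lock, but each waiter spins on its own slot of a polls array indexed by (ticket & mask), so a release dirties only the next waiter's cache line; the old_polls and cleanup_ticket fields in the listing let that array be resized safely while waiters are still parked on the old one. A fixed-size sketch of the core protocol, with no resizing and illustrative names:

#include <atomic>
#include <cstdint>
#include <vector>

struct drdpa_like_lock {
  std::vector<std::atomic<uint64_t>> polls; // slot for tickets with (t & mask) == i
  std::atomic<uint64_t> next_ticket{0};
  uint64_t now_serving = 0;                 // written only by the current owner
  uint64_t mask;

  explicit drdpa_like_lock(size_t num_polls = 8)
      : polls(num_polls), mask(num_polls - 1) {
    // num_polls must be a power of two; slots start at 0 so ticket 0 enters at once.
    for (auto &p : polls)
      p.store(0, std::memory_order_relaxed);
  }

  void acquire() {
    uint64_t ticket = next_ticket.fetch_add(1, std::memory_order_relaxed);
    // Spin on our own slot until the previous owner publishes our ticket there.
    while (polls[ticket & mask].load(std::memory_order_acquire) != ticket)
      ;
    now_serving = ticket;
  }

  void release() {
    uint64_t next = now_serving + 1;
    // Hand the lock over by writing the next ticket into that ticket's slot.
    polls[next & mask].store(next, std::memory_order_release);
  }
};
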
2841 KMP_ATOMIC_ST_REL(&lck->lk.poll, 0);
2857 if (KMP_ATOMIC_LD_RLX(&lck->lk.poll) == lock_free)
2863 while (KMP_ATOMIC_LD_RLX(&lck->lk.poll) != lock_free) {
2873 while (KMP_ATOMIC_LD_RLX(&lck->lk.poll) != lock_free ||
2874 !__kmp_atomic_compare_store_acq(&lck->lk.poll, lock_free, lock_busy)) {
2889 if (KMP_ATOMIC_LD_RLX(&lck->lk.poll) == KMP_LOCK_FREE(rtm_spin)) {
2895 KMP_ATOMIC_ST_REL(&lck->lk.poll, KMP_LOCK_FREE(rtm_spin));
2913 KMP_ATOMIC_LD_RLX(&lck->lk.poll) == lock_free) {
2920 if (KMP_ATOMIC_LD_RLX(&lck->lk.poll) == lock_free &&
2921 __kmp_atomic_compare_store_acq(&lck->lk.poll, lock_free, lock_busy)) {
2952 #define expand1(lk, op) \
2953 static void __kmp_##op##_##lk##_##lock(kmp_user_lock_p lock) { \
2954 __kmp_##op##_##lk##_##lock(&lock->lk); \
2956 #define expand2(lk, op) \
2957 static int __kmp_##op##_##lk##_##lock(kmp_user_lock_p lock, \
2959 return __kmp_##op##_##lk##_##lock(&lock->lk, gtid); \
2961 #define expand3(lk, op) \
2962 static void __kmp_set_##lk##_##lock_flags(kmp_user_lock_p lock, \
2964 __kmp_set_##lk##_lock_flags(&lock->lk, flags); \
2966 #define expand4(lk, op) \
2967 static void __kmp_set_##lk##_##lock_location(kmp_user_lock_p lock, \
2969 __kmp_set_##lk##_lock_location(&lock->lk, loc); \
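
The expand* macros above work because the wrapper they define and the routine it forwards to share the same pasted name: the wrapper takes the generic kmp_user_lock_p, while the inner call on &lock->lk (where lk is the macro argument, e.g. tas) resolves by C++ overloading to the lock-specific function. A stripped-down, hypothetical illustration of that trick; only the token-pasting and overloading mechanism is meant to match the listing:

struct tas_lock_t { int poll; };
union user_lock_t { tas_lock_t tas; /* ...one member per lock kind... */ };
typedef user_lock_t *user_lock_p;

static void acquire_tas_lock(tas_lock_t *lck) { lck->poll = 1; } // lock-specific

#define expand1(lk, op)                                                        \
  static void op##_##lk##_lock(user_lock_p lock) {                            \
    op##_##lk##_lock(&lock->lk); /* overload resolution picks tas_lock_t* */  \
  }
expand1(tas, acquire) // stamps out: static void acquire_tas_lock(user_lock_p)
#undef expand1
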
3494 return lck == lck->lk.self;
3506 return lck == lck->lk.initialized;
3525 return lck == lck->lk.initialized;