
Searched for refs:gtid (results 1 – 25 of 40, sorted by relevance)


/freebsd/contrib/llvm-project/openmp/runtime/src/
kmp_atomic.h
368 kmp_int32 gtid) { in __kmp_acquire_atomic_lock() argument
377 __kmp_acquire_queuing_lock(lck, gtid); in __kmp_acquire_atomic_lock()
389 kmp_int32 gtid) { in __kmp_test_atomic_lock() argument
390 return __kmp_test_queuing_lock(lck, gtid); in __kmp_test_atomic_lock()
394 kmp_int32 gtid) { in __kmp_release_atomic_lock() argument
395 __kmp_release_queuing_lock(lck, gtid); in __kmp_release_atomic_lock()
456 void __kmpc_atomic_fixed1_add(ident_t *id_ref, int gtid, char *lhs, char rhs);
457 void __kmpc_atomic_fixed1_andb(ident_t *id_ref, int gtid, char *lhs, char rhs);
458 void __kmpc_atomic_fixed1_div(ident_t *id_ref, int gtid, char *lhs, char rhs);
459 void __kmpc_atomic_fixed1u_div(ident_t *id_ref, int gtid, unsigned char *lhs,
[all …]
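The __kmpc_atomic_<type>_<op> declarations above are the entry points an OpenMP compiler targets when lowering #pragma omp atomic: each takes a source-location record, the caller's gtid, the update target, and the operand. A minimal illustration of the update they implement (standard OpenMP, compile with -fopenmp; whether a given update becomes a runtime call like __kmpc_atomic_fixed4_add or an inline hardware atomic is compiler- and target-dependent):

```cpp
#include <cstdio>

int main() {
  int counter = 0;
#pragma omp parallel for
  for (int i = 0; i < 1000; ++i) {
    // Lowered to a __kmpc_atomic_fixed4_add-style call or an inline atomic.
#pragma omp atomic
    counter += 1;
  }
  std::printf("counter = %d\n", counter); // always 1000
}
```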
kmp_gsupport.cpp
126 int gtid = __kmp_entry_gtid(); in KMP_EXPAND_NAME() local
128 KA_TRACE(20, ("GOMP_barrier: T#%d\n", gtid)); in KMP_EXPAND_NAME()
135 OMPT_STORE_RETURN_ADDRESS(gtid); in KMP_EXPAND_NAME()
137 __kmpc_barrier(&loc, gtid); in KMP_EXPAND_NAME()
157 int gtid = __kmp_entry_gtid(); in KMP_EXPAND_NAME() local
159 KA_TRACE(20, ("GOMP_critical_start: T#%d\n", gtid)); in KMP_EXPAND_NAME()
161 OMPT_STORE_RETURN_ADDRESS(gtid); in KMP_EXPAND_NAME()
163 __kmpc_critical(&loc, gtid, __kmp_unnamed_critical_addr); in KMP_EXPAND_NAME()
167 int gtid = __kmp_get_gtid(); in KMP_EXPAND_NAME() local
169 KA_TRACE(20, ("GOMP_critical_end: T#%d\n", gtid)); in KMP_EXPAND_NAME()
[all …]
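The kmp_gsupport.cpp hits show the recurring shape of the GOMP compatibility layer: resolve the calling thread's gtid with __kmp_entry_gtid(), trace it, store the return address for OMPT, then forward to the matching __kmpc_* entry point. A self-contained sketch of that shim pattern, with stubs standing in for the real runtime (the _stub names are inventions for illustration; the real shims also build a proper ident_t from GOMP's location data):

```cpp
#include <cstdio>

struct ident_t {}; // opaque source-location record in the real runtime

// Stubs standing in for the real runtime entry points.
static int kmp_entry_gtid_stub() { return 0; } // assume one registered thread
static void kmpc_barrier_stub(ident_t *, int gtid) {
  std::printf("barrier reached by T#%d\n", gtid);
}

// Mirrors the GOMP_barrier body in the hits above: resolve gtid, forward.
static void GOMP_barrier_sketch() {
  ident_t loc{};
  int gtid = kmp_entry_gtid_stub();
  kmpc_barrier_stub(&loc, gtid);
}

int main() { GOMP_barrier_sketch(); }
```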
kmp_lock.cpp
79 __kmp_acquire_tas_lock_timed_template(kmp_tas_lock_t *lck, kmp_int32 gtid) { in __kmp_acquire_tas_lock_timed_template() argument
84 if ((curr != 0) && (curr != gtid + 1)) in __kmp_acquire_tas_lock_timed_template()
90 kmp_int32 tas_busy = KMP_LOCK_BUSY(gtid + 1, tas); in __kmp_acquire_tas_lock_timed_template()
118 int __kmp_acquire_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) { in __kmp_acquire_tas_lock() argument
119 int retval = __kmp_acquire_tas_lock_timed_template(lck, gtid); in __kmp_acquire_tas_lock()
124 kmp_int32 gtid) { in __kmp_acquire_tas_lock_with_checks() argument
130 if ((gtid >= 0) && (__kmp_get_tas_lock_owner(lck) == gtid)) { in __kmp_acquire_tas_lock_with_checks()
133 return __kmp_acquire_tas_lock(lck, gtid); in __kmp_acquire_tas_lock_with_checks()
136 int __kmp_test_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) { in __kmp_test_tas_lock() argument
148 __kmp_test_tas_lock_with_checks(kmp_tas_lock_t * lck,kmp_int32 gtid) __kmp_test_tas_lock_with_checks() argument
157 __kmp_release_tas_lock(kmp_tas_lock_t * lck,kmp_int32 gtid) __kmp_release_tas_lock() argument
169 __kmp_release_tas_lock_with_checks(kmp_tas_lock_t * lck,kmp_int32 gtid) __kmp_release_tas_lock_with_checks() argument
206 __kmp_acquire_nested_tas_lock(kmp_tas_lock_t * lck,kmp_int32 gtid) __kmp_acquire_nested_tas_lock() argument
220 __kmp_acquire_nested_tas_lock_with_checks(kmp_tas_lock_t * lck,kmp_int32 gtid) __kmp_acquire_nested_tas_lock_with_checks() argument
228 __kmp_test_nested_tas_lock(kmp_tas_lock_t * lck,kmp_int32 gtid) __kmp_test_nested_tas_lock() argument
245 __kmp_test_nested_tas_lock_with_checks(kmp_tas_lock_t * lck,kmp_int32 gtid) __kmp_test_nested_tas_lock_with_checks() argument
253 __kmp_release_nested_tas_lock(kmp_tas_lock_t * lck,kmp_int32 gtid) __kmp_release_nested_tas_lock() argument
265 __kmp_release_nested_tas_lock_with_checks(kmp_tas_lock_t * lck,kmp_int32 gtid) __kmp_release_nested_tas_lock_with_checks() argument
320 __kmp_acquire_futex_lock_timed_template(kmp_futex_lock_t * lck,kmp_int32 gtid) __kmp_acquire_futex_lock_timed_template() argument
401 __kmp_acquire_futex_lock(kmp_futex_lock_t * lck,kmp_int32 gtid) __kmp_acquire_futex_lock() argument
407 __kmp_acquire_futex_lock_with_checks(kmp_futex_lock_t * lck,kmp_int32 gtid) __kmp_acquire_futex_lock_with_checks() argument
419 __kmp_test_futex_lock(kmp_futex_lock_t * lck,kmp_int32 gtid) __kmp_test_futex_lock() argument
429 __kmp_test_futex_lock_with_checks(kmp_futex_lock_t * lck,kmp_int32 gtid) __kmp_test_futex_lock_with_checks() argument
438 __kmp_release_futex_lock(kmp_futex_lock_t * lck,kmp_int32 gtid) __kmp_release_futex_lock() argument
470 __kmp_release_futex_lock_with_checks(kmp_futex_lock_t * lck,kmp_int32 gtid) __kmp_release_futex_lock_with_checks() argument
507 __kmp_acquire_nested_futex_lock(kmp_futex_lock_t * lck,kmp_int32 gtid) __kmp_acquire_nested_futex_lock() argument
521 __kmp_acquire_nested_futex_lock_with_checks(kmp_futex_lock_t * lck,kmp_int32 gtid) __kmp_acquire_nested_futex_lock_with_checks() argument
529 __kmp_test_nested_futex_lock(kmp_futex_lock_t * lck,kmp_int32 gtid) __kmp_test_nested_futex_lock() argument
546 __kmp_test_nested_futex_lock_with_checks(kmp_futex_lock_t * lck,kmp_int32 gtid) __kmp_test_nested_futex_lock_with_checks() argument
554 __kmp_release_nested_futex_lock(kmp_futex_lock_t * lck,kmp_int32 gtid) __kmp_release_nested_futex_lock() argument
566 __kmp_release_nested_futex_lock_with_checks(kmp_futex_lock_t * lck,kmp_int32 gtid) __kmp_release_nested_futex_lock_with_checks() argument
625 __kmp_acquire_ticket_lock_timed_template(kmp_ticket_lock_t * lck,kmp_int32 gtid) __kmp_acquire_ticket_lock_timed_template() argument
644 __kmp_acquire_ticket_lock(kmp_ticket_lock_t * lck,kmp_int32 gtid) __kmp_acquire_ticket_lock() argument
650 __kmp_acquire_ticket_lock_with_checks(kmp_ticket_lock_t * lck,kmp_int32 gtid) __kmp_acquire_ticket_lock_with_checks() argument
674 __kmp_test_ticket_lock(kmp_ticket_lock_t * lck,kmp_int32 gtid) __kmp_test_ticket_lock() argument
691 __kmp_test_ticket_lock_with_checks(kmp_ticket_lock_t * lck,kmp_int32 gtid) __kmp_test_ticket_lock_with_checks() argument
714 __kmp_release_ticket_lock(kmp_ticket_lock_t * lck,kmp_int32 gtid) __kmp_release_ticket_lock() argument
729 __kmp_release_ticket_lock_with_checks(kmp_ticket_lock_t * lck,kmp_int32 gtid) __kmp_release_ticket_lock_with_checks() argument
805 __kmp_acquire_nested_ticket_lock(kmp_ticket_lock_t * lck,kmp_int32 gtid) __kmp_acquire_nested_ticket_lock() argument
823 __kmp_acquire_nested_ticket_lock_with_checks(kmp_ticket_lock_t * lck,kmp_int32 gtid) __kmp_acquire_nested_ticket_lock_with_checks() argument
839 __kmp_test_nested_ticket_lock(kmp_ticket_lock_t * lck,kmp_int32 gtid) __kmp_test_nested_ticket_lock() argument
861 __kmp_test_nested_ticket_lock_with_checks(kmp_ticket_lock_t * lck,kmp_int32 gtid) __kmp_test_nested_ticket_lock_with_checks() argument
877 __kmp_release_nested_ticket_lock(kmp_ticket_lock_t * lck,kmp_int32 gtid) __kmp_release_nested_ticket_lock() argument
891 __kmp_release_nested_ticket_lock_with_checks(kmp_ticket_lock_t * lck,kmp_int32 gtid) __kmp_release_nested_ticket_lock_with_checks() argument
1035 __kmp_dump_queuing_lock(kmp_info_t * this_thr,kmp_int32 gtid,kmp_queuing_lock_t * lck,kmp_int32 head_id,kmp_int32 tail_id) __kmp_dump_queuing_lock() argument
1085 __kmp_acquire_queuing_lock_timed_template(kmp_queuing_lock_t * lck,kmp_int32 gtid) __kmp_acquire_queuing_lock_timed_template() argument
1277 __kmp_acquire_queuing_lock(kmp_queuing_lock_t * lck,kmp_int32 gtid) __kmp_acquire_queuing_lock() argument
1285 __kmp_acquire_queuing_lock_with_checks(kmp_queuing_lock_t * lck,kmp_int32 gtid) __kmp_acquire_queuing_lock_with_checks() argument
1303 __kmp_test_queuing_lock(kmp_queuing_lock_t * lck,kmp_int32 gtid) __kmp_test_queuing_lock() argument
1336 __kmp_test_queuing_lock_with_checks(kmp_queuing_lock_t * lck,kmp_int32 gtid) __kmp_test_queuing_lock_with_checks() argument
1353 __kmp_release_queuing_lock(kmp_queuing_lock_t * lck,kmp_int32 gtid) __kmp_release_queuing_lock() argument
1497 __kmp_release_queuing_lock_with_checks(kmp_queuing_lock_t * lck,kmp_int32 gtid) __kmp_release_queuing_lock_with_checks() argument
1556 __kmp_acquire_nested_queuing_lock(kmp_queuing_lock_t * lck,kmp_int32 gtid) __kmp_acquire_nested_queuing_lock() argument
1574 __kmp_acquire_nested_queuing_lock_with_checks(kmp_queuing_lock_t * lck,kmp_int32 gtid) __kmp_acquire_nested_queuing_lock_with_checks() argument
1585 __kmp_test_nested_queuing_lock(kmp_queuing_lock_t * lck,kmp_int32 gtid) __kmp_test_nested_queuing_lock() argument
1604 __kmp_test_nested_queuing_lock_with_checks(kmp_queuing_lock_t * lck,kmp_int32 gtid) __kmp_test_nested_queuing_lock_with_checks() argument
1615 __kmp_release_nested_queuing_lock(kmp_queuing_lock_t * lck,kmp_int32 gtid) __kmp_release_nested_queuing_lock() argument
1630 __kmp_release_nested_queuing_lock_with_checks(kmp_queuing_lock_t * lck,kmp_int32 gtid) __kmp_release_nested_queuing_lock_with_checks() argument
1987 __kmp_should_speculate(kmp_adaptive_lock_t * lck,kmp_int32 gtid) __kmp_should_speculate() argument
1998 __kmp_test_adaptive_lock_only(kmp_adaptive_lock_t * lck,kmp_int32 gtid) __kmp_test_adaptive_lock_only() argument
2045 __kmp_test_adaptive_lock(kmp_adaptive_lock_t * lck,kmp_int32 gtid) __kmp_test_adaptive_lock() argument
2065 __kmp_test_adaptive_lock_with_checks(kmp_adaptive_lock_t * lck,kmp_int32 gtid) __kmp_test_adaptive_lock_with_checks() argument
2091 __kmp_acquire_adaptive_lock(kmp_adaptive_lock_t * lck,kmp_int32 gtid) __kmp_acquire_adaptive_lock() argument
2123 __kmp_acquire_adaptive_lock_with_checks(kmp_adaptive_lock_t * lck,kmp_int32 gtid) __kmp_acquire_adaptive_lock_with_checks() argument
2139 __kmp_release_adaptive_lock(kmp_adaptive_lock_t * lck,kmp_int32 gtid) __kmp_release_adaptive_lock() argument
2154 __kmp_release_adaptive_lock_with_checks(kmp_adaptive_lock_t * lck,kmp_int32 gtid) __kmp_release_adaptive_lock_with_checks() argument
2218 __kmp_acquire_drdpa_lock_timed_template(kmp_drdpa_lock_t * lck,kmp_int32 gtid) __kmp_acquire_drdpa_lock_timed_template() argument
2352 __kmp_acquire_drdpa_lock(kmp_drdpa_lock_t * lck,kmp_int32 gtid) __kmp_acquire_drdpa_lock() argument
2358 __kmp_acquire_drdpa_lock_with_checks(kmp_drdpa_lock_t * lck,kmp_int32 gtid) __kmp_acquire_drdpa_lock_with_checks() argument
2376 __kmp_test_drdpa_lock(kmp_drdpa_lock_t * lck,kmp_int32 gtid) __kmp_test_drdpa_lock() argument
2404 __kmp_test_drdpa_lock_with_checks(kmp_drdpa_lock_t * lck,kmp_int32 gtid) __kmp_test_drdpa_lock_with_checks() argument
2421 __kmp_release_drdpa_lock(kmp_drdpa_lock_t * lck,kmp_int32 gtid) __kmp_release_drdpa_lock() argument
2435 __kmp_release_drdpa_lock_with_checks(kmp_drdpa_lock_t * lck,kmp_int32 gtid) __kmp_release_drdpa_lock_with_checks() argument
2508 __kmp_acquire_nested_drdpa_lock(kmp_drdpa_lock_t * lck,kmp_int32 gtid) __kmp_acquire_nested_drdpa_lock() argument
2525 __kmp_acquire_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t * lck,kmp_int32 gtid) __kmp_acquire_nested_drdpa_lock_with_checks() argument
2536 __kmp_test_nested_drdpa_lock(kmp_drdpa_lock_t * lck,kmp_int32 gtid) __kmp_test_nested_drdpa_lock() argument
2555 __kmp_test_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t * lck,kmp_int32 gtid) __kmp_test_nested_drdpa_lock_with_checks() argument
2566 __kmp_release_nested_drdpa_lock(kmp_drdpa_lock_t * lck,kmp_int32 gtid) __kmp_release_nested_drdpa_lock() argument
2580 __kmp_release_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t * lck,kmp_int32 gtid) __kmp_release_nested_drdpa_lock_with_checks() argument
2715 __kmp_acquire_hle_lock(kmp_dyna_lock_t * lck,kmp_int32 gtid) __kmp_acquire_hle_lock() argument
2730 __kmp_acquire_hle_lock_with_checks(kmp_dyna_lock_t * lck,kmp_int32 gtid) __kmp_acquire_hle_lock_with_checks() argument
2734 __kmp_release_hle_lock(kmp_dyna_lock_t * lck,kmp_int32 gtid) __kmp_release_hle_lock() argument
2743 __kmp_release_hle_lock_with_checks(kmp_dyna_lock_t * lck,kmp_int32 gtid) __kmp_release_hle_lock_with_checks() argument
2747 __kmp_test_hle_lock(kmp_dyna_lock_t * lck,kmp_int32 gtid) __kmp_test_hle_lock() argument
2752 __kmp_test_hle_lock_with_checks(kmp_dyna_lock_t * lck,kmp_int32 gtid) __kmp_test_hle_lock_with_checks() argument
2771 __kmp_acquire_rtm_queuing_lock(kmp_queuing_lock_t * lck,kmp_int32 gtid) __kmp_acquire_rtm_queuing_lock() argument
2794 __kmp_acquire_rtm_queuing_lock_with_checks(kmp_queuing_lock_t * lck,kmp_int32 gtid) __kmp_acquire_rtm_queuing_lock_with_checks() argument
2800 __kmp_release_rtm_queuing_lock(kmp_queuing_lock_t * lck,kmp_int32 gtid) __kmp_release_rtm_queuing_lock() argument
2812 __kmp_release_rtm_queuing_lock_with_checks(kmp_queuing_lock_t * lck,kmp_int32 gtid) __kmp_release_rtm_queuing_lock_with_checks() argument
2818 __kmp_test_rtm_queuing_lock(kmp_queuing_lock_t * lck,kmp_int32 gtid) __kmp_test_rtm_queuing_lock() argument
2833 __kmp_test_rtm_queuing_lock_with_checks(kmp_queuing_lock_t * lck,kmp_int32 gtid) __kmp_test_rtm_queuing_lock_with_checks() argument
2850 __kmp_acquire_rtm_spin_lock(kmp_rtm_spin_lock_t * lck,kmp_int32 gtid) __kmp_acquire_rtm_spin_lock() argument
2882 __kmp_acquire_rtm_spin_lock_with_checks(kmp_rtm_spin_lock_t * lck,kmp_int32 gtid) __kmp_acquire_rtm_spin_lock_with_checks() argument
2888 __kmp_release_rtm_spin_lock(kmp_rtm_spin_lock_t * lck,kmp_int32 gtid) __kmp_release_rtm_spin_lock() argument
2901 __kmp_release_rtm_spin_lock_with_checks(kmp_rtm_spin_lock_t * lck,kmp_int32 gtid) __kmp_release_rtm_spin_lock_with_checks() argument
2906 __kmp_test_rtm_spin_lock(kmp_rtm_spin_lock_t * lck,kmp_int32 gtid) __kmp_test_rtm_spin_lock() argument
2929 __kmp_test_rtm_spin_lock_with_checks(kmp_rtm_spin_lock_t * lck,kmp_int32 gtid) __kmp_test_rtm_spin_lock_with_checks() argument
3121 __kmp_allocate_indirect_lock(void ** user_lock,kmp_int32 gtid,kmp_indirect_locktag_t tag) __kmp_allocate_indirect_lock() argument
3242 kmp_uint32 gtid = __kmp_entry_gtid(); __kmp_destroy_indirect_lock() local
3260 __kmp_set_indirect_lock(kmp_dyna_lock_t * lock,kmp_int32 gtid) __kmp_set_indirect_lock() argument
3265 __kmp_unset_indirect_lock(kmp_dyna_lock_t * lock,kmp_int32 gtid) __kmp_unset_indirect_lock() argument
3270 __kmp_test_indirect_lock(kmp_dyna_lock_t * lock,kmp_int32 gtid) __kmp_test_indirect_lock() argument
3276 __kmp_set_indirect_lock_with_checks(kmp_dyna_lock_t * lock,kmp_int32 gtid) __kmp_set_indirect_lock_with_checks() argument
3283 __kmp_unset_indirect_lock_with_checks(kmp_dyna_lock_t * lock,kmp_int32 gtid) __kmp_unset_indirect_lock_with_checks() argument
3290 __kmp_test_indirect_lock_with_checks(kmp_dyna_lock_t * lock,kmp_int32 gtid) __kmp_test_indirect_lock_with_checks() argument
3854 __kmp_user_lock_allocate(void ** user_lock,kmp_int32 gtid,kmp_lock_flags_t flags) __kmp_user_lock_allocate() argument
3898 __kmp_user_lock_free(void ** user_lock,kmp_int32 gtid,kmp_user_lock_p lck) __kmp_user_lock_free() argument
[all...]
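The test-and-set (TAS) lock code above encodes ownership directly in the lock word: 0 when free, gtid + 1 when held, which is why the acquire template spins while (curr != 0) && (curr != gtid + 1) and the *_with_checks variants can detect a re-acquire of a non-nestable lock with one comparison. A minimal sketch of that scheme, assuming a plain spin without the runtime's backoff logic (names here are illustrative, not the runtime's):

```cpp
#include <atomic>
#include <cassert>

struct tas_lock_sketch {
  std::atomic<int> poll{0}; // 0 = free, gtid + 1 = owning thread

  void acquire(int gtid) {
    int expected = 0;
    const int busy = gtid + 1;
    // Spin until the word swings from 0 (free) to gtid + 1 (mine).
    while (!poll.compare_exchange_weak(expected, busy,
                                       std::memory_order_acquire,
                                       std::memory_order_relaxed)) {
      expected = 0; // failed CAS wrote back the observed value; reset it
    }
  }

  bool test(int gtid) { // non-blocking try-lock, like __kmp_test_tas_lock
    int expected = 0;
    return poll.compare_exchange_strong(expected, gtid + 1,
                                        std::memory_order_acquire);
  }

  void release(int gtid) {
    assert(poll.load(std::memory_order_relaxed) == gtid + 1); // owner check
    poll.store(0, std::memory_order_release);
  }
};

int main() {
  tas_lock_sketch l;
  l.acquire(/*gtid=*/3);
  l.release(3);
}
```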
kmp_error.cpp
56 static void __kmp_expand_cons_stack(int gtid, struct cons_header *p) { in __kmp_expand_cons_stack() argument
61 if (gtid < 0) in __kmp_expand_cons_stack()
64 KE_TRACE(10, ("expand cons_stack (%d %d)\n", gtid, __kmp_get_gtid())); in __kmp_expand_cons_stack()
133 struct cons_header *__kmp_allocate_cons_stack(int gtid) { in __kmp_allocate_cons_stack() argument
137 if (gtid < 0) { in __kmp_allocate_cons_stack()
140 KE_TRACE(10, ("allocate cons_stack (%d)\n", gtid)); in __kmp_allocate_cons_stack()
165 static void dump_cons_stack(int gtid, struct cons_header *p) { in dump_cons_stack() argument
175 tos, gtid); in dump_cons_stack()
184 __kmp_str_buf_print(&buffer, "End construct stack for thread %d\n", gtid); in dump_cons_stack()
193 void __kmp_push_parallel(int gtid, ident_t const *ident) { in __kmp_push_parallel() argument
[all …]
kmp_threadprivate.cpp
21 void kmp_threadprivate_insert_private_data(int gtid, void *pc_addr,
23 struct private_common *kmp_threadprivate_insert(int gtid, void *pc_addr,
34 __kmp_threadprivate_find_task_common(struct common_table *tbl, int gtid, in __kmp_threadprivate_find_task_common() argument
43 gtid, pc_addr)); in __kmp_threadprivate_find_task_common()
52 gtid, pc_addr)); in __kmp_threadprivate_find_task_common()
65 __kmp_find_shared_task_common(struct shared_table *tbl, int gtid, in __kmp_find_shared_task_common() argument
75 gtid, pc_addr)); in __kmp_find_shared_task_common()
133 int gtid; in __kmp_common_initialize() local
140 for (gtid = 0; gtid < __kmp_threads_capacity; gtid++) in __kmp_common_initialize()
141 if (__kmp_root[gtid]) { in __kmp_common_initialize()
[all …]
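__kmp_threadprivate_find_task_common searches a per-thread table keyed by pc_addr (the address of the original threadprivate variable) for that thread's private copy; a miss falls through to creation and insertion. A rough sketch of the lookup-or-create idea, with std::unordered_map standing in for the runtime's fixed-bucket hash tables (all names here are hypothetical):

```cpp
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <unordered_map>

// Per-thread map from "address of the original global" to "my private copy".
thread_local std::unordered_map<void *, void *> tp_table;

void *threadprivate_cached_sketch(void *pc_addr, size_t size) {
  auto it = tp_table.find(pc_addr);
  if (it != tp_table.end())
    return it->second;              // fast path: copy already exists
  void *copy = std::malloc(size);   // slow path: create and register
  std::memcpy(copy, pc_addr, size); // initialize from the original copy
  tp_table.emplace(pc_addr, copy);
  return copy;
}

int g_counter = 7; // plays the role of a threadprivate global

int main() {
  int *mine = (int *)threadprivate_cached_sketch(&g_counter, sizeof g_counter);
  std::printf("private copy = %d\n", *mine); // 7
  std::free(mine);
}
```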
kmp_ftn_entry.h
115 int gtid, tid, bt = (KMP_DEREF arg); in FTN_SET_BLOCKTIME()
118 gtid = __kmp_entry_gtid(); in FTN_SET_BLOCKTIME()
119 tid = __kmp_tid_from_gtid(gtid); in FTN_SET_BLOCKTIME()
120 thread = __kmp_thread_from_gtid(gtid); in FTN_SET_BLOCKTIME()
132 int gtid, tid; in FTN_GET_BLOCKTIME()
135 gtid = __kmp_entry_gtid(); in FTN_GET_BLOCKTIME()
136 tid = __kmp_tid_from_gtid(gtid); in FTN_GET_BLOCKTIME()
137 team = __kmp_threads[gtid]->th.th_team; in FTN_GET_BLOCKTIME()
141 KF_TRACE(10, ("kmp_get_blocktime: T#%d(%d:%d), blocktime=%d%cs\n", gtid, in FTN_GET_BLOCKTIME()
147 KF_TRACE(10, ("kmp_get_blocktime: T#%d(%d:%d), blocktime=%d%cs\n", gtid, in FTN_GET_BLOCKTIME()
[all...]
kmp_lock.h
122 // KMP_LOCK_FREE(tas) => unlocked; locked: (gtid+1) of owning thread
152 extern int __kmp_acquire_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
153 extern int __kmp_test_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
154 extern int __kmp_release_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
158 extern int __kmp_acquire_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
159 extern int __kmp_test_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
160 extern int __kmp_release_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid);
188 // 2*(gtid+1) of owning thread, 0 if unlocked
189 // locked: (gtid+1) of owning thread
211 extern int __kmp_acquire_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid);
551 __kmp_acquire_lock(kmp_lock_t * lck,kmp_int32 gtid) __kmp_acquire_lock() argument
555 __kmp_test_lock(kmp_lock_t * lck,kmp_int32 gtid) __kmp_test_lock() argument
559 __kmp_release_lock(kmp_lock_t * lck,kmp_int32 gtid) __kmp_release_lock() argument
635 __kmp_acquire_user_lock_with_checks(lck,gtid) global() argument
668 __kmp_acquire_user_lock_with_checks(kmp_user_lock_p lck,kmp_int32 gtid) __kmp_acquire_user_lock_with_checks() argument
683 __kmp_test_user_lock_with_checks(kmp_user_lock_p lck,kmp_int32 gtid) __kmp_test_user_lock_with_checks() argument
701 __kmp_test_user_lock_with_checks(kmp_user_lock_p lck,kmp_int32 gtid) __kmp_test_user_lock_with_checks() argument
711 __kmp_release_user_lock_with_checks(kmp_user_lock_p lck,kmp_int32 gtid) __kmp_release_user_lock_with_checks() argument
744 __kmp_acquire_nested_user_lock_with_checks(lck,gtid,depth) global() argument
781 __kmp_acquire_nested_user_lock_with_checks(kmp_user_lock_p lck,kmp_int32 gtid,int * depth) __kmp_acquire_nested_user_lock_with_checks() argument
793 __kmp_test_nested_user_lock_with_checks(kmp_user_lock_p lck,kmp_int32 gtid) __kmp_test_nested_user_lock_with_checks() argument
822 __kmp_test_nested_user_lock_with_checks(kmp_user_lock_p lck,kmp_int32 gtid) __kmp_test_nested_user_lock_with_checks() argument
833 __kmp_release_nested_user_lock_with_checks(kmp_user_lock_p lck,kmp_int32 gtid) __kmp_release_nested_user_lock_with_checks() argument
[all...]
kmp_tasking.cpp
39 static void __kmp_bottom_half_finish_proxy(kmp_int32 gtid, kmp_task_t *ptask);
42 int __kmp_taskloop_task(int gtid, void *ptask);
54 static void __kmp_trace_task_stack(kmp_int32 gtid, in __kmp_trace_task_stack() argument
66 location, gtid, entries, task_stack->ts_first_block, stack_top)); in __kmp_trace_task_stack()
93 location, gtid, entries, stack_top, tied_task)); in __kmp_trace_task_stack()
99 location, gtid)); in __kmp_trace_task_stack()
108 static void __kmp_init_task_stack(kmp_int32 gtid, in __kmp_init_task_stack() argument
129 static void __kmp_free_task_stack(kmp_int32 gtid, in __kmp_free_task_stack() argument
158 static void __kmp_push_task_stack(kmp_int32 gtid, kmp_info_t *thread, in __kmp_push_task_stack() argument
162 &thread->th.th_task_team->tt.tt_threads_data[__kmp_tid_from_gtid(gtid)]; in __kmp_push_task_stack()
[all …]
kmp_barrier.cpp
220 int gtid = other_threads[thr]->th.th_info.ds.ds_gtid; in __kmp_dist_barrier_wakeup() local
222 __kmp_atomic_resume_64(gtid, (kmp_atomic_flag_64<> *)NULL); in __kmp_dist_barrier_wakeup()
227 enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid, in __kmp_dist_barrier_gather() argument
247 gtid, team->t.t_id, tid, bt)); in __kmp_dist_barrier_gather()
280 this_thr, gtid, (kmp_atomic_flag_64<> *)NULL, FALSE, in __kmp_dist_barrier_gather()
300 OMPT_REDUCTION_DECL(this_thr, gtid); in __kmp_dist_barrier_gather()
329 this_thr, gtid, (kmp_atomic_flag_64<> *)NULL, FALSE, in __kmp_dist_barrier_gather()
350 OMPT_REDUCTION_DECL(this_thr, gtid); in __kmp_dist_barrier_gather()
372 gtid, team->t.t_id, tid, bt)); in __kmp_dist_barrier_gather()
376 enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid, in __kmp_dist_barrier_release() argument
[all …]
ompt-specific.h
30 void __ompt_lw_taskteam_init(ompt_lw_taskteam_t *lwt, kmp_info_t *thr, int gtid,
79 inline void *__ompt_load_return_address(int gtid) { in __ompt_load_return_address() argument
80 kmp_info_t *thr = __kmp_threads[gtid]; in __ompt_load_return_address()
91 #define OMPT_STORE_RETURN_ADDRESS(gtid) \ argument
92 OmptReturnAddressGuard ReturnAddressGuard{gtid, __builtin_return_address(0)};
93 #define OMPT_LOAD_RETURN_ADDRESS(gtid) __ompt_load_return_address(gtid) argument
94 #define OMPT_LOAD_OR_GET_RETURN_ADDRESS(gtid) \ argument
95 ((ompt_enabled.enabled && gtid >= 0 && __kmp_threads[gtid] && \
96 __kmp_threads[gtid]->th.ompt_thread_info.return_address) \
97 ? __ompt_load_return_address(gtid) \
[all …]
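OMPT_STORE_RETURN_ADDRESS expands to an RAII guard (OmptReturnAddressGuard) that records __builtin_return_address(0) in the calling thread's state so later OMPT callbacks can report the user call site, and clears it on scope exit. A sketch of that guard pattern with a plain thread_local slot standing in for the real per-thread state (the runtime keeps it in __kmp_threads[gtid]->th.ompt_thread_info.return_address):

```cpp
#include <cstdio>

thread_local void *g_return_address = nullptr; // stand-in per-thread slot

struct ReturnAddressGuardSketch {
  explicit ReturnAddressGuardSketch(void *addr) { g_return_address = addr; }
  ~ReturnAddressGuardSketch() { g_return_address = nullptr; }
};

void runtime_entry_sketch() {
  // The real macro captures __builtin_return_address(0) at the OpenMP
  // entry point; a dummy address serves for the sketch.
  int dummy;
  ReturnAddressGuardSketch guard{&dummy};
  std::printf("recorded return address: %p\n", g_return_address);
} // guard's destructor clears the slot on scope exit

int main() { runtime_entry_sketch(); }
```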
kmp_error.h
28 struct cons_header *__kmp_allocate_cons_stack(int gtid);
31 void __kmp_push_parallel(int gtid, ident_t const *ident);
32 void __kmp_push_workshare(int gtid, enum cons_type ct, ident_t const *ident);
34 void __kmp_push_sync(int gtid, enum cons_type ct, ident_t const *ident,
37 void __kmp_push_sync(int gtid, enum cons_type ct, ident_t const *ident,
41 void __kmp_check_workshare(int gtid, enum cons_type ct, ident_t const *ident);
43 void __kmp_check_sync(int gtid, enum cons_type ct, ident_t const *ident,
46 void __kmp_check_sync(int gtid, enum cons_type ct, ident_t const *ident,
50 void __kmp_pop_parallel(int gtid, ident_t const *ident);
51 enum cons_type __kmp_pop_workshare(int gtid, enum cons_type ct,
[all …]
kmp_itt.h
55 __kmp_itt_region_forking(int gtid, int team_size,
58 __kmp_itt_region_joined(int gtid); // Primary only, after joining threads.
63 __kmp_inline void __kmp_itt_frame_submit(int gtid, __itt_timestamp begin,
71 __kmp_inline void __kmp_itt_metadata_imbalance(int gtid, kmp_uint64 begin,
83 __kmp_inline void *__kmp_itt_barrier_object(int gtid, int bt, int set_name = 0,
85 __kmp_inline void __kmp_itt_barrier_starting(int gtid, void *object);
86 __kmp_inline void __kmp_itt_barrier_middle(int gtid, void *object);
87 __kmp_inline void __kmp_itt_barrier_finished(int gtid, void *object);
90 __kmp_inline void *__kmp_itt_taskwait_object(int gtid);
91 __kmp_inline void __kmp_itt_taskwait_starting(int gtid, void *object);
[all …]
kmp_dispatch.cpp
184 void __kmp_dispatch_init_algorithm(ident_t *loc, int gtid, in __kmp_dispatch_init_algorithm() argument
214 KD_TRACE(10, (buff, gtid, pr, lb, ub, st, schedule, chunk, nproc, tid)); in __kmp_dispatch_init_algorithm()
219 th = __kmp_threads[gtid]; in __kmp_dispatch_init_algorithm()
226 KMP_MASTER_GTID(gtid) && th->th.th_teams_microtask == NULL && in __kmp_dispatch_init_algorithm()
293 KD_TRACE(10, (buff, gtid, schedule, chunk)); in __kmp_dispatch_init_algorithm()
317 KD_TRACE(10, (buff, gtid, schedule, chunk)); in __kmp_dispatch_init_algorithm()
362 KD_TRACE(10, (buff, gtid, schedule, chunk)); in __kmp_dispatch_init_algorithm()
406 if (KMP_MASTER_GTID(gtid)) { in __kmp_dispatch_init_algorithm()
437 gtid)); in __kmp_dispatch_init_algorithm()
578 gtid)); in __kmp_dispatch_init_algorithm()
[all …]
kmp_csupport.cpp
101 kmp_int32 gtid = __kmp_entry_gtid(); in __kmpc_global_thread_num() local
103 KC_TRACE(10, ("__kmpc_global_thread_num: T#%d\n", gtid)); in __kmpc_global_thread_num()
105 return gtid; in __kmpc_global_thread_num()
308 int gtid = __kmp_entry_gtid(); in __kmpc_fork_call() local
335 kmp_info_t *master_th = __kmp_threads[gtid]; in __kmpc_fork_call()
339 OMPT_STORE_RETURN_ADDRESS(gtid); in __kmpc_fork_call()
345 __kmp_fork_call(loc, gtid, fork_context_intel, argc, in __kmpc_fork_call()
352 __kmp_join_call(loc, gtid in __kmpc_fork_call()
390 int gtid = __kmp_entry_gtid(); in __kmpc_fork_call_if() local
397 __kmpc_serialized_parallel(loc, gtid); in __kmpc_fork_call_if()
[all …]
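__kmpc_fork_call is the runtime side of a compiler transformation: a #pragma omp parallel body is outlined into a microtask function whose leading parameters are pointers to the worker's gtid and tid, and the fork call launches a team on it with the captured variables as trailing arguments. A minimal illustration of the source-level region that lowering starts from (compile with -fopenmp; the exact emitted call sequence is compiler-dependent):

```cpp
#include <omp.h>
#include <cstdio>

int main() {
  int x = 42; // captured variable, passed along by the fork call
  // The body below is outlined by the compiler and handed to a
  // __kmpc_fork_call(&loc, /*argc=*/1, microtask, &x)-style launch.
#pragma omp parallel
  {
    std::printf("thread %d sees x = %d\n", omp_get_thread_num(), x);
  }
}
```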
kmp.h
1131 extern omp_allocator_handle_t __kmpc_init_allocator(int gtid,
1135 extern void __kmpc_destroy_allocator(int gtid, omp_allocator_handle_t al);
1136 extern void __kmpc_set_default_allocator(int gtid, omp_allocator_handle_t al);
1137 extern omp_allocator_handle_t __kmpc_get_default_allocator(int gtid);
1139 extern void *__kmpc_alloc(int gtid, size_t sz, omp_allocator_handle_t al);
1140 extern void *__kmpc_aligned_alloc(int gtid, size_t align, size_t sz,
1142 extern void *__kmpc_calloc(int gtid, size_t nmemb, size_t sz,
1144 extern void *__kmpc_realloc(int gtid, void *ptr, size_t sz,
1147 extern void __kmpc_free(int gtid, void *ptr, omp_allocator_handle_t al);
1149 extern void *__kmp_alloc(int gtid, size_t align, size_t sz,
[all …]
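The __kmpc_alloc family in kmp.h backs the OpenMP 5.x memory-allocator API: user code calls omp_alloc/omp_free with an allocator handle, and those requests are routed through these gtid-taking entry points inside libomp. A small usage example of the standard user-level API (OpenMP 5.0, compile with -fopenmp):

```cpp
#include <omp.h>
#include <cstdio>

int main() {
  // Allocate from the default allocator; inside libomp this reaches a
  // __kmpc_alloc-style entry point carrying the calling thread's gtid.
  double *buf = (double *)omp_alloc(8 * sizeof(double), omp_default_mem_alloc);
  if (buf) {
    buf[0] = 3.14;
    std::printf("buf[0] = %f\n", buf[0]);
    omp_free(buf, omp_default_mem_alloc);
  }
}
```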
kmp_taskdeps.h
18 #define KMP_ACQUIRE_DEPNODE(gtid, n) __kmp_acquire_lock(&(n)->dn.lock, (gtid)) argument
19 #define KMP_RELEASE_DEPNODE(gtid, n) __kmp_release_lock(&(n)->dn.lock, (gtid)) argument
94 static inline void __kmp_release_deps(kmp_int32 gtid, kmp_taskdata_t *task) { in __kmp_release_deps() argument
105 __kmp_omp_task(gtid, successor->task, false); in __kmp_release_deps()
112 kmp_info_t *thread = __kmp_threads[gtid]; in __kmp_release_deps()
121 __kmp_release_lock(node->dn.mtx_locks[i], gtid); in __kmp_release_deps()
128 gtid, task)); in __kmp_release_deps()
137 gtid, task)); in __kmp_release_deps()
139 KMP_ACQUIRE_DEPNODE(gtid, node); in __kmp_release_deps()
146 KMP_RELEASE_DEPNODE(gtid, node); in __kmp_release_deps()
[all …]
kmp_runtime.cpp
87 int gtid);
96 void __kmp_fork_barrier(int gtid, int tid);
97 void __kmp_join_barrier(int gtid);
107 static int __kmp_unregister_root_other_thread(int gtid);
256 int gtid; in __kmp_get_global_thread_id_reg() local
259 gtid = KMP_GTID_DNE; in __kmp_get_global_thread_id_reg()
264 gtid = __kmp_gtid; in __kmp_get_global_thread_id_reg()
269 gtid = __kmp_gtid_get_specific(); in __kmp_get_global_thread_id_reg()
273 gtid = __kmp_get_global_thread_id(); in __kmp_get_global_thread_id_reg()
277 if (gtid == KMP_GTID_DNE) { in __kmp_get_global_thread_id_reg()
[all …]
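__kmp_get_global_thread_id_reg above tries the gtid lookup modes in decreasing order of speed: a compiled-in TLS variable (__kmp_gtid) when TDATA mode is built in, then a thread-specific key, then a slow stack-address search, yielding KMP_GTID_DNE if none succeed. A sketch of the first two tiers (the gtid + 1 encoding of the keyed value, so that 0 can mean "unset", matches the scheme visible elsewhere in these hits, but treat the details as assumptions):

```cpp
#include <pthread.h>
#include <cstdint>
#include <cstdio>

static pthread_key_t gtid_key;         // tier 2: pthread-specific slot
static thread_local int tls_gtid = -1; // tier 1: __kmp_gtid-style TDATA

int get_gtid_sketch() {
  if (tls_gtid >= 0)
    return tls_gtid;                       // direct TLS read, fastest
  void *v = pthread_getspecific(gtid_key); // keyed lookup
  if (v != nullptr)
    return (int)(intptr_t)v - 1;           // stored as gtid + 1, 0 == unset
  return -1;                               // stands in for KMP_GTID_DNE
}

int main() {
  pthread_key_create(&gtid_key, nullptr);
  pthread_setspecific(gtid_key, (void *)(intptr_t)(5 + 1));
  std::printf("gtid via key: %d\n", get_gtid_sketch()); // 5
  tls_gtid = 5;
  std::printf("gtid via TLS: %d\n", get_gtid_sketch()); // 5
}
```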
kmp_taskdeps.cpp
218 static inline void __kmp_track_dependence(kmp_int32 gtid, kmp_depnode_t *source, in __kmp_track_dependence() argument
279 sink_data = &__kmp_threads[gtid]->th.ompt_thread_info.task_data; in __kmp_track_dependence()
298 __kmp_depnode_link_successor(kmp_int32 gtid, kmp_info_t *thread, in __kmp_depnode_link_successor() argument
314 __kmp_track_dependence(gtid, dep, node, task); in __kmp_depnode_link_successor()
318 KMP_ACQUIRE_DEPNODE(gtid, dep); in __kmp_depnode_link_successor()
324 __kmp_track_dependence(gtid, dep, node, task); in __kmp_depnode_link_successor()
328 gtid, KMP_TASK_TO_TASKDATA(dep->dn.task), in __kmp_depnode_link_successor()
333 KMP_RELEASE_DEPNODE(gtid, dep); in __kmp_depnode_link_successor()
340 static inline kmp_int32 __kmp_depnode_link_successor(kmp_int32 gtid, in __kmp_depnode_link_successor() argument
355 __kmp_track_dependence(gtid, sink, source, task); in __kmp_depnode_link_successor()
[all …]
kmp_cancel.cpp
29 kmp_int32 __kmpc_cancel(ident_t *loc_ref, kmp_int32 gtid, kmp_int32 cncl_kind) { in __kmpc_cancel() argument
30 kmp_info_t *this_thr = __kmp_threads[gtid]; in __kmpc_cancel()
32 KC_TRACE(10, ("__kmpc_cancel: T#%d request %d OMP_CANCELLATION=%d\n", gtid, in __kmpc_cancel()
39 KMP_DEBUG_ASSERT(__kmp_get_gtid() == gtid); in __kmpc_cancel()
135 kmp_int32 __kmpc_cancellationpoint(ident_t *loc_ref, kmp_int32 gtid, in __kmpc_cancellationpoint() argument
137 kmp_info_t *this_thr = __kmp_threads[gtid]; in __kmpc_cancellationpoint()
141 gtid, cncl_kind, __kmp_omp_cancellation)); in __kmpc_cancellationpoint()
147 KMP_DEBUG_ASSERT(__kmp_get_gtid() == gtid); in __kmpc_cancellationpoint()
243 kmp_int32 __kmpc_cancel_barrier(ident_t *loc, kmp_int32 gtid) { in __kmpc_cancel_barrier() argument
245 kmp_info_t *this_thr = __kmp_threads[gtid]; in __kmpc_cancel_barrier()
[all …]
kmp_dispatch_hier.h
133 extern int __kmp_dispatch_get_id(int gtid, kmp_hier_layer_e type);
478 int next_recurse(ident_t *loc, int gtid, kmp_hier_top_unit_t<T> *current, in next_recurse()
482 kmp_info_t *th = __kmp_threads[gtid]; in next_recurse()
494 1, ("kmp_hier_t.next_recurse(): T#%d (%d) called\n", gtid, hier_level)); in next_recurse()
500 gtid, hier_level)); in next_recurse()
512 gtid, hier_level)); in next_recurse()
520 gtid, hier_level)); in next_recurse()
530 status = __kmp_dispatch_next_algorithm<T>(gtid, my_pr, my_sh, in next_recurse()
536 gtid, hier_level, status)); in next_recurse()
542 status = next_recurse(loc, gtid, parent, &contains_last, &my_lb, &my_ub, in next_recurse()
[all …]
kmp_sched.cpp
95 kmp_int32 gtid = global_tid; in __kmp_for_static_init() local
100 __kmp_assert_valid_gtid(gtid); in __kmp_for_static_init()
101 kmp_info_t *th = __kmp_threads[gtid]; in __kmp_for_static_init()
293 if (KMP_MASTER_GTID(gtid)) { in __kmp_for_static_init()
479 static void __kmp_dist_for_static_init(ident_t *loc, kmp_int32 gtid, in __kmp_dist_for_static_init() argument
504 KE_TRACE(10, ("__kmpc_dist_for_static_init called (%d)\n", gtid)); in __kmp_dist_for_static_init()
505 __kmp_assert_valid_gtid(gtid); in __kmp_dist_for_static_init()
516 (buff, gtid, schedule, *plastiter, *plower, *pupper, incr, chunk)); in __kmp_dist_for_static_init()
522 __kmp_push_workshare(gtid, ct_pdo, loc); in __kmp_dist_for_static_init()
540 tid = __kmp_tid_from_gtid(gtid); in __kmp_dist_for_static_init()
[all …]
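__kmp_for_static_init computes each thread's iteration block from its gtid-derived team id. For schedule(static) with no chunk and a positive increment, the standard partition gives every thread trip/nth iterations rounded down, with the first trip mod nth threads taking one extra. A sketch of that math (not a copy of the runtime's code, which also handles chunked schedules, negative strides, and overflow):

```cpp
#include <cstdio>

void static_init_sketch(int tid, int nth, long lower, long upper, long incr,
                        long *plower, long *pupper) {
  long trip = (upper - lower) / incr + 1; // iterations in [lower, upper]
  long small = trip / nth;                // base block size
  long extra = trip % nth;                // first 'extra' threads get +1
  long start = tid * small + (tid < extra ? tid : extra);
  long count = small + (tid < extra ? 1 : 0);
  *plower = lower + start * incr;
  *pupper = *plower + (count - 1) * incr;
}

int main() {
  for (int tid = 0; tid < 4; ++tid) {
    long lo, up;
    static_init_sketch(tid, 4, 0, 9, 1, &lo, &up); // 10 iters, 4 threads
    std::printf("T#%d: [%ld, %ld]\n", tid, lo, up); // blocks of 3,3,2,2
  }
}
```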
kmp_atomic.cpp
689 if (gtid == KMP_GTID_UNKNOWN) { \
690 gtid = __kmp_entry_gtid(); \
699 RET_TYPE __kmpc_atomic_##TYPE_ID##_##OP_ID(ident_t *id_ref, int gtid, \
702 KA_TRACE(100, ("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid));
727 __kmp_acquire_atomic_lock(&ATOMIC_LOCK##LCK_ID, gtid); \
731 __kmp_release_atomic_lock(&ATOMIC_LOCK##LCK_ID, gtid);
734 __kmp_acquire_atomic_lock(&ATOMIC_LOCK##LCK_ID, gtid); \
736 __kmp_release_atomic_lock(&ATOMIC_LOCK##LCK_ID, gtid);
862 __kmp_acquire_atomic_lock(&ATOMIC_LOCK##LCK_ID, gtid); \
864 __kmp_release_atomic_lock(&ATOMIC_LOCK##LCK_ID, gtid);
[all …]
z_Linux_util.cpp
440 void __kmp_terminate_thread(int gtid) { in __kmp_terminate_thread() argument
442 kmp_info_t *th = __kmp_threads[gtid]; in __kmp_terminate_thread()
448 KA_TRACE(10, ("__kmp_terminate_thread: kill (%d)\n", gtid)); in __kmp_terminate_thread()
462 static kmp_int32 __kmp_set_stack_info(int gtid, kmp_info_t *th) { in __kmp_set_stack_info() argument
473 if (!KMP_UBER_GTID(gtid)) { in __kmp_set_stack_info()
485 gtid, size, addr)); in __kmp_set_stack_info()
503 gtid, size, addr)); in __kmp_set_stack_info()
535 int gtid; in __kmp_launch_worker() local
537 gtid = ((kmp_info_t *)thr)->th.th_info.ds.ds_gtid; in __kmp_launch_worker()
538 __kmp_gtid_set_specific(gtid); in __kmp_launch_worker()
[all …]
kmp_dispatch.h
46 extern void __kmp_dispatch_init_algorithm(ident_t *loc, int gtid,
57 int gtid, dispatch_private_info_template<T> *pr,
329 int gtid = *gtid_ref; in __kmp_dispatch_deo()
331 kmp_info_t *th = __kmp_threads[gtid]; in __kmp_dispatch_deo()
334 KD_TRACE(100, ("__kmp_dispatch_deo: T#%d called\n", gtid)); in __kmp_dispatch_deo()
340 __kmp_push_sync(gtid, ct_ordered_in_pdo, loc_ref, NULL, 0); in __kmp_dispatch_deo()
342 __kmp_push_sync(gtid, ct_ordered_in_pdo, loc_ref, NULL); in __kmp_dispatch_deo()
362 struct cons_header *p = __kmp_threads[gtid]->th.th_cons; in __kmp_dispatch_deo()
378 KD_TRACE(1000, (buff, gtid, sh->u.s.ordered_iteration, lower)); in __kmp_dispatch_deo()
392 KD_TRACE(1000, (buff, gtid, s in __kmp_dispatch_deo()
323 int gtid = *gtid_ref; __kmp_dispatch_deo() local
399 int gtid = *gtid_ref; __kmp_dispatch_dxo() local
[all...]
kmp_itt.inl
126 LINKAGE void __kmp_itt_region_forking(int gtid, int team_size, int barriers) { argument
128 kmp_team_t *team = __kmp_team_from_gtid(gtid);
133 kmp_info_t *th = __kmp_thread_from_gtid(gtid);
178 KMP_ITT_DEBUG_PRINT("[frm beg] gtid=%d, domain=%p, loc:%p\n", gtid, e->d,
184 LINKAGE void __kmp_itt_frame_submit(int gtid, __itt_timestamp begin, argument
192 kmp_info_t *th = __kmp_thread_from_gtid(gtid);
194 kmp_team_t *team = __kmp_team_from_gtid(gtid);
226 "[reg sub] gtid=%d, domain=%p, region:%d, loc:%p, beg:%llu, end:%llu\n",
227 gtid,
266 __kmp_itt_metadata_imbalance(int gtid,kmp_uint64 begin,kmp_uint64 end,kmp_uint64 imbalance,kmp_uint64 reduction) global() argument
355 __kmp_itt_region_starting(int gtid) global() argument
361 __kmp_itt_region_finished(int gtid) global() argument
367 __kmp_itt_region_joined(int gtid) global() argument
417 __kmp_itt_barrier_object(int gtid,int bt,int set_name,int delta) global() argument
521 __kmp_itt_barrier_starting(int gtid,void * object) global() argument
535 __kmp_itt_barrier_middle(int gtid,void * object) global() argument
550 __kmp_itt_barrier_finished(int gtid,void * object) global() argument
565 __kmp_itt_taskwait_object(int gtid) global() argument
579 __kmp_itt_taskwait_starting(int gtid,void * object) global() argument
595 __kmp_itt_taskwait_finished(int gtid,void * object) global() argument
800 __kmp_itt_single_start(int gtid) global() argument
822 __kmp_itt_single_end(int gtid) global() argument
844 __kmp_itt_ordered_init(int gtid) global() argument
856 __kmp_itt_ordered_prep(int gtid) global() argument
868 __kmp_itt_ordered_start(int gtid) global() argument
880 __kmp_itt_ordered_end(int gtid) global() argument
898 __kmp_itt_thread_name(int gtid) global() argument
[all...]
