kmp_int64 matches under /freebsd/contrib/llvm-project/openmp/runtime/src/
kmp_os.h
  140: typedef __int64 kmp_int64;
  148: typedef struct kmp_struct64 kmp_int64;
  173: typedef long long kmp_int64;
  211: typedef kmp_int64 kmp_int;
  497: InterlockedExchange64((volatile kmp_int64 *)(p), (kmp_int64)(v))
  516: extern kmp_int64 __kmp_test_then_add64(volatile kmp_int64 *p, kmp_int64 v);
  547: __kmp_compare_and_store_acq64((volatile kmp_int64 *)(p), (kmp_int64)(cv), \
  548: (kmp_int64)(sv))
  550: __kmp_compare_and_store_rel64((volatile kmp_int64 *)(p), (kmp_int64)(cv), \
  551: (kmp_int64)(sv))
  [additional matches not shown]
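For orientation: the declaration at kmp_os.h:516 is a 64-bit test-then-add, i.e. an atomic fetch-and-add that returns the previous value. Below is a minimal standalone sketch of that shape; it uses the GCC/Clang __atomic builtin purely for illustration, whereas the real header selects between inline assembly, compiler builtins, and the extern function per platform and compiler.

    #include <cstdint>

    typedef int64_t kmp_int64; // stands in for the __int64 / long long typedefs above

    // Illustrative only: add v to *p atomically and return the value *p held
    // before the addition, matching the "test then add" naming.
    static inline kmp_int64 test_then_add64_sketch(volatile kmp_int64 *p,
                                                   kmp_int64 v) {
      return __atomic_fetch_add(p, v, __ATOMIC_ACQ_REL);
    }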
kmp_atomic.h
  495: void __kmpc_atomic_fixed8_add(ident_t *id_ref, int gtid, kmp_int64 *lhs,
  496: kmp_int64 rhs);
  497: void __kmpc_atomic_fixed8_sub(ident_t *id_ref, int gtid, kmp_int64 *lhs,
  498: kmp_int64 rhs);
  524: void __kmpc_atomic_fixed8_andb(ident_t *id_ref, int gtid, kmp_int64 *lhs,
  525: kmp_int64 rhs);
  526: void __kmpc_atomic_fixed8_div(ident_t *id_ref, int gtid, kmp_int64 *lhs,
  527: kmp_int64 rhs);
  530: void __kmpc_atomic_fixed8_mul(ident_t *id_ref, int gtid, kmp_int64 *lhs,
  531: kmp_int64 rhs);
  [additional matches not shown]
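The fixed8 entry points above operate on 8-byte integers (kmp_int64). As a hedged illustration, user code of the following kind is what a compiler typically lowers to __kmpc_atomic_fixed8_add; the exact lowering is a compiler decision and may instead use inline hardware atomics.

    #include <cstdint>

    void accumulate(int64_t *sum, int64_t v) {
    #pragma omp atomic
      *sum += v; // a candidate for a call to __kmpc_atomic_fixed8_add(loc, gtid, sum, v)
    }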
kmp_dispatch.h
  210: __forceinline kmp_int64 test_then_add<kmp_int64>(volatile kmp_int64 *p,
  211: kmp_int64 d) {
  212: kmp_int64 r;
  228: __forceinline kmp_int64 test_then_inc_acq<kmp_int64>(volatile kmp_int64 *p) {
  229: kmp_int64 r;
  245: __forceinline kmp_int64 test_then_in…
  [remainder of this entry truncated]
kmp_sched.cpp
   44: kmp_int64 t; \
   45: kmp_int64 u = (kmp_int64)(*pupper); \
   46: kmp_int64 l = (kmp_int64)(*plower); \
   47: kmp_int64 i = (kmp_int64)incr; \
  923: kmp_int32 *plastiter, kmp_int64 *plower,  // in __kmpc_for_static_init_8()
  924: kmp_int64 *pupper, kmp_int64 *pstride,  // in __kmpc_for_static_init_8()
  925: kmp_int64 incr, kmp_int64 chunk) {  // in __kmpc_for_static_init_8()
  926: __kmp_for_static_init<kmp_int64>(loc, gtid, schedtype, plastiter, plower,  // in __kmpc_for_static_init_8()
  941: kmp_int64 *pstride, kmp_int64 incr,  // in __kmpc_for_static_init_8u()
  942: kmp_int64 chunk) {  // in __kmpc_for_static_init_8u()
  [additional matches not shown]
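The matches at 923-926 are the 8-byte signed variant of the static-loop init entry point. As a rough model only (assuming a positive increment, ignoring the chunk parameter and the runtime's actual schedule kinds), such an init narrows the shared bounds to the calling thread's contiguous block:

    #include <cstdint>

    // Simplified model: shrink the inclusive range [*plower, *pupper] with
    // stride incr > 0 down to the block owned by thread tid of nthreads.
    void static_init_sketch(int64_t *plower, int64_t *pupper, int64_t incr,
                            int tid, int nthreads) {
      int64_t trip_count = (*pupper - *plower) / incr + 1;
      int64_t chunk = trip_count / nthreads;
      int64_t extra = trip_count % nthreads;
      int64_t start = tid * chunk + (tid < extra ? tid : extra);
      int64_t end = start + chunk + (tid < extra ? 1 : 0) - 1; // inclusive
      *plower += start * incr;
      *pupper = *plower + (end - start) * incr; // end < start means no iterations
    }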
z_Windows_NT-586_util.cpp
   79: kmp_int64 old_value, new_value;  // in __kmp_test_then_add8()
   92: kmp_int64 __kmp_test_then_add64(volatile kmp_int64 *p, kmp_int64 d) {  // in __kmp_test_then_add64()
   93: kmp_int64 old_value, new_value;  // in __kmp_test_then_add64()
  112: while (!KMP_COMPARE_AND_STORE_REL64((volatile kmp_int64 *)p, old_value,  // in __kmp_test_then_or64()
  127: while (!KMP_COMPARE_AND_STORE_REL64((volatile kmp_int64 *)p, old_value,  // in __kmp_test_then_and64()
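The old_value/new_value pairs and the while (!KMP_COMPARE_AND_STORE_REL64(...)) loops above are the usual compare-and-swap retry pattern used by these fallbacks. A self-contained sketch of the same pattern with std::atomic (illustrative names, not the runtime's macros):

    #include <atomic>
    #include <cstdint>

    // Atomically OR mask into *p and return the value *p held beforehand.
    int64_t fetch_then_or64_sketch(std::atomic<int64_t> *p, int64_t mask) {
      int64_t old_value = p->load(std::memory_order_relaxed);
      int64_t new_value = old_value | mask;
      // On failure, compare_exchange_weak reloads old_value; recompute new_value
      // from the freshly observed contents and retry, as the runtime loops do.
      while (!p->compare_exchange_weak(old_value, new_value,
                                       std::memory_order_release,
                                       std::memory_order_relaxed)) {
        new_value = old_value | mask;
      }
      return old_value;
    }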
kmp_atomic.cpp
   958: ATOMIC_FIXED_ADD(fixed8, add, kmp_int64, 64, +, 8i, 7,
   960: ATOMIC_FIXED_ADD(fixed8, sub, kmp_int64, 64, -, 8i, 7,
  1045: ATOMIC_CMPXCHG(fixed8, andb, kmp_int64, 64, &, 8i, 7,
  1047: ATOMIC_CMPXCHG(fixed8, div, kmp_int64, 64, /, 8i, 7,
  1051: ATOMIC_CMPXCHG(fixed8, mul, kmp_int64, 64, *, 8i, 7,
  1053: ATOMIC_CMPXCHG(fixed8, orb, kmp_int64, 64, |, 8i, 7,
  1055: ATOMIC_CMPXCHG(fixed8, shl, kmp_int64, 64, <<, 8i, 7,
  1057: ATOMIC_CMPXCHG(fixed8, shr, kmp_int64, 64, >>, 8i, 7,
  1061: ATOMIC_CMPXCHG(fixed8, xor, kmp_int64, 64, ^, 8i, 7,
  1122: ATOMIC_CMPX_L(fixed8, andl, kmp_int64, 64, &&, 8i, 7,
  [additional matches not shown]
kmp_dispatch.cpp
   927: __kmp_dispatch_init_hier_runtime<kmp_int64>(ident_t *loc, kmp_int64 lb,  // in __kmp_dispatch_init_hier_runtime()
   928: kmp_int64 ub, kmp_int64 st) {  // in __kmp_dispatch_init_hier_runtime()
   929: __kmp_dispatch_init_hierarchy<kmp_int64>(  // in __kmp_dispatch_init_hier_runtime()
   936: kmp_uint64 ub, kmp_int64 st) {  // in __kmp_dispatch_init_hier_runtime()
  1515: kmp_int64 b;  // in __kmp_dispatch_next_algorithm()
  1520: vold.b = *(volatile kmp_int64 *)(&pr->u.p.count);  // in __kmp_dispatch_next_algorithm()
  1524: (volatile kmp_int64 *)&pr->u.p.count,  // in __kmp_dispatch_next_algorithm()
  1525: *VOLATILE_CAST(kmp_int64 *) & vold.b,  // in __kmp_dispatch_next_algorithm()
  1526: *VOLATILE_CAST(kmp_int64 *) & vnew.b)) {  // in __kmp_dispatch_next_algorithm()
  1528: vold.b = *(volatile kmp_int64 *)(&pr->u.p.count);  // in __kmp_dispatch_next_algorithm()
  [additional matches not shown]
kmp_collapse.cpp
  128: kmp_canonicalize_one_loop_XX<kmp_int64>(  // in kmp_canonicalize_loop_nest()
  130: /*in/out*/ (bounds_infoXX_template<kmp_int64> *)(bounds));  // in kmp_canonicalize_loop_nest()
  203: trip_count = kmp_calculate_trip_count_XX<kmp_int64>(  // in kmp_calculate_trip_count()
  204: /*in/out*/ (bounds_infoXX_template<kmp_int64> *)(bounds));  // in kmp_calculate_trip_count()
  245: res = static_cast<kmp_uint64>(static_cast<kmp_int64>(original_iv));  // in kmp_fix_iv()
  289: res = static_cast<kmp_int64>(original_iv1) ==  // in kmp_ivs_eq()
  290: static_cast<kmp_int64>(original_iv2);  // in kmp_ivs_eq()
  376: return kmp_calc_one_iv_XX<kmp_int64>(  // in kmp_calc_one_iv()
  377: (bounds_infoXX_template<kmp_int64> *)(bounds),  // in kmp_calc_one_iv()
  432: kmp_calc_one_iv_rectang_XX<kmp_int64>(  // in kmp_calc_one_iv_rectang()
  [additional matches not shown]
z_Windows_NT-586_asm.asm
   209: ; __kmp_compare_and_store64( volatile kmp_int64 *p, kmp_int64 cv, kmp_int64 sv );
   418: ; kmp_int64
   419: ; __kmp_compare_and_store_ret64( volatile kmp_int64 *p, kmp_int64 cv, kmp_int64 sv );
   694: ; __kmp_test_then_add64( volatile kmp_int64 *p, kmp_int64 d );
   802: ; __kmp_compare_and_store64( volatile kmp_int64 *p, kmp_int64 cv, kmp_int64 sv );
   903: ; kmp_int64
   904: ; __kmp_xchg_fixed64( volatile kmp_int64 *p, kmp_int64 d );
  1007: ; kmp_int64
  1008: ; __kmp_compare_and_store_ret64( volatile kmp_int64 *p, kmp_int64 cv, kmp_int64 sv );
kmp_collapse.h
   95: kmp_int64 step_64; // signed
  120: kmp_int64 step_64; // signed
  151: typename std::conditional<std::is_signed<T>::value, kmp_int64, kmp_uint64>
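The std::conditional match at line 151 is the standard idiom for widening an induction variable's type to 64 bits while preserving its signedness. The same idiom as a standalone alias (illustrative name, not the header's):

    #include <cstdint>
    #include <type_traits>

    // Widen T to a 64-bit type with the same signedness, so trip-count math on
    // narrow loop variables cannot overflow.
    template <typename T>
    using widened_iv_t =
        typename std::conditional<std::is_signed<T>::value, int64_t, uint64_t>::type;

    static_assert(std::is_same<widened_iv_t<int>, int64_t>::value, "signed widens to int64_t");
    static_assert(std::is_same<widened_iv_t<unsigned short>, uint64_t>::value, "unsigned widens to uint64_t");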
kmp.h
   292: ((kmp_int64)((((kmp_uint64)(HIGH_32)) << 32) | (kmp_uint64)(LOW_32)))
   511: kmp_int64 sched;
  1936: kmp_int64 count; // current chunk number for static & static-steal scheduling
  1937: kmp_int64 ub; /* upper-bound */
  1939: kmp_int64 lb; /* lower-bound */
  1940: kmp_int64 st; /* stride */
  1941: kmp_int64 tc; /* trip count (number of iterations) */
  1954: kmp_int64 parm1;
  1955: kmp_int64 parm2;
  1956: kmp_int64 parm3;
  [additional matches not shown]
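Line 292 is the body of a macro that packs two 32-bit values into one kmp_int64: the high word lands in bits 63..32 and the low word in bits 31..0 (for example, HIGH_32 = 1 and LOW_32 = 2 pack to 0x0000000100000002). Restated as a standalone function for illustration:

    #include <cstdint>

    // Pack high_32 into bits 63..32 and low_32 into bits 31..0.
    static inline int64_t pack_64_sketch(uint32_t high_32, uint32_t low_32) {
      return (int64_t)(((uint64_t)high_32 << 32) | (uint64_t)low_32);
    }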
kmp_gsupport.cpp
   912: (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
   950: (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
   977: (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
  1075: (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
  1123: (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
  1869: (kmp_uint64 *)&(loop_bounds[1]), (kmp_int64)step, 1, sched,  // in __GOMP_taskloop()
  1889: kmp_int64 num_dims = th->th.th_dispatch->th_doacross_info[0];  // in __kmp_GOMP_doacross_post()
  1890: kmp_int64 *vec = (kmp_int64 *)__kmp_thread_malloc(  // in __kmp_GOMP_doacross_post()
  1891: th, (size_t)(sizeof(kmp_int64) * num_dims));  // in __kmp_GOMP_doacross_post()
  1892: for (kmp_int64 i = 0; i < num_dims; ++i) {  // in __kmp_GOMP_doacross_post()
  [additional matches not shown]
kmp_alloc.cpp
    32: typedef kmp_int64 bufsize;
   950: gtid, (kmp_uint64)thr->totalloc, (kmp_int64)thr->numget,  // in bfreed()
   951: (kmp_int64)thr->numrel, (kmp_int64)thr->numpblk,  // in bfreed()
   952: (kmp_int64)thr->numpget, (kmp_int64)thr->numprel,  // in bfreed()
   953: (kmp_int64)thr->numdget, (kmp_int64)thr->numdrel);  // in bfreed()
  1626: KMP_TEST_THEN_ADD64((kmp_int64 *)&al->pool_used, desc.size_a);  // in __kmp_alloc()
  1629: KMP_TEST_THEN_ADD64((kmp_int64 *)&al->pool_used, -desc.size_a);  // in __kmp_alloc()
  1707: KMP_TEST_THEN_ADD64((kmp_int64 *)&al->pool_used, desc.size_a);  // in __kmp_alloc()
  1710: KMP_TEST_THEN_ADD64((kmp_int64 *)&al->pool_used, -desc.size_a);  // in __kmp_alloc()
  1880: KMP_TEST_THEN_ADD64((kmp_int64 *)&al->pool_used, -desc.size_a);  // in ___kmpc_free()
  [additional matches not shown]
kmp_tasking.cpp
  1714: kmp_int64 device_id) {
  4776: kmp_int64 retval;
  4780: retval = *(kmp_int64 *)((char *)task + lower_offset);
  4785: retval = (kmp_int64)*lb;
  4787: kmp_int64 *lb = RCAST(kmp_int64 *, task->shareds);
  4788: retval = (kmp_int64)*lb;
  4793: retval = *(kmp_int64 *)((char *)task + lower_offset);
  4798: kmp_int64 retval;
  4802: retval = *(kmp_int64 *)((char *)task + upper_offset);
  4807: retval = (kmp_int64)*ub;
  [additional matches not shown]
kmp_csupport.cpp
   573: *(kmp_int64 *)(&this_thr->th.th_teams_size) = 0L;  // in __kmpc_fork_teams()
  4149: kmp_int64 last, trace_count;  // in __kmpc_doacross_init()
  4174: pr_buf->th_doacross_info = (kmp_int64 *)__kmp_thread_malloc(  // in __kmpc_doacross_init()
  4175: th, sizeof(kmp_int64) * (4 * num_dims + 1));  // in __kmpc_doacross_init()
  4178: (kmp_int64)num_dims; // first element is number of dimensions  // in __kmpc_doacross_init()
  4181: pr_buf->th_doacross_info[1] = (kmp_int64)&sh_buf->doacross_num_done;  // in __kmpc_doacross_init()
  4187: kmp_int64  // in __kmpc_doacross_init()
  4239: (volatile kmp_int64 *)&sh_buf->doacross_flags, NULL, 1LL);  // in __kmpc_doacross_init()
  4253: while (*(volatile kmp_int64 *)&sh_buf->doacross_flags == 1LL)  // in __kmpc_doacross_init()
  4267: void __kmpc_doacross_wait(ident_t *loc, int gtid, const kmp_int64 *vec) {  // in __kmpc_doacross_wait()
  [additional matches not shown]
kmp_dispatch_hier.h
   58: kmp_int64 *large_chunks;
   67: large_chunks = (kmp_int64 *)__kmp_allocate(sizeof(kmp_int64) *  // in append()
   78: large_chunks[current_size] = (kmp_int64)chunk;  // in append()
   96: kmp_int64 temp4 = large_chunks[i];  // in sort()
  313: volatile kmp_int64 *val;  // in barrier()
  323: val = RCAST(volatile kmp_int64 *, &(bdata->val[current_index]));  // in barrier()
kmp_wait_release.h
   63: return KMP_TEST_THEN_ADD4_64(RCAST(volatile kmp_int64 *, f));
   78: return KMP_TEST_THEN_ADD4_64(RCAST(volatile kmp_int64 *, f));
   93: return KMP_TEST_THEN_ADD4_64(RCAST(volatile kmp_int64 *, f));
z_Windows_NT_util.cpp
  145: static kmp_int64 __kmp_win32_time;
  942: __kmp_win32_time = (kmp_int64)time.QuadPart;  // in __kmp_clear_system_time()
  978: *delta = ((double)(((kmp_int64)now.QuadPart) - __kmp_win32_time)) *  // in __kmp_read_system_time()
ompt-specific.cpp
  498: uint64_t new_thread = KMP_TEST_THEN_INC64((kmp_int64 *)&thread);  // in __ompt_get_unique_id_internal()
z_Linux_util.cpp
  397: kmp_int64 __kmp_test_then_add64(volatile kmp_int64 *p, kmp_int64 d) {  // in __kmp_test_then_add64()
  398: kmp_int64 old_value, new_value;  // in __kmp_test_then_add64()
kmp_lock.cpp
  1141: enqueued = KMP_COMPARE_AND_STORE_ACQ64((volatile kmp_int64 *)tail_id_p,  // in __kmp_acquire_queuing_lock_timed_template()
  1422: RCAST(volatile kmp_int64 *, tail_id_p), KMP_PACK_64(head, head),  // in __kmp_release_queuing_lock()
  2662: return ((kmp_int64)b - (kmp_int64)a) > 0;  // in before()
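The before() expression at 2662 is a wraparound-tolerant ordering test: cast both counters to signed 64-bit and check the sign of the difference, which stays correct across wraparound as long as the two values are within 2^63 of each other. Restated as a standalone helper (illustrative name; it mirrors the matched expression and assumes the usual two's-complement wrapping behavior):

    #include <cstdint>

    // True when a is strictly "before" b under modular (wrapping) counters.
    static inline bool before_sketch(uint64_t a, uint64_t b) {
      return ((int64_t)b - (int64_t)a) > 0;
    }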
kmp_runtime.cpp
  1960: __kmp_stkpadding += (short)((kmp_int64)dummy);  // in __kmp_fork_call()
  7099: KMP_DEBUG_ASSERT(sizeof(kmp_int64) == 8);  // in __kmp_do_serial_initialize()