Lines Matching full:bt
210 void __kmp_dist_barrier_wakeup(enum barrier_type bt, kmp_team_t *team, in __kmp_dist_barrier_wakeup() argument
214 if (bt == bs_forkjoin_barrier && TCR_4(__kmp_global.g.g_done)) in __kmp_dist_barrier_wakeup()
227 enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid, in __kmp_dist_barrier_gather() argument
247 gtid, team->t.t_id, tid, bt)); in __kmp_dist_barrier_gather()
372 gtid, team->t.t_id, tid, bt)); in __kmp_dist_barrier_gather()
376 enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid, in __kmp_dist_barrier_release() argument
387 gtid, tid, bt)); in __kmp_dist_barrier_release()
389 thr_bar = &this_thr->th.th_bar[bt].bb; in __kmp_dist_barrier_release()
414 if (bt == bs_forkjoin_barrier && TCR_4(__kmp_global.g.g_done)) in __kmp_dist_barrier_release()
423 if (bt == bs_forkjoin_barrier && TCR_4(__kmp_global.g.g_done)) in __kmp_dist_barrier_release()
429 if (bt == bs_forkjoin_barrier && TCR_4(__kmp_global.g.g_done)) in __kmp_dist_barrier_release()
460 if (bt == bs_forkjoin_barrier && TCR_4(__kmp_global.g.g_done)) in __kmp_dist_barrier_release()
471 if (bt == bs_forkjoin_barrier && TCR_4(__kmp_global.g.g_done)) in __kmp_dist_barrier_release()
502 __kmp_dist_barrier_wakeup(bt, team, tid + 1, group_end, 1, tid); in __kmp_dist_barrier_release()
524 __kmp_dist_barrier_wakeup(bt, team, tid + b->threads_per_group, nproc, in __kmp_dist_barrier_release()
542 __kmp_dist_barrier_wakeup(bt, team, tid + 1, group_end, 1, tid); in __kmp_dist_barrier_release()
551 gtid, team->t.t_id, tid, bt)); in __kmp_dist_barrier_release()
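The dist-barrier matches above already show the indexing idiom that recurs through the rest of the listing: bt selects one slot of the per-thread (this_thr->th.th_bar[bt].bb) and per-team (team->t.t_bar[bt]) barrier-state arrays, so the plain and fork/join barriers (and, where enabled, the reduction barrier) keep independent arrive/go flags. A minimal, self-contained sketch of that layout, using simplified stand-in structs rather than the real kmp_info_t/kmp_team_t:

  #include <cstdint>

  // Simplified stand-ins for the kmp types; the *_sketch names are hypothetical.
  enum barrier_type { bs_plain_barrier, bs_forkjoin_barrier, bs_last_barrier };

  struct bstate_sketch {          // per-thread flags, cf. kmp_bstate_t
    std::uint64_t b_arrived = 0;  // bumped when this thread reaches the barrier
    std::uint64_t b_go = 0;       // bumped by the releasing thread to wake this one
  };

  struct thread_sketch {          // cf. kmp_info_t's th.th_bar[]
    bstate_sketch th_bar[bs_last_barrier]; // one slot per barrier type
  };

  struct team_bar_sketch {        // cf. kmp_balign_team_t
    std::uint64_t b_arrived = 0;  // team-wide arrival counter for this barrier type
  };

  struct team_sketch {            // cf. kmp_team_t's t.t_bar[]
    team_bar_sketch t_bar[bs_last_barrier];
  };

  // Every access in the listing is routed through the bt index:
  inline bstate_sketch *thr_state(thread_sketch *thr, barrier_type bt) {
    return &thr->th_bar[bt];              // mirrors &this_thr->th.th_bar[bt].bb
  }
  inline std::uint64_t team_arrived(team_sketch *team, barrier_type bt) {
    return team->t_bar[bt].b_arrived;     // mirrors team->t.t_bar[bt].b_arrived
  }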
557 enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid, in __kmp_linear_barrier_gather_template() argument
561 kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb; in __kmp_linear_barrier_gather_template()
567 gtid, team->t.t_id, tid, bt)); in __kmp_linear_barrier_gather_template()
593 kmp_balign_team_t *team_bar = &team->t.t_bar[bt]; in __kmp_linear_barrier_gather_template()
604 KMP_CACHE_PREFETCH(&other_threads[i + 1]->th.th_bar[bt].bb.b_arrived); in __kmp_linear_barrier_gather_template()
610 &other_threads[i]->th.th_bar[bt].bb.b_arrived, new_state)); in __kmp_linear_barrier_gather_template()
615 &other_threads[i]->th.th_bar[bt].bb.b_arrived, new_state); in __kmp_linear_barrier_gather_template()
619 kmp_flag_64<> flag(&other_threads[i]->th.th_bar[bt].bb.b_arrived, in __kmp_linear_barrier_gather_template()
653 gtid, team->t.t_id, tid, bt)); in __kmp_linear_barrier_gather_template()
659 enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid, in __kmp_linear_barrier_release_template() argument
662 kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb; in __kmp_linear_barrier_release_template()
676 gtid, team->t.t_id, tid, bt)); in __kmp_linear_barrier_release_template()
700 KMP_CACHE_PREFETCH(&other_threads[i + 1]->th.th_bar[bt].bb.b_go); in __kmp_linear_barrier_release_template()
707 team->t.t_id, i, &other_threads[i]->th.th_bar[bt].bb.b_go, in __kmp_linear_barrier_release_template()
708 other_threads[i]->th.th_bar[bt].bb.b_go, in __kmp_linear_barrier_release_template()
709 other_threads[i]->th.th_bar[bt].bb.b_go + KMP_BARRIER_STATE_BUMP)); in __kmp_linear_barrier_release_template()
710 kmp_flag_64<> flag(&other_threads[i]->th.th_bar[bt].bb.b_go, in __kmp_linear_barrier_release_template()
734 if (bt == bs_forkjoin_barrier && TCR_4(__kmp_global.g.g_done)) in __kmp_linear_barrier_release_template()
744 if (bt == bs_forkjoin_barrier && TCR_4(__kmp_global.g.g_done)) in __kmp_linear_barrier_release_template()
761 gtid, team->t.t_id, tid, bt)); in __kmp_linear_barrier_release_template()
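Every release path above rechecks bt == bs_forkjoin_barrier && TCR_4(__kmp_global.g.g_done) after each wait: only a worker parked at the fork/join barrier can be woken for runtime shutdown, and in that case it must leave the release protocol rather than continue it. A hedged sketch of that guard, with a plain std::atomic standing in for the real TCR_4/__kmp_global machinery:

  #include <atomic>

  enum barrier_type { bs_plain_barrier, bs_forkjoin_barrier, bs_last_barrier };

  // Illustrative stand-in for __kmp_global.g.g_done (the real code reads it via TCR_4()).
  std::atomic<int> g_done_sketch{0};

  // True when the caller should abandon the release path and let the thread exit.
  bool should_exit_after_wait(barrier_type bt) {
    // Only the fork/join barrier doubles as the shutdown rendezvous; plain and
    // reduction barriers never consult the done flag.
    return bt == bs_forkjoin_barrier &&
           g_done_sketch.load(std::memory_order_acquire) != 0;
  }

  // Usage shape inside a release loop, mirroring the listing:
  //   flag.wait(...);
  //   if (should_exit_after_wait(bt))
  //     return;   // library is shutting down; stop participating in the barrier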
766 enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid, in __kmp_linear_barrier_gather() argument
769 bt, this_thr, gtid, tid, reduce USE_ITT_BUILD_ARG(itt_sync_obj)); in __kmp_linear_barrier_gather()
773 enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid, in __kmp_linear_barrier_gather_cancellable() argument
776 bt, this_thr, gtid, tid, reduce USE_ITT_BUILD_ARG(itt_sync_obj)); in __kmp_linear_barrier_gather_cancellable()
780 enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid, in __kmp_linear_barrier_release() argument
783 bt, this_thr, gtid, tid, propagate_icvs USE_ITT_BUILD_ARG(itt_sync_obj)); in __kmp_linear_barrier_release()
787 enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid, in __kmp_linear_barrier_release_cancellable() argument
790 bt, this_thr, gtid, tid, propagate_icvs USE_ITT_BUILD_ARG(itt_sync_obj)); in __kmp_linear_barrier_release_cancellable()
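The four linear-barrier wrappers above forward to a single function template parameterized on cancellability, so the gather/release hot paths are compiled twice (with and without cancellation polling) without duplicating the source. A simplified sketch of that wrapper pattern; the *_sketch names are hypothetical, and the real template also threads the reduce callback and ITT arguments:

  enum barrier_type { bs_plain_barrier, bs_forkjoin_barrier, bs_last_barrier };

  // Hypothetical, reduced core; returns true only when a cancellable barrier
  // observes cancellation and unwinds early.
  template <bool cancellable>
  bool linear_gather_template_sketch(barrier_type bt, int gtid, int tid) {
    (void)bt; (void)gtid; (void)tid;
    // ... gather protocol; if constexpr (cancellable) poll the cancellation flag ...
    return false;
  }

  // Non-cancellable entry point: the early-exit result is irrelevant.
  void linear_gather_sketch(barrier_type bt, int gtid, int tid) {
    (void)linear_gather_template_sketch<false>(bt, gtid, tid);
  }

  // Cancellable entry point: report whether the barrier was cancelled.
  bool linear_gather_cancellable_sketch(barrier_type bt, int gtid, int tid) {
    return linear_gather_template_sketch<true>(bt, gtid, tid);
  }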
795 enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid, in __kmp_tree_barrier_gather() argument
799 kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb; in __kmp_tree_barrier_gather()
802 kmp_uint32 branch_bits = __kmp_barrier_gather_branch_bits[bt]; in __kmp_tree_barrier_gather()
810 gtid, team->t.t_id, tid, bt)); in __kmp_tree_barrier_gather()
825 new_state = team->t.t_bar[bt].b_arrived + KMP_BARRIER_STATE_BUMP; in __kmp_tree_barrier_gather()
829 kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb; in __kmp_tree_barrier_gather()
834 &other_threads[child_tid + 1]->th.th_bar[bt].bb.b_arrived); in __kmp_tree_barrier_gather()
887 team->t.t_bar[bt].b_arrived = new_state; in __kmp_tree_barrier_gather()
889 team->t.t_bar[bt].b_arrived += KMP_BARRIER_STATE_BUMP; in __kmp_tree_barrier_gather()
893 &team->t.t_bar[bt].b_arrived, team->t.t_bar[bt].b_arrived)); in __kmp_tree_barrier_gather()
897 gtid, team->t.t_id, tid, bt)); in __kmp_tree_barrier_gather()
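The tree gather above ends with the counter update every gather variant performs: the last arriving thread publishes team->t.t_bar[bt].b_arrived += KMP_BARRIER_STATE_BUMP (or stores a precomputed new_state), while parents wait for each child's b_arrived slot to reach that same value before folding in its contribution. A small sketch of the bump arithmetic; the constant below is an assumed stand-in for KMP_BARRIER_STATE_BUMP from kmp.h:

  #include <cstdint>

  // Assumed stand-in for KMP_BARRIER_STATE_BUMP.
  constexpr std::uint64_t kBarrierStateBumpSketch = 1u << 2;

  struct team_counter_sketch {
    std::uint64_t b_arrived = 0;  // cf. team->t.t_bar[bt].b_arrived
  };

  // Value the team counter will hold once this barrier instance completes
  // (cf. new_state in the gather routines).
  std::uint64_t next_arrived_state(const team_counter_sketch &bar) {
    return bar.b_arrived + kBarrierStateBumpSketch;
  }

  // Published by the last thread to finish gathering; the counter is never reset,
  // so successive barrier instances are distinguished by successive bumps.
  void publish_arrival(team_counter_sketch &bar, std::uint64_t new_state) {
    bar.b_arrived = new_state;
  }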
901 enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid, in __kmp_tree_barrier_release() argument
905 kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb; in __kmp_tree_barrier_release()
907 kmp_uint32 branch_bits = __kmp_barrier_release_branch_bits[bt]; in __kmp_tree_barrier_release()
928 if (bt == bs_forkjoin_barrier && TCR_4(__kmp_global.g.g_done)) in __kmp_tree_barrier_release()
938 if (bt == bs_forkjoin_barrier && TCR_4(__kmp_global.g.g_done)) in __kmp_tree_barrier_release()
956 gtid, team->t.t_id, tid, bt)); in __kmp_tree_barrier_release()
967 kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb; in __kmp_tree_barrier_release()
972 &other_threads[child_tid + 1]->th.th_bar[bt].bb.b_go); in __kmp_tree_barrier_release()
1002 gtid, team->t.t_id, tid, bt)); in __kmp_tree_barrier_release()
1007 enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid, in __kmp_hyper_barrier_gather() argument
1011 kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb; in __kmp_hyper_barrier_gather()
1015 kmp_uint32 branch_bits = __kmp_barrier_gather_branch_bits[bt]; in __kmp_hyper_barrier_gather()
1023 gtid, team->t.t_id, tid, bt)); in __kmp_hyper_barrier_gather()
1063 new_state = team->t.t_bar[bt].b_arrived + KMP_BARRIER_STATE_BUMP; in __kmp_hyper_barrier_gather()
1068 kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb; in __kmp_hyper_barrier_gather()
1074 &other_threads[next_child_tid]->th.th_bar[bt].bb.b_arrived); in __kmp_hyper_barrier_gather()
1110 team->t.t_bar[bt].b_arrived += KMP_BARRIER_STATE_BUMP; in __kmp_hyper_barrier_gather()
1112 team->t.t_bar[bt].b_arrived = new_state; in __kmp_hyper_barrier_gather()
1116 &team->t.t_bar[bt].b_arrived, team->t.t_bar[bt].b_arrived)); in __kmp_hyper_barrier_gather()
1120 gtid, team->t.t_id, tid, bt)); in __kmp_hyper_barrier_gather()
1126 enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid, in __kmp_hyper_barrier_release() argument
1130 kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb; in __kmp_hyper_barrier_release()
1133 kmp_uint32 branch_bits = __kmp_barrier_release_branch_bits[bt]; in __kmp_hyper_barrier_release()
1149 gtid, team->t.t_id, tid, bt)); in __kmp_hyper_barrier_release()
1169 if (bt == bs_forkjoin_barrier && TCR_4(__kmp_global.g.g_done)) in __kmp_hyper_barrier_release()
1179 if (bt == bs_forkjoin_barrier && TCR_4(__kmp_global.g.g_done)) in __kmp_hyper_barrier_release()
1234 kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb; in __kmp_hyper_barrier_release()
1244 &other_threads[next_child_tid]->th.th_bar[bt].bb.b_go); in __kmp_hyper_barrier_release()
1277 gtid, team->t.t_id, tid, bt)); in __kmp_hyper_barrier_release()
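Both hyper-barrier routines look up __kmp_barrier_gather_branch_bits[bt] / __kmp_barrier_release_branch_bits[bt]: the gather and release trees are shaped per barrier type, with a branching factor of 1 << branch_bits. A hedged sketch of that lookup; the array contents below are illustrative defaults, not the runtime's actual configuration:

  #include <cstdint>

  enum barrier_type { bs_plain_barrier, bs_forkjoin_barrier, bs_last_barrier };

  // Illustrative per-barrier-type settings; the real arrays are filled at
  // library startup from the barrier environment settings.
  std::uint32_t gather_branch_bits_sketch[bs_last_barrier]  = {2, 2};
  std::uint32_t release_branch_bits_sketch[bs_last_barrier] = {2, 2};

  // Fan-in used while gathering arrivals for this barrier type.
  std::uint32_t gather_branch_factor(barrier_type bt) {
    return 1u << gather_branch_bits_sketch[bt];   // e.g. a 4-ary gather tree
  }

  // Fan-out used while releasing waiters for this barrier type.
  std::uint32_t release_branch_factor(barrier_type bt) {
    return 1u << release_branch_bits_sketch[bt];
  }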
1290 static bool __kmp_init_hierarchical_barrier_thread(enum barrier_type bt, in __kmp_init_hierarchical_barrier_thread() argument
1335 &team->t.t_threads[thr_bar->parent_tid]->th.th_bar[bt].bb; in __kmp_init_hierarchical_barrier_thread()
1340 &team->t.t_threads[thr_bar->parent_tid]->th.th_bar[bt].bb; in __kmp_init_hierarchical_barrier_thread()
1358 enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid, in __kmp_hierarchical_barrier_gather() argument
1362 kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb; in __kmp_hierarchical_barrier_gather()
1379 gtid, team->t.t_id, tid, bt)); in __kmp_hierarchical_barrier_gather()
1389 (void)__kmp_init_hierarchical_barrier_thread(bt, thr_bar, nproc, gtid, tid, in __kmp_hierarchical_barrier_gather()
1395 (kmp_uint64)team->t.t_bar[bt].b_arrived + KMP_BARRIER_STATE_BUMP; in __kmp_hierarchical_barrier_gather()
1403 : team->t.t_bar[bt].b_arrived | thr_bar->leaf_state; in __kmp_hierarchical_barrier_gather()
1436 kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb; in __kmp_hierarchical_barrier_gather()
1465 kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb; in __kmp_hierarchical_barrier_gather()
1507 thr_bar->b_arrived = team->t.t_bar[bt].b_arrived + KMP_BARRIER_STATE_BUMP; in __kmp_hierarchical_barrier_gather()
1514 team->t.t_bar[bt].b_arrived = new_state; in __kmp_hierarchical_barrier_gather()
1518 &team->t.t_bar[bt].b_arrived, team->t.t_bar[bt].b_arrived)); in __kmp_hierarchical_barrier_gather()
1523 gtid, team->t.t_id, tid, bt)); in __kmp_hierarchical_barrier_gather()
1527 enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid, in __kmp_hierarchical_barrier_release() argument
1531 kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb; in __kmp_hierarchical_barrier_release()
1540 gtid, team->t.t_id, tid, bt)); in __kmp_hierarchical_barrier_release()
1557 thr_bar->offset + 1, bt, in __kmp_hierarchical_barrier_release()
1571 if (bt == bs_forkjoin_barrier && TCR_4(__kmp_global.g.g_done)) in __kmp_hierarchical_barrier_release()
1604 team_change = __kmp_init_hierarchical_barrier_thread(bt, thr_bar, nproc, gtid, in __kmp_hierarchical_barrier_release()
1655 &team->t.t_threads[child_tid]->th.th_bar[bt].bb; in __kmp_hierarchical_barrier_release()
1686 kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb; in __kmp_hierarchical_barrier_release()
1712 kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb; in __kmp_hierarchical_barrier_release()
1734 gtid, team->t.t_id, tid, bt)); in __kmp_hierarchical_barrier_release()
1767 static int __kmp_barrier_template(enum barrier_type bt, int gtid, int is_split, in __kmp_barrier_template() argument
1793 barrier_kind = __ompt_get_barrier_kind(bt, this_thr); in __kmp_barrier_template()
1836 itt_sync_obj = __kmp_itt_barrier_object(gtid, bt, 1); in __kmp_barrier_template()
1868 team->t.t_bar[bt].b_master_arrived += 1; in __kmp_barrier_template()
1870 this_thr->th.th_bar[bt].bb.b_worker_arrived += 1; in __kmp_barrier_template()
1883 bt, this_thr, gtid, tid, reduce USE_ITT_BUILD_ARG(itt_sync_obj)); in __kmp_barrier_template()
1885 switch (__kmp_barrier_gather_pattern[bt]) { in __kmp_barrier_template()
1887 __kmp_dist_barrier_gather(bt, this_thr, gtid, tid, in __kmp_barrier_template()
1893 KMP_ASSERT(__kmp_barrier_gather_branch_bits[bt]); in __kmp_barrier_template()
1894 __kmp_hyper_barrier_gather(bt, this_thr, gtid, tid, in __kmp_barrier_template()
1900 bt, this_thr, gtid, tid, reduce USE_ITT_BUILD_ARG(itt_sync_obj)); in __kmp_barrier_template()
1905 KMP_ASSERT(__kmp_barrier_gather_branch_bits[bt]); in __kmp_barrier_template()
1906 __kmp_tree_barrier_gather(bt, this_thr, gtid, tid, in __kmp_barrier_template()
1911 __kmp_linear_barrier_gather(bt, this_thr, gtid, tid, in __kmp_barrier_template()
1927 team->t.t_bar[bt].b_team_arrived += 1; in __kmp_barrier_template()
2001 bt, this_thr, gtid, tid, FALSE USE_ITT_BUILD_ARG(itt_sync_obj)); in __kmp_barrier_template()
2003 switch (__kmp_barrier_release_pattern[bt]) { in __kmp_barrier_template()
2005 KMP_ASSERT(__kmp_barrier_release_branch_bits[bt]); in __kmp_barrier_template()
2006 __kmp_dist_barrier_release(bt, this_thr, gtid, tid, in __kmp_barrier_template()
2011 KMP_ASSERT(__kmp_barrier_release_branch_bits[bt]); in __kmp_barrier_template()
2012 __kmp_hyper_barrier_release(bt, this_thr, gtid, tid, in __kmp_barrier_template()
2018 bt, this_thr, gtid, tid, FALSE USE_ITT_BUILD_ARG(itt_sync_obj)); in __kmp_barrier_template()
2022 KMP_ASSERT(__kmp_barrier_release_branch_bits[bt]); in __kmp_barrier_template()
2023 __kmp_tree_barrier_release(bt, this_thr, gtid, tid, in __kmp_barrier_template()
2028 __kmp_linear_barrier_release(bt, this_thr, gtid, tid, in __kmp_barrier_template()
2052 itt_sync_obj = __kmp_itt_barrier_object(gtid, bt, 1); in __kmp_barrier_template()
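The __kmp_barrier_template matches show the central dispatch: __kmp_barrier_gather_pattern[bt] and __kmp_barrier_release_pattern[bt] pick which algorithm (dist, hyper, hierarchical, tree, or linear) serves this barrier type, and every variant receives the same bt/this_thr/gtid/tid arguments. A reduced sketch of that switch, with hypothetical stand-ins for the pattern enum and the per-algorithm entry points:

  enum barrier_type { bs_plain_barrier, bs_forkjoin_barrier, bs_last_barrier };

  // Stand-in for the pattern enum; the value names mirror the real ones.
  enum bar_pattern_sketch { bp_linear_bar, bp_tree_bar, bp_hyper_bar,
                            bp_hierarchical_bar, bp_dist_bar };

  // Illustrative configuration table: one gather pattern per barrier type.
  bar_pattern_sketch gather_pattern_sketch[bs_last_barrier] = {bp_hyper_bar, bp_hyper_bar};

  // Hypothetical entry points standing in for the __kmp_*_barrier_gather routines.
  void dist_gather_sketch(barrier_type, int, int) {}
  void hyper_gather_sketch(barrier_type, int, int) {}
  void hierarchical_gather_sketch(barrier_type, int, int) {}
  void tree_gather_sketch(barrier_type, int, int) {}
  void linear_gather_sketch(barrier_type, int, int) {}

  // Dispatch shape mirroring the switch in __kmp_barrier_template (the release
  // side switches on the release pattern array in the same way).
  void gather_dispatch_sketch(barrier_type bt, int gtid, int tid) {
    switch (gather_pattern_sketch[bt]) {
    case bp_dist_bar:         dist_gather_sketch(bt, gtid, tid); break;
    case bp_hyper_bar:        hyper_gather_sketch(bt, gtid, tid); break;
    case bp_hierarchical_bar: hierarchical_gather_sketch(bt, gtid, tid); break;
    case bp_tree_bar:         tree_gather_sketch(bt, gtid, tid); break;
    default:                  linear_gather_sketch(bt, gtid, tid); break;
    }
  }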
2099 int __kmp_barrier(enum barrier_type bt, int gtid, int is_split, in __kmp_barrier() argument
2102 return __kmp_barrier_template<>(bt, gtid, is_split, reduce_size, reduce_data, in __kmp_barrier()
2130 void __kmp_end_split_barrier(enum barrier_type bt, int gtid) { in __kmp_end_split_barrier() argument
2133 KMP_DEBUG_ASSERT(bt < bs_last_barrier); in __kmp_end_split_barrier()
2140 switch (__kmp_barrier_release_pattern[bt]) { in __kmp_end_split_barrier()
2142 __kmp_dist_barrier_release(bt, this_thr, gtid, tid, in __kmp_end_split_barrier()
2147 KMP_ASSERT(__kmp_barrier_release_branch_bits[bt]); in __kmp_end_split_barrier()
2148 __kmp_hyper_barrier_release(bt, this_thr, gtid, tid, in __kmp_end_split_barrier()
2153 __kmp_hierarchical_barrier_release(bt, this_thr, gtid, tid, in __kmp_end_split_barrier()
2158 KMP_ASSERT(__kmp_barrier_release_branch_bits[bt]); in __kmp_end_split_barrier()
2159 __kmp_tree_barrier_release(bt, this_thr, gtid, tid, in __kmp_end_split_barrier()
2164 __kmp_linear_barrier_release(bt, this_thr, gtid, tid, in __kmp_end_split_barrier()