Lines matching references to th — each entry gives the source line number, the matching line, and the enclosing function (entries marked "argument" or "local" are where th is declared).

121 int new_size = level + thr->th.th_set_nested_nth_sz; in __kmp_override_nested_nth()
126 new_nested_nth->nth[i] = thr->th.th_set_nested_nth[j]; in __kmp_override_nested_nth()
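The two entries above come from splicing the thread's pending nested num_threads list (th_set_nested_nth) into a fresh array sized for the current nesting level. A minimal sketch of that merge, with simplified types and illustrative index bounds (the exact copy bounds live in __kmp_override_nested_nth()):

    #include <vector>

    // Splice the user-requested nested-nth list into a new list long
    // enough for 'level' enclosing levels; levels above the splice point
    // keep the default 0.  Shapes are reduced for illustration.
    std::vector<int> override_nested_nth(const std::vector<int> &set_nested_nth,
                                         int level) {
      int new_size = level + (int)set_nested_nth.size();
      std::vector<int> merged(new_size, 0);
      for (int i = level, j = 0; i < new_size; ++i, ++j)
        merged[i] = set_nested_nth[j]; // mirrors nth[i] = th_set_nested_nth[j]
      return merged;
    }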
187 stack_size = (size_t)TCR_PTR(thr->th.th_info.ds.ds_stacksize); in __kmp_get_global_thread_id()
188 stack_base = (char *)TCR_PTR(thr->th.th_info.ds.ds_stackbase); in __kmp_get_global_thread_id()
229 if (!TCR_4(other_threads[i]->th.th_info.ds.ds_stackgrow)) { in __kmp_get_global_thread_id()
233 stack_base = (char *)other_threads[i]->th.th_info.ds.ds_stackbase; in __kmp_get_global_thread_id()
235 TCW_PTR(other_threads[i]->th.th_info.ds.ds_stackbase, stack_addr); in __kmp_get_global_thread_id()
236 TCW_PTR(other_threads[i]->th.th_info.ds.ds_stacksize, in __kmp_get_global_thread_id()
237 other_threads[i]->th.th_info.ds.ds_stacksize + stack_addr - in __kmp_get_global_thread_id()
240 TCW_PTR(other_threads[i]->th.th_info.ds.ds_stacksize, in __kmp_get_global_thread_id()
246 char *stack_end = (char *)other_threads[i]->th.th_info.ds.ds_stackbase; in __kmp_get_global_thread_id()
247 char *stack_beg = stack_end - other_threads[i]->th.th_info.ds.ds_stacksize; in __kmp_get_global_thread_id()
249 other_threads[i]->th.th_info.ds.ds_stacksize, in __kmp_get_global_thread_id()
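The __kmp_get_global_thread_id() entries show how the runtime identifies the calling thread from a stack address: each registered thread records ds_stackbase (high end, stacks grow downward) and ds_stacksize, and the slow path scans for the descriptor whose range contains a local address, growing the recorded extent when ds_stackgrow allows. A minimal sketch of the containment test, using a reduced stand-in for the ds_* fields:

    #include <cstddef>

    // Reduced stand-in for th.th_info.ds — only the two fields the test
    // needs.  Valid stack range is [base - size, base).
    struct stack_desc {
      char *ds_stackbase;  // high address of the stack
      size_t ds_stacksize; // current (or initial) extent
    };

    static bool stack_contains(const stack_desc &ds, const char *addr) {
      char *stack_end = ds.ds_stackbase;
      char *stack_beg = stack_end - ds.ds_stacksize;
      return addr >= stack_beg && addr < stack_end;
    }

    // Slow-path sketch: find the thread whose stack holds the probe
    // address (e.g. the address of a local variable), or -1 if none.
    int gtid_from_stack(const stack_desc *threads, int nthreads,
                        const char *stack_addr) {
      for (int i = 0; i < nthreads; ++i)
        if (stack_contains(threads[i], stack_addr))
          return i;
      return -1;
    }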
298 void __kmp_check_stack_overlap(kmp_info_t *th) { in __kmp_check_stack_overlap() argument
306 stack_end = (char *)th->th.th_info.ds.ds_stackbase; in __kmp_check_stack_overlap()
307 stack_beg = stack_end - th->th.th_info.ds.ds_stacksize; in __kmp_check_stack_overlap()
309 gtid = __kmp_gtid_from_thread(th); in __kmp_check_stack_overlap()
313 gtid, stack_beg, stack_end, th->th.th_info.ds.ds_stacksize, in __kmp_check_stack_overlap()
315 (th->th.th_info.ds.ds_stackgrow) ? "initial" : "actual"); in __kmp_check_stack_overlap()
318 gtid, stack_beg, stack_end, th->th.th_info.ds.ds_stacksize, in __kmp_check_stack_overlap()
320 (th->th.th_info.ds.ds_stackgrow) ? "initial" : "actual"); in __kmp_check_stack_overlap()
326 gtid = __kmp_gtid_from_thread(th); in __kmp_check_stack_overlap()
331 stack_end = (char *)th->th.th_info.ds.ds_stackbase; in __kmp_check_stack_overlap()
332 stack_beg = stack_end - th->th.th_info.ds.ds_stacksize; in __kmp_check_stack_overlap()
338 if (f_th && f_th != th) { in __kmp_check_stack_overlap()
340 (char *)TCR_PTR(f_th->th.th_info.ds.ds_stackbase); in __kmp_check_stack_overlap()
342 other_stack_end - (size_t)TCR_PTR(f_th->th.th_info.ds.ds_stacksize); in __kmp_check_stack_overlap()
350 (size_t)TCR_PTR(f_th->th.th_info.ds.ds_stacksize), in __kmp_check_stack_overlap()
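__kmp_check_stack_overlap() derives [stack_beg, stack_end) for the given thread and compares it against every other registered stack; the core is the standard half-open interval overlap predicate, sketched below (the runtime raises a fatal error when it fires):

    // Two half-open ranges [beg1, end1) and [beg2, end2) overlap iff each
    // starts before the other ends.
    static bool stacks_overlap(const char *beg1, const char *end1,
                               const char *beg2, const char *end2) {
      return beg1 < end2 && beg2 < end1;
    }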
505 __kmp_print_storage_map_gtid(gtid, &thr->th.th_info, &thr->th.th_team, in __kmp_print_thread_storage_map()
508 __kmp_print_storage_map_gtid(gtid, &thr->th.th_local, &thr->th.th_pri_head, in __kmp_print_thread_storage_map()
512 gtid, &thr->th.th_bar[0], &thr->th.th_bar[bs_last_barrier], in __kmp_print_thread_storage_map()
515 __kmp_print_storage_map_gtid(gtid, &thr->th.th_bar[bs_plain_barrier], in __kmp_print_thread_storage_map()
516 &thr->th.th_bar[bs_plain_barrier + 1], in __kmp_print_thread_storage_map()
520 __kmp_print_storage_map_gtid(gtid, &thr->th.th_bar[bs_forkjoin_barrier], in __kmp_print_thread_storage_map()
521 &thr->th.th_bar[bs_forkjoin_barrier + 1], in __kmp_print_thread_storage_map()
526 __kmp_print_storage_map_gtid(gtid, &thr->th.th_bar[bs_reduction_barrier], in __kmp_print_thread_storage_map()
527 &thr->th.th_bar[bs_reduction_barrier + 1], in __kmp_print_thread_storage_map()
654 if (__kmp_threads[gtid]->th.th_root->r.r_active) in __kmp_parallel_deo()
680 if (__kmp_threads[gtid]->th.th_root->r.r_active) in __kmp_parallel_dxo()
701 kmp_info_t *th; in __kmp_enter_single() local
708 th = __kmp_threads[gtid]; in __kmp_enter_single()
709 team = th->th.th_team; in __kmp_enter_single()
712 th->th.th_ident = id_ref; in __kmp_enter_single()
717 kmp_int32 old_this = th->th.th_local.this_construct; in __kmp_enter_single()
719 ++th->th.th_local.this_construct; in __kmp_enter_single()
725 th->th.th_local.this_construct); in __kmp_enter_single()
729 KMP_MASTER_GTID(gtid) && th->th.th_teams_microtask == NULL && in __kmp_enter_single()
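The __kmp_enter_single() entries show the arbitration for an OpenMP single: each thread bumps its private construct counter (th_local.this_construct), and the first thread to compare-and-swap the team's shared counter forward claims the construct. A minimal sketch with std::atomic, assuming the shared counter equals the number of singles already claimed:

    #include <atomic>

    // team_construct: shared per-team counter (t_construct in the runtime).
    // this_construct: the caller's private count of constructs passed.
    // Returns true for the single winning thread.
    static bool enter_single(std::atomic<int> &team_construct,
                             int &this_construct) {
      int old_this = this_construct;
      ++this_construct; // every thread counts the construct, winner or not
      int expected = old_this;
      // Fails either if another thread already claimed this single or if
      // the CAS race is lost; exactly one caller succeeds.
      return team_construct.compare_exchange_strong(expected, old_this + 1);
    }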
863 int cg_nthreads = this_thr->th.th_cg_roots->cg_nthreads; in __kmp_reserve_threads()
864 int max_cg_threads = this_thr->th.th_cg_roots->cg_thread_limit; in __kmp_reserve_threads()
949 if (this_thr->th.th_nt_strict && new_nthreads < set_nthreads) { in __kmp_reserve_threads()
950 __kmpc_error(this_thr->th.th_nt_loc, this_thr->th.th_nt_sev, in __kmp_reserve_threads()
951 this_thr->th.th_nt_msg); in __kmp_reserve_threads()
970 master_th->th.th_info.ds.ds_tid = 0; in __kmp_fork_team_threads()
971 master_th->th.th_team = team; in __kmp_fork_team_threads()
972 master_th->th.th_team_nproc = team->t.t_nproc; in __kmp_fork_team_threads()
973 master_th->th.th_team_master = master_th; in __kmp_fork_team_threads()
974 master_th->th.th_team_serialized = FALSE; in __kmp_fork_team_threads()
975 master_th->th.th_dispatch = &team->t.t_dispatch[0]; in __kmp_fork_team_threads()
980 kmp_hot_team_ptr_t *hot_teams = master_th->th.th_hot_teams; in __kmp_fork_team_threads()
984 if (master_th->th.th_teams_microtask) { // are we inside the teams? in __kmp_fork_team_threads()
985 if (master_th->th.th_teams_size.nteams > 1) { in __kmp_fork_team_threads()
990 master_th->th.th_teams_level == team->t.t_level) { in __kmp_fork_team_threads()
1025 KMP_DEBUG_ASSERT(thr->th.th_team == team); in __kmp_fork_team_threads()
1033 thr->th.th_teams_microtask = master_th->th.th_teams_microtask; in __kmp_fork_team_threads()
1034 thr->th.th_teams_level = master_th->th.th_teams_level; in __kmp_fork_team_threads()
1035 thr->th.th_teams_size = master_th->th.th_teams_size; in __kmp_fork_team_threads()
1038 kmp_balign_t *balign = team->t.t_threads[i]->th.th_bar; in __kmp_fork_team_threads()
1073 __kmp_gtid_from_thread(master_th), master_th->th.th_task_team, in __kmp_fork_team_threads()
1074 team->t.t_parent, team->t.t_task_team[master_th->th.th_task_state], in __kmp_fork_team_threads()
1079 master_th->th.th_task_state); in __kmp_fork_team_threads()
1084 KMP_DEBUG_ASSERT(team->t.t_threads[1]->th.th_task_state == 0 || in __kmp_fork_team_threads()
1085 team->t.t_threads[1]->th.th_task_state == 1); in __kmp_fork_team_threads()
1086 KMP_CHECK_UPDATE(master_th->th.th_task_state, in __kmp_fork_team_threads()
1087 team->t.t_threads[1]->th.th_task_state); in __kmp_fork_team_threads()
1089 master_th->th.th_task_state = 0; in __kmp_fork_team_threads()
1094 master_th->th.th_task_state); in __kmp_fork_team_threads()
1096 master_th->th.th_task_state = 0; in __kmp_fork_team_threads()
1103 if (thr->th.th_prev_num_threads != team->t.t_nproc || in __kmp_fork_team_threads()
1104 thr->th.th_prev_level != team->t.t_level) { in __kmp_fork_team_threads()
1197 serial_team = this_thr->th.th_serial_team; in __kmp_serialized_parallel()
1203 kmp_proc_bind_t proc_bind = this_thr->th.th_set_proc_bind; in __kmp_serialized_parallel()
1204 if (this_thr->th.th_current_task->td_icvs.proc_bind == proc_bind_false) { in __kmp_serialized_parallel()
1209 proc_bind = this_thr->th.th_current_task->td_icvs.proc_bind; in __kmp_serialized_parallel()
1212 this_thr->th.th_set_proc_bind = proc_bind_default; in __kmp_serialized_parallel()
1215 this_thr->th.th_set_nproc = 0; in __kmp_serialized_parallel()
1221 this_thr->th.ompt_thread_info.state != ompt_state_overhead) { in __kmp_serialized_parallel()
1238 if (this_thr->th.th_team != serial_team) { in __kmp_serialized_parallel()
1240 int level = this_thr->th.th_team->t.t_level; in __kmp_serialized_parallel()
1250 __kmp_allocate_team(this_thr->th.th_root, 1, 1, in __kmp_serialized_parallel()
1254 proc_bind, &this_thr->th.th_current_task->td_icvs, in __kmp_serialized_parallel()
1261 new_team->t.t_parent = this_thr->th.th_team; in __kmp_serialized_parallel()
1263 this_thr->th.th_serial_team = serial_team; in __kmp_serialized_parallel()
1283 KMP_DEBUG_ASSERT(this_thr->th.th_team != serial_team); in __kmp_serialized_parallel()
1287 serial_team->t.t_parent = this_thr->th.th_team; in __kmp_serialized_parallel()
1288 if (this_thr->th.th_team->t.t_nested_nth) in __kmp_serialized_parallel()
1289 serial_team->t.t_nested_nth = this_thr->th.th_team->t.t_nested_nth; in __kmp_serialized_parallel()
1293 serial_team->t.t_primary_task_state = this_thr->th.th_task_state; in __kmp_serialized_parallel()
1294 serial_team->t.t_sched.sched = this_thr->th.th_team->t.t_sched.sched; in __kmp_serialized_parallel()
1295 this_thr->th.th_team = serial_team; in __kmp_serialized_parallel()
1296 serial_team->t.t_master_tid = this_thr->th.th_info.ds.ds_tid; in __kmp_serialized_parallel()
1299 this_thr->th.th_current_task)); in __kmp_serialized_parallel()
1300 KMP_ASSERT(this_thr->th.th_current_task->td_flags.executing == 1); in __kmp_serialized_parallel()
1301 this_thr->th.th_current_task->td_flags.executing = 0; in __kmp_serialized_parallel()
1308 copy_icvs(&this_thr->th.th_current_task->td_icvs, in __kmp_serialized_parallel()
1309 &this_thr->th.th_current_task->td_parent->td_icvs); in __kmp_serialized_parallel()
1314 if (this_thr->th.th_team->t.t_nested_nth) in __kmp_serialized_parallel()
1315 nested_nth = this_thr->th.th_team->t.t_nested_nth; in __kmp_serialized_parallel()
1317 this_thr->th.th_current_task->td_icvs.nproc = nested_nth->nth[level + 1]; in __kmp_serialized_parallel()
1322 this_thr->th.th_current_task->td_icvs.proc_bind = in __kmp_serialized_parallel()
1329 this_thr->th.th_info.ds.ds_tid = 0; in __kmp_serialized_parallel()
1332 this_thr->th.th_team_nproc = 1; in __kmp_serialized_parallel()
1333 this_thr->th.th_team_master = this_thr; in __kmp_serialized_parallel()
1334 this_thr->th.th_team_serialized = 1; in __kmp_serialized_parallel()
1335 this_thr->th.th_task_team = NULL; in __kmp_serialized_parallel()
1336 this_thr->th.th_task_state = 0; in __kmp_serialized_parallel()
1340 serial_team->t.t_def_allocator = this_thr->th.th_def_allocator; // save in __kmp_serialized_parallel()
1351 this_thr->th.th_dispatch = serial_team->t.t_dispatch; in __kmp_serialized_parallel()
1358 KMP_DEBUG_ASSERT(this_thr->th.th_team == serial_team); in __kmp_serialized_parallel()
1362 this_thr->th.th_team_serialized = serial_team->t.t_serialized; in __kmp_serialized_parallel()
1365 int level = this_thr->th.th_team->t.t_level; in __kmp_serialized_parallel()
1373 this_thr->th.th_current_task->td_icvs.nproc = nested_nth->nth[level + 1]; in __kmp_serialized_parallel()
1390 this_thr->th.th_dispatch = serial_team->t.t_dispatch; in __kmp_serialized_parallel()
1402 if (this_thr->th.th_prev_level != serial_team->t.t_level || in __kmp_serialized_parallel()
1403 this_thr->th.th_prev_num_threads != 1) { in __kmp_serialized_parallel()
1406 this_thr->th.th_prev_level = serial_team->t.t_level; in __kmp_serialized_parallel()
1407 this_thr->th.th_prev_num_threads = 1; in __kmp_serialized_parallel()
1416 this_thr->th.ompt_thread_info.state != ompt_state_overhead) { in __kmp_serialized_parallel()
1438 this_thr->th.ompt_thread_info.state = ompt_state_work_parallel; in __kmp_serialized_parallel()
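Taken together, the __kmp_serialized_parallel() entries show the master being re-pointed at its serial team: t_serialized is bumped for nesting, the per-thread team fields are rewritten to a one-thread view, and the old view stays reachable through the serial team's parent link so join can restore it. A compressed, illustrative sketch of the field swap (the real kmp_team_t/kmp_info_t carry far more state):

    // Reduced shapes, illustration only.
    struct team { team *parent; int serialized; };
    struct thread_info {
      team *cur_team, *serial_team;
      int team_nproc, team_serialized, tid;
    };

    void enter_serialized_parallel(thread_info &th) {
      th.serial_team->parent = th.cur_team; // join restores from here
      ++th.serial_team->serialized;         // nested serial regions re-bump
      th.cur_team = th.serial_team;
      th.tid = 0;                           // master of its own team of one
      th.team_nproc = 1;
      th.team_serialized = 1;
    }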
1449 return (master_th->th.th_teams_microtask && ap && in __kmp_is_fork_in_teams()
1484 if (parent_team == master_th->th.th_serial_team) { in __kmp_fork_in_teams()
1524 master_th->th.ompt_thread_info.state = ompt_state_work_parallel; in __kmp_fork_in_teams()
1561 master_th->th.ompt_thread_info.state = ompt_state_overhead; in __kmp_fork_in_teams()
1572 parent_team->t.t_def_allocator = master_th->th.th_def_allocator; // save in __kmp_fork_in_teams()
1579 master_th->th.th_teams_size.nth = parent_team->t.t_nproc; in __kmp_fork_in_teams()
1592 if (master_set_numthreads <= master_th->th.th_teams_size.nth) { in __kmp_fork_in_teams()
1597 int old_proc = master_th->th.th_teams_size.nth; in __kmp_fork_in_teams()
1604 other_threads[i]->th.th_team_nproc = master_set_numthreads; in __kmp_fork_in_teams()
1608 master_th->th.th_set_nproc = 0; in __kmp_fork_in_teams()
1621 kmp_proc_bind_t proc_bind = master_th->th.th_set_proc_bind; in __kmp_fork_in_teams()
1624 if (master_th->th.th_current_task->td_icvs.proc_bind == proc_bind_false) { in __kmp_fork_in_teams()
1629 proc_bind = master_th->th.th_current_task->td_icvs.proc_bind; in __kmp_fork_in_teams()
1637 master_th->th.th_current_task->td_icvs.proc_bind)) { in __kmp_fork_in_teams()
1644 master_th->th.th_current_task->td_icvs.proc_bind != proc_bind_icv) { in __kmp_fork_in_teams()
1646 for (i = 0; i < master_th->th.th_team_nproc; ++i) { in __kmp_fork_in_teams()
1647 other_threads[i]->th.th_current_task->td_icvs.proc_bind = proc_bind_icv; in __kmp_fork_in_teams()
1651 master_th->th.th_set_proc_bind = proc_bind_default; in __kmp_fork_in_teams()
1658 && master_th->th.th_teams_size.nteams == 1) { in __kmp_fork_in_teams()
1660 master_th->th.th_frame_time = tmp_time; in __kmp_fork_in_teams()
1729 master_th->th.th_serial_team->t.t_pkfn = microtask; in __kmp_serial_fork_call()
1734 master_th->th.th_serial_team->t.t_ident = loc; in __kmp_serial_fork_call()
1737 master_th->th.th_serial_team->t.t_level--; in __kmp_serial_fork_call()
1763 master_th->th.ompt_thread_info.state = ompt_state_work_parallel; in __kmp_serial_fork_call()
1795 master_th->th.ompt_thread_info.state = ompt_state_overhead; in __kmp_serial_fork_call()
1799 KMP_DEBUG_ASSERT(master_th->th.th_team == master_th->th.th_serial_team); in __kmp_serial_fork_call()
1800 team = master_th->th.th_team; in __kmp_serial_fork_call()
1827 master_th->th.ompt_thread_info.state = ompt_state_overhead; in __kmp_serial_fork_call()
1862 master_th->th.ompt_thread_info.state = ompt_state_work_parallel; in __kmp_serial_fork_call()
1895 master_th->th.ompt_thread_info.state = ompt_state_overhead; in __kmp_serial_fork_call()
1975 parent_team = master_th->th.th_team; in __kmp_fork_call()
1976 master_tid = master_th->th.th_info.ds.ds_tid; in __kmp_fork_call()
1977 master_this_cons = master_th->th.th_local.this_construct; in __kmp_fork_call()
1978 root = master_th->th.th_root; in __kmp_fork_call()
1980 master_set_numthreads = master_th->th.th_set_nproc; in __kmp_fork_call()
1982 master_th->th.th_current_task->td_icvs.task_thread_limit; in __kmp_fork_call()
2005 teams_level = master_th->th.th_teams_level; in __kmp_fork_call()
2007 p_hot_teams = &master_th->th.th_hot_teams; in __kmp_fork_call()
2031 master_th->th.ompt_thread_info.state = ompt_state_overhead; in __kmp_fork_call()
2035 master_th->th.th_ident = loc; in __kmp_fork_call()
2059 master_th->th.th_current_task->td_icvs.max_active_levels)) || in __kmp_fork_call()
2095 master_th->th.th_set_nproc = 0; in __kmp_fork_call()
2112 master_th->th.th_current_task, in __kmp_fork_call()
2113 master_th->th.th_current_task->td_icvs.max_active_levels)); in __kmp_fork_call()
2117 master_th->th.th_current_task->td_flags.executing = 0; in __kmp_fork_call()
2119 if (!master_th->th.th_teams_microtask || level > teams_level) { in __kmp_fork_call()
2125 int nthreads_icv = master_th->th.th_current_task->td_icvs.nproc; in __kmp_fork_call()
2127 if (!master_th->th.th_set_nested_nth && in __kmp_fork_call()
2131 } else if (master_th->th.th_set_nested_nth) { in __kmp_fork_call()
2143 kmp_proc_bind_t proc_bind = master_th->th.th_set_proc_bind; in __kmp_fork_call()
2146 if (master_th->th.th_current_task->td_icvs.proc_bind == proc_bind_false) { in __kmp_fork_call()
2152 proc_bind = master_th->th.th_current_task->td_icvs.proc_bind; in __kmp_fork_call()
2155 if (master_th->th.th_teams_microtask && in __kmp_fork_call()
2165 master_th->th.th_current_task->td_icvs.proc_bind)) { in __kmp_fork_call()
2168 if (!master_th->th.th_teams_microtask || in __kmp_fork_call()
2175 master_th->th.th_set_proc_bind = proc_bind_default; in __kmp_fork_call()
2179 copy_icvs(&new_icvs, &master_th->th.th_current_task->td_icvs); in __kmp_fork_call()
2206 &master_th->th.th_current_task->td_icvs, in __kmp_fork_call()
2210 &master_th->th.th_current_task->td_icvs); in __kmp_fork_call()
2227 if (!master_th->th.th_teams_microtask || level > teams_level) { in __kmp_fork_call()
2244 KMP_CHECK_UPDATE(team->t.t_def_allocator, master_th->th.th_def_allocator); in __kmp_fork_call()
2254 if (master_th->th.th_set_nested_nth) { in __kmp_fork_call()
2258 KMP_INTERNAL_FREE(master_th->th.th_set_nested_nth); in __kmp_fork_call()
2259 master_th->th.th_set_nested_nth = NULL; in __kmp_fork_call()
2260 master_th->th.th_set_nested_nth_sz = 0; in __kmp_fork_call()
2261 master_th->th.th_nt_strict = false; in __kmp_fork_call()
2304 &master_th->th.th_current_task->td_icvs, loc); in __kmp_fork_call()
2307 master_th->th.ompt_thread_info.state = ompt_state_work_parallel; in __kmp_fork_call()
2314 && !master_th->th.th_teams_microtask) { // not in teams construct in __kmp_fork_call()
2323 master_th->th.th_frame_time = tmp_time; in __kmp_fork_call()
2338 KMP_DEBUG_ASSERT(team == __kmp_threads[gtid]->th.th_team); in __kmp_fork_call()
2406 master_th->th.ompt_thread_info.state = ompt_state_overhead; in __kmp_fork_call()
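Read in order, the __kmp_fork_call() entries trace the fork path: snapshot the master's state and ICVs, settle thread count and proc-bind, then either serialize or build and launch a team. A control-flow skeleton only — every helper below is a stub standing in for the runtime function of (nearly) the same name, and the real signatures take many more arguments:

    static int reserve_threads() { return 1; } // __kmp_reserve_threads
    static void serial_fork_call() {}          // __kmp_serial_fork_call
    static void allocate_team(int) {}          // __kmp_allocate_team
    static void fork_team_threads() {}         // __kmp_fork_team_threads
    static void internal_fork() {}             // __kmp_internal_fork

    void fork_call_sketch() {
      int nthreads = reserve_threads(); // honors nproc ICV, thread limits,
                                        // nested-nth overrides seen above
      if (nthreads == 1) {
        serial_fork_call();             // run the microtask on the master
        return;
      }
      allocate_team(nthreads);          // reuse a hot team or build one
      fork_team_threads();              // bind master + workers to slots
      internal_fork();                  // release workers at fork barrier
    }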
2417 thread->th.ompt_thread_info.state = in __kmp_join_restore_state()
2454 root = master_th->th.th_root; in __kmp_join_call()
2455 team = master_th->th.th_team; in __kmp_join_call()
2458 master_th->th.th_ident = loc; in __kmp_join_call()
2467 master_th->th.ompt_thread_info.state = ompt_state_overhead; in __kmp_join_call()
2476 team->t.t_task_team[master_th->th.th_task_state], in __kmp_join_call()
2477 master_th->th.th_task_team)); in __kmp_join_call()
2483 if (master_th->th.th_teams_microtask) { in __kmp_join_call()
2486 int tlevel = master_th->th.th_teams_level; in __kmp_join_call()
2527 master_th->th.th_task_state = in __kmp_join_call()
2551 (!master_th->th.th_teams_microtask || /* not in teams construct */ in __kmp_join_call()
2552 master_th->th.th_teams_size.nteams == 1)) { in __kmp_join_call()
2553 master_th->th.th_ident = loc; in __kmp_join_call()
2559 master_th->th.th_frame_time, 0, loc, in __kmp_join_call()
2560 master_th->th.th_team_nproc, 1); in __kmp_join_call()
2570 master_th->th.th_first_place = team->t.t_first_place; in __kmp_join_call()
2571 master_th->th.th_last_place = team->t.t_last_place; in __kmp_join_call()
2575 if (master_th->th.th_teams_microtask && !exit_teams && in __kmp_join_call()
2577 team->t.t_level == master_th->th.th_teams_level + 1) { in __kmp_join_call()
2606 if (master_th->th.th_team_nproc < master_th->th.th_teams_size.nth) { in __kmp_join_call()
2607 int old_num = master_th->th.th_team_nproc; in __kmp_join_call()
2608 int new_num = master_th->th.th_teams_size.nth; in __kmp_join_call()
2612 other_threads[i]->th.th_team_nproc = new_num; in __kmp_join_call()
2618 kmp_balign_t *balign = other_threads[i]->th.th_bar; in __kmp_join_call()
2628 other_threads[i]->th.th_task_state = master_th->th.th_task_state; in __kmp_join_call()
2644 master_th->th.th_info.ds.ds_tid = team->t.t_master_tid; in __kmp_join_call()
2645 master_th->th.th_local.this_construct = team->t.t_master_this_cons; in __kmp_join_call()
2647 master_th->th.th_dispatch = &parent_team->t.t_dispatch[team->t.t_master_tid]; in __kmp_join_call()
2654 if (!master_th->th.th_teams_microtask || in __kmp_join_call()
2655 team->t.t_level > master_th->th.th_teams_level) { in __kmp_join_call()
2682 master_th->th.th_def_allocator = team->t.t_def_allocator; in __kmp_join_call()
2701 master_th->th.th_team = parent_team; in __kmp_join_call()
2702 master_th->th.th_team_nproc = parent_team->t.t_nproc; in __kmp_join_call()
2703 master_th->th.th_team_master = parent_team->t.t_threads[0]; in __kmp_join_call()
2704 master_th->th.th_team_serialized = parent_team->t.t_serialized; in __kmp_join_call()
2708 parent_team != master_th->th.th_serial_team && in __kmp_join_call()
2711 master_th->th.th_serial_team USE_NESTED_HOT_ARG(NULL)); in __kmp_join_call()
2712 master_th->th.th_serial_team = parent_team; in __kmp_join_call()
2719 master_th->th.th_task_state = (kmp_uint8)team->t.t_primary_task_state; in __kmp_join_call()
2722 master_th->th.th_task_team = in __kmp_join_call()
2723 parent_team->t.t_task_team[master_th->th.th_task_state]; in __kmp_join_call()
2726 __kmp_gtid_from_thread(master_th), master_th->th.th_task_team, in __kmp_join_call()
2733 master_th->th.th_current_task->td_flags.executing = 1; in __kmp_join_call()
2738 if (master_th->th.th_team->t.t_level == 0 && __kmp_affinity.flags.reset) { in __kmp_join_call()
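On the way out, __kmp_join_call() restores the master's per-thread view from the parent team; the entries above name exactly which fields come back. A minimal sketch of the restore with reduced shapes (only the restored fields are modeled):

    struct team { team *parent; int nproc; int serialized; int master_tid; };
    struct thread_info {
      team *cur_team;
      int team_nproc, team_serialized, tid;
    };

    // Mirror of the restores visible above: the master drops back to the
    // parent team's view of the world.
    void join_restore(thread_info &master, team &finished) {
      team *parent = finished.parent;
      master.tid = finished.master_tid;            // th_info.ds.ds_tid
      master.cur_team = parent;                    // th_team
      master.team_nproc = parent->nproc;           // th_team_nproc
      master.team_serialized = parent->serialized; // th_team_serialized
    }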
2761 if (thread->th.th_team != thread->th.th_serial_team) { in __kmp_save_internal_controls()
2764 if (thread->th.th_team->t.t_serialized > 1) { in __kmp_save_internal_controls()
2767 if (thread->th.th_team->t.t_control_stack_top == NULL) { in __kmp_save_internal_controls()
2770 if (thread->th.th_team->t.t_control_stack_top->serial_nesting_level != in __kmp_save_internal_controls()
2771 thread->th.th_team->t.t_serialized) { in __kmp_save_internal_controls()
2780 copy_icvs(control, &thread->th.th_current_task->td_icvs); in __kmp_save_internal_controls()
2782 control->serial_nesting_level = thread->th.th_team->t.t_serialized; in __kmp_save_internal_controls()
2784 control->next = thread->th.th_team->t.t_control_stack_top; in __kmp_save_internal_controls()
2785 thread->th.th_team->t.t_control_stack_top = control; in __kmp_save_internal_controls()
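__kmp_save_internal_controls() lazily pushes one ICV snapshot per serialized nesting level onto the team's control stack, skipping the push when the top record already belongs to the current level. A minimal sketch of that dedup-then-push, with the ICV payload elided:

    #include <cstdlib>

    // One record per serialized nesting level (t_control_stack_top links).
    struct internal_control {
      int serial_nesting_level;
      internal_control *next;
      // ...ICV snapshot elided...
    };

    void save_internal_controls(internal_control *&stack_top,
                                int t_serialized) {
      if (stack_top && stack_top->serial_nesting_level == t_serialized)
        return; // this level already has its snapshot
      internal_control *control =
          (internal_control *)std::malloc(sizeof(internal_control));
      control->serial_nesting_level = t_serialized;
      control->next = stack_top; // singly linked push
      stack_top = control;
    }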
2805 if (thread->th.th_current_task->td_icvs.nproc == new_nth) in __kmp_set_num_threads()
2815 root = thread->th.th_root; in __kmp_set_num_threads()
2836 hot_team->t.t_threads[f]->th.th_task_team = NULL; in __kmp_set_num_threads()
2843 if (thread->th.th_hot_teams) { in __kmp_set_num_threads()
2844 KMP_DEBUG_ASSERT(hot_team == thread->th.th_hot_teams[0].hot_team); in __kmp_set_num_threads()
2845 thread->th.th_hot_teams[0].hot_team_nth = new_nth; in __kmp_set_num_threads()
2859 hot_team->t.t_threads[f]->th.th_team_nproc = new_nth; in __kmp_set_num_threads()
2919 KMP_DEBUG_ASSERT(thread->th.th_current_task); in __kmp_get_max_active_levels()
2922 gtid, thread->th.th_current_task, in __kmp_get_max_active_levels()
2923 thread->th.th_current_task->td_icvs.max_active_levels)); in __kmp_get_max_active_levels()
2924 return thread->th.th_current_task->td_icvs.max_active_levels; in __kmp_get_max_active_levels()
2978 thread->th.th_current_task->td_icvs.sched.r_sched_type = kmp_sch_static; in __kmp_set_schedule()
2980 thread->th.th_current_task->td_icvs.sched.r_sched_type = in __kmp_set_schedule()
2986 thread->th.th_current_task->td_icvs.sched.r_sched_type = in __kmp_set_schedule()
2991 orig_kind, &(thread->th.th_current_task->td_icvs.sched.r_sched_type)); in __kmp_set_schedule()
2994 thread->th.th_current_task->td_icvs.sched.chunk = KMP_DEFAULT_CHUNK; in __kmp_set_schedule()
2996 thread->th.th_current_task->td_icvs.sched.chunk = chunk; in __kmp_set_schedule()
3010 th_type = thread->th.th_current_task->td_icvs.sched.r_sched_type; in __kmp_get_schedule()
3046 *chunk = thread->th.th_current_task->td_icvs.sched.chunk; in __kmp_get_schedule()
3064 team = thr->th.th_team; in __kmp_get_ancestor_thread_num()
3069 if (thr->th.th_teams_microtask) { in __kmp_get_ancestor_thread_num()
3071 int tlevel = thr->th.th_teams_level; // the level of the teams construct in __kmp_get_ancestor_thread_num()
3122 team = thr->th.th_team; in __kmp_get_team_size()
3127 if (thr->th.th_teams_microtask) { in __kmp_get_team_size()
3129 int tlevel = thr->th.th_teams_level; // the level of the teams construct in __kmp_get_team_size()
3338 copy_icvs(&gx_icvs, &team->t.t_threads[0]->th.th_current_task->td_icvs); in __kmp_get_x_global_icvs()
3506 __kmp_printf("%2d %p\n", thread->th.th_info.ds.ds_gtid, thread); in __kmp_print_structure_thread()
3547 __kmp_printf(" Our Root: %p\n", thread->th.th_root); in __kmp_print_structure()
3548 __kmp_print_structure_team(" Our Team: ", thread->th.th_team); in __kmp_print_structure()
3550 thread->th.th_serial_team); in __kmp_print_structure()
3551 __kmp_printf(" Threads: %2d\n", thread->th.th_team_nproc); in __kmp_print_structure()
3553 thread->th.th_team_master); in __kmp_print_structure()
3554 __kmp_printf(" Serialized?: %2d\n", thread->th.th_team_serialized); in __kmp_print_structure()
3555 __kmp_printf(" Set NProc: %2d\n", thread->th.th_set_nproc); in __kmp_print_structure()
3556 __kmp_printf(" Set Proc Bind: %2d\n", thread->th.th_set_proc_bind); in __kmp_print_structure()
3558 thread->th.th_next_pool); in __kmp_print_structure()
3560 __kmp_print_structure_team_accum(list, thread->th.th_team); in __kmp_print_structure()
3561 __kmp_print_structure_team_accum(list, thread->th.th_serial_team); in __kmp_print_structure()
3651 unsigned x = thread->th.th_x; in __kmp_get_random()
3654 thread->th.th_x = x * thread->th.th_a + 1; in __kmp_get_random()
3657 thread->th.th_info.ds.ds_tid, r)); in __kmp_get_random()
3664 unsigned seed = thread->th.th_info.ds.ds_tid; in __kmp_init_random()
3666 thread->th.th_a = in __kmp_init_random()
3668 thread->th.th_x = (seed + 1) * thread->th.th_a + 1; in __kmp_init_random()
3670 ("__kmp_init_random: THREAD: %u; A: %u\n", seed, thread->th.th_a)); in __kmp_init_random()
3933 root_thread->th.th_info.ds.ds_gtid = gtid; in __kmp_register_root()
3935 root_thread->th.ompt_thread_info.thread_data = ompt_data_none; in __kmp_register_root()
3937 root_thread->th.th_root = root; in __kmp_register_root()
3939 root_thread->th.th_cons = __kmp_allocate_cons_stack(gtid); in __kmp_register_root()
3946 KMP_DEBUG_ASSERT(root_thread->th.th_local.bget_data == NULL); in __kmp_register_root()
3953 if (!root_thread->th.th_serial_team) { in __kmp_register_root()
3956 root_thread->th.th_serial_team = __kmp_allocate_team( in __kmp_register_root()
3963 KMP_ASSERT(root_thread->th.th_serial_team); in __kmp_register_root()
3965 root_thread->th.th_serial_team)); in __kmp_register_root()
3972 root_thread->th.th_serial_team->t.t_threads[0] = root_thread; in __kmp_register_root()
3974 root_thread->th.th_serial_team->t.t_serialized = 0; in __kmp_register_root()
4002 root_thread->th.th_bar[b].bb.b_arrived = KMP_INIT_BARRIER_STATE; in __kmp_register_root()
4004 root_thread->th.th_bar[b].bb.b_worker_arrived = 0; in __kmp_register_root()
4012 root_thread->th.th_current_place = KMP_PLACE_UNDEFINED; in __kmp_register_root()
4013 root_thread->th.th_new_place = KMP_PLACE_UNDEFINED; in __kmp_register_root()
4014 root_thread->th.th_first_place = KMP_PLACE_UNDEFINED; in __kmp_register_root()
4015 root_thread->th.th_last_place = KMP_PLACE_UNDEFINED; in __kmp_register_root()
4017 root_thread->th.th_def_allocator = __kmp_def_allocator; in __kmp_register_root()
4018 root_thread->th.th_prev_level = 0; in __kmp_register_root()
4019 root_thread->th.th_prev_num_threads = 1; in __kmp_register_root()
4029 root_thread->th.th_cg_roots = tmp; in __kmp_register_root()
4071 kmp_hot_team_ptr_t *hot_teams = thr->th.th_hot_teams; in __kmp_free_hot_teams()
4081 kmp_info_t *th = team->t.t_threads[i]; in __kmp_free_hot_teams() local
4082 n += __kmp_free_hot_teams(root, th, level + 1, max_level); in __kmp_free_hot_teams()
4083 if (i > 0 && th->th.th_hot_teams) { in __kmp_free_hot_teams()
4084 __kmp_free(th->th.th_hot_teams); in __kmp_free_hot_teams()
4085 th->th.th_hot_teams = NULL; in __kmp_free_hot_teams()
4113 kmp_info_t *th = hot_team->t.t_threads[i]; in __kmp_reset_root() local
4115 n += __kmp_free_hot_teams(root, th, 1, __kmp_hot_teams_max_level); in __kmp_reset_root()
4117 if (th->th.th_hot_teams) { in __kmp_reset_root()
4118 __kmp_free(th->th.th_hot_teams); in __kmp_reset_root()
4119 th->th.th_hot_teams = NULL; in __kmp_reset_root()
4138 (LPVOID) & (root->r.r_uber_thread->th), in __kmp_reset_root()
4139 root->r.r_uber_thread->th.th_info.ds.ds_thread)); in __kmp_reset_root()
4140 __kmp_free_handle(root->r.r_uber_thread->th.th_info.ds.ds_thread); in __kmp_reset_root()
4159 &(root->r.r_uber_thread->th.ompt_thread_info.thread_data)); in __kmp_reset_root()
4165 i = root->r.r_uber_thread->th.th_cg_roots->cg_nthreads--; in __kmp_reset_root()
4168 root->r.r_uber_thread, root->r.r_uber_thread->th.th_cg_roots, in __kmp_reset_root()
4169 root->r.r_uber_thread->th.th_cg_roots->cg_nthreads)); in __kmp_reset_root()
4173 root->r.r_uber_thread->th.th_cg_roots->cg_root); in __kmp_reset_root()
4174 KMP_DEBUG_ASSERT(root->r.r_uber_thread->th.th_cg_roots->up == NULL); in __kmp_reset_root()
4175 __kmp_free(root->r.r_uber_thread->th.th_cg_roots); in __kmp_reset_root()
4176 root->r.r_uber_thread->th.th_cg_roots = NULL; in __kmp_reset_root()
4206 KMP_ASSERT(root == __kmp_threads[gtid]->th.th_root); in __kmp_unregister_root_current_thread()
4212 kmp_team_t *team = thread->th.th_team; in __kmp_unregister_root_current_thread()
4213 kmp_task_team_t *task_team = thread->th.th_task_team; in __kmp_unregister_root_current_thread()
4220 thread->th.ompt_thread_info.state = ompt_state_undefined; in __kmp_unregister_root_current_thread()
4245 KMP_ASSERT(root == __kmp_threads[gtid]->th.th_root); in __kmp_unregister_root_other_thread()
4261 kmp_team_t *steam = this_thr->th.th_serial_team; in __kmp_task_info()
4262 kmp_team_t *team = this_thr->th.th_team; in __kmp_task_info()
4267 gtid, tid, this_thr, team, steam, this_thr->th.th_current_task, in __kmp_task_info()
4281 KMP_DEBUG_ASSERT(this_thr->th.th_serial_team); in __kmp_initialize_info()
4287 KMP_DEBUG_ASSERT(master->th.th_root); in __kmp_initialize_info()
4291 TCW_SYNC_PTR(this_thr->th.th_team, team); in __kmp_initialize_info()
4293 this_thr->th.th_info.ds.ds_tid = tid; in __kmp_initialize_info()
4294 this_thr->th.th_set_nproc = 0; in __kmp_initialize_info()
4298 this_thr->th.th_reap_state = KMP_NOT_SAFE_TO_REAP; in __kmp_initialize_info()
4300 this_thr->th.th_reap_state = KMP_SAFE_TO_REAP; in __kmp_initialize_info()
4301 this_thr->th.th_set_proc_bind = proc_bind_default; in __kmp_initialize_info()
4304 this_thr->th.th_new_place = this_thr->th.th_current_place; in __kmp_initialize_info()
4306 this_thr->th.th_root = master->th.th_root; in __kmp_initialize_info()
4309 this_thr->th.th_team_nproc = team->t.t_nproc; in __kmp_initialize_info()
4310 this_thr->th.th_team_master = master; in __kmp_initialize_info()
4311 this_thr->th.th_team_serialized = team->t.t_serialized; in __kmp_initialize_info()
4316 tid, gtid, this_thr, this_thr->th.th_current_task)); in __kmp_initialize_info()
4318 __kmp_init_implicit_task(this_thr->th.th_team_master->th.th_ident, this_thr, in __kmp_initialize_info()
4322 tid, gtid, this_thr, this_thr->th.th_current_task)); in __kmp_initialize_info()
4327 this_thr->th.th_dispatch = &team->t.t_dispatch[tid]; in __kmp_initialize_info()
4329 this_thr->th.th_local.this_construct = 0; in __kmp_initialize_info()
4331 if (!this_thr->th.th_pri_common) { in __kmp_initialize_info()
4332 this_thr->th.th_pri_common = in __kmp_initialize_info()
4336 gtid, this_thr->th.th_pri_common, this_thr->th.th_pri_common + 1, in __kmp_initialize_info()
4339 this_thr->th.th_pri_head = NULL; in __kmp_initialize_info()
4343 this_thr->th.th_cg_roots != master->th.th_cg_roots) { // CG root not set in __kmp_initialize_info()
4345 KMP_DEBUG_ASSERT(master->th.th_cg_roots); in __kmp_initialize_info()
4346 kmp_cg_root_t *tmp = this_thr->th.th_cg_roots; in __kmp_initialize_info()
4357 this_thr->th.th_cg_roots = master->th.th_cg_roots; in __kmp_initialize_info()
4359 this_thr->th.th_cg_roots->cg_nthreads++; in __kmp_initialize_info()
4362 this_thr, this_thr->th.th_cg_roots, in __kmp_initialize_info()
4363 this_thr->th.th_cg_roots->cg_root, in __kmp_initialize_info()
4364 this_thr->th.th_cg_roots->cg_nthreads)); in __kmp_initialize_info()
4365 this_thr->th.th_current_task->td_icvs.thread_limit = in __kmp_initialize_info()
4366 this_thr->th.th_cg_roots->cg_thread_limit; in __kmp_initialize_info()
4371 volatile kmp_disp_t *dispatch = this_thr->th.th_dispatch; in __kmp_initialize_info()
4410 this_thr->th.th_next_pool = NULL; in __kmp_initialize_info()
4412 KMP_DEBUG_ASSERT(!this_thr->th.th_spin_here); in __kmp_initialize_info()
4413 KMP_DEBUG_ASSERT(this_thr->th.th_next_waiting == 0); in __kmp_initialize_info()
4441 __kmp_thread_pool = (volatile kmp_info_t *)new_thr->th.th_next_pool; in __kmp_allocate_thread()
4445 TCW_4(new_thr->th.th_in_pool, FALSE); in __kmp_allocate_thread()
4448 if (new_thr->th.th_active_in_pool == TRUE) { in __kmp_allocate_thread()
4449 KMP_DEBUG_ASSERT(new_thr->th.th_active == TRUE); in __kmp_allocate_thread()
4451 new_thr->th.th_active_in_pool = FALSE; in __kmp_allocate_thread()
4456 __kmp_get_gtid(), new_thr->th.th_info.ds.ds_gtid)); in __kmp_allocate_thread()
4457 KMP_ASSERT(!new_thr->th.th_team); in __kmp_allocate_thread()
4462 new_thr->th.th_info.ds.ds_gtid); in __kmp_allocate_thread()
4463 KMP_DEBUG_ASSERT(new_thr->th.th_serial_team); in __kmp_allocate_thread()
4467 new_thr->th.th_task_state = 0; in __kmp_allocate_thread()
4471 KMP_DEBUG_ASSERT(new_thr->th.th_used_in_team.load() == 0); in __kmp_allocate_thread()
4489 kmp_balign_t *balign = new_thr->th.th_bar; in __kmp_allocate_thread()
4495 __kmp_get_gtid(), new_thr, new_thr->th.th_info.ds.ds_gtid)); in __kmp_allocate_thread()
4555 new_thr->th.th_nt_strict = false; in __kmp_allocate_thread()
4556 new_thr->th.th_nt_loc = NULL; in __kmp_allocate_thread()
4557 new_thr->th.th_nt_sev = severity_fatal; in __kmp_allocate_thread()
4558 new_thr->th.th_nt_msg = NULL; in __kmp_allocate_thread()
4567 &new_thr->th.th_sleep_loc, sizeof(new_thr->th.th_sleep_loc)); in __kmp_allocate_thread()
4570 &new_thr->th.th_reap_state, sizeof(new_thr->th.th_reap_state)); in __kmp_allocate_thread()
4574 &new_thr->th.th_suspend_init, sizeof(new_thr->th.th_suspend_init)); in __kmp_allocate_thread()
4578 &new_thr->th.th_suspend_init_count, in __kmp_allocate_thread()
4579 sizeof(new_thr->th.th_suspend_init_count)); in __kmp_allocate_thread()
4584 CCAST(kmp_uint64 *, &new_thr->th.th_bar[0].bb.b_go), in __kmp_allocate_thread()
4585 sizeof(new_thr->th.th_bar[0].bb.b_go)); in __kmp_allocate_thread()
4588 CCAST(kmp_uint64 *, &new_thr->th.th_bar[1].bb.b_go), in __kmp_allocate_thread()
4589 sizeof(new_thr->th.th_bar[1].bb.b_go)); in __kmp_allocate_thread()
4592 CCAST(kmp_uint64 *, &new_thr->th.th_bar[2].bb.b_go), in __kmp_allocate_thread()
4593 sizeof(new_thr->th.th_bar[2].bb.b_go)); in __kmp_allocate_thread()
4603 new_thr->th.th_serial_team = serial_team = in __kmp_allocate_thread()
4627 KMP_DEBUG_ASSERT(new_thr->th.th_local.bget_data == NULL); in __kmp_allocate_thread()
4639 kmp_balign_t *balign = new_thr->th.th_bar; in __kmp_allocate_thread()
4647 TCW_PTR(new_thr->th.th_sleep_loc, NULL); in __kmp_allocate_thread()
4648 new_thr->th.th_sleep_loc_type = flag_unset; in __kmp_allocate_thread()
4650 new_thr->th.th_spin_here = FALSE; in __kmp_allocate_thread()
4651 new_thr->th.th_next_waiting = 0; in __kmp_allocate_thread()
4653 new_thr->th.th_blocking = false; in __kmp_allocate_thread()
4657 new_thr->th.th_current_place = KMP_PLACE_UNDEFINED; in __kmp_allocate_thread()
4658 new_thr->th.th_new_place = KMP_PLACE_UNDEFINED; in __kmp_allocate_thread()
4659 new_thr->th.th_first_place = KMP_PLACE_UNDEFINED; in __kmp_allocate_thread()
4660 new_thr->th.th_last_place = KMP_PLACE_UNDEFINED; in __kmp_allocate_thread()
4662 new_thr->th.th_def_allocator = __kmp_def_allocator; in __kmp_allocate_thread()
4663 new_thr->th.th_prev_level = 0; in __kmp_allocate_thread()
4664 new_thr->th.th_prev_num_threads = 1; in __kmp_allocate_thread()
4666 TCW_4(new_thr->th.th_in_pool, FALSE); in __kmp_allocate_thread()
4667 new_thr->th.th_active_in_pool = FALSE; in __kmp_allocate_thread()
4668 TCW_4(new_thr->th.th_active, TRUE); in __kmp_allocate_thread()
4670 new_thr->th.th_set_nested_nth = NULL; in __kmp_allocate_thread()
4671 new_thr->th.th_set_nested_nth_sz = 0; in __kmp_allocate_thread()
4799 static inline void __kmp_set_thread_place(kmp_team_t *team, kmp_info_t *th, in __kmp_set_thread_place() argument
4801 th->th.th_first_place = first; in __kmp_set_thread_place()
4802 th->th.th_last_place = last; in __kmp_set_thread_place()
4803 th->th.th_new_place = newp; in __kmp_set_thread_place()
4804 if (newp != th->th.th_current_place) { in __kmp_set_thread_place()
4808 th->th.th_topology_ids = __kmp_affinity.ids[th->th.th_new_place]; in __kmp_set_thread_place()
4809 th->th.th_topology_attrs = __kmp_affinity.attrs[th->th.th_new_place]; in __kmp_set_thread_place()
4825 int first_place = master_th->th.th_first_place; in __kmp_partition_places()
4826 int last_place = master_th->th.th_last_place; in __kmp_partition_places()
4827 int masters_place = master_th->th.th_current_place; in __kmp_partition_places()
4849 kmp_info_t *th = team->t.t_threads[f]; in __kmp_partition_places() local
4850 KMP_DEBUG_ASSERT(th != NULL); in __kmp_partition_places()
4851 __kmp_set_thread_place(team, th, first_place, last_place, masters_place); in __kmp_partition_places()
4872 kmp_info_t *th = team->t.t_threads[f]; in __kmp_partition_places() local
4873 KMP_DEBUG_ASSERT(th != NULL); in __kmp_partition_places()
4882 __kmp_set_thread_place(team, th, first_place, last_place, place); in __kmp_partition_places()
4898 kmp_info_t *th = team->t.t_threads[f]; in __kmp_partition_places() local
4899 KMP_DEBUG_ASSERT(th != NULL); in __kmp_partition_places()
4901 __kmp_set_thread_place(team, th, first_place, last_place, place); in __kmp_partition_places()
4934 th->th.th_new_place, first_place, last_place)); in __kmp_partition_places()
4965 kmp_info_t *th = team->t.t_threads[f]; in __kmp_partition_places() local
4966 KMP_DEBUG_ASSERT(th != NULL); in __kmp_partition_places()
4991 __kmp_set_thread_place(team, th, fplace, place, nplace); in __kmp_partition_places()
5006 f, th->th.th_new_place, th->th.th_first_place, in __kmp_partition_places()
5007 th->th.th_last_place, num_masks)); in __kmp_partition_places()
5017 kmp_info_t *th; in __kmp_partition_places() local
5055 th = team->t.t_threads[f]; in __kmp_partition_places()
5056 KMP_DEBUG_ASSERT(th); in __kmp_partition_places()
5057 __kmp_set_thread_place(team, th, first, last, place); in __kmp_partition_places()
5062 team->t.t_id, f, th->th.th_new_place, in __kmp_partition_places()
5063 th->th.th_first_place, th->th.th_last_place, spacing)); in __kmp_partition_places()
5080 kmp_info_t *th = team->t.t_threads[f]; in __kmp_partition_places() local
5081 KMP_DEBUG_ASSERT(th != NULL); in __kmp_partition_places()
5083 __kmp_set_thread_place(team, th, place, place, place); in __kmp_partition_places()
5115 team->t.t_id, f, th->th.th_new_place, in __kmp_partition_places()
5116 th->th.th_first_place, th->th.th_last_place)); in __kmp_partition_places()
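The __kmp_partition_places() entries distribute team threads over the master's place interval [first_place, last_place], calling __kmp_set_thread_place() per thread. An illustrative fragment of the simplest case only — one place per thread, wrapping circularly through the global place list; the real function also handles spread spacing, chunked assignment when threads outnumber places, and per-thread sub-partitions:

    // f: thread index in team; n_places: places in the master's partition;
    // num_masks: length of the global place list (for circular wrap).
    int place_for_thread(int f, int first_place, int n_places,
                         int num_masks) {
      return (first_place + f % n_places) % num_masks;
    }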
5156 team = master->th.th_team; in __kmp_allocate_team()
5158 if (master->th.th_teams_microtask) { // in teams construct? in __kmp_allocate_team()
5159 if (master->th.th_teams_size.nteams > 1 && in __kmp_allocate_team()
5163 master->th.th_teams_level < in __kmp_allocate_team()
5170 if ((master->th.th_teams_size.nteams == 1 && in __kmp_allocate_team()
5171 master->th.th_teams_level >= team->t.t_level) || in __kmp_allocate_team()
5175 hot_teams = master->th.th_hot_teams; in __kmp_allocate_team()
5234 root->r.r_uber_thread->th.th_ident); in __kmp_allocate_team()
5277 kmp_info_t *th = team->t.t_threads[f]; in __kmp_allocate_team() local
5278 KMP_DEBUG_ASSERT(th); in __kmp_allocate_team()
5279 th->th.th_task_team = NULL; in __kmp_allocate_team()
5302 kmp_balign_t *balign = team->t.t_threads[f]->th.th_bar; in __kmp_allocate_team()
5316 root->r.r_uber_thread->th.th_ident); in __kmp_allocate_team()
5320 team->t.t_threads[f]->th.th_team_nproc = new_nproc; in __kmp_allocate_team()
5333 team->t.t_threads[f]->th.th_team_nproc == in __kmp_allocate_team()
5361 kmp_balign_t *balign = other_threads[f]->th.th_bar; in __kmp_allocate_team()
5412 kmp_balign_t *balign = new_worker->th.th_bar; in __kmp_allocate_team()
5440 root->r.r_uber_thread->th.th_ident); in __kmp_allocate_team()
5449 kmp_uint8 old_state = team->t.t_threads[old_nproc - 1]->th.th_task_state; in __kmp_allocate_team()
5451 team->t.t_threads[f]->th.th_task_state = old_state; in __kmp_allocate_team()
5456 team->t.t_threads[f]->th.th_team_nproc == in __kmp_allocate_team()
5469 if (master->th.th_teams_microtask) { in __kmp_allocate_team()
5473 thr->th.th_teams_microtask = master->th.th_teams_microtask; in __kmp_allocate_team()
5474 thr->th.th_teams_level = master->th.th_teams_level; in __kmp_allocate_team()
5475 thr->th.th_teams_size = master->th.th_teams_size; in __kmp_allocate_team()
5485 kmp_balign_t *balign = thr->th.th_bar; in __kmp_allocate_team()
5677 if (master->th.th_teams_microtask) { // in teams construct? in __kmp_free_team()
5678 if (master->th.th_teams_size.nteams > 1) { in __kmp_free_team()
5683 master->th.th_teams_level == team->t.t_level) { in __kmp_free_team()
5689 kmp_hot_team_ptr_t *hot_teams = master->th.th_hot_teams; in __kmp_free_team()
5712 kmp_info_t *th = team->t.t_threads[f]; in __kmp_free_team() local
5713 volatile kmp_uint32 *state = &th->th.th_reap_state; in __kmp_free_team()
5718 if (!__kmp_is_thread_alive(th, &ecode)) { in __kmp_free_team()
5724 if (th->th.th_sleep_loc) in __kmp_free_team()
5725 __kmp_null_resume_wrapper(th); in __kmp_free_team()
5737 team->t.t_threads[f]->th.th_task_team = NULL; in __kmp_free_team()
5768 KMP_COMPARE_AND_STORE_ACQ32(&(team->t.t_threads[f]->th.th_used_in_team), in __kmp_free_team()
5782 team->t.t_threads[f]->th.th_info.ds.ds_gtid, in __kmp_free_team()
5789 while (team->t.t_threads[f]->th.th_used_in_team.load() != 0) in __kmp_free_team()
5811 team->t.t_threads[1]->th.th_cg_roots); in __kmp_free_team()
5812 if (team->t.t_threads[1]->th.th_cg_roots->cg_root == team->t.t_threads[1]) { in __kmp_free_team()
5816 KMP_DEBUG_ASSERT(thr && thr->th.th_cg_roots && in __kmp_free_team()
5817 thr->th.th_cg_roots->cg_root == thr); in __kmp_free_team()
5819 kmp_cg_root_t *tmp = thr->th.th_cg_roots; in __kmp_free_team()
5820 thr->th.th_cg_roots = tmp->up; in __kmp_free_team()
5823 thr, tmp, thr->th.th_cg_roots, tmp->cg_nthreads)); in __kmp_free_team()
5829 if (thr->th.th_cg_roots) in __kmp_free_team()
5830 thr->th.th_current_task->td_icvs.thread_limit = in __kmp_free_team()
5831 thr->th.th_cg_roots->cg_thread_limit; in __kmp_free_team()
5891 __kmp_get_gtid(), this_th->th.th_info.ds.ds_gtid)); in __kmp_free_thread()
5898 kmp_balign_t *balign = this_th->th.th_bar; in __kmp_free_thread()
5905 this_th->th.th_task_state = 0; in __kmp_free_thread()
5906 this_th->th.th_reap_state = KMP_SAFE_TO_REAP; in __kmp_free_thread()
5909 TCW_PTR(this_th->th.th_team, NULL); in __kmp_free_thread()
5910 TCW_PTR(this_th->th.th_root, NULL); in __kmp_free_thread()
5911 TCW_PTR(this_th->th.th_dispatch, NULL); /* NOT NEEDED */ in __kmp_free_thread()
5913 while (this_th->th.th_cg_roots) { in __kmp_free_thread()
5914 this_th->th.th_cg_roots->cg_nthreads--; in __kmp_free_thread()
5917 this_th, this_th->th.th_cg_roots, in __kmp_free_thread()
5918 this_th->th.th_cg_roots->cg_root, in __kmp_free_thread()
5919 this_th->th.th_cg_roots->cg_nthreads)); in __kmp_free_thread()
5920 kmp_cg_root_t *tmp = this_th->th.th_cg_roots; in __kmp_free_thread()
5925 this_th->th.th_cg_roots = tmp->up; in __kmp_free_thread()
5931 this_th->th.th_cg_roots = NULL; in __kmp_free_thread()
5942 this_th->th.th_current_task = NULL; in __kmp_free_thread()
5946 gtid = this_th->th.th_info.ds.ds_gtid; in __kmp_free_thread()
5949 if (__kmp_thread_pool_insert_pt->th.th_info.ds.ds_gtid > gtid) { in __kmp_free_thread()
5960 scan = &(__kmp_thread_pool_insert_pt->th.th_next_pool); in __kmp_free_thread()
5964 for (; (*scan != NULL) && ((*scan)->th.th_info.ds.ds_gtid < gtid); in __kmp_free_thread()
5965 scan = &((*scan)->th.th_next_pool)) in __kmp_free_thread()
5970 TCW_PTR(this_th->th.th_next_pool, *scan); in __kmp_free_thread()
5972 KMP_DEBUG_ASSERT((this_th->th.th_next_pool == NULL) || in __kmp_free_thread()
5973 (this_th->th.th_info.ds.ds_gtid < in __kmp_free_thread()
5974 this_th->th.th_next_pool->th.th_info.ds.ds_gtid)); in __kmp_free_thread()
5975 TCW_4(this_th->th.th_in_pool, TRUE); in __kmp_free_thread()
5978 if (this_th->th.th_active == TRUE) { in __kmp_free_thread()
5980 this_th->th.th_active_in_pool = TRUE; in __kmp_free_thread()
5984 KMP_DEBUG_ASSERT(this_th->th.th_active_in_pool == FALSE); in __kmp_free_thread()
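__kmp_free_thread() returns a thread to the global pool, which is kept sorted by gtid; the scan above walks a pointer-to-pointer down the th_next_pool links (starting from a cached insert point when possible) and splices the thread in. A minimal sketch of the splice:

    // Reduced pool node: gtid plus the intrusive link (th_next_pool).
    struct pool_thread { int gtid; pool_thread *next_pool; };

    // Insert 'th' into the gtid-sorted singly linked pool.  'scan' walks
    // the link *locations*, so the head needs no special case.
    void pool_insert_sorted(pool_thread *&pool_head, pool_thread *th) {
      pool_thread **scan = &pool_head;
      while (*scan != nullptr && (*scan)->gtid < th->gtid)
        scan = &(*scan)->next_pool;
      th->next_pool = *scan; // splice before the first larger gtid
      *scan = th;
    }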
6015 int gtid = this_thr->th.th_info.ds.ds_gtid; in __kmp_launch_thread()
6023 this_thr->th.th_cons = __kmp_allocate_cons_stack(gtid); // ATT: Memory leak? in __kmp_launch_thread()
6034 thread_data = &(this_thr->th.ompt_thread_info.thread_data); in __kmp_launch_thread()
6037 this_thr->th.ompt_thread_info.state = ompt_state_overhead; in __kmp_launch_thread()
6038 this_thr->th.ompt_thread_info.wait_id = 0; in __kmp_launch_thread()
6039 this_thr->th.ompt_thread_info.idle_frame = OMPT_GET_FRAME_ADDRESS(0); in __kmp_launch_thread()
6040 this_thr->th.ompt_thread_info.parallel_flags = 0; in __kmp_launch_thread()
6045 this_thr->th.ompt_thread_info.state = ompt_state_idle; in __kmp_launch_thread()
6062 this_thr->th.ompt_thread_info.state = ompt_state_overhead; in __kmp_launch_thread()
6066 pteam = &this_thr->th.th_team; in __kmp_launch_thread()
6082 this_thr->th.ompt_thread_info.state = ompt_state_work_parallel; in __kmp_launch_thread()
6099 this_thr->th.ompt_thread_info.state = ompt_state_overhead; in __kmp_launch_thread()
6118 this_thr->th.th_task_team = NULL; in __kmp_launch_thread()
6193 gtid = thread->th.th_info.ds.ds_gtid; in __kmp_reap_thread()
6203 !KMP_COMPARE_AND_STORE_ACQ32(&(thread->th.th_used_in_team), 0, 3)) in __kmp_reap_thread()
6209 kmp_flag_64<> flag(&thread->th.th_bar[bs_forkjoin_barrier].bb.b_go, in __kmp_reap_thread()
6228 if (thread->th.th_active_in_pool) { in __kmp_reap_thread()
6229 thread->th.th_active_in_pool = FALSE; in __kmp_reap_thread()
6263 if (thread->th.th_cons) { in __kmp_reap_thread()
6264 __kmp_free_cons_stack(thread->th.th_cons); in __kmp_reap_thread()
6265 thread->th.th_cons = NULL; in __kmp_reap_thread()
6269 if (thread->th.th_pri_common != NULL) { in __kmp_reap_thread()
6270 __kmp_free(thread->th.th_pri_common); in __kmp_reap_thread()
6271 thread->th.th_pri_common = NULL; in __kmp_reap_thread()
6275 if (thread->th.th_local.bget_data != NULL) { in __kmp_reap_thread()
6281 if (thread->th.th_affin_mask != NULL) { in __kmp_reap_thread()
6282 KMP_CPU_FREE(thread->th.th_affin_mask); in __kmp_reap_thread()
6283 thread->th.th_affin_mask = NULL; in __kmp_reap_thread()
6288 if (thread->th.th_hier_bar_data != NULL) { in __kmp_reap_thread()
6289 __kmp_free(thread->th.th_hier_bar_data); in __kmp_reap_thread()
6290 thread->th.th_hier_bar_data = NULL; in __kmp_reap_thread()
6294 __kmp_reap_team(thread->th.th_serial_team); in __kmp_reap_thread()
6295 thread->th.th_serial_team = NULL; in __kmp_reap_thread()
6302 static void __kmp_itthash_clean(kmp_info_t *th) { in __kmp_itthash_clean() argument
6309 __kmp_thread_free(th, bucket); in __kmp_itthash_clean()
6319 __kmp_thread_free(th, bucket); in __kmp_itthash_clean()
6389 __kmp_thread_pool = thread->th.th_next_pool; in __kmp_internal_end()
6391 KMP_DEBUG_ASSERT(thread->th.th_reap_state == KMP_SAFE_TO_REAP); in __kmp_internal_end()
6392 thread->th.th_next_pool = NULL; in __kmp_internal_end()
6393 thread->th.th_in_pool = FALSE; in __kmp_internal_end()
6417 while (thr && KMP_ATOMIC_LD_ACQ(&thr->th.th_blocking)) in __kmp_internal_end()
6646 __kmp_threads[gtid]->th.th_task_team = NULL; in __kmp_internal_end_thread()
7489 if (thread->th.th_current_task->td_icvs.nproc != 0) in __kmp_do_middle_initialize()
7654 this_thr->th.th_local.this_construct = 0; in __kmp_run_before_invoked_task()
7656 KMP_CACHE_PREFETCH(&this_thr->th.th_bar[bs_forkjoin_barrier].bb.b_arrived); in __kmp_run_before_invoked_task()
7658 dispatch = (kmp_disp_t *)TCR_PTR(this_thr->th.th_dispatch); in __kmp_run_before_invoked_task()
7684 kmp_team_t *team = this_thr->th.th_team; in __kmp_invoke_task_func()
7748 this_thr->th.ompt_thread_info.parallel_flags = ompt_parallel_team; in __kmp_invoke_task_func()
7778 kmp_team_t *team = thr->th.th_team; in __kmp_teams_master()
7780 thr->th.th_set_nproc = thr->th.th_teams_size.nth; in __kmp_teams_master()
7781 KMP_DEBUG_ASSERT(thr->th.th_teams_microtask); in __kmp_teams_master()
7782 KMP_DEBUG_ASSERT(thr->th.th_set_nproc); in __kmp_teams_master()
7784 __kmp_tid_from_gtid(gtid), thr->th.th_teams_microtask)); in __kmp_teams_master()
7790 tmp->cg_thread_limit = thr->th.th_current_task->td_icvs.thread_limit; in __kmp_teams_master()
7795 tmp->up = thr->th.th_cg_roots; in __kmp_teams_master()
7796 thr->th.th_cg_roots = tmp; in __kmp_teams_master()
7804 (microtask_t)thr->th.th_teams_microtask, // "wrapped" task in __kmp_teams_master()
7810 if (thr->th.th_team_nproc < thr->th.th_teams_size.nth) in __kmp_teams_master()
7811 thr->th.th_teams_size.nth = thr->th.th_team_nproc; in __kmp_teams_master()
7825 kmp_team_t *team = this_thr->th.th_team; in __kmp_invoke_teams_master()
7827 if (!__kmp_threads[gtid]->th.th_team->t.t_serialized) in __kmp_invoke_teams_master()
7828 KMP_DEBUG_ASSERT((void *)__kmp_threads[gtid]->th.th_team->t.t_pkfn == in __kmp_invoke_teams_master()
7846 this_thr->th.ompt_thread_info.parallel_flags = ompt_parallel_league; in __kmp_invoke_teams_master()
7860 thr->th.th_set_nproc = num_threads; in __kmp_push_num_threads()
7870 thr->th.th_set_nproc = num_threads_list[0]; in __kmp_push_num_threads_list()
7871 thr->th.th_set_nested_nth = in __kmp_push_num_threads_list()
7874 thr->th.th_set_nested_nth[i] = num_threads_list[i]; in __kmp_push_num_threads_list()
7875 thr->th.th_set_nested_nth_sz = list_length; in __kmp_push_num_threads_list()
7881 thr->th.th_nt_strict = true; in __kmp_set_strict_num_threads()
7882 thr->th.th_nt_loc = loc; in __kmp_set_strict_num_threads()
7885 thr->th.th_nt_sev = sev; in __kmp_set_strict_num_threads()
7887 thr->th.th_nt_sev = severity_fatal; in __kmp_set_strict_num_threads()
7890 thr->th.th_nt_msg = msg; in __kmp_set_strict_num_threads()
7892 thr->th.th_nt_msg = "Cannot form team with number of threads specified by " in __kmp_set_strict_num_threads()
7918 if (num_threads > thr->th.th_current_task->td_icvs.thread_limit) { in __kmp_push_thread_limit()
7919 num_threads = thr->th.th_current_task->td_icvs.thread_limit; in __kmp_push_thread_limit()
7935 thr->th.th_current_task->td_icvs.thread_limit = num_threads; in __kmp_push_thread_limit()
7956 thr->th.th_teams_size.nth = num_threads; in __kmp_push_thread_limit()
7989 thr->th.th_set_nproc = thr->th.th_teams_size.nteams = num_teams; in __kmp_push_num_teams()
8046 thr->th.th_set_nproc = thr->th.th_teams_size.nteams = num_teams; in __kmp_push_num_teams_51()
8054 thr->th.th_set_proc_bind = proc_bind; in __kmp_push_proc_bind()
8067 KMP_DEBUG_ASSERT(this_thr->th.th_team == team); in __kmp_internal_fork()
8089 KMP_ASSERT(this_thr->th.th_team == team); in __kmp_internal_fork()
8094 team->t.t_threads[f]->th.th_team_nproc == team->t.t_nproc); in __kmp_internal_fork()
8106 KMP_DEBUG_ASSERT(this_thr->th.th_team == team); in __kmp_internal_join()
8114 __kmp_threads[gtid]->th.th_team_nproc != team->t.t_nproc) { in __kmp_internal_join()
8119 gtid, __kmp_threads[gtid]->th.th_team_nproc, team, in __kmp_internal_join()
8124 __kmp_threads[gtid]->th.th_team_nproc == team->t.t_nproc); in __kmp_internal_join()
8129 ompt_state_t ompt_state = this_thr->th.ompt_thread_info.state; in __kmp_internal_join()
8133 int ds_tid = this_thr->th.th_info.ds.ds_tid; in __kmp_internal_join()
8135 this_thr->th.ompt_thread_info.state = ompt_state_overhead; in __kmp_internal_join()
8144 if (this_thr->th.ompt_thread_info.parallel_flags & ompt_parallel_league) in __kmp_internal_join()
8164 KMP_ASSERT(this_thr->th.th_team == team); in __kmp_internal_join()
8189 if (hot_team->t.t_threads[i]->th.th_active) { in __kmp_active_hot_team_nproc()
8209 ->th.th_current_task->td_icvs.dynamic == TRUE); in __kmp_load_balance_nproc()
8409 root = __kmp_threads[gtid]->th.th_root; in __kmp_internal_begin()
8437 root = thread->th.th_root; in __kmp_user_set_library()
8449 thread->th.th_set_nproc = 0; in __kmp_user_set_library()
8453 thread->th.th_set_nproc = 0; in __kmp_user_set_library()
8458 thread->th.th_set_nproc = 0; in __kmp_user_set_library()
8526 if (thr->th.th_teams_microtask) { in __kmp_aux_get_team_info()
8527 kmp_team_t *team = thr->th.th_team; in __kmp_aux_get_team_info()
8528 int tlevel = thr->th.th_teams_level; // the level of the teams construct in __kmp_aux_get_team_info()
8632 static int __kmp_aux_capture_affinity_field(int gtid, const kmp_info_t *th, in __kmp_aux_capture_affinity_field() argument
8643 KMP_DEBUG_ASSERT(th); in __kmp_aux_capture_affinity_field()
8743 rc = __kmp_str_buf_print(field_buffer, format, th->th.th_team->t.t_level); in __kmp_aux_capture_affinity_field()
8761 rc = __kmp_str_buf_print(field_buffer, format, th->th.th_team->t.t_nproc); in __kmp_aux_capture_affinity_field()
8765 __kmp_get_ancestor_thread_num(gtid, th->th.th_team->t.t_level - 1); in __kmp_aux_capture_affinity_field()
8772 __kmp_affinity_str_buf_mask(&buf, th->th.th_affin_mask); in __kmp_aux_capture_affinity_field()
8805 const kmp_info_t *th; in __kmp_aux_capture_affinity() local
8814 th = __kmp_threads[gtid]; in __kmp_aux_capture_affinity()
8829 int rc = __kmp_aux_capture_affinity_field(gtid, th, &parse_ptr, &field); in __kmp_aux_capture_affinity()
8868 set__blocktime_team(thread->th.th_team, tid, blocktime); in __kmp_aux_set_blocktime()
8869 set__blocktime_team(thread->th.th_serial_team, 0, blocktime); in __kmp_aux_set_blocktime()
8875 set__bt_intervals_team(thread->th.th_team, tid, bt_intervals); in __kmp_aux_set_blocktime()
8876 set__bt_intervals_team(thread->th.th_serial_team, 0, bt_intervals); in __kmp_aux_set_blocktime()
8882 set__bt_set_team(thread->th.th_team, tid, bt_set); in __kmp_aux_set_blocktime()
8883 set__bt_set_team(thread->th.th_serial_team, 0, bt_set); in __kmp_aux_set_blocktime()
8887 __kmp_gtid_from_tid(tid, thread->th.th_team), in __kmp_aux_set_blocktime()
8888 thread->th.th_team->t.t_id, tid, blocktime, bt_intervals, in __kmp_aux_set_blocktime()
8892 __kmp_gtid_from_tid(tid, thread->th.th_team), in __kmp_aux_set_blocktime()
8893 thread->th.th_team->t.t_id, tid, blocktime)); in __kmp_aux_set_blocktime()
9070 return ((__kmp_entry_thread()->th.th_local.packed_reduction_method) >> 8); in __kmp_get_reduce_method()
9092 kmp_flag_64<> fl(&thread->th.th_bar[bs_forkjoin_barrier].bb.b_go, in __kmp_resume_if_soft_paused()
9169 if (team->t.t_threads[f]->th.th_used_in_team.load() == 0) { in __kmp_resize_dist_barrier()
9175 if (team->t.t_threads[f]->th.th_used_in_team.load() == 3) { in __kmp_resize_dist_barrier()
9176 while (team->t.t_threads[f]->th.th_used_in_team.load() == 3) in __kmp_resize_dist_barrier()
9180 KMP_DEBUG_ASSERT(team->t.t_threads[f]->th.th_used_in_team.load() == 1); in __kmp_resize_dist_barrier()
9182 team->t.t_threads[f]->th.th_used_in_team.store(2); in __kmp_resize_dist_barrier()
9183 KMP_DEBUG_ASSERT(team->t.t_threads[f]->th.th_used_in_team.load() == 2); in __kmp_resize_dist_barrier()
9196 if (other_threads[f]->th.th_used_in_team.load() != 0) { in __kmp_resize_dist_barrier()
9199 void *, other_threads[f]->th.th_sleep_loc); in __kmp_resize_dist_barrier()
9200 __kmp_atomic_resume_64(other_threads[f]->th.th_info.ds.ds_gtid, flag); in __kmp_resize_dist_barrier()
9203 KMP_DEBUG_ASSERT(team->t.t_threads[f]->th.th_used_in_team.load() == 0); in __kmp_resize_dist_barrier()
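th_used_in_team is a small per-thread state word driven by compare-and-swap; from the transitions visible above, a plausible reading is 0 = not bound to the team, 1 = bound, and 2/3 = transient states used while the distributed barrier is resized or a sleeping thread is woken to join. The sketch below shows only the CAS-then-wait handshake shape, not the runtime's actual protocol:

    #include <atomic>
    #include <thread>

    // Claim an unbound thread (0 -> 3), wake it, then wait until it has
    // finished joining (state 1).  States and meanings are an assumption
    // inferred from the listing, not a documented contract.
    void signal_thread_to_join(std::atomic<int> &used_in_team) {
      int expected = 0;
      if (used_in_team.compare_exchange_strong(expected, 3)) {
        // ...resume the sleeping thread here...
        while (used_in_team.load() != 1)
          std::this_thread::yield();
      }
    }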
9223 KMP_COMPARE_AND_STORE_ACQ32(&(team->t.t_threads[f]->th.th_used_in_team), 0, in __kmp_add_threads_to_team()
9226 __kmp_resume_32(team->t.t_threads[f]->th.th_info.ds.ds_gtid, in __kmp_add_threads_to_team()
9237 if (team->t.t_threads[f]->th.th_used_in_team.load() == 1) { in __kmp_add_threads_to_team()
9288 __kmp_hidden_helper_main_thread->th.th_set_nproc = in __kmp_hidden_helper_threads_initz_routine()