Lines Matching full:pr
106 dispatch_private_info_template<T> *pr, in __kmp_initialize_self_buffer() argument
112 if (pr->flags.use_hybrid) { in __kmp_initialize_self_buffer()
116 T pchunks = pr->u.p.pchunks; in __kmp_initialize_self_buffer()
118 T num_procs_with_pcore = pr->u.p.num_procs_with_pcore; in __kmp_initialize_self_buffer()
120 T first_thread_with_ecore = pr->u.p.first_thread_with_ecore; in __kmp_initialize_self_buffer()
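The hits above show __kmp_initialize_self_buffer reading pchunks, num_procs_with_pcore and first_thread_with_ecore back out of pr so that a thread whose buffer was claimed by thieves can recompute its own chunk window on a hybrid (P-core/E-core) machine. Below is a minimal, hypothetical sketch of that kind of split; the helper name and the weighting are assumptions (it ignores num_procs_with_pcore entirely), not libomp's arithmetic.

// Hypothetical sketch only: divide `pchunks` chunk ids over `nproc` threads,
// handing leftover chunks to the threads assumed to sit on performance cores.
// compute_self_window() and its weighting are illustrative, not libomp's.
#include <cstdio>

struct ChunkRange { unsigned long long begin, end; }; // chunk ids [begin, end)

static ChunkRange compute_self_window(unsigned long long pchunks, unsigned nproc,
                                      unsigned first_thread_with_ecore,
                                      unsigned tid) {
  unsigned long long small_chunk = pchunks / nproc; // base share per thread
  unsigned long long extras = pchunks % nproc;      // leftovers
  // Assumption: threads [0, first_thread_with_ecore) run on P-cores and are
  // the first to receive one extra chunk each.
  unsigned long long begin = tid * small_chunk + (tid < extras ? tid : extras);
  unsigned long long end = begin + small_chunk + (tid < extras ? 1 : 0);
  return {begin, end};
}

int main() {
  for (unsigned tid = 0; tid < 6; ++tid) {
    ChunkRange r = compute_self_window(/*pchunks=*/20, /*nproc=*/6,
                                       /*first_thread_with_ecore=*/4, tid);
    std::printf("tid %u -> chunks [%llu, %llu)\n", tid, r.begin, r.end);
  }
  return 0;
}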
185 dispatch_private_info_template<T> *pr, in __kmp_dispatch_init_algorithm() argument
209 "pr:%%p lb:%%%s ub:%%%s st:%%%s " in __kmp_dispatch_init_algorithm()
214 KD_TRACE(10, (buff, gtid, pr, lb, ub, st, schedule, chunk, nproc, tid)); in __kmp_dispatch_init_algorithm()
231 use_hier = pr->flags.use_hier; in __kmp_dispatch_init_algorithm()
242 pr->flags.nomerge = TRUE; in __kmp_dispatch_init_algorithm()
246 pr->flags.nomerge = FALSE; in __kmp_dispatch_init_algorithm()
248 pr->type_size = traits_t<T>::type_size; // remember the size of variables in __kmp_dispatch_init_algorithm()
250 pr->flags.ordered = TRUE; in __kmp_dispatch_init_algorithm()
254 pr->flags.ordered = FALSE; in __kmp_dispatch_init_algorithm()
257 if (pr->flags.ordered) { in __kmp_dispatch_init_algorithm()
270 if (pr->flags.ordered) // correct monotonicity for ordered loop if needed in __kmp_dispatch_init_algorithm()
367 pr->u.p.parm1 = chunk; in __kmp_dispatch_init_algorithm()
372 pr->u.p.count = 0; in __kmp_dispatch_init_algorithm()
377 (pr->flags.ordered ? ct_pdo_ordered : ct_pdo), loc); in __kmp_dispatch_init_algorithm()
411 pr->u.p.lb = lb; in __kmp_dispatch_init_algorithm()
412 pr->u.p.ub = ub; in __kmp_dispatch_init_algorithm()
413 pr->u.p.st = st; in __kmp_dispatch_init_algorithm()
414 pr->u.p.tc = tc; in __kmp_dispatch_init_algorithm()
417 pr->u.p.last_upper = ub + st; in __kmp_dispatch_init_algorithm()
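Lines 411-417 store the normalized loop descriptor (lb, ub, st, tc) into pr->u.p. For reference, the trip count of a loop with an inclusive upper bound follows the usual formula; a minimal sketch (signed arithmetic here, whereas the real code casts to the unsigned type to avoid overflow):

// Minimal sketch: trip count of a loop lb..ub (ub inclusive) with stride st.
#include <cassert>
#include <cstdio>

static long long trip_count(long long lb, long long ub, long long st) {
  assert(st != 0);
  if (st > 0)
    return (ub >= lb) ? (ub - lb) / st + 1 : 0;
  return (lb >= ub) ? (lb - ub) / (-st) + 1 : 0;
}

int main() {
  std::printf("%lld\n", trip_count(0, 99, 1));  // 100 iterations
  std::printf("%lld\n", trip_count(0, 99, 4));  // 25 iterations: 0,4,...,96
  std::printf("%lld\n", trip_count(10, 1, -3)); // 4 iterations: 10,7,4,1
  return 0;
}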
423 if (pr->flags.ordered) { in __kmp_dispatch_init_algorithm()
424 pr->ordered_bumped = 0; in __kmp_dispatch_init_algorithm()
425 pr->u.p.ordered_lower = 1; in __kmp_dispatch_init_algorithm()
426 pr->u.p.ordered_upper = 0; in __kmp_dispatch_init_algorithm()
445 int claimed = pr->steal_flag.compare_exchange_strong(old, CLAIMED); in __kmp_dispatch_init_algorithm()
452 pr->u.p.steal_lock = (kmp_lock_t *)__kmp_allocate(sizeof(kmp_lock_t)); in __kmp_dispatch_init_algorithm()
453 __kmp_init_lock(pr->u.p.steal_lock); in __kmp_dispatch_init_algorithm()
515 pr->flags.use_hybrid = use_hybrid; in __kmp_dispatch_init_algorithm()
516 pr->u.p.pchunks = p_ntc; in __kmp_dispatch_init_algorithm()
517 pr->u.p.num_procs_with_pcore = num_procs_with_pcore; in __kmp_dispatch_init_algorithm()
518 pr->u.p.first_thread_with_ecore = first_thread_with_ecore; in __kmp_dispatch_init_algorithm()
556 pr->u.p.count = init; in __kmp_dispatch_init_algorithm()
558 pr->u.p.ub = init + small_chunk + p_extra + (id < extras ? 1 : 0); in __kmp_dispatch_init_algorithm()
561 KMP_ATOMIC_ST_REL(&pr->steal_flag, READY); in __kmp_dispatch_init_algorithm()
564 KMP_DEBUG_ASSERT(pr->steal_flag == THIEF); in __kmp_dispatch_init_algorithm()
565 pr->u.p.ub = init; // mark that there are no iterations to work on in __kmp_dispatch_init_algorithm()
567 pr->u.p.parm2 = ntc; // save number of chunks in __kmp_dispatch_init_algorithm()
570 pr->u.p.parm3 = nproc; in __kmp_dispatch_init_algorithm()
571 pr->u.p.parm4 = (id + 1) % nproc; // remember neighbour tid in __kmp_dispatch_init_algorithm()
599 pr->u.p.parm1 = (id == tc - 1); /* parm1 stores *plastiter */ in __kmp_dispatch_init_algorithm()
601 pr->u.p.count = 1; /* means no more chunks to execute */ in __kmp_dispatch_init_algorithm()
602 pr->u.p.parm1 = FALSE; in __kmp_dispatch_init_algorithm()
610 pr->u.p.parm1 = (id == nproc - 1); in __kmp_dispatch_init_algorithm()
616 pr->u.p.parm1 = TRUE; in __kmp_dispatch_init_algorithm()
619 pr->u.p.count = 1; /* means no more chunks to execute */ in __kmp_dispatch_init_algorithm()
620 pr->u.p.parm1 = FALSE; in __kmp_dispatch_init_algorithm()
631 pr->u.p.lb = lb + init; in __kmp_dispatch_init_algorithm()
632 pr->u.p.ub = lb + limit; in __kmp_dispatch_init_algorithm()
636 pr->u.p.lb = lb + init * st; in __kmp_dispatch_init_algorithm()
640 pr->u.p.ub = (ub_tmp + st > ub ? ub : ub_tmp); in __kmp_dispatch_init_algorithm()
642 pr->u.p.ub = (ub_tmp + st < ub ? ub : ub_tmp); in __kmp_dispatch_init_algorithm()
645 if (pr->flags.ordered) { in __kmp_dispatch_init_algorithm()
646 pr->u.p.ordered_lower = init; in __kmp_dispatch_init_algorithm()
647 pr->u.p.ordered_upper = limit; in __kmp_dispatch_init_algorithm()
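The block above converts a thread's share, expressed as iteration indices init..limit, back into loop-variable bounds, clamping the upper bound so partitioning round-off never steps past ub. A small sketch of that mapping with a simplified clamp (the check on line 640 additionally looks one stride ahead); the names are mine, not libomp's:

// Sketch: map index range [init, limit] of a normalized loop onto the user's
// loop variable given lb/ub/st, clamping so we never run past ub.
#include <cstdio>

struct Bounds { long long lb, ub; };

static Bounds map_to_bounds(long long lb, long long ub, long long st,
                            long long init, long long limit) {
  Bounds b;
  b.lb = lb + init * st;
  long long ub_tmp = lb + limit * st;
  if (st > 0)
    b.ub = (ub_tmp > ub) ? ub : ub_tmp; // clamp from above
  else
    b.ub = (ub_tmp < ub) ? ub : ub_tmp; // clamp from below
  return b;
}

int main() {
  // Last of 4 threads over 0..102 step 2 (52 iterations, 13 per thread).
  Bounds b = map_to_bounds(0, 102, 2, /*init=*/39, /*limit=*/51);
  std::printf("lb=%lld ub=%lld\n", b.lb, b.ub); // lb=78 ub=102
  return 0;
}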
659 pr->u.p.parm1 = ((tc + nth - 1) / nth + chunk - 1) & ~(chunk - 1); in __kmp_dispatch_init_algorithm()
661 pr->u.p.parm1 = tc; in __kmp_dispatch_init_algorithm()
679 pr->u.p.parm2 = guided_int_param * nproc * (chunk + 1); in __kmp_dispatch_init_algorithm()
680 *(double *)&pr->u.p.parm3 = in __kmp_dispatch_init_algorithm()
693 pr->u.p.parm1 = tc; in __kmp_dispatch_init_algorithm()
749 (((ptrdiff_t)&pr->u.p.parm3) & (natural_alignment)) == 0); in __kmp_dispatch_init_algorithm()
754 *(DBL *)&pr->u.p.parm3 = x; in __kmp_dispatch_init_algorithm()
795 pr->u.p.parm2 = cross; in __kmp_dispatch_init_algorithm()
799 #define GUIDED_ANALYTICAL_WORKAROUND (*(DBL *)&pr->u.p.parm3) in __kmp_dispatch_init_algorithm()
804 pr->u.p.count = tc - in __kmp_dispatch_init_algorithm()
819 pr->u.p.parm1 = tc; in __kmp_dispatch_init_algorithm()
828 pr->u.p.parm1 = (nproc > 1) ? (tc + nproc - 1) / nproc : tc; in __kmp_dispatch_init_algorithm()
835 if (pr->u.p.parm1 <= 0) in __kmp_dispatch_init_algorithm()
836 pr->u.p.parm1 = KMP_DEFAULT_CHUNK; in __kmp_dispatch_init_algorithm()
837 else if (pr->u.p.parm1 > tc) in __kmp_dispatch_init_algorithm()
838 pr->u.p.parm1 = tc; in __kmp_dispatch_init_algorithm()
841 pr->u.p.parm2 = (tc / pr->u.p.parm1) + (tc % pr->u.p.parm1 ? 1 : 0); in __kmp_dispatch_init_algorithm()
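Lines 835-841 defend against a non-positive or oversized chunk and then count the chunks with a ceiling division. The same computation as a tiny sketch (kDefaultChunk is a stand-in for KMP_DEFAULT_CHUNK, whose real value is not shown here):

// Sketch: clamp a requested chunk size and count chunks with ceil(tc/chunk).
#include <cstdio>

static const long long kDefaultChunk = 1; // stand-in for KMP_DEFAULT_CHUNK

static long long num_chunks(long long tc, long long &chunk) {
  if (chunk <= 0)
    chunk = kDefaultChunk;
  else if (chunk > tc)
    chunk = tc;
  return tc / chunk + (tc % chunk ? 1 : 0); // ceiling division
}

int main() {
  long long chunk = 7;
  std::printf("%lld\n", num_chunks(100, chunk)); // 15 chunks of <=7 iterations
  return 0;
}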
888 pr->u.p.parm1 = parm1; in __kmp_dispatch_init_algorithm()
889 pr->u.p.parm2 = parm2; in __kmp_dispatch_init_algorithm()
890 pr->u.p.parm3 = parm3; in __kmp_dispatch_init_algorithm()
891 pr->u.p.parm4 = parm4; in __kmp_dispatch_init_algorithm()
902 pr->schedule = schedule; in __kmp_dispatch_init_algorithm()
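Taken together, the accesses in this function outline the shape of the per-thread descriptor. A rough reconstruction from the fields touched in this listing; field order, widths and anything not listed are guesses, not the real dispatch_private_info_template<T> from the libomp headers:

// Rough reconstruction from the accesses above; NOT the real header.
#include <atomic>

template <typename T> struct dispatch_private_sketch {
  struct {
    T lb, ub, st, tc;             // normalized bounds, stride, trip count
    T count;                      // chunk / iteration cursor
    T parm1, parm2, parm3, parm4; // schedule-specific parameters
    T ordered_lower, ordered_upper;
    T last_upper;
    T pchunks, num_procs_with_pcore, first_thread_with_ecore; // hybrid CPUs
    void *steal_lock;             // kmp_lock_t* in the real code
  } u_p;                          // accessed as u.p (a union) in the listing
  struct {
    unsigned ordered : 1, nomerge : 1, use_hier : 1, use_hybrid : 1;
  } flags;
  int schedule;                   // enum sched_type in the real code
  int type_size;
  int pushed_ws;                  // workshare construct type
  int ordered_bumped;
  std::atomic<int> steal_flag;    // e.g. UNUSED / CLAIMED / READY / THIEF
};

// Instantiate once so the sketch compiles as written.
template struct dispatch_private_sketch<long long>;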
971 dispatch_private_info_template<T> *pr; in __kmp_dispatch_init() local
1023 pr = reinterpret_cast<dispatch_private_info_template<T> *>( in __kmp_dispatch_init()
1031 if (pr->flags.use_hier) { in __kmp_dispatch_init()
1036 pr->flags.use_hier = FALSE; in __kmp_dispatch_init()
1042 if (!ordered && !pr->flags.use_hier) in __kmp_dispatch_init()
1055 pr = reinterpret_cast<dispatch_private_info_template<T> *>( in __kmp_dispatch_init()
1064 pr = reinterpret_cast<dispatch_private_info_template<T> *>( in __kmp_dispatch_init()
1085 __kmp_dispatch_init_algorithm(loc, gtid, pr, schedule, lb, ub, st, in __kmp_dispatch_init()
1092 if (pr->flags.ordered == 0) { in __kmp_dispatch_init()
1099 th->th.th_dispatch->th_dispatch_pr_current = (dispatch_private_info_t *)pr; in __kmp_dispatch_init()
1103 if (pr->flags.ordered) { in __kmp_dispatch_init()
1115 cur_chunk = pr->u.p.parm1; in __kmp_dispatch_init()
1131 __kmp_itt_metadata_loop(loc, schedtype, pr->u.p.tc, cur_chunk); in __kmp_dispatch_init()
1134 if (pr->flags.use_hier) { in __kmp_dispatch_init()
1135 pr->u.p.count = 0; in __kmp_dispatch_init()
1136 pr->u.p.ub = pr->u.p.lb = pr->u.p.st = pr->u.p.tc = 0; in __kmp_dispatch_init()
1155 KD_TRACE(10, (buff, gtid, pr->schedule, pr->flags.ordered, pr->u.p.lb, in __kmp_dispatch_init()
1156 pr->u.p.ub, pr->u.p.st, pr->u.p.tc, pr->u.p.count, in __kmp_dispatch_init()
1157 pr->u.p.ordered_lower, pr->u.p.ordered_upper, pr->u.p.parm1, in __kmp_dispatch_init()
1158 pr->u.p.parm2, pr->u.p.parm3, pr->u.p.parm4)); in __kmp_dispatch_init()
1167 ompt_get_work_schedule(pr->schedule), ompt_scope_begin, in __kmp_dispatch_init()
1168 &(team_info->parallel_data), &(task_info->task_data), pr->u.p.tc, in __kmp_dispatch_init()
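The reinterpret_cast hits in __kmp_dispatch_init (lines 1023, 1055, 1064) all resolve pr out of the thread's dispatch buffers; conceptually each new dynamically scheduled loop takes the next slot of a small ring of buffers, so a thread can begin the following loop while stragglers still read the previous descriptor. A conceptual sketch of that selection; the buffer count, names and types are placeholders, not libomp's:

// Conceptual sketch of round-robin dispatch-buffer selection.
#include <cstdio>

struct PrivateInfo { long long lb, ub, st, tc; };

struct ThreadDispatch {
  static constexpr int kNumBuffers = 4; // placeholder ring size
  PrivateInfo buffers[kNumBuffers];
  unsigned next_index = 0;              // bumped once per dispatched loop
  PrivateInfo *current = nullptr;       // th_dispatch_pr_current in spirit

  PrivateInfo *begin_loop() {
    PrivateInfo *pr = &buffers[next_index % kNumBuffers];
    ++next_index;
    current = pr;
    return pr;
  }
};

int main() {
  ThreadDispatch td;
  for (int loop = 0; loop < 6; ++loop) {
    PrivateInfo *pr = td.begin_loop();
    std::printf("loop %d uses buffer %ld\n", loop, (long)(pr - td.buffers));
  }
  return 0;
}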
1189 dispatch_private_info_template<UT> *pr = in __kmp_dispatch_finish() local
1195 KMP_DEBUG_ASSERT(pr); in __kmp_dispatch_finish()
1200 if (pr->ordered_bumped) { in __kmp_dispatch_finish()
1205 pr->ordered_bumped = 0; in __kmp_dispatch_finish()
1207 UT lower = pr->u.p.ordered_lower; in __kmp_dispatch_finish()
1252 dispatch_private_info_template<UT> *pr = in __kmp_dispatch_finish_chunk() local
1258 KMP_DEBUG_ASSERT(pr); in __kmp_dispatch_finish_chunk()
1263 UT lower = pr->u.p.ordered_lower; in __kmp_dispatch_finish_chunk()
1264 UT upper = pr->u.p.ordered_upper; in __kmp_dispatch_finish_chunk()
1267 if (pr->ordered_bumped == inc) { in __kmp_dispatch_finish_chunk()
1272 pr->ordered_bumped = 0; in __kmp_dispatch_finish_chunk()
1274 inc -= pr->ordered_bumped; in __kmp_dispatch_finish_chunk()
1296 pr->ordered_bumped = 0; in __kmp_dispatch_finish_chunk()
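Both finish routines reconcile pr->ordered_bumped with the chunk's ordered_lower/ordered_upper window: a thread leaving an ordered region must wait until the shared ordered iteration counter reaches its own lower bound and then advance it for the next thread. A simplified model of that hand-off using a bare atomic; the real code waits on the shared dispatch buffer with backoff rather than a busy spin:

// Simplified model of the ordered hand-off: each chunk waits for the global
// ordered counter to reach its lower bound, then releases [lower, upper].
#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

static std::atomic<unsigned long long> ordered_iteration{0};

static void finish_chunk(unsigned long long lower, unsigned long long upper) {
  while (ordered_iteration.load(std::memory_order_acquire) < lower)
    std::this_thread::yield();                                  // wait for our turn
  ordered_iteration.store(upper + 1, std::memory_order_release); // release chunk
}

int main() {
  std::vector<std::thread> workers;
  // Four chunks of 5 ordered iterations each, finished out of order.
  for (unsigned long long c : {2, 0, 3, 1})
    workers.emplace_back([c] {
      std::printf("chunk %llu done, waiting to retire\n", c);
      finish_chunk(c * 5, c * 5 + 4);
    });
  for (auto &t : workers) t.join();
  std::printf("ordered counter ended at %llu\n", ordered_iteration.load());
  return 0;
}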
1324 dispatch_private_info_template<T> *pr, in __kmp_dispatch_next_algorithm() argument
1342 KMP_DEBUG_ASSERT(pr); in __kmp_dispatch_next_algorithm()
1350 __kmp_str_format("__kmp_dispatch_next_algorithm: T#%%d called pr:%%p " in __kmp_dispatch_next_algorithm()
1353 KD_TRACE(10, (buff, gtid, pr, sh, nproc, tid)); in __kmp_dispatch_next_algorithm()
1359 if (pr->u.p.tc == 0) { in __kmp_dispatch_next_algorithm()
1367 switch (pr->schedule) { in __kmp_dispatch_next_algorithm()
1370 T chunk = pr->u.p.parm1; in __kmp_dispatch_next_algorithm()
1371 UT nchunks = pr->u.p.parm2; in __kmp_dispatch_next_algorithm()
1376 trip = pr->u.p.tc - 1; in __kmp_dispatch_next_algorithm()
1381 kmp_lock_t *lck = pr->u.p.steal_lock; in __kmp_dispatch_next_algorithm()
1383 if (pr->u.p.count < (UT)pr->u.p.ub) { in __kmp_dispatch_next_algorithm()
1384 KMP_DEBUG_ASSERT(pr->steal_flag == READY); in __kmp_dispatch_next_algorithm()
1387 init = (pr->u.p.count)++; in __kmp_dispatch_next_algorithm()
1388 status = (init < (UT)pr->u.p.ub); in __kmp_dispatch_next_algorithm()
1395 T while_limit = pr->u.p.parm3; in __kmp_dispatch_next_algorithm()
1400 KMP_ATOMIC_ST_REL(&pr->steal_flag, THIEF); // mark self buffer inactive in __kmp_dispatch_next_algorithm()
1404 T victimId = pr->u.p.parm4; in __kmp_dispatch_next_algorithm()
1409 while ((v == pr || KMP_ATOMIC_LD_RLX(&v->steal_flag) == THIEF) && in __kmp_dispatch_next_algorithm()
1416 if (v == pr || KMP_ATOMIC_LD_RLX(&v->steal_flag) == THIEF) { in __kmp_dispatch_next_algorithm()
1427 __kmp_initialize_self_buffer<T>(team, id, pr, nchunks, nproc, in __kmp_dispatch_next_algorithm()
1431 pr->u.p.count = init + 1; // exclude one we execute immediately in __kmp_dispatch_next_algorithm()
1432 pr->u.p.ub = init + small_chunk + p_extra + (id < extras ? 1 : 0); in __kmp_dispatch_next_algorithm()
1434 pr->u.p.parm4 = (id + 1) % nproc; // remember neighbour tid in __kmp_dispatch_next_algorithm()
1444 KD_TRACE(10, (buff, gtid, id, pr->u.p.count, pr->u.p.ub)); in __kmp_dispatch_next_algorithm()
1449 if (pr->u.p.count < (UT)pr->u.p.ub) in __kmp_dispatch_next_algorithm()
1450 KMP_ATOMIC_ST_REL(&pr->steal_flag, READY); in __kmp_dispatch_next_algorithm()
1456 pr->u.p.parm4 = (victimId + 1) % nproc; // shift start victim tid in __kmp_dispatch_next_algorithm()
1465 pr->u.p.parm4 = (victimId + 1) % nproc; // shift start victim tid in __kmp_dispatch_next_algorithm()
1495 pr->u.p.parm4 = victimId; // remember victim to steal from in __kmp_dispatch_next_algorithm()
1499 pr->u.p.count = init + 1; in __kmp_dispatch_next_algorithm()
1500 pr->u.p.ub = limit; in __kmp_dispatch_next_algorithm()
1504 KMP_ATOMIC_ST_REL(&pr->steal_flag, READY); in __kmp_dispatch_next_algorithm()
1518 if (pr->u.p.count < (UT)pr->u.p.ub) { in __kmp_dispatch_next_algorithm()
1519 KMP_DEBUG_ASSERT(pr->steal_flag == READY); in __kmp_dispatch_next_algorithm()
1520 vold.b = *(volatile kmp_int64 *)(&pr->u.p.count); in __kmp_dispatch_next_algorithm()
1524 (volatile kmp_int64 *)&pr->u.p.count, in __kmp_dispatch_next_algorithm()
1528 vold.b = *(volatile kmp_int64 *)(&pr->u.p.count); in __kmp_dispatch_next_algorithm()
1538 T while_limit = pr->u.p.parm3; in __kmp_dispatch_next_algorithm()
1543 KMP_ATOMIC_ST_REL(&pr->steal_flag, THIEF); // mark self buffer inactive in __kmp_dispatch_next_algorithm()
1547 T victimId = pr->u.p.parm4; in __kmp_dispatch_next_algorithm()
1552 while ((v == pr || KMP_ATOMIC_LD_RLX(&v->steal_flag) == THIEF) && in __kmp_dispatch_next_algorithm()
1559 if (v == pr || KMP_ATOMIC_LD_RLX(&v->steal_flag) == THIEF) { in __kmp_dispatch_next_algorithm()
1570 __kmp_initialize_self_buffer<T>(team, id, pr, nchunks, nproc, in __kmp_dispatch_next_algorithm()
1577 KMP_XCHG_FIXED64((volatile kmp_int64 *)(&pr->u.p.count), vnew.b); in __kmp_dispatch_next_algorithm()
1579 *(volatile kmp_int64 *)(&pr->u.p.count) = vnew.b; in __kmp_dispatch_next_algorithm()
1581 pr->u.p.parm4 = (id + 1) % nproc; // remember neighbour tid in __kmp_dispatch_next_algorithm()
1591 KD_TRACE(10, (buff, gtid, id, pr->u.p.count, pr->u.p.ub)); in __kmp_dispatch_next_algorithm()
1596 if (pr->u.p.count < (UT)pr->u.p.ub) in __kmp_dispatch_next_algorithm()
1597 KMP_ATOMIC_ST_REL(&pr->steal_flag, READY); in __kmp_dispatch_next_algorithm()
1606 pr->u.p.parm4 = (victimId + 1) % nproc; // shift start victim id in __kmp_dispatch_next_algorithm()
1639 pr->u.p.parm4 = victimId; // keep victim id in __kmp_dispatch_next_algorithm()
1644 KMP_XCHG_FIXED64((volatile kmp_int64 *)(&pr->u.p.count), vold.b); in __kmp_dispatch_next_algorithm()
1646 *(volatile kmp_int64 *)(&pr->u.p.count) = vold.b; in __kmp_dispatch_next_algorithm()
1650 KMP_ATOMIC_ST_REL(&pr->steal_flag, READY); in __kmp_dispatch_next_algorithm()
1664 start = pr->u.p.lb; in __kmp_dispatch_next_algorithm()
1667 incr = pr->u.p.st; in __kmp_dispatch_next_algorithm()
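In both the locked and the lock-free variants above, the fast path takes the next chunk id out of the thread's own [count, ub) window, and only when that window is empty does the thread mark itself THIEF and walk the other buffers, taking a share of a victim's remaining chunks. A compressed, mutex-based model of that policy; libomp instead uses per-buffer locks or a 64-bit CAS on the packed {count, ub} pair, and a more careful victim search and steal amount:

// Compressed model of "use own chunk window first, then steal" scheduling.
#include <cstdio>
#include <mutex>
#include <vector>

struct Window {
  unsigned long long count = 0, ub = 0; // chunk ids [count, ub) still unclaimed
  std::mutex m;
};

// Take the next chunk id for thread `self`, stealing the tail of a victim's
// window when the own window is exhausted.
static bool next_chunk(std::vector<Window> &w, unsigned self,
                       unsigned long long &chunk_id) {
  {
    std::lock_guard<std::mutex> g(w[self].m);
    if (w[self].count < w[self].ub) {
      chunk_id = w[self].count++;
      return true;
    }
  }
  for (unsigned i = 1; i < w.size(); ++i) {
    unsigned victim = (self + i) % w.size();
    unsigned long long steal_begin = 0, steal_end = 0;
    {
      std::lock_guard<std::mutex> g(w[victim].m);
      unsigned long long remaining = w[victim].ub - w[victim].count;
      if (remaining == 0)
        continue;
      unsigned long long take = remaining - remaining / 2; // at least one
      steal_end = w[victim].ub;
      steal_begin = steal_end - take;
      w[victim].ub = steal_begin;      // shrink the victim's window
    }
    chunk_id = steal_begin;            // run the first stolen chunk now
    std::lock_guard<std::mutex> g(w[self].m);
    w[self].count = steal_begin + 1;   // keep the rest of the stolen tail
    w[self].ub = steal_end;
    return true;
  }
  return false;                        // every window is empty
}

int main() {
  std::vector<Window> w(2);
  w[1].ub = 8;                         // thread 1 owns chunks 0..7; thread 0 starts empty
  unsigned long long id;
  while (next_chunk(w, /*self=*/0, id))
    std::printf("thread 0 runs chunk %llu\n", id);
  return 0;
}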
1696 if ((status = !pr->u.p.count) != 0) { in __kmp_dispatch_next_algorithm()
1697 pr->u.p.count = 1; in __kmp_dispatch_next_algorithm()
1698 *p_lb = pr->u.p.lb; in __kmp_dispatch_next_algorithm()
1699 *p_ub = pr->u.p.ub; in __kmp_dispatch_next_algorithm()
1700 last = (pr->u.p.parm1 != 0); in __kmp_dispatch_next_algorithm()
1702 *p_st = pr->u.p.st; in __kmp_dispatch_next_algorithm()
1704 pr->u.p.lb = pr->u.p.ub + pr->u.p.st; in __kmp_dispatch_next_algorithm()
1716 parm1 = pr->u.p.parm1; in __kmp_dispatch_next_algorithm()
1718 trip = pr->u.p.tc - 1; in __kmp_dispatch_next_algorithm()
1719 init = parm1 * (pr->u.p.count + tid); in __kmp_dispatch_next_algorithm()
1722 start = pr->u.p.lb; in __kmp_dispatch_next_algorithm()
1723 incr = pr->u.p.st; in __kmp_dispatch_next_algorithm()
1732 pr->u.p.count += nproc; in __kmp_dispatch_next_algorithm()
1742 if (pr->flags.ordered) { in __kmp_dispatch_next_algorithm()
1743 pr->u.p.ordered_lower = init; in __kmp_dispatch_next_algorithm()
1744 pr->u.p.ordered_upper = limit; in __kmp_dispatch_next_algorithm()
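The arithmetic above (init = parm1 * (count + tid), then count += nproc) gives each thread a fixed cyclic sequence of chunks with no shared state: thread tid takes chunks tid, tid + nproc, tid + 2*nproc, and so on. A tiny sketch of the resulting assignment:

// Sketch: static cyclic chunk assignment with no shared counter.
#include <cstdio>

int main() {
  const unsigned long long tc = 23;   // trip count
  const unsigned long long chunk = 4; // iterations per chunk
  const unsigned nproc = 3;
  for (unsigned tid = 0; tid < nproc; ++tid) {
    std::printf("tid %u:", tid);
    // `count` plays the role of the thread-private pr->u.p.count.
    for (unsigned long long count = 0;; count += nproc) {
      unsigned long long init = chunk * (count + tid); // first iteration
      if (init >= tc)
        break;
      unsigned long long limit = init + chunk - 1;     // last iteration
      if (limit > tc - 1)
        limit = tc - 1;                                 // clip the tail chunk
      std::printf(" [%llu,%llu]", init, limit);
    }
    std::printf("\n");
  }
  return 0;
}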
1752 UT chunk_size = pr->u.p.parm1; in __kmp_dispatch_next_algorithm()
1753 UT nchunks = pr->u.p.parm2; in __kmp_dispatch_next_algorithm()
1769 trip = pr->u.p.tc - 1; in __kmp_dispatch_next_algorithm()
1770 start = pr->u.p.lb; in __kmp_dispatch_next_algorithm()
1771 incr = pr->u.p.st; in __kmp_dispatch_next_algorithm()
1789 if (pr->flags.ordered) { in __kmp_dispatch_next_algorithm()
1790 pr->u.p.ordered_lower = init; in __kmp_dispatch_next_algorithm()
1791 pr->u.p.ordered_upper = limit; in __kmp_dispatch_next_algorithm()
1798 T chunkspec = pr->u.p.parm1; in __kmp_dispatch_next_algorithm()
1802 trip = pr->u.p.tc; in __kmp_dispatch_next_algorithm()
1814 pr->u.p.parm2) { // compare with K*nproc*(chunk+1), K=2 by default in __kmp_dispatch_next_algorithm()
1835 *(double *)&pr->u.p.parm3); // divide by K*nproc in __kmp_dispatch_next_algorithm()
1845 start = pr->u.p.lb; in __kmp_dispatch_next_algorithm()
1846 incr = pr->u.p.st; in __kmp_dispatch_next_algorithm()
1851 if (pr->flags.ordered) { in __kmp_dispatch_next_algorithm()
1852 pr->u.p.ordered_lower = init; in __kmp_dispatch_next_algorithm()
1853 pr->u.p.ordered_upper = limit; in __kmp_dispatch_next_algorithm()
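The guided path takes chunks proportional to the work still remaining: while remaining exceeds parm2 (roughly K*nproc*(chunk+1), with K=2 by default per the comments above), the next chunk is remaining * parm3, i.e. about remaining/(K*nproc); once the tail gets small it degrades to plain chunked dispatch. A single-threaded simulation of that chunk-size decay, with the constants as stated in the comments and everything else simplified:

// Single-threaded simulation of guided chunk-size decay.
#include <cstdio>

int main() {
  const double K = 2.0;
  const unsigned nproc = 4;
  const unsigned long long chunk = 8;  // minimum chunk size
  unsigned long long remaining = 1000; // trip count
  const unsigned long long threshold =
      (unsigned long long)(K * nproc * (chunk + 1)); // ~parm2
  while (remaining > 0) {
    unsigned long long take;
    if (remaining > threshold)
      take = (unsigned long long)(remaining / (K * nproc)); // guided portion
    else
      take = remaining < chunk ? remaining : chunk;         // chunked tail
    std::printf("take %llu (remaining %llu)\n", take, remaining);
    remaining -= take;
  }
  return 0;
}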
1867 T chunk = pr->u.p.parm1; in __kmp_dispatch_next_algorithm()
1871 trip = pr->u.p.tc; in __kmp_dispatch_next_algorithm()
1883 if ((T)remaining < pr->u.p.parm2) { in __kmp_dispatch_next_algorithm()
1905 __kmp_type_convert((double)remaining * (*(double *)&pr->u.p.parm3), in __kmp_dispatch_next_algorithm()
1920 start = pr->u.p.lb; in __kmp_dispatch_next_algorithm()
1921 incr = pr->u.p.st; in __kmp_dispatch_next_algorithm()
1926 if (pr->flags.ordered) { in __kmp_dispatch_next_algorithm()
1927 pr->u.p.ordered_lower = init; in __kmp_dispatch_next_algorithm()
1928 pr->u.p.ordered_upper = limit; in __kmp_dispatch_next_algorithm()
1940 T chunkspec = pr->u.p.parm1; in __kmp_dispatch_next_algorithm()
1952 trip = pr->u.p.tc; in __kmp_dispatch_next_algorithm()
1960 if (chunkIdx >= (UT)pr->u.p.parm2) { in __kmp_dispatch_next_algorithm()
1963 init = chunkIdx * chunkspec + pr->u.p.count; in __kmp_dispatch_next_algorithm()
1991 trip, *(DBL *)&pr->u.p.parm3, chunkIdx); in __kmp_dispatch_next_algorithm()
1997 trip, *(DBL *)&pr->u.p.parm3, chunkIdx + 1); in __kmp_dispatch_next_algorithm()
2015 start = pr->u.p.lb; in __kmp_dispatch_next_algorithm()
2016 incr = pr->u.p.st; in __kmp_dispatch_next_algorithm()
2021 if (pr->flags.ordered) { in __kmp_dispatch_next_algorithm()
2022 pr->u.p.ordered_lower = init; in __kmp_dispatch_next_algorithm()
2023 pr->u.p.ordered_upper = limit; in __kmp_dispatch_next_algorithm()
2036 T parm2 = pr->u.p.parm2; in __kmp_dispatch_next_algorithm()
2037 T parm3 = pr->u.p.parm3; in __kmp_dispatch_next_algorithm()
2038 T parm4 = pr->u.p.parm4; in __kmp_dispatch_next_algorithm()
2046 trip = pr->u.p.tc - 1; in __kmp_dispatch_next_algorithm()
2054 start = pr->u.p.lb; in __kmp_dispatch_next_algorithm()
2056 incr = pr->u.p.st; in __kmp_dispatch_next_algorithm()
2072 if (pr->flags.ordered) { in __kmp_dispatch_next_algorithm()
2073 pr->u.p.ordered_lower = init; in __kmp_dispatch_next_algorithm()
2074 pr->u.p.ordered_upper = limit; in __kmp_dispatch_next_algorithm()
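The parm2..parm4 reads above belong to trapezoid self-scheduling, where chunk sizes shrink linearly from a large first chunk toward a minimum. A generic model of that decay follows; it uses the textbook parameterization (first chunk of about tc/(2*nproc)) and my own variable names, and is not tied to the parm encoding used here:

// Generic trapezoid self-scheduling model: chunks shrink by a fixed decrement.
#include <cstdio>

int main() {
  const unsigned long long tc = 500;                    // trip count
  const unsigned nproc = 4;
  unsigned long long first = tc / (2 * nproc);          // classic TSS first chunk
  unsigned long long nchunks = (2 * tc) / (first + 1);  // approximate chunk count
  unsigned long long dec = nchunks > 1 ? (first - 1) / (nchunks - 1) : 0;
  unsigned long long size = first, remaining = tc;
  for (unsigned long long i = 0; remaining > 0; ++i) {
    unsigned long long take = size < remaining ? size : remaining;
    if (take == 0)
      take = remaining;                                 // never stall on the tail
    std::printf("chunk %llu: %llu iterations\n", i, take);
    remaining -= take;
    size = size > dec ? size - dec : 1;                 // linear decay, floor at 1
  }
  return 0;
}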
2090 if (pr->flags.ordered) { in __kmp_dispatch_next_algorithm()
2096 KD_TRACE(1000, (buff, gtid, pr->u.p.ordered_lower, pr->u.p.ordered_upper)); in __kmp_dispatch_next_algorithm()
2125 ompt_get_work_schedule(pr->schedule), ompt_scope_end, \
2153 i = (kmp_int64)(pr->u.p.st); \
2198 dispatch_private_info_template<T> *pr; in __kmp_dispatch_next() local
2211 pr = reinterpret_cast<dispatch_private_info_template<T> *>( in __kmp_dispatch_next()
2213 KMP_DEBUG_ASSERT(pr); in __kmp_dispatch_next()
2215 if ((status = (pr->u.p.tc != 0)) == 0) { in __kmp_dispatch_next()
2223 if (pr->pushed_ws != ct_none) { in __kmp_dispatch_next()
2224 pr->pushed_ws = __kmp_pop_workshare(gtid, pr->pushed_ws, loc); in __kmp_dispatch_next()
2227 } else if (pr->flags.nomerge) { in __kmp_dispatch_next()
2232 T chunk = pr->u.p.parm1; in __kmp_dispatch_next()
2237 init = chunk * pr->u.p.count++; in __kmp_dispatch_next()
2238 trip = pr->u.p.tc - 1; in __kmp_dispatch_next()
2248 if (pr->pushed_ws != ct_none) { in __kmp_dispatch_next()
2249 pr->pushed_ws = __kmp_pop_workshare(gtid, pr->pushed_ws, loc); in __kmp_dispatch_next()
2253 start = pr->u.p.lb; in __kmp_dispatch_next()
2255 incr = pr->u.p.st; in __kmp_dispatch_next()
2260 pr->u.p.last_upper = pr->u.p.ub; in __kmp_dispatch_next()
2275 if (pr->flags.ordered) { in __kmp_dispatch_next()
2276 pr->u.p.ordered_lower = init; in __kmp_dispatch_next()
2277 pr->u.p.ordered_upper = limit; in __kmp_dispatch_next()
2285 KD_TRACE(1000, (buff, gtid, pr->u.p.ordered_lower, in __kmp_dispatch_next()
2286 pr->u.p.ordered_upper)); in __kmp_dispatch_next()
2293 pr->u.p.tc = 0; in __kmp_dispatch_next()
2294 *p_lb = pr->u.p.lb; in __kmp_dispatch_next()
2295 *p_ub = pr->u.p.ub; in __kmp_dispatch_next()
2297 pr->u.p.last_upper = *p_ub; in __kmp_dispatch_next()
2302 *p_st = pr->u.p.st; in __kmp_dispatch_next()
2320 OMPT_LOOP_DISPATCH(*p_lb, *p_ub, pr->u.p.st, status); in __kmp_dispatch_next()
2331 pr = reinterpret_cast<dispatch_private_info_template<T> *>( in __kmp_dispatch_next()
2333 KMP_DEBUG_ASSERT(pr); in __kmp_dispatch_next()
2339 if (pr->flags.use_hier) in __kmp_dispatch_next()
2340 status = sh->hier->next(loc, gtid, pr, &last, p_lb, p_ub, p_st); in __kmp_dispatch_next()
2343 status = __kmp_dispatch_next_algorithm<T>(gtid, pr, sh, &last, p_lb, p_ub, in __kmp_dispatch_next()
2363 pr->flags.use_hier = FALSE; in __kmp_dispatch_next()
2367 if (pr->schedule == kmp_sch_static_steal) { in __kmp_dispatch_next()
2397 if (pr->flags.ordered) { in __kmp_dispatch_next()
2411 if (pr->pushed_ws != ct_none) { in __kmp_dispatch_next()
2412 pr->pushed_ws = __kmp_pop_workshare(gtid, pr->pushed_ws, loc); in __kmp_dispatch_next()
2423 pr->u.p.last_upper = pr->u.p.ub; in __kmp_dispatch_next()
2446 OMPT_LOOP_DISPATCH(*p_lb, *p_ub, pr->u.p.st, status); in __kmp_dispatch_next()
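For orientation, all of the dispatch machinery in this listing sits behind ordinary worksharing loops with a dynamic-family schedule: the compiler lowers such a loop into one call to the dispatch-init entry point followed by a loop around dispatch-next, which is what reaches __kmp_dispatch_init and __kmp_dispatch_next above. A user-level loop that exercises this path:

// A loop like this drives the dispatch code above: each thread keeps asking
// the runtime for the next chunk until none remain. Build with -fopenmp.
#include <cstdio>
#include <omp.h>

int main() {
  const int n = 1000;
  double sum = 0.0;
#pragma omp parallel for schedule(guided, 4) reduction(+ : sum)
  for (int i = 0; i < n; ++i)
    sum += 1.0 / (i + 1);
  std::printf("threads=%d sum=%f\n", omp_get_max_threads(), sum);
  return 0;
}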