Searched refs: task_rq (results 1 – 8 of 8, sorted by relevance)

/linux/kernel/sched/
ext.c
1208 lockdep_assert_rq_held(task_rq(p)); in find_dsq_for_dispatch()
1281 struct rq *rq = task_rq(p); in direct_dispatch()
1423 dsq = &task_rq(p)->scx.bypass_dsq; in set_task_runnable()
1754 * unlink_dsq_and_lock_src_rq() - Unlink task from its DSQ and lock its task_rq
1797 /* task_rq couldn't have changed if we're still the holding cpu */
1799 !WARN_ON_ONCE(src_rq != task_rq(p));
1825 * Must be called with @p's task_rq and @src_dsq locked. If @dst_dsq is a local in move_task_between_dsqs()
1826 * DSQ and @p is on a different CPU, @p will be migrated and thus its task_rq in move_task_between_dsqs()
1827 * will change. As @p's task_rq is locked, this function doesn't need to use the in move_task_between_dsqs()
1830 * On return, @src_dsq is unlocked and only @p's new task_rq, which is the current rq, is locked. in move_task_between_dsqs()
1881 struct rq *task_rq = task_rq(p); consume_dispatch_q() local
[all …]
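
Every ext.c hit above leans on the same invariant: holding a task's rq lock is what keeps task_rq(p) stable. A minimal sketch of the idiom behind the hits at 1208 and 1423 (the helper name is hypothetical; lockdep_assert_rq_held() and scx.bypass_dsq come from the matches, and the return type follows ext.c's struct scx_dispatch_q):

    /* Hypothetical helper: the caller must hold p's rq lock, which pins
     * task_rq(p) for the duration of the call. */
    static struct scx_dispatch_q *task_bypass_dsq(struct task_struct *p)
    {
            lockdep_assert_rq_held(task_rq(p));  /* splat if the rq lock isn't held */
            return &task_rq(p)->scx.bypass_dsq;  /* safe: the rq can't change under us */
    }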
core.c
272 if (prio_less(b, a, !!task_rq(a)->core->core_forceidle_count)) in __sched_core_less()
714 rq = task_rq(p); in __task_rq_lock()
716 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) { in __task_rq_lock()
738 rq = task_rq(p); in task_rq_lock()
757 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) { in task_rq_lock()
1451 p->sched_class->reweight_task(task_rq(p), p, &lw); in set_load_weight()
2243 rq = task_rq(p); in wait_task_inactive()
2530 if (task_rq(p) == rq) { in migration_cpu_stop()
2611 if (task_rq(p) != rq) in push_cpu_stop()
2628 if (task_rq(p) == rq) { in push_cpu_stop()
[all …]
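
The core.c hits at 714-757 are the canonical lock/re-check loop: a task_rq(p) read without the lock is only a hint, so the code takes the rq lock and re-validates the mapping under it. A simplified sketch of __task_rq_lock() (the pi_lock assertion and rq_flags pinning are omitted here):

    static struct rq *lock_task_rq(struct task_struct *p)
    {
            struct rq *rq;

            for (;;) {
                    rq = task_rq(p);                /* unlocked snapshot, may be stale */
                    raw_spin_rq_lock(rq);
                    /* Re-check under the lock: did p migrate in the meantime? */
                    if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
                            return rq;              /* rq is locked and now stable */
                    raw_spin_rq_unlock(rq);

                    /* Let an in-flight migration finish before retrying. */
                    while (unlikely(task_on_rq_migrating(p)))
                            cpu_relax();
            }
    }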
syscalls.c
1122 if (!cpumask_subset(task_rq(p)->rd->span, mask)) in dl_task_check_affinity()
1413 p_rq = task_rq(p); in yield_to()
1422 if (task_rq(p) != p_rq) in yield_to()
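
yield_to() (hits at 1413 and 1422) applies the same snapshot-then-re-check rule across two runqueues. A hedged sketch of that step (the function name is hypothetical; double_rq_lock()/double_rq_unlock() are the existing kernel helpers):

    static struct rq *double_lock_task_rq(struct rq *rq, struct task_struct *p)
    {
            struct rq *p_rq;

    again:
            p_rq = task_rq(p);
            double_rq_lock(rq, p_rq);       /* takes both locks in a deadlock-safe order */
            if (task_rq(p) != p_rq) {       /* p migrated between snapshot and lock */
                    double_rq_unlock(rq, p_rq);
                    goto again;
            }
            return p_rq;                    /* both rqs locked, p pinned to p_rq */
    }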
deadline.c
81 rq = task_rq(dl_task_of(dl_se)); in rq_of_dl_se()
340 dl_rq_change_utilization(task_rq(p), &p->dl, new_bw); in dl_change_utilization()
2436 rq = task_rq(p); in migrate_task_rq_dl()
2699 if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask)) in find_later_rq()
2838 (task_rq(task) != rq || in find_lock_later_rq()
3066 rq = task_rq(p); in set_cpus_allowed_dl()
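
find_lock_later_rq() (hit at 2838) is the lock-break variant: double_lock_balance() may drop rq's lock to acquire the later rq in the right order, so every prior assumption, starting with task_rq(task) == rq, has to be re-validated afterwards. Condensed from deadline.c (the surrounding retry loop and two of the checks are dropped):

    if (double_lock_balance(rq, later_rq)) {
            /* rq's lock was dropped and re-taken: the world may have changed */
            if (unlikely(task_rq(task) != rq ||
                         !cpumask_test_cpu(later_rq->cpu, &task->cpus_mask) ||
                         task_on_cpu(rq, task) || !task_on_rq_queued(task))) {
                    double_unlock_balance(rq, later_rq);
                    later_rq = NULL;        /* give up; the caller retries */
            }
    }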
stats.h
142 if (task_on_cpu(task_rq(p), p)) in psi_enqueue()
rt.c
313 return task_rq(p); in rq_of_rt_se()
1778 ret = cpupri_find_fitness(&task_rq(task)->rd->cpupri, in find_lowest_rq()
1783 ret = cpupri_find(&task_rq(task)->rd->cpupri, in find_lowest_rq()
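
find_lowest_rq() (hits at 1778 and 1783) chooses between two cpupri lookups: the fitness-aware variant when CPU capacities are asymmetric, the plain one otherwise. Roughly, following rt.c (lowest_mask is the result cpumask; error handling trimmed):

    if (sched_asym_cpucap_active())
            ret = cpupri_find_fitness(&task_rq(task)->rd->cpupri, task,
                                      lowest_mask, rt_task_fits_capacity);
    else
            ret = cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask);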
sched.h
1363 #define task_rq(p) cpu_rq(task_cpu(p)) macro
1638 return &task_rq(p)->cfs; in task_cfs_rq()
1644 struct rq *rq = task_rq(p); in cfs_rq_of()
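
The sched.h hit at 1363 is the definition itself: task_rq() is two lookups chained together with no synchronization of its own, which is why every caller above either holds the rq lock or re-checks after taking it. Expanded alongside the per-CPU plumbing it relies on (simplified from sched.h):

    DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

    #define cpu_rq(cpu)     (&per_cpu(runqueues, (cpu)))
    #define task_rq(p)      cpu_rq(task_cpu(p))     /* rq of the CPU p is queued on */
    /* task_cpu(p) just reads p's CPU number, so the result can go stale
     * the instant it is returned unless the rq lock is held. */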
fair.c
1461 (lockdep_is_held(__rq_lockp(task_rq(p))) && !READ_ONCE(p->on_cpu))); in deref_task_numa_group()
6745 WARN_ON_ONCE(task_rq(p) != rq); in hrtick_start_fair()
9738 WARN_ON_ONCE(task_rq(p) != rq); in attach_task()
13299 struct rq *rq = task_rq(a); in cfs_prio_less()
13306 WARN_ON_ONCE(task_rq(b)->core != rq->core); in cfs_prio_less()
13329 cfs_rqa = &task_rq(a)->cfs; in cfs_prio_less()
13330 cfs_rqb = &task_rq(b)->cfs; in cfs_prio_less()
13381 check_update_overutilized_status(task_rq(curr)); in task_tick_fair()
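
The fair.c hits at 6745 and 9738 use task_rq() defensively: once a path has locked the rq it believes p belongs to, it asserts the mapping rather than re-deriving it. A sketch close to fair.c's attach_task() (wakeup_preempt() assumes a recent tree; older kernels call check_preempt_curr() here):

    static void attach_task(struct rq *rq, struct task_struct *p)
    {
            lockdep_assert_rq_held(rq);

            WARN_ON_ONCE(task_rq(p) != rq); /* cheap consistency check, as at 9738 */
            activate_task(rq, p, ENQUEUE_NOCLOCK);
            wakeup_preempt(rq, p, 0);
    }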