
Searched refs: src_rq (Results 1 – 6 of 6) sorted by relevance

/linux/kernel/sched/
ext.c
1661 struct rq *src_rq, struct rq *dst_rq) in move_remote_task_to_local_dsq() argument
1663 lockdep_assert_rq_held(src_rq); in move_remote_task_to_local_dsq()
1666 deactivate_task(src_rq, p, 0); in move_remote_task_to_local_dsq()
1670 raw_spin_rq_unlock(src_rq); in move_remote_task_to_local_dsq()
1784 struct rq *src_rq) in unlink_dsq_and_lock_src_rq() argument
1795 raw_spin_rq_lock(src_rq); in unlink_dsq_and_lock_src_rq()
1799 !WARN_ON_ONCE(src_rq != task_rq(p)); in unlink_dsq_and_lock_src_rq()
1803 struct scx_dispatch_q *dsq, struct rq *src_rq) in consume_remote_task() argument
1807 if (unlink_dsq_and_lock_src_rq(p, dsq, src_rq)) { in consume_remote_task()
1808 move_remote_task_to_local_dsq(p, 0, src_rq, this_rq); in consume_remote_task()
[all …]
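The ext.c hits are sched_ext's remote-DSQ consume path: drop the task from the dispatch queue, take and re-verify the source rq, dequeue there, then re-queue it locally. A rough sketch of that ordering, pieced together from the hits above; the set_task_cpu()/activate_task() steps and the local-DSQ enqueue are inferred from context, not shown in the results, so treat this as an illustration rather than the upstream body:

    /* Sketch: consume a task queued on a DSQ whose rq is not this_rq. */
    if (unlink_dsq_and_lock_src_rq(p, dsq, src_rq)) {      /* line 1807: unlink, take src_rq lock */
            /* roughly what move_remote_task_to_local_dsq(p, 0, src_rq, this_rq) does: */
            lockdep_assert_rq_held(src_rq);                 /* line 1663 */
            deactivate_task(src_rq, p, 0);                  /* line 1666: dequeue from the remote rq */
            set_task_cpu(p, cpu_of(this_rq));               /* inferred: re-home before re-queueing */
            raw_spin_rq_unlock(src_rq);                     /* line 1670: never hold both rq locks here */
            raw_spin_rq_lock(this_rq);
            activate_task(this_rq, p, 0);                   /* simplified; the real code targets the local DSQ */
    }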
rt.c
2234 struct rq *src_rq; in pull_rt_task() local
2262 src_rq = cpu_rq(cpu); in pull_rt_task()
2271 if (src_rq->rt.highest_prio.next >= in pull_rt_task()
2281 double_lock_balance(this_rq, src_rq); in pull_rt_task()
2287 p = pick_highest_pushable_task(src_rq, this_cpu); in pull_rt_task()
2294 WARN_ON(p == src_rq->curr); in pull_rt_task()
2305 if (p->prio < src_rq->donor->prio) in pull_rt_task()
2309 push_task = get_push_task(src_rq); in pull_rt_task()
2311 move_queued_task_locked(src_rq, this_rq, p); in pull_rt_task()
2322 double_unlock_balance(this_rq, src_rq); in pull_rt_task()
[all …]
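The rt.c hits follow the classic pull pattern: scan other runqueues for a queued, pushable RT task that outranks what this CPU would run, and migrate it here under a double rq lock. A condensed sketch of one loop iteration built from the hits above; the retry-after-lock-break and the push_task follow-up (for migration-disabled tasks) are left out:

    src_rq = cpu_rq(cpu);                                           /* line 2262 */

    /* Skip CPUs whose next-highest queued RT task cannot beat what we have. */
    if (src_rq->rt.highest_prio.next >= this_rq->rt.highest_prio.curr)
            continue;                                               /* cf. line 2271 */

    double_lock_balance(this_rq, src_rq);                           /* line 2281: may drop this_rq->lock */
    p = pick_highest_pushable_task(src_rq, this_cpu);               /* line 2287 */
    if (p && p->prio < this_rq->rt.highest_prio.curr) {
            WARN_ON(p == src_rq->curr);                             /* line 2294: never pull the running task */
            /* If p also outranks src_rq's donor, src_rq will run it itself; don't steal it. */
            if (p->prio >= src_rq->donor->prio) {                   /* cf. line 2305 */
                    if (is_migration_disabled(p))
                            push_task = get_push_task(src_rq);      /* line 2309 */
                    else
                            move_queued_task_locked(src_rq, this_rq, p); /* line 2311 */
            }
    }
    double_unlock_balance(this_rq, src_rq);                         /* line 2322 */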
deadline.c
2957 struct rq *src_rq; in pull_dl_task() local
2973 src_rq = cpu_rq(cpu); in pull_dl_task()
2981 src_rq->dl.earliest_dl.next)) in pull_dl_task()
2986 double_lock_balance(this_rq, src_rq); in pull_dl_task()
2992 if (src_rq->dl.dl_nr_running <= 1) in pull_dl_task()
2995 p = pick_earliest_pushable_dl_task(src_rq, this_cpu); in pull_dl_task()
3004 WARN_ON(p == src_rq->curr); in pull_dl_task()
3012 src_rq->donor->dl.deadline)) in pull_dl_task()
3016 push_task = get_push_task(src_rq); in pull_dl_task()
3018 move_queued_task_locked(src_rq, this_rq, p); in pull_dl_task()
[all …]
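The deadline.c hits are the same pull pattern keyed on deadlines rather than priorities. A condensed sketch of the per-CPU scan implied by the hits; the re-check of p's deadline against src_rq->donor (line 3012) and the retry path are omitted here, and the exact guard ordering is an assumption:

    src_rq = cpu_rq(cpu);                                           /* line 2973 */

    /* Skip CPUs whose earliest pushable deadline is later than what we already serve. */
    if (this_rq->dl.dl_nr_running &&
        dl_time_before(this_rq->dl.earliest_dl.curr, src_rq->dl.earliest_dl.next))
            continue;                                               /* cf. line 2981 */

    double_lock_balance(this_rq, src_rq);                           /* line 2986 */
    if (src_rq->dl.dl_nr_running > 1) {                             /* cf. line 2992: something is pullable */
            p = pick_earliest_pushable_dl_task(src_rq, this_cpu);   /* line 2995 */
            if (p) {
                    WARN_ON(p == src_rq->curr);                     /* line 3004 */
                    if (is_migration_disabled(p))
                            push_task = get_push_task(src_rq);      /* line 3016 */
                    else
                            move_queued_task_locked(src_rq, this_rq, p); /* line 3018 */
            }
    }
    double_unlock_balance(this_rq, src_rq);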
fair.c
9276 struct rq *src_rq; member
9307 lockdep_assert_rq_held(env->src_rq); in task_hot()
9339 delta = rq_clock_task(env->src_rq) - p->se.exec_start; in task_hot()
9370 if (env->src_rq->nr_running > env->src_rq->nr_preferred_running) in migrate_degrades_locality()
9436 lockdep_assert_rq_held(env->src_rq); in can_migrate_task()
9507 if (task_on_cpu(env->src_rq, p) || in can_migrate_task()
9508 task_current_donor(env->src_rq, p)) { in can_migrate_task()
9544 lockdep_assert_rq_held(env->src_rq); in detach_task()
9552 WARN_ON(task_current(env->src_rq, p)); in detach_task()
9553 WARN_ON(task_current_donor(env->src_rq, p)); in detach_task()
[all …]
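In fair.c, src_rq is a member of the load-balance env (struct lb_env): can_migrate_task() filters out tasks still running on the source rq, and detach_task() dequeues a chosen task while the source rq lock is held. A short sketch of the detach step suggested by the hits; the DEQUEUE_NOCLOCK flag and the env->dst_cpu re-homing are inferred, not visible in the results:

    /* Sketch of detach_task()'s core (simplified). */
    lockdep_assert_rq_held(env->src_rq);                    /* line 9544 */
    WARN_ON(task_current(env->src_rq, p));                  /* line 9552: never detach the running task */
    WARN_ON(task_current_donor(env->src_rq, p));            /* line 9553: ...nor the donor under proxy exec */
    deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK);       /* dequeue from the busiest rq; flag assumed */
    set_task_cpu(p, env->dst_cpu);                          /* attach happens later under the dst rq lock */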
core.c
3276 struct rq *src_rq, *dst_rq; in __migrate_swap_task() local
3279 src_rq = task_rq(p); in __migrate_swap_task()
3282 rq_pin_lock(src_rq, &srf); in __migrate_swap_task()
3285 move_queued_task_locked(src_rq, dst_rq, p); in __migrate_swap_task()
3289 rq_unpin_lock(src_rq, &srf); in __migrate_swap_task()
3309 struct rq *src_rq, *dst_rq; in migrate_swap_stop() local
3314 src_rq = cpu_rq(arg->src_cpu); in migrate_swap_stop()
3318 guard(double_rq_lock)(src_rq, dst_rq); in migrate_swap_stop()
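The core.c hits come from the NUMA task-swap path: migrate_swap_stop() takes both rq locks with the double_rq_lock guard, then each task is moved with the shared queued-task helper. A minimal sketch of that flow; the migration_swap_arg fields beyond src_cpu (dst_cpu, src_task, dst_task) and the sanity checks are assumed from context:

    /* Sketch of migrate_swap_stop()'s move phase. */
    src_rq = cpu_rq(arg->src_cpu);                          /* line 3314 */
    dst_rq = cpu_rq(arg->dst_cpu);
    guard(double_rq_lock)(src_rq, dst_rq);                  /* line 3318: both rq locks, in order */

    /* With both locks held, __migrate_swap_task() pins each rq and calls
     * move_queued_task_locked() (lines 3282-3289), once in each direction. */
    __migrate_swap_task(arg->src_task, arg->dst_cpu);
    __migrate_swap_task(arg->dst_task, arg->src_cpu);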
sched.h
3896 void move_queued_task_locked(struct rq *src_rq, struct rq *dst_rq, struct task_struct *task) in move_queued_task_locked() argument
3898 lockdep_assert_rq_held(src_rq); in move_queued_task_locked()
3901 deactivate_task(src_rq, task, 0); in move_queued_task_locked()
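The sched.h hit is the shared helper that the rt.c, deadline.c, and core.c call sites above rely on. Judging from the visible lines, its body is essentially the dequeue/re-home/enqueue sequence below; the second lockdep assert and the set_task_cpu()/activate_task() steps are not shown in the hits and are reconstructed here:

    static inline
    void move_queued_task_locked(struct rq *src_rq, struct rq *dst_rq, struct task_struct *task)
    {
            lockdep_assert_rq_held(src_rq);                 /* line 3898 */
            lockdep_assert_rq_held(dst_rq);                 /* caller holds both rq locks */

            deactivate_task(src_rq, task, 0);               /* line 3901: dequeue from the source rq */
            set_task_cpu(task, cpu_of(dst_rq));             /* update task->cpu while off both queues */
            activate_task(dst_rq, task, 0);                 /* enqueue on the destination rq */
    }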