
Searched refs: src_rq (Results 1 – 5 of 5) sorted by relevance

/linux/kernel/sched/
ext.c
2296 struct rq *src_rq, struct rq *dst_rq) in move_remote_task_to_local_dsq() argument
2298 lockdep_assert_rq_held(src_rq); in move_remote_task_to_local_dsq()
2301 deactivate_task(src_rq, p, 0); in move_remote_task_to_local_dsq()
2305 raw_spin_rq_unlock(src_rq); in move_remote_task_to_local_dsq()
2415 struct rq *src_rq) in unlink_dsq_and_lock_src_rq() argument
2426 raw_spin_rq_lock(src_rq); in unlink_dsq_and_lock_src_rq()
2430 !WARN_ON_ONCE(src_rq != task_rq(p)); in unlink_dsq_and_lock_src_rq()
2434 struct scx_dispatch_q *dsq, struct rq *src_rq) in consume_remote_task() argument
2438 if (unlink_dsq_and_lock_src_rq(p, dsq, src_rq)) { in consume_remote_task()
2439 move_remote_task_to_local_dsq(p, 0, src_rq, this_rq); in consume_remote_task()
[all …]
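
The ext.c hits show the remote-to-local hand-off discipline: the task is deactivated while only src_rq's lock is held, src_rq is then dropped, and the task is re-added under the destination lock. Below is a minimal user-space sketch of that "never hold both locks across the move" pattern; the queue, item, and move_item names are hypothetical stand-ins, not the kernel's API.

#include <pthread.h>

/* Hypothetical mutex-protected singly linked queue; not the kernel's types. */
struct item  { struct item *next; };
struct queue { pthread_mutex_t lock; struct item *head; };

static void queue_del(struct queue *q, struct item *it)
{
	struct item **pp = &q->head;

	while (*pp && *pp != it)
		pp = &(*pp)->next;
	if (*pp)
		*pp = it->next;
}

static void queue_add(struct queue *q, struct item *it)
{
	it->next = q->head;
	q->head  = it;
}

/* Move 'it' from src to dst without ever holding both locks at once. */
static void move_item(struct item *it, struct queue *src, struct queue *dst)
{
	pthread_mutex_lock(&src->lock);
	queue_del(src, it);               /* cf. deactivate_task(src_rq, p, 0) */
	pthread_mutex_unlock(&src->lock); /* cf. raw_spin_rq_unlock(src_rq)    */

	pthread_mutex_lock(&dst->lock);
	queue_add(dst, it);               /* re-add under the destination lock */
	pthread_mutex_unlock(&dst->lock);
}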
deadline.c
2788 struct rq *src_rq; in pull_dl_task() local
2804 src_rq = cpu_rq(cpu); in pull_dl_task()
2812 src_rq->dl.earliest_dl.next)) in pull_dl_task()
2817 double_lock_balance(this_rq, src_rq); in pull_dl_task()
2823 if (src_rq->dl.dl_nr_running <= 1) in pull_dl_task()
2826 p = pick_earliest_pushable_dl_task(src_rq, this_cpu); in pull_dl_task()
2835 WARN_ON(p == src_rq->curr); in pull_dl_task()
2843 src_rq->donor->dl.deadline)) in pull_dl_task()
2847 push_task = get_push_task(src_rq); in pull_dl_task()
2849 move_queued_task_locked(src_rq, this_rq, p); in pull_dl_task()
[all …]
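
pull_dl_task() acquires both runqueue locks through double_lock_balance() before picking and moving a pushable deadline task. The deadlock-avoidance idea behind taking two locks at once is to impose a single global ordering; the sketch below shows one common form of that idea with plain pthread mutexes ordered by address. double_lock()/double_unlock() are hypothetical and deliberately simpler than the kernel's double_lock_balance(), which also has to cope with this_rq's lock already being held.

#include <pthread.h>
#include <stdint.h>

/* Always lock the lower-addressed mutex first so two CPUs pulling tasks
 * from each other cannot deadlock (AB-BA). */
static void double_lock(pthread_mutex_t *a, pthread_mutex_t *b)
{
	if (a == b) {
		pthread_mutex_lock(a);
	} else if ((uintptr_t)a < (uintptr_t)b) {
		pthread_mutex_lock(a);
		pthread_mutex_lock(b);
	} else {
		pthread_mutex_lock(b);
		pthread_mutex_lock(a);
	}
}

static void double_unlock(pthread_mutex_t *a, pthread_mutex_t *b)
{
	pthread_mutex_unlock(a);
	if (a != b)
		pthread_mutex_unlock(b);
}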
core.c
3353 struct rq *src_rq, *dst_rq; in __migrate_swap_task() local
3356 src_rq = task_rq(p); in __migrate_swap_task()
3359 rq_pin_lock(src_rq, &srf); in __migrate_swap_task()
3362 move_queued_task_locked(src_rq, dst_rq, p); in __migrate_swap_task()
3366 rq_unpin_lock(src_rq, &srf); in __migrate_swap_task()
3386 struct rq *src_rq, *dst_rq; in migrate_swap_stop() local
3391 src_rq = cpu_rq(arg->src_cpu); in migrate_swap_stop()
3395 guard(double_rq_lock)(src_rq, dst_rq); in migrate_swap_stop()
10274 int __sched_mm_cid_migrate_from_fetch_cid(struct rq *src_rq, in __sched_mm_cid_migrate_from_fetch_cid() argument
10303 src_task = rcu_dereference(src_rq->curr); in __sched_mm_cid_migrate_from_fetch_cid()
[all …]
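
migrate_swap_stop() takes both locks with guard(double_rq_lock)(...), the kernel's scope-based locking helper, so the locks are released automatically when the scope ends. A rough user-space analogue can be built on the GCC/Clang cleanup attribute; the lock_guard type and GUARD() macro below are hypothetical, not the kernel's implementation.

#include <pthread.h>

struct lock_guard { pthread_mutex_t *m; };

/* Cleanup handler: runs when the guard variable goes out of scope. */
static void lock_guard_release(struct lock_guard *g)
{
	pthread_mutex_unlock(g->m);
}

#define CONCAT_(a, b) a##b
#define CONCAT(a, b)  CONCAT_(a, b)
#define GUARD(mutex)                                                     \
	pthread_mutex_lock((mutex));                                     \
	struct lock_guard CONCAT(guard_, __LINE__)                       \
		__attribute__((cleanup(lock_guard_release))) = { .m = (mutex) }

static pthread_mutex_t src_lock = PTHREAD_MUTEX_INITIALIZER;

static void with_src_locked(void)
{
	GUARD(&src_lock);
	/* ... critical section; early returns also unlock ... */
}	/* src_lock is released here by the cleanup handler */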
fair.c
9206 struct rq *src_rq; member
9237 lockdep_assert_rq_held(env->src_rq); in task_hot()
9269 delta = rq_clock_task(env->src_rq) - p->se.exec_start; in task_hot()
9300 if (env->src_rq->nr_running > env->src_rq->nr_preferred_running) in migrate_degrades_locality()
9366 lockdep_assert_rq_held(env->src_rq); in can_migrate_task()
9434 if (task_on_cpu(env->src_rq, p)) { in can_migrate_task()
9470 lockdep_assert_rq_held(env->src_rq); in detach_task()
9478 deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK); in detach_task()
9492 lockdep_assert_rq_held(env->src_rq); in detach_one_task()
9495 &env->src_rq->cfs_tasks, se.group_node) { in detach_one_task()
[all …]
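
In fair.c, src_rq is a member of the load-balancing environment, and task_hot() compares the source runqueue's task clock against the task's exec_start to decide whether the task is still cache-hot and therefore expensive to migrate. A stripped-down sketch of that recency test, with hypothetical types and an illustrative threshold rather than the kernel's tunable:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical, simplified stand-ins for the runqueue clock and the
 * task's last execution timestamp. */
struct run_queue { uint64_t clock_ns; };
struct task      { uint64_t exec_start_ns; };

#define MIGRATION_COST_NS 500000ULL	/* illustrative threshold, ~0.5 ms */

/* A task that ran on the source queue very recently is likely still
 * cache-hot there, so a balancer would prefer not to migrate it. */
static bool task_is_hot(const struct task *p, const struct run_queue *src)
{
	return src->clock_ns - p->exec_start_ns < MIGRATION_COST_NS;
}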
sched.h
3910 void move_queued_task_locked(struct rq *src_rq, struct rq *dst_rq, struct task_struct *task) in move_queued_task_locked() argument
3912 lockdep_assert_rq_held(src_rq); in move_queued_task_locked()
3915 deactivate_task(src_rq, task, 0); in move_queued_task_locked()
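
move_queued_task_locked() in sched.h is the "both locks already held" variant: it only asserts the locking, dequeues the task from src_rq, and presumably re-enqueues it on dst_rq in the lines beyond this excerpt. A user-space sketch under that assumption, with a hypothetical queue type whose locked flag stands in for lockdep's held-lock check:

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>

/* Hypothetical queue that tracks whether its lock is held; not how the
 * kernel implements lockdep_assert_rq_held(). */
struct item  { struct item *next; };
struct queue { pthread_mutex_t lock; bool locked; struct item *head; };

/* Both-locks-held variant: only assert the locking protocol, then unlink
 * from src and push onto dst (cf. move_queued_task_locked()). */
static void move_queued_item_locked(struct queue *src, struct queue *dst,
				    struct item *it)
{
	assert(src->locked && dst->locked);

	struct item **pp = &src->head;
	while (*pp && *pp != it)
		pp = &(*pp)->next;
	if (*pp)
		*pp = it->next;		/* dequeue from the source queue */

	it->next  = dst->head;		/* enqueue on the destination queue */
	dst->head = it;
}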