Lines Matching refs:task_rq

1208 lockdep_assert_rq_held(task_rq(p));
1281 struct rq *rq = task_rq(p);
1423 dsq = &task_rq(p)->scx.bypass_dsq;
1754 * unlink_dsq_and_lock_src_rq() - Unlink task from its DSQ and lock its task_rq
1797 /* task_rq couldn't have changed if we're still the holding cpu */
1799 !WARN_ON_ONCE(src_rq != task_rq(p));
1825 * Must be called with @p's task_rq and @src_dsq locked. If @dst_dsq is a local
1826 * DSQ and @p is on a different CPU, @p will be migrated and thus its task_rq
1827 * will change. As @p's task_rq is locked, this function doesn't need to use the
1830 * On return, @src_dsq is unlocked and only @p's new task_rq, which is the
1838 struct rq *src_rq = task_rq(p), *dst_rq;
1902 struct rq *task_rq = task_rq(p);
1915 if (rq == task_rq) {
1923 if (likely(consume_remote_task(rq, p, dsq, task_rq)))
1959 struct rq *src_rq = task_rq(p);
1965 * be dequeued, its task_rq and cpus_allowed are stable too.
2005 /* task_rq couldn't have changed if we're still the holding cpu */
2007 !WARN_ON_ONCE(src_rq != task_rq(p))) {
2544 !scx_rq_bypassing(task_rq(a)))
2572 rq_bypass = scx_rq_bypassing(task_rq(p));
2891 struct rq *rq = task_rq(p);
2919 struct rq *rq = task_rq(p);
2936 lockdep_assert_rq_held(task_rq(p));
2955 SCX_CALL_OP_TASK(sch, SCX_KF_REST, exit_task, task_rq(p),
3063 lockdep_assert_rq_held(task_rq(p));
3100 lockdep_assert_rq_held(task_rq(p));
4270 update_rq_clock(task_rq(p));
5916 * context where no rq lock is held. If latter, lock @p's task_rq which
5919 src_rq = task_rq(p);
5944 WARN_ON_ONCE(src_rq != task_rq(p))) {
6985 return task_rq(p)->curr == p;
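
Taken together, the matches above (apparently from the sched_ext core, kernel/sched/ext.c) follow one recurring rule: task_rq(p) is only stable while @p's runqueue lock is held or while the task otherwise cannot migrate (e.g. via the holding-cpu mechanism), so callers either assert the lock with lockdep_assert_rq_held(task_rq(p)) or re-check task_rq(p) after acquiring a candidate rq lock, as at the WARN_ON_ONCE(src_rq != task_rq(p)) sites. A minimal sketch of that re-check pattern follows; lock_task_rq() is a hypothetical name used for illustration, not a function from ext.c, and it omits the p->pi_lock and task_on_rq_migrating() handling done by the kernel's real task_rq_lock() helpers.

/*
 * Hypothetical illustration only (not a function in ext.c), assuming the
 * usual kernel/sched/sched.h context: take the rq lock that covers @p,
 * retrying because task_rq(p) can change until that lock is actually held.
 */
static struct rq *lock_task_rq(struct task_struct *p)
{
	struct rq *rq;

	for (;;) {
		rq = task_rq(p);		/* snapshot, may go stale */
		raw_spin_rq_lock(rq);
		if (likely(rq == task_rq(p)))
			return rq;		/* @p is still on this rq */
		raw_spin_rq_unlock(rq);		/* @p migrated meanwhile, retry */
	}
}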