Searched refs:cpu_rq (Results 1 – 16 of 16) sorted by relevance
114 raw_spin_rq_lock_irq(cpu_rq(cpu)); in cpuacct_cpuusage_read()
131 raw_spin_rq_unlock_irq(cpu_rq(cpu)); in cpuacct_cpuusage_read()
150 raw_spin_rq_lock_irq(cpu_rq(cpu)); in cpuacct_cpuusage_write()
158 raw_spin_rq_unlock_irq(cpu_rq(cpu)); in cpuacct_cpuusage_write()
341 lockdep_assert_rq_held(cpu_rq(cpu)); in cpuacct_charge()
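The five cpuacct hits above all follow one pattern: resolve the per-CPU runqueue with cpu_rq(cpu), then either take its lock or assert it is already held before touching rq-protected state. A minimal sketch of that pattern, using only helpers that appear in these results plus the real rq->nr_running field; the function itself is hypothetical and would only build inside kernel/sched/:

    /*
     * Hypothetical helper (not in the kernel): reads one rq-protected
     * field from another CPU's runqueue the way cpuacct_cpuusage_read()
     * does above -- lock, read, unlock, with interrupts disabled.
     */
    static unsigned int remote_rq_nr_running(int cpu)
    {
            struct rq *rq = cpu_rq(cpu);    /* &per_cpu(runqueues, cpu) */
            unsigned int nr;

            raw_spin_rq_lock_irq(rq);       /* exclude that CPU's scheduler */
            nr = rq->nr_running;            /* any rq-protected field */
            raw_spin_rq_unlock_irq(rq);

            return nr;
    }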
122 return &cpu_rq(i)->rd->dl_bw; in dl_bw_of()
127 struct root_domain *rd = cpu_rq(i)->rd; in dl_bw_cpus()
159 return __dl_bw_capacity(cpu_rq(i)->rd->span); in dl_bw_capacity()
165 struct root_domain *rd = cpu_rq(cpu)->rd; in dl_bw_visited()
183 struct rq *rq = cpu_rq(i); in __dl_update()
673 later_rq = cpu_rq(cpu); in dl_task_offline_migration()
1834 rq = cpu_rq(cpu); in sched_init_dl_servers()
2389 rq = cpu_rq(cpu); in select_task_rq_dl()
2420 dl_task_is_earliest_deadline(p, cpu_rq(target))) in select_task_rq_dl()
2800 later_rq = cpu_rq(cpu); in find_lock_later_rq()
[all …]
405 raw_spin_lock_nested(&cpu_rq(t)->__lock, i++); in sched_core_lock()
414 raw_spin_unlock(&cpu_rq(t)->__lock); in sched_core_unlock()
435 cpu_rq(t)->core_enabled = enabled; in __sched_core_flip()
437 cpu_rq(cpu)->core->core_forceidle_start = 0; in __sched_core_flip()
448 cpu_rq(cpu)->core_enabled = enabled; in __sched_core_flip()
458 WARN_ON_ONCE(!RB_EMPTY_ROOT(&cpu_rq(cpu)->core_tree)); in sched_core_assert_empty()
1173 struct rq *rq = cpu_rq(cpu); in resched_cpu()
1235 struct rq *rq = cpu_rq(cpu); in wake_up_idle_cpu()
2031 init_uclamp_rq(cpu_rq(cpu)); in init_uclamp()
2441 rq = cpu_rq(new_cpu); in move_queued_task()
[all …]
2108 struct rq *rq = cpu_rq(cpu); in update_numa_stats()
2140 struct rq *rq = cpu_rq(env->dst_cpu); in task_numa_assign()
2155 rq = cpu_rq(env->dst_cpu); in task_numa_assign()
2170 rq = cpu_rq(env->best_cpu); in task_numa_assign()
2229 struct rq *dst_rq = cpu_rq(env->dst_cpu); in task_numa_compare()
2589 best_rq = cpu_rq(env.best_cpu); in task_numa_migrate()
3020 tsk = READ_ONCE(cpu_rq(cpu)->curr); in task_numa_group()
4924 capacity -= max(hw_load_avg(cpu_rq(cpu)), cpufreq_get_pressure(cpu)); in get_actual_cpu_capacity()
6395 cfs_rq->throttled_clock_pelt = rq_clock_pelt(cpu_rq(cpu)); in sync_throttle()
6555 struct rq *rq = cpu_rq(i); in destroy_cfs_bandwidth()
[all …]
1230 return &cpu_rq(cpu)->scx.local_dsq; in find_dsq_for_dispatch()
2722 if (unlikely(check_rq_for_timeouts(cpu_rq(cpu)))) in scx_watchdog_workfn()
3851 donee_rq = cpu_rq(donee); in bypass_lb_cpu()
3916 u32 nr = READ_ONCE(cpu_rq(cpu)->scx.bypass_dsq.nr); in bypass_lb_node()
3938 if (READ_ONCE(cpu_rq(cpu)->scx.bypass_dsq.nr) < nr_target) in bypass_lb_node()
3945 struct rq *rq = cpu_rq(cpu); in bypass_lb_node()
3963 u32 nr = READ_ONCE(cpu_rq(cpu)->scx.bypass_dsq.nr); in bypass_lb_node()
4084 struct rq *rq = cpu_rq(cpu); in scx_bypass()
4284 struct rq *rq = cpu_rq(cpu); in scx_disable_workfn()
4575 struct rq *rq = cpu_rq(cpu); in scx_dump_state()
[all …]
28 return cpu_rq(cpu)->scx.cpuperf_target; in scx_cpuperf_target()
230 struct rq *rq = cpu_rq(cpu); in init_tg_rt_entry()
950 return &cpu_rq(cpu)->rt; in sched_rt_period_rt_rq()
1509 rq = cpu_rq(cpu); in select_task_rq_rt()
1560 p->prio < cpu_rq(target)->rt.highest_prio.curr) in select_task_rq_rt()
1882 lowest_rq = cpu_rq(cpu); in find_lock_lowest_rq()
2262 src_rq = cpu_rq(cpu); in pull_rt_task()
2563 rt_rq = &cpu_rq(cpu)->rt; in task_is_throttled_rt()
2940 for_each_rt_rq(rt_rq, iter, cpu_rq(cpu)) in print_rt_stats()
277 rq_i = cpu_rq(i); in __sched_core_account_forceidle()
121 rq = cpu_rq(cpu); in show_schedstat()
183 struct rq *rq = cpu_rq(cpu); in idle_cpu()
222 return cpu_rq(cpu)->idle; in idle_task()
228 struct rq *rq = cpu_rq(cpu); in sched_core_idle_cpu()
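idle_cpu(), idle_task() and sched_core_idle_cpu() above answer "is this CPU idle?" purely by inspecting cpu_rq(cpu). Below is a simplified sketch of the kind of test idle_cpu() performs; the name is made up and this is not the kernel's exact function (which also checks rq->ttwu_pending):

    /* Hypothetical, simplified version of the idle_cpu() check above. */
    static int cpu_looks_idle(int cpu)
    {
            struct rq *rq = cpu_rq(cpu);

            if (rq->curr != rq->idle)       /* something besides the idle task is running */
                    return 0;
            if (rq->nr_running)             /* runnable tasks are queued */
                    return 0;
            return 1;
    }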
372 if (uclamp_rq_is_capped(cpu_rq(sg_cpu->cpu))) in sugov_hold_freq()
395 if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_min) in ignore_dl_rate_limit()
1361 #define cpu_rq(cpu) (&per_cpu(runqueues, (cpu))) macro
1363 #define task_rq(p) cpu_rq(task_cpu(p))
1364 #define cpu_curr(cpu) (cpu_rq(cpu)->curr)
1471 if (sched_core_cookie_match(cpu_rq(cpu), p)) in sched_group_cookie_match()
2024 for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
3266 #define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags)
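The hits at 1361-1364 are the definitions themselves: cpu_rq() is nothing more than a per-CPU variable lookup of the runqueues array, and task_rq()/cpu_curr() are thin wrappers over it. The standalone userspace model below mirrors only the shape of those three macros; runqueues, per_cpu() and task_cpu() are toy stand-ins defined locally, not the kernel's:

    #include <stdio.h>

    #define NR_CPUS 4

    struct task_struct { int pid; int cpu; };
    struct rq { struct task_struct *curr; unsigned int nr_running; };

    /* kernel: a DEFINE_PER_CPU runqueues variable; modeled here as a plain array */
    static struct rq runqueues[NR_CPUS];

    #define per_cpu(var, cpu)   ((var)[(cpu)])          /* toy per-CPU access */
    #define task_cpu(p)         ((p)->cpu)              /* toy task->CPU lookup */

    /* same shape as the sched.h definitions found above */
    #define cpu_rq(cpu)         (&per_cpu(runqueues, (cpu)))
    #define task_rq(p)          cpu_rq(task_cpu(p))
    #define cpu_curr(cpu)       (cpu_rq(cpu)->curr)

    int main(void)
    {
            struct task_struct t = { .pid = 42, .cpu = 2 };

            cpu_rq(2)->curr = &t;           /* pretend t is running on CPU 2 */
            cpu_rq(2)->nr_running = 1;

            printf("task_rq(&t) == cpu_rq(2): %d\n", task_rq(&t) == cpu_rq(2)); /* 1 */
            printf("cpu_curr(2)->pid: %d\n", cpu_curr(2)->pid);                 /* 42 */
            return 0;
    }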
414 struct root_domain *rd = cpu_rq(cpu)->rd; in build_perf_domains()
718 struct rq *rq = cpu_rq(cpu); in cpu_attach_domain()
2675 rq = cpu_rq(i); in build_sched_domains()
2907 cpu_rq(cpumask_first(doms_cur[j]))->rd->pd) { in partition_sched_domains_locked()
804 lockdep_assert_rq_held(cpu_rq(cpu)); in psi_group_change()
1235 guard(rq_lock_irq)(cpu_rq(cpu)); in psi_cgroup_restart()
548 cpu_rq(cpu)->scx.local_dsq.nr == 0 && in scx_select_cpu_dfl()
234 … be moved easily by modifying schedule(), but the same line matching 'rq=cpu_rq*' may still exist …