Searched refs:cpu_rq (Results 1 – 15 of 15) sorted by relevance
282  if (!(READ_ONCE(cpu_rq(cpu)->membarrier_state) &          in membarrier_global_expedited()
290  p = rcu_dereference(cpu_rq(cpu)->curr);                   in membarrier_global_expedited()
368  p = rcu_dereference(cpu_rq(cpu_id)->curr);                 in membarrier_private_expedited()
381  p = rcu_dereference(cpu_rq(cpu)->curr);                    in membarrier_private_expedited()
477  struct rq *rq = cpu_rq(cpu);                               in sync_runqueues_membarrier_state()
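The membarrier hits above share one pattern: the remote CPU's currently running task is read through rcu_dereference(cpu_rq(cpu)->curr) inside an RCU read-side section. A minimal sketch of that access pattern, assuming the kernel/sched context where cpu_rq() is visible; remote_curr_matches_mm() and the mm comparison are illustrative, not the actual membarrier code:

        /* Sketch only: the rcu_dereference(cpu_rq(cpu)->curr) pattern seen in
         * membarrier_global_expedited()/membarrier_private_expedited().
         * remote_curr_matches_mm() is a hypothetical helper, not a kernel symbol. */
        static bool remote_curr_matches_mm(int cpu, struct mm_struct *mm)
        {
                struct task_struct *p;
                bool match;

                rcu_read_lock();
                /* rq->curr may change at any time; RCU keeps the task_struct valid
                 * for the duration of the read-side section. */
                p = rcu_dereference(cpu_rq(cpu)->curr);
                match = p && READ_ONCE(p->mm) == mm;
                rcu_read_unlock();

                return match;
        }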
112  raw_spin_rq_lock_irq(cpu_rq(cpu));                         in cpuacct_cpuusage_read()
129  raw_spin_rq_unlock_irq(cpu_rq(cpu));                       in cpuacct_cpuusage_read()
148  raw_spin_rq_lock_irq(cpu_rq(cpu));                         in cpuacct_cpuusage_write()
156  raw_spin_rq_unlock_irq(cpu_rq(cpu));                       in cpuacct_cpuusage_write()
339  lockdep_assert_rq_held(cpu_rq(cpu));                       in cpuacct_charge()
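The cpuacct hits pair raw_spin_rq_lock_irq(cpu_rq(cpu)) with raw_spin_rq_unlock_irq(cpu_rq(cpu)) around reads and writes of per-CPU usage, while cpuacct_charge() only asserts that the lock is already held. A rough sketch of that lock/read/unlock shape; read_cpu_usage() and the plain u64 counter are made up for illustration, not the real cpuacct field layout:

        /* Sketch: stabilise a remote CPU's accounting value by holding that
         * CPU's runqueue lock, mirroring cpuacct_cpuusage_read()/write(). */
        static u64 read_cpu_usage(u64 *cpuusage, int cpu)
        {
                u64 val;

                raw_spin_rq_lock_irq(cpu_rq(cpu));      /* pin rq state, IRQs off */
                val = *cpuusage;
                raw_spin_rq_unlock_irq(cpu_rq(cpu));

                return val;
        }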
374   raw_spin_lock_nested(&cpu_rq(t)->__lock, i++);            in sched_core_lock()
383   raw_spin_unlock(&cpu_rq(t)->__lock);                      in sched_core_unlock()
404   cpu_rq(t)->core_enabled = enabled;                        in __sched_core_flip()
406   cpu_rq(cpu)->core->core_forceidle_start = 0;              in __sched_core_flip()
417   cpu_rq(cpu)->core_enabled = enabled;                      in __sched_core_flip()
427   WARN_ON_ONCE(!RB_EMPTY_ROOT(&cpu_rq(cpu)->core_tree));    in sched_core_assert_empty()
1155  struct rq *rq = cpu_rq(cpu);                              in resched_cpu()
1218  struct rq *rq = cpu_rq(cpu);                              in wake_up_idle_cpu()
2015  init_uclamp_rq(cpu_rq(cpu));                              in init_uclamp()
2489  rq = cpu_rq(new_cpu);                                     in move_queued_task()
[all …]
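The hits in sched_core_lock()/sched_core_unlock() take the __lock of every SMT sibling's runqueue in cpumask order, using raw_spin_lock_nested() with an increasing subclass so lockdep accepts the nesting. A hedged sketch of that shape, using the lock/unlock calls exactly as they appear in the hits; the function names here are illustrative stand-ins, not the kernel symbols:

        /* Sketch of the nested rq-lock pattern from sched_core_lock()/unlock():
         * each SMT sibling's rq->__lock is taken in cpumask order with a distinct
         * lockdep subclass (i++), and released in the same order. */
        static void lock_all_siblings(int cpu, unsigned long *flags)
        {
                const struct cpumask *smt_mask = cpu_smt_mask(cpu);
                int t, i = 0;

                local_irq_save(*flags);
                for_each_cpu(t, smt_mask)
                        raw_spin_lock_nested(&cpu_rq(t)->__lock, i++);
        }

        static void unlock_all_siblings(int cpu, unsigned long *flags)
        {
                const struct cpumask *smt_mask = cpu_smt_mask(cpu);
                int t;

                for_each_cpu(t, smt_mask)
                        raw_spin_unlock(&cpu_rq(t)->__lock);
                local_irq_restore(*flags);
        }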
119   return &cpu_rq(i)->rd->dl_bw;                             in dl_bw_of()
124   struct root_domain *rd = cpu_rq(i)->rd;                   in dl_bw_cpus()
165   return __dl_bw_capacity(cpu_rq(i)->rd->span);             in dl_bw_capacity()
171   struct root_domain *rd = cpu_rq(cpu)->rd;                 in dl_bw_visited()
189   struct rq *rq = cpu_rq(i);                                in __dl_update()
197   return &cpu_rq(i)->dl.dl_bw;                              in dl_bw_of()
714   later_rq = cpu_rq(cpu);                                   in dl_task_offline_migration()
2241  rq = cpu_rq(cpu);                                         in select_task_rq_dl()
2272  dl_task_is_earliest_deadline(p, cpu_rq(target)))          in select_task_rq_dl()
2637  later_rq = cpu_rq(cpu);                                   in find_lock_later_rq()
[all …]
2127  struct rq *rq = cpu_rq(cpu);                                           in update_numa_stats()
2159  struct rq *rq = cpu_rq(env->dst_cpu);                                  in task_numa_assign()
2174  rq = cpu_rq(env->dst_cpu);                                             in task_numa_assign()
2189  rq = cpu_rq(env->best_cpu);                                            in task_numa_assign()
2248  struct rq *dst_rq = cpu_rq(env->dst_cpu);                              in task_numa_compare()
2607  best_rq = cpu_rq(env.best_cpu);                                        in task_numa_migrate()
3038  tsk = READ_ONCE(cpu_rq(cpu)->curr);                                    in task_numa_group()
4966  capacity -= max(hw_load_avg(cpu_rq(cpu)), cpufreq_get_pressure(cpu));  in get_actual_cpu_capacity()
6445  cfs_rq->throttled_clock_pelt = rq_clock_pelt(cpu_rq(cpu));             in sync_throttle()
6597  struct rq *rq = cpu_rq(i);                                             in destroy_cfs_bandwidth()
[all …]
349   struct rq *rq = cpu_rq(cpu);                               in sched_fair_server_write()
406   struct rq *rq = cpu_rq(cpu);                               in sched_fair_server_show()
810   struct rq *rq = cpu_rq(cpu);                               in print_cfs_rq()
927   dl_bw = &cpu_rq(cpu)->rd->dl_bw;                           in print_dl_rq()
939   struct rq *rq = cpu_rq(cpu);                               in print_cpu()
1301  cpu, latency, cpu_rq(cpu)->ticks_without_resched);         in resched_latency_warn()
27    return cpu_rq(cpu)->scx.cpuperf_target;                   in scx_cpuperf_target()
1894  return &cpu_rq(cpu)->scx.local_dsq;                        in find_dsq_for_dispatch()
2926  struct rq *srq = cpu_rq(scpu);                             in balance_scx()
3544  cpu_rq(cpu)->scx.local_dsq.nr == 0) {                      in scx_select_cpu_dfl()
3864  if (unlikely(check_rq_for_timeouts(cpu_rq(cpu))))          in scx_watchdog_workfn()
4893  struct rq *rq = cpu_rq(cpu);                               in scx_ops_bypass()
5090  struct rq *rq = cpu_rq(cpu);                               in scx_ops_disable_workfn()
5382  struct rq *rq = cpu_rq(cpu);                               in scx_dump_state()
5641  cpu_rq(cpu)->scx.cpuperf_target = SCX_CPUPERF_ONE;         in scx_ops_enable()
6183  struct rq *rq = cpu_rq(cpu);                               in kick_one_cpu()
6219  struct rq *rq = cpu_rq(cpu);                               in kick_one_cpu_if_idle()
[all …]
122   rq = cpu_rq(cpu);                                          in show_schedstat()
275   rq_i = cpu_rq(i);                                          in __sched_core_account_forceidle()
204   struct rq *rq = cpu_rq(cpu);                               in idle_cpu()
245   return cpu_rq(cpu)->idle;                                  in idle_task()
251   struct rq *rq = cpu_rq(cpu);                               in sched_core_idle_cpu()
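These hits show the two idle-state accessors: idle_task() simply returns cpu_rq(cpu)->idle, while idle_cpu() compares the runqueue's current task against that idle task and checks that nothing is queued. A condensed, hedged sketch of that comparison; the real idle_cpu() performs additional checks (e.g. pending wakeups), and cpu_looks_idle() is not a kernel symbol:

        /* Sketch: a CPU looks idle when its rq is running the idle task and has
         * no runnable tasks. Simplified illustration of the idle_cpu() pattern. */
        static int cpu_looks_idle(int cpu)
        {
                struct rq *rq = cpu_rq(cpu);

                if (rq->curr != rq->idle)
                        return 0;
                if (rq->nr_running)
                        return 0;
                return 1;
        }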
344   if (uclamp_rq_is_capped(cpu_rq(sg_cpu->cpu)))              in sugov_hold_freq()
367   if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_min)       in ignore_dl_rate_limit()
1352  #define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))           macro
1354  #define task_rq(p) cpu_rq(task_cpu(p))
1355  #define cpu_curr(cpu) (cpu_rq(cpu)->curr)
1452  if (sched_core_cookie_match(cpu_rq(cpu), p))               in sched_group_cookie_match()
1993  for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
3211  #define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags)
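These hits contain the definition itself: runqueues is a per-CPU struct rq, cpu_rq(cpu) is just &per_cpu(runqueues, (cpu)), and task_rq()/cpu_curr() are layered on top. A small hedged usage sketch of how those accessors are typically combined; total_nr_running() is a made-up helper, not a kernel function:

        /* Hypothetical usage sketch: walk every runqueue via cpu_rq() and sum
         * the runnable-task counts. Assumes kernel/sched context where the
         * macros above and for_each_possible_cpu() are available. */
        static unsigned int total_nr_running(void)
        {
                unsigned int sum = 0;
                int cpu;

                for_each_possible_cpu(cpu)
                        sum += READ_ONCE(cpu_rq(cpu)->nr_running);

                return sum;
        }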
982   rq = cpu_rq(cpu);                                          in kcpustat_field()
1069  rq = cpu_rq(cpu);                                          in kcpustat_cpu_fetch()
234 … be moved easily by modifying schedule(), but the same line matching 'rq=cpu_rq*' may still exist …