Searched refs:cpu_of (Results 1 – 9 of 9) sorted by relevance
/linux/kernel/sched/
pelt.c
   439  running = cap_scale(running, arch_scale_freq_capacity(cpu_of(rq)));   in update_irq_load_avg()
   440  running = cap_scale(running, arch_scale_cpu_capacity(cpu_of(rq)));   in update_irq_load_avg()
   480  unsigned long hw_pressure = arch_scale_hw_pressure(cpu_of(rq));   in update_other_load_avgs()
pelt.h
   119  delta = cap_scale(delta, arch_scale_cpu_capacity(cpu_of(rq)));   in update_rq_clock_pelt()
   120  delta = cap_scale(delta, arch_scale_freq_capacity(cpu_of(rq)));   in update_rq_clock_pelt()
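Both the pelt.c and pelt.h hits above show the same pattern: before a running-time delta feeds the PELT averages, it is scaled first by the CPU's current frequency capacity and then by its microarchitectural capacity, both looked up via cpu_of(rq). A minimal userspace sketch of that double scaling, assuming the kernel's usual cap_scale() helper, (v) * (cap) >> SCHED_CAPACITY_SHIFT, and using made-up capacity values:

#include <stdio.h>
#include <stdint.h>

/* Assumed: the kernel's cap_scale() pattern, where a capacity of 1024
 * (SCHED_CAPACITY_SCALE) means "biggest core at maximum frequency". */
#define SCHED_CAPACITY_SHIFT   10
#define cap_scale(v, cap)      ((uint64_t)(v) * (cap) >> SCHED_CAPACITY_SHIFT)

int main(void)
{
        uint64_t delta = 1000;          /* ns of wall-clock running time */
        unsigned long freq_cap = 512;   /* hypothetical: CPU at half of its max frequency */
        unsigned long cpu_cap  = 768;   /* hypothetical: mid-size core, 75% of the biggest core */

        /* Same two-step scaling as update_rq_clock_pelt()/update_irq_load_avg():
         * time is deflated so a slower or smaller CPU accrues less "PELT time". */
        delta = cap_scale(delta, freq_cap);     /* 1000 * 512 >> 10 = 500 */
        delta = cap_scale(delta, cpu_cap);      /* 500 * 768 >> 10 = 375 */

        printf("scaled delta: %llu ns\n", (unsigned long long)delta);
        return 0;
}

With these illustrative numbers, 1000 ns of wall-clock time on a half-frequency, 75%-capacity CPU contributes only 375 ns of scaled time, which is what keeps load and utilization comparable across big and little cores.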
ext.c
  1642  p->scx.core_sched_at = sched_clock_cpu(cpu_of(rq));   in touch_core_sched()
  1998  return likely((rq->scx.flags & SCX_RQ_ONLINE) && cpu_active(cpu_of(rq)));   in scx_rq_online()
  2010  if (sticky_cpu == cpu_of(rq))   in do_enqueue_task()
  2132  sticky_cpu = cpu_of(rq);   in enqueue_task_scx()
  2302  set_task_cpu(p, cpu_of(dst_rq));   in move_remote_task_to_local_dsq()
  2303  p->scx.sticky_cpu = cpu_of(dst_rq);   in move_remote_task_to_local_dsq()
  2313  WARN_ON_ONCE(!cpumask_test_cpu(cpu_of(dst_rq), p->cpus_ptr));   in move_remote_task_to_local_dsq()
  2342  int cpu = cpu_of(rq);   in task_can_run_on_remote_rq()
  2595  int node = cpu_to_node(cpu_of(rq));   in consume_global_dsq()
  2814  SCX_CALL_OP(SCX_KF_REST, cpu_acquire, cpu_of(rq), NULL);   in balance_one()
  [all …]
fair.c
   315  int cpu = cpu_of(rq);   in list_add_leaf_cfs_rq()
  1087  long cpu_scale = arch_scale_cpu_capacity(cpu_of(rq_of(cfs_rq)));   in post_init_entity_util_avg()
  4104  if (!cpu_active(cpu_of(rq_of(cfs_rq))))   in update_tg_load_avg()
  4111  now = sched_clock_cpu(cpu_of(rq_of(cfs_rq)));   in update_tg_load_avg()
  4134  now = sched_clock_cpu(cpu_of(rq_of(cfs_rq)));   in clear_tg_load_avg()
  4157  struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];   in clear_tg_offline_cfs_rqs()
  4548  now += sched_clock_cpu(cpu_of(rq)) - u64_u32_load(rq->clock_idle);   in migrate_se_pelt_lag()
  4925  if (dequeued > arch_scale_cpu_capacity(cpu_of(rq_of(cfs_rq))))   in util_est_update()
  5104  int cpu = cpu_of(rq);   in update_misfit_status()
  5825  struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];   in tg_unthrottle_up()
  [all …]
core_sched.c
   242  const struct cpumask *smt_mask = cpu_smt_mask(cpu_of(rq));   in __sched_core_account_forceidle()
sched.h
  1330  static inline int cpu_of(struct rq *rq)   in cpu_of() (function definition)
  1427  for_each_cpu(cpu, cpu_smt_mask(cpu_of(rq))) {   in sched_core_cookie_match()
  2749  int cpu = cpu_of(rq);   in sched_update_tick_dependency()
  2873  if (!cpu_active(cpu_of(rq)))   in hrtick_enabled()
  3328  cpu_of(rq)));   in cpufreq_update_util()
  3423  rq_util = cpu_util_cfs(cpu_of(rq)) + cpu_util_rt(rq);   in uclamp_rq_is_capped()
  3762  struct mm_cid *pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(rq));   in mm_cid_snapshot_time()
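The sched.h hit at line 1330 is the definition itself; it boils down to the following (paraphrased, since the exact body may differ between kernel versions):

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
        return rq->cpu;         /* the CPU this runqueue belongs to */
#else
        return 0;               /* UP build: a single runqueue, always CPU 0 */
#endif
}

Every other hit in this listing is a caller mapping a struct rq back to its CPU number so that per-CPU helpers (sched_clock_cpu(), arch_scale_cpu_capacity(), cpumask tests, and so on) are applied to the right CPU.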
core.c
   744  irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;   in update_rq_clock_task()
   773  steal = prev_steal = paravirt_steal_clock(cpu_of(rq));   in update_rq_clock_task()
   808  clock = sched_clock_cpu(cpu_of(rq));   in update_rq_clock()
   839  WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());   in hrtick()
   892  smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);   in hrtick_start()
  1105  cpu = cpu_of(rq);   in __resched_curr()
  1285  int cpu = cpu_of(rq);   in nohz_csd_func()
  3019  stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,   in affine_move_task()
  3828  if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq)))   in sched_ttwu_pending()
  3829  set_task_cpu(p, cpu_of(rq));   in sched_ttwu_pending()
  [all …]
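The two update_rq_clock_task() hits (core.c lines 744 and 773) use cpu_of(rq) to read this CPU's accumulated IRQ time and paravirt steal time, which are subtracted from the raw clock delta so that rq->clock_task only advances by time a task could actually use. A simplified, userspace-style sketch of the IRQ-time part (names, structure, and numbers here are illustrative, not the kernel's exact code):

#include <stdio.h>
#include <stdint.h>

/* Illustrative state mirroring the fields the core.c hits touch. */
struct fake_rq {
        uint64_t clock;          /* raw per-CPU clock, ns */
        uint64_t clock_task;     /* clock minus IRQ/steal time, ns */
        uint64_t prev_irq_time;  /* IRQ time already accounted, ns */
};

/* Advance clock_task by delta, minus whatever IRQ time accrued meanwhile. */
static void update_clock_task(struct fake_rq *rq, uint64_t irq_time_now, uint64_t delta)
{
        uint64_t irq_delta = irq_time_now - rq->prev_irq_time;

        if (irq_delta > delta)
                irq_delta = delta;      /* clamp, as the kernel does */

        rq->prev_irq_time += irq_delta;
        rq->clock += delta;
        rq->clock_task += delta - irq_delta;
}

int main(void)
{
        struct fake_rq rq = { 0 };

        /* 1 ms of wall time, of which 200 us was spent in IRQs on this CPU. */
        update_clock_task(&rq, 200000, 1000000);
        printf("clock=%llu clock_task=%llu\n",
               (unsigned long long)rq.clock, (unsigned long long)rq.clock_task);
        return 0;
}

update_rq_clock() at line 808 is the caller that produces the raw delta in the first place, via sched_clock_cpu(cpu_of(rq)).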
deadline.c
  1478  int cpu = cpu_of(rq);   in dl_scaled_delta_exec()
  1700  int cpu = cpu_of(rq);   in __dl_server_attach_root()
  1703  dl_b = dl_bw_of(cpu_of(rq));   in __dl_server_attach_root()
  1717  int cpu = cpu_of(rq);   in dl_server_apply_params()
  2908  src_dl_b = dl_bw_of(cpu_of(rq));   in set_cpus_allowed_dl()
debug.c
   393  cpu_of(rq));   in sched_fair_server_write()