Searched refs:rq_clock (Results 1 – 9 of 9) sorted by relevance

/linux/kernel/sched/
stats.c
    11  wait_start = rq_clock(rq);  in __update_stats_wait_start()
    23  u64 delta = rq_clock(rq) - schedstat_val(stats->wait_start);  in __update_stats_wait_end()
    56  u64 delta = rq_clock(rq) - sleep_start;  in __update_stats_enqueue_sleeper()
    74  u64 delta = rq_clock(rq) - block_start;  in __update_stats_enqueue_sleeper()
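
The stats.c hits are all one pattern: stamp a start time from the owning runqueue's clock, then subtract it from a later rq_clock() read to get a wait/sleep/block delta. A minimal sketch of the wait half, modeled on __update_stats_wait_start()/__update_stats_wait_end(); the *_sketch names are illustrative, while the schedstat macros and struct sched_statistics fields are the real ones:

    /* Stamp the wait start from the rq clock; rq lock must be held and
     * the clock updated on this path, as rq_clock() asserts. */
    static void wait_start_sketch(struct rq *rq, struct sched_statistics *stats)
    {
            __schedstat_set(stats->wait_start, rq_clock(rq));
    }

    /* On wait end, the delta against the same per-rq clock feeds the
     * max/count/sum statistics, then the stamp is cleared. */
    static void wait_end_sketch(struct rq *rq, struct sched_statistics *stats)
    {
            u64 delta = rq_clock(rq) - schedstat_val(stats->wait_start);

            __schedstat_set(stats->wait_max,
                            max(schedstat_val(stats->wait_max), delta));
            __schedstat_inc(stats->wait_count);
            __schedstat_add(stats->wait_sum, delta);
            __schedstat_set(stats->wait_start, 0);
    }
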
stats.h
   217  delta = rq_clock(rq) - t->sched_info.last_queued;  in sched_info_dequeue()
   236  now = rq_clock(rq);  in sched_info_arrive()
   254  t->sched_info.last_queued = rq_clock(rq);  in sched_info_enqueue()
   267  unsigned long long delta = rq_clock(rq) - t->sched_info.last_arrival;  in sched_info_depart()
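
stats.h feeds the same clock into the sched_info run-delay bookkeeping: last_queued is stamped at enqueue and consumed at arrival or dequeue. A condensed sketch under the same caveat (the real helpers also handle the never-queued cases and are compiled under CONFIG_SCHED_INFO):

    /* Condensed from the sched_info helpers: every timestamp comes from
     * rq_clock() of the rq the task sits on, so deltas stay per-rq consistent. */
    static inline void sched_info_enqueue_sketch(struct rq *rq, struct task_struct *t)
    {
            if (!t->sched_info.last_queued)
                    t->sched_info.last_queued = rq_clock(rq);
    }

    static inline void sched_info_arrive_sketch(struct rq *rq, struct task_struct *t)
    {
            unsigned long long now = rq_clock(rq);

            if (t->sched_info.last_queued) {
                    /* Time spent runnable but not running. */
                    t->sched_info.run_delay += now - t->sched_info.last_queued;
                    t->sched_info.last_queued = 0;
            }
            t->sched_info.last_arrival = now;
            t->sched_info.pcount++;
    }
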
deadline.c
   438  zerolag_time -= rq_clock(rq);  in task_non_contending()
   777  dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
   808  WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));  in setup_new_dl_entity()
   865  (dl_se->dl_defer_armed && dl_entity_overflow(dl_se, rq_clock(rq)))) {  in replenish_dl_entity()
   866  dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;  in replenish_dl_entity()
   893  if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
   918  dl_time_before(rq_clock(dl_se->rq), dl_se->deadline - dl_se->runtime)) {  in dl_entity_overflow()
  1016  u64 laxity = dl_se->deadline - rq_clock(rq);  in update_dl_entity()
  1024  WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));  in update_dl_entity()
  1079  if (dl_time_before(dl_se->deadline, rq_clock(rq)))  in start_dl_timer()
[all...]
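
The deadline.c hits are CBS bookkeeping: a fresh scheduling deadline is always "now on this rq's clock plus the relative deadline", and every ordering test goes through dl_time_before() so clock wraparound is handled. A stripped-down sketch of the replenish idea; the real replenish_dl_entity() additionally handles bandwidth overflow, priority inheritance via pi_of(), and the deferred-server cases visible above:

    /* Sketch: a deadline in the past on this rq's clock is stale, so push
     * it one relative deadline into the future and refill the budget. */
    static void replenish_sketch(struct sched_dl_entity *dl_se, struct rq *rq)
    {
            if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
                    dl_se->deadline = rq_clock(rq) + dl_se->dl_deadline;
                    dl_se->runtime = dl_se->dl_runtime;
            }
    }
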
pelt.h
    78  u64_u32_store(rq->clock_idle, rq_clock(rq));  in _update_idle_rq_clock_pelt()
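
The pelt.h hit snapshots the clock at idle entry; u64_u32_store() keeps the 64-bit store tear-free on 32-bit targets. The fair.c comment lines further down read this snapshot back as rq->clock_idle. Roughly, trimmed from _update_idle_rq_clock_pelt():

    /* Trimmed sketch: remember where the rq clock stood when this rq went
     * idle, so PELT can reconstruct time spent idle on wakeup. */
    static inline void idle_clock_snapshot_sketch(struct rq *rq)
    {
            u64_u32_store(rq->clock_idle, rq_clock(rq));
    }
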
core_sched.c
   243  u64 delta, now = rq_clock(rq->core);  in __sched_core_account_forceidle()
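
core_sched.c reads the clock of the core leader's rq (rq->core) rather than the local one, so all SMT siblings account forced-idle time against a single timeline. A hedged sketch of the accounting step in __sched_core_account_forceidle(); the real function also guards against zero deltas and distributes the charge:

    /* Sketch: measure forced-idle time on the core-wide clock, then restart
     * the measurement window. Assumes CONFIG_SCHED_CORE with rq->core set. */
    static void forceidle_delta_sketch(struct rq *rq)
    {
            u64 delta, now = rq_clock(rq->core);

            delta = now - rq->core->core_forceidle_start;
            rq->core->core_forceidle_start = now;

            /* ... charge delta to tasks that ran while siblings sat forced idle ... */
    }
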
core.c
  3688  u64 delta = rq_clock(rq) - rq->idle_stamp;  in ttwu_do_activate()
  5522  u64 resched_latency, now = rq_clock(rq);  in cpu_resched_latency()
  6186  rq->core->core_forceidle_start = rq_clock(rq->core);  in pick_next_task()
 10353  u64 rq_clock;  in sched_mm_cid_remote_clear_old()  (local)
 10359  rq_clock = READ_ONCE(rq->clock);  in sched_mm_cid_remote_clear_old()
 10370  WRITE_ONCE(pcpu_cid->time, rq_clock);  in sched_mm_cid_remote_clear_old()
 10375  if (rq_clock < pcpu_cid->time + SCHED_MM_CID_PERIOD_NS)  in sched_mm_cid_remote_clear_old()
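
Most core.c hits are again start/now deltas (idle_stamp, resched latency, forceidle start). The mm_cid lines are the outlier: they read rq->clock directly with READ_ONCE() instead of calling rq_clock(), because the remote-clear path holds no rq lock and only needs an approximate value for rate limiting. A condensed sketch; mm_is_active_here() is a hypothetical stand-in for the real "active task using this mm on this rq" check:

    /* Condensed sketch of sched_mm_cid_remote_clear_old(). */
    static void mm_cid_clear_old_sketch(struct rq *rq, struct mm_cid *pcpu_cid)
    {
            /* Lockless read: rq->clock may tear on 32-bit, and a rare
             * spurious clear is acceptable, so no rq lock and no rq_clock(). */
            u64 rq_clock = READ_ONCE(rq->clock);

            /* An active user of the mm just keeps the snapshot fresh ... */
            if (mm_is_active_here(rq)) {            /* hypothetical helper */
                    WRITE_ONCE(pcpu_cid->time, rq_clock);
                    return;
            }

            /* ... otherwise clear at most once per SCHED_MM_CID_PERIOD_NS. */
            if (rq_clock < pcpu_cid->time + SCHED_MM_CID_PERIOD_NS)
                    return;

            /* ... proceed to clear the stale per-cpu cid ... */
    }
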
fair.c
  1366  rq_clock(rq_of(cfs_rq)));
  1369  rq_clock(rq_of(cfs_rq)));
  4606  * = sched_clock_cpu() - rq_clock()@rq_idle  in update_cfs_rq_load_avg()
  4611  * sched_clock_cpu() - rq_clock()@rq_idle  in update_cfs_rq_load_avg()
  4614  * rq_clock()@rq_idle is rq->clock_idle  in update_cfs_rq_load_avg()
  5441  cfs_rq->throttled_clock = rq_clock(rq);  in set_next_entity()
  5443  cfs_rq->throttled_clock_self = rq_clock(rq);  in set_next_entity()
  5881  u64 delta = rq_clock(rq) - cfs_rq->throttled_clock_self;  in unthrottle_cfs_rq()
  5907  cfs_rq->throttled_clock_self = rq_clock(rq);  in unthrottle_cfs_rq()
  6013  cfs_rq->throttled_clock = rq_clock(rq);  in unthrottle_cfs_rq_async()
[all...]
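
In fair.c the same clock also brackets CFS bandwidth throttling: throttled_clock and throttled_clock_self are stamped when a cfs_rq gets throttled and subtracted at unthrottle to accumulate throttled time. A minimal sketch of the two halves; the field names are from the listing, the throttled_clock_self_time accumulator is assumed from the same struct, and the real paths also walk the task-group hierarchy:

    /* Sketch: stamp at throttle time ... */
    static void throttle_stamp_sketch(struct cfs_rq *cfs_rq)
    {
            cfs_rq->throttled_clock_self = rq_clock(rq_of(cfs_rq));
    }

    /* ... and account the delta at unthrottle, clearing the stamp. */
    static void unthrottle_account_sketch(struct cfs_rq *cfs_rq)
    {
            if (cfs_rq->throttled_clock_self) {
                    u64 delta = rq_clock(rq_of(cfs_rq)) -
                                cfs_rq->throttled_clock_self;

                    cfs_rq->throttled_clock_self = 0;
                    cfs_rq->throttled_clock_self_time += delta; /* assumed field */
            }
    }
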
sched.h
  1642  static inline u64 rq_clock(struct rq *rq)  in rq_pin_lock()
  3234  data->func(data, rq_clock(rq), flags);  in cpufreq_update_util()
  1552  static inline u64 rq_clock(struct rq *rq)  rq_clock() function
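
The sched.h hits are the accessor itself. Its body is small: assert that the rq lock is held and that the clock was refreshed on this code path, then return the cached value. A sketch consistent with recent kernels (both assert helpers exist in sched.h; exact line placement varies by version):

    /* rq->clock is only advanced by update_rq_clock() under the rq lock,
     * so every reader must hold the lock on a clock-updated path. */
    static inline u64 rq_clock(struct rq *rq)
    {
            lockdep_assert_rq_held(rq);
            assert_clock_updated(rq);

            return rq->clock;
    }

This locking requirement is why the lockless mm_cid path in core.c above bypasses the accessor with READ_ONCE(rq->clock).
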
rt.c
  1357  rq_clock(rq_of_rt_rq(rt_rq)));  in update_stats_dequeue_rt()
  1361  rq_clock(rq_of_rt_rq(rt_rq)));  in update_stats_dequeue_rt()
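
rt.c reuses the generic schedstat pattern from stats.c, first deriving the rq from the rt_rq before reading the clock. A sketch of the dequeue-side stamping in update_stats_dequeue_rt(); the state test mirrors the fair-class equivalent:

    /* Sketch: on a sleep dequeue, stamp sleep/block start with the owning
     * rq's clock so the enqueue side can compute the delta later. */
    static void dequeue_stamp_sketch(struct rt_rq *rt_rq, struct task_struct *p,
                                     unsigned int state)
    {
            if (state & TASK_INTERRUPTIBLE)
                    __schedstat_set(p->stats.sleep_start,
                                    rq_clock(rq_of_rt_rq(rt_rq)));

            if (state & TASK_UNINTERRUPTIBLE)
                    __schedstat_set(p->stats.block_start,
                                    rq_clock(rq_of_rt_rq(rt_rq)));
    }
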