Lines matching refs:avg in kernel/sched/fair.c (line number, matching source line, enclosing function)
659 s64 avg = cfs_rq->avg_vruntime; in avg_vruntime() local
665 avg += entity_key(cfs_rq, curr) * weight; in avg_vruntime()
671 if (avg < 0) in avg_vruntime()
672 avg -= (load - 1); in avg_vruntime()
673 avg = div_s64(avg, load); in avg_vruntime()
676 return cfs_rq->min_vruntime + avg; in avg_vruntime()
727 s64 avg = cfs_rq->avg_vruntime; in vruntime_eligible() local
733 avg += entity_key(cfs_rq, curr) * weight; in vruntime_eligible()
737 return avg >= (s64)(vruntime - cfs_rq->min_vruntime) * load; in vruntime_eligible()
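The two groups above are EEVDF's average-vruntime machinery: entity keys are kept relative to min_vruntime, the signed weighted sum is floor-divided (hence the bias by load - 1 at line 672, since C division truncates toward zero), and vruntime_eligible() avoids the division entirely by cross-multiplying. A minimal userspace sketch of that arithmetic, with plain / standing in for the kernel's div_s64():

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Floor division of a signed numerator by a positive load, mirroring
     * lines 671-673: bias a negative numerator by (load - 1) so the
     * result rounds toward negative infinity instead of toward zero. */
    static int64_t floor_div(int64_t avg, int64_t load)
    {
        if (avg < 0)
            avg -= (load - 1);
        return avg / load;              /* div_s64() in the kernel */
    }

    int main(void)
    {
        assert(floor_div(-7, 2) == -4); /* plain -7/2 would give -3 */
        assert(floor_div(7, 2) == 3);

        /* Eligibility test from line 737, checked without dividing:
         * key = vruntime - min_vruntime is eligible when
         * avg >= key * load. Values are illustrative. */
        int64_t avg = 10, load = 4, key = 2;
        printf("eligible: %d\n", avg >= key * load);
        return 0;
    }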
1039 struct sched_avg *sa = &se->avg; in init_entity_runnable_average()
1086 struct sched_avg *sa = &se->avg; in post_init_entity_util_avg()
1088 long cap = (long)(cpu_scale - cfs_rq->avg.util_avg) / 2; in post_init_entity_util_avg()
1101 se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq); in post_init_entity_util_avg()
1106 if (cfs_rq->avg.util_avg != 0) { in post_init_entity_util_avg()
1107 sa->util_avg = cfs_rq->avg.util_avg * se_weight(se); in post_init_entity_util_avg()
1108 sa->util_avg /= (cfs_rq->avg.load_avg + 1); in post_init_entity_util_avg()
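post_init_entity_util_avg() seeds a new task's utilization from its weight's share of the runqueue's current utilization, capped by half the remaining capacity (line 1088). A sketch of that seeding with illustrative constants (the kernel reads these from the cfs_rq and the entity):

    #include <stdio.h>

    int main(void)
    {
        /* Illustrative values standing in for the CPU scale, the
         * runqueue's util_avg/load_avg, and a nice-0 task weight
         * (lines 1088 and 1106-1108). */
        long cpu_scale = 1024, cfs_util_avg = 600, cfs_load_avg = 900;
        long se_weight = 1024;

        long cap = (cpu_scale - cfs_util_avg) / 2;
        long util = cfs_util_avg * se_weight / (cfs_load_avg + 1);
        if (util > cap)   /* the kernel clamps against cap as well */
            util = cap;

        printf("initial util_avg = %ld (cap %ld)\n", util, cap);
        return 0;
    }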
2782 delta = p->se.avg.load_sum; in numa_get_avg_runtime()
3751 cfs_rq->avg.load_avg += se->avg.load_avg; in enqueue_load_avg()
3752 cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum; in enqueue_load_avg()
3758 sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg); in dequeue_load_avg()
3759 sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum); in dequeue_load_avg()
3761 cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum, in dequeue_load_avg()
3762 cfs_rq->avg.load_avg * PELT_MIN_DIVIDER); in dequeue_load_avg()
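dequeue_load_avg() shows a pattern that recurs throughout this listing: subtract with an underflow clamp (sub_positive()), then re-clamp the _sum so it never falls below _avg * PELT_MIN_DIVIDER, keeping the sum/avg pair consistent in the face of rounding. A small sketch, assuming sub_positive() simply clamps at zero (the kernel version also avoids load/store tearing):

    #include <stdio.h>

    #define PELT_MIN_DIVIDER (47742 - 1024)   /* LOAD_AVG_MAX - 1024 */

    /* Clamped subtraction: never lets the accumulator underflow. */
    static void sub_positive(unsigned long *ptr, unsigned long val)
    {
        *ptr = (*ptr > val) ? *ptr - val : 0;
    }

    int main(void)
    {
        unsigned long load_avg = 100;
        unsigned long load_sum = 4700000;   /* roughly load_avg * divider */

        /* Dequeue an entity whose sum is overstated relative to its avg,
         * the drift the clamp on lines 3761-3762 exists to repair. */
        sub_positive(&load_avg, 30);
        sub_positive(&load_sum, 3000000);

        if (load_sum < load_avg * PELT_MIN_DIVIDER)
            load_sum = load_avg * PELT_MIN_DIVIDER;

        printf("load_avg=%lu load_sum=%lu\n", load_avg, load_sum);
        return 0;
    }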
3802 u32 divider = get_pelt_divider(&se->avg); in reweight_entity()
3804 se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider); in reweight_entity()
3921 load = max(scale_load_down(cfs_rq->load.weight), cfs_rq->avg.load_avg); in calc_group_shares()
4032 return u64_u32_load_copy(cfs_rq->avg.last_update_time, in cfs_rq_last_update_time()
4069 if (!load_avg_is_decayed(&cfs_rq->avg)) in cfs_rq_is_decayed()
4115 delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib; in update_tg_load_avg()
4118 cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg; in update_tg_load_avg()
4187 if (!(se->avg.last_update_time && prev)) in set_task_rq_fair()
4194 se->avg.last_update_time = n_last_update_time; in set_task_rq_fair()
4267 long delta_sum, delta_avg = gcfs_rq->avg.util_avg - se->avg.util_avg; in update_tg_cfs_util()
4278 divider = get_pelt_divider(&cfs_rq->avg); in update_tg_cfs_util()
4282 se->avg.util_avg = gcfs_rq->avg.util_avg; in update_tg_cfs_util()
4283 new_sum = se->avg.util_avg * divider; in update_tg_cfs_util()
4284 delta_sum = (long)new_sum - (long)se->avg.util_sum; in update_tg_cfs_util()
4285 se->avg.util_sum = new_sum; in update_tg_cfs_util()
4288 add_positive(&cfs_rq->avg.util_avg, delta_avg); in update_tg_cfs_util()
4289 add_positive(&cfs_rq->avg.util_sum, delta_sum); in update_tg_cfs_util()
4292 cfs_rq->avg.util_sum = max_t(u32, cfs_rq->avg.util_sum, in update_tg_cfs_util()
4293 cfs_rq->avg.util_avg * PELT_MIN_DIVIDER); in update_tg_cfs_util()
4299 long delta_sum, delta_avg = gcfs_rq->avg.runnable_avg - se->avg.runnable_avg; in update_tg_cfs_runnable()
4310 divider = get_pelt_divider(&cfs_rq->avg); in update_tg_cfs_runnable()
4313 se->avg.runnable_avg = gcfs_rq->avg.runnable_avg; in update_tg_cfs_runnable()
4314 new_sum = se->avg.runnable_avg * divider; in update_tg_cfs_runnable()
4315 delta_sum = (long)new_sum - (long)se->avg.runnable_sum; in update_tg_cfs_runnable()
4316 se->avg.runnable_sum = new_sum; in update_tg_cfs_runnable()
4319 add_positive(&cfs_rq->avg.runnable_avg, delta_avg); in update_tg_cfs_runnable()
4320 add_positive(&cfs_rq->avg.runnable_sum, delta_sum); in update_tg_cfs_runnable()
4322 cfs_rq->avg.runnable_sum = max_t(u32, cfs_rq->avg.runnable_sum, in update_tg_cfs_runnable()
4323 cfs_rq->avg.runnable_avg * PELT_MIN_DIVIDER); in update_tg_cfs_runnable()
4344 divider = get_pelt_divider(&cfs_rq->avg); in update_tg_cfs_load()
4351 runnable_sum += se->avg.load_sum; in update_tg_cfs_load()
4359 load_sum = div_u64(gcfs_rq->avg.load_sum, in update_tg_cfs_load()
4364 runnable_sum = min(se->avg.load_sum, load_sum); in update_tg_cfs_load()
4373 running_sum = se->avg.util_sum >> SCHED_CAPACITY_SHIFT; in update_tg_cfs_load()
4379 delta_avg = load_avg - se->avg.load_avg; in update_tg_cfs_load()
4383 delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum; in update_tg_cfs_load()
4385 se->avg.load_sum = runnable_sum; in update_tg_cfs_load()
4386 se->avg.load_avg = load_avg; in update_tg_cfs_load()
4387 add_positive(&cfs_rq->avg.load_avg, delta_avg); in update_tg_cfs_load()
4388 add_positive(&cfs_rq->avg.load_sum, delta_sum); in update_tg_cfs_load()
4390 cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum, in update_tg_cfs_load()
4391 cfs_rq->avg.load_avg * PELT_MIN_DIVIDER); in update_tg_cfs_load()
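update_tg_cfs_util/runnable/load() propagate a child group runqueue's averages into its owning entity and then into the parent runqueue: the entity adopts the group's _avg, rebuilds its _sum from the parent's PELT divider, and only the deltas are applied upward. A simplified userspace model of the util step (struct sched_avg here is a stand-in, not the kernel's definition):

    #include <stdio.h>

    #define PELT_MIN_DIVIDER (47742 - 1024)

    struct sched_avg { long util_avg; long util_sum; unsigned period_contrib; };

    static long get_pelt_divider(const struct sched_avg *sa)
    {
        return PELT_MIN_DIVIDER + sa->period_contrib;
    }

    int main(void)
    {
        /* Illustrative state, named after gcfs_rq/se/cfs_rq above. */
        struct sched_avg gcfs = { .util_avg = 300 };
        struct sched_avg se   = { .util_avg = 200,
                                  .util_sum = 200 * PELT_MIN_DIVIDER };
        struct sched_avg cfs  = { .util_avg = 500,
                                  .util_sum = 500 * PELT_MIN_DIVIDER,
                                  .period_contrib = 512 };

        long delta_avg = gcfs.util_avg - se.util_avg;  /* line 4267 */
        long divider = get_pelt_divider(&cfs);         /* line 4278 */

        se.util_avg = gcfs.util_avg;      /* entity adopts the group value */
        long new_sum = se.util_avg * divider;
        long delta_sum = new_sum - se.util_sum;
        se.util_sum = new_sum;

        cfs.util_avg += delta_avg;        /* add_positive() in the kernel */
        cfs.util_sum += delta_sum;

        printf("parent util_avg=%ld util_sum=%ld\n",
               cfs.util_avg, cfs.util_sum);
        return 0;
    }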
4440 if (se->avg.load_avg || se->avg.util_avg) in skip_blocked_update()
4481 if (load_avg_is_decayed(&se->avg)) in migrate_se_pelt_lag()
4575 struct sched_avg *sa = &cfs_rq->avg; in update_cfs_rq_load_avg()
4580 u32 divider = get_pelt_divider(&cfs_rq->avg); in update_cfs_rq_load_avg()
4649 u32 divider = get_pelt_divider(&cfs_rq->avg); in attach_entity_load_avg()
4658 se->avg.last_update_time = cfs_rq->avg.last_update_time; in attach_entity_load_avg()
4659 se->avg.period_contrib = cfs_rq->avg.period_contrib; in attach_entity_load_avg()
4667 se->avg.util_sum = se->avg.util_avg * divider; in attach_entity_load_avg()
4669 se->avg.runnable_sum = se->avg.runnable_avg * divider; in attach_entity_load_avg()
4671 se->avg.load_sum = se->avg.load_avg * divider; in attach_entity_load_avg()
4672 if (se_weight(se) < se->avg.load_sum) in attach_entity_load_avg()
4673 se->avg.load_sum = div_u64(se->avg.load_sum, se_weight(se)); in attach_entity_load_avg()
4675 se->avg.load_sum = 1; in attach_entity_load_avg()
4678 cfs_rq->avg.util_avg += se->avg.util_avg; in attach_entity_load_avg()
4679 cfs_rq->avg.util_sum += se->avg.util_sum; in attach_entity_load_avg()
4680 cfs_rq->avg.runnable_avg += se->avg.runnable_avg; in attach_entity_load_avg()
4681 cfs_rq->avg.runnable_sum += se->avg.runnable_sum; in attach_entity_load_avg()
4683 add_tg_cfs_propagate(cfs_rq, se->avg.load_sum); in attach_entity_load_avg()
4701 sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg); in detach_entity_load_avg()
4702 sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum); in detach_entity_load_avg()
4704 cfs_rq->avg.util_sum = max_t(u32, cfs_rq->avg.util_sum, in detach_entity_load_avg()
4705 cfs_rq->avg.util_avg * PELT_MIN_DIVIDER); in detach_entity_load_avg()
4707 sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg); in detach_entity_load_avg()
4708 sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum); in detach_entity_load_avg()
4710 cfs_rq->avg.runnable_sum = max_t(u32, cfs_rq->avg.runnable_sum, in detach_entity_load_avg()
4711 cfs_rq->avg.runnable_avg * PELT_MIN_DIVIDER); in detach_entity_load_avg()
4713 add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum); in detach_entity_load_avg()
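attach_entity_load_avg() first copies the runqueue's last_update_time and period_contrib so both sides use the same PELT divider, then rebuilds each _sum from its _avg; load_sum is stored per unit weight, hence the divide by se_weight() at line 4673. detach_entity_load_avg() undoes the sums with the clamp pattern shown earlier. A sketch of the per-weight rescaling, with made-up values:

    #include <stdint.h>
    #include <stdio.h>

    #define PELT_MIN_DIVIDER (47742 - 1024)

    int main(void)
    {
        uint32_t period_contrib = 800;    /* copied from the cfs_rq */
        uint32_t divider = PELT_MIN_DIVIDER + period_contrib;
        uint64_t load_avg = 512, weight = 1024;

        /* Lines 4671-4675: rebuild load_sum from load_avg, then convert
         * it to the per-unit-weight form the rest of PELT expects. */
        uint64_t load_sum = load_avg * divider;
        if (weight < load_sum)
            load_sum /= weight;
        else
            load_sum = 1;

        printf("divider=%u load_sum=%llu\n", divider,
               (unsigned long long)load_sum);
        return 0;
    }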
4738 if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD)) in update_load_avg()
4744 if (!se->avg.last_update_time && (flags & DO_ATTACH)) { in update_load_avg()
4803 cfs_rq->removed.util_avg += se->avg.util_avg; in remove_entity_load_avg()
4804 cfs_rq->removed.load_avg += se->avg.load_avg; in remove_entity_load_avg()
4805 cfs_rq->removed.runnable_avg += se->avg.runnable_avg; in remove_entity_load_avg()
4811 return cfs_rq->avg.runnable_avg; in cfs_rq_runnable_avg()
4816 return cfs_rq->avg.load_avg; in cfs_rq_load_avg()
4823 return READ_ONCE(p->se.avg.util_avg); in task_util()
4828 return READ_ONCE(p->se.avg.runnable_avg); in task_runnable()
4833 return READ_ONCE(p->se.avg.util_est) & ~UTIL_AVG_UNCHANGED; in _task_util_est()
4850 enqueued = cfs_rq->avg.util_est; in util_est_enqueue()
4852 WRITE_ONCE(cfs_rq->avg.util_est, enqueued); in util_est_enqueue()
4866 enqueued = cfs_rq->avg.util_est; in util_est_dequeue()
4868 WRITE_ONCE(cfs_rq->avg.util_est, enqueued); in util_est_dequeue()
4892 ewma = READ_ONCE(p->se.avg.util_est); in util_est_update()
4957 WRITE_ONCE(p->se.avg.util_est, ewma); in util_est_update()
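The util_est block (lines 4823-4957) keeps a per-task dequeue-time estimate: _task_util_est() masks off the UTIL_AVG_UNCHANGED flag bit, enqueue/dequeue add and subtract the estimate on the runqueue, and util_est_update() folds the utilization observed at dequeue into an exponentially weighted moving average. A sketch of that EWMA step, assuming the kernel's 1/4 sample weight (UTIL_EST_WEIGHT_SHIFT == 2):

    #include <stdio.h>

    #define UTIL_EST_WEIGHT_SHIFT 2   /* new sample weighs 1/4 */

    /* ewma(t) = ewma(t-1) + (sample - ewma(t-1)) / 4, in fixed point. */
    static unsigned int ewma_update(unsigned int ewma, unsigned int sample)
    {
        int diff = (int)sample - (int)ewma;
        return ((ewma << UTIL_EST_WEIGHT_SHIFT) + diff)
               >> UTIL_EST_WEIGHT_SHIFT;
    }

    int main(void)
    {
        unsigned int ewma = 400;

        /* A burst of higher utilization pulls the estimate up gradually:
         * 500, 575, 631, 673, ... converging toward 800. */
        for (int i = 0; i < 4; i++) {
            ewma = ewma_update(ewma, 800);
            printf("ewma=%u\n", ewma);
        }
        return 0;
    }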
7230 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_load_without()
7234 load = READ_ONCE(cfs_rq->avg.load_avg); in cpu_load_without()
7253 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_runnable_without()
7257 runnable = READ_ONCE(cfs_rq->avg.runnable_avg); in cpu_runnable_without()
7260 lsub_positive(&runnable, p->se.avg.runnable_avg); in cpu_runnable_without()
7988 unsigned long util = READ_ONCE(cfs_rq->avg.util_avg); in cpu_util()
7992 runnable = READ_ONCE(cfs_rq->avg.runnable_avg); in cpu_util()
8010 util_est = READ_ONCE(cfs_rq->avg.util_est); in cpu_util()
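cpu_util() (lines 7988-8010) starts from the runqueue's util_avg, optionally boosts it with the runnable signal, and takes the maximum against util_est so a CPU that just went idle is not under-reported while its average decays. A hedged sketch of that combination (the real function handles more cases around a migrating task):

    #include <stdio.h>

    static unsigned long max_ul(unsigned long a, unsigned long b)
    {
        return a > b ? a : b;
    }

    int main(void)
    {
        unsigned long util_avg = 350;   /* decayed running average */
        unsigned long runnable = 500;   /* contention-aware signal */
        unsigned long util_est = 600;   /* estimate from the util_est block */
        int boost = 1;

        unsigned long util = util_avg;
        if (boost)                      /* lines 7988-7992 */
            util = max_ul(util, runnable);
        util = max_ul(util, util_est);  /* fed by line 8010 */

        printf("cpu_util=%lu\n", util);
        return 0;
    }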
8075 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_util_without()
8657 se->avg.last_update_time = 0; in migrate_task_rq_fair()
9706 if (cfs_rq->avg.load_avg) in cfs_rq_has_blocked()
9709 if (cfs_rq->avg.util_avg) in cfs_rq_has_blocked()
9840 load = div64_ul(load * se->avg.load_avg, in update_cfs_rq_h_load()
9853 return div64_ul(p->se.avg.load_avg * cfs_rq->h_load, in task_h_load()
9871 return p->se.avg.load_avg; in task_h_load()
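task_h_load() (lines 9840-9871) resolves a task's effective load inside a group hierarchy: each level's h_load is the share of the root's load carried down to that runqueue, and the task contributes its own load_avg scaled by h_load over the runqueue's total load (the +1 guards against dividing by zero). Without group scheduling it collapses to se.avg.load_avg (line 9871). A toy two-level example of the same arithmetic, values illustrative:

    #include <stdio.h>

    int main(void)
    {
        unsigned long group_se_load_avg = 512;   /* group entity on the root */
        unsigned long group_cfs_load_avg = 900;  /* total load in the group */
        unsigned long task_load_avg = 300;

        /* One level down, h_load of the group runqueue is simply the
         * load its entity presents to the root. */
        unsigned long h_load = group_se_load_avg;

        /* Line 9853: the task's share of the hierarchy's load. */
        unsigned long task_h_load =
            task_load_avg * h_load / (group_cfs_load_avg + 1);

        printf("task_h_load=%lu\n", task_h_load);
        return 0;
    }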
10607 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in task_running_on_cpu()
13183 if (!se->avg.last_update_time) in detach_entity_cfs_rq()
13314 p->se.avg.last_update_time = 0; in task_change_group_fair()
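Several sites in this listing (lines 4738-4744, 8657, 13183, 13314) treat se.avg.last_update_time == 0 as a sentinel meaning the entity's PELT signal is detached and must be re-attached on its next update, which is why migration and cgroup moves reset it to zero. A sketch of that dispatch with hypothetical helper names (the kernel equivalent is update_load_avg() with the DO_ATTACH flag):

    #include <stdbool.h>
    #include <stdio.h>

    struct entity { unsigned long long last_update_time; };

    /* Hypothetical helpers for illustration only. */
    static void age_load(struct entity *se)
    {
        (void)se;
        printf("decay the existing signal in place\n");
    }

    static void attach_load(struct entity *se, unsigned long long now)
    {
        printf("fresh attach to the new runqueue\n");
        se->last_update_time = now;
    }

    static void update(struct entity *se, unsigned long long now,
                       bool do_attach)
    {
        if (se->last_update_time)
            age_load(se);             /* normal path */
        else if (do_attach)
            attach_load(se, now);     /* migrated or regrouped entity */
    }

    int main(void)
    {
        struct entity se = { .last_update_time = 0 }; /* as after line 8657 */
        update(&se, 1000, true);      /* attaches */
        update(&se, 2000, true);      /* now ages normally */
        return 0;
    }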