Lines matching the identifier se (search query full:se) in kernel/sched/fair.c
292 static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se) in calc_delta_fair() argument
294 if (unlikely(se->load.weight != NICE_0_LOAD)) in calc_delta_fair()
295 delta = __calc_delta(delta, NICE_0_LOAD, &se->load); in calc_delta_fair()
309 #define for_each_sched_entity(se) \ argument
310 for (; se; se = se->parent)
412 is_same_group(struct sched_entity *se, struct sched_entity *pse) in is_same_group() argument
414 if (se->cfs_rq == pse->cfs_rq) in is_same_group()
415 return se->cfs_rq; in is_same_group()
420 static inline struct sched_entity *parent_entity(const struct sched_entity *se) in parent_entity() argument
422 return se->parent; in parent_entity()
426 find_matching_se(struct sched_entity **se, struct sched_entity **pse) in find_matching_se() argument
438 se_depth = (*se)->depth; in find_matching_se()
443 *se = parent_entity(*se); in find_matching_se()
451 while (!is_same_group(*se, *pse)) { in find_matching_se()
452 *se = parent_entity(*se); in find_matching_se()
467 static int se_is_idle(struct sched_entity *se) in se_is_idle() argument
469 if (entity_is_task(se)) in se_is_idle()
470 return task_has_idle_policy(task_of(se)); in se_is_idle()
471 return cfs_rq_is_idle(group_cfs_rq(se)); in se_is_idle()
476 #define for_each_sched_entity(se) \ argument
477 for (; se; se = NULL)
495 static inline struct sched_entity *parent_entity(struct sched_entity *se) in parent_entity() argument
501 find_matching_se(struct sched_entity **se, struct sched_entity **pse) in find_matching_se() argument
515 static int se_is_idle(struct sched_entity *se) in se_is_idle() argument
517 return task_has_idle_policy(task_of(se)); in se_is_idle()
557 static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se) in entity_key() argument
559 return (s64)(se->vruntime - cfs_rq->min_vruntime); in entity_key()
584 * se->vruntime):
624 avg_vruntime_add(struct cfs_rq *cfs_rq, struct sched_entity *se) in avg_vruntime_add() argument
626 unsigned long weight = scale_load_down(se->load.weight); in avg_vruntime_add()
627 s64 key = entity_key(cfs_rq, se); in avg_vruntime_add()
634 avg_vruntime_sub(struct cfs_rq *cfs_rq, struct sched_entity *se) in avg_vruntime_sub() argument
636 unsigned long weight = scale_load_down(se->load.weight); in avg_vruntime_sub()
637 s64 key = entity_key(cfs_rq, se); in avg_vruntime_sub()
695 static void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_entity_lag() argument
699 SCHED_WARN_ON(!se->on_rq); in update_entity_lag()
701 vlag = avg_vruntime(cfs_rq) - se->vruntime; in update_entity_lag()
702 limit = calc_delta_fair(max_t(u64, 2*se->slice, TICK_NSEC), se); in update_entity_lag()
704 se->vlag = clamp(vlag, -limit, limit); in update_entity_lag()
721 * Note: using 'avg_vruntime() > se->vruntime' is inaccurate due
740 int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se) in entity_eligible() argument
742 return vruntime_eligible(cfs_rq, se->vruntime); in entity_eligible()
761 struct sched_entity *se = __pick_root_entity(cfs_rq); in update_min_vruntime() local
772 if (se) { in update_min_vruntime()
774 vruntime = se->min_vruntime; in update_min_vruntime()
776 vruntime = min_vruntime(vruntime, se->min_vruntime); in update_min_vruntime()
805 static inline void __min_vruntime_update(struct sched_entity *se, struct rb_node *node) in __min_vruntime_update() argument
809 if (vruntime_gt(min_vruntime, se, rse)) in __min_vruntime_update()
810 se->min_vruntime = rse->min_vruntime; in __min_vruntime_update()
814 static inline void __min_slice_update(struct sched_entity *se, struct rb_node *node) in __min_slice_update() argument
818 if (rse->min_slice < se->min_slice) in __min_slice_update()
819 se->min_slice = rse->min_slice; in __min_slice_update()
824 * se->min_vruntime = min(se->vruntime, {left,right}->min_vruntime)
826 static inline bool min_vruntime_update(struct sched_entity *se, bool exit) in min_vruntime_update() argument
828 u64 old_min_vruntime = se->min_vruntime; in min_vruntime_update()
829 u64 old_min_slice = se->min_slice; in min_vruntime_update()
830 struct rb_node *node = &se->run_node; in min_vruntime_update()
832 se->min_vruntime = se->vruntime; in min_vruntime_update()
833 __min_vruntime_update(se, node->rb_right); in min_vruntime_update()
834 __min_vruntime_update(se, node->rb_left); in min_vruntime_update()
836 se->min_slice = se->slice; in min_vruntime_update()
837 __min_slice_update(se, node->rb_right); in min_vruntime_update()
838 __min_slice_update(se, node->rb_left); in min_vruntime_update()
840 return se->min_vruntime == old_min_vruntime && in min_vruntime_update()
841 se->min_slice == old_min_slice; in min_vruntime_update()
850 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in __enqueue_entity() argument
852 avg_vruntime_add(cfs_rq, se); in __enqueue_entity()
853 se->min_vruntime = se->vruntime; in __enqueue_entity()
854 se->min_slice = se->slice; in __enqueue_entity()
855 rb_add_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline, in __enqueue_entity()
859 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in __dequeue_entity() argument
861 rb_erase_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline, in __dequeue_entity()
863 avg_vruntime_sub(cfs_rq, se); in __dequeue_entity()
901 * se->min_vruntime = min(se->vruntime, se->{left,right}->min_vruntime)
908 struct sched_entity *se = __pick_first_entity(cfs_rq); in pick_eevdf() local
917 return curr && curr->on_rq ? curr : se; in pick_eevdf()
930 if (se && entity_eligible(cfs_rq, se)) { in pick_eevdf()
931 best = se; in pick_eevdf()
949 se = __node_2_se(node); in pick_eevdf()
956 if (entity_eligible(cfs_rq, se)) { in pick_eevdf()
957 best = se; in pick_eevdf()
999 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se);
1005 static bool update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_deadline() argument
1007 if ((s64)(se->vruntime - se->deadline) < 0) in update_deadline()
1015 if (!se->custom_slice) in update_deadline()
1016 se->slice = sysctl_sched_base_slice; in update_deadline()
1021 se->deadline = se->vruntime + calc_delta_fair(se->slice, se); in update_deadline()
1037 void init_entity_runnable_average(struct sched_entity *se) in init_entity_runnable_average() argument
1039 struct sched_avg *sa = &se->avg; in init_entity_runnable_average()
1049 if (entity_is_task(se)) in init_entity_runnable_average()
1050 sa->load_avg = scale_load_down(se->load.weight); in init_entity_runnable_average()
1060 * * se_weight(se)
1084 struct sched_entity *se = &p->se; in post_init_entity_util_avg() local
1085 struct cfs_rq *cfs_rq = cfs_rq_of(se); in post_init_entity_util_avg()
1086 struct sched_avg *sa = &se->avg; in post_init_entity_util_avg()
1095 attach_entity_load_avg(cfs_rq, se); in post_init_entity_util_avg()
1101 se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq); in post_init_entity_util_avg()
1107 sa->util_avg = cfs_rq->avg.util_avg * se_weight(se); in post_init_entity_util_avg()
1121 void init_entity_runnable_average(struct sched_entity *se) in init_entity_runnable_average() argument
1174 struct sched_entity *pse, struct sched_entity *se) in do_preempt_short() argument
1179 if (pse->slice >= se->slice) in do_preempt_short()
1185 if (entity_before(pse, se)) in do_preempt_short()
1188 if (!entity_eligible(cfs_rq, se)) in do_preempt_short()
1202 delta_exec = update_curr_se(rq, &donor->se); in update_curr_common()
1262 update_curr(cfs_rq_of(&rq->donor->se)); in update_curr_fair()
1266 update_stats_wait_start_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_wait_start_fair() argument
1274 stats = __schedstats_from_se(se); in update_stats_wait_start_fair()
1276 if (entity_is_task(se)) in update_stats_wait_start_fair()
1277 p = task_of(se); in update_stats_wait_start_fair()
1283 update_stats_wait_end_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_wait_end_fair() argument
1291 stats = __schedstats_from_se(se); in update_stats_wait_end_fair()
1294 * When the sched_schedstat changes from 0 to 1, some sched se in update_stats_wait_end_fair()
1295 * maybe already in the runqueue, the se->statistics.wait_start in update_stats_wait_end_fair()
1302 if (entity_is_task(se)) in update_stats_wait_end_fair()
1303 p = task_of(se); in update_stats_wait_end_fair()
1309 update_stats_enqueue_sleeper_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_enqueue_sleeper_fair() argument
1317 stats = __schedstats_from_se(se); in update_stats_enqueue_sleeper_fair()
1319 if (entity_is_task(se)) in update_stats_enqueue_sleeper_fair()
1320 tsk = task_of(se); in update_stats_enqueue_sleeper_fair()
1329 update_stats_enqueue_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in update_stats_enqueue_fair() argument
1338 if (se != cfs_rq->curr) in update_stats_enqueue_fair()
1339 update_stats_wait_start_fair(cfs_rq, se); in update_stats_enqueue_fair()
1342 update_stats_enqueue_sleeper_fair(cfs_rq, se); in update_stats_enqueue_fair()
1346 update_stats_dequeue_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in update_stats_dequeue_fair() argument
1356 if (se != cfs_rq->curr) in update_stats_dequeue_fair()
1357 update_stats_wait_end_fair(cfs_rq, se); in update_stats_dequeue_fair()
1359 if ((flags & DEQUEUE_SLEEP) && entity_is_task(se)) { in update_stats_dequeue_fair()
1360 struct task_struct *tsk = task_of(se); in update_stats_dequeue_fair()
1378 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_curr_start() argument
1383 se->exec_start = rq_clock_task(rq_of(cfs_rq)); in update_stats_curr_start()
2771 now = p->se.exec_start; in numa_get_avg_runtime()
2772 runtime = p->se.sum_exec_runtime; in numa_get_avg_runtime()
2782 delta = p->se.avg.load_sum; in numa_get_avg_runtime()
3295 u64 runtime = p->se.sum_exec_runtime; in task_numa_work()
3532 if (unlikely(p->se.sum_exec_runtime != runtime)) { in task_numa_work()
3533 u64 diff = p->se.sum_exec_runtime - runtime; in task_numa_work()
3605 now = curr->se.sum_exec_runtime; in task_tick_numa()
3672 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) in account_entity_enqueue() argument
3674 update_load_add(&cfs_rq->load, se->load.weight); in account_entity_enqueue()
3676 if (entity_is_task(se)) { in account_entity_enqueue()
3679 account_numa_enqueue(rq, task_of(se)); in account_entity_enqueue()
3680 list_add(&se->group_node, &rq->cfs_tasks); in account_entity_enqueue()
3687 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) in account_entity_dequeue() argument
3689 update_load_sub(&cfs_rq->load, se->load.weight); in account_entity_dequeue()
3691 if (entity_is_task(se)) { in account_entity_dequeue()
3692 account_numa_dequeue(rq_of(cfs_rq), task_of(se)); in account_entity_dequeue()
3693 list_del_init(&se->group_node); in account_entity_dequeue()
3749 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in enqueue_load_avg() argument
3751 cfs_rq->avg.load_avg += se->avg.load_avg; in enqueue_load_avg()
3752 cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum; in enqueue_load_avg()
3756 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in dequeue_load_avg() argument
3758 sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg); in dequeue_load_avg()
3759 sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum); in dequeue_load_avg()
3766 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } in enqueue_load_avg() argument
3768 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } in dequeue_load_avg() argument
3771 static void place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags);
3773 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, in reweight_entity() argument
3776 bool curr = cfs_rq->curr == se; in reweight_entity()
3778 if (se->on_rq) { in reweight_entity()
3781 update_entity_lag(cfs_rq, se); in reweight_entity()
3782 se->deadline -= se->vruntime; in reweight_entity()
3783 se->rel_deadline = 1; in reweight_entity()
3785 __dequeue_entity(cfs_rq, se); in reweight_entity()
3786 update_load_sub(&cfs_rq->load, se->load.weight); in reweight_entity()
3788 dequeue_load_avg(cfs_rq, se); in reweight_entity()
3791 * Because we keep se->vlag = V - v_i, while: lag_i = w_i*(V - v_i), in reweight_entity()
3792 * we need to scale se->vlag when w_i changes. in reweight_entity()
3794 se->vlag = div_s64(se->vlag * se->load.weight, weight); in reweight_entity()
3795 if (se->rel_deadline) in reweight_entity()
3796 se->deadline = div_s64(se->deadline * se->load.weight, weight); in reweight_entity()
3798 update_load_set(&se->load, weight); in reweight_entity()
3802 u32 divider = get_pelt_divider(&se->avg); in reweight_entity()
3804 se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider); in reweight_entity()
3808 enqueue_load_avg(cfs_rq, se); in reweight_entity()
3809 if (se->on_rq) { in reweight_entity()
3810 update_load_add(&cfs_rq->load, se->load.weight); in reweight_entity()
3811 place_entity(cfs_rq, se, 0); in reweight_entity()
3813 __enqueue_entity(cfs_rq, se); in reweight_entity()
3829 struct sched_entity *se = &p->se; in reweight_task_fair() local
3830 struct cfs_rq *cfs_rq = cfs_rq_of(se); in reweight_task_fair()
3831 struct load_weight *load = &se->load; in reweight_task_fair()
3833 reweight_entity(cfs_rq, se, lw->weight); in reweight_task_fair()
3953 static void update_cfs_group(struct sched_entity *se) in update_cfs_group() argument
3955 struct cfs_rq *gcfs_rq = group_cfs_rq(se); in update_cfs_group()
3973 if (unlikely(se->load.weight != shares)) in update_cfs_group()
3974 reweight_entity(cfs_rq_of(se), se, shares); in update_cfs_group()
3978 static inline void update_cfs_group(struct sched_entity *se) in update_cfs_group() argument
4171 void set_task_rq_fair(struct sched_entity *se, in set_task_rq_fair() argument
4187 if (!(se->avg.last_update_time && prev)) in set_task_rq_fair()
4193 __update_load_avg_blocked_se(p_last_update_time, se); in set_task_rq_fair()
4194 se->avg.last_update_time = n_last_update_time; in set_task_rq_fair()
4265 update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) in update_tg_cfs_util() argument
4267 long delta_sum, delta_avg = gcfs_rq->avg.util_avg - se->avg.util_avg; in update_tg_cfs_util()
4275 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se. in update_tg_cfs_util()
4282 se->avg.util_avg = gcfs_rq->avg.util_avg; in update_tg_cfs_util()
4283 new_sum = se->avg.util_avg * divider; in update_tg_cfs_util()
4284 delta_sum = (long)new_sum - (long)se->avg.util_sum; in update_tg_cfs_util()
4285 se->avg.util_sum = new_sum; in update_tg_cfs_util()
4297 update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) in update_tg_cfs_runnable() argument
4299 long delta_sum, delta_avg = gcfs_rq->avg.runnable_avg - se->avg.runnable_avg; in update_tg_cfs_runnable()
4307 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se. in update_tg_cfs_runnable()
4313 se->avg.runnable_avg = gcfs_rq->avg.runnable_avg; in update_tg_cfs_runnable()
4314 new_sum = se->avg.runnable_avg * divider; in update_tg_cfs_runnable()
4315 delta_sum = (long)new_sum - (long)se->avg.runnable_sum; in update_tg_cfs_runnable()
4316 se->avg.runnable_sum = new_sum; in update_tg_cfs_runnable()
4327 update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) in update_tg_cfs_load() argument
4341 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se. in update_tg_cfs_load()
4351 runnable_sum += se->avg.load_sum; in update_tg_cfs_load()
4363 /* But make sure to not inflate se's runnable */ in update_tg_cfs_load()
4364 runnable_sum = min(se->avg.load_sum, load_sum); in update_tg_cfs_load()
4373 running_sum = se->avg.util_sum >> SCHED_CAPACITY_SHIFT; in update_tg_cfs_load()
4376 load_sum = se_weight(se) * runnable_sum; in update_tg_cfs_load()
4379 delta_avg = load_avg - se->avg.load_avg; in update_tg_cfs_load()
4383 delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum; in update_tg_cfs_load()
4385 se->avg.load_sum = runnable_sum; in update_tg_cfs_load()
4386 se->avg.load_avg = load_avg; in update_tg_cfs_load()
4401 static inline int propagate_entity_load_avg(struct sched_entity *se) in propagate_entity_load_avg() argument
4405 if (entity_is_task(se)) in propagate_entity_load_avg()
4408 gcfs_rq = group_cfs_rq(se); in propagate_entity_load_avg()
4414 cfs_rq = cfs_rq_of(se); in propagate_entity_load_avg()
4418 update_tg_cfs_util(cfs_rq, se, gcfs_rq); in propagate_entity_load_avg()
4419 update_tg_cfs_runnable(cfs_rq, se, gcfs_rq); in propagate_entity_load_avg()
4420 update_tg_cfs_load(cfs_rq, se, gcfs_rq); in propagate_entity_load_avg()
4423 trace_pelt_se_tp(se); in propagate_entity_load_avg()
4432 static inline bool skip_blocked_update(struct sched_entity *se) in skip_blocked_update() argument
4434 struct cfs_rq *gcfs_rq = group_cfs_rq(se); in skip_blocked_update()
4440 if (se->avg.load_avg || se->avg.util_avg) in skip_blocked_update()
4464 static inline int propagate_entity_load_avg(struct sched_entity *se) in propagate_entity_load_avg() argument
4474 static inline void migrate_se_pelt_lag(struct sched_entity *se) in migrate_se_pelt_lag() argument
4481 if (load_avg_is_decayed(&se->avg)) in migrate_se_pelt_lag()
4484 cfs_rq = cfs_rq_of(se); in migrate_se_pelt_lag()
4550 __update_load_avg_blocked_se(now, se); in migrate_se_pelt_lag()
4553 static void migrate_se_pelt_lag(struct sched_entity *se) {} in migrate_se_pelt_lag() argument
4599 * Because of rounding, se->util_sum might ends up being +1 more than in update_cfs_rq_load_avg()
4638 * @se: sched_entity to attach
4643 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in attach_entity_load_avg() argument
4646 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se. in attach_entity_load_avg()
4652 * When we attach the @se to the @cfs_rq, we must align the decay in attach_entity_load_avg()
4658 se->avg.last_update_time = cfs_rq->avg.last_update_time; in attach_entity_load_avg()
4659 se->avg.period_contrib = cfs_rq->avg.period_contrib; in attach_entity_load_avg()
4667 se->avg.util_sum = se->avg.util_avg * divider; in attach_entity_load_avg()
4669 se->avg.runnable_sum = se->avg.runnable_avg * divider; in attach_entity_load_avg()
4671 se->avg.load_sum = se->avg.load_avg * divider; in attach_entity_load_avg()
4672 if (se_weight(se) < se->avg.load_sum) in attach_entity_load_avg()
4673 se->avg.load_sum = div_u64(se->avg.load_sum, se_weight(se)); in attach_entity_load_avg()
4675 se->avg.load_sum = 1; in attach_entity_load_avg()
4677 enqueue_load_avg(cfs_rq, se); in attach_entity_load_avg()
4678 cfs_rq->avg.util_avg += se->avg.util_avg; in attach_entity_load_avg()
4679 cfs_rq->avg.util_sum += se->avg.util_sum; in attach_entity_load_avg()
4680 cfs_rq->avg.runnable_avg += se->avg.runnable_avg; in attach_entity_load_avg()
4681 cfs_rq->avg.runnable_sum += se->avg.runnable_sum; in attach_entity_load_avg()
4683 add_tg_cfs_propagate(cfs_rq, se->avg.load_sum); in attach_entity_load_avg()
4693 * @se: sched_entity to detach
4698 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in detach_entity_load_avg() argument
4700 dequeue_load_avg(cfs_rq, se); in detach_entity_load_avg()
4701 sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg); in detach_entity_load_avg()
4702 sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum); in detach_entity_load_avg()
4707 sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg); in detach_entity_load_avg()
4708 sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum); in detach_entity_load_avg()
4713 add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum); in detach_entity_load_avg()
4729 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in update_load_avg() argument
4738 if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD)) in update_load_avg()
4739 __update_load_avg_se(now, cfs_rq, se); in update_load_avg()
4742 decayed |= propagate_entity_load_avg(se); in update_load_avg()
4744 if (!se->avg.last_update_time && (flags & DO_ATTACH)) { in update_load_avg()
4753 attach_entity_load_avg(cfs_rq, se); in update_load_avg()
4761 detach_entity_load_avg(cfs_rq, se); in update_load_avg()
4775 static void sync_entity_load_avg(struct sched_entity *se) in sync_entity_load_avg() argument
4777 struct cfs_rq *cfs_rq = cfs_rq_of(se); in sync_entity_load_avg()
4781 __update_load_avg_blocked_se(last_update_time, se); in sync_entity_load_avg()
4788 static void remove_entity_load_avg(struct sched_entity *se) in remove_entity_load_avg() argument
4790 struct cfs_rq *cfs_rq = cfs_rq_of(se); in remove_entity_load_avg()
4799 sync_entity_load_avg(se); in remove_entity_load_avg()
4803 cfs_rq->removed.util_avg += se->avg.util_avg; in remove_entity_load_avg()
4804 cfs_rq->removed.load_avg += se->avg.load_avg; in remove_entity_load_avg()
4805 cfs_rq->removed.runnable_avg += se->avg.runnable_avg; in remove_entity_load_avg()
4823 return READ_ONCE(p->se.avg.util_avg); in task_util()
4828 return READ_ONCE(p->se.avg.runnable_avg); in task_runnable()
4833 return READ_ONCE(p->se.avg.util_est) & ~UTIL_AVG_UNCHANGED; in _task_util_est()
4892 ewma = READ_ONCE(p->se.avg.util_est); in util_est_update()
4957 WRITE_ONCE(p->se.avg.util_est, ewma); in util_est_update()
4959 trace_sched_util_est_se_tp(&p->se); in util_est_update()
5140 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1) in update_load_avg() argument
5145 static inline void remove_entity_load_avg(struct sched_entity *se) {} in remove_entity_load_avg() argument
5148 attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} in attach_entity_load_avg() argument
5150 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} in detach_entity_load_avg() argument
5172 struct sched_entity *se = &p->se; in __setparam_fair() local
5176 se->custom_slice = 1; in __setparam_fair()
5177 se->slice = clamp_t(u64, attr->sched_runtime, in __setparam_fair()
5181 se->custom_slice = 0; in __setparam_fair()
5182 se->slice = sysctl_sched_base_slice; in __setparam_fair()
5187 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in place_entity() argument
5192 if (!se->custom_slice) in place_entity()
5193 se->slice = sysctl_sched_base_slice; in place_entity()
5194 vslice = calc_delta_fair(se->slice, se); in place_entity()
5204 if (sched_feat(PLACE_LAG) && cfs_rq->nr_queued && se->vlag) { in place_entity()
5208 lag = se->vlag; in place_entity()
5266 lag *= load + scale_load_down(se->load.weight); in place_entity()
5272 se->vruntime = vruntime - lag; in place_entity()
5274 if (se->rel_deadline) { in place_entity()
5275 se->deadline += se->vruntime; in place_entity()
5276 se->rel_deadline = 0; in place_entity()
5291 se->deadline = se->vruntime + vslice; in place_entity()
5298 requeue_delayed_entity(struct sched_entity *se);
5301 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in enqueue_entity() argument
5303 bool curr = cfs_rq->curr == se; in enqueue_entity()
5310 place_entity(cfs_rq, se, flags); in enqueue_entity()
5323 update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH); in enqueue_entity()
5324 se_update_runnable(se); in enqueue_entity()
5330 update_cfs_group(se); in enqueue_entity()
5337 place_entity(cfs_rq, se, flags); in enqueue_entity()
5339 account_entity_enqueue(cfs_rq, se); in enqueue_entity()
5343 se->exec_start = 0; in enqueue_entity()
5346 update_stats_enqueue_fair(cfs_rq, se, flags); in enqueue_entity()
5348 __enqueue_entity(cfs_rq, se); in enqueue_entity()
5349 se->on_rq = 1; in enqueue_entity()
5368 static void __clear_buddies_next(struct sched_entity *se) in __clear_buddies_next() argument
5370 for_each_sched_entity(se) { in __clear_buddies_next()
5371 struct cfs_rq *cfs_rq = cfs_rq_of(se); in __clear_buddies_next()
5372 if (cfs_rq->next != se) in __clear_buddies_next()
5379 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se) in clear_buddies() argument
5381 if (cfs_rq->next == se) in clear_buddies()
5382 __clear_buddies_next(se); in clear_buddies()
5387 static void set_delayed(struct sched_entity *se) in set_delayed() argument
5389 se->sched_delayed = 1; in set_delayed()
5392 * Delayed se of cfs_rq have no tasks queued on them. in set_delayed()
5396 if (!entity_is_task(se)) in set_delayed()
5399 for_each_sched_entity(se) { in set_delayed()
5400 struct cfs_rq *cfs_rq = cfs_rq_of(se); in set_delayed()
5408 static void clear_delayed(struct sched_entity *se) in clear_delayed() argument
5410 se->sched_delayed = 0; in clear_delayed()
5413 * Delayed se of cfs_rq have no tasks queued on them. in clear_delayed()
5418 if (!entity_is_task(se)) in clear_delayed()
5421 for_each_sched_entity(se) { in clear_delayed()
5422 struct cfs_rq *cfs_rq = cfs_rq_of(se); in clear_delayed()
5430 static inline void finish_delayed_dequeue_entity(struct sched_entity *se) in finish_delayed_dequeue_entity() argument
5432 clear_delayed(se); in finish_delayed_dequeue_entity()
5433 if (sched_feat(DELAY_ZERO) && se->vlag > 0) in finish_delayed_dequeue_entity()
5434 se->vlag = 0; in finish_delayed_dequeue_entity()
5438 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in dequeue_entity() argument
5444 clear_buddies(cfs_rq, se); in dequeue_entity()
5447 SCHED_WARN_ON(!se->sched_delayed); in dequeue_entity()
5457 SCHED_WARN_ON(delay && se->sched_delayed); in dequeue_entity()
5460 !entity_eligible(cfs_rq, se)) { in dequeue_entity()
5461 update_load_avg(cfs_rq, se, 0); in dequeue_entity()
5462 set_delayed(se); in dequeue_entity()
5467 if (entity_is_task(se) && task_on_rq_migrating(task_of(se))) in dequeue_entity()
5479 update_load_avg(cfs_rq, se, action); in dequeue_entity()
5480 se_update_runnable(se); in dequeue_entity()
5482 update_stats_dequeue_fair(cfs_rq, se, flags); in dequeue_entity()
5484 update_entity_lag(cfs_rq, se); in dequeue_entity()
5486 se->deadline -= se->vruntime; in dequeue_entity()
5487 se->rel_deadline = 1; in dequeue_entity()
5490 if (se != cfs_rq->curr) in dequeue_entity()
5491 __dequeue_entity(cfs_rq, se); in dequeue_entity()
5492 se->on_rq = 0; in dequeue_entity()
5493 account_entity_dequeue(cfs_rq, se); in dequeue_entity()
5498 update_cfs_group(se); in dequeue_entity()
5501 * Now advance min_vruntime if @se was the entity holding it back, in dequeue_entity()
5510 finish_delayed_dequeue_entity(se); in dequeue_entity()
5519 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in set_next_entity() argument
5521 clear_buddies(cfs_rq, se); in set_next_entity()
5524 if (se->on_rq) { in set_next_entity()
5530 update_stats_wait_end_fair(cfs_rq, se); in set_next_entity()
5531 __dequeue_entity(cfs_rq, se); in set_next_entity()
5532 update_load_avg(cfs_rq, se, UPDATE_TG); in set_next_entity()
5537 se->vlag = se->deadline; in set_next_entity()
5540 update_stats_curr_start(cfs_rq, se); in set_next_entity()
5542 cfs_rq->curr = se; in set_next_entity()
5550 rq_of(cfs_rq)->cfs.load.weight >= 2*se->load.weight) { in set_next_entity()
5553 stats = __schedstats_from_se(se); in set_next_entity()
5556 se->sum_exec_runtime - se->prev_sum_exec_runtime)); in set_next_entity()
5559 se->prev_sum_exec_runtime = se->sum_exec_runtime; in set_next_entity()
5562 static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags);
5574 struct sched_entity *se; in pick_next_entity() local
5586 se = pick_eevdf(cfs_rq); in pick_next_entity()
5587 if (se->sched_delayed) { in pick_next_entity()
5588 dequeue_entities(rq, se, DEQUEUE_SLEEP | DEQUEUE_DELAYED); in pick_next_entity()
5590 * Must not reference @se again, see __block_task(). in pick_next_entity()
5594 return se; in pick_next_entity()
5874 struct sched_entity *se; in throttle_cfs_rq() local
5899 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))]; in throttle_cfs_rq()
5909 for_each_sched_entity(se) { in throttle_cfs_rq()
5910 struct cfs_rq *qcfs_rq = cfs_rq_of(se); in throttle_cfs_rq()
5914 if (!se->on_rq) in throttle_cfs_rq()
5923 if (se->sched_delayed) in throttle_cfs_rq()
5925 dequeue_entity(qcfs_rq, se, flags); in throttle_cfs_rq()
5927 if (cfs_rq_is_idle(group_cfs_rq(se))) in throttle_cfs_rq()
5936 se = parent_entity(se); in throttle_cfs_rq()
5941 for_each_sched_entity(se) { in throttle_cfs_rq()
5942 struct cfs_rq *qcfs_rq = cfs_rq_of(se); in throttle_cfs_rq()
5944 if (!se->on_rq) in throttle_cfs_rq()
5947 update_load_avg(qcfs_rq, se, 0); in throttle_cfs_rq()
5948 se_update_runnable(se); in throttle_cfs_rq()
5950 if (cfs_rq_is_idle(group_cfs_rq(se))) in throttle_cfs_rq()
5958 /* At this point se is NULL and we are at root level*/ in throttle_cfs_rq()
5980 struct sched_entity *se; in unthrottle_cfs_rq() local
5984 se = cfs_rq->tg->se[cpu_of(rq)]; in unthrottle_cfs_rq()
6008 for_each_sched_entity(se) { in unthrottle_cfs_rq()
6009 if (list_add_leaf_cfs_rq(cfs_rq_of(se))) in unthrottle_cfs_rq()
6018 for_each_sched_entity(se) { in unthrottle_cfs_rq()
6019 struct cfs_rq *qcfs_rq = cfs_rq_of(se); in unthrottle_cfs_rq()
6022 if (se->sched_delayed) { in unthrottle_cfs_rq()
6025 dequeue_entity(qcfs_rq, se, flags); in unthrottle_cfs_rq()
6026 } else if (se->on_rq) in unthrottle_cfs_rq()
6028 enqueue_entity(qcfs_rq, se, ENQUEUE_WAKEUP); in unthrottle_cfs_rq()
6030 if (cfs_rq_is_idle(group_cfs_rq(se))) in unthrottle_cfs_rq()
6042 for_each_sched_entity(se) { in unthrottle_cfs_rq()
6043 struct cfs_rq *qcfs_rq = cfs_rq_of(se); in unthrottle_cfs_rq()
6045 update_load_avg(qcfs_rq, se, UPDATE_TG); in unthrottle_cfs_rq()
6046 se_update_runnable(se); in unthrottle_cfs_rq()
6048 if (cfs_rq_is_idle(group_cfs_rq(se))) in unthrottle_cfs_rq()
6064 /* At this point se is NULL and we are at root level*/ in unthrottle_cfs_rq()
6777 struct sched_entity *se = &p->se; in hrtick_start_fair() local
6782 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime; in hrtick_start_fair()
6783 u64 slice = se->slice; in hrtick_start_fair()
6881 requeue_delayed_entity(struct sched_entity *se) in requeue_delayed_entity() argument
6883 struct cfs_rq *cfs_rq = cfs_rq_of(se); in requeue_delayed_entity()
6886 * se->sched_delayed should imply: se->on_rq == 1. in requeue_delayed_entity()
6890 SCHED_WARN_ON(!se->sched_delayed); in requeue_delayed_entity()
6891 SCHED_WARN_ON(!se->on_rq); in requeue_delayed_entity()
6894 update_entity_lag(cfs_rq, se); in requeue_delayed_entity()
6895 if (se->vlag > 0) { in requeue_delayed_entity()
6897 if (se != cfs_rq->curr) in requeue_delayed_entity()
6898 __dequeue_entity(cfs_rq, se); in requeue_delayed_entity()
6899 se->vlag = 0; in requeue_delayed_entity()
6900 place_entity(cfs_rq, se, 0); in requeue_delayed_entity()
6901 if (se != cfs_rq->curr) in requeue_delayed_entity()
6902 __enqueue_entity(cfs_rq, se); in requeue_delayed_entity()
6907 update_load_avg(cfs_rq, se, 0); in requeue_delayed_entity()
6908 clear_delayed(se); in requeue_delayed_entity()
6920 struct sched_entity *se = &p->se; in enqueue_task_fair() local
6933 if (!(p->se.sched_delayed && (task_on_rq_migrating(p) || (flags & ENQUEUE_RESTORE)))) in enqueue_task_fair()
6937 requeue_delayed_entity(se); in enqueue_task_fair()
6949 if (task_new && se->sched_delayed) in enqueue_task_fair()
6952 for_each_sched_entity(se) { in enqueue_task_fair()
6953 if (se->on_rq) { in enqueue_task_fair()
6954 if (se->sched_delayed) in enqueue_task_fair()
6955 requeue_delayed_entity(se); in enqueue_task_fair()
6958 cfs_rq = cfs_rq_of(se); in enqueue_task_fair()
6966 se->slice = slice; in enqueue_task_fair()
6967 se->custom_slice = 1; in enqueue_task_fair()
6969 enqueue_entity(cfs_rq, se, flags); in enqueue_task_fair()
6986 for_each_sched_entity(se) { in enqueue_task_fair()
6987 cfs_rq = cfs_rq_of(se); in enqueue_task_fair()
6989 update_load_avg(cfs_rq, se, UPDATE_TG); in enqueue_task_fair()
6990 se_update_runnable(se); in enqueue_task_fair()
6991 update_cfs_group(se); in enqueue_task_fair()
6993 se->slice = slice; in enqueue_task_fair()
7015 /* At this point se is NULL and we are at root level*/ in enqueue_task_fair()
7041 static void set_next_buddy(struct sched_entity *se);
7052 static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags) in dequeue_entities() argument
7065 if (entity_is_task(se)) { in dequeue_entities()
7066 p = task_of(se); in dequeue_entities()
7069 if (task_sleep || task_delayed || !se->sched_delayed) in dequeue_entities()
7072 cfs_rq = group_cfs_rq(se); in dequeue_entities()
7076 for_each_sched_entity(se) { in dequeue_entities()
7077 cfs_rq = cfs_rq_of(se); in dequeue_entities()
7079 if (!dequeue_entity(cfs_rq, se, flags)) { in dequeue_entities()
7080 if (p && &p->se == se) in dequeue_entities()
7102 se = parent_entity(se); in dequeue_entities()
7107 if (task_sleep && se && !throttled_hierarchy(cfs_rq)) in dequeue_entities()
7108 set_next_buddy(se); in dequeue_entities()
7115 for_each_sched_entity(se) { in dequeue_entities()
7116 cfs_rq = cfs_rq_of(se); in dequeue_entities()
7118 update_load_avg(cfs_rq, se, UPDATE_TG); in dequeue_entities()
7119 se_update_runnable(se); in dequeue_entities()
7120 update_cfs_group(se); in dequeue_entities()
7122 se->slice = slice; in dequeue_entities()
7171 if (!(p->se.sched_delayed && (task_on_rq_migrating(p) || (flags & DEQUEUE_SAVE)))) in dequeue_task_fair()
7175 if (dequeue_entities(rq, &p->se, flags) < 0) in dequeue_task_fair()
7230 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_load_without()
7253 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_runnable_without()
7260 lsub_positive(&runnable, p->se.avg.runnable_avg); in cpu_runnable_without()
7495 sync_entity_load_avg(&p->se); in sched_balance_find_dst_cpu()
7828 sync_entity_load_avg(&p->se); in select_idle_sibling()
8075 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_util_without()
8396 sync_entity_load_avg(&p->se); in find_energy_efficient_cpu()
8638 struct sched_entity *se = &p->se; in migrate_task_rq_fair() local
8641 remove_entity_load_avg(se); in migrate_task_rq_fair()
8653 migrate_se_pelt_lag(se); in migrate_task_rq_fair()
8657 se->avg.last_update_time = 0; in migrate_task_rq_fair()
8664 struct sched_entity *se = &p->se; in task_dead_fair() local
8666 if (se->sched_delayed) { in task_dead_fair()
8671 if (se->sched_delayed) { in task_dead_fair()
8673 dequeue_entities(rq, se, DEQUEUE_SLEEP | DEQUEUE_DELAYED); in task_dead_fair()
8678 remove_entity_load_avg(se); in task_dead_fair()
8723 static void set_next_buddy(struct sched_entity *se) in set_next_buddy() argument
8725 for_each_sched_entity(se) { in set_next_buddy()
8726 if (SCHED_WARN_ON(!se->on_rq)) in set_next_buddy()
8728 if (se_is_idle(se)) in set_next_buddy()
8730 cfs_rq_of(se)->next = se; in set_next_buddy()
8740 struct sched_entity *se = &donor->se, *pse = &p->se; in check_preempt_wakeup_fair() local
8744 if (unlikely(se == pse)) in check_preempt_wakeup_fair()
8776 find_matching_se(&se, &pse); in check_preempt_wakeup_fair()
8779 cse_is_idle = se_is_idle(se); in check_preempt_wakeup_fair()
8797 cfs_rq = cfs_rq_of(se); in check_preempt_wakeup_fair()
8806 if (do_preempt_short(cfs_rq, pse, se) && se->vlag == se->deadline) in check_preempt_wakeup_fair()
8807 se->vlag = se->deadline + 1; in check_preempt_wakeup_fair()
8823 struct sched_entity *se; in pick_task_fair() local
8839 se = pick_next_entity(rq, cfs_rq); in pick_task_fair()
8840 if (!se) in pick_task_fair()
8842 cfs_rq = group_cfs_rq(se); in pick_task_fair()
8845 return task_of(se); in pick_task_fair()
8854 struct sched_entity *se; in pick_next_task_fair() local
8862 se = &p->se; in pick_next_task_fair()
8882 struct sched_entity *pse = &prev->se; in pick_next_task_fair()
8885 while (!(cfs_rq = is_same_group(se, pse))) { in pick_next_task_fair()
8886 int se_depth = se->depth; in pick_next_task_fair()
8894 set_next_entity(cfs_rq_of(se), se); in pick_next_task_fair()
8895 se = parent_entity(se); in pick_next_task_fair()
8900 set_next_entity(cfs_rq, se); in pick_next_task_fair()
8967 struct sched_entity *se = &prev->se; in put_prev_task_fair() local
8970 for_each_sched_entity(se) { in put_prev_task_fair()
8971 cfs_rq = cfs_rq_of(se); in put_prev_task_fair()
8972 put_prev_entity(cfs_rq, se); in put_prev_task_fair()
8983 struct sched_entity *se = &curr->se; in yield_task_fair() local
8991 clear_buddies(cfs_rq, se); in yield_task_fair()
9005 se->deadline += calc_delta_fair(se->slice, se); in yield_task_fair()
9010 struct sched_entity *se = &p->se; in yield_to_task_fair() local
9013 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se))) in yield_to_task_fair()
9016 /* Tell the scheduler that we'd really like se to run next. */ in yield_to_task_fair()
9017 set_next_buddy(se); in yield_to_task_fair()
9253 (&p->se == cfs_rq_of(&p->se)->next)) in task_hot()
9269 delta = rq_clock_task(env->src_rq) - p->se.exec_start; in task_hot()
9352 !entity_eligible(task_cfs_rq(p), &p->se)) in task_is_ineligible_on_dst_cpu()
9378 if ((p->se.sched_delayed) && (env->migration_type != migrate_load)) in can_migrate_task()
9495 &env->src_rq->cfs_tasks, se.group_node) { in detach_one_task()
9560 p = list_last_entry(tasks, struct task_struct, se.group_node); in detach_tasks()
9615 list_add(&p->se.group_node, &env->tasks); in detach_tasks()
9641 list_move(&p->se.group_node, tasks); in detach_tasks()
9694 p = list_first_entry(tasks, struct task_struct, se.group_node); in attach_tasks()
9695 list_del_init(&p->se.group_node); in attach_tasks()
9778 struct sched_entity *se; in __update_blocked_fair() local
9791 se = cfs_rq->tg->se[cpu]; in __update_blocked_fair()
9792 if (se && !skip_blocked_update(se)) in __update_blocked_fair()
9793 update_load_avg(cfs_rq_of(se), se, UPDATE_TG); in __update_blocked_fair()
9818 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)]; in update_cfs_rq_h_load() local
9826 for_each_sched_entity(se) { in update_cfs_rq_h_load()
9827 cfs_rq = cfs_rq_of(se); in update_cfs_rq_h_load()
9828 WRITE_ONCE(cfs_rq->h_load_next, se); in update_cfs_rq_h_load()
9833 if (!se) { in update_cfs_rq_h_load()
9838 while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) { in update_cfs_rq_h_load()
9840 load = div64_ul(load * se->avg.load_avg, in update_cfs_rq_h_load()
9842 cfs_rq = group_cfs_rq(se); in update_cfs_rq_h_load()
9853 return div64_ul(p->se.avg.load_avg * cfs_rq->h_load, in task_h_load()
9871 return p->se.avg.load_avg; in task_h_load()
10607 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in task_running_on_cpu()
12951 __entity_slice_used(struct sched_entity *se, int min_nr_tasks) in __entity_slice_used() argument
12953 u64 rtime = se->sum_exec_runtime - se->prev_sum_exec_runtime; in __entity_slice_used()
12954 u64 slice = se->slice; in __entity_slice_used()
12980 __entity_slice_used(&curr->se, MIN_NR_TASKS_DURING_FORCEIDLE)) in task_tick_core()
12987 static void se_fi_update(const struct sched_entity *se, unsigned int fi_seq, in se_fi_update() argument
12990 for_each_sched_entity(se) { in se_fi_update()
12991 struct cfs_rq *cfs_rq = cfs_rq_of(se); in se_fi_update()
13005 struct sched_entity *se = &p->se; in task_vruntime_update() local
13010 se_fi_update(se, rq->core->core_forceidle_seq, in_fi); in task_vruntime_update()
13017 const struct sched_entity *sea = &a->se; in cfs_prio_less()
13018 const struct sched_entity *seb = &b->se; in cfs_prio_less()
13027 * Find an se in the hierarchy for tasks a and b, such that the se's in cfs_prio_less()
13051 * Find delta after normalizing se's vruntime with its cfs_rq's in cfs_prio_less()
13087 struct sched_entity *se = &curr->se; in task_tick_fair() local
13089 for_each_sched_entity(se) { in task_tick_fair()
13090 cfs_rq = cfs_rq_of(se); in task_tick_fair()
13091 entity_tick(cfs_rq, se, queued); in task_tick_fair()
13143 static void propagate_entity_cfs_rq(struct sched_entity *se) in propagate_entity_cfs_rq() argument
13145 struct cfs_rq *cfs_rq = cfs_rq_of(se); in propagate_entity_cfs_rq()
13154 se = se->parent; in propagate_entity_cfs_rq()
13156 for_each_sched_entity(se) { in propagate_entity_cfs_rq()
13157 cfs_rq = cfs_rq_of(se); in propagate_entity_cfs_rq()
13159 update_load_avg(cfs_rq, se, UPDATE_TG); in propagate_entity_cfs_rq()
13169 static void propagate_entity_cfs_rq(struct sched_entity *se) { } in propagate_entity_cfs_rq() argument
13172 static void detach_entity_cfs_rq(struct sched_entity *se) in detach_entity_cfs_rq() argument
13174 struct cfs_rq *cfs_rq = cfs_rq_of(se); in detach_entity_cfs_rq()
13183 if (!se->avg.last_update_time) in detach_entity_cfs_rq()
13188 update_load_avg(cfs_rq, se, 0); in detach_entity_cfs_rq()
13189 detach_entity_load_avg(cfs_rq, se); in detach_entity_cfs_rq()
13191 propagate_entity_cfs_rq(se); in detach_entity_cfs_rq()
13194 static void attach_entity_cfs_rq(struct sched_entity *se) in attach_entity_cfs_rq() argument
13196 struct cfs_rq *cfs_rq = cfs_rq_of(se); in attach_entity_cfs_rq()
13199 update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD); in attach_entity_cfs_rq()
13200 attach_entity_load_avg(cfs_rq, se); in attach_entity_cfs_rq()
13202 propagate_entity_cfs_rq(se); in attach_entity_cfs_rq()
13207 struct sched_entity *se = &p->se; in detach_task_cfs_rq() local
13209 detach_entity_cfs_rq(se); in detach_task_cfs_rq()
13214 struct sched_entity *se = &p->se; in attach_task_cfs_rq() local
13216 attach_entity_cfs_rq(se); in attach_task_cfs_rq()
13226 SCHED_WARN_ON(p->se.sched_delayed); in switched_to_fair()
13247 struct sched_entity *se = &p->se; in __set_next_task_fair() local
13255 list_move(&se->group_node, &rq->cfs_tasks); in __set_next_task_fair()
13261 SCHED_WARN_ON(se->sched_delayed); in __set_next_task_fair()
13278 struct sched_entity *se = &p->se; in set_next_task_fair() local
13280 for_each_sched_entity(se) { in set_next_task_fair()
13281 struct cfs_rq *cfs_rq = cfs_rq_of(se); in set_next_task_fair()
13283 set_next_entity(cfs_rq, se); in set_next_task_fair()
13313 /* Tell se's cfs_rq has been changed -- migrated */ in task_change_group_fair()
13314 p->se.avg.last_update_time = 0; in task_change_group_fair()
13327 if (tg->se) in free_fair_sched_group()
13328 kfree(tg->se[i]); in free_fair_sched_group()
13332 kfree(tg->se); in free_fair_sched_group()
13337 struct sched_entity *se; in alloc_fair_sched_group() local
13344 tg->se = kcalloc(nr_cpu_ids, sizeof(se), GFP_KERNEL); in alloc_fair_sched_group()
13345 if (!tg->se) in alloc_fair_sched_group()
13358 se = kzalloc_node(sizeof(struct sched_entity_stats), in alloc_fair_sched_group()
13360 if (!se) in alloc_fair_sched_group()
13364 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]); in alloc_fair_sched_group()
13365 init_entity_runnable_average(se); in alloc_fair_sched_group()
13378 struct sched_entity *se; in online_fair_sched_group() local
13385 se = tg->se[i]; in online_fair_sched_group()
13388 attach_entity_cfs_rq(se); in online_fair_sched_group()
13402 struct sched_entity *se = tg->se[cpu]; in unregister_fair_sched_group() local
13405 if (se) { in unregister_fair_sched_group()
13406 if (se->sched_delayed) { in unregister_fair_sched_group()
13408 if (se->sched_delayed) { in unregister_fair_sched_group()
13410 dequeue_entities(rq, se, DEQUEUE_SLEEP | DEQUEUE_DELAYED); in unregister_fair_sched_group()
13414 remove_entity_load_avg(se); in unregister_fair_sched_group()
13429 struct sched_entity *se, int cpu, in init_tg_cfs_entry() argument
13439 tg->se[cpu] = se; in init_tg_cfs_entry()
13441 /* se could be NULL for root_task_group */ in init_tg_cfs_entry()
13442 if (!se) in init_tg_cfs_entry()
13446 se->cfs_rq = &rq->cfs; in init_tg_cfs_entry()
13447 se->depth = 0; in init_tg_cfs_entry()
13449 se->cfs_rq = parent->my_q; in init_tg_cfs_entry()
13450 se->depth = parent->depth + 1; in init_tg_cfs_entry()
13453 se->my_q = cfs_rq; in init_tg_cfs_entry()
13455 update_load_set(&se->load, NICE_0_LOAD); in init_tg_cfs_entry()
13456 se->parent = parent; in init_tg_cfs_entry()
13470 if (!tg->se[0]) in __sched_group_set_shares()
13481 struct sched_entity *se = tg->se[i]; in __sched_group_set_shares() local
13487 for_each_sched_entity(se) { in __sched_group_set_shares()
13488 update_load_avg(cfs_rq_of(se), se, UPDATE_TG); in __sched_group_set_shares()
13489 update_cfs_group(se); in __sched_group_set_shares()
13532 struct sched_entity *se = tg->se[i]; in sched_group_set_idle() local
13549 for_each_sched_entity(se) { in sched_group_set_idle()
13550 struct cfs_rq *cfs_rq = cfs_rq_of(se); in sched_group_set_idle()
13552 if (!se->on_rq) in sched_group_set_idle()
13581 struct sched_entity *se = &task->se; in get_rr_interval_fair() local
13589 rr_interval = NS_TO_JIFFIES(se->slice); in get_rr_interval_fair()
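
The single most common pattern in the hits above is the for_each_sched_entity() walk: with CONFIG_FAIR_GROUP_SCHED it climbs from a task's entity toward the root of the group hierarchy via se->parent (the first #define listed), and without group scheduling it degenerates to a single iteration (the second #define). The user-space program below is only an illustrative sketch of that traversal under those assumptions; the toy_entity struct, its field names, and the example labels are invented for the demonstration and are not the kernel's real data structures.

/*
 * Toy model (plain user-space C, not kernel code) of the hierarchy walk
 * that for_each_sched_entity() performs in the group-scheduling case:
 * starting from a task's entity, follow ->parent until NULL is reached.
 */
#include <stdio.h>
#include <stddef.h>

struct toy_entity {
        const char *name;          /* label for demonstration only */
        struct toy_entity *parent; /* NULL at the root, as in the kernel */
};

/* Mirrors the CONFIG_FAIR_GROUP_SCHED variant of the macro. */
#define for_each_toy_entity(se) \
        for (; se; se = se->parent)

int main(void)
{
        struct toy_entity root  = { "root cfs_rq entity", NULL };
        struct toy_entity group = { "task group entity",  &root };
        struct toy_entity task  = { "task entity",        &group };
        struct toy_entity *se   = &task;

        /* Visits: task entity, task group entity, root cfs_rq entity. */
        for_each_toy_entity(se)
                printf("visiting %s\n", se->name);

        return 0;
}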