
Searched refs:cfs_rq (Results 1 – 7 of 7) sorted by relevance

/linux/kernel/sched/fair.c

    317  static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
    319          struct rq *rq = rq_of(cfs_rq);
    322          if (cfs_rq->on_list)
    325          cfs_rq->on_list = 1;
    336          if (cfs_rq->tg->parent &&
    337              cfs_rq->tg->parent->cfs_rq[cpu]->on_list) {
    344                  list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
    345                          &(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list));
    355          if (!cfs_rq->tg->parent) {
    360                  list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
         [all …]
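
Note on the fair.c hits: all of them fall inside list_add_leaf_cfs_rq(), which links a cfs_rq onto its runqueue's leaf_cfs_rq_list. The list_add_tail_rcu() onto the parent group's list node places a child immediately before its parent, so a forward walk of the list visits children before parents, the property the bottom-up blocked-load update walk relies on. A minimal userspace sketch of that ordering (plain C with a locally reimplemented list helper, not kernel code):

    /*
     * Inserting a node "at the tail" relative to another node of a
     * circular doubly-linked list places it immediately before that
     * node, mirroring what list_add_tail_rcu(&child, &parent) does
     * for the leaf cfs_rq list.
     */
    #include <stdio.h>

    struct node {
            const char *name;
            struct node *prev, *next;
    };

    /* list_add_tail(): insert @n just before @head. */
    static void list_add_tail(struct node *n, struct node *head)
    {
            n->prev = head->prev;
            n->next = head;
            head->prev->next = n;
            head->prev = n;
    }

    int main(void)
    {
            struct node list = { "HEAD", &list, &list };    /* empty circular list */
            struct node parent = { "parent-tg" }, child = { "child-tg" };

            list_add_tail(&parent, &list);    /* parent linked first */
            list_add_tail(&child, &parent);   /* child lands just before its parent */

            for (struct node *p = list.next; p != &list; p = p->next)
                    printf("%s\n", p->name);  /* prints child-tg, then parent-tg */
            return 0;
    }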
/linux/kernel/sched/pelt.h

    9    int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se);
    10   int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq);
    161  static inline void update_idle_cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
    165          if (unlikely(cfs_rq->pelt_clock_throttled))
    168          throttled = cfs_rq->throttled_clock_pelt_time;
    170          u64_u32_store(cfs_rq->throttled_pelt_idle, throttled);
    174  static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
    176          if (unlikely(cfs_rq->pelt_clock_throttled))
    177                  return cfs_rq->throttled_clock_pelt - cfs_rq->throttled_clock_pelt_time;
    179          return rq_clock_pelt(rq_of(cfs_rq)) - cfs_rq->throttled_clock_pelt_time;
         [all …]
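
Note on the pelt.h hits: cfs_rq_clock_pelt() gives each cfs_rq its own view of the PELT clock with accumulated throttled time subtracted, so a group's load signal neither decays nor grows across intervals in which CFS bandwidth control kept it off the CPU. A toy version of that arithmetic (userspace C; the field names mirror the kernel's, the numbers are made up):

    #include <stdint.h>
    #include <stdio.h>

    struct toy_cfs_rq {
            int      pelt_clock_throttled;       /* currently throttled? */
            uint64_t throttled_clock_pelt;       /* clock value when throttling began */
            uint64_t throttled_clock_pelt_time;  /* total time spent throttled */
    };

    /* Mirror of the two branches shown in cfs_rq_clock_pelt() above. */
    static uint64_t toy_clock_pelt(const struct toy_cfs_rq *cfs, uint64_t rq_clock)
    {
            if (cfs->pelt_clock_throttled)       /* clock frozen at throttle start */
                    return cfs->throttled_clock_pelt - cfs->throttled_clock_pelt_time;
            return rq_clock - cfs->throttled_clock_pelt_time;
    }

    int main(void)
    {
            struct toy_cfs_rq cfs = { 0, 0, 300 };   /* 300 units spent throttled */

            /* The rq clock reads 1000, but this cfs_rq only "saw" 700 of it. */
            printf("pelt clock = %llu\n",
                   (unsigned long long)toy_clock_pelt(&cfs, 1000));
            return 0;
    }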
/linux/kernel/sched/pelt.c

    307  int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se)
    310                  cfs_rq->curr == se)) {
    321  int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq)
    323          if (___update_load_sum(now, &cfs_rq->avg,
    324                  scale_load_down(cfs_rq->load.weight),
    325                  cfs_rq->h_nr_runnable,
    326                  cfs_rq->curr != NULL)) {
    328                  ___update_load_avg(&cfs_rq->avg, 1);
    329                  trace_pelt_cfs_tp(cfs_rq);
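
Note on the pelt.c hits: ___update_load_sum() accumulates runnable/running time as a geometrically decayed sum, one decay step per 1024us period, with the factor y chosen so that y^32 = 0.5 (a contribution halves every ~32ms); ___update_load_avg() then turns the sum into an average. A floating-point illustration of that series (the kernel uses a precomputed fixed-point table instead; its LOAD_AVG_MAX constant, 47742, is the fixed-point counterpart of the series limit printed below):

    #include <math.h>
    #include <stdio.h>

    int main(void)    /* build with -lm */
    {
            const double y = pow(0.5, 1.0 / 32.0);   /* y^32 == 0.5 */
            double sum = 0.0;

            /* A task that ran for all of the last 64 periods (~64ms). */
            for (int period = 0; period < 64; period++)
                    sum += 1024.0 * pow(y, period);  /* 1024us, decayed by age */

            printf("load_sum after 64 busy periods: %.0f\n", sum);
            printf("always-running limit 1024/(1-y): %.0f\n", 1024.0 / (1.0 - y));
            return 0;
    }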
/linux/kernel/sched/debug.c

    903  void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
    916          SEQ_printf_task_group_path(m, cfs_rq->tg, "cfs_rq[%d]:%s\n", cpu);
    923          root = __pick_root_entity(cfs_rq);
    926          first = __pick_first_entity(cfs_rq);
    929          last = __pick_last_entity(cfs_rq);
    932          zero_vruntime = cfs_rq->zero_vruntime;
    933          sum_w_vruntime = cfs_rq->sum_w_vruntime;
    934          sum_weight = cfs_rq->sum_weight;
    935          sum_shift = cfs_rq->sum_shift;
    936          avruntime = avg_vruntime(cfs_rq);
         [all …]
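
Note on the debug.c hits: print_cfs_rq() writes the per-CPU "cfs_rq[<cpu>]:<cgroup path>" sections of the scheduler debug dump, including the vruntime aggregates read out above. That dump is typically exposed at /sys/kernel/debug/sched/debug on recent kernels (/proc/sched_debug on older ones); treat the exact path as an assumption to verify on your system. A small reader that filters out just those section headers:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            /* Path is an assumption; requires debugfs mounted and root. */
            FILE *f = fopen("/sys/kernel/debug/sched/debug", "r");
            char line[512];

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            while (fgets(line, sizeof(line), f))
                    if (strncmp(line, "cfs_rq[", 7) == 0)  /* header from print_cfs_rq() */
                            fputs(line, stdout);
            fclose(f);
            return 0;
    }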
/linux/kernel/sched/sched.h

    84    struct cfs_rq;
    486           struct cfs_rq **cfs_rq;
    582   extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
    589   extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
    615                   struct cfs_rq *prev, struct cfs_rq *next);
    678   struct cfs_rq {
    1183          struct cfs_rq cfs;
    1361  static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
    1363          return cfs_rq->rq;
    1368  static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
          [all …]
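
Note on the sched.h hits: together they sketch the containment picture: every rq embeds a root cfs_rq (line 1183), a task_group keeps one cfs_rq pointer per CPU (line 486), and rq_of() maps a cfs_rq back to its owning rq. The two rq_of() definitions at lines 1361 and 1368 are the group-scheduling and non-group variants; with group scheduling the cfs_rq carries an explicit back-pointer, as line 1363 shows. A toy userspace model of those relationships (not the real structs):

    #include <stdio.h>

    struct rq;

    struct toy_cfs_rq {
            struct rq *rq;               /* back-pointer followed by rq_of() */
    };

    struct rq {
            struct toy_cfs_rq cfs;       /* root cfs_rq embedded in each rq */
    };

    struct task_group {
            struct toy_cfs_rq **cfs_rq;  /* one cfs_rq per CPU */
    };

    static struct rq *rq_of(struct toy_cfs_rq *cfs_rq)
    {
            return cfs_rq->rq;           /* group-scheduling flavour of rq_of() */
    }

    int main(void)
    {
            struct rq rq0;

            rq0.cfs.rq = &rq0;           /* wire the back-pointer */
            printf("rq_of(&rq0.cfs) == &rq0: %d\n", rq_of(&rq0.cfs) == &rq0);
            return 0;
    }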
/linux/kernel/sched/core.c

    4468          p->se.cfs_rq = NULL;                                in __sched_fork()
    5535          struct sched_entity *curr = p->se.cfs_rq->curr;     in prefetch_curr_exec_start()
    8904          root_task_group.cfs_rq = (struct cfs_rq **)ptr;     in sched_init()
    9831          struct cfs_rq *cfs_rq = tg->cfs_rq[i];              in tg_set_cfs_bandwidth()
    9832          struct rq *rq = cfs_rq->rq;                         in tg_set_cfs_bandwidth()
    9835          cfs_rq->runtime_enabled = runtime_enabled;          in tg_set_cfs_bandwidth()
    9836          cfs_rq->runtime_remaining = 1;                      in tg_set_cfs_bandwidth()
    9838          if (cfs_rq->throttled)                              in tg_set_cfs_bandwidth()
    9839                  unthrottle_cfs_rq(cfs_rq);                  in tg_set_cfs_bandwidth()
    9999          total += READ_ONCE(tg->cfs_rq[i]->throttled_clock_self_time);   in throttled_time_self()
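
Note on the core.c hits: the tg_set_cfs_bandwidth() lines show what happens when a group's CPU bandwidth limit changes: each per-CPU cfs_rq has runtime accounting switched on and any currently throttled queue is unthrottled so it restarts under the new quota. From userspace this path is normally reached through the cgroup CPU controller; a sketch using the cgroup v2 cpu.max interface (the mount point and "demo" group name are assumptions, and the group must already exist with the cpu controller enabled):

    #include <stdio.h>

    int main(void)
    {
            /* Allow 50ms of CPU time per 100ms period, i.e. half a CPU. */
            FILE *f = fopen("/sys/fs/cgroup/demo/cpu.max", "w");

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            fprintf(f, "50000 100000\n");   /* "<quota_us> <period_us>" */
            fclose(f);
            return 0;
    }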
/linux/include/linux/sched.h

    65    struct cfs_rq;
    484    * For cfs_rq, they are the aggregated values of all runnable and blocked
    499    * with the highest load (=88761), always runnable on a single cfs_rq,
    607           struct cfs_rq *cfs_rq;
    609           struct cfs_rq *my_q;
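
Note on the include/linux/sched.h hits: inside struct sched_entity, cfs_rq (line 607) is the queue the entity is enqueued on, while my_q (line 609) is the queue a group entity owns for its children. The "(=88761)" in the comment fragment at line 499 is the load weight of a nice -20 task: weights are spaced so each nice step changes CPU share by about 25%, roughly 1024 * 1.25^(-nice) with nice 0 pegged at 1024. A sketch of that spacing (the kernel uses the hand-tuned sched_prio_to_weight[] table, so this float formula only approximates the real entries):

    #include <math.h>
    #include <stdio.h>

    int main(void)    /* build with -lm */
    {
            for (int nice = -20; nice <= 19; nice += 5)
                    printf("nice %3d -> weight ~%6.0f\n",
                           nice, 1024.0 * pow(1.25, -nice));
            /* nice -20 prints ~88818; the kernel table entry is 88761. */
            return 0;
    }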