Lines Matching refs:tg in kernel/sched/fair.c

336 if (cfs_rq->tg->parent && in list_add_leaf_cfs_rq()
337     cfs_rq->tg->parent->cfs_rq[cpu]->on_list) { in list_add_leaf_cfs_rq()
345     &(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list)); in list_add_leaf_cfs_rq()
355 if (!cfs_rq->tg->parent) { in list_add_leaf_cfs_rq()
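The hits above (lines 336-355) are list_add_leaf_cfs_rq() keeping rq->leaf_cfs_rq_list ordered child-before-parent: if the parent's cfs_rq is already on the list the child is linked in front of it, and a parentless (root) cfs_rq goes to the tail. A minimal userspace sketch of that invariant follows; the types and the singly-linked list are simplified stand-ins for the kernel's list_head machinery, and the tmp_alone_branch handling for not-yet-listed parents is omitted.

/*
 * Sketch of the child-before-parent ordering maintained by
 * list_add_leaf_cfs_rq().  Simplified stand-in types, not kernel code.
 */
#include <stdio.h>

struct cfs_rq {
	const char *name;
	struct cfs_rq *parent;	/* tg->parent->cfs_rq[cpu] in the kernel */
	int on_list;
	struct cfs_rq *next;	/* stand-in for leaf_cfs_rq_list linkage */
};

static struct cfs_rq *leaf_head;

static void add_leaf(struct cfs_rq *cfs_rq)
{
	struct cfs_rq **pos = &leaf_head;

	if (cfs_rq->on_list)
		return;
	if (cfs_rq->parent && cfs_rq->parent->on_list) {
		/* insert just before the parent, so the child is seen first */
		while (*pos != cfs_rq->parent)
			pos = &(*pos)->next;
	} else {
		/* no listed parent: append at the tail */
		while (*pos)
			pos = &(*pos)->next;
	}
	cfs_rq->next = *pos;
	*pos = cfs_rq;
	cfs_rq->on_list = 1;
}

int main(void)
{
	struct cfs_rq root = { .name = "root" };
	struct cfs_rq mid  = { .name = "mid",  .parent = &root };
	struct cfs_rq leaf = { .name = "leaf", .parent = &mid  };

	add_leaf(&root);
	add_leaf(&mid);
	add_leaf(&leaf);
	for (struct cfs_rq *p = leaf_head; p; p = p->next)
		printf("%s\n", p->name);	/* leaf, mid, root */
	return 0;
}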
462 static int tg_is_idle(struct task_group *tg) in tg_is_idle() argument
464 return tg->idle > 0; in tg_is_idle()
510 static inline int tg_is_idle(struct task_group *tg) in tg_is_idle() argument
4208 struct task_group *tg = cfs_rq->tg; in calc_group_shares() local
4210 tg_shares = READ_ONCE(tg->shares); in calc_group_shares()
4214 tg_weight = atomic_long_read(&tg->load_avg); in calc_group_shares()
4343 return (prev_cfs_rq->tg->parent == cfs_rq->tg); in child_cfs_rq_on_list()
4385 if (cfs_rq->tg == &root_task_group) in update_tg_load_avg()
4402 atomic_long_add(delta, &cfs_rq->tg->load_avg); in update_tg_load_avg()
4416 if (cfs_rq->tg == &root_task_group) in clear_tg_load_avg()
4421 atomic_long_add(delta, &cfs_rq->tg->load_avg); in clear_tg_load_avg()
4429 struct task_group *tg; in clear_tg_offline_cfs_rqs() local
4441 list_for_each_entry_rcu(tg, &task_groups, list) { in clear_tg_offline_cfs_rqs()
4442 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in clear_tg_offline_cfs_rqs()
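Lines 4385-4442 maintain tg->load_avg as the sum of every CPU's cfs_rq contribution: update_tg_load_avg() publishes only the delta since this cfs_rq's last contribution (skipping the root group, whose shares are never recomputed), and clear_tg_load_avg()/clear_tg_offline_cfs_rqs() back contributions out when a CPU goes offline. A userspace sketch of the delta-publishing pattern with C11 atomics; the 1/64 filter mirrors the kernel's heuristic, everything else is a stand-in.

/*
 * Sketch of the update_tg_load_avg() pattern: publish only the change
 * since our last contribution, and skip tiny deltas to limit atomic
 * traffic on the shared counter.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static _Atomic long tg_load_avg;	/* group-wide sum, shared by all CPUs */

struct cfs_rq_model {
	long load_avg;		/* this CPU's current average */
	long contrib;		/* what we last added to tg_load_avg */
};

static void update_tg_load_avg(struct cfs_rq_model *cfs_rq)
{
	long delta = cfs_rq->load_avg - cfs_rq->contrib;

	/* filter out changes smaller than ~1/64 of our contribution */
	if (labs(delta) <= cfs_rq->contrib / 64)
		return;

	atomic_fetch_add(&tg_load_avg, delta);
	cfs_rq->contrib = cfs_rq->load_avg;
}

int main(void)
{
	struct cfs_rq_model cpu0 = { .load_avg = 512 };
	struct cfs_rq_model cpu1 = { .load_avg = 256 };

	update_tg_load_avg(&cpu0);
	update_tg_load_avg(&cpu1);
	cpu0.load_avg = 520;		/* small change: filtered out */
	update_tg_load_avg(&cpu0);
	printf("%ld\n", atomic_load(&tg_load_avg));	/* 768 */
	return 0;
}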
5911 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) in tg_cfs_bandwidth() argument
5913 return &tg->cfs_bandwidth; in tg_cfs_bandwidth()
5947 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); in assign_cfs_rq_runtime()
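Line 5947 looks up the group-wide pool via tg_cfs_bandwidth() so assign_cfs_rq_runtime() can refill the local cfs_rq: under cfs_b->lock, grant min(requested slice, remaining runtime), with no limit when quota is RUNTIME_INF. A simplified sketch; a pthread mutex stands in for the kernel's raw spinlock, and period refresh/timer arming are omitted.

/*
 * Sketch of the assign_cfs_rq_runtime() idea: each cfs_rq pulls a slice
 * of runtime from the shared per-task-group pool under cfs_b->lock.
 */
#include <pthread.h>
#include <stdio.h>

#define RUNTIME_INF	(~0ULL)

struct cfs_bandwidth_model {
	pthread_mutex_t lock;
	unsigned long long quota;	/* RUNTIME_INF if unconstrained */
	unsigned long long runtime;	/* what is left in this period */
};

/* returns the amount granted to the local cfs_rq (0 => throttle) */
static unsigned long long assign_runtime(struct cfs_bandwidth_model *cfs_b,
					 unsigned long long target)
{
	unsigned long long amount;

	pthread_mutex_lock(&cfs_b->lock);
	if (cfs_b->quota == RUNTIME_INF) {
		amount = target;		/* no limit configured */
	} else {
		amount = target < cfs_b->runtime ? target : cfs_b->runtime;
		cfs_b->runtime -= amount;
	}
	pthread_mutex_unlock(&cfs_b->lock);
	return amount;
}

int main(void)
{
	struct cfs_bandwidth_model cfs_b = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.quota = 100000, .runtime = 7000,
	};

	printf("%llu\n", assign_runtime(&cfs_b, 5000));	/* 5000 */
	printf("%llu\n", assign_runtime(&cfs_b, 5000));	/* 2000: pool nearly empty */
	return 0;
}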
6145 static int tg_unthrottle_up(struct task_group *tg, void *data) in tg_unthrottle_up() argument
6148 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in tg_unthrottle_up()
6216 static int tg_throttle_down(struct task_group *tg, void *data) in tg_throttle_down() argument
6219 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in tg_throttle_down()
6242 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); in throttle_cfs_rq()
6268 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq); in throttle_cfs_rq()
6283 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); in unthrottle_cfs_rq()
6284 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)]; in unthrottle_cfs_rq()
6311 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq); in unthrottle_cfs_rq()
6598 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); in __return_cfs_rq_runtime()
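Lines 6268 and 6311 apply throttling across a whole subtree with walk_tg_tree_from(): one visitor runs top-down before a group's children (tg_throttle_down), the other bottom-up after them (tg_unthrottle_up), keeping throttle_count coherent at every level. A recursive userspace sketch of that traversal shape; the kernel's iterative walk over tg->children is simplified here.

/*
 * Sketch of the walk_tg_tree_from() traversal used for throttling:
 * "down" runs before a group's children, "up" runs after them.
 */
#include <stdio.h>

struct tg_model {
	const char *name;
	int throttle_count;
	struct tg_model *child, *sibling;
};

typedef void (*tg_visitor)(struct tg_model *tg);

static void walk_tg_tree_from(struct tg_model *tg, tg_visitor down, tg_visitor up)
{
	down(tg);
	for (struct tg_model *c = tg->child; c; c = c->sibling)
		walk_tg_tree_from(c, down, up);
	up(tg);
}

static void tg_throttle_down(struct tg_model *tg) { tg->throttle_count++; }
static void tg_nop(struct tg_model *tg) { (void)tg; }

int main(void)
{
	struct tg_model leaf = { .name = "leaf" };
	struct tg_model mid  = { .name = "mid", .child = &leaf };

	/* throttle "mid": every descendant sees throttle_count bumped */
	walk_tg_tree_from(&mid, tg_throttle_down, tg_nop);
	printf("%s=%d %s=%d\n", mid.name, mid.throttle_count,
	       leaf.name, leaf.throttle_count);	/* mid=1 leaf=1 */
	return 0;
}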
6683 static void sync_throttle(struct task_group *tg, int cpu) in sync_throttle() argument
6690 if (!tg->parent) in sync_throttle()
6693 cfs_rq = tg->cfs_rq[cpu]; in sync_throttle()
6694 pcfs_rq = tg->parent->cfs_rq[cpu]; in sync_throttle()
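Lines 6683-6694 are sync_throttle(): when a group comes online, each of its per-CPU runqueues inherits the parent's current throttle_count so the new level agrees with the hierarchy above it. A sketch of the per-CPU copy; stand-in types, and the kernel additionally snapshots the throttle clock.

/*
 * Sketch of sync_throttle(): copy the parent's throttle state into a
 * newly onlined group's cfs_rq for one CPU.
 */
#include <stdio.h>

struct cfs_rq_m { int throttle_count; };
struct tg_m {
	struct tg_m *parent;
	struct cfs_rq_m *cfs_rq[2];	/* one per CPU; 2 CPUs here */
};

static void sync_throttle(struct tg_m *tg, int cpu)
{
	if (!tg->parent)
		return;			/* root has nothing to inherit */
	tg->cfs_rq[cpu]->throttle_count =
		tg->parent->cfs_rq[cpu]->throttle_count;
}

int main(void)
{
	struct cfs_rq_m prq = { .throttle_count = 1 }, crq = { 0 };
	struct tg_m parent = { .cfs_rq = { &prq, &prq } };
	struct tg_m child  = { .parent = &parent, .cfs_rq = { &crq, &crq } };

	sync_throttle(&child, 0);
	printf("%d\n", crq.throttle_count);	/* 1: inherited */
	return 0;
}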
6879 struct task_group *tg; in update_runtime_enabled() local
6884 list_for_each_entry_rcu(tg, &task_groups, list) { in update_runtime_enabled()
6885 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; in update_runtime_enabled()
6886 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in update_runtime_enabled()
6898 struct task_group *tg; in unthrottle_offline_cfs_rqs() local
6914 list_for_each_entry_rcu(tg, &task_groups, list) { in unthrottle_offline_cfs_rqs()
6915 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in unthrottle_offline_cfs_rqs()
6949 tg_cfs_bandwidth(cfs_rq->tg)->hierarchical_quota != RUNTIME_INF) in cfs_task_bw_constrained()
6986 static inline void sync_throttle(struct task_group *tg, int cpu) {} in sync_throttle() argument
7019 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) in tg_cfs_bandwidth() argument
10150 se = cfs_rq->tg->se[cpu]; in __update_blocked_fair()
10177 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)]; in update_cfs_rq_h_load()
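Line 10177 fetches the group se so update_cfs_rq_h_load() can push hierarchical load top-down: each child cfs_rq's h_load is the parent's h_load scaled by the group se's share of the parent's load_avg, with +1 guarding the divisor. One scaling step, modeled in userspace:

/*
 * Sketch of the top-down h_load scaling in update_cfs_rq_h_load().
 * Stand-in types; the kernel iterates this over every level.
 */
#include <stdio.h>

static unsigned long long h_load_step(unsigned long long parent_h_load,
				      unsigned long long se_load_avg,
				      unsigned long long parent_load_avg)
{
	/* +1 mirrors the kernel's guard against a zero divisor */
	return parent_h_load * se_load_avg / (parent_load_avg + 1);
}

int main(void)
{
	/*
	 * Root h_load 1024; the group se carries 256 of the root
	 * cfs_rq's 1024 load_avg, so the child sees about a quarter.
	 */
	printf("%llu\n", h_load_step(1024, 256, 1024));	/* ~255 */
	return 0;
}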
13633 while (sea->cfs_rq->tg != seb->cfs_rq->tg) { in cfs_prio_less()
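Line 13633 is core scheduling's cfs_prio_less(): before comparing vruntimes across SMT siblings it climbs both entity hierarchies, stepping up whichever entity is deeper (or both when equally deep), until the two entities belong to the same task_group. A sketch of that loop with stand-in types:

/*
 * Sketch of the climb-to-common-task_group loop in cfs_prio_less().
 */
#include <stdio.h>

struct se_model {
	const char *name;
	int depth;
	int tg;				/* stand-in for cfs_rq->tg */
	struct se_model *parent;
};

static void find_common_level(struct se_model **a, struct se_model **b)
{
	while ((*a)->tg != (*b)->tg) {
		int da = (*a)->depth, db = (*b)->depth;

		if (da >= db)
			*a = (*a)->parent;
		if (da <= db)
			*b = (*b)->parent;
	}
}

int main(void)
{
	struct se_model root_a = { "A/root", 0, 0, NULL };
	struct se_model a      = { "A/grp",  1, 1, &root_a };
	struct se_model root_b = { "B/root", 0, 0, NULL };

	struct se_model *sea = &a, *seb = &root_b;
	find_common_level(&sea, &seb);
	printf("%s vs %s\n", sea->name, seb->name);	/* A/root vs B/root */
	return 0;
}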
13930 void free_fair_sched_group(struct task_group *tg) in free_fair_sched_group() argument
13935 if (tg->cfs_rq) in free_fair_sched_group()
13936 kfree(tg->cfs_rq[i]); in free_fair_sched_group()
13937 if (tg->se) in free_fair_sched_group()
13938 kfree(tg->se[i]); in free_fair_sched_group()
13941 kfree(tg->cfs_rq); in free_fair_sched_group()
13942 kfree(tg->se); in free_fair_sched_group()
13945 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) in alloc_fair_sched_group() argument
13951 tg->cfs_rq = kzalloc_objs(cfs_rq, nr_cpu_ids); in alloc_fair_sched_group()
13952 if (!tg->cfs_rq) in alloc_fair_sched_group()
13954 tg->se = kzalloc_objs(se, nr_cpu_ids); in alloc_fair_sched_group()
13955 if (!tg->se) in alloc_fair_sched_group()
13958 tg->shares = NICE_0_LOAD; in alloc_fair_sched_group()
13960 init_cfs_bandwidth(tg_cfs_bandwidth(tg), tg_cfs_bandwidth(parent)); in alloc_fair_sched_group()
13974 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]); in alloc_fair_sched_group()
13986 void online_fair_sched_group(struct task_group *tg) in online_fair_sched_group() argument
13995 se = tg->se[i]; in online_fair_sched_group()
13999 sync_throttle(tg, i); in online_fair_sched_group()
14004 void unregister_fair_sched_group(struct task_group *tg) in unregister_fair_sched_group() argument
14008 destroy_cfs_bandwidth(tg_cfs_bandwidth(tg)); in unregister_fair_sched_group()
14011 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu]; in unregister_fair_sched_group()
14012 struct sched_entity *se = tg->se[cpu]; in unregister_fair_sched_group()
14038 void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, in init_tg_cfs_entry() argument
14044 cfs_rq->tg = tg; in init_tg_cfs_entry()
14048 tg->cfs_rq[cpu] = cfs_rq; in init_tg_cfs_entry()
14049 tg->se[cpu] = se; in init_tg_cfs_entry()
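Lines 13930-14049 cover group setup and teardown: alloc_fair_sched_group() builds per-CPU pointer arrays for cfs_rq and se, and init_tg_cfs_entry() wires each pair to the tg and hangs the group's cfs_rq off its se. A userspace sketch of just the pointer wiring; calloc() stands in for the kernel allocators, and error unwinding plus all scheduler fields are dropped.

/*
 * Sketch of the per-CPU wiring done by alloc_fair_sched_group() and
 * init_tg_cfs_entry().
 */
#include <stdlib.h>
#include <stdio.h>

struct cfs_rq_m;
struct se_m { struct cfs_rq_m *my_q; };		/* group se owns a cfs_rq */
struct cfs_rq_m { struct tg_m *tg; };

struct tg_m {
	struct cfs_rq_m **cfs_rq;	/* one per CPU */
	struct se_m **se;		/* one per CPU; NULL for root */
};

static void init_tg_cfs_entry(struct tg_m *tg, struct cfs_rq_m *cfs_rq,
			      struct se_m *se, int cpu)
{
	cfs_rq->tg = tg;
	tg->cfs_rq[cpu] = cfs_rq;
	tg->se[cpu] = se;
	if (se)
		se->my_q = cfs_rq;
}

static int alloc_fair_sched_group(struct tg_m *tg, int nr_cpu_ids)
{
	tg->cfs_rq = calloc(nr_cpu_ids, sizeof(*tg->cfs_rq));
	tg->se = calloc(nr_cpu_ids, sizeof(*tg->se));
	if (!tg->cfs_rq || !tg->se)
		return 0;
	for (int i = 0; i < nr_cpu_ids; i++)
		init_tg_cfs_entry(tg, calloc(1, sizeof(struct cfs_rq_m)),
				  calloc(1, sizeof(struct se_m)), i);
	return 1;
}

int main(void)
{
	struct tg_m tg = { 0 };

	if (alloc_fair_sched_group(&tg, 2))
		printf("cpu1 cfs_rq->tg ok: %d\n", tg.cfs_rq[1]->tg == &tg);
	return 0;
}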
14071 static int __sched_group_set_shares(struct task_group *tg, unsigned long shares) in __sched_group_set_shares() argument
14080 if (!tg->se[0]) in __sched_group_set_shares()
14085 if (tg->shares == shares) in __sched_group_set_shares()
14088 tg->shares = shares; in __sched_group_set_shares()
14091 struct sched_entity *se = tg->se[i]; in __sched_group_set_shares()
14107 int sched_group_set_shares(struct task_group *tg, unsigned long shares) in sched_group_set_shares() argument
14112 if (tg_is_idle(tg)) in sched_group_set_shares()
14115 ret = __sched_group_set_shares(tg, shares); in sched_group_set_shares()
14121 int sched_group_set_idle(struct task_group *tg, long idle) in sched_group_set_idle() argument
14125 if (tg == &root_task_group) in sched_group_set_idle()
14133 if (tg->idle == idle) { in sched_group_set_idle()
14138 tg->idle = idle; in sched_group_set_idle()
14142 struct sched_entity *se = tg->se[i]; in sched_group_set_idle()
14143 struct cfs_rq *grp_cfs_rq = tg->cfs_rq[i]; in sched_group_set_idle()
14177 if (tg_is_idle(tg)) in sched_group_set_idle()
14178 __sched_group_set_shares(tg, scale_load(WEIGHT_IDLEPRIO)); in sched_group_set_idle()
14180 __sched_group_set_shares(tg, NICE_0_LOAD); in sched_group_set_idle()
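Lines 14071-14180 show how shares and the idle attribute interlock: sched_group_set_shares() rejects idle groups (the tg_is_idle() check at 14112), and sched_group_set_idle() itself forces shares to WEIGHT_IDLEPRIO on entering idle and restores NICE_0_LOAD on leaving it. A compact model of that coupling; locking, scale_load() and the per-CPU entity reweighting are omitted, and the constants are unscaled stand-ins for the kernel values.

/*
 * Model of the shares/idle coupling in sched_group_set_shares() and
 * sched_group_set_idle(): idle groups pin their shares and reject writes.
 */
#include <stdio.h>
#include <errno.h>

#define WEIGHT_IDLEPRIO	3UL
#define NICE_0_LOAD	1024UL

struct tg_m { long idle; unsigned long shares; };

static int tg_is_idle(struct tg_m *tg) { return tg->idle > 0; }

static int sched_group_set_shares(struct tg_m *tg, unsigned long shares)
{
	if (tg_is_idle(tg))
		return -EINVAL;		/* shares are fixed while idle */
	tg->shares = shares;
	return 0;
}

static int sched_group_set_idle(struct tg_m *tg, long idle)
{
	if (idle < 0 || idle > 1)
		return -EINVAL;
	tg->idle = idle;
	/* going idle forces minimal weight; leaving restores the default */
	tg->shares = tg_is_idle(tg) ? WEIGHT_IDLEPRIO : NICE_0_LOAD;
	return 0;
}

int main(void)
{
	struct tg_m tg = { .shares = NICE_0_LOAD };

	sched_group_set_idle(&tg, 1);
	printf("idle shares=%lu set=%d\n", tg.shares,
	       sched_group_set_shares(&tg, 2048));	/* 3, -22 */
	sched_group_set_idle(&tg, 0);
	printf("shares=%lu\n", tg.shares);		/* 1024 */
	return 0;
}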