/*
 * Excerpts from kernel/cgroup/rstat.c (lines matching "subsystem" and
 * "level"). Elided context between matched lines is marked with "...".
 */

// SPDX-License-Identifier: GPL-2.0-only
#include "cgroup-internal.h"
/*
 * ...
 * Other css's associated with a subsystem use rstat only when
 * they define the ss->css_rstat_flush callback.
 */

/* in css_uses_rstat(): */
        return css_is_self(css) || css->ss->css_rstat_flush != NULL;

/* in css_rstat_cpu(): */
        return per_cpu_ptr(css->rstat_cpu, cpu);

/* in cgroup_rstat_base_cpu(): */
        return per_cpu_ptr(cgrp->rstat_base_cpu, cpu);

/* in ss_rstat_lock(): */
        return &ss->rstat_ss_lock;

/* in ss_lhead_cpu(): */
        return per_cpu_ptr(ss->lhead, cpu);
/*
 * css_rstat_updated - keep track of updated rstat_cpu
 * @css: target cgroup subsystem state
 * ...
 * barrier after updating the per-cpu stats and before calling
 * ...
 */

/* in css_rstat_updated(): */
        /* already on the updated list, nothing to do */
        if (llist_on_list(&rstatc->lnode))
                return;

        /*
         * ...
         * and may try to insert the same per-cpu lnode into the llist. Note
         * ...
         * successful and the winner will eventually add the per-cpu lnode to
         * ...
         */
        self = &rstatc->lnode;
        rstatc_pcpu = css->rstat_cpu;
        if (this_cpu_cmpxchg(rstatc_pcpu->lnode.next, self, NULL) != self)
                return;

        lhead = ss_lhead_cpu(css->ss, cpu);
        llist_add(&rstatc->lnode, lhead);
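/*
 * Illustrative sketch (not from rstat.c): a minimal userspace analog of
 * the lnode claim above, using C11 atomics in place of this_cpu_cmpxchg().
 * An off-list node points ->next at itself; whoever atomically flips it
 * from self to NULL wins the right to publish the node, so a reentrant
 * irq/nmi context can never double-add it. All names below are invented
 * for the sketch.
 */
#include <stdatomic.h>
#include <stdio.h>

struct lnode {
        _Atomic(struct lnode *) next;
};

/* succeeds only for the one context that flips self -> NULL */
static int lnode_try_claim(struct lnode *n)
{
        struct lnode *expected = n;     /* self-pointing means "off list" */

        return atomic_compare_exchange_strong(&n->next, &expected, NULL);
}

int main(void)
{
        struct lnode n = { .next = &n };        /* initialized off-list */

        printf("first claim:  %d\n", lnode_try_claim(&n));      /* 1: won */
        printf("second claim: %d\n", lnode_try_claim(&n));      /* 0: lost */
        return 0;
}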
/* in __css_process_update_tree(): */
        struct cgroup_subsys_state *parent = css->parent;
        ...
        /*
         * Both additions and removals are bottom-up. If a cgroup
         * is already in the tree, all ancestors are.
         */
        if (rstatc->updated_next)
                break;

        /* a root css has no parent; mark it on-tree by pointing at itself */
        if (!parent) {
                rstatc->updated_next = css;
                ...
        }

        /* link @css at the head of @parent's updated_children list */
        rstatc->updated_next = prstatc->updated_children;
        prstatc->updated_children = css;
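/*
 * Illustrative sketch (not from rstat.c): how the updated_children /
 * updated_next encoding behaves. An empty children list is terminated by
 * the owner itself, so the first inserted child inherits the parent
 * pointer as its list terminator, which is exactly the "C1 -> C2 -> P"
 * shape shown in the notation comment further down. Types and names are
 * invented for the sketch.
 */
#include <stdio.h>

struct node {
        const char *name;
        struct node *parent;
        struct node *updated_children;  /* self-terminated: == node when empty */
        struct node *updated_next;      /* NULL when off-tree */
};

static void node_init(struct node *n, const char *name, struct node *parent)
{
        n->name = name;
        n->parent = parent;
        n->updated_children = n;        /* empty list points back at owner */
        n->updated_next = NULL;
}

/* bottom-up insertion, mirroring __css_process_update_tree() */
static void process_update_tree(struct node *n)
{
        while (n) {
                struct node *parent = n->parent;

                if (n->updated_next)    /* already linked: ancestors are too */
                        return;
                if (!parent) {          /* root: self-mark as on-tree */
                        n->updated_next = n;
                        return;
                }
                n->updated_next = parent->updated_children;
                parent->updated_children = n;
                n = parent;
        }
}

int main(void)
{
        struct node root, a, b;

        node_init(&root, "root", NULL);
        node_init(&a, "A", &root);
        node_init(&b, "B", &root);

        process_update_tree(&a);
        process_update_tree(&b);

        /* the chain of root's updated children terminates at root itself */
        for (struct node *c = root.updated_children; c != &root; c = c->updated_next)
                printf("updated: %s\n", c->name);       /* B, then A */
        return 0;
}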
/* in css_process_update_tree(): */
        /*
         * ...
         * init_llist_node() and per-cpu stats flushing) if the
         * ...
         * barrier here but if such a use-case arises, please add
         * ...
         */
        __css_process_update_tree(rstatc->owner, cpu);
/*
 * css_rstat_push_children - push children css's into the given list
 * ...
 * Iteratively traverse down the css_rstat_cpu updated tree level by
 * level and push all the parents first before their next-level children
 * ...
 */

/* in css_rstat_push_children(): */
        struct cgroup_subsys_state *cnext = child;      /* Next head of child css level */
        struct cgroup_subsys_state *ghead = NULL;       /* Head of grandchild css level */

        child->rstat_flush_next = NULL;

        /*
         * The subsystem rstat lock must be held for the whole duration from
         * ...
         */
        lockdep_assert_held(ss_rstat_lock(head->ss));

        /*
         * Notation: -> updated_next pointer
         * ...
         * P: C1 -> C2 -> P
         * C1: G11 -> G12 -> C1
         * C2: G21 -> G22 -> C2
         * ...
         */
        /* in the level-by-level loop of css_rstat_push_children(): */
                cnext = child->rstat_flush_next;
                parent = child->parent;
                ...
                child->rstat_flush_next = head;
                ...
                grandchild = crstatc->updated_children;
                ...
                        /* Push the grandchild to the next level */
                        crstatc->updated_children = child;
                        grandchild->rstat_flush_next = ghead;
                ...
                child = crstatc->updated_next;
                crstatc->updated_next = NULL;
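/*
 * Illustrative sketch (not from rstat.c): the flush list is built like a
 * stack via rstat_flush_next, so later (deeper) levels land at the head.
 * Consuming the list head-first therefore visits descendants before their
 * ancestors, which lets each parent pick up already-flushed child deltas.
 * Names below are invented for the sketch.
 */
#include <stdio.h>

struct css {
        const char *name;
        struct css *rstat_flush_next;
};

/* prepend one node, mirroring "child->rstat_flush_next = head; head = child" */
static struct css *push(struct css *head, struct css *c)
{
        c->rstat_flush_next = head;
        return c;
}

int main(void)
{
        struct css p = { "P" }, c1 = { "C1" }, c2 = { "C2" };
        struct css g11 = { "G11" }, g12 = { "G12" };
        struct css *head = NULL;

        head = push(head, &p);          /* root pushed first, visited last */
        head = push(head, &c1);         /* level 1 */
        head = push(head, &c2);
        head = push(head, &g11);        /* level 2 */
        head = push(head, &g12);

        for (struct css *pos = head; pos; pos = pos->rstat_flush_next)
                printf("flush %s\n", pos->name);        /* G12 G11 C2 C1 P */
        return 0;
}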
/*
 * css_rstat_updated_list - build a list of updated css's to be flushed
 * ...
 */

/* in css_rstat_updated_list(): */
        css_process_update_tree(root->ss, cpu);

        /* Return NULL if this subtree is not on-list */
        if (!rstatc->updated_next)
                return NULL;

        /* unlink @root from its parent's singly linked updated_children list */
        parent = root->parent;
        ...
        nextp = &prstatc->updated_children;
        ...
                nextp = &nrstatc->updated_next;
        *nextp = rstatc->updated_next;
        ...
        rstatc->updated_next = NULL;

        /* push @root first; its updated_children list is reset to self */
        root->rstat_flush_next = NULL;
        child = rstatc->updated_children;
        rstatc->updated_children = root;
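/*
 * Illustrative sketch (not from rstat.c): the nextp walk above is the
 * classic double-pointer unlink idiom for singly linked lists; holding
 * the address of the "next" slot avoids special-casing the list head.
 * Names below are invented for the sketch.
 */
#include <stdio.h>

struct item {
        int val;
        struct item *next;
};

/* splice @target out of the list without special-casing the head */
static void unlink_item(struct item **headp, struct item *target)
{
        struct item **nextp = headp;

        while (*nextp != target)
                nextp = &(*nextp)->next;        /* advance to the slot pointing at target */
        *nextp = target->next;                  /* splice target out */
}

int main(void)
{
        struct item c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
        struct item *head = &a;

        unlink_item(&head, &b);
        for (struct item *i = head; i; i = i->next)
                printf("%d\n", i->val);         /* 1, then 3 */
        return 0;
}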
/*
 * ...
 * was released and re-taken when collecting data from the CPUs. The
 * value -1 is used when obtaining the main lock, else this is the CPU
 * ...
 */

/* __css_rstat_lock() is annotated: */
        __acquires(ss_rstat_lock(css->ss))

/* in __css_rstat_lock(): */
        struct cgroup *cgrp = css->cgroup;
        ...
        lock = ss_rstat_lock(css->ss);

/* __css_rstat_unlock() is annotated: */
        __releases(ss_rstat_lock(css->ss))

/* in __css_rstat_unlock(): */
        struct cgroup *cgrp = css->cgroup;
        ...
        lock = ss_rstat_lock(css->ss);
/*
 * css_rstat_flush - flush stats in @css's rstat subtree
 * @css: target cgroup subsystem state
 *
 * Collect all per-cpu stats in @css's subtree into the global counters
 * and propagate them upwards. After this function returns, all rstat
 * nodes in the subtree have up-to-date ->stat.
 *
 * This also takes all rstat nodes in the subtree off the
 * ->updated_children lists.
 */
/* in css_rstat_flush(), for each css on the per-cpu updated list: */
        for (; pos; pos = pos->rstat_flush_next) {
                if (is_self) {  /* cgroup::self: flush base stats and bpf */
                        cgroup_base_stat_flush(pos->cgroup, cpu);
                        bpf_rstat_flush(pos->cgroup,
                                        cgroup_parent(pos->cgroup), cpu);
                } else
                        pos->ss->css_rstat_flush(pos, cpu);
        }
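/*
 * Illustrative sketch (not from rstat.c): the overall update/flush
 * contract in miniature. Writers bump a per-cpu counter and mark that
 * cpu updated; a reader flushes only the marked counters into the global
 * sum. The kernel tracks updated css's per cpu in a tree and flushes
 * under a lock; this sketch reduces that to flags, and every name in it
 * is invented.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

static long percpu_count[NR_CPUS];
static bool cpu_updated[NR_CPUS];       /* analog of the per-cpu updated tree */
static long global_count;

static void account(int cpu, long delta)
{
        percpu_count[cpu] += delta;
        cpu_updated[cpu] = true;        /* css_rstat_updated() analog */
}

static long flush_and_read(void)
{
        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
                if (!cpu_updated[cpu])
                        continue;       /* skip CPUs with nothing new */
                global_count += percpu_count[cpu];
                percpu_count[cpu] = 0;
                cpu_updated[cpu] = false;
        }
        return global_count;
}

int main(void)
{
        account(0, 5);
        account(2, 7);
        printf("%ld\n", flush_and_read());      /* 12 */
        return 0;
}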
/* in css_rstat_init(): */
        struct cgroup *cgrp = css->cgroup;
        ...
                if (!cgrp->rstat_base_cpu) {
                        cgrp->rstat_base_cpu = alloc_percpu(struct cgroup_rstat_base_cpu);
                        if (!cgrp->rstat_base_cpu)
                                return -ENOMEM;
                }
        } else if (css->ss->css_rstat_flush == NULL)
                return 0;       /* subsystem css without a flush callback: no rstat */

        if (!css->rstat_cpu) {
                css->rstat_cpu = alloc_percpu(struct css_rstat_cpu);
                if (!css->rstat_cpu) {
                        ...
                        free_percpu(cgrp->rstat_base_cpu);
                        ...
                        return -ENOMEM;
                }
        }

        /* ->updated_children list is self terminated */
        ...
                rstatc->owner = rstatc->updated_children = css;
                init_llist_node(&rstatc->lnode);
                ...
                        u64_stats_init(&rstatbc->bsync);
/* in css_rstat_exit(): */
        if (!css->rstat_cpu)
                return;
        ...
        /* sanity check: @css should be off the updated tree on every CPU */
        if (WARN_ON_ONCE(rstatc->updated_children != css) ||
            WARN_ON_ONCE(rstatc->updated_next))
                return;
        ...
                struct cgroup *cgrp = css->cgroup;

                free_percpu(cgrp->rstat_base_cpu);
                cgrp->rstat_base_cpu = NULL;
        ...
        free_percpu(css->rstat_cpu);
        css->rstat_cpu = NULL;
/*
 * ss_rstat_init - subsystem-specific rstat initialization
 * @ss: target subsystem
 * ...
 * are initialized. If @ss is non-NULL, the subsystem-specific locks
 * ...
 */

/* in ss_rstat_init(): */
        ss->lhead = alloc_percpu(struct llist_head);
        if (!ss->lhead)
                return -ENOMEM;
/* in cgroup_base_stat_add(): */
        dst_bstat->cputime.utime += src_bstat->cputime.utime;
        dst_bstat->cputime.stime += src_bstat->cputime.stime;
        dst_bstat->cputime.sum_exec_runtime += src_bstat->cputime.sum_exec_runtime;
#ifdef CONFIG_SCHED_CORE
        dst_bstat->forceidle_sum += src_bstat->forceidle_sum;
#endif
        dst_bstat->ntime += src_bstat->ntime;

/* in cgroup_base_stat_sub(): */
        dst_bstat->cputime.utime -= src_bstat->cputime.utime;
        dst_bstat->cputime.stime -= src_bstat->cputime.stime;
        dst_bstat->cputime.sum_exec_runtime -= src_bstat->cputime.sum_exec_runtime;
#ifdef CONFIG_SCHED_CORE
        dst_bstat->forceidle_sum -= src_bstat->forceidle_sum;
#endif
        dst_bstat->ntime -= src_bstat->ntime;
/* in cgroup_base_stat_flush(): */

        /* Root-level stats are sourced from system-wide CPU stats */
        if (!parent)
                return;

        /* fetch the current per-cpu values */
        do {
                seq = __u64_stats_fetch_begin(&rstatbc->bsync);
                delta = rstatbc->bstat;
        } while (__u64_stats_fetch_retry(&rstatbc->bsync, seq));

        /* propagate per-cpu delta to cgroup and per-cpu global statistics */
        cgroup_base_stat_sub(&delta, &rstatbc->last_bstat);
        cgroup_base_stat_add(&cgrp->bstat, &delta);
        cgroup_base_stat_add(&rstatbc->last_bstat, &delta);
        cgroup_base_stat_add(&rstatbc->subtree_bstat, &delta);

        /* propagate cgroup and per-cpu global delta to parent (unless that's root) */
        if (cgroup_parent(parent)) {
                delta = cgrp->bstat;
                cgroup_base_stat_sub(&delta, &cgrp->last_bstat);
                cgroup_base_stat_add(&parent->bstat, &delta);
                cgroup_base_stat_add(&cgrp->last_bstat, &delta);

                delta = rstatbc->subtree_bstat;
                cgroup_base_stat_sub(&delta, &rstatbc->last_subtree_bstat);
                cgroup_base_stat_add(&prstatbc->subtree_bstat, &delta);
                cgroup_base_stat_add(&rstatbc->last_subtree_bstat, &delta);
        }
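/*
 * Illustrative sketch (not from rstat.c): the last_bstat delta scheme in
 * one dimension. Each flush computes delta = current - last snapshot,
 * adds the delta to the global counter, then advances the snapshot, so
 * repeated flushes never double-count. Names below are invented for the
 * sketch.
 */
#include <stdio.h>

struct pcpu_stat {
        long bstat;             /* monotonically growing per-cpu counter */
        long last_bstat;        /* snapshot as of the previous flush */
};

static long global_bstat;

static void flush_one(struct pcpu_stat *s)
{
        long delta = s->bstat - s->last_bstat;  /* new activity since last flush */

        global_bstat += delta;
        s->last_bstat += delta;                 /* advance the snapshot */
}

int main(void)
{
        struct pcpu_stat s = { 0, 0 };

        s.bstat += 10;
        flush_one(&s);
        printf("%ld\n", global_bstat);  /* 10 */

        s.bstat += 5;
        flush_one(&s);
        flush_one(&s);                  /* second flush is a no-op: delta == 0 */
        printf("%ld\n", global_bstat);  /* 15 */
        return 0;
}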
/* in cgroup_base_stat_cputime_account_begin(): */
        rstatbc = get_cpu_ptr(cgrp->rstat_base_cpu);
        *flags = u64_stats_update_begin_irqsave(&rstatbc->bsync);

/* in cgroup_base_stat_cputime_account_end(): */
        u64_stats_update_end_irqrestore(&rstatbc->bsync, flags);
        css_rstat_updated(&cgrp->self, smp_processor_id());
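/*
 * Illustrative sketch (not from rstat.c): what the bsync sequence counter
 * buys the reader side. The writer bumps the counter to odd before and
 * even after an update; a reader retries if the counter changed or was
 * odd, so it never sees a torn utime/stime pair. Single-writer analog
 * with invented names; a production seqlock also needs careful memory
 * fencing around the plain data accesses, elided here for brevity.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic unsigned int seq;
static uint64_t utime, stime;   /* the pair the counter keeps consistent */

static void writer_update(uint64_t du, uint64_t ds)
{
        atomic_fetch_add(&seq, 1);      /* odd: update in progress */
        utime += du;
        stime += ds;
        atomic_fetch_add(&seq, 1);      /* even: stable again */
}

static void reader_snapshot(uint64_t *u, uint64_t *s)
{
        unsigned int begin;

        do {
                while ((begin = atomic_load(&seq)) & 1)
                        ;                       /* writer active: wait */
                *u = utime;
                *s = stime;
        } while (atomic_load(&seq) != begin);   /* raced with a writer: retry */
}

int main(void)
{
        uint64_t u, s;

        writer_update(3, 4);
        reader_snapshot(&u, &s);
        printf("utime=%llu stime=%llu\n",
               (unsigned long long)u, (unsigned long long)s);
        return 0;
}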
/* in __cgroup_account_cputime(): */
        rstatbc->bstat.cputime.sum_exec_runtime += delta_exec;

/* in __cgroup_account_cputime_field(), routed by cputime index: */
        rstatbc->bstat.ntime += delta_exec;
        ...
        rstatbc->bstat.cputime.utime += delta_exec;
        ...
        rstatbc->bstat.cputime.stime += delta_exec;
        ...
        rstatbc->bstat.forceidle_sum += delta_exec;
/*
 * ...
 * at a global level, then categorizing the fields in a manner consistent
 * ...
 */

/* in root_cgroup_cputime(): */
        struct task_cputime *cputime = &bstat->cputime;
        ...
        cputime->utime += user;
        ...
        cputime->stime += sys;

        cputime->sum_exec_runtime += user;
        cputime->sum_exec_runtime += sys;
#ifdef CONFIG_SCHED_CORE
        bstat->forceidle_sum += cpustat[CPUTIME_FORCEIDLE];
#endif
        bstat->ntime += cpustat[CPUTIME_NICE];
/* in cgroup_force_idle_show(): */
        u64 forceidle_time = bstat->forceidle_sum;

/* in cgroup_base_stat_cputime_show(): */
        struct cgroup *cgrp = seq_css(seq)->cgroup;
        ...
        css_rstat_flush(&cgrp->self);
        __css_rstat_lock(&cgrp->self, -1);      /* -1: taking the main lock */
        bstat = cgrp->bstat;
        cputime_adjust(&cgrp->bstat.cputime, &cgrp->prev_cputime,
                       &bstat.cputime.utime, &bstat.cputime.stime);
        __css_rstat_unlock(&cgrp->self, -1);