// SPDX-License-Identifier: GPL-2.0-only
#include "cgroup-internal.h"

#include <linux/sched/cputime.h>

#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>

#include <trace/events/cgroup.h>

static DEFINE_SPINLOCK(cgroup_rstat_lock);
static DEFINE_PER_CPU(raw_spinlock_t, cgroup_rstat_cpu_lock);

static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu);

static struct cgroup_rstat_cpu *cgroup_rstat_cpu(struct cgroup *cgrp, int cpu)
{
	return per_cpu_ptr(cgrp->rstat_cpu, cpu);
}

/*
 * Helper functions for rstat per CPU lock (cgroup_rstat_cpu_lock).
 *
 * This makes it easier to diagnose locking issues and contention in
 * production environments. The parameter @fast_path determines which
 * tracepoints are used, allowing us to diagnose "flush" related
 * operations without handling high-frequency fast-path "update" events.
 */
static __always_inline
unsigned long _cgroup_rstat_cpu_lock(raw_spinlock_t *cpu_lock, int cpu,
				     struct cgroup *cgrp, const bool fast_path)
{
	unsigned long flags;
	bool contended;

	/*
	 * The _irqsave() is needed because cgroup_rstat_lock is
	 * spinlock_t which is a sleeping lock on PREEMPT_RT. Acquiring
	 * this lock with the _irq() suffix only disables interrupts on
	 * a non-PREEMPT_RT kernel. The raw_spinlock_t below disables
	 * interrupts on both configurations. The _irqsave() ensures
	 * that interrupts are always disabled and later restored.
	 */
	contended = !raw_spin_trylock_irqsave(cpu_lock, flags);
	if (contended) {
		if (fast_path)
			trace_cgroup_rstat_cpu_lock_contended_fastpath(cgrp, cpu, contended);
		else
			trace_cgroup_rstat_cpu_lock_contended(cgrp, cpu, contended);

		raw_spin_lock_irqsave(cpu_lock, flags);
	}

	if (fast_path)
		trace_cgroup_rstat_cpu_locked_fastpath(cgrp, cpu, contended);
	else
		trace_cgroup_rstat_cpu_locked(cgrp, cpu, contended);

	return flags;
}

static __always_inline
void _cgroup_rstat_cpu_unlock(raw_spinlock_t *cpu_lock, int cpu,
			      struct cgroup *cgrp, unsigned long flags,
			      const bool fast_path)
{
	if (fast_path)
		trace_cgroup_rstat_cpu_unlock_fastpath(cgrp, cpu, false);
	else
		trace_cgroup_rstat_cpu_unlock(cgrp, cpu, false);

	raw_spin_unlock_irqrestore(cpu_lock, flags);
}

/**
 * cgroup_rstat_updated - keep track of updated rstat_cpu
 * @cgrp: target cgroup
 * @cpu: cpu on which rstat_cpu was updated
 *
 * @cgrp's rstat_cpu on @cpu was updated. Put it on the parent's matching
 * rstat_cpu->updated_children list. See the comment on top of
 * cgroup_rstat_cpu definition for details.
 */
__bpf_kfunc void cgroup_rstat_updated(struct cgroup *cgrp, int cpu)
{
	raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu);
	unsigned long flags;

	/*
	 * Speculative already-on-list test. This may race leading to
	 * temporary inaccuracies, which is fine.
	 *
	 * Because @parent's updated_children is terminated with @parent
	 * instead of NULL, we can tell whether @cgrp is on the list by
	 * testing the next pointer for NULL.
	 */
	if (data_race(cgroup_rstat_cpu(cgrp, cpu)->updated_next))
		return;

	flags = _cgroup_rstat_cpu_lock(cpu_lock, cpu, cgrp, true);

	/* put @cgrp and all ancestors on the corresponding updated lists */
	while (true) {
		struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
		struct cgroup *parent = cgroup_parent(cgrp);
		struct cgroup_rstat_cpu *prstatc;

		/*
		 * Both additions and removals are bottom-up. If a cgroup
		 * is already in the tree, all ancestors are.
		 */
		if (rstatc->updated_next)
			break;

		/* Root has no parent to link it to, but mark it busy */
		if (!parent) {
			rstatc->updated_next = cgrp;
			break;
		}

		prstatc = cgroup_rstat_cpu(parent, cpu);
		rstatc->updated_next = prstatc->updated_children;
		prstatc->updated_children = cgrp;

		cgrp = parent;
	}

	_cgroup_rstat_cpu_unlock(cpu_lock, cpu, cgrp, flags, true);
}
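/*
 * Illustrative sketch of the intended hot-path usage (not part of this
 * file): a controller bumps its own per-cpu counters and then only marks
 * the cgroup updated, deferring all aggregation to flush time. The names
 * foo_charge() and foo_pcpu are hypothetical and exist only for this
 * example:
 *
 *	static void foo_charge(struct cgroup *cgrp, u64 bytes)
 *	{
 *		this_cpu_add(cgrp->foo_pcpu->bytes, bytes);
 *		cgroup_rstat_updated(cgrp, smp_processor_id());
 *	}
 */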
/**
 * cgroup_rstat_push_children - push children cgroups into the given list
 * @head: current head of the list (= subtree root)
 * @child: first child of the root
 * @cpu: target cpu
 * Return: A new singly linked list of cgroups to be flushed
 *
 * Iteratively traverse down the cgroup_rstat_cpu updated tree level by
 * level and push all the parents first before their next level children
 * into a singly linked list built from the tail backward like "pushing"
 * cgroups into a stack. The root is pushed by the caller.
 */
static struct cgroup *cgroup_rstat_push_children(struct cgroup *head,
						 struct cgroup *child, int cpu)
{
	struct cgroup *chead = child;	/* Head of child cgroup level */
	struct cgroup *ghead = NULL;	/* Head of grandchild cgroup level */
	struct cgroup *parent, *grandchild;
	struct cgroup_rstat_cpu *crstatc;

	child->rstat_flush_next = NULL;

next_level:
	while (chead) {
		child = chead;
		chead = child->rstat_flush_next;
		parent = cgroup_parent(child);

		/* updated_next is parent cgroup terminated */
		while (child != parent) {
			child->rstat_flush_next = head;
			head = child;
			crstatc = cgroup_rstat_cpu(child, cpu);
			grandchild = crstatc->updated_children;
			if (grandchild != child) {
				/* Push the grandchild to the next level */
				crstatc->updated_children = child;
				grandchild->rstat_flush_next = ghead;
				ghead = grandchild;
			}
			child = crstatc->updated_next;
			crstatc->updated_next = NULL;
		}
	}

	if (ghead) {
		chead = ghead;
		ghead = NULL;
		goto next_level;
	}
	return head;
}

/**
 * cgroup_rstat_updated_list - return a list of updated cgroups to be flushed
 * @root: root of the cgroup subtree to traverse
 * @cpu: target cpu
 * Return: A singly linked list of cgroups to be flushed
 *
 * Walks the updated rstat_cpu tree on @cpu from @root. During traversal,
 * each returned cgroup is unlinked from the updated tree.
 *
 * The only ordering guarantee is that, for a parent and a child pair
 * covered by a given traversal, the child is before its parent in
 * the list.
 *
 * Note that updated_children is self terminated and points to a list of
 * child cgroups if not empty. Whereas updated_next is like a sibling link
 * within the children list and terminated by the parent cgroup. An exception
 * here is the cgroup root whose updated_next can be self terminated.
 */
static struct cgroup *cgroup_rstat_updated_list(struct cgroup *root, int cpu)
{
	raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu);
	struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(root, cpu);
	struct cgroup *head = NULL, *parent, *child;
	unsigned long flags;

	flags = _cgroup_rstat_cpu_lock(cpu_lock, cpu, root, false);

	/* Return NULL if this subtree is not on-list */
	if (!rstatc->updated_next)
		goto unlock_ret;

	/*
	 * Unlink @root from its parent. As the updated_children list is
	 * singly linked, we have to walk it to find the removal point.
	 */
	parent = cgroup_parent(root);
	if (parent) {
		struct cgroup_rstat_cpu *prstatc;
		struct cgroup **nextp;

		prstatc = cgroup_rstat_cpu(parent, cpu);
		nextp = &prstatc->updated_children;
		while (*nextp != root) {
			struct cgroup_rstat_cpu *nrstatc;

			nrstatc = cgroup_rstat_cpu(*nextp, cpu);
			WARN_ON_ONCE(*nextp == parent);
			nextp = &nrstatc->updated_next;
		}
		*nextp = rstatc->updated_next;
	}

	rstatc->updated_next = NULL;

	/* Push @root to the list first before pushing the children */
	head = root;
	root->rstat_flush_next = NULL;
	child = rstatc->updated_children;
	rstatc->updated_children = root;
	if (child != root)
		head = cgroup_rstat_push_children(head, child, cpu);
unlock_ret:
	_cgroup_rstat_cpu_unlock(cpu_lock, cpu, root, flags, false);
	return head;
}
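/*
 * Illustrative example of the on-list encoding (hypothetical hierarchy):
 * take the root cgroup R with children A and B, and A's child AA, where
 * B was marked updated on this CPU before AA. The per-cpu links then are:
 *
 *	R->updated_children  = A,  A->updated_next  = B,  B->updated_next = R
 *	A->updated_children  = AA, AA->updated_next = A
 *	R->updated_next      = R   (root has no parent, so it marks itself)
 *
 * For this state, cgroup_rstat_updated_list(R, cpu) returns the flush
 * list AA -> B -> A -> R: every child precedes its parent, while the
 * relative order of siblings is not guaranteed in general.
 */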
/*
 * A hook for bpf stat collectors to attach to and flush their stats.
 * Together with providing bpf kfuncs for cgroup_rstat_updated() and
 * cgroup_rstat_flush(), this enables a complete workflow where bpf progs that
 * collect cgroup stats can integrate with rstat for efficient flushing.
 *
 * A static noinline declaration here could cause the compiler to optimize away
 * the function. A global noinline declaration will keep the definition, but may
 * optimize away the callsite. Therefore, __weak is needed to ensure that the
 * call is still emitted, by telling the compiler that we don't know what the
 * function might eventually be.
 */

__bpf_hook_start();

__weak noinline void bpf_rstat_flush(struct cgroup *cgrp,
				     struct cgroup *parent, int cpu)
{
}

__bpf_hook_end();
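/*
 * Illustrative BPF-side sketch of that workflow (not built here): a
 * collector updates its own per-cpu stats and calls the
 * cgroup_rstat_updated() kfunc, then attaches an fentry program to
 * bpf_rstat_flush() so its per-cpu values are folded in whenever rstat
 * flushes. The program name my_flush below is hypothetical:
 *
 *	SEC("fentry/bpf_rstat_flush")
 *	int BPF_PROG(my_flush, struct cgroup *cgrp, struct cgroup *parent, int cpu)
 *	{
 *		// fold this cgroup's per-cpu sample for @cpu into its totals
 *		return 0;
 *	}
 */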
/*
 * Helper functions for locking cgroup_rstat_lock.
 *
 * This makes it easier to diagnose locking issues and contention in
 * production environments. The parameter @cpu_in_loop indicates that the
 * lock was released and re-taken while collecting data from the CPUs. The
 * value -1 is used when obtaining the main lock; otherwise it is the CPU
 * number processed last.
 */
static inline void __cgroup_rstat_lock(struct cgroup *cgrp, int cpu_in_loop)
	__acquires(&cgroup_rstat_lock)
{
	bool contended;

	contended = !spin_trylock_irq(&cgroup_rstat_lock);
	if (contended) {
		trace_cgroup_rstat_lock_contended(cgrp, cpu_in_loop, contended);
		spin_lock_irq(&cgroup_rstat_lock);
	}
	trace_cgroup_rstat_locked(cgrp, cpu_in_loop, contended);
}

static inline void __cgroup_rstat_unlock(struct cgroup *cgrp, int cpu_in_loop)
	__releases(&cgroup_rstat_lock)
{
	trace_cgroup_rstat_unlock(cgrp, cpu_in_loop, false);
	spin_unlock_irq(&cgroup_rstat_lock);
}

/**
 * cgroup_rstat_flush - flush stats in @cgrp's subtree
 * @cgrp: target cgroup
 *
 * Collect all per-cpu stats in @cgrp's subtree into the global counters
 * and propagate them upwards. After this function returns, all cgroups in
 * the subtree have up-to-date ->stat.
 *
 * This also gets all cgroups in the subtree including @cgrp off the
 * ->updated_children lists.
 *
 * This function may block.
 */
__bpf_kfunc void cgroup_rstat_flush(struct cgroup *cgrp)
{
	int cpu;

	might_sleep();
	for_each_possible_cpu(cpu) {
		struct cgroup *pos;

		/* Reacquire for each CPU to avoid disabling IRQs too long */
		__cgroup_rstat_lock(cgrp, cpu);
		pos = cgroup_rstat_updated_list(cgrp, cpu);
		for (; pos; pos = pos->rstat_flush_next) {
			struct cgroup_subsys_state *css;

			cgroup_base_stat_flush(pos, cpu);
			bpf_rstat_flush(pos, cgroup_parent(pos), cpu);

			rcu_read_lock();
			list_for_each_entry_rcu(css, &pos->rstat_css_list,
						rstat_css_node)
				css->ss->css_rstat_flush(css, cpu);
			rcu_read_unlock();
		}
		__cgroup_rstat_unlock(cgrp, cpu);
		if (!cond_resched())
			cpu_relax();
	}
}
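/*
 * Illustrative read-side sketch (hypothetical names, not part of this
 * file): a controller that reports aggregated stats through a cgroup
 * file would typically flush first so pending per-cpu deltas are folded
 * in before it reads its own aggregate:
 *
 *	static int foo_stat_show(struct seq_file *seq, void *v)
 *	{
 *		struct cgroup *cgrp = seq_css(seq)->cgroup;
 *
 *		cgroup_rstat_flush(cgrp);
 *		seq_printf(seq, "events %llu\n", foo_aggregated_events(cgrp));
 *		return 0;
 *	}
 *
 * foo_stat_show() and foo_aggregated_events() are made-up illustrations
 * of the pattern, not existing interfaces.
 */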
int cgroup_rstat_init(struct cgroup *cgrp)
{
	int cpu;

	/* the root cgrp has rstat_cpu preallocated */
	if (!cgrp->rstat_cpu) {
		cgrp->rstat_cpu = alloc_percpu(struct cgroup_rstat_cpu);
		if (!cgrp->rstat_cpu)
			return -ENOMEM;
	}

	/* ->updated_children list is self terminated */
	for_each_possible_cpu(cpu) {
		struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);

		rstatc->updated_children = cgrp;
		u64_stats_init(&rstatc->bsync);
	}

	return 0;
}

void cgroup_rstat_exit(struct cgroup *cgrp)
{
	int cpu;

	cgroup_rstat_flush(cgrp);

	/* sanity check */
	for_each_possible_cpu(cpu) {
		struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);

		if (WARN_ON_ONCE(rstatc->updated_children != cgrp) ||
		    WARN_ON_ONCE(rstatc->updated_next))
			return;
	}

	free_percpu(cgrp->rstat_cpu);
	cgrp->rstat_cpu = NULL;
}

void __init cgroup_rstat_boot(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		raw_spin_lock_init(per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu));
}

/*
 * Functions for cgroup basic resource statistics implemented on top of
 * rstat.
 */
static void cgroup_base_stat_add(struct cgroup_base_stat *dst_bstat,
				 struct cgroup_base_stat *src_bstat)
{
	dst_bstat->cputime.utime += src_bstat->cputime.utime;
	dst_bstat->cputime.stime += src_bstat->cputime.stime;
	dst_bstat->cputime.sum_exec_runtime += src_bstat->cputime.sum_exec_runtime;
#ifdef CONFIG_SCHED_CORE
	dst_bstat->forceidle_sum += src_bstat->forceidle_sum;
#endif
	dst_bstat->ntime += src_bstat->ntime;
}

static void cgroup_base_stat_sub(struct cgroup_base_stat *dst_bstat,
				 struct cgroup_base_stat *src_bstat)
{
	dst_bstat->cputime.utime -= src_bstat->cputime.utime;
	dst_bstat->cputime.stime -= src_bstat->cputime.stime;
	dst_bstat->cputime.sum_exec_runtime -= src_bstat->cputime.sum_exec_runtime;
#ifdef CONFIG_SCHED_CORE
	dst_bstat->forceidle_sum -= src_bstat->forceidle_sum;
#endif
	dst_bstat->ntime -= src_bstat->ntime;
}

static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu)
{
	struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
	struct cgroup *parent = cgroup_parent(cgrp);
	struct cgroup_rstat_cpu *prstatc;
	struct cgroup_base_stat delta;
	unsigned seq;

	/* Root-level stats are sourced from system-wide CPU stats */
	if (!parent)
		return;

	/* fetch the current per-cpu values */
	do {
		seq = __u64_stats_fetch_begin(&rstatc->bsync);
		delta = rstatc->bstat;
	} while (__u64_stats_fetch_retry(&rstatc->bsync, seq));

	/* propagate per-cpu delta to cgroup and per-cpu global statistics */
	cgroup_base_stat_sub(&delta, &rstatc->last_bstat);
	cgroup_base_stat_add(&cgrp->bstat, &delta);
	cgroup_base_stat_add(&rstatc->last_bstat, &delta);
	cgroup_base_stat_add(&rstatc->subtree_bstat, &delta);

	/* propagate cgroup and per-cpu global delta to parent (unless that's root) */
	if (cgroup_parent(parent)) {
		delta = cgrp->bstat;
		cgroup_base_stat_sub(&delta, &cgrp->last_bstat);
		cgroup_base_stat_add(&parent->bstat, &delta);
		cgroup_base_stat_add(&cgrp->last_bstat, &delta);

		delta = rstatc->subtree_bstat;
		prstatc = cgroup_rstat_cpu(parent, cpu);
		cgroup_base_stat_sub(&delta, &rstatc->last_subtree_bstat);
		cgroup_base_stat_add(&prstatc->subtree_bstat, &delta);
		cgroup_base_stat_add(&rstatc->last_subtree_bstat, &delta);
	}
}
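/*
 * Worked example of the snapshot/delta scheme above (numbers are
 * hypothetical): if a CPU's bstat.utime is 150 and its last_bstat.utime
 * is 100, the flush folds delta = 50 into cgrp->bstat and advances
 * last_bstat to 150, so the next flush only adds whatever has accrued
 * since. The same last_* bookkeeping is repeated one level up when the
 * accumulated delta is propagated to the parent.
 */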
static struct cgroup_rstat_cpu *
cgroup_base_stat_cputime_account_begin(struct cgroup *cgrp, unsigned long *flags)
{
	struct cgroup_rstat_cpu *rstatc;

	rstatc = get_cpu_ptr(cgrp->rstat_cpu);
	*flags = u64_stats_update_begin_irqsave(&rstatc->bsync);
	return rstatc;
}

static void cgroup_base_stat_cputime_account_end(struct cgroup *cgrp,
						 struct cgroup_rstat_cpu *rstatc,
						 unsigned long flags)
{
	u64_stats_update_end_irqrestore(&rstatc->bsync, flags);
	cgroup_rstat_updated(cgrp, smp_processor_id());
	put_cpu_ptr(rstatc);
}

void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec)
{
	struct cgroup_rstat_cpu *rstatc;
	unsigned long flags;

	rstatc = cgroup_base_stat_cputime_account_begin(cgrp, &flags);
	rstatc->bstat.cputime.sum_exec_runtime += delta_exec;
	cgroup_base_stat_cputime_account_end(cgrp, rstatc, flags);
}

void __cgroup_account_cputime_field(struct cgroup *cgrp,
				    enum cpu_usage_stat index, u64 delta_exec)
{
	struct cgroup_rstat_cpu *rstatc;
	unsigned long flags;

	rstatc = cgroup_base_stat_cputime_account_begin(cgrp, &flags);

	switch (index) {
	case CPUTIME_NICE:
		rstatc->bstat.ntime += delta_exec;
		fallthrough;
	case CPUTIME_USER:
		rstatc->bstat.cputime.utime += delta_exec;
		break;
	case CPUTIME_SYSTEM:
	case CPUTIME_IRQ:
	case CPUTIME_SOFTIRQ:
		rstatc->bstat.cputime.stime += delta_exec;
		break;
#ifdef CONFIG_SCHED_CORE
	case CPUTIME_FORCEIDLE:
		rstatc->bstat.forceidle_sum += delta_exec;
		break;
#endif
	default:
		break;
	}

	cgroup_base_stat_cputime_account_end(cgrp, rstatc, flags);
}

/*
 * Compute the cputime for the root cgroup by getting the per cpu data
 * at a global level, then categorizing the fields in a manner consistent
 * with how it is done by __cgroup_account_cputime_field for each bit of
 * cpu time attributed to a cgroup.
 */
static void root_cgroup_cputime(struct cgroup_base_stat *bstat)
{
	struct task_cputime *cputime = &bstat->cputime;
	int i;

	memset(bstat, 0, sizeof(*bstat));
	for_each_possible_cpu(i) {
		struct kernel_cpustat kcpustat;
		u64 *cpustat = kcpustat.cpustat;
		u64 user = 0;
		u64 sys = 0;

		kcpustat_cpu_fetch(&kcpustat, i);

		user += cpustat[CPUTIME_USER];
		user += cpustat[CPUTIME_NICE];
		cputime->utime += user;

		sys += cpustat[CPUTIME_SYSTEM];
		sys += cpustat[CPUTIME_IRQ];
		sys += cpustat[CPUTIME_SOFTIRQ];
		cputime->stime += sys;

		cputime->sum_exec_runtime += user;
		cputime->sum_exec_runtime += sys;

#ifdef CONFIG_SCHED_CORE
		bstat->forceidle_sum += cpustat[CPUTIME_FORCEIDLE];
#endif
		bstat->ntime += cpustat[CPUTIME_NICE];
	}
}

static void cgroup_force_idle_show(struct seq_file *seq, struct cgroup_base_stat *bstat)
{
#ifdef CONFIG_SCHED_CORE
	u64 forceidle_time = bstat->forceidle_sum;

	do_div(forceidle_time, NSEC_PER_USEC);
	seq_printf(seq, "core_sched.force_idle_usec %llu\n", forceidle_time);
#endif
}

void cgroup_base_stat_cputime_show(struct seq_file *seq)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;
	struct cgroup_base_stat bstat;

	if (cgroup_parent(cgrp)) {
		cgroup_rstat_flush(cgrp);
		__cgroup_rstat_lock(cgrp, -1);
		bstat = cgrp->bstat;
		cputime_adjust(&cgrp->bstat.cputime, &cgrp->prev_cputime,
			       &bstat.cputime.utime, &bstat.cputime.stime);
		__cgroup_rstat_unlock(cgrp, -1);
	} else {
		root_cgroup_cputime(&bstat);
	}

	do_div(bstat.cputime.sum_exec_runtime, NSEC_PER_USEC);
	do_div(bstat.cputime.utime, NSEC_PER_USEC);
	do_div(bstat.cputime.stime, NSEC_PER_USEC);
	do_div(bstat.ntime, NSEC_PER_USEC);

	seq_printf(seq, "usage_usec %llu\n"
		   "user_usec %llu\n"
		   "system_usec %llu\n"
		   "nice_usec %llu\n",
		   bstat.cputime.sum_exec_runtime,
		   bstat.cputime.utime,
		   bstat.cputime.stime,
		   bstat.ntime);

	cgroup_force_idle_show(seq, &bstat);
}
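/*
 * For reference, the fields emitted above are the leading lines of a
 * cgroup's cpu.stat file; the values below are illustrative only:
 *
 *	usage_usec 153000
 *	user_usec 101000
 *	system_usec 52000
 *	nice_usec 0
 *	core_sched.force_idle_usec 0	(only with CONFIG_SCHED_CORE)
 */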
/* Add bpf kfuncs for cgroup_rstat_updated() and cgroup_rstat_flush() */
BTF_KFUNCS_START(bpf_rstat_kfunc_ids)
BTF_ID_FLAGS(func, cgroup_rstat_updated)
BTF_ID_FLAGS(func, cgroup_rstat_flush, KF_SLEEPABLE)
BTF_KFUNCS_END(bpf_rstat_kfunc_ids)

static const struct btf_kfunc_id_set bpf_rstat_kfunc_set = {
	.owner	= THIS_MODULE,
	.set	= &bpf_rstat_kfunc_ids,
};

static int __init bpf_rstat_kfunc_init(void)
{
	return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
					 &bpf_rstat_kfunc_set);
}
late_initcall(bpf_rstat_kfunc_init);