/* SPDX-License-Identifier: GPL-2.0 */
/*
 * A demo sched_ext flattened cgroup hierarchy scheduler. It implements
 * hierarchical weight-based cgroup CPU control by flattening the cgroup
 * hierarchy into a single layer by compounding the active weight share at each
 * level. Consider the following hierarchy with weights in parentheses:
 *
 * R + A (100) + B (100)
 *   |         \ C (100)
 *   \ D (200)
 *
 * Ignoring the root and threaded cgroups, only B, C and D can contain tasks.
 * Let's say all three have runnable tasks. The total share that each of these
 * three cgroups is entitled to can be calculated by compounding its share at
 * each level.
 *
 * For example, B is competing against C and in that competition its share is
 * 100/(100+100) == 1/2. At its parent level, A is competing against D and A's
 * share in that competition is 100/(200+100) == 1/3. B's eventual share in the
 * system can be calculated by multiplying the two shares, 1/2 * 1/3 == 1/6. C's
 * eventual share is the same at 1/6. D is only competing at the top level and
 * its share is 200/(100+200) == 2/3.
 *
 * So, instead of hierarchically scheduling level-by-level, we can consider it
 * as B, C and D competing with each other with respective shares of 1/6, 1/6
 * and 2/3 and keep updating the eventual shares as the cgroups' runnable states
 * change.
 *
 * This flattening of hierarchy can bring a substantial performance gain when
 * the cgroup hierarchy is nested multiple levels. In a simple benchmark using
 * wrk[8] on apache serving a CGI script calculating sha1sum of a small file, it
 * outperforms CFS by ~3% with CPU controller disabled and by ~10% with two
 * apache instances competing with 2:1 weight ratio nested four level deep.
 *
 * However, the gain comes at the cost of not being able to properly handle
 * thundering herd of cgroups.
For example, if many cgroups which are nested 36 * behind a low priority parent cgroup wake up around the same time, they may be 37 * able to consume more CPU cycles than they are entitled to. In many use cases, 38 * this isn't a real concern especially given the performance gain. Also, there 39 * are ways to mitigate the problem further by e.g. introducing an extra 40 * scheduling layer on cgroup delegation boundaries. 41 * 42 * The scheduler first picks the cgroup to run and then schedule the tasks 43 * within by using nested weighted vtime scheduling by default. The 44 * cgroup-internal scheduling can be switched to FIFO with the -f option. 45 */ 46 #include <scx/common.bpf.h> 47 #include "scx_flatcg.h" 48 49 /* 50 * Maximum amount of retries to find a valid cgroup. 51 */ 52 #define CGROUP_MAX_RETRIES 1024 53 54 char _license[] SEC("license") = "GPL"; 55 56 const volatile u32 nr_cpus = 32; /* !0 for veristat, set during init */ 57 const volatile u64 cgrp_slice_ns = SCX_SLICE_DFL; 58 const volatile bool fifo_sched; 59 60 u64 cvtime_now; 61 UEI_DEFINE(uei); 62 63 struct { 64 __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); 65 __type(key, u32); 66 __type(value, u64); 67 __uint(max_entries, FCG_NR_STATS); 68 } stats SEC(".maps"); 69 70 static void stat_inc(enum fcg_stat_idx idx) 71 { 72 u32 idx_v = idx; 73 74 u64 *cnt_p = bpf_map_lookup_elem(&stats, &idx_v); 75 if (cnt_p) 76 (*cnt_p)++; 77 } 78 79 struct fcg_cpu_ctx { 80 u64 cur_cgid; 81 u64 cur_at; 82 }; 83 84 struct { 85 __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); 86 __type(key, u32); 87 __type(value, struct fcg_cpu_ctx); 88 __uint(max_entries, 1); 89 } cpu_ctx SEC(".maps"); 90 91 struct { 92 __uint(type, BPF_MAP_TYPE_CGRP_STORAGE); 93 __uint(map_flags, BPF_F_NO_PREALLOC); 94 __type(key, int); 95 __type(value, struct fcg_cgrp_ctx); 96 } cgrp_ctx SEC(".maps"); 97 98 struct cgv_node { 99 struct bpf_rb_node rb_node; 100 __u64 cvtime; 101 __u64 cgid; 102 }; 103 104 private(CGV_TREE) struct bpf_spin_lock cgv_tree_lock; 105 
private(CGV_TREE) struct bpf_rb_root cgv_tree __contains(cgv_node, rb_node); 106 107 struct cgv_node_stash { 108 struct cgv_node __kptr *node; 109 }; 110 111 struct { 112 __uint(type, BPF_MAP_TYPE_HASH); 113 __uint(max_entries, 16384); 114 __type(key, __u64); 115 __type(value, struct cgv_node_stash); 116 } cgv_node_stash SEC(".maps"); 117 118 struct fcg_task_ctx { 119 u64 bypassed_at; 120 }; 121 122 struct { 123 __uint(type, BPF_MAP_TYPE_TASK_STORAGE); 124 __uint(map_flags, BPF_F_NO_PREALLOC); 125 __type(key, int); 126 __type(value, struct fcg_task_ctx); 127 } task_ctx SEC(".maps"); 128 129 /* gets inc'd on weight tree changes to expire the cached hweights */ 130 u64 hweight_gen = 1; 131 132 static u64 div_round_up(u64 dividend, u64 divisor) 133 { 134 return (dividend + divisor - 1) / divisor; 135 } 136 137 static bool vtime_before(u64 a, u64 b) 138 { 139 return (s64)(a - b) < 0; 140 } 141 142 static bool cgv_node_less(struct bpf_rb_node *a, const struct bpf_rb_node *b) 143 { 144 struct cgv_node *cgc_a, *cgc_b; 145 146 cgc_a = container_of(a, struct cgv_node, rb_node); 147 cgc_b = container_of(b, struct cgv_node, rb_node); 148 149 return cgc_a->cvtime < cgc_b->cvtime; 150 } 151 152 static struct fcg_cpu_ctx *find_cpu_ctx(void) 153 { 154 struct fcg_cpu_ctx *cpuc; 155 u32 idx = 0; 156 157 cpuc = bpf_map_lookup_elem(&cpu_ctx, &idx); 158 if (!cpuc) { 159 scx_bpf_error("cpu_ctx lookup failed"); 160 return NULL; 161 } 162 return cpuc; 163 } 164 165 static struct fcg_cgrp_ctx *find_cgrp_ctx(struct cgroup *cgrp) 166 { 167 struct fcg_cgrp_ctx *cgc; 168 169 cgc = bpf_cgrp_storage_get(&cgrp_ctx, cgrp, 0, 0); 170 if (!cgc) { 171 scx_bpf_error("cgrp_ctx lookup failed for cgid %llu", cgrp->kn->id); 172 return NULL; 173 } 174 return cgc; 175 } 176 177 static struct fcg_cgrp_ctx *find_ancestor_cgrp_ctx(struct cgroup *cgrp, int level) 178 { 179 struct fcg_cgrp_ctx *cgc; 180 181 cgrp = bpf_cgroup_ancestor(cgrp, level); 182 if (!cgrp) { 183 scx_bpf_error("ancestor cgroup lookup 
failed"); 184 return NULL; 185 } 186 187 cgc = find_cgrp_ctx(cgrp); 188 if (!cgc) 189 scx_bpf_error("ancestor cgrp_ctx lookup failed"); 190 bpf_cgroup_release(cgrp); 191 return cgc; 192 } 193 194 static void cgrp_refresh_hweight(struct cgroup *cgrp, struct fcg_cgrp_ctx *cgc) 195 { 196 int level; 197 198 if (!cgc->nr_active) { 199 stat_inc(FCG_STAT_HWT_SKIP); 200 return; 201 } 202 203 if (cgc->hweight_gen == hweight_gen) { 204 stat_inc(FCG_STAT_HWT_CACHE); 205 return; 206 } 207 208 stat_inc(FCG_STAT_HWT_UPDATES); 209 bpf_for(level, 0, cgrp->level + 1) { 210 struct fcg_cgrp_ctx *cgc; 211 bool is_active; 212 213 cgc = find_ancestor_cgrp_ctx(cgrp, level); 214 if (!cgc) 215 break; 216 217 if (!level) { 218 cgc->hweight = FCG_HWEIGHT_ONE; 219 cgc->hweight_gen = hweight_gen; 220 } else { 221 struct fcg_cgrp_ctx *pcgc; 222 223 pcgc = find_ancestor_cgrp_ctx(cgrp, level - 1); 224 if (!pcgc) 225 break; 226 227 /* 228 * We can be opportunistic here and not grab the 229 * cgv_tree_lock and deal with the occasional races. 230 * However, hweight updates are already cached and 231 * relatively low-frequency. Let's just do the 232 * straightforward thing. 233 */ 234 bpf_spin_lock(&cgv_tree_lock); 235 is_active = cgc->nr_active; 236 if (is_active) { 237 cgc->hweight_gen = pcgc->hweight_gen; 238 cgc->hweight = 239 div_round_up(pcgc->hweight * cgc->weight, 240 pcgc->child_weight_sum); 241 } 242 bpf_spin_unlock(&cgv_tree_lock); 243 244 if (!is_active) { 245 stat_inc(FCG_STAT_HWT_RACE); 246 break; 247 } 248 } 249 } 250 } 251 252 static void cgrp_cap_budget(struct cgv_node *cgv_node, struct fcg_cgrp_ctx *cgc) 253 { 254 u64 delta, cvtime, max_budget; 255 256 /* 257 * A node which is on the rbtree can't be pointed to from elsewhere yet 258 * and thus can't be updated and repositioned. Instead, we collect the 259 * vtime deltas separately and apply it asynchronously here. 
260 */ 261 delta = __sync_fetch_and_sub(&cgc->cvtime_delta, cgc->cvtime_delta); 262 cvtime = cgv_node->cvtime + delta; 263 264 /* 265 * Allow a cgroup to carry the maximum budget proportional to its 266 * hweight such that a full-hweight cgroup can immediately take up half 267 * of the CPUs at the most while staying at the front of the rbtree. 268 */ 269 max_budget = (cgrp_slice_ns * nr_cpus * cgc->hweight) / 270 (2 * FCG_HWEIGHT_ONE); 271 if (vtime_before(cvtime, cvtime_now - max_budget)) 272 cvtime = cvtime_now - max_budget; 273 274 cgv_node->cvtime = cvtime; 275 } 276 277 static void cgrp_enqueued(struct cgroup *cgrp, struct fcg_cgrp_ctx *cgc) 278 { 279 struct cgv_node_stash *stash; 280 struct cgv_node *cgv_node; 281 u64 cgid = cgrp->kn->id; 282 283 /* paired with cmpxchg in try_pick_next_cgroup() */ 284 if (__sync_val_compare_and_swap(&cgc->queued, 0, 1)) { 285 stat_inc(FCG_STAT_ENQ_SKIP); 286 return; 287 } 288 289 stash = bpf_map_lookup_elem(&cgv_node_stash, &cgid); 290 if (!stash) { 291 scx_bpf_error("cgv_node lookup failed for cgid %llu", cgid); 292 return; 293 } 294 295 /* NULL if the node is already on the rbtree */ 296 cgv_node = bpf_kptr_xchg(&stash->node, NULL); 297 if (!cgv_node) { 298 stat_inc(FCG_STAT_ENQ_RACE); 299 return; 300 } 301 302 bpf_spin_lock(&cgv_tree_lock); 303 cgrp_cap_budget(cgv_node, cgc); 304 bpf_rbtree_add(&cgv_tree, &cgv_node->rb_node, cgv_node_less); 305 bpf_spin_unlock(&cgv_tree_lock); 306 } 307 308 static void set_bypassed_at(struct task_struct *p, struct fcg_task_ctx *taskc) 309 { 310 /* 311 * Tell fcg_stopping() that this bypassed the regular scheduling path 312 * and should be force charged to the cgroup. 0 is used to indicate that 313 * the task isn't bypassing, so if the current runtime is 0, go back by 314 * one nanosecond. 
315 */ 316 taskc->bypassed_at = p->se.sum_exec_runtime ?: (u64)-1; 317 } 318 319 s32 BPF_STRUCT_OPS(fcg_select_cpu, struct task_struct *p, s32 prev_cpu, u64 wake_flags) 320 { 321 struct fcg_task_ctx *taskc; 322 bool is_idle = false; 323 s32 cpu; 324 325 cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle); 326 327 taskc = bpf_task_storage_get(&task_ctx, p, 0, 0); 328 if (!taskc) { 329 scx_bpf_error("task_ctx lookup failed"); 330 return cpu; 331 } 332 333 /* 334 * If select_cpu_dfl() is recommending local enqueue, the target CPU is 335 * idle. Follow it and charge the cgroup later in fcg_stopping() after 336 * the fact. 337 */ 338 if (is_idle) { 339 set_bypassed_at(p, taskc); 340 stat_inc(FCG_STAT_LOCAL); 341 scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0); 342 } 343 344 return cpu; 345 } 346 347 void BPF_STRUCT_OPS(fcg_enqueue, struct task_struct *p, u64 enq_flags) 348 { 349 struct fcg_task_ctx *taskc; 350 struct cgroup *cgrp; 351 struct fcg_cgrp_ctx *cgc; 352 353 taskc = bpf_task_storage_get(&task_ctx, p, 0, 0); 354 if (!taskc) { 355 scx_bpf_error("task_ctx lookup failed"); 356 return; 357 } 358 359 /* 360 * Use the direct dispatching and force charging to deal with tasks with 361 * custom affinities so that we don't have to worry about per-cgroup 362 * dq's containing tasks that can't be executed from some CPUs. 363 */ 364 if (p->nr_cpus_allowed != nr_cpus) { 365 set_bypassed_at(p, taskc); 366 367 /* 368 * The global dq is deprioritized as we don't want to let tasks 369 * to boost themselves by constraining its cpumask. The 370 * deprioritization is rather severe, so let's not apply that to 371 * per-cpu kernel threads. This is ham-fisted. We probably wanna 372 * implement per-cgroup fallback dq's instead so that we have 373 * more control over when tasks with custom cpumask get issued. 
374 */ 375 if (p->nr_cpus_allowed == 1 && (p->flags & PF_KTHREAD)) { 376 stat_inc(FCG_STAT_LOCAL); 377 scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, enq_flags); 378 } else { 379 stat_inc(FCG_STAT_GLOBAL); 380 scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags); 381 } 382 return; 383 } 384 385 cgrp = __COMPAT_scx_bpf_task_cgroup(p); 386 cgc = find_cgrp_ctx(cgrp); 387 if (!cgc) 388 goto out_release; 389 390 if (fifo_sched) { 391 scx_bpf_dispatch(p, cgrp->kn->id, SCX_SLICE_DFL, enq_flags); 392 } else { 393 u64 tvtime = p->scx.dsq_vtime; 394 395 /* 396 * Limit the amount of budget that an idling task can accumulate 397 * to one slice. 398 */ 399 if (vtime_before(tvtime, cgc->tvtime_now - SCX_SLICE_DFL)) 400 tvtime = cgc->tvtime_now - SCX_SLICE_DFL; 401 402 scx_bpf_dispatch_vtime(p, cgrp->kn->id, SCX_SLICE_DFL, 403 tvtime, enq_flags); 404 } 405 406 cgrp_enqueued(cgrp, cgc); 407 out_release: 408 bpf_cgroup_release(cgrp); 409 } 410 411 /* 412 * Walk the cgroup tree to update the active weight sums as tasks wake up and 413 * sleep. The weight sums are used as the base when calculating the proportion a 414 * given cgroup or task is entitled to at each level. 415 */ 416 static void update_active_weight_sums(struct cgroup *cgrp, bool runnable) 417 { 418 struct fcg_cgrp_ctx *cgc; 419 bool updated = false; 420 int idx; 421 422 cgc = find_cgrp_ctx(cgrp); 423 if (!cgc) 424 return; 425 426 /* 427 * In most cases, a hot cgroup would have multiple threads going to 428 * sleep and waking up while the whole cgroup stays active. In leaf 429 * cgroups, ->nr_runnable which is updated with __sync operations gates 430 * ->nr_active updates, so that we don't have to grab the cgv_tree_lock 431 * repeatedly for a busy cgroup which is staying active. 
432 */ 433 if (runnable) { 434 if (__sync_fetch_and_add(&cgc->nr_runnable, 1)) 435 return; 436 stat_inc(FCG_STAT_ACT); 437 } else { 438 if (__sync_sub_and_fetch(&cgc->nr_runnable, 1)) 439 return; 440 stat_inc(FCG_STAT_DEACT); 441 } 442 443 /* 444 * If @cgrp is becoming runnable, its hweight should be refreshed after 445 * it's added to the weight tree so that enqueue has the up-to-date 446 * value. If @cgrp is becoming quiescent, the hweight should be 447 * refreshed before it's removed from the weight tree so that the usage 448 * charging which happens afterwards has access to the latest value. 449 */ 450 if (!runnable) 451 cgrp_refresh_hweight(cgrp, cgc); 452 453 /* propagate upwards */ 454 bpf_for(idx, 0, cgrp->level) { 455 int level = cgrp->level - idx; 456 struct fcg_cgrp_ctx *cgc, *pcgc = NULL; 457 bool propagate = false; 458 459 cgc = find_ancestor_cgrp_ctx(cgrp, level); 460 if (!cgc) 461 break; 462 if (level) { 463 pcgc = find_ancestor_cgrp_ctx(cgrp, level - 1); 464 if (!pcgc) 465 break; 466 } 467 468 /* 469 * We need the propagation protected by a lock to synchronize 470 * against weight changes. There's no reason to drop the lock at 471 * each level but bpf_spin_lock() doesn't want any function 472 * calls while locked. 
473 */ 474 bpf_spin_lock(&cgv_tree_lock); 475 476 if (runnable) { 477 if (!cgc->nr_active++) { 478 updated = true; 479 if (pcgc) { 480 propagate = true; 481 pcgc->child_weight_sum += cgc->weight; 482 } 483 } 484 } else { 485 if (!--cgc->nr_active) { 486 updated = true; 487 if (pcgc) { 488 propagate = true; 489 pcgc->child_weight_sum -= cgc->weight; 490 } 491 } 492 } 493 494 bpf_spin_unlock(&cgv_tree_lock); 495 496 if (!propagate) 497 break; 498 } 499 500 if (updated) 501 __sync_fetch_and_add(&hweight_gen, 1); 502 503 if (runnable) 504 cgrp_refresh_hweight(cgrp, cgc); 505 } 506 507 void BPF_STRUCT_OPS(fcg_runnable, struct task_struct *p, u64 enq_flags) 508 { 509 struct cgroup *cgrp; 510 511 cgrp = __COMPAT_scx_bpf_task_cgroup(p); 512 update_active_weight_sums(cgrp, true); 513 bpf_cgroup_release(cgrp); 514 } 515 516 void BPF_STRUCT_OPS(fcg_running, struct task_struct *p) 517 { 518 struct cgroup *cgrp; 519 struct fcg_cgrp_ctx *cgc; 520 521 if (fifo_sched) 522 return; 523 524 cgrp = __COMPAT_scx_bpf_task_cgroup(p); 525 cgc = find_cgrp_ctx(cgrp); 526 if (cgc) { 527 /* 528 * @cgc->tvtime_now always progresses forward as tasks start 529 * executing. The test and update can be performed concurrently 530 * from multiple CPUs and thus racy. Any error should be 531 * contained and temporary. Let's just live with it. 532 */ 533 if (vtime_before(cgc->tvtime_now, p->scx.dsq_vtime)) 534 cgc->tvtime_now = p->scx.dsq_vtime; 535 } 536 bpf_cgroup_release(cgrp); 537 } 538 539 void BPF_STRUCT_OPS(fcg_stopping, struct task_struct *p, bool runnable) 540 { 541 struct fcg_task_ctx *taskc; 542 struct cgroup *cgrp; 543 struct fcg_cgrp_ctx *cgc; 544 545 /* 546 * Scale the execution time by the inverse of the weight and charge. 547 * 548 * Note that the default yield implementation yields by setting 549 * @p->scx.slice to zero and the following would treat the yielding task 550 * as if it has consumed all its slice. 
If this penalizes yielding tasks 551 * too much, determine the execution time by taking explicit timestamps 552 * instead of depending on @p->scx.slice. 553 */ 554 if (!fifo_sched) 555 p->scx.dsq_vtime += 556 (SCX_SLICE_DFL - p->scx.slice) * 100 / p->scx.weight; 557 558 taskc = bpf_task_storage_get(&task_ctx, p, 0, 0); 559 if (!taskc) { 560 scx_bpf_error("task_ctx lookup failed"); 561 return; 562 } 563 564 if (!taskc->bypassed_at) 565 return; 566 567 cgrp = __COMPAT_scx_bpf_task_cgroup(p); 568 cgc = find_cgrp_ctx(cgrp); 569 if (cgc) { 570 __sync_fetch_and_add(&cgc->cvtime_delta, 571 p->se.sum_exec_runtime - taskc->bypassed_at); 572 taskc->bypassed_at = 0; 573 } 574 bpf_cgroup_release(cgrp); 575 } 576 577 void BPF_STRUCT_OPS(fcg_quiescent, struct task_struct *p, u64 deq_flags) 578 { 579 struct cgroup *cgrp; 580 581 cgrp = __COMPAT_scx_bpf_task_cgroup(p); 582 update_active_weight_sums(cgrp, false); 583 bpf_cgroup_release(cgrp); 584 } 585 586 void BPF_STRUCT_OPS(fcg_cgroup_set_weight, struct cgroup *cgrp, u32 weight) 587 { 588 struct fcg_cgrp_ctx *cgc, *pcgc = NULL; 589 590 cgc = find_cgrp_ctx(cgrp); 591 if (!cgc) 592 return; 593 594 if (cgrp->level) { 595 pcgc = find_ancestor_cgrp_ctx(cgrp, cgrp->level - 1); 596 if (!pcgc) 597 return; 598 } 599 600 bpf_spin_lock(&cgv_tree_lock); 601 if (pcgc && cgc->nr_active) 602 pcgc->child_weight_sum += (s64)weight - cgc->weight; 603 cgc->weight = weight; 604 bpf_spin_unlock(&cgv_tree_lock); 605 } 606 607 static bool try_pick_next_cgroup(u64 *cgidp) 608 { 609 struct bpf_rb_node *rb_node; 610 struct cgv_node_stash *stash; 611 struct cgv_node *cgv_node; 612 struct fcg_cgrp_ctx *cgc; 613 struct cgroup *cgrp; 614 u64 cgid; 615 616 /* pop the front cgroup and wind cvtime_now accordingly */ 617 bpf_spin_lock(&cgv_tree_lock); 618 619 rb_node = bpf_rbtree_first(&cgv_tree); 620 if (!rb_node) { 621 bpf_spin_unlock(&cgv_tree_lock); 622 stat_inc(FCG_STAT_PNC_NO_CGRP); 623 *cgidp = 0; 624 return true; 625 } 626 627 rb_node = 
bpf_rbtree_remove(&cgv_tree, rb_node); 628 bpf_spin_unlock(&cgv_tree_lock); 629 630 if (!rb_node) { 631 /* 632 * This should never happen. bpf_rbtree_first() was called 633 * above while the tree lock was held, so the node should 634 * always be present. 635 */ 636 scx_bpf_error("node could not be removed"); 637 return true; 638 } 639 640 cgv_node = container_of(rb_node, struct cgv_node, rb_node); 641 cgid = cgv_node->cgid; 642 643 if (vtime_before(cvtime_now, cgv_node->cvtime)) 644 cvtime_now = cgv_node->cvtime; 645 646 /* 647 * If lookup fails, the cgroup's gone. Free and move on. See 648 * fcg_cgroup_exit(). 649 */ 650 cgrp = bpf_cgroup_from_id(cgid); 651 if (!cgrp) { 652 stat_inc(FCG_STAT_PNC_GONE); 653 goto out_free; 654 } 655 656 cgc = bpf_cgrp_storage_get(&cgrp_ctx, cgrp, 0, 0); 657 if (!cgc) { 658 bpf_cgroup_release(cgrp); 659 stat_inc(FCG_STAT_PNC_GONE); 660 goto out_free; 661 } 662 663 if (!scx_bpf_consume(cgid)) { 664 bpf_cgroup_release(cgrp); 665 stat_inc(FCG_STAT_PNC_EMPTY); 666 goto out_stash; 667 } 668 669 /* 670 * Successfully consumed from the cgroup. This will be our current 671 * cgroup for the new slice. Refresh its hweight. 672 */ 673 cgrp_refresh_hweight(cgrp, cgc); 674 675 bpf_cgroup_release(cgrp); 676 677 /* 678 * As the cgroup may have more tasks, add it back to the rbtree. Note 679 * that here we charge the full slice upfront and then exact later 680 * according to the actual consumption. This prevents lowpri thundering 681 * herd from saturating the machine. 
682 */ 683 bpf_spin_lock(&cgv_tree_lock); 684 cgv_node->cvtime += cgrp_slice_ns * FCG_HWEIGHT_ONE / (cgc->hweight ?: 1); 685 cgrp_cap_budget(cgv_node, cgc); 686 bpf_rbtree_add(&cgv_tree, &cgv_node->rb_node, cgv_node_less); 687 bpf_spin_unlock(&cgv_tree_lock); 688 689 *cgidp = cgid; 690 stat_inc(FCG_STAT_PNC_NEXT); 691 return true; 692 693 out_stash: 694 stash = bpf_map_lookup_elem(&cgv_node_stash, &cgid); 695 if (!stash) { 696 stat_inc(FCG_STAT_PNC_GONE); 697 goto out_free; 698 } 699 700 /* 701 * Paired with cmpxchg in cgrp_enqueued(). If they see the following 702 * transition, they'll enqueue the cgroup. If they are earlier, we'll 703 * see their task in the dq below and requeue the cgroup. 704 */ 705 __sync_val_compare_and_swap(&cgc->queued, 1, 0); 706 707 if (scx_bpf_dsq_nr_queued(cgid)) { 708 bpf_spin_lock(&cgv_tree_lock); 709 bpf_rbtree_add(&cgv_tree, &cgv_node->rb_node, cgv_node_less); 710 bpf_spin_unlock(&cgv_tree_lock); 711 stat_inc(FCG_STAT_PNC_RACE); 712 } else { 713 cgv_node = bpf_kptr_xchg(&stash->node, cgv_node); 714 if (cgv_node) { 715 scx_bpf_error("unexpected !NULL cgv_node stash"); 716 goto out_free; 717 } 718 } 719 720 return false; 721 722 out_free: 723 bpf_obj_drop(cgv_node); 724 return false; 725 } 726 727 void BPF_STRUCT_OPS(fcg_dispatch, s32 cpu, struct task_struct *prev) 728 { 729 struct fcg_cpu_ctx *cpuc; 730 struct fcg_cgrp_ctx *cgc; 731 struct cgroup *cgrp; 732 u64 now = bpf_ktime_get_ns(); 733 bool picked_next = false; 734 735 cpuc = find_cpu_ctx(); 736 if (!cpuc) 737 return; 738 739 if (!cpuc->cur_cgid) 740 goto pick_next_cgroup; 741 742 if (vtime_before(now, cpuc->cur_at + cgrp_slice_ns)) { 743 if (scx_bpf_consume(cpuc->cur_cgid)) { 744 stat_inc(FCG_STAT_CNS_KEEP); 745 return; 746 } 747 stat_inc(FCG_STAT_CNS_EMPTY); 748 } else { 749 stat_inc(FCG_STAT_CNS_EXPIRE); 750 } 751 752 /* 753 * The current cgroup is expiring. It was already charged a full slice. 754 * Calculate the actual usage and accumulate the delta. 
755 */ 756 cgrp = bpf_cgroup_from_id(cpuc->cur_cgid); 757 if (!cgrp) { 758 stat_inc(FCG_STAT_CNS_GONE); 759 goto pick_next_cgroup; 760 } 761 762 cgc = bpf_cgrp_storage_get(&cgrp_ctx, cgrp, 0, 0); 763 if (cgc) { 764 /* 765 * We want to update the vtime delta and then look for the next 766 * cgroup to execute but the latter needs to be done in a loop 767 * and we can't keep the lock held. Oh well... 768 */ 769 bpf_spin_lock(&cgv_tree_lock); 770 __sync_fetch_and_add(&cgc->cvtime_delta, 771 (cpuc->cur_at + cgrp_slice_ns - now) * 772 FCG_HWEIGHT_ONE / (cgc->hweight ?: 1)); 773 bpf_spin_unlock(&cgv_tree_lock); 774 } else { 775 stat_inc(FCG_STAT_CNS_GONE); 776 } 777 778 bpf_cgroup_release(cgrp); 779 780 pick_next_cgroup: 781 cpuc->cur_at = now; 782 783 if (scx_bpf_consume(SCX_DSQ_GLOBAL)) { 784 cpuc->cur_cgid = 0; 785 return; 786 } 787 788 bpf_repeat(CGROUP_MAX_RETRIES) { 789 if (try_pick_next_cgroup(&cpuc->cur_cgid)) { 790 picked_next = true; 791 break; 792 } 793 } 794 795 /* 796 * This only happens if try_pick_next_cgroup() races against enqueue 797 * path for more than CGROUP_MAX_RETRIES times, which is extremely 798 * unlikely and likely indicates an underlying bug. There shouldn't be 799 * any stall risk as the race is against enqueue. 800 */ 801 if (!picked_next) 802 stat_inc(FCG_STAT_PNC_FAIL); 803 } 804 805 s32 BPF_STRUCT_OPS(fcg_init_task, struct task_struct *p, 806 struct scx_init_task_args *args) 807 { 808 struct fcg_task_ctx *taskc; 809 struct fcg_cgrp_ctx *cgc; 810 811 /* 812 * @p is new. Let's ensure that its task_ctx is available. We can sleep 813 * in this function and the following will automatically use GFP_KERNEL. 
814 */ 815 taskc = bpf_task_storage_get(&task_ctx, p, 0, 816 BPF_LOCAL_STORAGE_GET_F_CREATE); 817 if (!taskc) 818 return -ENOMEM; 819 820 taskc->bypassed_at = 0; 821 822 if (!(cgc = find_cgrp_ctx(args->cgroup))) 823 return -ENOENT; 824 825 p->scx.dsq_vtime = cgc->tvtime_now; 826 827 return 0; 828 } 829 830 int BPF_STRUCT_OPS_SLEEPABLE(fcg_cgroup_init, struct cgroup *cgrp, 831 struct scx_cgroup_init_args *args) 832 { 833 struct fcg_cgrp_ctx *cgc; 834 struct cgv_node *cgv_node; 835 struct cgv_node_stash empty_stash = {}, *stash; 836 u64 cgid = cgrp->kn->id; 837 int ret; 838 839 /* 840 * Technically incorrect as cgroup ID is full 64bit while dq ID is 841 * 63bit. Should not be a problem in practice and easy to spot in the 842 * unlikely case that it breaks. 843 */ 844 ret = scx_bpf_create_dsq(cgid, -1); 845 if (ret) 846 return ret; 847 848 cgc = bpf_cgrp_storage_get(&cgrp_ctx, cgrp, 0, 849 BPF_LOCAL_STORAGE_GET_F_CREATE); 850 if (!cgc) { 851 ret = -ENOMEM; 852 goto err_destroy_dsq; 853 } 854 855 cgc->weight = args->weight; 856 cgc->hweight = FCG_HWEIGHT_ONE; 857 858 ret = bpf_map_update_elem(&cgv_node_stash, &cgid, &empty_stash, 859 BPF_NOEXIST); 860 if (ret) { 861 if (ret != -ENOMEM) 862 scx_bpf_error("unexpected stash creation error (%d)", 863 ret); 864 goto err_destroy_dsq; 865 } 866 867 stash = bpf_map_lookup_elem(&cgv_node_stash, &cgid); 868 if (!stash) { 869 scx_bpf_error("unexpected cgv_node stash lookup failure"); 870 ret = -ENOENT; 871 goto err_destroy_dsq; 872 } 873 874 cgv_node = bpf_obj_new(struct cgv_node); 875 if (!cgv_node) { 876 ret = -ENOMEM; 877 goto err_del_cgv_node; 878 } 879 880 cgv_node->cgid = cgid; 881 cgv_node->cvtime = cvtime_now; 882 883 cgv_node = bpf_kptr_xchg(&stash->node, cgv_node); 884 if (cgv_node) { 885 scx_bpf_error("unexpected !NULL cgv_node stash"); 886 ret = -EBUSY; 887 goto err_drop; 888 } 889 890 return 0; 891 892 err_drop: 893 bpf_obj_drop(cgv_node); 894 err_del_cgv_node: 895 bpf_map_delete_elem(&cgv_node_stash, &cgid); 896 
err_destroy_dsq: 897 scx_bpf_destroy_dsq(cgid); 898 return ret; 899 } 900 901 void BPF_STRUCT_OPS(fcg_cgroup_exit, struct cgroup *cgrp) 902 { 903 u64 cgid = cgrp->kn->id; 904 905 /* 906 * For now, there's no way find and remove the cgv_node if it's on the 907 * cgv_tree. Let's drain them in the dispatch path as they get popped 908 * off the front of the tree. 909 */ 910 bpf_map_delete_elem(&cgv_node_stash, &cgid); 911 scx_bpf_destroy_dsq(cgid); 912 } 913 914 void BPF_STRUCT_OPS(fcg_cgroup_move, struct task_struct *p, 915 struct cgroup *from, struct cgroup *to) 916 { 917 struct fcg_cgrp_ctx *from_cgc, *to_cgc; 918 s64 vtime_delta; 919 920 /* find_cgrp_ctx() triggers scx_ops_error() on lookup failures */ 921 if (!(from_cgc = find_cgrp_ctx(from)) || !(to_cgc = find_cgrp_ctx(to))) 922 return; 923 924 vtime_delta = p->scx.dsq_vtime - from_cgc->tvtime_now; 925 p->scx.dsq_vtime = to_cgc->tvtime_now + vtime_delta; 926 } 927 928 void BPF_STRUCT_OPS(fcg_exit, struct scx_exit_info *ei) 929 { 930 UEI_RECORD(uei, ei); 931 } 932 933 SCX_OPS_DEFINE(flatcg_ops, 934 .select_cpu = (void *)fcg_select_cpu, 935 .enqueue = (void *)fcg_enqueue, 936 .dispatch = (void *)fcg_dispatch, 937 .runnable = (void *)fcg_runnable, 938 .running = (void *)fcg_running, 939 .stopping = (void *)fcg_stopping, 940 .quiescent = (void *)fcg_quiescent, 941 .init_task = (void *)fcg_init_task, 942 .cgroup_set_weight = (void *)fcg_cgroup_set_weight, 943 .cgroup_init = (void *)fcg_cgroup_init, 944 .cgroup_exit = (void *)fcg_cgroup_exit, 945 .cgroup_move = (void *)fcg_cgroup_move, 946 .exit = (void *)fcg_exit, 947 .flags = SCX_OPS_HAS_CGROUP_WEIGHT | SCX_OPS_ENQ_EXITING, 948 .name = "flatcg"); 949