/* SPDX-License-Identifier: GPL-2.0 */
/*
 * A demo sched_ext flattened cgroup hierarchy scheduler. It implements
 * hierarchical weight-based cgroup CPU control by flattening the cgroup
 * hierarchy into a single layer by compounding the active weight share at each
 * level. Consider the following hierarchy with weights in parentheses:
 *
 *          R + A (100) + B (100)
 *            |         \ C (100)
 *            \ D (200)
 *
 * Ignoring the root and threaded cgroups, only B, C and D can contain tasks.
 * Let's say all three have runnable tasks. The total share that each of these
 * three cgroups is entitled to can be calculated by compounding its share at
 * each level.
 *
 * For example, B is competing against C and in that competition its share is
 * 100/(100+100) == 1/2. At its parent level, A is competing against D and A's
 * share in that competition is 100/(200+100) == 1/3. B's eventual share in the
 * system can be calculated by multiplying the two shares, 1/2 * 1/3 == 1/6.
 * C's eventual share is the same at 1/6. D is only competing at the top level
 * and its share is 200/(100+200) == 2/3.
 *
 * So, instead of hierarchically scheduling level-by-level, we can consider it
 * as B, C and D competing with each other with respective shares of 1/6, 1/6
 * and 2/3, and keep updating the eventual shares as the cgroups' runnable
 * states change.
 *
 * This flattening of the hierarchy can bring a substantial performance gain
 * when the cgroup hierarchy is nested multiple levels deep. In a simple
 * benchmark using wrk[8] on apache serving a CGI script calculating the
 * sha1sum of a small file, it outperforms CFS by ~3% with the CPU controller
 * disabled and by ~10% with two apache instances competing with a 2:1 weight
 * ratio nested four levels deep.
 *
 * However, the gain comes at the cost of not being able to properly handle a
 * thundering herd of cgroups. For example, if many cgroups which are nested
 * behind a low priority parent cgroup wake up around the same time, they may
 * be able to consume more CPU cycles than they are entitled to. In many use
 * cases, this isn't a real concern especially given the performance gain.
 * Also, there are ways to mitigate the problem further by e.g. introducing an
 * extra scheduling layer on cgroup delegation boundaries.
 *
 * The scheduler first picks the cgroup to run and then schedules the tasks
 * within by using nested weighted vtime scheduling by default. The
 * cgroup-internal scheduling can be switched to FIFO with the -f option.
 */
#include <scx/common.bpf.h>
#include "scx_flatcg.h"

/*
 * FALLBACK_DSQ is the DSQ used for tasks which bypass the per-cgroup DSQs.
 * CGROUP_MAX_RETRIES is the maximum number of retries to find a valid cgroup
 * in the dispatch path.
 */
enum {
	FALLBACK_DSQ		= 0,
	CGROUP_MAX_RETRIES	= 1024,
};

char _license[] SEC("license") = "GPL";

const volatile u32 nr_cpus = 32;	/* !0 for veristat, set during init */
const volatile u64 cgrp_slice_ns = SCX_SLICE_DFL;
const volatile bool fifo_sched;

u64 cvtime_now;
UEI_DEFINE(uei);

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__type(key, u32);
	__type(value, u64);
	__uint(max_entries, FCG_NR_STATS);
} stats SEC(".maps");

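/*
 * Bump a counter in the per-CPU stats map. The userspace side is expected to
 * sum the per-CPU values when reporting.
 */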
static void stat_inc(enum fcg_stat_idx idx)
{
	u32 idx_v = idx;

	u64 *cnt_p = bpf_map_lookup_elem(&stats, &idx_v);
	if (cnt_p)
		(*cnt_p)++;
}

struct fcg_cpu_ctx {
	u64 cur_cgid;
	u64 cur_at;
};

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__type(key, u32);
	__type(value, struct fcg_cpu_ctx);
	__uint(max_entries, 1);
} cpu_ctx SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, struct fcg_cgrp_ctx);
} cgrp_ctx SEC(".maps");

struct cgv_node {
	struct bpf_rb_node	rb_node;
	__u64			cvtime;
	__u64			cgid;
};

private(CGV_TREE) struct bpf_spin_lock cgv_tree_lock;
private(CGV_TREE) struct bpf_rb_root cgv_tree __contains(cgv_node, rb_node);

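/*
 * A cgroup's cgv_node is either on cgv_tree or stashed here. Holding it as a
 * kptr in a hash map keeps ownership explicit while the node is off the tree.
 */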
struct cgv_node_stash {
	struct cgv_node __kptr *node;
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 16384);
	__type(key, __u64);
	__type(value, struct cgv_node_stash);
} cgv_node_stash SEC(".maps");

struct fcg_task_ctx {
	u64 bypassed_at;
};

struct {
	__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, struct fcg_task_ctx);
} task_ctx SEC(".maps");

/* gets inc'd on weight tree changes to expire the cached hweights */
u64 hweight_gen = 1;

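/* integer division rounding up, e.g. div_round_up(7, 2) == 4 */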
static u64 div_round_up(u64 dividend, u64 divisor)
{
	return (dividend + divisor - 1) / divisor;
}

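/* wraparound-safe comparison: true if vtime @a is before @b */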
static bool vtime_before(u64 a, u64 b)
{
	return (s64)(a - b) < 0;
}

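/* rbtree less-than callback ordering cgv_tree by ascending cvtime */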
static bool cgv_node_less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
{
	struct cgv_node *cgc_a, *cgc_b;

	cgc_a = container_of(a, struct cgv_node, rb_node);
	cgc_b = container_of(b, struct cgv_node, rb_node);

	return cgc_a->cvtime < cgc_b->cvtime;
}

static struct fcg_cpu_ctx *find_cpu_ctx(void)
{
	struct fcg_cpu_ctx *cpuc;
	u32 idx = 0;

	cpuc = bpf_map_lookup_elem(&cpu_ctx, &idx);
	if (!cpuc) {
		scx_bpf_error("cpu_ctx lookup failed");
		return NULL;
	}
	return cpuc;
}

static struct fcg_cgrp_ctx *find_cgrp_ctx(struct cgroup *cgrp)
{
	struct fcg_cgrp_ctx *cgc;

	cgc = bpf_cgrp_storage_get(&cgrp_ctx, cgrp, 0, 0);
	if (!cgc) {
		scx_bpf_error("cgrp_ctx lookup failed for cgid %llu", cgrp->kn->id);
		return NULL;
	}
	return cgc;
}

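/* look up the fcg_cgrp_ctx of @cgrp's ancestor at @level (level 0 is the root) */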
static struct fcg_cgrp_ctx *find_ancestor_cgrp_ctx(struct cgroup *cgrp, int level)
{
	struct fcg_cgrp_ctx *cgc;

	cgrp = bpf_cgroup_ancestor(cgrp, level);
	if (!cgrp) {
		scx_bpf_error("ancestor cgroup lookup failed");
		return NULL;
	}

	cgc = find_cgrp_ctx(cgrp);
	if (!cgc)
		scx_bpf_error("ancestor cgrp_ctx lookup failed");
	bpf_cgroup_release(cgrp);
	return cgc;
}

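/*
 * Recompute @cgc->hweight by walking from the root to @cgrp and compounding
 * each level's weight share against its parent's child_weight_sum. Results
 * are cached per level and invalidated by bumping @hweight_gen.
 */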
static void cgrp_refresh_hweight(struct cgroup *cgrp, struct fcg_cgrp_ctx *cgc)
{
	int level;

	if (!cgc->nr_active) {
		stat_inc(FCG_STAT_HWT_SKIP);
		return;
	}

	if (cgc->hweight_gen == hweight_gen) {
		stat_inc(FCG_STAT_HWT_CACHE);
		return;
	}

	stat_inc(FCG_STAT_HWT_UPDATES);
	bpf_for(level, 0, cgrp->level + 1) {
		struct fcg_cgrp_ctx *cgc;
		bool is_active;

		cgc = find_ancestor_cgrp_ctx(cgrp, level);
		if (!cgc)
			break;

		if (!level) {
			cgc->hweight = FCG_HWEIGHT_ONE;
			cgc->hweight_gen = hweight_gen;
		} else {
			struct fcg_cgrp_ctx *pcgc;

			pcgc = find_ancestor_cgrp_ctx(cgrp, level - 1);
			if (!pcgc)
				break;

			/*
			 * We can be opportunistic here and not grab the
			 * cgv_tree_lock and deal with the occasional races.
			 * However, hweight updates are already cached and
			 * relatively low-frequency. Let's just do the
			 * straightforward thing.
			 */
			bpf_spin_lock(&cgv_tree_lock);
			is_active = cgc->nr_active;
			if (is_active) {
				cgc->hweight_gen = pcgc->hweight_gen;
				cgc->hweight =
					div_round_up(pcgc->hweight * cgc->weight,
						     pcgc->child_weight_sum);
			}
			bpf_spin_unlock(&cgv_tree_lock);

			if (!is_active) {
				stat_inc(FCG_STAT_HWT_RACE);
				break;
			}
		}
	}
}

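/*
 * Apply the accumulated cvtime_delta to @cgv_node and clamp how far its
 * cvtime may fall behind cvtime_now, i.e. how much budget it can carry.
 */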
static void cgrp_cap_budget(struct cgv_node *cgv_node, struct fcg_cgrp_ctx *cgc)
{
	u64 delta, cvtime, max_budget;

	/*
	 * A node which is on the rbtree can't be pointed to from elsewhere yet
	 * and thus can't be updated and repositioned. Instead, we collect the
	 * vtime deltas separately and apply them asynchronously here.
	 */
	delta = __sync_fetch_and_sub(&cgc->cvtime_delta, cgc->cvtime_delta);
	cvtime = cgv_node->cvtime + delta;

	/*
	 * Allow a cgroup to carry the maximum budget proportional to its
	 * hweight such that a full-hweight cgroup can immediately take up half
	 * of the CPUs at the most while staying at the front of the rbtree.
	 */
	max_budget = (cgrp_slice_ns * nr_cpus * cgc->hweight) /
		(2 * FCG_HWEIGHT_ONE);
	if (vtime_before(cvtime, cvtime_now - max_budget))
		cvtime = cvtime_now - max_budget;

	cgv_node->cvtime = cvtime;
}

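/*
 * Called when @cgrp may have runnable tasks. If it isn't already queued, move
 * the stashed cgv_node onto cgv_tree so that the dispatch path can pick @cgrp.
 */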
static void cgrp_enqueued(struct cgroup *cgrp, struct fcg_cgrp_ctx *cgc)
{
	struct cgv_node_stash *stash;
	struct cgv_node *cgv_node;
	u64 cgid = cgrp->kn->id;

	/* paired with cmpxchg in try_pick_next_cgroup() */
	if (__sync_val_compare_and_swap(&cgc->queued, 0, 1)) {
		stat_inc(FCG_STAT_ENQ_SKIP);
		return;
	}

	stash = bpf_map_lookup_elem(&cgv_node_stash, &cgid);
	if (!stash) {
		scx_bpf_error("cgv_node lookup failed for cgid %llu", cgid);
		return;
	}

	/* NULL if the node is already on the rbtree */
	cgv_node = bpf_kptr_xchg(&stash->node, NULL);
	if (!cgv_node) {
		stat_inc(FCG_STAT_ENQ_RACE);
		return;
	}

	bpf_spin_lock(&cgv_tree_lock);
	cgrp_cap_budget(cgv_node, cgc);
	bpf_rbtree_add(&cgv_tree, &cgv_node->rb_node, cgv_node_less);
	bpf_spin_unlock(&cgv_tree_lock);
}

static void set_bypassed_at(struct task_struct *p, struct fcg_task_ctx *taskc)
{
	/*
	 * Tell fcg_stopping() that this bypassed the regular scheduling path
	 * and should be force charged to the cgroup. 0 is used to indicate that
	 * the task isn't bypassing, so if the current runtime is 0, go back by
	 * one nanosecond.
	 */
	taskc->bypassed_at = p->se.sum_exec_runtime ?: (u64)-1;
}

s32 BPF_STRUCT_OPS(fcg_select_cpu, struct task_struct *p, s32 prev_cpu, u64 wake_flags)
{
	struct fcg_task_ctx *taskc;
	bool is_idle = false;
	s32 cpu;

	cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);

	taskc = bpf_task_storage_get(&task_ctx, p, 0, 0);
	if (!taskc) {
		scx_bpf_error("task_ctx lookup failed");
		return cpu;
	}

	/*
	 * If select_cpu_dfl() is recommending local enqueue, the target CPU is
	 * idle. Follow it and charge the cgroup later in fcg_stopping() after
	 * the fact.
	 */
	if (is_idle) {
		set_bypassed_at(p, taskc);
		stat_inc(FCG_STAT_LOCAL);
		scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
	}

	return cpu;
}

void BPF_STRUCT_OPS(fcg_enqueue, struct task_struct *p, u64 enq_flags)
{
	struct fcg_task_ctx *taskc;
	struct cgroup *cgrp;
	struct fcg_cgrp_ctx *cgc;

	taskc = bpf_task_storage_get(&task_ctx, p, 0, 0);
	if (!taskc) {
		scx_bpf_error("task_ctx lookup failed");
		return;
	}

	/*
	 * Use direct dispatching and force charging to deal with tasks with
	 * custom affinities so that we don't have to worry about per-cgroup
	 * dq's containing tasks that can't be executed from some CPUs.
	 */
	if (p->nr_cpus_allowed != nr_cpus) {
		set_bypassed_at(p, taskc);

		/*
		 * The global dq is deprioritized as we don't want to let tasks
		 * boost themselves by constraining their cpumask. The
		 * deprioritization is rather severe, so let's not apply it to
		 * per-cpu kernel threads. This is ham-fisted. We probably want
		 * to implement per-cgroup fallback dq's instead so that we
		 * have more control over when tasks with custom cpumasks get
		 * issued.
		 */
		if (p->nr_cpus_allowed == 1 && (p->flags & PF_KTHREAD)) {
			stat_inc(FCG_STAT_LOCAL);
			scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, enq_flags);
		} else {
			stat_inc(FCG_STAT_GLOBAL);
			scx_bpf_dispatch(p, FALLBACK_DSQ, SCX_SLICE_DFL, enq_flags);
		}
		return;
	}

	cgrp = __COMPAT_scx_bpf_task_cgroup(p);
	cgc = find_cgrp_ctx(cgrp);
	if (!cgc)
		goto out_release;

	if (fifo_sched) {
		scx_bpf_dispatch(p, cgrp->kn->id, SCX_SLICE_DFL, enq_flags);
	} else {
		u64 tvtime = p->scx.dsq_vtime;

		/*
		 * Limit the amount of budget that an idling task can accumulate
		 * to one slice.
		 */
		if (vtime_before(tvtime, cgc->tvtime_now - SCX_SLICE_DFL))
			tvtime = cgc->tvtime_now - SCX_SLICE_DFL;

		scx_bpf_dispatch_vtime(p, cgrp->kn->id, SCX_SLICE_DFL,
				       tvtime, enq_flags);
	}

	cgrp_enqueued(cgrp, cgc);
out_release:
	bpf_cgroup_release(cgrp);
}

/*
 * Walk the cgroup tree to update the active weight sums as tasks wake up and
 * sleep. The weight sums are used as the base when calculating the proportion a
 * given cgroup or task is entitled to at each level.
 */
static void update_active_weight_sums(struct cgroup *cgrp, bool runnable)
{
	struct fcg_cgrp_ctx *cgc;
	bool updated = false;
	int idx;

	cgc = find_cgrp_ctx(cgrp);
	if (!cgc)
		return;

	/*
	 * In most cases, a hot cgroup would have multiple threads going to
	 * sleep and waking up while the whole cgroup stays active. In leaf
	 * cgroups, ->nr_runnable which is updated with __sync operations gates
	 * ->nr_active updates, so that we don't have to grab the cgv_tree_lock
	 * repeatedly for a busy cgroup which is staying active.
	 */
	if (runnable) {
		if (__sync_fetch_and_add(&cgc->nr_runnable, 1))
			return;
		stat_inc(FCG_STAT_ACT);
	} else {
		if (__sync_sub_and_fetch(&cgc->nr_runnable, 1))
			return;
		stat_inc(FCG_STAT_DEACT);
	}

	/*
	 * If @cgrp is becoming runnable, its hweight should be refreshed after
	 * it's added to the weight tree so that enqueue has the up-to-date
	 * value. If @cgrp is becoming quiescent, the hweight should be
	 * refreshed before it's removed from the weight tree so that the usage
	 * charging which happens afterwards has access to the latest value.
	 */
	if (!runnable)
		cgrp_refresh_hweight(cgrp, cgc);

	/* propagate upwards */
	bpf_for(idx, 0, cgrp->level) {
		int level = cgrp->level - idx;
		struct fcg_cgrp_ctx *cgc, *pcgc = NULL;
		bool propagate = false;

		cgc = find_ancestor_cgrp_ctx(cgrp, level);
		if (!cgc)
			break;
		if (level) {
			pcgc = find_ancestor_cgrp_ctx(cgrp, level - 1);
			if (!pcgc)
				break;
		}

		/*
		 * We need the propagation protected by a lock to synchronize
		 * against weight changes. There's no reason to drop the lock at
		 * each level but bpf_spin_lock() doesn't want any function
		 * calls while locked.
		 */
		bpf_spin_lock(&cgv_tree_lock);

		if (runnable) {
			if (!cgc->nr_active++) {
				updated = true;
				if (pcgc) {
					propagate = true;
					pcgc->child_weight_sum += cgc->weight;
				}
			}
		} else {
			if (!--cgc->nr_active) {
				updated = true;
				if (pcgc) {
					propagate = true;
					pcgc->child_weight_sum -= cgc->weight;
				}
			}
		}

		bpf_spin_unlock(&cgv_tree_lock);

		if (!propagate)
			break;
	}

	if (updated)
		__sync_fetch_and_add(&hweight_gen, 1);

	if (runnable)
		cgrp_refresh_hweight(cgrp, cgc);
}

void BPF_STRUCT_OPS(fcg_runnable, struct task_struct *p, u64 enq_flags)
{
	struct cgroup *cgrp;

	cgrp = __COMPAT_scx_bpf_task_cgroup(p);
	update_active_weight_sums(cgrp, true);
	bpf_cgroup_release(cgrp);
}

void BPF_STRUCT_OPS(fcg_running, struct task_struct *p)
{
	struct cgroup *cgrp;
	struct fcg_cgrp_ctx *cgc;

	if (fifo_sched)
		return;

	cgrp = __COMPAT_scx_bpf_task_cgroup(p);
	cgc = find_cgrp_ctx(cgrp);
	if (cgc) {
		/*
		 * @cgc->tvtime_now always progresses forward as tasks start
		 * executing. The test and update can be performed concurrently
		 * from multiple CPUs and are thus racy. Any error should be
		 * contained and temporary. Let's just live with it.
		 */
		if (vtime_before(cgc->tvtime_now, p->scx.dsq_vtime))
			cgc->tvtime_now = p->scx.dsq_vtime;
	}
	bpf_cgroup_release(cgrp);
}

void BPF_STRUCT_OPS(fcg_stopping, struct task_struct *p, bool runnable)
{
	struct fcg_task_ctx *taskc;
	struct cgroup *cgrp;
	struct fcg_cgrp_ctx *cgc;

	/*
	 * Scale the execution time by the inverse of the weight and charge.
	 *
	 * Note that the default yield implementation yields by setting
	 * @p->scx.slice to zero and the following would treat the yielding task
	 * as if it has consumed all its slice. If this penalizes yielding tasks
	 * too much, determine the execution time by taking explicit timestamps
	 * instead of depending on @p->scx.slice.
	 */
	if (!fifo_sched)
		p->scx.dsq_vtime +=
			(SCX_SLICE_DFL - p->scx.slice) * 100 / p->scx.weight;

	taskc = bpf_task_storage_get(&task_ctx, p, 0, 0);
	if (!taskc) {
		scx_bpf_error("task_ctx lookup failed");
		return;
	}

	if (!taskc->bypassed_at)
		return;

	cgrp = __COMPAT_scx_bpf_task_cgroup(p);
	cgc = find_cgrp_ctx(cgrp);
	if (cgc) {
		__sync_fetch_and_add(&cgc->cvtime_delta,
				     p->se.sum_exec_runtime - taskc->bypassed_at);
		taskc->bypassed_at = 0;
	}
	bpf_cgroup_release(cgrp);
}

void BPF_STRUCT_OPS(fcg_quiescent, struct task_struct *p, u64 deq_flags)
{
	struct cgroup *cgrp;

	cgrp = __COMPAT_scx_bpf_task_cgroup(p);
	update_active_weight_sums(cgrp, false);
	bpf_cgroup_release(cgrp);
}

void BPF_STRUCT_OPS(fcg_cgroup_set_weight, struct cgroup *cgrp, u32 weight)
{
	struct fcg_cgrp_ctx *cgc, *pcgc = NULL;

	cgc = find_cgrp_ctx(cgrp);
	if (!cgc)
		return;

	if (cgrp->level) {
		pcgc = find_ancestor_cgrp_ctx(cgrp, cgrp->level - 1);
		if (!pcgc)
			return;
	}

	bpf_spin_lock(&cgv_tree_lock);
	if (pcgc && cgc->nr_active)
		pcgc->child_weight_sum += (s64)weight - cgc->weight;
	cgc->weight = weight;
	bpf_spin_unlock(&cgv_tree_lock);
}

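/*
 * Pop the front cgroup from cgv_tree and try to consume a task from its DSQ.
 * Returns true if dispatching should stop (a task was consumed or there's
 * nothing to run), false if the caller should retry with another cgroup.
 */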
static bool try_pick_next_cgroup(u64 *cgidp)
{
	struct bpf_rb_node *rb_node;
	struct cgv_node_stash *stash;
	struct cgv_node *cgv_node;
	struct fcg_cgrp_ctx *cgc;
	struct cgroup *cgrp;
	u64 cgid;

	/* pop the front cgroup and wind cvtime_now accordingly */
	bpf_spin_lock(&cgv_tree_lock);

	rb_node = bpf_rbtree_first(&cgv_tree);
	if (!rb_node) {
		bpf_spin_unlock(&cgv_tree_lock);
		stat_inc(FCG_STAT_PNC_NO_CGRP);
		*cgidp = 0;
		return true;
	}

	rb_node = bpf_rbtree_remove(&cgv_tree, rb_node);
	bpf_spin_unlock(&cgv_tree_lock);

	if (!rb_node) {
		/*
		 * This should never happen. bpf_rbtree_first() was called
		 * above while the tree lock was held, so the node should
		 * always be present.
		 */
		scx_bpf_error("node could not be removed");
		return true;
	}

	cgv_node = container_of(rb_node, struct cgv_node, rb_node);
	cgid = cgv_node->cgid;

	if (vtime_before(cvtime_now, cgv_node->cvtime))
		cvtime_now = cgv_node->cvtime;

	/*
	 * If lookup fails, the cgroup's gone. Free and move on. See
	 * fcg_cgroup_exit().
	 */
	cgrp = bpf_cgroup_from_id(cgid);
	if (!cgrp) {
		stat_inc(FCG_STAT_PNC_GONE);
		goto out_free;
	}

	cgc = bpf_cgrp_storage_get(&cgrp_ctx, cgrp, 0, 0);
	if (!cgc) {
		bpf_cgroup_release(cgrp);
		stat_inc(FCG_STAT_PNC_GONE);
		goto out_free;
	}

	if (!scx_bpf_consume(cgid)) {
		bpf_cgroup_release(cgrp);
		stat_inc(FCG_STAT_PNC_EMPTY);
		goto out_stash;
	}

	/*
	 * Successfully consumed from the cgroup. This will be our current
	 * cgroup for the new slice. Refresh its hweight.
	 */
	cgrp_refresh_hweight(cgrp, cgc);

	bpf_cgroup_release(cgrp);

	/*
	 * As the cgroup may have more tasks, add it back to the rbtree. Note
	 * that here we charge the full slice upfront and then exact later
	 * according to the actual consumption. This prevents lowpri thundering
	 * herd from saturating the machine.
	 */
	bpf_spin_lock(&cgv_tree_lock);
	cgv_node->cvtime += cgrp_slice_ns * FCG_HWEIGHT_ONE / (cgc->hweight ?: 1);
	cgrp_cap_budget(cgv_node, cgc);
	bpf_rbtree_add(&cgv_tree, &cgv_node->rb_node, cgv_node_less);
	bpf_spin_unlock(&cgv_tree_lock);

	*cgidp = cgid;
	stat_inc(FCG_STAT_PNC_NEXT);
	return true;

out_stash:
	stash = bpf_map_lookup_elem(&cgv_node_stash, &cgid);
	if (!stash) {
		stat_inc(FCG_STAT_PNC_GONE);
		goto out_free;
	}

	/*
	 * Paired with cmpxchg in cgrp_enqueued(). If they see the following
	 * transition, they'll enqueue the cgroup. If they are earlier, we'll
	 * see their task in the dq below and requeue the cgroup.
	 */
	__sync_val_compare_and_swap(&cgc->queued, 1, 0);

	if (scx_bpf_dsq_nr_queued(cgid)) {
		bpf_spin_lock(&cgv_tree_lock);
		bpf_rbtree_add(&cgv_tree, &cgv_node->rb_node, cgv_node_less);
		bpf_spin_unlock(&cgv_tree_lock);
		stat_inc(FCG_STAT_PNC_RACE);
	} else {
		cgv_node = bpf_kptr_xchg(&stash->node, cgv_node);
		if (cgv_node) {
			scx_bpf_error("unexpected !NULL cgv_node stash");
			goto out_free;
		}
	}

	return false;

out_free:
	bpf_obj_drop(cgv_node);
	return false;
}

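/*
 * Keep consuming from the current cgroup until its slice expires or its DSQ
 * runs dry, then charge it for its actual usage and pick the next cgroup.
 */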
void BPF_STRUCT_OPS(fcg_dispatch, s32 cpu, struct task_struct *prev)
{
	struct fcg_cpu_ctx *cpuc;
	struct fcg_cgrp_ctx *cgc;
	struct cgroup *cgrp;
	u64 now = bpf_ktime_get_ns();
	bool picked_next = false;

	cpuc = find_cpu_ctx();
	if (!cpuc)
		return;

	if (!cpuc->cur_cgid)
		goto pick_next_cgroup;

	if (vtime_before(now, cpuc->cur_at + cgrp_slice_ns)) {
		if (scx_bpf_consume(cpuc->cur_cgid)) {
			stat_inc(FCG_STAT_CNS_KEEP);
			return;
		}
		stat_inc(FCG_STAT_CNS_EMPTY);
	} else {
		stat_inc(FCG_STAT_CNS_EXPIRE);
	}

	/*
	 * The current cgroup is expiring. It was already charged a full slice.
	 * Calculate the actual usage and accumulate the delta.
	 */
	cgrp = bpf_cgroup_from_id(cpuc->cur_cgid);
	if (!cgrp) {
		stat_inc(FCG_STAT_CNS_GONE);
		goto pick_next_cgroup;
	}

	cgc = bpf_cgrp_storage_get(&cgrp_ctx, cgrp, 0, 0);
	if (cgc) {
		/*
		 * We want to update the vtime delta and then look for the next
		 * cgroup to execute but the latter needs to be done in a loop
		 * and we can't keep the lock held. Oh well...
		 */
		bpf_spin_lock(&cgv_tree_lock);
		__sync_fetch_and_add(&cgc->cvtime_delta,
				     (cpuc->cur_at + cgrp_slice_ns - now) *
				     FCG_HWEIGHT_ONE / (cgc->hweight ?: 1));
		bpf_spin_unlock(&cgv_tree_lock);
	} else {
		stat_inc(FCG_STAT_CNS_GONE);
	}

	bpf_cgroup_release(cgrp);

pick_next_cgroup:
	cpuc->cur_at = now;

	if (scx_bpf_consume(FALLBACK_DSQ)) {
		cpuc->cur_cgid = 0;
		return;
	}

	bpf_repeat(CGROUP_MAX_RETRIES) {
		if (try_pick_next_cgroup(&cpuc->cur_cgid)) {
			picked_next = true;
			break;
		}
	}

	/*
	 * This only happens if try_pick_next_cgroup() races against the
	 * enqueue path for more than CGROUP_MAX_RETRIES times, which is
	 * extremely unlikely and likely indicates an underlying bug. There
	 * shouldn't be any stall risk as the race is against enqueue.
	 */
	if (!picked_next)
		stat_inc(FCG_STAT_PNC_FAIL);
}

s32 BPF_STRUCT_OPS(fcg_init_task, struct task_struct *p,
		   struct scx_init_task_args *args)
{
	struct fcg_task_ctx *taskc;
	struct fcg_cgrp_ctx *cgc;

	/*
	 * @p is new. Let's ensure that its task_ctx is available. We can sleep
	 * in this function and the following will automatically use GFP_KERNEL.
	 */
	taskc = bpf_task_storage_get(&task_ctx, p, 0,
				     BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (!taskc)
		return -ENOMEM;

	taskc->bypassed_at = 0;

	if (!(cgc = find_cgrp_ctx(args->cgroup)))
		return -ENOENT;

	p->scx.dsq_vtime = cgc->tvtime_now;

	return 0;
}

int BPF_STRUCT_OPS_SLEEPABLE(fcg_cgroup_init, struct cgroup *cgrp,
			     struct scx_cgroup_init_args *args)
{
	struct fcg_cgrp_ctx *cgc;
	struct cgv_node *cgv_node;
	struct cgv_node_stash empty_stash = {}, *stash;
	u64 cgid = cgrp->kn->id;
	int ret;

	/*
	 * Technically incorrect as cgroup ID is full 64bit while dsq ID is
	 * 63bit. Should not be a problem in practice and easy to spot in the
	 * unlikely case that it breaks.
	 */
	ret = scx_bpf_create_dsq(cgid, -1);
	if (ret)
		return ret;

	cgc = bpf_cgrp_storage_get(&cgrp_ctx, cgrp, 0,
				   BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (!cgc) {
		ret = -ENOMEM;
		goto err_destroy_dsq;
	}

	cgc->weight = args->weight;
	cgc->hweight = FCG_HWEIGHT_ONE;

	ret = bpf_map_update_elem(&cgv_node_stash, &cgid, &empty_stash,
				  BPF_NOEXIST);
	if (ret) {
		if (ret != -ENOMEM)
			scx_bpf_error("unexpected stash creation error (%d)",
				      ret);
		goto err_destroy_dsq;
	}

	stash = bpf_map_lookup_elem(&cgv_node_stash, &cgid);
	if (!stash) {
		scx_bpf_error("unexpected cgv_node stash lookup failure");
		ret = -ENOENT;
		goto err_destroy_dsq;
	}

	cgv_node = bpf_obj_new(struct cgv_node);
	if (!cgv_node) {
		ret = -ENOMEM;
		goto err_del_cgv_node;
	}

	cgv_node->cgid = cgid;
	cgv_node->cvtime = cvtime_now;

	cgv_node = bpf_kptr_xchg(&stash->node, cgv_node);
	if (cgv_node) {
		scx_bpf_error("unexpected !NULL cgv_node stash");
		ret = -EBUSY;
		goto err_drop;
	}

	return 0;

err_drop:
	bpf_obj_drop(cgv_node);
err_del_cgv_node:
	bpf_map_delete_elem(&cgv_node_stash, &cgid);
err_destroy_dsq:
	scx_bpf_destroy_dsq(cgid);
	return ret;
}

void BPF_STRUCT_OPS(fcg_cgroup_exit, struct cgroup *cgrp)
{
	u64 cgid = cgrp->kn->id;

	/*
	 * For now, there's no way to find and remove the cgv_node if it's on
	 * the cgv_tree. Let's drain them in the dispatch path as they get
	 * popped off the front of the tree.
	 */
	bpf_map_delete_elem(&cgv_node_stash, &cgid);
	scx_bpf_destroy_dsq(cgid);
}

void BPF_STRUCT_OPS(fcg_cgroup_move, struct task_struct *p,
		    struct cgroup *from, struct cgroup *to)
{
	struct fcg_cgrp_ctx *from_cgc, *to_cgc;
	s64 vtime_delta;

	/* find_cgrp_ctx() triggers scx_ops_error() on lookup failures */
	if (!(from_cgc = find_cgrp_ctx(from)) || !(to_cgc = find_cgrp_ctx(to)))
		return;

	vtime_delta = p->scx.dsq_vtime - from_cgc->tvtime_now;
	p->scx.dsq_vtime = to_cgc->tvtime_now + vtime_delta;
}

s32 BPF_STRUCT_OPS_SLEEPABLE(fcg_init)
{
	return scx_bpf_create_dsq(FALLBACK_DSQ, -1);
}

void BPF_STRUCT_OPS(fcg_exit, struct scx_exit_info *ei)
{
	UEI_RECORD(uei, ei);
}

SCX_OPS_DEFINE(flatcg_ops,
	       .select_cpu		= (void *)fcg_select_cpu,
	       .enqueue			= (void *)fcg_enqueue,
	       .dispatch		= (void *)fcg_dispatch,
	       .runnable		= (void *)fcg_runnable,
	       .running			= (void *)fcg_running,
	       .stopping		= (void *)fcg_stopping,
	       .quiescent		= (void *)fcg_quiescent,
	       .init_task		= (void *)fcg_init_task,
	       .cgroup_set_weight	= (void *)fcg_cgroup_set_weight,
	       .cgroup_init		= (void *)fcg_cgroup_init,
	       .cgroup_exit		= (void *)fcg_cgroup_exit,
	       .cgroup_move		= (void *)fcg_cgroup_move,
	       .init			= (void *)fcg_init,
	       .exit			= (void *)fcg_exit,
	       .flags			= SCX_OPS_HAS_CGROUP_WEIGHT | SCX_OPS_ENQ_EXITING,
	       .name			= "flatcg");