/* SPDX-License-Identifier: GPL-2.0 */
/*
 * A demo sched_ext flattened cgroup hierarchy scheduler. It implements
 * hierarchical weight-based cgroup CPU control by flattening the cgroup
 * hierarchy into a single layer by compounding the active weight share at each
 * level. Consider the following hierarchy with weights in parentheses:
 *
 * R + A (100) + B (100)
 *   |         \ C (100)
 *   \ D (200)
 *
 * Ignoring the root and threaded cgroups, only B, C and D can contain tasks.
 * Let's say all three have runnable tasks. The total share that each of these
 * three cgroups is entitled to can be calculated by compounding its share at
 * each level.
 *
 * For example, B is competing against C and in that competition its share is
 * 100/(100+100) == 1/2. At its parent level, A is competing against D and A's
 * share in that competition is 100/(200+100) == 1/3. B's eventual share in the
 * system can be calculated by multiplying the two shares, 1/2 * 1/3 == 1/6. C's
 * eventual share is the same at 1/6. D is only competing at the top level and
 * its share is 200/(100+200) == 2/3.
 *
 * So, instead of hierarchically scheduling level-by-level, we can consider it
 * as B, C and D competing with each other with respective shares of 1/6, 1/6
 * and 2/3, and keep updating the eventual shares as the cgroups' runnable
 * states change.
 *
 * This flattening of the hierarchy can bring a substantial performance gain
 * when the cgroup hierarchy is nested multiple levels deep. In a simple
 * benchmark using wrk[8] on apache serving a CGI script calculating sha1sum of
 * a small file, it outperforms CFS by ~3% with the CPU controller disabled and
 * by ~10% with two apache instances competing with a 2:1 weight ratio nested
 * four levels deep.
 *
 * However, the gain comes at the cost of not being able to properly handle
 * thundering herds of cgroups. For example, if many cgroups which are nested
 * behind a low priority parent cgroup wake up around the same time, they may be
 * able to consume more CPU cycles than they are entitled to. In many use cases,
 * this isn't a real concern especially given the performance gain. Also, there
 * are ways to mitigate the problem further by e.g. introducing an extra
 * scheduling layer on cgroup delegation boundaries.
 *
 * The scheduler first picks the cgroup to run and then schedules the tasks
 * within using nested weighted vtime scheduling by default. The
 * cgroup-internal scheduling can be switched to FIFO with the -f option.
 */
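/*
 * As a concrete example in the fixed-point units used below, where
 * FCG_HWEIGHT_ONE (the value assigned to the root) stands for a full 100%
 * share, the hierarchy above works out roughly to:
 *
 *	hweight(A) = FCG_HWEIGHT_ONE * 100 / (100 + 200)	-> ~1/3
 *	hweight(B) = hweight(A) * 100 / (100 + 100)		-> ~1/6
 *	hweight(C) = hweight(A) * 100 / (100 + 100)		-> ~1/6
 *	hweight(D) = FCG_HWEIGHT_ONE * 200 / (100 + 200)	-> ~2/3
 *
 * cgrp_refresh_hweight() below computes exactly this compounding, one ancestor
 * level at a time, using only the currently active children in each sum.
 */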
#include <scx/common.bpf.h>
#include "scx_flatcg.h"

/*
 * Maximum number of retries to find a valid cgroup.
 */
enum {
	FALLBACK_DSQ		= 0,
	CGROUP_MAX_RETRIES	= 1024,
};

char _license[] SEC("license") = "GPL";

const volatile u32 nr_cpus = 32;	/* !0 for veristat, set during init */
const volatile u64 cgrp_slice_ns = SCX_SLICE_DFL;
const volatile bool fifo_sched;

u64 cvtime_now;
UEI_DEFINE(uei);

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__type(key, u32);
	__type(value, u64);
	__uint(max_entries, FCG_NR_STATS);
} stats SEC(".maps");

static void stat_inc(enum fcg_stat_idx idx)
{
	u32 idx_v = idx;

	u64 *cnt_p = bpf_map_lookup_elem(&stats, &idx_v);
	if (cnt_p)
		(*cnt_p)++;
}

struct fcg_cpu_ctx {
	u64			cur_cgid;
	u64			cur_at;
};

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__type(key, u32);
	__type(value, struct fcg_cpu_ctx);
	__uint(max_entries, 1);
} cpu_ctx SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, struct fcg_cgrp_ctx);
} cgrp_ctx SEC(".maps");

struct cgv_node {
	struct bpf_rb_node	rb_node;
	__u64			cvtime;
	__u64			cgid;
};

private(CGV_TREE) struct bpf_spin_lock cgv_tree_lock;
private(CGV_TREE) struct bpf_rb_root cgv_tree __contains(cgv_node, rb_node);

struct cgv_node_stash {
	struct cgv_node __kptr *node;
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 16384);
	__type(key, __u64);
	__type(value, struct cgv_node_stash);
} cgv_node_stash SEC(".maps");

struct fcg_task_ctx {
	u64		bypassed_at;
};

struct {
	__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, struct fcg_task_ctx);
} task_ctx SEC(".maps");

/* gets inc'd on weight tree changes to expire the cached hweights */
u64 hweight_gen = 1;

static u64 div_round_up(u64 dividend, u64 divisor)
{
	return (dividend + divisor - 1) / divisor;
}

static bool vtime_before(u64 a, u64 b)
{
	return (s64)(a - b) < 0;
}
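/*
 * Note that vtime_before() compares through a signed difference rather than
 * with "a < b" directly, so it stays correct if the u64 vtimes ever wrap:
 * e.g. vtime_before(ULLONG_MAX, 1) is true because (s64)(ULLONG_MAX - 1) is
 * negative. This is the same idiom CFS uses for vruntime comparisons.
 */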

static bool cgv_node_less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
{
	struct cgv_node *cgc_a, *cgc_b;

	cgc_a = container_of(a, struct cgv_node, rb_node);
	cgc_b = container_of(b, struct cgv_node, rb_node);

	return cgc_a->cvtime < cgc_b->cvtime;
}

static struct fcg_cpu_ctx *find_cpu_ctx(void)
{
	struct fcg_cpu_ctx *cpuc;
	u32 idx = 0;

	cpuc = bpf_map_lookup_elem(&cpu_ctx, &idx);
	if (!cpuc) {
		scx_bpf_error("cpu_ctx lookup failed");
		return NULL;
	}
	return cpuc;
}

static struct fcg_cgrp_ctx *find_cgrp_ctx(struct cgroup *cgrp)
{
	struct fcg_cgrp_ctx *cgc;

	cgc = bpf_cgrp_storage_get(&cgrp_ctx, cgrp, 0, 0);
	if (!cgc) {
		scx_bpf_error("cgrp_ctx lookup failed for cgid %llu", cgrp->kn->id);
		return NULL;
	}
	return cgc;
}

static struct fcg_cgrp_ctx *find_ancestor_cgrp_ctx(struct cgroup *cgrp, int level)
{
	struct fcg_cgrp_ctx *cgc;

	cgrp = bpf_cgroup_ancestor(cgrp, level);
	if (!cgrp) {
		scx_bpf_error("ancestor cgroup lookup failed");
		return NULL;
	}

	cgc = find_cgrp_ctx(cgrp);
	if (!cgc)
		scx_bpf_error("ancestor cgrp_ctx lookup failed");
	bpf_cgroup_release(cgrp);
	return cgc;
}

static void cgrp_refresh_hweight(struct cgroup *cgrp, struct fcg_cgrp_ctx *cgc)
{
	int level;

	if (!cgc->nr_active) {
		stat_inc(FCG_STAT_HWT_SKIP);
		return;
	}

	if (cgc->hweight_gen == hweight_gen) {
		stat_inc(FCG_STAT_HWT_CACHE);
		return;
	}

	stat_inc(FCG_STAT_HWT_UPDATES);
	bpf_for(level, 0, cgrp->level + 1) {
		struct fcg_cgrp_ctx *cgc;
		bool is_active;

		cgc = find_ancestor_cgrp_ctx(cgrp, level);
		if (!cgc)
			break;

		if (!level) {
			cgc->hweight = FCG_HWEIGHT_ONE;
			cgc->hweight_gen = hweight_gen;
		} else {
			struct fcg_cgrp_ctx *pcgc;

			pcgc = find_ancestor_cgrp_ctx(cgrp, level - 1);
			if (!pcgc)
				break;

			/*
			 * We can be opportunistic here and not grab the
			 * cgv_tree_lock and deal with the occasional races.
			 * However, hweight updates are already cached and
			 * relatively low-frequency. Let's just do the
			 * straightforward thing.
			 */
			bpf_spin_lock(&cgv_tree_lock);
			is_active = cgc->nr_active;
			if (is_active) {
				cgc->hweight_gen = pcgc->hweight_gen;
				cgc->hweight =
					div_round_up(pcgc->hweight * cgc->weight,
						     pcgc->child_weight_sum);
			}
			bpf_spin_unlock(&cgv_tree_lock);

			if (!is_active) {
				stat_inc(FCG_STAT_HWT_RACE);
				break;
			}
		}
	}
}
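/*
 * Note that the walk above starts at the root (level 0) and works towards
 * @cgrp, so a parent's hweight is always refreshed for the current
 * hweight_gen before it is used to compound a child's. With the hierarchy
 * from the header comment, one refresh of B touches R, A and B in that order
 * and leaves all three caches valid until the next hweight_gen bump.
 */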

static void cgrp_cap_budget(struct cgv_node *cgv_node, struct fcg_cgrp_ctx *cgc)
{
	u64 delta, cvtime, max_budget;

	/*
	 * A node which is on the rbtree can't be pointed to from elsewhere yet
	 * and thus can't be updated and repositioned. Instead, we collect the
	 * vtime deltas separately and apply them asynchronously here.
	 */
	delta = __sync_fetch_and_sub(&cgc->cvtime_delta, cgc->cvtime_delta);
	cvtime = cgv_node->cvtime + delta;

	/*
	 * Allow a cgroup to carry the maximum budget proportional to its
	 * hweight such that a full-hweight cgroup can immediately take up half
	 * of the CPUs at the most while staying at the front of the rbtree.
	 */
	max_budget = (cgrp_slice_ns * nr_cpus * cgc->hweight) /
		(2 * FCG_HWEIGHT_ONE);
	if (vtime_before(cvtime, cvtime_now - max_budget))
		cvtime = cvtime_now - max_budget;

	cgv_node->cvtime = cvtime;
}
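/*
 * As a concrete example, assuming the default 20ms SCX_SLICE_DFL for
 * cgrp_slice_ns and nr_cpus == 32: a full-hweight cgroup's cvtime may lag
 * cvtime_now by at most 32 * 20ms / 2 == 320ms. Since each pick charges such
 * a cgroup 20ms of cvtime (see try_pick_next_cgroup()), that banked budget is
 * worth 16 back-to-back picks, i.e. half of the CPUs, matching the comment
 * above.
 */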

static void cgrp_enqueued(struct cgroup *cgrp, struct fcg_cgrp_ctx *cgc)
{
	struct cgv_node_stash *stash;
	struct cgv_node *cgv_node;
	u64 cgid = cgrp->kn->id;

	/* paired with cmpxchg in try_pick_next_cgroup() */
	if (__sync_val_compare_and_swap(&cgc->queued, 0, 1)) {
		stat_inc(FCG_STAT_ENQ_SKIP);
		return;
	}

	stash = bpf_map_lookup_elem(&cgv_node_stash, &cgid);
	if (!stash) {
		scx_bpf_error("cgv_node lookup failed for cgid %llu", cgid);
		return;
	}

	/* NULL if the node is already on the rbtree */
	cgv_node = bpf_kptr_xchg(&stash->node, NULL);
	if (!cgv_node) {
		stat_inc(FCG_STAT_ENQ_RACE);
		return;
	}

	bpf_spin_lock(&cgv_tree_lock);
	cgrp_cap_budget(cgv_node, cgc);
	bpf_rbtree_add(&cgv_tree, &cgv_node->rb_node, cgv_node_less);
	bpf_spin_unlock(&cgv_tree_lock);
}

static void set_bypassed_at(struct task_struct *p, struct fcg_task_ctx *taskc)
{
	/*
	 * Tell fcg_stopping() that this bypassed the regular scheduling path
	 * and should be force charged to the cgroup. 0 is used to indicate that
	 * the task isn't bypassing, so if the current runtime is 0, go back by
	 * one nanosecond.
	 */
	taskc->bypassed_at = p->se.sum_exec_runtime ?: (u64)-1;
}

s32 BPF_STRUCT_OPS(fcg_select_cpu, struct task_struct *p, s32 prev_cpu, u64 wake_flags)
{
	struct fcg_task_ctx *taskc;
	bool is_idle = false;
	s32 cpu;

	cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);

	taskc = bpf_task_storage_get(&task_ctx, p, 0, 0);
	if (!taskc) {
		scx_bpf_error("task_ctx lookup failed");
		return cpu;
	}

	/*
	 * If select_cpu_dfl() is recommending local enqueue, the target CPU is
	 * idle. Follow it and charge the cgroup later in fcg_stopping() after
	 * the fact.
	 */
	if (is_idle) {
		set_bypassed_at(p, taskc);
		stat_inc(FCG_STAT_LOCAL);
		scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
	}

	return cpu;
}

void BPF_STRUCT_OPS(fcg_enqueue, struct task_struct *p, u64 enq_flags)
{
	struct fcg_task_ctx *taskc;
	struct cgroup *cgrp;
	struct fcg_cgrp_ctx *cgc;

	taskc = bpf_task_storage_get(&task_ctx, p, 0, 0);
	if (!taskc) {
		scx_bpf_error("task_ctx lookup failed");
		return;
	}

	/*
	 * Use direct dispatching and force charging to deal with tasks with
	 * custom affinities so that we don't have to worry about per-cgroup
	 * dq's containing tasks that can't be executed from some CPUs.
	 */
	if (p->nr_cpus_allowed != nr_cpus) {
		set_bypassed_at(p, taskc);

		/*
		 * The global dq is deprioritized as we don't want to let tasks
		 * boost themselves by constraining their cpumasks. The
		 * deprioritization is rather severe, so let's not apply that to
		 * per-cpu kernel threads. This is ham-fisted. We probably wanna
		 * implement per-cgroup fallback dq's instead so that we have
		 * more control over when tasks with custom cpumasks get issued.
		 */
		if (p->nr_cpus_allowed == 1 && (p->flags & PF_KTHREAD)) {
			stat_inc(FCG_STAT_LOCAL);
			scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, enq_flags);
		} else {
			stat_inc(FCG_STAT_GLOBAL);
			scx_bpf_dispatch(p, FALLBACK_DSQ, SCX_SLICE_DFL, enq_flags);
		}
		return;
	}

	cgrp = __COMPAT_scx_bpf_task_cgroup(p);
	cgc = find_cgrp_ctx(cgrp);
	if (!cgc)
		goto out_release;

	if (fifo_sched) {
		scx_bpf_dispatch(p, cgrp->kn->id, SCX_SLICE_DFL, enq_flags);
	} else {
		u64 tvtime = p->scx.dsq_vtime;

		/*
		 * Limit the amount of budget that an idling task can accumulate
		 * to one slice.
		 */
		if (vtime_before(tvtime, cgc->tvtime_now - SCX_SLICE_DFL))
			tvtime = cgc->tvtime_now - SCX_SLICE_DFL;

		scx_bpf_dispatch_vtime(p, cgrp->kn->id, SCX_SLICE_DFL,
				       tvtime, enq_flags);
	}

	cgrp_enqueued(cgrp, cgc);
out_release:
	bpf_cgroup_release(cgrp);
}

/*
 * Walk the cgroup tree to update the active weight sums as tasks wake up and
 * sleep. The weight sums are used as the base when calculating the proportion a
 * given cgroup or task is entitled to at each level.
 */
static void update_active_weight_sums(struct cgroup *cgrp, bool runnable)
{
	struct fcg_cgrp_ctx *cgc;
	bool updated = false;
	int idx;

	cgc = find_cgrp_ctx(cgrp);
	if (!cgc)
		return;

	/*
	 * In most cases, a hot cgroup would have multiple threads going to
	 * sleep and waking up while the whole cgroup stays active. In leaf
	 * cgroups, ->nr_runnable which is updated with __sync operations gates
	 * ->nr_active updates, so that we don't have to grab the cgv_tree_lock
	 * repeatedly for a busy cgroup which is staying active.
	 */
	if (runnable) {
		if (__sync_fetch_and_add(&cgc->nr_runnable, 1))
			return;
		stat_inc(FCG_STAT_ACT);
	} else {
		if (__sync_sub_and_fetch(&cgc->nr_runnable, 1))
			return;
		stat_inc(FCG_STAT_DEACT);
	}

	/*
	 * If @cgrp is becoming runnable, its hweight should be refreshed after
	 * it's added to the weight tree so that enqueue has the up-to-date
	 * value. If @cgrp is becoming quiescent, the hweight should be
	 * refreshed before it's removed from the weight tree so that the usage
	 * charging which happens afterwards has access to the latest value.
	 */
	if (!runnable)
		cgrp_refresh_hweight(cgrp, cgc);

	/* propagate upwards */
	bpf_for(idx, 0, cgrp->level) {
		int level = cgrp->level - idx;
		struct fcg_cgrp_ctx *cgc, *pcgc = NULL;
		bool propagate = false;

		cgc = find_ancestor_cgrp_ctx(cgrp, level);
		if (!cgc)
			break;
		if (level) {
			pcgc = find_ancestor_cgrp_ctx(cgrp, level - 1);
			if (!pcgc)
				break;
		}

		/*
		 * We need the propagation protected by a lock to synchronize
		 * against weight changes. There's no reason to drop the lock at
		 * each level but bpf_spin_lock() doesn't want any function
		 * calls while locked.
		 */
		bpf_spin_lock(&cgv_tree_lock);

		if (runnable) {
			if (!cgc->nr_active++) {
				updated = true;
				if (pcgc) {
					propagate = true;
					pcgc->child_weight_sum += cgc->weight;
				}
			}
		} else {
			if (!--cgc->nr_active) {
				updated = true;
				if (pcgc) {
					propagate = true;
					pcgc->child_weight_sum -= cgc->weight;
				}
			}
		}

		bpf_spin_unlock(&cgv_tree_lock);

		if (!propagate)
			break;
	}

	if (updated)
		__sync_fetch_and_add(&hweight_gen, 1);

	if (runnable)
		cgrp_refresh_hweight(cgrp, cgc);
}
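/*
 * Whenever the propagation above changes any nr_active / child_weight_sum,
 * hweight_gen is bumped, which lazily invalidates every cached hweight:
 * cgrp_refresh_hweight() recomputes a cgroup only when its hweight_gen falls
 * behind the global counter. For example, when C from the header hierarchy
 * goes idle, B's cached hweight roughly doubles from ~1/6 to ~1/3 the next
 * time it is refreshed, without eagerly touching any other cgroup.
 */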

void BPF_STRUCT_OPS(fcg_runnable, struct task_struct *p, u64 enq_flags)
{
	struct cgroup *cgrp;

	cgrp = __COMPAT_scx_bpf_task_cgroup(p);
	update_active_weight_sums(cgrp, true);
	bpf_cgroup_release(cgrp);
}

void BPF_STRUCT_OPS(fcg_running, struct task_struct *p)
{
	struct cgroup *cgrp;
	struct fcg_cgrp_ctx *cgc;

	if (fifo_sched)
		return;

	cgrp = __COMPAT_scx_bpf_task_cgroup(p);
	cgc = find_cgrp_ctx(cgrp);
	if (cgc) {
		/*
		 * @cgc->tvtime_now always progresses forward as tasks start
		 * executing. The test and update can be performed concurrently
		 * from multiple CPUs and are thus racy. Any error should be
		 * contained and temporary. Let's just live with it.
		 */
		if (vtime_before(cgc->tvtime_now, p->scx.dsq_vtime))
			cgc->tvtime_now = p->scx.dsq_vtime;
	}
	bpf_cgroup_release(cgrp);
}

void BPF_STRUCT_OPS(fcg_stopping, struct task_struct *p, bool runnable)
{
	struct fcg_task_ctx *taskc;
	struct cgroup *cgrp;
	struct fcg_cgrp_ctx *cgc;

	/*
	 * Scale the execution time by the inverse of the weight and charge.
	 *
	 * Note that the default yield implementation yields by setting
	 * @p->scx.slice to zero and the following would treat the yielding task
	 * as if it has consumed all its slice. If this penalizes yielding tasks
	 * too much, determine the execution time by taking explicit timestamps
	 * instead of depending on @p->scx.slice.
	 */
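	/*
	 * For example, with the default weight of 100, a task that consumed
	 * its full SCX_SLICE_DFL advances its vtime by exactly one slice,
	 * while a weight-200 task advances by only half a slice for the same
	 * CPU time, letting it run twice as often within its cgroup.
	 */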
	if (!fifo_sched)
		p->scx.dsq_vtime +=
			(SCX_SLICE_DFL - p->scx.slice) * 100 / p->scx.weight;

	taskc = bpf_task_storage_get(&task_ctx, p, 0, 0);
	if (!taskc) {
		scx_bpf_error("task_ctx lookup failed");
		return;
	}

	if (!taskc->bypassed_at)
		return;

	cgrp = __COMPAT_scx_bpf_task_cgroup(p);
	cgc = find_cgrp_ctx(cgrp);
	if (cgc) {
		__sync_fetch_and_add(&cgc->cvtime_delta,
				     p->se.sum_exec_runtime - taskc->bypassed_at);
		taskc->bypassed_at = 0;
	}
	bpf_cgroup_release(cgrp);
}

void BPF_STRUCT_OPS(fcg_quiescent, struct task_struct *p, u64 deq_flags)
{
	struct cgroup *cgrp;

	cgrp = __COMPAT_scx_bpf_task_cgroup(p);
	update_active_weight_sums(cgrp, false);
	bpf_cgroup_release(cgrp);
}

void BPF_STRUCT_OPS(fcg_cgroup_set_weight, struct cgroup *cgrp, u32 weight)
{
	struct fcg_cgrp_ctx *cgc, *pcgc = NULL;

	cgc = find_cgrp_ctx(cgrp);
	if (!cgc)
		return;

	if (cgrp->level) {
		pcgc = find_ancestor_cgrp_ctx(cgrp, cgrp->level - 1);
		if (!pcgc)
			return;
	}

	bpf_spin_lock(&cgv_tree_lock);
	if (pcgc && cgc->nr_active)
		pcgc->child_weight_sum += (s64)weight - cgc->weight;
	cgc->weight = weight;
	bpf_spin_unlock(&cgv_tree_lock);
}

static bool try_pick_next_cgroup(u64 *cgidp)
{
	struct bpf_rb_node *rb_node;
	struct cgv_node_stash *stash;
	struct cgv_node *cgv_node;
	struct fcg_cgrp_ctx *cgc;
	struct cgroup *cgrp;
	u64 cgid;

	/* pop the front cgroup and wind cvtime_now accordingly */
	bpf_spin_lock(&cgv_tree_lock);

	rb_node = bpf_rbtree_first(&cgv_tree);
	if (!rb_node) {
		bpf_spin_unlock(&cgv_tree_lock);
		stat_inc(FCG_STAT_PNC_NO_CGRP);
		*cgidp = 0;
		return true;
	}

	rb_node = bpf_rbtree_remove(&cgv_tree, rb_node);
	bpf_spin_unlock(&cgv_tree_lock);

	if (!rb_node) {
		/*
		 * This should never happen. bpf_rbtree_first() was called
		 * above while the tree lock was held, so the node should
		 * always be present.
		 */
		scx_bpf_error("node could not be removed");
		return true;
	}

	cgv_node = container_of(rb_node, struct cgv_node, rb_node);
	cgid = cgv_node->cgid;

	if (vtime_before(cvtime_now, cgv_node->cvtime))
		cvtime_now = cgv_node->cvtime;

	/*
	 * If lookup fails, the cgroup's gone. Free and move on. See
	 * fcg_cgroup_exit().
	 */
	cgrp = bpf_cgroup_from_id(cgid);
	if (!cgrp) {
		stat_inc(FCG_STAT_PNC_GONE);
		goto out_free;
	}

	cgc = bpf_cgrp_storage_get(&cgrp_ctx, cgrp, 0, 0);
	if (!cgc) {
		bpf_cgroup_release(cgrp);
		stat_inc(FCG_STAT_PNC_GONE);
		goto out_free;
	}

	if (!scx_bpf_consume(cgid)) {
		bpf_cgroup_release(cgrp);
		stat_inc(FCG_STAT_PNC_EMPTY);
		goto out_stash;
	}

	/*
	 * Successfully consumed from the cgroup. This will be our current
	 * cgroup for the new slice. Refresh its hweight.
	 */
	cgrp_refresh_hweight(cgrp, cgc);

	bpf_cgroup_release(cgrp);

	/*
	 * As the cgroup may have more tasks, add it back to the rbtree. Note
	 * that here we charge the full slice upfront and then adjust it later
	 * according to the actual consumption. This prevents a lowpri
	 * thundering herd from saturating the machine.
	 */
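	/*
	 * Concretely, with the default 20ms cgrp_slice_ns, a full-hweight
	 * cgroup pays 20ms of cvtime per pick while a 1/6-hweight cgroup (B or
	 * C in the header example) pays about 120ms, so the rbtree naturally
	 * picks the latter six times less often for the same wall-clock usage.
	 */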
	bpf_spin_lock(&cgv_tree_lock);
	cgv_node->cvtime += cgrp_slice_ns * FCG_HWEIGHT_ONE / (cgc->hweight ?: 1);
	cgrp_cap_budget(cgv_node, cgc);
	bpf_rbtree_add(&cgv_tree, &cgv_node->rb_node, cgv_node_less);
	bpf_spin_unlock(&cgv_tree_lock);

	*cgidp = cgid;
	stat_inc(FCG_STAT_PNC_NEXT);
	return true;

out_stash:
	stash = bpf_map_lookup_elem(&cgv_node_stash, &cgid);
	if (!stash) {
		stat_inc(FCG_STAT_PNC_GONE);
		goto out_free;
	}

	/*
	 * Paired with cmpxchg in cgrp_enqueued(). If they see the following
	 * transition, they'll enqueue the cgroup. If they are earlier, we'll
	 * see their task in the dq below and requeue the cgroup.
	 */
	__sync_val_compare_and_swap(&cgc->queued, 1, 0);

	if (scx_bpf_dsq_nr_queued(cgid)) {
		bpf_spin_lock(&cgv_tree_lock);
		bpf_rbtree_add(&cgv_tree, &cgv_node->rb_node, cgv_node_less);
		bpf_spin_unlock(&cgv_tree_lock);
		stat_inc(FCG_STAT_PNC_RACE);
	} else {
		cgv_node = bpf_kptr_xchg(&stash->node, cgv_node);
		if (cgv_node) {
			scx_bpf_error("unexpected !NULL cgv_node stash");
			goto out_free;
		}
	}

	return false;

out_free:
	bpf_obj_drop(cgv_node);
	return false;
}

void BPF_STRUCT_OPS(fcg_dispatch, s32 cpu, struct task_struct *prev)
{
	struct fcg_cpu_ctx *cpuc;
	struct fcg_cgrp_ctx *cgc;
	struct cgroup *cgrp;
	u64 now = bpf_ktime_get_ns();
	bool picked_next = false;

	cpuc = find_cpu_ctx();
	if (!cpuc)
		return;

	if (!cpuc->cur_cgid)
		goto pick_next_cgroup;

	if (vtime_before(now, cpuc->cur_at + cgrp_slice_ns)) {
		if (scx_bpf_consume(cpuc->cur_cgid)) {
			stat_inc(FCG_STAT_CNS_KEEP);
			return;
		}
		stat_inc(FCG_STAT_CNS_EMPTY);
	} else {
		stat_inc(FCG_STAT_CNS_EXPIRE);
	}

	/*
	 * The current cgroup is expiring. It was already charged a full slice.
	 * Calculate the actual usage and accumulate the delta.
	 */
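	/*
	 * The unused remainder of the window, (cur_at + cgrp_slice_ns - now),
	 * is scaled by FCG_HWEIGHT_ONE / hweight, the same factor as the
	 * upfront charge in try_pick_next_cgroup(), and credited back through
	 * cvtime_delta. A cgroup that empties out e.g. 5ms into a 20ms slice
	 * thus nets out to roughly a 5ms charge.
	 */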
	cgrp = bpf_cgroup_from_id(cpuc->cur_cgid);
	if (!cgrp) {
		stat_inc(FCG_STAT_CNS_GONE);
		goto pick_next_cgroup;
	}

	cgc = bpf_cgrp_storage_get(&cgrp_ctx, cgrp, 0, 0);
	if (cgc) {
		/*
		 * We want to update the vtime delta and then look for the next
		 * cgroup to execute but the latter needs to be done in a loop
		 * and we can't keep the lock held. Oh well...
		 */
		bpf_spin_lock(&cgv_tree_lock);
		__sync_fetch_and_sub(&cgc->cvtime_delta,
				     (cpuc->cur_at + cgrp_slice_ns - now) *
				     FCG_HWEIGHT_ONE / (cgc->hweight ?: 1));
		bpf_spin_unlock(&cgv_tree_lock);
	} else {
		stat_inc(FCG_STAT_CNS_GONE);
	}

	bpf_cgroup_release(cgrp);

pick_next_cgroup:
	cpuc->cur_at = now;

	if (scx_bpf_consume(FALLBACK_DSQ)) {
		cpuc->cur_cgid = 0;
		return;
	}

	bpf_repeat(CGROUP_MAX_RETRIES) {
		if (try_pick_next_cgroup(&cpuc->cur_cgid)) {
			picked_next = true;
			break;
		}
	}

	/*
	 * This only happens if try_pick_next_cgroup() races against the
	 * enqueue path more than CGROUP_MAX_RETRIES times, which is extremely
	 * unlikely and likely indicates an underlying bug. There shouldn't be
	 * any stall risk as the race is against enqueue.
	 */
	if (!picked_next)
		stat_inc(FCG_STAT_PNC_FAIL);
}

s32 BPF_STRUCT_OPS(fcg_init_task, struct task_struct *p,
		   struct scx_init_task_args *args)
{
	struct fcg_task_ctx *taskc;
	struct fcg_cgrp_ctx *cgc;

	/*
	 * @p is new. Let's ensure that its task_ctx is available. We can sleep
	 * in this function and the following will automatically use GFP_KERNEL.
	 */
	taskc = bpf_task_storage_get(&task_ctx, p, 0,
				     BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (!taskc)
		return -ENOMEM;

	taskc->bypassed_at = 0;

	if (!(cgc = find_cgrp_ctx(args->cgroup)))
		return -ENOENT;

	p->scx.dsq_vtime = cgc->tvtime_now;

	return 0;
}

int BPF_STRUCT_OPS_SLEEPABLE(fcg_cgroup_init, struct cgroup *cgrp,
			     struct scx_cgroup_init_args *args)
{
	struct fcg_cgrp_ctx *cgc;
	struct cgv_node *cgv_node;
	struct cgv_node_stash empty_stash = {}, *stash;
	u64 cgid = cgrp->kn->id;
	int ret;

	/*
	 * Technically incorrect as cgroup ID is full 64bit while dsq ID is
	 * 63bit. Should not be a problem in practice and easy to spot in the
	 * unlikely case that it breaks.
	 */
	ret = scx_bpf_create_dsq(cgid, -1);
	if (ret)
		return ret;

	cgc = bpf_cgrp_storage_get(&cgrp_ctx, cgrp, 0,
				   BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (!cgc) {
		ret = -ENOMEM;
		goto err_destroy_dsq;
	}

	cgc->weight = args->weight;
	cgc->hweight = FCG_HWEIGHT_ONE;

	ret = bpf_map_update_elem(&cgv_node_stash, &cgid, &empty_stash,
				  BPF_NOEXIST);
	if (ret) {
		if (ret != -ENOMEM)
			scx_bpf_error("unexpected stash creation error (%d)",
				      ret);
		goto err_destroy_dsq;
	}

	stash = bpf_map_lookup_elem(&cgv_node_stash, &cgid);
	if (!stash) {
		scx_bpf_error("unexpected cgv_node stash lookup failure");
		ret = -ENOENT;
		goto err_destroy_dsq;
	}

	cgv_node = bpf_obj_new(struct cgv_node);
	if (!cgv_node) {
		ret = -ENOMEM;
		goto err_del_cgv_node;
	}

	cgv_node->cgid = cgid;
	cgv_node->cvtime = cvtime_now;

	cgv_node = bpf_kptr_xchg(&stash->node, cgv_node);
	if (cgv_node) {
		scx_bpf_error("unexpected !NULL cgv_node stash");
		ret = -EBUSY;
		goto err_drop;
	}

	return 0;

err_drop:
	bpf_obj_drop(cgv_node);
err_del_cgv_node:
	bpf_map_delete_elem(&cgv_node_stash, &cgid);
err_destroy_dsq:
	scx_bpf_destroy_dsq(cgid);
	return ret;
}

void BPF_STRUCT_OPS(fcg_cgroup_exit, struct cgroup *cgrp)
{
	u64 cgid = cgrp->kn->id;

	/*
	 * For now, there's no way to find and remove the cgv_node if it's on
	 * the cgv_tree. Let's drain them in the dispatch path as they get
	 * popped off the front of the tree.
	 */
	bpf_map_delete_elem(&cgv_node_stash, &cgid);
	scx_bpf_destroy_dsq(cgid);
}

void BPF_STRUCT_OPS(fcg_cgroup_move, struct task_struct *p,
		    struct cgroup *from, struct cgroup *to)
{
	struct fcg_cgrp_ctx *from_cgc, *to_cgc;
	s64 vtime_delta;

	/* find_cgrp_ctx() triggers scx_ops_error() on lookup failures */
	if (!(from_cgc = find_cgrp_ctx(from)) || !(to_cgc = find_cgrp_ctx(to)))
		return;

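	/*
	 * Keep the task's relative position: if it was, say, 3ms ahead of the
	 * source cgroup's tvtime_now, it stays 3ms ahead of the destination's,
	 * so a cgroup migration neither grants extra vtime budget nor parks
	 * the task behind its new siblings.
	 */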
	vtime_delta = p->scx.dsq_vtime - from_cgc->tvtime_now;
	p->scx.dsq_vtime = to_cgc->tvtime_now + vtime_delta;
}

s32 BPF_STRUCT_OPS_SLEEPABLE(fcg_init)
{
	return scx_bpf_create_dsq(FALLBACK_DSQ, -1);
}

void BPF_STRUCT_OPS(fcg_exit, struct scx_exit_info *ei)
{
	UEI_RECORD(uei, ei);
}

SCX_OPS_DEFINE(flatcg_ops,
	       .select_cpu		= (void *)fcg_select_cpu,
	       .enqueue			= (void *)fcg_enqueue,
	       .dispatch		= (void *)fcg_dispatch,
	       .runnable		= (void *)fcg_runnable,
	       .running			= (void *)fcg_running,
	       .stopping		= (void *)fcg_stopping,
	       .quiescent		= (void *)fcg_quiescent,
	       .init_task		= (void *)fcg_init_task,
	       .cgroup_set_weight	= (void *)fcg_cgroup_set_weight,
	       .cgroup_init		= (void *)fcg_cgroup_init,
	       .cgroup_exit		= (void *)fcg_cgroup_exit,
	       .cgroup_move		= (void *)fcg_cgroup_move,
	       .init			= (void *)fcg_init,
	       .exit			= (void *)fcg_exit,
	       .flags			= SCX_OPS_HAS_CGROUP_WEIGHT | SCX_OPS_ENQ_EXITING,
	       .name			= "flatcg");