/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2024 Tejun Heo <tj@kernel.org>
 * Copyright (c) 2024 David Vernet <dvernet@meta.com>
 */
#ifndef __SCX_COMPAT_BPF_H
#define __SCX_COMPAT_BPF_H

#define __COMPAT_ENUM_OR_ZERO(__type, __ent)					\
({										\
	__type __ret = 0;							\
	if (bpf_core_enum_value_exists(__type, __ent))				\
		__ret = __ent;							\
	__ret;									\
})
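
/*
 * Example (illustrative sketch): evaluate to an enum value when the running
 * kernel defines it and to 0 otherwise, so the flag degrades to a no-op on
 * older kernels. SCX_ENQ_PREEMPT is used purely for illustration.
 *
 *	u64 enq_flags = __COMPAT_ENUM_OR_ZERO(enum scx_enq_flags, SCX_ENQ_PREEMPT);
 *
 *	scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
 */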

/*
 * v6.15: 950ad93df2fc ("bpf: add kfunc for populating cpumask bits")
 *
 * Compat macro will be dropped on v6.19 release.
 */
int bpf_cpumask_populate(struct cpumask *dst, void *src, size_t src__sz) __ksym __weak;

#define __COMPAT_bpf_cpumask_populate(cpumask, src, size__sz)		\
	(bpf_ksym_exists(bpf_cpumask_populate) ?			\
	 (bpf_cpumask_populate(cpumask, src, size__sz)) : -EOPNOTSUPP)
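
/*
 * Example (illustrative sketch): seed a bpf_cpumask from a fixed bitmap.
 * Assumes @mask was obtained from bpf_cpumask_create() and is non-NULL; the
 * 0xf bitmap and the bpf_cpumask_clear() fallback are illustrative only.
 *
 *	u64 bits = 0xf;		// CPUs 0-3
 *
 *	if (__COMPAT_bpf_cpumask_populate((struct cpumask *)mask, &bits,
 *					  sizeof(bits)) < 0)
 *		bpf_cpumask_clear(mask);	// kernel lacks the kfunc
 */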

/*
 * v6.19: Introduce lockless peek API for user DSQs.
 *
 * Preserve the following helper until v6.21.
 */
static inline struct task_struct *__COMPAT_scx_bpf_dsq_peek(u64 dsq_id)
{
	struct task_struct *p = NULL;
	struct bpf_iter_scx_dsq it;

	if (bpf_ksym_exists(scx_bpf_dsq_peek))
		return scx_bpf_dsq_peek(dsq_id);
	if (!bpf_iter_scx_dsq_new(&it, dsq_id, 0))
		p = bpf_iter_scx_dsq_next(&it);
	bpf_iter_scx_dsq_destroy(&it);
	return p;
}
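
/*
 * Example (illustrative sketch): look at the head of a user DSQ without
 * consuming it. MY_DSQ_ID and 'deadline' are placeholders; the DSQ is
 * assumed to have been created with scx_bpf_create_dsq().
 *
 *	struct task_struct *first = __COMPAT_scx_bpf_dsq_peek(MY_DSQ_ID);
 *
 *	if (first && first->scx.dsq_vtime < deadline)
 *		scx_bpf_dsq_move_to_local(MY_DSQ_ID);
 */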

/**
 * __COMPAT_is_enq_cpu_selected - Test if SCX_ENQ_CPU_SELECTED is on
 * @enq_flags: enqueue flags from ops.enqueue()
 *
 * Test whether %SCX_ENQ_CPU_SELECTED is set in @enq_flags in a backward
 * compatible way. This __COMPAT helper will be preserved until v6.16.
 *
 * Return: True if %SCX_ENQ_CPU_SELECTED is turned on in @enq_flags
 */
static inline bool __COMPAT_is_enq_cpu_selected(u64 enq_flags)
{
#ifdef HAVE_SCX_ENQ_CPU_SELECTED
	/*
	 * This is the case where the BPF code was compiled against a
	 * vmlinux.h in which the enum SCX_ENQ_CPU_SELECTED exists.
	 */

	/*
	 * Temporarily suspend the macro expansion of 'SCX_ENQ_CPU_SELECTED'.
	 * This avoids 'SCX_ENQ_CPU_SELECTED' being rewritten to
	 * '__SCX_ENQ_CPU_SELECTED' when 'SCX_ENQ_CPU_SELECTED' is defined in
	 * 'scripts/gen_enums.py'.
	 */
#pragma push_macro("SCX_ENQ_CPU_SELECTED")
#undef SCX_ENQ_CPU_SELECTED
	u64 flag;

	/*
	 * When the kernel did not have SCX_ENQ_CPU_SELECTED,
	 * select_task_rq_scx() was never skipped. Thus, this case should be
	 * treated as if the CPU has already been selected.
	 */
	if (!bpf_core_enum_value_exists(enum scx_enq_flags,
					SCX_ENQ_CPU_SELECTED))
		return true;

	flag = bpf_core_enum_value(enum scx_enq_flags, SCX_ENQ_CPU_SELECTED);
	return enq_flags & flag;

	/*
	 * Once done, resume the macro expansion of 'SCX_ENQ_CPU_SELECTED'.
	 */
#pragma pop_macro("SCX_ENQ_CPU_SELECTED")
#else
	/*
	 * This is the case where the BPF code was compiled against a
	 * vmlinux.h in which the enum SCX_ENQ_CPU_SELECTED does NOT exist.
	 */
	return true;
#endif /* HAVE_SCX_ENQ_CPU_SELECTED */
}
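
/*
 * Example (illustrative sketch): skip the idle-CPU search in ops.enqueue()
 * when ops.select_cpu() already ran for this wakeup. 'myched_enqueue' is a
 * placeholder name.
 *
 *	void BPF_STRUCT_OPS(myched_enqueue, struct task_struct *p, u64 enq_flags)
 *	{
 *		if (!__COMPAT_is_enq_cpu_selected(enq_flags)) {
 *			// select_task_rq_scx() was skipped; pick a CPU here
 *			...
 *		}
 *		scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
 *	}
 */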

/*
 * Fall back to bpf_ktime_get_ns() on kernels that don't have the
 * scx_bpf_now() kfunc.
 */
#define scx_bpf_now()								\
	(bpf_ksym_exists(scx_bpf_now) ?						\
	 scx_bpf_now() :							\
	 bpf_ktime_get_ns())
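
/*
 * Example (illustrative sketch): measure how long a task sat queued.
 * 'taskc' and its 'enq_ts' field are hypothetical per-task storage, not
 * part of the API.
 *
 *	taskc->enq_ts = scx_bpf_now();		// in ops.enqueue()
 *	...
 *	delay = scx_bpf_now() - taskc->enq_ts;	// in ops.running()
 */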

/*
 * v6.15: Introduce event counters.
 *
 * Preserve the following macro until v6.17.
 */
#define __COMPAT_scx_bpf_events(events, size)					\
	(bpf_ksym_exists(scx_bpf_events) ?					\
	 scx_bpf_events(events, size) : ({}))
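
/*
 * Example (illustrative sketch): periodically snapshot the event counters.
 * The SCX_EV_SELECT_CPU_FALLBACK field name assumes the scx_event_stats
 * layout exposed by vmlinux.h; on kernels without scx_bpf_events(),
 * 'events' stays zero-initialized.
 *
 *	struct scx_event_stats events = {};
 *
 *	__COMPAT_scx_bpf_events(&events, sizeof(events));
 *	bpf_printk("select_cpu fallbacks: %lld",
 *		   events.SCX_EV_SELECT_CPU_FALLBACK);
 */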

/*
 * v6.15: Introduce NUMA-aware kfuncs to operate with per-node idle
 * cpumasks.
 *
 * Preserve the following __COMPAT_scx_*_node macros until v6.17.
 */
#define __COMPAT_scx_bpf_nr_node_ids()						\
	(bpf_ksym_exists(scx_bpf_nr_node_ids) ?					\
	 scx_bpf_nr_node_ids() : 1U)

#define __COMPAT_scx_bpf_cpu_node(cpu)						\
	(bpf_ksym_exists(scx_bpf_cpu_node) ?					\
	 scx_bpf_cpu_node(cpu) : 0)

#define __COMPAT_scx_bpf_get_idle_cpumask_node(node)				\
	(bpf_ksym_exists(scx_bpf_get_idle_cpumask_node) ?			\
	 scx_bpf_get_idle_cpumask_node(node) :					\
	 scx_bpf_get_idle_cpumask())

#define __COMPAT_scx_bpf_get_idle_smtmask_node(node)				\
	(bpf_ksym_exists(scx_bpf_get_idle_smtmask_node) ?			\
	 scx_bpf_get_idle_smtmask_node(node) :					\
	 scx_bpf_get_idle_smtmask())

#define __COMPAT_scx_bpf_pick_idle_cpu_node(cpus_allowed, node, flags)		\
	(bpf_ksym_exists(scx_bpf_pick_idle_cpu_node) ?				\
	 scx_bpf_pick_idle_cpu_node(cpus_allowed, node, flags) :		\
	 scx_bpf_pick_idle_cpu(cpus_allowed, flags))

#define __COMPAT_scx_bpf_pick_any_cpu_node(cpus_allowed, node, flags)		\
	(bpf_ksym_exists(scx_bpf_pick_any_cpu_node) ?				\
	 scx_bpf_pick_any_cpu_node(cpus_allowed, node, flags) :			\
	 scx_bpf_pick_any_cpu(cpus_allowed, flags))
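
/*
 * Example (illustrative sketch): prefer an idle CPU on the task's current
 * NUMA node, then fall back to any idle CPU. On pre-v6.15 kernels the _node
 * variants transparently degrade to their flat counterparts.
 *
 *	int node = __COMPAT_scx_bpf_cpu_node(scx_bpf_task_cpu(p));
 *	s32 cpu;
 *
 *	cpu = __COMPAT_scx_bpf_pick_idle_cpu_node(p->cpus_ptr, node,
 *						  SCX_PICK_IDLE_IN_NODE);
 *	if (cpu < 0)
 *		cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0);
 */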

/*
 * v6.18: Add a helper to retrieve the current task running on a CPU.
 *
 * Keep this helper available until v6.20 for compatibility.
 */
static inline struct task_struct *__COMPAT_scx_bpf_cpu_curr(int cpu)
{
	struct rq *rq;

	if (bpf_ksym_exists(scx_bpf_cpu_curr))
		return scx_bpf_cpu_curr(cpu);

	rq = scx_bpf_cpu_rq(cpu);

	return rq ? rq->curr : NULL;
}
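
/*
 * Example (illustrative sketch): inspect what a remote CPU is running.
 * Assumes the returned pointer is only dereferenced under RCU protection.
 *
 *	struct task_struct *curr;
 *
 *	bpf_rcu_read_lock();
 *	curr = __COMPAT_scx_bpf_cpu_curr(cpu);
 *	if (curr && curr->scx.slice == 0)
 *		scx_bpf_kick_cpu(cpu, SCX_KICK_PREEMPT);
 *	bpf_rcu_read_unlock();
 */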

/*
 * v6.19: To work around BPF maximum parameter limit, the following kfuncs are
 * replaced with variants that pack scalar arguments in a struct. Wrappers are
 * provided to maintain source compatibility.
 *
 * The kernel will carry the compat variants until v6.23 to maintain binary
 * compatibility. After v6.23 release, remove the compat handling and move the
 * wrappers to common.bpf.h.
 */
s32 scx_bpf_select_cpu_and___compat(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
				    const struct cpumask *cpus_allowed, u64 flags) __ksym __weak;
void scx_bpf_dsq_insert_vtime___compat(struct task_struct *p, u64 dsq_id, u64 slice,
				       u64 vtime, u64 enq_flags) __ksym __weak;

/**
 * scx_bpf_select_cpu_and - Pick an idle CPU usable by task @p
 * @p: task_struct to select a CPU for
 * @prev_cpu: CPU @p was on previously
 * @wake_flags: %SCX_WAKE_* flags
 * @cpus_allowed: cpumask of allowed CPUs
 * @flags: %SCX_PICK_IDLE* flags
 *
 * Inline wrapper that packs scalar arguments into a struct and calls
 * __scx_bpf_select_cpu_and(). See __scx_bpf_select_cpu_and() for details.
 */
static inline s32
scx_bpf_select_cpu_and(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
		       const struct cpumask *cpus_allowed, u64 flags)
{
	if (bpf_core_type_exists(struct scx_bpf_select_cpu_and_args)) {
		struct scx_bpf_select_cpu_and_args args = {
			.prev_cpu = prev_cpu,
			.wake_flags = wake_flags,
			.flags = flags,
		};

		return __scx_bpf_select_cpu_and(p, cpus_allowed, &args);
	} else {
		return scx_bpf_select_cpu_and___compat(p, prev_cpu, wake_flags,
						       cpus_allowed, flags);
	}
}
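
/*
 * Example (illustrative sketch): typical ops.select_cpu() usage. If an idle
 * CPU is found, dispatching directly to SCX_DSQ_LOCAL skips ops.enqueue().
 * 'myched_select_cpu' is a placeholder name.
 *
 *	s32 BPF_STRUCT_OPS(myched_select_cpu, struct task_struct *p,
 *			   s32 prev_cpu, u64 wake_flags)
 *	{
 *		s32 cpu;
 *
 *		cpu = scx_bpf_select_cpu_and(p, prev_cpu, wake_flags,
 *					     p->cpus_ptr, 0);
 *		if (cpu >= 0) {
 *			scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
 *			return cpu;
 *		}
 *		return prev_cpu;
 *	}
 */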

/**
 * scx_bpf_dsq_insert_vtime - Insert a task into the vtime priority queue of a DSQ
 * @p: task_struct to insert
 * @dsq_id: DSQ to insert into
 * @slice: duration @p can run for in nsecs, 0 to keep the current value
 * @vtime: @p's ordering inside the vtime-sorted queue of the target DSQ
 * @enq_flags: SCX_ENQ_*
 *
 * Inline wrapper that packs scalar arguments into a struct and calls
 * __scx_bpf_dsq_insert_vtime(). See __scx_bpf_dsq_insert_vtime() for details.
 */
static inline bool
scx_bpf_dsq_insert_vtime(struct task_struct *p, u64 dsq_id, u64 slice, u64 vtime,
			 u64 enq_flags)
{
	if (bpf_core_type_exists(struct scx_bpf_dsq_insert_vtime_args)) {
		struct scx_bpf_dsq_insert_vtime_args args = {
			.dsq_id = dsq_id,
			.slice = slice,
			.vtime = vtime,
			.enq_flags = enq_flags,
		};

		return __scx_bpf_dsq_insert_vtime(p, &args);
	} else {
		scx_bpf_dsq_insert_vtime___compat(p, dsq_id, slice, vtime,
						  enq_flags);
		return true;
	}
}
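
/*
 * Example (illustrative sketch): vtime-ordered enqueueing. MY_DSQ_ID is a
 * placeholder and 'vtime_now' is hypothetical global state tracking the
 * current virtual time; wraparound handling is omitted for brevity.
 *
 *	u64 vtime = p->scx.dsq_vtime;
 *
 *	// Don't let a long-idle task hoard vtime credit.
 *	if (vtime < vtime_now - SCX_SLICE_DFL)
 *		vtime = vtime_now - SCX_SLICE_DFL;
 *
 *	scx_bpf_dsq_insert_vtime(p, MY_DSQ_ID, SCX_SLICE_DFL, vtime, enq_flags);
 */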

/*
 * v6.19: scx_bpf_dsq_insert() now returns bool instead of void. Move
 * scx_bpf_dsq_insert() decl to common.bpf.h and drop compat helper after v6.22.
 */
bool scx_bpf_dsq_insert___v2(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) __ksym __weak;
void scx_bpf_dsq_insert___v1(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) __ksym __weak;

static inline bool
scx_bpf_dsq_insert(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags)
{
	if (bpf_ksym_exists(scx_bpf_dsq_insert___v2)) {
		return scx_bpf_dsq_insert___v2(p, dsq_id, slice, enq_flags);
	} else {
		scx_bpf_dsq_insert___v1(p, dsq_id, slice, enq_flags);
		return true;
	}
}
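
/*
 * Example (illustrative sketch): on v6.19+ kernels the return value reports
 * whether the insertion went through; on older kernels the wrapper always
 * reports success. 'nr_enq_failed' is a hypothetical counter.
 *
 *	if (!scx_bpf_dsq_insert(p, MY_DSQ_ID, SCX_SLICE_DFL, enq_flags))
 *		__sync_fetch_and_add(&nr_enq_failed, 1);
 */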

/*
 * v6.19: scx_bpf_task_set_slice() and scx_bpf_task_set_dsq_vtime() added for
 * sub-sched authority checks. Drop the wrappers and move the decls to
 * common.bpf.h after v6.22.
 */
bool scx_bpf_task_set_slice___new(struct task_struct *p, u64 slice) __ksym __weak;
bool scx_bpf_task_set_dsq_vtime___new(struct task_struct *p, u64 vtime) __ksym __weak;

static inline void scx_bpf_task_set_slice(struct task_struct *p, u64 slice)
{
	if (bpf_ksym_exists(scx_bpf_task_set_slice___new))
		scx_bpf_task_set_slice___new(p, slice);
	else
		p->scx.slice = slice;
}

static inline void scx_bpf_task_set_dsq_vtime(struct task_struct *p, u64 vtime)
{
	if (bpf_ksym_exists(scx_bpf_task_set_dsq_vtime___new))
		scx_bpf_task_set_dsq_vtime___new(p, vtime);
	else
		p->scx.dsq_vtime = vtime;
}
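
/*
 * Example (illustrative sketch): route slice/vtime updates through the
 * wrappers instead of writing p->scx.* directly, so kernels with sub-sched
 * authority checks can reject the write. The halved slice and 'vtime_now'
 * are hypothetical scheduler policy/state.
 *
 *	scx_bpf_task_set_slice(p, SCX_SLICE_DFL / 2);
 *	scx_bpf_task_set_dsq_vtime(p, vtime_now);
 */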

/*
 * Define sched_ext_ops. This may be expanded to define multiple variants for
 * backward compatibility. See compat.h::SCX_OPS_LOAD/ATTACH().
 */
#define SCX_OPS_DEFINE(__name, ...)						\
	SEC(".struct_ops.link")							\
	struct sched_ext_ops __name = {						\
		__VA_ARGS__,							\
	};
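
/*
 * Example (illustrative sketch): a minimal scheduler definition. The
 * myched_* callbacks are placeholders for functions defined elsewhere in
 * the BPF scheduler.
 *
 *	SCX_OPS_DEFINE(myched_ops,
 *		       .select_cpu	= (void *)myched_select_cpu,
 *		       .enqueue		= (void *)myched_enqueue,
 *		       .dispatch	= (void *)myched_dispatch,
 *		       .name		= "myched");
 */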

#endif	/* __SCX_COMPAT_BPF_H */