/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2024 Tejun Heo <tj@kernel.org>
 * Copyright (c) 2024 David Vernet <dvernet@meta.com>
 */
#ifndef __SCX_COMPAT_BPF_H
#define __SCX_COMPAT_BPF_H

#define __COMPAT_ENUM_OR_ZERO(__type, __ent)					\
({										\
	__type __ret = 0;							\
	if (bpf_core_enum_value_exists(__type, __ent))				\
		__ret = __ent;							\
	__ret;									\
})
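
/*
 * Example (a minimal sketch): resolve an enum value that may not exist on
 * older kernels, defaulting to 0. Assumes SCX_ENQ_PREEMPT is visible in the
 * vmlinux.h being compiled against.
 *
 *	u64 preempt = __COMPAT_ENUM_OR_ZERO(enum scx_enq_flags, SCX_ENQ_PREEMPT);
 */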

/* v6.12: 819513666966 ("sched_ext: Add cgroup support") */
#define __COMPAT_scx_bpf_task_cgroup(p)						\
	(bpf_ksym_exists(scx_bpf_task_cgroup) ?					\
	 scx_bpf_task_cgroup((p)) : NULL)
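
/*
 * Example (a minimal sketch): look up @p's cgroup ID, degrading to 0 on
 * pre-v6.12 kernels. The acquired cgroup must be released.
 *
 *	struct cgroup *cgrp = __COMPAT_scx_bpf_task_cgroup(p);
 *	u64 cgid = 0;
 *
 *	if (cgrp) {
 *		cgid = cgrp->kn->id;
 *		bpf_cgroup_release(cgrp);
 *	}
 */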

/*
 * v6.13: The verb `dispatch` was too overloaded and confusing, so the kfuncs
 * were renamed to unload the verb.
 *
 * A build error is triggered if the old names are used. New binaries work on
 * kernels with either the new or the old names. The compat macros will be
 * removed in the v6.15 release.
 *
 * scx_bpf_dispatch_from_dsq() and friends were added during v6.12 by
 * 4c30f5ce4f7a ("sched_ext: Implement scx_bpf_dispatch[_vtime]_from_dsq()").
 * Preserve the __COMPAT macros until v6.15.
 */
void scx_bpf_dispatch___compat(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) __ksym __weak;
void scx_bpf_dispatch_vtime___compat(struct task_struct *p, u64 dsq_id, u64 slice, u64 vtime, u64 enq_flags) __ksym __weak;
bool scx_bpf_consume___compat(u64 dsq_id) __ksym __weak;
void scx_bpf_dispatch_from_dsq_set_slice___compat(struct bpf_iter_scx_dsq *it__iter, u64 slice) __ksym __weak;
void scx_bpf_dispatch_from_dsq_set_vtime___compat(struct bpf_iter_scx_dsq *it__iter, u64 vtime) __ksym __weak;
bool scx_bpf_dispatch_from_dsq___compat(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
bool scx_bpf_dispatch_vtime_from_dsq___compat(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
int bpf_cpumask_populate(struct cpumask *dst, void *src, size_t src__sz) __ksym __weak;

#define scx_bpf_dsq_insert(p, dsq_id, slice, enq_flags)				\
	(bpf_ksym_exists(scx_bpf_dsq_insert) ?					\
	 scx_bpf_dsq_insert((p), (dsq_id), (slice), (enq_flags)) :		\
	 scx_bpf_dispatch___compat((p), (dsq_id), (slice), (enq_flags)))

#define scx_bpf_dsq_insert_vtime(p, dsq_id, slice, vtime, enq_flags)		\
	(bpf_ksym_exists(scx_bpf_dsq_insert_vtime) ?				\
	 scx_bpf_dsq_insert_vtime((p), (dsq_id), (slice), (vtime), (enq_flags)) : \
	 scx_bpf_dispatch_vtime___compat((p), (dsq_id), (slice), (vtime), (enq_flags)))
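
/*
 * Example (a minimal sketch of an ops.enqueue() callback): the macros above
 * transparently fall back to the old dispatch kfuncs on pre-v6.13 kernels.
 *
 *	void BPF_STRUCT_OPS(example_enqueue, struct task_struct *p, u64 enq_flags)
 *	{
 *		scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
 *	}
 */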

#define scx_bpf_dsq_move_to_local(dsq_id)					\
	(bpf_ksym_exists(scx_bpf_dsq_move_to_local) ?				\
	 scx_bpf_dsq_move_to_local((dsq_id)) :					\
	 scx_bpf_consume___compat((dsq_id)))
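
/*
 * Example (sketch): refill the local DSQ from a shared queue in
 * ops.dispatch(). SHARED_DSQ is a hypothetical DSQ ID created by the
 * scheduler via scx_bpf_create_dsq().
 *
 *	void BPF_STRUCT_OPS(example_dispatch, s32 cpu, struct task_struct *prev)
 *	{
 *		scx_bpf_dsq_move_to_local(SHARED_DSQ);
 *	}
 */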

#define __COMPAT_scx_bpf_dsq_move_set_slice(it__iter, slice)			\
	(bpf_ksym_exists(scx_bpf_dsq_move_set_slice) ?				\
	 scx_bpf_dsq_move_set_slice((it__iter), (slice)) :			\
	 (bpf_ksym_exists(scx_bpf_dispatch_from_dsq_set_slice___compat) ?	\
	  scx_bpf_dispatch_from_dsq_set_slice___compat((it__iter), (slice)) :	\
	  (void)0))

#define __COMPAT_scx_bpf_dsq_move_set_vtime(it__iter, vtime)			\
	(bpf_ksym_exists(scx_bpf_dsq_move_set_vtime) ?				\
	 scx_bpf_dsq_move_set_vtime((it__iter), (vtime)) :			\
	 (bpf_ksym_exists(scx_bpf_dispatch_from_dsq_set_vtime___compat) ?	\
	  scx_bpf_dispatch_from_dsq_set_vtime___compat((it__iter), (vtime)) :	\
	  (void)0))

#define __COMPAT_scx_bpf_dsq_move(it__iter, p, dsq_id, enq_flags)		\
	(bpf_ksym_exists(scx_bpf_dsq_move) ?					\
	 scx_bpf_dsq_move((it__iter), (p), (dsq_id), (enq_flags)) :		\
	 (bpf_ksym_exists(scx_bpf_dispatch_from_dsq___compat) ?			\
	  scx_bpf_dispatch_from_dsq___compat((it__iter), (p), (dsq_id), (enq_flags)) : \
	  false))

#define __COMPAT_scx_bpf_dsq_move_vtime(it__iter, p, dsq_id, enq_flags)		\
	(bpf_ksym_exists(scx_bpf_dsq_move_vtime) ?				\
	 scx_bpf_dsq_move_vtime((it__iter), (p), (dsq_id), (enq_flags)) :	\
	 (bpf_ksym_exists(scx_bpf_dispatch_vtime_from_dsq___compat) ?		\
	  scx_bpf_dispatch_vtime_from_dsq___compat((it__iter), (p), (dsq_id), (enq_flags)) : \
	  false))
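
/*
 * Example (sketch): iterate a DSQ and move the first task to the local DSQ,
 * refreshing its slice on the way. SHARED_DSQ is hypothetical;
 * BPF_FOR_EACH_ITER names the implicit iterator of bpf_for_each().
 *
 *	struct task_struct *p;
 *
 *	bpf_for_each(scx_dsq, p, SHARED_DSQ, 0) {
 *		__COMPAT_scx_bpf_dsq_move_set_slice(BPF_FOR_EACH_ITER, SCX_SLICE_DFL);
 *		if (__COMPAT_scx_bpf_dsq_move(BPF_FOR_EACH_ITER, p, SCX_DSQ_LOCAL, 0))
 *			break;
 *	}
 */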

#define __COMPAT_bpf_cpumask_populate(cpumask, src, size__sz)		\
	(bpf_ksym_exists(bpf_cpumask_populate) ?			\
	 (bpf_cpumask_populate(cpumask, src, size__sz)) : -EOPNOTSUPP)
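
/*
 * Example (sketch): bulk-initialize a hypothetical bpf_cpumask @mask from a
 * bitmap, falling back to per-CPU updates when the kfunc is unavailable.
 *
 *	u64 bits[1] = { 0xf };	// CPUs 0-3
 *
 *	if (__COMPAT_bpf_cpumask_populate((struct cpumask *)mask, bits,
 *					  sizeof(bits)) < 0)
 *		bpf_cpumask_set_cpu(0, mask);
 */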

#define scx_bpf_dispatch(p, dsq_id, slice, enq_flags)				\
	_Static_assert(false, "scx_bpf_dispatch() renamed to scx_bpf_dsq_insert()")

#define scx_bpf_dispatch_vtime(p, dsq_id, slice, vtime, enq_flags)		\
	_Static_assert(false, "scx_bpf_dispatch_vtime() renamed to scx_bpf_dsq_insert_vtime()")

#define scx_bpf_consume(dsq_id) ({						\
	_Static_assert(false, "scx_bpf_consume() renamed to scx_bpf_dsq_move_to_local()"); \
	false;									\
})

#define scx_bpf_dispatch_from_dsq_set_slice(it__iter, slice)		\
	_Static_assert(false, "scx_bpf_dispatch_from_dsq_set_slice() renamed to scx_bpf_dsq_move_set_slice()")

#define scx_bpf_dispatch_from_dsq_set_vtime(it__iter, vtime)		\
	_Static_assert(false, "scx_bpf_dispatch_from_dsq_set_vtime() renamed to scx_bpf_dsq_move_set_vtime()")

#define scx_bpf_dispatch_from_dsq(it__iter, p, dsq_id, enq_flags) ({	\
	_Static_assert(false, "scx_bpf_dispatch_from_dsq() renamed to scx_bpf_dsq_move()"); \
	false;									\
})

#define scx_bpf_dispatch_vtime_from_dsq(it__iter, p, dsq_id, enq_flags) ({  \
	_Static_assert(false, "scx_bpf_dispatch_vtime_from_dsq() renamed to scx_bpf_dsq_move_vtime()"); \
	false;									\
})

#define __COMPAT_scx_bpf_dispatch_from_dsq_set_slice(it__iter, slice)		\
	_Static_assert(false, "__COMPAT_scx_bpf_dispatch_from_dsq_set_slice() renamed to __COMPAT_scx_bpf_dsq_move_set_slice()")

#define __COMPAT_scx_bpf_dispatch_from_dsq_set_vtime(it__iter, vtime)		\
	_Static_assert(false, "__COMPAT_scx_bpf_dispatch_from_dsq_set_vtime() renamed to __COMPAT_scx_bpf_dsq_move_set_vtime()")

#define __COMPAT_scx_bpf_dispatch_from_dsq(it__iter, p, dsq_id, enq_flags) ({	\
	_Static_assert(false, "__COMPAT_scx_bpf_dispatch_from_dsq() renamed to __COMPAT_scx_bpf_dsq_move()"); \
	false;									\
})

#define __COMPAT_scx_bpf_dispatch_vtime_from_dsq(it__iter, p, dsq_id, enq_flags) ({  \
	_Static_assert(false, "__COMPAT_scx_bpf_dispatch_vtime_from_dsq() renamed to __COMPAT_scx_bpf_dsq_move_vtime()"); \
	false;									\
})

/**
 * __COMPAT_is_enq_cpu_selected - Test if SCX_ENQ_CPU_SELECTED is on
 * @enq_flags: enqueue flags from ops.enqueue()
 *
 * Tests the flag in a backward-compatible way. This __COMPAT helper will be
 * preserved until v6.16.
 *
 * Return: %true if SCX_ENQ_CPU_SELECTED is set in @enq_flags
 */
static inline bool __COMPAT_is_enq_cpu_selected(u64 enq_flags)
{
#ifdef HAVE_SCX_ENQ_CPU_SELECTED
	/*
	 * This is the case where the BPF code was compiled against a
	 * vmlinux.h in which the enum SCX_ENQ_CPU_SELECTED exists.
	 */

	/*
	 * Temporarily suspend the macro expansion of 'SCX_ENQ_CPU_SELECTED'.
	 * This avoids 'SCX_ENQ_CPU_SELECTED' being rewritten to
	 * '__SCX_ENQ_CPU_SELECTED' when 'SCX_ENQ_CPU_SELECTED' is defined by
	 * 'scripts/gen_enums.py'.
	 */
#pragma push_macro("SCX_ENQ_CPU_SELECTED")
#undef SCX_ENQ_CPU_SELECTED
	u64 flag;

	/*
	 * On kernels that did not have SCX_ENQ_CPU_SELECTED,
	 * select_task_rq_scx() was never skipped, so treat this case as if
	 * the CPU has already been selected.
	 */
	if (!bpf_core_enum_value_exists(enum scx_enq_flags,
					SCX_ENQ_CPU_SELECTED))
		return true;

	flag = bpf_core_enum_value(enum scx_enq_flags, SCX_ENQ_CPU_SELECTED);
	return enq_flags & flag;

	/*
	 * Once done, resume the macro expansion of 'SCX_ENQ_CPU_SELECTED'.
	 */
#pragma pop_macro("SCX_ENQ_CPU_SELECTED")
#else
	/*
	 * This is the case where the BPF code was compiled against a
	 * vmlinux.h in which the enum SCX_ENQ_CPU_SELECTED does NOT exist.
	 */
	return true;
#endif /* HAVE_SCX_ENQ_CPU_SELECTED */
}
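
/*
 * Example (sketch): in ops.enqueue(), only scan for an idle CPU when
 * ops.select_cpu() was skipped for this task. pick_idle_cpu() is a
 * hypothetical scheduler-local helper.
 *
 *	if (!__COMPAT_is_enq_cpu_selected(enq_flags))
 *		cpu = pick_idle_cpu(p, enq_flags);
 */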

#define scx_bpf_now()								\
	(bpf_ksym_exists(scx_bpf_now) ?						\
	 scx_bpf_now() :							\
	 bpf_ktime_get_ns())
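
/*
 * Example (sketch): time deltas behave the same whether the value came from
 * scx_bpf_now() or the bpf_ktime_get_ns() fallback. taskc is a hypothetical
 * per-task context.
 *
 *	u64 now = scx_bpf_now();
 *	u64 runtime = now - taskc->started_at;
 */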

/*
 * v6.15: Introduce event counters.
 *
 * Preserve the following macro until v6.17.
 */
#define __COMPAT_scx_bpf_events(events, size)					\
	(bpf_ksym_exists(scx_bpf_events) ?					\
	 scx_bpf_events(events, size) : ({}))
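
/*
 * Example (sketch): snapshot the event counters; on pre-v6.15 kernels the
 * struct stays zero-initialized.
 *
 *	struct scx_event_stats events = {};
 *
 *	__COMPAT_scx_bpf_events(&events, sizeof(events));
 */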

/*
 * v6.15: Introduce NUMA-aware kfuncs to operate with per-node idle
 * cpumasks.
 *
 * Preserve the following __COMPAT_scx_*_node macros until v6.17.
 */
#define __COMPAT_scx_bpf_nr_node_ids()						\
	(bpf_ksym_exists(scx_bpf_nr_node_ids) ?					\
	 scx_bpf_nr_node_ids() : 1U)

#define __COMPAT_scx_bpf_cpu_node(cpu)						\
	(bpf_ksym_exists(scx_bpf_cpu_node) ?					\
	 scx_bpf_cpu_node(cpu) : 0)

#define __COMPAT_scx_bpf_get_idle_cpumask_node(node)				\
	(bpf_ksym_exists(scx_bpf_get_idle_cpumask_node) ?			\
	 scx_bpf_get_idle_cpumask_node(node) :					\
	 scx_bpf_get_idle_cpumask())

#define __COMPAT_scx_bpf_get_idle_smtmask_node(node)				\
	(bpf_ksym_exists(scx_bpf_get_idle_smtmask_node) ?			\
	 scx_bpf_get_idle_smtmask_node(node) :					\
	 scx_bpf_get_idle_smtmask())

#define __COMPAT_scx_bpf_pick_idle_cpu_node(cpus_allowed, node, flags)		\
	(bpf_ksym_exists(scx_bpf_pick_idle_cpu_node) ?				\
	 scx_bpf_pick_idle_cpu_node(cpus_allowed, node, flags) :		\
	 scx_bpf_pick_idle_cpu(cpus_allowed, flags))

#define __COMPAT_scx_bpf_pick_any_cpu_node(cpus_allowed, node, flags)		\
	(bpf_ksym_exists(scx_bpf_pick_any_cpu_node) ?				\
	 scx_bpf_pick_any_cpu_node(cpus_allowed, node, flags) :			\
	 scx_bpf_pick_any_cpu(cpus_allowed, flags))
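
/*
 * Example (sketch): NUMA-aware idle CPU selection in ops.select_cpu() that
 * degrades to a flat scan on pre-v6.15 kernels.
 *
 *	int node = __COMPAT_scx_bpf_cpu_node(prev_cpu);
 *	s32 cpu = __COMPAT_scx_bpf_pick_idle_cpu_node(p->cpus_ptr, node, 0);
 *
 *	if (cpu >= 0)
 *		return cpu;
 */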

/*
 * v6.18: Add a helper to retrieve the current task running on a CPU.
 *
 * Keep this helper available until v6.20 for compatibility.
 */
static inline struct task_struct *__COMPAT_scx_bpf_cpu_curr(int cpu)
{
	struct rq *rq;

	if (bpf_ksym_exists(scx_bpf_cpu_curr))
		return scx_bpf_cpu_curr(cpu);

	rq = scx_bpf_cpu_rq(cpu);

	return rq ? rq->curr : NULL;
}
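
/*
 * Example (sketch): peek at the task running on @cpu. The returned pointer
 * is only safe to dereference under RCU protection.
 *
 *	bpf_rcu_read_lock();
 *	p = __COMPAT_scx_bpf_cpu_curr(cpu);
 *	if (p)
 *		busy = p->pid != 0;
 *	bpf_rcu_read_unlock();
 */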

/*
 * Define sched_ext_ops. This may be expanded to define multiple variants for
 * backward compatibility. See compat.h::SCX_OPS_LOAD/ATTACH().
 */
#define SCX_OPS_DEFINE(__name, ...)						\
	SEC(".struct_ops.link")							\
	struct sched_ext_ops __name = {						\
		__VA_ARGS__,							\
	};
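
/*
 * Example (sketch): example_enqueue/example_dispatch are hypothetical
 * callbacks like the ones sketched above.
 *
 *	SCX_OPS_DEFINE(example_ops,
 *		       .enqueue	 = (void *)example_enqueue,
 *		       .dispatch = (void *)example_dispatch,
 *		       .name	 = "example");
 */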

#endif	/* __SCX_COMPAT_BPF_H */