/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2024 Tejun Heo <tj@kernel.org>
 * Copyright (c) 2024 David Vernet <dvernet@meta.com>
 */
#ifndef __SCX_COMPAT_BPF_H
#define __SCX_COMPAT_BPF_H

#define __COMPAT_ENUM_OR_ZERO(__type, __ent)					\
({										\
	__type __ret = 0;							\
	if (bpf_core_enum_value_exists(__type, __ent))				\
		__ret = __ent;							\
	__ret;									\
})

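/*
 * Example (illustrative): resolve an enum value that may not exist on older
 * kernels, defaulting to 0 when the kernel's BTF doesn't have it.
 *
 *	u64 flag = __COMPAT_ENUM_OR_ZERO(enum scx_enq_flags, SCX_ENQ_PREEMPT);
 */
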
/*
 * v6.15: 950ad93df2fc ("bpf: add kfunc for populating cpumask bits")
 *
 * This compat macro will be dropped with the v6.19 release.
 */
int bpf_cpumask_populate(struct cpumask *dst, void *src, size_t src__sz) __ksym __weak;

#define __COMPAT_bpf_cpumask_populate(cpumask, src, size__sz)		\
	(bpf_ksym_exists(bpf_cpumask_populate) ?			\
	 (bpf_cpumask_populate(cpumask, src, size__sz)) : -EOPNOTSUPP)

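/*
 * Example (illustrative; 'mask' and 'handle_fallback()' are placeholders --
 * 'mask' is assumed to be a trusted struct cpumask pointer and 'bits' a
 * source bitmap sized in multiples of u64). On kernels without the kfunc the
 * macro evaluates to -EOPNOTSUPP, so callers should be ready to fall back,
 * e.g. to bpf_cpumask_set_cpu():
 *
 *	u64 bits[1] = { 0xffULL };
 *
 *	if (__COMPAT_bpf_cpumask_populate(mask, bits, sizeof(bits)) < 0)
 *		handle_fallback();
 */
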
/**
 * __COMPAT_is_enq_cpu_selected - Test whether SCX_ENQ_CPU_SELECTED is set
 * @enq_flags: enqueue flags from ops.enqueue()
 *
 * Performs the test in a backward-compatible way. This __COMPAT helper will
 * be preserved until v6.16.
 *
 * Return: %true if SCX_ENQ_CPU_SELECTED is set in @enq_flags
 */
static inline bool __COMPAT_is_enq_cpu_selected(u64 enq_flags)
{
#ifdef HAVE_SCX_ENQ_CPU_SELECTED
	/*
	 * This is the case where the BPF code was compiled against a
	 * vmlinux.h in which the enum SCX_ENQ_CPU_SELECTED exists.
	 */

	/*
	 * Temporarily suspend the macro expansion of 'SCX_ENQ_CPU_SELECTED'.
	 * This avoids 'SCX_ENQ_CPU_SELECTED' being rewritten to
	 * '__SCX_ENQ_CPU_SELECTED' when 'SCX_ENQ_CPU_SELECTED' is defined in
	 * 'scripts/gen_enums.py'.
	 */
#pragma push_macro("SCX_ENQ_CPU_SELECTED")
#undef SCX_ENQ_CPU_SELECTED
	u64 flag;

	/*
	 * On kernels without SCX_ENQ_CPU_SELECTED, select_task_rq_scx() was
	 * never skipped, so treat this case as if the CPU has already been
	 * selected.
	 */
	if (!bpf_core_enum_value_exists(enum scx_enq_flags,
					SCX_ENQ_CPU_SELECTED))
		return true;

	flag = bpf_core_enum_value(enum scx_enq_flags, SCX_ENQ_CPU_SELECTED);
	return enq_flags & flag;

	/*
	 * Once done, resume the macro expansion of 'SCX_ENQ_CPU_SELECTED'.
	 */
#pragma pop_macro("SCX_ENQ_CPU_SELECTED")
#else
	/*
	 * This is the case where the BPF code was compiled against a
	 * vmlinux.h in which the enum SCX_ENQ_CPU_SELECTED does NOT exist.
	 */
	return true;
#endif /* HAVE_SCX_ENQ_CPU_SELECTED */
}
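
/*
 * Example (illustrative ops.enqueue() snippet): dispatch directly to the
 * local DSQ only when the CPU has already been selected.
 *
 *	if (__COMPAT_is_enq_cpu_selected(enq_flags))
 *		scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, enq_flags);
 *	else
 *		scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
 */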
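/*
 * Use the scx_bpf_now() kfunc when the kernel provides it; otherwise fall
 * back to bpf_ktime_get_ns().
 */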
#define scx_bpf_now()								\
	(bpf_ksym_exists(scx_bpf_now) ?						\
	 scx_bpf_now() :							\
	 bpf_ktime_get_ns())

/*
 * v6.15: Introduce event counters.
 *
 * Preserve the following macro until v6.17.
 */
#define __COMPAT_scx_bpf_events(events, size)					\
	(bpf_ksym_exists(scx_bpf_events) ?					\
	 scx_bpf_events(events, size) : ({}))

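/*
 * Example (a minimal sketch; assumes struct scx_event_stats as exposed
 * through vmlinux.h):
 *
 *	struct scx_event_stats events = {};
 *
 *	__COMPAT_scx_bpf_events(&events, sizeof(events));
 */
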
/*
 * v6.15: Introduce NUMA-aware kfuncs to operate with per-node idle
 * cpumasks.
 *
 * Preserve the following __COMPAT_scx_*_node macros until v6.17.
 */
#define __COMPAT_scx_bpf_nr_node_ids()						\
	(bpf_ksym_exists(scx_bpf_nr_node_ids) ?					\
	 scx_bpf_nr_node_ids() : 1U)

#define __COMPAT_scx_bpf_cpu_node(cpu)						\
	(bpf_ksym_exists(scx_bpf_cpu_node) ?					\
	 scx_bpf_cpu_node(cpu) : 0)

#define __COMPAT_scx_bpf_get_idle_cpumask_node(node)				\
	(bpf_ksym_exists(scx_bpf_get_idle_cpumask_node) ?			\
	 scx_bpf_get_idle_cpumask_node(node) :					\
	 scx_bpf_get_idle_cpumask())

#define __COMPAT_scx_bpf_get_idle_smtmask_node(node)				\
	(bpf_ksym_exists(scx_bpf_get_idle_smtmask_node) ?			\
	 scx_bpf_get_idle_smtmask_node(node) :					\
	 scx_bpf_get_idle_smtmask())

#define __COMPAT_scx_bpf_pick_idle_cpu_node(cpus_allowed, node, flags)		\
	(bpf_ksym_exists(scx_bpf_pick_idle_cpu_node) ?				\
	 scx_bpf_pick_idle_cpu_node(cpus_allowed, node, flags) :		\
	 scx_bpf_pick_idle_cpu(cpus_allowed, flags))

#define __COMPAT_scx_bpf_pick_any_cpu_node(cpus_allowed, node, flags)		\
	(bpf_ksym_exists(scx_bpf_pick_any_cpu_node) ?				\
	 scx_bpf_pick_any_cpu_node(cpus_allowed, node, flags) :			\
	 scx_bpf_pick_any_cpu(cpus_allowed, flags))

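/*
 * Example (illustrative): prefer an idle CPU on the NUMA node of the task's
 * previous CPU, degrading to the flat kfuncs on pre-v6.15 kernels.
 *
 *	int node = __COMPAT_scx_bpf_cpu_node(prev_cpu);
 *	s32 cpu = __COMPAT_scx_bpf_pick_idle_cpu_node(p->cpus_ptr, node, 0);
 *
 *	if (cpu < 0)
 *		cpu = prev_cpu;
 */
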
/*
 * v6.18: Add a helper to retrieve the current task running on a CPU.
 *
 * Keep this helper available until v6.20 for compatibility.
 */
static inline struct task_struct *__COMPAT_scx_bpf_cpu_curr(int cpu)
{
	struct rq *rq;

	if (bpf_ksym_exists(scx_bpf_cpu_curr))
		return scx_bpf_cpu_curr(cpu);

	rq = scx_bpf_cpu_rq(cpu);

	return rq ? rq->curr : NULL;
}

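/*
 * Example (illustrative; depending on the kernel and verifier, an explicit
 * RCU read-side section may be needed to dereference the returned task):
 *
 *	struct task_struct *curr;
 *
 *	bpf_rcu_read_lock();
 *	curr = __COMPAT_scx_bpf_cpu_curr(cpu);
 *	if (curr)
 *		bpf_printk("cpu%d runs %s", cpu, curr->comm);
 *	bpf_rcu_read_unlock();
 */
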
/*
 * v6.19: To work around the BPF maximum parameter limit, the following kfuncs
 * are replaced with variants that pack scalar arguments in a struct. Wrappers
 * are provided to maintain source compatibility.
 *
 * The kernel will carry the compat variants until v6.23 to maintain binary
 * compatibility. After the v6.23 release, remove the compat handling and move
 * the wrappers to common.bpf.h.
 */
s32 scx_bpf_select_cpu_and___compat(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
				    const struct cpumask *cpus_allowed, u64 flags) __ksym __weak;
void scx_bpf_dsq_insert_vtime___compat(struct task_struct *p, u64 dsq_id, u64 slice, u64 vtime, u64 enq_flags) __ksym __weak;

/**
 * scx_bpf_select_cpu_and - Pick an idle CPU usable by task @p
 * @p: task_struct to select a CPU for
 * @prev_cpu: CPU @p was on previously
 * @wake_flags: %SCX_WAKE_* flags
 * @cpus_allowed: cpumask of allowed CPUs
 * @flags: %SCX_PICK_IDLE* flags
 *
 * Inline wrapper that packs scalar arguments into a struct and calls
 * __scx_bpf_select_cpu_and(). See __scx_bpf_select_cpu_and() for details. On
 * kernels without the packed-argument variant, falls back to
 * scx_bpf_select_cpu_and___compat().
 */
static inline s32
scx_bpf_select_cpu_and(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
		       const struct cpumask *cpus_allowed, u64 flags)
{
	if (bpf_core_type_exists(struct scx_bpf_select_cpu_and_args)) {
		struct scx_bpf_select_cpu_and_args args = {
			.prev_cpu = prev_cpu,
			.wake_flags = wake_flags,
			.flags = flags,
		};

		return __scx_bpf_select_cpu_and(p, cpus_allowed, &args);
	} else {
		return scx_bpf_select_cpu_and___compat(p, prev_cpu, wake_flags,
						       cpus_allowed, flags);
	}
}

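/*
 * Example (illustrative ops.select_cpu() snippet): dispatch directly when an
 * idle CPU is found, otherwise keep the previous CPU.
 *
 *	s32 cpu = scx_bpf_select_cpu_and(p, prev_cpu, wake_flags, p->cpus_ptr, 0);
 *
 *	if (cpu >= 0) {
 *		scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
 *		return cpu;
 *	}
 *	return prev_cpu;
 */
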
/**
 * scx_bpf_dsq_insert_vtime - Insert a task into the vtime priority queue of a DSQ
 * @p: task_struct to insert
 * @dsq_id: DSQ to insert into
 * @slice: duration @p can run for in nsecs, 0 to keep the current value
 * @vtime: @p's ordering inside the vtime-sorted queue of the target DSQ
 * @enq_flags: SCX_ENQ_*
 *
 * Inline wrapper that packs scalar arguments into a struct and calls
 * __scx_bpf_dsq_insert_vtime(). See __scx_bpf_dsq_insert_vtime() for details.
 * On kernels without the packed-argument variant, falls back to
 * scx_bpf_dsq_insert_vtime___compat().
 */
static inline bool
scx_bpf_dsq_insert_vtime(struct task_struct *p, u64 dsq_id, u64 slice, u64 vtime,
			 u64 enq_flags)
{
	if (bpf_core_type_exists(struct scx_bpf_dsq_insert_vtime_args)) {
		struct scx_bpf_dsq_insert_vtime_args args = {
			.dsq_id = dsq_id,
			.slice = slice,
			.vtime = vtime,
			.enq_flags = enq_flags,
		};

		return __scx_bpf_dsq_insert_vtime(p, &args);
	} else {
		scx_bpf_dsq_insert_vtime___compat(p, dsq_id, slice, vtime,
						  enq_flags);
		return true;
	}
}

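/*
 * Example (illustrative; SHARED_DSQ is a placeholder for a user-created DSQ
 * id, typically set up with scx_bpf_create_dsq() in ops.init()):
 *
 *	scx_bpf_dsq_insert_vtime(p, SHARED_DSQ, SCX_SLICE_DFL,
 *				 p->scx.dsq_vtime, enq_flags);
 */
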
/*
 * v6.19: scx_bpf_dsq_insert() now returns bool instead of void. Move
 * scx_bpf_dsq_insert() decl to common.bpf.h and drop compat helper after v6.22.
 */
bool scx_bpf_dsq_insert___new(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) __ksym __weak;
void scx_bpf_dsq_insert___compat(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) __ksym __weak;

static inline bool
scx_bpf_dsq_insert(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags)
{
	if (bpf_ksym_exists(scx_bpf_dsq_insert___new)) {
		return scx_bpf_dsq_insert___new(p, dsq_id, slice, enq_flags);
	} else {
		scx_bpf_dsq_insert___compat(p, dsq_id, slice, enq_flags);
		return true;
	}
}

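/*
 * Example (illustrative ops.enqueue() snippet): FIFO-queue the task on the
 * global DSQ with the default slice.
 *
 *	scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
 */
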
/*
 * Define sched_ext_ops. This may be expanded to define multiple variants for
 * backward compatibility. See compat.h::SCX_OPS_LOAD/ATTACH().
 */
#define SCX_OPS_DEFINE(__name, ...)						\
	SEC(".struct_ops.link")							\
	struct sched_ext_ops __name = {						\
		__VA_ARGS__,							\
	};

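/*
 * Example (illustrative; the callback and scheduler names are placeholders):
 *
 *	SCX_OPS_DEFINE(minimal_ops,
 *		       .enqueue		= (void *)minimal_enqueue,
 *		       .init		= (void *)minimal_init,
 *		       .name		= "minimal");
 */
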
#endif	/* __SCX_COMPAT_BPF_H */