xref: /linux/tools/sched_ext/include/scx/common.bpf.h (revision 0c436dfe5c25d0931b164b944165259f95e5281f)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
 * Copyright (c) 2022 David Vernet <dvernet@meta.com>
 */
#ifndef __SCX_COMMON_BPF_H
#define __SCX_COMMON_BPF_H

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <asm-generic/errno.h>
#include "user_exit_info.h"

#define PF_WQ_WORKER			0x00000020	/* I'm a workqueue worker */
#define PF_KTHREAD			0x00200000	/* I am a kernel thread */
#define PF_EXITING			0x00000004
#define CLOCK_MONOTONIC			1

/*
 * Earlier versions of clang/pahole lost the upper 32 bits of 64-bit enums,
 * which can lead to really confusing misbehaviors. Let's trigger a build
 * failure in that case.
 */
static inline void ___vmlinux_h_sanity_check___(void)
{
	_Static_assert(SCX_DSQ_FLAG_BUILTIN,
		       "bpftool generated vmlinux.h is missing high bits for 64bit enums, upgrade clang and pahole");
}

s32 scx_bpf_create_dsq(u64 dsq_id, s32 node) __ksym;
s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, bool *is_idle) __ksym;
void scx_bpf_dispatch(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) __ksym;
void scx_bpf_dispatch_vtime(struct task_struct *p, u64 dsq_id, u64 slice, u64 vtime, u64 enq_flags) __ksym;
u32 scx_bpf_dispatch_nr_slots(void) __ksym;
void scx_bpf_dispatch_cancel(void) __ksym;
bool scx_bpf_consume(u64 dsq_id) __ksym;
void scx_bpf_dispatch_from_dsq_set_slice(struct bpf_iter_scx_dsq *it__iter, u64 slice) __ksym;
void scx_bpf_dispatch_from_dsq_set_vtime(struct bpf_iter_scx_dsq *it__iter, u64 vtime) __ksym;
bool scx_bpf_dispatch_from_dsq(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
bool scx_bpf_dispatch_vtime_from_dsq(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
u32 scx_bpf_reenqueue_local(void) __ksym;
void scx_bpf_kick_cpu(s32 cpu, u64 flags) __ksym;
s32 scx_bpf_dsq_nr_queued(u64 dsq_id) __ksym;
void scx_bpf_destroy_dsq(u64 dsq_id) __ksym;
int bpf_iter_scx_dsq_new(struct bpf_iter_scx_dsq *it, u64 dsq_id, u64 flags) __ksym __weak;
struct task_struct *bpf_iter_scx_dsq_next(struct bpf_iter_scx_dsq *it) __ksym __weak;
void bpf_iter_scx_dsq_destroy(struct bpf_iter_scx_dsq *it) __ksym __weak;
void scx_bpf_exit_bstr(s64 exit_code, char *fmt, unsigned long long *data, u32 data__sz) __ksym __weak;
void scx_bpf_error_bstr(char *fmt, unsigned long long *data, u32 data__sz) __ksym;
void scx_bpf_dump_bstr(char *fmt, unsigned long long *data, u32 data__sz) __ksym __weak;
u32 scx_bpf_cpuperf_cap(s32 cpu) __ksym __weak;
u32 scx_bpf_cpuperf_cur(s32 cpu) __ksym __weak;
void scx_bpf_cpuperf_set(s32 cpu, u32 perf) __ksym __weak;
u32 scx_bpf_nr_cpu_ids(void) __ksym __weak;
const struct cpumask *scx_bpf_get_possible_cpumask(void) __ksym __weak;
const struct cpumask *scx_bpf_get_online_cpumask(void) __ksym __weak;
void scx_bpf_put_cpumask(const struct cpumask *cpumask) __ksym __weak;
const struct cpumask *scx_bpf_get_idle_cpumask(void) __ksym;
const struct cpumask *scx_bpf_get_idle_smtmask(void) __ksym;
void scx_bpf_put_idle_cpumask(const struct cpumask *cpumask) __ksym;
bool scx_bpf_test_and_clear_cpu_idle(s32 cpu) __ksym;
s32 scx_bpf_pick_idle_cpu(const cpumask_t *cpus_allowed, u64 flags) __ksym;
s32 scx_bpf_pick_any_cpu(const cpumask_t *cpus_allowed, u64 flags) __ksym;
bool scx_bpf_task_running(const struct task_struct *p) __ksym;
s32 scx_bpf_task_cpu(const struct task_struct *p) __ksym;
struct rq *scx_bpf_cpu_rq(s32 cpu) __ksym;
struct cgroup *scx_bpf_task_cgroup(struct task_struct *p) __ksym;

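/*
 * A hypothetical sketch of how the kfuncs above compose in ops.select_cpu():
 * try the default idle CPU selection and, if an idle CPU was found, dispatch
 * directly to the local DSQ. example_select_cpu() is illustrative and not
 * part of this header.
 */
static inline s32 example_select_cpu(struct task_struct *p, s32 prev_cpu,
				     u64 wake_flags)
{
	bool is_idle = false;
	s32 cpu;

	cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);
	if (is_idle)
		scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
	return cpu;
}
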
/*
 * Use the following as @it__iter when calling
 * scx_bpf_dispatch[_vtime]_from_dsq() from within bpf_for_each() loops.
 */
#define BPF_FOR_EACH_ITER	(&___it)

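/*
 * A hypothetical sketch, assuming @src_dsq_id and @dst_dsq_id are
 * user-created DSQs: walk @src_dsq_id with bpf_for_each() and move kthreads
 * over to @dst_dsq_id, passing BPF_FOR_EACH_ITER as the loop's implicit
 * iterator.
 */
static inline void example_move_kthreads(u64 src_dsq_id, u64 dst_dsq_id)
{
	struct task_struct *p;

	bpf_for_each(scx_dsq, p, src_dsq_id, 0) {
		if (p->flags & PF_KTHREAD)
			scx_bpf_dispatch_from_dsq(BPF_FOR_EACH_ITER, p,
						  dst_dsq_id, 0);
	}
}
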
static inline __attribute__((format(printf, 1, 2)))
void ___scx_bpf_bstr_format_checker(const char *fmt, ...) {}

/*
 * Helper macro for initializing the fmt and variadic argument inputs to both
 * bstr exit kfuncs. Callers of this macro should use ___fmt and ___param to
 * refer to the initialized list of inputs to the bstr kfunc.
 */
#define scx_bpf_bstr_preamble(fmt, args...)					\
	static char ___fmt[] = fmt;						\
	/*									\
	 * Note that ___param[] must have at least one				\
	 * element to keep the verifier happy.					\
	 */									\
	unsigned long long ___param[___bpf_narg(args) ?: 1] = {};		\
										\
	_Pragma("GCC diagnostic push")						\
	_Pragma("GCC diagnostic ignored \"-Wint-conversion\"")			\
	___bpf_fill(___param, args);						\
	_Pragma("GCC diagnostic pop")

/*
 * scx_bpf_exit() wraps the scx_bpf_exit_bstr() kfunc with variadic arguments
 * instead of an array of u64. Using this macro will cause the scheduler to
 * exit cleanly with the specified exit code being passed to user space.
 */
#define scx_bpf_exit(code, fmt, args...)					\
({										\
	scx_bpf_bstr_preamble(fmt, args)					\
	scx_bpf_exit_bstr(code, ___fmt, ___param, sizeof(___param));		\
	___scx_bpf_bstr_format_checker(fmt, ##args);				\
})

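/*
 * A hypothetical sketch: exit cleanly once some runtime budget is exhausted.
 * The exit code and message are purely illustrative.
 */
static inline void example_check_budget(u64 now, u64 deadline)
{
	if (now >= deadline)
		scx_bpf_exit(0, "budget exhausted at %llu", now);
}
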
/*
 * scx_bpf_error() wraps the scx_bpf_error_bstr() kfunc with variadic arguments
 * instead of an array of u64. Invoking this macro will cause the scheduler to
 * exit in an erroneous state, with diagnostic information being passed to the
 * user.
 */
#define scx_bpf_error(fmt, args...)						\
({										\
	scx_bpf_bstr_preamble(fmt, args)					\
	scx_bpf_error_bstr(___fmt, ___param, sizeof(___param));			\
	___scx_bpf_bstr_format_checker(fmt, ##args);				\
})

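/*
 * A hypothetical sketch: abort on an invariant violation. The check and
 * message are purely illustrative.
 */
static inline void example_check_cpu(s32 cpu, u32 nr_cpus)
{
	if (cpu < 0 || (u32)cpu >= nr_cpus)
		scx_bpf_error("invalid cpu %d (nr_cpus=%u)", cpu, nr_cpus);
}
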
/*
 * scx_bpf_dump() wraps the scx_bpf_dump_bstr() kfunc with variadic arguments
 * instead of an array of u64. To be used from ops.dump() and friends.
 */
#define scx_bpf_dump(fmt, args...)						\
({										\
	scx_bpf_bstr_preamble(fmt, args)					\
	scx_bpf_dump_bstr(___fmt, ___param, sizeof(___param));			\
	___scx_bpf_bstr_format_checker(fmt, ##args);				\
})

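/*
 * A hypothetical sketch of a helper called from ops.dump(); @nr_queued is an
 * illustrative counter the scheduler would maintain elsewhere.
 */
static inline void example_dump_stats(u64 nr_queued)
{
	scx_bpf_dump("nr_queued=%llu\n", nr_queued);
}
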
#define BPF_STRUCT_OPS(name, args...)						\
SEC("struct_ops/"#name)								\
BPF_PROG(name, ##args)

#define BPF_STRUCT_OPS_SLEEPABLE(name, args...)					\
SEC("struct_ops.s/"#name)							\
BPF_PROG(name, ##args)

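/*
 * A hypothetical sketch: a minimal ops.enqueue() defined with
 * BPF_STRUCT_OPS(). The macro expands to a BPF_PROG() placed in the
 * "struct_ops/example_enqueue" section, which libbpf associates with the
 * struct_ops map member of the same name.
 */
void BPF_STRUCT_OPS(example_enqueue, struct task_struct *p, u64 enq_flags)
{
	scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
}
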
/**
 * RESIZABLE_ARRAY - Generates annotations for an array that may be resized
 * @elfsec: the data section of the BPF program in which to place the array
 * @arr: the name of the array
 *
 * libbpf has an API for setting map value sizes. Since data sections (i.e.
 * bss, data, rodata) themselves are maps, a data section can be resized. If
 * a data section has an array as its last element, the BTF info for that
 * array will be adjusted so that the length of the array is extended to meet
 * the new length of the data section. This macro annotates an array to have
 * an element count of one with the assumption that this array can be resized
 * within the userspace program. It also annotates the section specifier so
 * this array exists in a custom sub data section which can be resized
 * independently.
 *
 * See RESIZE_ARRAY() for the userspace convenience macro for resizing an
 * array declared with RESIZABLE_ARRAY().
 */
#define RESIZABLE_ARRAY(elfsec, arr) arr[1] SEC("."#elfsec"."#arr)

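/*
 * A hypothetical sketch: a per-CPU counter array placed in the custom
 * ".data.example_ctrs" section. Userspace would resize it to the actual CPU
 * count with RESIZE_ARRAY() before loading the program.
 */
u64 RESIZABLE_ARRAY(data, example_ctrs);
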
/**
 * MEMBER_VPTR - Obtain the verified pointer to a struct or array member
 * @base: struct or array to index
 * @member: dereferenced member (e.g. .field, [idx0][idx1], .field[idx0] ...)
 *
 * The verifier often gets confused by the instruction sequence the compiler
 * generates for indexing struct fields or arrays. This macro forces the
 * compiler to generate a code sequence which first calculates the byte offset,
 * checks it against the struct or array size and then adds that byte offset to
 * generate the pointer to the member, which helps the verifier.
 *
 * Ideally, we want to abort if the calculated offset is out-of-bounds. However,
 * BPF currently doesn't support abort, so evaluate to %NULL instead. The caller
 * must check for %NULL and take appropriate action to appease the verifier. To
 * avoid confusing the verifier, it's best to check for %NULL and dereference
 * immediately.
 *
 *	vptr = MEMBER_VPTR(my_array, [i][j]);
 *	if (!vptr)
 *		return error;
 *	*vptr = new_value;
 *
 * sizeof(@base) should encompass the memory area to be accessed and thus can't
 * be a pointer to the area. Use `MEMBER_VPTR(*ptr, .member)` instead of
 * `MEMBER_VPTR(ptr, ->member)`.
 */
#define MEMBER_VPTR(base, member) (typeof((base) member) *)			\
({										\
	u64 __base = (u64)&(base);						\
	u64 __addr = (u64)&((base) member) - __base;				\
	_Static_assert(sizeof(base) >= sizeof((base) member),			\
		       "@base is smaller than @member, is @base a pointer?");	\
	asm volatile (								\
		"if %0 <= %[max] goto +2\n"					\
		"%0 = 0\n"							\
		"goto +1\n"							\
		"%0 += %1\n"							\
		: "+r"(__addr)							\
		: "r"(__base),							\
		  [max]"i"(sizeof(base) - sizeof((base) member)));		\
	__addr;									\
})

/**
 * ARRAY_ELEM_PTR - Obtain the verified pointer to an array element
 * @arr: array to index into
 * @i: array index
 * @n: number of elements in array
 *
 * Similar to MEMBER_VPTR() but is intended for use with arrays where the
 * element count needs to be explicit.
 * It can be used in cases where a global array is defined with an initial
 * size but is intended to be resized before loading the BPF program.
 * Without this version of the macro, MEMBER_VPTR() will use the compile time
 * size of the array to compute the max, which will result in rejection by
 * the verifier.
 */
#define ARRAY_ELEM_PTR(arr, i, n) (typeof(arr[i]) *)				\
({										\
	u64 __base = (u64)arr;							\
	u64 __addr = (u64)&(arr[i]) - __base;					\
	asm volatile (								\
		"if %0 <= %[max] goto +2\n"					\
		"%0 = 0\n"							\
		"goto +1\n"							\
		"%0 += %1\n"							\
		: "+r"(__addr)							\
		: "r"(__base),							\
		  [max]"r"(sizeof(arr[0]) * ((n) - 1)));			\
	__addr;									\
})

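/*
 * A hypothetical sketch combining the two macros above: index the resizable
 * example_ctrs array from the earlier example with a runtime element count.
 * @example_nr_cpu_ids would be set by userspace before load.
 */
const volatile u32 example_nr_cpu_ids = 1;

static inline void example_bump_ctr(s32 cpu)
{
	u64 *ctr = ARRAY_ELEM_PTR(example_ctrs, cpu, example_nr_cpu_ids);

	if (ctr)
		(*ctr)++;
}
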

/*
 * BPF declarations and helpers
 */

/* list and rbtree */
#define __contains(name, node) __attribute__((btf_decl_tag("contains:" #name ":" #node)))
#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))

void *bpf_obj_new_impl(__u64 local_type_id, void *meta) __ksym;
void bpf_obj_drop_impl(void *kptr, void *meta) __ksym;

#define bpf_obj_new(type) ((type *)bpf_obj_new_impl(bpf_core_type_id_local(type), NULL))
#define bpf_obj_drop(kptr) bpf_obj_drop_impl(kptr, NULL)

void bpf_list_push_front(struct bpf_list_head *head, struct bpf_list_node *node) __ksym;
void bpf_list_push_back(struct bpf_list_head *head, struct bpf_list_node *node) __ksym;
struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head) __ksym;
struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head) __ksym;
struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root,
				      struct bpf_rb_node *node) __ksym;
int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node,
			bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b),
			void *meta, __u64 off) __ksym;
#define bpf_rbtree_add(head, node, less) bpf_rbtree_add_impl(head, node, less, NULL, 0)

struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root) __ksym;

void *bpf_refcount_acquire_impl(void *kptr, void *meta) __ksym;
#define bpf_refcount_acquire(kptr) bpf_refcount_acquire_impl(kptr, NULL)

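/*
 * A hypothetical sketch of the allocation and list kfuncs: a spin-locked BPF
 * linked list of u64 values. The private() section ties the lock to the list
 * head so the verifier can check that the right lock is held.
 */
struct example_elem {
	struct bpf_list_node node;
	u64 value;
};

private(EXAMPLE_LIST) struct bpf_spin_lock example_list_lock;
private(EXAMPLE_LIST) struct bpf_list_head example_list __contains(example_elem, node);

static inline int example_push_value(u64 value)
{
	struct example_elem *e;

	e = bpf_obj_new(struct example_elem);
	if (!e)
		return -ENOMEM;
	e->value = value;

	bpf_spin_lock(&example_list_lock);
	bpf_list_push_back(&example_list, &e->node);
	bpf_spin_unlock(&example_list_lock);
	return 0;
}
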
/* task */
struct task_struct *bpf_task_from_pid(s32 pid) __ksym;
struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym;
void bpf_task_release(struct task_struct *p) __ksym;

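/*
 * A hypothetical sketch of the acquire/release discipline: look up a task by
 * PID, inspect it and drop the acquired reference.
 */
static inline bool example_pid_is_kthread(s32 pid)
{
	struct task_struct *p;
	bool ret;

	p = bpf_task_from_pid(pid);
	if (!p)
		return false;
	ret = p->flags & PF_KTHREAD;
	bpf_task_release(p);
	return ret;
}
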
/* cgroup */
struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level) __ksym;
void bpf_cgroup_release(struct cgroup *cgrp) __ksym;
struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym;

/* css iteration */
struct bpf_iter_css;
struct cgroup_subsys_state;
extern int bpf_iter_css_new(struct bpf_iter_css *it,
			    struct cgroup_subsys_state *start,
			    unsigned int flags) __weak __ksym;
extern struct cgroup_subsys_state *
bpf_iter_css_next(struct bpf_iter_css *it) __weak __ksym;
extern void bpf_iter_css_destroy(struct bpf_iter_css *it) __weak __ksym;

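/*
 * A hypothetical sketch: resolve a cgroup from its ID, read the ID of its
 * ancestor at @level, and release each acquired reference.
 */
static inline u64 example_ancestor_cgid(u64 cgid, int level)
{
	struct cgroup *cgrp, *anc;
	u64 ret = 0;

	cgrp = bpf_cgroup_from_id(cgid);
	if (!cgrp)
		return 0;

	anc = bpf_cgroup_ancestor(cgrp, level);
	if (anc) {
		ret = anc->kn->id;
		bpf_cgroup_release(anc);
	}
	bpf_cgroup_release(cgrp);
	return ret;
}
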
/* cpumask */
struct bpf_cpumask *bpf_cpumask_create(void) __ksym;
struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask) __ksym;
void bpf_cpumask_release(struct bpf_cpumask *cpumask) __ksym;
u32 bpf_cpumask_first(const struct cpumask *cpumask) __ksym;
u32 bpf_cpumask_first_zero(const struct cpumask *cpumask) __ksym;
void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask) __ksym;
bool bpf_cpumask_test_and_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
bool bpf_cpumask_test_and_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
void bpf_cpumask_setall(struct bpf_cpumask *cpumask) __ksym;
void bpf_cpumask_clear(struct bpf_cpumask *cpumask) __ksym;
bool bpf_cpumask_and(struct bpf_cpumask *dst, const struct cpumask *src1,
		     const struct cpumask *src2) __ksym;
void bpf_cpumask_or(struct bpf_cpumask *dst, const struct cpumask *src1,
		    const struct cpumask *src2) __ksym;
void bpf_cpumask_xor(struct bpf_cpumask *dst, const struct cpumask *src1,
		     const struct cpumask *src2) __ksym;
bool bpf_cpumask_equal(const struct cpumask *src1, const struct cpumask *src2) __ksym;
bool bpf_cpumask_intersects(const struct cpumask *src1, const struct cpumask *src2) __ksym;
bool bpf_cpumask_subset(const struct cpumask *src1, const struct cpumask *src2) __ksym;
bool bpf_cpumask_empty(const struct cpumask *cpumask) __ksym;
bool bpf_cpumask_full(const struct cpumask *cpumask) __ksym;
void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src) __ksym;
u32 bpf_cpumask_any_distribute(const struct cpumask *cpumask) __ksym;
u32 bpf_cpumask_any_and_distribute(const struct cpumask *src1,
				   const struct cpumask *src2) __ksym;

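/*
 * A hypothetical sketch: pick a CPU that is both allowed and idle, spreading
 * the picks with the _distribute variant. A return value >= the number of
 * possible CPUs means no such CPU existed.
 */
static inline u32 example_pick_idle_in(const struct cpumask *allowed)
{
	const struct cpumask *idle;
	u32 cpu;

	idle = scx_bpf_get_idle_cpumask();
	cpu = bpf_cpumask_any_and_distribute(allowed, idle);
	scx_bpf_put_idle_cpumask(idle);
	return cpu;
}
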
/* rcu */
void bpf_rcu_read_lock(void) __ksym;
void bpf_rcu_read_unlock(void) __ksym;

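/*
 * A hypothetical sketch: task_struct->real_parent is RCU-protected, so wrap
 * the dereference in an explicit RCU read-side critical section.
 */
static inline s32 example_parent_pid(struct task_struct *p)
{
	s32 pid;

	bpf_rcu_read_lock();
	pid = p->real_parent->pid;
	bpf_rcu_read_unlock();
	return pid;
}
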

/*
 * Other helpers
 */

/* useful compiler attributes */
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#define __maybe_unused __attribute__((__unused__))

/*
 * READ/WRITE_ONCE() are from the kernel (include/asm-generic/rwonce.h). They
 * prevent the compiler from caching, redoing or reordering reads or writes.
 */
typedef __u8  __attribute__((__may_alias__))  __u8_alias_t;
typedef __u16 __attribute__((__may_alias__)) __u16_alias_t;
typedef __u32 __attribute__((__may_alias__)) __u32_alias_t;
typedef __u64 __attribute__((__may_alias__)) __u64_alias_t;

static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(__u8_alias_t  *) res = *(volatile __u8_alias_t  *) p; break;
	case 2: *(__u16_alias_t *) res = *(volatile __u16_alias_t *) p; break;
	case 4: *(__u32_alias_t *) res = *(volatile __u32_alias_t *) p; break;
	case 8: *(__u64_alias_t *) res = *(volatile __u64_alias_t *) p; break;
	default:
		barrier();
		__builtin_memcpy((void *)res, (const void *)p, size);
		barrier();
	}
}

static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile  __u8_alias_t *) p = *(__u8_alias_t  *) res; break;
	case 2: *(volatile __u16_alias_t *) p = *(__u16_alias_t *) res; break;
	case 4: *(volatile __u32_alias_t *) p = *(__u32_alias_t *) res; break;
	case 8: *(volatile __u64_alias_t *) p = *(__u64_alias_t *) res; break;
	default:
		barrier();
		__builtin_memcpy((void *)p, (const void *)res, size);
		barrier();
	}
}

#define READ_ONCE(x)					\
({							\
	union { typeof(x) __val; char __c[1]; } __u =	\
		{ .__c = { 0 } };			\
	__read_once_size(&(x), __u.__c, sizeof(x));	\
	__u.__val;					\
})

#define WRITE_ONCE(x, val)				\
({							\
	union { typeof(x) __val; char __c[1]; } __u =	\
		{ .__val = (val) };			\
	__write_once_size(&(x), __u.__c, sizeof(x));	\
	__u.__val;					\
})

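/*
 * A hypothetical sketch: a statistic written by the BPF side and read
 * concurrently from userspace. READ_ONCE()/WRITE_ONCE() keep each access a
 * single, untorn load or store.
 */
static u64 example_stat;

static inline void example_bump_stat(void)
{
	WRITE_ONCE(example_stat, READ_ONCE(example_stat) + 1);
}
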
/*
 * log2_u32 - Compute the base 2 logarithm of a 32-bit exponential value.
 * @v: The value for which we're computing the base 2 logarithm.
 */
static inline u32 log2_u32(u32 v)
{
	u32 r;
	u32 shift;

	r = (v > 0xFFFF) << 4; v >>= r;
	shift = (v > 0xFF) << 3; v >>= shift; r |= shift;
	shift = (v > 0xF) << 2; v >>= shift; r |= shift;
	shift = (v > 0x3) << 1; v >>= shift; r |= shift;
	r |= (v >> 1);
	return r;
}

/*
 * log2_u64 - Compute the base 2 logarithm of a 64-bit exponential value.
 * @v: The value for which we're computing the base 2 logarithm.
 *
 * Note that unlike log2_u32(), this returns floor(log2(@v)) + 1, i.e. the
 * 1-based position of the most significant set bit.
 */
static inline u32 log2_u64(u64 v)
{
	u32 hi = v >> 32;
	if (hi)
		return log2_u32(hi) + 32 + 1;
	else
		return log2_u32(v) + 1;
}

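/*
 * A hypothetical sketch: bucket slice lengths by order of magnitude, e.g. for
 * a histogram emitted via scx_bpf_dump(). Since log2_u64() is 1-based (see
 * above), bucket 0 is left for v == 0.
 */
static inline u32 example_slice_bucket(u64 slice_ns)
{
	return slice_ns ? log2_u64(slice_ns) : 0;
}
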
#include "compat.bpf.h"

#endif	/* __SCX_COMMON_BPF_H */