xref: /linux/tools/sched_ext/include/scx/compat.h (revision 07814a9439a3b03d79a1001614b5bc1cab69bcec)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2024 Tejun Heo <tj@kernel.org>
 * Copyright (c) 2024 David Vernet <dvernet@meta.com>
 */
#ifndef __SCX_COMPAT_H
#define __SCX_COMPAT_H

#include <bpf/btf.h>

struct btf *__COMPAT_vmlinux_btf __attribute__((weak));

static inline void __COMPAT_load_vmlinux_btf(void)
{
	if (!__COMPAT_vmlinux_btf) {
		__COMPAT_vmlinux_btf = btf__load_vmlinux_btf();
		SCX_BUG_ON(!__COMPAT_vmlinux_btf, "btf__load_vmlinux_btf()");
	}
}

static inline bool __COMPAT_read_enum(const char *type, const char *name, u64 *v)
{
	const struct btf_type *t;
	const char *n;
	s32 tid;
	int i;

	__COMPAT_load_vmlinux_btf();

	tid = btf__find_by_name(__COMPAT_vmlinux_btf, type);
	if (tid < 0)
		return false;

	t = btf__type_by_id(__COMPAT_vmlinux_btf, tid);
	SCX_BUG_ON(!t, "btf__type_by_id(%d)", tid);

	if (btf_is_enum(t)) {
		struct btf_enum *e = btf_enum(t);

		for (i = 0; i < BTF_INFO_VLEN(t->info); i++) {
			n = btf__name_by_offset(__COMPAT_vmlinux_btf, e[i].name_off);
			SCX_BUG_ON(!n, "btf__name_by_offset()");
			if (!strcmp(n, name)) {
				*v = e[i].val;
				return true;
			}
		}
	} else if (btf_is_enum64(t)) {
		struct btf_enum64 *e = btf_enum64(t);

		for (i = 0; i < BTF_INFO_VLEN(t->info); i++) {
			n = btf__name_by_offset(__COMPAT_vmlinux_btf, e[i].name_off);
			SCX_BUG_ON(!n, "btf__name_by_offset()");
			if (!strcmp(n, name)) {
				*v = btf_enum64_value(&e[i]);
				return true;
			}
		}
	}

	return false;
}

#define __COMPAT_ENUM_OR_ZERO(__type, __ent)					\
({										\
	u64 __val = 0;								\
	__COMPAT_read_enum(__type, __ent, &__val);				\
	__val;									\
})
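
/*
 * Illustrative sketch: resolving an enum value from vmlinux BTF at runtime
 * lets a binary built against newer headers keep working on older kernels
 * that lack the entry. "scx_ops_flags"/"SCX_OPS_SWITCH_PARTIAL" are real
 * sched_ext names; the local variable is just for the example.
 *
 *	u64 partial;
 *
 *	if (__COMPAT_read_enum("scx_ops_flags", "SCX_OPS_SWITCH_PARTIAL", &partial))
 *		printf("SCX_OPS_SWITCH_PARTIAL = 0x%llx\n", (unsigned long long)partial);
 *
 *	When a missing entry should simply read as zero:
 *
 *	partial = __COMPAT_ENUM_OR_ZERO("scx_ops_flags", "SCX_OPS_SWITCH_PARTIAL");
 */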

static inline bool __COMPAT_has_ksym(const char *ksym)
{
	__COMPAT_load_vmlinux_btf();
	return btf__find_by_name(__COMPAT_vmlinux_btf, ksym) >= 0;
}
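
/*
 * Illustrative sketch: feature-gate on the presence of a kernel symbol in
 * vmlinux BTF. scx_bpf_create_dsq() is used here purely as an example
 * symbol; substitute whatever the caller actually depends on.
 *
 *	if (!__COMPAT_has_ksym("scx_bpf_create_dsq"))
 *		fprintf(stderr, "sched_ext kfuncs not available, kernel too old?\n");
 */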

static inline bool __COMPAT_struct_has_field(const char *type, const char *field)
{
	const struct btf_type *t;
	const struct btf_member *m;
	const char *n;
	s32 tid;
	int i;

	__COMPAT_load_vmlinux_btf();
	tid = btf__find_by_name_kind(__COMPAT_vmlinux_btf, type, BTF_KIND_STRUCT);
	if (tid < 0)
		return false;

	t = btf__type_by_id(__COMPAT_vmlinux_btf, tid);
	SCX_BUG_ON(!t, "btf__type_by_id(%d)", tid);

	m = btf_members(t);

	for (i = 0; i < BTF_INFO_VLEN(t->info); i++) {
		n = btf__name_by_offset(__COMPAT_vmlinux_btf, m[i].name_off);
		SCX_BUG_ON(!n, "btf__name_by_offset()");
		if (!strcmp(n, field))
			return true;
	}

	return false;
}
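
/*
 * Illustrative sketch: probe whether the running kernel's struct
 * sched_ext_ops has a given member before relying on it. SCX_OPS_OPEN()
 * below uses the same helper to require the ->dump() op; "cgroup_init" is
 * just an example of an optional field a scheduler might check for.
 *
 *	if (!__COMPAT_struct_has_field("sched_ext_ops", "cgroup_init"))
 *		fprintf(stderr, "cgroup ops not supported by this kernel\n");
 */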

#define SCX_OPS_SWITCH_PARTIAL							\
	__COMPAT_ENUM_OR_ZERO("scx_ops_flags", "SCX_OPS_SWITCH_PARTIAL")
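
/*
 * Illustrative sketch: because the value is read out of vmlinux BTF, the
 * flag can be or'd into the ops flags after SCX_OPS_OPEN() even when the
 * build-time UAPI headers predate it. Skeleton and ops names below are
 * placeholders.
 *
 *	skel->struct_ops.simple_ops->flags |= SCX_OPS_SWITCH_PARTIAL;
 */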

/*
 * struct sched_ext_ops can change over time. If compat.bpf.h::SCX_OPS_DEFINE()
 * is used to define ops and compat.h::SCX_OPS_LOAD/ATTACH() are used to load
 * and attach it, backward compatibility is automatically maintained where
 * reasonable.
 *
 * ec7e3b0463e1 ("implement-ops") in https://github.com/sched-ext/sched_ext is
 * the current minimum required kernel version.
 */
#define SCX_OPS_OPEN(__ops_name, __scx_name) ({					\
	struct __scx_name *__skel;						\
										\
	SCX_BUG_ON(!__COMPAT_struct_has_field("sched_ext_ops", "dump"),		\
		   "sched_ext_ops.dump() missing, kernel too old?");		\
										\
	__skel = __scx_name##__open();						\
	SCX_BUG_ON(!__skel, "Could not open " #__scx_name);			\
	__skel;									\
})

#define SCX_OPS_LOAD(__skel, __ops_name, __scx_name, __uei_name) ({		\
	UEI_SET_SIZE(__skel, __ops_name, __uei_name);				\
	SCX_BUG_ON(__scx_name##__load((__skel)), "Failed to load skel");	\
})

/*
 * New versions of bpftool now emit additional link placeholders for BPF maps,
 * and set up the BPF skeleton in such a way that libbpf will auto-attach BPF
 * maps, assuming libbpf is recent enough (v1.5+). Old libbpf will do nothing
 * with those links and won't attempt to auto-attach maps.
 *
 * To maintain compatibility with older libbpf while avoiding trying to attach
 * twice, disable the autoattach feature on newer libbpf.
 */
#if LIBBPF_MAJOR_VERSION > 1 ||							\
	(LIBBPF_MAJOR_VERSION == 1 && LIBBPF_MINOR_VERSION >= 5)
#define __SCX_OPS_DISABLE_AUTOATTACH(__skel, __ops_name)			\
	bpf_map__set_autoattach((__skel)->maps.__ops_name, false)
#else
#define __SCX_OPS_DISABLE_AUTOATTACH(__skel, __ops_name) do {} while (0)
#endif

#define SCX_OPS_ATTACH(__skel, __ops_name, __scx_name) ({			\
	struct bpf_link *__link;						\
	__SCX_OPS_DISABLE_AUTOATTACH(__skel, __ops_name);			\
	SCX_BUG_ON(__scx_name##__attach((__skel)), "Failed to attach skel");	\
	__link = bpf_map__attach_struct_ops((__skel)->maps.__ops_name);		\
	SCX_BUG_ON(!__link, "Failed to attach struct_ops");			\
	__link;									\
})
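
/*
 * Illustrative sketch of the intended flow in a scheduler's main(), assuming
 * a skeleton named scx_simple with a struct_ops map named simple_ops and a
 * user_exit_info member named uei (all placeholder names):
 *
 *	struct scx_simple *skel;
 *	struct bpf_link *link;
 *
 *	skel = SCX_OPS_OPEN(simple_ops, scx_simple);
 *	skel->struct_ops.simple_ops->flags |= SCX_OPS_SWITCH_PARTIAL;
 *	SCX_OPS_LOAD(skel, simple_ops, scx_simple, uei);
 *	link = SCX_OPS_ATTACH(skel, simple_ops, scx_simple);
 *
 *	... run until exit ...
 *
 *	bpf_link__destroy(link);
 *	scx_simple__destroy(skel);
 */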

#endif	/* __SCX_COMPAT_H */