xref: /linux/tools/sched_ext/include/scx/compat.h (revision 55d0969c451159cff86949b38c39171cab962069)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2024 Tejun Heo <tj@kernel.org>
 * Copyright (c) 2024 David Vernet <dvernet@meta.com>
 */
#ifndef __SCX_COMPAT_H
#define __SCX_COMPAT_H

#include <bpf/btf.h>
#include <errno.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

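/*
 * Lazily loaded vmlinux BTF, shared by the compat helpers below to probe what
 * the running kernel provides.
 */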
struct btf *__COMPAT_vmlinux_btf __attribute__((weak));

static inline void __COMPAT_load_vmlinux_btf(void)
{
	if (!__COMPAT_vmlinux_btf) {
		__COMPAT_vmlinux_btf = btf__load_vmlinux_btf();
		SCX_BUG_ON(!__COMPAT_vmlinux_btf, "btf__load_vmlinux_btf()");
	}
}

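/*
 * Look up enumerator @name in the enum (or enum64) type @type in vmlinux BTF
 * and store its value in @v. Returns false if either the type or the
 * enumerator doesn't exist, e.g. because the running kernel is too old.
 */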
static inline bool __COMPAT_read_enum(const char *type, const char *name, u64 *v)
{
	const struct btf_type *t;
	const char *n;
	s32 tid;
	int i;

	__COMPAT_load_vmlinux_btf();

	tid = btf__find_by_name(__COMPAT_vmlinux_btf, type);
	if (tid < 0)
		return false;

	t = btf__type_by_id(__COMPAT_vmlinux_btf, tid);
	SCX_BUG_ON(!t, "btf__type_by_id(%d)", tid);

	if (btf_is_enum(t)) {
		struct btf_enum *e = btf_enum(t);

		for (i = 0; i < BTF_INFO_VLEN(t->info); i++) {
			n = btf__name_by_offset(__COMPAT_vmlinux_btf, e[i].name_off);
			SCX_BUG_ON(!n, "btf__name_by_offset()");
			if (!strcmp(n, name)) {
				*v = e[i].val;
				return true;
			}
		}
	} else if (btf_is_enum64(t)) {
		struct btf_enum64 *e = btf_enum64(t);

		for (i = 0; i < BTF_INFO_VLEN(t->info); i++) {
			n = btf__name_by_offset(__COMPAT_vmlinux_btf, e[i].name_off);
			SCX_BUG_ON(!n, "btf__name_by_offset()");
			if (!strcmp(n, name)) {
				*v = btf_enum64_value(&e[i]);
				return true;
			}
		}
	}

	return false;
}

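/*
 * Evaluate to the value of enumerator @__ent in enum @__type, or to 0 if the
 * running kernel doesn't define it.
 */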
#define __COMPAT_ENUM_OR_ZERO(__type, __ent)					\
({										\
	u64 __val = 0;								\
	__COMPAT_read_enum(__type, __ent, &__val);				\
	__val;									\
})

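/* Return true if the symbol @ksym is present in vmlinux BTF. */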
static inline bool __COMPAT_has_ksym(const char *ksym)
{
	__COMPAT_load_vmlinux_btf();
	return btf__find_by_name(__COMPAT_vmlinux_btf, ksym) >= 0;
}

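/* Return true if struct @type in vmlinux BTF has a member named @field. */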
static inline bool __COMPAT_struct_has_field(const char *type, const char *field)
{
	const struct btf_type *t;
	const struct btf_member *m;
	const char *n;
	s32 tid;
	int i;

	__COMPAT_load_vmlinux_btf();
	tid = btf__find_by_name_kind(__COMPAT_vmlinux_btf, type, BTF_KIND_STRUCT);
	if (tid < 0)
		return false;

	t = btf__type_by_id(__COMPAT_vmlinux_btf, tid);
	SCX_BUG_ON(!t, "btf__type_by_id(%d)", tid);

	m = btf_members(t);

	for (i = 0; i < BTF_INFO_VLEN(t->info); i++) {
		n = btf__name_by_offset(__COMPAT_vmlinux_btf, m[i].name_off);
		SCX_BUG_ON(!n, "btf__name_by_offset()");
		if (!strcmp(n, field))
			return true;
	}

	return false;
}

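/*
 * Resolved from vmlinux BTF at runtime; evaluates to 0 on kernels that don't
 * define the flag.
 */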
#define SCX_OPS_SWITCH_PARTIAL							\
	__COMPAT_ENUM_OR_ZERO("scx_ops_flags", "SCX_OPS_SWITCH_PARTIAL")

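/*
 * Read the hotplug sequence number published by sched_ext in sysfs. Returns
 * -ENOENT if /sys/kernel/sched_ext/hotplug_seq can't be opened.
 */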
static inline long scx_hotplug_seq(void)
{
	int fd;
	char buf[32];
	ssize_t len;
	long val;

	fd = open("/sys/kernel/sched_ext/hotplug_seq", O_RDONLY);
	if (fd < 0)
		return -ENOENT;

	len = read(fd, buf, sizeof(buf) - 1);
	SCX_BUG_ON(len <= 0, "read failed (%ld)", len);
	buf[len] = 0;
	close(fd);

	val = strtoul(buf, NULL, 10);
	SCX_BUG_ON(val < 0, "invalid num hotplug events: %lu", val);

	return val;
}

/*
 * struct sched_ext_ops can change over time. If compat.bpf.h::SCX_OPS_DEFINE()
 * is used to define ops and compat.h::SCX_OPS_LOAD/ATTACH() are used to load
 * and attach it, backward compatibility is automatically maintained where
 * reasonable.
 *
 * ec7e3b0463e1 ("implement-ops") in https://github.com/sched-ext/sched_ext is
 * the current minimum required kernel version.
 *
 * An illustrative usage sketch can be found at the bottom of this header.
 */
#define SCX_OPS_OPEN(__ops_name, __scx_name) ({					\
	struct __scx_name *__skel;						\
										\
	SCX_BUG_ON(!__COMPAT_struct_has_field("sched_ext_ops", "dump"),		\
		   "sched_ext_ops.dump() missing, kernel too old?");		\
										\
	__skel = __scx_name##__open();						\
	SCX_BUG_ON(!__skel, "Could not open " #__scx_name);			\
	__skel->struct_ops.__ops_name->hotplug_seq = scx_hotplug_seq();	\
	__skel;									\
})

#define SCX_OPS_LOAD(__skel, __ops_name, __scx_name, __uei_name) ({		\
	UEI_SET_SIZE(__skel, __ops_name, __uei_name);				\
	SCX_BUG_ON(__scx_name##__load((__skel)), "Failed to load skel");	\
})

/*
 * New versions of bpftool now emit additional link placeholders for BPF maps,
 * and set up the BPF skeleton in such a way that libbpf will auto-attach BPF
 * maps, assuming libbpf is recent enough (v1.5+). Old libbpf will do nothing
 * with those links and won't attempt to auto-attach maps.
 *
 * To maintain compatibility with older libbpf while avoiding trying to attach
 * twice, disable the autoattach feature on newer libbpf.
 */
#if LIBBPF_MAJOR_VERSION > 1 ||							\
	(LIBBPF_MAJOR_VERSION == 1 && LIBBPF_MINOR_VERSION >= 5)
#define __SCX_OPS_DISABLE_AUTOATTACH(__skel, __ops_name)			\
	bpf_map__set_autoattach((__skel)->maps.__ops_name, false)
#else
#define __SCX_OPS_DISABLE_AUTOATTACH(__skel, __ops_name) do {} while (0)
#endif

#define SCX_OPS_ATTACH(__skel, __ops_name, __scx_name) ({			\
	struct bpf_link *__link;						\
	__SCX_OPS_DISABLE_AUTOATTACH(__skel, __ops_name);			\
	SCX_BUG_ON(__scx_name##__attach((__skel)), "Failed to attach skel");	\
	__link = bpf_map__attach_struct_ops((__skel)->maps.__ops_name);	\
	SCX_BUG_ON(!__link, "Failed to attach struct_ops");			\
	__link;									\
})

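/*
 * Illustrative usage sketch (not part of the upstream header): with a
 * hypothetical skeleton named scx_example whose struct_ops member is
 * example_ops and whose user exit info member is uei, a loader would
 * typically do:
 *
 *	struct scx_example *skel;
 *	struct bpf_link *link;
 *
 *	skel = SCX_OPS_OPEN(example_ops, scx_example);
 *	skel->struct_ops.example_ops->flags |= SCX_OPS_SWITCH_PARTIAL;
 *	SCX_OPS_LOAD(skel, example_ops, scx_example, uei);
 *	link = SCX_OPS_ATTACH(skel, example_ops, scx_example);
 *	... run until exit is requested ...
 *	bpf_link__destroy(link);
 *	scx_example__destroy(skel);
 */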
#endif	/* __SCX_COMPAT_H */