/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2024 Tejun Heo <tj@kernel.org>
 * Copyright (c) 2024 David Vernet <dvernet@meta.com>
 */
#ifndef __SCX_COMPAT_H
#define __SCX_COMPAT_H

#include <bpf/btf.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

struct btf *__COMPAT_vmlinux_btf __attribute__((weak));

/* Lazily load vmlinux BTF the first time any compat helper needs it. */
static inline void __COMPAT_load_vmlinux_btf(void)
{
	if (!__COMPAT_vmlinux_btf) {
		__COMPAT_vmlinux_btf = btf__load_vmlinux_btf();
		SCX_BUG_ON(!__COMPAT_vmlinux_btf, "btf__load_vmlinux_btf()");
	}
}

/*
 * Look up entry @name in enum @type in vmlinux BTF and store its value in @v.
 * Returns false if the running kernel doesn't define the enum or the entry.
 */
static inline bool __COMPAT_read_enum(const char *type, const char *name, u64 *v)
{
	const struct btf_type *t;
	const char *n;
	s32 tid;
	int i;

	__COMPAT_load_vmlinux_btf();

	tid = btf__find_by_name(__COMPAT_vmlinux_btf, type);
	if (tid < 0)
		return false;

	t = btf__type_by_id(__COMPAT_vmlinux_btf, tid);
	SCX_BUG_ON(!t, "btf__type_by_id(%d)", tid);

	if (btf_is_enum(t)) {
		struct btf_enum *e = btf_enum(t);

		for (i = 0; i < BTF_INFO_VLEN(t->info); i++) {
			n = btf__name_by_offset(__COMPAT_vmlinux_btf, e[i].name_off);
			SCX_BUG_ON(!n, "btf__name_by_offset()");
			if (!strcmp(n, name)) {
				*v = e[i].val;
				return true;
			}
		}
	} else if (btf_is_enum64(t)) {
		struct btf_enum64 *e = btf_enum64(t);

		for (i = 0; i < BTF_INFO_VLEN(t->info); i++) {
			n = btf__name_by_offset(__COMPAT_vmlinux_btf, e[i].name_off);
			SCX_BUG_ON(!n, "btf__name_by_offset()");
			if (!strcmp(n, name)) {
				*v = btf_enum64_value(&e[i]);
				return true;
			}
		}
	}

	return false;
}

/* Evaluates to the value of entry @__ent in enum @__type, or 0 if missing. */
#define __COMPAT_ENUM_OR_ZERO(__type, __ent)					\
({										\
	u64 __val = 0;								\
	__COMPAT_read_enum(__type, __ent, &__val);				\
	__val;									\
})

/* Returns true if the running kernel's BTF contains symbol @ksym. */
static inline bool __COMPAT_has_ksym(const char *ksym)
{
	__COMPAT_load_vmlinux_btf();
	return btf__find_by_name(__COMPAT_vmlinux_btf, ksym) >= 0;
}

/* Returns true if struct @type has member @field on the running kernel. */
static inline bool __COMPAT_struct_has_field(const char *type, const char *field)
{
	const struct btf_type *t;
	const struct btf_member *m;
	const char *n;
	s32 tid;
	int i;

	__COMPAT_load_vmlinux_btf();
	tid = btf__find_by_name_kind(__COMPAT_vmlinux_btf, type, BTF_KIND_STRUCT);
	if (tid < 0)
		return false;

	t = btf__type_by_id(__COMPAT_vmlinux_btf, tid);
	SCX_BUG_ON(!t, "btf__type_by_id(%d)", tid);

	m = btf_members(t);

	for (i = 0; i < BTF_INFO_VLEN(t->info); i++) {
		n = btf__name_by_offset(__COMPAT_vmlinux_btf, m[i].name_off);
		SCX_BUG_ON(!n, "btf__name_by_offset()");
		if (!strcmp(n, field))
			return true;
	}

	return false;
}
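/*
 * Example: before configuring a scheduler, a loader can use the helpers above
 * to probe what the running kernel supports. A minimal sketch; the function
 * name probe_kernel() and the printed messages are illustrative only, while
 * the probed enum, field, and ksym names exist on recent kernels:
 *
 *	static void probe_kernel(void)
 *	{
 *		// 0 when this kernel's scx_ops_flags lacks the entry
 *		u64 enq_last = __COMPAT_ENUM_OR_ZERO("scx_ops_flags",
 *						     "SCX_OPS_ENQ_LAST");
 *
 *		// field presence distinguishes sched_ext_ops revisions
 *		if (!__COMPAT_struct_has_field("sched_ext_ops", "cgroup_set_idle"))
 *			fprintf(stderr, "ops.cgroup_set_idle() unsupported\n");
 *
 *		// any symbol in vmlinux BTF can be tested, e.g. a kfunc
 *		if (!__COMPAT_has_ksym("scx_bpf_task_cgroup"))
 *			fprintf(stderr, "scx_bpf_task_cgroup() unavailable\n");
 *	}
 */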
#define SCX_OPS_FLAG(name) __COMPAT_ENUM_OR_ZERO("scx_ops_flags", #name)

#define SCX_OPS_KEEP_BUILTIN_IDLE	SCX_OPS_FLAG(SCX_OPS_KEEP_BUILTIN_IDLE)
#define SCX_OPS_ENQ_LAST		SCX_OPS_FLAG(SCX_OPS_ENQ_LAST)
#define SCX_OPS_ENQ_EXITING		SCX_OPS_FLAG(SCX_OPS_ENQ_EXITING)
#define SCX_OPS_SWITCH_PARTIAL		SCX_OPS_FLAG(SCX_OPS_SWITCH_PARTIAL)
#define SCX_OPS_ENQ_MIGRATION_DISABLED	SCX_OPS_FLAG(SCX_OPS_ENQ_MIGRATION_DISABLED)
#define SCX_OPS_ALLOW_QUEUED_WAKEUP	SCX_OPS_FLAG(SCX_OPS_ALLOW_QUEUED_WAKEUP)
#define SCX_OPS_BUILTIN_IDLE_PER_NODE	SCX_OPS_FLAG(SCX_OPS_BUILTIN_IDLE_PER_NODE)

#define SCX_PICK_IDLE_FLAG(name) __COMPAT_ENUM_OR_ZERO("scx_pick_idle_cpu_flags", #name)

#define SCX_PICK_IDLE_CORE	SCX_PICK_IDLE_FLAG(SCX_PICK_IDLE_CORE)
#define SCX_PICK_IDLE_IN_NODE	SCX_PICK_IDLE_FLAG(SCX_PICK_IDLE_IN_NODE)

/*
 * Returns the kernel's current count of CPU hotplug events, or -ENOENT if
 * sched_ext isn't available on this kernel.
 */
static inline long scx_hotplug_seq(void)
{
	int fd;
	char buf[32];
	ssize_t len;
	long val;

	fd = open("/sys/kernel/sched_ext/hotplug_seq", O_RDONLY);
	if (fd < 0)
		return -ENOENT;

	len = read(fd, buf, sizeof(buf) - 1);
	SCX_BUG_ON(len <= 0, "read failed (%ld)", len);
	buf[len] = 0;
	close(fd);

	val = strtoul(buf, NULL, 10);
	SCX_BUG_ON(val < 0, "invalid num hotplug events: %ld", val);

	return val;
}

/*
 * struct sched_ext_ops can change over time. If compat.bpf.h::SCX_OPS_DEFINE()
 * is used to define ops and compat.h::SCX_OPS_LOAD/ATTACH() are used to load
 * and attach it, backward compatibility is automatically maintained where
 * reasonable.
 *
 * ec7e3b0463e1 ("implement-ops") in https://github.com/sched-ext/sched_ext is
 * the current minimum required kernel version.
 *
 * COMPAT:
 * - v6.17: ops.cgroup_set_bandwidth() added; SCX_OPS_OPEN() clears it on
 *   older kernels.
 * - v6.19: ops.cgroup_set_idle() added; likewise cleared on older kernels.
 */
#define SCX_OPS_OPEN(__ops_name, __scx_name) ({					\
	struct __scx_name *__skel;						\
										\
	SCX_BUG_ON(!__COMPAT_struct_has_field("sched_ext_ops", "dump"),	\
		   "sched_ext_ops.dump() missing, kernel too old?");		\
										\
	__skel = __scx_name##__open();						\
	SCX_BUG_ON(!__skel, "Could not open " #__scx_name);			\
	__skel->struct_ops.__ops_name->hotplug_seq = scx_hotplug_seq();	\
	SCX_ENUM_INIT(__skel);							\
	/* drop ops members this kernel's sched_ext_ops doesn't know about */	\
	if (__skel->struct_ops.__ops_name->cgroup_set_bandwidth &&		\
	    !__COMPAT_struct_has_field("sched_ext_ops", "cgroup_set_bandwidth")) { \
		fprintf(stderr, "WARNING: kernel doesn't support ops.cgroup_set_bandwidth()\n"); \
		__skel->struct_ops.__ops_name->cgroup_set_bandwidth = NULL;	\
	}									\
	if (__skel->struct_ops.__ops_name->cgroup_set_idle &&			\
	    !__COMPAT_struct_has_field("sched_ext_ops", "cgroup_set_idle")) {	\
		fprintf(stderr, "WARNING: kernel doesn't support ops.cgroup_set_idle()\n"); \
		__skel->struct_ops.__ops_name->cgroup_set_idle = NULL;		\
	}									\
	__skel;									\
})

#define SCX_OPS_LOAD(__skel, __ops_name, __scx_name, __uei_name) ({		\
	UEI_SET_SIZE(__skel, __ops_name, __uei_name);				\
	SCX_BUG_ON(__scx_name##__load((__skel)), "Failed to load skel");	\
})

/*
 * Newer versions of bpftool emit additional link placeholders for BPF maps
 * and set up the BPF skeleton so that libbpf auto-attaches BPF maps,
 * assuming libbpf is recent enough (v1.5+). Older libbpf does nothing with
 * those links and won't attempt to auto-attach maps.
 *
 * To maintain compatibility with older libbpf while avoiding attaching the
 * struct_ops map twice, disable auto-attach on newer libbpf.
 */
#if LIBBPF_MAJOR_VERSION > 1 ||							\
	(LIBBPF_MAJOR_VERSION == 1 && LIBBPF_MINOR_VERSION >= 5)
#define __SCX_OPS_DISABLE_AUTOATTACH(__skel, __ops_name)			\
	bpf_map__set_autoattach((__skel)->maps.__ops_name, false)
#else
#define __SCX_OPS_DISABLE_AUTOATTACH(__skel, __ops_name) do {} while (0)
#endif

#define SCX_OPS_ATTACH(__skel, __ops_name, __scx_name) ({			\
	struct bpf_link *__link;						\
	__SCX_OPS_DISABLE_AUTOATTACH(__skel, __ops_name);			\
	SCX_BUG_ON(__scx_name##__attach((__skel)), "Failed to attach skel");	\
	__link = bpf_map__attach_struct_ops((__skel)->maps.__ops_name);	\
	SCX_BUG_ON(!__link, "Failed to attach struct_ops");			\
	__link;									\
})

#endif	/* __SCX_COMPAT_H */
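/*
 * Example: typical lifecycle of a scheduler binary built on the macros above.
 * A minimal sketch assuming a skeleton generated from a scx_simple.bpf.c whose
 * ops member is named simple_ops; substitute the names from your own build:
 *
 *	struct scx_simple *skel;
 *	struct bpf_link *link;
 *
 *	skel = SCX_OPS_OPEN(simple_ops, scx_simple);
 *	// tweak skel->rodata, ops flags, etc. here, before loading
 *	SCX_OPS_LOAD(skel, simple_ops, scx_simple, uei);
 *	link = SCX_OPS_ATTACH(skel, simple_ops, scx_simple);
 *
 *	// scheduler is now live; run until told to exit
 *
 *	bpf_link__destroy(link);
 *	scx_simple__destroy(skel);
 */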