/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
 * Copyright (c) 2022 David Vernet <dvernet@meta.com>
 */
#ifndef __SCX_COMMON_BPF_H
#define __SCX_COMMON_BPF_H

/*
 * The generated kfunc prototypes in vmlinux.h are missing address space
 * attributes which cause build failures. For now, suppress the generated
 * prototypes. See https://github.com/sched-ext/scx/issues/1111.
 */
#define BPF_NO_KFUNC_PROTOTYPES

#ifdef LSP
#define __bpf__
#include "../vmlinux.h"
#else
#include "vmlinux.h"
#endif

#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <asm-generic/errno.h>
#include "user_exit_info.bpf.h"
#include "enum_defs.autogen.h"

#define PF_IDLE			0x00000002	/* I am an IDLE thread */
#define PF_IO_WORKER		0x00000010	/* Task is an IO worker */
#define PF_WQ_WORKER		0x00000020	/* I'm a workqueue worker */
#define PF_KCOMPACTD		0x00010000	/* I am kcompactd */
#define PF_KSWAPD		0x00020000	/* I am kswapd */
#define PF_KTHREAD		0x00200000	/* I am a kernel thread */
#define PF_EXITING		0x00000004
#define CLOCK_MONOTONIC		1

#ifndef NR_CPUS
#define NR_CPUS			1024
#endif

#ifndef NUMA_NO_NODE
#define NUMA_NO_NODE		(-1)
#endif

extern int LINUX_KERNEL_VERSION __kconfig;
extern const char CONFIG_CC_VERSION_TEXT[64] __kconfig __weak;
extern const char CONFIG_LOCALVERSION[64] __kconfig __weak;

/*
 * Earlier versions of clang/pahole lost upper 32bits in 64bit enums which can
 * lead to really confusing misbehaviors. Let's trigger a build failure.
 */
static inline void ___vmlinux_h_sanity_check___(void)
{
	_Static_assert(SCX_DSQ_FLAG_BUILTIN,
		       "bpftool generated vmlinux.h is missing high bits for 64bit enums, upgrade clang and pahole");
}

s32 scx_bpf_create_dsq(u64 dsq_id, s32 node) __ksym;
s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, bool *is_idle) __ksym;
s32 scx_bpf_select_cpu_and(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
			   const struct cpumask *cpus_allowed, u64 flags) __ksym __weak;
void scx_bpf_dsq_insert(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) __ksym __weak;
void scx_bpf_dsq_insert_vtime(struct task_struct *p, u64 dsq_id, u64 slice, u64 vtime, u64 enq_flags) __ksym __weak;
u32 scx_bpf_dispatch_nr_slots(void) __ksym;
void scx_bpf_dispatch_cancel(void) __ksym;
bool scx_bpf_dsq_move_to_local(u64 dsq_id) __ksym __weak;
void scx_bpf_dsq_move_set_slice(struct bpf_iter_scx_dsq *it__iter, u64 slice) __ksym __weak;
void scx_bpf_dsq_move_set_vtime(struct bpf_iter_scx_dsq *it__iter, u64 vtime) __ksym __weak;
bool scx_bpf_dsq_move(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
bool scx_bpf_dsq_move_vtime(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
u32 scx_bpf_reenqueue_local(void) __ksym;
void scx_bpf_kick_cpu(s32 cpu, u64 flags) __ksym;
s32 scx_bpf_dsq_nr_queued(u64 dsq_id) __ksym;
void scx_bpf_destroy_dsq(u64 dsq_id) __ksym;
int bpf_iter_scx_dsq_new(struct bpf_iter_scx_dsq *it, u64 dsq_id, u64 flags) __ksym __weak;
struct task_struct *bpf_iter_scx_dsq_next(struct bpf_iter_scx_dsq *it) __ksym __weak;
void bpf_iter_scx_dsq_destroy(struct bpf_iter_scx_dsq *it) __ksym __weak;
void scx_bpf_exit_bstr(s64 exit_code, char *fmt,
		       unsigned long long *data, u32 data__sz) __ksym __weak;
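
/*
 * For example, a minimal sketch of consuming a user DSQ from ops.dispatch()
 * might look like the following, where MY_DSQ is a placeholder for a DSQ id
 * created earlier with scx_bpf_create_dsq():
 *
 *	struct task_struct *p;
 *
 *	bpf_for_each(scx_dsq, p, MY_DSQ, 0) {
 *		if (scx_bpf_dsq_move(BPF_FOR_EACH_ITER, p, SCX_DSQ_LOCAL, 0))
 *			break;
 *	}
 */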
void scx_bpf_error_bstr(char *fmt, unsigned long long *data, u32 data_len) __ksym;
void scx_bpf_dump_bstr(char *fmt, unsigned long long *data, u32 data_len) __ksym __weak;
u32 scx_bpf_cpuperf_cap(s32 cpu) __ksym __weak;
u32 scx_bpf_cpuperf_cur(s32 cpu) __ksym __weak;
void scx_bpf_cpuperf_set(s32 cpu, u32 perf) __ksym __weak;
u32 scx_bpf_nr_node_ids(void) __ksym __weak;
u32 scx_bpf_nr_cpu_ids(void) __ksym __weak;
int scx_bpf_cpu_node(s32 cpu) __ksym __weak;
const struct cpumask *scx_bpf_get_possible_cpumask(void) __ksym __weak;
const struct cpumask *scx_bpf_get_online_cpumask(void) __ksym __weak;
void scx_bpf_put_cpumask(const struct cpumask *cpumask) __ksym __weak;
const struct cpumask *scx_bpf_get_idle_cpumask_node(int node) __ksym __weak;
const struct cpumask *scx_bpf_get_idle_cpumask(void) __ksym;
const struct cpumask *scx_bpf_get_idle_smtmask_node(int node) __ksym __weak;
const struct cpumask *scx_bpf_get_idle_smtmask(void) __ksym;
void scx_bpf_put_idle_cpumask(const struct cpumask *cpumask) __ksym;
bool scx_bpf_test_and_clear_cpu_idle(s32 cpu) __ksym;
s32 scx_bpf_pick_idle_cpu_node(const cpumask_t *cpus_allowed, int node, u64 flags) __ksym __weak;
s32 scx_bpf_pick_idle_cpu(const cpumask_t *cpus_allowed, u64 flags) __ksym;
s32 scx_bpf_pick_any_cpu_node(const cpumask_t *cpus_allowed, int node, u64 flags) __ksym __weak;
s32 scx_bpf_pick_any_cpu(const cpumask_t *cpus_allowed, u64 flags) __ksym;
bool scx_bpf_task_running(const struct task_struct *p) __ksym;
s32 scx_bpf_task_cpu(const struct task_struct *p) __ksym;
struct rq *scx_bpf_cpu_rq(s32 cpu) __ksym;
struct rq *scx_bpf_locked_rq(void) __ksym;
struct task_struct *scx_bpf_cpu_curr(s32 cpu) __ksym __weak;
struct cgroup *scx_bpf_task_cgroup(struct task_struct *p) __ksym __weak;
u64 scx_bpf_now(void) __ksym __weak;
void scx_bpf_events(struct scx_event_stats *events, size_t events__sz) __ksym __weak;

/*
 * Use the following as @it__iter when calling scx_bpf_dsq_move[_vtime]() from
 * within bpf_for_each() loops.
 */
#define BPF_FOR_EACH_ITER	(&___it)

#define scx_read_event(e, name)						\
	(bpf_core_field_exists((e)->name) ? (e)->name : 0)

static inline __attribute__((format(printf, 1, 2)))
void ___scx_bpf_bstr_format_checker(const char *fmt, ...) {}

#define SCX_STRINGIFY(x) #x
#define SCX_TOSTRING(x) SCX_STRINGIFY(x)

/*
 * Helper macro for initializing the fmt and variadic argument inputs to both
 * bstr exit kfuncs. Callers of this macro should use ___fmt and ___param to
 * refer to the initialized list of inputs to the bstr kfunc.
 */
#define scx_bpf_bstr_preamble(fmt, args...)					\
	static char ___fmt[] = fmt;						\
	/*									\
	 * Note that __param[] must have at least one				\
	 * element to keep the verifier happy.					\
	 */									\
	unsigned long long ___param[___bpf_narg(args) ?: 1] = {};		\
										\
	_Pragma("GCC diagnostic push")						\
	_Pragma("GCC diagnostic ignored \"-Wint-conversion\"")			\
	___bpf_fill(___param, args);						\
	_Pragma("GCC diagnostic pop")

/*
 * scx_bpf_exit() wraps the scx_bpf_exit_bstr() kfunc with variadic arguments
 * instead of an array of u64. Using this macro will cause the scheduler to
 * exit cleanly with the specified exit code being passed to user space.
 */
#define scx_bpf_exit(code, fmt, args...)					\
({										\
	scx_bpf_bstr_preamble(fmt, args)					\
	scx_bpf_exit_bstr(code, ___fmt, ___param, sizeof(___param));		\
	___scx_bpf_bstr_format_checker(fmt, ##args);				\
})
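
/*
 * scx_bpf_error() below follows the same pattern. For instance, a hypothetical
 * ops.init() path could abort like this (the DSQ id and return value handling
 * are illustrative only):
 *
 *	ret = scx_bpf_create_dsq(MY_DSQ, -1);
 *	if (ret) {
 *		scx_bpf_error("failed to create DSQ: %d", ret);
 *		return ret;
 *	}
 */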

/*
 * scx_bpf_error() wraps the scx_bpf_error_bstr() kfunc with variadic arguments
 * instead of an array of u64. Invoking this macro will cause the scheduler to
 * exit in an erroneous state, with diagnostic information being passed to the
 * user. It appends the file and line number to aid debugging.
 */
#define scx_bpf_error(fmt, args...)						\
({										\
	scx_bpf_bstr_preamble(							\
		__FILE__ ":" SCX_TOSTRING(__LINE__) ": " fmt, ##args)		\
	scx_bpf_error_bstr(___fmt, ___param, sizeof(___param));		\
	___scx_bpf_bstr_format_checker(						\
		__FILE__ ":" SCX_TOSTRING(__LINE__) ": " fmt, ##args);		\
})

/*
 * scx_bpf_dump() wraps the scx_bpf_dump_bstr() kfunc with variadic arguments
 * instead of an array of u64. To be used from ops.dump() and friends.
 */
#define scx_bpf_dump(fmt, args...)						\
({										\
	scx_bpf_bstr_preamble(fmt, args)					\
	scx_bpf_dump_bstr(___fmt, ___param, sizeof(___param));			\
	___scx_bpf_bstr_format_checker(fmt, ##args);				\
})

/*
 * scx_bpf_dump_header() is a wrapper around scx_bpf_dump that adds a header
 * of system information for debugging.
 */
#define scx_bpf_dump_header()							\
({										\
	scx_bpf_dump("kernel: %d.%d.%d %s\ncc: %s\n",				\
		     LINUX_KERNEL_VERSION >> 16,				\
		     LINUX_KERNEL_VERSION >> 8 & 0xFF,				\
		     LINUX_KERNEL_VERSION & 0xFF,				\
		     CONFIG_LOCALVERSION,					\
		     CONFIG_CC_VERSION_TEXT);					\
})

#define BPF_STRUCT_OPS(name, args...)						\
	SEC("struct_ops/"#name)							\
	BPF_PROG(name, ##args)

#define BPF_STRUCT_OPS_SLEEPABLE(name, args...)					\
	SEC("struct_ops.s/"#name)						\
	BPF_PROG(name, ##args)

/**
 * RESIZABLE_ARRAY - Generates annotations for an array that may be resized
 * @elfsec: the data section of the BPF program in which to place the array
 * @arr: the name of the array
 *
 * libbpf has an API for setting map value sizes. Since data sections (i.e.
 * bss, data, rodata) themselves are maps, a data section can be resized. If
 * a data section has an array as its last element, the BTF info for that
 * array will be adjusted so that the length of the array is extended to meet
 * the new length of the data section. This macro annotates an array to have
 * an element count of one with the assumption that this array can be resized
 * within the userspace program. It also annotates the section specifier so
 * this array exists in a custom sub data section which can be resized
 * independently.
 *
 * See RESIZE_ARRAY() for the userspace convenience macro for resizing an
 * array declared with RESIZABLE_ARRAY().
 */
#define RESIZABLE_ARRAY(elfsec, arr) arr[1] SEC("."#elfsec"."#arr)
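
/*
 * Example sketch: declare a per-CPU stats array whose size userspace adjusts
 * to the actual CPU count before loading (the names are illustrative):
 *
 *	u64 RESIZABLE_ARRAY(data, cpu_stats);
 *
 * This expands to "u64 cpu_stats[1] SEC(".data.cpu_stats")", which the loader
 * can then grow with the corresponding RESIZE_ARRAY() helper.
 */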

/**
 * MEMBER_VPTR - Obtain the verified pointer to a struct or array member
 * @base: struct or array to index
 * @member: dereferenced member (e.g. .field, [idx0][idx1], .field[idx0] ...)
 *
 * The verifier often gets confused by the instruction sequence the compiler
 * generates for indexing struct fields or arrays. This macro forces the
 * compiler to generate a code sequence which first calculates the byte offset,
 * checks it against the struct or array size and adds that byte offset to
 * generate the pointer to the member to help the verifier.
 *
 * Ideally, we want to abort if the calculated offset is out-of-bounds. However,
 * BPF currently doesn't support abort, so evaluate to %NULL instead. The caller
 * must check for %NULL and take appropriate action to appease the verifier. To
 * avoid confusing the verifier, it's best to check for %NULL and dereference
 * immediately.
 *
 *	vptr = MEMBER_VPTR(my_array, [i][j]);
 *	if (!vptr)
 *		return error;
 *	*vptr = new_value;
 *
 * sizeof(@base) should encompass the memory area to be accessed and thus can't
 * be a pointer to the area. Use `MEMBER_VPTR(*ptr, .member)` instead of
 * `MEMBER_VPTR(ptr, ->member)`.
 */
#ifndef MEMBER_VPTR
#define MEMBER_VPTR(base, member) (typeof((base) member) *)			\
({										\
	u64 __base = (u64)&(base);						\
	u64 __addr = (u64)&((base) member) - __base;				\
	_Static_assert(sizeof(base) >= sizeof((base) member),			\
		       "@base is smaller than @member, is @base a pointer?");	\
	asm volatile (								\
		"if %0 <= %[max] goto +2\n"					\
		"%0 = 0\n"							\
		"goto +1\n"							\
		"%0 += %1\n"							\
		: "+r"(__addr)							\
		: "r"(__base),							\
		  [max]"i"(sizeof(base) - sizeof((base) member)));		\
	__addr;									\
})
#endif /* MEMBER_VPTR */

/**
 * ARRAY_ELEM_PTR - Obtain the verified pointer to an array element
 * @arr: array to index into
 * @i: array index
 * @n: number of elements in array
 *
 * Similar to MEMBER_VPTR() but is intended for use with arrays where the
 * element count needs to be explicit.
 * It can be used in cases where a global array is defined with an initial
 * size but is intended to be resized before loading the BPF program.
 * Without this version of the macro, MEMBER_VPTR() will use the compile time
 * size of the array to compute the max, which will result in rejection by
 * the verifier.
 */
#ifndef ARRAY_ELEM_PTR
#define ARRAY_ELEM_PTR(arr, i, n) (typeof(arr[i]) *)				\
({										\
	u64 __base = (u64)arr;							\
	u64 __addr = (u64)&(arr[i]) - __base;					\
	asm volatile (								\
		"if %0 <= %[max] goto +2\n"					\
		"%0 = 0\n"							\
		"goto +1\n"							\
		"%0 += %1\n"							\
		: "+r"(__addr)							\
		: "r"(__base),							\
		  [max]"r"(sizeof(arr[0]) * ((n) - 1)));			\
	__addr;									\
})
#endif /* ARRAY_ELEM_PTR */
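
/*
 * Example sketch, continuing the resizable cpu_stats array above (nr_cpus is
 * an illustrative global holding the resized element count):
 *
 *	u64 *stat = ARRAY_ELEM_PTR(cpu_stats, cpu, nr_cpus);
 *	if (!stat)
 *		return;
 *	*stat += 1;
 */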

/*
 * BPF declarations and helpers
 */

/* list and rbtree */
#define __contains(name, node) __attribute__((btf_decl_tag("contains:" #name ":" #node)))
#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))

void *bpf_obj_new_impl(__u64 local_type_id, void *meta) __ksym;
void bpf_obj_drop_impl(void *kptr, void *meta) __ksym;

#define bpf_obj_new(type) ((type *)bpf_obj_new_impl(bpf_core_type_id_local(type), NULL))
#define bpf_obj_drop(kptr) bpf_obj_drop_impl(kptr, NULL)

int bpf_list_push_front_impl(struct bpf_list_head *head,
			     struct bpf_list_node *node,
			     void *meta, __u64 off) __ksym;
#define bpf_list_push_front(head, node) bpf_list_push_front_impl(head, node, NULL, 0)

int bpf_list_push_back_impl(struct bpf_list_head *head,
			    struct bpf_list_node *node,
			    void *meta, __u64 off) __ksym;
#define bpf_list_push_back(head, node) bpf_list_push_back_impl(head, node, NULL, 0)

struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head) __ksym;
struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head) __ksym;
struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root,
				      struct bpf_rb_node *node) __ksym;
int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node,
			bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b),
			void *meta, __u64 off) __ksym;
#define bpf_rbtree_add(head, node, less) bpf_rbtree_add_impl(head, node, less, NULL, 0)

struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root) __ksym;

void *bpf_refcount_acquire_impl(void *kptr, void *meta) __ksym;
#define bpf_refcount_acquire(kptr) bpf_refcount_acquire_impl(kptr, NULL)

/* task */
struct task_struct *bpf_task_from_pid(s32 pid) __ksym;
struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym;
void bpf_task_release(struct task_struct *p) __ksym;

/* cgroup */
struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level) __ksym;
void bpf_cgroup_release(struct cgroup *cgrp) __ksym;
struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym;

/* css iteration */
struct bpf_iter_css;
struct cgroup_subsys_state;
extern int bpf_iter_css_new(struct bpf_iter_css *it,
			    struct cgroup_subsys_state *start,
			    unsigned int flags) __weak __ksym;
extern struct cgroup_subsys_state *
bpf_iter_css_next(struct bpf_iter_css *it) __weak __ksym;
extern void bpf_iter_css_destroy(struct bpf_iter_css *it) __weak __ksym;

/* cpumask */
struct bpf_cpumask *bpf_cpumask_create(void) __ksym;
struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask) __ksym;
void bpf_cpumask_release(struct bpf_cpumask *cpumask) __ksym;
u32 bpf_cpumask_first(const struct cpumask *cpumask) __ksym;
u32 bpf_cpumask_first_zero(const struct cpumask *cpumask) __ksym;
void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask) __ksym;
bool bpf_cpumask_test_and_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
bool bpf_cpumask_test_and_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
void bpf_cpumask_setall(struct bpf_cpumask *cpumask) __ksym;
void bpf_cpumask_clear(struct bpf_cpumask *cpumask) __ksym;
bool bpf_cpumask_and(struct bpf_cpumask *dst, const struct cpumask *src1,
		     const struct cpumask *src2) __ksym;
void bpf_cpumask_or(struct bpf_cpumask *dst, const struct cpumask *src1,
		    const struct cpumask *src2) __ksym;
void bpf_cpumask_xor(struct bpf_cpumask *dst, const struct cpumask *src1,
		     const struct cpumask *src2) __ksym;
bool bpf_cpumask_equal(const struct cpumask *src1, const struct cpumask *src2) __ksym;
bool bpf_cpumask_intersects(const struct cpumask *src1, const struct cpumask *src2) __ksym;
bool bpf_cpumask_subset(const struct cpumask *src1, const struct cpumask *src2) __ksym;
bool bpf_cpumask_empty(const struct cpumask *cpumask) __ksym;
bool bpf_cpumask_full(const struct cpumask *cpumask) __ksym;
void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src) __ksym;
u32 bpf_cpumask_any_distribute(const struct cpumask *cpumask) __ksym;
u32 bpf_cpumask_any_and_distribute(const struct cpumask *src1,
				   const struct cpumask *src2) __ksym;
u32 bpf_cpumask_weight(const struct cpumask *cpumask) __ksym;
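
/*
 * A minimal usage sketch (error handling and the cpu variable are
 * illustrative): allocate a bpf_cpumask, mark a CPU in it and query it via
 * cast_mask(), which is defined further below, before dropping the reference:
 *
 *	struct bpf_cpumask *mask;
 *
 *	mask = bpf_cpumask_create();
 *	if (!mask)
 *		return -ENOMEM;
 *	bpf_cpumask_set_cpu(cpu, mask);
 *	if (bpf_cpumask_test_cpu(cpu, cast_mask(mask)))
 *		bpf_printk("cpu %d is set", cpu);
 *	bpf_cpumask_release(mask);
 */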

int bpf_iter_bits_new(struct bpf_iter_bits *it, const u64 *unsafe_ptr__ign, u32 nr_words) __ksym;
int *bpf_iter_bits_next(struct bpf_iter_bits *it) __ksym;
void bpf_iter_bits_destroy(struct bpf_iter_bits *it) __ksym;

#define def_iter_struct(name)							\
struct bpf_iter_##name {							\
	struct bpf_iter_bits it;						\
	const struct cpumask *bitmap;						\
};

#define def_iter_new(name)							\
static inline int bpf_iter_##name##_new(					\
	struct bpf_iter_##name *it, const u64 *unsafe_ptr__ign, u32 nr_words)	\
{										\
	it->bitmap = scx_bpf_get_##name##_cpumask();				\
	return bpf_iter_bits_new(&it->it, (const u64 *)it->bitmap,		\
				 sizeof(struct cpumask) / 8);			\
}

#define def_iter_next(name)							\
static inline int *bpf_iter_##name##_next(struct bpf_iter_##name *it) {	\
	return bpf_iter_bits_next(&it->it);					\
}

#define def_iter_destroy(name)							\
static inline void bpf_iter_##name##_destroy(struct bpf_iter_##name *it) {	\
	scx_bpf_put_cpumask(it->bitmap);					\
	bpf_iter_bits_destroy(&it->it);						\
}
#define def_for_each_cpu(cpu, name) for_each_##name##_cpu(cpu)

/// Provides iterators for possible and online CPUs.
///
/// # Example
///
/// ```
/// static inline void example_use() {
///     int *cpu;
///
///     for_each_possible_cpu(cpu) {
///         bpf_printk("CPU %d is possible", *cpu);
///     }
///
///     for_each_online_cpu(cpu) {
///         bpf_printk("CPU %d is online", *cpu);
///     }
/// }
/// ```
def_iter_struct(possible);
def_iter_new(possible);
def_iter_next(possible);
def_iter_destroy(possible);
#define for_each_possible_cpu(cpu) bpf_for_each(possible, cpu, NULL, 0)

def_iter_struct(online);
def_iter_new(online);
def_iter_next(online);
def_iter_destroy(online);
#define for_each_online_cpu(cpu) bpf_for_each(online, cpu, NULL, 0)

/*
 * Access a cpumask in read-only mode (typically to check bits).
 */
static __always_inline const struct cpumask *cast_mask(struct bpf_cpumask *mask)
{
	return (const struct cpumask *)mask;
}

/*
 * Return true if task @p cannot migrate to a different CPU, false
 * otherwise.
 */
static inline bool is_migration_disabled(const struct task_struct *p)
{
	/*
	 * Testing p->migration_disabled in BPF code is tricky because
	 * migration is _always_ disabled while running BPF code. The prolog
	 * (__bpf_prog_enter) and epilog (__bpf_prog_exit) for BPF code
	 * execution disable and re-enable the migration of the current task,
	 * respectively. So, the _current_ task of the sched_ext ops is always
	 * migration-disabled. Moreover, p->migration_disabled could be two or
	 * greater when a sched_ext ops BPF program (e.g., ops.tick) is
	 * executed in the middle of another BPF program's execution.
	 *
	 * Therefore, we should consider the _current_ task migration-disabled
	 * only when its migration_disabled count is greater than one. In other
	 * words, when p->migration_disabled == 1, there is an ambiguity, so we
	 * should check whether @p is the current task or not.
	 */
	if (bpf_core_field_exists(p->migration_disabled)) {
		if (p->migration_disabled == 1)
			return bpf_get_current_task_btf() != p;
		else
			return p->migration_disabled;
	}
	return false;
}
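
/*
 * For example, an ops.select_cpu() implementation might simply keep such a
 * task where it last ran (a sketch, not a complete implementation):
 *
 *	if (is_migration_disabled(p))
 *		return prev_cpu;
 */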

/* rcu */
void bpf_rcu_read_lock(void) __ksym;
void bpf_rcu_read_unlock(void) __ksym;

/*
 * Time helpers, most of which are from jiffies.h.
 */

/**
 * time_delta - Calculate the delta between new and old time stamp
 * @after: first comparable as u64
 * @before: second comparable as u64
 *
 * Return: the time difference, which is >= 0
 */
static inline s64 time_delta(u64 after, u64 before)
{
	return (s64)(after - before) > 0 ? (s64)(after - before) : 0;
}

/**
 * time_after - returns true if the time a is after time b.
 * @a: first comparable as u64
 * @b: second comparable as u64
 *
 * Do this with "<0" and ">=0" to only test the sign of the result. A
 * good compiler would generate better code (and a really good compiler
 * wouldn't care). Gcc is currently neither.
 *
 * Return: %true if time a is after time b, otherwise %false.
 */
static inline bool time_after(u64 a, u64 b)
{
	return (s64)(b - a) < 0;
}

/**
 * time_before - returns true if the time a is before time b.
 * @a: first comparable as u64
 * @b: second comparable as u64
 *
 * Return: %true if time a is before time b, otherwise %false.
 */
static inline bool time_before(u64 a, u64 b)
{
	return time_after(b, a);
}

/**
 * time_after_eq - returns true if the time a is after or the same as time b.
 * @a: first comparable as u64
 * @b: second comparable as u64
 *
 * Return: %true if time a is after or the same as time b, otherwise %false.
 */
static inline bool time_after_eq(u64 a, u64 b)
{
	return (s64)(a - b) >= 0;
}

/**
 * time_before_eq - returns true if the time a is before or the same as time b.
 * @a: first comparable as u64
 * @b: second comparable as u64
 *
 * Return: %true if time a is before or the same as time b, otherwise %false.
 */
static inline bool time_before_eq(u64 a, u64 b)
{
	return time_after_eq(b, a);
}

/**
 * time_in_range - Calculate whether a is in the range of [b, c].
 * @a: time to test
 * @b: beginning of the range
 * @c: end of the range
 *
 * Return: %true if time a is in the range [b, c], otherwise %false.
 */
static inline bool time_in_range(u64 a, u64 b, u64 c)
{
	return time_after_eq(a, b) && time_before_eq(a, c);
}

/**
 * time_in_range_open - Calculate whether a is in the range of [b, c).
 * @a: time to test
 * @b: beginning of the range
 * @c: end of the range
 *
 * Return: %true if time a is in the range [b, c), otherwise %false.
 */
static inline bool time_in_range_open(u64 a, u64 b, u64 c)
{
	return time_after_eq(a, b) && time_before(a, c);
}
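
/*
 * These helpers are typically used with scx_bpf_now() timestamps, e.g.
 * (deadline is an illustrative per-task value):
 *
 *	u64 now = scx_bpf_now();
 *
 *	if (time_before(now, deadline))
 *		return;
 */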

/*
 * Other helpers
 */

/* useful compiler attributes */
#ifndef likely
#define likely(x) __builtin_expect(!!(x), 1)
#endif
#ifndef unlikely
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
#ifndef __maybe_unused
#define __maybe_unused __attribute__((__unused__))
#endif

/*
 * READ/WRITE_ONCE() are from kernel (include/asm-generic/rwonce.h). They
 * prevent the compiler from caching, redoing or reordering reads or writes.
 */
typedef __u8  __attribute__((__may_alias__)) __u8_alias_t;
typedef __u16 __attribute__((__may_alias__)) __u16_alias_t;
typedef __u32 __attribute__((__may_alias__)) __u32_alias_t;
typedef __u64 __attribute__((__may_alias__)) __u64_alias_t;

static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(__u8_alias_t  *) res = *(volatile __u8_alias_t  *) p; break;
	case 2: *(__u16_alias_t *) res = *(volatile __u16_alias_t *) p; break;
	case 4: *(__u32_alias_t *) res = *(volatile __u32_alias_t *) p; break;
	case 8: *(__u64_alias_t *) res = *(volatile __u64_alias_t *) p; break;
	default:
		barrier();
		__builtin_memcpy((void *)res, (const void *)p, size);
		barrier();
	}
}

static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile __u8_alias_t  *) p = *(__u8_alias_t  *) res; break;
	case 2: *(volatile __u16_alias_t *) p = *(__u16_alias_t *) res; break;
	case 4: *(volatile __u32_alias_t *) p = *(__u32_alias_t *) res; break;
	case 8: *(volatile __u64_alias_t *) p = *(__u64_alias_t *) res; break;
	default:
		barrier();
		__builtin_memcpy((void *)p, (const void *)res, size);
		barrier();
	}
}

/*
 * __unqual_typeof(x) - Declare an unqualified scalar type, leaving
 *			non-scalar types unchanged.
 *
 * Prefer C11 _Generic for better compile-times and simpler code. Note: 'char'
 * is not type-compatible with 'signed char', and we define a separate case.
 *
 * This is copied verbatim from kernel's include/linux/compiler_types.h, but
 * with default expression (for pointers) changed from (x) to (typeof(x)0).
 *
 * This is because LLVM has a bug where for lvalue (x), it does not get rid of
 * an extra address_space qualifier, but does in case of rvalue (typeof(x)0).
 * Hence, for pointers, we need to create an rvalue expression to get the
 * desired type. See https://github.com/llvm/llvm-project/issues/53400.
 */
#define __scalar_type_to_expr_cases(type)					\
	unsigned type : (unsigned type)0, signed type : (signed type)0

#define __unqual_typeof(x)							\
	typeof(_Generic((x),							\
			char: (char)0,						\
			__scalar_type_to_expr_cases(char),			\
			__scalar_type_to_expr_cases(short),			\
			__scalar_type_to_expr_cases(int),			\
			__scalar_type_to_expr_cases(long),			\
			__scalar_type_to_expr_cases(long long),			\
			default: (typeof(x))0))

#define READ_ONCE(x)								\
({										\
	union { __unqual_typeof(x) __val; char __c[1]; } __u =			\
		{ .__c = { 0 } };						\
	__read_once_size((__unqual_typeof(x) *)&(x), __u.__c, sizeof(x));	\
	__u.__val;								\
})

#define WRITE_ONCE(x, val)							\
({										\
	union { __unqual_typeof(x) __val; char __c[1]; } __u =			\
		{ .__val = (val) };						\
	__write_once_size((__unqual_typeof(x) *)&(x), __u.__c, sizeof(x));	\
	__u.__val;								\
})
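
/*
 * Usage sketch (enable_feature is an illustrative variable shared across
 * CPUs; note these macros only constrain the compiler, they do not add
 * atomicity or memory barriers):
 *
 *	if (READ_ONCE(enable_feature))
 *		WRITE_ONCE(enable_feature, false);
 */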

/*
 * __calc_avg - Calculate exponential weighted moving average (EWMA) with
 * @old and @new values. @decay represents how large the @old value remains.
 * With a larger @decay value, the moving average changes slowly, exhibiting
 * fewer fluctuations.
 */
#define __calc_avg(old, new, decay) ({						\
	typeof(decay) thr = 1 << (decay);					\
	typeof(old) ret;							\
	if (((old) < thr) || ((new) < thr)) {					\
		if (((old) == 1) && ((new) == 0))				\
			ret = 0;						\
		else								\
			ret = ((old) - ((old) >> 1)) + ((new) >> 1);		\
	} else {								\
		ret = ((old) - ((old) >> (decay))) + ((new) >> (decay));	\
	}									\
	ret;									\
})

/*
 * log2_u32 - Compute the base 2 logarithm of a 32-bit exponential value.
 * @v: The value for which we're computing the base 2 logarithm.
 */
static inline u32 log2_u32(u32 v)
{
	u32 r;
	u32 shift;

	r = (v > 0xFFFF) << 4; v >>= r;
	shift = (v > 0xFF) << 3; v >>= shift; r |= shift;
	shift = (v > 0xF) << 2; v >>= shift; r |= shift;
	shift = (v > 0x3) << 1; v >>= shift; r |= shift;
	r |= (v >> 1);
	return r;
}

/*
 * log2_u64 - Compute the base 2 logarithm of a 64-bit exponential value.
 * @v: The value for which we're computing the base 2 logarithm.
 */
static inline u32 log2_u64(u64 v)
{
	u32 hi = v >> 32;
	if (hi)
		return log2_u32(hi) + 32 + 1;
	else
		return log2_u32(v) + 1;
}

/*
 * __sqrt_u64 - Calculate the square root of value @x using Newton's method.
 */
static inline u64 __sqrt_u64(u64 x)
{
	if (x == 0 || x == 1)
		return x;

	u64 r = ((1ULL << 32) > x) ? x : (1ULL << 32);

	for (int i = 0; i < 8; ++i) {
		u64 q = x / r;
		if (r <= q)
			break;
		r = (r + q) >> 1;
	}
	return r;
}

/*
 * Return a value proportionally scaled to the task's weight.
 */
static inline u64 scale_by_task_weight(const struct task_struct *p, u64 value)
{
	return (value * p->scx.weight) / 100;
}

/*
 * Return a value inversely proportional to the task's weight.
 */
static inline u64 scale_by_task_weight_inverse(const struct task_struct *p, u64 value)
{
	return value * 100 / p->scx.weight;
}

#include "compat.bpf.h"
#include "enums.bpf.h"

#endif /* __SCX_COMMON_BPF_H */