/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
 * Copyright (c) 2022 David Vernet <dvernet@meta.com>
 */
#ifndef __SCX_COMMON_BPF_H
#define __SCX_COMMON_BPF_H

/*
 * The generated kfunc prototypes in vmlinux.h are missing address space
 * attributes which cause build failures. For now, suppress the generated
 * prototypes. See https://github.com/sched-ext/scx/issues/1111.
 */
#define BPF_NO_KFUNC_PROTOTYPES

#ifdef LSP
#define __bpf__
#include "../vmlinux.h"
#else
#include "vmlinux.h"
#endif

#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <asm-generic/errno.h>
#include "user_exit_info.bpf.h"
#include "enum_defs.autogen.h"

#define PF_IDLE			0x00000002	/* I am an IDLE thread */
#define PF_IO_WORKER		0x00000010	/* Task is an IO worker */
#define PF_WQ_WORKER		0x00000020	/* I'm a workqueue worker */
#define PF_KCOMPACTD		0x00010000	/* I am kcompactd */
#define PF_KSWAPD		0x00020000	/* I am kswapd */
#define PF_KTHREAD		0x00200000	/* I am a kernel thread */
#define PF_EXITING		0x00000004
#define CLOCK_MONOTONIC		1

#ifndef NR_CPUS
#define NR_CPUS 1024
#endif

#ifndef NUMA_NO_NODE
#define NUMA_NO_NODE	(-1)
#endif

extern int LINUX_KERNEL_VERSION __kconfig;
extern const char CONFIG_CC_VERSION_TEXT[64] __kconfig __weak;
extern const char CONFIG_LOCALVERSION[64] __kconfig __weak;

/*
 * Earlier versions of clang/pahole lost upper 32bits in 64bit enums which can
 * lead to really confusing misbehaviors. Let's trigger a build failure.
 */
static inline void ___vmlinux_h_sanity_check___(void)
{
	_Static_assert(SCX_DSQ_FLAG_BUILTIN,
		       "bpftool generated vmlinux.h is missing high bits for 64bit enums, upgrade clang and pahole");
}

s32 scx_bpf_create_dsq(u64 dsq_id, s32 node) __ksym;
s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, bool *is_idle) __ksym;
s32 scx_bpf_select_cpu_and(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
			   const struct cpumask *cpus_allowed, u64 flags) __ksym __weak;
void scx_bpf_dsq_insert(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) __ksym __weak;
void scx_bpf_dsq_insert_vtime(struct task_struct *p, u64 dsq_id, u64 slice, u64 vtime, u64 enq_flags) __ksym __weak;
u32 scx_bpf_dispatch_nr_slots(void) __ksym;
void scx_bpf_dispatch_cancel(void) __ksym;
bool scx_bpf_dsq_move_to_local(u64 dsq_id) __ksym __weak;
void scx_bpf_dsq_move_set_slice(struct bpf_iter_scx_dsq *it__iter, u64 slice) __ksym __weak;
void scx_bpf_dsq_move_set_vtime(struct bpf_iter_scx_dsq *it__iter, u64 vtime) __ksym __weak;
bool scx_bpf_dsq_move(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
bool scx_bpf_dsq_move_vtime(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
u32 scx_bpf_reenqueue_local(void) __ksym;
void scx_bpf_kick_cpu(s32 cpu, u64 flags) __ksym;
s32 scx_bpf_dsq_nr_queued(u64 dsq_id) __ksym;
void scx_bpf_destroy_dsq(u64 dsq_id) __ksym;
int bpf_iter_scx_dsq_new(struct bpf_iter_scx_dsq *it, u64 dsq_id, u64 flags) __ksym __weak;
struct task_struct *bpf_iter_scx_dsq_next(struct bpf_iter_scx_dsq *it) __ksym __weak;
void bpf_iter_scx_dsq_destroy(struct bpf_iter_scx_dsq *it) __ksym __weak;
void scx_bpf_exit_bstr(s64 exit_code, char *fmt, unsigned long long *data, u32 data__sz) __ksym __weak;
void scx_bpf_error_bstr(char *fmt, unsigned long long *data, u32 data_len) __ksym;
void scx_bpf_dump_bstr(char *fmt, unsigned long long *data, u32 data_len) __ksym __weak;
u32 scx_bpf_cpuperf_cap(s32 cpu) __ksym __weak;
u32 scx_bpf_cpuperf_cur(s32 cpu) __ksym __weak;
void scx_bpf_cpuperf_set(s32 cpu, u32 perf) __ksym __weak;
u32 scx_bpf_nr_node_ids(void) __ksym __weak;
u32 scx_bpf_nr_cpu_ids(void) __ksym __weak;
int scx_bpf_cpu_node(s32 cpu) __ksym __weak;
const struct cpumask *scx_bpf_get_possible_cpumask(void) __ksym __weak;
const struct cpumask *scx_bpf_get_online_cpumask(void) __ksym __weak;
void scx_bpf_put_cpumask(const struct cpumask *cpumask) __ksym __weak;
const struct cpumask *scx_bpf_get_idle_cpumask_node(int node) __ksym __weak;
const struct cpumask *scx_bpf_get_idle_cpumask(void) __ksym;
const struct cpumask *scx_bpf_get_idle_smtmask_node(int node) __ksym __weak;
const struct cpumask *scx_bpf_get_idle_smtmask(void) __ksym;
void scx_bpf_put_idle_cpumask(const struct cpumask *cpumask) __ksym;
bool scx_bpf_test_and_clear_cpu_idle(s32 cpu) __ksym;
s32 scx_bpf_pick_idle_cpu_node(const cpumask_t *cpus_allowed, int node, u64 flags) __ksym __weak;
s32 scx_bpf_pick_idle_cpu(const cpumask_t *cpus_allowed, u64 flags) __ksym;
s32 scx_bpf_pick_any_cpu_node(const cpumask_t *cpus_allowed, int node, u64 flags) __ksym __weak;
s32 scx_bpf_pick_any_cpu(const cpumask_t *cpus_allowed, u64 flags) __ksym;
bool scx_bpf_task_running(const struct task_struct *p) __ksym;
s32 scx_bpf_task_cpu(const struct task_struct *p) __ksym;
struct rq *scx_bpf_cpu_rq(s32 cpu) __ksym;
struct cgroup *scx_bpf_task_cgroup(struct task_struct *p) __ksym __weak;
u64 scx_bpf_now(void) __ksym __weak;
void scx_bpf_events(struct scx_event_stats *events, size_t events__sz) __ksym __weak;

/*
 * Use the following as @it__iter when calling scx_bpf_dsq_move[_vtime]() from
 * within bpf_for_each() loops.
 */
#define BPF_FOR_EACH_ITER	(&___it)
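
/*
 * Example (an illustrative sketch, not part of the API): moving the first
 * matching task from a user DSQ to the local DSQ from within a bpf_for_each()
 * loop. MY_DSQ_ID is a hypothetical DSQ id created earlier with
 * scx_bpf_create_dsq().
 *
 *	struct task_struct *p;
 *
 *	bpf_for_each(scx_dsq, p, MY_DSQ_ID, 0) {
 *		if (scx_bpf_dsq_move(BPF_FOR_EACH_ITER, p, SCX_DSQ_LOCAL, 0))
 *			break;
 *	}
 */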

/*
 * Read event @name from @e if the field exists on the running kernel, or 0
 * otherwise.
 */
#define scx_read_event(e, name)						\
	(bpf_core_field_exists((e)->name) ? (e)->name : 0)

static inline __attribute__((format(printf, 1, 2)))
void ___scx_bpf_bstr_format_checker(const char *fmt, ...) {}

#define SCX_STRINGIFY(x) #x
#define SCX_TOSTRING(x) SCX_STRINGIFY(x)

/*
 * Helper macro for initializing the fmt and variadic argument inputs to both
 * bstr exit kfuncs. Callers of this macro should use ___fmt and ___param to
 * refer to the initialized list of inputs to the bstr kfunc.
 */
#define scx_bpf_bstr_preamble(fmt, args...)					\
	static char ___fmt[] = fmt;						\
	/*									\
	 * Note that ___param[] must have at least one				\
	 * element to keep the verifier happy.					\
	 */									\
	unsigned long long ___param[___bpf_narg(args) ?: 1] = {};		\
										\
	_Pragma("GCC diagnostic push")						\
	_Pragma("GCC diagnostic ignored \"-Wint-conversion\"")			\
	___bpf_fill(___param, args);						\
	_Pragma("GCC diagnostic pop")

/*
 * scx_bpf_exit() wraps the scx_bpf_exit_bstr() kfunc with variadic arguments
 * instead of an array of u64. Using this macro will cause the scheduler to
 * exit cleanly with the specified exit code being passed to user space.
 */
#define scx_bpf_exit(code, fmt, args...)					\
({										\
	scx_bpf_bstr_preamble(fmt, args)					\
	scx_bpf_exit_bstr(code, ___fmt, ___param, sizeof(___param));		\
	___scx_bpf_bstr_format_checker(fmt, ##args);				\
})

/*
 * scx_bpf_error() wraps the scx_bpf_error_bstr() kfunc with variadic arguments
 * instead of an array of u64. Invoking this macro will cause the scheduler to
 * exit in an erroneous state, with diagnostic information being passed to the
 * user. It appends the file and line number to aid debugging.
 */
#define scx_bpf_error(fmt, args...)						\
({										\
	scx_bpf_bstr_preamble(							\
		__FILE__ ":" SCX_TOSTRING(__LINE__) ": " fmt, ##args)		\
	scx_bpf_error_bstr(___fmt, ___param, sizeof(___param));			\
	___scx_bpf_bstr_format_checker(						\
		__FILE__ ":" SCX_TOSTRING(__LINE__) ": " fmt, ##args);		\
})
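
/*
 * Example (illustrative sketch): both macros take printf-style arguments, so
 * a scheduler could abort on an unexpected lookup failure with:
 *
 *	struct task_struct *p = bpf_task_from_pid(pid);
 *
 *	if (!p) {
 *		scx_bpf_error("failed to look up task %d", pid);
 *		return;
 *	}
 */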

/*
 * scx_bpf_dump() wraps the scx_bpf_dump_bstr() kfunc with variadic arguments
 * instead of an array of u64. To be used from ops.dump() and friends.
 */
#define scx_bpf_dump(fmt, args...)						\
({										\
	scx_bpf_bstr_preamble(fmt, args)					\
	scx_bpf_dump_bstr(___fmt, ___param, sizeof(___param));			\
	___scx_bpf_bstr_format_checker(fmt, ##args);				\
})

/*
 * scx_bpf_dump_header() is a wrapper around scx_bpf_dump that adds a header
 * of system information for debugging.
 */
#define scx_bpf_dump_header()							\
({										\
	scx_bpf_dump("kernel: %d.%d.%d %s\ncc: %s\n",				\
		     LINUX_KERNEL_VERSION >> 16,				\
		     LINUX_KERNEL_VERSION >> 8 & 0xFF,				\
		     LINUX_KERNEL_VERSION & 0xFF,				\
		     CONFIG_LOCALVERSION,					\
		     CONFIG_CC_VERSION_TEXT);					\
})

#define BPF_STRUCT_OPS(name, args...)						\
	SEC("struct_ops/"#name)							\
	BPF_PROG(name, ##args)

#define BPF_STRUCT_OPS_SLEEPABLE(name, args...)					\
	SEC("struct_ops.s/"#name)						\
	BPF_PROG(name, ##args)

/**
 * RESIZABLE_ARRAY - Generates annotations for an array that may be resized
 * @elfsec: the data section of the BPF program in which to place the array
 * @arr: the name of the array
 *
 * libbpf has an API for setting map value sizes. Since data sections (i.e.
 * bss, data, rodata) themselves are maps, a data section can be resized. If
 * a data section has an array as its last element, the BTF info for that
 * array will be adjusted so that the length of the array is extended to meet
 * the new length of the data section. This macro annotates an array to have
 * an element count of one with the assumption that this array can be resized
 * within the userspace program. It also annotates the section specifier so
 * this array exists in a custom sub data section which can be resized
 * independently.
 *
 * See RESIZE_ARRAY() for the userspace convenience macro for resizing an
 * array declared with RESIZABLE_ARRAY().
 */
#define RESIZABLE_ARRAY(elfsec, arr) arr[1] SEC("."#elfsec"."#arr)
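
/*
 * Example (illustrative sketch): declare a per-CPU stats array in .data that
 * userspace may resize to the actual CPU count before load. cpu_stats is a
 * hypothetical name.
 *
 *	u64 RESIZABLE_ARRAY(data, cpu_stats);
 *
 * This expands to "u64 cpu_stats[1] SEC(".data.cpu_stats");", and the
 * userspace side can grow the .data.cpu_stats map before loading.
 */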

/**
 * MEMBER_VPTR - Obtain the verified pointer to a struct or array member
 * @base: struct or array to index
 * @member: dereferenced member (e.g. .field, [idx0][idx1], .field[idx0] ...)
 *
 * The verifier often gets confused by the instruction sequence the compiler
 * generates for indexing struct fields or arrays. This macro forces the
 * compiler to generate a code sequence which first calculates the byte offset,
 * checks it against the struct or array size and adds that byte offset to
 * generate the pointer to the member to help the verifier.
 *
 * Ideally, we want to abort if the calculated offset is out-of-bounds. However,
 * BPF currently doesn't support abort, so evaluate to %NULL instead. The caller
 * must check for %NULL and take appropriate action to appease the verifier. To
 * avoid confusing the verifier, it's best to check for %NULL and dereference
 * immediately.
 *
 *	vptr = MEMBER_VPTR(my_array, [i][j]);
 *	if (!vptr)
 *		return error;
 *	*vptr = new_value;
 *
 * sizeof(@base) should encompass the memory area to be accessed and thus can't
 * be a pointer to the area. Use `MEMBER_VPTR(*ptr, .member)` instead of
 * `MEMBER_VPTR(ptr, ->member)`.
 */
#ifndef MEMBER_VPTR
#define MEMBER_VPTR(base, member) (typeof((base) member) *)			\
({										\
	u64 __base = (u64)&(base);						\
	u64 __addr = (u64)&((base) member) - __base;				\
	_Static_assert(sizeof(base) >= sizeof((base) member),			\
		       "@base is smaller than @member, is @base a pointer?");	\
	asm volatile (								\
		"if %0 <= %[max] goto +2\n"					\
		"%0 = 0\n"							\
		"goto +1\n"							\
		"%0 += %1\n"							\
		: "+r"(__addr)							\
		: "r"(__base),							\
		  [max]"i"(sizeof(base) - sizeof((base) member)));		\
	__addr;									\
})
#endif /* MEMBER_VPTR */

/**
 * ARRAY_ELEM_PTR - Obtain the verified pointer to an array element
 * @arr: array to index into
 * @i: array index
 * @n: number of elements in array
 *
 * Similar to MEMBER_VPTR() but is intended for use with arrays where the
 * element count needs to be explicit. It can be used in cases where a global
 * array is defined with an initial size but is intended to be resized before
 * loading the BPF program. Without this version of the macro, MEMBER_VPTR()
 * will use the compile time size of the array to compute the max, which will
 * result in rejection by the verifier.
 */
#ifndef ARRAY_ELEM_PTR
#define ARRAY_ELEM_PTR(arr, i, n) (typeof(arr[i]) *)				\
({										\
	u64 __base = (u64)arr;							\
	u64 __addr = (u64)&(arr[i]) - __base;					\
	asm volatile (								\
		"if %0 <= %[max] goto +2\n"					\
		"%0 = 0\n"							\
		"goto +1\n"							\
		"%0 += %1\n"							\
		: "+r"(__addr)							\
		: "r"(__base),							\
		  [max]"r"(sizeof(arr[0]) * ((n) - 1)));			\
	__addr;									\
})
#endif /* ARRAY_ELEM_PTR */
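
/*
 * Example (illustrative sketch): pairing ARRAY_ELEM_PTR() with an array
 * declared via RESIZABLE_ARRAY(). Both cpu_stats and nr_cpu_ids are
 * hypothetical names; nr_cpu_ids would be a global filled in by userspace
 * before load.
 *
 *	u64 *stats = ARRAY_ELEM_PTR(cpu_stats, cpu, nr_cpu_ids);
 *	if (!stats)
 *		return;
 *	*stats += 1;
 */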

/*
 * BPF declarations and helpers
 */

/* list and rbtree */
#define __contains(name, node) __attribute__((btf_decl_tag("contains:" #name ":" #node)))
#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))

void *bpf_obj_new_impl(__u64 local_type_id, void *meta) __ksym;
void bpf_obj_drop_impl(void *kptr, void *meta) __ksym;

#define bpf_obj_new(type) ((type *)bpf_obj_new_impl(bpf_core_type_id_local(type), NULL))
#define bpf_obj_drop(kptr) bpf_obj_drop_impl(kptr, NULL)

int bpf_list_push_front_impl(struct bpf_list_head *head,
			     struct bpf_list_node *node,
			     void *meta, __u64 off) __ksym;
#define bpf_list_push_front(head, node) bpf_list_push_front_impl(head, node, NULL, 0)

int bpf_list_push_back_impl(struct bpf_list_head *head,
			    struct bpf_list_node *node,
			    void *meta, __u64 off) __ksym;
#define bpf_list_push_back(head, node) bpf_list_push_back_impl(head, node, NULL, 0)

struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head) __ksym;
struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head) __ksym;
struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root,
				      struct bpf_rb_node *node) __ksym;
int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node,
			bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b),
			void *meta, __u64 off) __ksym;
#define bpf_rbtree_add(head, node, less) bpf_rbtree_add_impl(head, node, less, NULL, 0)

struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root) __ksym;

void *bpf_refcount_acquire_impl(void *kptr, void *meta) __ksym;
#define bpf_refcount_acquire(kptr) bpf_refcount_acquire_impl(kptr, NULL)
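
/*
 * Example (illustrative sketch): a vtime-ordered rbtree built from the kfuncs
 * above. All names are hypothetical; the node type, root and lock would be
 * defined by the scheduler, and container_of() as in the kernel's BPF
 * examples.
 *
 *	struct entry {
 *		struct bpf_rb_node node;
 *		u64 vtime;
 *	};
 *
 *	private(VTIME_TREE) struct bpf_spin_lock vtime_lock;
 *	private(VTIME_TREE) struct bpf_rb_root vtime_tree __contains(entry, node);
 *
 *	static bool vtime_less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
 *	{
 *		struct entry *ea = container_of(a, struct entry, node);
 *		const struct entry *eb = container_of(b, struct entry, node);
 *
 *		return ea->vtime < eb->vtime;
 *	}
 *
 *	// insertion must be done under the lock paired with the root
 *	bpf_spin_lock(&vtime_lock);
 *	bpf_rbtree_add(&vtime_tree, &e->node, vtime_less);
 *	bpf_spin_unlock(&vtime_lock);
 */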

/* task */
struct task_struct *bpf_task_from_pid(s32 pid) __ksym;
struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym;
void bpf_task_release(struct task_struct *p) __ksym;

/* cgroup */
struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level) __ksym;
void bpf_cgroup_release(struct cgroup *cgrp) __ksym;
struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym;

/* css iteration */
struct bpf_iter_css;
struct cgroup_subsys_state;
extern int bpf_iter_css_new(struct bpf_iter_css *it,
			    struct cgroup_subsys_state *start,
			    unsigned int flags) __weak __ksym;
extern struct cgroup_subsys_state *
bpf_iter_css_next(struct bpf_iter_css *it) __weak __ksym;
extern void bpf_iter_css_destroy(struct bpf_iter_css *it) __weak __ksym;

/* cpumask */
struct bpf_cpumask *bpf_cpumask_create(void) __ksym;
struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask) __ksym;
void bpf_cpumask_release(struct bpf_cpumask *cpumask) __ksym;
u32 bpf_cpumask_first(const struct cpumask *cpumask) __ksym;
u32 bpf_cpumask_first_zero(const struct cpumask *cpumask) __ksym;
void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask) __ksym;
bool bpf_cpumask_test_and_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
bool bpf_cpumask_test_and_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
void bpf_cpumask_setall(struct bpf_cpumask *cpumask) __ksym;
void bpf_cpumask_clear(struct bpf_cpumask *cpumask) __ksym;
bool bpf_cpumask_and(struct bpf_cpumask *dst, const struct cpumask *src1,
		     const struct cpumask *src2) __ksym;
void bpf_cpumask_or(struct bpf_cpumask *dst, const struct cpumask *src1,
		    const struct cpumask *src2) __ksym;
void bpf_cpumask_xor(struct bpf_cpumask *dst, const struct cpumask *src1,
		     const struct cpumask *src2) __ksym;
bool bpf_cpumask_equal(const struct cpumask *src1, const struct cpumask *src2) __ksym;
bool bpf_cpumask_intersects(const struct cpumask *src1, const struct cpumask *src2) __ksym;
bool bpf_cpumask_subset(const struct cpumask *src1, const struct cpumask *src2) __ksym;
bool bpf_cpumask_empty(const struct cpumask *cpumask) __ksym;
bool bpf_cpumask_full(const struct cpumask *cpumask) __ksym;
void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src) __ksym;
u32 bpf_cpumask_any_distribute(const struct cpumask *cpumask) __ksym;
u32 bpf_cpumask_any_and_distribute(const struct cpumask *src1,
				   const struct cpumask *src2) __ksym;
u32 bpf_cpumask_weight(const struct cpumask *cpumask) __ksym;

int bpf_iter_bits_new(struct bpf_iter_bits *it, const u64 *unsafe_ptr__ign, u32 nr_words) __ksym;
int *bpf_iter_bits_next(struct bpf_iter_bits *it) __ksym;
void bpf_iter_bits_destroy(struct bpf_iter_bits *it) __ksym;

#define def_iter_struct(name)							\
struct bpf_iter_##name {							\
	struct bpf_iter_bits it;						\
	const struct cpumask *bitmap;						\
};

#define def_iter_new(name)							\
static inline int bpf_iter_##name##_new(					\
	struct bpf_iter_##name *it, const u64 *unsafe_ptr__ign, u32 nr_words)	\
{										\
	it->bitmap = scx_bpf_get_##name##_cpumask();				\
	return bpf_iter_bits_new(&it->it, (const u64 *)it->bitmap,		\
				 sizeof(struct cpumask) / 8);			\
}

#define def_iter_next(name)							\
static inline int *bpf_iter_##name##_next(struct bpf_iter_##name *it) {	\
	return bpf_iter_bits_next(&it->it);					\
}

#define def_iter_destroy(name)							\
static inline void bpf_iter_##name##_destroy(struct bpf_iter_##name *it) {	\
	scx_bpf_put_cpumask(it->bitmap);					\
	bpf_iter_bits_destroy(&it->it);						\
}
#define def_for_each_cpu(cpu, name) for_each_##name##_cpu(cpu)

/// Provides iterators for possible and online CPUs.
///
/// # Example
///
/// ```
/// static inline void example_use() {
///     int *cpu;
///
///     for_each_possible_cpu(cpu) {
///         bpf_printk("CPU %d is possible", *cpu);
///     }
///
///     for_each_online_cpu(cpu) {
///         bpf_printk("CPU %d is online", *cpu);
///     }
/// }
/// ```
def_iter_struct(possible);
def_iter_new(possible);
def_iter_next(possible);
def_iter_destroy(possible);
#define for_each_possible_cpu(cpu) bpf_for_each(possible, cpu, NULL, 0)

def_iter_struct(online);
def_iter_new(online);
def_iter_next(online);
def_iter_destroy(online);
#define for_each_online_cpu(cpu) bpf_for_each(online, cpu, NULL, 0)

/*
 * Access a cpumask in read-only mode (typically to check bits).
 */
static __always_inline const struct cpumask *cast_mask(struct bpf_cpumask *mask)
{
	return (const struct cpumask *)mask;
}
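
/*
 * Example (illustrative sketch): kfuncs that only read a cpumask take
 * "const struct cpumask *", so a struct bpf_cpumask * must be cast first:
 *
 *	struct bpf_cpumask *mask = bpf_cpumask_create();
 *
 *	if (mask) {
 *		bpf_cpumask_set_cpu(0, mask);
 *		if (bpf_cpumask_test_cpu(0, cast_mask(mask)))
 *			bpf_printk("CPU 0 set");
 *		bpf_cpumask_release(mask);
 *	}
 */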

/*
 * Return true if task @p cannot migrate to a different CPU, false
 * otherwise.
 */
static inline bool is_migration_disabled(const struct task_struct *p)
{
	/*
	 * Testing p->migration_disabled in BPF code is tricky because
	 * migration is _always_ disabled while running BPF code. The prolog
	 * (__bpf_prog_enter) and epilog (__bpf_prog_exit) for BPF code
	 * execution disable and re-enable the migration of the current task,
	 * respectively. So, the _current_ task of the sched_ext ops is always
	 * migration-disabled. Moreover, p->migration_disabled could be two or
	 * greater when a sched_ext ops BPF code (e.g., ops.tick) is executed
	 * in the middle of another BPF program's execution.
	 *
	 * Therefore, we should decide that the _current_ task is
	 * migration-disabled only when its migration_disabled count is greater
	 * than one. In other words, when p->migration_disabled == 1, there is
	 * an ambiguity, so we should check if @p is the current task or not.
	 */
	if (bpf_core_field_exists(p->migration_disabled)) {
		if (p->migration_disabled == 1)
			return bpf_get_current_task_btf() != p;
		else
			return p->migration_disabled;
	}
	return false;
}

/* rcu */
void bpf_rcu_read_lock(void) __ksym;
void bpf_rcu_read_unlock(void) __ksym;

/*
 * Time helpers, most of which are from jiffies.h.
 */

/**
 * time_delta - Calculate the delta between new and old timestamps
 * @after: first comparable as u64
 * @before: second comparable as u64
 *
 * Return: the time difference, which is >= 0
 */
static inline s64 time_delta(u64 after, u64 before)
{
	return (s64)(after - before) > 0 ? (s64)(after - before) : 0;
}

/**
 * time_after - returns true if the time a is after time b.
 * @a: first comparable as u64
 * @b: second comparable as u64
 *
 * Do this with "<0" and ">=0" to only test the sign of the result. A
 * good compiler would generate better code (and a really good compiler
 * wouldn't care). Gcc is currently neither.
 *
 * Return: %true if time a is after time b, otherwise %false.
 */
static inline bool time_after(u64 a, u64 b)
{
	return (s64)(b - a) < 0;
}

/**
 * time_before - returns true if the time a is before time b.
 * @a: first comparable as u64
 * @b: second comparable as u64
 *
 * Return: %true if time a is before time b, otherwise %false.
 */
static inline bool time_before(u64 a, u64 b)
{
	return time_after(b, a);
}

/**
 * time_after_eq - returns true if the time a is after or the same as time b.
 * @a: first comparable as u64
 * @b: second comparable as u64
 *
 * Return: %true if time a is after or the same as time b, otherwise %false.
 */
static inline bool time_after_eq(u64 a, u64 b)
{
	return (s64)(a - b) >= 0;
}

/**
 * time_before_eq - returns true if the time a is before or the same as time b.
 * @a: first comparable as u64
 * @b: second comparable as u64
 *
 * Return: %true if time a is before or the same as time b, otherwise %false.
 */
static inline bool time_before_eq(u64 a, u64 b)
{
	return time_after_eq(b, a);
}

/**
 * time_in_range - Calculate whether a is in the range of [b, c].
 * @a: time to test
 * @b: beginning of the range
 * @c: end of the range
 *
 * Return: %true if time a is in the range [b, c], otherwise %false.
 */
static inline bool time_in_range(u64 a, u64 b, u64 c)
{
	return time_after_eq(a, b) && time_before_eq(a, c);
}

/**
 * time_in_range_open - Calculate whether a is in the range of [b, c).
 * @a: time to test
 * @b: beginning of the range
 * @c: end of the range
 *
 * Return: %true if time a is in the range [b, c), otherwise %false.
 */
static inline bool time_in_range_open(u64 a, u64 b, u64 c)
{
	return time_after_eq(a, b) && time_before(a, c);
}
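
/*
 * Example (illustrative sketch): these helpers remain correct across u64
 * wraparound, unlike a direct "<" comparison:
 *
 *	u64 now = scx_bpf_now();
 *	u64 deadline = ...;	// e.g. a per-task deadline maintained elsewhere
 *
 *	if (time_before(now, deadline))
 *		;	// the deadline has not passed yet
 */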

/*
 * Other helpers
 */

/* useful compiler attributes */
#ifndef likely
#define likely(x) __builtin_expect(!!(x), 1)
#endif
#ifndef unlikely
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
#ifndef __maybe_unused
#define __maybe_unused __attribute__((__unused__))
#endif

/*
 * READ/WRITE_ONCE() are from kernel (include/asm-generic/rwonce.h). They
 * prevent compiler from caching, redoing or reordering reads or writes.
 */
typedef __u8  __attribute__((__may_alias__)) __u8_alias_t;
typedef __u16 __attribute__((__may_alias__)) __u16_alias_t;
typedef __u32 __attribute__((__may_alias__)) __u32_alias_t;
typedef __u64 __attribute__((__may_alias__)) __u64_alias_t;

static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(__u8_alias_t  *) res = *(volatile __u8_alias_t  *) p; break;
	case 2: *(__u16_alias_t *) res = *(volatile __u16_alias_t *) p; break;
	case 4: *(__u32_alias_t *) res = *(volatile __u32_alias_t *) p; break;
	case 8: *(__u64_alias_t *) res = *(volatile __u64_alias_t *) p; break;
	default:
		barrier();
		__builtin_memcpy((void *)res, (const void *)p, size);
		barrier();
	}
}

static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile __u8_alias_t  *) p = *(__u8_alias_t  *) res; break;
	case 2: *(volatile __u16_alias_t *) p = *(__u16_alias_t *) res; break;
	case 4: *(volatile __u32_alias_t *) p = *(__u32_alias_t *) res; break;
	case 8: *(volatile __u64_alias_t *) p = *(__u64_alias_t *) res; break;
	default:
		barrier();
		__builtin_memcpy((void *)p, (const void *)res, size);
		barrier();
	}
}

/*
 * __unqual_typeof(x) - Declare an unqualified scalar type, leaving
 * non-scalar types unchanged.
 *
 * Prefer C11 _Generic for better compile-times and simpler code. Note: 'char'
 * is not type-compatible with 'signed char', and we define a separate case.
 *
 * This is copied verbatim from the kernel's include/linux/compiler_types.h,
 * but with the default expression (for pointers) changed from (x) to
 * (typeof(x)0).
 *
 * This is because LLVM has a bug where for lvalue (x), it does not get rid of
 * an extra address_space qualifier, but does in case of rvalue (typeof(x)0).
 * Hence, for pointers, we need to create an rvalue expression to get the
 * desired type. See https://github.com/llvm/llvm-project/issues/53400.
 */
#define __scalar_type_to_expr_cases(type)					\
	unsigned type : (unsigned type)0, signed type : (signed type)0

#define __unqual_typeof(x)							\
	typeof(_Generic((x),							\
		char: (char)0,							\
		__scalar_type_to_expr_cases(char),				\
		__scalar_type_to_expr_cases(short),				\
		__scalar_type_to_expr_cases(int),				\
		__scalar_type_to_expr_cases(long),				\
		__scalar_type_to_expr_cases(long long),				\
		default: (typeof(x))0))

#define READ_ONCE(x)								\
({										\
	union { __unqual_typeof(x) __val; char __c[1]; } __u =			\
		{ .__c = { 0 } };						\
	__read_once_size((__unqual_typeof(x) *)&(x), __u.__c, sizeof(x));	\
	__u.__val;								\
})

#define WRITE_ONCE(x, val)							\
({										\
	union { __unqual_typeof(x) __val; char __c[1]; } __u =			\
		{ .__val = (val) };						\
	__write_once_size((__unqual_typeof(x) *)&(x), __u.__c, sizeof(x));	\
	__u.__val;								\
})

/*
 * __calc_avg - Calculate an exponentially weighted moving average (EWMA) of
 * @old and @new values. @decay determines how much of the @old value is
 * retained. With a larger @decay value, the moving average changes slowly,
 * exhibiting fewer fluctuations.
 */
#define __calc_avg(old, new, decay) ({						\
	typeof(decay) thr = 1 << (decay);					\
	typeof(old) ret;							\
	if (((old) < thr) || ((new) < thr)) {					\
		if (((old) == 1) && ((new) == 0))				\
			ret = 0;						\
		else								\
			ret = ((old) - ((old) >> 1)) + ((new) >> 1);		\
	} else {								\
		ret = ((old) - ((old) >> (decay))) + ((new) >> (decay));	\
	}									\
	ret;									\
})
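
/*
 * Example: with @decay = 2 (and values at or above the 1 << 2 threshold),
 * each update keeps 3/4 of @old and blends in 1/4 of @new, i.e.
 * avg = old - old/4 + new/4:
 *
 *	avg = __calc_avg(avg, sample, 2);	// "sample" is hypothetical
 */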

/*
 * log2_u32 - Compute the base 2 logarithm of a 32-bit exponential value.
 * @v: The value for which we're computing the base 2 logarithm.
 */
static inline u32 log2_u32(u32 v)
{
	u32 r;
	u32 shift;

	r = (v > 0xFFFF) << 4; v >>= r;
	shift = (v > 0xFF) << 3; v >>= shift; r |= shift;
	shift = (v > 0xF) << 2; v >>= shift; r |= shift;
	shift = (v > 0x3) << 1; v >>= shift; r |= shift;
	r |= (v >> 1);
	return r;
}

/*
 * log2_u64 - Compute the base 2 logarithm of a 64-bit exponential value.
 * @v: The value for which we're computing the base 2 logarithm.
 */
static inline u32 log2_u64(u64 v)
{
	u32 hi = v >> 32;

	if (hi)
		return log2_u32(hi) + 32 + 1;
	else
		return log2_u32(v) + 1;
}

/*
 * __sqrt_u64 - Calculate the integer square root of value @x using Newton's
 * method.
 */
static inline u64 __sqrt_u64(u64 x)
{
	if (x == 0 || x == 1)
		return x;

	u64 r = ((1ULL << 32) > x) ? x : (1ULL << 32);

	for (int i = 0; i < 8; ++i) {
		u64 q = x / r;

		if (r <= q)
			break;
		r = (r + q) >> 1;
	}
	return r;
}

/*
 * Return a value proportionally scaled to the task's weight.
 */
static inline u64 scale_by_task_weight(const struct task_struct *p, u64 value)
{
	return (value * p->scx.weight) / 100;
}

/*
 * Return a value inversely proportional to the task's weight.
 */
static inline u64 scale_by_task_weight_inverse(const struct task_struct *p, u64 value)
{
	return value * 100 / p->scx.weight;
}

#include "compat.bpf.h"
#include "enums.bpf.h"

#endif /* __SCX_COMMON_BPF_H */