/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
 * Copyright (c) 2022 David Vernet <dvernet@meta.com>
 */
#ifndef __SCX_COMMON_BPF_H
#define __SCX_COMMON_BPF_H

/*
 * The generated kfunc prototypes in vmlinux.h are missing address space
 * attributes which cause build failures. For now, suppress the generated
 * prototypes. See https://github.com/sched-ext/scx/issues/1111.
 */
#define BPF_NO_KFUNC_PROTOTYPES

#ifdef LSP
#define __bpf__
#include "../vmlinux.h"
#else
#include "vmlinux.h"
#endif

#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <asm-generic/errno.h>
#include "user_exit_info.bpf.h"
#include "enum_defs.autogen.h"

#define PF_IDLE			0x00000002	/* I am an IDLE thread */
#define PF_IO_WORKER		0x00000010	/* Task is an IO worker */
#define PF_WQ_WORKER		0x00000020	/* I'm a workqueue worker */
#define PF_KCOMPACTD		0x00010000	/* I am kcompactd */
#define PF_KSWAPD		0x00020000	/* I am kswapd */
#define PF_KTHREAD		0x00200000	/* I am a kernel thread */
#define PF_EXITING		0x00000004
#define CLOCK_MONOTONIC		1

#ifndef NR_CPUS
#define NR_CPUS 1024
#endif

#ifndef NUMA_NO_NODE
#define NUMA_NO_NODE	(-1)
#endif

extern int LINUX_KERNEL_VERSION __kconfig;
extern const char CONFIG_CC_VERSION_TEXT[64] __kconfig __weak;
extern const char CONFIG_LOCALVERSION[64] __kconfig __weak;

/*
 * Earlier versions of clang/pahole lost upper 32bits in 64bit enums which can
 * lead to really confusing misbehaviors. Let's trigger a build failure.
 */
static inline void ___vmlinux_h_sanity_check___(void)
{
	_Static_assert(SCX_DSQ_FLAG_BUILTIN,
		       "bpftool generated vmlinux.h is missing high bits for 64bit enums, upgrade clang and pahole");
}

s32 scx_bpf_create_dsq(u64 dsq_id, s32 node) __ksym;
s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, bool *is_idle) __ksym;
s32 __scx_bpf_select_cpu_and(struct task_struct *p, const struct cpumask *cpus_allowed,
			     struct scx_bpf_select_cpu_and_args *args) __ksym __weak;
bool __scx_bpf_dsq_insert_vtime(struct task_struct *p, struct scx_bpf_dsq_insert_vtime_args *args) __ksym __weak;
u32 scx_bpf_dispatch_nr_slots(void) __ksym;
void scx_bpf_dispatch_cancel(void) __ksym;
bool scx_bpf_dsq_move_to_local(u64 dsq_id) __ksym __weak;
void scx_bpf_dsq_move_set_slice(struct bpf_iter_scx_dsq *it__iter, u64 slice) __ksym __weak;
void scx_bpf_dsq_move_set_vtime(struct bpf_iter_scx_dsq *it__iter, u64 vtime) __ksym __weak;
bool scx_bpf_dsq_move(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
bool scx_bpf_dsq_move_vtime(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
u32 scx_bpf_reenqueue_local(void) __ksym;
void scx_bpf_kick_cpu(s32 cpu, u64 flags) __ksym;
s32 scx_bpf_dsq_nr_queued(u64 dsq_id) __ksym;
void scx_bpf_destroy_dsq(u64 dsq_id) __ksym;
int bpf_iter_scx_dsq_new(struct bpf_iter_scx_dsq *it, u64 dsq_id, u64 flags) __ksym __weak;
struct task_struct *bpf_iter_scx_dsq_next(struct bpf_iter_scx_dsq *it) __ksym __weak;
void bpf_iter_scx_dsq_destroy(struct bpf_iter_scx_dsq *it) __ksym __weak;
void scx_bpf_exit_bstr(s64 exit_code, char *fmt, unsigned long long *data, u32 data__sz) __ksym __weak;
void scx_bpf_error_bstr(char *fmt, unsigned long long *data, u32 data_len) __ksym;
void scx_bpf_dump_bstr(char *fmt, unsigned long long *data, u32 data_len) __ksym __weak;
u32 scx_bpf_cpuperf_cap(s32 cpu) __ksym __weak;
u32 scx_bpf_cpuperf_cur(s32 cpu) __ksym __weak;
void scx_bpf_cpuperf_set(s32 cpu, u32 perf) __ksym __weak;
u32 scx_bpf_nr_node_ids(void) __ksym __weak;
u32 scx_bpf_nr_cpu_ids(void) __ksym __weak;
int scx_bpf_cpu_node(s32 cpu) __ksym __weak;
const struct cpumask *scx_bpf_get_possible_cpumask(void) __ksym __weak;
const struct cpumask *scx_bpf_get_online_cpumask(void) __ksym __weak;
void scx_bpf_put_cpumask(const struct cpumask *cpumask) __ksym __weak;
const struct cpumask *scx_bpf_get_idle_cpumask_node(int node) __ksym __weak;
const struct cpumask *scx_bpf_get_idle_cpumask(void) __ksym;
const struct cpumask *scx_bpf_get_idle_smtmask_node(int node) __ksym __weak;
const struct cpumask *scx_bpf_get_idle_smtmask(void) __ksym;
void scx_bpf_put_idle_cpumask(const struct cpumask *cpumask) __ksym;
bool scx_bpf_test_and_clear_cpu_idle(s32 cpu) __ksym;
s32 scx_bpf_pick_idle_cpu_node(const cpumask_t *cpus_allowed, int node, u64 flags) __ksym __weak;
s32 scx_bpf_pick_idle_cpu(const cpumask_t *cpus_allowed, u64 flags) __ksym;
s32 scx_bpf_pick_any_cpu_node(const cpumask_t *cpus_allowed, int node, u64 flags) __ksym __weak;
s32 scx_bpf_pick_any_cpu(const cpumask_t *cpus_allowed, u64 flags) __ksym;
bool scx_bpf_task_running(const struct task_struct *p) __ksym;
s32 scx_bpf_task_cpu(const struct task_struct *p) __ksym;
struct rq *scx_bpf_cpu_rq(s32 cpu) __ksym;
struct rq *scx_bpf_locked_rq(void) __ksym;
struct task_struct *scx_bpf_cpu_curr(s32 cpu) __ksym __weak;
struct cgroup *scx_bpf_task_cgroup(struct task_struct *p) __ksym __weak;
u64 scx_bpf_now(void) __ksym __weak;
void scx_bpf_events(struct scx_event_stats *events, size_t events__sz) __ksym __weak;

/*
 * Use the following as @it__iter when calling scx_bpf_dsq_move[_vtime]() from
 * within bpf_for_each() loops.
 */
#define BPF_FOR_EACH_ITER	(&___it)

#define scx_read_event(e, name)						\
	(bpf_core_field_exists((e)->name) ? (e)->name : 0)

static inline __attribute__((format(printf, 1, 2)))
void ___scx_bpf_bstr_format_checker(const char *fmt, ...) {}

#define SCX_STRINGIFY(x) #x
#define SCX_TOSTRING(x) SCX_STRINGIFY(x)

/*
 * Helper macro for initializing the fmt and variadic argument inputs to both
 * bstr exit kfuncs. Callers of this macro should use ___fmt and ___param to
 * refer to the initialized list of inputs to the bstr kfunc.
 */
#define scx_bpf_bstr_preamble(fmt, args...)				\
	static char ___fmt[] = fmt;					\
	/*								\
	 * Note that __param[] must have at least one			\
	 * element to keep the verifier happy.				\
	 */								\
	unsigned long long ___param[___bpf_narg(args) ?: 1] = {};	\
									\
	_Pragma("GCC diagnostic push")					\
	_Pragma("GCC diagnostic ignored \"-Wint-conversion\"")		\
	___bpf_fill(___param, args);					\
	_Pragma("GCC diagnostic pop")

/*
 * scx_bpf_exit() wraps the scx_bpf_exit_bstr() kfunc with variadic arguments
 * instead of an array of u64. Using this macro will cause the scheduler to
 * exit cleanly with the specified exit code being passed to user space.
 */
#define scx_bpf_exit(code, fmt, args...)				\
({									\
	scx_bpf_bstr_preamble(fmt, args)				\
	scx_bpf_exit_bstr(code, ___fmt, ___param, sizeof(___param));	\
	___scx_bpf_bstr_format_checker(fmt, ##args);			\
})

/*
 * scx_bpf_error() wraps the scx_bpf_error_bstr() kfunc with variadic arguments
 * instead of an array of u64. Invoking this macro will cause the scheduler to
 * exit in an erroneous state, with diagnostic information being passed to the
 * user. It appends the file and line number to aid debugging.
 */
#define scx_bpf_error(fmt, args...)					\
({									\
	scx_bpf_bstr_preamble(						\
		__FILE__ ":" SCX_TOSTRING(__LINE__) ": " fmt, ##args)	\
	scx_bpf_error_bstr(___fmt, ___param, sizeof(___param));	\
	___scx_bpf_bstr_format_checker(					\
		__FILE__ ":" SCX_TOSTRING(__LINE__) ": " fmt, ##args);	\
})
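
/*
 * Example usage (illustrative sketch; @my_dsq_id is a hypothetical DSQ id
 * defined by the scheduler):
 *
 *	if (scx_bpf_create_dsq(my_dsq_id, NUMA_NO_NODE) < 0)
 *		scx_bpf_error("failed to create DSQ 0x%llx", my_dsq_id);
 *
 * scx_bpf_exit() follows the same pattern but additionally takes an exit code,
 * e.g. scx_bpf_exit(0, "shutting down").
 */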
/*
 * scx_bpf_dump() wraps the scx_bpf_dump_bstr() kfunc with variadic arguments
 * instead of an array of u64. To be used from ops.dump() and friends.
 */
#define scx_bpf_dump(fmt, args...)					\
({									\
	scx_bpf_bstr_preamble(fmt, args)				\
	scx_bpf_dump_bstr(___fmt, ___param, sizeof(___param));		\
	___scx_bpf_bstr_format_checker(fmt, ##args);			\
})

/*
 * scx_bpf_dump_header() is a wrapper around scx_bpf_dump that adds a header
 * of system information for debugging.
 */
#define scx_bpf_dump_header()						\
({									\
	scx_bpf_dump("kernel: %d.%d.%d %s\ncc: %s\n",			\
		     LINUX_KERNEL_VERSION >> 16,			\
		     LINUX_KERNEL_VERSION >> 8 & 0xFF,			\
		     LINUX_KERNEL_VERSION & 0xFF,			\
		     CONFIG_LOCALVERSION,				\
		     CONFIG_CC_VERSION_TEXT);				\
})

#define BPF_STRUCT_OPS(name, args...)					\
SEC("struct_ops/"#name)							\
BPF_PROG(name, ##args)

#define BPF_STRUCT_OPS_SLEEPABLE(name, args...)				\
SEC("struct_ops.s/"#name)						\
BPF_PROG(name, ##args)

/**
 * RESIZABLE_ARRAY - Generates annotations for an array that may be resized
 * @elfsec: the data section of the BPF program in which to place the array
 * @arr: the name of the array
 *
 * libbpf has an API for setting map value sizes. Since data sections (i.e.
 * bss, data, rodata) themselves are maps, a data section can be resized. If
 * a data section has an array as its last element, the BTF info for that
 * array will be adjusted so that the length of the array is extended to meet
 * the new length of the data section. This macro annotates an array to have
 * an element count of one with the assumption that this array can be resized
 * within the userspace program. It also annotates the section specifier so
 * this array exists in a custom sub data section which can be resized
 * independently.
 *
 * See RESIZE_ARRAY() for the userspace convenience macro for resizing an
 * array declared with RESIZABLE_ARRAY().
 */
#define RESIZABLE_ARRAY(elfsec, arr) arr[1] SEC("."#elfsec"."#arr)
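
/*
 * Example usage (illustrative sketch; @cpu_stats is a hypothetical array
 * name): declare a per-CPU counter array with a single element here and let
 * user space grow it to the actual CPU count via RESIZE_ARRAY() before the
 * program is loaded.
 *
 *	u64 RESIZABLE_ARRAY(data, cpu_stats);
 */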
/**
 * MEMBER_VPTR - Obtain the verified pointer to a struct or array member
 * @base: struct or array to index
 * @member: dereferenced member (e.g. .field, [idx0][idx1], .field[idx0] ...)
 *
 * The verifier often gets confused by the instruction sequence the compiler
 * generates for indexing struct fields or arrays. This macro forces the
 * compiler to generate a code sequence which first calculates the byte offset,
 * checks it against the struct or array size and adds that byte offset to
 * generate the pointer to the member to help the verifier.
 *
 * Ideally, we want to abort if the calculated offset is out-of-bounds. However,
 * BPF currently doesn't support abort, so evaluate to %NULL instead. The caller
 * must check for %NULL and take appropriate action to appease the verifier. To
 * avoid confusing the verifier, it's best to check for %NULL and dereference
 * immediately.
 *
 *	vptr = MEMBER_VPTR(my_array, [i][j]);
 *	if (!vptr)
 *		return error;
 *	*vptr = new_value;
 *
 * sizeof(@base) should encompass the memory area to be accessed and thus can't
 * be a pointer to the area. Use `MEMBER_VPTR(*ptr, .member)` instead of
 * `MEMBER_VPTR(ptr, ->member)`.
 */
#ifndef MEMBER_VPTR
#define MEMBER_VPTR(base, member) (typeof((base) member) *)			\
({										\
	u64 __base = (u64)&(base);						\
	u64 __addr = (u64)&((base) member) - __base;				\
	_Static_assert(sizeof(base) >= sizeof((base) member),			\
		       "@base is smaller than @member, is @base a pointer?");	\
	asm volatile (								\
		"if %0 <= %[max] goto +2\n"					\
		"%0 = 0\n"							\
		"goto +1\n"							\
		"%0 += %1\n"							\
		: "+r"(__addr)							\
		: "r"(__base),							\
		  [max]"i"(sizeof(base) - sizeof((base) member)));		\
	__addr;									\
})
#endif /* MEMBER_VPTR */

/**
 * ARRAY_ELEM_PTR - Obtain the verified pointer to an array element
 * @arr: array to index into
 * @i: array index
 * @n: number of elements in array
 *
 * Similar to MEMBER_VPTR() but is intended for use with arrays where the
 * element count needs to be explicit.
 * It can be used in cases where a global array is defined with an initial
 * size but is intended to be resized before loading the BPF program.
 * Without this version of the macro, MEMBER_VPTR() will use the compile time
 * size of the array to compute the max, which will result in rejection by
 * the verifier.
 */
#ifndef ARRAY_ELEM_PTR
#define ARRAY_ELEM_PTR(arr, i, n) (typeof(arr[i]) *)			\
({									\
	u64 __base = (u64)arr;						\
	u64 __addr = (u64)&(arr[i]) - __base;				\
	asm volatile (							\
		"if %0 <= %[max] goto +2\n"				\
		"%0 = 0\n"						\
		"goto +1\n"						\
		"%0 += %1\n"						\
		: "+r"(__addr)						\
		: "r"(__base),						\
		  [max]"r"(sizeof(arr[0]) * ((n) - 1)));		\
	__addr;								\
})
#endif /* ARRAY_ELEM_PTR */
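
/*
 * Example usage (illustrative sketch; @cpu_stats and @nr_cpu_ids are assumed
 * to be declared elsewhere, e.g. @cpu_stats via RESIZABLE_ARRAY() above):
 *
 *	u64 *stat = ARRAY_ELEM_PTR(cpu_stats, cpu, nr_cpu_ids);
 *	if (!stat)
 *		return;
 *	*stat += 1;
 */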
/*
 * BPF declarations and helpers
 */

/* list and rbtree */
#define __contains(name, node) __attribute__((btf_decl_tag("contains:" #name ":" #node)))
#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))

void *bpf_obj_new_impl(__u64 local_type_id, void *meta) __ksym;
void bpf_obj_drop_impl(void *kptr, void *meta) __ksym;

#define bpf_obj_new(type) ((type *)bpf_obj_new_impl(bpf_core_type_id_local(type), NULL))
#define bpf_obj_drop(kptr) bpf_obj_drop_impl(kptr, NULL)

int bpf_list_push_front_impl(struct bpf_list_head *head,
			     struct bpf_list_node *node,
			     void *meta, __u64 off) __ksym;
#define bpf_list_push_front(head, node) bpf_list_push_front_impl(head, node, NULL, 0)

int bpf_list_push_back_impl(struct bpf_list_head *head,
			    struct bpf_list_node *node,
			    void *meta, __u64 off) __ksym;
#define bpf_list_push_back(head, node) bpf_list_push_back_impl(head, node, NULL, 0)

struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head) __ksym;
struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head) __ksym;
struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root,
				      struct bpf_rb_node *node) __ksym;
int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node,
			bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b),
			void *meta, __u64 off) __ksym;
#define bpf_rbtree_add(head, node, less) bpf_rbtree_add_impl(head, node, less, NULL, 0)

struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root) __ksym;

void *bpf_refcount_acquire_impl(void *kptr, void *meta) __ksym;
#define bpf_refcount_acquire(kptr) bpf_refcount_acquire_impl(kptr, NULL)

/* task */
struct task_struct *bpf_task_from_pid(s32 pid) __ksym;
struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym;
void bpf_task_release(struct task_struct *p) __ksym;
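
/*
 * Example usage (illustrative sketch): kfuncs that acquire a task reference
 * must be paired with bpf_task_release() once the pointer is no longer
 * needed.
 *
 *	struct task_struct *p = bpf_task_from_pid(pid);
 *
 *	if (p) {
 *		bpf_printk("pid %d comm %s", pid, p->comm);
 *		bpf_task_release(p);
 *	}
 */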
/* cgroup */
struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level) __ksym;
void bpf_cgroup_release(struct cgroup *cgrp) __ksym;
struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym;

/* css iteration */
struct bpf_iter_css;
struct cgroup_subsys_state;
extern int bpf_iter_css_new(struct bpf_iter_css *it,
			    struct cgroup_subsys_state *start,
			    unsigned int flags) __weak __ksym;
extern struct cgroup_subsys_state *
bpf_iter_css_next(struct bpf_iter_css *it) __weak __ksym;
extern void bpf_iter_css_destroy(struct bpf_iter_css *it) __weak __ksym;

/* cpumask */
struct bpf_cpumask *bpf_cpumask_create(void) __ksym;
struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask) __ksym;
void bpf_cpumask_release(struct bpf_cpumask *cpumask) __ksym;
u32 bpf_cpumask_first(const struct cpumask *cpumask) __ksym;
u32 bpf_cpumask_first_zero(const struct cpumask *cpumask) __ksym;
void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask) __ksym;
bool bpf_cpumask_test_and_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
bool bpf_cpumask_test_and_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
void bpf_cpumask_setall(struct bpf_cpumask *cpumask) __ksym;
void bpf_cpumask_clear(struct bpf_cpumask *cpumask) __ksym;
bool bpf_cpumask_and(struct bpf_cpumask *dst, const struct cpumask *src1,
		     const struct cpumask *src2) __ksym;
void bpf_cpumask_or(struct bpf_cpumask *dst, const struct cpumask *src1,
		    const struct cpumask *src2) __ksym;
void bpf_cpumask_xor(struct bpf_cpumask *dst, const struct cpumask *src1,
		     const struct cpumask *src2) __ksym;
bool bpf_cpumask_equal(const struct cpumask *src1, const struct cpumask *src2) __ksym;
bool bpf_cpumask_intersects(const struct cpumask *src1, const struct cpumask *src2) __ksym;
bool bpf_cpumask_subset(const struct cpumask *src1, const struct cpumask *src2) __ksym;
bool bpf_cpumask_empty(const struct cpumask *cpumask) __ksym;
bool bpf_cpumask_full(const struct cpumask *cpumask) __ksym;
void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src) __ksym;
u32 bpf_cpumask_any_distribute(const struct cpumask *cpumask) __ksym;
u32 bpf_cpumask_any_and_distribute(const struct cpumask *src1,
				   const struct cpumask *src2) __ksym;
u32 bpf_cpumask_weight(const struct cpumask *cpumask) __ksym;

int bpf_iter_bits_new(struct bpf_iter_bits *it, const u64 *unsafe_ptr__ign, u32 nr_words) __ksym;
int *bpf_iter_bits_next(struct bpf_iter_bits *it) __ksym;
void bpf_iter_bits_destroy(struct bpf_iter_bits *it) __ksym;

#define def_iter_struct(name)							\
struct bpf_iter_##name {							\
	struct bpf_iter_bits it;						\
	const struct cpumask *bitmap;						\
};

#define def_iter_new(name)							\
static inline int bpf_iter_##name##_new(					\
	struct bpf_iter_##name *it, const u64 *unsafe_ptr__ign, u32 nr_words)	\
{										\
	it->bitmap = scx_bpf_get_##name##_cpumask();				\
	return bpf_iter_bits_new(&it->it, (const u64 *)it->bitmap,		\
				 sizeof(struct cpumask) / 8);			\
}

#define def_iter_next(name)							\
static inline int *bpf_iter_##name##_next(struct bpf_iter_##name *it) {	\
	return bpf_iter_bits_next(&it->it);					\
}

#define def_iter_destroy(name)							\
static inline void bpf_iter_##name##_destroy(struct bpf_iter_##name *it) {	\
	scx_bpf_put_cpumask(it->bitmap);					\
	bpf_iter_bits_destroy(&it->it);						\
}
#define def_for_each_cpu(cpu, name) for_each_##name##_cpu(cpu)

/// Provides iterators for the possible and online CPUs.
///
/// # Example
///
/// ```
/// static inline void example_use() {
///     int *cpu;
///
///     for_each_possible_cpu(cpu) {
///         bpf_printk("CPU %d is possible", *cpu);
///     }
///
///     for_each_online_cpu(cpu) {
///         bpf_printk("CPU %d is online", *cpu);
///     }
/// }
/// ```
def_iter_struct(possible);
def_iter_new(possible);
def_iter_next(possible);
def_iter_destroy(possible);
#define for_each_possible_cpu(cpu) bpf_for_each(possible, cpu, NULL, 0)

def_iter_struct(online);
def_iter_new(online);
def_iter_next(online);
def_iter_destroy(online);
#define for_each_online_cpu(cpu) bpf_for_each(online, cpu, NULL, 0)

/*
 * Access a cpumask in read-only mode (typically to check bits).
 */
static __always_inline const struct cpumask *cast_mask(struct bpf_cpumask *mask)
{
	return (const struct cpumask *)mask;
}
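
/*
 * Example usage (illustrative sketch): a struct bpf_cpumask * must be cast
 * with cast_mask() before being passed to kfuncs that take a
 * const struct cpumask *.
 *
 *	struct bpf_cpumask *mask = bpf_cpumask_create();
 *
 *	if (!mask)
 *		return;
 *	bpf_cpumask_set_cpu(0, mask);
 *	if (bpf_cpumask_test_cpu(0, cast_mask(mask)))
 *		bpf_printk("CPU 0 is set");
 *	bpf_cpumask_release(mask);
 */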
/*
 * Return true if task @p cannot migrate to a different CPU, false
 * otherwise.
 */
static inline bool is_migration_disabled(const struct task_struct *p)
{
	/*
	 * Testing p->migration_disabled in BPF code is tricky because
	 * migration is _always_ disabled while running the BPF code.
	 * The prolog (__bpf_prog_enter) and epilog (__bpf_prog_exit) for BPF
	 * code execution disable and re-enable the migration of the current
	 * task, respectively. So, the _current_ task of the sched_ext ops is
	 * always migration-disabled. Moreover, p->migration_disabled could be
	 * two or greater when a sched_ext ops BPF code (e.g., ops.tick) is
	 * executed in the middle of the other BPF code execution.
	 *
	 * Therefore, we should decide that the _current_ task is
	 * migration-disabled only when its migration_disabled count is greater
	 * than one. In other words, when p->migration_disabled == 1, there is
	 * an ambiguity, so we should check if @p is the current task or not.
	 */
	if (bpf_core_field_exists(p->migration_disabled)) {
		if (p->migration_disabled == 1)
			return bpf_get_current_task_btf() != p;
		else
			return p->migration_disabled;
	}
	return false;
}

/* rcu */
void bpf_rcu_read_lock(void) __ksym;
void bpf_rcu_read_unlock(void) __ksym;

/*
 * Time helpers, most of which are from jiffies.h.
 */

/**
 * time_delta - Calculate the delta between new and old time stamp
 * @after: first comparable as u64
 * @before: second comparable as u64
 *
 * Return: the time difference, which is >= 0
 */
static inline s64 time_delta(u64 after, u64 before)
{
	return (s64)(after - before) > 0 ? (s64)(after - before) : 0;
}

/**
 * time_after - returns true if the time a is after time b.
 * @a: first comparable as u64
 * @b: second comparable as u64
 *
 * Do this with "<0" and ">=0" to only test the sign of the result. A
 * good compiler would generate better code (and a really good compiler
 * wouldn't care). Gcc is currently neither.
 *
 * Return: %true if time a is after time b, otherwise %false.
 */
static inline bool time_after(u64 a, u64 b)
{
	return (s64)(b - a) < 0;
}

/**
 * time_before - returns true if the time a is before time b.
 * @a: first comparable as u64
 * @b: second comparable as u64
 *
 * Return: %true if time a is before time b, otherwise %false.
 */
static inline bool time_before(u64 a, u64 b)
{
	return time_after(b, a);
}

/**
 * time_after_eq - returns true if the time a is after or the same as time b.
 * @a: first comparable as u64
 * @b: second comparable as u64
 *
 * Return: %true if time a is after or the same as time b, otherwise %false.
 */
static inline bool time_after_eq(u64 a, u64 b)
{
	return (s64)(a - b) >= 0;
}

/**
 * time_before_eq - returns true if the time a is before or the same as time b.
 * @a: first comparable as u64
 * @b: second comparable as u64
 *
 * Return: %true if time a is before or the same as time b, otherwise %false.
 */
static inline bool time_before_eq(u64 a, u64 b)
{
	return time_after_eq(b, a);
}

/**
 * time_in_range - Calculate whether a is in the range of [b, c].
 * @a: time to test
 * @b: beginning of the range
 * @c: end of the range
 *
 * Return: %true if time a is in the range [b, c], otherwise %false.
 */
static inline bool time_in_range(u64 a, u64 b, u64 c)
{
	return time_after_eq(a, b) && time_before_eq(a, c);
}

/**
 * time_in_range_open - Calculate whether a is in the range of [b, c).
 * @a: time to test
 * @b: beginning of the range
 * @c: end of the range
 *
 * Return: %true if time a is in the range [b, c), otherwise %false.
 */
static inline bool time_in_range_open(u64 a, u64 b, u64 c)
{
	return time_after_eq(a, b) && time_before(a, c);
}
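
/*
 * Example usage (illustrative sketch; @taskc is a hypothetical per-task
 * context whose @deadline timestamp was taken from scx_bpf_now()):
 *
 *	u64 now = scx_bpf_now();
 *
 *	if (time_before(taskc->deadline, now))
 *		taskc->lag = time_delta(now, taskc->deadline);
 */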

/*
 * Other helpers
 */

/* useful compiler attributes */
#ifndef likely
#define likely(x) __builtin_expect(!!(x), 1)
#endif
#ifndef unlikely
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
#ifndef __maybe_unused
#define __maybe_unused __attribute__((__unused__))
#endif

/*
 * READ/WRITE_ONCE() are from the kernel (include/asm-generic/rwonce.h). They
 * prevent the compiler from caching, redoing or reordering reads or writes.
 */
typedef __u8 __attribute__((__may_alias__)) __u8_alias_t;
typedef __u16 __attribute__((__may_alias__)) __u16_alias_t;
typedef __u32 __attribute__((__may_alias__)) __u32_alias_t;
typedef __u64 __attribute__((__may_alias__)) __u64_alias_t;

static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(__u8_alias_t *) res = *(volatile __u8_alias_t *) p; break;
	case 2: *(__u16_alias_t *) res = *(volatile __u16_alias_t *) p; break;
	case 4: *(__u32_alias_t *) res = *(volatile __u32_alias_t *) p; break;
	case 8: *(__u64_alias_t *) res = *(volatile __u64_alias_t *) p; break;
	default:
		barrier();
		__builtin_memcpy((void *)res, (const void *)p, size);
		barrier();
	}
}

static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile __u8_alias_t *) p = *(__u8_alias_t *) res; break;
	case 2: *(volatile __u16_alias_t *) p = *(__u16_alias_t *) res; break;
	case 4: *(volatile __u32_alias_t *) p = *(__u32_alias_t *) res; break;
	case 8: *(volatile __u64_alias_t *) p = *(__u64_alias_t *) res; break;
	default:
		barrier();
		__builtin_memcpy((void *)p, (const void *)res, size);
		barrier();
	}
}

/*
 * __unqual_typeof(x) - Declare an unqualified scalar type, leaving
 * non-scalar types unchanged.
 *
 * Prefer C11 _Generic for better compile-times and simpler code. Note: 'char'
 * is not type-compatible with 'signed char', and we define a separate case.
 *
 * This is copied verbatim from the kernel's include/linux/compiler_types.h,
 * but with the default expression (for pointers) changed from (x) to
 * (typeof(x)0).
 *
 * This is because LLVM has a bug where for lvalue (x), it does not get rid of
 * an extra address_space qualifier, but does in case of rvalue (typeof(x)0).
 * Hence, for pointers, we need to create an rvalue expression to get the
 * desired type. See https://github.com/llvm/llvm-project/issues/53400.
 */
#define __scalar_type_to_expr_cases(type)				\
	unsigned type : (unsigned type)0, signed type : (signed type)0

#define __unqual_typeof(x)						\
	typeof(_Generic((x),						\
		char: (char)0,						\
		__scalar_type_to_expr_cases(char),			\
		__scalar_type_to_expr_cases(short),			\
		__scalar_type_to_expr_cases(int),			\
		__scalar_type_to_expr_cases(long),			\
		__scalar_type_to_expr_cases(long long),			\
		default: (typeof(x))0))

#define READ_ONCE(x)							\
({									\
	union { __unqual_typeof(x) __val; char __c[1]; } __u =		\
		{ .__c = { 0 } };					\
	__read_once_size((__unqual_typeof(x) *)&(x), __u.__c, sizeof(x));	\
	__u.__val;							\
})

#define WRITE_ONCE(x, val)						\
({									\
	union { __unqual_typeof(x) __val; char __c[1]; } __u =		\
		{ .__val = (val) };					\
	__write_once_size((__unqual_typeof(x) *)&(x), __u.__c, sizeof(x));	\
	__u.__val;							\
})
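
/*
 * Example usage (illustrative sketch; @nr_enqueued is a hypothetical global
 * updated from multiple CPUs without a lock):
 *
 *	u64 cur = READ_ONCE(nr_enqueued);
 *
 *	WRITE_ONCE(nr_enqueued, cur + 1);
 */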

/*
 * __calc_avg - Calculate the exponentially weighted moving average (EWMA) of
 * the @old and @new values. @decay controls how much of the @old value is
 * retained. With a larger @decay value, the moving average changes slowly,
 * exhibiting fewer fluctuations.
 */
#define __calc_avg(old, new, decay) ({					\
	typeof(decay) thr = 1 << (decay);				\
	typeof(old) ret;						\
	if (((old) < thr) || ((new) < thr)) {				\
		if (((old) == 1) && ((new) == 0))			\
			ret = 0;					\
		else							\
			ret = ((old) - ((old) >> 1)) + ((new) >> 1);	\
	} else {							\
		ret = ((old) - ((old) >> (decay))) + ((new) >> (decay));	\
	}								\
	ret;								\
})

/*
 * log2_u32 - Compute the base 2 logarithm of a 32-bit exponential value.
 * @v: The value for which we're computing the base 2 logarithm.
 */
static inline u32 log2_u32(u32 v)
{
	u32 r;
	u32 shift;

	r = (v > 0xFFFF) << 4; v >>= r;
	shift = (v > 0xFF) << 3; v >>= shift; r |= shift;
	shift = (v > 0xF) << 2; v >>= shift; r |= shift;
	shift = (v > 0x3) << 1; v >>= shift; r |= shift;
	r |= (v >> 1);
	return r;
}

/*
 * log2_u64 - Compute the base 2 logarithm of a 64-bit exponential value.
 * @v: The value for which we're computing the base 2 logarithm.
 */
static inline u32 log2_u64(u64 v)
{
	u32 hi = v >> 32;
	if (hi)
		return log2_u32(hi) + 32 + 1;
	else
		return log2_u32(v) + 1;
}

/*
 * __sqrt_u64 - Calculate the integer square root of value @x using Newton's
 * method.
 */
static inline u64 __sqrt_u64(u64 x)
{
	if (x == 0 || x == 1)
		return x;

	u64 r = ((1ULL << 32) > x) ? x : (1ULL << 32);

	for (int i = 0; i < 8; ++i) {
		u64 q = x / r;
		if (r <= q)
			break;
		r = (r + q) >> 1;
	}
	return r;
}

/*
 * Return a value proportionally scaled to the task's weight.
 */
static inline u64 scale_by_task_weight(const struct task_struct *p, u64 value)
{
	return (value * p->scx.weight) / 100;
}

/*
 * Return a value inversely proportional to the task's weight.
 */
static inline u64 scale_by_task_weight_inverse(const struct task_struct *p, u64 value)
{
	return value * 100 / p->scx.weight;
}


#include "compat.bpf.h"
#include "enums.bpf.h"

#endif /* __SCX_COMMON_BPF_H */