/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
 * Copyright (c) 2022 David Vernet <dvernet@meta.com>
 */
#ifndef __SCX_COMMON_BPF_H
#define __SCX_COMMON_BPF_H

#ifdef LSP
#define __bpf__
#include "../vmlinux.h"
#else
#include "vmlinux.h"
#endif

#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <asm-generic/errno.h>
#include "user_exit_info.h"

#define PF_WQ_WORKER			0x00000020	/* I'm a workqueue worker */
#define PF_KTHREAD			0x00200000	/* I am a kernel thread */
#define PF_EXITING			0x00000004
#define CLOCK_MONOTONIC			1

extern int LINUX_KERNEL_VERSION __kconfig;
extern const char CONFIG_CC_VERSION_TEXT[64] __kconfig __weak;
extern const char CONFIG_LOCALVERSION[64] __kconfig __weak;

/*
 * Earlier versions of clang/pahole lost upper 32bits in 64bit enums which can
 * lead to really confusing misbehaviors. Let's trigger a build failure.
 */
static inline void ___vmlinux_h_sanity_check___(void)
{
	_Static_assert(SCX_DSQ_FLAG_BUILTIN,
		       "bpftool generated vmlinux.h is missing high bits for 64bit enums, upgrade clang and pahole");
}

s32 scx_bpf_create_dsq(u64 dsq_id, s32 node) __ksym;
s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, bool *is_idle) __ksym;
void scx_bpf_dsq_insert(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) __ksym __weak;
void scx_bpf_dsq_insert_vtime(struct task_struct *p, u64 dsq_id, u64 slice, u64 vtime, u64 enq_flags) __ksym __weak;
u32 scx_bpf_dispatch_nr_slots(void) __ksym;
void scx_bpf_dispatch_cancel(void) __ksym;
bool scx_bpf_dsq_move_to_local(u64 dsq_id) __ksym __weak;
void scx_bpf_dsq_move_set_slice(struct bpf_iter_scx_dsq *it__iter, u64 slice) __ksym __weak;
void scx_bpf_dsq_move_set_vtime(struct bpf_iter_scx_dsq *it__iter, u64 vtime) __ksym __weak;
bool scx_bpf_dsq_move(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
bool scx_bpf_dsq_move_vtime(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
u32 scx_bpf_reenqueue_local(void) __ksym;
void scx_bpf_kick_cpu(s32 cpu, u64 flags) __ksym;
s32 scx_bpf_dsq_nr_queued(u64 dsq_id) __ksym;
void scx_bpf_destroy_dsq(u64 dsq_id) __ksym;
int bpf_iter_scx_dsq_new(struct bpf_iter_scx_dsq *it, u64 dsq_id, u64 flags) __ksym __weak;
struct task_struct *bpf_iter_scx_dsq_next(struct bpf_iter_scx_dsq *it) __ksym __weak;
void bpf_iter_scx_dsq_destroy(struct bpf_iter_scx_dsq *it) __ksym __weak;
void scx_bpf_exit_bstr(s64 exit_code, char *fmt, unsigned long long *data, u32 data__sz) __ksym __weak;
void scx_bpf_error_bstr(char *fmt, unsigned long long *data, u32 data_len) __ksym;
void scx_bpf_dump_bstr(char *fmt, unsigned long long *data, u32 data_len) __ksym __weak;
u32 scx_bpf_cpuperf_cap(s32 cpu) __ksym __weak;
u32 scx_bpf_cpuperf_cur(s32 cpu) __ksym __weak;
void scx_bpf_cpuperf_set(s32 cpu, u32 perf) __ksym __weak;
u32 scx_bpf_nr_cpu_ids(void) __ksym __weak;
const struct cpumask *scx_bpf_get_possible_cpumask(void) __ksym __weak;
const struct cpumask *scx_bpf_get_online_cpumask(void) __ksym __weak;
void scx_bpf_put_cpumask(const struct cpumask *cpumask) __ksym __weak;
const struct cpumask *scx_bpf_get_idle_cpumask(void) __ksym;
const struct cpumask *scx_bpf_get_idle_smtmask(void) __ksym;
void scx_bpf_put_idle_cpumask(const struct cpumask *cpumask) __ksym;
bool scx_bpf_test_and_clear_cpu_idle(s32 cpu) __ksym;
s32 scx_bpf_pick_idle_cpu(const cpumask_t *cpus_allowed, u64 flags) __ksym;
s32 scx_bpf_pick_any_cpu(const cpumask_t *cpus_allowed, u64 flags) __ksym;
bool scx_bpf_task_running(const struct task_struct *p) __ksym;
s32 scx_bpf_task_cpu(const struct task_struct *p) __ksym;
struct rq *scx_bpf_cpu_rq(s32 cpu) __ksym;
struct cgroup *scx_bpf_task_cgroup(struct task_struct *p) __ksym __weak;
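/*
 * Illustrative sketch (not part of the API): the default idle-CPU selection
 * logic can be reused from a scheduler's ops.select_cpu() callback. @is_idle
 * reports whether the returned CPU is currently idle, in which case the task
 * may be inserted directly into the local DSQ using the kfuncs declared
 * above:
 *
 *	bool is_idle = false;
 *	s32 cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);
 *
 *	if (is_idle)
 *		scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
 *	return cpu;
 */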
/*
 * Use the following as @it__iter when calling scx_bpf_dsq_move[_vtime]() from
 * within bpf_for_each() loops.
 */
#define BPF_FOR_EACH_ITER	(&___it)

static inline __attribute__((format(printf, 1, 2)))
void ___scx_bpf_bstr_format_checker(const char *fmt, ...) {}

/*
 * Helper macro for initializing the fmt and variadic argument inputs to both
 * bstr exit kfuncs. Callers of this macro should use ___fmt and ___param to
 * refer to the initialized list of inputs to the bstr kfunc.
 */
#define scx_bpf_bstr_preamble(fmt, args...)					\
	static char ___fmt[] = fmt;						\
	/*									\
	 * Note that ___param[] must have at least one				\
	 * element to keep the verifier happy.					\
	 */									\
	unsigned long long ___param[___bpf_narg(args) ?: 1] = {};		\
										\
	_Pragma("GCC diagnostic push")						\
	_Pragma("GCC diagnostic ignored \"-Wint-conversion\"")			\
	___bpf_fill(___param, args);						\
	_Pragma("GCC diagnostic pop")

/*
 * scx_bpf_exit() wraps the scx_bpf_exit_bstr() kfunc with variadic arguments
 * instead of an array of u64. Using this macro will cause the scheduler to
 * exit cleanly with the specified exit code being passed to user space.
 */
#define scx_bpf_exit(code, fmt, args...)					\
({										\
	scx_bpf_bstr_preamble(fmt, args)					\
	scx_bpf_exit_bstr(code, ___fmt, ___param, sizeof(___param));		\
	___scx_bpf_bstr_format_checker(fmt, ##args);				\
})

/*
 * scx_bpf_error() wraps the scx_bpf_error_bstr() kfunc with variadic arguments
 * instead of an array of u64. Invoking this macro will cause the scheduler to
 * exit in an erroneous state, with diagnostic information being passed to the
 * user.
 */
#define scx_bpf_error(fmt, args...)						\
({										\
	scx_bpf_bstr_preamble(fmt, args)					\
	scx_bpf_error_bstr(___fmt, ___param, sizeof(___param));			\
	___scx_bpf_bstr_format_checker(fmt, ##args);				\
})

/*
 * scx_bpf_dump() wraps the scx_bpf_dump_bstr() kfunc with variadic arguments
 * instead of an array of u64. To be used from ops.dump() and friends.
 */
#define scx_bpf_dump(fmt, args...)						\
({										\
	scx_bpf_bstr_preamble(fmt, args)					\
	scx_bpf_dump_bstr(___fmt, ___param, sizeof(___param));			\
	___scx_bpf_bstr_format_checker(fmt, ##args);				\
})

/*
 * scx_bpf_dump_header() is a wrapper around scx_bpf_dump that adds a header
 * of system information for debugging.
 */
#define scx_bpf_dump_header()							\
({										\
	scx_bpf_dump("kernel: %d.%d.%d %s\ncc: %s\n",				\
		     LINUX_KERNEL_VERSION >> 16,				\
		     LINUX_KERNEL_VERSION >> 8 & 0xFF,				\
		     LINUX_KERNEL_VERSION & 0xFF,				\
		     CONFIG_LOCALVERSION,					\
		     CONFIG_CC_VERSION_TEXT);					\
})

#define BPF_STRUCT_OPS(name, args...)						\
	SEC("struct_ops/"#name)							\
	BPF_PROG(name, ##args)

#define BPF_STRUCT_OPS_SLEEPABLE(name, args...)					\
	SEC("struct_ops.s/"#name)						\
	BPF_PROG(name, ##args)
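/*
 * Illustrative sketch (assumes a scheduler-defined SHARED_DSQ id): a minimal
 * set of struct_ops callbacks might create a shared DSQ at init time, insert
 * woken tasks into it on enqueue, and move them to the local DSQ when a CPU
 * needs work. scx_bpf_error() aborts the scheduler with a diagnostic if DSQ
 * creation fails.
 *
 *	s32 BPF_STRUCT_OPS_SLEEPABLE(example_init)
 *	{
 *		s32 ret = scx_bpf_create_dsq(SHARED_DSQ, -1);
 *
 *		if (ret)
 *			scx_bpf_error("failed to create DSQ (%d)", ret);
 *		return ret;
 *	}
 *
 *	void BPF_STRUCT_OPS(example_enqueue, struct task_struct *p, u64 enq_flags)
 *	{
 *		scx_bpf_dsq_insert(p, SHARED_DSQ, SCX_SLICE_DFL, enq_flags);
 *	}
 *
 *	void BPF_STRUCT_OPS(example_dispatch, s32 cpu, struct task_struct *prev)
 *	{
 *		scx_bpf_dsq_move_to_local(SHARED_DSQ);
 *	}
 */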
/**
 * RESIZABLE_ARRAY - Generates annotations for an array that may be resized
 * @elfsec: the data section of the BPF program in which to place the array
 * @arr: the name of the array
 *
 * libbpf has an API for setting map value sizes. Since data sections (i.e.
 * bss, data, rodata) themselves are maps, a data section can be resized. If
 * a data section has an array as its last element, the BTF info for that
 * array will be adjusted so that the length of the array is extended to meet
 * the new length of the data section. This macro annotates an array to have
 * an element count of one with the assumption that this array can be resized
 * within the userspace program. It also annotates the section specifier so
 * this array exists in a custom sub data section which can be resized
 * independently.
 *
 * See RESIZE_ARRAY() for the userspace convenience macro for resizing an
 * array declared with RESIZABLE_ARRAY().
 */
#define RESIZABLE_ARRAY(elfsec, arr) arr[1] SEC("."#elfsec"."#arr)

/**
 * MEMBER_VPTR - Obtain the verified pointer to a struct or array member
 * @base: struct or array to index
 * @member: dereferenced member (e.g. .field, [idx0][idx1], .field[idx0] ...)
 *
 * The verifier often gets confused by the instruction sequence the compiler
 * generates for indexing struct fields or arrays. This macro forces the
 * compiler to generate a code sequence which first calculates the byte offset,
 * checks it against the struct or array size and adds that byte offset to
 * generate the pointer to the member to help the verifier.
 *
 * Ideally, we want to abort if the calculated offset is out-of-bounds. However,
 * BPF currently doesn't support abort, so evaluate to %NULL instead. The caller
 * must check for %NULL and take appropriate action to appease the verifier. To
 * avoid confusing the verifier, it's best to check for %NULL and dereference
 * immediately.
 *
 *	vptr = MEMBER_VPTR(my_array, [i][j]);
 *	if (!vptr)
 *		return error;
 *	*vptr = new_value;
 *
 * sizeof(@base) should encompass the memory area to be accessed and thus can't
 * be a pointer to the area. Use `MEMBER_VPTR(*ptr, .member)` instead of
 * `MEMBER_VPTR(ptr, ->member)`.
 */
#define MEMBER_VPTR(base, member) (typeof((base) member) *)			\
({										\
	u64 __base = (u64)&(base);						\
	u64 __addr = (u64)&((base) member) - __base;				\
	_Static_assert(sizeof(base) >= sizeof((base) member),			\
		       "@base is smaller than @member, is @base a pointer?");	\
	asm volatile (								\
		"if %0 <= %[max] goto +2\n"					\
		"%0 = 0\n"							\
		"goto +1\n"							\
		"%0 += %1\n"							\
		: "+r"(__addr)							\
		: "r"(__base),							\
		  [max]"i"(sizeof(base) - sizeof((base) member)));		\
	__addr;									\
})

/**
 * ARRAY_ELEM_PTR - Obtain the verified pointer to an array element
 * @arr: array to index into
 * @i: array index
 * @n: number of elements in array
 *
 * Similar to MEMBER_VPTR() but is intended for use with arrays where the
 * element count needs to be explicit.
 * It can be used in cases where a global array is defined with an initial
 * size but is intended to be resized before loading the BPF program.
 * Without this version of the macro, MEMBER_VPTR() will use the compile time
 * size of the array to compute the max, which will result in rejection by
 * the verifier.
 */
#define ARRAY_ELEM_PTR(arr, i, n) (typeof(arr[i]) *)				\
({										\
	u64 __base = (u64)arr;							\
	u64 __addr = (u64)&(arr[i]) - __base;					\
	asm volatile (								\
		"if %0 <= %[max] goto +2\n"					\
		"%0 = 0\n"							\
		"goto +1\n"							\
		"%0 += %1\n"							\
		: "+r"(__addr)							\
		: "r"(__base),							\
		  [max]"r"(sizeof(arr[0]) * ((n) - 1)));			\
	__addr;									\
})
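/*
 * Illustrative sketch (names are hypothetical): a per-CPU stats array can be
 * declared with RESIZABLE_ARRAY(), resized from userspace to the number of
 * possible CPUs before load, and then accessed through ARRAY_ELEM_PTR() with
 * the runtime element count rather than the compile-time size of one:
 *
 *	const volatile u32 nr_cpu_ids = 1;	// set by userspace before load
 *	u64 RESIZABLE_ARRAY(data, cpu_stats);
 *
 *	static void bump_stat(s32 cpu)
 *	{
 *		u64 *stat = ARRAY_ELEM_PTR(cpu_stats, cpu, nr_cpu_ids);
 *
 *		if (stat)
 *			(*stat)++;
 *	}
 */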
/*
 * BPF declarations and helpers
 */

/* list and rbtree */
#define __contains(name, node) __attribute__((btf_decl_tag("contains:" #name ":" #node)))
#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))

void *bpf_obj_new_impl(__u64 local_type_id, void *meta) __ksym;
void bpf_obj_drop_impl(void *kptr, void *meta) __ksym;

#define bpf_obj_new(type) ((type *)bpf_obj_new_impl(bpf_core_type_id_local(type), NULL))
#define bpf_obj_drop(kptr) bpf_obj_drop_impl(kptr, NULL)

void bpf_list_push_front(struct bpf_list_head *head, struct bpf_list_node *node) __ksym;
void bpf_list_push_back(struct bpf_list_head *head, struct bpf_list_node *node) __ksym;
struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head) __ksym;
struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head) __ksym;
struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root,
				      struct bpf_rb_node *node) __ksym;
int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node,
			bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b),
			void *meta, __u64 off) __ksym;
#define bpf_rbtree_add(head, node, less) bpf_rbtree_add_impl(head, node, less, NULL, 0)

struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root) __ksym;

void *bpf_refcount_acquire_impl(void *kptr, void *meta) __ksym;
#define bpf_refcount_acquire(kptr) bpf_refcount_acquire_impl(kptr, NULL)

/* task */
struct task_struct *bpf_task_from_pid(s32 pid) __ksym;
struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym;
void bpf_task_release(struct task_struct *p) __ksym;

/* cgroup */
struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level) __ksym;
void bpf_cgroup_release(struct cgroup *cgrp) __ksym;
struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym;

/* css iteration */
struct bpf_iter_css;
struct cgroup_subsys_state;
extern int bpf_iter_css_new(struct bpf_iter_css *it,
			    struct cgroup_subsys_state *start,
			    unsigned int flags) __weak __ksym;
extern struct cgroup_subsys_state *
bpf_iter_css_next(struct bpf_iter_css *it) __weak __ksym;
extern void bpf_iter_css_destroy(struct bpf_iter_css *it) __weak __ksym;
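/*
 * Illustrative sketch: task and cgroup pointers returned by the acquiring
 * kfuncs above are reference-counted and must be released before the program
 * returns, e.g.:
 *
 *	struct task_struct *p = bpf_task_from_pid(pid);
 *
 *	if (p) {
 *		bpf_printk("pid=%d comm=%s", p->pid, p->comm);
 *		bpf_task_release(p);
 *	}
 */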
/* cpumask */
struct bpf_cpumask *bpf_cpumask_create(void) __ksym;
struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask) __ksym;
void bpf_cpumask_release(struct bpf_cpumask *cpumask) __ksym;
u32 bpf_cpumask_first(const struct cpumask *cpumask) __ksym;
u32 bpf_cpumask_first_zero(const struct cpumask *cpumask) __ksym;
void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask) __ksym;
bool bpf_cpumask_test_and_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
bool bpf_cpumask_test_and_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
void bpf_cpumask_setall(struct bpf_cpumask *cpumask) __ksym;
void bpf_cpumask_clear(struct bpf_cpumask *cpumask) __ksym;
bool bpf_cpumask_and(struct bpf_cpumask *dst, const struct cpumask *src1,
		     const struct cpumask *src2) __ksym;
void bpf_cpumask_or(struct bpf_cpumask *dst, const struct cpumask *src1,
		    const struct cpumask *src2) __ksym;
void bpf_cpumask_xor(struct bpf_cpumask *dst, const struct cpumask *src1,
		     const struct cpumask *src2) __ksym;
bool bpf_cpumask_equal(const struct cpumask *src1, const struct cpumask *src2) __ksym;
bool bpf_cpumask_intersects(const struct cpumask *src1, const struct cpumask *src2) __ksym;
bool bpf_cpumask_subset(const struct cpumask *src1, const struct cpumask *src2) __ksym;
bool bpf_cpumask_empty(const struct cpumask *cpumask) __ksym;
bool bpf_cpumask_full(const struct cpumask *cpumask) __ksym;
void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src) __ksym;
u32 bpf_cpumask_any_distribute(const struct cpumask *cpumask) __ksym;
u32 bpf_cpumask_any_and_distribute(const struct cpumask *src1,
				   const struct cpumask *src2) __ksym;
u32 bpf_cpumask_weight(const struct cpumask *cpumask) __ksym;

int bpf_iter_bits_new(struct bpf_iter_bits *it, const u64 *unsafe_ptr__ign, u32 nr_words) __ksym;
int *bpf_iter_bits_next(struct bpf_iter_bits *it) __ksym;
void bpf_iter_bits_destroy(struct bpf_iter_bits *it) __ksym;

#define def_iter_struct(name)							\
struct bpf_iter_##name {							\
	struct bpf_iter_bits it;						\
	const struct cpumask *bitmap;						\
};

#define def_iter_new(name)							\
static inline int bpf_iter_##name##_new(					\
	struct bpf_iter_##name *it, const u64 *unsafe_ptr__ign, u32 nr_words)	\
{										\
	it->bitmap = scx_bpf_get_##name##_cpumask();				\
	return bpf_iter_bits_new(&it->it, (const u64 *)it->bitmap,		\
				 sizeof(struct cpumask) / 8);			\
}

#define def_iter_next(name)							\
static inline int *bpf_iter_##name##_next(struct bpf_iter_##name *it) {	\
	return bpf_iter_bits_next(&it->it);					\
}

#define def_iter_destroy(name)							\
static inline void bpf_iter_##name##_destroy(struct bpf_iter_##name *it) {	\
	scx_bpf_put_cpumask(it->bitmap);					\
	bpf_iter_bits_destroy(&it->it);						\
}
#define def_for_each_cpu(cpu, name) for_each_##name##_cpu(cpu)

/// Provides iterators for possible and online CPUs.
///
/// # Example
///
/// ```
/// static inline void example_use() {
///     int *cpu;
///
///     for_each_possible_cpu(cpu) {
///         bpf_printk("CPU %d is possible", *cpu);
///     }
///
///     for_each_online_cpu(cpu) {
///         bpf_printk("CPU %d is online", *cpu);
///     }
/// }
/// ```
def_iter_struct(possible);
def_iter_new(possible);
def_iter_next(possible);
def_iter_destroy(possible);
#define for_each_possible_cpu(cpu) bpf_for_each(possible, cpu, NULL, 0)

def_iter_struct(online);
def_iter_new(online);
def_iter_next(online);
def_iter_destroy(online);
#define for_each_online_cpu(cpu) bpf_for_each(online, cpu, NULL, 0)
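/*
 * Illustrative sketch: a temporary bpf_cpumask can be used to derive masks,
 * e.g. the online CPUs a task is allowed to run on. Both the acquired online
 * cpumask and the allocated bpf_cpumask must be released, and cast_mask()
 * below converts a bpf_cpumask to a read-only struct cpumask for the kfuncs
 * that take a const pointer:
 *
 *	const struct cpumask *online = scx_bpf_get_online_cpumask();
 *	struct bpf_cpumask *tmp = bpf_cpumask_create();
 *
 *	if (tmp) {
 *		bpf_cpumask_and(tmp, p->cpus_ptr, online);
 *		if (!bpf_cpumask_empty(cast_mask(tmp)))
 *			bpf_printk("task has online CPUs available");
 *		bpf_cpumask_release(tmp);
 *	}
 *	scx_bpf_put_cpumask(online);
 */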
/*
 * Access a cpumask in read-only mode (typically to check bits).
 */
static __always_inline const struct cpumask *cast_mask(struct bpf_cpumask *mask)
{
	return (const struct cpumask *)mask;
}

/* rcu */
void bpf_rcu_read_lock(void) __ksym;
void bpf_rcu_read_unlock(void) __ksym;


/*
 * Other helpers
 */

/* useful compiler attributes */
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#define __maybe_unused __attribute__((__unused__))

/*
 * READ/WRITE_ONCE() are from kernel (include/asm-generic/rwonce.h). They
 * prevent the compiler from caching, redoing or reordering reads or writes.
 */
typedef __u8  __attribute__((__may_alias__))  __u8_alias_t;
typedef __u16 __attribute__((__may_alias__)) __u16_alias_t;
typedef __u32 __attribute__((__may_alias__)) __u32_alias_t;
typedef __u64 __attribute__((__may_alias__)) __u64_alias_t;

static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(__u8_alias_t  *) res = *(volatile __u8_alias_t  *) p; break;
	case 2: *(__u16_alias_t *) res = *(volatile __u16_alias_t *) p; break;
	case 4: *(__u32_alias_t *) res = *(volatile __u32_alias_t *) p; break;
	case 8: *(__u64_alias_t *) res = *(volatile __u64_alias_t *) p; break;
	default:
		barrier();
		__builtin_memcpy((void *)res, (const void *)p, size);
		barrier();
	}
}

static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile  __u8_alias_t *) p = *(__u8_alias_t  *) res; break;
	case 2: *(volatile __u16_alias_t *) p = *(__u16_alias_t *) res; break;
	case 4: *(volatile __u32_alias_t *) p = *(__u32_alias_t *) res; break;
	case 8: *(volatile __u64_alias_t *) p = *(__u64_alias_t *) res; break;
	default:
		barrier();
		__builtin_memcpy((void *)p, (const void *)res, size);
		barrier();
	}
}

#define READ_ONCE(x)								\
({										\
	union { typeof(x) __val; char __c[1]; } __u =				\
		{ .__c = { 0 } };						\
	__read_once_size(&(x), __u.__c, sizeof(x));				\
	__u.__val;								\
})

#define WRITE_ONCE(x, val)							\
({										\
	union { typeof(x) __val; char __c[1]; } __u =				\
		{ .__val = (val) };						\
	__write_once_size(&(x), __u.__c, sizeof(x));				\
	__u.__val;								\
})

/*
 * log2_u32 - Compute the base 2 logarithm of a 32-bit exponential value.
 * @v: The value for which we're computing the base 2 logarithm.
 */
static inline u32 log2_u32(u32 v)
{
	u32 r;
	u32 shift;

	r = (v > 0xFFFF) << 4; v >>= r;
	shift = (v > 0xFF) << 3; v >>= shift; r |= shift;
	shift = (v > 0xF) << 2; v >>= shift; r |= shift;
	shift = (v > 0x3) << 1; v >>= shift; r |= shift;
	r |= (v >> 1);
	return r;
}

/*
 * log2_u64 - Compute the base 2 logarithm of a 64-bit exponential value.
 * @v: The value for which we're computing the base 2 logarithm.
 */
static inline u32 log2_u64(u64 v)
{
	u32 hi = v >> 32;

	if (hi)
		return log2_u32(hi) + 32 + 1;
	else
		return log2_u32(v) + 1;
}

#include "compat.bpf.h"
#include "enums.bpf.h"

#endif	/* __SCX_COMMON_BPF_H */