/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2025 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2025 Tejun Heo <tj@kernel.org>
 * Copyright (c) 2025 Emil Tsalapatis <etsal@meta.com>
 */
#pragma once

/*
 * When this header is compiled for userspace, the BPF arena address-space
 * attribute has no meaning. Stub it out so the struct layouts below are
 * identical on both sides of the BPF/userspace boundary.
 */
#ifndef __BPF__
#define __arena
#endif /* __BPF__ */

/* Allocator activity counters, readable from userspace for introspection. */
struct scx_alloc_stats {
	__u64 chunk_allocs;	/* index chunk allocations */
	__u64 data_allocs;	/* leaf data allocations */
	__u64 alloc_ops;	/* total allocation calls */
	__u64 free_ops;		/* total free calls */
	__u64 active_allocs;	/* currently live allocations */
	__u64 arena_pages_used;	/* pages drawn from the arena */
};

/*
 * Bump allocator pool carved out of arena slab memory. @idx is the next
 * element to hand out; the pool is exhausted once it reaches @max_elems.
 */
struct sdt_pool {
	void __arena *slab;	/* backing slab memory */
	__u64 elem_size;	/* size of each element in bytes */
	__u64 max_elems;	/* slab capacity in elements */
	__u64 idx;		/* next free element index */
};

#ifndef div_round_up
#define div_round_up(a, b) (((a) + (b) - 1) / (b))
#endif

#ifndef round_up
#define round_up(a, b) (div_round_up((a), (b)) * (b))
#endif

typedef struct sdt_desc __arena sdt_desc_t;

enum sdt_consts {
	SDT_TASK_ENTS_PER_PAGE_SHIFT	= 9,
	SDT_TASK_LEVELS			= 3,	/* radix tree depth */
	SDT_TASK_ENTS_PER_CHUNK		= 1 << SDT_TASK_ENTS_PER_PAGE_SHIFT,
	SDT_TASK_CHUNK_BITMAP_U64S	= div_round_up(SDT_TASK_ENTS_PER_CHUNK, 64),
	SDT_TASK_MIN_ELEM_PER_ALLOC	= 8,
};

/*
 * Allocation identifier. @idx locates the entry in the radix tree while
 * @genn is incremented each time the slot is recycled, so the combined
 * 64bit @val can be used to detect stale references.
 */
union sdt_id {
	__s64 val;
	struct {
		__s32 idx;	/* index in the radix tree */
		__s32 genn;	/* ++'d on recycle so that it forms unique'ish 64bit ID */
	};
};

struct sdt_chunk;

/*
 * Each index page is described by the following descriptor which carries the
 * bitmap. This way the actual index can host power-of-two numbers of entries
 * which makes indexing cheaper.
 */
struct sdt_desc {
	__u64 allocated[SDT_TASK_CHUNK_BITMAP_U64S];	/* per-entry alloc bitmap */
	__u64 nr_free;					/* free entries under this node */
	struct sdt_chunk __arena *chunk;
};

/*
 * Leaf node containing per-task data.
 */
struct sdt_data {
	union sdt_id tid;
	__u64 payload[];	/* caller-defined payload, sized at init */
};

/*
 * Intermediate node pointing to another intermediate node or leaf node.
 * Sized so that one chunk occupies exactly one 4KiB page of pointers.
 */
struct sdt_chunk {
	union {
		sdt_desc_t *descs[SDT_TASK_ENTS_PER_CHUNK];
		struct sdt_data __arena *data[SDT_TASK_ENTS_PER_CHUNK];
	};
};

/* Radix-tree based arena allocator: descriptor pool plus tree root. */
struct scx_allocator {
	struct sdt_pool pool;
	sdt_desc_t *root;
};

/* Per-task scheduler statistics reported to userspace. */
struct scx_stats {
	int	seq;	/* update sequence number */
	pid_t	pid;
	__u64	enqueue;
	__u64	exit;
	__u64	init;
	__u64	select_busy_cpu;
	__u64	select_idle_cpu;
};

#ifdef __BPF__

void __arena *scx_task_data(struct task_struct *p);
int scx_task_init(__u64 data_size);
void __arena *scx_task_alloc(struct task_struct *p);
void scx_task_free(struct task_struct *p);
void scx_arena_subprog_init(void);

int scx_alloc_init(struct scx_allocator *alloc, __u64 data_size);
/* Returns the allocated index/id; was declared 'u64' — use __u64 like the rest of the header. */
__u64 scx_alloc_internal(struct scx_allocator *alloc);
int scx_alloc_free_idx(struct scx_allocator *alloc, __u64 idx);

#endif /* __BPF__ */