/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#pragma once

#ifndef PAGE_SIZE
#define PAGE_SIZE __PAGE_SIZE
/*
 * For older kernels, sizeof(struct genradix_node) can stand in for the
 * page size. A flexible variant could try the enum first:
 * static inline long __bpf_page_size(void) {
 *   return bpf_core_enum_value(enum page_size_enum___l, __PAGE_SIZE___l) ?: sizeof(struct genradix_node);
 * }
 * but the generated code is not great.
 */
#endif

#if defined(__BPF_FEATURE_ADDR_SPACE_CAST) && !defined(BPF_ARENA_FORCE_ASM)
#define __arena __attribute__((address_space(1)))
#define __arena_global __attribute__((address_space(1)))
#define cast_kern(ptr) /* nop for bpf prog. emitted by LLVM */
#define cast_user(ptr) /* nop for bpf prog. emitted by LLVM */
#else

/* emit instruction:
 * rX = rX .off = BPF_ADDR_SPACE_CAST .imm32 = (dst_as << 16) | src_as
 *
 * This is a workaround for LLVM compiler versions without
 * __BPF_FEATURE_ADDR_SPACE_CAST that do not automatically cast between arena
 * pointers and native kernel/userspace ones. In this case we explicitly do so
 * with cast_kern() and cast_user(). E.g., in the Linux kernel tree,
 * tools/testing/selftests/bpf includes tests that use these macros to implement
 * linked lists and hashtables backed by arena memory. In sched_ext, we use
 * cast_kern() and cast_user() for compatibility with older LLVM toolchains.
 */
#ifndef bpf_addr_space_cast
/*
 * 0xBF is the opcode (BPF_ALU64 | BPF_MOV | BPF_X); the .ifc chain emits
 * the dst_reg/src_reg byte for whichever register the compiler picked
 * for @var.
 */
#define bpf_addr_space_cast(var, dst_as, src_as)\
	asm volatile(".byte 0xBF;		\
		     .ifc %[reg], r0;		\
		     .byte 0x00;		\
		     .endif;			\
		     .ifc %[reg], r1;		\
		     .byte 0x11;		\
		     .endif;			\
		     .ifc %[reg], r2;		\
		     .byte 0x22;		\
		     .endif;			\
		     .ifc %[reg], r3;		\
		     .byte 0x33;		\
		     .endif;			\
		     .ifc %[reg], r4;		\
		     .byte 0x44;		\
		     .endif;			\
		     .ifc %[reg], r5;		\
		     .byte 0x55;		\
		     .endif;			\
		     .ifc %[reg], r6;		\
		     .byte 0x66;		\
		     .endif;			\
		     .ifc %[reg], r7;		\
		     .byte 0x77;		\
		     .endif;			\
		     .ifc %[reg], r8;		\
		     .byte 0x88;		\
		     .endif;			\
		     .ifc %[reg], r9;		\
		     .byte 0x99;		\
		     .endif;			\
		     .short %[off];		\
		     .long %[as]"		\
		     : [reg]"+r"(var)		\
		     : [off]"i"(BPF_ADDR_SPACE_CAST) \
		     , [as]"i"((dst_as << 16) | src_as));
#endif
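
/*
 * Worked example of the emitted encoding (a sketch, assuming
 * BPF_ADDR_SPACE_CAST == 1 as in the kernel UAPI): for cast_kern() on a
 * variable the compiler placed in r1, the bytes on a little-endian host
 * are
 *
 *	bf 11 01 00 01 00 00 00
 *
 * i.e. opcode 0xbf, dst_reg = src_reg = r1, off = 1 (BPF_ADDR_SPACE_CAST)
 * and imm = (0 << 16) | 1. cast_user() differs only in imm, which
 * becomes (1 << 16) | 0.
 */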

#define __arena
#define __arena_global SEC(".addr_space.1")
#define cast_kern(ptr) bpf_addr_space_cast(ptr, 0, 1)
#define cast_user(ptr) bpf_addr_space_cast(ptr, 1, 0)
#endif
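
/*
 * Usage sketch (hypothetical names): in the fallback path an arena
 * pointer read from arena memory or a global carries a user-space
 * address (address space 1); cast_kern() must rewrite it to a kernel
 * address (address space 0) before the BPF program dereferences it, and
 * cast_user() converts it back before it is published to userspace.
 * With __BPF_FEATURE_ADDR_SPACE_CAST both macros are nops and LLVM
 * inserts the conversions automatically.
 *
 *	int __arena *p = shared_head;	hypothetical arena global
 *
 *	cast_kern(p);			AS 1 -> AS 0
 *	*p += 1;			now dereferenceable in BPF
 *	cast_user(p);			AS 0 -> AS 1, for userspace
 */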

void __arena* bpf_arena_alloc_pages(void *map, void __arena *addr, __u32 page_cnt,
				    int node_id, __u64 flags) __ksym __weak;
void bpf_arena_free_pages(void *map, void __arena *ptr, __u32 page_cnt) __ksym __weak;
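
/*
 * Usage sketch, assuming the including program defines an arena map
 * named "arena" (hypothetical name; any BPF_MAP_TYPE_ARENA map works)
 * and NUMA_NO_NODE (-1):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_ARENA);
 *		__uint(map_flags, BPF_F_MMAPABLE);
 *		__uint(max_entries, 1);
 *	} arena SEC(".maps");
 *
 *	int __arena *page;
 *
 *	page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
 *	if (page) {
 *		cast_kern(page);
 *		page[0] = 42;
 *	}
 *
 * Both kfuncs are declared __weak so programs still load on kernels
 * that lack them; always check the returned pointer before using it.
 */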

/*
 * Note that cond_break can only be portably used in the body of a breakable
 * construct, whereas can_loop can be used anywhere.
 */
#ifdef TEST
/* Userspace test build: no verifier, so loops need no bounding. */
#define can_loop true
#define __cond_break(expr) expr
#else
#ifdef __BPF_FEATURE_MAY_GOTO
/* The compiler understands may_goto: emit it by mnemonic. */
#define can_loop					\
	({ __label__ l_break, l_continue;		\
	bool ret = true;				\
	asm volatile goto("may_goto %l[l_break]"	\
		      :::: l_break);			\
	goto l_continue;				\
	l_break: ret = false;				\
	l_continue:;					\
	ret;						\
	})

#define __cond_break(expr)				\
	({ __label__ l_break, l_continue;		\
	asm volatile goto("may_goto %l[l_break]"	\
		      :::: l_break);			\
	goto l_continue;				\
	l_break: expr;					\
	l_continue:;					\
	})
#else
/*
 * The compiler predates may_goto: hand-assemble its encoding. 0xe5 is
 * the opcode (BPF_JMP | BPF_JCOND); the expression computes the jump
 * offset to l_break in instructions and places it in the 16-bit off
 * field, whose position inside the .long depends on endianness.
 */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define can_loop					\
	({ __label__ l_break, l_continue;		\
	bool ret = true;				\
	asm volatile goto("1:.byte 0xe5;		\
		      .byte 0;				\
		      .long ((%l[l_break] - 1b - 8) / 8) & 0xffff;	\
		      .short 0"				\
		      :::: l_break);			\
	goto l_continue;				\
	l_break: ret = false;				\
	l_continue:;					\
	ret;						\
	})

#define __cond_break(expr)				\
	({ __label__ l_break, l_continue;		\
	asm volatile goto("1:.byte 0xe5;		\
		      .byte 0;				\
		      .long ((%l[l_break] - 1b - 8) / 8) & 0xffff;	\
		      .short 0"				\
		      :::: l_break);			\
	goto l_continue;				\
	l_break: expr;					\
	l_continue:;					\
	})
#else
#define can_loop					\
	({ __label__ l_break, l_continue;		\
	bool ret = true;				\
	asm volatile goto("1:.byte 0xe5;		\
		      .byte 0;				\
		      .long (((%l[l_break] - 1b - 8) / 8) & 0xffff) << 16;	\
		      .short 0"				\
		      :::: l_break);			\
	goto l_continue;				\
	l_break: ret = false;				\
	l_continue:;					\
	ret;						\
	})

#define __cond_break(expr)				\
	({ __label__ l_break, l_continue;		\
	asm volatile goto("1:.byte 0xe5;		\
		      .byte 0;				\
		      .long (((%l[l_break] - 1b - 8) / 8) & 0xffff) << 16;	\
		      .short 0"				\
		      :::: l_break);			\
	goto l_continue;				\
	l_break: expr;					\
	l_continue:;					\
	})
#endif /* __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ */
#endif /* __BPF_FEATURE_MAY_GOTO */
#endif /* TEST */

#define cond_break __cond_break(break)
#define cond_break_label(label) __cond_break(goto label)
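
/*
 * Usage sketch: can_loop is an expression and fits any loop condition,
 * while cond_break expands to a plain break and therefore must appear
 * inside a breakable construct:
 *
 *	while (can_loop) {
 *		work();			hypothetical loop body
 *	}
 *
 *	for (i = 0; i < nr; i++) {
 *		work();
 *		cond_break;		verifier may force an early exit
 *	}
 *
 * cond_break_label(out) jumps to the local label "out" instead, for use
 * outside breakable constructs.
 */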

void bpf_preempt_disable(void) __weak __ksym;
void bpf_preempt_enable(void) __weak __ksym;
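
/*
 * Usage sketch: these pair like a critical section, and the verifier
 * rejects programs that can exit with preemption still disabled:
 *
 *	bpf_preempt_disable();
 *	... short, non-sleeping critical section ...
 *	bpf_preempt_enable();
 */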