// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/regalloc.c */

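/*
 * These programs exercise how the verifier tracks scalar registers that
 * hold the same value (for example after "r2 = r0"): range checks done on
 * one register are expected to narrow the linked register as well, before
 * their sum is used as an offset into the 48-byte map value defined below.
 * Variants cover spills to the stack, subprogram calls and callee-saved
 * registers.
 */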
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

#define MAX_ENTRIES 11

struct test_val {
	unsigned int index;
	int foo[MAX_ENTRIES];
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, long long);
	__type(value, struct test_val);
} map_hash_48b SEC(".maps");

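/*
 * r0 and r2 hold the same random value; the "s> 20" check on r0 and the
 * "s< 0" check on r2 together bound both registers to [0, 20], so
 * r7 + r0 + r2 reaches at most offset 40 and the 8-byte load stays within
 * the 48-byte value.
 */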
SEC("tracepoint")
__description("regalloc basic")
__success __flag(BPF_F_ANY_ALIGNMENT)
__naked void regalloc_basic(void)
{
	asm volatile ("					\
	r6 = r1;					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r7 = r0;					\
	call %[bpf_get_prandom_u32];			\
	r2 = r0;					\
	if r0 s> 20 goto l0_%=;				\
	if r2 s< 0 goto l0_%=;				\
	r7 += r0;					\
	r7 += r2;					\
	r0 = *(u64*)(r7 + 0);				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_get_prandom_u32),
	  __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b)
	: __clobber_all);
}

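/*
 * Same pattern with an upper bound of 24: the combined offset can reach
 * 48, just past the end of the value, so the 1-byte load must be
 * rejected.
 */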
SEC("tracepoint")
__description("regalloc negative")
__failure __msg("invalid access to map value, value_size=48 off=48 size=1")
__naked void regalloc_negative(void)
{
	asm volatile ("					\
	r6 = r1;					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r7 = r0;					\
	call %[bpf_get_prandom_u32];			\
	r2 = r0;					\
	if r0 s> 24 goto l0_%=;				\
	if r2 s< 0 goto l0_%=;				\
	r7 += r0;					\
	r7 += r2;					\
	r0 = *(u8*)(r7 + 0);				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_get_prandom_u32),
	  __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b)
	: __clobber_all);
}

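/*
 * Like the basic test, but the lower bound comes from a branch where r2
 * is the source operand ("if r3 s>= r2"); the refinement on r2 must still
 * combine with the "s> 20" check done on r0.
 */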
SEC("tracepoint")
__description("regalloc src_reg mark")
__success __flag(BPF_F_ANY_ALIGNMENT)
__naked void regalloc_src_reg_mark(void)
{
	asm volatile ("					\
	r6 = r1;					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r7 = r0;					\
	call %[bpf_get_prandom_u32];			\
	r2 = r0;					\
	if r0 s> 20 goto l0_%=;				\
	r3 = 0;						\
	if r3 s>= r2 goto l0_%=;			\
	r7 += r0;					\
	r7 += r2;					\
	r0 = *(u64*)(r7 + 0);				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_get_prandom_u32),
	  __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b)
	: __clobber_all);
}

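/*
 * Negative counterpart: with an upper bound of 22 the combined offset can
 * reach 44, and an 8-byte load at offset 44 would run past the 48-byte
 * value.
 */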
SEC("tracepoint")
__description("regalloc src_reg negative")
__failure __msg("invalid access to map value, value_size=48 off=44 size=8")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void regalloc_src_reg_negative(void)
{
	asm volatile ("					\
	r6 = r1;					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r7 = r0;					\
	call %[bpf_get_prandom_u32];			\
	r2 = r0;					\
	if r0 s> 22 goto l0_%=;				\
	r3 = 0;						\
	if r3 s>= r2 goto l0_%=;			\
	r7 += r0;					\
	r7 += r2;					\
	r0 = *(u64*)(r7 + 0);				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_get_prandom_u32),
	  __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b)
	: __clobber_all);
}

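/*
 * The "s> 20" bound learned on r0 propagates to r2, which is spilled to
 * the stack and filled back into r3; the upper bound must survive the
 * spill/fill round trip, while the branch against r0 == 0 supplies the
 * lower bound before r3 is added to the map value pointer.
 */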
SEC("tracepoint")
__description("regalloc and spill")
__success __flag(BPF_F_ANY_ALIGNMENT)
__naked void regalloc_and_spill(void)
{
	asm volatile ("					\
	r6 = r1;					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r7 = r0;					\
	call %[bpf_get_prandom_u32];			\
	r2 = r0;					\
	if r0 s> 20 goto l0_%=;				\
	/* r0 has upper bound that should propagate into r2 */\
	*(u64*)(r10 - 8) = r2;		/* spill r2 */	\
	r0 = 0;						\
	r2 = 0;				/* clear r0 and r2 */\
	r3 = *(u64*)(r10 - 8);		/* fill r3 */	\
	if r0 s>= r3 goto l0_%=;			\
	/* r3 has lower and upper bounds */		\
	r7 += r3;					\
	r0 = *(u64*)(r7 + 0);				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_get_prandom_u32),
	  __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b)
	: __clobber_all);
}

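/*
 * Same spill/fill pattern with an upper bound of 48: the 8-byte load at
 * offset 48 falls outside the value and must be rejected.
 */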
SEC("tracepoint")
__description("regalloc and spill negative")
__failure __msg("invalid access to map value, value_size=48 off=48 size=8")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void regalloc_and_spill_negative(void)
{
	asm volatile ("					\
	r6 = r1;					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r7 = r0;					\
	call %[bpf_get_prandom_u32];			\
	r2 = r0;					\
	if r0 s> 48 goto l0_%=;				\
	/* r0 has upper bound that should propagate into r2 */\
	*(u64*)(r10 - 8) = r2;		/* spill r2 */	\
	r0 = 0;						\
	r2 = 0;				/* clear r0 and r2 */\
	r3 = *(u64*)(r10 - 8);		/* fill r3 */\
	if r0 s>= r3 goto l0_%=;			\
	/* r3 has lower and upper bounds */		\
	r7 += r3;					\
	r0 = *(u64*)(r7 + 0);				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_get_prandom_u32),
	  __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b)
	: __clobber_all);
}

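/*
 * Three linked registers (r0, r2 and r4) are added to the map value
 * pointer; with each bounded to [0, 12] the load offset is at most 36,
 * so the 8-byte access ends within the 48-byte value.
 */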
SEC("tracepoint")
__description("regalloc three regs")
__success __flag(BPF_F_ANY_ALIGNMENT)
__naked void regalloc_three_regs(void)
{
	asm volatile ("					\
	r6 = r1;					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r7 = r0;					\
	call %[bpf_get_prandom_u32];			\
	r2 = r0;					\
	r4 = r2;					\
	if r0 s> 12 goto l0_%=;				\
	if r2 s< 0 goto l0_%=;				\
	r7 += r0;					\
	r7 += r2;					\
	r7 += r4;					\
	r0 = *(u64*)(r7 + 0);				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_get_prandom_u32),
	  __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b)
	: __clobber_all);
}

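/*
 * The random value is duplicated into the callee-saved registers r8 and
 * r9 and a subprogram is called before the bounds checks; the checks done
 * after the call must still constrain both registers.
 */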
SEC("tracepoint")
__description("regalloc after call")
__success __flag(BPF_F_ANY_ALIGNMENT)
__naked void regalloc_after_call(void)
{
	asm volatile ("					\
	r6 = r1;					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r7 = r0;					\
	call %[bpf_get_prandom_u32];			\
	r8 = r0;					\
	r9 = r0;					\
	call regalloc_after_call__1;			\
	if r8 s> 20 goto l0_%=;				\
	if r9 s< 0 goto l0_%=;				\
	r7 += r8;					\
	r7 += r9;					\
	r0 = *(u64*)(r7 + 0);				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_get_prandom_u32),
	  __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b)
	: __clobber_all);
}

static __naked __noinline __attribute__((used))
void regalloc_after_call__1(void)
{
	asm volatile ("					\
	r0 = 0;						\
	exit;						\
"	::: __clobber_all);
}

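/*
 * The linked scalars are passed to a subprogram in r1 and r2, together
 * with the map value pointer in r3; the bounds checks and the load are
 * performed entirely inside the callee.
 */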
SEC("tracepoint")
__description("regalloc in callee")
__success __flag(BPF_F_ANY_ALIGNMENT)
__naked void regalloc_in_callee(void)
{
	asm volatile ("					\
	r6 = r1;					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r7 = r0;					\
	call %[bpf_get_prandom_u32];			\
	r1 = r0;					\
	r2 = r0;					\
	r3 = r7;					\
	call regalloc_in_callee__1;			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_get_prandom_u32),
	  __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b)
	: __clobber_all);
}

static __naked __noinline __attribute__((used))
void regalloc_in_callee__1(void)
{
	asm volatile ("					\
	if r1 s> 20 goto l0_%=;				\
	if r2 s< 0 goto l0_%=;				\
	r3 += r1;					\
	r3 += r2;					\
	r0 = *(u64*)(r3 + 0);				\
	exit;						\
l0_%=:	r0 = 0;						\
	exit;						\
"	::: __clobber_all);
}

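/*
 * Regression test: as the inline comments describe, a buggy verifier
 * could end up believing that the spilled map value pointer filled back
 * into r3 is the scalar 20 that was tested in r2.
 */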
SEC("tracepoint")
__description("regalloc, spill, JEQ")
__success
__naked void regalloc_spill_jeq(void)
{
	asm volatile ("					\
	r6 = r1;					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	*(u64*)(r10 - 8) = r0;		/* spill r0 */	\
	if r0 == 0 goto l0_%=;				\
l0_%=:	/* The verifier will walk the rest twice with r0 == 0 and r0 == map_value */\
	call %[bpf_get_prandom_u32];			\
	r2 = r0;					\
	if r2 == 20 goto l1_%=;				\
l1_%=:	/* The verifier will walk the rest two more times with r0 == 20 and r0 == unknown */\
	r3 = *(u64*)(r10 - 8);		/* fill r3 with map_value */\
	if r3 == 0 goto l2_%=;		/* skip ldx if map_value == NULL */\
	/* Buggy verifier will think that r3 == 20 here */\
	r0 = *(u64*)(r3 + 0);		/* read from map_value */\
l2_%=:	exit;						\
"	:
	: __imm(bpf_get_prandom_u32),
	  __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b)
	: __clobber_all);
}

char _license[] SEC("license") = "GPL";