// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/search_pruning.c */

#include <linux/bpf.h>
#include <../../../include/linux/filter.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

#define MAX_ENTRIES 11

struct test_val {
	unsigned int index;
	int foo[MAX_ENTRIES];
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, long long);
	__type(value, struct test_val);
} map_hash_48b SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, long long);
	__type(value, long long);
} map_hash_8b SEC(".maps");

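/* Depending on the lookup result, R0 at exit holds either a scalar read
 * from the map value or the frame pointer (R10). State pruning must not
 * treat the pointer and the scalar as equivalent states. Privileged
 * loads succeed and return the pointer; unprivileged loads must fail
 * with "R0 leaks addr as return value".
 */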
SEC("socket")
__description("pointer/scalar confusion in state equality check (way 1)")
__success __failure_unpriv __msg_unpriv("R0 leaks addr as return value")
__retval(POINTER_VALUE)
__naked void state_equality_check_way_1(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r0 = *(u64*)(r0 + 0);				\
	goto l1_%=;					\
l0_%=:	r0 = r10;					\
l1_%=:	goto l2_%=;					\
l2_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

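/* Same check as above with the branch polarity inverted: here the
 * lookup-failure path moves the frame pointer into R0 before the two
 * paths merge.
 */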
SEC("socket")
__description("pointer/scalar confusion in state equality check (way 2)")
__success __failure_unpriv __msg_unpriv("R0 leaks addr as return value")
__retval(POINTER_VALUE)
__naked void state_equality_check_way_2(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	r0 = r10;					\
	goto l1_%=;					\
l0_%=:	r0 = *(u64*)(r0 + 0);				\
l1_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

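/* Both conditional jumps test 'r2 >= 0', which is always true for an
 * unsigned comparison, so R0 can reach exit without ever being written.
 * Liveness tracking and write screening must not let the dead 'r0 = 0'
 * writes make a pruned state look initialized; exit implicitly reads
 * R0, hence the expected "R0 !read_ok".
 */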
SEC("lwt_in")
__description("liveness pruning and write screening")
__failure __msg("R0 !read_ok")
__naked void liveness_pruning_and_write_screening(void)
{
	asm volatile ("					\
	/* Get an unknown value */			\
	r2 = *(u32*)(r1 + 0);				\
	/* branch conditions teach us nothing about R2 */\
	if r2 >= 0 goto l0_%=;				\
	r0 = 0;						\
l0_%=:	if r2 >= 0 goto l1_%=;				\
	r0 = 0;						\
l1_%=:	exit;						\
"	::: __clobber_all);
}

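/* The value read from the map is bounded only from above by the signed
 * compare against MAX_ENTRIES; its lower bound stays unknown, so after
 * the shift and the pointer arithmetic the store offset is unbounded.
 * Pruning must not merge the clamped (w1 = 0) and unclamped index
 * states; expected errors are "R0 unbounded memory access" and, for
 * unprivileged loads, "R0 leaks addr".
 */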
SEC("socket")
__description("varlen_map_value_access pruning")
__failure __msg("R0 unbounded memory access")
__failure_unpriv __msg_unpriv("R0 leaks addr")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void varlen_map_value_access_pruning(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = *(u64*)(r0 + 0);				\
	w2 = %[max_entries];				\
	if r2 s> r1 goto l1_%=;				\
	w1 = 0;						\
l1_%=:	w1 <<= 2;					\
	r0 += r1;					\
	goto l2_%=;					\
l2_%=:	r1 = %[test_val_foo];				\
	*(u64*)(r0 + 0) = r1;				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_48b),
	  __imm_const(max_entries, MAX_ENTRIES),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}

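/* r4 is 0 or 1 depending on the map value, is spilled to fp-16 across
 * the helper call, and is reloaded into r5. Pruning must still explore
 * the r5 != 0 branch, where the store goes through r6, which only ever
 * holds the scalar 0; expected error is "R6 invalid mem access
 * 'scalar'".
 */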
SEC("tracepoint")
__description("search pruning: all branches should be verified (nop operation)")
__failure __msg("R6 invalid mem access 'scalar'")
__naked void should_be_verified_nop_operation(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r3 = *(u64*)(r0 + 0);				\
	if r3 == 0xbeef goto l1_%=;			\
	r4 = 0;						\
	goto l2_%=;					\
l1_%=:	r4 = 1;						\
l2_%=:	*(u64*)(r10 - 16) = r4;				\
	call %[bpf_ktime_get_ns];			\
	r5 = *(u64*)(r10 - 16);				\
	if r5 == 0 goto l0_%=;				\
	r6 = 0;						\
	r1 = 0xdead;					\
	*(u64*)(r6 + 0) = r1;				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_ktime_get_ns),
	  __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

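/* One branch initializes fp-16, the other fp-24, yet fp-16 is read on
 * both paths after the helper call. All branches must be verified:
 * privileged programs may read uninitialized stack, but the
 * unprivileged load must be rejected with "invalid read from stack off
 * -16+0 size 8".
 */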
SEC("socket")
__description("search pruning: all branches should be verified (invalid stack access)")
/* in privileged mode reads from uninitialized stack locations are permitted */
__success __failure_unpriv
__msg_unpriv("invalid read from stack off -16+0 size 8")
__retval(0)
__naked void be_verified_invalid_stack_access(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r3 = *(u64*)(r0 + 0);				\
	r4 = 0;						\
	if r3 == 0xbeef goto l1_%=;			\
	*(u64*)(r10 - 16) = r4;				\
	goto l2_%=;					\
l1_%=:	*(u64*)(r10 - 24) = r4;				\
l2_%=:	call %[bpf_ktime_get_ns];			\
	r5 = *(u64*)(r10 - 16);				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_ktime_get_ns),
	  __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

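/* r6 holds either 32 or 4 and is spilled and filled as a u32 across a
 * pruning point. Precision tracking must follow the value through the
 * spill/fill so that the r6=32 path is still rejected: 'r0 += r8'
 * would move the pointer past the 8-byte map value, hence "R0 min
 * value is outside of the allowed memory range".
 */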
SEC("tracepoint")
__description("precision tracking for u32 spill/fill")
__failure __msg("R0 min value is outside of the allowed memory range")
__naked void tracking_for_u32_spill_fill(void)
{
	asm volatile ("					\
	r7 = r1;					\
	call %[bpf_get_prandom_u32];			\
	w6 = 32;					\
	if r0 == 0 goto l0_%=;				\
	w6 = 4;						\
l0_%=:	/* Additional insns to introduce a pruning point. */\
	call %[bpf_get_prandom_u32];			\
	r3 = 0;						\
	r3 = 0;						\
	if r0 == 0 goto l1_%=;				\
	r3 = 0;						\
l1_%=:	/* u32 spill/fill */				\
	*(u32*)(r10 - 8) = r6;				\
	r8 = *(u32*)(r10 - 8);				\
	/* out-of-bound map value access for r6=32 */	\
	r1 = 0;						\
	*(u64*)(r10 - 16) = r1;				\
	r2 = r10;					\
	r2 += -16;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l2_%=;				\
	r0 += r8;					\
	r1 = *(u32*)(r0 + 0);				\
l2_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32),
	  __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

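/* Two u32 spills at fp-4 and fp-8 are read back by a single u64 fill
 * from fp-8, so r8 combines r7's known 0xffffffff with r6's unknown
 * value and must itself be treated as unknown. The inline comments
 * below describe the expected backtracking; the program must be
 * rejected with "div by zero".
 */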
SEC("tracepoint")
__description("precision tracking for u32 spills, u64 fill")
__failure __msg("div by zero")
__naked void for_u32_spills_u64_fill(void)
{
	asm volatile ("					\
	call %[bpf_get_prandom_u32];			\
	r6 = r0;					\
	w7 = 0xffffffff;				\
	/* Additional insns to introduce a pruning point. */\
	r3 = 1;						\
	r3 = 1;						\
	r3 = 1;						\
	r3 = 1;						\
	call %[bpf_get_prandom_u32];			\
	if r0 == 0 goto l0_%=;				\
	r3 = 1;						\
l0_%=:	w3 /= 0;					\
	/* u32 spills, u64 fill */			\
	*(u32*)(r10 - 4) = r6;				\
	*(u32*)(r10 - 8) = r7;				\
	r8 = *(u64*)(r10 - 8);				\
	/* if r8 != X goto pc+1  r8 known in fallthrough branch */\
	if r8 != 0xffffffff goto l1_%=;			\
	r3 = 1;						\
l1_%=:	/* if r8 == X goto pc+1  condition always true on first\
	 * traversal, so starts backtracking to mark r8 as requiring\
	 * precision. r7 marked as needing precision. r6 not marked\
	 * since it's not tracked.			\
	 */						\
	if r8 == 0xffffffff goto l2_%=;			\
	/* fails if r8 correctly marked unknown after fill. */\
	w3 /= 0;					\
l2_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}

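/* With log_level 1 the test asserts the exact "processed 15 insns"
 * line, i.e. the second path through the branch must prune at l0
 * against the earlier checkpoint even though it spilled r6/r7 to stack
 * slots the checkpoint state never allocated.
 */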
SEC("socket")
__description("allocated_stack")
__success __msg("processed 15 insns")
__success_unpriv __msg_unpriv("") __log_level(1) __retval(0)
__naked void allocated_stack(void)
{
	asm volatile ("					\
	r6 = r1;					\
	call %[bpf_get_prandom_u32];			\
	r7 = r0;					\
	if r0 == 0 goto l0_%=;				\
	r0 = 0;						\
	*(u64*)(r10 - 8) = r6;				\
	r6 = *(u64*)(r10 - 8);				\
	*(u8*)(r10 - 9) = r7;				\
	r7 = *(u8*)(r10 - 9);				\
l0_%=:	if r0 != 0 goto l1_%=;				\
l1_%=:	if r0 != 0 goto l2_%=;				\
l2_%=:	if r0 != 0 goto l3_%=;				\
l3_%=:	if r0 != 0 goto l4_%=;				\
l4_%=:	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}

/* The test performs a conditional 64-bit write to a stack location
 * fp[-8]; this is followed by an unconditional 8-bit write to fp[-8],
 * then data is read from fp[-8]. This sequence is unsafe.
 *
 * The test would be mistakenly marked as safe without dst register
 * parent preservation in the verifier.c:copy_register_state() function.
 *
 * Note the usage of BPF_F_TEST_STATE_FREQ to force creation of a
 * checkpoint state after the conditional 64-bit assignment.
 */

SEC("socket")
__description("write tracking and register parent chain bug")
/* in privileged mode reads from uninitialized stack locations are permitted */
__success __failure_unpriv
__msg_unpriv("invalid read from stack off -8+1 size 8")
__retval(0) __flag(BPF_F_TEST_STATE_FREQ)
__naked void and_register_parent_chain_bug(void)
{
	asm volatile ("					\
	/* r6 = ktime_get_ns() */			\
	call %[bpf_ktime_get_ns];			\
	r6 = r0;					\
	/* r0 = ktime_get_ns() */			\
	call %[bpf_ktime_get_ns];			\
	/* if r0 > r6 goto +1 */			\
	if r0 > r6 goto l0_%=;				\
	/* *(u64 *)(r10 - 8) = 0xdeadbeef */		\
	r0 = 0xdeadbeef;				\
	*(u64*)(r10 - 8) = r0;				\
l0_%=:	r1 = 42;					\
	*(u8*)(r10 - 8) = r1;				\
	r2 = *(u64*)(r10 - 8);				\
	/* exit(0) */					\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_ktime_get_ns)
	: __clobber_all);
}

/* Without a checkpoint forcibly inserted at the back-edge of a loop,
 * this test would take a very long time to verify.
 */
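/* The jset instruction is emitted as a raw .8byte encoding via
 * __imm_insn(), presumably because this conditional-jump form is not
 * accepted by the inline assembler; the comment next to it shows the
 * equivalent mnemonic.
 */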
SEC("kprobe")
__failure __log_level(4)
__msg("BPF program is too large.")
__naked void short_loop1(void)
{
	asm volatile (
	"   r7 = *(u16 *)(r1 +0);"
	"1: r7 += 0x1ab064b9;"
	"   .8byte %[jset];" /* same as 'if r7 & 0x702000 goto 1b;' */
	"   r7 &= 0x1ee60e;"
	"   r7 += r1;"
	"   if r7 s> 0x37d2 goto +0;"
	"   r0 = 0;"
	"   exit;"
	:
	: __imm_insn(jset, BPF_JMP_IMM(BPF_JSET, BPF_REG_7, 0x702000, -2))
	: __clobber_all);
}

char _license[] SEC("license") = "GPL";