// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/raw_stack.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

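/* The stack slot is set up as a helper destination but the helper call
 * is omitted, so the slot is never written; reading it back must fail.
 */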
SEC("tc")
__description("raw_stack: no skb_load_bytes")
__failure __msg("invalid read from stack R6 off=-8 size=8")
__naked void stack_no_skb_load_bytes(void)
{
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;					\
	r6 += -8;					\
	r3 = r6;					\
	r4 = 8;						\
	/* Call to skb_load_bytes() omitted. */		\
	r0 = *(u64*)(r6 + 0);				\
	exit;						\
"	::: __clobber_all);
}

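/* A negative length (R4 = -8) for bpf_skb_load_bytes() must be rejected. */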
SEC("tc")
__description("raw_stack: skb_load_bytes, negative len")
__failure __msg("R4 min value is negative")
__naked void skb_load_bytes_negative_len(void)
{
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;					\
	r6 += -8;					\
	r3 = r6;					\
	r4 = -8;					\
	call %[bpf_skb_load_bytes];			\
	r0 = *(u64*)(r6 + 0);				\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes)
	: __clobber_all);
}

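/* Same as above, but the negative length (~0) arrives via an immediate
 * constant rather than a literal in the asm body.
 */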
SEC("tc")
__description("raw_stack: skb_load_bytes, negative len 2")
__failure __msg("R4 min value is negative")
__naked void load_bytes_negative_len_2(void)
{
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;					\
	r6 += -8;					\
	r3 = r6;					\
	r4 = %[__imm_0];				\
	call %[bpf_skb_load_bytes];			\
	r0 = *(u64*)(r6 + 0);				\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes),
	  __imm_const(__imm_0, ~0)
	: __clobber_all);
}

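/* A zero length is an invalid zero-sized read and must be rejected. */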
SEC("tc")
__description("raw_stack: skb_load_bytes, zero len")
__failure __msg("invalid zero-sized read")
__naked void skb_load_bytes_zero_len(void)
{
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;					\
	r6 += -8;					\
	r3 = r6;					\
	r4 = 0;						\
	call %[bpf_skb_load_bytes];			\
	r0 = *(u64*)(r6 + 0);				\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes)
	: __clobber_all);
}

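/* Passing an uninitialized stack buffer to the helper is fine: the
 * helper fills it, so the read back afterwards is valid.
 */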
SEC("tc")
__description("raw_stack: skb_load_bytes, no init")
__success __retval(0)
__naked void skb_load_bytes_no_init(void)
{
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;					\
	r6 += -8;					\
	r3 = r6;					\
	r4 = 8;						\
	call %[bpf_skb_load_bytes];			\
	r0 = *(u64*)(r6 + 0);				\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes)
	: __clobber_all);
}

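/* As above, but with the buffer explicitly initialized first; also accepted. */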
SEC("tc")
__description("raw_stack: skb_load_bytes, init")
__success __retval(0)
__naked void stack_skb_load_bytes_init(void)
{
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;					\
	r6 += -8;					\
	r3 = 0xcafe;					\
	*(u64*)(r6 + 0) = r3;				\
	r3 = r6;					\
	r4 = 8;						\
	call %[bpf_skb_load_bytes];			\
	r0 = *(u64*)(r6 + 0);				\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes)
	: __clobber_all);
}

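/* Ctx pointer spills just below and just above the 8-byte buffer must
 * survive the helper call and remain usable for ctx field loads.
 */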
SEC("tc")
__description("raw_stack: skb_load_bytes, spilled regs around bounds")
__success __retval(0)
__naked void bytes_spilled_regs_around_bounds(void)
{
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;					\
	r6 += -16;					\
	*(u64*)(r6 - 8) = r1;				\
	*(u64*)(r6 + 8) = r1;				\
	r3 = r6;					\
	r4 = 8;						\
	call %[bpf_skb_load_bytes];			\
	r0 = *(u64*)(r6 - 8);				\
	r2 = *(u64*)(r6 + 8);				\
	r0 = *(u32*)(r0 + %[__sk_buff_mark]);		\
	r2 = *(u32*)(r2 + %[__sk_buff_priority]);	\
	r0 += r2;					\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes),
	  __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
	  __imm_const(__sk_buff_priority, offsetof(struct __sk_buff, priority))
	: __clobber_all);
}

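/* A ctx pointer spilled inside the buffer is clobbered by the helper's
 * write; the reloaded value is a scalar, so dereferencing it must fail.
 */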
SEC("tc")
__description("raw_stack: skb_load_bytes, spilled regs corruption")
__failure __msg("R0 invalid mem access 'scalar'")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void load_bytes_spilled_regs_corruption(void)
{
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;					\
	r6 += -8;					\
	*(u64*)(r6 + 0) = r1;				\
	r3 = r6;					\
	r4 = 8;						\
	call %[bpf_skb_load_bytes];			\
	r0 = *(u64*)(r6 + 0);				\
	r0 = *(u32*)(r0 + %[__sk_buff_mark]);		\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes),
	  __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
	: __clobber_all);
}

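/* Spills on both sides of the buffer survive, but the one inside it is
 * overwritten; dereferencing the reloaded middle slot (R3) must fail.
 */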
SEC("tc")
__description("raw_stack: skb_load_bytes, spilled regs corruption 2")
__failure __msg("R3 invalid mem access 'scalar'")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void bytes_spilled_regs_corruption_2(void)
{
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;					\
	r6 += -16;					\
	*(u64*)(r6 - 8) = r1;				\
	*(u64*)(r6 + 0) = r1;				\
	*(u64*)(r6 + 8) = r1;				\
	r3 = r6;					\
	r4 = 8;						\
	call %[bpf_skb_load_bytes];			\
	r0 = *(u64*)(r6 - 8);				\
	r2 = *(u64*)(r6 + 8);				\
	r3 = *(u64*)(r6 + 0);				\
	r0 = *(u32*)(r0 + %[__sk_buff_mark]);		\
	r2 = *(u32*)(r2 + %[__sk_buff_priority]);	\
	r0 += r2;					\
	r3 = *(u32*)(r3 + %[__sk_buff_pkt_type]);	\
	r0 += r3;					\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes),
	  __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
	  __imm_const(__sk_buff_pkt_type, offsetof(struct __sk_buff, pkt_type)),
	  __imm_const(__sk_buff_priority, offsetof(struct __sk_buff, priority))
	: __clobber_all);
}

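/* Same layout as above, but the clobbered middle slot is only used as
 * scalar data (added to R0), which is fine.
 */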
SEC("tc")
__description("raw_stack: skb_load_bytes, spilled regs + data")
__success __retval(0)
__naked void load_bytes_spilled_regs_data(void)
{
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;					\
	r6 += -16;					\
	*(u64*)(r6 - 8) = r1;				\
	*(u64*)(r6 + 0) = r1;				\
	*(u64*)(r6 + 8) = r1;				\
	r3 = r6;					\
	r4 = 8;						\
	call %[bpf_skb_load_bytes];			\
	r0 = *(u64*)(r6 - 8);				\
	r2 = *(u64*)(r6 + 8);				\
	r3 = *(u64*)(r6 + 0);				\
	r0 = *(u32*)(r0 + %[__sk_buff_mark]);		\
	r2 = *(u32*)(r2 + %[__sk_buff_priority]);	\
	r0 += r2;					\
	r0 += r3;					\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes),
	  __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
	  __imm_const(__sk_buff_priority, offsetof(struct __sk_buff, priority))
	: __clobber_all);
}

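/* fp-513 lies outside the 512-byte BPF stack; the indirect access must
 * be rejected.
 */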
SEC("tc")
__description("raw_stack: skb_load_bytes, invalid access 1")
__failure __msg("invalid indirect access to stack R3 off=-513 size=8")
__naked void load_bytes_invalid_access_1(void)
{
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;					\
	r6 += -513;					\
	r3 = r6;					\
	r4 = 8;						\
	call %[bpf_skb_load_bytes];			\
	r0 = *(u64*)(r6 + 0);				\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes)
	: __clobber_all);
}

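/* An 8-byte access at fp-1 would run past the frame pointer; rejected. */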
SEC("tc")
__description("raw_stack: skb_load_bytes, invalid access 2")
__failure __msg("invalid indirect access to stack R3 off=-1 size=8")
__naked void load_bytes_invalid_access_2(void)
{
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;					\
	r6 += -1;					\
	r3 = r6;					\
	r4 = 8;						\
	call %[bpf_skb_load_bytes];			\
	r0 = *(u64*)(r6 + 0);				\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes)
	: __clobber_all);
}

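/* A length of 0xffffffff ends up negative in R4, so the verifier
 * rejects it as a negative minimum length.
 */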
SEC("tc")
__description("raw_stack: skb_load_bytes, invalid access 3")
__failure __msg("R4 min value is negative")
__naked void load_bytes_invalid_access_3(void)
{
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;					\
	r6 += 0xffffffff;				\
	r3 = r6;					\
	r4 = 0xffffffff;				\
	call %[bpf_skb_load_bytes];			\
	r0 = *(u64*)(r6 + 0);				\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes)
	: __clobber_all);
}

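/* A huge length (0x7fffffff) cannot fit any stack buffer; the verifier
 * reports an unbounded R4 memory access.
 */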
SEC("tc")
__description("raw_stack: skb_load_bytes, invalid access 4")
__failure
__msg("R4 unbounded memory access, use 'var &= const' or 'if (var < const)'")
__naked void load_bytes_invalid_access_4(void)
{
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;					\
	r6 += -1;					\
	r3 = r6;					\
	r4 = 0x7fffffff;				\
	call %[bpf_skb_load_bytes];			\
	r0 = *(u64*)(r6 + 0);				\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes)
	: __clobber_all);
}

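/* As above, with the buffer at the very bottom of the stack (fp-512);
 * still rejected.
 */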
SEC("tc")
__description("raw_stack: skb_load_bytes, invalid access 5")
__failure
__msg("R4 unbounded memory access, use 'var &= const' or 'if (var < const)'")
__naked void load_bytes_invalid_access_5(void)
{
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;					\
	r6 += -512;					\
	r3 = r6;					\
	r4 = 0x7fffffff;				\
	call %[bpf_skb_load_bytes];			\
	r0 = *(u64*)(r6 + 0);				\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes)
	: __clobber_all);
}

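/* Zero-length read at fp-512; rejected like the earlier zero-len case. */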
SEC("tc")
__description("raw_stack: skb_load_bytes, invalid access 6")
__failure __msg("invalid zero-sized read")
__naked void load_bytes_invalid_access_6(void)
{
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;					\
	r6 += -512;					\
	r3 = r6;					\
	r4 = 0;						\
	call %[bpf_skb_load_bytes];			\
	r0 = *(u64*)(r6 + 0);				\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes)
	: __clobber_all);
}

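/* A 512-byte access spanning the whole stack (fp-512, len 512) stays
 * within bounds and is accepted.
 */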
SEC("tc")
__description("raw_stack: skb_load_bytes, large access")
__success __retval(0)
__naked void skb_load_bytes_large_access(void)
{
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;					\
	r6 += -512;					\
	r3 = r6;					\
	r4 = 512;					\
	call %[bpf_skb_load_bytes];			\
	r0 = *(u64*)(r6 + 0);				\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes)
	: __clobber_all);
}

char _license[] SEC("license") = "GPL";