xref: /linux/tools/testing/selftests/bpf/progs/verifier_unpriv.c (revision 34dc1baba215b826e454b8d19e4f24adbeb7d00d)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Converted from tools/testing/selftests/bpf/verifier/unpriv.c */
3 
4 #include <linux/bpf.h>
5 #include <bpf/bpf_helpers.h>
6 #include "../../../include/linux/filter.h"
7 #include "bpf_misc.h"
8 
/* Emit BPF asm that zero-fills a struct bpf_sock_tuple on the stack
 * (occupying r10-48 .. r10-4) and then calls
 * func(ctx, &tuple, sizeof tuple, 0, 0), leaving the socket pointer
 * (or NULL) in r0.  Callers must bind %[sizeof_bpf_sock_tuple] and
 * %[func] via __imm_const()/__imm() in their asm constraint lists.
 */
#define BPF_SK_LOOKUP(func) \
	/* struct bpf_sock_tuple tuple = {} */ \
	"r2 = 0;"			\
	"*(u32*)(r10 - 8) = r2;"	\
	"*(u64*)(r10 - 16) = r2;"	\
	"*(u64*)(r10 - 24) = r2;"	\
	"*(u64*)(r10 - 32) = r2;"	\
	"*(u64*)(r10 - 40) = r2;"	\
	"*(u64*)(r10 - 48) = r2;"	\
	/* sk = func(ctx, &tuple, sizeof tuple, 0, 0) */ \
	"r2 = r10;"			\
	"r2 += -48;"			\
	"r3 = %[sizeof_bpf_sock_tuple];"\
	"r4 = 0;"			\
	"r5 = 0;"			\
	"call %[" #func "];"
25 
/* Scratch hash map (8-byte key, 8-byte value, 1 entry) used by the
 * tests below as a target for lookup/update helper calls.
 */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, long long);
	__type(value, long long);
} map_hash_8b SEC(".maps");
32 
/* Forward declarations for the tail-call target programs defined
 * below, so they can be referenced from map_prog1_socket's values.
 */
void dummy_prog_42_socket(void);
void dummy_prog_24_socket(void);
void dummy_prog_loop1_socket(void);
36 
/* Program array wiring up the dummy tail-call targets: slot 0 returns
 * 42, slot 1 tail-calls back into this map (loop), slot 2 returns 24.
 * Slot 3 is left empty.
 */
struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 4);
	__uint(key_size, sizeof(int));
	__array(values, void (void));
} map_prog1_socket SEC(".maps") = {
	.values = {
		[0] = (void *)&dummy_prog_42_socket,
		[1] = (void *)&dummy_prog_loop1_socket,
		[2] = (void *)&dummy_prog_24_socket,
	},
};
49 
/* Tail-call target: simply returns 42. */
SEC("socket")
__auxiliary __auxiliary_unpriv
__naked void dummy_prog_42_socket(void)
{
	asm volatile ("r0 = 42; exit;");
}
56 
/* Tail-call target: simply returns 24. */
SEC("socket")
__auxiliary __auxiliary_unpriv
__naked void dummy_prog_24_socket(void)
{
	asm volatile ("r0 = 24; exit;");
}
63 
/* Tail-call target that tail-calls slot 1 of map_prog1_socket (i.e.
 * itself); returns 41 if the tail call falls through.
 */
SEC("socket")
__auxiliary __auxiliary_unpriv
__naked void dummy_prog_loop1_socket(void)
{
	asm volatile ("			\
	r3 = 1;				\
	r2 = %[map_prog1_socket] ll;	\
	call %[bpf_tail_call];		\
	r0 = 41;			\
	exit;				\
"	:
	: __imm(bpf_tail_call),
	  __imm_addr(map_prog1_socket)
	: __clobber_all);
}
79 
/* Returning the frame pointer in r0 leaks a kernel address: allowed
 * for privileged loads, rejected unpriv with "R0 leaks addr".
 */
SEC("socket")
__description("unpriv: return pointer")
__success __failure_unpriv __msg_unpriv("R0 leaks addr")
__retval(POINTER_VALUE)
__naked void unpriv_return_pointer(void)
{
	asm volatile ("					\
	r0 = r10;					\
	exit;						\
"	::: __clobber_all);
}
91 
/* Adding a constant to the ctx pointer (without dereferencing it) is
 * accepted for both privileged and unprivileged loads.
 */
SEC("socket")
__description("unpriv: add const to pointer")
__success __success_unpriv __retval(0)
__naked void unpriv_add_const_to_pointer(void)
{
	asm volatile ("					\
	r1 += 8;					\
	r0 = 0;						\
	exit;						\
"	::: __clobber_all);
}
103 
/* Adding one pointer to another (ctx += fp) is rejected for everyone
 * with "R1 pointer += pointer".
 */
SEC("socket")
__description("unpriv: add pointer to pointer")
__failure __msg("R1 pointer += pointer")
__failure_unpriv
__naked void unpriv_add_pointer_to_pointer(void)
{
	asm volatile ("					\
	r1 += r10;					\
	r0 = 0;						\
	exit;						\
"	::: __clobber_all);
}
116 
/* Negating the ctx pointer is pointer arithmetic: allowed privileged,
 * rejected unpriv with "R1 pointer arithmetic".
 */
SEC("socket")
__description("unpriv: neg pointer")
__success __failure_unpriv __msg_unpriv("R1 pointer arithmetic")
__retval(0)
__naked void unpriv_neg_pointer(void)
{
	asm volatile ("					\
	r1 = -r1;					\
	r0 = 0;						\
	exit;						\
"	::: __clobber_all);
}
129 
/* Comparing the ctx pointer against a constant leaks pointer info via
 * the branch: rejected unpriv with "R1 pointer comparison".
 */
SEC("socket")
__description("unpriv: cmp pointer with const")
__success __failure_unpriv __msg_unpriv("R1 pointer comparison")
__retval(0)
__naked void unpriv_cmp_pointer_with_const(void)
{
	asm volatile ("					\
	if r1 == 0 goto l0_%=;				\
l0_%=:	r0 = 0;						\
	exit;						\
"	::: __clobber_all);
}
142 
/* Comparing ctx pointer against the frame pointer: rejected unpriv
 * with "R10 pointer comparison".
 */
SEC("socket")
__description("unpriv: cmp pointer with pointer")
__success __failure_unpriv __msg_unpriv("R10 pointer comparison")
__retval(0)
__naked void unpriv_cmp_pointer_with_pointer(void)
{
	asm volatile ("					\
	if r1 == r10 goto l0_%=;			\
l0_%=:	r0 = 0;						\
	exit;						\
"	::: __clobber_all);
}
155 
/* Calls bpf_trace_printk(fp-8, 8, fp-8) from a tracepoint program.
 * The program itself verifies (__success); the "disallowed" in the
 * description refers to unprivileged users not being able to load
 * tracepoint programs at all, which the test harness checks via the
 * section type rather than a verifier message.
 */
SEC("tracepoint")
__description("unpriv: check that printk is disallowed")
__success
__naked void check_that_printk_is_disallowed(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r1 = r10;					\
	r1 += -8;					\
	r2 = 8;						\
	r3 = r1;					\
	call %[bpf_trace_printk];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_trace_printk)
	: __clobber_all);
}
175 
/* Passes a stack pointer as the *value* argument (r3) and flags
 * argument (r4) of bpf_map_update_elem.  Passing a pointer where the
 * helper expects a scalar (r4/flags) leaks an address: rejected
 * unpriv with "R4 leaks addr".
 */
SEC("socket")
__description("unpriv: pass pointer to helper function")
__success __failure_unpriv __msg_unpriv("R4 leaks addr")
__retval(0)
__naked void pass_pointer_to_helper_function(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	r3 = r2;					\
	r4 = r2;					\
	call %[bpf_map_update_elem];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_update_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}
198 
/* Spills the frame pointer to fp-8, then passes fp-8 as the key for
 * bpf_map_lookup_elem.  The helper would indirectly read the spilled
 * pointer: rejected unpriv with "invalid indirect read from stack".
 */
SEC("socket")
__description("unpriv: indirectly pass pointer on stack to helper function")
__success __failure_unpriv
__msg_unpriv("invalid indirect read from stack R2 off -8+0 size 8")
__retval(0)
__naked void on_stack_to_helper_function(void)
{
	asm volatile ("					\
	*(u64*)(r10 - 8) = r10;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}
219 
/* Spills a pointer (r10) to fp-8, then partially overwrites it with a
 * 32-bit scalar store: rejected unpriv with "attempt to corrupt
 * spilled" pointer.
 */
SEC("socket")
__description("unpriv: mangle pointer on stack 1")
__success __failure_unpriv __msg_unpriv("attempt to corrupt spilled")
__retval(0)
__naked void mangle_pointer_on_stack_1(void)
{
	asm volatile ("					\
	*(u64*)(r10 - 8) = r10;				\
	r0 = 0;						\
	*(u32*)(r10 - 8) = r0;				\
	r0 = 0;						\
	exit;						\
"	::: __clobber_all);
}
234 
/* Same as above but corrupts only the top byte of the spilled pointer
 * (u8 store at fp-1): still "attempt to corrupt spilled" unpriv.
 */
SEC("socket")
__description("unpriv: mangle pointer on stack 2")
__success __failure_unpriv __msg_unpriv("attempt to corrupt spilled")
__retval(0)
__naked void mangle_pointer_on_stack_2(void)
{
	asm volatile ("					\
	*(u64*)(r10 - 8) = r10;				\
	r0 = 0;						\
	*(u8*)(r10 - 1) = r0;				\
	r0 = 0;						\
	exit;						\
"	::: __clobber_all);
}
249 
/* Reading back a spilled 64-bit pointer with a 32-bit load is an
 * "invalid size" fill for everyone, privileged included.
 */
SEC("socket")
__description("unpriv: read pointer from stack in small chunks")
__failure __msg("invalid size")
__failure_unpriv
__naked void from_stack_in_small_chunks(void)
{
	asm volatile ("					\
	*(u64*)(r10 - 8) = r10;				\
	r0 = *(u32*)(r10 - 8);				\
	r0 = 0;						\
	exit;						\
"	::: __clobber_all);
}
263 
/* Storing the ctx pointer into the ctx itself: privileged load fails
 * with "invalid bpf_context access", unpriv with "R1 leaks addr".
 */
SEC("socket")
__description("unpriv: write pointer into ctx")
__failure __msg("invalid bpf_context access")
__failure_unpriv __msg_unpriv("R1 leaks addr")
__naked void unpriv_write_pointer_into_ctx(void)
{
	asm volatile ("					\
	*(u64*)(r1 + 0) = r1;				\
	r0 = 0;						\
	exit;						\
"	::: __clobber_all);
}
276 
/* A clean spill and fill of the ctx pointer through the stack is
 * allowed for both privileged and unprivileged loads.
 */
SEC("socket")
__description("unpriv: spill/fill of ctx")
__success __success_unpriv __retval(0)
__naked void unpriv_spill_fill_of_ctx(void)
{
	asm volatile ("					\
	r6 = r10;					\
	r6 += -8;					\
	*(u64*)(r6 + 0) = r1;				\
	r1 = *(u64*)(r6 + 0);				\
	r0 = 0;						\
	exit;						\
"	::: __clobber_all);
}
291 
/* After a spill/fill round-trip, the restored ctx pointer is still
 * usable as the ctx argument of a helper (bpf_get_hash_recalc).
 */
SEC("tc")
__description("unpriv: spill/fill of ctx 2")
__success __retval(0)
__naked void spill_fill_of_ctx_2(void)
{
	asm volatile ("					\
	r6 = r10;					\
	r6 += -8;					\
	*(u64*)(r6 + 0) = r1;				\
	r1 = *(u64*)(r6 + 0);				\
	call %[bpf_get_hash_recalc];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_get_hash_recalc)
	: __clobber_all);
}
309 
/* Overwriting the spilled ctx with the frame pointer before filling
 * means r1 holds fp, not ctx, at the helper call: rejected with
 * "R1 type=fp expected=ctx".
 */
SEC("tc")
__description("unpriv: spill/fill of ctx 3")
__failure __msg("R1 type=fp expected=ctx")
__naked void spill_fill_of_ctx_3(void)
{
	asm volatile ("					\
	r6 = r10;					\
	r6 += -8;					\
	*(u64*)(r6 + 0) = r1;				\
	*(u64*)(r6 + 0) = r10;				\
	r1 = *(u64*)(r6 + 0);				\
	call %[bpf_get_hash_recalc];			\
	exit;						\
"	:
	: __imm(bpf_get_hash_recalc)
	: __clobber_all);
}
327 
/* An atomic add on the stack slot holding the spilled ctx downgrades
 * it to a scalar, so the fill yields "R1 type=scalar expected=ctx".
 */
SEC("tc")
__description("unpriv: spill/fill of ctx 4")
__failure __msg("R1 type=scalar expected=ctx")
__naked void spill_fill_of_ctx_4(void)
{
	asm volatile ("					\
	r6 = r10;					\
	r6 += -8;					\
	*(u64*)(r6 + 0) = r1;				\
	r0 = 1;						\
	lock *(u64 *)(r10 - 8) += r0;			\
	r1 = *(u64*)(r6 + 0);				\
	call %[bpf_get_hash_recalc];			\
	exit;						\
"	:
	: __imm(bpf_get_hash_recalc)
	: __clobber_all);
}
346 
/* Depending on whether r1 (ctx) is NULL, the stack slot holds either a
 * stack pointer or the ctx pointer, so the final STX into r1->mark
 * would execute on two different pointer types: rejected with "same
 * insn cannot be used with different pointers".
 */
SEC("tc")
__description("unpriv: spill/fill of different pointers stx")
__failure __msg("same insn cannot be used with different pointers")
__naked void fill_of_different_pointers_stx(void)
{
	asm volatile ("					\
	r3 = 42;					\
	r6 = r10;					\
	r6 += -8;					\
	if r1 == 0 goto l0_%=;				\
	r2 = r10;					\
	r2 += -16;					\
	*(u64*)(r6 + 0) = r2;				\
l0_%=:	if r1 != 0 goto l1_%=;				\
	*(u64*)(r6 + 0) = r1;				\
l1_%=:	r1 = *(u64*)(r6 + 0);				\
	*(u32*)(r1 + %[__sk_buff_mark]) = r3;		\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
	: __clobber_all);
}
370 
/* Same as above, but use BPF_ST_MEM to save 42
 * instead of BPF_STX_MEM.  The ST insn is emitted raw via .8byte
 * because the asm syntax has no immediate-store form for this case.
 */
SEC("tc")
__description("unpriv: spill/fill of different pointers st")
__failure __msg("same insn cannot be used with different pointers")
__naked void fill_of_different_pointers_st(void)
{
	asm volatile ("					\
	r6 = r10;					\
	r6 += -8;					\
	if r1 == 0 goto l0_%=;				\
	r2 = r10;					\
	r2 += -16;					\
	*(u64*)(r6 + 0) = r2;				\
l0_%=:	if r1 != 0 goto l1_%=;				\
	*(u64*)(r6 + 0) = r1;				\
l1_%=:	r1 = *(u64*)(r6 + 0);				\
	.8byte %[st_mem];				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
	  __imm_insn(st_mem,
		     BPF_ST_MEM(BPF_W, BPF_REG_1, offsetof(struct __sk_buff, mark), 42))
	: __clobber_all);
}
398 
/* The stack slot may hold either the socket from bpf_sk_lookup_tcp or
 * the ctx pointer; after the fill, the ctx pointer would reach
 * bpf_sk_release, which expects a sock: rejected with
 * "type=ctx expected=sock".
 */
SEC("tc")
__description("unpriv: spill/fill of different pointers stx - ctx and sock")
__failure __msg("type=ctx expected=sock")
__naked void pointers_stx_ctx_and_sock(void)
{
	asm volatile ("					\
	r8 = r1;					\
	/* struct bpf_sock *sock = bpf_sock_lookup(...); */\
"	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r2 = r0;					\
	/* u64 foo; */					\
	/* void *target = &foo; */			\
	r6 = r10;					\
	r6 += -8;					\
	r1 = r8;					\
	/* if (skb == NULL) *target = sock; */		\
	if r1 == 0 goto l0_%=;				\
	*(u64*)(r6 + 0) = r2;				\
l0_%=:	/* else *target = skb; */			\
	if r1 != 0 goto l1_%=;				\
	*(u64*)(r6 + 0) = r1;				\
l1_%=:	/* struct __sk_buff *skb = *target; */		\
	r1 = *(u64*)(r6 + 0);				\
	/* skb->mark = 42; */				\
	r3 = 42;					\
	*(u32*)(r1 + %[__sk_buff_mark]) = r3;		\
	/* if (sk) bpf_sk_release(sk) */		\
	if r1 == 0 goto l2_%=;				\
	call %[bpf_sk_release];				\
l2_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
437 
/* Variant that never calls bpf_sk_release: the verifier now reports
 * the leaked socket reference before it reaches the mixed-pointer
 * store.  (The original converted test expected "same insn cannot be
 * used with different pointers"; the current verifier flags
 * "Unreleased reference" first.)
 */
SEC("tc")
__description("unpriv: spill/fill of different pointers stx - leak sock")
__failure
__msg("Unreleased reference")
__naked void different_pointers_stx_leak_sock(void)
{
	asm volatile ("					\
	r8 = r1;					\
	/* struct bpf_sock *sock = bpf_sock_lookup(...); */\
"	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r2 = r0;					\
	/* u64 foo; */					\
	/* void *target = &foo; */			\
	r6 = r10;					\
	r6 += -8;					\
	r1 = r8;					\
	/* if (skb == NULL) *target = sock; */		\
	if r1 == 0 goto l0_%=;				\
	*(u64*)(r6 + 0) = r2;				\
l0_%=:	/* else *target = skb; */			\
	if r1 != 0 goto l1_%=;				\
	*(u64*)(r6 + 0) = r1;				\
l1_%=:	/* struct __sk_buff *skb = *target; */		\
	r1 = *(u64*)(r6 + 0);				\
	/* skb->mark = 42; */				\
	r3 = 42;					\
	*(u32*)(r1 + %[__sk_buff_mark]) = r3;		\
	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
473 
/* The fill may yield either the skb ctx or the looked-up sock; the
 * subsequent read of sk->mark would execute on two different pointer
 * types: rejected with "same insn cannot be used with different
 * pointers".
 */
SEC("tc")
__description("unpriv: spill/fill of different pointers stx - sock and ctx (read)")
__failure __msg("same insn cannot be used with different pointers")
__naked void stx_sock_and_ctx_read(void)
{
	asm volatile ("					\
	r8 = r1;					\
	/* struct bpf_sock *sock = bpf_sock_lookup(...); */\
"	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r2 = r0;					\
	/* u64 foo; */					\
	/* void *target = &foo; */			\
	r6 = r10;					\
	r6 += -8;					\
	r1 = r8;					\
	/* if (skb) *target = skb */			\
	if r1 == 0 goto l0_%=;				\
	*(u64*)(r6 + 0) = r1;				\
l0_%=:	/* else *target = sock */			\
	if r1 != 0 goto l1_%=;				\
	*(u64*)(r6 + 0) = r2;				\
l1_%=:	/* struct bpf_sock *sk = *target; */		\
	r1 = *(u64*)(r6 + 0);				\
	/* if (sk) u32 foo = sk->mark; bpf_sk_release(sk); */\
	if r1 == 0 goto l2_%=;				\
	r3 = *(u32*)(r1 + %[bpf_sock_mark]);		\
	call %[bpf_sk_release];				\
l2_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(bpf_sock_mark, offsetof(struct bpf_sock, mark)),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
510 
/* As above, but writing sk->mark: the verifier now rejects the write
 * path first with "cannot write into sock".  (The original converted
 * test expected "same insn cannot be used with different pointers".)
 */
SEC("tc")
__description("unpriv: spill/fill of different pointers stx - sock and ctx (write)")
__failure
__msg("cannot write into sock")
__naked void stx_sock_and_ctx_write(void)
{
	asm volatile ("					\
	r8 = r1;					\
	/* struct bpf_sock *sock = bpf_sock_lookup(...); */\
"	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r2 = r0;					\
	/* u64 foo; */					\
	/* void *target = &foo; */			\
	r6 = r10;					\
	r6 += -8;					\
	r1 = r8;					\
	/* if (skb) *target = skb */			\
	if r1 == 0 goto l0_%=;				\
	*(u64*)(r6 + 0) = r1;				\
l0_%=:	/* else *target = sock */			\
	if r1 != 0 goto l1_%=;				\
	*(u64*)(r6 + 0) = r2;				\
l1_%=:	/* struct bpf_sock *sk = *target; */		\
	r1 = *(u64*)(r6 + 0);				\
	/* if (sk) sk->mark = 42; bpf_sk_release(sk); */\
	if r1 == 0 goto l2_%=;				\
	r3 = 42;					\
	*(u32*)(r1 + %[bpf_sock_mark]) = r3;		\
	call %[bpf_sk_release];				\
l2_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(bpf_sock_mark, offsetof(struct bpf_sock, mark)),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
550 
/* Storing a map-value pointer into the map value itself would leak a
 * kernel address to userspace readers of the map: rejected unpriv
 * with "R0 leaks addr".
 */
SEC("socket")
__description("unpriv: write pointer into map elem value")
__success __failure_unpriv __msg_unpriv("R0 leaks addr")
__retval(0)
__naked void pointer_into_map_elem_value(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	*(u64*)(r0 + 0) = r0;				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}
572 
/* 32-bit ALU leaves r7 a known-zero scalar, so the privileged
 * verifier proves the guarded load is never reached (__retval(0));
 * unpriv still rejects the dereference of the scalar in r7 with
 * "R7 invalid mem access 'scalar'".
 */
SEC("socket")
__description("alu32: mov u32 const")
__success __failure_unpriv __msg_unpriv("R7 invalid mem access 'scalar'")
__retval(0)
__naked void alu32_mov_u32_const(void)
{
	asm volatile ("					\
	w7 = 0;						\
	w7 &= 1;					\
	w0 = w7;					\
	if r0 == 0 goto l0_%=;				\
	r0 = *(u64*)(r7 + 0);				\
l0_%=:	exit;						\
"	::: __clobber_all);
}
588 
/* A 32-bit register move of the frame pointer copies only its low
 * half: rejected unpriv with "R10 partial copy".
 */
SEC("socket")
__description("unpriv: partial copy of pointer")
__success __failure_unpriv __msg_unpriv("R10 partial copy")
__retval(0)
__naked void unpriv_partial_copy_of_pointer(void)
{
	asm volatile ("					\
	w1 = w10;					\
	r0 = 0;						\
	exit;						\
"	::: __clobber_all);
}
601 
/* Passing the ctx pointer as the tail-call index (r3): rejected
 * unpriv with "R3 leaks addr into helper".
 */
SEC("socket")
__description("unpriv: pass pointer to tail_call")
__success __failure_unpriv __msg_unpriv("R3 leaks addr into helper")
__retval(0)
__naked void pass_pointer_to_tail_call(void)
{
	asm volatile ("					\
	r3 = r1;					\
	r2 = %[map_prog1_socket] ll;			\
	call %[bpf_tail_call];				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_tail_call),
	  __imm_addr(map_prog1_socket)
	: __clobber_all);
}
619 
/* Comparing a map pointer against zero is a pointer comparison:
 * rejected unpriv with "R1 pointer comparison".  The initial r1 = 0
 * is intentionally overwritten by the map address load.
 */
SEC("socket")
__description("unpriv: cmp map pointer with zero")
__success __failure_unpriv __msg_unpriv("R1 pointer comparison")
__retval(0)
__naked void cmp_map_pointer_with_zero(void)
{
	asm volatile ("					\
	r1 = 0;						\
	r1 = %[map_hash_8b] ll;				\
	if r1 == 0 goto l0_%=;				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_addr(map_hash_8b)
	: __clobber_all);
}
636 
/* r10 cannot be the destination of any write: rejected for everyone
 * with "frame pointer is read only".
 */
SEC("socket")
__description("unpriv: write into frame pointer")
__failure __msg("frame pointer is read only")
__failure_unpriv
__naked void unpriv_write_into_frame_pointer(void)
{
	asm volatile ("					\
	r10 = r1;					\
	r0 = 0;						\
	exit;						\
"	::: __clobber_all);
}
649 
/* Spilling r10 is fine, but filling back *into* r10 is a write to the
 * frame pointer: rejected with "frame pointer is read only".
 */
SEC("socket")
__description("unpriv: spill/fill frame pointer")
__failure __msg("frame pointer is read only")
__failure_unpriv
__naked void unpriv_spill_fill_frame_pointer(void)
{
	asm volatile ("					\
	r6 = r10;					\
	r6 += -8;					\
	*(u64*)(r6 + 0) = r10;				\
	r10 = *(u64*)(r6 + 0);				\
	r0 = 0;						\
	exit;						\
"	::: __clobber_all);
}
665 
/* Comparing r10 against zero: rejected unpriv with "R10 pointer
 * comparison".
 */
SEC("socket")
__description("unpriv: cmp of frame pointer")
__success __failure_unpriv __msg_unpriv("R10 pointer comparison")
__retval(0)
__naked void unpriv_cmp_of_frame_pointer(void)
{
	asm volatile ("					\
	if r10 == 0 goto l0_%=;				\
l0_%=:	r0 = 0;						\
	exit;						\
"	::: __clobber_all);
}
678 
/* Adding the frame pointer to a scalar register (r1 = 0 + r10) and
 * then storing through it: rejected unpriv with "R1 stack pointer
 * arithmetic goes out of range".
 */
SEC("socket")
__description("unpriv: adding of fp, reg")
__success __failure_unpriv
__msg_unpriv("R1 stack pointer arithmetic goes out of range")
__retval(0)
__naked void unpriv_adding_of_fp_reg(void)
{
	asm volatile ("					\
	r0 = 0;						\
	r1 = 0;						\
	r1 += r10;					\
	*(u64*)(r1 - 8) = r0;				\
	exit;						\
"	::: __clobber_all);
}
694 
/* Immediate variant of the previous test: r1 = r10; r1 += 0 followed
 * by a store is still rejected unpriv with "R1 stack pointer
 * arithmetic goes out of range".
 */
SEC("socket")
__description("unpriv: adding of fp, imm")
__success __failure_unpriv
__msg_unpriv("R1 stack pointer arithmetic goes out of range")
__retval(0)
__naked void unpriv_adding_of_fp_imm(void)
{
	asm volatile ("					\
	r0 = 0;						\
	r1 = r10;					\
	r1 += 0;					\
	*(u64*)(r1 - 8) = r0;				\
	exit;						\
"	::: __clobber_all);
}
710 
/* Comparing a derived stack pointer (fp-8) against zero: rejected
 * unpriv with "R2 pointer comparison".
 */
SEC("socket")
__description("unpriv: cmp of stack pointer")
__success __failure_unpriv __msg_unpriv("R2 pointer comparison")
__retval(0)
__naked void unpriv_cmp_of_stack_pointer(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	if r2 == 0 goto l0_%=;				\
l0_%=:	r0 = 0;						\
	exit;						\
"	::: __clobber_all);
}
725 
/* GPL license declaration required for GPL-only BPF helpers. */
char _license[] SEC("license") = "GPL";
727