xref: /linux/tools/testing/selftests/bpf/progs/test_ldsx_insn.c (revision 1f24458a1071f006e3f7449c08ae0f12af493923)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
3 
4 #include "vmlinux.h"
5 #include <bpf/bpf_helpers.h>
6 #include <bpf/bpf_tracing.h>
7 
/* Sign-extending loads (ldsx) require the BPF v4 ISA; only these JITs
 * support it, and only clang >= 18 emits the instructions. Userspace
 * presumably reads `skip` to bypass the test elsewhere — confirm against
 * the prog_tests runner.
 */
#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
     (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) ||       \
     defined(__TARGET_ARCH_s390)) && __clang_major__ >= 18
const volatile int skip = 0;
#else
const volatile int skip = 1;
#endif

/* Two short/int pairs holding the same negative value: val1/val2 are
 * const, so they land in the read-only (.rodata) map; val3/val4 go to
 * the regular read/write map. Each equality below holds only if the
 * narrow (short) load is sign-extended before the compare.
 */
volatile const short val1 = -1;
volatile const int val2 = -1;
short val3 = -1;
int val4 = -1;
int done1, done2, ret1, ret2;
21 
SEC("?raw_tp/sys_enter")
/* Exercise a sign-extending load from the read-only map: val1 is a
 * short (-1) and val2 an int (-1), so ret1 becomes 1 only when the
 * 16-bit load of val1 is properly sign-extended to match val2.
 * done1 makes the body run once per test, regardless of how many
 * sys_enter events fire.
 */
int rdonly_map_prog(const void *ctx)
{
	if (done1)
		return 0;

	done1 = 1;
	/* val1/val2 readonly map */
	if (val1 == val2)
		ret1 = 1;
	return 0;

}
35 
SEC("?raw_tp/sys_enter")
/* Same check as rdonly_map_prog but against the writable map: val3 is
 * a short (-1) and val4 an int (-1); ret2 becomes 1 only if the 16-bit
 * load of val3 is sign-extended. done2 limits the body to one run.
 */
int map_val_prog(const void *ctx)
{
	if (done2)
		return 0;

	done2 = 1;
	/* val3/val4 regular read/write map */
	if (val3 == val4)
		ret2 = 1;
	return 0;

}
49 
/* Must mirror the layout of bpf_testmod's struct of the same name so
 * the fentry program below can dereference its argument via BTF.
 */
struct bpf_testmod_struct_arg_1 {
	int a;
};

/* 64-bit destination for the 32-bit p->a read; presumably checked by
 * the userspace side of the test.
 */
long long int_member;
55 
56 SEC("?fentry/bpf_testmod_test_arg_ptr_to_struct")
57 int BPF_PROG2(test_ptr_struct_arg, struct bpf_testmod_struct_arg_1 *, p)
58 {
59 	/* probed memory access */
60 	int_member = p->a;
61         return 0;
62 }
63 
/* 64-bit destinations for the 32-bit ctx->optlen/ctx->retval reads, so
 * sign extension (or its absence) is visible to the userspace checker.
 */
long long set_optlen, set_retval;
65 
SEC("?cgroup/getsockopt")
/* Exercise sign-extending loads of 32-bit bpf_sockopt ctx members:
 * write -1 into optlen/retval, read them back into 64-bit globals
 * (the loads under test), then restore the original values so the
 * getsockopt call itself is unaffected. The volatile ctx pointer
 * keeps the compiler from caching or eliding the accesses.
 */
int _getsockopt(volatile struct bpf_sockopt *ctx)
{
	int old_optlen, old_retval;

	old_optlen = ctx->optlen;
	old_retval = ctx->retval;

	ctx->optlen = -1;
	ctx->retval = -1;

	/* sign extension for ctx member */
	set_optlen = ctx->optlen;
	set_retval = ctx->retval;

	ctx->optlen = old_optlen;
	ctx->retval = old_retval;

	return 0;
}
86 
/* Receives the one-byte sign-extended load of skb->mark from _tc. */
long long set_mark;
88 
SEC("?tc")
/* Exercise a narrowed (one-byte) sign-extending load of a 32-bit ctx
 * member: store 0xf6fe into skb->mark, load its low byte as s8 (0xfe,
 * i.e. -2 after sign extension) into set_mark, then restore the mark.
 */
int _tc(volatile struct __sk_buff *skb)
{
	long long tmp_mark;
	int old_mark;

	old_mark = skb->mark;

	skb->mark = 0xf6fe;

	/* narrowed sign extension for ctx member */
#if __clang_major__ >= 18
	/* force narrow one-byte signed load. Otherwise, compiler may
	 * generate a 32-bit unsigned load followed by an s8 movsx.
	 * On big-endian the low-order byte sits at the highest address,
	 * hence the sizeof - 1 offset adjustment.
	 */
	asm volatile ("r1 = *(s8 *)(%[ctx] + %[off_mark])\n\t"
		      "%[tmp_mark] = r1"
		      : [tmp_mark]"=r"(tmp_mark)
		      : [ctx]"r"(skb),
			[off_mark]"i"(offsetof(struct __sk_buff, mark)
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			+ sizeof(skb->mark) - 1
#endif
			)
		      : "r1");
#else
	tmp_mark = (char)skb->mark;
#endif
	set_mark = tmp_mark;

	skb->mark = old_mark;

	return 0;
}
123 
/* GPL license declaration required to use GPL-only BPF helpers. */
char _license[] SEC("license") = "GPL";
125