// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

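/* The sign-extending load/mov instructions exercised below are part of the
 * BPF "cpu v4" instruction set: they need clang 18+ to be emitted and are
 * currently only JITed on the architectures listed in the guard, so the
 * userspace runner is expected to check 'skip' and bypass the test elsewhere.
 */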
#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
	(defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
	defined(__TARGET_ARCH_s390) || defined(__TARGET_ARCH_loongarch)) && \
	__clang_major__ >= 18
const volatile int skip = 0;
#else
const volatile int skip = 1;
#endif

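/* val1/val3 are 16-bit and val2/val4 are 32-bit, all initialized to -1. The
 * equality checks below only hold if the 16-bit values are loaded with sign
 * extension; done1/done2 and ret1/ret2 are meant to be read back by the
 * userspace runner to verify that.
 */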
volatile const short val1 = -1;
volatile const int val2 = -1;
short val3 = -1;
int val4 = -1;
int done1, done2, ret1, ret2;

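/* Compare a 16-bit and a 32-bit constant living in the read-only (.rodata)
 * map. Both are -1, so ret1 is set to 1 only if the verifier and JIT handle
 * the sign-extending 16-bit load of val1 correctly.
 */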
SEC("?raw_tp/sys_enter")
int rdonly_map_prog(const void *ctx)
{
	if (done1)
		return 0;

	done1 = 1;
	/* val1/val2 readonly map */
	if (val1 == val2)
		ret1 = 1;
	return 0;
}

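/* Same comparison, but against the writable global-data map: val3/val4 are
 * plain (non-const) globals, so the sign-extending 16-bit load of val3 goes
 * through a regular map value rather than .rodata.
 */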
SEC("?raw_tp/sys_enter")
int map_val_prog(const void *ctx)
{
	if (done2)
		return 0;

	done2 = 1;
	/* val3/val4 regular read/write map */
	if (val3 == val4)
		ret2 = 1;
	return 0;
}

struct bpf_testmod_struct_arg_1 {
	int a;
};

long long int_member;

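/* fentry on a hook exported by bpf_testmod: p is a BTF pointer argument, so
 * the 32-bit read of p->a is converted by the verifier into a probed
 * (fault-tolerant) memory access, here combined with sign extension into the
 * 64-bit int_member global.
 */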
SEC("?fentry/bpf_testmod_test_arg_ptr_to_struct")
int BPF_PROG2(test_ptr_struct_arg, struct bpf_testmod_struct_arg_1 *, p)
{
	/* probed memory access */
	int_member = p->a;
	return 0;
}

long long set_optlen, set_retval;

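/* ctx field access: optlen and retval are 32-bit members of struct
 * bpf_sockopt stored into 64-bit globals, so the -1 written here must still
 * read back as -1 after sign extension. The original values are restored so
 * the hook does not disturb the intercepted getsockopt() call.
 */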
SEC("?cgroup/getsockopt")
int _getsockopt(volatile struct bpf_sockopt *ctx)
{
	int old_optlen, old_retval;

	old_optlen = ctx->optlen;
	old_retval = ctx->retval;

	ctx->optlen = -1;
	ctx->retval = -1;

	/* sign extension for ctx member */
	set_optlen = ctx->optlen;
	set_retval = ctx->retval;

	ctx->optlen = old_optlen;
	ctx->retval = old_retval;

	return 0;
}

long long set_mark;

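/* Narrowed sign extension on a ctx field: skb->mark is set to 0xf6fe, whose
 * low byte is 0xfe, so a one-byte sign-extending load must leave -2 in
 * set_mark (the big-endian branch below points the load at that low byte).
 */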
SEC("?tc")
int _tc(volatile struct __sk_buff *skb)
{
	long long tmp_mark;
	int old_mark;

	old_mark = skb->mark;

	skb->mark = 0xf6fe;

	/* narrowed sign extension for ctx member */
#if __clang_major__ >= 18
	/* force narrow one-byte signed load. Otherwise, compiler may
	 * generate a 32-bit unsigned load followed by an s8 movsx.
	 */
	asm volatile ("r1 = *(s8 *)(%[ctx] + %[off_mark])\n\t"
		      "%[tmp_mark] = r1"
		      : [tmp_mark]"=r"(tmp_mark)
		      : [ctx]"r"(skb),
			[off_mark]"i"(offsetof(struct __sk_buff, mark)
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			+ sizeof(skb->mark) - 1
#endif
			)
		      : "r1");
#else
	tmp_mark = (char)skb->mark;
#endif
	set_mark = tmp_mark;

	skb->mark = old_mark;

	return 0;
}

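/* Userspace side, as a rough sketch only (the skeleton and trigger names
 * below are illustrative, not the actual runner). Every program above uses a
 * "?" section, i.e. it is not auto-loaded, so a runner picks one with
 * bpf_program__set_autoload() before loading, attaches it, triggers it (any
 * syscall fires the raw_tp/sys_enter programs) and then checks the globals:
 *
 *	struct ldsx_skel *skel = ldsx_skel__open();
 *
 *	if (!skel || skel->rodata->skip)
 *		goto out;
 *	bpf_program__set_autoload(skel->progs.rdonly_map_prog, true);
 *	if (ldsx_skel__load(skel) || ldsx_skel__attach(skel))
 *		goto out;
 *	usleep(1);                             // any syscall hits sys_enter
 *	ASSERT_EQ(skel->bss->ret1, 1, "ret1"); // val1 was sign-extended
 */
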
char _license[] SEC("license") = "GPL";