/* xref: /linux/tools/testing/selftests/bpf/progs/verifier_ctx_sk_msg.c
 * (revision 1ac731c529cd4d6adbce134754b51ff7d822b145)
 */
1 // SPDX-License-Identifier: GPL-2.0
2 /* Converted from tools/testing/selftests/bpf/verifier/ctx_sk_msg.c */
3 
4 #include <linux/bpf.h>
5 #include <bpf/bpf_helpers.h>
6 #include "bpf_misc.h"
7 
8 SEC("sk_msg")
9 __description("valid access family in SK_MSG")
10 __success
access_family_in_sk_msg(void)11 __naked void access_family_in_sk_msg(void)
12 {
13 	asm volatile ("					\
14 	r0 = *(u32*)(r1 + %[sk_msg_md_family]);		\
15 	exit;						\
16 "	:
17 	: __imm_const(sk_msg_md_family, offsetof(struct sk_msg_md, family))
18 	: __clobber_all);
19 }
20 
21 SEC("sk_msg")
22 __description("valid access remote_ip4 in SK_MSG")
23 __success
remote_ip4_in_sk_msg(void)24 __naked void remote_ip4_in_sk_msg(void)
25 {
26 	asm volatile ("					\
27 	r0 = *(u32*)(r1 + %[sk_msg_md_remote_ip4]);	\
28 	exit;						\
29 "	:
30 	: __imm_const(sk_msg_md_remote_ip4, offsetof(struct sk_msg_md, remote_ip4))
31 	: __clobber_all);
32 }
33 
34 SEC("sk_msg")
35 __description("valid access local_ip4 in SK_MSG")
36 __success
local_ip4_in_sk_msg(void)37 __naked void local_ip4_in_sk_msg(void)
38 {
39 	asm volatile ("					\
40 	r0 = *(u32*)(r1 + %[sk_msg_md_local_ip4]);	\
41 	exit;						\
42 "	:
43 	: __imm_const(sk_msg_md_local_ip4, offsetof(struct sk_msg_md, local_ip4))
44 	: __clobber_all);
45 }
46 
47 SEC("sk_msg")
48 __description("valid access remote_port in SK_MSG")
49 __success
remote_port_in_sk_msg(void)50 __naked void remote_port_in_sk_msg(void)
51 {
52 	asm volatile ("					\
53 	r0 = *(u32*)(r1 + %[sk_msg_md_remote_port]);	\
54 	exit;						\
55 "	:
56 	: __imm_const(sk_msg_md_remote_port, offsetof(struct sk_msg_md, remote_port))
57 	: __clobber_all);
58 }
59 
60 SEC("sk_msg")
61 __description("valid access local_port in SK_MSG")
62 __success
local_port_in_sk_msg(void)63 __naked void local_port_in_sk_msg(void)
64 {
65 	asm volatile ("					\
66 	r0 = *(u32*)(r1 + %[sk_msg_md_local_port]);	\
67 	exit;						\
68 "	:
69 	: __imm_const(sk_msg_md_local_port, offsetof(struct sk_msg_md, local_port))
70 	: __clobber_all);
71 }
72 
73 SEC("sk_skb")
74 __description("valid access remote_ip6 in SK_MSG")
75 __success
remote_ip6_in_sk_msg(void)76 __naked void remote_ip6_in_sk_msg(void)
77 {
78 	asm volatile ("					\
79 	r0 = *(u32*)(r1 + %[sk_msg_md_remote_ip6_0]);	\
80 	r0 = *(u32*)(r1 + %[sk_msg_md_remote_ip6_1]);	\
81 	r0 = *(u32*)(r1 + %[sk_msg_md_remote_ip6_2]);	\
82 	r0 = *(u32*)(r1 + %[sk_msg_md_remote_ip6_3]);	\
83 	exit;						\
84 "	:
85 	: __imm_const(sk_msg_md_remote_ip6_0, offsetof(struct sk_msg_md, remote_ip6[0])),
86 	  __imm_const(sk_msg_md_remote_ip6_1, offsetof(struct sk_msg_md, remote_ip6[1])),
87 	  __imm_const(sk_msg_md_remote_ip6_2, offsetof(struct sk_msg_md, remote_ip6[2])),
88 	  __imm_const(sk_msg_md_remote_ip6_3, offsetof(struct sk_msg_md, remote_ip6[3]))
89 	: __clobber_all);
90 }
91 
92 SEC("sk_skb")
93 __description("valid access local_ip6 in SK_MSG")
94 __success
local_ip6_in_sk_msg(void)95 __naked void local_ip6_in_sk_msg(void)
96 {
97 	asm volatile ("					\
98 	r0 = *(u32*)(r1 + %[sk_msg_md_local_ip6_0]);	\
99 	r0 = *(u32*)(r1 + %[sk_msg_md_local_ip6_1]);	\
100 	r0 = *(u32*)(r1 + %[sk_msg_md_local_ip6_2]);	\
101 	r0 = *(u32*)(r1 + %[sk_msg_md_local_ip6_3]);	\
102 	exit;						\
103 "	:
104 	: __imm_const(sk_msg_md_local_ip6_0, offsetof(struct sk_msg_md, local_ip6[0])),
105 	  __imm_const(sk_msg_md_local_ip6_1, offsetof(struct sk_msg_md, local_ip6[1])),
106 	  __imm_const(sk_msg_md_local_ip6_2, offsetof(struct sk_msg_md, local_ip6[2])),
107 	  __imm_const(sk_msg_md_local_ip6_3, offsetof(struct sk_msg_md, local_ip6[3]))
108 	: __clobber_all);
109 }
110 
111 SEC("sk_msg")
112 __description("valid access size in SK_MSG")
113 __success
access_size_in_sk_msg(void)114 __naked void access_size_in_sk_msg(void)
115 {
116 	asm volatile ("					\
117 	r0 = *(u32*)(r1 + %[sk_msg_md_size]);		\
118 	exit;						\
119 "	:
120 	: __imm_const(sk_msg_md_size, offsetof(struct sk_msg_md, size))
121 	: __clobber_all);
122 }
123 
124 SEC("sk_msg")
125 __description("invalid 64B read of size in SK_MSG")
126 __failure __msg("invalid bpf_context access")
__flag(BPF_F_ANY_ALIGNMENT)127 __flag(BPF_F_ANY_ALIGNMENT)
128 __naked void of_size_in_sk_msg(void)
129 {
130 	asm volatile ("					\
131 	r2 = *(u64*)(r1 + %[sk_msg_md_size]);		\
132 	exit;						\
133 "	:
134 	: __imm_const(sk_msg_md_size, offsetof(struct sk_msg_md, size))
135 	: __clobber_all);
136 }
137 
138 SEC("sk_msg")
139 __description("invalid read past end of SK_MSG")
140 __failure __msg("invalid bpf_context access")
past_end_of_sk_msg(void)141 __naked void past_end_of_sk_msg(void)
142 {
143 	asm volatile ("					\
144 	r2 = *(u32*)(r1 + %[__imm_0]);			\
145 	exit;						\
146 "	:
147 	: __imm_const(__imm_0, offsetof(struct sk_msg_md, size) + 4)
148 	: __clobber_all);
149 }
150 
151 SEC("sk_msg")
152 __description("invalid read offset in SK_MSG")
153 __failure __msg("invalid bpf_context access")
__flag(BPF_F_ANY_ALIGNMENT)154 __flag(BPF_F_ANY_ALIGNMENT)
155 __naked void read_offset_in_sk_msg(void)
156 {
157 	asm volatile ("					\
158 	r2 = *(u32*)(r1 + %[__imm_0]);			\
159 	exit;						\
160 "	:
161 	: __imm_const(__imm_0, offsetof(struct sk_msg_md, family) + 1)
162 	: __clobber_all);
163 }
164 
165 SEC("sk_msg")
166 __description("direct packet read for SK_MSG")
167 __success
packet_read_for_sk_msg(void)168 __naked void packet_read_for_sk_msg(void)
169 {
170 	asm volatile ("					\
171 	r2 = *(u64*)(r1 + %[sk_msg_md_data]);		\
172 	r3 = *(u64*)(r1 + %[sk_msg_md_data_end]);	\
173 	r0 = r2;					\
174 	r0 += 8;					\
175 	if r0 > r3 goto l0_%=;				\
176 	r0 = *(u8*)(r2 + 0);				\
177 l0_%=:	r0 = 0;						\
178 	exit;						\
179 "	:
180 	: __imm_const(sk_msg_md_data, offsetof(struct sk_msg_md, data)),
181 	  __imm_const(sk_msg_md_data_end, offsetof(struct sk_msg_md, data_end))
182 	: __clobber_all);
183 }
184 
185 SEC("sk_msg")
186 __description("direct packet write for SK_MSG")
187 __success
packet_write_for_sk_msg(void)188 __naked void packet_write_for_sk_msg(void)
189 {
190 	asm volatile ("					\
191 	r2 = *(u64*)(r1 + %[sk_msg_md_data]);		\
192 	r3 = *(u64*)(r1 + %[sk_msg_md_data_end]);	\
193 	r0 = r2;					\
194 	r0 += 8;					\
195 	if r0 > r3 goto l0_%=;				\
196 	*(u8*)(r2 + 0) = r2;				\
197 l0_%=:	r0 = 0;						\
198 	exit;						\
199 "	:
200 	: __imm_const(sk_msg_md_data, offsetof(struct sk_msg_md, data)),
201 	  __imm_const(sk_msg_md_data_end, offsetof(struct sk_msg_md, data_end))
202 	: __clobber_all);
203 }
204 
205 SEC("sk_msg")
206 __description("overlapping checks for direct packet access SK_MSG")
207 __success
direct_packet_access_sk_msg(void)208 __naked void direct_packet_access_sk_msg(void)
209 {
210 	asm volatile ("					\
211 	r2 = *(u64*)(r1 + %[sk_msg_md_data]);		\
212 	r3 = *(u64*)(r1 + %[sk_msg_md_data_end]);	\
213 	r0 = r2;					\
214 	r0 += 8;					\
215 	if r0 > r3 goto l0_%=;				\
216 	r1 = r2;					\
217 	r1 += 6;					\
218 	if r1 > r3 goto l0_%=;				\
219 	r0 = *(u16*)(r2 + 6);				\
220 l0_%=:	r0 = 0;						\
221 	exit;						\
222 "	:
223 	: __imm_const(sk_msg_md_data, offsetof(struct sk_msg_md, data)),
224 	  __imm_const(sk_msg_md_data_end, offsetof(struct sk_msg_md, data_end))
225 	: __clobber_all);
226 }
227 
/* BPF selftest programs must declare a GPL-compatible license. */
char _license[] SEC("license") = "GPL";
229