// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/lwt.c */
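/*
 * Exercises verifier rules for LWT (lightweight tunnel) programs: direct
 * packet data is read-only for lwt_in/lwt_out and writable only from
 * lwt_xmit, bpf_skb_change_head() may be called from lwt_xmit, and loads
 * of skb->tc_classid are rejected as invalid bpf_context access.
 */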

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

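/*
 * lwt_in only gets read-only packet access: even after the usual bounds
 * check (r2 = data, r3 = data_end, data + 8 <= data_end), a store through
 * the packet pointer must be rejected.
 */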
SEC("lwt_in")
__description("invalid direct packet write for LWT_IN")
__failure __msg("cannot write into packet")
__naked void packet_write_for_lwt_in(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r0 > r3 goto l0_%=;				\
	*(u8*)(r2 + 0) = r2;				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

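/* Same store from lwt_out: packet access is read-only here as well. */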
SEC("lwt_out")
__description("invalid direct packet write for LWT_OUT")
__failure __msg("cannot write into packet")
__naked void packet_write_for_lwt_out(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r0 > r3 goto l0_%=;				\
	*(u8*)(r2 + 0) = r2;				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

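/* lwt_xmit may modify the packet, so the identical store is accepted. */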
SEC("lwt_xmit")
__description("direct packet write for LWT_XMIT")
__success __retval(0)
__naked void packet_write_for_lwt_xmit(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r0 > r3 goto l0_%=;				\
	*(u8*)(r2 + 0) = r2;				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

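/*
 * Direct packet reads behind the same data/data_end bounds check are
 * allowed for all three LWT attach types.
 */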
SEC("lwt_in")
__description("direct packet read for LWT_IN")
__success __retval(0)
__naked void packet_read_for_lwt_in(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r0 > r3 goto l0_%=;				\
	r0 = *(u8*)(r2 + 0);				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

SEC("lwt_out")
__description("direct packet read for LWT_OUT")
__success __retval(0)
__naked void packet_read_for_lwt_out(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r0 > r3 goto l0_%=;				\
	r0 = *(u8*)(r2 + 0);				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

SEC("lwt_xmit")
__description("direct packet read for LWT_XMIT")
__success __retval(0)
__naked void packet_read_for_lwt_xmit(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r0 > r3 goto l0_%=;				\
	r0 = *(u8*)(r2 + 0);				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

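/*
 * Overlapping bounds checks: data + 8 <= data_end already covers the later
 * data + 6 check, and the u16 load at offset 6 stays within the range
 * proven by the first check.
 */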
SEC("lwt_xmit")
__description("overlapping checks for direct packet access")
__success __retval(0)
__naked void checks_for_direct_packet_access(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r0 > r3 goto l0_%=;				\
	r1 = r2;					\
	r1 += 6;					\
	if r1 > r3 goto l0_%=;				\
	r0 = *(u16*)(r2 + 6);				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

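/*
 * lwt_xmit may grow headroom via bpf_skb_change_head(); two consecutive
 * calls (34 then 42 bytes of headroom) must verify and return 0.
 */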
SEC("lwt_xmit")
__description("make headroom for LWT_XMIT")
__success __retval(0)
__naked void make_headroom_for_lwt_xmit(void)
{
	asm volatile ("					\
	r6 = r1;					\
	r2 = 34;					\
	r3 = 0;						\
	call %[bpf_skb_change_head];			\
	/* split for s390 to succeed */			\
	r1 = r6;					\
	r2 = 42;					\
	r3 = 0;						\
	call %[bpf_skb_change_head];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_skb_change_head)
	: __clobber_all);
}

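/*
 * skb->tc_classid is not exposed to these program types: a 32-bit load of
 * the field is rejected as invalid bpf_context access for privileged and
 * unprivileged loads alike.
 */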
SEC("socket")
__description("invalid access of tc_classid for LWT_IN")
__failure __msg("invalid bpf_context access")
__failure_unpriv
__naked void tc_classid_for_lwt_in(void)
{
	asm volatile ("					\
	r0 = *(u32*)(r1 + %[__sk_buff_tc_classid]);	\
	exit;						\
"	:
	: __imm_const(__sk_buff_tc_classid, offsetof(struct __sk_buff, tc_classid))
	: __clobber_all);
}

SEC("socket")
__description("invalid access of tc_classid for LWT_OUT")
__failure __msg("invalid bpf_context access")
__failure_unpriv
__naked void tc_classid_for_lwt_out(void)
{
	asm volatile ("					\
	r0 = *(u32*)(r1 + %[__sk_buff_tc_classid]);	\
	exit;						\
"	:
	: __imm_const(__sk_buff_tc_classid, offsetof(struct __sk_buff, tc_classid))
	: __clobber_all);
}

SEC("socket")
__description("invalid access of tc_classid for LWT_XMIT")
__failure __msg("invalid bpf_context access")
__failure_unpriv
__naked void tc_classid_for_lwt_xmit(void)
{
	asm volatile ("					\
	r0 = *(u32*)(r1 + %[__sk_buff_tc_classid]);	\
	exit;						\
"	:
	: __imm_const(__sk_buff_tc_classid, offsetof(struct __sk_buff, tc_classid))
	: __clobber_all);
}

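/*
 * A half (u16) load of skb->tc_classid is rejected for an lwt_in program
 * too; the endianness-dependent offset targets the low-order half of the
 * 32-bit field on both byte orders.
 */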
SEC("lwt_in")
__description("check skb->tc_classid half load not permitted for lwt prog")
__failure __msg("invalid bpf_context access")
__naked void not_permitted_for_lwt_prog(void)
{
	asm volatile (
	"r0 = 0;"
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	"r0 = *(u16*)(r1 + %[__sk_buff_tc_classid]);"
#else
	"r0 = *(u16*)(r1 + %[__imm_0]);"
#endif
	"exit;"
	:
	: __imm_const(__imm_0, offsetof(struct __sk_buff, tc_classid) + 2),
	  __imm_const(__sk_buff_tc_classid, offsetof(struct __sk_buff, tc_classid))
	: __clobber_all);
}

char _license[] SEC("license") = "GPL";