/* xref: /linux/tools/testing/selftests/bpf/verifier/calls.c (revision 91204e4703aef7bcdd045126b889d7e1aab63dd5) */
1 {
2 	"calls: invalid kfunc call not eliminated",
3 	.insns = {
4 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
5 	BPF_MOV64_IMM(BPF_REG_0, 1),
6 	BPF_EXIT_INSN(),
7 	},
8 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
9 	.result  = REJECT,
10 	.errstr = "invalid kernel function call not eliminated in verifier pass",
11 },
12 {
13 	"calls: invalid kfunc call unreachable",
14 	.insns = {
15 	BPF_MOV64_IMM(BPF_REG_0, 1),
16 	BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 0, 2),
17 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
18 	BPF_MOV64_IMM(BPF_REG_0, 1),
19 	BPF_EXIT_INSN(),
20 	},
21 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
22 	.result  = ACCEPT,
23 },
24 {
25 	"calls: invalid kfunc call: ptr_to_mem to struct with non-scalar",
26 	.insns = {
27 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
28 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
29 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
30 	BPF_EXIT_INSN(),
31 	},
32 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
33 	.result = REJECT,
34 	.errstr = "arg#0 pointer type STRUCT prog_test_fail1 must point to scalar",
35 	.fixup_kfunc_btf_id = {
36 		{ "bpf_kfunc_call_test_fail1", 2 },
37 	},
38 },
39 {
40 	"calls: invalid kfunc call: ptr_to_mem to struct with nesting depth > 4",
41 	.insns = {
42 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
43 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
44 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
45 	BPF_EXIT_INSN(),
46 	},
47 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
48 	.result = REJECT,
49 	.errstr = "max struct nesting depth exceeded\narg#0 pointer type STRUCT prog_test_fail2",
50 	.fixup_kfunc_btf_id = {
51 		{ "bpf_kfunc_call_test_fail2", 2 },
52 	},
53 },
54 {
55 	"calls: invalid kfunc call: ptr_to_mem to struct with FAM",
56 	.insns = {
57 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
58 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
59 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
60 	BPF_EXIT_INSN(),
61 	},
62 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
63 	.result = REJECT,
64 	.errstr = "arg#0 pointer type STRUCT prog_test_fail3 must point to scalar",
65 	.fixup_kfunc_btf_id = {
66 		{ "bpf_kfunc_call_test_fail3", 2 },
67 	},
68 },
69 {
70 	"calls: invalid kfunc call: reg->type != PTR_TO_CTX",
71 	.insns = {
72 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
73 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
74 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
75 	BPF_EXIT_INSN(),
76 	},
77 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
78 	.result = REJECT,
79 	.errstr = "R1 must have zero offset when passed to release func or trusted arg to kfunc",
80 	.fixup_kfunc_btf_id = {
81 		{ "bpf_kfunc_call_test_pass_ctx", 2 },
82 	},
83 },
84 {
85 	"calls: invalid kfunc call: void * not allowed in func proto without mem size arg",
86 	.insns = {
87 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
88 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
89 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
90 	BPF_EXIT_INSN(),
91 	},
92 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
93 	.result = REJECT,
94 	.errstr = "arg#0 pointer type UNKNOWN  must point to scalar",
95 	.fixup_kfunc_btf_id = {
96 		{ "bpf_kfunc_call_test_mem_len_fail1", 2 },
97 	},
98 },
99 {
100 	"calls: trigger reg2btf_ids[reg->type] for reg->type > __BPF_REG_TYPE_MAX",
101 	.insns = {
102 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
103 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
104 	BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
105 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
106 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
107 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
108 	BPF_EXIT_INSN(),
109 	},
110 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
111 	.result = REJECT,
112 	.errstr = "arg#0 is ptr_or_null_ expected ptr_ or socket",
113 	.fixup_kfunc_btf_id = {
114 		{ "bpf_kfunc_call_test_acquire", 3 },
115 		{ "bpf_kfunc_call_test_release", 5 },
116 	},
117 },
118 {
119 	"calls: invalid kfunc call: reg->off must be zero when passed to release kfunc",
120 	.insns = {
121 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
122 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
123 	BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
124 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
125 	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
126 	BPF_EXIT_INSN(),
127 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
128 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
129 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
130 	BPF_MOV64_IMM(BPF_REG_0, 0),
131 	BPF_EXIT_INSN(),
132 	},
133 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
134 	.result = REJECT,
135 	.errstr = "R1 must have zero offset when passed to release func",
136 	.fixup_kfunc_btf_id = {
137 		{ "bpf_kfunc_call_test_acquire", 3 },
138 		{ "bpf_kfunc_call_memb_release", 8 },
139 	},
140 },
141 {
142 	"calls: invalid kfunc call: don't match first member type when passed to release kfunc",
143 	.insns = {
144 	BPF_MOV64_IMM(BPF_REG_0, 0),
145 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
146 	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
147 	BPF_EXIT_INSN(),
148 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
149 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
150 	BPF_MOV64_IMM(BPF_REG_0, 0),
151 	BPF_EXIT_INSN(),
152 	},
153 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
154 	.result = REJECT,
155 	.errstr = "kernel function bpf_kfunc_call_memb1_release args#0 expected pointer",
156 	.fixup_kfunc_btf_id = {
157 		{ "bpf_kfunc_call_memb_acquire", 1 },
158 		{ "bpf_kfunc_call_memb1_release", 5 },
159 	},
160 },
161 {
162 	"calls: invalid kfunc call: PTR_TO_BTF_ID with negative offset",
163 	.insns = {
164 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
165 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
166 	BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
167 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
168 	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
169 	BPF_EXIT_INSN(),
170 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
171 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 16),
172 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -4),
173 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
174 	BPF_MOV64_IMM(BPF_REG_0, 0),
175 	BPF_EXIT_INSN(),
176 	},
177 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
178 	.fixup_kfunc_btf_id = {
179 		{ "bpf_kfunc_call_test_acquire", 3 },
180 		{ "bpf_kfunc_call_test_release", 9 },
181 	},
182 	.result_unpriv = REJECT,
183 	.result = REJECT,
184 	.errstr = "negative offset ptr_ ptr R1 off=-4 disallowed",
185 },
186 {
187 	"calls: invalid kfunc call: PTR_TO_BTF_ID with variable offset",
188 	.insns = {
189 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
190 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
191 	BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
192 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
193 	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
194 	BPF_EXIT_INSN(),
195 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
196 	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
197 	BPF_JMP_IMM(BPF_JLE, BPF_REG_2, 4, 3),
198 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
199 	BPF_MOV64_IMM(BPF_REG_0, 0),
200 	BPF_EXIT_INSN(),
201 	BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 3),
202 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
203 	BPF_MOV64_IMM(BPF_REG_0, 0),
204 	BPF_EXIT_INSN(),
205 	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
206 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
207 	BPF_MOV64_IMM(BPF_REG_0, 0),
208 	BPF_EXIT_INSN(),
209 	},
210 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
211 	.fixup_kfunc_btf_id = {
212 		{ "bpf_kfunc_call_test_acquire", 3 },
213 		{ "bpf_kfunc_call_test_release", 9 },
214 		{ "bpf_kfunc_call_test_release", 13 },
215 		{ "bpf_kfunc_call_test_release", 17 },
216 	},
217 	.result_unpriv = REJECT,
218 	.result = REJECT,
219 	.errstr = "variable ptr_ access var_off=(0x0; 0x7) disallowed",
220 },
221 {
222 	"calls: invalid kfunc call: referenced arg needs refcounted PTR_TO_BTF_ID",
223 	.insns = {
224 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
225 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
226 	BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
227 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
228 	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
229 	BPF_EXIT_INSN(),
230 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
231 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
232 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
233 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 16),
234 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
235 	BPF_MOV64_IMM(BPF_REG_0, 0),
236 	BPF_EXIT_INSN(),
237 	},
238 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
239 	.fixup_kfunc_btf_id = {
240 		{ "bpf_kfunc_call_test_acquire", 3 },
241 		{ "bpf_kfunc_call_test_ref", 8 },
242 		{ "bpf_kfunc_call_test_ref", 10 },
243 	},
244 	.result_unpriv = REJECT,
245 	.result = REJECT,
246 	.errstr = "R1 must be referenced",
247 },
248 {
249 	"calls: valid kfunc call: referenced arg needs refcounted PTR_TO_BTF_ID",
250 	.insns = {
251 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
252 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
253 	BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
254 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
255 	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
256 	BPF_EXIT_INSN(),
257 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
258 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
259 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
260 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
261 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
262 	BPF_MOV64_IMM(BPF_REG_0, 0),
263 	BPF_EXIT_INSN(),
264 	},
265 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
266 	.fixup_kfunc_btf_id = {
267 		{ "bpf_kfunc_call_test_acquire", 3 },
268 		{ "bpf_kfunc_call_test_ref", 8 },
269 		{ "bpf_kfunc_call_test_release", 10 },
270 	},
271 	.result_unpriv = REJECT,
272 	.result = ACCEPT,
273 },
274 {
275 	"calls: basic sanity",
276 	.insns = {
277 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
278 	BPF_MOV64_IMM(BPF_REG_0, 1),
279 	BPF_EXIT_INSN(),
280 	BPF_MOV64_IMM(BPF_REG_0, 2),
281 	BPF_EXIT_INSN(),
282 	},
283 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
284 	.result = ACCEPT,
285 },
286 {
287 	"calls: not on unprivileged",
288 	.insns = {
289 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
290 	BPF_MOV64_IMM(BPF_REG_0, 1),
291 	BPF_EXIT_INSN(),
292 	BPF_MOV64_IMM(BPF_REG_0, 2),
293 	BPF_EXIT_INSN(),
294 	},
295 	.errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
296 	.result_unpriv = REJECT,
297 	.result = ACCEPT,
298 	.retval = 1,
299 },
300 {
301 	"calls: div by 0 in subprog",
302 	.insns = {
303 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
304 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
305 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
306 	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
307 		    offsetof(struct __sk_buff, data_end)),
308 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
309 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
310 	BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
311 	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
312 	BPF_MOV64_IMM(BPF_REG_0, 1),
313 	BPF_EXIT_INSN(),
314 	BPF_MOV32_IMM(BPF_REG_2, 0),
315 	BPF_MOV32_IMM(BPF_REG_3, 1),
316 	BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2),
317 	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
318 		    offsetof(struct __sk_buff, data)),
319 	BPF_EXIT_INSN(),
320 	},
321 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
322 	.result = ACCEPT,
323 	.retval = 1,
324 },
325 {
326 	"calls: multiple ret types in subprog 1",
327 	.insns = {
328 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
329 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
330 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
331 	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
332 		    offsetof(struct __sk_buff, data_end)),
333 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
334 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
335 	BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
336 	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
337 	BPF_MOV64_IMM(BPF_REG_0, 1),
338 	BPF_EXIT_INSN(),
339 	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
340 		    offsetof(struct __sk_buff, data)),
341 	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
342 	BPF_MOV32_IMM(BPF_REG_0, 42),
343 	BPF_EXIT_INSN(),
344 	},
345 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
346 	.result = REJECT,
347 	.errstr = "R0 invalid mem access 'scalar'",
348 },
349 {
350 	"calls: multiple ret types in subprog 2",
351 	.insns = {
352 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
353 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
354 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
355 	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
356 		    offsetof(struct __sk_buff, data_end)),
357 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
358 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
359 	BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
360 	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
361 	BPF_MOV64_IMM(BPF_REG_0, 1),
362 	BPF_EXIT_INSN(),
363 	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
364 		    offsetof(struct __sk_buff, data)),
365 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
366 	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9),
367 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
368 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
369 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
370 	BPF_LD_MAP_FD(BPF_REG_1, 0),
371 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
372 	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
373 	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
374 		    offsetof(struct __sk_buff, data)),
375 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
376 	BPF_EXIT_INSN(),
377 	},
378 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
379 	.fixup_map_hash_8b = { 16 },
380 	.result = REJECT,
381 	.errstr = "R0 min value is outside of the allowed memory range",
382 },
383 {
384 	"calls: overlapping caller/callee",
385 	.insns = {
386 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
387 	BPF_MOV64_IMM(BPF_REG_0, 1),
388 	BPF_EXIT_INSN(),
389 	},
390 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
391 	.errstr = "last insn is not an exit or jmp",
392 	.result = REJECT,
393 },
394 {
395 	"calls: wrong recursive calls",
396 	.insns = {
397 	BPF_JMP_IMM(BPF_JA, 0, 0, 4),
398 	BPF_JMP_IMM(BPF_JA, 0, 0, 4),
399 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
400 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
401 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
402 	BPF_MOV64_IMM(BPF_REG_0, 1),
403 	BPF_EXIT_INSN(),
404 	},
405 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
406 	.errstr = "jump out of range",
407 	.result = REJECT,
408 },
409 {
410 	"calls: wrong src reg",
411 	.insns = {
412 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 3, 0, 0),
413 	BPF_MOV64_IMM(BPF_REG_0, 1),
414 	BPF_EXIT_INSN(),
415 	},
416 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
417 	.errstr = "BPF_CALL uses reserved fields",
418 	.result = REJECT,
419 },
420 {
421 	"calls: wrong off value",
422 	.insns = {
423 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
424 	BPF_MOV64_IMM(BPF_REG_0, 1),
425 	BPF_EXIT_INSN(),
426 	BPF_MOV64_IMM(BPF_REG_0, 2),
427 	BPF_EXIT_INSN(),
428 	},
429 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
430 	.errstr = "BPF_CALL uses reserved fields",
431 	.result = REJECT,
432 },
433 {
434 	"calls: jump back loop",
435 	.insns = {
436 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
437 	BPF_MOV64_IMM(BPF_REG_0, 1),
438 	BPF_EXIT_INSN(),
439 	},
440 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
441 	.errstr = "back-edge from insn 0 to 0",
442 	.result = REJECT,
443 },
444 {
445 	"calls: conditional call",
446 	.insns = {
447 	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
448 		    offsetof(struct __sk_buff, mark)),
449 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
450 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
451 	BPF_MOV64_IMM(BPF_REG_0, 1),
452 	BPF_EXIT_INSN(),
453 	BPF_MOV64_IMM(BPF_REG_0, 2),
454 	BPF_EXIT_INSN(),
455 	},
456 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
457 	.errstr = "jump out of range",
458 	.result = REJECT,
459 },
460 {
461 	"calls: conditional call 2",
462 	.insns = {
463 	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
464 		    offsetof(struct __sk_buff, mark)),
465 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
466 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
467 	BPF_MOV64_IMM(BPF_REG_0, 1),
468 	BPF_EXIT_INSN(),
469 	BPF_MOV64_IMM(BPF_REG_0, 2),
470 	BPF_EXIT_INSN(),
471 	BPF_MOV64_IMM(BPF_REG_0, 3),
472 	BPF_EXIT_INSN(),
473 	},
474 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
475 	.result = ACCEPT,
476 },
477 {
478 	"calls: conditional call 3",
479 	.insns = {
480 	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
481 		    offsetof(struct __sk_buff, mark)),
482 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
483 	BPF_JMP_IMM(BPF_JA, 0, 0, 4),
484 	BPF_MOV64_IMM(BPF_REG_0, 1),
485 	BPF_EXIT_INSN(),
486 	BPF_MOV64_IMM(BPF_REG_0, 1),
487 	BPF_JMP_IMM(BPF_JA, 0, 0, -6),
488 	BPF_MOV64_IMM(BPF_REG_0, 3),
489 	BPF_JMP_IMM(BPF_JA, 0, 0, -6),
490 	},
491 	.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
492 	.errstr_unpriv = "back-edge from insn",
493 	.result_unpriv = REJECT,
494 	.result = ACCEPT,
495 	.retval = 1,
496 },
497 {
498 	"calls: conditional call 4",
499 	.insns = {
500 	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
501 		    offsetof(struct __sk_buff, mark)),
502 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
503 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
504 	BPF_MOV64_IMM(BPF_REG_0, 1),
505 	BPF_EXIT_INSN(),
506 	BPF_MOV64_IMM(BPF_REG_0, 1),
507 	BPF_JMP_IMM(BPF_JA, 0, 0, -5),
508 	BPF_MOV64_IMM(BPF_REG_0, 3),
509 	BPF_EXIT_INSN(),
510 	},
511 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
512 	.result = ACCEPT,
513 },
514 {
515 	"calls: conditional call 5",
516 	.insns = {
517 	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
518 		    offsetof(struct __sk_buff, mark)),
519 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
520 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
521 	BPF_MOV64_IMM(BPF_REG_0, 1),
522 	BPF_EXIT_INSN(),
523 	BPF_MOV64_IMM(BPF_REG_0, 1),
524 	BPF_JMP_IMM(BPF_JA, 0, 0, -6),
525 	BPF_MOV64_IMM(BPF_REG_0, 3),
526 	BPF_EXIT_INSN(),
527 	},
528 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
529 	.result = ACCEPT,
530 	.retval = 1,
531 },
532 {
533 	"calls: conditional call 6",
534 	.insns = {
535 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
536 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
537 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
538 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
539 	BPF_EXIT_INSN(),
540 	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
541 		    offsetof(struct __sk_buff, mark)),
542 	BPF_EXIT_INSN(),
543 	},
544 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
545 	.errstr = "infinite loop detected",
546 	.result = REJECT,
547 },
548 {
549 	"calls: using r0 returned by callee",
550 	.insns = {
551 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
552 	BPF_EXIT_INSN(),
553 	BPF_MOV64_IMM(BPF_REG_0, 2),
554 	BPF_EXIT_INSN(),
555 	},
556 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
557 	.result = ACCEPT,
558 },
559 {
560 	"calls: using uninit r0 from callee",
561 	.insns = {
562 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
563 	BPF_EXIT_INSN(),
564 	BPF_EXIT_INSN(),
565 	},
566 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
567 	.errstr = "!read_ok",
568 	.result = REJECT,
569 },
570 {
571 	"calls: callee is using r1",
572 	.insns = {
573 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
574 	BPF_EXIT_INSN(),
575 	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
576 		    offsetof(struct __sk_buff, len)),
577 	BPF_EXIT_INSN(),
578 	},
579 	.prog_type = BPF_PROG_TYPE_SCHED_ACT,
580 	.result = ACCEPT,
581 	.retval = TEST_DATA_LEN,
582 },
583 {
584 	"calls: callee using args1",
585 	.insns = {
586 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
587 	BPF_EXIT_INSN(),
588 	BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
589 	BPF_EXIT_INSN(),
590 	},
591 	.errstr_unpriv = "allowed for",
592 	.result_unpriv = REJECT,
593 	.result = ACCEPT,
594 	.retval = POINTER_VALUE,
595 },
596 {
597 	"calls: callee using wrong args2",
598 	.insns = {
599 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
600 	BPF_EXIT_INSN(),
601 	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
602 	BPF_EXIT_INSN(),
603 	},
604 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
605 	.errstr = "R2 !read_ok",
606 	.result = REJECT,
607 },
608 {
609 	"calls: callee using two args",
610 	.insns = {
611 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
612 	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
613 		    offsetof(struct __sk_buff, len)),
614 	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
615 		    offsetof(struct __sk_buff, len)),
616 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
617 	BPF_EXIT_INSN(),
618 	BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
619 	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
620 	BPF_EXIT_INSN(),
621 	},
622 	.errstr_unpriv = "allowed for",
623 	.result_unpriv = REJECT,
624 	.result = ACCEPT,
625 	.retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN,
626 },
627 {
628 	"calls: callee changing pkt pointers",
629 	.insns = {
630 	BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, offsetof(struct xdp_md, data)),
631 	BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
632 		    offsetof(struct xdp_md, data_end)),
633 	BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
634 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
635 	BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
636 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
637 	/* clear_all_pkt_pointers() has to walk all frames
638 	 * to make sure that pkt pointers in the caller
639 	 * are cleared when callee is calling a helper that
640 	 * adjusts packet size
641 	 */
642 	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
643 	BPF_MOV32_IMM(BPF_REG_0, 0),
644 	BPF_EXIT_INSN(),
645 	BPF_MOV64_IMM(BPF_REG_2, 0),
646 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_xdp_adjust_head),
647 	BPF_EXIT_INSN(),
648 	},
649 	.result = REJECT,
650 	.errstr = "R6 invalid mem access 'scalar'",
651 	.prog_type = BPF_PROG_TYPE_XDP,
652 	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
653 },
654 {
655 	"calls: ptr null check in subprog",
656 	.insns = {
657 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
658 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
659 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
660 	BPF_LD_MAP_FD(BPF_REG_1, 0),
661 	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
662 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
663 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
664 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
665 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
666 	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
667 	BPF_EXIT_INSN(),
668 	BPF_MOV64_IMM(BPF_REG_0, 0),
669 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
670 	BPF_MOV64_IMM(BPF_REG_0, 1),
671 	BPF_EXIT_INSN(),
672 	},
673 	.errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
674 	.fixup_map_hash_48b = { 3 },
675 	.result_unpriv = REJECT,
676 	.result = ACCEPT,
677 	.retval = 0,
678 },
679 {
680 	"calls: two calls with args",
681 	.insns = {
682 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
683 	BPF_EXIT_INSN(),
684 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
685 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
686 	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
687 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
688 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
689 	BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
690 	BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
691 	BPF_EXIT_INSN(),
692 	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
693 		    offsetof(struct __sk_buff, len)),
694 	BPF_EXIT_INSN(),
695 	},
696 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
697 	.result = ACCEPT,
698 	.retval = TEST_DATA_LEN + TEST_DATA_LEN,
699 },
700 {
701 	"calls: calls with stack arith",
702 	.insns = {
703 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
704 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
705 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
706 	BPF_EXIT_INSN(),
707 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
708 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
709 	BPF_EXIT_INSN(),
710 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
711 	BPF_MOV64_IMM(BPF_REG_0, 42),
712 	BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
713 	BPF_EXIT_INSN(),
714 	},
715 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
716 	.result = ACCEPT,
717 	.retval = 42,
718 },
719 {
720 	"calls: calls with misaligned stack access",
721 	.insns = {
722 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
723 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
724 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
725 	BPF_EXIT_INSN(),
726 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
727 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
728 	BPF_EXIT_INSN(),
729 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
730 	BPF_MOV64_IMM(BPF_REG_0, 42),
731 	BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
732 	BPF_EXIT_INSN(),
733 	},
734 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
735 	.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
736 	.errstr = "misaligned stack access",
737 	.result = REJECT,
738 },
739 {
740 	"calls: calls control flow, jump test",
741 	.insns = {
742 	BPF_MOV64_IMM(BPF_REG_0, 42),
743 	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
744 	BPF_MOV64_IMM(BPF_REG_0, 43),
745 	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
746 	BPF_JMP_IMM(BPF_JA, 0, 0, -3),
747 	BPF_EXIT_INSN(),
748 	},
749 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
750 	.result = ACCEPT,
751 	.retval = 43,
752 },
753 {
754 	"calls: calls control flow, jump test 2",
755 	.insns = {
756 	BPF_MOV64_IMM(BPF_REG_0, 42),
757 	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
758 	BPF_MOV64_IMM(BPF_REG_0, 43),
759 	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
760 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
761 	BPF_EXIT_INSN(),
762 	},
763 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
764 	.errstr = "jump out of range from insn 1 to 4",
765 	.result = REJECT,
766 },
767 {
768 	"calls: two calls with bad jump",
769 	.insns = {
770 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
771 	BPF_EXIT_INSN(),
772 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
773 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
774 	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
775 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
776 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
777 	BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
778 	BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
779 	BPF_EXIT_INSN(),
780 	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
781 		    offsetof(struct __sk_buff, len)),
782 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
783 	BPF_EXIT_INSN(),
784 	},
785 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
786 	.errstr = "jump out of range from insn 11 to 9",
787 	.result = REJECT,
788 },
789 {
790 	"calls: recursive call. test1",
791 	.insns = {
792 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
793 	BPF_EXIT_INSN(),
794 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
795 	BPF_EXIT_INSN(),
796 	},
797 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
798 	.errstr = "back-edge",
799 	.result = REJECT,
800 },
801 {
802 	"calls: recursive call. test2",
803 	.insns = {
804 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
805 	BPF_EXIT_INSN(),
806 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
807 	BPF_EXIT_INSN(),
808 	},
809 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
810 	.errstr = "back-edge",
811 	.result = REJECT,
812 },
813 {
814 	"calls: unreachable code",
815 	.insns = {
816 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
817 	BPF_EXIT_INSN(),
818 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
819 	BPF_EXIT_INSN(),
820 	BPF_MOV64_IMM(BPF_REG_0, 0),
821 	BPF_EXIT_INSN(),
822 	BPF_MOV64_IMM(BPF_REG_0, 0),
823 	BPF_EXIT_INSN(),
824 	},
825 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
826 	.errstr = "unreachable insn 6",
827 	.result = REJECT,
828 },
829 {
830 	"calls: invalid call",
831 	.insns = {
832 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
833 	BPF_EXIT_INSN(),
834 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
835 	BPF_EXIT_INSN(),
836 	},
837 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
838 	.errstr = "invalid destination",
839 	.result = REJECT,
840 },
841 {
842 	"calls: invalid call 2",
843 	.insns = {
844 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
845 	BPF_EXIT_INSN(),
846 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
847 	BPF_EXIT_INSN(),
848 	},
849 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
850 	.errstr = "invalid destination",
851 	.result = REJECT,
852 },
853 {
854 	"calls: jumping across function bodies. test1",
855 	.insns = {
856 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
857 	BPF_MOV64_IMM(BPF_REG_0, 0),
858 	BPF_EXIT_INSN(),
859 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
860 	BPF_EXIT_INSN(),
861 	},
862 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
863 	.errstr = "jump out of range",
864 	.result = REJECT,
865 },
866 {
867 	"calls: jumping across function bodies. test2",
868 	.insns = {
869 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
870 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
871 	BPF_MOV64_IMM(BPF_REG_0, 0),
872 	BPF_EXIT_INSN(),
873 	BPF_EXIT_INSN(),
874 	},
875 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
876 	.errstr = "jump out of range",
877 	.result = REJECT,
878 },
879 {
880 	"calls: call without exit",
881 	.insns = {
882 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
883 	BPF_EXIT_INSN(),
884 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
885 	BPF_EXIT_INSN(),
886 	BPF_MOV64_IMM(BPF_REG_0, 0),
887 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
888 	},
889 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
890 	.errstr = "not an exit",
891 	.result = REJECT,
892 },
893 {
894 	"calls: call into middle of ld_imm64",
895 	.insns = {
896 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
897 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
898 	BPF_MOV64_IMM(BPF_REG_0, 0),
899 	BPF_EXIT_INSN(),
900 	BPF_LD_IMM64(BPF_REG_0, 0),
901 	BPF_EXIT_INSN(),
902 	},
903 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
904 	.errstr = "last insn",
905 	.result = REJECT,
906 },
907 {
908 	"calls: call into middle of other call",
909 	.insns = {
910 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
911 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
912 	BPF_MOV64_IMM(BPF_REG_0, 0),
913 	BPF_EXIT_INSN(),
914 	BPF_MOV64_IMM(BPF_REG_0, 0),
915 	BPF_MOV64_IMM(BPF_REG_0, 0),
916 	BPF_EXIT_INSN(),
917 	},
918 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
919 	.errstr = "last insn",
920 	.result = REJECT,
921 },
922 {
923 	"calls: subprog call with ld_abs in main prog",
924 	.insns = {
925 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
926 	BPF_LD_ABS(BPF_B, 0),
927 	BPF_LD_ABS(BPF_H, 0),
928 	BPF_LD_ABS(BPF_W, 0),
929 	BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
930 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
931 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
932 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
933 	BPF_LD_ABS(BPF_B, 0),
934 	BPF_LD_ABS(BPF_H, 0),
935 	BPF_LD_ABS(BPF_W, 0),
936 	BPF_EXIT_INSN(),
937 	BPF_MOV64_IMM(BPF_REG_2, 1),
938 	BPF_MOV64_IMM(BPF_REG_3, 2),
939 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_vlan_push),
940 	BPF_EXIT_INSN(),
941 	},
942 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
943 	.result = ACCEPT,
944 },
945 {
946 	"calls: two calls with bad fallthrough",
947 	.insns = {
948 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
949 	BPF_EXIT_INSN(),
950 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
951 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
952 	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
953 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
954 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
955 	BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
956 	BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
957 	BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
958 	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
959 		    offsetof(struct __sk_buff, len)),
960 	BPF_EXIT_INSN(),
961 	},
962 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
963 	.errstr = "not an exit",
964 	.result = REJECT,
965 },
966 {
967 	"calls: two calls with stack read",
968 	.insns = {
969 	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
970 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
971 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
972 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
973 	BPF_EXIT_INSN(),
974 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
975 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
976 	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
977 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
978 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
979 	BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
980 	BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
981 	BPF_EXIT_INSN(),
982 	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
983 	BPF_EXIT_INSN(),
984 	},
985 	.prog_type = BPF_PROG_TYPE_XDP,
986 	.result = ACCEPT,
987 },
{
	"calls: two calls with stack write",
	.insns = {
	/* main prog: pass &fp[-8] (r1) and &fp[-16] (r2) to subprog 1,
	 * then read back the value subprog 1 stored at fp[-16]
	 */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
	BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
	/* write into stack frame of main prog */
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 2 */
	/* read from stack frame of main prog */
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.result = ACCEPT,
},
{
	"calls: stack overflow using two frames (pre-call access)",
	.insns = {
	/* prog 1: touches fp[-300] before the call, so both frames
	 * need ~300 bytes and the combined stack exceeds the limit
	 */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),

	/* prog 2 */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.errstr = "combined stack size",
	.result = REJECT,
},
{
	"calls: stack overflow using two frames (post-call access)",
	.insns = {
	/* prog 1: same as the pre-call variant, but the caller's deep
	 * stack access happens after the call returns — still rejected
	 */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
	BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
	BPF_EXIT_INSN(),

	/* prog 2 */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.errstr = "combined stack size",
	.result = REJECT,
},
{
	"calls: stack depth check using three frames. test1",
	.insns = {
	/* main: calls A and B; B calls A again, so the deepest chain
	 * is main -> B -> A
	 */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* A */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
	BPF_EXIT_INSN(),
	/* B */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	/* stack_main=32, stack_A=256, stack_B=64
	 * and max(main+A, main+A+B) < 512
	 */
	.result = ACCEPT,
},
{
	"calls: stack depth check using three frames. test2",
	.insns = {
	/* main: same call graph as test1 with the large frame moved
	 * from A to B; total depth still fits under the limit
	 */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* A */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
	BPF_EXIT_INSN(),
	/* B */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	/* stack_main=32, stack_A=64, stack_B=256
	 * and max(main+A, main+A+B) < 512
	 */
	.result = ACCEPT,
},
{
	"calls: stack depth check using three frames. test3",
	.insns = {
	/* main: larger per-frame usage than test1/test2 so that the
	 * deepest chain main -> B -> A busts the combined limit
	 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
	BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
	BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* A */
	BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
	BPF_EXIT_INSN(),
	BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, -3),
	/* B */
	BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	/* stack_main=64, stack_A=224, stack_B=256
	 * and max(main+A, main+A+B) > 512
	 */
	.errstr = "combined stack",
	.result = REJECT,
},
{
	"calls: stack depth check using three frames. test4",
	/* void main(void) {
	 *   func1(0);
	 *   func1(1);
	 *   func2(1);
	 * }
	 * void func1(int alloc_or_recurse) {
	 *   if (alloc_or_recurse) {
	 *     frame_pointer[-300] = 1;
	 *   } else {
	 *     func2(alloc_or_recurse);
	 *   }
	 * }
	 * void func2(int alloc_or_recurse) {
	 *   if (alloc_or_recurse) {
	 *     frame_pointer[-300] = 1;
	 *   }
	 * }
	 * Both func1 and func2 can use ~300 bytes of stack, so any
	 * chain through both of them exceeds the combined stack limit.
	 */
	.insns = {
	/* main */
	BPF_MOV64_IMM(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
	BPF_MOV64_IMM(BPF_REG_1, 1),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
	BPF_MOV64_IMM(BPF_REG_1, 1),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* A */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
	BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
	BPF_EXIT_INSN(),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
	BPF_EXIT_INSN(),
	/* B */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
	BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.result = REJECT,
	.errstr = "combined stack",
},
{
	"calls: stack depth check using three frames. test5",
	.insns = {
	/* linear chain of 8 nested calls (main -> A -> ... -> H);
	 * the verifier rejects it for exceeding the call depth limit
	 */
	/* main */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
	BPF_EXIT_INSN(),
	/* A */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
	BPF_EXIT_INSN(),
	/* B */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
	BPF_EXIT_INSN(),
	/* C */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
	BPF_EXIT_INSN(),
	/* D */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
	BPF_EXIT_INSN(),
	/* E */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
	BPF_EXIT_INSN(),
	/* F */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
	BPF_EXIT_INSN(),
	/* G */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
	BPF_EXIT_INSN(),
	/* H */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.errstr = "call stack",
	.result = REJECT,
},
{
	"calls: stack depth check in dead code",
	.insns = {
	/* the deep call chain hangs off a branch that is dead at
	 * runtime (r1 == 0), yet the depth check still rejects it
	 */
	/* main */
	BPF_MOV64_IMM(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
	BPF_EXIT_INSN(),
	/* A */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2), /* call B */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* B */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
	BPF_EXIT_INSN(),
	/* C */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
	BPF_EXIT_INSN(),
	/* D */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
	BPF_EXIT_INSN(),
	/* E */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
	BPF_EXIT_INSN(),
	/* F */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
	BPF_EXIT_INSN(),
	/* G */
	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
	BPF_EXIT_INSN(),
	/* H */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.errstr = "call stack",
	.result = REJECT,
},
{
	"calls: spill into caller stack frame",
	.insns = {
	/* main: pass &fp[-8] to the subprog */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	/* subprog: spilling a stack pointer (r1) into the caller's
	 * stack frame is rejected
	 */
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.errstr = "cannot spill",
	.result = REJECT,
},
{
	"calls: write into caller stack frame",
	.insns = {
	/* main: pass &fp[-8] to the subprog, then read back the
	 * scalar 42 the subprog stored there
	 */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
	BPF_EXIT_INSN(),
	/* subprog: writing a scalar into the caller's stack is fine */
	BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.result = ACCEPT,
	.retval = 42,
},
{
	"calls: write into callee stack frame",
	.insns = {
	/* main: try to dereference a pointer into the (dead) callee
	 * stack frame returned by the subprog
	 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
	BPF_EXIT_INSN(),
	/* subprog: returning a pointer to its own stack is rejected */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.errstr = "cannot return stack pointer",
	.result = REJECT,
},
{
	"calls: two calls with stack write and void return",
	.insns = {
	/* main prog: pass &fp[-8] (r1) and &fp[-16] (r2) down the
	 * call chain; subprog 2 writes through those pointers
	 */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),

	/* subprog 2 */
	/* write into stack frame of main prog */
	BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
	BPF_EXIT_INSN(), /* void return */
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.result = ACCEPT,
},
{
	"calls: ambiguous return value",
	.insns = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_EXIT_INSN(),
	/* subprog: on the branch-taken path it exits without ever
	 * setting r0, so the caller's read of r0 must be rejected
	 */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "allowed for",
	.result_unpriv = REJECT,
	.errstr = "R0 !read_ok",
	.result = REJECT,
},
{
	"calls: two calls that return map_value",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),

	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	/* fetch second map_value_ptr from the stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	/* call 3rd function twice */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* first time with fp-8 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
	/* second time with fp-16 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),

	/* subprog 2 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	/* lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	/* write map_value_ptr into stack frame of main prog */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(), /* return 0 */
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.fixup_map_hash_8b = { 23 },
	.result = ACCEPT,
},
{
	"calls: two calls that return map_value with bool condition",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	/* call 3rd function twice; subprog 2 returns 1 only when the
	 * map_value_ptr it stored is non-NULL, so checking r0 == 1
	 * before dereferencing is correct
	 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* first time with fp-8 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
	/* second time with fp-16 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
	/* fetch second map_value_ptr from the stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),

	/* subprog 2 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	/* lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(), /* return 0 */
	/* write map_value_ptr into stack frame of main prog */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(), /* return 1 */
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.fixup_map_hash_8b = { 23 },
	.result = ACCEPT,
},
{
	"calls: two calls that return map_value with incorrect bool check",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	/* call 3rd function twice */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* first time with fp-8 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
	/* second time with fp-16 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	/* the check below is inverted vs the bool-condition test: it
	 * dereferences fp-16 when subprog 2 returned 0, i.e. when no
	 * map_value_ptr was stored there
	 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	/* fetch second map_value_ptr from the stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),

	/* subprog 2 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	/* lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(), /* return 0 */
	/* write map_value_ptr into stack frame of main prog */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(), /* return 1 */
	},
	.prog_type = BPF_PROG_TYPE_XDP,
	.fixup_map_hash_8b = { 23 },
	.result = REJECT,
	.errstr = "invalid read from stack R7 off=-16 size=8",
},
{
	"calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* 1st lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	/* write map_value_ptr into stack frame of main prog at fp-8 */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_8, 1),

	/* 2nd lookup from map */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
		     BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_9, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	/* write map_value_ptr into stack frame of main prog at fp-16 */
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_9, 1),

	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),  /* 34 */
	BPF_EXIT_INSN(),

	/* subprog 2 */
	/* if arg2 == 1 do *arg1 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),

	/* if arg4 == 1 do *arg3 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
	/* write into map value, at offset 2 of an 8-byte value: an
	 * 8-byte store there is out of bounds, hence REJECT below
	 */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_map_hash_8b = { 12, 22 },
	.result = REJECT,
	.errstr = "invalid access to map value, value_size=8 off=2 size=8",
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* 1st lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	/* write map_value_ptr into stack frame of main prog at fp-8 */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_8, 1),

	/* 2nd lookup from map */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
		     BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_9, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	/* write map_value_ptr into stack frame of main prog at fp-16 */
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_9, 1),

	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),  /* 34 */
	BPF_EXIT_INSN(),

	/* subprog 2 */
	/* if arg2 == 1 do *arg1 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),

	/* if arg4 == 1 do *arg3 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
	/* write into map value; unlike test1 this store is at offset 0
	 * and in bounds, so the program is accepted
	 */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_map_hash_8b = { 12, 22 },
	.result = ACCEPT,
},
{
	"calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
	.insns = {
	/* same flow as test1, but built with jumps instead of calls;
	 * the out-of-bounds store at offset 2 must still be caught
	 */
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* 1st lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	/* write map_value_ptr into stack frame of main prog at fp-8 */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_8, 1),

	/* 2nd lookup from map */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_9, 0),  // 26
	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
	/* write map_value_ptr into stack frame of main prog at fp-16 */
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_9, 1),

	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), // 30
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), // 34
	BPF_JMP_IMM(BPF_JA, 0, 0, -30),

	/* subprog 2 */
	/* if arg2 == 1 do *arg1 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),

	/* if arg4 == 1 do *arg3 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, -8),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_map_hash_8b = { 12, 22 },
	.result = REJECT,
	.errstr = "invalid access to map value, value_size=8 off=2 size=8",
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"calls: two calls that receive map_value_ptr_or_null via arg. test1",
	.insns = {
	/* the raw map_value_ptr_or_null is spilled to the caller's
	 * stack before the NULL check; r8/r9 track whether the
	 * corresponding slot holds a valid pointer
	 */
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* 1st lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_8, 1),

	/* 2nd lookup from map */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	/* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_9, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_9, 1),

	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),

	/* subprog 2 */
	/* if arg2 == 1 do *arg1 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),

	/* if arg4 == 1 do *arg3 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_map_hash_8b = { 12, 22 },
	.result = ACCEPT,
},
{
	"calls: two calls that receive map_value_ptr_or_null via arg. test2",
	.insns = {
	/* main prog */
	/* pass fp-16, fp-8 into a function */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
	/* 1st lookup from map */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_8, 1),

	/* 2nd lookup from map */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	/* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_9, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_9, 1),

	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),

	/* subprog 2 */
	/* if arg2 == 1 do *arg1 = 0 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),

	/* if arg4 == 0 do *arg3 = 0
	 * wrong polarity vs test1: arg4 == 0 means the fp-16 slot
	 * holds a NULL lookup result, so the dereference is rejected
	 */
	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2),
	/* fetch map_value_ptr from the stack of this function */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
	/* write into map value */
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.fixup_map_hash_8b = { 12, 22 },
	.result = REJECT,
	.errstr = "R0 invalid mem access 'scalar'",
},
{
	"calls: pkt_ptr spill into caller stack",
	.insns = {
	/* main: pass &fp[-8] in r4 for the subprog to spill into */
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	/* spill unchecked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
	/* now the pkt range is verified, read pkt_ptr from stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.retval = POINTER_VALUE,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	/* Same subprog as above, but here the caller also loads fp[-8]
	 * after the call and writes through it.  The spill marking
	 * survives the call, yet on the path where the subprog's range
	 * check failed the slot still holds an unchecked pkt_ptr, so
	 * the caller's packet write must be REJECTed.
	 */
	"calls: pkt_ptr spill into caller stack 2",
	.insns = {
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	/* Marking is still kept, but not in all cases safe. */
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	/* spill unchecked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
	/* now the pkt range is verified, read pkt_ptr from stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "invalid access to packet",
	.result = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	/* Subprog reports via r5 (returned in r0) whether the pkt range
	 * check succeeded; the caller uses the spilled pointer only when
	 * r0 != 0, i.e. on the range-verified path, so this is ACCEPTed.
	 */
	"calls: pkt_ptr spill into caller stack 3",
	.insns = {
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
	/* Marking is still kept and safe here. */
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	/* spill unchecked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
	BPF_MOV64_IMM(BPF_REG_5, 1),
	/* now the pkt range is verified, read pkt_ptr from stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.retval = 1,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	/* Like test 3, but the subprog writes through the pkt_ptr itself
	 * (without reading it back from the stack); the caller re-reads
	 * fp[-8] only when r0 != 0, checking that the range-check
	 * marking propagated to the caller's stack slot.
	 */
	"calls: pkt_ptr spill into caller stack 4",
	.insns = {
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
	/* Check marking propagated. */
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	/* spill unchecked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
	BPF_MOV64_IMM(BPF_REG_5, 1),
	/* don't read back pkt_ptr from stack here */
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.retval = 1,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	/* Caller pre-stores the ctx pointer at fp[-8]; the subprog
	 * conditionally overwrites the same slot with a checked
	 * pkt_ptr.  The same spill insn would then be used with two
	 * different pointer types depending on the path, which the
	 * verifier rejects.
	 */
	"calls: pkt_ptr spill into caller stack 5",
	.insns = {
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
	/* spill checked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_5, 1),
	/* don't read back pkt_ptr from stack here */
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "same insn cannot be used with different",
	.result = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	/* Caller pre-stores data_end at fp[-8]; the subprog may or may
	 * not overwrite the slot with a checked pkt_ptr.  After the
	 * call the slot's contents depend on the path taken, so the
	 * caller's load into r4 and subsequent dereference must be
	 * REJECTed.
	 */
	"calls: pkt_ptr spill into caller stack 6",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
	/* spill checked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_5, 1),
	/* don't read back pkt_ptr from stack here */
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "R4 invalid mem access",
	.result = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	/* Like test 6, but the caller pre-stores the scalar 0 instead of
	 * data_end.  fp[-8] is either scalar 0 or a checked pkt_ptr
	 * after the call, so dereferencing r4 loaded from it must be
	 * REJECTed.
	 */
	"calls: pkt_ptr spill into caller stack 7",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
	/* spill checked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_5, 1),
	/* don't read back pkt_ptr from stack here */
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "R4 invalid mem access",
	.result = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	/* Caller spills an already range-checked pkt_ptr at fp[-8]; the
	 * subprog may overwrite the slot, but only with another checked
	 * pkt_ptr.  Either way the slot holds a valid pkt_ptr after the
	 * call, so the caller's read-back and dereference is ACCEPTed.
	 */
	"calls: pkt_ptr spill into caller stack 8",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
	/* spill checked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_5, 1),
	/* don't read back pkt_ptr from stack here */
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	/* Like test 8, except the subprog spills its pkt_ptr BEFORE the
	 * range check.  On the check-failed path the caller's slot is
	 * left holding an unchecked pkt_ptr, so the caller's dereference
	 * must be REJECTed.
	 */
	"calls: pkt_ptr spill into caller stack 9",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_MOV64_IMM(BPF_REG_5, 0),
	/* spill unchecked pkt_ptr into stack of caller */
	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
	BPF_MOV64_IMM(BPF_REG_5, 1),
	/* don't read back pkt_ptr from stack here */
	/* write 4 bytes into packet */
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "invalid access to packet",
	.result = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	/* Caller zero-inits fp[-8] and passes its address; the subprog
	 * either leaves the slot untouched (ctx == 0) or stores a
	 * map_value_or_null there.  The caller's NULL check covers both
	 * outcomes, so the store into the map value is ACCEPTed.
	 */
	"calls: caller stack init to zero or map_value_or_null",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	/* fetch map_value_or_null or const_zero from stack */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
	/* store into map_value */
	BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),

	/* subprog 1 */
	/* if (ctx == 0) return; */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
	/* else bpf_map_lookup() and *(fp - 8) = r0 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 13 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_XDP,
},
{
	"calls: stack init to zero and pruning",
	.insns = {
	/* first make allocated_stack 16 byte */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
	/* now fork the execution such that the false branch
	 * of JGT insn will be verified second and it skips zero
	 * init of fp-8 stack slot. If stack liveness marking
	 * is missing live_read marks from call map_lookup
	 * processing then pruning will incorrectly assume
	 * that fp-8 stack slot was unused in the fall-through
	 * branch and will accept the program incorrectly
	 */
	BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
	BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 2, 2),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 7 },
	.errstr_unpriv = "invalid indirect read from stack R2 off -8+0 size 8",
	.result_unpriv = REJECT,
	/* in privileged mode reads from uninitialized stack locations are permitted */
	.result = ACCEPT,
},
{
	/* Exercise a ctx (r1) memory read as the very first insn of a
	 * subprog, reached via two different call sites.
	 */
	"calls: ctx read at start of subprog",
	.insns = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
	BPF_JMP_REG(BPF_JSGT, BPF_REG_0, BPF_REG_0, 0),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_EXIT_INSN(),
	BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
	.errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
	.result_unpriv = REJECT,
	.result = ACCEPT,
},
{
	/* The load through r1 after the call must be flagged -- r1 is a
	 * caller-saved register clobbered by the call -- and cross-frame
	 * pruning must not hide the path that reaches it ("!read_ok").
	 */
	"calls: cross frame pruning",
	.insns = {
	/* r8 = !!random();
	 * call pruner()
	 * if (r8)
	 *     do something bad;
	 */
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_8, 1),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
	.errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
	.errstr = "!read_ok",
	.result = REJECT,
},
{
	/* Variant of the previous test: checks that liveness marks
	 * propagate across the call frame so the invalid load through
	 * r2 after the call is still caught ("!read_ok").
	 */
	"calls: cross frame pruning - liveness propagation",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
	BPF_MOV64_IMM(BPF_REG_8, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_8, 1),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
	BPF_MOV64_IMM(BPF_REG_9, 0),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_MOV64_IMM(BPF_REG_9, 1),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
	.errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
	.errstr = "!read_ok",
	.result = REJECT,
},
2311 /* Make sure that verifier.c:states_equal() considers IDs from all
2312  * frames when building 'idmap' for check_ids().
2313  */
{
	"calls: check_ids() across call boundary",
	.insns = {
	/* Function main() */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	/* fp[-24] = map_lookup_elem(...) ; get a MAP_VALUE_PTR_OR_NULL with some ID */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1,
		      0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_0, -24),
	/* fp[-32] = map_lookup_elem(...) ; get a MAP_VALUE_PTR_OR_NULL with some ID */
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1,
		      0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_0, -32),
	/* call foo(&fp[-24], &fp[-32])   ; both arguments have IDs in the current
	 *                                ; stack frame
	 */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_FP),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -24),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_FP),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
	BPF_CALL_REL(2),
	/* exit 0 */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* Function foo()
	 *
	 * r9 = &frame[0].fp[-24]  ; save arguments in the callee saved registers,
	 * r8 = &frame[0].fp[-32]  ; arguments are pointers to pointers to map value
	 */
	BPF_MOV64_REG(BPF_REG_9, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_8, BPF_REG_2),
	/* r7 = ktime_get_ns() */
	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
	/* r6 = ktime_get_ns() */
	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
	/* if r6 > r7 goto +1      ; no new information about the state is derived from
	 *                         ; this check, thus produced verifier states differ
	 *                         ; only in 'insn_idx'
	 * r9 = r8
	 */
	BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 1),
	BPF_MOV64_REG(BPF_REG_9, BPF_REG_8),
	/* r9 = *r9                ; verifier gets to this point via two paths:
	 *                         ; (I) one including r9 = r8, verified first;
	 *                         ; (II) one excluding r9 = r8, verified next.
	 *                         ; After load of *r9 to r9 the frame[0].fp[-24].id == r9.id.
	 *                         ; Suppose that checkpoint is created here via path (I).
	 *                         ; When verifying via (II) the r9.id must be compared against
	 *                         ; frame[0].fp[-24].id, otherwise (I) and (II) would be
	 *                         ; incorrectly deemed equivalent.
	 * if r9 == 0 goto <exit>
	 */
	BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_9, 0),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 0, 1),
	/* r8 = *r8                ; read map value via r8, this is not safe
	 * r0 = *r8                ; because r8 might be not equal to r9.
	 */
	BPF_LDX_MEM(BPF_DW, BPF_REG_8, BPF_REG_8, 0),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_8, 0),
	/* exit 0 */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.flags = BPF_F_TEST_STATE_FREQ,
	.fixup_map_hash_8b = { 3, 9 },
	.result = REJECT,
	.errstr = "R8 invalid mem access 'map_value_or_null'",
	.result_unpriv = REJECT,
	.errstr_unpriv = "",
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
2393