xref: /linux/tools/testing/selftests/bpf/test_verifier.c (revision b7d3826c2ed6c3e626e7ae796c5df2c0d2551c6a)
1 /*
2  * Testsuite for eBPF verifier
3  *
4  * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
5  * Copyright (c) 2017 Facebook
6  * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
7  *
8  * This program is free software; you can redistribute it and/or
9  * modify it under the terms of version 2 of the GNU General Public
10  * License as published by the Free Software Foundation.
11  */
12 
13 #include <endian.h>
14 #include <asm/types.h>
15 #include <linux/types.h>
16 #include <stdint.h>
17 #include <stdio.h>
18 #include <stdlib.h>
19 #include <unistd.h>
20 #include <errno.h>
21 #include <string.h>
22 #include <stddef.h>
23 #include <stdbool.h>
24 #include <sched.h>
25 #include <limits.h>
26 
27 #include <sys/capability.h>
28 
29 #include <linux/unistd.h>
30 #include <linux/filter.h>
31 #include <linux/bpf_perf_event.h>
32 #include <linux/bpf.h>
33 #include <linux/if_ether.h>
34 
35 #include <bpf/bpf.h>
36 
37 #ifdef HAVE_GENHDR
38 # include "autoconf.h"
39 #else
40 # if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
41 #  define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
42 # endif
43 #endif
44 #include "bpf_rlimit.h"
45 #include "bpf_rand.h"
46 #include "bpf_util.h"
47 #include "../../../include/linux/filter.h"
48 
49 #define MAX_INSNS	BPF_MAXINSNS
50 #define MAX_FIXUPS	8
51 #define MAX_NR_MAPS	13
52 #define POINTER_VALUE	0xcafe4all
53 #define TEST_DATA_LEN	64
54 
55 #define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS	(1 << 0)
56 #define F_LOAD_WITH_STRICT_ALIGNMENT		(1 << 1)
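/* Flag semantics, as exercised by the tests below: F_LOAD_WITH_STRICT_ALIGNMENT
 * loads the program with BPF_F_STRICT_ALIGNMENT so misaligned accesses are
 * rejected even where unaligned access is normally efficient, while
 * F_NEEDS_EFFICIENT_UNALIGNED_ACCESS marks tests whose expected verdict only
 * holds when CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is set.
 */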
57 
58 #define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
59 static bool unpriv_disabled = false;
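/* Mirrors the kernel.unprivileged_bpf_disabled sysctl named above; presumably
 * consulted later in this file to skip the unprivileged test passes when the
 * sysctl is set.
 */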
60 
61 struct bpf_test {
62 	const char *descr;
63 	struct bpf_insn	insns[MAX_INSNS];
64 	int fixup_map_hash_8b[MAX_FIXUPS];
65 	int fixup_map_hash_48b[MAX_FIXUPS];
66 	int fixup_map_hash_16b[MAX_FIXUPS];
67 	int fixup_map_array_48b[MAX_FIXUPS];
68 	int fixup_map_sockmap[MAX_FIXUPS];
69 	int fixup_map_sockhash[MAX_FIXUPS];
70 	int fixup_map_xskmap[MAX_FIXUPS];
71 	int fixup_map_stacktrace[MAX_FIXUPS];
72 	int fixup_prog1[MAX_FIXUPS];
73 	int fixup_prog2[MAX_FIXUPS];
74 	int fixup_map_in_map[MAX_FIXUPS];
75 	int fixup_cgroup_storage[MAX_FIXUPS];
76 	int fixup_percpu_cgroup_storage[MAX_FIXUPS];
77 	const char *errstr;
78 	const char *errstr_unpriv;
79 	uint32_t retval;
80 	enum {
81 		UNDEF,
82 		ACCEPT,
83 		REJECT
84 	} result, result_unpriv;
85 	enum bpf_prog_type prog_type;
86 	uint8_t flags;
87 	__u8 data[TEST_DATA_LEN];
88 	void (*fill_helper)(struct bpf_test *self);
89 };
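/* The fixup_* arrays hold instruction indices (typically the first half of a
 * BPF_LD_MAP_FD pair) whose imm field the test runner patches with the fd of a
 * map created for that test; fill_helper, when set, generates the instructions
 * programmatically instead of taking them from the static array.
 *
 * Illustrative sketch of a minimal entry (not part of the suite):
 *
 *	{
 *		"example: return a constant",
 *		.insns = {
 *			BPF_MOV64_IMM(BPF_REG_0, 7),
 *			BPF_EXIT_INSN(),
 *		},
 *		.result = ACCEPT,
 *		.retval = 7,
 *	},
 */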
90 
91 /* Note we want this to be 64-bit aligned so that the end of our array is
92  * actually the end of the structure.
93  */
94 #define MAX_ENTRIES 11
95 
96 struct test_val {
97 	unsigned int index;
98 	int foo[MAX_ENTRIES];
99 };
100 
101 struct other_val {
102 	long long foo;
103 	long long bar;
104 };
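/* sizeof(struct test_val) is 48 bytes (4 + 11 * 4) and sizeof(struct other_val)
 * is 16, matching the value sizes implied by the *_48b and *_16b map fixups in
 * struct bpf_test above.
 */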
105 
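/* Fill helper: builds a program of BPF_MAXINSNS instructions that interleaves
 * LD_ABS byte loads with skb_vlan_push/skb_vlan_pop helper calls (bailing out
 * early through forward jumps), stressing verification of near-maximum-size
 * programs that mix LD_ABS with helper calls.
 */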
106 static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
107 {
108 	/* test: {skb->data[0], vlan_push} x PUSH_CNT + {skb->data[0], vlan_pop} x PUSH_CNT, repeated 5 times */
109 #define PUSH_CNT 51
110 	unsigned int len = BPF_MAXINSNS;
111 	struct bpf_insn *insn = self->insns;
112 	int i = 0, j, k = 0;
113 
114 	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
115 loop:
116 	for (j = 0; j < PUSH_CNT; j++) {
117 		insn[i++] = BPF_LD_ABS(BPF_B, 0);
118 		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2);
119 		i++;
120 		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
121 		insn[i++] = BPF_MOV64_IMM(BPF_REG_2, 1);
122 		insn[i++] = BPF_MOV64_IMM(BPF_REG_3, 2);
123 		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
124 					 BPF_FUNC_skb_vlan_push),
125 		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2);
126 		i++;
127 	}
128 
129 	for (j = 0; j < PUSH_CNT; j++) {
130 		insn[i++] = BPF_LD_ABS(BPF_B, 0);
131 		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2);
132 		i++;
133 		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
134 		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
135 					 BPF_FUNC_skb_vlan_pop),
136 		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2);
137 		i++;
138 	}
139 	if (++k < 5)
140 		goto loop;
141 
142 	for (; i < len - 1; i++)
143 		insn[i] = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 0xbef);
144 	insn[len - 1] = BPF_EXIT_INSN();
145 }
146 
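/* Fill helper: a single conditional jump over a long run of LD_ABS loads,
 * padding the program out to BPF_MAXINSNS instructions.
 */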
147 static void bpf_fill_jump_around_ld_abs(struct bpf_test *self)
148 {
149 	struct bpf_insn *insn = self->insns;
150 	unsigned int len = BPF_MAXINSNS;
151 	int i = 0;
152 
153 	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
154 	insn[i++] = BPF_LD_ABS(BPF_B, 0);
155 	insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 10, len - i - 2);
156 	i++;
157 	while (i < len - 1)
158 		insn[i++] = BPF_LD_ABS(BPF_B, 1);
159 	insn[i] = BPF_EXIT_INSN();
160 }
161 
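/* Fill helper: emits BPF_LD_IMM64 loads of semi-random values XORed into R0
 * while i < self->retval (the test entry is expected to set retval to the
 * desired body length), appends a fold of the 64-bit result down to 32 bits,
 * and finally overwrites self->retval with the expected checksum; retval is
 * thus both input and expected output.
 */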
162 static void bpf_fill_rand_ld_dw(struct bpf_test *self)
163 {
164 	struct bpf_insn *insn = self->insns;
165 	uint64_t res = 0;
166 	int i = 0;
167 
168 	insn[i++] = BPF_MOV32_IMM(BPF_REG_0, 0);
169 	while (i < self->retval) {
170 		uint64_t val = bpf_semi_rand_get();
171 		struct bpf_insn tmp[2] = { BPF_LD_IMM64(BPF_REG_1, val) };
172 
173 		res ^= val;
174 		insn[i++] = tmp[0];
175 		insn[i++] = tmp[1];
176 		insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
177 	}
178 	insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
179 	insn[i++] = BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32);
180 	insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
181 	insn[i] = BPF_EXIT_INSN();
182 	res ^= (res >> 32);
183 	self->retval = (uint32_t)res;
184 }
185 
186 /* BPF_SK_LOOKUP expands to 13 instructions; account for them when computing map fixup indices */
187 #define BPF_SK_LOOKUP							\
188 	/* struct bpf_sock_tuple tuple = {} */				\
189 	BPF_MOV64_IMM(BPF_REG_2, 0),					\
190 	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8),			\
191 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -16),		\
192 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -24),		\
193 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -32),		\
194 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -40),		\
195 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -48),		\
196 	/* sk = sk_lookup_tcp(ctx, &tuple, sizeof tuple, 0, 0) */	\
197 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),				\
198 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),				\
199 	BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)),	\
200 	BPF_MOV64_IMM(BPF_REG_4, 0),					\
201 	BPF_MOV64_IMM(BPF_REG_5, 0),					\
202 	BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp)
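/* Illustrative sketch of the typical usage pattern (a socket reference
 * acquired by sk_lookup_tcp must be released before exiting):
 *
 *	BPF_SK_LOOKUP,
 *	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
 *	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
 *	BPF_EMIT_CALL(BPF_FUNC_sk_release),
 *	BPF_EXIT_INSN(),
 */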
203 
204 static struct bpf_test tests[] = {
205 	{
206 		"add+sub+mul",
207 		.insns = {
208 			BPF_MOV64_IMM(BPF_REG_1, 1),
209 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
210 			BPF_MOV64_IMM(BPF_REG_2, 3),
211 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
212 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
213 			BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
214 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
215 			BPF_EXIT_INSN(),
216 		},
217 		.result = ACCEPT,
218 		.retval = -3,
219 	},
220 	{
221 		"DIV32 by 0, zero check 1",
222 		.insns = {
223 			BPF_MOV32_IMM(BPF_REG_0, 42),
224 			BPF_MOV32_IMM(BPF_REG_1, 0),
225 			BPF_MOV32_IMM(BPF_REG_2, 1),
226 			BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
227 			BPF_EXIT_INSN(),
228 		},
229 		.result = ACCEPT,
230 		.retval = 42,
231 	},
232 	{
233 		"DIV32 by 0, zero check 2",
234 		.insns = {
235 			BPF_MOV32_IMM(BPF_REG_0, 42),
236 			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
237 			BPF_MOV32_IMM(BPF_REG_2, 1),
238 			BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
239 			BPF_EXIT_INSN(),
240 		},
241 		.result = ACCEPT,
242 		.retval = 42,
243 	},
244 	{
245 		"DIV64 by 0, zero check",
246 		.insns = {
247 			BPF_MOV32_IMM(BPF_REG_0, 42),
248 			BPF_MOV32_IMM(BPF_REG_1, 0),
249 			BPF_MOV32_IMM(BPF_REG_2, 1),
250 			BPF_ALU64_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
251 			BPF_EXIT_INSN(),
252 		},
253 		.result = ACCEPT,
254 		.retval = 42,
255 	},
256 	{
257 		"MOD32 by 0, zero check 1",
258 		.insns = {
259 			BPF_MOV32_IMM(BPF_REG_0, 42),
260 			BPF_MOV32_IMM(BPF_REG_1, 0),
261 			BPF_MOV32_IMM(BPF_REG_2, 1),
262 			BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
263 			BPF_EXIT_INSN(),
264 		},
265 		.result = ACCEPT,
266 		.retval = 42,
267 	},
268 	{
269 		"MOD32 by 0, zero check 2",
270 		.insns = {
271 			BPF_MOV32_IMM(BPF_REG_0, 42),
272 			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
273 			BPF_MOV32_IMM(BPF_REG_2, 1),
274 			BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
275 			BPF_EXIT_INSN(),
276 		},
277 		.result = ACCEPT,
278 		.retval = 42,
279 	},
280 	{
281 		"MOD64 by 0, zero check",
282 		.insns = {
283 			BPF_MOV32_IMM(BPF_REG_0, 42),
284 			BPF_MOV32_IMM(BPF_REG_1, 0),
285 			BPF_MOV32_IMM(BPF_REG_2, 1),
286 			BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
287 			BPF_EXIT_INSN(),
288 		},
289 		.result = ACCEPT,
290 		.retval = 42,
291 	},
292 	{
293 		"DIV32 by 0, zero check ok, cls",
294 		.insns = {
295 			BPF_MOV32_IMM(BPF_REG_0, 42),
296 			BPF_MOV32_IMM(BPF_REG_1, 2),
297 			BPF_MOV32_IMM(BPF_REG_2, 16),
298 			BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
299 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
300 			BPF_EXIT_INSN(),
301 		},
302 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
303 		.result = ACCEPT,
304 		.retval = 8,
305 	},
306 	{
307 		"DIV32 by 0, zero check 1, cls",
308 		.insns = {
309 			BPF_MOV32_IMM(BPF_REG_1, 0),
310 			BPF_MOV32_IMM(BPF_REG_0, 1),
311 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
312 			BPF_EXIT_INSN(),
313 		},
314 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
315 		.result = ACCEPT,
316 		.retval = 0,
317 	},
318 	{
319 		"DIV32 by 0, zero check 2, cls",
320 		.insns = {
321 			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
322 			BPF_MOV32_IMM(BPF_REG_0, 1),
323 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
324 			BPF_EXIT_INSN(),
325 		},
326 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
327 		.result = ACCEPT,
328 		.retval = 0,
329 	},
330 	{
331 		"DIV64 by 0, zero check, cls",
332 		.insns = {
333 			BPF_MOV32_IMM(BPF_REG_1, 0),
334 			BPF_MOV32_IMM(BPF_REG_0, 1),
335 			BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
336 			BPF_EXIT_INSN(),
337 		},
338 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
339 		.result = ACCEPT,
340 		.retval = 0,
341 	},
342 	{
343 		"MOD32 by 0, zero check ok, cls",
344 		.insns = {
345 			BPF_MOV32_IMM(BPF_REG_0, 42),
346 			BPF_MOV32_IMM(BPF_REG_1, 3),
347 			BPF_MOV32_IMM(BPF_REG_2, 5),
348 			BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
349 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
350 			BPF_EXIT_INSN(),
351 		},
352 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
353 		.result = ACCEPT,
354 		.retval = 2,
355 	},
356 	{
357 		"MOD32 by 0, zero check 1, cls",
358 		.insns = {
359 			BPF_MOV32_IMM(BPF_REG_1, 0),
360 			BPF_MOV32_IMM(BPF_REG_0, 1),
361 			BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
362 			BPF_EXIT_INSN(),
363 		},
364 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
365 		.result = ACCEPT,
366 		.retval = 1,
367 	},
368 	{
369 		"MOD32 by 0, zero check 2, cls",
370 		.insns = {
371 			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
372 			BPF_MOV32_IMM(BPF_REG_0, 1),
373 			BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
374 			BPF_EXIT_INSN(),
375 		},
376 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
377 		.result = ACCEPT,
378 		.retval = 1,
379 	},
380 	{
381 		"MOD64 by 0, zero check 1, cls",
382 		.insns = {
383 			BPF_MOV32_IMM(BPF_REG_1, 0),
384 			BPF_MOV32_IMM(BPF_REG_0, 2),
385 			BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
386 			BPF_EXIT_INSN(),
387 		},
388 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
389 		.result = ACCEPT,
390 		.retval = 2,
391 	},
392 	{
393 		"MOD64 by 0, zero check 2, cls",
394 		.insns = {
395 			BPF_MOV32_IMM(BPF_REG_1, 0),
396 			BPF_MOV32_IMM(BPF_REG_0, -1),
397 			BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
398 			BPF_EXIT_INSN(),
399 		},
400 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
401 		.result = ACCEPT,
402 		.retval = -1,
403 	},
404 	/* Just make sure that JITs use udiv/umod, as otherwise we would get
405 	 * an exception from INT_MIN/-1 overflow, just as with a division
406 	 * by zero.
407 	 */
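	/* On x86, for instance, a signed idiv of INT_MIN by -1 raises the same
	 * #DE fault as a division by zero, hence the need for the unsigned
	 * forms.
	 */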
408 	{
409 		"DIV32 overflow, check 1",
410 		.insns = {
411 			BPF_MOV32_IMM(BPF_REG_1, -1),
412 			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
413 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
414 			BPF_EXIT_INSN(),
415 		},
416 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
417 		.result = ACCEPT,
418 		.retval = 0,
419 	},
420 	{
421 		"DIV32 overflow, check 2",
422 		.insns = {
423 			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
424 			BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, -1),
425 			BPF_EXIT_INSN(),
426 		},
427 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
428 		.result = ACCEPT,
429 		.retval = 0,
430 	},
431 	{
432 		"DIV64 overflow, check 1",
433 		.insns = {
434 			BPF_MOV64_IMM(BPF_REG_1, -1),
435 			BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
436 			BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
437 			BPF_EXIT_INSN(),
438 		},
439 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
440 		.result = ACCEPT,
441 		.retval = 0,
442 	},
443 	{
444 		"DIV64 overflow, check 2",
445 		.insns = {
446 			BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
447 			BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, -1),
448 			BPF_EXIT_INSN(),
449 		},
450 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
451 		.result = ACCEPT,
452 		.retval = 0,
453 	},
454 	{
455 		"MOD32 overflow, check 1",
456 		.insns = {
457 			BPF_MOV32_IMM(BPF_REG_1, -1),
458 			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
459 			BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
460 			BPF_EXIT_INSN(),
461 		},
462 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
463 		.result = ACCEPT,
464 		.retval = INT_MIN,
465 	},
466 	{
467 		"MOD32 overflow, check 2",
468 		.insns = {
469 			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
470 			BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, -1),
471 			BPF_EXIT_INSN(),
472 		},
473 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
474 		.result = ACCEPT,
475 		.retval = INT_MIN,
476 	},
477 	{
478 		"MOD64 overflow, check 1",
479 		.insns = {
480 			BPF_MOV64_IMM(BPF_REG_1, -1),
481 			BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
482 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
483 			BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
484 			BPF_MOV32_IMM(BPF_REG_0, 0),
485 			BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
486 			BPF_MOV32_IMM(BPF_REG_0, 1),
487 			BPF_EXIT_INSN(),
488 		},
489 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
490 		.result = ACCEPT,
491 		.retval = 1,
492 	},
493 	{
494 		"MOD64 overflow, check 2",
495 		.insns = {
496 			BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
497 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
498 			BPF_ALU64_IMM(BPF_MOD, BPF_REG_2, -1),
499 			BPF_MOV32_IMM(BPF_REG_0, 0),
500 			BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
501 			BPF_MOV32_IMM(BPF_REG_0, 1),
502 			BPF_EXIT_INSN(),
503 		},
504 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
505 		.result = ACCEPT,
506 		.retval = 1,
507 	},
508 	{
509 		"xor32 zero extend check",
510 		.insns = {
511 			BPF_MOV32_IMM(BPF_REG_2, -1),
512 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 32),
513 			BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 0xffff),
514 			BPF_ALU32_REG(BPF_XOR, BPF_REG_2, BPF_REG_2),
515 			BPF_MOV32_IMM(BPF_REG_0, 2),
516 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 1),
517 			BPF_MOV32_IMM(BPF_REG_0, 1),
518 			BPF_EXIT_INSN(),
519 		},
520 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
521 		.result = ACCEPT,
522 		.retval = 1,
523 	},
524 	{
525 		"empty prog",
526 		.insns = {
527 		},
528 		.errstr = "unknown opcode 00",
529 		.result = REJECT,
530 	},
531 	{
532 		"only exit insn",
533 		.insns = {
534 			BPF_EXIT_INSN(),
535 		},
536 		.errstr = "R0 !read_ok",
537 		.result = REJECT,
538 	},
539 	{
540 		"unreachable",
541 		.insns = {
542 			BPF_EXIT_INSN(),
543 			BPF_EXIT_INSN(),
544 		},
545 		.errstr = "unreachable",
546 		.result = REJECT,
547 	},
548 	{
549 		"unreachable2",
550 		.insns = {
551 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
552 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
553 			BPF_EXIT_INSN(),
554 		},
555 		.errstr = "unreachable",
556 		.result = REJECT,
557 	},
558 	{
559 		"out of range jump",
560 		.insns = {
561 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
562 			BPF_EXIT_INSN(),
563 		},
564 		.errstr = "jump out of range",
565 		.result = REJECT,
566 	},
567 	{
568 		"out of range jump2",
569 		.insns = {
570 			BPF_JMP_IMM(BPF_JA, 0, 0, -2),
571 			BPF_EXIT_INSN(),
572 		},
573 		.errstr = "jump out of range",
574 		.result = REJECT,
575 	},
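	/* The ld_imm64 tests below exercise the two-slot BPF_LD | BPF_IMM | BPF_DW
	 * encoding: the second (pseudo) slot carries the upper 32 bits in its imm
	 * field and must have a zero opcode and zeroed src/dst/off fields,
	 * otherwise the load is rejected.
	 */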
576 	{
577 		"test1 ld_imm64",
578 		.insns = {
579 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
580 			BPF_LD_IMM64(BPF_REG_0, 0),
581 			BPF_LD_IMM64(BPF_REG_0, 0),
582 			BPF_LD_IMM64(BPF_REG_0, 1),
583 			BPF_LD_IMM64(BPF_REG_0, 1),
584 			BPF_MOV64_IMM(BPF_REG_0, 2),
585 			BPF_EXIT_INSN(),
586 		},
587 		.errstr = "invalid BPF_LD_IMM insn",
588 		.errstr_unpriv = "R1 pointer comparison",
589 		.result = REJECT,
590 	},
591 	{
592 		"test2 ld_imm64",
593 		.insns = {
594 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
595 			BPF_LD_IMM64(BPF_REG_0, 0),
596 			BPF_LD_IMM64(BPF_REG_0, 0),
597 			BPF_LD_IMM64(BPF_REG_0, 1),
598 			BPF_LD_IMM64(BPF_REG_0, 1),
599 			BPF_EXIT_INSN(),
600 		},
601 		.errstr = "invalid BPF_LD_IMM insn",
602 		.errstr_unpriv = "R1 pointer comparison",
603 		.result = REJECT,
604 	},
605 	{
606 		"test3 ld_imm64",
607 		.insns = {
608 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
609 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
610 			BPF_LD_IMM64(BPF_REG_0, 0),
611 			BPF_LD_IMM64(BPF_REG_0, 0),
612 			BPF_LD_IMM64(BPF_REG_0, 1),
613 			BPF_LD_IMM64(BPF_REG_0, 1),
614 			BPF_EXIT_INSN(),
615 		},
616 		.errstr = "invalid bpf_ld_imm64 insn",
617 		.result = REJECT,
618 	},
619 	{
620 		"test4 ld_imm64",
621 		.insns = {
622 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
623 			BPF_EXIT_INSN(),
624 		},
625 		.errstr = "invalid bpf_ld_imm64 insn",
626 		.result = REJECT,
627 	},
628 	{
629 		"test5 ld_imm64",
630 		.insns = {
631 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
632 		},
633 		.errstr = "invalid bpf_ld_imm64 insn",
634 		.result = REJECT,
635 	},
636 	{
637 		"test6 ld_imm64",
638 		.insns = {
639 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
640 			BPF_RAW_INSN(0, 0, 0, 0, 0),
641 			BPF_EXIT_INSN(),
642 		},
643 		.result = ACCEPT,
644 	},
645 	{
646 		"test7 ld_imm64",
647 		.insns = {
648 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
649 			BPF_RAW_INSN(0, 0, 0, 0, 1),
650 			BPF_EXIT_INSN(),
651 		},
652 		.result = ACCEPT,
653 		.retval = 1,
654 	},
655 	{
656 		"test8 ld_imm64",
657 		.insns = {
658 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1),
659 			BPF_RAW_INSN(0, 0, 0, 0, 1),
660 			BPF_EXIT_INSN(),
661 		},
662 		.errstr = "uses reserved fields",
663 		.result = REJECT,
664 	},
665 	{
666 		"test9 ld_imm64",
667 		.insns = {
668 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
669 			BPF_RAW_INSN(0, 0, 0, 1, 1),
670 			BPF_EXIT_INSN(),
671 		},
672 		.errstr = "invalid bpf_ld_imm64 insn",
673 		.result = REJECT,
674 	},
675 	{
676 		"test10 ld_imm64",
677 		.insns = {
678 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
679 			BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1),
680 			BPF_EXIT_INSN(),
681 		},
682 		.errstr = "invalid bpf_ld_imm64 insn",
683 		.result = REJECT,
684 	},
685 	{
686 		"test11 ld_imm64",
687 		.insns = {
688 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
689 			BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
690 			BPF_EXIT_INSN(),
691 		},
692 		.errstr = "invalid bpf_ld_imm64 insn",
693 		.result = REJECT,
694 	},
695 	{
696 		"test12 ld_imm64",
697 		.insns = {
698 			BPF_MOV64_IMM(BPF_REG_1, 0),
699 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
700 			BPF_RAW_INSN(0, 0, 0, 0, 1),
701 			BPF_EXIT_INSN(),
702 		},
703 		.errstr = "not pointing to valid bpf_map",
704 		.result = REJECT,
705 	},
706 	{
707 		"test13 ld_imm64",
708 		.insns = {
709 			BPF_MOV64_IMM(BPF_REG_1, 0),
710 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
711 			BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
712 			BPF_EXIT_INSN(),
713 		},
714 		.errstr = "invalid bpf_ld_imm64 insn",
715 		.result = REJECT,
716 	},
717 	{
718 		"arsh32 on imm",
719 		.insns = {
720 			BPF_MOV64_IMM(BPF_REG_0, 1),
721 			BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 5),
722 			BPF_EXIT_INSN(),
723 		},
724 		.result = REJECT,
725 		.errstr = "unknown opcode c4",
726 	},
727 	{
728 		"arsh32 on reg",
729 		.insns = {
730 			BPF_MOV64_IMM(BPF_REG_0, 1),
731 			BPF_MOV64_IMM(BPF_REG_1, 5),
732 			BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
733 			BPF_EXIT_INSN(),
734 		},
735 		.result = REJECT,
736 		.errstr = "unknown opcode cc",
737 	},
738 	{
739 		"arsh64 on imm",
740 		.insns = {
741 			BPF_MOV64_IMM(BPF_REG_0, 1),
742 			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_0, 5),
743 			BPF_EXIT_INSN(),
744 		},
745 		.result = ACCEPT,
746 	},
747 	{
748 		"arsh64 on reg",
749 		.insns = {
750 			BPF_MOV64_IMM(BPF_REG_0, 1),
751 			BPF_MOV64_IMM(BPF_REG_1, 5),
752 			BPF_ALU64_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
753 			BPF_EXIT_INSN(),
754 		},
755 		.result = ACCEPT,
756 	},
757 	{
758 		"no bpf_exit",
759 		.insns = {
760 			BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
761 		},
762 		.errstr = "not an exit",
763 		.result = REJECT,
764 	},
765 	{
766 		"loop (back-edge)",
767 		.insns = {
768 			BPF_JMP_IMM(BPF_JA, 0, 0, -1),
769 			BPF_EXIT_INSN(),
770 		},
771 		.errstr = "back-edge",
772 		.result = REJECT,
773 	},
774 	{
775 		"loop2 (back-edge)",
776 		.insns = {
777 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
778 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
779 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
780 			BPF_JMP_IMM(BPF_JA, 0, 0, -4),
781 			BPF_EXIT_INSN(),
782 		},
783 		.errstr = "back-edge",
784 		.result = REJECT,
785 	},
786 	{
787 		"conditional loop",
788 		.insns = {
789 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
790 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
791 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
792 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
793 			BPF_EXIT_INSN(),
794 		},
795 		.errstr = "back-edge",
796 		.result = REJECT,
797 	},
798 	{
799 		"read uninitialized register",
800 		.insns = {
801 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
802 			BPF_EXIT_INSN(),
803 		},
804 		.errstr = "R2 !read_ok",
805 		.result = REJECT,
806 	},
807 	{
808 		"read invalid register",
809 		.insns = {
810 			BPF_MOV64_REG(BPF_REG_0, -1),
811 			BPF_EXIT_INSN(),
812 		},
813 		.errstr = "R15 is invalid",
814 		.result = REJECT,
815 	},
816 	{
817 		"program doesn't init R0 before exit",
818 		.insns = {
819 			BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
820 			BPF_EXIT_INSN(),
821 		},
822 		.errstr = "R0 !read_ok",
823 		.result = REJECT,
824 	},
825 	{
826 		"program doesn't init R0 before exit in all branches",
827 		.insns = {
828 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
829 			BPF_MOV64_IMM(BPF_REG_0, 1),
830 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
831 			BPF_EXIT_INSN(),
832 		},
833 		.errstr = "R0 !read_ok",
834 		.errstr_unpriv = "R1 pointer comparison",
835 		.result = REJECT,
836 	},
837 	{
838 		"stack out of bounds",
839 		.insns = {
840 			BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
841 			BPF_EXIT_INSN(),
842 		},
843 		.errstr = "invalid stack",
844 		.result = REJECT,
845 	},
846 	{
847 		"invalid call insn1",
848 		.insns = {
849 			BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
850 			BPF_EXIT_INSN(),
851 		},
852 		.errstr = "unknown opcode 8d",
853 		.result = REJECT,
854 	},
855 	{
856 		"invalid call insn2",
857 		.insns = {
858 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
859 			BPF_EXIT_INSN(),
860 		},
861 		.errstr = "BPF_CALL uses reserved",
862 		.result = REJECT,
863 	},
864 	{
865 		"invalid function call",
866 		.insns = {
867 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
868 			BPF_EXIT_INSN(),
869 		},
870 		.errstr = "invalid func unknown#1234567",
871 		.result = REJECT,
872 	},
873 	{
874 		"uninitialized stack1",
875 		.insns = {
876 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
877 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
878 			BPF_LD_MAP_FD(BPF_REG_1, 0),
879 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
880 				     BPF_FUNC_map_lookup_elem),
881 			BPF_EXIT_INSN(),
882 		},
883 		.fixup_map_hash_8b = { 2 },
884 		.errstr = "invalid indirect read from stack",
885 		.result = REJECT,
886 	},
887 	{
888 		"uninitialized stack2",
889 		.insns = {
890 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
891 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
892 			BPF_EXIT_INSN(),
893 		},
894 		.errstr = "invalid read from stack",
895 		.result = REJECT,
896 	},
897 	{
898 		"invalid fp arithmetic",
899 		/* If this ever gets changed, make sure JITs can deal with it. */
900 		.insns = {
901 			BPF_MOV64_IMM(BPF_REG_0, 0),
902 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
903 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8),
904 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
905 			BPF_EXIT_INSN(),
906 		},
907 		.errstr = "R1 subtraction from stack pointer",
908 		.result = REJECT,
909 	},
910 	{
911 		"non-invalid fp arithmetic",
912 		.insns = {
913 			BPF_MOV64_IMM(BPF_REG_0, 0),
914 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
915 			BPF_EXIT_INSN(),
916 		},
917 		.result = ACCEPT,
918 	},
919 	{
920 		"invalid argument register",
921 		.insns = {
922 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
923 				     BPF_FUNC_get_cgroup_classid),
924 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
925 				     BPF_FUNC_get_cgroup_classid),
926 			BPF_EXIT_INSN(),
927 		},
928 		.errstr = "R1 !read_ok",
929 		.result = REJECT,
930 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
931 	},
932 	{
933 		"non-invalid argument register",
934 		.insns = {
935 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
936 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
937 				     BPF_FUNC_get_cgroup_classid),
938 			BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
939 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
940 				     BPF_FUNC_get_cgroup_classid),
941 			BPF_EXIT_INSN(),
942 		},
943 		.result = ACCEPT,
944 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
945 	},
946 	{
947 		"check valid spill/fill",
948 		.insns = {
949 			/* spill R1(ctx) into stack */
950 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
951 			/* fill it back into R2 */
952 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
953 			/* should be able to access R0 = *(R2 + 8) */
954 			/* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
955 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
956 			BPF_EXIT_INSN(),
957 		},
958 		.errstr_unpriv = "R0 leaks addr",
959 		.result = ACCEPT,
960 		.result_unpriv = REJECT,
961 		.retval = POINTER_VALUE,
962 	},
963 	{
964 		"check valid spill/fill, skb mark",
965 		.insns = {
966 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
967 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
968 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
969 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
970 				    offsetof(struct __sk_buff, mark)),
971 			BPF_EXIT_INSN(),
972 		},
973 		.result = ACCEPT,
974 		.result_unpriv = ACCEPT,
975 	},
976 	{
977 		"check corrupted spill/fill",
978 		.insns = {
979 			/* spill R1(ctx) into stack */
980 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
981 			/* mess up with R1 pointer on stack */
982 			BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
983 			/* fill back into R0 should fail */
984 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
985 			BPF_EXIT_INSN(),
986 		},
987 		.errstr_unpriv = "attempt to corrupt spilled",
988 		.errstr = "corrupted spill",
989 		.result = REJECT,
990 	},
991 	{
992 		"invalid src register in STX",
993 		.insns = {
994 			BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
995 			BPF_EXIT_INSN(),
996 		},
997 		.errstr = "R15 is invalid",
998 		.result = REJECT,
999 	},
1000 	{
1001 		"invalid dst register in STX",
1002 		.insns = {
1003 			BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
1004 			BPF_EXIT_INSN(),
1005 		},
1006 		.errstr = "R14 is invalid",
1007 		.result = REJECT,
1008 	},
1009 	{
1010 		"invalid dst register in ST",
1011 		.insns = {
1012 			BPF_ST_MEM(BPF_B, 14, -1, -1),
1013 			BPF_EXIT_INSN(),
1014 		},
1015 		.errstr = "R14 is invalid",
1016 		.result = REJECT,
1017 	},
1018 	{
1019 		"invalid src register in LDX",
1020 		.insns = {
1021 			BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
1022 			BPF_EXIT_INSN(),
1023 		},
1024 		.errstr = "R12 is invalid",
1025 		.result = REJECT,
1026 	},
1027 	{
1028 		"invalid dst register in LDX",
1029 		.insns = {
1030 			BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
1031 			BPF_EXIT_INSN(),
1032 		},
1033 		.errstr = "R11 is invalid",
1034 		.result = REJECT,
1035 	},
1036 	{
1037 		"junk insn",
1038 		.insns = {
1039 			BPF_RAW_INSN(0, 0, 0, 0, 0),
1040 			BPF_EXIT_INSN(),
1041 		},
1042 		.errstr = "unknown opcode 00",
1043 		.result = REJECT,
1044 	},
1045 	{
1046 		"junk insn2",
1047 		.insns = {
1048 			BPF_RAW_INSN(1, 0, 0, 0, 0),
1049 			BPF_EXIT_INSN(),
1050 		},
1051 		.errstr = "BPF_LDX uses reserved fields",
1052 		.result = REJECT,
1053 	},
1054 	{
1055 		"junk insn3",
1056 		.insns = {
1057 			BPF_RAW_INSN(-1, 0, 0, 0, 0),
1058 			BPF_EXIT_INSN(),
1059 		},
1060 		.errstr = "unknown opcode ff",
1061 		.result = REJECT,
1062 	},
1063 	{
1064 		"junk insn4",
1065 		.insns = {
1066 			BPF_RAW_INSN(-1, -1, -1, -1, -1),
1067 			BPF_EXIT_INSN(),
1068 		},
1069 		.errstr = "unknown opcode ff",
1070 		.result = REJECT,
1071 	},
1072 	{
1073 		"junk insn5",
1074 		.insns = {
1075 			BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
1076 			BPF_EXIT_INSN(),
1077 		},
1078 		.errstr = "BPF_ALU uses reserved fields",
1079 		.result = REJECT,
1080 	},
1081 	{
1082 		"misaligned read from stack",
1083 		.insns = {
1084 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1085 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
1086 			BPF_EXIT_INSN(),
1087 		},
1088 		.errstr = "misaligned stack access",
1089 		.result = REJECT,
1090 	},
1091 	{
1092 		"invalid map_fd for function call",
1093 		.insns = {
1094 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1095 			BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
1096 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1097 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1098 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1099 				     BPF_FUNC_map_delete_elem),
1100 			BPF_EXIT_INSN(),
1101 		},
1102 		.errstr = "fd 0 is not pointing to valid bpf_map",
1103 		.result = REJECT,
1104 	},
1105 	{
1106 		"don't check return value before access",
1107 		.insns = {
1108 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1109 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1110 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1111 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1112 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1113 				     BPF_FUNC_map_lookup_elem),
1114 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1115 			BPF_EXIT_INSN(),
1116 		},
1117 		.fixup_map_hash_8b = { 3 },
1118 		.errstr = "R0 invalid mem access 'map_value_or_null'",
1119 		.result = REJECT,
1120 	},
1121 	{
1122 		"access memory with incorrect alignment",
1123 		.insns = {
1124 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1125 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1126 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1127 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1128 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1129 				     BPF_FUNC_map_lookup_elem),
1130 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
1131 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
1132 			BPF_EXIT_INSN(),
1133 		},
1134 		.fixup_map_hash_8b = { 3 },
1135 		.errstr = "misaligned value access",
1136 		.result = REJECT,
1137 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1138 	},
1139 	{
1140 		"sometimes access memory with incorrect alignment",
1141 		.insns = {
1142 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1143 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1144 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1145 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1146 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1147 				     BPF_FUNC_map_lookup_elem),
1148 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
1149 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1150 			BPF_EXIT_INSN(),
1151 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
1152 			BPF_EXIT_INSN(),
1153 		},
1154 		.fixup_map_hash_8b = { 3 },
1155 		.errstr = "R0 invalid mem access",
1156 		.errstr_unpriv = "R0 leaks addr",
1157 		.result = REJECT,
1158 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1159 	},
1160 	{
1161 		"jump test 1",
1162 		.insns = {
1163 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1164 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
1165 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
1166 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
1167 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
1168 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
1169 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
1170 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
1171 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
1172 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
1173 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
1174 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
1175 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
1176 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
1177 			BPF_MOV64_IMM(BPF_REG_0, 0),
1178 			BPF_EXIT_INSN(),
1179 		},
1180 		.errstr_unpriv = "R1 pointer comparison",
1181 		.result_unpriv = REJECT,
1182 		.result = ACCEPT,
1183 	},
1184 	{
1185 		"jump test 2",
1186 		.insns = {
1187 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1188 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
1189 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
1190 			BPF_JMP_IMM(BPF_JA, 0, 0, 14),
1191 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
1192 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
1193 			BPF_JMP_IMM(BPF_JA, 0, 0, 11),
1194 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
1195 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
1196 			BPF_JMP_IMM(BPF_JA, 0, 0, 8),
1197 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
1198 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
1199 			BPF_JMP_IMM(BPF_JA, 0, 0, 5),
1200 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
1201 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
1202 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1203 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
1204 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
1205 			BPF_MOV64_IMM(BPF_REG_0, 0),
1206 			BPF_EXIT_INSN(),
1207 		},
1208 		.errstr_unpriv = "R1 pointer comparison",
1209 		.result_unpriv = REJECT,
1210 		.result = ACCEPT,
1211 	},
1212 	{
1213 		"jump test 3",
1214 		.insns = {
1215 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1216 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
1217 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
1218 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1219 			BPF_JMP_IMM(BPF_JA, 0, 0, 19),
1220 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
1221 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
1222 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1223 			BPF_JMP_IMM(BPF_JA, 0, 0, 15),
1224 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
1225 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
1226 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
1227 			BPF_JMP_IMM(BPF_JA, 0, 0, 11),
1228 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
1229 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
1230 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
1231 			BPF_JMP_IMM(BPF_JA, 0, 0, 7),
1232 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
1233 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
1234 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
1235 			BPF_JMP_IMM(BPF_JA, 0, 0, 3),
1236 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
1237 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
1238 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
1239 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1240 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1241 				     BPF_FUNC_map_delete_elem),
1242 			BPF_EXIT_INSN(),
1243 		},
1244 		.fixup_map_hash_8b = { 24 },
1245 		.errstr_unpriv = "R1 pointer comparison",
1246 		.result_unpriv = REJECT,
1247 		.result = ACCEPT,
1248 		.retval = -ENOENT,
1249 	},
1250 	{
1251 		"jump test 4",
1252 		.insns = {
1253 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1254 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1255 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1256 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1257 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1258 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1259 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1260 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1261 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1262 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1263 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1264 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1265 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1266 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1267 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1268 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1269 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1270 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1271 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1272 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1273 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1274 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1275 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1276 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1277 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1278 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1279 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1280 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1281 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1282 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1283 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1284 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1285 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1286 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1287 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1288 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1289 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1290 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1291 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1292 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1293 			BPF_MOV64_IMM(BPF_REG_0, 0),
1294 			BPF_EXIT_INSN(),
1295 		},
1296 		.errstr_unpriv = "R1 pointer comparison",
1297 		.result_unpriv = REJECT,
1298 		.result = ACCEPT,
1299 	},
1300 	{
1301 		"jump test 5",
1302 		.insns = {
1303 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1304 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
1305 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1306 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1307 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1308 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1309 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1310 			BPF_MOV64_IMM(BPF_REG_0, 0),
1311 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1312 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1313 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1314 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1315 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1316 			BPF_MOV64_IMM(BPF_REG_0, 0),
1317 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1318 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1319 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1320 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1321 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1322 			BPF_MOV64_IMM(BPF_REG_0, 0),
1323 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1324 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1325 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1326 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1327 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1328 			BPF_MOV64_IMM(BPF_REG_0, 0),
1329 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1330 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1331 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1332 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1333 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1334 			BPF_MOV64_IMM(BPF_REG_0, 0),
1335 			BPF_EXIT_INSN(),
1336 		},
1337 		.errstr_unpriv = "R1 pointer comparison",
1338 		.result_unpriv = REJECT,
1339 		.result = ACCEPT,
1340 	},
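	/* __sk_buff context-access tests: which fields may be read or written
	 * depends on the program type, and invalid offsets or sizes are rejected
	 * with "invalid bpf_context access".
	 */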
1341 	{
1342 		"access skb fields ok",
1343 		.insns = {
1344 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1345 				    offsetof(struct __sk_buff, len)),
1346 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1347 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1348 				    offsetof(struct __sk_buff, mark)),
1349 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1350 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1351 				    offsetof(struct __sk_buff, pkt_type)),
1352 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1353 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1354 				    offsetof(struct __sk_buff, queue_mapping)),
1355 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1356 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1357 				    offsetof(struct __sk_buff, protocol)),
1358 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1359 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1360 				    offsetof(struct __sk_buff, vlan_present)),
1361 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1362 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1363 				    offsetof(struct __sk_buff, vlan_tci)),
1364 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1365 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1366 				    offsetof(struct __sk_buff, napi_id)),
1367 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1368 			BPF_EXIT_INSN(),
1369 		},
1370 		.result = ACCEPT,
1371 	},
1372 	{
1373 		"access skb fields bad1",
1374 		.insns = {
1375 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
1376 			BPF_EXIT_INSN(),
1377 		},
1378 		.errstr = "invalid bpf_context access",
1379 		.result = REJECT,
1380 	},
1381 	{
1382 		"access skb fields bad2",
1383 		.insns = {
1384 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
1385 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1386 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1387 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1388 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1389 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1390 				     BPF_FUNC_map_lookup_elem),
1391 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1392 			BPF_EXIT_INSN(),
1393 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1394 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1395 				    offsetof(struct __sk_buff, pkt_type)),
1396 			BPF_EXIT_INSN(),
1397 		},
1398 		.fixup_map_hash_8b = { 4 },
1399 		.errstr = "different pointers",
1400 		.errstr_unpriv = "R1 pointer comparison",
1401 		.result = REJECT,
1402 	},
1403 	{
1404 		"access skb fields bad3",
1405 		.insns = {
1406 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1407 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1408 				    offsetof(struct __sk_buff, pkt_type)),
1409 			BPF_EXIT_INSN(),
1410 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1411 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1412 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1413 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1414 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1415 				     BPF_FUNC_map_lookup_elem),
1416 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1417 			BPF_EXIT_INSN(),
1418 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1419 			BPF_JMP_IMM(BPF_JA, 0, 0, -12),
1420 		},
1421 		.fixup_map_hash_8b = { 6 },
1422 		.errstr = "different pointers",
1423 		.errstr_unpriv = "R1 pointer comparison",
1424 		.result = REJECT,
1425 	},
1426 	{
1427 		"access skb fields bad4",
1428 		.insns = {
1429 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
1430 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1431 				    offsetof(struct __sk_buff, len)),
1432 			BPF_MOV64_IMM(BPF_REG_0, 0),
1433 			BPF_EXIT_INSN(),
1434 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1435 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1436 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1437 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1438 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1439 				     BPF_FUNC_map_lookup_elem),
1440 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1441 			BPF_EXIT_INSN(),
1442 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1443 			BPF_JMP_IMM(BPF_JA, 0, 0, -13),
1444 		},
1445 		.fixup_map_hash_8b = { 7 },
1446 		.errstr = "different pointers",
1447 		.errstr_unpriv = "R1 pointer comparison",
1448 		.result = REJECT,
1449 	},
1450 	{
1451 		"invalid access __sk_buff family",
1452 		.insns = {
1453 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1454 				    offsetof(struct __sk_buff, family)),
1455 			BPF_EXIT_INSN(),
1456 		},
1457 		.errstr = "invalid bpf_context access",
1458 		.result = REJECT,
1459 	},
1460 	{
1461 		"invalid access __sk_buff remote_ip4",
1462 		.insns = {
1463 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1464 				    offsetof(struct __sk_buff, remote_ip4)),
1465 			BPF_EXIT_INSN(),
1466 		},
1467 		.errstr = "invalid bpf_context access",
1468 		.result = REJECT,
1469 	},
1470 	{
1471 		"invalid access __sk_buff local_ip4",
1472 		.insns = {
1473 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1474 				    offsetof(struct __sk_buff, local_ip4)),
1475 			BPF_EXIT_INSN(),
1476 		},
1477 		.errstr = "invalid bpf_context access",
1478 		.result = REJECT,
1479 	},
1480 	{
1481 		"invalid access __sk_buff remote_ip6",
1482 		.insns = {
1483 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1484 				    offsetof(struct __sk_buff, remote_ip6)),
1485 			BPF_EXIT_INSN(),
1486 		},
1487 		.errstr = "invalid bpf_context access",
1488 		.result = REJECT,
1489 	},
1490 	{
1491 		"invalid access __sk_buff local_ip6",
1492 		.insns = {
1493 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1494 				    offsetof(struct __sk_buff, local_ip6)),
1495 			BPF_EXIT_INSN(),
1496 		},
1497 		.errstr = "invalid bpf_context access",
1498 		.result = REJECT,
1499 	},
1500 	{
1501 		"invalid access __sk_buff remote_port",
1502 		.insns = {
1503 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1504 				    offsetof(struct __sk_buff, remote_port)),
1505 			BPF_EXIT_INSN(),
1506 		},
1507 		.errstr = "invalid bpf_context access",
1508 		.result = REJECT,
1509 	},
1510 	{
1511 		"invalid access __sk_buff local_port",
1512 		.insns = {
1513 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1514 				    offsetof(struct __sk_buff, local_port)),
1515 			BPF_EXIT_INSN(),
1516 		},
1517 		.errstr = "invalid bpf_context access",
1518 		.result = REJECT,
1519 	},
1520 	{
1521 		"valid access __sk_buff family",
1522 		.insns = {
1523 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1524 				    offsetof(struct __sk_buff, family)),
1525 			BPF_EXIT_INSN(),
1526 		},
1527 		.result = ACCEPT,
1528 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1529 	},
1530 	{
1531 		"valid access __sk_buff remote_ip4",
1532 		.insns = {
1533 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1534 				    offsetof(struct __sk_buff, remote_ip4)),
1535 			BPF_EXIT_INSN(),
1536 		},
1537 		.result = ACCEPT,
1538 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1539 	},
1540 	{
1541 		"valid access __sk_buff local_ip4",
1542 		.insns = {
1543 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1544 				    offsetof(struct __sk_buff, local_ip4)),
1545 			BPF_EXIT_INSN(),
1546 		},
1547 		.result = ACCEPT,
1548 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1549 	},
1550 	{
1551 		"valid access __sk_buff remote_ip6",
1552 		.insns = {
1553 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1554 				    offsetof(struct __sk_buff, remote_ip6[0])),
1555 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1556 				    offsetof(struct __sk_buff, remote_ip6[1])),
1557 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1558 				    offsetof(struct __sk_buff, remote_ip6[2])),
1559 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1560 				    offsetof(struct __sk_buff, remote_ip6[3])),
1561 			BPF_EXIT_INSN(),
1562 		},
1563 		.result = ACCEPT,
1564 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1565 	},
1566 	{
1567 		"valid access __sk_buff local_ip6",
1568 		.insns = {
1569 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1570 				    offsetof(struct __sk_buff, local_ip6[0])),
1571 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1572 				    offsetof(struct __sk_buff, local_ip6[1])),
1573 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1574 				    offsetof(struct __sk_buff, local_ip6[2])),
1575 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1576 				    offsetof(struct __sk_buff, local_ip6[3])),
1577 			BPF_EXIT_INSN(),
1578 		},
1579 		.result = ACCEPT,
1580 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1581 	},
1582 	{
1583 		"valid access __sk_buff remote_port",
1584 		.insns = {
1585 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1586 				    offsetof(struct __sk_buff, remote_port)),
1587 			BPF_EXIT_INSN(),
1588 		},
1589 		.result = ACCEPT,
1590 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1591 	},
1592 	{
1593 		"valid access __sk_buff local_port",
1594 		.insns = {
1595 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1596 				    offsetof(struct __sk_buff, local_port)),
1597 			BPF_EXIT_INSN(),
1598 		},
1599 		.result = ACCEPT,
1600 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1601 	},
1602 	{
1603 		"invalid access of tc_classid for SK_SKB",
1604 		.insns = {
1605 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1606 				    offsetof(struct __sk_buff, tc_classid)),
1607 			BPF_EXIT_INSN(),
1608 		},
1609 		.result = REJECT,
1610 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1611 		.errstr = "invalid bpf_context access",
1612 	},
1613 	{
1614 		"invalid access of skb->mark for SK_SKB",
1615 		.insns = {
1616 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1617 				    offsetof(struct __sk_buff, mark)),
1618 			BPF_EXIT_INSN(),
1619 		},
1620 		.result =  REJECT,
1621 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1622 		.errstr = "invalid bpf_context access",
1623 	},
1624 	{
1625 		"check skb->mark is not writeable by SK_SKB",
1626 		.insns = {
1627 			BPF_MOV64_IMM(BPF_REG_0, 0),
1628 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1629 				    offsetof(struct __sk_buff, mark)),
1630 			BPF_EXIT_INSN(),
1631 		},
1632 		.result =  REJECT,
1633 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1634 		.errstr = "invalid bpf_context access",
1635 	},
1636 	{
1637 		"check skb->tc_index is writeable by SK_SKB",
1638 		.insns = {
1639 			BPF_MOV64_IMM(BPF_REG_0, 0),
1640 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1641 				    offsetof(struct __sk_buff, tc_index)),
1642 			BPF_EXIT_INSN(),
1643 		},
1644 		.result = ACCEPT,
1645 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1646 	},
1647 	{
1648 		"check skb->priority is writeable by SK_SKB",
1649 		.insns = {
1650 			BPF_MOV64_IMM(BPF_REG_0, 0),
1651 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1652 				    offsetof(struct __sk_buff, priority)),
1653 			BPF_EXIT_INSN(),
1654 		},
1655 		.result = ACCEPT,
1656 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1657 	},
1658 	{
1659 		"direct packet read for SK_SKB",
1660 		.insns = {
1661 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1662 				    offsetof(struct __sk_buff, data)),
1663 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1664 				    offsetof(struct __sk_buff, data_end)),
1665 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1666 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1667 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1668 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
1669 			BPF_MOV64_IMM(BPF_REG_0, 0),
1670 			BPF_EXIT_INSN(),
1671 		},
1672 		.result = ACCEPT,
1673 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1674 	},
1675 	{
1676 		"direct packet write for SK_SKB",
1677 		.insns = {
1678 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1679 				    offsetof(struct __sk_buff, data)),
1680 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1681 				    offsetof(struct __sk_buff, data_end)),
1682 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1683 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1684 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1685 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
1686 			BPF_MOV64_IMM(BPF_REG_0, 0),
1687 			BPF_EXIT_INSN(),
1688 		},
1689 		.result = ACCEPT,
1690 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1691 	},
1692 	{
1693 		"overlapping checks for direct packet access SK_SKB",
1694 		.insns = {
1695 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1696 				    offsetof(struct __sk_buff, data)),
1697 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1698 				    offsetof(struct __sk_buff, data_end)),
1699 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1700 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1701 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
1702 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
1703 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
1704 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
1705 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
1706 			BPF_MOV64_IMM(BPF_REG_0, 0),
1707 			BPF_EXIT_INSN(),
1708 		},
1709 		.result = ACCEPT,
1710 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1711 	},
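	/* sk_msg_md context-access tests: data/data_end are 8-byte pointer loads,
	 * the remaining fields are 4-byte reads, and oversized or misaligned
	 * accesses are rejected.
	 */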
1712 	{
1713 		"valid access family in SK_MSG",
1714 		.insns = {
1715 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1716 				    offsetof(struct sk_msg_md, family)),
1717 			BPF_EXIT_INSN(),
1718 		},
1719 		.result = ACCEPT,
1720 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1721 	},
1722 	{
1723 		"valid access remote_ip4 in SK_MSG",
1724 		.insns = {
1725 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1726 				    offsetof(struct sk_msg_md, remote_ip4)),
1727 			BPF_EXIT_INSN(),
1728 		},
1729 		.result = ACCEPT,
1730 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1731 	},
1732 	{
1733 		"valid access local_ip4 in SK_MSG",
1734 		.insns = {
1735 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1736 				    offsetof(struct sk_msg_md, local_ip4)),
1737 			BPF_EXIT_INSN(),
1738 		},
1739 		.result = ACCEPT,
1740 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1741 	},
1742 	{
1743 		"valid access remote_port in SK_MSG",
1744 		.insns = {
1745 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1746 				    offsetof(struct sk_msg_md, remote_port)),
1747 			BPF_EXIT_INSN(),
1748 		},
1749 		.result = ACCEPT,
1750 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1751 	},
1752 	{
1753 		"valid access local_port in SK_MSG",
1754 		.insns = {
1755 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1756 				    offsetof(struct sk_msg_md, local_port)),
1757 			BPF_EXIT_INSN(),
1758 		},
1759 		.result = ACCEPT,
1760 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1761 	},
1762 	{
1763 		"valid access remote_ip6 in SK_MSG",
1764 		.insns = {
1765 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1766 				    offsetof(struct sk_msg_md, remote_ip6[0])),
1767 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1768 				    offsetof(struct sk_msg_md, remote_ip6[1])),
1769 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1770 				    offsetof(struct sk_msg_md, remote_ip6[2])),
1771 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1772 				    offsetof(struct sk_msg_md, remote_ip6[3])),
1773 			BPF_EXIT_INSN(),
1774 		},
1775 		.result = ACCEPT,
1776 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1777 	},
1778 	{
1779 		"valid access local_ip6 in SK_MSG",
1780 		.insns = {
1781 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1782 				    offsetof(struct sk_msg_md, local_ip6[0])),
1783 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1784 				    offsetof(struct sk_msg_md, local_ip6[1])),
1785 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1786 				    offsetof(struct sk_msg_md, local_ip6[2])),
1787 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1788 				    offsetof(struct sk_msg_md, local_ip6[3])),
1789 			BPF_EXIT_INSN(),
1790 		},
1791 		.result = ACCEPT,
1792 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1793 	},
1794 	{
1795 		"invalid 64B read of family in SK_MSG",
1796 		.insns = {
1797 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1798 				    offsetof(struct sk_msg_md, family)),
1799 			BPF_EXIT_INSN(),
1800 		},
1801 		.errstr = "invalid bpf_context access",
1802 		.result = REJECT,
1803 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1804 	},
1805 	{
1806 		"invalid read past end of SK_MSG",
1807 		.insns = {
1808 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1809 				    offsetof(struct sk_msg_md, local_port) + 4),
1810 			BPF_EXIT_INSN(),
1811 		},
1812 		.errstr = "R0 !read_ok",
1813 		.result = REJECT,
1814 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1815 	},
1816 	{
1817 		"invalid read offset in SK_MSG",
1818 		.insns = {
1819 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1820 				    offsetof(struct sk_msg_md, family) + 1),
1821 			BPF_EXIT_INSN(),
1822 		},
1823 		.errstr = "invalid bpf_context access",
1824 		.result = REJECT,
1825 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1826 	},
1827 	{
1828 		"direct packet read for SK_MSG",
1829 		.insns = {
1830 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1831 				    offsetof(struct sk_msg_md, data)),
1832 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
1833 				    offsetof(struct sk_msg_md, data_end)),
1834 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1835 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1836 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1837 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
1838 			BPF_MOV64_IMM(BPF_REG_0, 0),
1839 			BPF_EXIT_INSN(),
1840 		},
1841 		.result = ACCEPT,
1842 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1843 	},
1844 	{
1845 		"direct packet write for SK_MSG",
1846 		.insns = {
1847 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1848 				    offsetof(struct sk_msg_md, data)),
1849 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
1850 				    offsetof(struct sk_msg_md, data_end)),
1851 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1852 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1853 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1854 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
1855 			BPF_MOV64_IMM(BPF_REG_0, 0),
1856 			BPF_EXIT_INSN(),
1857 		},
1858 		.result = ACCEPT,
1859 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1860 	},
1861 	{
1862 		"overlapping checks for direct packet access SK_MSG",
1863 		.insns = {
1864 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1865 				    offsetof(struct sk_msg_md, data)),
1866 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
1867 				    offsetof(struct sk_msg_md, data_end)),
1868 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1869 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1870 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
1871 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
1872 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
1873 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
1874 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
1875 			BPF_MOV64_IMM(BPF_REG_0, 0),
1876 			BPF_EXIT_INSN(),
1877 		},
1878 		.result = ACCEPT,
1879 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1880 	},
1881 	{
1882 		"check skb->mark is not writeable by sockets",
1883 		.insns = {
1884 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1885 				    offsetof(struct __sk_buff, mark)),
1886 			BPF_EXIT_INSN(),
1887 		},
1888 		.errstr = "invalid bpf_context access",
1889 		.errstr_unpriv = "R1 leaks addr",
1890 		.result = REJECT,
1891 	},
1892 	{
1893 		"check skb->tc_index is not writeable by sockets",
1894 		.insns = {
1895 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1896 				    offsetof(struct __sk_buff, tc_index)),
1897 			BPF_EXIT_INSN(),
1898 		},
1899 		.errstr = "invalid bpf_context access",
1900 		.errstr_unpriv = "R1 leaks addr",
1901 		.result = REJECT,
1902 	},
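	/*
	 * __sk_buff->cb[] is 20 bytes of per-packet scratch space that socket
	 * filter programs may both read and write.  The following tests walk
	 * the whole range at byte, half-word, word and double-word size to
	 * check that every properly aligned access is accepted, that the same
	 * offsets are rejected for program types whose context is not
	 * struct __sk_buff (e.g. BPF_PROG_TYPE_CGROUP_SOCK), and that
	 * misaligned accesses fail under BPF_F_STRICT_ALIGNMENT.
	 */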
1903 	{
1904 		"check cb access: byte",
1905 		.insns = {
1906 			BPF_MOV64_IMM(BPF_REG_0, 0),
1907 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1908 				    offsetof(struct __sk_buff, cb[0])),
1909 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1910 				    offsetof(struct __sk_buff, cb[0]) + 1),
1911 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1912 				    offsetof(struct __sk_buff, cb[0]) + 2),
1913 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1914 				    offsetof(struct __sk_buff, cb[0]) + 3),
1915 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1916 				    offsetof(struct __sk_buff, cb[1])),
1917 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1918 				    offsetof(struct __sk_buff, cb[1]) + 1),
1919 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1920 				    offsetof(struct __sk_buff, cb[1]) + 2),
1921 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1922 				    offsetof(struct __sk_buff, cb[1]) + 3),
1923 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1924 				    offsetof(struct __sk_buff, cb[2])),
1925 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1926 				    offsetof(struct __sk_buff, cb[2]) + 1),
1927 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1928 				    offsetof(struct __sk_buff, cb[2]) + 2),
1929 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1930 				    offsetof(struct __sk_buff, cb[2]) + 3),
1931 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1932 				    offsetof(struct __sk_buff, cb[3])),
1933 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1934 				    offsetof(struct __sk_buff, cb[3]) + 1),
1935 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1936 				    offsetof(struct __sk_buff, cb[3]) + 2),
1937 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1938 				    offsetof(struct __sk_buff, cb[3]) + 3),
1939 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1940 				    offsetof(struct __sk_buff, cb[4])),
1941 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1942 				    offsetof(struct __sk_buff, cb[4]) + 1),
1943 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1944 				    offsetof(struct __sk_buff, cb[4]) + 2),
1945 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1946 				    offsetof(struct __sk_buff, cb[4]) + 3),
1947 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1948 				    offsetof(struct __sk_buff, cb[0])),
1949 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1950 				    offsetof(struct __sk_buff, cb[0]) + 1),
1951 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1952 				    offsetof(struct __sk_buff, cb[0]) + 2),
1953 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1954 				    offsetof(struct __sk_buff, cb[0]) + 3),
1955 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1956 				    offsetof(struct __sk_buff, cb[1])),
1957 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1958 				    offsetof(struct __sk_buff, cb[1]) + 1),
1959 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1960 				    offsetof(struct __sk_buff, cb[1]) + 2),
1961 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1962 				    offsetof(struct __sk_buff, cb[1]) + 3),
1963 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1964 				    offsetof(struct __sk_buff, cb[2])),
1965 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1966 				    offsetof(struct __sk_buff, cb[2]) + 1),
1967 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1968 				    offsetof(struct __sk_buff, cb[2]) + 2),
1969 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1970 				    offsetof(struct __sk_buff, cb[2]) + 3),
1971 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1972 				    offsetof(struct __sk_buff, cb[3])),
1973 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1974 				    offsetof(struct __sk_buff, cb[3]) + 1),
1975 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1976 				    offsetof(struct __sk_buff, cb[3]) + 2),
1977 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1978 				    offsetof(struct __sk_buff, cb[3]) + 3),
1979 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1980 				    offsetof(struct __sk_buff, cb[4])),
1981 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1982 				    offsetof(struct __sk_buff, cb[4]) + 1),
1983 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1984 				    offsetof(struct __sk_buff, cb[4]) + 2),
1985 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1986 				    offsetof(struct __sk_buff, cb[4]) + 3),
1987 			BPF_EXIT_INSN(),
1988 		},
1989 		.result = ACCEPT,
1990 	},
1991 	{
1992 		"__sk_buff->hash, offset 0, byte store not permitted",
1993 		.insns = {
1994 			BPF_MOV64_IMM(BPF_REG_0, 0),
1995 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1996 				    offsetof(struct __sk_buff, hash)),
1997 			BPF_EXIT_INSN(),
1998 		},
1999 		.errstr = "invalid bpf_context access",
2000 		.result = REJECT,
2001 	},
2002 	{
2003 		"__sk_buff->tc_index, offset 3, byte store not permitted",
2004 		.insns = {
2005 			BPF_MOV64_IMM(BPF_REG_0, 0),
2006 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2007 				    offsetof(struct __sk_buff, tc_index) + 3),
2008 			BPF_EXIT_INSN(),
2009 		},
2010 		.errstr = "invalid bpf_context access",
2011 		.result = REJECT,
2012 	},
2013 	{
2014 		"check skb->hash byte load permitted",
2015 		.insns = {
2016 			BPF_MOV64_IMM(BPF_REG_0, 0),
2017 #if __BYTE_ORDER == __LITTLE_ENDIAN
2018 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2019 				    offsetof(struct __sk_buff, hash)),
2020 #else
2021 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2022 				    offsetof(struct __sk_buff, hash) + 3),
2023 #endif
2024 			BPF_EXIT_INSN(),
2025 		},
2026 		.result = ACCEPT,
2027 	},
2028 	{
2029 		"check skb->hash byte load not permitted 1",
2030 		.insns = {
2031 			BPF_MOV64_IMM(BPF_REG_0, 0),
2032 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2033 				    offsetof(struct __sk_buff, hash) + 1),
2034 			BPF_EXIT_INSN(),
2035 		},
2036 		.errstr = "invalid bpf_context access",
2037 		.result = REJECT,
2038 	},
2039 	{
2040 		"check skb->hash byte load not permitted 2",
2041 		.insns = {
2042 			BPF_MOV64_IMM(BPF_REG_0, 0),
2043 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2044 				    offsetof(struct __sk_buff, hash) + 2),
2045 			BPF_EXIT_INSN(),
2046 		},
2047 		.errstr = "invalid bpf_context access",
2048 		.result = REJECT,
2049 	},
2050 	{
2051 		"check skb->hash byte load not permitted 3",
2052 		.insns = {
2053 			BPF_MOV64_IMM(BPF_REG_0, 0),
2054 #if __BYTE_ORDER == __LITTLE_ENDIAN
2055 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2056 				    offsetof(struct __sk_buff, hash) + 3),
2057 #else
2058 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2059 				    offsetof(struct __sk_buff, hash)),
2060 #endif
2061 			BPF_EXIT_INSN(),
2062 		},
2063 		.errstr = "invalid bpf_context access",
2064 		.result = REJECT,
2065 	},
2066 	{
2067 		"check cb access: byte, wrong type",
2068 		.insns = {
2069 			BPF_MOV64_IMM(BPF_REG_0, 0),
2070 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2071 				    offsetof(struct __sk_buff, cb[0])),
2072 			BPF_EXIT_INSN(),
2073 		},
2074 		.errstr = "invalid bpf_context access",
2075 		.result = REJECT,
2076 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
2077 	},
2078 	{
2079 		"check cb access: half",
2080 		.insns = {
2081 			BPF_MOV64_IMM(BPF_REG_0, 0),
2082 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2083 				    offsetof(struct __sk_buff, cb[0])),
2084 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2085 				    offsetof(struct __sk_buff, cb[0]) + 2),
2086 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2087 				    offsetof(struct __sk_buff, cb[1])),
2088 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2089 				    offsetof(struct __sk_buff, cb[1]) + 2),
2090 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2091 				    offsetof(struct __sk_buff, cb[2])),
2092 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2093 				    offsetof(struct __sk_buff, cb[2]) + 2),
2094 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2095 				    offsetof(struct __sk_buff, cb[3])),
2096 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2097 				    offsetof(struct __sk_buff, cb[3]) + 2),
2098 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2099 				    offsetof(struct __sk_buff, cb[4])),
2100 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2101 				    offsetof(struct __sk_buff, cb[4]) + 2),
2102 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2103 				    offsetof(struct __sk_buff, cb[0])),
2104 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2105 				    offsetof(struct __sk_buff, cb[0]) + 2),
2106 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2107 				    offsetof(struct __sk_buff, cb[1])),
2108 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2109 				    offsetof(struct __sk_buff, cb[1]) + 2),
2110 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2111 				    offsetof(struct __sk_buff, cb[2])),
2112 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2113 				    offsetof(struct __sk_buff, cb[2]) + 2),
2114 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2115 				    offsetof(struct __sk_buff, cb[3])),
2116 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2117 				    offsetof(struct __sk_buff, cb[3]) + 2),
2118 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2119 				    offsetof(struct __sk_buff, cb[4])),
2120 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2121 				    offsetof(struct __sk_buff, cb[4]) + 2),
2122 			BPF_EXIT_INSN(),
2123 		},
2124 		.result = ACCEPT,
2125 	},
2126 	{
2127 		"check cb access: half, unaligned",
2128 		.insns = {
2129 			BPF_MOV64_IMM(BPF_REG_0, 0),
2130 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2131 				    offsetof(struct __sk_buff, cb[0]) + 1),
2132 			BPF_EXIT_INSN(),
2133 		},
2134 		.errstr = "misaligned context access",
2135 		.result = REJECT,
2136 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2137 	},
2138 	{
2139 		"check __sk_buff->hash, offset 0, half store not permitted",
2140 		.insns = {
2141 			BPF_MOV64_IMM(BPF_REG_0, 0),
2142 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2143 				    offsetof(struct __sk_buff, hash)),
2144 			BPF_EXIT_INSN(),
2145 		},
2146 		.errstr = "invalid bpf_context access",
2147 		.result = REJECT,
2148 	},
2149 	{
2150 		"check __sk_buff->tc_index, offset 2, half store not permitted",
2151 		.insns = {
2152 			BPF_MOV64_IMM(BPF_REG_0, 0),
2153 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2154 				    offsetof(struct __sk_buff, tc_index) + 2),
2155 			BPF_EXIT_INSN(),
2156 		},
2157 		.errstr = "invalid bpf_context access",
2158 		.result = REJECT,
2159 	},
2160 	{
2161 		"check skb->hash half load permitted",
2162 		.insns = {
2163 			BPF_MOV64_IMM(BPF_REG_0, 0),
2164 #if __BYTE_ORDER == __LITTLE_ENDIAN
2165 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2166 				    offsetof(struct __sk_buff, hash)),
2167 #else
2168 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2169 				    offsetof(struct __sk_buff, hash) + 2),
2170 #endif
2171 			BPF_EXIT_INSN(),
2172 		},
2173 		.result = ACCEPT,
2174 	},
2175 	{
2176 		"check skb->hash half load not permitted",
2177 		.insns = {
2178 			BPF_MOV64_IMM(BPF_REG_0, 0),
2179 #if __BYTE_ORDER == __LITTLE_ENDIAN
2180 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2181 				    offsetof(struct __sk_buff, hash) + 2),
2182 #else
2183 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2184 				    offsetof(struct __sk_buff, hash)),
2185 #endif
2186 			BPF_EXIT_INSN(),
2187 		},
2188 		.errstr = "invalid bpf_context access",
2189 		.result = REJECT,
2190 	},
2191 	{
2192 		"check cb access: half, wrong type",
2193 		.insns = {
2194 			BPF_MOV64_IMM(BPF_REG_0, 0),
2195 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2196 				    offsetof(struct __sk_buff, cb[0])),
2197 			BPF_EXIT_INSN(),
2198 		},
2199 		.errstr = "invalid bpf_context access",
2200 		.result = REJECT,
2201 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
2202 	},
2203 	{
2204 		"check cb access: word",
2205 		.insns = {
2206 			BPF_MOV64_IMM(BPF_REG_0, 0),
2207 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2208 				    offsetof(struct __sk_buff, cb[0])),
2209 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2210 				    offsetof(struct __sk_buff, cb[1])),
2211 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2212 				    offsetof(struct __sk_buff, cb[2])),
2213 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2214 				    offsetof(struct __sk_buff, cb[3])),
2215 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2216 				    offsetof(struct __sk_buff, cb[4])),
2217 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2218 				    offsetof(struct __sk_buff, cb[0])),
2219 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2220 				    offsetof(struct __sk_buff, cb[1])),
2221 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2222 				    offsetof(struct __sk_buff, cb[2])),
2223 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2224 				    offsetof(struct __sk_buff, cb[3])),
2225 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2226 				    offsetof(struct __sk_buff, cb[4])),
2227 			BPF_EXIT_INSN(),
2228 		},
2229 		.result = ACCEPT,
2230 	},
2231 	{
2232 		"check cb access: word, unaligned 1",
2233 		.insns = {
2234 			BPF_MOV64_IMM(BPF_REG_0, 0),
2235 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2236 				    offsetof(struct __sk_buff, cb[0]) + 2),
2237 			BPF_EXIT_INSN(),
2238 		},
2239 		.errstr = "misaligned context access",
2240 		.result = REJECT,
2241 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2242 	},
2243 	{
2244 		"check cb access: word, unaligned 2",
2245 		.insns = {
2246 			BPF_MOV64_IMM(BPF_REG_0, 0),
2247 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2248 				    offsetof(struct __sk_buff, cb[4]) + 1),
2249 			BPF_EXIT_INSN(),
2250 		},
2251 		.errstr = "misaligned context access",
2252 		.result = REJECT,
2253 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2254 	},
2255 	{
2256 		"check cb access: word, unaligned 3",
2257 		.insns = {
2258 			BPF_MOV64_IMM(BPF_REG_0, 0),
2259 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2260 				    offsetof(struct __sk_buff, cb[4]) + 2),
2261 			BPF_EXIT_INSN(),
2262 		},
2263 		.errstr = "misaligned context access",
2264 		.result = REJECT,
2265 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2266 	},
2267 	{
2268 		"check cb access: word, unaligned 4",
2269 		.insns = {
2270 			BPF_MOV64_IMM(BPF_REG_0, 0),
2271 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2272 				    offsetof(struct __sk_buff, cb[4]) + 3),
2273 			BPF_EXIT_INSN(),
2274 		},
2275 		.errstr = "misaligned context access",
2276 		.result = REJECT,
2277 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2278 	},
2279 	{
2280 		"check cb access: double",
2281 		.insns = {
2282 			BPF_MOV64_IMM(BPF_REG_0, 0),
2283 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2284 				    offsetof(struct __sk_buff, cb[0])),
2285 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2286 				    offsetof(struct __sk_buff, cb[2])),
2287 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2288 				    offsetof(struct __sk_buff, cb[0])),
2289 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2290 				    offsetof(struct __sk_buff, cb[2])),
2291 			BPF_EXIT_INSN(),
2292 		},
2293 		.result = ACCEPT,
2294 	},
2295 	{
2296 		"check cb access: double, unaligned 1",
2297 		.insns = {
2298 			BPF_MOV64_IMM(BPF_REG_0, 0),
2299 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2300 				    offsetof(struct __sk_buff, cb[1])),
2301 			BPF_EXIT_INSN(),
2302 		},
2303 		.errstr = "misaligned context access",
2304 		.result = REJECT,
2305 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2306 	},
2307 	{
2308 		"check cb access: double, unaligned 2",
2309 		.insns = {
2310 			BPF_MOV64_IMM(BPF_REG_0, 0),
2311 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2312 				    offsetof(struct __sk_buff, cb[3])),
2313 			BPF_EXIT_INSN(),
2314 		},
2315 		.errstr = "misaligned context access",
2316 		.result = REJECT,
2317 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2318 	},
2319 	{
2320 		"check cb access: double, oob 1",
2321 		.insns = {
2322 			BPF_MOV64_IMM(BPF_REG_0, 0),
2323 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2324 				    offsetof(struct __sk_buff, cb[4])),
2325 			BPF_EXIT_INSN(),
2326 		},
2327 		.errstr = "invalid bpf_context access",
2328 		.result = REJECT,
2329 	},
2330 	{
2331 		"check cb access: double, oob 2",
2332 		.insns = {
2333 			BPF_MOV64_IMM(BPF_REG_0, 0),
2334 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2335 				    offsetof(struct __sk_buff, cb[4])),
2336 			BPF_EXIT_INSN(),
2337 		},
2338 		.errstr = "invalid bpf_context access",
2339 		.result = REJECT,
2340 	},
2341 	{
2342 		"check __sk_buff->ifindex dw store not permitted",
2343 		.insns = {
2344 			BPF_MOV64_IMM(BPF_REG_0, 0),
2345 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2346 				    offsetof(struct __sk_buff, ifindex)),
2347 			BPF_EXIT_INSN(),
2348 		},
2349 		.errstr = "invalid bpf_context access",
2350 		.result = REJECT,
2351 	},
2352 	{
2353 		"check __sk_buff->ifindex dw load not permitted",
2354 		.insns = {
2355 			BPF_MOV64_IMM(BPF_REG_0, 0),
2356 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2357 				    offsetof(struct __sk_buff, ifindex)),
2358 			BPF_EXIT_INSN(),
2359 		},
2360 		.errstr = "invalid bpf_context access",
2361 		.result = REJECT,
2362 	},
2363 	{
2364 		"check cb access: double, wrong type",
2365 		.insns = {
2366 			BPF_MOV64_IMM(BPF_REG_0, 0),
2367 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2368 				    offsetof(struct __sk_buff, cb[0])),
2369 			BPF_EXIT_INSN(),
2370 		},
2371 		.errstr = "invalid bpf_context access",
2372 		.result = REJECT,
2373 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
2374 	},
2375 	{
2376 		"check out of range skb->cb access",
2377 		.insns = {
2378 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2379 				    offsetof(struct __sk_buff, cb[0]) + 256),
2380 			BPF_EXIT_INSN(),
2381 		},
2382 		.errstr = "invalid bpf_context access",
2383 		.errstr_unpriv = "",
2384 		.result = REJECT,
2385 		.prog_type = BPF_PROG_TYPE_SCHED_ACT,
2386 	},
2387 	{
2388 		"write skb fields from socket prog",
2389 		.insns = {
2390 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2391 				    offsetof(struct __sk_buff, cb[4])),
2392 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
2393 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2394 				    offsetof(struct __sk_buff, mark)),
2395 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2396 				    offsetof(struct __sk_buff, tc_index)),
2397 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
2398 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2399 				    offsetof(struct __sk_buff, cb[0])),
2400 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2401 				    offsetof(struct __sk_buff, cb[2])),
2402 			BPF_EXIT_INSN(),
2403 		},
2404 		.result = ACCEPT,
2405 		.errstr_unpriv = "R1 leaks addr",
2406 		.result_unpriv = REJECT,
2407 	},
2408 	{
2409 		"write skb fields from tc_cls_act prog",
2410 		.insns = {
2411 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2412 				    offsetof(struct __sk_buff, cb[0])),
2413 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2414 				    offsetof(struct __sk_buff, mark)),
2415 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2416 				    offsetof(struct __sk_buff, tc_index)),
2417 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2418 				    offsetof(struct __sk_buff, tc_index)),
2419 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2420 				    offsetof(struct __sk_buff, cb[3])),
2421 			BPF_EXIT_INSN(),
2422 		},
2423 		.errstr_unpriv = "",
2424 		.result_unpriv = REJECT,
2425 		.result = ACCEPT,
2426 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2427 	},
2428 	{
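	/*
	 * PTR_TO_STACK tests: the verifier tracks the constant offset a
	 * program adds to the frame pointer (R10) and checks every store and
	 * load against the 512-byte stack window.  Stack accesses are always
	 * checked with strict alignment, so an 8-byte access must sit on an
	 * 8-byte boundary; the error strings quote the computed offset, e.g.
	 * "-10+8" for an 8-byte access at fp-10+8 = fp-2.
	 */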
2429 		"PTR_TO_STACK store/load",
2430 		.insns = {
2431 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2432 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
2433 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
2434 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
2435 			BPF_EXIT_INSN(),
2436 		},
2437 		.result = ACCEPT,
2438 		.retval = 0xfaceb00c,
2439 	},
2440 	{
2441 		"PTR_TO_STACK store/load - bad alignment on off",
2442 		.insns = {
2443 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2444 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2445 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
2446 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
2447 			BPF_EXIT_INSN(),
2448 		},
2449 		.result = REJECT,
2450 		.errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
2451 	},
2452 	{
2453 		"PTR_TO_STACK store/load - bad alignment on reg",
2454 		.insns = {
2455 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2456 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
2457 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2458 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2459 			BPF_EXIT_INSN(),
2460 		},
2461 		.result = REJECT,
2462 		.errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
2463 	},
2464 	{
2465 		"PTR_TO_STACK store/load - out of bounds low",
2466 		.insns = {
2467 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2468 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
2469 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2470 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2471 			BPF_EXIT_INSN(),
2472 		},
2473 		.result = REJECT,
2474 		.errstr = "invalid stack off=-79992 size=8",
2475 	},
2476 	{
2477 		"PTR_TO_STACK store/load - out of bounds high",
2478 		.insns = {
2479 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2480 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2481 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2482 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2483 			BPF_EXIT_INSN(),
2484 		},
2485 		.result = REJECT,
2486 		.errstr = "invalid stack off=0 size=8",
2487 	},
2488 	{
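	/*
	 * The "unpriv:" tests exercise the extra checks applied when a
	 * program is loaded without privileges (and the
	 * kernel.unprivileged_bpf_disabled sysctl allows it): pointers must
	 * not leak to user-visible places, pointer comparisons and most
	 * pointer arithmetic are refused, and helpers such as
	 * bpf_trace_printk() are unavailable.  Tests that use the default
	 * (socket filter) program type are additionally loaded by the harness
	 * with capabilities dropped and checked against .result_unpriv /
	 * .errstr_unpriv.
	 */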
2489 		"unpriv: return pointer",
2490 		.insns = {
2491 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
2492 			BPF_EXIT_INSN(),
2493 		},
2494 		.result = ACCEPT,
2495 		.result_unpriv = REJECT,
2496 		.errstr_unpriv = "R0 leaks addr",
2497 		.retval = POINTER_VALUE,
2498 	},
2499 	{
2500 		"unpriv: add const to pointer",
2501 		.insns = {
2502 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
2503 			BPF_MOV64_IMM(BPF_REG_0, 0),
2504 			BPF_EXIT_INSN(),
2505 		},
2506 		.result = ACCEPT,
2507 	},
2508 	{
2509 		"unpriv: add pointer to pointer",
2510 		.insns = {
2511 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
2512 			BPF_MOV64_IMM(BPF_REG_0, 0),
2513 			BPF_EXIT_INSN(),
2514 		},
2515 		.result = REJECT,
2516 		.errstr = "R1 pointer += pointer",
2517 	},
2518 	{
2519 		"unpriv: neg pointer",
2520 		.insns = {
2521 			BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
2522 			BPF_MOV64_IMM(BPF_REG_0, 0),
2523 			BPF_EXIT_INSN(),
2524 		},
2525 		.result = ACCEPT,
2526 		.result_unpriv = REJECT,
2527 		.errstr_unpriv = "R1 pointer arithmetic",
2528 	},
2529 	{
2530 		"unpriv: cmp pointer with const",
2531 		.insns = {
2532 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2533 			BPF_MOV64_IMM(BPF_REG_0, 0),
2534 			BPF_EXIT_INSN(),
2535 		},
2536 		.result = ACCEPT,
2537 		.result_unpriv = REJECT,
2538 		.errstr_unpriv = "R1 pointer comparison",
2539 	},
2540 	{
2541 		"unpriv: cmp pointer with pointer",
2542 		.insns = {
2543 			BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
2544 			BPF_MOV64_IMM(BPF_REG_0, 0),
2545 			BPF_EXIT_INSN(),
2546 		},
2547 		.result = ACCEPT,
2548 		.result_unpriv = REJECT,
2549 		.errstr_unpriv = "R10 pointer comparison",
2550 	},
2551 	{
2552 		"unpriv: check that printk is disallowed",
2553 		.insns = {
2554 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2555 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2556 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2557 			BPF_MOV64_IMM(BPF_REG_2, 8),
2558 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
2559 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2560 				     BPF_FUNC_trace_printk),
2561 			BPF_MOV64_IMM(BPF_REG_0, 0),
2562 			BPF_EXIT_INSN(),
2563 		},
2564 		.errstr_unpriv = "unknown func bpf_trace_printk#6",
2565 		.result_unpriv = REJECT,
2566 		.result = ACCEPT,
2567 	},
2568 	{
2569 		"unpriv: pass pointer to helper function",
2570 		.insns = {
2571 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2572 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2573 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2574 			BPF_LD_MAP_FD(BPF_REG_1, 0),
2575 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
2576 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2577 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2578 				     BPF_FUNC_map_update_elem),
2579 			BPF_MOV64_IMM(BPF_REG_0, 0),
2580 			BPF_EXIT_INSN(),
2581 		},
2582 		.fixup_map_hash_8b = { 3 },
2583 		.errstr_unpriv = "R4 leaks addr",
2584 		.result_unpriv = REJECT,
2585 		.result = ACCEPT,
2586 	},
2587 	{
2588 		"unpriv: indirectly pass pointer on stack to helper function",
2589 		.insns = {
2590 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2591 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2592 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2593 			BPF_LD_MAP_FD(BPF_REG_1, 0),
2594 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2595 				     BPF_FUNC_map_lookup_elem),
2596 			BPF_MOV64_IMM(BPF_REG_0, 0),
2597 			BPF_EXIT_INSN(),
2598 		},
2599 		.fixup_map_hash_8b = { 3 },
2600 		.errstr = "invalid indirect read from stack off -8+0 size 8",
2601 		.result = REJECT,
2602 	},
2603 	{
2604 		"unpriv: mangle pointer on stack 1",
2605 		.insns = {
2606 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2607 			BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
2608 			BPF_MOV64_IMM(BPF_REG_0, 0),
2609 			BPF_EXIT_INSN(),
2610 		},
2611 		.errstr_unpriv = "attempt to corrupt spilled",
2612 		.result_unpriv = REJECT,
2613 		.result = ACCEPT,
2614 	},
2615 	{
2616 		"unpriv: mangle pointer on stack 2",
2617 		.insns = {
2618 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2619 			BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
2620 			BPF_MOV64_IMM(BPF_REG_0, 0),
2621 			BPF_EXIT_INSN(),
2622 		},
2623 		.errstr_unpriv = "attempt to corrupt spilled",
2624 		.result_unpriv = REJECT,
2625 		.result = ACCEPT,
2626 	},
2627 	{
2628 		"unpriv: read pointer from stack in small chunks",
2629 		.insns = {
2630 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2631 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
2632 			BPF_MOV64_IMM(BPF_REG_0, 0),
2633 			BPF_EXIT_INSN(),
2634 		},
2635 		.errstr = "invalid size",
2636 		.result = REJECT,
2637 	},
2638 	{
2639 		"unpriv: write pointer into ctx",
2640 		.insns = {
2641 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
2642 			BPF_MOV64_IMM(BPF_REG_0, 0),
2643 			BPF_EXIT_INSN(),
2644 		},
2645 		.errstr_unpriv = "R1 leaks addr",
2646 		.result_unpriv = REJECT,
2647 		.errstr = "invalid bpf_context access",
2648 		.result = REJECT,
2649 	},
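	/*
	 * Spill/fill of the context pointer: storing R1 (ctx) to an aligned
	 * stack slot and loading it back preserves its PTR_TO_CTX type, so
	 * the reloaded value can still be passed to helpers that expect a
	 * context argument.  Overwriting the slot with a different pointer
	 * (spill/fill of ctx 3) or touching it with XADD (spill/fill of ctx 4)
	 * destroys that type, which the "expected=ctx" errors below check.
	 */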
2650 	{
2651 		"unpriv: spill/fill of ctx",
2652 		.insns = {
2653 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2654 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2655 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2656 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2657 			BPF_MOV64_IMM(BPF_REG_0, 0),
2658 			BPF_EXIT_INSN(),
2659 		},
2660 		.result = ACCEPT,
2661 	},
2662 	{
2663 		"unpriv: spill/fill of ctx 2",
2664 		.insns = {
2665 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2666 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2667 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2668 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2669 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2670 				     BPF_FUNC_get_hash_recalc),
2671 			BPF_MOV64_IMM(BPF_REG_0, 0),
2672 			BPF_EXIT_INSN(),
2673 		},
2674 		.result = ACCEPT,
2675 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2676 	},
2677 	{
2678 		"unpriv: spill/fill of ctx 3",
2679 		.insns = {
2680 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2681 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2682 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2683 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
2684 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2685 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2686 				     BPF_FUNC_get_hash_recalc),
2687 			BPF_EXIT_INSN(),
2688 		},
2689 		.result = REJECT,
2690 		.errstr = "R1 type=fp expected=ctx",
2691 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2692 	},
2693 	{
2694 		"unpriv: spill/fill of ctx 4",
2695 		.insns = {
2696 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2697 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2698 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2699 			BPF_MOV64_IMM(BPF_REG_0, 1),
2700 			BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10,
2701 				     BPF_REG_0, -8, 0),
2702 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2703 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2704 				     BPF_FUNC_get_hash_recalc),
2705 			BPF_EXIT_INSN(),
2706 		},
2707 		.result = REJECT,
2708 		.errstr = "R1 type=inv expected=ctx",
2709 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2710 	},
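	/*
	 * When two paths spill different pointer types (e.g. ctx and sock)
	 * into the same stack slot, a single load/store instruction that
	 * later dereferences the filled value would need two different
	 * context rewrites, so the verifier rejects it with "same insn
	 * cannot be used with different pointers" (or a more specific error
	 * when reference tracking or read-only checks trigger first).  The
	 * BPF_SK_LOOKUP used in the following tests is a macro defined
	 * earlier in this file that emits a bpf_sk_lookup_tcp() call
	 * sequence.
	 */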
2711 	{
2712 		"unpriv: spill/fill of different pointers stx",
2713 		.insns = {
2714 			BPF_MOV64_IMM(BPF_REG_3, 42),
2715 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2716 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2717 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2718 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2719 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
2720 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2721 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2722 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2723 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2724 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2725 				    offsetof(struct __sk_buff, mark)),
2726 			BPF_MOV64_IMM(BPF_REG_0, 0),
2727 			BPF_EXIT_INSN(),
2728 		},
2729 		.result = REJECT,
2730 		.errstr = "same insn cannot be used with different pointers",
2731 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2732 	},
2733 	{
2734 		"unpriv: spill/fill of different pointers stx - ctx and sock",
2735 		.insns = {
2736 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
2737 			/* struct bpf_sock *sock = bpf_sock_lookup(...); */
2738 			BPF_SK_LOOKUP,
2739 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
2740 			/* u64 foo; */
2741 			/* void *target = &foo; */
2742 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2743 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2744 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
2745 			/* if (skb == NULL) *target = sock; */
2746 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2747 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2748 			/* else *target = skb; */
2749 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2750 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2751 			/* struct __sk_buff *skb = *target; */
2752 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2753 			/* skb->mark = 42; */
2754 			BPF_MOV64_IMM(BPF_REG_3, 42),
2755 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2756 				    offsetof(struct __sk_buff, mark)),
2757 			/* if (sk) bpf_sk_release(sk) */
2758 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2759 				BPF_EMIT_CALL(BPF_FUNC_sk_release),
2760 			BPF_MOV64_IMM(BPF_REG_0, 0),
2761 			BPF_EXIT_INSN(),
2762 		},
2763 		.result = REJECT,
2764 		.errstr = "type=ctx expected=sock",
2765 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2766 	},
2767 	{
2768 		"unpriv: spill/fill of different pointers stx - leak sock",
2769 		.insns = {
2770 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
2771 			/* struct bpf_sock *sock = bpf_sock_lookup(...); */
2772 			BPF_SK_LOOKUP,
2773 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
2774 			/* u64 foo; */
2775 			/* void *target = &foo; */
2776 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2777 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2778 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
2779 			/* if (skb == NULL) *target = sock; */
2780 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2781 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2782 			/* else *target = skb; */
2783 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2784 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2785 			/* struct __sk_buff *skb = *target; */
2786 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2787 			/* skb->mark = 42; */
2788 			BPF_MOV64_IMM(BPF_REG_3, 42),
2789 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2790 				    offsetof(struct __sk_buff, mark)),
2791 			BPF_EXIT_INSN(),
2792 		},
2793 		.result = REJECT,
2794 		//.errstr = "same insn cannot be used with different pointers",
2795 		.errstr = "Unreleased reference",
2796 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2797 	},
2798 	{
2799 		"unpriv: spill/fill of different pointers stx - sock and ctx (read)",
2800 		.insns = {
2801 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
2802 			/* struct bpf_sock *sock = bpf_sock_lookup(...); */
2803 			BPF_SK_LOOKUP,
2804 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
2805 			/* u64 foo; */
2806 			/* void *target = &foo; */
2807 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2808 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2809 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
2810 			/* if (skb) *target = skb */
2811 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2812 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2813 			/* else *target = sock */
2814 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2815 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2816 			/* struct bpf_sock *sk = *target; */
2817 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2818 			/* if (sk) u32 foo = sk->mark; bpf_sk_release(sk); */
2819 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
2820 				BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2821 					    offsetof(struct bpf_sock, mark)),
2822 				BPF_EMIT_CALL(BPF_FUNC_sk_release),
2823 			BPF_MOV64_IMM(BPF_REG_0, 0),
2824 			BPF_EXIT_INSN(),
2825 		},
2826 		.result = REJECT,
2827 		.errstr = "same insn cannot be used with different pointers",
2828 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2829 	},
2830 	{
2831 		"unpriv: spill/fill of different pointers stx - sock and ctx (write)",
2832 		.insns = {
2833 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
2834 			/* struct bpf_sock *sock = bpf_sock_lookup(...); */
2835 			BPF_SK_LOOKUP,
2836 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
2837 			/* u64 foo; */
2838 			/* void *target = &foo; */
2839 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2840 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2841 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
2842 			/* if (skb) *target = skb */
2843 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2844 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2845 			/* else *target = sock */
2846 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2847 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2848 			/* struct bpf_sock *sk = *target; */
2849 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2850 			/* if (sk) sk->mark = 42; bpf_sk_release(sk); */
2851 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2852 				BPF_MOV64_IMM(BPF_REG_3, 42),
2853 				BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2854 					    offsetof(struct bpf_sock, mark)),
2855 				BPF_EMIT_CALL(BPF_FUNC_sk_release),
2856 			BPF_MOV64_IMM(BPF_REG_0, 0),
2857 			BPF_EXIT_INSN(),
2858 		},
2859 		.result = REJECT,
2860 		//.errstr = "same insn cannot be used with different pointers",
2861 		.errstr = "cannot write into socket",
2862 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2863 	},
2864 	{
2865 		"unpriv: spill/fill of different pointers ldx",
2866 		.insns = {
2867 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2868 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2869 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2870 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2871 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
2872 				      -(__s32)offsetof(struct bpf_perf_event_data,
2873 						       sample_period) - 8),
2874 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2875 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2876 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2877 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2878 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
2879 				    offsetof(struct bpf_perf_event_data,
2880 					     sample_period)),
2881 			BPF_MOV64_IMM(BPF_REG_0, 0),
2882 			BPF_EXIT_INSN(),
2883 		},
2884 		.result = REJECT,
2885 		.errstr = "same insn cannot be used with different pointers",
2886 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
2887 	},
2888 	{
2889 		"unpriv: write pointer into map elem value",
2890 		.insns = {
2891 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2892 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2893 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2894 			BPF_LD_MAP_FD(BPF_REG_1, 0),
2895 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2896 				     BPF_FUNC_map_lookup_elem),
2897 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
2898 			BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
2899 			BPF_EXIT_INSN(),
2900 		},
2901 		.fixup_map_hash_8b = { 3 },
2902 		.errstr_unpriv = "R0 leaks addr",
2903 		.result_unpriv = REJECT,
2904 		.result = ACCEPT,
2905 	},
2906 	{
2907 		"unpriv: partial copy of pointer",
2908 		.insns = {
2909 			BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
2910 			BPF_MOV64_IMM(BPF_REG_0, 0),
2911 			BPF_EXIT_INSN(),
2912 		},
2913 		.errstr_unpriv = "R10 partial copy",
2914 		.result_unpriv = REJECT,
2915 		.result = ACCEPT,
2916 	},
2917 	{
2918 		"unpriv: pass pointer to tail_call",
2919 		.insns = {
2920 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
2921 			BPF_LD_MAP_FD(BPF_REG_2, 0),
2922 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2923 				     BPF_FUNC_tail_call),
2924 			BPF_MOV64_IMM(BPF_REG_0, 0),
2925 			BPF_EXIT_INSN(),
2926 		},
2927 		.fixup_prog1 = { 1 },
2928 		.errstr_unpriv = "R3 leaks addr into helper",
2929 		.result_unpriv = REJECT,
2930 		.result = ACCEPT,
2931 	},
2932 	{
2933 		"unpriv: cmp map pointer with zero",
2934 		.insns = {
2935 			BPF_MOV64_IMM(BPF_REG_1, 0),
2936 			BPF_LD_MAP_FD(BPF_REG_1, 0),
2937 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2938 			BPF_MOV64_IMM(BPF_REG_0, 0),
2939 			BPF_EXIT_INSN(),
2940 		},
2941 		.fixup_map_hash_8b = { 1 },
2942 		.errstr_unpriv = "R1 pointer comparison",
2943 		.result_unpriv = REJECT,
2944 		.result = ACCEPT,
2945 	},
2946 	{
2947 		"unpriv: write into frame pointer",
2948 		.insns = {
2949 			BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
2950 			BPF_MOV64_IMM(BPF_REG_0, 0),
2951 			BPF_EXIT_INSN(),
2952 		},
2953 		.errstr = "frame pointer is read only",
2954 		.result = REJECT,
2955 	},
2956 	{
2957 		"unpriv: spill/fill frame pointer",
2958 		.insns = {
2959 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2960 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2961 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
2962 			BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
2963 			BPF_MOV64_IMM(BPF_REG_0, 0),
2964 			BPF_EXIT_INSN(),
2965 		},
2966 		.errstr = "frame pointer is read only",
2967 		.result = REJECT,
2968 	},
2969 	{
2970 		"unpriv: cmp of frame pointer",
2971 		.insns = {
2972 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
2973 			BPF_MOV64_IMM(BPF_REG_0, 0),
2974 			BPF_EXIT_INSN(),
2975 		},
2976 		.errstr_unpriv = "R10 pointer comparison",
2977 		.result_unpriv = REJECT,
2978 		.result = ACCEPT,
2979 	},
2980 	{
2981 		"unpriv: adding of fp",
2982 		.insns = {
2983 			BPF_MOV64_IMM(BPF_REG_0, 0),
2984 			BPF_MOV64_IMM(BPF_REG_1, 0),
2985 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
2986 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
2987 			BPF_EXIT_INSN(),
2988 		},
2989 		.result = ACCEPT,
2990 	},
2991 	{
2992 		"unpriv: cmp of stack pointer",
2993 		.insns = {
2994 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2995 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2996 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
2997 			BPF_MOV64_IMM(BPF_REG_0, 0),
2998 			BPF_EXIT_INSN(),
2999 		},
3000 		.errstr_unpriv = "R2 pointer comparison",
3001 		.result_unpriv = REJECT,
3002 		.result = ACCEPT,
3003 	},
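	/*
	 * runtime/jit tail_call tests: bpf_tail_call(ctx, prog_array, index)
	 * jumps into the program stored at prog_array[index] and does not
	 * return on success; if the index is out of range or the slot is
	 * empty, the call is a no-op and execution falls through, so the
	 * usual pattern is
	 *
	 *	bpf_tail_call(ctx, &jmp_table, idx);
	 *	return FALLBACK;	(only reached if the tail call failed)
	 *
	 * .fixup_prog1 makes the harness patch in a prog array whose
	 * populated slots return the values the tests expect.  The
	 * "> 32bit index" case relies on the index argument being treated
	 * as a u32, so 0x100000000 is truncated and hits slot 0.
	 */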
3004 	{
3005 		"runtime/jit: tail_call within bounds, prog once",
3006 		.insns = {
3007 			BPF_MOV64_IMM(BPF_REG_3, 0),
3008 			BPF_LD_MAP_FD(BPF_REG_2, 0),
3009 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3010 				     BPF_FUNC_tail_call),
3011 			BPF_MOV64_IMM(BPF_REG_0, 1),
3012 			BPF_EXIT_INSN(),
3013 		},
3014 		.fixup_prog1 = { 1 },
3015 		.result = ACCEPT,
3016 		.retval = 42,
3017 	},
3018 	{
3019 		"runtime/jit: tail_call within bounds, prog loop",
3020 		.insns = {
3021 			BPF_MOV64_IMM(BPF_REG_3, 1),
3022 			BPF_LD_MAP_FD(BPF_REG_2, 0),
3023 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3024 				     BPF_FUNC_tail_call),
3025 			BPF_MOV64_IMM(BPF_REG_0, 1),
3026 			BPF_EXIT_INSN(),
3027 		},
3028 		.fixup_prog1 = { 1 },
3029 		.result = ACCEPT,
3030 		.retval = 41,
3031 	},
3032 	{
3033 		"runtime/jit: tail_call within bounds, no prog",
3034 		.insns = {
3035 			BPF_MOV64_IMM(BPF_REG_3, 2),
3036 			BPF_LD_MAP_FD(BPF_REG_2, 0),
3037 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3038 				     BPF_FUNC_tail_call),
3039 			BPF_MOV64_IMM(BPF_REG_0, 1),
3040 			BPF_EXIT_INSN(),
3041 		},
3042 		.fixup_prog1 = { 1 },
3043 		.result = ACCEPT,
3044 		.retval = 1,
3045 	},
3046 	{
3047 		"runtime/jit: tail_call out of bounds",
3048 		.insns = {
3049 			BPF_MOV64_IMM(BPF_REG_3, 256),
3050 			BPF_LD_MAP_FD(BPF_REG_2, 0),
3051 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3052 				     BPF_FUNC_tail_call),
3053 			BPF_MOV64_IMM(BPF_REG_0, 2),
3054 			BPF_EXIT_INSN(),
3055 		},
3056 		.fixup_prog1 = { 1 },
3057 		.result = ACCEPT,
3058 		.retval = 2,
3059 	},
3060 	{
3061 		"runtime/jit: pass negative index to tail_call",
3062 		.insns = {
3063 			BPF_MOV64_IMM(BPF_REG_3, -1),
3064 			BPF_LD_MAP_FD(BPF_REG_2, 0),
3065 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3066 				     BPF_FUNC_tail_call),
3067 			BPF_MOV64_IMM(BPF_REG_0, 2),
3068 			BPF_EXIT_INSN(),
3069 		},
3070 		.fixup_prog1 = { 1 },
3071 		.result = ACCEPT,
3072 		.retval = 2,
3073 	},
3074 	{
3075 		"runtime/jit: pass > 32bit index to tail_call",
3076 		.insns = {
3077 			BPF_LD_IMM64(BPF_REG_3, 0x100000000ULL),
3078 			BPF_LD_MAP_FD(BPF_REG_2, 0),
3079 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3080 				     BPF_FUNC_tail_call),
3081 			BPF_MOV64_IMM(BPF_REG_0, 2),
3082 			BPF_EXIT_INSN(),
3083 		},
3084 		.fixup_prog1 = { 2 },
3085 		.result = ACCEPT,
3086 		.retval = 42,
3087 	},
3088 	{
3089 		"stack pointer arithmetic",
3090 		.insns = {
3091 			BPF_MOV64_IMM(BPF_REG_1, 4),
3092 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
3093 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
3094 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
3095 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
3096 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
3097 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
3098 			BPF_ST_MEM(0, BPF_REG_2, 4, 0),
3099 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
3100 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
3101 			BPF_ST_MEM(0, BPF_REG_2, 4, 0),
3102 			BPF_MOV64_IMM(BPF_REG_0, 0),
3103 			BPF_EXIT_INSN(),
3104 		},
3105 		.result = ACCEPT,
3106 	},
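	/*
	 * raw_stack tests: bpf_skb_load_bytes(skb, off, to, len) copies len
	 * bytes of packet data into the buffer at 'to'.  The buffer argument
	 * is ARG_PTR_TO_UNINIT_MEM, so the stack slots do not have to be
	 * initialized first, but R3 must point inside the 512-byte stack and
	 * R4 must be a known, strictly positive size.  Because the helper
	 * overwrites the buffer, any pointer previously spilled into that
	 * range comes back as an unknown scalar ("invalid mem access 'inv'").
	 */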
3107 	{
3108 		"raw_stack: no skb_load_bytes",
3109 		.insns = {
3110 			BPF_MOV64_IMM(BPF_REG_2, 4),
3111 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3112 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3113 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3114 			BPF_MOV64_IMM(BPF_REG_4, 8),
3115 			/* Call to skb_load_bytes() omitted. */
3116 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3117 			BPF_EXIT_INSN(),
3118 		},
3119 		.result = REJECT,
3120 		.errstr = "invalid read from stack off -8+0 size 8",
3121 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3122 	},
3123 	{
3124 		"raw_stack: skb_load_bytes, negative len",
3125 		.insns = {
3126 			BPF_MOV64_IMM(BPF_REG_2, 4),
3127 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3128 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3129 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3130 			BPF_MOV64_IMM(BPF_REG_4, -8),
3131 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3132 				     BPF_FUNC_skb_load_bytes),
3133 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3134 			BPF_EXIT_INSN(),
3135 		},
3136 		.result = REJECT,
3137 		.errstr = "R4 min value is negative",
3138 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3139 	},
3140 	{
3141 		"raw_stack: skb_load_bytes, negative len 2",
3142 		.insns = {
3143 			BPF_MOV64_IMM(BPF_REG_2, 4),
3144 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3145 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3146 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3147 			BPF_MOV64_IMM(BPF_REG_4, ~0),
3148 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3149 				     BPF_FUNC_skb_load_bytes),
3150 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3151 			BPF_EXIT_INSN(),
3152 		},
3153 		.result = REJECT,
3154 		.errstr = "R4 min value is negative",
3155 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3156 	},
3157 	{
3158 		"raw_stack: skb_load_bytes, zero len",
3159 		.insns = {
3160 			BPF_MOV64_IMM(BPF_REG_2, 4),
3161 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3162 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3163 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3164 			BPF_MOV64_IMM(BPF_REG_4, 0),
3165 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3166 				     BPF_FUNC_skb_load_bytes),
3167 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3168 			BPF_EXIT_INSN(),
3169 		},
3170 		.result = REJECT,
3171 		.errstr = "invalid stack type R3",
3172 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3173 	},
3174 	{
3175 		"raw_stack: skb_load_bytes, no init",
3176 		.insns = {
3177 			BPF_MOV64_IMM(BPF_REG_2, 4),
3178 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3179 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3180 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3181 			BPF_MOV64_IMM(BPF_REG_4, 8),
3182 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3183 				     BPF_FUNC_skb_load_bytes),
3184 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3185 			BPF_EXIT_INSN(),
3186 		},
3187 		.result = ACCEPT,
3188 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3189 	},
3190 	{
3191 		"raw_stack: skb_load_bytes, init",
3192 		.insns = {
3193 			BPF_MOV64_IMM(BPF_REG_2, 4),
3194 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3195 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3196 			BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
3197 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3198 			BPF_MOV64_IMM(BPF_REG_4, 8),
3199 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3200 				     BPF_FUNC_skb_load_bytes),
3201 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3202 			BPF_EXIT_INSN(),
3203 		},
3204 		.result = ACCEPT,
3205 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3206 	},
3207 	{
3208 		"raw_stack: skb_load_bytes, spilled regs around bounds",
3209 		.insns = {
3210 			BPF_MOV64_IMM(BPF_REG_2, 4),
3211 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3212 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
3213 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
3214 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  8),
3215 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3216 			BPF_MOV64_IMM(BPF_REG_4, 8),
3217 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3218 				     BPF_FUNC_skb_load_bytes),
3219 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
3220 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6,  8),
3221 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3222 				    offsetof(struct __sk_buff, mark)),
3223 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
3224 				    offsetof(struct __sk_buff, priority)),
3225 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3226 			BPF_EXIT_INSN(),
3227 		},
3228 		.result = ACCEPT,
3229 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3230 	},
3231 	{
3232 		"raw_stack: skb_load_bytes, spilled regs corruption",
3233 		.insns = {
3234 			BPF_MOV64_IMM(BPF_REG_2, 4),
3235 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3236 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3237 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
3238 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3239 			BPF_MOV64_IMM(BPF_REG_4, 8),
3240 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3241 				     BPF_FUNC_skb_load_bytes),
3242 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3243 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3244 				    offsetof(struct __sk_buff, mark)),
3245 			BPF_EXIT_INSN(),
3246 		},
3247 		.result = REJECT,
3248 		.errstr = "R0 invalid mem access 'inv'",
3249 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3250 	},
3251 	{
3252 		"raw_stack: skb_load_bytes, spilled regs corruption 2",
3253 		.insns = {
3254 			BPF_MOV64_IMM(BPF_REG_2, 4),
3255 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3256 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
3257 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
3258 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  0),
3259 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  8),
3260 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3261 			BPF_MOV64_IMM(BPF_REG_4, 8),
3262 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3263 				     BPF_FUNC_skb_load_bytes),
3264 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
3265 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6,  8),
3266 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6,  0),
3267 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3268 				    offsetof(struct __sk_buff, mark)),
3269 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
3270 				    offsetof(struct __sk_buff, priority)),
3271 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3272 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
3273 				    offsetof(struct __sk_buff, pkt_type)),
3274 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
3275 			BPF_EXIT_INSN(),
3276 		},
3277 		.result = REJECT,
3278 		.errstr = "R3 invalid mem access 'inv'",
3279 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3280 	},
3281 	{
3282 		"raw_stack: skb_load_bytes, spilled regs + data",
3283 		.insns = {
3284 			BPF_MOV64_IMM(BPF_REG_2, 4),
3285 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3286 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
3287 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
3288 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  0),
3289 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  8),
3290 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3291 			BPF_MOV64_IMM(BPF_REG_4, 8),
3292 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3293 				     BPF_FUNC_skb_load_bytes),
3294 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
3295 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6,  8),
3296 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6,  0),
3297 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3298 				    offsetof(struct __sk_buff, mark)),
3299 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
3300 				    offsetof(struct __sk_buff, priority)),
3301 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3302 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
3303 			BPF_EXIT_INSN(),
3304 		},
3305 		.result = ACCEPT,
3306 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3307 	},
3308 	{
3309 		"raw_stack: skb_load_bytes, invalid access 1",
3310 		.insns = {
3311 			BPF_MOV64_IMM(BPF_REG_2, 4),
3312 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3313 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
3314 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3315 			BPF_MOV64_IMM(BPF_REG_4, 8),
3316 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3317 				     BPF_FUNC_skb_load_bytes),
3318 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3319 			BPF_EXIT_INSN(),
3320 		},
3321 		.result = REJECT,
3322 		.errstr = "invalid stack type R3 off=-513 access_size=8",
3323 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3324 	},
3325 	{
3326 		"raw_stack: skb_load_bytes, invalid access 2",
3327 		.insns = {
3328 			BPF_MOV64_IMM(BPF_REG_2, 4),
3329 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3330 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
3331 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3332 			BPF_MOV64_IMM(BPF_REG_4, 8),
3333 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3334 				     BPF_FUNC_skb_load_bytes),
3335 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3336 			BPF_EXIT_INSN(),
3337 		},
3338 		.result = REJECT,
3339 		.errstr = "invalid stack type R3 off=-1 access_size=8",
3340 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3341 	},
3342 	{
3343 		"raw_stack: skb_load_bytes, invalid access 3",
3344 		.insns = {
3345 			BPF_MOV64_IMM(BPF_REG_2, 4),
3346 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3347 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
3348 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3349 			BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3350 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3351 				     BPF_FUNC_skb_load_bytes),
3352 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3353 			BPF_EXIT_INSN(),
3354 		},
3355 		.result = REJECT,
3356 		.errstr = "R4 min value is negative",
3357 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3358 	},
3359 	{
3360 		"raw_stack: skb_load_bytes, invalid access 4",
3361 		.insns = {
3362 			BPF_MOV64_IMM(BPF_REG_2, 4),
3363 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3364 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
3365 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3366 			BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
3367 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3368 				     BPF_FUNC_skb_load_bytes),
3369 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3370 			BPF_EXIT_INSN(),
3371 		},
3372 		.result = REJECT,
3373 		.errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
3374 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3375 	},
3376 	{
3377 		"raw_stack: skb_load_bytes, invalid access 5",
3378 		.insns = {
3379 			BPF_MOV64_IMM(BPF_REG_2, 4),
3380 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3381 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
3382 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3383 			BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
3384 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3385 				     BPF_FUNC_skb_load_bytes),
3386 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3387 			BPF_EXIT_INSN(),
3388 		},
3389 		.result = REJECT,
3390 		.errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
3391 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3392 	},
3393 	{
3394 		"raw_stack: skb_load_bytes, invalid access 6",
3395 		.insns = {
3396 			BPF_MOV64_IMM(BPF_REG_2, 4),
3397 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3398 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
3399 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3400 			BPF_MOV64_IMM(BPF_REG_4, 0),
3401 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3402 				     BPF_FUNC_skb_load_bytes),
3403 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3404 			BPF_EXIT_INSN(),
3405 		},
3406 		.result = REJECT,
3407 		.errstr = "invalid stack type R3 off=-512 access_size=0",
3408 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3409 	},
3410 	{
3411 		"raw_stack: skb_load_bytes, large access",
3412 		.insns = {
3413 			BPF_MOV64_IMM(BPF_REG_2, 4),
3414 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3415 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
3416 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3417 			BPF_MOV64_IMM(BPF_REG_4, 512),
3418 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3419 				     BPF_FUNC_skb_load_bytes),
3420 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3421 			BPF_EXIT_INSN(),
3422 		},
3423 		.result = ACCEPT,
3424 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3425 	},
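	/*
	 * Stores through the context pointer are only allowed as BPF_STX
	 * from a register, and only to fields the program type marks
	 * writable; BPF_ST (store immediate) and BPF_XADD forms are rejected
	 * outright, as the two tests below check.
	 */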
3426 	{
3427 		"context stores via ST",
3428 		.insns = {
3429 			BPF_MOV64_IMM(BPF_REG_0, 0),
3430 			BPF_ST_MEM(BPF_DW, BPF_REG_1, offsetof(struct __sk_buff, mark), 0),
3431 			BPF_EXIT_INSN(),
3432 		},
3433 		.errstr = "BPF_ST stores into R1 inv is not allowed",
3434 		.result = REJECT,
3435 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3436 	},
3437 	{
3438 		"context stores via XADD",
3439 		.insns = {
3440 			BPF_MOV64_IMM(BPF_REG_0, 0),
3441 			BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_1,
3442 				     BPF_REG_0, offsetof(struct __sk_buff, mark), 0),
3443 			BPF_EXIT_INSN(),
3444 		},
3445 		.errstr = "BPF_XADD stores into R1 inv is not allowed",
3446 		.result = REJECT,
3447 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3448 	},
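	/*
	 * Direct packet access from tc (SCHED_CLS) programs: __sk_buff->data
	 * and ->data_end are exposed as 32-bit fields that the verifier
	 * rewrites into real pointers, and every dereference must be guarded
	 * by the same "if (data + N <= data_end)" pattern sketched above for
	 * SK_MSG.  test3 shows that socket filters may not read ->data at
	 * all at this point ("invalid bpf_context access off=76").
	 */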
3449 	{
3450 		"direct packet access: test1",
3451 		.insns = {
3452 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3453 				    offsetof(struct __sk_buff, data)),
3454 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3455 				    offsetof(struct __sk_buff, data_end)),
3456 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3457 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3458 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3459 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3460 			BPF_MOV64_IMM(BPF_REG_0, 0),
3461 			BPF_EXIT_INSN(),
3462 		},
3463 		.result = ACCEPT,
3464 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3465 	},
3466 	{
3467 		"direct packet access: test2",
3468 		.insns = {
3469 			BPF_MOV64_IMM(BPF_REG_0, 1),
3470 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
3471 				    offsetof(struct __sk_buff, data_end)),
3472 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3473 				    offsetof(struct __sk_buff, data)),
3474 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3475 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
3476 			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
3477 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
3478 			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
3479 			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
3480 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3481 				    offsetof(struct __sk_buff, data)),
3482 			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
3483 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3484 				    offsetof(struct __sk_buff, len)),
3485 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
3486 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
3487 			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
3488 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
3489 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
3490 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
3491 				    offsetof(struct __sk_buff, data_end)),
3492 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
3493 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
3494 			BPF_MOV64_IMM(BPF_REG_0, 0),
3495 			BPF_EXIT_INSN(),
3496 		},
3497 		.result = ACCEPT,
3498 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3499 	},
3500 	{
3501 		"direct packet access: test3",
3502 		.insns = {
3503 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3504 				    offsetof(struct __sk_buff, data)),
3505 			BPF_MOV64_IMM(BPF_REG_0, 0),
3506 			BPF_EXIT_INSN(),
3507 		},
3508 		.errstr = "invalid bpf_context access off=76",
3509 		.result = REJECT,
3510 		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
3511 	},
3512 	{
3513 		"direct packet access: test4 (write)",
3514 		.insns = {
3515 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3516 				    offsetof(struct __sk_buff, data)),
3517 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3518 				    offsetof(struct __sk_buff, data_end)),
3519 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3520 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3521 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3522 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3523 			BPF_MOV64_IMM(BPF_REG_0, 0),
3524 			BPF_EXIT_INSN(),
3525 		},
3526 		.result = ACCEPT,
3527 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3528 	},
3529 	{
3530 		"direct packet access: test5 (pkt_end >= reg, good access)",
3531 		.insns = {
3532 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3533 				    offsetof(struct __sk_buff, data)),
3534 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3535 				    offsetof(struct __sk_buff, data_end)),
3536 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3537 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3538 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
3539 			BPF_MOV64_IMM(BPF_REG_0, 1),
3540 			BPF_EXIT_INSN(),
3541 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3542 			BPF_MOV64_IMM(BPF_REG_0, 0),
3543 			BPF_EXIT_INSN(),
3544 		},
3545 		.result = ACCEPT,
3546 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3547 	},
3548 	{
3549 		"direct packet access: test6 (pkt_end >= reg, bad access)",
3550 		.insns = {
3551 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3552 				    offsetof(struct __sk_buff, data)),
3553 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3554 				    offsetof(struct __sk_buff, data_end)),
3555 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3556 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3557 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
3558 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3559 			BPF_MOV64_IMM(BPF_REG_0, 1),
3560 			BPF_EXIT_INSN(),
3561 			BPF_MOV64_IMM(BPF_REG_0, 0),
3562 			BPF_EXIT_INSN(),
3563 		},
3564 		.errstr = "invalid access to packet",
3565 		.result = REJECT,
3566 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3567 	},
3568 	{
3569 		"direct packet access: test7 (pkt_end >= reg, both accesses)",
3570 		.insns = {
3571 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3572 				    offsetof(struct __sk_buff, data)),
3573 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3574 				    offsetof(struct __sk_buff, data_end)),
3575 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3576 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3577 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
3578 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3579 			BPF_MOV64_IMM(BPF_REG_0, 1),
3580 			BPF_EXIT_INSN(),
3581 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3582 			BPF_MOV64_IMM(BPF_REG_0, 0),
3583 			BPF_EXIT_INSN(),
3584 		},
3585 		.errstr = "invalid access to packet",
3586 		.result = REJECT,
3587 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3588 	},
3589 	{
3590 		"direct packet access: test8 (double test, variant 1)",
3591 		.insns = {
3592 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3593 				    offsetof(struct __sk_buff, data)),
3594 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3595 				    offsetof(struct __sk_buff, data_end)),
3596 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3597 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3598 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
3599 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3600 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3601 			BPF_MOV64_IMM(BPF_REG_0, 1),
3602 			BPF_EXIT_INSN(),
3603 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3604 			BPF_MOV64_IMM(BPF_REG_0, 0),
3605 			BPF_EXIT_INSN(),
3606 		},
3607 		.result = ACCEPT,
3608 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3609 	},
3610 	{
3611 		"direct packet access: test9 (double test, variant 2)",
3612 		.insns = {
3613 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3614 				    offsetof(struct __sk_buff, data)),
3615 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3616 				    offsetof(struct __sk_buff, data_end)),
3617 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3618 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3619 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
3620 			BPF_MOV64_IMM(BPF_REG_0, 1),
3621 			BPF_EXIT_INSN(),
3622 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3623 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3624 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3625 			BPF_MOV64_IMM(BPF_REG_0, 0),
3626 			BPF_EXIT_INSN(),
3627 		},
3628 		.result = ACCEPT,
3629 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3630 	},
3631 	{
3632 		"direct packet access: test10 (write invalid)",
3633 		.insns = {
3634 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3635 				    offsetof(struct __sk_buff, data)),
3636 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3637 				    offsetof(struct __sk_buff, data_end)),
3638 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3639 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3640 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
3641 			BPF_MOV64_IMM(BPF_REG_0, 0),
3642 			BPF_EXIT_INSN(),
3643 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3644 			BPF_MOV64_IMM(BPF_REG_0, 0),
3645 			BPF_EXIT_INSN(),
3646 		},
3647 		.errstr = "invalid access to packet",
3648 		.result = REJECT,
3649 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3650 	},
3651 	{
3652 		"direct packet access: test11 (shift, good access)",
3653 		.insns = {
3654 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3655 				    offsetof(struct __sk_buff, data)),
3656 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3657 				    offsetof(struct __sk_buff, data_end)),
3658 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3659 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3660 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3661 			BPF_MOV64_IMM(BPF_REG_3, 144),
3662 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3663 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3664 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
3665 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3666 			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3667 			BPF_MOV64_IMM(BPF_REG_0, 1),
3668 			BPF_EXIT_INSN(),
3669 			BPF_MOV64_IMM(BPF_REG_0, 0),
3670 			BPF_EXIT_INSN(),
3671 		},
3672 		.result = ACCEPT,
3673 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3674 		.retval = 1,
3675 	},
3676 	{
3677 		"direct packet access: test12 (and, good access)",
3678 		.insns = {
3679 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3680 				    offsetof(struct __sk_buff, data)),
3681 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3682 				    offsetof(struct __sk_buff, data_end)),
3683 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3684 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3685 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3686 			BPF_MOV64_IMM(BPF_REG_3, 144),
3687 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3688 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3689 			BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
3690 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3691 			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3692 			BPF_MOV64_IMM(BPF_REG_0, 1),
3693 			BPF_EXIT_INSN(),
3694 			BPF_MOV64_IMM(BPF_REG_0, 0),
3695 			BPF_EXIT_INSN(),
3696 		},
3697 		.result = ACCEPT,
3698 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3699 		.retval = 1,
3700 	},
3701 	{
3702 		"direct packet access: test13 (branches, good access)",
3703 		.insns = {
3704 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3705 				    offsetof(struct __sk_buff, data)),
3706 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3707 				    offsetof(struct __sk_buff, data_end)),
3708 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3709 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3710 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
3711 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3712 				    offsetof(struct __sk_buff, mark)),
3713 			BPF_MOV64_IMM(BPF_REG_4, 1),
3714 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
3715 			BPF_MOV64_IMM(BPF_REG_3, 14),
3716 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
3717 			BPF_MOV64_IMM(BPF_REG_3, 24),
3718 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3719 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3720 			BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
3721 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3722 			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3723 			BPF_MOV64_IMM(BPF_REG_0, 1),
3724 			BPF_EXIT_INSN(),
3725 			BPF_MOV64_IMM(BPF_REG_0, 0),
3726 			BPF_EXIT_INSN(),
3727 		},
3728 		.result = ACCEPT,
3729 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3730 		.retval = 1,
3731 	},
3732 	{
3733 		"direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
3734 		.insns = {
3735 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3736 				    offsetof(struct __sk_buff, data)),
3737 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3738 				    offsetof(struct __sk_buff, data_end)),
3739 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3740 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3741 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
3742 			BPF_MOV64_IMM(BPF_REG_5, 12),
3743 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4),
3744 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3745 			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3746 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
3747 			BPF_MOV64_IMM(BPF_REG_0, 1),
3748 			BPF_EXIT_INSN(),
3749 			BPF_MOV64_IMM(BPF_REG_0, 0),
3750 			BPF_EXIT_INSN(),
3751 		},
3752 		.result = ACCEPT,
3753 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3754 		.retval = 1,
3755 	},
3756 	{
3757 		"direct packet access: test15 (spill with xadd)",
3758 		.insns = {
3759 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3760 				    offsetof(struct __sk_buff, data)),
3761 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3762 				    offsetof(struct __sk_buff, data_end)),
3763 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3764 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3765 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3766 			BPF_MOV64_IMM(BPF_REG_5, 4096),
3767 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
3768 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
3769 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
3770 			BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
3771 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
3772 			BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
3773 			BPF_MOV64_IMM(BPF_REG_0, 0),
3774 			BPF_EXIT_INSN(),
3775 		},
3776 		.errstr = "R2 invalid mem access 'inv'",
3777 		.result = REJECT,
3778 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3779 	},
3780 	{
3781 		"direct packet access: test16 (arith on data_end)",
3782 		.insns = {
3783 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3784 				    offsetof(struct __sk_buff, data)),
3785 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3786 				    offsetof(struct __sk_buff, data_end)),
3787 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3788 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3789 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16),
3790 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3791 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3792 			BPF_MOV64_IMM(BPF_REG_0, 0),
3793 			BPF_EXIT_INSN(),
3794 		},
3795 		.errstr = "R3 pointer arithmetic on pkt_end",
3796 		.result = REJECT,
3797 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3798 	},
3799 	{
3800 		"direct packet access: test17 (pruning, alignment)",
3801 		.insns = {
3802 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3803 				    offsetof(struct __sk_buff, data)),
3804 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3805 				    offsetof(struct __sk_buff, data_end)),
3806 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3807 				    offsetof(struct __sk_buff, mark)),
3808 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3809 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14),
3810 			BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4),
3811 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3812 			BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4),
3813 			BPF_MOV64_IMM(BPF_REG_0, 0),
3814 			BPF_EXIT_INSN(),
3815 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
3816 			BPF_JMP_A(-6),
3817 		},
3818 		.errstr = "misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4",
3819 		.result = REJECT,
3820 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3821 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
3822 	},
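	/*
	 * Tests 18-24 add a scalar to a packet pointer (in either operand
	 * order, including values reloaded from the stack) and check that
	 * the resulting pointer is still only usable within a verified range.
	 */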
3823 	{
3824 		"direct packet access: test18 (imm += pkt_ptr, 1)",
3825 		.insns = {
3826 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3827 				    offsetof(struct __sk_buff, data)),
3828 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3829 				    offsetof(struct __sk_buff, data_end)),
3830 			BPF_MOV64_IMM(BPF_REG_0, 8),
3831 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3832 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3833 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3834 			BPF_MOV64_IMM(BPF_REG_0, 0),
3835 			BPF_EXIT_INSN(),
3836 		},
3837 		.result = ACCEPT,
3838 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3839 	},
3840 	{
3841 		"direct packet access: test19 (imm += pkt_ptr, 2)",
3842 		.insns = {
3843 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3844 				    offsetof(struct __sk_buff, data)),
3845 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3846 				    offsetof(struct __sk_buff, data_end)),
3847 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3848 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3849 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
3850 			BPF_MOV64_IMM(BPF_REG_4, 4),
3851 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3852 			BPF_STX_MEM(BPF_B, BPF_REG_4, BPF_REG_4, 0),
3853 			BPF_MOV64_IMM(BPF_REG_0, 0),
3854 			BPF_EXIT_INSN(),
3855 		},
3856 		.result = ACCEPT,
3857 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3858 	},
3859 	{
3860 		"direct packet access: test20 (x += pkt_ptr, 1)",
3861 		.insns = {
3862 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3863 				    offsetof(struct __sk_buff, data)),
3864 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3865 				    offsetof(struct __sk_buff, data_end)),
3866 			BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3867 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3868 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3869 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0x7fff),
3870 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3871 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3872 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3873 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
3874 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
3875 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
3876 			BPF_MOV64_IMM(BPF_REG_0, 0),
3877 			BPF_EXIT_INSN(),
3878 		},
3879 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3880 		.result = ACCEPT,
3881 	},
3882 	{
3883 		"direct packet access: test21 (x += pkt_ptr, 2)",
3884 		.insns = {
3885 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3886 				    offsetof(struct __sk_buff, data)),
3887 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3888 				    offsetof(struct __sk_buff, data_end)),
3889 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3890 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3891 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
3892 			BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3893 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
3894 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
3895 			BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0x7fff),
3896 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3897 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3898 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
3899 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
3900 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
3901 			BPF_MOV64_IMM(BPF_REG_0, 0),
3902 			BPF_EXIT_INSN(),
3903 		},
3904 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3905 		.result = ACCEPT,
3906 	},
3907 	{
3908 		"direct packet access: test22 (x += pkt_ptr, 3)",
3909 		.insns = {
3910 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3911 				    offsetof(struct __sk_buff, data)),
3912 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3913 				    offsetof(struct __sk_buff, data_end)),
3914 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3915 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3916 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8),
3917 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_3, -16),
3918 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -16),
3919 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11),
3920 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
3921 			BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3922 			BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
3923 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
3924 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 49),
3925 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3926 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
3927 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
3928 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
3929 			BPF_MOV64_IMM(BPF_REG_2, 1),
3930 			BPF_STX_MEM(BPF_H, BPF_REG_4, BPF_REG_2, 0),
3931 			BPF_MOV64_IMM(BPF_REG_0, 0),
3932 			BPF_EXIT_INSN(),
3933 		},
3934 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3935 		.result = ACCEPT,
3936 	},
3937 	{
3938 		"direct packet access: test23 (x += pkt_ptr, 4)",
3939 		.insns = {
3940 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3941 				    offsetof(struct __sk_buff, data)),
3942 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3943 				    offsetof(struct __sk_buff, data_end)),
3944 			BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3945 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3946 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3947 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
3948 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3949 			BPF_MOV64_IMM(BPF_REG_0, 31),
3950 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
3951 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3952 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
3953 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1),
3954 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3955 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
3956 			BPF_MOV64_IMM(BPF_REG_0, 0),
3957 			BPF_EXIT_INSN(),
3958 		},
3959 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3960 		.result = REJECT,
3961 		.errstr = "invalid access to packet, off=0 size=8, R5(id=1,off=0,r=0)",
3962 	},
3963 	{
3964 		"direct packet access: test24 (x += pkt_ptr, 5)",
3965 		.insns = {
3966 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3967 				    offsetof(struct __sk_buff, data)),
3968 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3969 				    offsetof(struct __sk_buff, data_end)),
3970 			BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3971 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3972 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3973 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff),
3974 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3975 			BPF_MOV64_IMM(BPF_REG_0, 64),
3976 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
3977 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3978 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
3979 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7fff - 1),
3980 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3981 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
3982 			BPF_MOV64_IMM(BPF_REG_0, 0),
3983 			BPF_EXIT_INSN(),
3984 		},
3985 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3986 		.result = ACCEPT,
3987 	},
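	/*
	 * Tests 25-28 use the JLT/JLE forms of the range check and confirm
	 * that branch marking also works for "<" and "<=" against pkt_end.
	 */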
3988 	{
3989 		"direct packet access: test25 (marking on <, good access)",
3990 		.insns = {
3991 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3992 				    offsetof(struct __sk_buff, data)),
3993 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3994 				    offsetof(struct __sk_buff, data_end)),
3995 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3996 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3997 			BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 2),
3998 			BPF_MOV64_IMM(BPF_REG_0, 0),
3999 			BPF_EXIT_INSN(),
4000 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4001 			BPF_JMP_IMM(BPF_JA, 0, 0, -4),
4002 		},
4003 		.result = ACCEPT,
4004 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4005 	},
4006 	{
4007 		"direct packet access: test26 (marking on <, bad access)",
4008 		.insns = {
4009 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4010 				    offsetof(struct __sk_buff, data)),
4011 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4012 				    offsetof(struct __sk_buff, data_end)),
4013 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4014 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4015 			BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 3),
4016 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4017 			BPF_MOV64_IMM(BPF_REG_0, 0),
4018 			BPF_EXIT_INSN(),
4019 			BPF_JMP_IMM(BPF_JA, 0, 0, -3),
4020 		},
4021 		.result = REJECT,
4022 		.errstr = "invalid access to packet",
4023 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4024 	},
4025 	{
4026 		"direct packet access: test27 (marking on <=, good access)",
4027 		.insns = {
4028 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4029 				    offsetof(struct __sk_buff, data)),
4030 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4031 				    offsetof(struct __sk_buff, data_end)),
4032 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4033 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4034 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 1),
4035 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4036 			BPF_MOV64_IMM(BPF_REG_0, 1),
4037 			BPF_EXIT_INSN(),
4038 		},
4039 		.result = ACCEPT,
4040 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4041 		.retval = 1,
4042 	},
4043 	{
4044 		"direct packet access: test28 (marking on <=, bad access)",
4045 		.insns = {
4046 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4047 				    offsetof(struct __sk_buff, data)),
4048 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4049 				    offsetof(struct __sk_buff, data_end)),
4050 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4051 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4052 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 2),
4053 			BPF_MOV64_IMM(BPF_REG_0, 1),
4054 			BPF_EXIT_INSN(),
4055 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4056 			BPF_JMP_IMM(BPF_JA, 0, 0, -4),
4057 		},
4058 		.result = REJECT,
4059 		.errstr = "invalid access to packet",
4060 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4061 	},
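	/*
	 * Helper access to packet: a packet pointer may be passed to a map
	 * helper only after the usual data/data_end check, so that the
	 * verifier knows a safe range for the helper to touch. The XDP tests
	 * below exercise checked, unchecked, bad and too-short ranges.
	 */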
4062 	{
4063 		"helper access to packet: test1, valid packet_ptr range",
4064 		.insns = {
4065 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4066 				    offsetof(struct xdp_md, data)),
4067 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4068 				    offsetof(struct xdp_md, data_end)),
4069 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
4070 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
4071 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
4072 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4073 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
4074 			BPF_MOV64_IMM(BPF_REG_4, 0),
4075 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4076 				     BPF_FUNC_map_update_elem),
4077 			BPF_MOV64_IMM(BPF_REG_0, 0),
4078 			BPF_EXIT_INSN(),
4079 		},
4080 		.fixup_map_hash_8b = { 5 },
4081 		.result_unpriv = ACCEPT,
4082 		.result = ACCEPT,
4083 		.prog_type = BPF_PROG_TYPE_XDP,
4084 	},
4085 	{
4086 		"helper access to packet: test2, unchecked packet_ptr",
4087 		.insns = {
4088 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4089 				    offsetof(struct xdp_md, data)),
4090 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4091 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4092 				     BPF_FUNC_map_lookup_elem),
4093 			BPF_MOV64_IMM(BPF_REG_0, 0),
4094 			BPF_EXIT_INSN(),
4095 		},
4096 		.fixup_map_hash_8b = { 1 },
4097 		.result = REJECT,
4098 		.errstr = "invalid access to packet",
4099 		.prog_type = BPF_PROG_TYPE_XDP,
4100 	},
4101 	{
4102 		"helper access to packet: test3, variable add",
4103 		.insns = {
4104 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4105 					offsetof(struct xdp_md, data)),
4106 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4107 					offsetof(struct xdp_md, data_end)),
4108 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4109 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
4110 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
4111 			BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
4112 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4113 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
4114 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
4115 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
4116 			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
4117 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4118 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
4119 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4120 				     BPF_FUNC_map_lookup_elem),
4121 			BPF_MOV64_IMM(BPF_REG_0, 0),
4122 			BPF_EXIT_INSN(),
4123 		},
4124 		.fixup_map_hash_8b = { 11 },
4125 		.result = ACCEPT,
4126 		.prog_type = BPF_PROG_TYPE_XDP,
4127 	},
4128 	{
4129 		"helper access to packet: test4, packet_ptr with bad range",
4130 		.insns = {
4131 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4132 				    offsetof(struct xdp_md, data)),
4133 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4134 				    offsetof(struct xdp_md, data_end)),
4135 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4136 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
4137 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
4138 			BPF_MOV64_IMM(BPF_REG_0, 0),
4139 			BPF_EXIT_INSN(),
4140 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4141 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4142 				     BPF_FUNC_map_lookup_elem),
4143 			BPF_MOV64_IMM(BPF_REG_0, 0),
4144 			BPF_EXIT_INSN(),
4145 		},
4146 		.fixup_map_hash_8b = { 7 },
4147 		.result = REJECT,
4148 		.errstr = "invalid access to packet",
4149 		.prog_type = BPF_PROG_TYPE_XDP,
4150 	},
4151 	{
4152 		"helper access to packet: test5, packet_ptr with too short range",
4153 		.insns = {
4154 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4155 				    offsetof(struct xdp_md, data)),
4156 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4157 				    offsetof(struct xdp_md, data_end)),
4158 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
4159 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4160 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
4161 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
4162 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4163 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4164 				     BPF_FUNC_map_lookup_elem),
4165 			BPF_MOV64_IMM(BPF_REG_0, 0),
4166 			BPF_EXIT_INSN(),
4167 		},
4168 		.fixup_map_hash_8b = { 6 },
4169 		.result = REJECT,
4170 		.errstr = "invalid access to packet",
4171 		.prog_type = BPF_PROG_TYPE_XDP,
4172 	},
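	/* Tests 6-10 repeat the same packet-range checks for SCHED_CLS. */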
4173 	{
4174 		"helper access to packet: test6, cls valid packet_ptr range",
4175 		.insns = {
4176 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4177 				    offsetof(struct __sk_buff, data)),
4178 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4179 				    offsetof(struct __sk_buff, data_end)),
4180 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
4181 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
4182 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
4183 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4184 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
4185 			BPF_MOV64_IMM(BPF_REG_4, 0),
4186 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4187 				     BPF_FUNC_map_update_elem),
4188 			BPF_MOV64_IMM(BPF_REG_0, 0),
4189 			BPF_EXIT_INSN(),
4190 		},
4191 		.fixup_map_hash_8b = { 5 },
4192 		.result = ACCEPT,
4193 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4194 	},
4195 	{
4196 		"helper access to packet: test7, cls unchecked packet_ptr",
4197 		.insns = {
4198 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4199 				    offsetof(struct __sk_buff, data)),
4200 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4201 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4202 				     BPF_FUNC_map_lookup_elem),
4203 			BPF_MOV64_IMM(BPF_REG_0, 0),
4204 			BPF_EXIT_INSN(),
4205 		},
4206 		.fixup_map_hash_8b = { 1 },
4207 		.result = REJECT,
4208 		.errstr = "invalid access to packet",
4209 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4210 	},
4211 	{
4212 		"helper access to packet: test8, cls variable add",
4213 		.insns = {
4214 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4215 					offsetof(struct __sk_buff, data)),
4216 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4217 					offsetof(struct __sk_buff, data_end)),
4218 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4219 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
4220 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
4221 			BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
4222 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4223 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
4224 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
4225 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
4226 			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
4227 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4228 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
4229 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4230 				     BPF_FUNC_map_lookup_elem),
4231 			BPF_MOV64_IMM(BPF_REG_0, 0),
4232 			BPF_EXIT_INSN(),
4233 		},
4234 		.fixup_map_hash_8b = { 11 },
4235 		.result = ACCEPT,
4236 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4237 	},
4238 	{
4239 		"helper access to packet: test9, cls packet_ptr with bad range",
4240 		.insns = {
4241 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4242 				    offsetof(struct __sk_buff, data)),
4243 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4244 				    offsetof(struct __sk_buff, data_end)),
4245 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4246 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
4247 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
4248 			BPF_MOV64_IMM(BPF_REG_0, 0),
4249 			BPF_EXIT_INSN(),
4250 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4251 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4252 				     BPF_FUNC_map_lookup_elem),
4253 			BPF_MOV64_IMM(BPF_REG_0, 0),
4254 			BPF_EXIT_INSN(),
4255 		},
4256 		.fixup_map_hash_8b = { 7 },
4257 		.result = REJECT,
4258 		.errstr = "invalid access to packet",
4259 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4260 	},
4261 	{
4262 		"helper access to packet: test10, cls packet_ptr with too short range",
4263 		.insns = {
4264 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4265 				    offsetof(struct __sk_buff, data)),
4266 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4267 				    offsetof(struct __sk_buff, data_end)),
4268 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
4269 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4270 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
4271 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
4272 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4273 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4274 				     BPF_FUNC_map_lookup_elem),
4275 			BPF_MOV64_IMM(BPF_REG_0, 0),
4276 			BPF_EXIT_INSN(),
4277 		},
4278 		.fixup_map_hash_8b = { 6 },
4279 		.result = REJECT,
4280 		.errstr = "invalid access to packet",
4281 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4282 	},
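	/*
	 * Tests 11-21 feed packet pointers to skb_store_bytes(),
	 * skb_load_bytes() and csum_diff(): only helpers allowed to access
	 * packet data are accepted, the length argument must not be negative,
	 * and pkt_end itself is not a valid buffer argument.
	 */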
4283 	{
4284 		"helper access to packet: test11, cls unsuitable helper 1",
4285 		.insns = {
4286 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4287 				    offsetof(struct __sk_buff, data)),
4288 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4289 				    offsetof(struct __sk_buff, data_end)),
4290 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4291 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
4292 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
4293 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
4294 			BPF_MOV64_IMM(BPF_REG_2, 0),
4295 			BPF_MOV64_IMM(BPF_REG_4, 42),
4296 			BPF_MOV64_IMM(BPF_REG_5, 0),
4297 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4298 				     BPF_FUNC_skb_store_bytes),
4299 			BPF_MOV64_IMM(BPF_REG_0, 0),
4300 			BPF_EXIT_INSN(),
4301 		},
4302 		.result = REJECT,
4303 		.errstr = "helper access to the packet",
4304 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4305 	},
4306 	{
4307 		"helper access to packet: test12, cls unsuitable helper 2",
4308 		.insns = {
4309 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4310 				    offsetof(struct __sk_buff, data)),
4311 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4312 				    offsetof(struct __sk_buff, data_end)),
4313 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
4314 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
4315 			BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
4316 			BPF_MOV64_IMM(BPF_REG_2, 0),
4317 			BPF_MOV64_IMM(BPF_REG_4, 4),
4318 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4319 				     BPF_FUNC_skb_load_bytes),
4320 			BPF_MOV64_IMM(BPF_REG_0, 0),
4321 			BPF_EXIT_INSN(),
4322 		},
4323 		.result = REJECT,
4324 		.errstr = "helper access to the packet",
4325 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4326 	},
4327 	{
4328 		"helper access to packet: test13, cls helper ok",
4329 		.insns = {
4330 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4331 				    offsetof(struct __sk_buff, data)),
4332 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4333 				    offsetof(struct __sk_buff, data_end)),
4334 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4335 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4336 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4337 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4338 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4339 			BPF_MOV64_IMM(BPF_REG_2, 4),
4340 			BPF_MOV64_IMM(BPF_REG_3, 0),
4341 			BPF_MOV64_IMM(BPF_REG_4, 0),
4342 			BPF_MOV64_IMM(BPF_REG_5, 0),
4343 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4344 				     BPF_FUNC_csum_diff),
4345 			BPF_MOV64_IMM(BPF_REG_0, 0),
4346 			BPF_EXIT_INSN(),
4347 		},
4348 		.result = ACCEPT,
4349 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4350 	},
4351 	{
4352 		"helper access to packet: test14, cls helper ok sub",
4353 		.insns = {
4354 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4355 				    offsetof(struct __sk_buff, data)),
4356 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4357 				    offsetof(struct __sk_buff, data_end)),
4358 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4359 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4360 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4361 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4362 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
4363 			BPF_MOV64_IMM(BPF_REG_2, 4),
4364 			BPF_MOV64_IMM(BPF_REG_3, 0),
4365 			BPF_MOV64_IMM(BPF_REG_4, 0),
4366 			BPF_MOV64_IMM(BPF_REG_5, 0),
4367 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4368 				     BPF_FUNC_csum_diff),
4369 			BPF_MOV64_IMM(BPF_REG_0, 0),
4370 			BPF_EXIT_INSN(),
4371 		},
4372 		.result = ACCEPT,
4373 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4374 	},
4375 	{
4376 		"helper access to packet: test15, cls helper fail sub",
4377 		.insns = {
4378 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4379 				    offsetof(struct __sk_buff, data)),
4380 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4381 				    offsetof(struct __sk_buff, data_end)),
4382 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4383 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4384 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4385 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4386 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 12),
4387 			BPF_MOV64_IMM(BPF_REG_2, 4),
4388 			BPF_MOV64_IMM(BPF_REG_3, 0),
4389 			BPF_MOV64_IMM(BPF_REG_4, 0),
4390 			BPF_MOV64_IMM(BPF_REG_5, 0),
4391 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4392 				     BPF_FUNC_csum_diff),
4393 			BPF_MOV64_IMM(BPF_REG_0, 0),
4394 			BPF_EXIT_INSN(),
4395 		},
4396 		.result = REJECT,
4397 		.errstr = "invalid access to packet",
4398 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4399 	},
4400 	{
4401 		"helper access to packet: test16, cls helper fail range 1",
4402 		.insns = {
4403 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4404 				    offsetof(struct __sk_buff, data)),
4405 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4406 				    offsetof(struct __sk_buff, data_end)),
4407 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4408 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4409 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4410 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4411 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4412 			BPF_MOV64_IMM(BPF_REG_2, 8),
4413 			BPF_MOV64_IMM(BPF_REG_3, 0),
4414 			BPF_MOV64_IMM(BPF_REG_4, 0),
4415 			BPF_MOV64_IMM(BPF_REG_5, 0),
4416 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4417 				     BPF_FUNC_csum_diff),
4418 			BPF_MOV64_IMM(BPF_REG_0, 0),
4419 			BPF_EXIT_INSN(),
4420 		},
4421 		.result = REJECT,
4422 		.errstr = "invalid access to packet",
4423 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4424 	},
4425 	{
4426 		"helper access to packet: test17, cls helper fail range 2",
4427 		.insns = {
4428 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4429 				    offsetof(struct __sk_buff, data)),
4430 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4431 				    offsetof(struct __sk_buff, data_end)),
4432 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4433 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4434 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4435 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4436 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4437 			BPF_MOV64_IMM(BPF_REG_2, -9),
4438 			BPF_MOV64_IMM(BPF_REG_3, 0),
4439 			BPF_MOV64_IMM(BPF_REG_4, 0),
4440 			BPF_MOV64_IMM(BPF_REG_5, 0),
4441 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4442 				     BPF_FUNC_csum_diff),
4443 			BPF_MOV64_IMM(BPF_REG_0, 0),
4444 			BPF_EXIT_INSN(),
4445 		},
4446 		.result = REJECT,
4447 		.errstr = "R2 min value is negative",
4448 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4449 	},
4450 	{
4451 		"helper access to packet: test18, cls helper fail range 3",
4452 		.insns = {
4453 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4454 				    offsetof(struct __sk_buff, data)),
4455 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4456 				    offsetof(struct __sk_buff, data_end)),
4457 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4458 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4459 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4460 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4461 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4462 			BPF_MOV64_IMM(BPF_REG_2, ~0),
4463 			BPF_MOV64_IMM(BPF_REG_3, 0),
4464 			BPF_MOV64_IMM(BPF_REG_4, 0),
4465 			BPF_MOV64_IMM(BPF_REG_5, 0),
4466 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4467 				     BPF_FUNC_csum_diff),
4468 			BPF_MOV64_IMM(BPF_REG_0, 0),
4469 			BPF_EXIT_INSN(),
4470 		},
4471 		.result = REJECT,
4472 		.errstr = "R2 min value is negative",
4473 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4474 	},
4475 	{
4476 		"helper access to packet: test19, cls helper range zero",
4477 		.insns = {
4478 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4479 				    offsetof(struct __sk_buff, data)),
4480 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4481 				    offsetof(struct __sk_buff, data_end)),
4482 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4483 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4484 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4485 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4486 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4487 			BPF_MOV64_IMM(BPF_REG_2, 0),
4488 			BPF_MOV64_IMM(BPF_REG_3, 0),
4489 			BPF_MOV64_IMM(BPF_REG_4, 0),
4490 			BPF_MOV64_IMM(BPF_REG_5, 0),
4491 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4492 				     BPF_FUNC_csum_diff),
4493 			BPF_MOV64_IMM(BPF_REG_0, 0),
4494 			BPF_EXIT_INSN(),
4495 		},
4496 		.result = ACCEPT,
4497 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4498 	},
4499 	{
4500 		"helper access to packet: test20, pkt end as input",
4501 		.insns = {
4502 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4503 				    offsetof(struct __sk_buff, data)),
4504 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4505 				    offsetof(struct __sk_buff, data_end)),
4506 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4507 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4508 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4509 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4510 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
4511 			BPF_MOV64_IMM(BPF_REG_2, 4),
4512 			BPF_MOV64_IMM(BPF_REG_3, 0),
4513 			BPF_MOV64_IMM(BPF_REG_4, 0),
4514 			BPF_MOV64_IMM(BPF_REG_5, 0),
4515 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4516 				     BPF_FUNC_csum_diff),
4517 			BPF_MOV64_IMM(BPF_REG_0, 0),
4518 			BPF_EXIT_INSN(),
4519 		},
4520 		.result = REJECT,
4521 		.errstr = "R1 type=pkt_end expected=fp",
4522 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4523 	},
4524 	{
4525 		"helper access to packet: test21, wrong reg",
4526 		.insns = {
4527 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4528 				    offsetof(struct __sk_buff, data)),
4529 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4530 				    offsetof(struct __sk_buff, data_end)),
4531 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4532 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4533 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4534 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4535 			BPF_MOV64_IMM(BPF_REG_2, 4),
4536 			BPF_MOV64_IMM(BPF_REG_3, 0),
4537 			BPF_MOV64_IMM(BPF_REG_4, 0),
4538 			BPF_MOV64_IMM(BPF_REG_5, 0),
4539 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4540 				     BPF_FUNC_csum_diff),
4541 			BPF_MOV64_IMM(BPF_REG_0, 0),
4542 			BPF_EXIT_INSN(),
4543 		},
4544 		.result = REJECT,
4545 		.errstr = "invalid access to packet",
4546 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4547 	},
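	/*
	 * The "prevent map lookup" tests check that bpf_map_lookup_elem() is
	 * refused for map types that do not expose lookup to programs
	 * (sockmap, sockhash, xskmap, stack trace, prog array).
	 */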
4548 	{
4549 		"prevent map lookup in sockmap",
4550 		.insns = {
4551 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4552 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4553 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4554 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4555 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4556 				     BPF_FUNC_map_lookup_elem),
4557 			BPF_EXIT_INSN(),
4558 		},
4559 		.fixup_map_sockmap = { 3 },
4560 		.result = REJECT,
4561 		.errstr = "cannot pass map_type 15 into func bpf_map_lookup_elem",
4562 		.prog_type = BPF_PROG_TYPE_SOCK_OPS,
4563 	},
4564 	{
4565 		"prevent map lookup in sockhash",
4566 		.insns = {
4567 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4568 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4569 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4570 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4571 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4572 				     BPF_FUNC_map_lookup_elem),
4573 			BPF_EXIT_INSN(),
4574 		},
4575 		.fixup_map_sockhash = { 3 },
4576 		.result = REJECT,
4577 		.errstr = "cannot pass map_type 18 into func bpf_map_lookup_elem",
4578 		.prog_type = BPF_PROG_TYPE_SOCK_OPS,
4579 	},
4580 	{
4581 		"prevent map lookup in xskmap",
4582 		.insns = {
4583 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4584 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4585 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4586 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4587 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4588 				     BPF_FUNC_map_lookup_elem),
4589 			BPF_EXIT_INSN(),
4590 		},
4591 		.fixup_map_xskmap = { 3 },
4592 		.result = REJECT,
4593 		.errstr = "cannot pass map_type 17 into func bpf_map_lookup_elem",
4594 		.prog_type = BPF_PROG_TYPE_XDP,
4595 	},
4596 	{
4597 		"prevent map lookup in stack trace",
4598 		.insns = {
4599 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4600 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4601 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4602 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4603 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4604 				     BPF_FUNC_map_lookup_elem),
4605 			BPF_EXIT_INSN(),
4606 		},
4607 		.fixup_map_stacktrace = { 3 },
4608 		.result = REJECT,
4609 		.errstr = "cannot pass map_type 7 into func bpf_map_lookup_elem",
4610 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
4611 	},
4612 	{
4613 		"prevent map lookup in prog array",
4614 		.insns = {
4615 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4616 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4617 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4618 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4619 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4620 				     BPF_FUNC_map_lookup_elem),
4621 			BPF_EXIT_INSN(),
4622 		},
4623 		.fixup_prog2 = { 3 },
4624 		.result = REJECT,
4625 		.errstr = "cannot pass map_type 3 into func bpf_map_lookup_elem",
4626 	},
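	/*
	 * Array map value access: after a NULL check of the lookup result,
	 * constant and register offsets into the 48-byte value are accepted
	 * only when the index is provably bounded; unbounded or overflowing
	 * offsets are rejected, and unprivileged programs must not leak the
	 * map value address.
	 */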
4627 	{
4628 		"valid map access into an array with a constant",
4629 		.insns = {
4630 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4631 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4632 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4633 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4634 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4635 				     BPF_FUNC_map_lookup_elem),
4636 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4637 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4638 				   offsetof(struct test_val, foo)),
4639 			BPF_EXIT_INSN(),
4640 		},
4641 		.fixup_map_hash_48b = { 3 },
4642 		.errstr_unpriv = "R0 leaks addr",
4643 		.result_unpriv = REJECT,
4644 		.result = ACCEPT,
4645 	},
4646 	{
4647 		"valid map access into an array with a register",
4648 		.insns = {
4649 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4650 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4651 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4652 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4653 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4654 				     BPF_FUNC_map_lookup_elem),
4655 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4656 			BPF_MOV64_IMM(BPF_REG_1, 4),
4657 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4658 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4659 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4660 				   offsetof(struct test_val, foo)),
4661 			BPF_EXIT_INSN(),
4662 		},
4663 		.fixup_map_hash_48b = { 3 },
4664 		.errstr_unpriv = "R0 leaks addr",
4665 		.result_unpriv = REJECT,
4666 		.result = ACCEPT,
4667 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4668 	},
4669 	{
4670 		"valid map access into an array with a variable",
4671 		.insns = {
4672 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4673 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4674 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4675 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4676 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4677 				     BPF_FUNC_map_lookup_elem),
4678 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4679 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4680 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
4681 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4682 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4683 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4684 				   offsetof(struct test_val, foo)),
4685 			BPF_EXIT_INSN(),
4686 		},
4687 		.fixup_map_hash_48b = { 3 },
4688 		.errstr_unpriv = "R0 leaks addr",
4689 		.result_unpriv = REJECT,
4690 		.result = ACCEPT,
4691 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4692 	},
4693 	{
4694 		"valid map access into an array with a signed variable",
4695 		.insns = {
4696 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4697 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4698 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4699 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4700 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4701 				     BPF_FUNC_map_lookup_elem),
4702 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
4703 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4704 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
4705 			BPF_MOV32_IMM(BPF_REG_1, 0),
4706 			BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
4707 			BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
4708 			BPF_MOV32_IMM(BPF_REG_1, 0),
4709 			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4710 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4711 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4712 				   offsetof(struct test_val, foo)),
4713 			BPF_EXIT_INSN(),
4714 		},
4715 		.fixup_map_hash_48b = { 3 },
4716 		.errstr_unpriv = "R0 leaks addr",
4717 		.result_unpriv = REJECT,
4718 		.result = ACCEPT,
4719 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4720 	},
4721 	{
4722 		"invalid map access into an array with a constant",
4723 		.insns = {
4724 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4725 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4726 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4727 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4728 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4729 				     BPF_FUNC_map_lookup_elem),
4730 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4731 			BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
4732 				   offsetof(struct test_val, foo)),
4733 			BPF_EXIT_INSN(),
4734 		},
4735 		.fixup_map_hash_48b = { 3 },
4736 		.errstr = "invalid access to map value, value_size=48 off=48 size=8",
4737 		.result = REJECT,
4738 	},
4739 	{
4740 		"invalid map access into an array with a register",
4741 		.insns = {
4742 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4743 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4744 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4745 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4746 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4747 				     BPF_FUNC_map_lookup_elem),
4748 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4749 			BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
4750 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4751 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4752 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4753 				   offsetof(struct test_val, foo)),
4754 			BPF_EXIT_INSN(),
4755 		},
4756 		.fixup_map_hash_48b = { 3 },
4757 		.errstr = "R0 min value is outside of the array range",
4758 		.result = REJECT,
4759 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4760 	},
4761 	{
4762 		"invalid map access into an array with a variable",
4763 		.insns = {
4764 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4765 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4766 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4767 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4768 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4769 				     BPF_FUNC_map_lookup_elem),
4770 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4771 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4772 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4773 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4774 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4775 				   offsetof(struct test_val, foo)),
4776 			BPF_EXIT_INSN(),
4777 		},
4778 		.fixup_map_hash_48b = { 3 },
4779 		.errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
4780 		.result = REJECT,
4781 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4782 	},
4783 	{
4784 		"invalid map access into an array with no floor check",
4785 		.insns = {
4786 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4787 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4788 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4789 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4790 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4791 				     BPF_FUNC_map_lookup_elem),
4792 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4793 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
4794 			BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
4795 			BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
4796 			BPF_MOV32_IMM(BPF_REG_1, 0),
4797 			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4798 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4799 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4800 				   offsetof(struct test_val, foo)),
4801 			BPF_EXIT_INSN(),
4802 		},
4803 		.fixup_map_hash_48b = { 3 },
4804 		.errstr_unpriv = "R0 leaks addr",
4805 		.errstr = "R0 unbounded memory access",
4806 		.result_unpriv = REJECT,
4807 		.result = REJECT,
4808 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4809 	},
4810 	{
4811 		"invalid map access into an array with an invalid max check",
4812 		.insns = {
4813 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4814 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4815 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4816 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4817 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4818 				     BPF_FUNC_map_lookup_elem),
4819 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4820 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4821 			BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
4822 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
4823 			BPF_MOV32_IMM(BPF_REG_1, 0),
4824 			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4825 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4826 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4827 				   offsetof(struct test_val, foo)),
4828 			BPF_EXIT_INSN(),
4829 		},
4830 		.fixup_map_hash_48b = { 3 },
4831 		.errstr_unpriv = "R0 leaks addr",
4832 		.errstr = "invalid access to map value, value_size=48 off=44 size=8",
4833 		.result_unpriv = REJECT,
4834 		.result = REJECT,
4835 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4836 	},
4837 	{
4838 		"invalid map access into an array with a pointer added to a pointer",
4839 		.insns = {
4840 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4841 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4842 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4843 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4844 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4845 				     BPF_FUNC_map_lookup_elem),
4846 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
4847 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
4848 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4849 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4850 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4851 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4852 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4853 				     BPF_FUNC_map_lookup_elem),
4854 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4855 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
4856 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
4857 				    offsetof(struct test_val, foo)),
4858 			BPF_EXIT_INSN(),
4859 		},
4860 		.fixup_map_hash_48b = { 3, 11 },
4861 		.errstr = "R0 pointer += pointer",
4862 		.result = REJECT,
4863 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4864 	},
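	/*
	 * Cgroup storage: bpf_get_local_storage() requires a cgroup storage
	 * map and a zero flags argument, and accesses to the returned value
	 * must stay within its 64-byte bounds.
	 */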
4865 	{
4866 		"valid cgroup storage access",
4867 		.insns = {
4868 			BPF_MOV64_IMM(BPF_REG_2, 0),
4869 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4870 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4871 				     BPF_FUNC_get_local_storage),
4872 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4873 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
4874 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
4875 			BPF_EXIT_INSN(),
4876 		},
4877 		.fixup_cgroup_storage = { 1 },
4878 		.result = ACCEPT,
4879 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4880 	},
4881 	{
4882 		"invalid cgroup storage access 1",
4883 		.insns = {
4884 			BPF_MOV64_IMM(BPF_REG_2, 0),
4885 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4886 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4887 				     BPF_FUNC_get_local_storage),
4888 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4889 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
4890 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
4891 			BPF_EXIT_INSN(),
4892 		},
4893 		.fixup_map_hash_8b = { 1 },
4894 		.result = REJECT,
4895 		.errstr = "cannot pass map_type 1 into func bpf_get_local_storage",
4896 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4897 	},
4898 	{
4899 		"invalid cgroup storage access 2",
4900 		.insns = {
4901 			BPF_MOV64_IMM(BPF_REG_2, 0),
4902 			BPF_LD_MAP_FD(BPF_REG_1, 1),
4903 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4904 				     BPF_FUNC_get_local_storage),
4905 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
4906 			BPF_EXIT_INSN(),
4907 		},
4908 		.result = REJECT,
4909 		.errstr = "fd 1 is not pointing to valid bpf_map",
4910 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4911 	},
4912 	{
4913 		"invalid cgroup storage access 3",
4914 		.insns = {
4915 			BPF_MOV64_IMM(BPF_REG_2, 0),
4916 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4917 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4918 				     BPF_FUNC_get_local_storage),
4919 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256),
4920 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
4921 			BPF_MOV64_IMM(BPF_REG_0, 0),
4922 			BPF_EXIT_INSN(),
4923 		},
4924 		.fixup_cgroup_storage = { 1 },
4925 		.result = REJECT,
4926 		.errstr = "invalid access to map value, value_size=64 off=256 size=4",
4927 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4928 	},
4929 	{
4930 		"invalid cgroup storage access 4",
4931 		.insns = {
4932 			BPF_MOV64_IMM(BPF_REG_2, 0),
4933 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4934 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4935 				     BPF_FUNC_get_local_storage),
4936 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2),
4937 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
4938 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
4939 			BPF_EXIT_INSN(),
4940 		},
4941 		.fixup_cgroup_storage = { 1 },
4942 		.result = REJECT,
4943 		.errstr = "invalid access to map value, value_size=64 off=-2 size=4",
4944 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4945 	},
4946 	{
4947 		"invalid cgroup storage access 5",
4948 		.insns = {
4949 			BPF_MOV64_IMM(BPF_REG_2, 7),
4950 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4951 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4952 				     BPF_FUNC_get_local_storage),
4953 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4954 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
4955 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
4956 			BPF_EXIT_INSN(),
4957 		},
4958 		.fixup_cgroup_storage = { 1 },
4959 		.result = REJECT,
4960 		.errstr = "get_local_storage() doesn't support non-zero flags",
4961 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4962 	},
4963 	{
4964 		"invalid cgroup storage access 6",
4965 		.insns = {
4966 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
4967 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4968 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4969 				     BPF_FUNC_get_local_storage),
4970 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4971 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
4972 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
4973 			BPF_EXIT_INSN(),
4974 		},
4975 		.fixup_cgroup_storage = { 1 },
4976 		.result = REJECT,
4977 		.errstr = "get_local_storage() doesn't support non-zero flags",
4978 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4979 	},
4980 	{
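	/* Per-CPU cgroup storage: the tests below repeat the
	 * bpf_get_local_storage() access patterns above for the per-CPU
	 * flavour of cgroup storage.
	 */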
4981 		"valid per-cpu cgroup storage access",
4982 		.insns = {
4983 			BPF_MOV64_IMM(BPF_REG_2, 0),
4984 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4985 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4986 				     BPF_FUNC_get_local_storage),
4987 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4988 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
4989 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
4990 			BPF_EXIT_INSN(),
4991 		},
4992 		.fixup_percpu_cgroup_storage = { 1 },
4993 		.result = ACCEPT,
4994 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4995 	},
4996 	{
4997 		"invalid per-cpu cgroup storage access 1",
4998 		.insns = {
4999 			BPF_MOV64_IMM(BPF_REG_2, 0),
5000 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5001 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5002 				     BPF_FUNC_get_local_storage),
5003 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5004 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5005 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5006 			BPF_EXIT_INSN(),
5007 		},
5008 		.fixup_map_hash_8b = { 1 },
5009 		.result = REJECT,
5010 		.errstr = "cannot pass map_type 1 into func bpf_get_local_storage",
5011 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5012 	},
5013 	{
5014 		"invalid per-cpu cgroup storage access 2",
5015 		.insns = {
5016 			BPF_MOV64_IMM(BPF_REG_2, 0),
5017 			BPF_LD_MAP_FD(BPF_REG_1, 1),
5018 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5019 				     BPF_FUNC_get_local_storage),
5020 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5021 			BPF_EXIT_INSN(),
5022 		},
5023 		.result = REJECT,
5024 		.errstr = "fd 1 is not pointing to valid bpf_map",
5025 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5026 	},
5027 	{
5028 		"invalid per-cpu cgroup storage access 3",
5029 		.insns = {
5030 			BPF_MOV64_IMM(BPF_REG_2, 0),
5031 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5032 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5033 				     BPF_FUNC_get_local_storage),
5034 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256),
5035 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5036 			BPF_MOV64_IMM(BPF_REG_0, 0),
5037 			BPF_EXIT_INSN(),
5038 		},
5039 		.fixup_percpu_cgroup_storage = { 1 },
5040 		.result = REJECT,
5041 		.errstr = "invalid access to map value, value_size=64 off=256 size=4",
5042 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5043 	},
5044 	{
5045 		"invalid per-cpu cgroup storage access 4",
5046 		.insns = {
5047 			BPF_MOV64_IMM(BPF_REG_2, 0),
5048 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5049 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5050 				     BPF_FUNC_get_local_storage),
5051 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2),
5052 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5053 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5054 			BPF_EXIT_INSN(),
5055 		},
5056 		.fixup_cgroup_storage = { 1 },
5057 		.result = REJECT,
5058 		.errstr = "invalid access to map value, value_size=64 off=-2 size=4",
5059 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5060 	},
5061 	{
5062 		"invalid per-cpu cgroup storage access 5",
5063 		.insns = {
5064 			BPF_MOV64_IMM(BPF_REG_2, 7),
5065 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5066 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5067 				     BPF_FUNC_get_local_storage),
5068 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5069 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5070 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5071 			BPF_EXIT_INSN(),
5072 		},
5073 		.fixup_percpu_cgroup_storage = { 1 },
5074 		.result = REJECT,
5075 		.errstr = "get_local_storage() doesn't support non-zero flags",
5076 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5077 	},
5078 	{
5079 		"invalid per-cpu cgroup storage access 6",
5080 		.insns = {
5081 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
5082 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5083 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5084 				     BPF_FUNC_get_local_storage),
5085 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5086 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5087 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5088 			BPF_EXIT_INSN(),
5089 		},
5090 		.fixup_percpu_cgroup_storage = { 1 },
5091 		.result = REJECT,
5092 		.errstr = "get_local_storage() doesn't support non-zero flags",
5093 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5094 	},
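	/* Tracking of the bpf_map_lookup_elem() return value: the NULL check
	 * on R0 must also mark copies of the returned pointer, ALU ops on a
	 * map_value_or_null pointer are rejected, and a copy held in a
	 * caller-saved register does not survive a second helper call.
	 */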
5095 	{
5096 		"multiple registers share map_lookup_elem result",
5097 		.insns = {
5098 			BPF_MOV64_IMM(BPF_REG_1, 10),
5099 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5100 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5101 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5102 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5103 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5104 				     BPF_FUNC_map_lookup_elem),
5105 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5106 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5107 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5108 			BPF_EXIT_INSN(),
5109 		},
5110 		.fixup_map_hash_8b = { 4 },
5111 		.result = ACCEPT,
5112 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
5113 	},
5114 	{
5115 		"alu ops on ptr_to_map_value_or_null, 1",
5116 		.insns = {
5117 			BPF_MOV64_IMM(BPF_REG_1, 10),
5118 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5119 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5120 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5121 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5122 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5123 				     BPF_FUNC_map_lookup_elem),
5124 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5125 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2),
5126 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
5127 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5128 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5129 			BPF_EXIT_INSN(),
5130 		},
5131 		.fixup_map_hash_8b = { 4 },
5132 		.errstr = "R4 pointer arithmetic on map_value_or_null",
5133 		.result = REJECT,
5134 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
5135 	},
5136 	{
5137 		"alu ops on ptr_to_map_value_or_null, 2",
5138 		.insns = {
5139 			BPF_MOV64_IMM(BPF_REG_1, 10),
5140 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5141 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5142 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5143 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5144 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5145 				     BPF_FUNC_map_lookup_elem),
5146 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5147 			BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1),
5148 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5149 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5150 			BPF_EXIT_INSN(),
5151 		},
5152 		.fixup_map_hash_8b = { 4 },
5153 		.errstr = "R4 pointer arithmetic on map_value_or_null",
5154 		.result = REJECT,
5155 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
5156 	},
5157 	{
5158 		"alu ops on ptr_to_map_value_or_null, 3",
5159 		.insns = {
5160 			BPF_MOV64_IMM(BPF_REG_1, 10),
5161 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5162 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5163 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5164 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5165 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5166 				     BPF_FUNC_map_lookup_elem),
5167 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5168 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1),
5169 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5170 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5171 			BPF_EXIT_INSN(),
5172 		},
5173 		.fixup_map_hash_8b = { 4 },
5174 		.errstr = "R4 pointer arithmetic on map_value_or_null",
5175 		.result = REJECT,
5176 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
5177 	},
5178 	{
5179 		"invalid memory access with multiple map_lookup_elem calls",
5180 		.insns = {
5181 			BPF_MOV64_IMM(BPF_REG_1, 10),
5182 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5183 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5184 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5185 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5186 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
5187 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
5188 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5189 				     BPF_FUNC_map_lookup_elem),
5190 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5191 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
5192 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
5193 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5194 				     BPF_FUNC_map_lookup_elem),
5195 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5196 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5197 			BPF_EXIT_INSN(),
5198 		},
5199 		.fixup_map_hash_8b = { 4 },
5200 		.result = REJECT,
5201 		.errstr = "R4 !read_ok",
5202 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
5203 	},
5204 	{
5205 		"valid indirect map_lookup_elem access with 2nd lookup in branch",
5206 		.insns = {
5207 			BPF_MOV64_IMM(BPF_REG_1, 10),
5208 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5209 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5210 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5211 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5212 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
5213 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
5214 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5215 				     BPF_FUNC_map_lookup_elem),
5216 			BPF_MOV64_IMM(BPF_REG_2, 10),
5217 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
5218 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
5219 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
5220 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5221 				     BPF_FUNC_map_lookup_elem),
5222 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5223 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5224 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5225 			BPF_EXIT_INSN(),
5226 		},
5227 		.fixup_map_hash_8b = { 4 },
5228 		.result = ACCEPT,
5229 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
5230 	},
5231 	{
5232 		"invalid map access from else condition",
5233 		.insns = {
5234 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5235 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5236 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5237 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5238 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
5239 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5240 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5241 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES - 1, 1),
5242 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5243 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
5244 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
5245 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
5246 			BPF_EXIT_INSN(),
5247 		},
5248 		.fixup_map_hash_48b = { 3 },
5249 		.errstr = "R0 unbounded memory access",
5250 		.result = REJECT,
5251 		.errstr_unpriv = "R0 leaks addr",
5252 		.result_unpriv = REJECT,
5253 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5254 	},
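	/* ORing two known constants must keep the result a known constant;
	 * the resulting bpf_probe_read() size is still checked against the
	 * stack buffer bounds (34 | 13 = 47 fits in the 48 bytes at R10-48,
	 * 34 | 24 = 58 does not).
	 */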
5255 	{
5256 		"constant register |= constant should keep constant type",
5257 		.insns = {
5258 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5259 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5260 			BPF_MOV64_IMM(BPF_REG_2, 34),
5261 			BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
5262 			BPF_MOV64_IMM(BPF_REG_3, 0),
5263 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5264 			BPF_EXIT_INSN(),
5265 		},
5266 		.result = ACCEPT,
5267 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5268 	},
5269 	{
5270 		"constant register |= constant should not bypass stack boundary checks",
5271 		.insns = {
5272 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5273 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5274 			BPF_MOV64_IMM(BPF_REG_2, 34),
5275 			BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
5276 			BPF_MOV64_IMM(BPF_REG_3, 0),
5277 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5278 			BPF_EXIT_INSN(),
5279 		},
5280 		.errstr = "invalid stack type R1 off=-48 access_size=58",
5281 		.result = REJECT,
5282 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5283 	},
5284 	{
5285 		"constant register |= constant register should keep constant type",
5286 		.insns = {
5287 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5288 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5289 			BPF_MOV64_IMM(BPF_REG_2, 34),
5290 			BPF_MOV64_IMM(BPF_REG_4, 13),
5291 			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
5292 			BPF_MOV64_IMM(BPF_REG_3, 0),
5293 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5294 			BPF_EXIT_INSN(),
5295 		},
5296 		.result = ACCEPT,
5297 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5298 	},
5299 	{
5300 		"constant register |= constant register should not bypass stack boundary checks",
5301 		.insns = {
5302 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5303 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5304 			BPF_MOV64_IMM(BPF_REG_2, 34),
5305 			BPF_MOV64_IMM(BPF_REG_4, 24),
5306 			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
5307 			BPF_MOV64_IMM(BPF_REG_3, 0),
5308 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5309 			BPF_EXIT_INSN(),
5310 		},
5311 		.errstr = "invalid stack type R1 off=-48 access_size=58",
5312 		.result = REJECT,
5313 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5314 	},
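	/* Direct packet access for LWT programs: packet reads are allowed
	 * for LWT_IN, LWT_OUT and LWT_XMIT, but packet writes only for
	 * LWT_XMIT.
	 */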
5315 	{
5316 		"invalid direct packet write for LWT_IN",
5317 		.insns = {
5318 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5319 				    offsetof(struct __sk_buff, data)),
5320 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5321 				    offsetof(struct __sk_buff, data_end)),
5322 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5323 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5324 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5325 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
5326 			BPF_MOV64_IMM(BPF_REG_0, 0),
5327 			BPF_EXIT_INSN(),
5328 		},
5329 		.errstr = "cannot write into packet",
5330 		.result = REJECT,
5331 		.prog_type = BPF_PROG_TYPE_LWT_IN,
5332 	},
5333 	{
5334 		"invalid direct packet write for LWT_OUT",
5335 		.insns = {
5336 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5337 				    offsetof(struct __sk_buff, data)),
5338 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5339 				    offsetof(struct __sk_buff, data_end)),
5340 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5341 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5342 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5343 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
5344 			BPF_MOV64_IMM(BPF_REG_0, 0),
5345 			BPF_EXIT_INSN(),
5346 		},
5347 		.errstr = "cannot write into packet",
5348 		.result = REJECT,
5349 		.prog_type = BPF_PROG_TYPE_LWT_OUT,
5350 	},
5351 	{
5352 		"direct packet write for LWT_XMIT",
5353 		.insns = {
5354 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5355 				    offsetof(struct __sk_buff, data)),
5356 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5357 				    offsetof(struct __sk_buff, data_end)),
5358 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5359 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5360 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5361 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
5362 			BPF_MOV64_IMM(BPF_REG_0, 0),
5363 			BPF_EXIT_INSN(),
5364 		},
5365 		.result = ACCEPT,
5366 		.prog_type = BPF_PROG_TYPE_LWT_XMIT,
5367 	},
5368 	{
5369 		"direct packet read for LWT_IN",
5370 		.insns = {
5371 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5372 				    offsetof(struct __sk_buff, data)),
5373 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5374 				    offsetof(struct __sk_buff, data_end)),
5375 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5376 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5377 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5378 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
5379 			BPF_MOV64_IMM(BPF_REG_0, 0),
5380 			BPF_EXIT_INSN(),
5381 		},
5382 		.result = ACCEPT,
5383 		.prog_type = BPF_PROG_TYPE_LWT_IN,
5384 	},
5385 	{
5386 		"direct packet read for LWT_OUT",
5387 		.insns = {
5388 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5389 				    offsetof(struct __sk_buff, data)),
5390 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5391 				    offsetof(struct __sk_buff, data_end)),
5392 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5393 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5394 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5395 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
5396 			BPF_MOV64_IMM(BPF_REG_0, 0),
5397 			BPF_EXIT_INSN(),
5398 		},
5399 		.result = ACCEPT,
5400 		.prog_type = BPF_PROG_TYPE_LWT_OUT,
5401 	},
5402 	{
5403 		"direct packet read for LWT_XMIT",
5404 		.insns = {
5405 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5406 				    offsetof(struct __sk_buff, data)),
5407 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5408 				    offsetof(struct __sk_buff, data_end)),
5409 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5410 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5411 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5412 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
5413 			BPF_MOV64_IMM(BPF_REG_0, 0),
5414 			BPF_EXIT_INSN(),
5415 		},
5416 		.result = ACCEPT,
5417 		.prog_type = BPF_PROG_TYPE_LWT_XMIT,
5418 	},
5419 	{
5420 		"overlapping checks for direct packet access",
5421 		.insns = {
5422 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5423 				    offsetof(struct __sk_buff, data)),
5424 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5425 				    offsetof(struct __sk_buff, data_end)),
5426 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5427 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5428 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
5429 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
5430 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
5431 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
5432 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
5433 			BPF_MOV64_IMM(BPF_REG_0, 0),
5434 			BPF_EXIT_INSN(),
5435 		},
5436 		.result = ACCEPT,
5437 		.prog_type = BPF_PROG_TYPE_LWT_XMIT,
5438 	},
5439 	{
5440 		"make headroom for LWT_XMIT",
5441 		.insns = {
5442 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5443 			BPF_MOV64_IMM(BPF_REG_2, 34),
5444 			BPF_MOV64_IMM(BPF_REG_3, 0),
5445 			BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
5446 			/* split for s390 to succeed */
5447 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
5448 			BPF_MOV64_IMM(BPF_REG_2, 42),
5449 			BPF_MOV64_IMM(BPF_REG_3, 0),
5450 			BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
5451 			BPF_MOV64_IMM(BPF_REG_0, 0),
5452 			BPF_EXIT_INSN(),
5453 		},
5454 		.result = ACCEPT,
5455 		.prog_type = BPF_PROG_TYPE_LWT_XMIT,
5456 	},
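	/* skb->tc_classid is not accessible here: all three loads below must
	 * be rejected as an invalid bpf_context access.
	 */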
5457 	{
5458 		"invalid access of tc_classid for LWT_IN",
5459 		.insns = {
5460 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5461 				    offsetof(struct __sk_buff, tc_classid)),
5462 			BPF_EXIT_INSN(),
5463 		},
5464 		.result = REJECT,
5465 		.errstr = "invalid bpf_context access",
5466 	},
5467 	{
5468 		"invalid access of tc_classid for LWT_OUT",
5469 		.insns = {
5470 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5471 				    offsetof(struct __sk_buff, tc_classid)),
5472 			BPF_EXIT_INSN(),
5473 		},
5474 		.result = REJECT,
5475 		.errstr = "invalid bpf_context access",
5476 	},
5477 	{
5478 		"invalid access of tc_classid for LWT_XMIT",
5479 		.insns = {
5480 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5481 				    offsetof(struct __sk_buff, tc_classid)),
5482 			BPF_EXIT_INSN(),
5483 		},
5484 		.result = REJECT,
5485 		.errstr = "invalid bpf_context access",
5486 	},
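	/* Pointer leaks: storing a map or stack pointer into the skb context
	 * or into a map value is rejected for unprivileged users; BPF_XADD
	 * into the context is rejected for everyone.
	 */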
5487 	{
5488 		"leak pointer into ctx 1",
5489 		.insns = {
5490 			BPF_MOV64_IMM(BPF_REG_0, 0),
5491 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
5492 				    offsetof(struct __sk_buff, cb[0])),
5493 			BPF_LD_MAP_FD(BPF_REG_2, 0),
5494 			BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2,
5495 				      offsetof(struct __sk_buff, cb[0])),
5496 			BPF_EXIT_INSN(),
5497 		},
5498 		.fixup_map_hash_8b = { 2 },
5499 		.errstr_unpriv = "R2 leaks addr into mem",
5500 		.result_unpriv = REJECT,
5501 		.result = REJECT,
5502 		.errstr = "BPF_XADD stores into R1 inv is not allowed",
5503 	},
5504 	{
5505 		"leak pointer into ctx 2",
5506 		.insns = {
5507 			BPF_MOV64_IMM(BPF_REG_0, 0),
5508 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
5509 				    offsetof(struct __sk_buff, cb[0])),
5510 			BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10,
5511 				      offsetof(struct __sk_buff, cb[0])),
5512 			BPF_EXIT_INSN(),
5513 		},
5514 		.errstr_unpriv = "R10 leaks addr into mem",
5515 		.result_unpriv = REJECT,
5516 		.result = REJECT,
5517 		.errstr = "BPF_XADD stores into R1 inv is not allowed",
5518 	},
5519 	{
5520 		"leak pointer into ctx 3",
5521 		.insns = {
5522 			BPF_MOV64_IMM(BPF_REG_0, 0),
5523 			BPF_LD_MAP_FD(BPF_REG_2, 0),
5524 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2,
5525 				      offsetof(struct __sk_buff, cb[0])),
5526 			BPF_EXIT_INSN(),
5527 		},
5528 		.fixup_map_hash_8b = { 1 },
5529 		.errstr_unpriv = "R2 leaks addr into ctx",
5530 		.result_unpriv = REJECT,
5531 		.result = ACCEPT,
5532 	},
5533 	{
5534 		"leak pointer into map val",
5535 		.insns = {
5536 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5537 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5538 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5539 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5540 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5541 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5542 				     BPF_FUNC_map_lookup_elem),
5543 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
5544 			BPF_MOV64_IMM(BPF_REG_3, 0),
5545 			BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
5546 			BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
5547 			BPF_MOV64_IMM(BPF_REG_0, 0),
5548 			BPF_EXIT_INSN(),
5549 		},
5550 		.fixup_map_hash_8b = { 4 },
5551 		.errstr_unpriv = "R6 leaks addr into mem",
5552 		.result_unpriv = REJECT,
5553 		.result = ACCEPT,
5554 	},
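	/* Helper access to a map value: the size argument passed to the
	 * helper must keep the access inside the map value; empty,
	 * out-of-bound and negative ranges are rejected.
	 */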
5555 	{
5556 		"helper access to map: full range",
5557 		.insns = {
5558 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5559 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5560 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5561 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5562 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5563 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5564 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5565 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5566 			BPF_MOV64_IMM(BPF_REG_3, 0),
5567 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5568 			BPF_EXIT_INSN(),
5569 		},
5570 		.fixup_map_hash_48b = { 3 },
5571 		.result = ACCEPT,
5572 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5573 	},
5574 	{
5575 		"helper access to map: partial range",
5576 		.insns = {
5577 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5578 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5579 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5580 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5581 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5582 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5583 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5584 			BPF_MOV64_IMM(BPF_REG_2, 8),
5585 			BPF_MOV64_IMM(BPF_REG_3, 0),
5586 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5587 			BPF_EXIT_INSN(),
5588 		},
5589 		.fixup_map_hash_48b = { 3 },
5590 		.result = ACCEPT,
5591 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5592 	},
5593 	{
5594 		"helper access to map: empty range",
5595 		.insns = {
5596 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5597 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5598 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5599 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5600 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5601 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
5602 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5603 			BPF_MOV64_IMM(BPF_REG_2, 0),
5604 			BPF_EMIT_CALL(BPF_FUNC_trace_printk),
5605 			BPF_EXIT_INSN(),
5606 		},
5607 		.fixup_map_hash_48b = { 3 },
5608 		.errstr = "invalid access to map value, value_size=48 off=0 size=0",
5609 		.result = REJECT,
5610 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5611 	},
5612 	{
5613 		"helper access to map: out-of-bound range",
5614 		.insns = {
5615 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5616 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5617 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5618 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5619 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5620 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5621 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5622 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
5623 			BPF_MOV64_IMM(BPF_REG_3, 0),
5624 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5625 			BPF_EXIT_INSN(),
5626 		},
5627 		.fixup_map_hash_48b = { 3 },
5628 		.errstr = "invalid access to map value, value_size=48 off=0 size=56",
5629 		.result = REJECT,
5630 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5631 	},
5632 	{
5633 		"helper access to map: negative range",
5634 		.insns = {
5635 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5636 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5637 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5638 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5639 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5640 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5641 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5642 			BPF_MOV64_IMM(BPF_REG_2, -8),
5643 			BPF_MOV64_IMM(BPF_REG_3, 0),
5644 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5645 			BPF_EXIT_INSN(),
5646 		},
5647 		.fixup_map_hash_48b = { 3 },
5648 		.errstr = "R2 min value is negative",
5649 		.result = REJECT,
5650 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5651 	},
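	/* Same checks with the map value pointer first advanced by the
	 * constant immediate offsetof(struct test_val, foo).
	 */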
5652 	{
5653 		"helper access to adjusted map (via const imm): full range",
5654 		.insns = {
5655 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5656 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5657 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5658 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5659 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5660 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5661 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5662 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5663 				offsetof(struct test_val, foo)),
5664 			BPF_MOV64_IMM(BPF_REG_2,
5665 				sizeof(struct test_val) -
5666 				offsetof(struct test_val, foo)),
5667 			BPF_MOV64_IMM(BPF_REG_3, 0),
5668 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5669 			BPF_EXIT_INSN(),
5670 		},
5671 		.fixup_map_hash_48b = { 3 },
5672 		.result = ACCEPT,
5673 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5674 	},
5675 	{
5676 		"helper access to adjusted map (via const imm): partial range",
5677 		.insns = {
5678 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5679 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5680 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5681 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5682 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5683 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5684 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5685 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5686 				offsetof(struct test_val, foo)),
5687 			BPF_MOV64_IMM(BPF_REG_2, 8),
5688 			BPF_MOV64_IMM(BPF_REG_3, 0),
5689 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5690 			BPF_EXIT_INSN(),
5691 		},
5692 		.fixup_map_hash_48b = { 3 },
5693 		.result = ACCEPT,
5694 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5695 	},
5696 	{
5697 		"helper access to adjusted map (via const imm): empty range",
5698 		.insns = {
5699 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5700 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5701 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5702 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5703 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5704 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5705 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5706 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5707 				offsetof(struct test_val, foo)),
5708 			BPF_MOV64_IMM(BPF_REG_2, 0),
5709 			BPF_EMIT_CALL(BPF_FUNC_trace_printk),
5710 			BPF_EXIT_INSN(),
5711 		},
5712 		.fixup_map_hash_48b = { 3 },
5713 		.errstr = "invalid access to map value, value_size=48 off=4 size=0",
5714 		.result = REJECT,
5715 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5716 	},
5717 	{
5718 		"helper access to adjusted map (via const imm): out-of-bound range",
5719 		.insns = {
5720 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5721 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5722 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5723 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5724 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5725 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5726 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5727 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5728 				offsetof(struct test_val, foo)),
5729 			BPF_MOV64_IMM(BPF_REG_2,
5730 				sizeof(struct test_val) -
5731 				offsetof(struct test_val, foo) + 8),
5732 			BPF_MOV64_IMM(BPF_REG_3, 0),
5733 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5734 			BPF_EXIT_INSN(),
5735 		},
5736 		.fixup_map_hash_48b = { 3 },
5737 		.errstr = "invalid access to map value, value_size=48 off=4 size=52",
5738 		.result = REJECT,
5739 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5740 	},
5741 	{
5742 		"helper access to adjusted map (via const imm): negative range (> adjustment)",
5743 		.insns = {
5744 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5745 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5746 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5747 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5748 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5749 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5750 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5751 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5752 				offsetof(struct test_val, foo)),
5753 			BPF_MOV64_IMM(BPF_REG_2, -8),
5754 			BPF_MOV64_IMM(BPF_REG_3, 0),
5755 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5756 			BPF_EXIT_INSN(),
5757 		},
5758 		.fixup_map_hash_48b = { 3 },
5759 		.errstr = "R2 min value is negative",
5760 		.result = REJECT,
5761 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5762 	},
5763 	{
5764 		"helper access to adjusted map (via const imm): negative range (< adjustment)",
5765 		.insns = {
5766 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5767 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5768 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5769 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5770 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5771 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5772 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5773 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5774 				offsetof(struct test_val, foo)),
5775 			BPF_MOV64_IMM(BPF_REG_2, -1),
5776 			BPF_MOV64_IMM(BPF_REG_3, 0),
5777 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5778 			BPF_EXIT_INSN(),
5779 		},
5780 		.fixup_map_hash_48b = { 3 },
5781 		.errstr = "R2 min value is negative",
5782 		.result = REJECT,
5783 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5784 	},
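	/* Same checks, but the offset is added via a register holding a
	 * known constant.
	 */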
5785 	{
5786 		"helper access to adjusted map (via const reg): full range",
5787 		.insns = {
5788 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5789 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5790 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5791 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5792 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5793 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5794 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5795 			BPF_MOV64_IMM(BPF_REG_3,
5796 				offsetof(struct test_val, foo)),
5797 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5798 			BPF_MOV64_IMM(BPF_REG_2,
5799 				sizeof(struct test_val) -
5800 				offsetof(struct test_val, foo)),
5801 			BPF_MOV64_IMM(BPF_REG_3, 0),
5802 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5803 			BPF_EXIT_INSN(),
5804 		},
5805 		.fixup_map_hash_48b = { 3 },
5806 		.result = ACCEPT,
5807 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5808 	},
5809 	{
5810 		"helper access to adjusted map (via const reg): partial range",
5811 		.insns = {
5812 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5813 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5814 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5815 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5816 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5817 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5818 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5819 			BPF_MOV64_IMM(BPF_REG_3,
5820 				offsetof(struct test_val, foo)),
5821 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5822 			BPF_MOV64_IMM(BPF_REG_2, 8),
5823 			BPF_MOV64_IMM(BPF_REG_3, 0),
5824 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5825 			BPF_EXIT_INSN(),
5826 		},
5827 		.fixup_map_hash_48b = { 3 },
5828 		.result = ACCEPT,
5829 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5830 	},
5831 	{
5832 		"helper access to adjusted map (via const reg): empty range",
5833 		.insns = {
5834 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5835 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5836 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5837 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5838 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5839 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5840 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5841 			BPF_MOV64_IMM(BPF_REG_3, 0),
5842 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5843 			BPF_MOV64_IMM(BPF_REG_2, 0),
5844 			BPF_EMIT_CALL(BPF_FUNC_trace_printk),
5845 			BPF_EXIT_INSN(),
5846 		},
5847 		.fixup_map_hash_48b = { 3 },
5848 		.errstr = "R1 min value is outside of the array range",
5849 		.result = REJECT,
5850 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5851 	},
5852 	{
5853 		"helper access to adjusted map (via const reg): out-of-bound range",
5854 		.insns = {
5855 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5856 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5857 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5858 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5859 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5860 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5861 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5862 			BPF_MOV64_IMM(BPF_REG_3,
5863 				offsetof(struct test_val, foo)),
5864 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5865 			BPF_MOV64_IMM(BPF_REG_2,
5866 				sizeof(struct test_val) -
5867 				offsetof(struct test_val, foo) + 8),
5868 			BPF_MOV64_IMM(BPF_REG_3, 0),
5869 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5870 			BPF_EXIT_INSN(),
5871 		},
5872 		.fixup_map_hash_48b = { 3 },
5873 		.errstr = "invalid access to map value, value_size=48 off=4 size=52",
5874 		.result = REJECT,
5875 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5876 	},
5877 	{
5878 		"helper access to adjusted map (via const reg): negative range (> adjustment)",
5879 		.insns = {
5880 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5881 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5882 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5883 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5884 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5885 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5886 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5887 			BPF_MOV64_IMM(BPF_REG_3,
5888 				offsetof(struct test_val, foo)),
5889 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5890 			BPF_MOV64_IMM(BPF_REG_2, -8),
5891 			BPF_MOV64_IMM(BPF_REG_3, 0),
5892 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5893 			BPF_EXIT_INSN(),
5894 		},
5895 		.fixup_map_hash_48b = { 3 },
5896 		.errstr = "R2 min value is negative",
5897 		.result = REJECT,
5898 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5899 	},
5900 	{
5901 		"helper access to adjusted map (via const reg): negative range (< adjustment)",
5902 		.insns = {
5903 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5904 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5905 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5906 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5907 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5908 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5909 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5910 			BPF_MOV64_IMM(BPF_REG_3,
5911 				offsetof(struct test_val, foo)),
5912 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5913 			BPF_MOV64_IMM(BPF_REG_2, -1),
5914 			BPF_MOV64_IMM(BPF_REG_3, 0),
5915 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5916 			BPF_EXIT_INSN(),
5917 		},
5918 		.fixup_map_hash_48b = { 3 },
5919 		.errstr = "R2 min value is negative",
5920 		.result = REJECT,
5921 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5922 	},
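	/* Same checks with a variable offset read from the map value: the
	 * access is only accepted when the offset has an upper bound
	 * (checked against offsetof(struct test_val, foo)).
	 */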
5923 	{
5924 		"helper access to adjusted map (via variable): full range",
5925 		.insns = {
5926 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5927 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5928 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5929 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5930 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5931 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5932 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5933 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5934 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
5935 				offsetof(struct test_val, foo), 4),
5936 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5937 			BPF_MOV64_IMM(BPF_REG_2,
5938 				sizeof(struct test_val) -
5939 				offsetof(struct test_val, foo)),
5940 			BPF_MOV64_IMM(BPF_REG_3, 0),
5941 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5942 			BPF_EXIT_INSN(),
5943 		},
5944 		.fixup_map_hash_48b = { 3 },
5945 		.result = ACCEPT,
5946 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5947 	},
5948 	{
5949 		"helper access to adjusted map (via variable): partial range",
5950 		.insns = {
5951 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5952 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5953 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5954 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5955 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5956 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5957 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5958 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5959 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
5960 				offsetof(struct test_val, foo), 4),
5961 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5962 			BPF_MOV64_IMM(BPF_REG_2, 8),
5963 			BPF_MOV64_IMM(BPF_REG_3, 0),
5964 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5965 			BPF_EXIT_INSN(),
5966 		},
5967 		.fixup_map_hash_48b = { 3 },
5968 		.result = ACCEPT,
5969 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5970 	},
5971 	{
5972 		"helper access to adjusted map (via variable): empty range",
5973 		.insns = {
5974 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5975 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5976 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5977 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5978 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5979 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5980 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5981 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5982 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
5983 				offsetof(struct test_val, foo), 3),
5984 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5985 			BPF_MOV64_IMM(BPF_REG_2, 0),
5986 			BPF_EMIT_CALL(BPF_FUNC_trace_printk),
5987 			BPF_EXIT_INSN(),
5988 		},
5989 		.fixup_map_hash_48b = { 3 },
5990 		.errstr = "R1 min value is outside of the array range",
5991 		.result = REJECT,
5992 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5993 	},
5994 	{
5995 		"helper access to adjusted map (via variable): no max check",
5996 		.insns = {
5997 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5998 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5999 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6000 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6001 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6002 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6003 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6004 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6005 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6006 			BPF_MOV64_IMM(BPF_REG_2, 1),
6007 			BPF_MOV64_IMM(BPF_REG_3, 0),
6008 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6009 			BPF_EXIT_INSN(),
6010 		},
6011 		.fixup_map_hash_48b = { 3 },
6012 		.errstr = "R1 unbounded memory access",
6013 		.result = REJECT,
6014 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6015 	},
6016 	{
6017 		"helper access to adjusted map (via variable): wrong max check",
6018 		.insns = {
6019 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6020 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6021 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6022 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6023 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6024 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6025 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6026 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6027 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6028 				offsetof(struct test_val, foo), 4),
6029 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6030 			BPF_MOV64_IMM(BPF_REG_2,
6031 				sizeof(struct test_val) -
6032 				offsetof(struct test_val, foo) + 1),
6033 			BPF_MOV64_IMM(BPF_REG_3, 0),
6034 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6035 			BPF_EXIT_INSN(),
6036 		},
6037 		.fixup_map_hash_48b = { 3 },
6038 		.errstr = "invalid access to map value, value_size=48 off=4 size=45",
6039 		.result = REJECT,
6040 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6041 	},
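	/* Bounds derived from conditional jumps: JLT/JLE must bound the
	 * offset on the path that performs the access, and the signed
	 * JSLT/JSLE variants are only sufficient when the value cannot be
	 * negative (explicit check or zero-extending 32-bit load).
	 */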
6042 	{
6043 		"helper access to map: bounds check using <, good access",
6044 		.insns = {
6045 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6046 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6047 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6048 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6049 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6050 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6051 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6052 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6053 			BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 2),
6054 			BPF_MOV64_IMM(BPF_REG_0, 0),
6055 			BPF_EXIT_INSN(),
6056 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6057 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6058 			BPF_MOV64_IMM(BPF_REG_0, 0),
6059 			BPF_EXIT_INSN(),
6060 		},
6061 		.fixup_map_hash_48b = { 3 },
6062 		.result = ACCEPT,
6063 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6064 	},
6065 	{
6066 		"helper access to map: bounds check using <, bad access",
6067 		.insns = {
6068 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6069 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6070 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6071 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6072 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6073 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6074 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6075 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6076 			BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 4),
6077 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6078 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6079 			BPF_MOV64_IMM(BPF_REG_0, 0),
6080 			BPF_EXIT_INSN(),
6081 			BPF_MOV64_IMM(BPF_REG_0, 0),
6082 			BPF_EXIT_INSN(),
6083 		},
6084 		.fixup_map_hash_48b = { 3 },
6085 		.result = REJECT,
6086 		.errstr = "R1 unbounded memory access",
6087 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6088 	},
6089 	{
6090 		"helper access to map: bounds check using <=, good access",
6091 		.insns = {
6092 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6093 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6094 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6095 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6096 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6097 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6098 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6099 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6100 			BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 2),
6101 			BPF_MOV64_IMM(BPF_REG_0, 0),
6102 			BPF_EXIT_INSN(),
6103 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6104 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6105 			BPF_MOV64_IMM(BPF_REG_0, 0),
6106 			BPF_EXIT_INSN(),
6107 		},
6108 		.fixup_map_hash_48b = { 3 },
6109 		.result = ACCEPT,
6110 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6111 	},
6112 	{
6113 		"helper access to map: bounds check using <=, bad access",
6114 		.insns = {
6115 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6116 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6117 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6118 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6119 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6120 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6121 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6122 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6123 			BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 4),
6124 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6125 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6126 			BPF_MOV64_IMM(BPF_REG_0, 0),
6127 			BPF_EXIT_INSN(),
6128 			BPF_MOV64_IMM(BPF_REG_0, 0),
6129 			BPF_EXIT_INSN(),
6130 		},
6131 		.fixup_map_hash_48b = { 3 },
6132 		.result = REJECT,
6133 		.errstr = "R1 unbounded memory access",
6134 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6135 	},
6136 	{
6137 		"helper access to map: bounds check using s<, good access",
6138 		.insns = {
6139 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6140 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6141 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6142 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6143 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6144 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6145 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6146 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6147 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
6148 			BPF_MOV64_IMM(BPF_REG_0, 0),
6149 			BPF_EXIT_INSN(),
6150 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 0, -3),
6151 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6152 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6153 			BPF_MOV64_IMM(BPF_REG_0, 0),
6154 			BPF_EXIT_INSN(),
6155 		},
6156 		.fixup_map_hash_48b = { 3 },
6157 		.result = ACCEPT,
6158 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6159 	},
6160 	{
6161 		"helper access to map: bounds check using s<, good access 2",
6162 		.insns = {
6163 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6164 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6165 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6166 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6167 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6168 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6169 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6170 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6171 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
6172 			BPF_MOV64_IMM(BPF_REG_0, 0),
6173 			BPF_EXIT_INSN(),
6174 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
6175 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6176 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6177 			BPF_MOV64_IMM(BPF_REG_0, 0),
6178 			BPF_EXIT_INSN(),
6179 		},
6180 		.fixup_map_hash_48b = { 3 },
6181 		.result = ACCEPT,
6182 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6183 	},
6184 	{
6185 		"helper access to map: bounds check using s<, bad access",
6186 		.insns = {
6187 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6188 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6189 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6190 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6191 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6192 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6193 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6194 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
6195 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
6196 			BPF_MOV64_IMM(BPF_REG_0, 0),
6197 			BPF_EXIT_INSN(),
6198 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
6199 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6200 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6201 			BPF_MOV64_IMM(BPF_REG_0, 0),
6202 			BPF_EXIT_INSN(),
6203 		},
6204 		.fixup_map_hash_48b = { 3 },
6205 		.result = REJECT,
6206 		.errstr = "R1 min value is negative",
6207 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6208 	},
6209 	{
6210 		"helper access to map: bounds check using s<=, good access",
6211 		.insns = {
6212 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6213 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6214 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6215 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6216 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6217 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6218 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6219 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6220 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
6221 			BPF_MOV64_IMM(BPF_REG_0, 0),
6222 			BPF_EXIT_INSN(),
6223 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 0, -3),
6224 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6225 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6226 			BPF_MOV64_IMM(BPF_REG_0, 0),
6227 			BPF_EXIT_INSN(),
6228 		},
6229 		.fixup_map_hash_48b = { 3 },
6230 		.result = ACCEPT,
6231 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6232 	},
6233 	{
6234 		"helper access to map: bounds check using s<=, good access 2",
6235 		.insns = {
6236 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6237 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6238 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6239 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6240 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6241 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6242 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6243 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6244 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
6245 			BPF_MOV64_IMM(BPF_REG_0, 0),
6246 			BPF_EXIT_INSN(),
6247 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
6248 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6249 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6250 			BPF_MOV64_IMM(BPF_REG_0, 0),
6251 			BPF_EXIT_INSN(),
6252 		},
6253 		.fixup_map_hash_48b = { 3 },
6254 		.result = ACCEPT,
6255 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6256 	},
6257 	{
6258 		"helper access to map: bounds check using s<=, bad access",
6259 		.insns = {
6260 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6261 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6262 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6263 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6264 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6265 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6266 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6267 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
6268 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
6269 			BPF_MOV64_IMM(BPF_REG_0, 0),
6270 			BPF_EXIT_INSN(),
6271 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
6272 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6273 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6274 			BPF_MOV64_IMM(BPF_REG_0, 0),
6275 			BPF_EXIT_INSN(),
6276 		},
6277 		.fixup_map_hash_48b = { 3 },
6278 		.result = REJECT,
6279 		.errstr = "R1 min value is negative",
6280 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6281 	},
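	/* Map value pointers passed back into map helpers: using a looked-up
	 * value as the key/value argument of bpf_map_lookup_elem() or
	 * bpf_map_update_elem() is subject to the same size checks.
	 */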
6282 	{
6283 		"map lookup helper access to map",
6284 		.insns = {
6285 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6286 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6287 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6288 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6289 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6290 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6291 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6292 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6293 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6294 			BPF_EXIT_INSN(),
6295 		},
6296 		.fixup_map_hash_16b = { 3, 8 },
6297 		.result = ACCEPT,
6298 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6299 	},
6300 	{
6301 		"map update helper access to map",
6302 		.insns = {
6303 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6304 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6305 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6306 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6307 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6308 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6309 			BPF_MOV64_IMM(BPF_REG_4, 0),
6310 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
6311 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6312 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6313 			BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
6314 			BPF_EXIT_INSN(),
6315 		},
6316 		.fixup_map_hash_16b = { 3, 10 },
6317 		.result = ACCEPT,
6318 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6319 	},
6320 	{
6321 		"map update helper access to map: wrong size",
6322 		.insns = {
6323 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6324 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6325 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6326 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6327 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6328 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6329 			BPF_MOV64_IMM(BPF_REG_4, 0),
6330 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
6331 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6332 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6333 			BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
6334 			BPF_EXIT_INSN(),
6335 		},
6336 		.fixup_map_hash_8b = { 3 },
6337 		.fixup_map_hash_16b = { 10 },
6338 		.result = REJECT,
6339 		.errstr = "invalid access to map value, value_size=8 off=0 size=16",
6340 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6341 	},
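	/* As above, with the map value pointer adjusted by a constant
	 * immediate, a constant register or a bounds-checked variable before
	 * being passed to bpf_map_lookup_elem().
	 */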
6342 	{
6343 		"map helper access to adjusted map (via const imm)",
6344 		.insns = {
6345 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6346 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6347 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6348 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6349 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6350 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6351 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6352 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
6353 				      offsetof(struct other_val, bar)),
6354 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6355 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6356 			BPF_EXIT_INSN(),
6357 		},
6358 		.fixup_map_hash_16b = { 3, 9 },
6359 		.result = ACCEPT,
6360 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6361 	},
6362 	{
6363 		"map helper access to adjusted map (via const imm): out-of-bound 1",
6364 		.insns = {
6365 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6366 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6367 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6368 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6369 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6370 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6371 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6372 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
6373 				      sizeof(struct other_val) - 4),
6374 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6375 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6376 			BPF_EXIT_INSN(),
6377 		},
6378 		.fixup_map_hash_16b = { 3, 9 },
6379 		.result = REJECT,
6380 		.errstr = "invalid access to map value, value_size=16 off=12 size=8",
6381 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6382 	},
6383 	{
6384 		"map helper access to adjusted map (via const imm): out-of-bound 2",
6385 		.insns = {
6386 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6387 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6388 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6389 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6390 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6391 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6392 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6393 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6394 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6395 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6396 			BPF_EXIT_INSN(),
6397 		},
6398 		.fixup_map_hash_16b = { 3, 9 },
6399 		.result = REJECT,
6400 		.errstr = "invalid access to map value, value_size=16 off=-4 size=8",
6401 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6402 	},
6403 	{
6404 		"map helper access to adjusted map (via const reg)",
6405 		.insns = {
6406 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6407 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6408 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6409 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6410 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6411 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6412 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6413 			BPF_MOV64_IMM(BPF_REG_3,
6414 				      offsetof(struct other_val, bar)),
6415 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6416 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6417 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6418 			BPF_EXIT_INSN(),
6419 		},
6420 		.fixup_map_hash_16b = { 3, 10 },
6421 		.result = ACCEPT,
6422 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6423 	},
6424 	{
6425 		"map helper access to adjusted map (via const reg): out-of-bound 1",
6426 		.insns = {
6427 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6428 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6429 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6430 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6431 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6432 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6433 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6434 			BPF_MOV64_IMM(BPF_REG_3,
6435 				      sizeof(struct other_val) - 4),
6436 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6437 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6438 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6439 			BPF_EXIT_INSN(),
6440 		},
6441 		.fixup_map_hash_16b = { 3, 10 },
6442 		.result = REJECT,
6443 		.errstr = "invalid access to map value, value_size=16 off=12 size=8",
6444 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6445 	},
6446 	{
6447 		"map helper access to adjusted map (via const reg): out-of-bound 2",
6448 		.insns = {
6449 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6450 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6451 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6452 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6453 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6454 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6455 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6456 			BPF_MOV64_IMM(BPF_REG_3, -4),
6457 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6458 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6459 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6460 			BPF_EXIT_INSN(),
6461 		},
6462 		.fixup_map_hash_16b = { 3, 10 },
6463 		.result = REJECT,
6464 		.errstr = "invalid access to map value, value_size=16 off=-4 size=8",
6465 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6466 	},
6467 	{
6468 		"map helper access to adjusted map (via variable)",
6469 		.insns = {
6470 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6471 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6472 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6473 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6474 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6475 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6476 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6477 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6478 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6479 				    offsetof(struct other_val, bar), 4),
6480 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6481 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6482 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6483 			BPF_EXIT_INSN(),
6484 		},
6485 		.fixup_map_hash_16b = { 3, 11 },
6486 		.result = ACCEPT,
6487 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6488 	},
6489 	{
6490 		"map helper access to adjusted map (via variable): no max check",
6491 		.insns = {
6492 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6493 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6494 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6495 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6496 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6497 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6498 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6499 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6500 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6501 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6502 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6503 			BPF_EXIT_INSN(),
6504 		},
6505 		.fixup_map_hash_16b = { 3, 10 },
6506 		.result = REJECT,
6507 		.errstr = "R2 unbounded memory access, make sure to bounds check any array access into a map",
6508 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6509 	},
6510 	{
6511 		"map helper access to adjusted map (via variable): wrong max check",
6512 		.insns = {
6513 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6514 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6515 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6516 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6517 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6518 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6519 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6520 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6521 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6522 				    offsetof(struct other_val, bar) + 1, 4),
6523 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6524 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6525 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6526 			BPF_EXIT_INSN(),
6527 		},
6528 		.fixup_map_hash_16b = { 3, 11 },
6529 		.result = REJECT,
6530 		.errstr = "invalid access to map value, value_size=16 off=9 size=8",
6531 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6532 	},
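	/* Spilling a map value pointer (or map_value_or_null) to the stack
	 * and filling it back must preserve its type, so the later store
	 * through the filled register is still valid; unprivileged mode
	 * rejects these programs with "R0 leaks addr".
	 */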
6533 	{
6534 		"map element value is preserved across register spilling",
6535 		.insns = {
6536 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6537 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6538 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6539 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6540 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6541 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6542 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
6543 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6544 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
6545 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
6546 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
6547 			BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
6548 			BPF_EXIT_INSN(),
6549 		},
6550 		.fixup_map_hash_48b = { 3 },
6551 		.errstr_unpriv = "R0 leaks addr",
6552 		.result = ACCEPT,
6553 		.result_unpriv = REJECT,
6554 	},
6555 	{
6556 		"map element value or null is marked on register spilling",
6557 		.insns = {
6558 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6559 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6560 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6561 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6562 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6563 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6564 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
6565 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
6566 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
6567 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
6568 			BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
6569 			BPF_EXIT_INSN(),
6570 		},
6571 		.fixup_map_hash_48b = { 3 },
6572 		.errstr_unpriv = "R0 leaks addr",
6573 		.result = ACCEPT,
6574 		.result_unpriv = REJECT,
6575 	},
6576 	{
6577 		"map element value store of cleared call register",
6578 		.insns = {
6579 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6580 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6581 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6582 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6583 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6584 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
6585 			BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
6586 			BPF_EXIT_INSN(),
6587 		},
6588 		.fixup_map_hash_48b = { 3 },
6589 		.errstr_unpriv = "R1 !read_ok",
6590 		.errstr = "R1 !read_ok",
6591 		.result = REJECT,
6592 		.result_unpriv = REJECT,
6593 	},
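	/* Misaligned stores and loads within the map value are accepted as
	 * long as they stay in bounds; these tests are gated on efficient
	 * unaligned access support (F_NEEDS_EFFICIENT_UNALIGNED_ACCESS).
	 */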
6594 	{
6595 		"map element value with unaligned store",
6596 		.insns = {
6597 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6598 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6599 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6600 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6601 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6602 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
6603 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
6604 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
6605 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
6606 			BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
6607 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
6608 			BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
6609 			BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
6610 			BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
6611 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
6612 			BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
6613 			BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
6614 			BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
6615 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
6616 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
6617 			BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
6618 			BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
6619 			BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
6620 			BPF_EXIT_INSN(),
6621 		},
6622 		.fixup_map_hash_48b = { 3 },
6623 		.errstr_unpriv = "R0 leaks addr",
6624 		.result = ACCEPT,
6625 		.result_unpriv = REJECT,
6626 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
6627 	},
6628 	{
6629 		"map element value with unaligned load",
6630 		.insns = {
6631 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6632 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6633 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6634 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6635 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6636 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
6637 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
6638 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
6639 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
6640 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
6641 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
6642 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
6643 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
6644 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
6645 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
6646 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
6647 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
6648 			BPF_EXIT_INSN(),
6649 		},
6650 		.fixup_map_hash_48b = { 3 },
6651 		.errstr_unpriv = "R0 leaks addr",
6652 		.result = ACCEPT,
6653 		.result_unpriv = REJECT,
6654 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
6655 	},
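	/* ALU operations that are not plain 64-bit add/sub on a map value
	 * pointer: bitwise AND, 32-bit arithmetic, division and byte swaps
	 * either get rejected outright or turn the pointer into an unknown
	 * scalar, so the following store must fail.
	 */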
6656 	{
6657 		"map element value illegal alu op, 1",
6658 		.insns = {
6659 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6660 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6661 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6662 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6663 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6664 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
6665 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
6666 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
6667 			BPF_EXIT_INSN(),
6668 		},
6669 		.fixup_map_hash_48b = { 3 },
6670 		.errstr = "R0 bitwise operator &= on pointer",
6671 		.result = REJECT,
6672 	},
6673 	{
6674 		"map element value illegal alu op, 2",
6675 		.insns = {
6676 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6677 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6678 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6679 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6680 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6681 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
6682 			BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
6683 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
6684 			BPF_EXIT_INSN(),
6685 		},
6686 		.fixup_map_hash_48b = { 3 },
6687 		.errstr = "R0 32-bit pointer arithmetic prohibited",
6688 		.result = REJECT,
6689 	},
6690 	{
6691 		"map element value illegal alu op, 3",
6692 		.insns = {
6693 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6694 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6695 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6696 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6697 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6698 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
6699 			BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
6700 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
6701 			BPF_EXIT_INSN(),
6702 		},
6703 		.fixup_map_hash_48b = { 3 },
6704 		.errstr = "R0 pointer arithmetic with /= operator",
6705 		.result = REJECT,
6706 	},
6707 	{
6708 		"map element value illegal alu op, 4",
6709 		.insns = {
6710 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6711 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6712 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6713 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6714 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6715 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
6716 			BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
6717 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
6718 			BPF_EXIT_INSN(),
6719 		},
6720 		.fixup_map_hash_48b = { 3 },
6721 		.errstr_unpriv = "R0 pointer arithmetic prohibited",
6722 		.errstr = "invalid mem access 'inv'",
6723 		.result = REJECT,
6724 		.result_unpriv = REJECT,
6725 	},
6726 	{
6727 		"map element value illegal alu op, 5",
6728 		.insns = {
6729 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6730 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6731 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6732 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6733 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6734 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6735 			BPF_MOV64_IMM(BPF_REG_3, 4096),
6736 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6737 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6738 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
6739 			BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
6740 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
6741 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
6742 			BPF_EXIT_INSN(),
6743 		},
6744 		.fixup_map_hash_48b = { 3 },
6745 		.errstr = "R0 invalid mem access 'inv'",
6746 		.result = REJECT,
6747 	},
6748 	{
6749 		"map element value is preserved across register spilling",
6750 		.insns = {
6751 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6752 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6753 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6754 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6755 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6756 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6757 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
6758 				offsetof(struct test_val, foo)),
6759 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
6760 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6761 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
6762 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
6763 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
6764 			BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
6765 			BPF_EXIT_INSN(),
6766 		},
6767 		.fixup_map_hash_48b = { 3 },
6768 		.errstr_unpriv = "R0 leaks addr",
6769 		.result = ACCEPT,
6770 		.result_unpriv = REJECT,
6771 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
6772 	},
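	/* Helpers taking a variable memory size (probe_read() with the size
	 * in R2, reading from the stack via R1): the size must be bounded
	 * both above and below, e.g. with an AND mask or JMP checks, and
	 * the stack bytes being read must all be initialized.
	 */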
6773 	{
6774 		"helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
6775 		.insns = {
6776 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6777 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6778 			BPF_MOV64_IMM(BPF_REG_0, 0),
6779 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
6780 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
6781 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
6782 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
6783 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
6784 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
6785 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
6786 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
6787 			BPF_MOV64_IMM(BPF_REG_2, 16),
6788 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6789 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6790 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
6791 			BPF_MOV64_IMM(BPF_REG_4, 0),
6792 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
6793 			BPF_MOV64_IMM(BPF_REG_3, 0),
6794 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6795 			BPF_MOV64_IMM(BPF_REG_0, 0),
6796 			BPF_EXIT_INSN(),
6797 		},
6798 		.result = ACCEPT,
6799 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6800 	},
6801 	{
6802 		"helper access to variable memory: stack, bitwise AND, zero included",
6803 		.insns = {
6804 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6805 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6806 			BPF_MOV64_IMM(BPF_REG_2, 16),
6807 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6808 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6809 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
6810 			BPF_MOV64_IMM(BPF_REG_3, 0),
6811 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6812 			BPF_EXIT_INSN(),
6813 		},
6814 		.errstr = "invalid indirect read from stack off -64+0 size 64",
6815 		.result = REJECT,
6816 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6817 	},
6818 	{
6819 		"helper access to variable memory: stack, bitwise AND + JMP, wrong max",
6820 		.insns = {
6821 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6822 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6823 			BPF_MOV64_IMM(BPF_REG_2, 16),
6824 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6825 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6826 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
6827 			BPF_MOV64_IMM(BPF_REG_4, 0),
6828 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
6829 			BPF_MOV64_IMM(BPF_REG_3, 0),
6830 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6831 			BPF_MOV64_IMM(BPF_REG_0, 0),
6832 			BPF_EXIT_INSN(),
6833 		},
6834 		.errstr = "invalid stack type R1 off=-64 access_size=65",
6835 		.result = REJECT,
6836 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6837 	},
6838 	{
6839 		"helper access to variable memory: stack, JMP, correct bounds",
6840 		.insns = {
6841 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6842 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6843 			BPF_MOV64_IMM(BPF_REG_0, 0),
6844 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
6845 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
6846 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
6847 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
6848 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
6849 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
6850 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
6851 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
6852 			BPF_MOV64_IMM(BPF_REG_2, 16),
6853 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6854 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6855 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
6856 			BPF_MOV64_IMM(BPF_REG_4, 0),
6857 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
6858 			BPF_MOV64_IMM(BPF_REG_3, 0),
6859 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6860 			BPF_MOV64_IMM(BPF_REG_0, 0),
6861 			BPF_EXIT_INSN(),
6862 		},
6863 		.result = ACCEPT,
6864 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6865 	},
6866 	{
6867 		"helper access to variable memory: stack, JMP (signed), correct bounds",
6868 		.insns = {
6869 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6870 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6871 			BPF_MOV64_IMM(BPF_REG_0, 0),
6872 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
6873 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
6874 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
6875 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
6876 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
6877 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
6878 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
6879 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
6880 			BPF_MOV64_IMM(BPF_REG_2, 16),
6881 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6882 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6883 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
6884 			BPF_MOV64_IMM(BPF_REG_4, 0),
6885 			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
6886 			BPF_MOV64_IMM(BPF_REG_3, 0),
6887 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6888 			BPF_MOV64_IMM(BPF_REG_0, 0),
6889 			BPF_EXIT_INSN(),
6890 		},
6891 		.result = ACCEPT,
6892 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6893 	},
6894 	{
6895 		"helper access to variable memory: stack, JMP, bounds + offset",
6896 		.insns = {
6897 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6898 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6899 			BPF_MOV64_IMM(BPF_REG_2, 16),
6900 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6901 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6902 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
6903 			BPF_MOV64_IMM(BPF_REG_4, 0),
6904 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
6905 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
6906 			BPF_MOV64_IMM(BPF_REG_3, 0),
6907 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6908 			BPF_MOV64_IMM(BPF_REG_0, 0),
6909 			BPF_EXIT_INSN(),
6910 		},
6911 		.errstr = "invalid stack type R1 off=-64 access_size=65",
6912 		.result = REJECT,
6913 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6914 	},
6915 	{
6916 		"helper access to variable memory: stack, JMP, wrong max",
6917 		.insns = {
6918 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6919 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6920 			BPF_MOV64_IMM(BPF_REG_2, 16),
6921 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6922 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6923 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
6924 			BPF_MOV64_IMM(BPF_REG_4, 0),
6925 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
6926 			BPF_MOV64_IMM(BPF_REG_3, 0),
6927 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6928 			BPF_MOV64_IMM(BPF_REG_0, 0),
6929 			BPF_EXIT_INSN(),
6930 		},
6931 		.errstr = "invalid stack type R1 off=-64 access_size=65",
6932 		.result = REJECT,
6933 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6934 	},
6935 	{
6936 		"helper access to variable memory: stack, JMP, no max check",
6937 		.insns = {
6938 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6939 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6940 			BPF_MOV64_IMM(BPF_REG_2, 16),
6941 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6942 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6943 			BPF_MOV64_IMM(BPF_REG_4, 0),
6944 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
6945 			BPF_MOV64_IMM(BPF_REG_3, 0),
6946 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6947 			BPF_MOV64_IMM(BPF_REG_0, 0),
6948 			BPF_EXIT_INSN(),
6949 		},
6950 		/* because max wasn't checked, signed min is negative */
6951 		.errstr = "R2 min value is negative, either use unsigned or 'var &= const'",
6952 		.result = REJECT,
6953 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6954 	},
6955 	{
6956 		"helper access to variable memory: stack, JMP, no min check",
6957 		.insns = {
6958 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6959 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6960 			BPF_MOV64_IMM(BPF_REG_2, 16),
6961 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6962 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6963 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
6964 			BPF_MOV64_IMM(BPF_REG_3, 0),
6965 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6966 			BPF_MOV64_IMM(BPF_REG_0, 0),
6967 			BPF_EXIT_INSN(),
6968 		},
6969 		.errstr = "invalid indirect read from stack off -64+0 size 64",
6970 		.result = REJECT,
6971 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6972 	},
6973 	{
6974 		"helper access to variable memory: stack, JMP (signed), no min check",
6975 		.insns = {
6976 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6977 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6978 			BPF_MOV64_IMM(BPF_REG_2, 16),
6979 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6980 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6981 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
6982 			BPF_MOV64_IMM(BPF_REG_3, 0),
6983 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6984 			BPF_MOV64_IMM(BPF_REG_0, 0),
6985 			BPF_EXIT_INSN(),
6986 		},
6987 		.errstr = "R2 min value is negative",
6988 		.result = REJECT,
6989 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6990 	},
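	/* The same variable-size rules with the memory argument pointing
	 * into a map value: the size must not exceed the 48-byte value,
	 * and an adjusted pointer shrinks the range that is still allowed.
	 */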
6991 	{
6992 		"helper access to variable memory: map, JMP, correct bounds",
6993 		.insns = {
6994 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6995 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6996 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6997 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6998 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6999 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
7000 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7001 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
7002 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7003 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7004 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
7005 				sizeof(struct test_val), 4),
7006 			BPF_MOV64_IMM(BPF_REG_4, 0),
7007 			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
7008 			BPF_MOV64_IMM(BPF_REG_3, 0),
7009 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7010 			BPF_MOV64_IMM(BPF_REG_0, 0),
7011 			BPF_EXIT_INSN(),
7012 		},
7013 		.fixup_map_hash_48b = { 3 },
7014 		.result = ACCEPT,
7015 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7016 	},
7017 	{
7018 		"helper access to variable memory: map, JMP, wrong max",
7019 		.insns = {
7020 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7021 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7022 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7023 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7024 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7025 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
7026 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7027 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
7028 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7029 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7030 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
7031 				sizeof(struct test_val) + 1, 4),
7032 			BPF_MOV64_IMM(BPF_REG_4, 0),
7033 			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
7034 			BPF_MOV64_IMM(BPF_REG_3, 0),
7035 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7036 			BPF_MOV64_IMM(BPF_REG_0, 0),
7037 			BPF_EXIT_INSN(),
7038 		},
7039 		.fixup_map_hash_48b = { 3 },
7040 		.errstr = "invalid access to map value, value_size=48 off=0 size=49",
7041 		.result = REJECT,
7042 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7043 	},
7044 	{
7045 		"helper access to variable memory: map adjusted, JMP, correct bounds",
7046 		.insns = {
7047 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7048 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7049 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7050 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7051 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7052 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
7053 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7054 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
7055 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
7056 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7057 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7058 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
7059 				sizeof(struct test_val) - 20, 4),
7060 			BPF_MOV64_IMM(BPF_REG_4, 0),
7061 			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
7062 			BPF_MOV64_IMM(BPF_REG_3, 0),
7063 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7064 			BPF_MOV64_IMM(BPF_REG_0, 0),
7065 			BPF_EXIT_INSN(),
7066 		},
7067 		.fixup_map_hash_48b = { 3 },
7068 		.result = ACCEPT,
7069 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7070 	},
7071 	{
7072 		"helper access to variable memory: map adjusted, JMP, wrong max",
7073 		.insns = {
7074 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7075 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7076 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7077 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7078 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7079 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
7080 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7081 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
7082 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
7083 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7084 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7085 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
7086 				sizeof(struct test_val) - 19, 4),
7087 			BPF_MOV64_IMM(BPF_REG_4, 0),
7088 			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
7089 			BPF_MOV64_IMM(BPF_REG_3, 0),
7090 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7091 			BPF_MOV64_IMM(BPF_REG_0, 0),
7092 			BPF_EXIT_INSN(),
7093 		},
7094 		.fixup_map_hash_48b = { 3 },
7095 		.errstr = "R1 min value is outside of the array range",
7096 		.result = REJECT,
7097 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7098 	},
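	/* ARG_PTR_TO_MEM_OR_NULL (csum_diff): a NULL pointer is only
	 * accepted together with a size that is provably zero; a size that
	 * may be non-zero requires a valid stack, map value or packet
	 * pointer, and then a possibly-zero size is fine.
	 */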
7099 	{
7100 		"helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
7101 		.insns = {
7102 			BPF_MOV64_IMM(BPF_REG_1, 0),
7103 			BPF_MOV64_IMM(BPF_REG_2, 0),
7104 			BPF_MOV64_IMM(BPF_REG_3, 0),
7105 			BPF_MOV64_IMM(BPF_REG_4, 0),
7106 			BPF_MOV64_IMM(BPF_REG_5, 0),
7107 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7108 			BPF_EXIT_INSN(),
7109 		},
7110 		.result = ACCEPT,
7111 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
7112 	},
7113 	{
7114 		"helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
7115 		.insns = {
7116 			BPF_MOV64_IMM(BPF_REG_1, 0),
7117 			BPF_MOV64_IMM(BPF_REG_2, 1),
7118 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7119 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7120 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
7121 			BPF_MOV64_IMM(BPF_REG_3, 0),
7122 			BPF_MOV64_IMM(BPF_REG_4, 0),
7123 			BPF_MOV64_IMM(BPF_REG_5, 0),
7124 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7125 			BPF_EXIT_INSN(),
7126 		},
7127 		.errstr = "R1 type=inv expected=fp",
7128 		.result = REJECT,
7129 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
7130 	},
7131 	{
7132 		"helper access to variable memory: size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
7133 		.insns = {
7134 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7135 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
7136 			BPF_MOV64_IMM(BPF_REG_2, 0),
7137 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
7138 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
7139 			BPF_MOV64_IMM(BPF_REG_3, 0),
7140 			BPF_MOV64_IMM(BPF_REG_4, 0),
7141 			BPF_MOV64_IMM(BPF_REG_5, 0),
7142 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7143 			BPF_EXIT_INSN(),
7144 		},
7145 		.result = ACCEPT,
7146 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
7147 	},
7148 	{
7149 		"helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
7150 		.insns = {
7151 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7152 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7153 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7154 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7155 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7156 				     BPF_FUNC_map_lookup_elem),
7157 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7158 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7159 			BPF_MOV64_IMM(BPF_REG_2, 0),
7160 			BPF_MOV64_IMM(BPF_REG_3, 0),
7161 			BPF_MOV64_IMM(BPF_REG_4, 0),
7162 			BPF_MOV64_IMM(BPF_REG_5, 0),
7163 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7164 			BPF_EXIT_INSN(),
7165 		},
7166 		.fixup_map_hash_8b = { 3 },
7167 		.result = ACCEPT,
7168 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
7169 	},
7170 	{
7171 		"helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
7172 		.insns = {
7173 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7174 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7175 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7176 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7177 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7178 				     BPF_FUNC_map_lookup_elem),
7179 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7180 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
7181 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 7),
7182 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7183 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
7184 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
7185 			BPF_MOV64_IMM(BPF_REG_3, 0),
7186 			BPF_MOV64_IMM(BPF_REG_4, 0),
7187 			BPF_MOV64_IMM(BPF_REG_5, 0),
7188 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7189 			BPF_EXIT_INSN(),
7190 		},
7191 		.fixup_map_hash_8b = { 3 },
7192 		.result = ACCEPT,
7193 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
7194 	},
7195 	{
7196 		"helper access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
7197 		.insns = {
7198 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7199 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7200 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7201 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7202 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7203 				     BPF_FUNC_map_lookup_elem),
7204 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7205 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7206 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
7207 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
7208 			BPF_MOV64_IMM(BPF_REG_3, 0),
7209 			BPF_MOV64_IMM(BPF_REG_4, 0),
7210 			BPF_MOV64_IMM(BPF_REG_5, 0),
7211 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7212 			BPF_EXIT_INSN(),
7213 		},
7214 		.fixup_map_hash_8b = { 3 },
7215 		.result = ACCEPT,
7216 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
7217 	},
7218 	{
7219 		"helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)",
7220 		.insns = {
7221 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
7222 				    offsetof(struct __sk_buff, data)),
7223 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7224 				    offsetof(struct __sk_buff, data_end)),
7225 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_6),
7226 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
7227 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
7228 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
7229 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 0),
7230 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
7231 			BPF_MOV64_IMM(BPF_REG_3, 0),
7232 			BPF_MOV64_IMM(BPF_REG_4, 0),
7233 			BPF_MOV64_IMM(BPF_REG_5, 0),
7234 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7235 			BPF_EXIT_INSN(),
7236 		},
7237 		.result = ACCEPT,
7238 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
7239 		.retval = 0 /* csum_diff of 64-byte packet */,
7240 	},
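	/* Without the _OR_NULL variant (probe_read()), NULL is rejected even
	 * for size 0; a valid stack or map value pointer with a zero or
	 * possibly-zero size is accepted.
	 */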
7241 	{
7242 		"helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
7243 		.insns = {
7244 			BPF_MOV64_IMM(BPF_REG_1, 0),
7245 			BPF_MOV64_IMM(BPF_REG_2, 0),
7246 			BPF_MOV64_IMM(BPF_REG_3, 0),
7247 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7248 			BPF_EXIT_INSN(),
7249 		},
7250 		.errstr = "R1 type=inv expected=fp",
7251 		.result = REJECT,
7252 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7253 	},
7254 	{
7255 		"helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
7256 		.insns = {
7257 			BPF_MOV64_IMM(BPF_REG_1, 0),
7258 			BPF_MOV64_IMM(BPF_REG_2, 1),
7259 			BPF_MOV64_IMM(BPF_REG_3, 0),
7260 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7261 			BPF_EXIT_INSN(),
7262 		},
7263 		.errstr = "R1 type=inv expected=fp",
7264 		.result = REJECT,
7265 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7266 	},
7267 	{
7268 		"helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
7269 		.insns = {
7270 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7271 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
7272 			BPF_MOV64_IMM(BPF_REG_2, 0),
7273 			BPF_MOV64_IMM(BPF_REG_3, 0),
7274 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7275 			BPF_EXIT_INSN(),
7276 		},
7277 		.result = ACCEPT,
7278 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7279 	},
7280 	{
7281 		"helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
7282 		.insns = {
7283 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7284 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7285 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7286 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7287 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7288 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7289 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7290 			BPF_MOV64_IMM(BPF_REG_2, 0),
7291 			BPF_MOV64_IMM(BPF_REG_3, 0),
7292 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7293 			BPF_EXIT_INSN(),
7294 		},
7295 		.fixup_map_hash_8b = { 3 },
7296 		.result = ACCEPT,
7297 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7298 	},
7299 	{
7300 		"helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
7301 		.insns = {
7302 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7303 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7304 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7305 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7306 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7307 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7308 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
7309 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
7310 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7311 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
7312 			BPF_MOV64_IMM(BPF_REG_3, 0),
7313 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7314 			BPF_EXIT_INSN(),
7315 		},
7316 		.fixup_map_hash_8b = { 3 },
7317 		.result = ACCEPT,
7318 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7319 	},
7320 	{
7321 		"helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
7322 		.insns = {
7323 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7324 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7325 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7326 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7327 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7328 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
7329 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7330 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
7331 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 2),
7332 			BPF_MOV64_IMM(BPF_REG_3, 0),
7333 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7334 			BPF_EXIT_INSN(),
7335 		},
7336 		.fixup_map_hash_8b = { 3 },
7337 		.result = ACCEPT,
7338 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7339 	},
7340 	{
7341 		"helper access to variable memory: 8 bytes leak",
7342 		.insns = {
7343 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7344 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7345 			BPF_MOV64_IMM(BPF_REG_0, 0),
7346 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
7347 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
7348 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
7349 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
7350 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
7351 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
7352 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
7353 			BPF_MOV64_IMM(BPF_REG_2, 1),
7354 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7355 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7356 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
7357 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
7358 			BPF_MOV64_IMM(BPF_REG_3, 0),
7359 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7360 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7361 			BPF_EXIT_INSN(),
7362 		},
7363 		.errstr = "invalid indirect read from stack off -64+32 size 64",
7364 		.result = REJECT,
7365 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7366 	},
7367 	{
7368 		"helper access to variable memory: 8 bytes no leak (init memory)",
7369 		.insns = {
7370 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7371 			BPF_MOV64_IMM(BPF_REG_0, 0),
7372 			BPF_MOV64_IMM(BPF_REG_0, 0),
7373 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
7374 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
7375 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
7376 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
7377 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
7378 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
7379 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
7380 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
7381 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7382 			BPF_MOV64_IMM(BPF_REG_2, 0),
7383 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
7384 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
7385 			BPF_MOV64_IMM(BPF_REG_3, 0),
7386 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7387 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7388 			BPF_EXIT_INSN(),
7389 		},
7390 		.result = ACCEPT,
7391 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7392 	},
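	/* Bounding an index with AND or ad-hoc arithmetic does not help if
	 * the resulting maximum can still fall outside the map value; both
	 * programs are rejected because R0's max value leaves the array.
	 */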
7393 	{
7394 		"invalid and of negative number",
7395 		.insns = {
7396 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7397 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7398 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7399 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7400 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7401 				     BPF_FUNC_map_lookup_elem),
7402 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7403 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7404 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
7405 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
7406 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7407 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
7408 				   offsetof(struct test_val, foo)),
7409 			BPF_EXIT_INSN(),
7410 		},
7411 		.fixup_map_hash_48b = { 3 },
7412 		.errstr = "R0 max value is outside of the array range",
7413 		.result = REJECT,
7414 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7415 	},
7416 	{
7417 		"invalid range check",
7418 		.insns = {
7419 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7420 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7421 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7422 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7423 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7424 				     BPF_FUNC_map_lookup_elem),
7425 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
7426 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
7427 			BPF_MOV64_IMM(BPF_REG_9, 1),
7428 			BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
7429 			BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
7430 			BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
7431 			BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
7432 			BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
7433 			BPF_MOV32_IMM(BPF_REG_3, 1),
7434 			BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
7435 			BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
7436 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
7437 			BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
7438 			BPF_MOV64_IMM(BPF_REG_0, 0),
7439 			BPF_EXIT_INSN(),
7440 		},
7441 		.fixup_map_hash_48b = { 3 },
7442 		.errstr = "R0 max value is outside of the array range",
7443 		.result = REJECT,
7444 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7445 	},
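	/* Map-in-map: the inner map pointer returned by the first lookup
	 * must be NULL checked and used unmodified; arithmetic on the
	 * map_ptr or passing a map_value_or_null as the map argument is
	 * rejected.
	 */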
7446 	{
7447 		"map in map access",
7448 		.insns = {
7449 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7450 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7451 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7452 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7453 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7454 				     BPF_FUNC_map_lookup_elem),
7455 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
7456 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7457 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7458 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7459 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7460 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7461 				     BPF_FUNC_map_lookup_elem),
7462 			BPF_MOV64_IMM(BPF_REG_0, 0),
7463 			BPF_EXIT_INSN(),
7464 		},
7465 		.fixup_map_in_map = { 3 },
7466 		.result = ACCEPT,
7467 	},
7468 	{
7469 		"invalid inner map pointer",
7470 		.insns = {
7471 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7472 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7473 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7474 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7475 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7476 				     BPF_FUNC_map_lookup_elem),
7477 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7478 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7479 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7480 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7481 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7482 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7483 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7484 				     BPF_FUNC_map_lookup_elem),
7485 			BPF_MOV64_IMM(BPF_REG_0, 0),
7486 			BPF_EXIT_INSN(),
7487 		},
7488 		.fixup_map_in_map = { 3 },
7489 		.errstr = "R1 pointer arithmetic on map_ptr prohibited",
7490 		.result = REJECT,
7491 	},
7492 	{
7493 		"forgot null checking on the inner map pointer",
7494 		.insns = {
7495 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7496 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7497 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7498 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7499 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7500 				     BPF_FUNC_map_lookup_elem),
7501 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7502 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7503 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7504 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7505 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7506 				     BPF_FUNC_map_lookup_elem),
7507 			BPF_MOV64_IMM(BPF_REG_0, 0),
7508 			BPF_EXIT_INSN(),
7509 		},
7510 		.fixup_map_in_map = { 3 },
7511 		.errstr = "R1 type=map_value_or_null expected=map_ptr",
7512 		.result = REJECT,
7513 	},
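	/* LD_ABS and LD_IND implicitly use R6 as the skb pointer and treat
	 * R1-R5 as clobbered scratch registers, so reading any of them
	 * afterwards fails with "!read_ok"; callee-saved registers such as
	 * R7 survive.
	 */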
7514 	{
7515 		"ld_abs: check calling conv, r1",
7516 		.insns = {
7517 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7518 			BPF_MOV64_IMM(BPF_REG_1, 0),
7519 			BPF_LD_ABS(BPF_W, -0x200000),
7520 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
7521 			BPF_EXIT_INSN(),
7522 		},
7523 		.errstr = "R1 !read_ok",
7524 		.result = REJECT,
7525 	},
7526 	{
7527 		"ld_abs: check calling conv, r2",
7528 		.insns = {
7529 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7530 			BPF_MOV64_IMM(BPF_REG_2, 0),
7531 			BPF_LD_ABS(BPF_W, -0x200000),
7532 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
7533 			BPF_EXIT_INSN(),
7534 		},
7535 		.errstr = "R2 !read_ok",
7536 		.result = REJECT,
7537 	},
7538 	{
7539 		"ld_abs: check calling conv, r3",
7540 		.insns = {
7541 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7542 			BPF_MOV64_IMM(BPF_REG_3, 0),
7543 			BPF_LD_ABS(BPF_W, -0x200000),
7544 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
7545 			BPF_EXIT_INSN(),
7546 		},
7547 		.errstr = "R3 !read_ok",
7548 		.result = REJECT,
7549 	},
7550 	{
7551 		"ld_abs: check calling conv, r4",
7552 		.insns = {
7553 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7554 			BPF_MOV64_IMM(BPF_REG_4, 0),
7555 			BPF_LD_ABS(BPF_W, -0x200000),
7556 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
7557 			BPF_EXIT_INSN(),
7558 		},
7559 		.errstr = "R4 !read_ok",
7560 		.result = REJECT,
7561 	},
7562 	{
7563 		"ld_abs: check calling conv, r5",
7564 		.insns = {
7565 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7566 			BPF_MOV64_IMM(BPF_REG_5, 0),
7567 			BPF_LD_ABS(BPF_W, -0x200000),
7568 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
7569 			BPF_EXIT_INSN(),
7570 		},
7571 		.errstr = "R5 !read_ok",
7572 		.result = REJECT,
7573 	},
7574 	{
7575 		"ld_abs: check calling conv, r7",
7576 		.insns = {
7577 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7578 			BPF_MOV64_IMM(BPF_REG_7, 0),
7579 			BPF_LD_ABS(BPF_W, -0x200000),
7580 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
7581 			BPF_EXIT_INSN(),
7582 		},
7583 		.result = ACCEPT,
7584 	},
7585 	{
7586 		"ld_abs: tests on r6 and skb data reload helper",
7587 		.insns = {
7588 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7589 			BPF_LD_ABS(BPF_B, 0),
7590 			BPF_LD_ABS(BPF_H, 0),
7591 			BPF_LD_ABS(BPF_W, 0),
7592 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
7593 			BPF_MOV64_IMM(BPF_REG_6, 0),
7594 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
7595 			BPF_MOV64_IMM(BPF_REG_2, 1),
7596 			BPF_MOV64_IMM(BPF_REG_3, 2),
7597 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7598 				     BPF_FUNC_skb_vlan_push),
7599 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
7600 			BPF_LD_ABS(BPF_B, 0),
7601 			BPF_LD_ABS(BPF_H, 0),
7602 			BPF_LD_ABS(BPF_W, 0),
7603 			BPF_MOV64_IMM(BPF_REG_0, 42),
7604 			BPF_EXIT_INSN(),
7605 		},
7606 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
7607 		.result = ACCEPT,
7608 		.retval = 42 /* ultimate return value */,
7609 	},
7610 	{
7611 		"ld_ind: check calling conv, r1",
7612 		.insns = {
7613 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7614 			BPF_MOV64_IMM(BPF_REG_1, 1),
7615 			BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000),
7616 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
7617 			BPF_EXIT_INSN(),
7618 		},
7619 		.errstr = "R1 !read_ok",
7620 		.result = REJECT,
7621 	},
7622 	{
7623 		"ld_ind: check calling conv, r2",
7624 		.insns = {
7625 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7626 			BPF_MOV64_IMM(BPF_REG_2, 1),
7627 			BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000),
7628 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
7629 			BPF_EXIT_INSN(),
7630 		},
7631 		.errstr = "R2 !read_ok",
7632 		.result = REJECT,
7633 	},
7634 	{
7635 		"ld_ind: check calling conv, r3",
7636 		.insns = {
7637 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7638 			BPF_MOV64_IMM(BPF_REG_3, 1),
7639 			BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000),
7640 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
7641 			BPF_EXIT_INSN(),
7642 		},
7643 		.errstr = "R3 !read_ok",
7644 		.result = REJECT,
7645 	},
7646 	{
7647 		"ld_ind: check calling conv, r4",
7648 		.insns = {
7649 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7650 			BPF_MOV64_IMM(BPF_REG_4, 1),
7651 			BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000),
7652 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
7653 			BPF_EXIT_INSN(),
7654 		},
7655 		.errstr = "R4 !read_ok",
7656 		.result = REJECT,
7657 	},
7658 	{
7659 		"ld_ind: check calling conv, r5",
7660 		.insns = {
7661 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7662 			BPF_MOV64_IMM(BPF_REG_5, 1),
7663 			BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000),
7664 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
7665 			BPF_EXIT_INSN(),
7666 		},
7667 		.errstr = "R5 !read_ok",
7668 		.result = REJECT,
7669 	},
7670 	{
7671 		"ld_ind: check calling conv, r7",
7672 		.insns = {
7673 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7674 			BPF_MOV64_IMM(BPF_REG_7, 1),
7675 			BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
7676 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
7677 			BPF_EXIT_INSN(),
7678 		},
7679 		.result = ACCEPT,
7680 		.retval = 1,
7681 	},
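	/* bpf_perf_event_data->sample_period can be read with byte, half,
	 * word and dword loads; the extra offsets in the big-endian
	 * branches address the same low-order bytes of the 64-bit field.
	 */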
7682 	{
7683 		"check bpf_perf_event_data->sample_period byte load permitted",
7684 		.insns = {
7685 			BPF_MOV64_IMM(BPF_REG_0, 0),
7686 #if __BYTE_ORDER == __LITTLE_ENDIAN
7687 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
7688 				    offsetof(struct bpf_perf_event_data, sample_period)),
7689 #else
7690 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
7691 				    offsetof(struct bpf_perf_event_data, sample_period) + 7),
7692 #endif
7693 			BPF_EXIT_INSN(),
7694 		},
7695 		.result = ACCEPT,
7696 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
7697 	},
7698 	{
7699 		"check bpf_perf_event_data->sample_period half load permitted",
7700 		.insns = {
7701 			BPF_MOV64_IMM(BPF_REG_0, 0),
7702 #if __BYTE_ORDER == __LITTLE_ENDIAN
7703 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
7704 				    offsetof(struct bpf_perf_event_data, sample_period)),
7705 #else
7706 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
7707 				    offsetof(struct bpf_perf_event_data, sample_period) + 6),
7708 #endif
7709 			BPF_EXIT_INSN(),
7710 		},
7711 		.result = ACCEPT,
7712 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
7713 	},
7714 	{
7715 		"check bpf_perf_event_data->sample_period word load permitted",
7716 		.insns = {
7717 			BPF_MOV64_IMM(BPF_REG_0, 0),
7718 #if __BYTE_ORDER == __LITTLE_ENDIAN
7719 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
7720 				    offsetof(struct bpf_perf_event_data, sample_period)),
7721 #else
7722 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
7723 				    offsetof(struct bpf_perf_event_data, sample_period) + 4),
7724 #endif
7725 			BPF_EXIT_INSN(),
7726 		},
7727 		.result = ACCEPT,
7728 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
7729 	},
7730 	{
7731 		"check bpf_perf_event_data->sample_period dword load permitted",
7732 		.insns = {
7733 			BPF_MOV64_IMM(BPF_REG_0, 0),
7734 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
7735 				    offsetof(struct bpf_perf_event_data, sample_period)),
7736 			BPF_EXIT_INSN(),
7737 		},
7738 		.result = ACCEPT,
7739 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
7740 	},
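	/* By contrast, narrow (half) loads of __sk_buff fields such as data
	 * and tc_classid are not permitted for these program types and are
	 * rejected as invalid bpf_context access.
	 */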
7741 	{
7742 		"check skb->data half load not permitted",
7743 		.insns = {
7744 			BPF_MOV64_IMM(BPF_REG_0, 0),
7745 #if __BYTE_ORDER == __LITTLE_ENDIAN
7746 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
7747 				    offsetof(struct __sk_buff, data)),
7748 #else
7749 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
7750 				    offsetof(struct __sk_buff, data) + 2),
7751 #endif
7752 			BPF_EXIT_INSN(),
7753 		},
7754 		.result = REJECT,
7755 		.errstr = "invalid bpf_context access",
7756 	},
7757 	{
7758 		"check skb->tc_classid half load not permitted for lwt prog",
7759 		.insns = {
7760 			BPF_MOV64_IMM(BPF_REG_0, 0),
7761 #if __BYTE_ORDER == __LITTLE_ENDIAN
7762 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
7763 				    offsetof(struct __sk_buff, tc_classid)),
7764 #else
7765 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
7766 				    offsetof(struct __sk_buff, tc_classid) + 2),
7767 #endif
7768 			BPF_EXIT_INSN(),
7769 		},
7770 		.result = REJECT,
7771 		.errstr = "invalid bpf_context access",
7772 		.prog_type = BPF_PROG_TYPE_LWT_IN,
7773 	},
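	/* Mixing signed and unsigned comparisons on the same value: the
	 * rejected variants leave the minimum unbounded ("unbounded min
	 * value") before the pointer arithmetic, while the accepted ones
	 * pin both bounds first.
	 */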
7774 	{
7775 		"bounds checks mixing signed and unsigned, positive bounds",
7776 		.insns = {
7777 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7778 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7779 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7780 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7781 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7782 				     BPF_FUNC_map_lookup_elem),
7783 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7784 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7785 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7786 			BPF_MOV64_IMM(BPF_REG_2, 2),
7787 			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3),
7788 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 4, 2),
7789 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7790 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7791 			BPF_MOV64_IMM(BPF_REG_0, 0),
7792 			BPF_EXIT_INSN(),
7793 		},
7794 		.fixup_map_hash_8b = { 3 },
7795 		.errstr = "unbounded min value",
7796 		.result = REJECT,
7797 	},
7798 	{
7799 		"bounds checks mixing signed and unsigned",
7800 		.insns = {
7801 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7802 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7803 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7804 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7805 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7806 				     BPF_FUNC_map_lookup_elem),
7807 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7808 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7809 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7810 			BPF_MOV64_IMM(BPF_REG_2, -1),
7811 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
7812 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7813 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7814 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7815 			BPF_MOV64_IMM(BPF_REG_0, 0),
7816 			BPF_EXIT_INSN(),
7817 		},
7818 		.fixup_map_hash_8b = { 3 },
7819 		.errstr = "unbounded min value",
7820 		.result = REJECT,
7821 	},
7822 	{
7823 		"bounds checks mixing signed and unsigned, variant 2",
7824 		.insns = {
7825 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7826 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7827 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7828 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7829 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7830 				     BPF_FUNC_map_lookup_elem),
7831 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7832 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7833 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7834 			BPF_MOV64_IMM(BPF_REG_2, -1),
7835 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
7836 			BPF_MOV64_IMM(BPF_REG_8, 0),
7837 			BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_1),
7838 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
7839 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
7840 			BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
7841 			BPF_MOV64_IMM(BPF_REG_0, 0),
7842 			BPF_EXIT_INSN(),
7843 		},
7844 		.fixup_map_hash_8b = { 3 },
7845 		.errstr = "unbounded min value",
7846 		.result = REJECT,
7847 	},
7848 	{
7849 		"bounds checks mixing signed and unsigned, variant 3",
7850 		.insns = {
7851 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7852 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7853 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7854 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7855 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7856 				     BPF_FUNC_map_lookup_elem),
7857 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
7858 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7859 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7860 			BPF_MOV64_IMM(BPF_REG_2, -1),
7861 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4),
7862 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
7863 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
7864 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
7865 			BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
7866 			BPF_MOV64_IMM(BPF_REG_0, 0),
7867 			BPF_EXIT_INSN(),
7868 		},
7869 		.fixup_map_hash_8b = { 3 },
7870 		.errstr = "unbounded min value",
7871 		.result = REJECT,
7872 	},
7873 	{
7874 		"bounds checks mixing signed and unsigned, variant 4",
7875 		.insns = {
7876 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7877 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7878 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7879 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7880 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7881 				     BPF_FUNC_map_lookup_elem),
7882 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7883 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7884 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7885 			BPF_MOV64_IMM(BPF_REG_2, 1),
7886 			BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
7887 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7888 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7889 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7890 			BPF_MOV64_IMM(BPF_REG_0, 0),
7891 			BPF_EXIT_INSN(),
7892 		},
7893 		.fixup_map_hash_8b = { 3 },
7894 		.result = ACCEPT,
7895 	},
7896 	{
7897 		"bounds checks mixing signed and unsigned, variant 5",
7898 		.insns = {
7899 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7900 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7901 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7902 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7903 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7904 				     BPF_FUNC_map_lookup_elem),
7905 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7906 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7907 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7908 			BPF_MOV64_IMM(BPF_REG_2, -1),
7909 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
7910 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 4),
7911 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4),
7912 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
7913 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7914 			BPF_MOV64_IMM(BPF_REG_0, 0),
7915 			BPF_EXIT_INSN(),
7916 		},
7917 		.fixup_map_hash_8b = { 3 },
7918 		.errstr = "unbounded min value",
7919 		.result = REJECT,
7920 	},
7921 	{
7922 		"bounds checks mixing signed and unsigned, variant 6",
7923 		.insns = {
7924 			BPF_MOV64_IMM(BPF_REG_2, 0),
7925 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
7926 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512),
7927 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7928 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16),
7929 			BPF_MOV64_IMM(BPF_REG_6, -1),
7930 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5),
7931 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_4, 1, 4),
7932 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
7933 			BPF_MOV64_IMM(BPF_REG_5, 0),
7934 			BPF_ST_MEM(BPF_H, BPF_REG_10, -512, 0),
7935 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7936 				     BPF_FUNC_skb_load_bytes),
7937 			BPF_MOV64_IMM(BPF_REG_0, 0),
7938 			BPF_EXIT_INSN(),
7939 		},
7940 		.errstr = "R4 min value is negative, either use unsigned",
7941 		.result = REJECT,
7942 	},
7943 	{
7944 		"bounds checks mixing signed and unsigned, variant 7",
7945 		.insns = {
7946 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7947 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7948 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7949 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7950 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7951 				     BPF_FUNC_map_lookup_elem),
7952 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7953 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7954 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7955 			BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024),
7956 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
7957 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7958 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7959 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7960 			BPF_MOV64_IMM(BPF_REG_0, 0),
7961 			BPF_EXIT_INSN(),
7962 		},
7963 		.fixup_map_hash_8b = { 3 },
7964 		.result = ACCEPT,
7965 	},
7966 	{
7967 		"bounds checks mixing signed and unsigned, variant 8",
7968 		.insns = {
7969 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7970 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7971 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7972 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7973 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7974 				     BPF_FUNC_map_lookup_elem),
7975 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7976 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7977 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7978 			BPF_MOV64_IMM(BPF_REG_2, -1),
7979 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
7980 			BPF_MOV64_IMM(BPF_REG_0, 0),
7981 			BPF_EXIT_INSN(),
7982 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7983 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7984 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7985 			BPF_MOV64_IMM(BPF_REG_0, 0),
7986 			BPF_EXIT_INSN(),
7987 		},
7988 		.fixup_map_hash_8b = { 3 },
7989 		.errstr = "unbounded min value",
7990 		.result = REJECT,
7991 	},
7992 	{
7993 		"bounds checks mixing signed and unsigned, variant 9",
7994 		.insns = {
7995 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7996 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7997 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7998 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7999 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8000 				     BPF_FUNC_map_lookup_elem),
8001 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
8002 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8003 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8004 			BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL),
8005 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
8006 			BPF_MOV64_IMM(BPF_REG_0, 0),
8007 			BPF_EXIT_INSN(),
8008 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8009 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8010 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8011 			BPF_MOV64_IMM(BPF_REG_0, 0),
8012 			BPF_EXIT_INSN(),
8013 		},
8014 		.fixup_map_hash_8b = { 3 },
8015 		.result = ACCEPT,
8016 	},
8017 	{
8018 		"bounds checks mixing signed and unsigned, variant 10",
8019 		.insns = {
8020 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8021 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8022 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8023 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8024 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8025 				     BPF_FUNC_map_lookup_elem),
8026 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8027 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8028 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8029 			BPF_MOV64_IMM(BPF_REG_2, 0),
8030 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
8031 			BPF_MOV64_IMM(BPF_REG_0, 0),
8032 			BPF_EXIT_INSN(),
8033 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8034 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8035 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8036 			BPF_MOV64_IMM(BPF_REG_0, 0),
8037 			BPF_EXIT_INSN(),
8038 		},
8039 		.fixup_map_hash_8b = { 3 },
8040 		.errstr = "unbounded min value",
8041 		.result = REJECT,
8042 	},
8043 	{
8044 		"bounds checks mixing signed and unsigned, variant 11",
8045 		.insns = {
8046 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8047 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8048 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8049 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8050 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8051 				     BPF_FUNC_map_lookup_elem),
8052 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8053 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8054 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8055 			BPF_MOV64_IMM(BPF_REG_2, -1),
8056 			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
8057 			/* Dead branch. */
8058 			BPF_MOV64_IMM(BPF_REG_0, 0),
8059 			BPF_EXIT_INSN(),
8060 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8061 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8062 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8063 			BPF_MOV64_IMM(BPF_REG_0, 0),
8064 			BPF_EXIT_INSN(),
8065 		},
8066 		.fixup_map_hash_8b = { 3 },
8067 		.errstr = "unbounded min value",
8068 		.result = REJECT,
8069 	},
8070 	{
8071 		"bounds checks mixing signed and unsigned, variant 12",
8072 		.insns = {
8073 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8074 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8075 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8076 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8077 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8078 				     BPF_FUNC_map_lookup_elem),
8079 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8080 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8081 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8082 			BPF_MOV64_IMM(BPF_REG_2, -6),
8083 			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
8084 			BPF_MOV64_IMM(BPF_REG_0, 0),
8085 			BPF_EXIT_INSN(),
8086 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8087 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8088 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8089 			BPF_MOV64_IMM(BPF_REG_0, 0),
8090 			BPF_EXIT_INSN(),
8091 		},
8092 		.fixup_map_hash_8b = { 3 },
8093 		.errstr = "unbounded min value",
8094 		.result = REJECT,
8095 	},
8096 	{
8097 		"bounds checks mixing signed and unsigned, variant 13",
8098 		.insns = {
8099 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8100 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8101 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8102 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8103 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8104 				     BPF_FUNC_map_lookup_elem),
8105 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
8106 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8107 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8108 			BPF_MOV64_IMM(BPF_REG_2, 2),
8109 			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
8110 			BPF_MOV64_IMM(BPF_REG_7, 1),
8111 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 0, 2),
8112 			BPF_MOV64_IMM(BPF_REG_0, 0),
8113 			BPF_EXIT_INSN(),
8114 			BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_1),
8115 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 4, 2),
8116 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_7),
8117 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8118 			BPF_MOV64_IMM(BPF_REG_0, 0),
8119 			BPF_EXIT_INSN(),
8120 		},
8121 		.fixup_map_hash_8b = { 3 },
8122 		.errstr = "unbounded min value",
8123 		.result = REJECT,
8124 	},
8125 	{
8126 		"bounds checks mixing signed and unsigned, variant 14",
8127 		.insns = {
8128 			BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
8129 				    offsetof(struct __sk_buff, mark)),
8130 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8131 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8132 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8133 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8134 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8135 				     BPF_FUNC_map_lookup_elem),
8136 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
8137 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8138 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8139 			BPF_MOV64_IMM(BPF_REG_2, -1),
8140 			BPF_MOV64_IMM(BPF_REG_8, 2),
8141 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 42, 6),
8142 			BPF_JMP_REG(BPF_JSGT, BPF_REG_8, BPF_REG_1, 3),
8143 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8144 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8145 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8146 			BPF_MOV64_IMM(BPF_REG_0, 0),
8147 			BPF_EXIT_INSN(),
8148 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3),
8149 			BPF_JMP_IMM(BPF_JA, 0, 0, -7),
8150 		},
8151 		.fixup_map_hash_8b = { 4 },
8152 		.errstr = "R0 invalid mem access 'inv'",
8153 		.result = REJECT,
8154 	},
8155 	{
8156 		"bounds checks mixing signed and unsigned, variant 15",
8157 		.insns = {
8158 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8159 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8160 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8161 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8162 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8163 				     BPF_FUNC_map_lookup_elem),
8164 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8165 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8166 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8167 			BPF_MOV64_IMM(BPF_REG_2, -6),
8168 			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
8169 			BPF_MOV64_IMM(BPF_REG_0, 0),
8170 			BPF_EXIT_INSN(),
8171 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8172 			BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 1, 2),
8173 			BPF_MOV64_IMM(BPF_REG_0, 0),
8174 			BPF_EXIT_INSN(),
8175 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8176 			BPF_MOV64_IMM(BPF_REG_0, 0),
8177 			BPF_EXIT_INSN(),
8178 		},
8179 		.fixup_map_hash_8b = { 3 },
8180 		.errstr = "unbounded min value",
8181 		.result = REJECT,
8182 		.result_unpriv = REJECT,
8183 	},
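	/* The two "subtraction bounds" tests bound two map-value bytes to
	 * [0, 0xff] and subtract them, so the result may be negative.
	 * Variant 1 then shifts right by 56, variant 2 adds the raw
	 * difference; either way the offset added to the map_value pointer
	 * cannot be proven to stay inside the 8-byte value.
	 */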
8184 	{
8185 		"subtraction bounds (map value) variant 1",
8186 		.insns = {
8187 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8188 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8189 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8190 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8191 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8192 				     BPF_FUNC_map_lookup_elem),
8193 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8194 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8195 			BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7),
8196 			BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
8197 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5),
8198 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
8199 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56),
8200 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8201 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8202 			BPF_EXIT_INSN(),
8203 			BPF_MOV64_IMM(BPF_REG_0, 0),
8204 			BPF_EXIT_INSN(),
8205 		},
8206 		.fixup_map_hash_8b = { 3 },
8207 		.errstr = "R0 max value is outside of the array range",
8208 		.result = REJECT,
8209 	},
8210 	{
8211 		"subtraction bounds (map value) variant 2",
8212 		.insns = {
8213 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8214 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8215 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8216 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8217 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8218 				     BPF_FUNC_map_lookup_elem),
8219 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
8220 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8221 			BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 6),
8222 			BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
8223 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 4),
8224 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
8225 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8226 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8227 			BPF_EXIT_INSN(),
8228 			BPF_MOV64_IMM(BPF_REG_0, 0),
8229 			BPF_EXIT_INSN(),
8230 		},
8231 		.fixup_map_hash_8b = { 3 },
8232 		.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
8233 		.result = REJECT,
8234 	},
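	/* MOV truncation tests: a 32-bit MOV zero-extends, so shifting
	 * 0xffff'ffff right by 32 is provably 0 and the access is safe,
	 * while BPF_MOV64_IMM sign-extends the same immediate to -1, so the
	 * derived offsets (0xffff'ffff and 0xfff'ffff) fall outside the
	 * 8-byte map value and must be rejected.
	 */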
8235 	{
8236 		"bounds check based on zero-extended MOV",
8237 		.insns = {
8238 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8239 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8240 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8241 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8242 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8243 				     BPF_FUNC_map_lookup_elem),
8244 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8245 			/* r2 = 0x0000'0000'ffff'ffff */
8246 			BPF_MOV32_IMM(BPF_REG_2, 0xffffffff),
8247 			/* r2 = 0 */
8248 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
8249 			/* no-op */
8250 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
8251 			/* access at offset 0 */
8252 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8253 			/* exit */
8254 			BPF_MOV64_IMM(BPF_REG_0, 0),
8255 			BPF_EXIT_INSN(),
8256 		},
8257 		.fixup_map_hash_8b = { 3 },
8258 		.result = ACCEPT
8259 	},
8260 	{
8261 		"bounds check based on sign-extended MOV. test1",
8262 		.insns = {
8263 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8264 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8265 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8266 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8267 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8268 				     BPF_FUNC_map_lookup_elem),
8269 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8270 			/* r2 = 0xffff'ffff'ffff'ffff */
8271 			BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
8272 			/* r2 = 0xffff'ffff */
8273 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
8274 			/* r0 = <oob pointer> */
8275 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
8276 			/* access to OOB pointer */
8277 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8278 			/* exit */
8279 			BPF_MOV64_IMM(BPF_REG_0, 0),
8280 			BPF_EXIT_INSN(),
8281 		},
8282 		.fixup_map_hash_8b = { 3 },
8283 		.errstr = "map_value pointer and 4294967295",
8284 		.result = REJECT
8285 	},
8286 	{
8287 		"bounds check based on sign-extended MOV. test2",
8288 		.insns = {
8289 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8290 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8291 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8292 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8293 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8294 				     BPF_FUNC_map_lookup_elem),
8295 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8296 			/* r2 = 0xffff'ffff'ffff'ffff */
8297 			BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
8298 			/* r2 = 0xfff'ffff */
8299 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36),
8300 			/* r0 = <oob pointer> */
8301 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
8302 			/* access to OOB pointer */
8303 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8304 			/* exit */
8305 			BPF_MOV64_IMM(BPF_REG_0, 0),
8306 			BPF_EXIT_INSN(),
8307 		},
8308 		.fixup_map_hash_8b = { 3 },
8309 		.errstr = "R0 min value is outside of the array range",
8310 		.result = REJECT
8311 	},
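	/* reg_off + var_off + insn_off tests: the map_value pointer carries a
	 * large constant offset, the added register contributes a large
	 * variable offset, and the load instruction itself adds +3.  The
	 * verifier has to combine all three components before comparing them
	 * against the 8-byte value size, and must reject the total.
	 */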
8312 	{
8313 		"bounds check based on reg_off + var_off + insn_off. test1",
8314 		.insns = {
8315 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
8316 				    offsetof(struct __sk_buff, mark)),
8317 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8318 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8319 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8320 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8321 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8322 				     BPF_FUNC_map_lookup_elem),
8323 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8324 			BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
8325 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1),
8326 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
8327 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
8328 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
8329 			BPF_MOV64_IMM(BPF_REG_0, 0),
8330 			BPF_EXIT_INSN(),
8331 		},
8332 		.fixup_map_hash_8b = { 4 },
8333 		.errstr = "value_size=8 off=1073741825",
8334 		.result = REJECT,
8335 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
8336 	},
8337 	{
8338 		"bounds check based on reg_off + var_off + insn_off. test2",
8339 		.insns = {
8340 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
8341 				    offsetof(struct __sk_buff, mark)),
8342 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8343 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8344 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8345 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8346 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8347 				     BPF_FUNC_map_lookup_elem),
8348 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8349 			BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
8350 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1),
8351 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
8352 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
8353 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
8354 			BPF_MOV64_IMM(BPF_REG_0, 0),
8355 			BPF_EXIT_INSN(),
8356 		},
8357 		.fixup_map_hash_8b = { 4 },
8358 		.errstr = "value 1073741823",
8359 		.result = REJECT,
8360 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
8361 	},
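	/* Truncation tests: a [0, 0xff] value is lifted into a 64-bit range
	 * and then truncated back to 32 bits.  When the range's low 32 bits
	 * do not wrap around 2^32, the bounds survive truncation and the
	 * final offset is provably 0; when they do wrap, the truncated value
	 * (whether via ALU32 or MOV32) can be almost anything and the later
	 * access must be rejected.
	 */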
8362 	{
8363 		"bounds check after truncation of non-boundary-crossing range",
8364 		.insns = {
8365 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8366 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8367 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8368 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8369 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8370 				     BPF_FUNC_map_lookup_elem),
8371 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8372 			/* r1 = [0x00, 0xff] */
8373 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8374 			BPF_MOV64_IMM(BPF_REG_2, 1),
8375 			/* r2 = 0x10'0000'0000 */
8376 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36),
8377 			/* r1 = [0x10'0000'0000, 0x10'0000'00ff] */
8378 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
8379 			/* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */
8380 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
8381 			/* r1 = [0x00, 0xff] */
8382 			BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff),
8383 			/* r1 = 0 */
8384 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
8385 			/* no-op */
8386 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8387 			/* access at offset 0 */
8388 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8389 			/* exit */
8390 			BPF_MOV64_IMM(BPF_REG_0, 0),
8391 			BPF_EXIT_INSN(),
8392 		},
8393 		.fixup_map_hash_8b = { 3 },
8394 		.result = ACCEPT
8395 	},
8396 	{
8397 		"bounds check after truncation of boundary-crossing range (1)",
8398 		.insns = {
8399 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8400 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8401 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8402 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8403 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8404 				     BPF_FUNC_map_lookup_elem),
8405 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8406 			/* r1 = [0x00, 0xff] */
8407 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8408 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
8409 			/* r1 = [0xffff'ff80, 0x1'0000'007f] */
8410 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
8411 			/* r1 = [0xffff'ff80, 0xffff'ffff] or
8412 			 *      [0x0000'0000, 0x0000'007f]
8413 			 */
8414 			BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0),
8415 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
8416 			/* r1 = [0x00, 0xff] or
8417 			 *      [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
8418 			 */
8419 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
8420 			/* r1 = 0 or
8421 			 *      [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
8422 			 */
8423 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
8424 			/* no-op or OOB pointer computation */
8425 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8426 			/* potentially OOB access */
8427 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8428 			/* exit */
8429 			BPF_MOV64_IMM(BPF_REG_0, 0),
8430 			BPF_EXIT_INSN(),
8431 		},
8432 		.fixup_map_hash_8b = { 3 },
8433 		/* not actually fully unbounded, but the bound is very high */
8434 		.errstr = "R0 unbounded memory access",
8435 		.result = REJECT
8436 	},
8437 	{
8438 		"bounds check after truncation of boundary-crossing range (2)",
8439 		.insns = {
8440 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8441 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8442 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8443 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8444 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8445 				     BPF_FUNC_map_lookup_elem),
8446 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8447 			/* r1 = [0x00, 0xff] */
8448 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8449 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
8450 			/* r1 = [0xffff'ff80, 0x1'0000'007f] */
8451 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
8452 			/* r1 = [0xffff'ff80, 0xffff'ffff] or
8453 			 *      [0x0000'0000, 0x0000'007f]
8454 			 * difference to previous test: truncation via MOV32
8455 			 * instead of ALU32.
8456 			 */
8457 			BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),
8458 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
8459 			/* r1 = [0x00, 0xff] or
8460 			 *      [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
8461 			 */
8462 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
8463 			/* r1 = 0 or
8464 			 *      [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
8465 			 */
8466 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
8467 			/* no-op or OOB pointer computation */
8468 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8469 			/* potentially OOB access */
8470 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8471 			/* exit */
8472 			BPF_MOV64_IMM(BPF_REG_0, 0),
8473 			BPF_EXIT_INSN(),
8474 		},
8475 		.fixup_map_hash_8b = { 3 },
8476 		/* not actually fully unbounded, but the bound is very high */
8477 		.errstr = "R0 unbounded memory access",
8478 		.result = REJECT
8479 	},
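	/* Here the 32-bit addition wraps a known constant (0xffff'fffe + 2)
	 * back to exactly 0, so the pointer is unchanged and the access at
	 * offset 0 is safe.
	 */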
8480 	{
8481 		"bounds check after wrapping 32-bit addition",
8482 		.insns = {
8483 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8484 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8485 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8486 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8487 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8488 				     BPF_FUNC_map_lookup_elem),
8489 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
8490 			/* r1 = 0x7fff'ffff */
8491 			BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff),
8492 			/* r1 = 0xffff'fffe */
8493 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
8494 			/* r1 = 0 */
8495 			BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2),
8496 			/* no-op */
8497 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8498 			/* access at offset 0 */
8499 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8500 			/* exit */
8501 			BPF_MOV64_IMM(BPF_REG_0, 0),
8502 			BPF_EXIT_INSN(),
8503 		},
8504 		.fixup_map_hash_8b = { 3 },
8505 		.result = ACCEPT
8506 	},
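	/* A 32-bit shift by 32 has no defined result (native ISAs mask the
	 * count), so the verifier must treat the shifted value as unknown;
	 * even after masking with 0xffff the offset can exceed the 8-byte
	 * value, hence the reject.
	 */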
8507 	{
8508 		"bounds check after shift with oversized count operand",
8509 		.insns = {
8510 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8511 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8512 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8513 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8514 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8515 				     BPF_FUNC_map_lookup_elem),
8516 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
8517 			BPF_MOV64_IMM(BPF_REG_2, 32),
8518 			BPF_MOV64_IMM(BPF_REG_1, 1),
8519 			/* r1 = (u32)1 << (u32)32 = ? */
8520 			BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
8521 			/* r1 = [0x0000, 0xffff] */
8522 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff),
8523 			/* computes unknown pointer, potentially OOB */
8524 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8525 			/* potentially OOB access */
8526 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8527 			/* exit */
8528 			BPF_MOV64_IMM(BPF_REG_0, 0),
8529 			BPF_EXIT_INSN(),
8530 		},
8531 		.fixup_map_hash_8b = { 3 },
8532 		.errstr = "R0 max value is outside of the array range",
8533 		.result = REJECT
8534 	},
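	/* r1 may be -1 here, which as an unsigned 64-bit value is huge, so
	 * two logical right shifts by 8 still leave a 48-bit upper bound and
	 * the derived pointer is effectively unbounded.
	 */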
8535 	{
8536 		"bounds check after right shift of maybe-negative number",
8537 		.insns = {
8538 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8539 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8540 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8541 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8542 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8543 				     BPF_FUNC_map_lookup_elem),
8544 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
8545 			/* r1 = [0x00, 0xff] */
8546 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8547 			/* r1 = [-0x01, 0xfe] */
8548 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
8549 			/* r1 = 0 or 0xff'ffff'ffff'ffff */
8550 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
8551 			/* r1 = 0 or 0xffff'ffff'ffff */
8552 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
8553 			/* computes unknown pointer, potentially OOB */
8554 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8555 			/* potentially OOB access */
8556 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8557 			/* exit */
8558 			BPF_MOV64_IMM(BPF_REG_0, 0),
8559 			BPF_EXIT_INSN(),
8560 		},
8561 		.fixup_map_hash_8b = { 3 },
8562 		.errstr = "R0 unbounded memory access",
8563 		.result = REJECT
8564 	},
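	/* The off+size signed 32-bit overflow tests push a map_value pointer
	 * far outside the map value in different ways (one big add, repeated
	 * adds, repeated subtractions, a multiply), the scenario in which
	 * off+size of the following BPF_DW load could overflow a signed
	 * 32-bit int; every variant must be rejected before the load.
	 */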
8565 	{
8566 		"bounds check map access with off+size signed 32bit overflow. test1",
8567 		.insns = {
8568 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8569 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8570 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8571 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8572 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8573 				     BPF_FUNC_map_lookup_elem),
8574 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
8575 			BPF_EXIT_INSN(),
8576 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe),
8577 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
8578 			BPF_JMP_A(0),
8579 			BPF_EXIT_INSN(),
8580 		},
8581 		.fixup_map_hash_8b = { 3 },
8582 		.errstr = "map_value pointer and 2147483646",
8583 		.result = REJECT
8584 	},
8585 	{
8586 		"bounds check map access with off+size signed 32bit overflow. test2",
8587 		.insns = {
8588 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8589 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8590 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8591 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8592 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8593 				     BPF_FUNC_map_lookup_elem),
8594 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
8595 			BPF_EXIT_INSN(),
8596 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
8597 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
8598 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
8599 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
8600 			BPF_JMP_A(0),
8601 			BPF_EXIT_INSN(),
8602 		},
8603 		.fixup_map_hash_8b = { 3 },
8604 		.errstr = "pointer offset 1073741822",
8605 		.result = REJECT
8606 	},
8607 	{
8608 		"bounds check map access with off+size signed 32bit overflow. test3",
8609 		.insns = {
8610 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8611 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8612 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8613 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8614 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8615 				     BPF_FUNC_map_lookup_elem),
8616 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
8617 			BPF_EXIT_INSN(),
8618 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
8619 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
8620 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
8621 			BPF_JMP_A(0),
8622 			BPF_EXIT_INSN(),
8623 		},
8624 		.fixup_map_hash_8b = { 3 },
8625 		.errstr = "pointer offset -1073741822",
8626 		.result = REJECT
8627 	},
8628 	{
8629 		"bounds check map access with off+size signed 32bit overflow. test4",
8630 		.insns = {
8631 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8632 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8633 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8634 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8635 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8636 				     BPF_FUNC_map_lookup_elem),
8637 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
8638 			BPF_EXIT_INSN(),
8639 			BPF_MOV64_IMM(BPF_REG_1, 1000000),
8640 			BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000),
8641 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8642 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
8643 			BPF_JMP_A(0),
8644 			BPF_EXIT_INSN(),
8645 		},
8646 		.fixup_map_hash_8b = { 3 },
8647 		.errstr = "map_value pointer and 1000000000000",
8648 		.result = REJECT
8649 	},
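	/* Pointer/scalar confusion: depending on the branch taken, R0 holds
	 * either a scalar loaded from the map value or the frame pointer
	 * when the paths meet.  State pruning must not treat the two as
	 * equivalent; privileged programs may return the value, but
	 * unprivileged ones must be stopped from leaking the address.
	 */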
8650 	{
8651 		"pointer/scalar confusion in state equality check (way 1)",
8652 		.insns = {
8653 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8654 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8655 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8656 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8657 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8658 				     BPF_FUNC_map_lookup_elem),
8659 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
8660 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
8661 			BPF_JMP_A(1),
8662 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
8663 			BPF_JMP_A(0),
8664 			BPF_EXIT_INSN(),
8665 		},
8666 		.fixup_map_hash_8b = { 3 },
8667 		.result = ACCEPT,
8668 		.retval = POINTER_VALUE,
8669 		.result_unpriv = REJECT,
8670 		.errstr_unpriv = "R0 leaks addr as return value"
8671 	},
8672 	{
8673 		"pointer/scalar confusion in state equality check (way 2)",
8674 		.insns = {
8675 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8676 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8677 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8678 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8679 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8680 				     BPF_FUNC_map_lookup_elem),
8681 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
8682 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
8683 			BPF_JMP_A(1),
8684 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
8685 			BPF_EXIT_INSN(),
8686 		},
8687 		.fixup_map_hash_8b = { 3 },
8688 		.result = ACCEPT,
8689 		.retval = POINTER_VALUE,
8690 		.result_unpriv = REJECT,
8691 		.errstr_unpriv = "R0 leaks addr as return value"
8692 	},
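	/* Variable-offset access tests: ctx and (at this point) stack
	 * pointers may only be dereferenced at offsets the verifier knows
	 * exactly, so adding even a small, aligned but unknown value and
	 * then reading directly or through a helper must be rejected.
	 */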
8693 	{
8694 		"variable-offset ctx access",
8695 		.insns = {
8696 			/* Get an unknown value */
8697 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
8698 			/* Make it small and 4-byte aligned */
8699 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
8700 			/* add it to skb.  We now have either &skb->len or
8701 			 * &skb->pkt_type, but we don't know which
8702 			 */
8703 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
8704 			/* dereference it */
8705 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
8706 			BPF_EXIT_INSN(),
8707 		},
8708 		.errstr = "variable ctx access var_off=(0x0; 0x4)",
8709 		.result = REJECT,
8710 		.prog_type = BPF_PROG_TYPE_LWT_IN,
8711 	},
8712 	{
8713 		"variable-offset stack access",
8714 		.insns = {
8715 			/* Fill the top 8 bytes of the stack */
8716 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8717 			/* Get an unknown value */
8718 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
8719 			/* Make it small and 4-byte aligned */
8720 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
8721 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
8722 			/* add it to fp.  We now have either fp-4 or fp-8, but
8723 			 * we don't know which
8724 			 */
8725 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
8726 			/* dereference it */
8727 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
8728 			BPF_EXIT_INSN(),
8729 		},
8730 		.errstr = "variable stack access var_off=(0xfffffffffffffff8; 0x4)",
8731 		.result = REJECT,
8732 		.prog_type = BPF_PROG_TYPE_LWT_IN,
8733 	},
8734 	{
8735 		"indirect variable-offset stack access",
8736 		.insns = {
8737 			/* Fill the top 8 bytes of the stack */
8738 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8739 			/* Get an unknown value */
8740 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
8741 			/* Make it small and 4-byte aligned */
8742 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
8743 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
8744 			/* add it to fp.  We now have either fp-4 or fp-8, but
8745 			 * we don't know which
8746 			 */
8747 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
8748 			/* dereference it indirectly */
8749 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8750 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8751 				     BPF_FUNC_map_lookup_elem),
8752 			BPF_MOV64_IMM(BPF_REG_0, 0),
8753 			BPF_EXIT_INSN(),
8754 		},
8755 		.fixup_map_hash_8b = { 5 },
8756 		.errstr = "variable stack read R2",
8757 		.result = REJECT,
8758 		.prog_type = BPF_PROG_TYPE_LWT_IN,
8759 	},
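	/* Direct stack access with 32-bit wraparound: large constant adds on
	 * the frame pointer must be rejected, either at the single add that
	 * is already out of range or once the accumulated offset exceeds
	 * what fp-based pointer arithmetic allows.
	 */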
8760 	{
8761 		"direct stack access with 32-bit wraparound. test1",
8762 		.insns = {
8763 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8764 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
8765 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
8766 			BPF_MOV32_IMM(BPF_REG_0, 0),
8767 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8768 			BPF_EXIT_INSN()
8769 		},
8770 		.errstr = "fp pointer and 2147483647",
8771 		.result = REJECT
8772 	},
8773 	{
8774 		"direct stack access with 32-bit wraparound. test2",
8775 		.insns = {
8776 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8777 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
8778 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
8779 			BPF_MOV32_IMM(BPF_REG_0, 0),
8780 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8781 			BPF_EXIT_INSN()
8782 		},
8783 		.errstr = "fp pointer and 1073741823",
8784 		.result = REJECT
8785 	},
8786 	{
8787 		"direct stack access with 32-bit wraparound. test3",
8788 		.insns = {
8789 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8790 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
8791 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
8792 			BPF_MOV32_IMM(BPF_REG_0, 0),
8793 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8794 			BPF_EXIT_INSN()
8795 		},
8796 		.errstr = "fp pointer offset 1073741822",
8797 		.result = REJECT
8798 	},
8799 	{
8800 		"liveness pruning and write screening",
8801 		.insns = {
8802 			/* Get an unknown value */
8803 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
8804 			/* branch conditions teach us nothing about R2 */
8805 			BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
8806 			BPF_MOV64_IMM(BPF_REG_0, 0),
8807 			BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
8808 			BPF_MOV64_IMM(BPF_REG_0, 0),
8809 			BPF_EXIT_INSN(),
8810 		},
8811 		.errstr = "R0 !read_ok",
8812 		.result = REJECT,
8813 		.prog_type = BPF_PROG_TYPE_LWT_IN,
8814 	},
8815 	{
8816 		"varlen_map_value_access pruning",
8817 		.insns = {
8818 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8819 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8820 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8821 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8822 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8823 				     BPF_FUNC_map_lookup_elem),
8824 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
8825 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
8826 			BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
8827 			BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
8828 			BPF_MOV32_IMM(BPF_REG_1, 0),
8829 			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
8830 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8831 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
8832 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
8833 				   offsetof(struct test_val, foo)),
8834 			BPF_EXIT_INSN(),
8835 		},
8836 		.fixup_map_hash_48b = { 3 },
8837 		.errstr_unpriv = "R0 leaks addr",
8838 		.errstr = "R0 unbounded memory access",
8839 		.result_unpriv = REJECT,
8840 		.result = REJECT,
8841 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8842 	},
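	/* BPF_END (byte swap) is only defined for the 32-bit BPF_ALU class;
	 * encoding it with BPF_ALU64 produces opcode 0xd7, which the
	 * verifier must reject as unknown.
	 */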
8843 	{
8844 		"invalid 64-bit BPF_END",
8845 		.insns = {
8846 			BPF_MOV32_IMM(BPF_REG_0, 0),
8847 			{
8848 				.code  = BPF_ALU64 | BPF_END | BPF_TO_LE,
8849 				.dst_reg = BPF_REG_0,
8850 				.src_reg = 0,
8851 				.off   = 0,
8852 				.imm   = 32,
8853 			},
8854 			BPF_EXIT_INSN(),
8855 		},
8856 		.errstr = "unknown opcode d7",
8857 		.result = REJECT,
8858 	},
8859 	{
8860 		"XDP, using ifindex from netdev",
8861 		.insns = {
8862 			BPF_MOV64_IMM(BPF_REG_0, 0),
8863 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8864 				    offsetof(struct xdp_md, ingress_ifindex)),
8865 			BPF_JMP_IMM(BPF_JLT, BPF_REG_2, 1, 1),
8866 			BPF_MOV64_IMM(BPF_REG_0, 1),
8867 			BPF_EXIT_INSN(),
8868 		},
8869 		.result = ACCEPT,
8870 		.prog_type = BPF_PROG_TYPE_XDP,
8871 		.retval = 1,
8872 	},
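	/* The "meta access" tests cover XDP metadata: data_meta sits in
	 * front of data, so loads from the meta area are only allowed after
	 * a bounds check of the form data_meta + len <= data, and any bound
	 * learned before bpf_xdp_adjust_meta() is invalidated by the call.
	 */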
8873 	{
8874 		"meta access, test1",
8875 		.insns = {
8876 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8877 				    offsetof(struct xdp_md, data_meta)),
8878 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8879 				    offsetof(struct xdp_md, data)),
8880 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
8881 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
8882 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
8883 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
8884 			BPF_MOV64_IMM(BPF_REG_0, 0),
8885 			BPF_EXIT_INSN(),
8886 		},
8887 		.result = ACCEPT,
8888 		.prog_type = BPF_PROG_TYPE_XDP,
8889 	},
8890 	{
8891 		"meta access, test2",
8892 		.insns = {
8893 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8894 				    offsetof(struct xdp_md, data_meta)),
8895 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8896 				    offsetof(struct xdp_md, data)),
8897 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
8898 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 8),
8899 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
8900 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
8901 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
8902 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8903 			BPF_MOV64_IMM(BPF_REG_0, 0),
8904 			BPF_EXIT_INSN(),
8905 		},
8906 		.result = REJECT,
8907 		.errstr = "invalid access to packet, off=-8",
8908 		.prog_type = BPF_PROG_TYPE_XDP,
8909 	},
8910 	{
8911 		"meta access, test3",
8912 		.insns = {
8913 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8914 				    offsetof(struct xdp_md, data_meta)),
8915 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8916 				    offsetof(struct xdp_md, data_end)),
8917 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
8918 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
8919 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
8920 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
8921 			BPF_MOV64_IMM(BPF_REG_0, 0),
8922 			BPF_EXIT_INSN(),
8923 		},
8924 		.result = REJECT,
8925 		.errstr = "invalid access to packet",
8926 		.prog_type = BPF_PROG_TYPE_XDP,
8927 	},
8928 	{
8929 		"meta access, test4",
8930 		.insns = {
8931 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8932 				    offsetof(struct xdp_md, data_meta)),
8933 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8934 				    offsetof(struct xdp_md, data_end)),
8935 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
8936 				    offsetof(struct xdp_md, data)),
8937 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
8938 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
8939 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
8940 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
8941 			BPF_MOV64_IMM(BPF_REG_0, 0),
8942 			BPF_EXIT_INSN(),
8943 		},
8944 		.result = REJECT,
8945 		.errstr = "invalid access to packet",
8946 		.prog_type = BPF_PROG_TYPE_XDP,
8947 	},
8948 	{
8949 		"meta access, test5",
8950 		.insns = {
8951 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8952 				    offsetof(struct xdp_md, data_meta)),
8953 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
8954 				    offsetof(struct xdp_md, data)),
8955 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
8956 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
8957 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_4, 3),
8958 			BPF_MOV64_IMM(BPF_REG_2, -8),
8959 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8960 				     BPF_FUNC_xdp_adjust_meta),
8961 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
8962 			BPF_MOV64_IMM(BPF_REG_0, 0),
8963 			BPF_EXIT_INSN(),
8964 		},
8965 		.result = REJECT,
8966 		.errstr = "R3 !read_ok",
8967 		.prog_type = BPF_PROG_TYPE_XDP,
8968 	},
8969 	{
8970 		"meta access, test6",
8971 		.insns = {
8972 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8973 				    offsetof(struct xdp_md, data_meta)),
8974 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8975 				    offsetof(struct xdp_md, data)),
8976 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
8977 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
8978 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
8979 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
8980 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_0, 1),
8981 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
8982 			BPF_MOV64_IMM(BPF_REG_0, 0),
8983 			BPF_EXIT_INSN(),
8984 		},
8985 		.result = REJECT,
8986 		.errstr = "invalid access to packet",
8987 		.prog_type = BPF_PROG_TYPE_XDP,
8988 	},
8989 	{
8990 		"meta access, test7",
8991 		.insns = {
8992 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
8993 				    offsetof(struct xdp_md, data_meta)),
8994 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
8995 				    offsetof(struct xdp_md, data)),
8996 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
8997 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
8998 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
8999 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
9000 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
9001 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9002 			BPF_MOV64_IMM(BPF_REG_0, 0),
9003 			BPF_EXIT_INSN(),
9004 		},
9005 		.result = ACCEPT,
9006 		.prog_type = BPF_PROG_TYPE_XDP,
9007 	},
9008 	{
9009 		"meta access, test8",
9010 		.insns = {
9011 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9012 				    offsetof(struct xdp_md, data_meta)),
9013 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9014 				    offsetof(struct xdp_md, data)),
9015 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
9016 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
9017 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
9018 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9019 			BPF_MOV64_IMM(BPF_REG_0, 0),
9020 			BPF_EXIT_INSN(),
9021 		},
9022 		.result = ACCEPT,
9023 		.prog_type = BPF_PROG_TYPE_XDP,
9024 	},
9025 	{
9026 		"meta access, test9",
9027 		.insns = {
9028 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9029 				    offsetof(struct xdp_md, data_meta)),
9030 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9031 				    offsetof(struct xdp_md, data)),
9032 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
9033 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
9034 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
9035 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
9036 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9037 			BPF_MOV64_IMM(BPF_REG_0, 0),
9038 			BPF_EXIT_INSN(),
9039 		},
9040 		.result = REJECT,
9041 		.errstr = "invalid access to packet",
9042 		.prog_type = BPF_PROG_TYPE_XDP,
9043 	},
9044 	{
9045 		"meta access, test10",
9046 		.insns = {
9047 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9048 				    offsetof(struct xdp_md, data_meta)),
9049 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9050 				    offsetof(struct xdp_md, data)),
9051 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
9052 				    offsetof(struct xdp_md, data_end)),
9053 			BPF_MOV64_IMM(BPF_REG_5, 42),
9054 			BPF_MOV64_IMM(BPF_REG_6, 24),
9055 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
9056 			BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
9057 			BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
9058 			BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
9059 			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_5),
9060 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
9061 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
9062 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
9063 			BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_5, 1),
9064 			BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
9065 			BPF_MOV64_IMM(BPF_REG_0, 0),
9066 			BPF_EXIT_INSN(),
9067 		},
9068 		.result = REJECT,
9069 		.errstr = "invalid access to packet",
9070 		.prog_type = BPF_PROG_TYPE_XDP,
9071 	},
9072 	{
9073 		"meta access, test11",
9074 		.insns = {
9075 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9076 				    offsetof(struct xdp_md, data_meta)),
9077 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9078 				    offsetof(struct xdp_md, data)),
9079 			BPF_MOV64_IMM(BPF_REG_5, 42),
9080 			BPF_MOV64_IMM(BPF_REG_6, 24),
9081 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
9082 			BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
9083 			BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
9084 			BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
9085 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_5),
9086 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
9087 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
9088 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
9089 			BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_3, 1),
9090 			BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_5, 0),
9091 			BPF_MOV64_IMM(BPF_REG_0, 0),
9092 			BPF_EXIT_INSN(),
9093 		},
9094 		.result = ACCEPT,
9095 		.prog_type = BPF_PROG_TYPE_XDP,
9096 	},
9097 	{
9098 		"meta access, test12",
9099 		.insns = {
9100 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9101 				    offsetof(struct xdp_md, data_meta)),
9102 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9103 				    offsetof(struct xdp_md, data)),
9104 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
9105 				    offsetof(struct xdp_md, data_end)),
9106 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
9107 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
9108 			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 5),
9109 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
9110 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
9111 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
9112 			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 1),
9113 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9114 			BPF_MOV64_IMM(BPF_REG_0, 0),
9115 			BPF_EXIT_INSN(),
9116 		},
9117 		.result = ACCEPT,
9118 		.prog_type = BPF_PROG_TYPE_XDP,
9119 	},
9120 	{
9121 		"arithmetic ops make PTR_TO_CTX unusable",
9122 		.insns = {
9123 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
9124 				      offsetof(struct __sk_buff, data) -
9125 				      offsetof(struct __sk_buff, mark)),
9126 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9127 				    offsetof(struct __sk_buff, mark)),
9128 			BPF_EXIT_INSN(),
9129 		},
9130 		.errstr = "dereference of modified ctx ptr",
9131 		.result = REJECT,
9132 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
9133 	},
9134 	{
9135 		"pkt_end - pkt_start is allowed",
9136 		.insns = {
9137 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9138 				    offsetof(struct __sk_buff, data_end)),
9139 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9140 				    offsetof(struct __sk_buff, data)),
9141 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
9142 			BPF_EXIT_INSN(),
9143 		},
9144 		.result = ACCEPT,
9145 		.retval = TEST_DATA_LEN,
9146 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
9147 	},
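	/* The long "XDP pkt read" series below walks the comparison matrix
	 * (JGT/JLT/JGE/JLE with pkt_data'/pkt_end, and later
	 * pkt_meta'/pkt_data, as either operand): a good access that stays
	 * inside the range proven by the branch, plus bad accesses that read
	 * past the proven bound or sit on the branch where nothing was
	 * proven.  The two "pkt_end mangling" tests additionally check that
	 * pkt_end itself may not be modified.
	 */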
9148 	{
9149 		"XDP pkt read, pkt_end mangling, bad access 1",
9150 		.insns = {
9151 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9152 				    offsetof(struct xdp_md, data)),
9153 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9154 				    offsetof(struct xdp_md, data_end)),
9155 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9156 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9157 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
9158 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9159 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9160 			BPF_MOV64_IMM(BPF_REG_0, 0),
9161 			BPF_EXIT_INSN(),
9162 		},
9163 		.errstr = "R3 pointer arithmetic on pkt_end",
9164 		.result = REJECT,
9165 		.prog_type = BPF_PROG_TYPE_XDP,
9166 	},
9167 	{
9168 		"XDP pkt read, pkt_end mangling, bad access 2",
9169 		.insns = {
9170 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9171 				    offsetof(struct xdp_md, data)),
9172 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9173 				    offsetof(struct xdp_md, data_end)),
9174 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9175 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9176 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_3, 8),
9177 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9178 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9179 			BPF_MOV64_IMM(BPF_REG_0, 0),
9180 			BPF_EXIT_INSN(),
9181 		},
9182 		.errstr = "R3 pointer arithmetic on pkt_end",
9183 		.result = REJECT,
9184 		.prog_type = BPF_PROG_TYPE_XDP,
9185 	},
9186 	{
9187 		"XDP pkt read, pkt_data' > pkt_end, good access",
9188 		.insns = {
9189 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9190 				    offsetof(struct xdp_md, data)),
9191 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9192 				    offsetof(struct xdp_md, data_end)),
9193 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9194 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9195 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9196 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9197 			BPF_MOV64_IMM(BPF_REG_0, 0),
9198 			BPF_EXIT_INSN(),
9199 		},
9200 		.result = ACCEPT,
9201 		.prog_type = BPF_PROG_TYPE_XDP,
9202 	},
9203 	{
9204 		"XDP pkt read, pkt_data' > pkt_end, bad access 1",
9205 		.insns = {
9206 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9207 				    offsetof(struct xdp_md, data)),
9208 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9209 				    offsetof(struct xdp_md, data_end)),
9210 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9211 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9212 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9213 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9214 			BPF_MOV64_IMM(BPF_REG_0, 0),
9215 			BPF_EXIT_INSN(),
9216 		},
9217 		.errstr = "R1 offset is outside of the packet",
9218 		.result = REJECT,
9219 		.prog_type = BPF_PROG_TYPE_XDP,
9220 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9221 	},
9222 	{
9223 		"XDP pkt read, pkt_data' > pkt_end, bad access 2",
9224 		.insns = {
9225 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9226 				    offsetof(struct xdp_md, data)),
9227 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9228 				    offsetof(struct xdp_md, data_end)),
9229 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9230 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9231 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
9232 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9233 			BPF_MOV64_IMM(BPF_REG_0, 0),
9234 			BPF_EXIT_INSN(),
9235 		},
9236 		.errstr = "R1 offset is outside of the packet",
9237 		.result = REJECT,
9238 		.prog_type = BPF_PROG_TYPE_XDP,
9239 	},
9240 	{
9241 		"XDP pkt read, pkt_end > pkt_data', good access",
9242 		.insns = {
9243 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9244 				    offsetof(struct xdp_md, data)),
9245 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9246 				    offsetof(struct xdp_md, data_end)),
9247 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9248 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9249 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9250 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9251 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9252 			BPF_MOV64_IMM(BPF_REG_0, 0),
9253 			BPF_EXIT_INSN(),
9254 		},
9255 		.result = ACCEPT,
9256 		.prog_type = BPF_PROG_TYPE_XDP,
9257 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9258 	},
9259 	{
9260 		"XDP pkt read, pkt_end > pkt_data', bad access 1",
9261 		.insns = {
9262 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9263 				    offsetof(struct xdp_md, data)),
9264 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9265 				    offsetof(struct xdp_md, data_end)),
9266 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9267 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9268 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9269 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9270 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9271 			BPF_MOV64_IMM(BPF_REG_0, 0),
9272 			BPF_EXIT_INSN(),
9273 		},
9274 		.errstr = "R1 offset is outside of the packet",
9275 		.result = REJECT,
9276 		.prog_type = BPF_PROG_TYPE_XDP,
9277 	},
9278 	{
9279 		"XDP pkt read, pkt_end > pkt_data', bad access 2",
9280 		.insns = {
9281 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9282 				    offsetof(struct xdp_md, data)),
9283 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9284 				    offsetof(struct xdp_md, data_end)),
9285 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9286 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9287 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9288 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9289 			BPF_MOV64_IMM(BPF_REG_0, 0),
9290 			BPF_EXIT_INSN(),
9291 		},
9292 		.errstr = "R1 offset is outside of the packet",
9293 		.result = REJECT,
9294 		.prog_type = BPF_PROG_TYPE_XDP,
9295 	},
9296 	{
9297 		"XDP pkt read, pkt_data' < pkt_end, good access",
9298 		.insns = {
9299 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9300 				    offsetof(struct xdp_md, data)),
9301 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9302 				    offsetof(struct xdp_md, data_end)),
9303 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9304 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9305 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9306 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9307 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9308 			BPF_MOV64_IMM(BPF_REG_0, 0),
9309 			BPF_EXIT_INSN(),
9310 		},
9311 		.result = ACCEPT,
9312 		.prog_type = BPF_PROG_TYPE_XDP,
9313 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9314 	},
9315 	{
9316 		"XDP pkt read, pkt_data' < pkt_end, bad access 1",
9317 		.insns = {
9318 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9319 				    offsetof(struct xdp_md, data)),
9320 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9321 				    offsetof(struct xdp_md, data_end)),
9322 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9323 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9324 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9325 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9326 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9327 			BPF_MOV64_IMM(BPF_REG_0, 0),
9328 			BPF_EXIT_INSN(),
9329 		},
9330 		.errstr = "R1 offset is outside of the packet",
9331 		.result = REJECT,
9332 		.prog_type = BPF_PROG_TYPE_XDP,
9333 	},
9334 	{
9335 		"XDP pkt read, pkt_data' < pkt_end, bad access 2",
9336 		.insns = {
9337 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9338 				    offsetof(struct xdp_md, data)),
9339 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9340 				    offsetof(struct xdp_md, data_end)),
9341 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9342 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9343 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9344 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9345 			BPF_MOV64_IMM(BPF_REG_0, 0),
9346 			BPF_EXIT_INSN(),
9347 		},
9348 		.errstr = "R1 offset is outside of the packet",
9349 		.result = REJECT,
9350 		.prog_type = BPF_PROG_TYPE_XDP,
9351 	},
9352 	{
9353 		"XDP pkt read, pkt_end < pkt_data', good access",
9354 		.insns = {
9355 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9356 				    offsetof(struct xdp_md, data)),
9357 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9358 				    offsetof(struct xdp_md, data_end)),
9359 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9360 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9361 			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
9362 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9363 			BPF_MOV64_IMM(BPF_REG_0, 0),
9364 			BPF_EXIT_INSN(),
9365 		},
9366 		.result = ACCEPT,
9367 		.prog_type = BPF_PROG_TYPE_XDP,
9368 	},
9369 	{
9370 		"XDP pkt read, pkt_end < pkt_data', bad access 1",
9371 		.insns = {
9372 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9373 				    offsetof(struct xdp_md, data)),
9374 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9375 				    offsetof(struct xdp_md, data_end)),
9376 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9377 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9378 			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
9379 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9380 			BPF_MOV64_IMM(BPF_REG_0, 0),
9381 			BPF_EXIT_INSN(),
9382 		},
9383 		.errstr = "R1 offset is outside of the packet",
9384 		.result = REJECT,
9385 		.prog_type = BPF_PROG_TYPE_XDP,
9386 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9387 	},
9388 	{
9389 		"XDP pkt read, pkt_end < pkt_data', bad access 2",
9390 		.insns = {
9391 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9392 				    offsetof(struct xdp_md, data)),
9393 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9394 				    offsetof(struct xdp_md, data_end)),
9395 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9396 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9397 			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
9398 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9399 			BPF_MOV64_IMM(BPF_REG_0, 0),
9400 			BPF_EXIT_INSN(),
9401 		},
9402 		.errstr = "R1 offset is outside of the packet",
9403 		.result = REJECT,
9404 		.prog_type = BPF_PROG_TYPE_XDP,
9405 	},
9406 	{
9407 		"XDP pkt read, pkt_data' >= pkt_end, good access",
9408 		.insns = {
9409 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9410 				    offsetof(struct xdp_md, data)),
9411 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9412 				    offsetof(struct xdp_md, data_end)),
9413 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9414 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9415 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
9416 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9417 			BPF_MOV64_IMM(BPF_REG_0, 0),
9418 			BPF_EXIT_INSN(),
9419 		},
9420 		.result = ACCEPT,
9421 		.prog_type = BPF_PROG_TYPE_XDP,
9422 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9423 	},
9424 	{
9425 		"XDP pkt read, pkt_data' >= pkt_end, bad access 1",
9426 		.insns = {
9427 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9428 				    offsetof(struct xdp_md, data)),
9429 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9430 				    offsetof(struct xdp_md, data_end)),
9431 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9432 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9433 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
9434 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9435 			BPF_MOV64_IMM(BPF_REG_0, 0),
9436 			BPF_EXIT_INSN(),
9437 		},
9438 		.errstr = "R1 offset is outside of the packet",
9439 		.result = REJECT,
9440 		.prog_type = BPF_PROG_TYPE_XDP,
9441 	},
9442 	{
9443 		"XDP pkt read, pkt_data' >= pkt_end, bad access 2",
9444 		.insns = {
9445 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9446 				    offsetof(struct xdp_md, data)),
9447 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9448 				    offsetof(struct xdp_md, data_end)),
9449 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9450 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9451 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
9452 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9453 			BPF_MOV64_IMM(BPF_REG_0, 0),
9454 			BPF_EXIT_INSN(),
9455 		},
9456 		.errstr = "R1 offset is outside of the packet",
9457 		.result = REJECT,
9458 		.prog_type = BPF_PROG_TYPE_XDP,
9459 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9460 	},
9461 	{
9462 		"XDP pkt read, pkt_end >= pkt_data', good access",
9463 		.insns = {
9464 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9465 				    offsetof(struct xdp_md, data)),
9466 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9467 				    offsetof(struct xdp_md, data_end)),
9468 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9469 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9470 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
9471 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9472 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9473 			BPF_MOV64_IMM(BPF_REG_0, 0),
9474 			BPF_EXIT_INSN(),
9475 		},
9476 		.result = ACCEPT,
9477 		.prog_type = BPF_PROG_TYPE_XDP,
9478 	},
9479 	{
9480 		"XDP pkt read, pkt_end >= pkt_data', bad access 1",
9481 		.insns = {
9482 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9483 				    offsetof(struct xdp_md, data)),
9484 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9485 				    offsetof(struct xdp_md, data_end)),
9486 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9487 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9488 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
9489 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9490 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9491 			BPF_MOV64_IMM(BPF_REG_0, 0),
9492 			BPF_EXIT_INSN(),
9493 		},
9494 		.errstr = "R1 offset is outside of the packet",
9495 		.result = REJECT,
9496 		.prog_type = BPF_PROG_TYPE_XDP,
9497 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9498 	},
9499 	{
9500 		"XDP pkt read, pkt_end >= pkt_data', bad access 2",
9501 		.insns = {
9502 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9503 				    offsetof(struct xdp_md, data)),
9504 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9505 				    offsetof(struct xdp_md, data_end)),
9506 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9507 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9508 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
9509 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9510 			BPF_MOV64_IMM(BPF_REG_0, 0),
9511 			BPF_EXIT_INSN(),
9512 		},
9513 		.errstr = "R1 offset is outside of the packet",
9514 		.result = REJECT,
9515 		.prog_type = BPF_PROG_TYPE_XDP,
9516 	},
9517 	{
9518 		"XDP pkt read, pkt_data' <= pkt_end, good access",
9519 		.insns = {
9520 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9521 				    offsetof(struct xdp_md, data)),
9522 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9523 				    offsetof(struct xdp_md, data_end)),
9524 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9525 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9526 			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
9527 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9528 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9529 			BPF_MOV64_IMM(BPF_REG_0, 0),
9530 			BPF_EXIT_INSN(),
9531 		},
9532 		.result = ACCEPT,
9533 		.prog_type = BPF_PROG_TYPE_XDP,
9534 	},
9535 	{
9536 		"XDP pkt read, pkt_data' <= pkt_end, bad access 1",
9537 		.insns = {
9538 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9539 				    offsetof(struct xdp_md, data)),
9540 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9541 				    offsetof(struct xdp_md, data_end)),
9542 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9543 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9544 			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
9545 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9546 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9547 			BPF_MOV64_IMM(BPF_REG_0, 0),
9548 			BPF_EXIT_INSN(),
9549 		},
9550 		.errstr = "R1 offset is outside of the packet",
9551 		.result = REJECT,
9552 		.prog_type = BPF_PROG_TYPE_XDP,
9553 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9554 	},
9555 	{
9556 		"XDP pkt read, pkt_data' <= pkt_end, bad access 2",
9557 		.insns = {
9558 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9559 				    offsetof(struct xdp_md, data)),
9560 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9561 				    offsetof(struct xdp_md, data_end)),
9562 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9563 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9564 			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
9565 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9566 			BPF_MOV64_IMM(BPF_REG_0, 0),
9567 			BPF_EXIT_INSN(),
9568 		},
9569 		.errstr = "R1 offset is outside of the packet",
9570 		.result = REJECT,
9571 		.prog_type = BPF_PROG_TYPE_XDP,
9572 	},
9573 	{
9574 		"XDP pkt read, pkt_end <= pkt_data', good access",
9575 		.insns = {
9576 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9577 				    offsetof(struct xdp_md, data)),
9578 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9579 				    offsetof(struct xdp_md, data_end)),
9580 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9581 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9582 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
9583 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9584 			BPF_MOV64_IMM(BPF_REG_0, 0),
9585 			BPF_EXIT_INSN(),
9586 		},
9587 		.result = ACCEPT,
9588 		.prog_type = BPF_PROG_TYPE_XDP,
9589 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9590 	},
9591 	{
9592 		"XDP pkt read, pkt_end <= pkt_data', bad access 1",
9593 		.insns = {
9594 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9595 				    offsetof(struct xdp_md, data)),
9596 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9597 				    offsetof(struct xdp_md, data_end)),
9598 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9599 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9600 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
9601 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9602 			BPF_MOV64_IMM(BPF_REG_0, 0),
9603 			BPF_EXIT_INSN(),
9604 		},
9605 		.errstr = "R1 offset is outside of the packet",
9606 		.result = REJECT,
9607 		.prog_type = BPF_PROG_TYPE_XDP,
9608 	},
9609 	{
9610 		"XDP pkt read, pkt_end <= pkt_data', bad access 2",
9611 		.insns = {
9612 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9613 				    offsetof(struct xdp_md, data)),
9614 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9615 				    offsetof(struct xdp_md, data_end)),
9616 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9617 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9618 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
9619 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9620 			BPF_MOV64_IMM(BPF_REG_0, 0),
9621 			BPF_EXIT_INSN(),
9622 		},
9623 		.errstr = "R1 offset is outside of the packet",
9624 		.result = REJECT,
9625 		.prog_type = BPF_PROG_TYPE_XDP,
9626 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9627 	},
9628 	{
9629 		"XDP pkt read, pkt_meta' > pkt_data, good access",
9630 		.insns = {
9631 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9632 				    offsetof(struct xdp_md, data_meta)),
9633 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9634 				    offsetof(struct xdp_md, data)),
9635 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9636 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9637 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9638 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9639 			BPF_MOV64_IMM(BPF_REG_0, 0),
9640 			BPF_EXIT_INSN(),
9641 		},
9642 		.result = ACCEPT,
9643 		.prog_type = BPF_PROG_TYPE_XDP,
9644 	},
9645 	{
9646 		"XDP pkt read, pkt_meta' > pkt_data, bad access 1",
9647 		.insns = {
9648 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9649 				    offsetof(struct xdp_md, data_meta)),
9650 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9651 				    offsetof(struct xdp_md, data)),
9652 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9653 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9654 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9655 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9656 			BPF_MOV64_IMM(BPF_REG_0, 0),
9657 			BPF_EXIT_INSN(),
9658 		},
9659 		.errstr = "R1 offset is outside of the packet",
9660 		.result = REJECT,
9661 		.prog_type = BPF_PROG_TYPE_XDP,
9662 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9663 	},
9664 	{
9665 		"XDP pkt read, pkt_meta' > pkt_data, bad access 2",
9666 		.insns = {
9667 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9668 				    offsetof(struct xdp_md, data_meta)),
9669 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9670 				    offsetof(struct xdp_md, data)),
9671 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9672 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9673 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
9674 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9675 			BPF_MOV64_IMM(BPF_REG_0, 0),
9676 			BPF_EXIT_INSN(),
9677 		},
9678 		.errstr = "R1 offset is outside of the packet",
9679 		.result = REJECT,
9680 		.prog_type = BPF_PROG_TYPE_XDP,
9681 	},
9682 	{
9683 		"XDP pkt read, pkt_data > pkt_meta', good access",
9684 		.insns = {
9685 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9686 				    offsetof(struct xdp_md, data_meta)),
9687 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9688 				    offsetof(struct xdp_md, data)),
9689 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9690 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9691 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9692 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9693 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9694 			BPF_MOV64_IMM(BPF_REG_0, 0),
9695 			BPF_EXIT_INSN(),
9696 		},
9697 		.result = ACCEPT,
9698 		.prog_type = BPF_PROG_TYPE_XDP,
9699 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9700 	},
9701 	{
9702 		"XDP pkt read, pkt_data > pkt_meta', bad access 1",
9703 		.insns = {
9704 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9705 				    offsetof(struct xdp_md, data_meta)),
9706 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9707 				    offsetof(struct xdp_md, data)),
9708 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9709 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9710 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9711 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9712 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9713 			BPF_MOV64_IMM(BPF_REG_0, 0),
9714 			BPF_EXIT_INSN(),
9715 		},
9716 		.errstr = "R1 offset is outside of the packet",
9717 		.result = REJECT,
9718 		.prog_type = BPF_PROG_TYPE_XDP,
9719 	},
9720 	{
9721 		"XDP pkt read, pkt_data > pkt_meta', bad access 2",
9722 		.insns = {
9723 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9724 				    offsetof(struct xdp_md, data_meta)),
9725 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9726 				    offsetof(struct xdp_md, data)),
9727 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9728 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9729 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9730 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9731 			BPF_MOV64_IMM(BPF_REG_0, 0),
9732 			BPF_EXIT_INSN(),
9733 		},
9734 		.errstr = "R1 offset is outside of the packet",
9735 		.result = REJECT,
9736 		.prog_type = BPF_PROG_TYPE_XDP,
9737 	},
9738 	{
9739 		"XDP pkt read, pkt_meta' < pkt_data, good access",
9740 		.insns = {
9741 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9742 				    offsetof(struct xdp_md, data_meta)),
9743 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9744 				    offsetof(struct xdp_md, data)),
9745 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9746 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9747 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9748 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9749 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9750 			BPF_MOV64_IMM(BPF_REG_0, 0),
9751 			BPF_EXIT_INSN(),
9752 		},
9753 		.result = ACCEPT,
9754 		.prog_type = BPF_PROG_TYPE_XDP,
9755 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9756 	},
9757 	{
9758 		"XDP pkt read, pkt_meta' < pkt_data, bad access 1",
9759 		.insns = {
9760 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9761 				    offsetof(struct xdp_md, data_meta)),
9762 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9763 				    offsetof(struct xdp_md, data)),
9764 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9765 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9766 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9767 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9768 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9769 			BPF_MOV64_IMM(BPF_REG_0, 0),
9770 			BPF_EXIT_INSN(),
9771 		},
9772 		.errstr = "R1 offset is outside of the packet",
9773 		.result = REJECT,
9774 		.prog_type = BPF_PROG_TYPE_XDP,
9775 	},
9776 	{
9777 		"XDP pkt read, pkt_meta' < pkt_data, bad access 2",
9778 		.insns = {
9779 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9780 				    offsetof(struct xdp_md, data_meta)),
9781 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9782 				    offsetof(struct xdp_md, data)),
9783 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9784 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9785 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9786 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9787 			BPF_MOV64_IMM(BPF_REG_0, 0),
9788 			BPF_EXIT_INSN(),
9789 		},
9790 		.errstr = "R1 offset is outside of the packet",
9791 		.result = REJECT,
9792 		.prog_type = BPF_PROG_TYPE_XDP,
9793 	},
9794 	{
9795 		"XDP pkt read, pkt_data < pkt_meta', good access",
9796 		.insns = {
9797 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9798 				    offsetof(struct xdp_md, data_meta)),
9799 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9800 				    offsetof(struct xdp_md, data)),
9801 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9802 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9803 			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
9804 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9805 			BPF_MOV64_IMM(BPF_REG_0, 0),
9806 			BPF_EXIT_INSN(),
9807 		},
9808 		.result = ACCEPT,
9809 		.prog_type = BPF_PROG_TYPE_XDP,
9810 	},
9811 	{
9812 		"XDP pkt read, pkt_data < pkt_meta', bad access 1",
9813 		.insns = {
9814 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9815 				    offsetof(struct xdp_md, data_meta)),
9816 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9817 				    offsetof(struct xdp_md, data)),
9818 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9819 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9820 			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
9821 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9822 			BPF_MOV64_IMM(BPF_REG_0, 0),
9823 			BPF_EXIT_INSN(),
9824 		},
9825 		.errstr = "R1 offset is outside of the packet",
9826 		.result = REJECT,
9827 		.prog_type = BPF_PROG_TYPE_XDP,
9828 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9829 	},
9830 	{
9831 		"XDP pkt read, pkt_data < pkt_meta', bad access 2",
9832 		.insns = {
9833 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9834 				    offsetof(struct xdp_md, data_meta)),
9835 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9836 				    offsetof(struct xdp_md, data)),
9837 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9838 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9839 			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
9840 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9841 			BPF_MOV64_IMM(BPF_REG_0, 0),
9842 			BPF_EXIT_INSN(),
9843 		},
9844 		.errstr = "R1 offset is outside of the packet",
9845 		.result = REJECT,
9846 		.prog_type = BPF_PROG_TYPE_XDP,
9847 	},
9848 	{
9849 		"XDP pkt read, pkt_meta' >= pkt_data, good access",
9850 		.insns = {
9851 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9852 				    offsetof(struct xdp_md, data_meta)),
9853 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9854 				    offsetof(struct xdp_md, data)),
9855 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9856 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9857 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
9858 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9859 			BPF_MOV64_IMM(BPF_REG_0, 0),
9860 			BPF_EXIT_INSN(),
9861 		},
9862 		.result = ACCEPT,
9863 		.prog_type = BPF_PROG_TYPE_XDP,
9864 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9865 	},
9866 	{
9867 		"XDP pkt read, pkt_meta' >= pkt_data, bad access 1",
9868 		.insns = {
9869 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9870 				    offsetof(struct xdp_md, data_meta)),
9871 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9872 				    offsetof(struct xdp_md, data)),
9873 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9874 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9875 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
9876 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9877 			BPF_MOV64_IMM(BPF_REG_0, 0),
9878 			BPF_EXIT_INSN(),
9879 		},
9880 		.errstr = "R1 offset is outside of the packet",
9881 		.result = REJECT,
9882 		.prog_type = BPF_PROG_TYPE_XDP,
9883 	},
9884 	{
9885 		"XDP pkt read, pkt_meta' >= pkt_data, bad access 2",
9886 		.insns = {
9887 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9888 				    offsetof(struct xdp_md, data_meta)),
9889 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9890 				    offsetof(struct xdp_md, data)),
9891 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9892 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9893 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
9894 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9895 			BPF_MOV64_IMM(BPF_REG_0, 0),
9896 			BPF_EXIT_INSN(),
9897 		},
9898 		.errstr = "R1 offset is outside of the packet",
9899 		.result = REJECT,
9900 		.prog_type = BPF_PROG_TYPE_XDP,
9901 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9902 	},
9903 	{
9904 		"XDP pkt read, pkt_data >= pkt_meta', good access",
9905 		.insns = {
9906 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9907 				    offsetof(struct xdp_md, data_meta)),
9908 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9909 				    offsetof(struct xdp_md, data)),
9910 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9911 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9912 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
9913 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9914 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9915 			BPF_MOV64_IMM(BPF_REG_0, 0),
9916 			BPF_EXIT_INSN(),
9917 		},
9918 		.result = ACCEPT,
9919 		.prog_type = BPF_PROG_TYPE_XDP,
9920 	},
9921 	{
9922 		"XDP pkt read, pkt_data >= pkt_meta', bad access 1",
9923 		.insns = {
9924 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9925 				    offsetof(struct xdp_md, data_meta)),
9926 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9927 				    offsetof(struct xdp_md, data)),
9928 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9929 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9930 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
9931 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9932 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9933 			BPF_MOV64_IMM(BPF_REG_0, 0),
9934 			BPF_EXIT_INSN(),
9935 		},
9936 		.errstr = "R1 offset is outside of the packet",
9937 		.result = REJECT,
9938 		.prog_type = BPF_PROG_TYPE_XDP,
9939 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9940 	},
9941 	{
9942 		"XDP pkt read, pkt_data >= pkt_meta', bad access 2",
9943 		.insns = {
9944 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9945 				    offsetof(struct xdp_md, data_meta)),
9946 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9947 				    offsetof(struct xdp_md, data)),
9948 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9949 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9950 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
9951 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9952 			BPF_MOV64_IMM(BPF_REG_0, 0),
9953 			BPF_EXIT_INSN(),
9954 		},
9955 		.errstr = "R1 offset is outside of the packet",
9956 		.result = REJECT,
9957 		.prog_type = BPF_PROG_TYPE_XDP,
9958 	},
9959 	{
9960 		"XDP pkt read, pkt_meta' <= pkt_data, good access",
9961 		.insns = {
9962 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9963 				    offsetof(struct xdp_md, data_meta)),
9964 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9965 				    offsetof(struct xdp_md, data)),
9966 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9967 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9968 			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
9969 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9970 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9971 			BPF_MOV64_IMM(BPF_REG_0, 0),
9972 			BPF_EXIT_INSN(),
9973 		},
9974 		.result = ACCEPT,
9975 		.prog_type = BPF_PROG_TYPE_XDP,
9976 	},
9977 	{
9978 		"XDP pkt read, pkt_meta' <= pkt_data, bad access 1",
9979 		.insns = {
9980 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9981 				    offsetof(struct xdp_md, data_meta)),
9982 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9983 				    offsetof(struct xdp_md, data)),
9984 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9985 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9986 			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
9987 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9988 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9989 			BPF_MOV64_IMM(BPF_REG_0, 0),
9990 			BPF_EXIT_INSN(),
9991 		},
9992 		.errstr = "R1 offset is outside of the packet",
9993 		.result = REJECT,
9994 		.prog_type = BPF_PROG_TYPE_XDP,
9995 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9996 	},
9997 	{
9998 		"XDP pkt read, pkt_meta' <= pkt_data, bad access 2",
9999 		.insns = {
10000 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10001 				    offsetof(struct xdp_md, data_meta)),
10002 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10003 				    offsetof(struct xdp_md, data)),
10004 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10005 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10006 			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
10007 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10008 			BPF_MOV64_IMM(BPF_REG_0, 0),
10009 			BPF_EXIT_INSN(),
10010 		},
10011 		.errstr = "R1 offset is outside of the packet",
10012 		.result = REJECT,
10013 		.prog_type = BPF_PROG_TYPE_XDP,
10014 	},
10015 	{
10016 		"XDP pkt read, pkt_data <= pkt_meta', good access",
10017 		.insns = {
10018 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10019 				    offsetof(struct xdp_md, data_meta)),
10020 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10021 				    offsetof(struct xdp_md, data)),
10022 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10023 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10024 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
10025 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10026 			BPF_MOV64_IMM(BPF_REG_0, 0),
10027 			BPF_EXIT_INSN(),
10028 		},
10029 		.result = ACCEPT,
10030 		.prog_type = BPF_PROG_TYPE_XDP,
10031 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10032 	},
10033 	{
10034 		"XDP pkt read, pkt_data <= pkt_meta', bad access 1",
10035 		.insns = {
10036 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10037 				    offsetof(struct xdp_md, data_meta)),
10038 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10039 				    offsetof(struct xdp_md, data)),
10040 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10041 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10042 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
10043 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10044 			BPF_MOV64_IMM(BPF_REG_0, 0),
10045 			BPF_EXIT_INSN(),
10046 		},
10047 		.errstr = "R1 offset is outside of the packet",
10048 		.result = REJECT,
10049 		.prog_type = BPF_PROG_TYPE_XDP,
10050 	},
10051 	{
10052 		"XDP pkt read, pkt_data <= pkt_meta', bad access 2",
10053 		.insns = {
10054 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10055 				    offsetof(struct xdp_md, data_meta)),
10056 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10057 				    offsetof(struct xdp_md, data)),
10058 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10059 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10060 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
10061 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10062 			BPF_MOV64_IMM(BPF_REG_0, 0),
10063 			BPF_EXIT_INSN(),
10064 		},
10065 		.errstr = "R1 offset is outside of the packet",
10066 		.result = REJECT,
10067 		.prog_type = BPF_PROG_TYPE_XDP,
10068 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10069 	},
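	/* Illustrative sketch (editorial, not one of the tests): the "XDP pkt
	 * read" entries above encode, in raw instructions, the usual C-level
	 * bounds-check idiom for direct packet access, roughly:
	 *
	 *	void *data = (void *)(long)ctx->data;
	 *	void *data_end = (void *)(long)ctx->data_end;
	 *
	 *	if (data + 8 <= data_end)
	 *		val = *(__u64 *)data;	// proven in bounds: accepted
	 *
	 * The pkt_meta variants apply the same pattern to data_meta checked
	 * against data.  The "bad access" variants either read more bytes than
	 * the comparison proves available or read on the unproven side of the
	 * branch, and are rejected with "R1 offset is outside of the packet".
	 */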
10070 	{
10071 		"check deducing bounds from const, 1",
10072 		.insns = {
10073 			BPF_MOV64_IMM(BPF_REG_0, 1),
10074 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 0),
10075 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10076 			BPF_EXIT_INSN(),
10077 		},
10078 		.result = REJECT,
10079 		.errstr = "R0 tried to subtract pointer from scalar",
10080 	},
10081 	{
10082 		"check deducing bounds from const, 2",
10083 		.insns = {
10084 			BPF_MOV64_IMM(BPF_REG_0, 1),
10085 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
10086 			BPF_EXIT_INSN(),
10087 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 1, 1),
10088 			BPF_EXIT_INSN(),
10089 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
10090 			BPF_EXIT_INSN(),
10091 		},
10092 		.result = ACCEPT,
10093 		.retval = 1,
10094 	},
10095 	{
10096 		"check deducing bounds from const, 3",
10097 		.insns = {
10098 			BPF_MOV64_IMM(BPF_REG_0, 0),
10099 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
10100 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10101 			BPF_EXIT_INSN(),
10102 		},
10103 		.result = REJECT,
10104 		.errstr = "R0 tried to subtract pointer from scalar",
10105 	},
10106 	{
10107 		"check deducing bounds from const, 4",
10108 		.insns = {
10109 			BPF_MOV64_IMM(BPF_REG_0, 0),
10110 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1),
10111 			BPF_EXIT_INSN(),
10112 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
10113 			BPF_EXIT_INSN(),
10114 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
10115 			BPF_EXIT_INSN(),
10116 		},
10117 		.result = ACCEPT,
10118 	},
10119 	{
10120 		"check deducing bounds from const, 5",
10121 		.insns = {
10122 			BPF_MOV64_IMM(BPF_REG_0, 0),
10123 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
10124 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10125 			BPF_EXIT_INSN(),
10126 		},
10127 		.result = REJECT,
10128 		.errstr = "R0 tried to subtract pointer from scalar",
10129 	},
10130 	{
10131 		"check deducing bounds from const, 6",
10132 		.insns = {
10133 			BPF_MOV64_IMM(BPF_REG_0, 0),
10134 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
10135 			BPF_EXIT_INSN(),
10136 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10137 			BPF_EXIT_INSN(),
10138 		},
10139 		.result = REJECT,
10140 		.errstr = "R0 tried to subtract pointer from scalar",
10141 	},
10142 	{
10143 		"check deducing bounds from const, 7",
10144 		.insns = {
10145 			BPF_MOV64_IMM(BPF_REG_0, ~0),
10146 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
10147 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
10148 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10149 				    offsetof(struct __sk_buff, mark)),
10150 			BPF_EXIT_INSN(),
10151 		},
10152 		.result = REJECT,
10153 		.errstr = "dereference of modified ctx ptr",
10154 	},
10155 	{
10156 		"check deducing bounds from const, 8",
10157 		.insns = {
10158 			BPF_MOV64_IMM(BPF_REG_0, ~0),
10159 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
10160 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
10161 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10162 				    offsetof(struct __sk_buff, mark)),
10163 			BPF_EXIT_INSN(),
10164 		},
10165 		.result = REJECT,
10166 		.errstr = "dereference of modified ctx ptr",
10167 	},
10168 	{
10169 		"check deducing bounds from const, 9",
10170 		.insns = {
10171 			BPF_MOV64_IMM(BPF_REG_0, 0),
10172 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
10173 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10174 			BPF_EXIT_INSN(),
10175 		},
10176 		.result = REJECT,
10177 		.errstr = "R0 tried to subtract pointer from scalar",
10178 	},
10179 	{
10180 		"check deducing bounds from const, 10",
10181 		.insns = {
10182 			BPF_MOV64_IMM(BPF_REG_0, 0),
10183 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
10184 			/* Marks reg as unknown. */
10185 			BPF_ALU64_IMM(BPF_NEG, BPF_REG_0, 0),
10186 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10187 			BPF_EXIT_INSN(),
10188 		},
10189 		.result = REJECT,
10190 		.errstr = "math between ctx pointer and register with unbounded min value is not allowed",
10191 	},
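	/* Illustrative sketch (editorial, not one of the tests): the "check
	 * deducing bounds from const" entries above exercise pointer-arithmetic
	 * restrictions.  Test 1, for example, is roughly:
	 *
	 *	r0 = 1;
	 *	if (r0 s>= 1)		// signed compare, branch offset 0
	 *		;
	 *	r0 -= r1;		// r1 is the ctx pointer
	 *
	 * and is rejected with "R0 tried to subtract pointer from scalar",
	 * regardless of the bounds deduced for r0.  Tests 7, 8 and 10 cover
	 * related cases involving the ctx pointer ("dereference of modified
	 * ctx ptr", "math between ctx pointer and register with unbounded min
	 * value is not allowed").
	 */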
10192 	{
10193 		"bpf_exit with invalid return code. test1",
10194 		.insns = {
10195 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10196 			BPF_EXIT_INSN(),
10197 		},
10198 		.errstr = "R0 has value (0x0; 0xffffffff)",
10199 		.result = REJECT,
10200 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10201 	},
10202 	{
10203 		"bpf_exit with invalid return code. test2",
10204 		.insns = {
10205 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10206 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
10207 			BPF_EXIT_INSN(),
10208 		},
10209 		.result = ACCEPT,
10210 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10211 	},
10212 	{
10213 		"bpf_exit with invalid return code. test3",
10214 		.insns = {
10215 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10216 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 3),
10217 			BPF_EXIT_INSN(),
10218 		},
10219 		.errstr = "R0 has value (0x0; 0x3)",
10220 		.result = REJECT,
10221 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10222 	},
10223 	{
10224 		"bpf_exit with invalid return code. test4",
10225 		.insns = {
10226 			BPF_MOV64_IMM(BPF_REG_0, 1),
10227 			BPF_EXIT_INSN(),
10228 		},
10229 		.result = ACCEPT,
10230 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10231 	},
10232 	{
10233 		"bpf_exit with invalid return code. test5",
10234 		.insns = {
10235 			BPF_MOV64_IMM(BPF_REG_0, 2),
10236 			BPF_EXIT_INSN(),
10237 		},
10238 		.errstr = "R0 has value (0x2; 0x0)",
10239 		.result = REJECT,
10240 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10241 	},
10242 	{
10243 		"bpf_exit with invalid return code. test6",
10244 		.insns = {
10245 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
10246 			BPF_EXIT_INSN(),
10247 		},
10248 		.errstr = "R0 is not a known value (ctx)",
10249 		.result = REJECT,
10250 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10251 	},
10252 	{
10253 		"bpf_exit with invalid return code. test7",
10254 		.insns = {
10255 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10256 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 4),
10257 			BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_2),
10258 			BPF_EXIT_INSN(),
10259 		},
10260 		.errstr = "R0 has unknown scalar value",
10261 		.result = REJECT,
10262 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10263 	},
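	/* Illustrative note (editorial, not one of the tests): the "bpf_exit
	 * with invalid return code" entries above check that, at this revision,
	 * BPF_PROG_TYPE_CGROUP_SOCK programs exit with R0 in the range [0, 1].
	 * In C terms:
	 *
	 *	return verdict & 1;	// range [0, 1]: accepted (test2)
	 *	return verdict & 3;	// range [0, 3]: rejected (test3)
	 *	return 2;		// constant 2:   rejected (test5)
	 */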
10264 	{
10265 		"calls: basic sanity",
10266 		.insns = {
10267 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10268 			BPF_MOV64_IMM(BPF_REG_0, 1),
10269 			BPF_EXIT_INSN(),
10270 			BPF_MOV64_IMM(BPF_REG_0, 2),
10271 			BPF_EXIT_INSN(),
10272 		},
10273 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10274 		.result = ACCEPT,
10275 	},
10276 	{
10277 		"calls: not on unprivileged",
10278 		.insns = {
10279 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10280 			BPF_MOV64_IMM(BPF_REG_0, 1),
10281 			BPF_EXIT_INSN(),
10282 			BPF_MOV64_IMM(BPF_REG_0, 2),
10283 			BPF_EXIT_INSN(),
10284 		},
10285 		.errstr_unpriv = "function calls to other bpf functions are allowed for root only",
10286 		.result_unpriv = REJECT,
10287 		.result = ACCEPT,
10288 		.retval = 1,
10289 	},
10290 	{
10291 		"calls: div by 0 in subprog",
10292 		.insns = {
10293 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10294 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
10295 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10296 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
10297 				    offsetof(struct __sk_buff, data_end)),
10298 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
10299 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
10300 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
10301 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
10302 			BPF_MOV64_IMM(BPF_REG_0, 1),
10303 			BPF_EXIT_INSN(),
10304 			BPF_MOV32_IMM(BPF_REG_2, 0),
10305 			BPF_MOV32_IMM(BPF_REG_3, 1),
10306 			BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2),
10307 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10308 				    offsetof(struct __sk_buff, data)),
10309 			BPF_EXIT_INSN(),
10310 		},
10311 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
10312 		.result = ACCEPT,
10313 		.retval = 1,
10314 	},
10315 	{
10316 		"calls: multiple ret types in subprog 1",
10317 		.insns = {
10318 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10319 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
10320 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10321 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
10322 				    offsetof(struct __sk_buff, data_end)),
10323 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
10324 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
10325 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
10326 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
10327 			BPF_MOV64_IMM(BPF_REG_0, 1),
10328 			BPF_EXIT_INSN(),
10329 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10330 				    offsetof(struct __sk_buff, data)),
10331 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
10332 			BPF_MOV32_IMM(BPF_REG_0, 42),
10333 			BPF_EXIT_INSN(),
10334 		},
10335 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
10336 		.result = REJECT,
10337 		.errstr = "R0 invalid mem access 'inv'",
10338 	},
10339 	{
10340 		"calls: multiple ret types in subprog 2",
10341 		.insns = {
10342 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10343 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
10344 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10345 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
10346 				    offsetof(struct __sk_buff, data_end)),
10347 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
10348 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
10349 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
10350 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
10351 			BPF_MOV64_IMM(BPF_REG_0, 1),
10352 			BPF_EXIT_INSN(),
10353 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10354 				    offsetof(struct __sk_buff, data)),
10355 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10356 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9),
10357 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10358 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10359 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10360 			BPF_LD_MAP_FD(BPF_REG_1, 0),
10361 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10362 				     BPF_FUNC_map_lookup_elem),
10363 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
10364 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
10365 				    offsetof(struct __sk_buff, data)),
10366 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
10367 			BPF_EXIT_INSN(),
10368 		},
10369 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
10370 		.fixup_map_hash_8b = { 16 },
10371 		.result = REJECT,
10372 		.errstr = "R0 min value is outside of the array range",
10373 	},
10374 	{
10375 		"calls: overlapping caller/callee",
10376 		.insns = {
10377 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
10378 			BPF_MOV64_IMM(BPF_REG_0, 1),
10379 			BPF_EXIT_INSN(),
10380 		},
10381 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10382 		.errstr = "last insn is not an exit or jmp",
10383 		.result = REJECT,
10384 	},
10385 	{
10386 		"calls: wrong recursive calls",
10387 		.insns = {
10388 			BPF_JMP_IMM(BPF_JA, 0, 0, 4),
10389 			BPF_JMP_IMM(BPF_JA, 0, 0, 4),
10390 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
10391 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
10392 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
10393 			BPF_MOV64_IMM(BPF_REG_0, 1),
10394 			BPF_EXIT_INSN(),
10395 		},
10396 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10397 		.errstr = "jump out of range",
10398 		.result = REJECT,
10399 	},
10400 	{
10401 		"calls: wrong src reg",
10402 		.insns = {
10403 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 2, 0, 0),
10404 			BPF_MOV64_IMM(BPF_REG_0, 1),
10405 			BPF_EXIT_INSN(),
10406 		},
10407 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10408 		.errstr = "BPF_CALL uses reserved fields",
10409 		.result = REJECT,
10410 	},
10411 	{
10412 		"calls: wrong off value",
10413 		.insns = {
10414 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
10415 			BPF_MOV64_IMM(BPF_REG_0, 1),
10416 			BPF_EXIT_INSN(),
10417 			BPF_MOV64_IMM(BPF_REG_0, 2),
10418 			BPF_EXIT_INSN(),
10419 		},
10420 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10421 		.errstr = "BPF_CALL uses reserved fields",
10422 		.result = REJECT,
10423 	},
10424 	{
10425 		"calls: jump back loop",
10426 		.insns = {
10427 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
10428 			BPF_MOV64_IMM(BPF_REG_0, 1),
10429 			BPF_EXIT_INSN(),
10430 		},
10431 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10432 		.errstr = "back-edge from insn 0 to 0",
10433 		.result = REJECT,
10434 	},
10435 	{
10436 		"calls: conditional call",
10437 		.insns = {
10438 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10439 				    offsetof(struct __sk_buff, mark)),
10440 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
10441 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10442 			BPF_MOV64_IMM(BPF_REG_0, 1),
10443 			BPF_EXIT_INSN(),
10444 			BPF_MOV64_IMM(BPF_REG_0, 2),
10445 			BPF_EXIT_INSN(),
10446 		},
10447 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10448 		.errstr = "jump out of range",
10449 		.result = REJECT,
10450 	},
10451 	{
10452 		"calls: conditional call 2",
10453 		.insns = {
10454 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10455 				    offsetof(struct __sk_buff, mark)),
10456 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
10457 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10458 			BPF_MOV64_IMM(BPF_REG_0, 1),
10459 			BPF_EXIT_INSN(),
10460 			BPF_MOV64_IMM(BPF_REG_0, 2),
10461 			BPF_EXIT_INSN(),
10462 			BPF_MOV64_IMM(BPF_REG_0, 3),
10463 			BPF_EXIT_INSN(),
10464 		},
10465 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10466 		.result = ACCEPT,
10467 	},
10468 	{
10469 		"calls: conditional call 3",
10470 		.insns = {
10471 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10472 				    offsetof(struct __sk_buff, mark)),
10473 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
10474 			BPF_JMP_IMM(BPF_JA, 0, 0, 4),
10475 			BPF_MOV64_IMM(BPF_REG_0, 1),
10476 			BPF_EXIT_INSN(),
10477 			BPF_MOV64_IMM(BPF_REG_0, 1),
10478 			BPF_JMP_IMM(BPF_JA, 0, 0, -6),
10479 			BPF_MOV64_IMM(BPF_REG_0, 3),
10480 			BPF_JMP_IMM(BPF_JA, 0, 0, -6),
10481 		},
10482 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10483 		.errstr = "back-edge from insn",
10484 		.result = REJECT,
10485 	},
10486 	{
10487 		"calls: conditional call 4",
10488 		.insns = {
10489 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10490 				    offsetof(struct __sk_buff, mark)),
10491 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
10492 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10493 			BPF_MOV64_IMM(BPF_REG_0, 1),
10494 			BPF_EXIT_INSN(),
10495 			BPF_MOV64_IMM(BPF_REG_0, 1),
10496 			BPF_JMP_IMM(BPF_JA, 0, 0, -5),
10497 			BPF_MOV64_IMM(BPF_REG_0, 3),
10498 			BPF_EXIT_INSN(),
10499 		},
10500 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10501 		.result = ACCEPT,
10502 	},
10503 	{
10504 		"calls: conditional call 5",
10505 		.insns = {
10506 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10507 				    offsetof(struct __sk_buff, mark)),
10508 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
10509 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10510 			BPF_MOV64_IMM(BPF_REG_0, 1),
10511 			BPF_EXIT_INSN(),
10512 			BPF_MOV64_IMM(BPF_REG_0, 1),
10513 			BPF_JMP_IMM(BPF_JA, 0, 0, -6),
10514 			BPF_MOV64_IMM(BPF_REG_0, 3),
10515 			BPF_EXIT_INSN(),
10516 		},
10517 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10518 		.errstr = "back-edge from insn",
10519 		.result = REJECT,
10520 	},
10521 	{
10522 		"calls: conditional call 6",
10523 		.insns = {
10524 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10525 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -2),
10526 			BPF_EXIT_INSN(),
10527 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10528 				    offsetof(struct __sk_buff, mark)),
10529 			BPF_EXIT_INSN(),
10530 		},
10531 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10532 		.errstr = "back-edge from insn",
10533 		.result = REJECT,
10534 	},
10535 	{
10536 		"calls: using r0 returned by callee",
10537 		.insns = {
10538 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10539 			BPF_EXIT_INSN(),
10540 			BPF_MOV64_IMM(BPF_REG_0, 2),
10541 			BPF_EXIT_INSN(),
10542 		},
10543 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10544 		.result = ACCEPT,
10545 	},
10546 	{
10547 		"calls: using uninit r0 from callee",
10548 		.insns = {
10549 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10550 			BPF_EXIT_INSN(),
10551 			BPF_EXIT_INSN(),
10552 		},
10553 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10554 		.errstr = "!read_ok",
10555 		.result = REJECT,
10556 	},
10557 	{
10558 		"calls: callee is using r1",
10559 		.insns = {
10560 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10561 			BPF_EXIT_INSN(),
10562 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10563 				    offsetof(struct __sk_buff, len)),
10564 			BPF_EXIT_INSN(),
10565 		},
10566 		.prog_type = BPF_PROG_TYPE_SCHED_ACT,
10567 		.result = ACCEPT,
10568 		.retval = TEST_DATA_LEN,
10569 	},
10570 	{
10571 		"calls: callee using args1",
10572 		.insns = {
10573 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10574 			BPF_EXIT_INSN(),
10575 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
10576 			BPF_EXIT_INSN(),
10577 		},
10578 		.errstr_unpriv = "allowed for root only",
10579 		.result_unpriv = REJECT,
10580 		.result = ACCEPT,
10581 		.retval = POINTER_VALUE,
10582 	},
10583 	{
10584 		"calls: callee using wrong args2",
10585 		.insns = {
10586 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10587 			BPF_EXIT_INSN(),
10588 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10589 			BPF_EXIT_INSN(),
10590 		},
10591 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10592 		.errstr = "R2 !read_ok",
10593 		.result = REJECT,
10594 	},
10595 	{
10596 		"calls: callee using two args",
10597 		.insns = {
10598 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10599 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
10600 				    offsetof(struct __sk_buff, len)),
10601 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
10602 				    offsetof(struct __sk_buff, len)),
10603 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10604 			BPF_EXIT_INSN(),
10605 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
10606 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
10607 			BPF_EXIT_INSN(),
10608 		},
10609 		.errstr_unpriv = "allowed for root only",
10610 		.result_unpriv = REJECT,
10611 		.result = ACCEPT,
10612 		.retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN,
10613 	},
10614 	{
10615 		"calls: callee changing pkt pointers",
10616 		.insns = {
10617 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
10618 				    offsetof(struct xdp_md, data)),
10619 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
10620 				    offsetof(struct xdp_md, data_end)),
10621 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
10622 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
10623 			BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
10624 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10625 			/* clear_all_pkt_pointers() has to walk all frames
10626 			 * to make sure that pkt pointers in the caller
10627 			 * are cleared when the callee calls a helper that
10628 			 * adjusts the packet size (see the sketch below)
10629 			 */
10630 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
10631 			BPF_MOV32_IMM(BPF_REG_0, 0),
10632 			BPF_EXIT_INSN(),
10633 			BPF_MOV64_IMM(BPF_REG_2, 0),
10634 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10635 				     BPF_FUNC_xdp_adjust_head),
10636 			BPF_EXIT_INSN(),
10637 		},
10638 		.result = REJECT,
10639 		.errstr = "R6 invalid mem access 'inv'",
10640 		.prog_type = BPF_PROG_TYPE_XDP,
10641 	},
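	/* Illustrative sketch (editorial, not one of the tests): the test above
	 * corresponds roughly to the following C, where the callee invokes a
	 * helper that can change the packet size:
	 *
	 *	static __noinline int sub(struct xdp_md *ctx)
	 *	{
	 *		return bpf_xdp_adjust_head(ctx, 0);
	 *	}
	 *
	 *	int prog(struct xdp_md *ctx)
	 *	{
	 *		void *data = (void *)(long)ctx->data;
	 *
	 *		// ... bounds check on data elided ...
	 *		sub(ctx);
	 *		return *(__u64 *)data;	// stale pkt pointer: rejected
	 *	}
	 *
	 * After such a call the caller must reload data/data_end from the
	 * context before touching the packet again, which is what the
	 * "R6 invalid mem access 'inv'" rejection enforces here.
	 */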
10642 	{
10643 		"calls: two calls with args",
10644 		.insns = {
10645 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10646 			BPF_EXIT_INSN(),
10647 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10648 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
10649 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
10650 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10651 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10652 			BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
10653 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
10654 			BPF_EXIT_INSN(),
10655 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10656 				    offsetof(struct __sk_buff, len)),
10657 			BPF_EXIT_INSN(),
10658 		},
10659 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
10660 		.result = ACCEPT,
10661 		.retval = TEST_DATA_LEN + TEST_DATA_LEN,
10662 	},
10663 	{
10664 		"calls: calls with stack arith",
10665 		.insns = {
10666 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10667 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
10668 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10669 			BPF_EXIT_INSN(),
10670 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
10671 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10672 			BPF_EXIT_INSN(),
10673 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
10674 			BPF_MOV64_IMM(BPF_REG_0, 42),
10675 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
10676 			BPF_EXIT_INSN(),
10677 		},
10678 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
10679 		.result = ACCEPT,
10680 		.retval = 42,
10681 	},
10682 	{
10683 		"calls: calls with misaligned stack access",
10684 		.insns = {
10685 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10686 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
10687 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10688 			BPF_EXIT_INSN(),
10689 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
10690 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10691 			BPF_EXIT_INSN(),
10692 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
10693 			BPF_MOV64_IMM(BPF_REG_0, 42),
10694 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
10695 			BPF_EXIT_INSN(),
10696 		},
10697 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
10698 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
10699 		.errstr = "misaligned stack access",
10700 		.result = REJECT,
10701 	},
10702 	{
10703 		"calls: calls control flow, jump test",
10704 		.insns = {
10705 			BPF_MOV64_IMM(BPF_REG_0, 42),
10706 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
10707 			BPF_MOV64_IMM(BPF_REG_0, 43),
10708 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10709 			BPF_JMP_IMM(BPF_JA, 0, 0, -3),
10710 			BPF_EXIT_INSN(),
10711 		},
10712 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
10713 		.result = ACCEPT,
10714 		.retval = 43,
10715 	},
10716 	{
10717 		"calls: calls control flow, jump test 2",
10718 		.insns = {
10719 			BPF_MOV64_IMM(BPF_REG_0, 42),
10720 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
10721 			BPF_MOV64_IMM(BPF_REG_0, 43),
10722 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10723 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
10724 			BPF_EXIT_INSN(),
10725 		},
10726 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
10727 		.errstr = "jump out of range from insn 1 to 4",
10728 		.result = REJECT,
10729 	},
10730 	{
10731 		"calls: two calls with bad jump",
10732 		.insns = {
10733 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10734 			BPF_EXIT_INSN(),
10735 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10736 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
10737 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
10738 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10739 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10740 			BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
10741 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
10742 			BPF_EXIT_INSN(),
10743 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10744 				    offsetof(struct __sk_buff, len)),
10745 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
10746 			BPF_EXIT_INSN(),
10747 		},
10748 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10749 		.errstr = "jump out of range from insn 11 to 9",
10750 		.result = REJECT,
10751 	},
10752 	{
10753 		"calls: recursive call. test1",
10754 		.insns = {
10755 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10756 			BPF_EXIT_INSN(),
10757 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
10758 			BPF_EXIT_INSN(),
10759 		},
10760 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10761 		.errstr = "back-edge",
10762 		.result = REJECT,
10763 	},
10764 	{
10765 		"calls: recursive call. test2",
10766 		.insns = {
10767 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10768 			BPF_EXIT_INSN(),
10769 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
10770 			BPF_EXIT_INSN(),
10771 		},
10772 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10773 		.errstr = "back-edge",
10774 		.result = REJECT,
10775 	},
10776 	{
10777 		"calls: unreachable code",
10778 		.insns = {
10779 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10780 			BPF_EXIT_INSN(),
10781 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10782 			BPF_EXIT_INSN(),
10783 			BPF_MOV64_IMM(BPF_REG_0, 0),
10784 			BPF_EXIT_INSN(),
10785 			BPF_MOV64_IMM(BPF_REG_0, 0),
10786 			BPF_EXIT_INSN(),
10787 		},
10788 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10789 		.errstr = "unreachable insn 6",
10790 		.result = REJECT,
10791 	},
10792 	{
10793 		"calls: invalid call",
10794 		.insns = {
10795 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10796 			BPF_EXIT_INSN(),
10797 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
10798 			BPF_EXIT_INSN(),
10799 		},
10800 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10801 		.errstr = "invalid destination",
10802 		.result = REJECT,
10803 	},
10804 	{
10805 		"calls: invalid call 2",
10806 		.insns = {
10807 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10808 			BPF_EXIT_INSN(),
10809 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
10810 			BPF_EXIT_INSN(),
10811 		},
10812 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10813 		.errstr = "invalid destination",
10814 		.result = REJECT,
10815 	},
10816 	{
10817 		"calls: jumping across function bodies. test1",
10818 		.insns = {
10819 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10820 			BPF_MOV64_IMM(BPF_REG_0, 0),
10821 			BPF_EXIT_INSN(),
10822 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
10823 			BPF_EXIT_INSN(),
10824 		},
10825 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10826 		.errstr = "jump out of range",
10827 		.result = REJECT,
10828 	},
10829 	{
10830 		"calls: jumping across function bodies. test2",
10831 		.insns = {
10832 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
10833 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10834 			BPF_MOV64_IMM(BPF_REG_0, 0),
10835 			BPF_EXIT_INSN(),
10836 			BPF_EXIT_INSN(),
10837 		},
10838 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10839 		.errstr = "jump out of range",
10840 		.result = REJECT,
10841 	},
10842 	{
10843 		"calls: call without exit",
10844 		.insns = {
10845 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10846 			BPF_EXIT_INSN(),
10847 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10848 			BPF_EXIT_INSN(),
10849 			BPF_MOV64_IMM(BPF_REG_0, 0),
10850 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
10851 		},
10852 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10853 		.errstr = "not an exit",
10854 		.result = REJECT,
10855 	},
10856 	{
10857 		"calls: call into middle of ld_imm64",
10858 		.insns = {
10859 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10860 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10861 			BPF_MOV64_IMM(BPF_REG_0, 0),
10862 			BPF_EXIT_INSN(),
10863 			BPF_LD_IMM64(BPF_REG_0, 0),
10864 			BPF_EXIT_INSN(),
10865 		},
10866 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10867 		.errstr = "last insn",
10868 		.result = REJECT,
10869 	},
10870 	{
10871 		"calls: call into middle of other call",
10872 		.insns = {
10873 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10874 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10875 			BPF_MOV64_IMM(BPF_REG_0, 0),
10876 			BPF_EXIT_INSN(),
10877 			BPF_MOV64_IMM(BPF_REG_0, 0),
10878 			BPF_MOV64_IMM(BPF_REG_0, 0),
10879 			BPF_EXIT_INSN(),
10880 		},
10881 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10882 		.errstr = "last insn",
10883 		.result = REJECT,
10884 	},
10885 	{
10886 		"calls: ld_abs with changing ctx data in callee",
10887 		.insns = {
10888 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10889 			BPF_LD_ABS(BPF_B, 0),
10890 			BPF_LD_ABS(BPF_H, 0),
10891 			BPF_LD_ABS(BPF_W, 0),
10892 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
10893 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
10894 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
10895 			BPF_LD_ABS(BPF_B, 0),
10896 			BPF_LD_ABS(BPF_H, 0),
10897 			BPF_LD_ABS(BPF_W, 0),
10898 			BPF_EXIT_INSN(),
10899 			BPF_MOV64_IMM(BPF_REG_2, 1),
10900 			BPF_MOV64_IMM(BPF_REG_3, 2),
10901 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10902 				     BPF_FUNC_skb_vlan_push),
10903 			BPF_EXIT_INSN(),
10904 		},
10905 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
10906 		.errstr = "BPF_LD_[ABS|IND] instructions cannot be mixed",
10907 		.result = REJECT,
10908 	},
10909 	{
10910 		"calls: two calls with bad fallthrough",
10911 		.insns = {
10912 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10913 			BPF_EXIT_INSN(),
10914 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10915 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
10916 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
10917 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10918 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10919 			BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
10920 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
10921 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
10922 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10923 				    offsetof(struct __sk_buff, len)),
10924 			BPF_EXIT_INSN(),
10925 		},
10926 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10927 		.errstr = "not an exit",
10928 		.result = REJECT,
10929 	},
10930 	{
10931 		"calls: two calls with stack read",
10932 		.insns = {
10933 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10934 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10935 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10936 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10937 			BPF_EXIT_INSN(),
10938 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10939 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
10940 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
10941 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10942 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10943 			BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
10944 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
10945 			BPF_EXIT_INSN(),
10946 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10947 			BPF_EXIT_INSN(),
10948 		},
10949 		.prog_type = BPF_PROG_TYPE_XDP,
10950 		.result = ACCEPT,
10951 	},
10952 	{
10953 		"calls: two calls with stack write",
10954 		.insns = {
10955 			/* main prog */
10956 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10957 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
10958 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
10959 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10960 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
10961 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10962 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
10963 			BPF_EXIT_INSN(),
10964 
10965 			/* subprog 1 */
10966 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10967 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
10968 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
10969 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
10970 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10971 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10972 			BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
10973 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
10974 			/* write into stack frame of main prog */
10975 			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
10976 			BPF_EXIT_INSN(),
10977 
10978 			/* subprog 2 */
10979 			/* read from stack frame of main prog */
10980 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10981 			BPF_EXIT_INSN(),
10982 		},
10983 		.prog_type = BPF_PROG_TYPE_XDP,
10984 		.result = ACCEPT,
10985 	},
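	/* Illustrative sketch (editorial, not one of the tests): the "two calls
	 * with stack read"/"stack write" entries above pass pointers into the
	 * main program's stack frame down to the subprograms, roughly:
	 *
	 *	static __noinline void sub(__u64 *a, __u64 *res)
	 *	{
	 *		*res = *a;	// access the caller's frame
	 *	}
	 *
	 *	int prog(struct xdp_md *ctx)
	 *	{
	 *		__u64 a = 0, res;
	 *
	 *		sub(&a, &res);
	 *		return res;
	 *	}
	 *
	 * The verifier tracks such arguments as stack pointers belonging to the
	 * caller's frame, so the cross-frame reads and writes are accepted.
	 */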
10986 	{
10987 		"calls: stack overflow using two frames (pre-call access)",
10988 		.insns = {
10989 			/* prog 1 */
10990 			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
10991 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
10992 			BPF_EXIT_INSN(),
10993 
10994 			/* prog 2 */
10995 			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
10996 			BPF_MOV64_IMM(BPF_REG_0, 0),
10997 			BPF_EXIT_INSN(),
10998 		},
10999 		.prog_type = BPF_PROG_TYPE_XDP,
11000 		.errstr = "combined stack size",
11001 		.result = REJECT,
11002 	},
11003 	{
11004 		"calls: stack overflow using two frames (post-call access)",
11005 		.insns = {
11006 			/* prog 1 */
11007 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
11008 			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11009 			BPF_EXIT_INSN(),
11010 
11011 			/* prog 2 */
11012 			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11013 			BPF_MOV64_IMM(BPF_REG_0, 0),
11014 			BPF_EXIT_INSN(),
11015 		},
11016 		.prog_type = BPF_PROG_TYPE_XDP,
11017 		.errstr = "combined stack size",
11018 		.result = REJECT,
11019 	},
11020 	{
11021 		"calls: stack depth check using three frames. test1",
11022 		.insns = {
11023 			/* main */
11024 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
11025 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
11026 			BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
11027 			BPF_MOV64_IMM(BPF_REG_0, 0),
11028 			BPF_EXIT_INSN(),
11029 			/* A */
11030 			BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
11031 			BPF_EXIT_INSN(),
11032 			/* B */
11033 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
11034 			BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
11035 			BPF_EXIT_INSN(),
11036 		},
11037 		.prog_type = BPF_PROG_TYPE_XDP,
11038 		/* stack_main=32, stack_A=256, stack_B=64
11039 		 * and max(main+A, main+A+B) < 512
11040 		 */
11041 		.result = ACCEPT,
11042 	},
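	/* Worked out: the deepest chains above are main+A = 32+256 = 288 and
	 * main+A+B = 32+256+64 = 352 bytes, both below the 512-byte combined
	 * stack limit, hence ACCEPT.
	 */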
11043 	{
11044 		"calls: stack depth check using three frames. test2",
11045 		.insns = {
11046 			/* main */
11047 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
11048 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
11049 			BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
11050 			BPF_MOV64_IMM(BPF_REG_0, 0),
11051 			BPF_EXIT_INSN(),
11052 			/* A */
11053 			BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
11054 			BPF_EXIT_INSN(),
11055 			/* B */
11056 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
11057 			BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
11058 			BPF_EXIT_INSN(),
11059 		},
11060 		.prog_type = BPF_PROG_TYPE_XDP,
11061 		/* stack_main=32, stack_A=64, stack_B=256
11062 		 * and max(main+A, main+A+B) < 512
11063 		 */
11064 		.result = ACCEPT,
11065 	},
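	/* Worked out: here main+A = 32+64 = 96 and main+A+B = 32+64+256 = 352
	 * bytes, again below the 512-byte limit, hence ACCEPT.
	 */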
11066 	{
11067 		"calls: stack depth check using three frames. test3",
11068 		.insns = {
11069 			/* main */
11070 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11071 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
11072 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11073 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
11074 			BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
11075 			BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
11076 			BPF_MOV64_IMM(BPF_REG_0, 0),
11077 			BPF_EXIT_INSN(),
11078 			/* A */
11079 			BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
11080 			BPF_EXIT_INSN(),
11081 			BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
11082 			BPF_JMP_IMM(BPF_JA, 0, 0, -3),
11083 			/* B */
11084 			BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
11085 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
11086 			BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
11087 			BPF_EXIT_INSN(),
11088 		},
11089 		.prog_type = BPF_PROG_TYPE_XDP,
11090 		/* stack_main=64, stack_A=224, stack_B=256
11091 		 * and max(main+A, main+A+B) > 512
11092 		 */
11093 		.errstr = "combined stack",
11094 		.result = REJECT,
11095 	},
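	/* Worked out: here main+A = 64+224 = 288 bytes, but main+A+B =
	 * 64+224+256 = 544 bytes exceeds the 512-byte limit, hence the
	 * "combined stack" rejection.
	 */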
11096 	{
11097 		"calls: stack depth check using three frames. test4",
11098 		/* void main(void) {
11099 		 *   func1(0);
11100 		 *   func1(1);
11101 		 *   func2(1);
11102 		 * }
11103 		 * void func1(int alloc_or_recurse) {
11104 		 *   if (alloc_or_recurse) {
11105 		 *     frame_pointer[-300] = 0;
11106 		 *   } else {
11107 		 *     func2(alloc_or_recurse);
11108 		 *   }
11109 		 * }
11110 		 * void func2(int alloc_or_recurse) {
11111 		 *   if (alloc_or_recurse) {
11112 		 *     frame_pointer[-300] = 0;
11113 		 *   }
11114 		 * }
11115 		 */
11116 		.insns = {
11117 			/* main */
11118 			BPF_MOV64_IMM(BPF_REG_1, 0),
11119 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
11120 			BPF_MOV64_IMM(BPF_REG_1, 1),
11121 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
11122 			BPF_MOV64_IMM(BPF_REG_1, 1),
11123 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
11124 			BPF_MOV64_IMM(BPF_REG_0, 0),
11125 			BPF_EXIT_INSN(),
11126 			/* A */
11127 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
11128 			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11129 			BPF_EXIT_INSN(),
11130 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
11131 			BPF_EXIT_INSN(),
11132 			/* B */
11133 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
11134 			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11135 			BPF_EXIT_INSN(),
11136 		},
11137 		.prog_type = BPF_PROG_TYPE_XDP,
11138 		.result = REJECT,
11139 		.errstr = "combined stack",
11140 	},
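	/* Editorial note: the verifier sizes each subprogram by the deepest
	 * stack slot it may touch on any path, so func1 and func2 each count
	 * as roughly 300 bytes even though only one branch allocates.  The
	 * chain main->func1->func2 therefore comes to about 600 bytes and
	 * exceeds the 512-byte combined stack limit, hence the rejection.
	 */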
11141 	{
11142 		"calls: stack depth check using three frames. test5",
11143 		.insns = {
11144 			/* main */
11145 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
11146 			BPF_EXIT_INSN(),
11147 			/* A */
11148 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
11149 			BPF_EXIT_INSN(),
11150 			/* B */
11151 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
11152 			BPF_EXIT_INSN(),
11153 			/* C */
11154 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
11155 			BPF_EXIT_INSN(),
11156 			/* D */
11157 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
11158 			BPF_EXIT_INSN(),
11159 			/* E */
11160 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
11161 			BPF_EXIT_INSN(),
11162 			/* F */
11163 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
11164 			BPF_EXIT_INSN(),
11165 			/* G */
11166 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
11167 			BPF_EXIT_INSN(),
11168 			/* H */
11169 			BPF_MOV64_IMM(BPF_REG_0, 0),
11170 			BPF_EXIT_INSN(),
11171 		},
11172 		.prog_type = BPF_PROG_TYPE_XDP,
11173 		.errstr = "call stack",
11174 		.result = REJECT,
11175 	},
11176 	{
11177 		"calls: spill into caller stack frame",
11178 		.insns = {
11179 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11180 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11181 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11182 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11183 			BPF_EXIT_INSN(),
11184 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
11185 			BPF_MOV64_IMM(BPF_REG_0, 0),
11186 			BPF_EXIT_INSN(),
11187 		},
11188 		.prog_type = BPF_PROG_TYPE_XDP,
11189 		.errstr = "cannot spill",
11190 		.result = REJECT,
11191 	},
11192 	{
11193 		"calls: write into caller stack frame",
11194 		.insns = {
11195 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11196 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11197 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11198 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11199 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
11200 			BPF_EXIT_INSN(),
11201 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
11202 			BPF_MOV64_IMM(BPF_REG_0, 0),
11203 			BPF_EXIT_INSN(),
11204 		},
11205 		.prog_type = BPF_PROG_TYPE_XDP,
11206 		.result = ACCEPT,
11207 		.retval = 42,
11208 	},
11209 	{
11210 		"calls: write into callee stack frame",
11211 		.insns = {
11212 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11213 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
11214 			BPF_EXIT_INSN(),
11215 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
11216 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
11217 			BPF_EXIT_INSN(),
11218 		},
11219 		.prog_type = BPF_PROG_TYPE_XDP,
11220 		.errstr = "cannot return stack pointer",
11221 		.result = REJECT,
11222 	},
11223 	{
11224 		"calls: two calls with stack write and void return",
11225 		.insns = {
11226 			/* main prog */
11227 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11228 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11229 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11230 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11231 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11232 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11233 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
11234 			BPF_EXIT_INSN(),
11235 
11236 			/* subprog 1 */
11237 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11238 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11239 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11240 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
11241 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11242 			BPF_EXIT_INSN(),
11243 
11244 			/* subprog 2 */
11245 			/* write into stack frame of main prog */
11246 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
11247 			BPF_EXIT_INSN(), /* void return */
11248 		},
11249 		.prog_type = BPF_PROG_TYPE_XDP,
11250 		.result = ACCEPT,
11251 	},
11252 	{
11253 		"calls: ambiguous return value",
11254 		.insns = {
11255 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11256 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
11257 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
11258 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11259 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11260 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
11261 			BPF_EXIT_INSN(),
11262 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
11263 			BPF_MOV64_IMM(BPF_REG_0, 0),
11264 			BPF_EXIT_INSN(),
11265 		},
11266 		.errstr_unpriv = "allowed for root only",
11267 		.result_unpriv = REJECT,
11268 		.errstr = "R0 !read_ok",
11269 		.result = REJECT,
11270 	},
11271 	{
11272 		"calls: two calls that return map_value",
11273 		.insns = {
11274 			/* main prog */
11275 			/* pass fp-16, fp-8 into a function */
11276 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11277 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11278 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11279 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11280 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
11281 
11282 			/* fetch map_value_ptr from the stack of this function */
11283 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
11284 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
11285 			/* write into map value */
11286 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11287 			/* fetch second map_value_ptr from the stack */
11288 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
11289 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
11290 			/* write into map value */
11291 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11292 			BPF_MOV64_IMM(BPF_REG_0, 0),
11293 			BPF_EXIT_INSN(),
11294 
11295 			/* subprog 1 */
11296 			/* call 3rd function twice */
11297 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11298 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11299 			/* first time with fp-8 */
11300 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11301 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
11302 			/* second time with fp-16 */
11303 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11304 			BPF_EXIT_INSN(),
11305 
11306 			/* subprog 2 */
11307 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11308 			/* lookup from map */
11309 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11310 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11311 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11312 			BPF_LD_MAP_FD(BPF_REG_1, 0),
11313 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11314 				     BPF_FUNC_map_lookup_elem),
11315 			/* write map_value_ptr into stack frame of main prog */
11316 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11317 			BPF_MOV64_IMM(BPF_REG_0, 0),
11318 			BPF_EXIT_INSN(), /* return 0 */
11319 		},
11320 		.prog_type = BPF_PROG_TYPE_XDP,
11321 		.fixup_map_hash_8b = { 23 },
11322 		.result = ACCEPT,
11323 	},
11324 	{
11325 		"calls: two calls that return map_value with bool condition",
11326 		.insns = {
11327 			/* main prog */
11328 			/* pass fp-16, fp-8 into a function */
11329 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11330 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11331 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11332 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11333 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11334 			BPF_MOV64_IMM(BPF_REG_0, 0),
11335 			BPF_EXIT_INSN(),
11336 
11337 			/* subprog 1 */
11338 			/* call 3rd function twice */
11339 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11340 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11341 			/* first time with fp-8 */
11342 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
11343 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
11344 			/* fetch map_value_ptr from the stack of this function */
11345 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
11346 			/* write into map value */
11347 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11348 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
11349 			/* second time with fp-16 */
11350 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11351 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
11352 			/* fetch second map_value_ptr from the stack */
11353 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
11354 			/* write into map value */
11355 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11356 			BPF_EXIT_INSN(),
11357 
11358 			/* subprog 2 */
11359 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11360 			/* lookup from map */
11361 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11362 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11363 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11364 			BPF_LD_MAP_FD(BPF_REG_1, 0),
11365 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11366 				     BPF_FUNC_map_lookup_elem),
11367 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11368 			BPF_MOV64_IMM(BPF_REG_0, 0),
11369 			BPF_EXIT_INSN(), /* return 0 */
11370 			/* write map_value_ptr into stack frame of main prog */
11371 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11372 			BPF_MOV64_IMM(BPF_REG_0, 1),
11373 			BPF_EXIT_INSN(), /* return 1 */
11374 		},
11375 		.prog_type = BPF_PROG_TYPE_XDP,
11376 		.fixup_map_hash_8b = { 23 },
11377 		.result = ACCEPT,
11378 	},
11379 	{
11380 		"calls: two calls that return map_value with incorrect bool check",
11381 		.insns = {
11382 			/* main prog */
11383 			/* pass fp-16, fp-8 into a function */
11384 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11385 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11386 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11387 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11388 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11389 			BPF_MOV64_IMM(BPF_REG_0, 0),
11390 			BPF_EXIT_INSN(),
11391 
11392 			/* subprog 1 */
11393 			/* call 3rd function twice */
11394 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11395 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11396 			/* first time with fp-8 */
11397 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
11398 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
11399 			/* fetch map_value_ptr from the stack of this function */
11400 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
11401 			/* write into map value */
11402 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11403 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
11404 			/* second time with fp-16 */
11405 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11406 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11407 			/* fetch second map_value_ptr from the stack */
11408 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
11409 			/* write into map value */
11410 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11411 			BPF_EXIT_INSN(),
11412 
11413 			/* subprog 2 */
11414 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11415 			/* lookup from map */
11416 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11417 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11418 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11419 			BPF_LD_MAP_FD(BPF_REG_1, 0),
11420 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11421 				     BPF_FUNC_map_lookup_elem),
11422 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11423 			BPF_MOV64_IMM(BPF_REG_0, 0),
11424 			BPF_EXIT_INSN(), /* return 0 */
11425 			/* write map_value_ptr into stack frame of main prog */
11426 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11427 			BPF_MOV64_IMM(BPF_REG_0, 1),
11428 			BPF_EXIT_INSN(), /* return 1 */
11429 		},
11430 		.prog_type = BPF_PROG_TYPE_XDP,
11431 		.fixup_map_hash_8b = { 23 },
11432 		.result = REJECT,
11433 		.errstr = "invalid read from stack off -16+0 size 8",
11434 	},
11435 	{
11436 		"calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
11437 		.insns = {
11438 			/* main prog */
11439 			/* pass fp-16, fp-8 into a function */
11440 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11441 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11442 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11443 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11444 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11445 			BPF_MOV64_IMM(BPF_REG_0, 0),
11446 			BPF_EXIT_INSN(),
11447 
11448 			/* subprog 1 */
11449 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11450 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11451 			/* 1st lookup from map */
11452 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11453 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11454 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11455 			BPF_LD_MAP_FD(BPF_REG_1, 0),
11456 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11457 				     BPF_FUNC_map_lookup_elem),
11458 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11459 			BPF_MOV64_IMM(BPF_REG_8, 0),
11460 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11461 			/* write map_value_ptr into stack frame of main prog at fp-8 */
11462 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11463 			BPF_MOV64_IMM(BPF_REG_8, 1),
11464 
11465 			/* 2nd lookup from map */
11466 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
11467 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11468 			BPF_LD_MAP_FD(BPF_REG_1, 0),
11469 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
11470 				     BPF_FUNC_map_lookup_elem),
11471 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11472 			BPF_MOV64_IMM(BPF_REG_9, 0),
11473 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11474 			/* write map_value_ptr into stack frame of main prog at fp-16 */
11475 			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
11476 			BPF_MOV64_IMM(BPF_REG_9, 1),
11477 
11478 			/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
11479 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
11480 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
11481 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
11482 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
11483 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),  /* 34 */
11484 			BPF_EXIT_INSN(),
11485 
11486 			/* subprog 2 */
11487 			/* if arg2 == 1 do *arg1 = 0 */
11488 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
11489 			/* fetch map_value_ptr from the stack of this function */
11490 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
11491 			/* write into map value */
11492 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11493 
11494 			/* if arg4 == 1 do *arg3 = 0 */
11495 			BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
11496 			/* fetch map_value_ptr from the stack of this function */
11497 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
11498 			/* write into map value */
11499 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
11500 			BPF_EXIT_INSN(),
11501 		},
11502 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
11503 		.fixup_map_hash_8b = { 12, 22 },
11504 		.result = REJECT,
11505 		.errstr = "invalid access to map value, value_size=8 off=2 size=8",
11506 	},
11507 	{
11508 		"calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
11509 		.insns = {
11510 			/* main prog */
11511 			/* pass fp-16, fp-8 into a function */
11512 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11513 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11514 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11515 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11516 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11517 			BPF_MOV64_IMM(BPF_REG_0, 0),
11518 			BPF_EXIT_INSN(),
11519 
11520 			/* subprog 1 */
11521 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11522 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11523 			/* 1st lookup from map */
11524 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11525 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11526 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11527 			BPF_LD_MAP_FD(BPF_REG_1, 0),
11528 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11529 				     BPF_FUNC_map_lookup_elem),
11530 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11531 			BPF_MOV64_IMM(BPF_REG_8, 0),
11532 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11533 			/* write map_value_ptr into stack frame of main prog at fp-8 */
11534 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11535 			BPF_MOV64_IMM(BPF_REG_8, 1),
11536 
11537 			/* 2nd lookup from map */
11538 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
11539 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11540 			BPF_LD_MAP_FD(BPF_REG_1, 0),
11541 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
11542 				     BPF_FUNC_map_lookup_elem),
11543 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11544 			BPF_MOV64_IMM(BPF_REG_9, 0),
11545 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11546 			/* write map_value_ptr into stack frame of main prog at fp-16 */
11547 			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
11548 			BPF_MOV64_IMM(BPF_REG_9, 1),
11549 
11550 			/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
11551 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
11552 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
11553 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
11554 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
11555 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),  /* 34 */
11556 			BPF_EXIT_INSN(),
11557 
11558 			/* subprog 2 */
11559 			/* if arg2 == 1 do *arg1 = 0 */
11560 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
11561 			/* fetch map_value_ptr from the stack of this function */
11562 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
11563 			/* write into map value */
11564 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11565 
11566 			/* if arg4 == 1 do *arg3 = 0 */
11567 			BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
11568 			/* fetch map_value_ptr from the stack of this function */
11569 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
11570 			/* write into map value */
11571 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11572 			BPF_EXIT_INSN(),
11573 		},
11574 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
11575 		.fixup_map_hash_8b = { 12, 22 },
11576 		.result = ACCEPT,
11577 	},
11578 	{
11579 		"calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
11580 		.insns = {
11581 			/* main prog */
11582 			/* pass fp-16, fp-8 into a function */
11583 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11584 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11585 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11586 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11587 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
11588 			BPF_MOV64_IMM(BPF_REG_0, 0),
11589 			BPF_EXIT_INSN(),
11590 
11591 			/* subprog 1 */
11592 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11593 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11594 			/* 1st lookup from map */
11595 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
11596 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11597 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
11598 			BPF_LD_MAP_FD(BPF_REG_1, 0),
11599 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11600 				     BPF_FUNC_map_lookup_elem),
11601 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11602 			BPF_MOV64_IMM(BPF_REG_8, 0),
11603 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11604 			/* write map_value_ptr into stack frame of main prog at fp-8 */
11605 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11606 			BPF_MOV64_IMM(BPF_REG_8, 1),
11607 
11608 			/* 2nd lookup from map */
11609 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11610 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
11611 			BPF_LD_MAP_FD(BPF_REG_1, 0),
11612 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11613 				     BPF_FUNC_map_lookup_elem),
11614 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11615 			BPF_MOV64_IMM(BPF_REG_9, 0), /* 26 */
11616 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11617 			/* write map_value_ptr into stack frame of main prog at fp-16 */
11618 			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
11619 			BPF_MOV64_IMM(BPF_REG_9, 1),
11620 
11621 			/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
11622 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
11623 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
11624 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
11625 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
11626 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), /* 34 */
11627 			BPF_JMP_IMM(BPF_JA, 0, 0, -30),
11628 
11629 			/* subprog 2 */
11630 			/* if arg2 == 1 do *arg1 = 0 */
11631 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
11632 			/* fetch map_value_ptr from the stack of this function */
11633 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
11634 			/* write into map value */
11635 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11636 
11637 			/* if arg4 == 1 do *arg3 = 0 */
11638 			BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
11639 			/* fetch map_value_ptr from the stack of this function */
11640 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
11641 			/* write into map value */
11642 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
11643 			BPF_JMP_IMM(BPF_JA, 0, 0, -8),
11644 		},
11645 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
11646 		.fixup_map_hash_8b = { 12, 22 },
11647 		.result = REJECT,
11648 		.errstr = "invalid access to map value, value_size=8 off=2 size=8",
11649 	},
11650 	{
11651 		"calls: two calls that receive map_value_ptr_or_null via arg. test1",
11652 		.insns = {
11653 			/* main prog */
11654 			/* pass fp-16, fp-8 into a function */
11655 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11656 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11657 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11658 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11659 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11660 			BPF_MOV64_IMM(BPF_REG_0, 0),
11661 			BPF_EXIT_INSN(),
11662 
11663 			/* subprog 1 */
11664 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11665 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11666 			/* 1st lookup from map */
11667 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11668 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11669 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11670 			BPF_LD_MAP_FD(BPF_REG_1, 0),
11671 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11672 				     BPF_FUNC_map_lookup_elem),
11673 			/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
11674 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11675 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11676 			BPF_MOV64_IMM(BPF_REG_8, 0),
11677 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11678 			BPF_MOV64_IMM(BPF_REG_8, 1),
11679 
11680 			/* 2nd lookup from map */
11681 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11682 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11683 			BPF_LD_MAP_FD(BPF_REG_1, 0),
11684 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11685 				     BPF_FUNC_map_lookup_elem),
11686 			/* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
11687 			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
11688 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11689 			BPF_MOV64_IMM(BPF_REG_9, 0),
11690 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11691 			BPF_MOV64_IMM(BPF_REG_9, 1),
11692 
11693 			/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
11694 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11695 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
11696 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
11697 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
11698 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11699 			BPF_EXIT_INSN(),
11700 
11701 			/* subprog 2 */
11702 			/* if arg2 == 1 do *arg1 = 0 */
11703 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
11704 			/* fetch map_value_ptr from the stack of this function */
11705 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
11706 			/* write into map value */
11707 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11708 
11709 			/* if arg4 == 1 do *arg3 = 0 */
11710 			BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
11711 			/* fetch map_value_ptr from the stack of this function */
11712 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
11713 			/* write into map value */
11714 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11715 			BPF_EXIT_INSN(),
11716 		},
11717 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
11718 		.fixup_map_hash_8b = { 12, 22 },
11719 		.result = ACCEPT,
11720 	},
11721 	{
11722 		"calls: two calls that receive map_value_ptr_or_null via arg. test2",
11723 		.insns = {
11724 			/* main prog */
11725 			/* pass fp-16, fp-8 into a function */
11726 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11727 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11728 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11729 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11730 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11731 			BPF_MOV64_IMM(BPF_REG_0, 0),
11732 			BPF_EXIT_INSN(),
11733 
11734 			/* subprog 1 */
11735 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11736 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11737 			/* 1st lookup from map */
11738 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11739 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11740 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11741 			BPF_LD_MAP_FD(BPF_REG_1, 0),
11742 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11743 				     BPF_FUNC_map_lookup_elem),
11744 			/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
11745 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11746 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11747 			BPF_MOV64_IMM(BPF_REG_8, 0),
11748 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11749 			BPF_MOV64_IMM(BPF_REG_8, 1),
11750 
11751 			/* 2nd lookup from map */
11752 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11753 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11754 			BPF_LD_MAP_FD(BPF_REG_1, 0),
11755 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11756 				     BPF_FUNC_map_lookup_elem),
11757 			/* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
11758 			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
11759 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11760 			BPF_MOV64_IMM(BPF_REG_9, 0),
11761 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11762 			BPF_MOV64_IMM(BPF_REG_9, 1),
11763 
11764 			/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
11765 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11766 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
11767 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
11768 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
11769 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11770 			BPF_EXIT_INSN(),
11771 
11772 			/* subprog 2 */
11773 			/* if arg2 == 1 do *arg1 = 0 */
11774 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
11775 			/* fetch map_value_ptr from the stack of this function */
11776 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
11777 			/* write into map value */
11778 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11779 
11780 			/* if arg4 == 0 do *arg3 = 0 */
11781 			BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2),
11782 			/* fetch map_value_ptr from the stack of this function */
11783 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
11784 			/* write into map value */
11785 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11786 			BPF_EXIT_INSN(),
11787 		},
11788 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
11789 		.fixup_map_hash_8b = { 12, 22 },
11790 		.result = REJECT,
11791 		.errstr = "R0 invalid mem access 'inv'",
11792 	},
11793 	{
11794 		"calls: pkt_ptr spill into caller stack",
11795 		.insns = {
11796 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11797 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11798 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11799 			BPF_EXIT_INSN(),
11800 
11801 			/* subprog 1 */
11802 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11803 				    offsetof(struct __sk_buff, data)),
11804 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11805 				    offsetof(struct __sk_buff, data_end)),
11806 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11807 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11808 			/* spill unchecked pkt_ptr into stack of caller */
11809 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11810 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
11811 			/* now the pkt range is verified, read pkt_ptr from stack */
11812 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
11813 			/* write 4 bytes into packet */
11814 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11815 			BPF_EXIT_INSN(),
11816 		},
11817 		.result = ACCEPT,
11818 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
11819 		.retval = POINTER_VALUE,
11820 	},
11821 	{
11822 		"calls: pkt_ptr spill into caller stack 2",
11823 		.insns = {
11824 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11825 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11826 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11827 			/* Marking is still kept, but it is not safe in all cases. */
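			/* Not safe here: if the callee's range check failed, fp-8
			 * still holds an unchecked pkt_ptr, yet the caller reads it
			 * back and writes through it unconditionally.
			 */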
11828 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
11829 			BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
11830 			BPF_EXIT_INSN(),
11831 
11832 			/* subprog 1 */
11833 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11834 				    offsetof(struct __sk_buff, data)),
11835 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11836 				    offsetof(struct __sk_buff, data_end)),
11837 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11838 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11839 			/* spill unchecked pkt_ptr into stack of caller */
11840 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11841 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
11842 			/* now the pkt range is verified, read pkt_ptr from stack */
11843 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
11844 			/* write 4 bytes into packet */
11845 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11846 			BPF_EXIT_INSN(),
11847 		},
11848 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
11849 		.errstr = "invalid access to packet",
11850 		.result = REJECT,
11851 	},
11852 	{
11853 		"calls: pkt_ptr spill into caller stack 3",
11854 		.insns = {
11855 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11856 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11857 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11858 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
11859 			/* Marking is still kept and safe here. */
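			/* Safe: the check above falls through only when the callee
			 * returned 1, i.e. after its pkt range check succeeded.
			 */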
11860 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
11861 			BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
11862 			BPF_EXIT_INSN(),
11863 
11864 			/* subprog 1 */
11865 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11866 				    offsetof(struct __sk_buff, data)),
11867 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11868 				    offsetof(struct __sk_buff, data_end)),
11869 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11870 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11871 			/* spill unchecked pkt_ptr into stack of caller */
11872 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11873 			BPF_MOV64_IMM(BPF_REG_5, 0),
11874 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
11875 			BPF_MOV64_IMM(BPF_REG_5, 1),
11876 			/* now the pkt range is verified, read pkt_ptr from stack */
11877 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
11878 			/* write 4 bytes into packet */
11879 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11880 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
11881 			BPF_EXIT_INSN(),
11882 		},
11883 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
11884 		.result = ACCEPT,
11885 		.retval = 1,
11886 	},
11887 	{
11888 		"calls: pkt_ptr spill into caller stack 4",
11889 		.insns = {
11890 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11891 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11892 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11893 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
11894 			/* Check marking propagated. */
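			/* As in test 3 the spill is used only when the callee
			 * returned 1; unlike test 3 the callee never reloads it, so
			 * this checks that the verified range reached the caller's
			 * stack slot.
			 */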
11895 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
11896 			BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
11897 			BPF_EXIT_INSN(),
11898 
11899 			/* subprog 1 */
11900 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11901 				    offsetof(struct __sk_buff, data)),
11902 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11903 				    offsetof(struct __sk_buff, data_end)),
11904 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11905 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11906 			/* spill unchecked pkt_ptr into stack of caller */
11907 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11908 			BPF_MOV64_IMM(BPF_REG_5, 0),
11909 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
11910 			BPF_MOV64_IMM(BPF_REG_5, 1),
11911 			/* don't read back pkt_ptr from stack here */
11912 			/* write 4 bytes into packet */
11913 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11914 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
11915 			BPF_EXIT_INSN(),
11916 		},
11917 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
11918 		.result = ACCEPT,
11919 		.retval = 1,
11920 	},
11921 	{
11922 		"calls: pkt_ptr spill into caller stack 5",
11923 		.insns = {
11924 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11925 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11926 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
11927 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11928 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
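			/* fp-8 holds the spilled ctx ptr on one path and a pkt_ptr
			 * on the other, so the load below would use a single insn
			 * with two different pointer types.
			 */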
11929 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
11930 			BPF_EXIT_INSN(),
11931 
11932 			/* subprog 1 */
11933 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11934 				    offsetof(struct __sk_buff, data)),
11935 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11936 				    offsetof(struct __sk_buff, data_end)),
11937 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11938 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11939 			BPF_MOV64_IMM(BPF_REG_5, 0),
11940 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
11941 			/* spill checked pkt_ptr into stack of caller */
11942 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11943 			BPF_MOV64_IMM(BPF_REG_5, 1),
11944 			/* don't read back pkt_ptr from stack here */
11945 			/* write 4 bytes into packet */
11946 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11947 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
11948 			BPF_EXIT_INSN(),
11949 		},
11950 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
11951 		.errstr = "same insn cannot be used with different",
11952 		.result = REJECT,
11953 	},
11954 	{
11955 		"calls: pkt_ptr spill into caller stack 6",
11956 		.insns = {
11957 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11958 				    offsetof(struct __sk_buff, data_end)),
11959 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11960 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11961 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11962 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
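			/* If the callee skipped its spill, fp-8 still holds
			 * data_end, which must not be dereferenced below.
			 */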
11963 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
11964 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
11965 			BPF_EXIT_INSN(),
11966 
11967 			/* subprog 1 */
11968 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11969 				    offsetof(struct __sk_buff, data)),
11970 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11971 				    offsetof(struct __sk_buff, data_end)),
11972 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11973 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11974 			BPF_MOV64_IMM(BPF_REG_5, 0),
11975 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
11976 			/* spill checked pkt_ptr into stack of caller */
11977 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11978 			BPF_MOV64_IMM(BPF_REG_5, 1),
11979 			/* don't read back pkt_ptr from stack here */
11980 			/* write 4 bytes into packet */
11981 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11982 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
11983 			BPF_EXIT_INSN(),
11984 		},
11985 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
11986 		.errstr = "R4 invalid mem access",
11987 		.result = REJECT,
11988 	},
11989 	{
11990 		"calls: pkt_ptr spill into caller stack 7",
11991 		.insns = {
11992 			BPF_MOV64_IMM(BPF_REG_2, 0),
11993 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11994 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11995 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11996 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11997 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
11998 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
11999 			BPF_EXIT_INSN(),
12000 
12001 			/* subprog 1 */
12002 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12003 				    offsetof(struct __sk_buff, data)),
12004 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12005 				    offsetof(struct __sk_buff, data_end)),
12006 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12007 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12008 			BPF_MOV64_IMM(BPF_REG_5, 0),
12009 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
12010 			/* spill checked pkt_ptr into stack of caller */
12011 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12012 			BPF_MOV64_IMM(BPF_REG_5, 1),
12013 			/* don't read back pkt_ptr from stack here */
12014 			/* write 4 bytes into packet */
12015 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12016 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12017 			BPF_EXIT_INSN(),
12018 		},
12019 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12020 		.errstr = "R4 invalid mem access",
12021 		.result = REJECT,
12022 	},
12023 	{
12024 		"calls: pkt_ptr spill into caller stack 8",
12025 		.insns = {
12026 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12027 				    offsetof(struct __sk_buff, data)),
12028 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12029 				    offsetof(struct __sk_buff, data_end)),
12030 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12031 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12032 			BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
12033 			BPF_EXIT_INSN(),
12034 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12035 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12036 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12037 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12038 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12039 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
12040 			BPF_EXIT_INSN(),
12041 
12042 			/* subprog 1 */
12043 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12044 				    offsetof(struct __sk_buff, data)),
12045 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12046 				    offsetof(struct __sk_buff, data_end)),
12047 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12048 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12049 			BPF_MOV64_IMM(BPF_REG_5, 0),
12050 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
12051 			/* spill checked pkt_ptr into stack of caller */
12052 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12053 			BPF_MOV64_IMM(BPF_REG_5, 1),
12054 			/* don't read back pkt_ptr from stack here */
12055 			/* write 4 bytes into packet */
12056 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12057 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12058 			BPF_EXIT_INSN(),
12059 		},
12060 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12061 		.result = ACCEPT,
12062 	},
12063 	{
12064 		"calls: pkt_ptr spill into caller stack 9",
12065 		.insns = {
12066 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12067 				    offsetof(struct __sk_buff, data)),
12068 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12069 				    offsetof(struct __sk_buff, data_end)),
12070 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12071 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12072 			BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
12073 			BPF_EXIT_INSN(),
12074 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12075 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12076 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12077 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12078 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12079 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
12080 			BPF_EXIT_INSN(),
12081 
12082 			/* subprog 1 */
12083 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12084 				    offsetof(struct __sk_buff, data)),
12085 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12086 				    offsetof(struct __sk_buff, data_end)),
12087 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12088 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12089 			BPF_MOV64_IMM(BPF_REG_5, 0),
12090 			/* spill unchecked pkt_ptr into stack of caller */
12091 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12092 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
12093 			BPF_MOV64_IMM(BPF_REG_5, 1),
12094 			/* don't read back pkt_ptr from stack here */
12095 			/* write 4 bytes into packet */
12096 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12097 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12098 			BPF_EXIT_INSN(),
12099 		},
12100 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12101 		.errstr = "invalid access to packet",
12102 		.result = REJECT,
12103 	},
12104 	{
12105 		"calls: caller stack init to zero or map_value_or_null",
12106 		.insns = {
12107 			BPF_MOV64_IMM(BPF_REG_0, 0),
12108 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
12109 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12110 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12111 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
12112 			/* fetch map_value_or_null or const_zero from stack */
12113 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
12114 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
12115 			/* store into map_value */
12116 			BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
12117 			BPF_EXIT_INSN(),
12118 
12119 			/* subprog 1 */
12120 			/* if (ctx == 0) return; */
12121 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
12122 			/* else bpf_map_lookup() and *(fp - 8) = r0 */
12123 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
12124 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12125 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12126 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12127 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12128 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12129 				     BPF_FUNC_map_lookup_elem),
12130 			/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
12131 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
12132 			BPF_EXIT_INSN(),
12133 		},
12134 		.fixup_map_hash_8b = { 13 },
12135 		.result = ACCEPT,
12136 		.prog_type = BPF_PROG_TYPE_XDP,
12137 	},
12138 	{
12139 		"calls: stack init to zero and pruning",
12140 		.insns = {
12141 			/* first make allocated_stack 16 bytes */
12142 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
12143 			/* now fork the execution such that the false branch
12144 			 * of the JGT insn will be verified second and it skips
12145 			 * zero init of the fp-8 stack slot. If stack liveness
12146 			 * marking is missing live_read marks from the map_lookup
12147 			 * call processing then pruning will incorrectly assume
12148 			 * that the fp-8 stack slot was unused in the fall-through
12149 			 * branch and will accept the program incorrectly
12150 			 */
12151 			BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
12152 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12153 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
12154 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12155 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12156 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12157 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12158 				     BPF_FUNC_map_lookup_elem),
12159 			BPF_EXIT_INSN(),
12160 		},
12161 		.fixup_map_hash_48b = { 6 },
12162 		.errstr = "invalid indirect read from stack off -8+0 size 8",
12163 		.result = REJECT,
12164 		.prog_type = BPF_PROG_TYPE_XDP,
12165 	},
12166 	{
12167 		"calls: two calls returning different map pointers for lookup (hash, array)",
12168 		.insns = {
12169 			/* main prog */
12170 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
12171 			BPF_CALL_REL(11),
12172 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12173 			BPF_CALL_REL(12),
12174 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
12175 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12176 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12177 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12178 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12179 				     BPF_FUNC_map_lookup_elem),
12180 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
12181 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
12182 				   offsetof(struct test_val, foo)),
12183 			BPF_MOV64_IMM(BPF_REG_0, 1),
12184 			BPF_EXIT_INSN(),
12185 			/* subprog 1 */
12186 			BPF_LD_MAP_FD(BPF_REG_0, 0),
12187 			BPF_EXIT_INSN(),
12188 			/* subprog 2 */
12189 			BPF_LD_MAP_FD(BPF_REG_0, 0),
12190 			BPF_EXIT_INSN(),
12191 		},
12192 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12193 		.fixup_map_hash_48b = { 13 },
12194 		.fixup_map_array_48b = { 16 },
12195 		.result = ACCEPT,
12196 		.retval = 1,
12197 	},
12198 	{
12199 		"calls: two calls returning different map pointers for lookup (hash, map in map)",
12200 		.insns = {
12201 			/* main prog */
12202 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
12203 			BPF_CALL_REL(11),
12204 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12205 			BPF_CALL_REL(12),
12206 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
12207 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12208 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12209 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12210 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12211 				     BPF_FUNC_map_lookup_elem),
12212 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
12213 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
12214 				   offsetof(struct test_val, foo)),
12215 			BPF_MOV64_IMM(BPF_REG_0, 1),
12216 			BPF_EXIT_INSN(),
12217 			/* subprog 1 */
12218 			BPF_LD_MAP_FD(BPF_REG_0, 0),
12219 			BPF_EXIT_INSN(),
12220 			/* subprog 2 */
12221 			BPF_LD_MAP_FD(BPF_REG_0, 0),
12222 			BPF_EXIT_INSN(),
12223 		},
12224 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12225 		.fixup_map_in_map = { 16 },
12226 		.fixup_map_array_48b = { 13 },
12227 		.result = REJECT,
12228 		.errstr = "R0 invalid mem access 'map_ptr'",
12229 	},
12230 	{
12231 		"cond: two branches returning different map pointers for lookup (tail, tail)",
12232 		.insns = {
12233 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
12234 				    offsetof(struct __sk_buff, mark)),
12235 			BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 3),
12236 			BPF_LD_MAP_FD(BPF_REG_2, 0),
12237 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12238 			BPF_LD_MAP_FD(BPF_REG_2, 0),
12239 			BPF_MOV64_IMM(BPF_REG_3, 7),
12240 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12241 				     BPF_FUNC_tail_call),
12242 			BPF_MOV64_IMM(BPF_REG_0, 1),
12243 			BPF_EXIT_INSN(),
12244 		},
12245 		.fixup_prog1 = { 5 },
12246 		.fixup_prog2 = { 2 },
12247 		.result_unpriv = REJECT,
12248 		.errstr_unpriv = "tail_call abusing map_ptr",
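		/* Under !root the prog_array handed to tail_call has to be the
		 * same map on every path; the two branches above load different
		 * maps, hence the unpriv-only rejection.
		 */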
12249 		.result = ACCEPT,
12250 		.retval = 42,
12251 	},
12252 	{
12253 		"cond: two branches returning same map pointers for lookup (tail, tail)",
12254 		.insns = {
12255 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
12256 				    offsetof(struct __sk_buff, mark)),
12257 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 3),
12258 			BPF_LD_MAP_FD(BPF_REG_2, 0),
12259 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12260 			BPF_LD_MAP_FD(BPF_REG_2, 0),
12261 			BPF_MOV64_IMM(BPF_REG_3, 7),
12262 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12263 				     BPF_FUNC_tail_call),
12264 			BPF_MOV64_IMM(BPF_REG_0, 1),
12265 			BPF_EXIT_INSN(),
12266 		},
12267 		.fixup_prog2 = { 2, 5 },
12268 		.result_unpriv = ACCEPT,
12269 		.result = ACCEPT,
12270 		.retval = 42,
12271 	},
12272 	{
12273 		"search pruning: all branches should be verified (nop operation)",
12274 		.insns = {
12275 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12276 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12277 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
12278 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12279 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
12280 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
12281 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
12282 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
12283 			BPF_MOV64_IMM(BPF_REG_4, 0),
12284 			BPF_JMP_A(1),
12285 			BPF_MOV64_IMM(BPF_REG_4, 1),
12286 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
12287 			BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
12288 			BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
12289 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_5, 0, 2),
12290 			BPF_MOV64_IMM(BPF_REG_6, 0),
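			/* R6 is a plain scalar here, so the store below is invalid
			 * and has to be caught on every pruned path.
			 */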
12291 			BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xdead),
12292 			BPF_EXIT_INSN(),
12293 		},
12294 		.fixup_map_hash_8b = { 3 },
12295 		.errstr = "R6 invalid mem access 'inv'",
12296 		.result = REJECT,
12297 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
12298 	},
12299 	{
12300 		"search pruning: all branches should be verified (invalid stack access)",
12301 		.insns = {
12302 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12303 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12304 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
12305 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12306 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
12307 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
12308 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
12309 			BPF_MOV64_IMM(BPF_REG_4, 0),
12310 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
12311 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
12312 			BPF_JMP_A(1),
12313 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -24),
12314 			BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
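			/* on the 0xbeef branch only fp-24 was written, so the read
			 * of fp-16 below must be rejected
			 */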
12315 			BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
12316 			BPF_EXIT_INSN(),
12317 		},
12318 		.fixup_map_hash_8b = { 3 },
12319 		.errstr = "invalid read from stack off -16+0 size 8",
12320 		.result = REJECT,
12321 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
12322 	},
12323 	{
12324 		"jit: lsh, rsh, arsh by 1",
12325 		.insns = {
12326 			BPF_MOV64_IMM(BPF_REG_0, 1),
12327 			BPF_MOV64_IMM(BPF_REG_1, 0xff),
12328 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 1),
12329 			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 1),
12330 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x3fc, 1),
12331 			BPF_EXIT_INSN(),
12332 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 1),
12333 			BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 1),
12334 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0xff, 1),
12335 			BPF_EXIT_INSN(),
12336 			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 1),
12337 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x7f, 1),
12338 			BPF_EXIT_INSN(),
12339 			BPF_MOV64_IMM(BPF_REG_0, 2),
12340 			BPF_EXIT_INSN(),
12341 		},
12342 		.result = ACCEPT,
12343 		.retval = 2,
12344 	},
12345 	{
12346 		"jit: mov32 for ldimm64, 1",
12347 		.insns = {
12348 			BPF_MOV64_IMM(BPF_REG_0, 2),
12349 			BPF_LD_IMM64(BPF_REG_1, 0xfeffffffffffffffULL),
12350 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32),
12351 			BPF_LD_IMM64(BPF_REG_2, 0xfeffffffULL),
12352 			BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
12353 			BPF_MOV64_IMM(BPF_REG_0, 1),
12354 			BPF_EXIT_INSN(),
12355 		},
12356 		.result = ACCEPT,
12357 		.retval = 2,
12358 	},
12359 	{
12360 		"jit: mov32 for ldimm64, 2",
12361 		.insns = {
12362 			BPF_MOV64_IMM(BPF_REG_0, 1),
12363 			BPF_LD_IMM64(BPF_REG_1, 0x1ffffffffULL),
12364 			BPF_LD_IMM64(BPF_REG_2, 0xffffffffULL),
12365 			BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
12366 			BPF_MOV64_IMM(BPF_REG_0, 2),
12367 			BPF_EXIT_INSN(),
12368 		},
12369 		.result = ACCEPT,
12370 		.retval = 2,
12371 	},
12372 	{
12373 		"jit: various mul tests",
12374 		.insns = {
12375 			BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
12376 			BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
12377 			BPF_LD_IMM64(BPF_REG_1, 0xefefefULL),
12378 			BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
12379 			BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
12380 			BPF_MOV64_IMM(BPF_REG_0, 1),
12381 			BPF_EXIT_INSN(),
12382 			BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
12383 			BPF_ALU64_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
12384 			BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
12385 			BPF_MOV64_IMM(BPF_REG_0, 1),
12386 			BPF_EXIT_INSN(),
12387 			BPF_MOV32_REG(BPF_REG_2, BPF_REG_2),
12388 			BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
12389 			BPF_ALU32_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
12390 			BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
12391 			BPF_MOV64_IMM(BPF_REG_0, 1),
12392 			BPF_EXIT_INSN(),
12393 			BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
12394 			BPF_ALU32_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
12395 			BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
12396 			BPF_MOV64_IMM(BPF_REG_0, 1),
12397 			BPF_EXIT_INSN(),
12398 			BPF_LD_IMM64(BPF_REG_0, 0x952a7bbcULL),
12399 			BPF_LD_IMM64(BPF_REG_1, 0xfefefeULL),
12400 			BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
12401 			BPF_ALU32_REG(BPF_MUL, BPF_REG_2, BPF_REG_1),
12402 			BPF_JMP_REG(BPF_JEQ, BPF_REG_2, BPF_REG_0, 2),
12403 			BPF_MOV64_IMM(BPF_REG_0, 1),
12404 			BPF_EXIT_INSN(),
12405 			BPF_MOV64_IMM(BPF_REG_0, 2),
12406 			BPF_EXIT_INSN(),
12407 		},
12408 		.result = ACCEPT,
12409 		.retval = 2,
12410 	},
12411 	{
12412 		"xadd/w check unaligned stack",
12413 		.insns = {
12414 			BPF_MOV64_IMM(BPF_REG_0, 1),
12415 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
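			/* the word-sized XADD below targets fp-7, which is not
			 * 4-byte aligned
			 */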
12416 			BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -7),
12417 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
12418 			BPF_EXIT_INSN(),
12419 		},
12420 		.result = REJECT,
12421 		.errstr = "misaligned stack access off",
12422 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12423 	},
12424 	{
12425 		"xadd/w check unaligned map",
12426 		.insns = {
12427 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12428 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12429 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12430 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12431 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12432 				     BPF_FUNC_map_lookup_elem),
12433 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
12434 			BPF_EXIT_INSN(),
12435 			BPF_MOV64_IMM(BPF_REG_1, 1),
12436 			BPF_STX_XADD(BPF_W, BPF_REG_0, BPF_REG_1, 3),
12437 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 3),
12438 			BPF_EXIT_INSN(),
12439 		},
12440 		.fixup_map_hash_8b = { 3 },
12441 		.result = REJECT,
12442 		.errstr = "misaligned value access off",
12443 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12444 	},
12445 	{
12446 		"xadd/w check unaligned pkt",
12447 		.insns = {
12448 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12449 				    offsetof(struct xdp_md, data)),
12450 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12451 				    offsetof(struct xdp_md, data_end)),
12452 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
12453 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
12454 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 2),
12455 			BPF_MOV64_IMM(BPF_REG_0, 99),
12456 			BPF_JMP_IMM(BPF_JA, 0, 0, 6),
12457 			BPF_MOV64_IMM(BPF_REG_0, 1),
12458 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12459 			BPF_ST_MEM(BPF_W, BPF_REG_2, 3, 0),
12460 			BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 1),
12461 			BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 2),
12462 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 1),
12463 			BPF_EXIT_INSN(),
12464 		},
12465 		.result = REJECT,
12466 		.errstr = "BPF_XADD stores into R2 ctx",
12467 		.prog_type = BPF_PROG_TYPE_XDP,
12468 	},
12469 	{
12470 		"xadd/w check whether src/dst got mangled, 1",
12471 		.insns = {
12472 			BPF_MOV64_IMM(BPF_REG_0, 1),
12473 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
12474 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
12475 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
12476 			BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
12477 			BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
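			/* fp-8 now holds 1 + 1 + 1 = 3; the checks below make sure
			 * the XADDs left R0 (src) and R10 (dst) untouched
			 */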
12478 			BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
12479 			BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
12480 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
12481 			BPF_EXIT_INSN(),
12482 			BPF_MOV64_IMM(BPF_REG_0, 42),
12483 			BPF_EXIT_INSN(),
12484 		},
12485 		.result = ACCEPT,
12486 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12487 		.retval = 3,
12488 	},
12489 	{
12490 		"xadd/w check whether src/dst got mangled, 2",
12491 		.insns = {
12492 			BPF_MOV64_IMM(BPF_REG_0, 1),
12493 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
12494 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
12495 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
12496 			BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
12497 			BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
12498 			BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
12499 			BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
12500 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
12501 			BPF_EXIT_INSN(),
12502 			BPF_MOV64_IMM(BPF_REG_0, 42),
12503 			BPF_EXIT_INSN(),
12504 		},
12505 		.result = ACCEPT,
12506 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12507 		.retval = 3,
12508 	},
12509 	{
12510 		"bpf_get_stack return R0 within range",
12511 		.insns = {
12512 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12513 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12514 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12515 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12516 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12517 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12518 				     BPF_FUNC_map_lookup_elem),
12519 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 28),
12520 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
12521 			BPF_MOV64_IMM(BPF_REG_9, sizeof(struct test_val)),
12522 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
12523 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
12524 			BPF_MOV64_IMM(BPF_REG_3, sizeof(struct test_val)),
12525 			BPF_MOV64_IMM(BPF_REG_4, 256),
12526 			BPF_EMIT_CALL(BPF_FUNC_get_stack),
12527 			BPF_MOV64_IMM(BPF_REG_1, 0),
12528 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
12529 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_8, 32),
12530 			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_8, 32),
12531 			BPF_JMP_REG(BPF_JSLT, BPF_REG_1, BPF_REG_8, 16),
12532 			BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_8),
12533 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
12534 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_8),
12535 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_9),
12536 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32),
12537 			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 32),
12538 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
12539 			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_1),
12540 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
12541 			BPF_MOV64_IMM(BPF_REG_5, sizeof(struct test_val)),
12542 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_5),
12543 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 4),
12544 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
12545 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_9),
12546 			BPF_MOV64_IMM(BPF_REG_4, 0),
12547 			BPF_EMIT_CALL(BPF_FUNC_get_stack),
12548 			BPF_EXIT_INSN(),
12549 		},
12550 		.fixup_map_hash_48b = { 4 },
12551 		.result = ACCEPT,
12552 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
12553 	},
12554 	{
12555 		"ld_abs: invalid op 1",
12556 		.insns = {
12557 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12558 			BPF_LD_ABS(BPF_DW, 0),
12559 			BPF_EXIT_INSN(),
12560 		},
12561 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12562 		.result = REJECT,
12563 		.errstr = "unknown opcode",
12564 	},
12565 	{
12566 		"ld_abs: invalid op 2",
12567 		.insns = {
12568 			BPF_MOV32_IMM(BPF_REG_0, 256),
12569 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12570 			BPF_LD_IND(BPF_DW, BPF_REG_0, 0),
12571 			BPF_EXIT_INSN(),
12572 		},
12573 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12574 		.result = REJECT,
12575 		.errstr = "unknown opcode",
12576 	},
12577 	{
12578 		"ld_abs: nmap reduced",
12579 		.insns = {
12580 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12581 			BPF_LD_ABS(BPF_H, 12),
12582 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 28),
12583 			BPF_LD_ABS(BPF_H, 12),
12584 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 26),
12585 			BPF_MOV32_IMM(BPF_REG_0, 18),
12586 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -64),
12587 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -64),
12588 			BPF_LD_IND(BPF_W, BPF_REG_7, 14),
12589 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -60),
12590 			BPF_MOV32_IMM(BPF_REG_0, 280971478),
12591 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
12592 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
12593 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -60),
12594 			BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
12595 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 15),
12596 			BPF_LD_ABS(BPF_H, 12),
12597 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 13),
12598 			BPF_MOV32_IMM(BPF_REG_0, 22),
12599 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
12600 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
12601 			BPF_LD_IND(BPF_H, BPF_REG_7, 14),
12602 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -52),
12603 			BPF_MOV32_IMM(BPF_REG_0, 17366),
12604 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -48),
12605 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -48),
12606 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -52),
12607 			BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
12608 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12609 			BPF_MOV32_IMM(BPF_REG_0, 256),
12610 			BPF_EXIT_INSN(),
12611 			BPF_MOV32_IMM(BPF_REG_0, 0),
12612 			BPF_EXIT_INSN(),
12613 		},
12614 		.data = {
12615 			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0x06, 0,
12616 			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
12617 			0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
12618 		},
12619 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12620 		.result = ACCEPT,
12621 		.retval = 256,
12622 	},
12623 	{
12624 		"ld_abs: div + abs, test 1",
12625 		.insns = {
12626 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
12627 			BPF_LD_ABS(BPF_B, 3),
12628 			BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
12629 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
12630 			BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
12631 			BPF_LD_ABS(BPF_B, 4),
12632 			BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
12633 			BPF_LD_IND(BPF_B, BPF_REG_8, -70),
12634 			BPF_EXIT_INSN(),
12635 		},
12636 		.data = {
12637 			10, 20, 30, 40, 50,
12638 		},
12639 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12640 		.result = ACCEPT,
12641 		.retval = 10,
12642 	},
12643 	{
12644 		"ld_abs: div + abs, test 2",
12645 		.insns = {
12646 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
12647 			BPF_LD_ABS(BPF_B, 3),
12648 			BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
12649 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
12650 			BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
12651 			BPF_LD_ABS(BPF_B, 128),
12652 			BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
12653 			BPF_LD_IND(BPF_B, BPF_REG_8, -70),
12654 			BPF_EXIT_INSN(),
12655 		},
12656 		.data = {
12657 			10, 20, 30, 40, 50,
12658 		},
12659 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12660 		.result = ACCEPT,
12661 		.retval = 0,
12662 	},
12663 	{
12664 		"ld_abs: div + abs, test 3",
12665 		.insns = {
12666 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
12667 			BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
12668 			BPF_LD_ABS(BPF_B, 3),
12669 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
12670 			BPF_EXIT_INSN(),
12671 		},
12672 		.data = {
12673 			10, 20, 30, 40, 50,
12674 		},
12675 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12676 		.result = ACCEPT,
12677 		.retval = 0,
12678 	},
12679 	{
12680 		"ld_abs: div + abs, test 4",
12681 		.insns = {
12682 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
12683 			BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
12684 			BPF_LD_ABS(BPF_B, 256),
12685 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
12686 			BPF_EXIT_INSN(),
12687 		},
12688 		.data = {
12689 			10, 20, 30, 40, 50,
12690 		},
12691 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12692 		.result = ACCEPT,
12693 		.retval = 0,
12694 	},
12695 	{
12696 		"ld_abs: vlan + abs, test 1",
12697 		.insns = { },
12698 		.data = {
12699 			0x34,
12700 		},
12701 		.fill_helper = bpf_fill_ld_abs_vlan_push_pop,
12702 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12703 		.result = ACCEPT,
12704 		.retval = 0xbef,
12705 	},
12706 	{
12707 		"ld_abs: vlan + abs, test 2",
12708 		.insns = {
12709 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12710 			BPF_LD_ABS(BPF_B, 0),
12711 			BPF_LD_ABS(BPF_H, 0),
12712 			BPF_LD_ABS(BPF_W, 0),
12713 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
12714 			BPF_MOV64_IMM(BPF_REG_6, 0),
12715 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
12716 			BPF_MOV64_IMM(BPF_REG_2, 1),
12717 			BPF_MOV64_IMM(BPF_REG_3, 2),
12718 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12719 				     BPF_FUNC_skb_vlan_push),
12720 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
12721 			BPF_LD_ABS(BPF_B, 0),
12722 			BPF_LD_ABS(BPF_H, 0),
12723 			BPF_LD_ABS(BPF_W, 0),
12724 			BPF_MOV64_IMM(BPF_REG_0, 42),
12725 			BPF_EXIT_INSN(),
12726 		},
12727 		.data = {
12728 			0x34,
12729 		},
12730 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12731 		.result = ACCEPT,
12732 		.retval = 42,
12733 	},
12734 	{
12735 		"ld_abs: jump around ld_abs",
12736 		.insns = { },
12737 		.data = {
12738 			10, 11,
12739 		},
12740 		.fill_helper = bpf_fill_jump_around_ld_abs,
12741 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12742 		.result = ACCEPT,
12743 		.retval = 10,
12744 	},
12745 	{
12746 		"ld_dw: xor semi-random 64 bit imms, test 1",
12747 		.insns = { },
12748 		.data = { },
12749 		.fill_helper = bpf_fill_rand_ld_dw,
12750 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12751 		.result = ACCEPT,
12752 		.retval = 4090,
12753 	},
12754 	{
12755 		"ld_dw: xor semi-random 64 bit imms, test 2",
12756 		.insns = { },
12757 		.data = { },
12758 		.fill_helper = bpf_fill_rand_ld_dw,
12759 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12760 		.result = ACCEPT,
12761 		.retval = 2047,
12762 	},
12763 	{
12764 		"ld_dw: xor semi-random 64 bit imms, test 3",
12765 		.insns = { },
12766 		.data = { },
12767 		.fill_helper = bpf_fill_rand_ld_dw,
12768 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12769 		.result = ACCEPT,
12770 		.retval = 511,
12771 	},
12772 	{
12773 		"ld_dw: xor semi-random 64 bit imms, test 4",
12774 		.insns = { },
12775 		.data = { },
12776 		.fill_helper = bpf_fill_rand_ld_dw,
12777 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12778 		.result = ACCEPT,
12779 		.retval = 5,
12780 	},
12781 	{
12782 		"pass unmodified ctx pointer to helper",
12783 		.insns = {
12784 			BPF_MOV64_IMM(BPF_REG_2, 0),
12785 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12786 				     BPF_FUNC_csum_update),
12787 			BPF_MOV64_IMM(BPF_REG_0, 0),
12788 			BPF_EXIT_INSN(),
12789 		},
12790 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12791 		.result = ACCEPT,
12792 	},
12793 	{
12794 		"reference tracking: leak potential reference",
12795 		.insns = {
12796 			BPF_SK_LOOKUP,
12797 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), /* leak reference */
12798 			BPF_EXIT_INSN(),
12799 		},
12800 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12801 		.errstr = "Unreleased reference",
12802 		.result = REJECT,
12803 	},
12804 	{
12805 		"reference tracking: leak potential reference on stack",
12806 		.insns = {
12807 			BPF_SK_LOOKUP,
12808 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12809 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12810 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
12811 			BPF_MOV64_IMM(BPF_REG_0, 0),
12812 			BPF_EXIT_INSN(),
12813 		},
12814 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12815 		.errstr = "Unreleased reference",
12816 		.result = REJECT,
12817 	},
12818 	{
12819 		"reference tracking: leak potential reference on stack 2",
12820 		.insns = {
12821 			BPF_SK_LOOKUP,
12822 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12823 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12824 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
12825 			BPF_MOV64_IMM(BPF_REG_0, 0),
12826 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
12827 			BPF_EXIT_INSN(),
12828 		},
12829 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12830 		.errstr = "Unreleased reference",
12831 		.result = REJECT,
12832 	},
12833 	{
12834 		"reference tracking: zero potential reference",
12835 		.insns = {
12836 			BPF_SK_LOOKUP,
12837 			BPF_MOV64_IMM(BPF_REG_0, 0), /* leak reference */
12838 			BPF_EXIT_INSN(),
12839 		},
12840 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12841 		.errstr = "Unreleased reference",
12842 		.result = REJECT,
12843 	},
12844 	{
12845 		"reference tracking: copy and zero potential references",
12846 		.insns = {
12847 			BPF_SK_LOOKUP,
12848 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
12849 			BPF_MOV64_IMM(BPF_REG_0, 0),
12850 			BPF_MOV64_IMM(BPF_REG_7, 0), /* leak reference */
12851 			BPF_EXIT_INSN(),
12852 		},
12853 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12854 		.errstr = "Unreleased reference",
12855 		.result = REJECT,
12856 	},
12857 	{
12858 		"reference tracking: release reference without check",
12859 		.insns = {
12860 			BPF_SK_LOOKUP,
12861 			/* reference in r0 may be NULL */
12862 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
12863 			BPF_MOV64_IMM(BPF_REG_2, 0),
12864 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
12865 			BPF_EXIT_INSN(),
12866 		},
12867 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12868 		.errstr = "type=sock_or_null expected=sock",
12869 		.result = REJECT,
12870 	},
12871 	{
12872 		"reference tracking: release reference",
12873 		.insns = {
12874 			BPF_SK_LOOKUP,
12875 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
12876 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
12877 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
12878 			BPF_EXIT_INSN(),
12879 		},
12880 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12881 		.result = ACCEPT,
12882 	},
12883 	{
12884 		"reference tracking: release reference 2",
12885 		.insns = {
12886 			BPF_SK_LOOKUP,
12887 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
12888 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
12889 			BPF_EXIT_INSN(),
12890 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
12891 			BPF_EXIT_INSN(),
12892 		},
12893 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12894 		.result = ACCEPT,
12895 	},
12896 	{
12897 		"reference tracking: release reference twice",
12898 		.insns = {
12899 			BPF_SK_LOOKUP,
12900 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
12901 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
12902 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
12903 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
12904 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
12905 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
12906 			BPF_EXIT_INSN(),
12907 		},
12908 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12909 		.errstr = "type=inv expected=sock",
12910 		.result = REJECT,
12911 	},
12912 	{
12913 		"reference tracking: release reference twice inside branch",
12914 		.insns = {
12915 			BPF_SK_LOOKUP,
12916 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
12917 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
12918 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), /* goto end */
12919 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
12920 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
12921 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
12922 			BPF_EXIT_INSN(),
12923 		},
12924 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12925 		.errstr = "type=inv expected=sock",
12926 		.result = REJECT,
12927 	},
12928 	{
12929 		"reference tracking: alloc, check, free in one subbranch",
12930 		.insns = {
12931 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12932 				    offsetof(struct __sk_buff, data)),
12933 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12934 				    offsetof(struct __sk_buff, data_end)),
12935 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12936 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
12937 			/* if (offsetof(skb, mark) > data_len) exit; */
12938 			BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
12939 			BPF_EXIT_INSN(),
12940 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
12941 				    offsetof(struct __sk_buff, mark)),
12942 			BPF_SK_LOOKUP,
12943 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 1), /* mark == 0? */
12944 			/* Leak reference in R0 */
12945 			BPF_EXIT_INSN(),
12946 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
12947 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
12948 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
12949 			BPF_EXIT_INSN(),
12950 		},
12951 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12952 		.errstr = "Unreleased reference",
12953 		.result = REJECT,
12954 	},
12955 	{
12956 		"reference tracking: alloc, check, free in both subbranches",
12957 		.insns = {
12958 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12959 				    offsetof(struct __sk_buff, data)),
12960 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12961 				    offsetof(struct __sk_buff, data_end)),
12962 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12963 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
12964 			/* if (offsetof(skb, mark) > data_len) exit; */
12965 			BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
12966 			BPF_EXIT_INSN(),
12967 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
12968 				    offsetof(struct __sk_buff, mark)),
12969 			BPF_SK_LOOKUP,
12970 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 4), /* mark == 0? */
12971 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
12972 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
12973 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
12974 			BPF_EXIT_INSN(),
12975 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
12976 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
12977 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
12978 			BPF_EXIT_INSN(),
12979 		},
12980 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12981 		.result = ACCEPT,
12982 	},
12983 	{
12984 		"reference tracking in call: free reference in subprog",
12985 		.insns = {
12986 			BPF_SK_LOOKUP,
12987 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
12988 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
12989 			BPF_MOV64_IMM(BPF_REG_0, 0),
12990 			BPF_EXIT_INSN(),
12991 
12992 			/* subprog 1 */
12993 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
12994 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
12995 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
12996 			BPF_EXIT_INSN(),
12997 		},
12998 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12999 		.result = ACCEPT,
13000 	},
13001 	{
13002 		"pass modified ctx pointer to helper, 1",
13003 		.insns = {
13004 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
13005 			BPF_MOV64_IMM(BPF_REG_2, 0),
13006 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13007 				     BPF_FUNC_csum_update),
13008 			BPF_MOV64_IMM(BPF_REG_0, 0),
13009 			BPF_EXIT_INSN(),
13010 		},
13011 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13012 		.result = REJECT,
13013 		.errstr = "dereference of modified ctx ptr",
13014 	},
13015 	{
13016 		"pass modified ctx pointer to helper, 2",
13017 		.insns = {
13018 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
13019 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13020 				     BPF_FUNC_get_socket_cookie),
13021 			BPF_MOV64_IMM(BPF_REG_0, 0),
13022 			BPF_EXIT_INSN(),
13023 		},
13024 		.result_unpriv = REJECT,
13025 		.result = REJECT,
13026 		.errstr_unpriv = "dereference of modified ctx ptr",
13027 		.errstr = "dereference of modified ctx ptr",
13028 	},
13029 	{
13030 		"pass modified ctx pointer to helper, 3",
13031 		.insns = {
13032 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 0),
13033 			BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 4),
13034 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
13035 			BPF_MOV64_IMM(BPF_REG_2, 0),
13036 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13037 				     BPF_FUNC_csum_update),
13038 			BPF_MOV64_IMM(BPF_REG_0, 0),
13039 			BPF_EXIT_INSN(),
13040 		},
13041 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13042 		.result = REJECT,
13043 		.errstr = "variable ctx access var_off=(0x0; 0x4)",
13044 	},
13045 	{
13046 		"mov64 src == dst",
13047 		.insns = {
13048 			BPF_MOV64_IMM(BPF_REG_2, 0),
13049 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_2),
13050 			/* Check bounds are OK */
13051 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
13052 			BPF_MOV64_IMM(BPF_REG_0, 0),
13053 			BPF_EXIT_INSN(),
13054 		},
13055 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13056 		.result = ACCEPT,
13057 	},
13058 	{
13059 		"mov64 src != dst",
13060 		.insns = {
13061 			BPF_MOV64_IMM(BPF_REG_3, 0),
13062 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
13063 			/* Check bounds are OK */
13064 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
13065 			BPF_MOV64_IMM(BPF_REG_0, 0),
13066 			BPF_EXIT_INSN(),
13067 		},
13068 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13069 		.result = ACCEPT,
13070 	},
13071 	{
13072 		"reference tracking in call: free reference in subprog and outside",
13073 		.insns = {
13074 			BPF_SK_LOOKUP,
13075 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
13076 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13077 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
13078 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13079 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13080 			BPF_EXIT_INSN(),
13081 
13082 			/* subprog 1 */
13083 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
13084 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
13085 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13086 			BPF_EXIT_INSN(),
13087 		},
13088 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13089 		.errstr = "type=inv expected=sock",
13090 		.result = REJECT,
13091 	},
13092 	{
13093 		"reference tracking in call: alloc & leak reference in subprog",
13094 		.insns = {
13095 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13096 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13097 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
13098 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13099 			BPF_MOV64_IMM(BPF_REG_0, 0),
13100 			BPF_EXIT_INSN(),
13101 
13102 			/* subprog 1 */
13103 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_4),
13104 			BPF_SK_LOOKUP,
13105 			/* spill unchecked sk_ptr into stack of caller */
13106 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
13107 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13108 			BPF_EXIT_INSN(),
13109 		},
13110 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13111 		.errstr = "Unreleased reference",
13112 		.result = REJECT,
13113 	},
13114 	{
13115 		"reference tracking in call: alloc in subprog, release outside",
13116 		.insns = {
13117 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13118 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
13119 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13120 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13121 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13122 			BPF_EXIT_INSN(),
13123 
13124 			/* subprog 1 */
13125 			BPF_SK_LOOKUP,
13126 			BPF_EXIT_INSN(), /* return sk */
13127 		},
13128 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13129 		.retval = POINTER_VALUE,
13130 		.result = ACCEPT,
13131 	},
13132 	{
13133 		"reference tracking in call: sk_ptr leak into caller stack",
13134 		.insns = {
13135 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13136 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13137 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
13138 			BPF_MOV64_IMM(BPF_REG_0, 0),
13139 			BPF_EXIT_INSN(),
13140 
13141 			/* subprog 1 */
13142 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
13143 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
13144 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
13145 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
13146 			/* spill unchecked sk_ptr into stack of caller */
13147 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
13148 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
13149 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
13150 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
13151 			BPF_EXIT_INSN(),
13152 
13153 			/* subprog 2 */
13154 			BPF_SK_LOOKUP,
13155 			BPF_EXIT_INSN(),
13156 		},
13157 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13158 		.errstr = "Unreleased reference",
13159 		.result = REJECT,
13160 	},
13161 	{
13162 		"reference tracking in call: sk_ptr spill into caller stack",
13163 		.insns = {
13164 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13165 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13166 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
13167 			BPF_MOV64_IMM(BPF_REG_0, 0),
13168 			BPF_EXIT_INSN(),
13169 
13170 			/* subprog 1 */
13171 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
13172 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
13173 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
13174 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
13175 			/* spill unchecked sk_ptr into stack of caller */
13176 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
13177 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
13178 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
13179 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
13180 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
13181 			/* now the sk_ptr is verified, free the reference */
13182 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_4, 0),
13183 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13184 			BPF_EXIT_INSN(),
13185 
13186 			/* subprog 2 */
13187 			BPF_SK_LOOKUP,
13188 			BPF_EXIT_INSN(),
13189 		},
13190 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13191 		.result = ACCEPT,
13192 	},
13193 	{
13194 		"reference tracking: allow LD_ABS",
13195 		.insns = {
13196 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13197 			BPF_SK_LOOKUP,
13198 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13199 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13200 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13201 			BPF_LD_ABS(BPF_B, 0),
13202 			BPF_LD_ABS(BPF_H, 0),
13203 			BPF_LD_ABS(BPF_W, 0),
13204 			BPF_EXIT_INSN(),
13205 		},
13206 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13207 		.result = ACCEPT,
13208 	},
13209 	{
13210 		"reference tracking: forbid LD_ABS while holding reference",
13211 		.insns = {
13212 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13213 			BPF_SK_LOOKUP,
13214 			BPF_LD_ABS(BPF_B, 0),
13215 			BPF_LD_ABS(BPF_H, 0),
13216 			BPF_LD_ABS(BPF_W, 0),
13217 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13218 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13219 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13220 			BPF_EXIT_INSN(),
13221 		},
13222 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13223 		.errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",
13224 		.result = REJECT,
13225 	},
13226 	{
13227 		"reference tracking: allow LD_IND",
13228 		.insns = {
13229 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13230 			BPF_SK_LOOKUP,
13231 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13232 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13233 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13234 			BPF_MOV64_IMM(BPF_REG_7, 1),
13235 			BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
13236 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
13237 			BPF_EXIT_INSN(),
13238 		},
13239 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13240 		.result = ACCEPT,
13241 		.retval = 1,
13242 	},
13243 	{
13244 		"reference tracking: forbid LD_IND while holding reference",
13245 		.insns = {
13246 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13247 			BPF_SK_LOOKUP,
13248 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
13249 			BPF_MOV64_IMM(BPF_REG_7, 1),
13250 			BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
13251 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
13252 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_4),
13253 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
13254 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13255 			BPF_EXIT_INSN(),
13256 		},
13257 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13258 		.errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",
13259 		.result = REJECT,
13260 	},
13261 	{
13262 		"reference tracking: check reference or tail call",
13263 		.insns = {
13264 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
13265 			BPF_SK_LOOKUP,
13266 			/* if (sk) bpf_sk_release() */
13267 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13268 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 7),
13269 			/* bpf_tail_call() */
13270 			BPF_MOV64_IMM(BPF_REG_3, 2),
13271 			BPF_LD_MAP_FD(BPF_REG_2, 0),
13272 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
13273 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13274 				     BPF_FUNC_tail_call),
13275 			BPF_MOV64_IMM(BPF_REG_0, 0),
13276 			BPF_EXIT_INSN(),
13277 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13278 			BPF_EXIT_INSN(),
13279 		},
13280 		.fixup_prog1 = { 17 },
13281 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13282 		.result = ACCEPT,
13283 	},
13284 	{
13285 		"reference tracking: release reference then tail call",
13286 		.insns = {
13287 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
13288 			BPF_SK_LOOKUP,
13289 			/* if (sk) bpf_sk_release() */
13290 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13291 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
13292 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13293 			/* bpf_tail_call() */
13294 			BPF_MOV64_IMM(BPF_REG_3, 2),
13295 			BPF_LD_MAP_FD(BPF_REG_2, 0),
13296 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
13297 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13298 				     BPF_FUNC_tail_call),
13299 			BPF_MOV64_IMM(BPF_REG_0, 0),
13300 			BPF_EXIT_INSN(),
13301 		},
13302 		.fixup_prog1 = { 18 },
13303 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13304 		.result = ACCEPT,
13305 	},
13306 	{
13307 		"reference tracking: leak possible reference over tail call",
13308 		.insns = {
13309 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
13310 			/* Look up socket and store in REG_6 */
13311 			BPF_SK_LOOKUP,
13312 			/* bpf_tail_call() */
13313 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13314 			BPF_MOV64_IMM(BPF_REG_3, 2),
13315 			BPF_LD_MAP_FD(BPF_REG_2, 0),
13316 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
13317 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13318 				     BPF_FUNC_tail_call),
13319 			BPF_MOV64_IMM(BPF_REG_0, 0),
13320 			/* if (sk) bpf_sk_release() */
13321 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13322 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
13323 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13324 			BPF_EXIT_INSN(),
13325 		},
13326 		.fixup_prog1 = { 16 },
13327 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13328 		.errstr = "tail_call would lead to reference leak",
13329 		.result = REJECT,
13330 	},
13331 	{
13332 		"reference tracking: leak checked reference over tail call",
13333 		.insns = {
13334 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
13335 			/* Look up socket and store in REG_6 */
13336 			BPF_SK_LOOKUP,
13337 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13338 			/* if (!sk) goto end */
13339 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
13340 			/* bpf_tail_call() */
13341 			BPF_MOV64_IMM(BPF_REG_3, 0),
13342 			BPF_LD_MAP_FD(BPF_REG_2, 0),
13343 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
13344 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13345 				     BPF_FUNC_tail_call),
13346 			BPF_MOV64_IMM(BPF_REG_0, 0),
13347 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13348 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13349 			BPF_EXIT_INSN(),
13350 		},
13351 		.fixup_prog1 = { 17 },
13352 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13353 		.errstr = "tail_call would lead to reference leak",
13354 		.result = REJECT,
13355 	},
13356 	{
13357 		"reference tracking: mangle and release sock_or_null",
13358 		.insns = {
13359 			BPF_SK_LOOKUP,
13360 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13361 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
13362 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13363 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13364 			BPF_EXIT_INSN(),
13365 		},
13366 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13367 		.errstr = "R1 pointer arithmetic on sock_or_null prohibited",
13368 		.result = REJECT,
13369 	},
13370 	{
13371 		"reference tracking: mangle and release sock",
13372 		.insns = {
13373 			BPF_SK_LOOKUP,
13374 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13375 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
13376 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
13377 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13378 			BPF_EXIT_INSN(),
13379 		},
13380 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13381 		.errstr = "R1 pointer arithmetic on sock prohibited",
13382 		.result = REJECT,
13383 	},
13384 	{
13385 		"reference tracking: access member",
13386 		.insns = {
13387 			BPF_SK_LOOKUP,
13388 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13389 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
13390 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
13391 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13392 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13393 			BPF_EXIT_INSN(),
13394 		},
13395 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13396 		.result = ACCEPT,
13397 	},
13398 	{
13399 		"reference tracking: write to member",
13400 		.insns = {
13401 			BPF_SK_LOOKUP,
13402 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13403 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
13404 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13405 			BPF_LD_IMM64(BPF_REG_2, 42),
13406 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_2,
13407 				    offsetof(struct bpf_sock, mark)),
13408 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13409 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13410 			BPF_LD_IMM64(BPF_REG_0, 0),
13411 			BPF_EXIT_INSN(),
13412 		},
13413 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13414 		.errstr = "cannot write into socket",
13415 		.result = REJECT,
13416 	},
13417 	{
13418 		"reference tracking: invalid 64-bit access of member",
13419 		.insns = {
13420 			BPF_SK_LOOKUP,
13421 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13422 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
13423 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
13424 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13425 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13426 			BPF_EXIT_INSN(),
13427 		},
13428 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13429 		.errstr = "invalid bpf_sock access off=0 size=8",
13430 		.result = REJECT,
13431 	},
13432 	{
13433 		"reference tracking: access after release",
13434 		.insns = {
13435 			BPF_SK_LOOKUP,
13436 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13437 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
13438 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13439 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
13440 			BPF_EXIT_INSN(),
13441 		},
13442 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13443 		.errstr = "!read_ok",
13444 		.result = REJECT,
13445 	},
13446 	{
13447 		"reference tracking: direct access for lookup",
13448 		.insns = {
13449 			/* Check that the packet is at least 64B long */
13450 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
13451 				    offsetof(struct __sk_buff, data)),
13452 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
13453 				    offsetof(struct __sk_buff, data_end)),
13454 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
13455 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
13456 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
13457 			/* sk = sk_lookup_tcp(ctx, skb->data, ...) */
13458 			BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)),
13459 			BPF_MOV64_IMM(BPF_REG_4, 0),
13460 			BPF_MOV64_IMM(BPF_REG_5, 0),
13461 			BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp),
13462 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13463 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
13464 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
13465 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13466 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13467 			BPF_EXIT_INSN(),
13468 		},
13469 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13470 		.result = ACCEPT,
13471 	},
13472 };
13473 
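/* A test does not record its own length: probe_filter_length() scans
 * backwards from MAX_INSNS for the last instruction with a non-zero
 * opcode or immediate and treats everything up to and including it as
 * the program.
 */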
13474 static int probe_filter_length(const struct bpf_insn *fp)
13475 {
13476 	int len;
13477 
13478 	for (len = MAX_INSNS - 1; len > 0; --len)
13479 		if (fp[len].code != 0 || fp[len].imm != 0)
13480 			break;
13481 	return len + 1;
13482 }
13483 
13484 static int create_map(uint32_t type, uint32_t size_key,
13485 		      uint32_t size_value, uint32_t max_elem)
13486 {
13487 	int fd;
13488 
13489 	fd = bpf_create_map(type, size_key, size_value, max_elem,
13490 			    type == BPF_MAP_TYPE_HASH ? BPF_F_NO_PREALLOC : 0);
13491 	if (fd < 0)
13492 		printf("Failed to create map '%s'!\n", strerror(errno));
13493 
13494 	return fd;
13495 }
13496 
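/* Helpers for the tail_call tests: create_prog_array() builds a
 * BPF_MAP_TYPE_PROG_ARRAY holding two dummy programs, one that simply
 * returns 42 (installed at p1key) and one that itself tail-calls through
 * the same map (installed at p2key = 1).
 */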
13497 static int create_prog_dummy1(enum bpf_prog_type prog_type)
13498 {
13499 	struct bpf_insn prog[] = {
13500 		BPF_MOV64_IMM(BPF_REG_0, 42),
13501 		BPF_EXIT_INSN(),
13502 	};
13503 
13504 	return bpf_load_program(prog_type, prog,
13505 				ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
13506 }
13507 
13508 static int create_prog_dummy2(enum bpf_prog_type prog_type, int mfd, int idx)
13509 {
13510 	struct bpf_insn prog[] = {
13511 		BPF_MOV64_IMM(BPF_REG_3, idx),
13512 		BPF_LD_MAP_FD(BPF_REG_2, mfd),
13513 		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13514 			     BPF_FUNC_tail_call),
13515 		BPF_MOV64_IMM(BPF_REG_0, 41),
13516 		BPF_EXIT_INSN(),
13517 	};
13518 
13519 	return bpf_load_program(prog_type, prog,
13520 				ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
13521 }
13522 
13523 static int create_prog_array(enum bpf_prog_type prog_type, uint32_t max_elem,
13524 			     int p1key)
13525 {
13526 	int p2key = 1;
13527 	int mfd, p1fd, p2fd;
13528 
13529 	mfd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
13530 			     sizeof(int), max_elem, 0);
13531 	if (mfd < 0) {
13532 		printf("Failed to create prog array '%s'!\n", strerror(errno));
13533 		return -1;
13534 	}
13535 
13536 	p1fd = create_prog_dummy1(prog_type);
13537 	p2fd = create_prog_dummy2(prog_type, mfd, p2key);
13538 	if (p1fd < 0 || p2fd < 0)
13539 		goto out;
13540 	if (bpf_map_update_elem(mfd, &p1key, &p1fd, BPF_ANY) < 0)
13541 		goto out;
13542 	if (bpf_map_update_elem(mfd, &p2key, &p2fd, BPF_ANY) < 0)
13543 		goto out;
13544 	close(p2fd);
13545 	close(p1fd);
13546 
13547 	return mfd;
13548 out:
13549 	close(p2fd);
13550 	close(p1fd);
13551 	close(mfd);
13552 	return -1;
13553 }
13554 
13555 static int create_map_in_map(void)
13556 {
13557 	int inner_map_fd, outer_map_fd;
13558 
13559 	inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
13560 				      sizeof(int), 1, 0);
13561 	if (inner_map_fd < 0) {
13562 		printf("Failed to create array '%s'!\n", strerror(errno));
13563 		return inner_map_fd;
13564 	}
13565 
13566 	outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
13567 					     sizeof(int), inner_map_fd, 1, 0);
13568 	if (outer_map_fd < 0)
13569 		printf("Failed to create array of maps '%s'!\n",
13570 		       strerror(errno));
13571 
13572 	close(inner_map_fd);
13573 
13574 	return outer_map_fd;
13575 }
13576 
13577 static int create_cgroup_storage(bool percpu)
13578 {
13579 	enum bpf_map_type type = percpu ? BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE :
13580 		BPF_MAP_TYPE_CGROUP_STORAGE;
13581 	int fd;
13582 
13583 	fd = bpf_create_map(type, sizeof(struct bpf_cgroup_storage_key),
13584 			    TEST_DATA_LEN, 0, 0);
13585 	if (fd < 0)
13586 		printf("Failed to create cgroup storage '%s'!\n",
13587 		       strerror(errno));
13588 
13589 	return fd;
13590 }
13591 
13592 static char bpf_vlog[UINT_MAX >> 8];
13593 
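/* Every non-zero entry in a test's fixup_* array is the index of an
 * instruction whose immediate gets rewritten with the fd of a map (or
 * prog array) created on the fly, so that pseudo map-fd loads refer to
 * a real object by the time the program reaches the verifier.
 */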
13594 static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
13595 			  struct bpf_insn *prog, int *map_fds)
13596 {
13597 	int *fixup_map_hash_8b = test->fixup_map_hash_8b;
13598 	int *fixup_map_hash_48b = test->fixup_map_hash_48b;
13599 	int *fixup_map_hash_16b = test->fixup_map_hash_16b;
13600 	int *fixup_map_array_48b = test->fixup_map_array_48b;
13601 	int *fixup_map_sockmap = test->fixup_map_sockmap;
13602 	int *fixup_map_sockhash = test->fixup_map_sockhash;
13603 	int *fixup_map_xskmap = test->fixup_map_xskmap;
13604 	int *fixup_map_stacktrace = test->fixup_map_stacktrace;
13605 	int *fixup_prog1 = test->fixup_prog1;
13606 	int *fixup_prog2 = test->fixup_prog2;
13607 	int *fixup_map_in_map = test->fixup_map_in_map;
13608 	int *fixup_cgroup_storage = test->fixup_cgroup_storage;
13609 	int *fixup_percpu_cgroup_storage = test->fixup_percpu_cgroup_storage;
13610 
13611 	if (test->fill_helper)
13612 		test->fill_helper(test);
13613 
13614 	/* Allocating hash tables with a single element is fine here, since
13615 	 * we only exercise the verifier and never do a runtime lookup, so
13616 	 * the only thing that really matters is the value size.
13617 	 */
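	/* For example, a test that loads a 48-byte hash map fd at
	 * instruction index 4 declares ".fixup_map_hash_48b = { 4 }",
	 * and the corresponding loop below patches prog[4].imm with the
	 * fd returned by create_map().
	 */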
13618 	if (*fixup_map_hash_8b) {
13619 		map_fds[0] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
13620 					sizeof(long long), 1);
13621 		do {
13622 			prog[*fixup_map_hash_8b].imm = map_fds[0];
13623 			fixup_map_hash_8b++;
13624 		} while (*fixup_map_hash_8b);
13625 	}
13626 
13627 	if (*fixup_map_hash_48b) {
13628 		map_fds[1] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
13629 					sizeof(struct test_val), 1);
13630 		do {
13631 			prog[*fixup_map_hash_48b].imm = map_fds[1];
13632 			fixup_map_hash_48b++;
13633 		} while (*fixup_map_hash_48b);
13634 	}
13635 
13636 	if (*fixup_map_hash_16b) {
13637 		map_fds[2] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
13638 					sizeof(struct other_val), 1);
13639 		do {
13640 			prog[*fixup_map_hash_16b].imm = map_fds[2];
13641 			fixup_map_hash_16b++;
13642 		} while (*fixup_map_hash_16b);
13643 	}
13644 
13645 	if (*fixup_map_array_48b) {
13646 		map_fds[3] = create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
13647 					sizeof(struct test_val), 1);
13648 		do {
13649 			prog[*fixup_map_array_48b].imm = map_fds[3];
13650 			fixup_map_array_48b++;
13651 		} while (*fixup_map_array_48b);
13652 	}
13653 
13654 	if (*fixup_prog1) {
13655 		map_fds[4] = create_prog_array(prog_type, 4, 0);
13656 		do {
13657 			prog[*fixup_prog1].imm = map_fds[4];
13658 			fixup_prog1++;
13659 		} while (*fixup_prog1);
13660 	}
13661 
13662 	if (*fixup_prog2) {
13663 		map_fds[5] = create_prog_array(prog_type, 8, 7);
13664 		do {
13665 			prog[*fixup_prog2].imm = map_fds[5];
13666 			fixup_prog2++;
13667 		} while (*fixup_prog2);
13668 	}
13669 
13670 	if (*fixup_map_in_map) {
13671 		map_fds[6] = create_map_in_map();
13672 		do {
13673 			prog[*fixup_map_in_map].imm = map_fds[6];
13674 			fixup_map_in_map++;
13675 		} while (*fixup_map_in_map);
13676 	}
13677 
13678 	if (*fixup_cgroup_storage) {
13679 		map_fds[7] = create_cgroup_storage(false);
13680 		do {
13681 			prog[*fixup_cgroup_storage].imm = map_fds[7];
13682 			fixup_cgroup_storage++;
13683 		} while (*fixup_cgroup_storage);
13684 	}
13685 
13686 	if (*fixup_percpu_cgroup_storage) {
13687 		map_fds[8] = create_cgroup_storage(true);
13688 		do {
13689 			prog[*fixup_percpu_cgroup_storage].imm = map_fds[8];
13690 			fixup_percpu_cgroup_storage++;
13691 		} while (*fixup_percpu_cgroup_storage);
13692 	}
13693 	if (*fixup_map_sockmap) {
13694 		map_fds[9] = create_map(BPF_MAP_TYPE_SOCKMAP, sizeof(int),
13695 					sizeof(int), 1);
13696 		do {
13697 			prog[*fixup_map_sockmap].imm = map_fds[9];
13698 			fixup_map_sockmap++;
13699 		} while (*fixup_map_sockmap);
13700 	}
13701 	if (*fixup_map_sockhash) {
13702 		map_fds[10] = create_map(BPF_MAP_TYPE_SOCKHASH, sizeof(int),
13703 					sizeof(int), 1);
13704 		do {
13705 			prog[*fixup_map_sockhash].imm = map_fds[10];
13706 			fixup_map_sockhash++;
13707 		} while (*fixup_map_sockhash);
13708 	}
13709 	if (*fixup_map_xskmap) {
13710 		map_fds[11] = create_map(BPF_MAP_TYPE_XSKMAP, sizeof(int),
13711 					sizeof(int), 1);
13712 		do {
13713 			prog[*fixup_map_xskmap].imm = map_fds[11];
13714 			fixup_map_xskmap++;
13715 		} while (*fixup_map_xskmap);
13716 	}
13717 	if (*fixup_map_stacktrace) {
13718 		map_fds[12] = create_map(BPF_MAP_TYPE_STACK_TRACE, sizeof(u32),
13719 					 sizeof(u64), 1);
13720 		do {
13721 			prog[*fixup_map_stacktrace].imm = map_fds[12];
13722 			fixup_map_stacktrace++;
13723 		} while (*fixup_map_stacktrace);
13724 	}
13725 }
13726 
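/* Run a single test: apply the fixups, load the program via
 * bpf_verify_program() and compare the outcome against the expected
 * result and error string (the *_unpriv variants when running
 * unprivileged).  If the program loads, additionally execute it once
 * with bpf_prog_test_run() and check its return value against
 * test->retval.
 */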
13727 static void do_test_single(struct bpf_test *test, bool unpriv,
13728 			   int *passes, int *errors)
13729 {
13730 	int fd_prog, expected_ret, reject_from_alignment;
13731 	int prog_len, prog_type = test->prog_type;
13732 	struct bpf_insn *prog = test->insns;
13733 	int map_fds[MAX_NR_MAPS];
13734 	const char *expected_err;
13735 	uint32_t retval;
13736 	int i, err;
13737 
13738 	for (i = 0; i < MAX_NR_MAPS; i++)
13739 		map_fds[i] = -1;
13740 
13741 	if (!prog_type)
13742 		prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
13743 	do_test_fixup(test, prog_type, prog, map_fds);
13744 	prog_len = probe_filter_length(prog);
13745 
13746 	fd_prog = bpf_verify_program(prog_type, prog, prog_len,
13747 				     test->flags & F_LOAD_WITH_STRICT_ALIGNMENT,
13748 				     "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);
13749 
13750 	expected_ret = unpriv && test->result_unpriv != UNDEF ?
13751 		       test->result_unpriv : test->result;
13752 	expected_err = unpriv && test->errstr_unpriv ?
13753 		       test->errstr_unpriv : test->errstr;
13754 
13755 	reject_from_alignment = fd_prog < 0 &&
13756 				(test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) &&
13757 				strstr(bpf_vlog, "Unknown alignment.");
13758 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
13759 	if (reject_from_alignment) {
13760 		printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n",
13761 		       strerror(errno));
13762 		goto fail_log;
13763 	}
13764 #endif
13765 	if (expected_ret == ACCEPT) {
13766 		if (fd_prog < 0 && !reject_from_alignment) {
13767 			printf("FAIL\nFailed to load prog '%s'!\n",
13768 			       strerror(errno));
13769 			goto fail_log;
13770 		}
13771 	} else {
13772 		if (fd_prog >= 0) {
13773 			printf("FAIL\nUnexpected success to load!\n");
13774 			goto fail_log;
13775 		}
13776 		if (!strstr(bpf_vlog, expected_err) && !reject_from_alignment) {
13777 			printf("FAIL\nUnexpected error message!\n\tEXP: %s\n\tRES: %s\n",
13778 			      expected_err, bpf_vlog);
13779 			goto fail_log;
13780 		}
13781 	}
13782 
13783 	if (fd_prog >= 0) {
13784 		__u8 tmp[TEST_DATA_LEN << 2];
13785 		__u32 size_tmp = sizeof(tmp);
13786 
13787 		err = bpf_prog_test_run(fd_prog, 1, test->data,
13788 					sizeof(test->data), tmp, &size_tmp,
13789 					&retval, NULL);
13790 		if (err && errno != 524/*ENOTSUPP*/ && errno != EPERM) {
13791 			printf("Unexpected bpf_prog_test_run error\n");
13792 			goto fail_log;
13793 		}
13794 		if (!err && retval != test->retval &&
13795 		    test->retval != POINTER_VALUE) {
13796 			printf("FAIL retval %d != %d\n", retval, test->retval);
13797 			goto fail_log;
13798 		}
13799 	}
13800 	(*passes)++;
13801 	printf("OK%s\n", reject_from_alignment ?
13802 	       " (NOTE: reject due to unknown alignment)" : "");
13803 close_fds:
13804 	close(fd_prog);
13805 	for (i = 0; i < MAX_NR_MAPS; i++)
13806 		close(map_fds[i]);
13807 	sched_yield();
13808 	return;
13809 fail_log:
13810 	(*errors)++;
13811 	printf("%s", bpf_vlog);
13812 	goto close_fds;
13813 }
13814 
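/* is_admin() reports whether CAP_SYS_ADMIN is in the effective set;
 * set_admin() toggles that capability so a privileged run can also
 * exercise the unprivileged code paths.
 */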
13815 static bool is_admin(void)
13816 {
13817 	cap_t caps;
13818 	cap_flag_value_t sysadmin = CAP_CLEAR;
13819 	const cap_value_t cap_val = CAP_SYS_ADMIN;
13820 
13821 #ifdef CAP_IS_SUPPORTED
13822 	if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
13823 		perror("cap_is_supported");
13824 		return false;
13825 	}
13826 #endif
13827 	caps = cap_get_proc();
13828 	if (!caps) {
13829 		perror("cap_get_proc");
13830 		return false;
13831 	}
13832 	if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
13833 		perror("cap_get_flag");
13834 	if (cap_free(caps))
13835 		perror("cap_free");
13836 	return (sysadmin == CAP_SET);
13837 }
13838 
13839 static int set_admin(bool admin)
13840 {
13841 	cap_t caps;
13842 	const cap_value_t cap_val = CAP_SYS_ADMIN;
13843 	int ret = -1;
13844 
13845 	caps = cap_get_proc();
13846 	if (!caps) {
13847 		perror("cap_get_proc");
13848 		return -1;
13849 	}
13850 	if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
13851 				admin ? CAP_SET : CAP_CLEAR)) {
13852 		perror("cap_set_flag");
13853 		goto out;
13854 	}
13855 	if (cap_set_proc(caps)) {
13856 		perror("cap_set_proc");
13857 		goto out;
13858 	}
13859 	ret = 0;
13860 out:
13861 	if (cap_free(caps))
13862 		perror("cap_free");
13863 	return ret;
13864 }
13865 
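/* Record whether the kernel/unprivileged_bpf_disabled sysctl is set;
 * unprivileged BPF is also treated as disabled if the sysctl cannot be
 * opened at all.
 */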
13866 static void get_unpriv_disabled(void)
13867 {
13868 	char buf[2];
13869 	FILE *fd;
13870 
13871 	fd = fopen("/proc/sys/"UNPRIV_SYSCTL, "r");
13872 	if (!fd) {
13873 		perror("fopen /proc/sys/"UNPRIV_SYSCTL);
13874 		unpriv_disabled = true;
13875 		return;
13876 	}
13877 	if (fgets(buf, 2, fd) == buf && atoi(buf))
13878 		unpriv_disabled = true;
13879 	fclose(fd);
13880 }
13881 
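/* Each selected test runs up to twice: as "#<n>/u" with CAP_SYS_ADMIN
 * dropped (only for tests without an explicit prog_type, and skipped
 * when unprivileged bpf is disabled), and as "#<n>/p" with full
 * privileges (skipped when the whole run is unprivileged).
 */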
13882 static int do_test(bool unpriv, unsigned int from, unsigned int to)
13883 {
13884 	int i, passes = 0, errors = 0, skips = 0;
13885 
13886 	for (i = from; i < to; i++) {
13887 		struct bpf_test *test = &tests[i];
13888 
13889 		/* Program types that are not supported by non-root are
13890 		 * skipped right away.
13891 		 */
13892 		if (!test->prog_type && unpriv_disabled) {
13893 			printf("#%d/u %s SKIP\n", i, test->descr);
13894 			skips++;
13895 		} else if (!test->prog_type) {
13896 			if (!unpriv)
13897 				set_admin(false);
13898 			printf("#%d/u %s ", i, test->descr);
13899 			do_test_single(test, true, &passes, &errors);
13900 			if (!unpriv)
13901 				set_admin(true);
13902 		}
13903 
13904 		if (unpriv) {
13905 			printf("#%d/p %s SKIP\n", i, test->descr);
13906 			skips++;
13907 		} else {
13908 			printf("#%d/p %s ", i, test->descr);
13909 			do_test_single(test, false, &passes, &errors);
13910 		}
13911 	}
13912 
13913 	printf("Summary: %d PASSED, %d SKIPPED, %d FAILED\n", passes,
13914 	       skips, errors);
13915 	return errors ? EXIT_FAILURE : EXIT_SUCCESS;
13916 }
13917 
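/* Usage sketch, assuming the selftest binary is built as test_verifier;
 * the optional arguments select tests by index:
 *
 *	./test_verifier		run all tests
 *	./test_verifier 7	run only test #7
 *	./test_verifier 5 10	run tests #5 through #10 inclusive
 */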
13918 int main(int argc, char **argv)
13919 {
13920 	unsigned int from = 0, to = ARRAY_SIZE(tests);
13921 	bool unpriv = !is_admin();
13922 
13923 	if (argc == 3) {
13924 		unsigned int l = atoi(argv[argc - 2]);
13925 		unsigned int u = atoi(argv[argc - 1]);
13926 
13927 		if (l < to && u < to) {
13928 			from = l;
13929 			to   = u + 1;
13930 		}
13931 	} else if (argc == 2) {
13932 		unsigned int t = atoi(argv[argc - 1]);
13933 
13934 		if (t < to) {
13935 			from = t;
13936 			to   = t + 1;
13937 		}
13938 	}
13939 
13940 	get_unpriv_disabled();
13941 	if (unpriv && unpriv_disabled) {
13942 		printf("Cannot run as unprivileged user with sysctl %s.\n",
13943 		       UNPRIV_SYSCTL);
13944 		return EXIT_FAILURE;
13945 	}
13946 
13947 	bpf_semi_rand_init();
13948 	return do_test(unpriv, from, to);
13949 }
13950