xref: /linux/tools/testing/selftests/bpf/test_verifier.c (revision 4494ce4fb4ff42946f48bbc8a5ac55ee18dca600)
1 /*
2  * Testsuite for eBPF verifier
3  *
4  * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
5  * Copyright (c) 2017 Facebook
6  * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
7  *
8  * This program is free software; you can redistribute it and/or
9  * modify it under the terms of version 2 of the GNU General Public
10  * License as published by the Free Software Foundation.
11  */
12 
13 #include <endian.h>
14 #include <asm/types.h>
15 #include <linux/types.h>
16 #include <stdint.h>
17 #include <stdio.h>
18 #include <stdlib.h>
19 #include <unistd.h>
20 #include <errno.h>
21 #include <string.h>
22 #include <stddef.h>
23 #include <stdbool.h>
24 #include <sched.h>
25 #include <limits.h>
26 
27 #include <sys/capability.h>
28 
29 #include <linux/unistd.h>
30 #include <linux/filter.h>
31 #include <linux/bpf_perf_event.h>
32 #include <linux/bpf.h>
33 #include <linux/if_ether.h>
34 
35 #include <bpf/bpf.h>
36 
37 #ifdef HAVE_GENHDR
38 # include "autoconf.h"
39 #else
40 # if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
41 #  define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
42 # endif
43 #endif
44 #include "bpf_rlimit.h"
45 #include "bpf_rand.h"
46 #include "bpf_util.h"
47 #include "../../../include/linux/filter.h"
48 
49 #define MAX_INSNS	BPF_MAXINSNS
50 #define MAX_FIXUPS	8
51 #define MAX_NR_MAPS	13
52 #define MAX_TEST_RUNS	8
53 #define POINTER_VALUE	0xcafe4all
54 #define TEST_DATA_LEN	64
55 
56 #define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS	(1 << 0)
57 #define F_LOAD_WITH_STRICT_ALIGNMENT		(1 << 1)
58 
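/* The kernel.unprivileged_bpf_disabled sysctl, when set, prevents
 * unprivileged users from using the bpf() syscall at all; the flag below
 * records its state so the unprivileged expectations of a test are only
 * exercised when they actually can be.
 */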
59 #define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
60 static bool unpriv_disabled = false;
61 
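/* One entry per test case: the program under test in .insns, the expected
 * verdict in .result/.result_unpriv, substrings expected in the verifier log
 * in .errstr/.errstr_unpriv, and an optional expected return value from a
 * test run in .retval.  The fixup_* arrays list instruction indices whose
 * immediate is patched by the test harness with the fd of a freshly created
 * map of the corresponding type before the program is loaded.
 */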
62 struct bpf_test {
63 	const char *descr;
64 	struct bpf_insn	insns[MAX_INSNS];
65 	int fixup_map_hash_8b[MAX_FIXUPS];
66 	int fixup_map_hash_48b[MAX_FIXUPS];
67 	int fixup_map_hash_16b[MAX_FIXUPS];
68 	int fixup_map_array_48b[MAX_FIXUPS];
69 	int fixup_map_sockmap[MAX_FIXUPS];
70 	int fixup_map_sockhash[MAX_FIXUPS];
71 	int fixup_map_xskmap[MAX_FIXUPS];
72 	int fixup_map_stacktrace[MAX_FIXUPS];
73 	int fixup_prog1[MAX_FIXUPS];
74 	int fixup_prog2[MAX_FIXUPS];
75 	int fixup_map_in_map[MAX_FIXUPS];
76 	int fixup_cgroup_storage[MAX_FIXUPS];
77 	int fixup_percpu_cgroup_storage[MAX_FIXUPS];
78 	const char *errstr;
79 	const char *errstr_unpriv;
80 	uint32_t retval, retval_unpriv, insn_processed;
81 	enum {
82 		UNDEF,
83 		ACCEPT,
84 		REJECT
85 	} result, result_unpriv;
86 	enum bpf_prog_type prog_type;
87 	uint8_t flags;
88 	__u8 data[TEST_DATA_LEN];
89 	void (*fill_helper)(struct bpf_test *self);
90 	uint8_t runs;
91 	struct {
92 		uint32_t retval, retval_unpriv;
93 		union {
94 			__u8 data[TEST_DATA_LEN];
95 			__u64 data64[TEST_DATA_LEN / 8];
96 		};
97 	} retvals[MAX_TEST_RUNS];
98 };
99 
100 /* Note we want this to be 64 bit aligned so that the end of our array is
101  * actually the end of the structure.
102  */
103 #define MAX_ENTRIES 11
104 
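/* With MAX_ENTRIES == 11, sizeof(struct test_val) is 48 bytes, the value
 * size expected by the *_48b map fixups; struct other_val below is the
 * 16 byte value used by fixup_map_hash_16b.
 */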
105 struct test_val {
106 	unsigned int index;
107 	int foo[MAX_ENTRIES];
108 };
109 
110 struct other_val {
111 	long long foo;
112 	long long bar;
113 };
114 
115 static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
116 {
117 	/* test: {skb->data[0], vlan_push} x PUSH_CNT + {skb->data[0], vlan_pop} x PUSH_CNT, repeated 5 times */
118 #define PUSH_CNT 51
119 	unsigned int len = BPF_MAXINSNS;
120 	struct bpf_insn *insn = self->insns;
121 	int i = 0, j, k = 0;
122 
123 	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
124 loop:
125 	for (j = 0; j < PUSH_CNT; j++) {
126 		insn[i++] = BPF_LD_ABS(BPF_B, 0);
127 		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2);
128 		i++;
129 		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
130 		insn[i++] = BPF_MOV64_IMM(BPF_REG_2, 1);
131 		insn[i++] = BPF_MOV64_IMM(BPF_REG_3, 2);
132 		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
133 					 BPF_FUNC_skb_vlan_push);
134 		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2);
135 		i++;
136 	}
137 
138 	for (j = 0; j < PUSH_CNT; j++) {
139 		insn[i++] = BPF_LD_ABS(BPF_B, 0);
140 		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2);
141 		i++;
142 		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
143 		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
144 					 BPF_FUNC_skb_vlan_pop);
145 		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2);
146 		i++;
147 	}
148 	if (++k < 5)
149 		goto loop;
150 
151 	for (; i < len - 1; i++)
152 		insn[i] = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 0xbef);
153 	insn[len - 1] = BPF_EXIT_INSN();
154 }
155 
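/* Build a BPF_MAXINSNS long program where the first LD_ABS conditionally
 * jumps over a long run of further LD_ABS instructions, exercising jump
 * offset handling around the internal LD_ABS conversion.
 */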
156 static void bpf_fill_jump_around_ld_abs(struct bpf_test *self)
157 {
158 	struct bpf_insn *insn = self->insns;
159 	unsigned int len = BPF_MAXINSNS;
160 	int i = 0;
161 
162 	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
163 	insn[i++] = BPF_LD_ABS(BPF_B, 0);
164 	insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 10, len - i - 2);
165 	i++;
166 	while (i < len - 1)
167 		insn[i++] = BPF_LD_ABS(BPF_B, 1);
168 	insn[i] = BPF_EXIT_INSN();
169 }
170 
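/* On entry self->retval holds the desired program length; the body XORs a
 * series of semi-random 64 bit immediates into R0 and folds the upper half
 * into the lower 32 bits, and self->retval is rewritten to the value the
 * program is expected to return.
 */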
171 static void bpf_fill_rand_ld_dw(struct bpf_test *self)
172 {
173 	struct bpf_insn *insn = self->insns;
174 	uint64_t res = 0;
175 	int i = 0;
176 
177 	insn[i++] = BPF_MOV32_IMM(BPF_REG_0, 0);
178 	while (i < self->retval) {
179 		uint64_t val = bpf_semi_rand_get();
180 		struct bpf_insn tmp[2] = { BPF_LD_IMM64(BPF_REG_1, val) };
181 
182 		res ^= val;
183 		insn[i++] = tmp[0];
184 		insn[i++] = tmp[1];
185 		insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
186 	}
187 	insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
188 	insn[i++] = BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32);
189 	insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
190 	insn[i] = BPF_EXIT_INSN();
191 	res ^= (res >> 32);
192 	self->retval = (uint32_t)res;
193 }
194 
195 /* BPF_SK_LOOKUP expands to 13 instructions; account for them when computing map fixup offsets. */
196 #define BPF_SK_LOOKUP							\
197 	/* struct bpf_sock_tuple tuple = {} */				\
198 	BPF_MOV64_IMM(BPF_REG_2, 0),					\
199 	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8),			\
200 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -16),		\
201 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -24),		\
202 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -32),		\
203 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -40),		\
204 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -48),		\
205 	/* sk = sk_lookup_tcp(ctx, &tuple, sizeof tuple, 0, 0) */	\
206 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),				\
207 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),				\
208 	BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)),	\
209 	BPF_MOV64_IMM(BPF_REG_4, 0),					\
210 	BPF_MOV64_IMM(BPF_REG_5, 0),					\
211 	BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp)
212 
213 static struct bpf_test tests[] = {
214 	{
215 		"add+sub+mul",
216 		.insns = {
217 			BPF_MOV64_IMM(BPF_REG_1, 1),
218 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
219 			BPF_MOV64_IMM(BPF_REG_2, 3),
220 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
221 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
222 			BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
223 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
224 			BPF_EXIT_INSN(),
225 		},
226 		.result = ACCEPT,
227 		.retval = -3,
228 	},
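	/* eBPF defines division by zero to yield 0 and modulo by zero to
	 * leave the destination register unchanged; the tests below are all
	 * accepted and their .retval fields encode that defined runtime
	 * behaviour.
	 */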
229 	{
230 		"DIV32 by 0, zero check 1",
231 		.insns = {
232 			BPF_MOV32_IMM(BPF_REG_0, 42),
233 			BPF_MOV32_IMM(BPF_REG_1, 0),
234 			BPF_MOV32_IMM(BPF_REG_2, 1),
235 			BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
236 			BPF_EXIT_INSN(),
237 		},
238 		.result = ACCEPT,
239 		.retval = 42,
240 	},
241 	{
242 		"DIV32 by 0, zero check 2",
243 		.insns = {
244 			BPF_MOV32_IMM(BPF_REG_0, 42),
245 			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
246 			BPF_MOV32_IMM(BPF_REG_2, 1),
247 			BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
248 			BPF_EXIT_INSN(),
249 		},
250 		.result = ACCEPT,
251 		.retval = 42,
252 	},
253 	{
254 		"DIV64 by 0, zero check",
255 		.insns = {
256 			BPF_MOV32_IMM(BPF_REG_0, 42),
257 			BPF_MOV32_IMM(BPF_REG_1, 0),
258 			BPF_MOV32_IMM(BPF_REG_2, 1),
259 			BPF_ALU64_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
260 			BPF_EXIT_INSN(),
261 		},
262 		.result = ACCEPT,
263 		.retval = 42,
264 	},
265 	{
266 		"MOD32 by 0, zero check 1",
267 		.insns = {
268 			BPF_MOV32_IMM(BPF_REG_0, 42),
269 			BPF_MOV32_IMM(BPF_REG_1, 0),
270 			BPF_MOV32_IMM(BPF_REG_2, 1),
271 			BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
272 			BPF_EXIT_INSN(),
273 		},
274 		.result = ACCEPT,
275 		.retval = 42,
276 	},
277 	{
278 		"MOD32 by 0, zero check 2",
279 		.insns = {
280 			BPF_MOV32_IMM(BPF_REG_0, 42),
281 			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
282 			BPF_MOV32_IMM(BPF_REG_2, 1),
283 			BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
284 			BPF_EXIT_INSN(),
285 		},
286 		.result = ACCEPT,
287 		.retval = 42,
288 	},
289 	{
290 		"MOD64 by 0, zero check",
291 		.insns = {
292 			BPF_MOV32_IMM(BPF_REG_0, 42),
293 			BPF_MOV32_IMM(BPF_REG_1, 0),
294 			BPF_MOV32_IMM(BPF_REG_2, 1),
295 			BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
296 			BPF_EXIT_INSN(),
297 		},
298 		.result = ACCEPT,
299 		.retval = 42,
300 	},
301 	{
302 		"DIV32 by 0, zero check ok, cls",
303 		.insns = {
304 			BPF_MOV32_IMM(BPF_REG_0, 42),
305 			BPF_MOV32_IMM(BPF_REG_1, 2),
306 			BPF_MOV32_IMM(BPF_REG_2, 16),
307 			BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
308 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
309 			BPF_EXIT_INSN(),
310 		},
311 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
312 		.result = ACCEPT,
313 		.retval = 8,
314 	},
315 	{
316 		"DIV32 by 0, zero check 1, cls",
317 		.insns = {
318 			BPF_MOV32_IMM(BPF_REG_1, 0),
319 			BPF_MOV32_IMM(BPF_REG_0, 1),
320 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
321 			BPF_EXIT_INSN(),
322 		},
323 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
324 		.result = ACCEPT,
325 		.retval = 0,
326 	},
327 	{
328 		"DIV32 by 0, zero check 2, cls",
329 		.insns = {
330 			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
331 			BPF_MOV32_IMM(BPF_REG_0, 1),
332 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
333 			BPF_EXIT_INSN(),
334 		},
335 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
336 		.result = ACCEPT,
337 		.retval = 0,
338 	},
339 	{
340 		"DIV64 by 0, zero check, cls",
341 		.insns = {
342 			BPF_MOV32_IMM(BPF_REG_1, 0),
343 			BPF_MOV32_IMM(BPF_REG_0, 1),
344 			BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
345 			BPF_EXIT_INSN(),
346 		},
347 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
348 		.result = ACCEPT,
349 		.retval = 0,
350 	},
351 	{
352 		"MOD32 by 0, zero check ok, cls",
353 		.insns = {
354 			BPF_MOV32_IMM(BPF_REG_0, 42),
355 			BPF_MOV32_IMM(BPF_REG_1, 3),
356 			BPF_MOV32_IMM(BPF_REG_2, 5),
357 			BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
358 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
359 			BPF_EXIT_INSN(),
360 		},
361 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
362 		.result = ACCEPT,
363 		.retval = 2,
364 	},
365 	{
366 		"MOD32 by 0, zero check 1, cls",
367 		.insns = {
368 			BPF_MOV32_IMM(BPF_REG_1, 0),
369 			BPF_MOV32_IMM(BPF_REG_0, 1),
370 			BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
371 			BPF_EXIT_INSN(),
372 		},
373 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
374 		.result = ACCEPT,
375 		.retval = 1,
376 	},
377 	{
378 		"MOD32 by 0, zero check 2, cls",
379 		.insns = {
380 			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
381 			BPF_MOV32_IMM(BPF_REG_0, 1),
382 			BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
383 			BPF_EXIT_INSN(),
384 		},
385 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
386 		.result = ACCEPT,
387 		.retval = 1,
388 	},
389 	{
390 		"MOD64 by 0, zero check 1, cls",
391 		.insns = {
392 			BPF_MOV32_IMM(BPF_REG_1, 0),
393 			BPF_MOV32_IMM(BPF_REG_0, 2),
394 			BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
395 			BPF_EXIT_INSN(),
396 		},
397 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
398 		.result = ACCEPT,
399 		.retval = 2,
400 	},
401 	{
402 		"MOD64 by 0, zero check 2, cls",
403 		.insns = {
404 			BPF_MOV32_IMM(BPF_REG_1, 0),
405 			BPF_MOV32_IMM(BPF_REG_0, -1),
406 			BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
407 			BPF_EXIT_INSN(),
408 		},
409 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
410 		.result = ACCEPT,
411 		.retval = -1,
412 	},
413 	/* Just make sure that JITs use udiv/umod, as we would otherwise get
414 	 * an exception from the INT_MIN/-1 overflow, just as with division
415 	 * by zero.
416 	 */
417 	{
418 		"DIV32 overflow, check 1",
419 		.insns = {
420 			BPF_MOV32_IMM(BPF_REG_1, -1),
421 			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
422 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
423 			BPF_EXIT_INSN(),
424 		},
425 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
426 		.result = ACCEPT,
427 		.retval = 0,
428 	},
429 	{
430 		"DIV32 overflow, check 2",
431 		.insns = {
432 			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
433 			BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, -1),
434 			BPF_EXIT_INSN(),
435 		},
436 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
437 		.result = ACCEPT,
438 		.retval = 0,
439 	},
440 	{
441 		"DIV64 overflow, check 1",
442 		.insns = {
443 			BPF_MOV64_IMM(BPF_REG_1, -1),
444 			BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
445 			BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
446 			BPF_EXIT_INSN(),
447 		},
448 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
449 		.result = ACCEPT,
450 		.retval = 0,
451 	},
452 	{
453 		"DIV64 overflow, check 2",
454 		.insns = {
455 			BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
456 			BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, -1),
457 			BPF_EXIT_INSN(),
458 		},
459 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
460 		.result = ACCEPT,
461 		.retval = 0,
462 	},
463 	{
464 		"MOD32 overflow, check 1",
465 		.insns = {
466 			BPF_MOV32_IMM(BPF_REG_1, -1),
467 			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
468 			BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
469 			BPF_EXIT_INSN(),
470 		},
471 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
472 		.result = ACCEPT,
473 		.retval = INT_MIN,
474 	},
475 	{
476 		"MOD32 overflow, check 2",
477 		.insns = {
478 			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
479 			BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, -1),
480 			BPF_EXIT_INSN(),
481 		},
482 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
483 		.result = ACCEPT,
484 		.retval = INT_MIN,
485 	},
486 	{
487 		"MOD64 overflow, check 1",
488 		.insns = {
489 			BPF_MOV64_IMM(BPF_REG_1, -1),
490 			BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
491 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
492 			BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
493 			BPF_MOV32_IMM(BPF_REG_0, 0),
494 			BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
495 			BPF_MOV32_IMM(BPF_REG_0, 1),
496 			BPF_EXIT_INSN(),
497 		},
498 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
499 		.result = ACCEPT,
500 		.retval = 1,
501 	},
502 	{
503 		"MOD64 overflow, check 2",
504 		.insns = {
505 			BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
506 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
507 			BPF_ALU64_IMM(BPF_MOD, BPF_REG_2, -1),
508 			BPF_MOV32_IMM(BPF_REG_0, 0),
509 			BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
510 			BPF_MOV32_IMM(BPF_REG_0, 1),
511 			BPF_EXIT_INSN(),
512 		},
513 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
514 		.result = ACCEPT,
515 		.retval = 1,
516 	},
517 	{
518 		"xor32 zero extend check",
519 		.insns = {
520 			BPF_MOV32_IMM(BPF_REG_2, -1),
521 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 32),
522 			BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 0xffff),
523 			BPF_ALU32_REG(BPF_XOR, BPF_REG_2, BPF_REG_2),
524 			BPF_MOV32_IMM(BPF_REG_0, 2),
525 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 1),
526 			BPF_MOV32_IMM(BPF_REG_0, 1),
527 			BPF_EXIT_INSN(),
528 		},
529 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
530 		.result = ACCEPT,
531 		.retval = 1,
532 	},
533 	{
534 		"empty prog",
535 		.insns = {
536 		},
537 		.errstr = "unknown opcode 00",
538 		.result = REJECT,
539 	},
540 	{
541 		"only exit insn",
542 		.insns = {
543 			BPF_EXIT_INSN(),
544 		},
545 		.errstr = "R0 !read_ok",
546 		.result = REJECT,
547 	},
548 	{
549 		"unreachable",
550 		.insns = {
551 			BPF_EXIT_INSN(),
552 			BPF_EXIT_INSN(),
553 		},
554 		.errstr = "unreachable",
555 		.result = REJECT,
556 	},
557 	{
558 		"unreachable2",
559 		.insns = {
560 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
561 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
562 			BPF_EXIT_INSN(),
563 		},
564 		.errstr = "unreachable",
565 		.result = REJECT,
566 	},
567 	{
568 		"out of range jump",
569 		.insns = {
570 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
571 			BPF_EXIT_INSN(),
572 		},
573 		.errstr = "jump out of range",
574 		.result = REJECT,
575 	},
576 	{
577 		"out of range jump2",
578 		.insns = {
579 			BPF_JMP_IMM(BPF_JA, 0, 0, -2),
580 			BPF_EXIT_INSN(),
581 		},
582 		.errstr = "jump out of range",
583 		.result = REJECT,
584 	},
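	/* BPF_LD | BPF_IMM | BPF_DW is a two-slot instruction: the second
	 * slot must have a zero opcode and carries only the upper 32 bits of
	 * the immediate.  The ld_imm64 tests feed the verifier truncated or
	 * malformed second halves.
	 */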
585 	{
586 		"test1 ld_imm64",
587 		.insns = {
588 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
589 			BPF_LD_IMM64(BPF_REG_0, 0),
590 			BPF_LD_IMM64(BPF_REG_0, 0),
591 			BPF_LD_IMM64(BPF_REG_0, 1),
592 			BPF_LD_IMM64(BPF_REG_0, 1),
593 			BPF_MOV64_IMM(BPF_REG_0, 2),
594 			BPF_EXIT_INSN(),
595 		},
596 		.errstr = "invalid BPF_LD_IMM insn",
597 		.errstr_unpriv = "R1 pointer comparison",
598 		.result = REJECT,
599 	},
600 	{
601 		"test2 ld_imm64",
602 		.insns = {
603 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
604 			BPF_LD_IMM64(BPF_REG_0, 0),
605 			BPF_LD_IMM64(BPF_REG_0, 0),
606 			BPF_LD_IMM64(BPF_REG_0, 1),
607 			BPF_LD_IMM64(BPF_REG_0, 1),
608 			BPF_EXIT_INSN(),
609 		},
610 		.errstr = "invalid BPF_LD_IMM insn",
611 		.errstr_unpriv = "R1 pointer comparison",
612 		.result = REJECT,
613 	},
614 	{
615 		"test3 ld_imm64",
616 		.insns = {
617 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
618 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
619 			BPF_LD_IMM64(BPF_REG_0, 0),
620 			BPF_LD_IMM64(BPF_REG_0, 0),
621 			BPF_LD_IMM64(BPF_REG_0, 1),
622 			BPF_LD_IMM64(BPF_REG_0, 1),
623 			BPF_EXIT_INSN(),
624 		},
625 		.errstr = "invalid bpf_ld_imm64 insn",
626 		.result = REJECT,
627 	},
628 	{
629 		"test4 ld_imm64",
630 		.insns = {
631 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
632 			BPF_EXIT_INSN(),
633 		},
634 		.errstr = "invalid bpf_ld_imm64 insn",
635 		.result = REJECT,
636 	},
637 	{
638 		"test5 ld_imm64",
639 		.insns = {
640 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
641 		},
642 		.errstr = "invalid bpf_ld_imm64 insn",
643 		.result = REJECT,
644 	},
645 	{
646 		"test6 ld_imm64",
647 		.insns = {
648 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
649 			BPF_RAW_INSN(0, 0, 0, 0, 0),
650 			BPF_EXIT_INSN(),
651 		},
652 		.result = ACCEPT,
653 	},
654 	{
655 		"test7 ld_imm64",
656 		.insns = {
657 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
658 			BPF_RAW_INSN(0, 0, 0, 0, 1),
659 			BPF_EXIT_INSN(),
660 		},
661 		.result = ACCEPT,
662 		.retval = 1,
663 	},
664 	{
665 		"test8 ld_imm64",
666 		.insns = {
667 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1),
668 			BPF_RAW_INSN(0, 0, 0, 0, 1),
669 			BPF_EXIT_INSN(),
670 		},
671 		.errstr = "uses reserved fields",
672 		.result = REJECT,
673 	},
674 	{
675 		"test9 ld_imm64",
676 		.insns = {
677 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
678 			BPF_RAW_INSN(0, 0, 0, 1, 1),
679 			BPF_EXIT_INSN(),
680 		},
681 		.errstr = "invalid bpf_ld_imm64 insn",
682 		.result = REJECT,
683 	},
684 	{
685 		"test10 ld_imm64",
686 		.insns = {
687 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
688 			BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1),
689 			BPF_EXIT_INSN(),
690 		},
691 		.errstr = "invalid bpf_ld_imm64 insn",
692 		.result = REJECT,
693 	},
694 	{
695 		"test11 ld_imm64",
696 		.insns = {
697 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
698 			BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
699 			BPF_EXIT_INSN(),
700 		},
701 		.errstr = "invalid bpf_ld_imm64 insn",
702 		.result = REJECT,
703 	},
704 	{
705 		"test12 ld_imm64",
706 		.insns = {
707 			BPF_MOV64_IMM(BPF_REG_1, 0),
708 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
709 			BPF_RAW_INSN(0, 0, 0, 0, 1),
710 			BPF_EXIT_INSN(),
711 		},
712 		.errstr = "not pointing to valid bpf_map",
713 		.result = REJECT,
714 	},
715 	{
716 		"test13 ld_imm64",
717 		.insns = {
718 			BPF_MOV64_IMM(BPF_REG_1, 0),
719 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
720 			BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
721 			BPF_EXIT_INSN(),
722 		},
723 		.errstr = "invalid bpf_ld_imm64 insn",
724 		.result = REJECT,
725 	},
726 	{
727 		"arsh32 on imm",
728 		.insns = {
729 			BPF_MOV64_IMM(BPF_REG_0, 1),
730 			BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 5),
731 			BPF_EXIT_INSN(),
732 		},
733 		.result = ACCEPT,
734 		.retval = 0,
735 	},
736 	{
737 		"arsh32 on imm 2",
738 		.insns = {
739 			BPF_LD_IMM64(BPF_REG_0, 0x1122334485667788),
740 			BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 7),
741 			BPF_EXIT_INSN(),
742 		},
743 		.result = ACCEPT,
744 		.retval = -16069393,
745 	},
746 	{
747 		"arsh32 on reg",
748 		.insns = {
749 			BPF_MOV64_IMM(BPF_REG_0, 1),
750 			BPF_MOV64_IMM(BPF_REG_1, 5),
751 			BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
752 			BPF_EXIT_INSN(),
753 		},
754 		.result = ACCEPT,
755 		.retval = 0,
756 	},
757 	{
758 		"arsh32 on reg 2",
759 		.insns = {
760 			BPF_LD_IMM64(BPF_REG_0, 0xffff55667788),
761 			BPF_MOV64_IMM(BPF_REG_1, 15),
762 			BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
763 			BPF_EXIT_INSN(),
764 		},
765 		.result = ACCEPT,
766 		.retval = 43724,
767 	},
768 	{
769 		"arsh64 on imm",
770 		.insns = {
771 			BPF_MOV64_IMM(BPF_REG_0, 1),
772 			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_0, 5),
773 			BPF_EXIT_INSN(),
774 		},
775 		.result = ACCEPT,
776 	},
777 	{
778 		"arsh64 on reg",
779 		.insns = {
780 			BPF_MOV64_IMM(BPF_REG_0, 1),
781 			BPF_MOV64_IMM(BPF_REG_1, 5),
782 			BPF_ALU64_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
783 			BPF_EXIT_INSN(),
784 		},
785 		.result = ACCEPT,
786 	},
787 	{
788 		"no bpf_exit",
789 		.insns = {
790 			BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
791 		},
792 		.errstr = "not an exit",
793 		.result = REJECT,
794 	},
795 	{
796 		"loop (back-edge)",
797 		.insns = {
798 			BPF_JMP_IMM(BPF_JA, 0, 0, -1),
799 			BPF_EXIT_INSN(),
800 		},
801 		.errstr = "back-edge",
802 		.result = REJECT,
803 	},
804 	{
805 		"loop2 (back-edge)",
806 		.insns = {
807 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
808 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
809 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
810 			BPF_JMP_IMM(BPF_JA, 0, 0, -4),
811 			BPF_EXIT_INSN(),
812 		},
813 		.errstr = "back-edge",
814 		.result = REJECT,
815 	},
816 	{
817 		"conditional loop",
818 		.insns = {
819 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
820 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
821 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
822 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
823 			BPF_EXIT_INSN(),
824 		},
825 		.errstr = "back-edge",
826 		.result = REJECT,
827 	},
828 	{
829 		"read uninitialized register",
830 		.insns = {
831 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
832 			BPF_EXIT_INSN(),
833 		},
834 		.errstr = "R2 !read_ok",
835 		.result = REJECT,
836 	},
837 	{
838 		"read invalid register",
839 		.insns = {
840 			BPF_MOV64_REG(BPF_REG_0, -1),
841 			BPF_EXIT_INSN(),
842 		},
843 		.errstr = "R15 is invalid",
844 		.result = REJECT,
845 	},
846 	{
847 		"program doesn't init R0 before exit",
848 		.insns = {
849 			BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
850 			BPF_EXIT_INSN(),
851 		},
852 		.errstr = "R0 !read_ok",
853 		.result = REJECT,
854 	},
855 	{
856 		"program doesn't init R0 before exit in all branches",
857 		.insns = {
858 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
859 			BPF_MOV64_IMM(BPF_REG_0, 1),
860 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
861 			BPF_EXIT_INSN(),
862 		},
863 		.errstr = "R0 !read_ok",
864 		.errstr_unpriv = "R1 pointer comparison",
865 		.result = REJECT,
866 	},
867 	{
868 		"stack out of bounds",
869 		.insns = {
870 			BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
871 			BPF_EXIT_INSN(),
872 		},
873 		.errstr = "invalid stack",
874 		.result = REJECT,
875 	},
876 	{
877 		"invalid call insn1",
878 		.insns = {
879 			BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
880 			BPF_EXIT_INSN(),
881 		},
882 		.errstr = "unknown opcode 8d",
883 		.result = REJECT,
884 	},
885 	{
886 		"invalid call insn2",
887 		.insns = {
888 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
889 			BPF_EXIT_INSN(),
890 		},
891 		.errstr = "BPF_CALL uses reserved",
892 		.result = REJECT,
893 	},
894 	{
895 		"invalid function call",
896 		.insns = {
897 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
898 			BPF_EXIT_INSN(),
899 		},
900 		.errstr = "invalid func unknown#1234567",
901 		.result = REJECT,
902 	},
903 	{
904 		"uninitialized stack1",
905 		.insns = {
906 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
907 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
908 			BPF_LD_MAP_FD(BPF_REG_1, 0),
909 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
910 				     BPF_FUNC_map_lookup_elem),
911 			BPF_EXIT_INSN(),
912 		},
913 		.fixup_map_hash_8b = { 2 },
914 		.errstr = "invalid indirect read from stack",
915 		.result = REJECT,
916 	},
917 	{
918 		"uninitialized stack2",
919 		.insns = {
920 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
921 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
922 			BPF_EXIT_INSN(),
923 		},
924 		.errstr = "invalid read from stack",
925 		.result = REJECT,
926 	},
927 	{
928 		"invalid fp arithmetic",
929 		/* If this ever gets changed, make sure JITs can deal with it. */
930 		.insns = {
931 			BPF_MOV64_IMM(BPF_REG_0, 0),
932 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
933 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8),
934 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
935 			BPF_EXIT_INSN(),
936 		},
937 		.errstr = "R1 subtraction from stack pointer",
938 		.result = REJECT,
939 	},
940 	{
941 		"non-invalid fp arithmetic",
942 		.insns = {
943 			BPF_MOV64_IMM(BPF_REG_0, 0),
944 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
945 			BPF_EXIT_INSN(),
946 		},
947 		.result = ACCEPT,
948 	},
949 	{
950 		"invalid argument register",
951 		.insns = {
952 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
953 				     BPF_FUNC_get_cgroup_classid),
954 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
955 				     BPF_FUNC_get_cgroup_classid),
956 			BPF_EXIT_INSN(),
957 		},
958 		.errstr = "R1 !read_ok",
959 		.result = REJECT,
960 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
961 	},
962 	{
963 		"non-invalid argument register",
964 		.insns = {
965 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
966 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
967 				     BPF_FUNC_get_cgroup_classid),
968 			BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
969 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
970 				     BPF_FUNC_get_cgroup_classid),
971 			BPF_EXIT_INSN(),
972 		},
973 		.result = ACCEPT,
974 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
975 	},
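	/* Spill/fill: a pointer spilled to the stack with a 64 bit store
	 * keeps its type when filled back; partially overwriting the slot
	 * turns the fill into an unknown scalar, and unprivileged programs
	 * may not corrupt a spilled pointer at all.
	 */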
976 	{
977 		"check valid spill/fill",
978 		.insns = {
979 			/* spill R1(ctx) into stack */
980 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
981 			/* fill it back into R2 */
982 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
983 			/* should be able to access R0 = *(R2 + 8) */
984 			/* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
985 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
986 			BPF_EXIT_INSN(),
987 		},
988 		.errstr_unpriv = "R0 leaks addr",
989 		.result = ACCEPT,
990 		.result_unpriv = REJECT,
991 		.retval = POINTER_VALUE,
992 	},
993 	{
994 		"check valid spill/fill, skb mark",
995 		.insns = {
996 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
997 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
998 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
999 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
1000 				    offsetof(struct __sk_buff, mark)),
1001 			BPF_EXIT_INSN(),
1002 		},
1003 		.result = ACCEPT,
1004 		.result_unpriv = ACCEPT,
1005 	},
1006 	{
1007 		"check corrupted spill/fill",
1008 		.insns = {
1009 			/* spill R1(ctx) into stack */
1010 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
1011 			/* corrupt the spilled R1 pointer on the stack */
1012 			BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
1013 			/* Filling it back into R0 is fine for priv.
1014 			 * R0 now becomes SCALAR_VALUE.
1015 			 */
1016 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
1017 			/* Load from R0 should fail. */
1018 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8),
1019 			BPF_EXIT_INSN(),
1020 		},
1021 		.errstr_unpriv = "attempt to corrupt spilled",
1022 		.errstr = "R0 invalid mem access 'inv",
1023 		.result = REJECT,
1024 	},
1025 	{
1026 		"check corrupted spill/fill, LSB",
1027 		.insns = {
1028 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
1029 			BPF_ST_MEM(BPF_H, BPF_REG_10, -8, 0xcafe),
1030 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
1031 			BPF_EXIT_INSN(),
1032 		},
1033 		.errstr_unpriv = "attempt to corrupt spilled",
1034 		.result_unpriv = REJECT,
1035 		.result = ACCEPT,
1036 		.retval = POINTER_VALUE,
1037 	},
1038 	{
1039 		"check corrupted spill/fill, MSB",
1040 		.insns = {
1041 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
1042 			BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0x12345678),
1043 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
1044 			BPF_EXIT_INSN(),
1045 		},
1046 		.errstr_unpriv = "attempt to corrupt spilled",
1047 		.result_unpriv = REJECT,
1048 		.result = ACCEPT,
1049 		.retval = POINTER_VALUE,
1050 	},
1051 	{
1052 		"invalid src register in STX",
1053 		.insns = {
1054 			BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
1055 			BPF_EXIT_INSN(),
1056 		},
1057 		.errstr = "R15 is invalid",
1058 		.result = REJECT,
1059 	},
1060 	{
1061 		"invalid dst register in STX",
1062 		.insns = {
1063 			BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
1064 			BPF_EXIT_INSN(),
1065 		},
1066 		.errstr = "R14 is invalid",
1067 		.result = REJECT,
1068 	},
1069 	{
1070 		"invalid dst register in ST",
1071 		.insns = {
1072 			BPF_ST_MEM(BPF_B, 14, -1, -1),
1073 			BPF_EXIT_INSN(),
1074 		},
1075 		.errstr = "R14 is invalid",
1076 		.result = REJECT,
1077 	},
1078 	{
1079 		"invalid src register in LDX",
1080 		.insns = {
1081 			BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
1082 			BPF_EXIT_INSN(),
1083 		},
1084 		.errstr = "R12 is invalid",
1085 		.result = REJECT,
1086 	},
1087 	{
1088 		"invalid dst register in LDX",
1089 		.insns = {
1090 			BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
1091 			BPF_EXIT_INSN(),
1092 		},
1093 		.errstr = "R11 is invalid",
1094 		.result = REJECT,
1095 	},
1096 	{
1097 		"junk insn",
1098 		.insns = {
1099 			BPF_RAW_INSN(0, 0, 0, 0, 0),
1100 			BPF_EXIT_INSN(),
1101 		},
1102 		.errstr = "unknown opcode 00",
1103 		.result = REJECT,
1104 	},
1105 	{
1106 		"junk insn2",
1107 		.insns = {
1108 			BPF_RAW_INSN(1, 0, 0, 0, 0),
1109 			BPF_EXIT_INSN(),
1110 		},
1111 		.errstr = "BPF_LDX uses reserved fields",
1112 		.result = REJECT,
1113 	},
1114 	{
1115 		"junk insn3",
1116 		.insns = {
1117 			BPF_RAW_INSN(-1, 0, 0, 0, 0),
1118 			BPF_EXIT_INSN(),
1119 		},
1120 		.errstr = "unknown opcode ff",
1121 		.result = REJECT,
1122 	},
1123 	{
1124 		"junk insn4",
1125 		.insns = {
1126 			BPF_RAW_INSN(-1, -1, -1, -1, -1),
1127 			BPF_EXIT_INSN(),
1128 		},
1129 		.errstr = "unknown opcode ff",
1130 		.result = REJECT,
1131 	},
1132 	{
1133 		"junk insn5",
1134 		.insns = {
1135 			BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
1136 			BPF_EXIT_INSN(),
1137 		},
1138 		.errstr = "BPF_ALU uses reserved fields",
1139 		.result = REJECT,
1140 	},
1141 	{
1142 		"misaligned read from stack",
1143 		.insns = {
1144 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1145 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
1146 			BPF_EXIT_INSN(),
1147 		},
1148 		.errstr = "misaligned stack access",
1149 		.result = REJECT,
1150 	},
1151 	{
1152 		"invalid map_fd for function call",
1153 		.insns = {
1154 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1155 			BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
1156 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1157 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1158 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1159 				     BPF_FUNC_map_delete_elem),
1160 			BPF_EXIT_INSN(),
1161 		},
1162 		.errstr = "fd 0 is not pointing to valid bpf_map",
1163 		.result = REJECT,
1164 	},
1165 	{
1166 		"don't check return value before access",
1167 		.insns = {
1168 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1169 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1170 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1171 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1172 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1173 				     BPF_FUNC_map_lookup_elem),
1174 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1175 			BPF_EXIT_INSN(),
1176 		},
1177 		.fixup_map_hash_8b = { 3 },
1178 		.errstr = "R0 invalid mem access 'map_value_or_null'",
1179 		.result = REJECT,
1180 	},
1181 	{
1182 		"access memory with incorrect alignment",
1183 		.insns = {
1184 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1185 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1186 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1187 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1188 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1189 				     BPF_FUNC_map_lookup_elem),
1190 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
1191 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
1192 			BPF_EXIT_INSN(),
1193 		},
1194 		.fixup_map_hash_8b = { 3 },
1195 		.errstr = "misaligned value access",
1196 		.result = REJECT,
1197 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1198 	},
1199 	{
1200 		"sometimes access memory with incorrect alignment",
1201 		.insns = {
1202 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1203 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1204 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1205 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1206 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1207 				     BPF_FUNC_map_lookup_elem),
1208 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
1209 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1210 			BPF_EXIT_INSN(),
1211 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
1212 			BPF_EXIT_INSN(),
1213 		},
1214 		.fixup_map_hash_8b = { 3 },
1215 		.errstr = "R0 invalid mem access",
1216 		.errstr_unpriv = "R0 leaks addr",
1217 		.result = REJECT,
1218 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1219 	},
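	/* The jump tests exercise branch exploration.  Comparing R1 (the
	 * context pointer) against an immediate is fine when privileged but
	 * is flagged as a pointer comparison for unprivileged users, hence
	 * the split result/result_unpriv expectations.
	 */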
1220 	{
1221 		"jump test 1",
1222 		.insns = {
1223 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1224 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
1225 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
1226 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
1227 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
1228 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
1229 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
1230 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
1231 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
1232 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
1233 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
1234 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
1235 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
1236 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
1237 			BPF_MOV64_IMM(BPF_REG_0, 0),
1238 			BPF_EXIT_INSN(),
1239 		},
1240 		.errstr_unpriv = "R1 pointer comparison",
1241 		.result_unpriv = REJECT,
1242 		.result = ACCEPT,
1243 	},
1244 	{
1245 		"jump test 2",
1246 		.insns = {
1247 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1248 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
1249 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
1250 			BPF_JMP_IMM(BPF_JA, 0, 0, 14),
1251 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
1252 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
1253 			BPF_JMP_IMM(BPF_JA, 0, 0, 11),
1254 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
1255 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
1256 			BPF_JMP_IMM(BPF_JA, 0, 0, 8),
1257 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
1258 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
1259 			BPF_JMP_IMM(BPF_JA, 0, 0, 5),
1260 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
1261 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
1262 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1263 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
1264 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
1265 			BPF_MOV64_IMM(BPF_REG_0, 0),
1266 			BPF_EXIT_INSN(),
1267 		},
1268 		.errstr_unpriv = "R1 pointer comparison",
1269 		.result_unpriv = REJECT,
1270 		.result = ACCEPT,
1271 	},
1272 	{
1273 		"jump test 3",
1274 		.insns = {
1275 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1276 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
1277 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
1278 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1279 			BPF_JMP_IMM(BPF_JA, 0, 0, 19),
1280 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
1281 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
1282 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1283 			BPF_JMP_IMM(BPF_JA, 0, 0, 15),
1284 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
1285 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
1286 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
1287 			BPF_JMP_IMM(BPF_JA, 0, 0, 11),
1288 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
1289 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
1290 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
1291 			BPF_JMP_IMM(BPF_JA, 0, 0, 7),
1292 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
1293 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
1294 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
1295 			BPF_JMP_IMM(BPF_JA, 0, 0, 3),
1296 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
1297 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
1298 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
1299 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1300 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1301 				     BPF_FUNC_map_delete_elem),
1302 			BPF_EXIT_INSN(),
1303 		},
1304 		.fixup_map_hash_8b = { 24 },
1305 		.errstr_unpriv = "R1 pointer comparison",
1306 		.result_unpriv = REJECT,
1307 		.result = ACCEPT,
1308 		.retval = -ENOENT,
1309 	},
1310 	{
1311 		"jump test 4",
1312 		.insns = {
1313 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1314 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1315 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1316 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1317 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1318 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1319 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1320 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1321 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1322 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1323 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1324 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1325 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1326 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1327 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1328 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1329 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1330 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1331 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1332 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1333 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1334 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1335 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1336 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1337 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1338 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1339 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1340 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1341 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1342 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1343 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1344 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1345 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1346 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1347 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1348 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1349 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1350 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1351 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1352 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1353 			BPF_MOV64_IMM(BPF_REG_0, 0),
1354 			BPF_EXIT_INSN(),
1355 		},
1356 		.errstr_unpriv = "R1 pointer comparison",
1357 		.result_unpriv = REJECT,
1358 		.result = ACCEPT,
1359 	},
1360 	{
1361 		"jump test 5",
1362 		.insns = {
1363 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1364 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
1365 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1366 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1367 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1368 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1369 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1370 			BPF_MOV64_IMM(BPF_REG_0, 0),
1371 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1372 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1373 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1374 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1375 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1376 			BPF_MOV64_IMM(BPF_REG_0, 0),
1377 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1378 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1379 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1380 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1381 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1382 			BPF_MOV64_IMM(BPF_REG_0, 0),
1383 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1384 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1385 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1386 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1387 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1388 			BPF_MOV64_IMM(BPF_REG_0, 0),
1389 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1390 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1391 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1392 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1393 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1394 			BPF_MOV64_IMM(BPF_REG_0, 0),
1395 			BPF_EXIT_INSN(),
1396 		},
1397 		.errstr_unpriv = "R1 pointer comparison",
1398 		.result_unpriv = REJECT,
1399 		.result = ACCEPT,
1400 	},
1401 	{
1402 		"access skb fields ok",
1403 		.insns = {
1404 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1405 				    offsetof(struct __sk_buff, len)),
1406 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1407 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1408 				    offsetof(struct __sk_buff, mark)),
1409 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1410 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1411 				    offsetof(struct __sk_buff, pkt_type)),
1412 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1413 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1414 				    offsetof(struct __sk_buff, queue_mapping)),
1415 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1416 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1417 				    offsetof(struct __sk_buff, protocol)),
1418 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1419 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1420 				    offsetof(struct __sk_buff, vlan_present)),
1421 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1422 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1423 				    offsetof(struct __sk_buff, vlan_tci)),
1424 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1425 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1426 				    offsetof(struct __sk_buff, napi_id)),
1427 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1428 			BPF_EXIT_INSN(),
1429 		},
1430 		.result = ACCEPT,
1431 	},
1432 	{
1433 		"access skb fields bad1",
1434 		.insns = {
1435 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
1436 			BPF_EXIT_INSN(),
1437 		},
1438 		.errstr = "invalid bpf_context access",
1439 		.result = REJECT,
1440 	},
1441 	{
1442 		"access skb fields bad2",
1443 		.insns = {
1444 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
1445 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1446 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1447 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1448 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1449 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1450 				     BPF_FUNC_map_lookup_elem),
1451 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1452 			BPF_EXIT_INSN(),
1453 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1454 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1455 				    offsetof(struct __sk_buff, pkt_type)),
1456 			BPF_EXIT_INSN(),
1457 		},
1458 		.fixup_map_hash_8b = { 4 },
1459 		.errstr = "different pointers",
1460 		.errstr_unpriv = "R1 pointer comparison",
1461 		.result = REJECT,
1462 	},
1463 	{
1464 		"access skb fields bad3",
1465 		.insns = {
1466 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1467 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1468 				    offsetof(struct __sk_buff, pkt_type)),
1469 			BPF_EXIT_INSN(),
1470 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1471 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1472 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1473 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1474 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1475 				     BPF_FUNC_map_lookup_elem),
1476 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1477 			BPF_EXIT_INSN(),
1478 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1479 			BPF_JMP_IMM(BPF_JA, 0, 0, -12),
1480 		},
1481 		.fixup_map_hash_8b = { 6 },
1482 		.errstr = "different pointers",
1483 		.errstr_unpriv = "R1 pointer comparison",
1484 		.result = REJECT,
1485 	},
1486 	{
1487 		"access skb fields bad4",
1488 		.insns = {
1489 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
1490 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1491 				    offsetof(struct __sk_buff, len)),
1492 			BPF_MOV64_IMM(BPF_REG_0, 0),
1493 			BPF_EXIT_INSN(),
1494 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1495 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1496 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1497 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1498 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1499 				     BPF_FUNC_map_lookup_elem),
1500 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1501 			BPF_EXIT_INSN(),
1502 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1503 			BPF_JMP_IMM(BPF_JA, 0, 0, -13),
1504 		},
1505 		.fixup_map_hash_8b = { 7 },
1506 		.errstr = "different pointers",
1507 		.errstr_unpriv = "R1 pointer comparison",
1508 		.result = REJECT,
1509 	},
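	/* Which __sk_buff fields are accessible depends on the program type:
	 * the socket address fields below are rejected for the default
	 * socket filter type but readable from BPF_PROG_TYPE_SK_SKB, as the
	 * matching pairs of invalid/valid tests show.
	 */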
1510 	{
1511 		"invalid access __sk_buff family",
1512 		.insns = {
1513 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1514 				    offsetof(struct __sk_buff, family)),
1515 			BPF_EXIT_INSN(),
1516 		},
1517 		.errstr = "invalid bpf_context access",
1518 		.result = REJECT,
1519 	},
1520 	{
1521 		"invalid access __sk_buff remote_ip4",
1522 		.insns = {
1523 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1524 				    offsetof(struct __sk_buff, remote_ip4)),
1525 			BPF_EXIT_INSN(),
1526 		},
1527 		.errstr = "invalid bpf_context access",
1528 		.result = REJECT,
1529 	},
1530 	{
1531 		"invalid access __sk_buff local_ip4",
1532 		.insns = {
1533 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1534 				    offsetof(struct __sk_buff, local_ip4)),
1535 			BPF_EXIT_INSN(),
1536 		},
1537 		.errstr = "invalid bpf_context access",
1538 		.result = REJECT,
1539 	},
1540 	{
1541 		"invalid access __sk_buff remote_ip6",
1542 		.insns = {
1543 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1544 				    offsetof(struct __sk_buff, remote_ip6)),
1545 			BPF_EXIT_INSN(),
1546 		},
1547 		.errstr = "invalid bpf_context access",
1548 		.result = REJECT,
1549 	},
1550 	{
1551 		"invalid access __sk_buff local_ip6",
1552 		.insns = {
1553 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1554 				    offsetof(struct __sk_buff, local_ip6)),
1555 			BPF_EXIT_INSN(),
1556 		},
1557 		.errstr = "invalid bpf_context access",
1558 		.result = REJECT,
1559 	},
1560 	{
1561 		"invalid access __sk_buff remote_port",
1562 		.insns = {
1563 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1564 				    offsetof(struct __sk_buff, remote_port)),
1565 			BPF_EXIT_INSN(),
1566 		},
1567 		.errstr = "invalid bpf_context access",
1568 		.result = REJECT,
1569 	},
1570 	{
1571 		"invalid access __sk_buff local_port",
1572 		.insns = {
1573 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1574 				    offsetof(struct __sk_buff, local_port)),
1575 			BPF_EXIT_INSN(),
1576 		},
1577 		.errstr = "invalid bpf_context access",
1578 		.result = REJECT,
1579 	},
1580 	{
1581 		"valid access __sk_buff family",
1582 		.insns = {
1583 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1584 				    offsetof(struct __sk_buff, family)),
1585 			BPF_EXIT_INSN(),
1586 		},
1587 		.result = ACCEPT,
1588 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1589 	},
1590 	{
1591 		"valid access __sk_buff remote_ip4",
1592 		.insns = {
1593 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1594 				    offsetof(struct __sk_buff, remote_ip4)),
1595 			BPF_EXIT_INSN(),
1596 		},
1597 		.result = ACCEPT,
1598 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1599 	},
1600 	{
1601 		"valid access __sk_buff local_ip4",
1602 		.insns = {
1603 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1604 				    offsetof(struct __sk_buff, local_ip4)),
1605 			BPF_EXIT_INSN(),
1606 		},
1607 		.result = ACCEPT,
1608 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1609 	},
1610 	{
1611 		"valid access __sk_buff remote_ip6",
1612 		.insns = {
1613 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1614 				    offsetof(struct __sk_buff, remote_ip6[0])),
1615 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1616 				    offsetof(struct __sk_buff, remote_ip6[1])),
1617 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1618 				    offsetof(struct __sk_buff, remote_ip6[2])),
1619 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1620 				    offsetof(struct __sk_buff, remote_ip6[3])),
1621 			BPF_EXIT_INSN(),
1622 		},
1623 		.result = ACCEPT,
1624 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1625 	},
1626 	{
1627 		"valid access __sk_buff local_ip6",
1628 		.insns = {
1629 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1630 				    offsetof(struct __sk_buff, local_ip6[0])),
1631 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1632 				    offsetof(struct __sk_buff, local_ip6[1])),
1633 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1634 				    offsetof(struct __sk_buff, local_ip6[2])),
1635 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1636 				    offsetof(struct __sk_buff, local_ip6[3])),
1637 			BPF_EXIT_INSN(),
1638 		},
1639 		.result = ACCEPT,
1640 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1641 	},
1642 	{
1643 		"valid access __sk_buff remote_port",
1644 		.insns = {
1645 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1646 				    offsetof(struct __sk_buff, remote_port)),
1647 			BPF_EXIT_INSN(),
1648 		},
1649 		.result = ACCEPT,
1650 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1651 	},
1652 	{
1653 		"valid access __sk_buff local_port",
1654 		.insns = {
1655 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1656 				    offsetof(struct __sk_buff, local_port)),
1657 			BPF_EXIT_INSN(),
1658 		},
1659 		.result = ACCEPT,
1660 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1661 	},
1662 	{
1663 		"invalid access of tc_classid for SK_SKB",
1664 		.insns = {
1665 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1666 				    offsetof(struct __sk_buff, tc_classid)),
1667 			BPF_EXIT_INSN(),
1668 		},
1669 		.result = REJECT,
1670 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1671 		.errstr = "invalid bpf_context access",
1672 	},
1673 	{
1674 		"invalid access of skb->mark for SK_SKB",
1675 		.insns = {
1676 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1677 				    offsetof(struct __sk_buff, mark)),
1678 			BPF_EXIT_INSN(),
1679 		},
1680 		.result =  REJECT,
1681 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1682 		.errstr = "invalid bpf_context access",
1683 	},
1684 	{
1685 		"check skb->mark is not writeable by SK_SKB",
1686 		.insns = {
1687 			BPF_MOV64_IMM(BPF_REG_0, 0),
1688 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1689 				    offsetof(struct __sk_buff, mark)),
1690 			BPF_EXIT_INSN(),
1691 		},
1692 		.result =  REJECT,
1693 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1694 		.errstr = "invalid bpf_context access",
1695 	},
1696 	{
1697 		"check skb->tc_index is writeable by SK_SKB",
1698 		.insns = {
1699 			BPF_MOV64_IMM(BPF_REG_0, 0),
1700 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1701 				    offsetof(struct __sk_buff, tc_index)),
1702 			BPF_EXIT_INSN(),
1703 		},
1704 		.result = ACCEPT,
1705 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1706 	},
1707 	{
1708 		"check skb->priority is writeable by SK_SKB",
1709 		.insns = {
1710 			BPF_MOV64_IMM(BPF_REG_0, 0),
1711 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1712 				    offsetof(struct __sk_buff, priority)),
1713 			BPF_EXIT_INSN(),
1714 		},
1715 		.result = ACCEPT,
1716 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1717 	},
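	/* Direct packet access pattern: load data and data_end from the
	 * context, bound the pointer with a comparison against data_end, and
	 * only dereference within the range proven by that check.
	 */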
1718 	{
1719 		"direct packet read for SK_SKB",
1720 		.insns = {
1721 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1722 				    offsetof(struct __sk_buff, data)),
1723 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1724 				    offsetof(struct __sk_buff, data_end)),
1725 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1726 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1727 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1728 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
1729 			BPF_MOV64_IMM(BPF_REG_0, 0),
1730 			BPF_EXIT_INSN(),
1731 		},
1732 		.result = ACCEPT,
1733 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1734 	},
1735 	{
1736 		"direct packet write for SK_SKB",
1737 		.insns = {
1738 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1739 				    offsetof(struct __sk_buff, data)),
1740 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1741 				    offsetof(struct __sk_buff, data_end)),
1742 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1743 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1744 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1745 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
1746 			BPF_MOV64_IMM(BPF_REG_0, 0),
1747 			BPF_EXIT_INSN(),
1748 		},
1749 		.result = ACCEPT,
1750 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1751 	},
1752 	{
1753 		"overlapping checks for direct packet access SK_SKB",
1754 		.insns = {
1755 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1756 				    offsetof(struct __sk_buff, data)),
1757 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1758 				    offsetof(struct __sk_buff, data_end)),
1759 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1760 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1761 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
1762 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
1763 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
1764 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
1765 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
1766 			BPF_MOV64_IMM(BPF_REG_0, 0),
1767 			BPF_EXIT_INSN(),
1768 		},
1769 		.result = ACCEPT,
1770 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1771 	},
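	/* sk_msg_md is the context for BPF_PROG_TYPE_SK_MSG.  Its socket
	 * fields mirror the __sk_buff ones above, and data/data_end are
	 * 64 bit loads that permit the same bounded direct packet access.
	 */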
1772 	{
1773 		"valid access family in SK_MSG",
1774 		.insns = {
1775 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1776 				    offsetof(struct sk_msg_md, family)),
1777 			BPF_EXIT_INSN(),
1778 		},
1779 		.result = ACCEPT,
1780 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1781 	},
1782 	{
1783 		"valid access remote_ip4 in SK_MSG",
1784 		.insns = {
1785 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1786 				    offsetof(struct sk_msg_md, remote_ip4)),
1787 			BPF_EXIT_INSN(),
1788 		},
1789 		.result = ACCEPT,
1790 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1791 	},
1792 	{
1793 		"valid access local_ip4 in SK_MSG",
1794 		.insns = {
1795 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1796 				    offsetof(struct sk_msg_md, local_ip4)),
1797 			BPF_EXIT_INSN(),
1798 		},
1799 		.result = ACCEPT,
1800 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1801 	},
1802 	{
1803 		"valid access remote_port in SK_MSG",
1804 		.insns = {
1805 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1806 				    offsetof(struct sk_msg_md, remote_port)),
1807 			BPF_EXIT_INSN(),
1808 		},
1809 		.result = ACCEPT,
1810 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1811 	},
1812 	{
1813 		"valid access local_port in SK_MSG",
1814 		.insns = {
1815 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1816 				    offsetof(struct sk_msg_md, local_port)),
1817 			BPF_EXIT_INSN(),
1818 		},
1819 		.result = ACCEPT,
1820 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1821 	},
1822 	{
1823 		"valid access remote_ip6 in SK_MSG",
1824 		.insns = {
1825 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1826 				    offsetof(struct sk_msg_md, remote_ip6[0])),
1827 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1828 				    offsetof(struct sk_msg_md, remote_ip6[1])),
1829 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1830 				    offsetof(struct sk_msg_md, remote_ip6[2])),
1831 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1832 				    offsetof(struct sk_msg_md, remote_ip6[3])),
1833 			BPF_EXIT_INSN(),
1834 		},
1835 		.result = ACCEPT,
1836 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1837 	},
1838 	{
1839 		"valid access local_ip6 in SK_MSG",
1840 		.insns = {
1841 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1842 				    offsetof(struct sk_msg_md, local_ip6[0])),
1843 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1844 				    offsetof(struct sk_msg_md, local_ip6[1])),
1845 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1846 				    offsetof(struct sk_msg_md, local_ip6[2])),
1847 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1848 				    offsetof(struct sk_msg_md, local_ip6[3])),
1849 			BPF_EXIT_INSN(),
1850 		},
1851 		.result = ACCEPT,
1852 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1853 	},
1854 	{
1855 		"valid access size in SK_MSG",
1856 		.insns = {
1857 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1858 				    offsetof(struct sk_msg_md, size)),
1859 			BPF_EXIT_INSN(),
1860 		},
1861 		.result = ACCEPT,
1862 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1863 	},
1864 	{
1865 		"invalid 64B read of size in SK_MSG",
1866 		.insns = {
1867 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1868 				    offsetof(struct sk_msg_md, size)),
1869 			BPF_EXIT_INSN(),
1870 		},
1871 		.errstr = "invalid bpf_context access",
1872 		.result = REJECT,
1873 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1874 	},
1875 	{
1876 		"invalid read past end of SK_MSG",
1877 		.insns = {
1878 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1879 				    offsetof(struct sk_msg_md, size) + 4),
1880 			BPF_EXIT_INSN(),
1881 		},
1882 		.errstr = "invalid bpf_context access",
1883 		.result = REJECT,
1884 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1885 	},
1886 	{
1887 		"invalid read offset in SK_MSG",
1888 		.insns = {
1889 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1890 				    offsetof(struct sk_msg_md, family) + 1),
1891 			BPF_EXIT_INSN(),
1892 		},
1893 		.errstr = "invalid bpf_context access",
1894 		.result = REJECT,
1895 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1896 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
1897 	},
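	/*
	 * Direct packet access for SK_MSG: msg->data may only be
	 * dereferenced once the program has proven to the verifier that the
	 * access stays below msg->data_end, roughly
	 *
	 *	void *data = msg->data, *data_end = msg->data_end;
	 *	if (data + 8 <= data_end)
	 *		v = *(__u8 *)data;
	 *
	 * The next three tests cover the read, write and overlapping-bounds
	 * variants of this pattern.
	 */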
1898 	{
1899 		"direct packet read for SK_MSG",
1900 		.insns = {
1901 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1902 				    offsetof(struct sk_msg_md, data)),
1903 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
1904 				    offsetof(struct sk_msg_md, data_end)),
1905 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1906 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1907 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1908 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
1909 			BPF_MOV64_IMM(BPF_REG_0, 0),
1910 			BPF_EXIT_INSN(),
1911 		},
1912 		.result = ACCEPT,
1913 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1914 	},
1915 	{
1916 		"direct packet write for SK_MSG",
1917 		.insns = {
1918 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1919 				    offsetof(struct sk_msg_md, data)),
1920 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
1921 				    offsetof(struct sk_msg_md, data_end)),
1922 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1923 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1924 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1925 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
1926 			BPF_MOV64_IMM(BPF_REG_0, 0),
1927 			BPF_EXIT_INSN(),
1928 		},
1929 		.result = ACCEPT,
1930 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1931 	},
1932 	{
1933 		"overlapping checks for direct packet access SK_MSG",
1934 		.insns = {
1935 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1936 				    offsetof(struct sk_msg_md, data)),
1937 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
1938 				    offsetof(struct sk_msg_md, data_end)),
1939 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1940 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1941 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
1942 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
1943 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
1944 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
1945 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
1946 			BPF_MOV64_IMM(BPF_REG_0, 0),
1947 			BPF_EXIT_INSN(),
1948 		},
1949 		.result = ACCEPT,
1950 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1951 	},
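	/*
	 * Back to __sk_buff: tests without an explicit .prog_type are loaded
	 * as plain socket filters, for which fields like skb->mark and
	 * skb->tc_index are readable but not writable.
	 */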
1952 	{
1953 		"check skb->mark is not writeable by sockets",
1954 		.insns = {
1955 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1956 				    offsetof(struct __sk_buff, mark)),
1957 			BPF_EXIT_INSN(),
1958 		},
1959 		.errstr = "invalid bpf_context access",
1960 		.errstr_unpriv = "R1 leaks addr",
1961 		.result = REJECT,
1962 	},
1963 	{
1964 		"check skb->tc_index is not writeable by sockets",
1965 		.insns = {
1966 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1967 				    offsetof(struct __sk_buff, tc_index)),
1968 			BPF_EXIT_INSN(),
1969 		},
1970 		.errstr = "invalid bpf_context access",
1971 		.errstr_unpriv = "R1 leaks addr",
1972 		.result = REJECT,
1973 	},
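	/*
	 * skb->cb[] is a 20-byte scratch area that socket programs may both
	 * read and write.  The "check cb access" tests walk it at byte,
	 * half, word and double-word granularity; every aligned, in-bounds
	 * access must be ACCEPTed.
	 */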
1974 	{
1975 		"check cb access: byte",
1976 		.insns = {
1977 			BPF_MOV64_IMM(BPF_REG_0, 0),
1978 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1979 				    offsetof(struct __sk_buff, cb[0])),
1980 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1981 				    offsetof(struct __sk_buff, cb[0]) + 1),
1982 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1983 				    offsetof(struct __sk_buff, cb[0]) + 2),
1984 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1985 				    offsetof(struct __sk_buff, cb[0]) + 3),
1986 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1987 				    offsetof(struct __sk_buff, cb[1])),
1988 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1989 				    offsetof(struct __sk_buff, cb[1]) + 1),
1990 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1991 				    offsetof(struct __sk_buff, cb[1]) + 2),
1992 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1993 				    offsetof(struct __sk_buff, cb[1]) + 3),
1994 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1995 				    offsetof(struct __sk_buff, cb[2])),
1996 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1997 				    offsetof(struct __sk_buff, cb[2]) + 1),
1998 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1999 				    offsetof(struct __sk_buff, cb[2]) + 2),
2000 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2001 				    offsetof(struct __sk_buff, cb[2]) + 3),
2002 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2003 				    offsetof(struct __sk_buff, cb[3])),
2004 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2005 				    offsetof(struct __sk_buff, cb[3]) + 1),
2006 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2007 				    offsetof(struct __sk_buff, cb[3]) + 2),
2008 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2009 				    offsetof(struct __sk_buff, cb[3]) + 3),
2010 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2011 				    offsetof(struct __sk_buff, cb[4])),
2012 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2013 				    offsetof(struct __sk_buff, cb[4]) + 1),
2014 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2015 				    offsetof(struct __sk_buff, cb[4]) + 2),
2016 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2017 				    offsetof(struct __sk_buff, cb[4]) + 3),
2018 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2019 				    offsetof(struct __sk_buff, cb[0])),
2020 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2021 				    offsetof(struct __sk_buff, cb[0]) + 1),
2022 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2023 				    offsetof(struct __sk_buff, cb[0]) + 2),
2024 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2025 				    offsetof(struct __sk_buff, cb[0]) + 3),
2026 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2027 				    offsetof(struct __sk_buff, cb[1])),
2028 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2029 				    offsetof(struct __sk_buff, cb[1]) + 1),
2030 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2031 				    offsetof(struct __sk_buff, cb[1]) + 2),
2032 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2033 				    offsetof(struct __sk_buff, cb[1]) + 3),
2034 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2035 				    offsetof(struct __sk_buff, cb[2])),
2036 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2037 				    offsetof(struct __sk_buff, cb[2]) + 1),
2038 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2039 				    offsetof(struct __sk_buff, cb[2]) + 2),
2040 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2041 				    offsetof(struct __sk_buff, cb[2]) + 3),
2042 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2043 				    offsetof(struct __sk_buff, cb[3])),
2044 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2045 				    offsetof(struct __sk_buff, cb[3]) + 1),
2046 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2047 				    offsetof(struct __sk_buff, cb[3]) + 2),
2048 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2049 				    offsetof(struct __sk_buff, cb[3]) + 3),
2050 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2051 				    offsetof(struct __sk_buff, cb[4])),
2052 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2053 				    offsetof(struct __sk_buff, cb[4]) + 1),
2054 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2055 				    offsetof(struct __sk_buff, cb[4]) + 2),
2056 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2057 				    offsetof(struct __sk_buff, cb[4]) + 3),
2058 			BPF_EXIT_INSN(),
2059 		},
2060 		.result = ACCEPT,
2061 	},
2062 	{
2063 		"__sk_buff->hash, offset 0, byte store not permitted",
2064 		.insns = {
2065 			BPF_MOV64_IMM(BPF_REG_0, 0),
2066 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2067 				    offsetof(struct __sk_buff, hash)),
2068 			BPF_EXIT_INSN(),
2069 		},
2070 		.errstr = "invalid bpf_context access",
2071 		.result = REJECT,
2072 	},
2073 	{
2074 		"__sk_buff->tc_index, offset 3, byte store not permitted",
2075 		.insns = {
2076 			BPF_MOV64_IMM(BPF_REG_0, 0),
2077 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2078 				    offsetof(struct __sk_buff, tc_index) + 3),
2079 			BPF_EXIT_INSN(),
2080 		},
2081 		.errstr = "invalid bpf_context access",
2082 		.result = REJECT,
2083 	},
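	/*
	 * Narrow loads of skb->hash are allowed at any byte offset; the
	 * __BYTE_ORDER conditionals below only serve to pick the same byte
	 * of the 32-bit value regardless of host endianness.
	 */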
2084 	{
2085 		"check skb->hash byte load permitted",
2086 		.insns = {
2087 			BPF_MOV64_IMM(BPF_REG_0, 0),
2088 #if __BYTE_ORDER == __LITTLE_ENDIAN
2089 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2090 				    offsetof(struct __sk_buff, hash)),
2091 #else
2092 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2093 				    offsetof(struct __sk_buff, hash) + 3),
2094 #endif
2095 			BPF_EXIT_INSN(),
2096 		},
2097 		.result = ACCEPT,
2098 	},
2099 	{
2100 		"check skb->hash byte load permitted 1",
2101 		.insns = {
2102 			BPF_MOV64_IMM(BPF_REG_0, 0),
2103 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2104 				    offsetof(struct __sk_buff, hash) + 1),
2105 			BPF_EXIT_INSN(),
2106 		},
2107 		.result = ACCEPT,
2108 	},
2109 	{
2110 		"check skb->hash byte load permitted 2",
2111 		.insns = {
2112 			BPF_MOV64_IMM(BPF_REG_0, 0),
2113 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2114 				    offsetof(struct __sk_buff, hash) + 2),
2115 			BPF_EXIT_INSN(),
2116 		},
2117 		.result = ACCEPT,
2118 	},
2119 	{
2120 		"check skb->hash byte load permitted 3",
2121 		.insns = {
2122 			BPF_MOV64_IMM(BPF_REG_0, 0),
2123 #if __BYTE_ORDER == __LITTLE_ENDIAN
2124 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2125 				    offsetof(struct __sk_buff, hash) + 3),
2126 #else
2127 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2128 				    offsetof(struct __sk_buff, hash)),
2129 #endif
2130 			BPF_EXIT_INSN(),
2131 		},
2132 		.result = ACCEPT,
2133 	},
2134 	{
2135 		"check cb access: byte, wrong type",
2136 		.insns = {
2137 			BPF_MOV64_IMM(BPF_REG_0, 0),
2138 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2139 				    offsetof(struct __sk_buff, cb[0])),
2140 			BPF_EXIT_INSN(),
2141 		},
2142 		.errstr = "invalid bpf_context access",
2143 		.result = REJECT,
2144 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
2145 	},
2146 	{
2147 		"check cb access: half",
2148 		.insns = {
2149 			BPF_MOV64_IMM(BPF_REG_0, 0),
2150 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2151 				    offsetof(struct __sk_buff, cb[0])),
2152 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2153 				    offsetof(struct __sk_buff, cb[0]) + 2),
2154 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2155 				    offsetof(struct __sk_buff, cb[1])),
2156 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2157 				    offsetof(struct __sk_buff, cb[1]) + 2),
2158 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2159 				    offsetof(struct __sk_buff, cb[2])),
2160 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2161 				    offsetof(struct __sk_buff, cb[2]) + 2),
2162 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2163 				    offsetof(struct __sk_buff, cb[3])),
2164 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2165 				    offsetof(struct __sk_buff, cb[3]) + 2),
2166 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2167 				    offsetof(struct __sk_buff, cb[4])),
2168 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2169 				    offsetof(struct __sk_buff, cb[4]) + 2),
2170 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2171 				    offsetof(struct __sk_buff, cb[0])),
2172 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2173 				    offsetof(struct __sk_buff, cb[0]) + 2),
2174 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2175 				    offsetof(struct __sk_buff, cb[1])),
2176 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2177 				    offsetof(struct __sk_buff, cb[1]) + 2),
2178 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2179 				    offsetof(struct __sk_buff, cb[2])),
2180 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2181 				    offsetof(struct __sk_buff, cb[2]) + 2),
2182 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2183 				    offsetof(struct __sk_buff, cb[3])),
2184 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2185 				    offsetof(struct __sk_buff, cb[3]) + 2),
2186 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2187 				    offsetof(struct __sk_buff, cb[4])),
2188 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2189 				    offsetof(struct __sk_buff, cb[4]) + 2),
2190 			BPF_EXIT_INSN(),
2191 		},
2192 		.result = ACCEPT,
2193 	},
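	/*
	 * F_LOAD_WITH_STRICT_ALIGNMENT makes the loader pass
	 * BPF_F_STRICT_ALIGNMENT, so misaligned context accesses are
	 * reported as "misaligned context access" even on architectures
	 * that could handle the unaligned load itself.
	 */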
2194 	{
2195 		"check cb access: half, unaligned",
2196 		.insns = {
2197 			BPF_MOV64_IMM(BPF_REG_0, 0),
2198 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2199 				    offsetof(struct __sk_buff, cb[0]) + 1),
2200 			BPF_EXIT_INSN(),
2201 		},
2202 		.errstr = "misaligned context access",
2203 		.result = REJECT,
2204 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2205 	},
2206 	{
2207 		"check __sk_buff->hash, offset 0, half store not permitted",
2208 		.insns = {
2209 			BPF_MOV64_IMM(BPF_REG_0, 0),
2210 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2211 				    offsetof(struct __sk_buff, hash)),
2212 			BPF_EXIT_INSN(),
2213 		},
2214 		.errstr = "invalid bpf_context access",
2215 		.result = REJECT,
2216 	},
2217 	{
2218 		"check __sk_buff->tc_index, offset 2, half store not permitted",
2219 		.insns = {
2220 			BPF_MOV64_IMM(BPF_REG_0, 0),
2221 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2222 				    offsetof(struct __sk_buff, tc_index) + 2),
2223 			BPF_EXIT_INSN(),
2224 		},
2225 		.errstr = "invalid bpf_context access",
2226 		.result = REJECT,
2227 	},
2228 	{
2229 		"check skb->hash half load permitted",
2230 		.insns = {
2231 			BPF_MOV64_IMM(BPF_REG_0, 0),
2232 #if __BYTE_ORDER == __LITTLE_ENDIAN
2233 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2234 				    offsetof(struct __sk_buff, hash)),
2235 #else
2236 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2237 				    offsetof(struct __sk_buff, hash) + 2),
2238 #endif
2239 			BPF_EXIT_INSN(),
2240 		},
2241 		.result = ACCEPT,
2242 	},
2243 	{
2244 		"check skb->hash half load permitted 2",
2245 		.insns = {
2246 			BPF_MOV64_IMM(BPF_REG_0, 0),
2247 #if __BYTE_ORDER == __LITTLE_ENDIAN
2248 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2249 				    offsetof(struct __sk_buff, hash) + 2),
2250 #else
2251 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2252 				    offsetof(struct __sk_buff, hash)),
2253 #endif
2254 			BPF_EXIT_INSN(),
2255 		},
2256 		.result = ACCEPT,
2257 	},
2258 	{
2259 		"check skb->hash half load not permitted, unaligned 1",
2260 		.insns = {
2261 			BPF_MOV64_IMM(BPF_REG_0, 0),
2262 #if __BYTE_ORDER == __LITTLE_ENDIAN
2263 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2264 				    offsetof(struct __sk_buff, hash) + 1),
2265 #else
2266 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2267 				    offsetof(struct __sk_buff, hash) + 3),
2268 #endif
2269 			BPF_EXIT_INSN(),
2270 		},
2271 		.errstr = "invalid bpf_context access",
2272 		.result = REJECT,
2273 	},
2274 	{
2275 		"check skb->hash half load not permitted, unaligned 3",
2276 		.insns = {
2277 			BPF_MOV64_IMM(BPF_REG_0, 0),
2278 #if __BYTE_ORDER == __LITTLE_ENDIAN
2279 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2280 				    offsetof(struct __sk_buff, hash) + 3),
2281 #else
2282 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2283 				    offsetof(struct __sk_buff, hash) + 1),
2284 #endif
2285 			BPF_EXIT_INSN(),
2286 		},
2287 		.errstr = "invalid bpf_context access",
2288 		.result = REJECT,
2289 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
2291 	},
2292 	{
2293 		"check cb access: half, wrong type",
2294 		.insns = {
2295 			BPF_MOV64_IMM(BPF_REG_0, 0),
2296 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2297 				    offsetof(struct __sk_buff, cb[0])),
2298 			BPF_EXIT_INSN(),
2299 		},
2300 		.errstr = "invalid bpf_context access",
2301 		.result = REJECT,
2302 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
2303 	},
2304 	{
2305 		"check cb access: word",
2306 		.insns = {
2307 			BPF_MOV64_IMM(BPF_REG_0, 0),
2308 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2309 				    offsetof(struct __sk_buff, cb[0])),
2310 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2311 				    offsetof(struct __sk_buff, cb[1])),
2312 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2313 				    offsetof(struct __sk_buff, cb[2])),
2314 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2315 				    offsetof(struct __sk_buff, cb[3])),
2316 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2317 				    offsetof(struct __sk_buff, cb[4])),
2318 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2319 				    offsetof(struct __sk_buff, cb[0])),
2320 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2321 				    offsetof(struct __sk_buff, cb[1])),
2322 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2323 				    offsetof(struct __sk_buff, cb[2])),
2324 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2325 				    offsetof(struct __sk_buff, cb[3])),
2326 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2327 				    offsetof(struct __sk_buff, cb[4])),
2328 			BPF_EXIT_INSN(),
2329 		},
2330 		.result = ACCEPT,
2331 	},
2332 	{
2333 		"check cb access: word, unaligned 1",
2334 		.insns = {
2335 			BPF_MOV64_IMM(BPF_REG_0, 0),
2336 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2337 				    offsetof(struct __sk_buff, cb[0]) + 2),
2338 			BPF_EXIT_INSN(),
2339 		},
2340 		.errstr = "misaligned context access",
2341 		.result = REJECT,
2342 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2343 	},
2344 	{
2345 		"check cb access: word, unaligned 2",
2346 		.insns = {
2347 			BPF_MOV64_IMM(BPF_REG_0, 0),
2348 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2349 				    offsetof(struct __sk_buff, cb[4]) + 1),
2350 			BPF_EXIT_INSN(),
2351 		},
2352 		.errstr = "misaligned context access",
2353 		.result = REJECT,
2354 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2355 	},
2356 	{
2357 		"check cb access: word, unaligned 3",
2358 		.insns = {
2359 			BPF_MOV64_IMM(BPF_REG_0, 0),
2360 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2361 				    offsetof(struct __sk_buff, cb[4]) + 2),
2362 			BPF_EXIT_INSN(),
2363 		},
2364 		.errstr = "misaligned context access",
2365 		.result = REJECT,
2366 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2367 	},
2368 	{
2369 		"check cb access: word, unaligned 4",
2370 		.insns = {
2371 			BPF_MOV64_IMM(BPF_REG_0, 0),
2372 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2373 				    offsetof(struct __sk_buff, cb[4]) + 3),
2374 			BPF_EXIT_INSN(),
2375 		},
2376 		.errstr = "misaligned context access",
2377 		.result = REJECT,
2378 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2379 	},
2380 	{
2381 		"check cb access: double",
2382 		.insns = {
2383 			BPF_MOV64_IMM(BPF_REG_0, 0),
2384 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2385 				    offsetof(struct __sk_buff, cb[0])),
2386 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2387 				    offsetof(struct __sk_buff, cb[2])),
2388 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2389 				    offsetof(struct __sk_buff, cb[0])),
2390 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2391 				    offsetof(struct __sk_buff, cb[2])),
2392 			BPF_EXIT_INSN(),
2393 		},
2394 		.result = ACCEPT,
2395 	},
2396 	{
2397 		"check cb access: double, unaligned 1",
2398 		.insns = {
2399 			BPF_MOV64_IMM(BPF_REG_0, 0),
2400 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2401 				    offsetof(struct __sk_buff, cb[1])),
2402 			BPF_EXIT_INSN(),
2403 		},
2404 		.errstr = "misaligned context access",
2405 		.result = REJECT,
2406 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2407 	},
2408 	{
2409 		"check cb access: double, unaligned 2",
2410 		.insns = {
2411 			BPF_MOV64_IMM(BPF_REG_0, 0),
2412 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2413 				    offsetof(struct __sk_buff, cb[3])),
2414 			BPF_EXIT_INSN(),
2415 		},
2416 		.errstr = "misaligned context access",
2417 		.result = REJECT,
2418 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2419 	},
2420 	{
2421 		"check cb access: double, oob 1",
2422 		.insns = {
2423 			BPF_MOV64_IMM(BPF_REG_0, 0),
2424 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2425 				    offsetof(struct __sk_buff, cb[4])),
2426 			BPF_EXIT_INSN(),
2427 		},
2428 		.errstr = "invalid bpf_context access",
2429 		.result = REJECT,
2430 	},
2431 	{
2432 		"check cb access: double, oob 2",
2433 		.insns = {
2434 			BPF_MOV64_IMM(BPF_REG_0, 0),
2435 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2436 				    offsetof(struct __sk_buff, cb[4])),
2437 			BPF_EXIT_INSN(),
2438 		},
2439 		.errstr = "invalid bpf_context access",
2440 		.result = REJECT,
2441 	},
2442 	{
2443 		"check __sk_buff->ifindex dw store not permitted",
2444 		.insns = {
2445 			BPF_MOV64_IMM(BPF_REG_0, 0),
2446 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2447 				    offsetof(struct __sk_buff, ifindex)),
2448 			BPF_EXIT_INSN(),
2449 		},
2450 		.errstr = "invalid bpf_context access",
2451 		.result = REJECT,
2452 	},
2453 	{
2454 		"check __sk_buff->ifindex dw load not permitted",
2455 		.insns = {
2456 			BPF_MOV64_IMM(BPF_REG_0, 0),
2457 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2458 				    offsetof(struct __sk_buff, ifindex)),
2459 			BPF_EXIT_INSN(),
2460 		},
2461 		.errstr = "invalid bpf_context access",
2462 		.result = REJECT,
2463 	},
2464 	{
2465 		"check cb access: double, wrong type",
2466 		.insns = {
2467 			BPF_MOV64_IMM(BPF_REG_0, 0),
2468 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2469 				    offsetof(struct __sk_buff, cb[0])),
2470 			BPF_EXIT_INSN(),
2471 		},
2472 		.errstr = "invalid bpf_context access",
2473 		.result = REJECT,
2474 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
2475 	},
2476 	{
2477 		"check out of range skb->cb access",
2478 		.insns = {
2479 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2480 				    offsetof(struct __sk_buff, cb[0]) + 256),
2481 			BPF_EXIT_INSN(),
2482 		},
2483 		.errstr = "invalid bpf_context access",
2484 		.errstr_unpriv = "",
2485 		.result = REJECT,
2486 		.prog_type = BPF_PROG_TYPE_SCHED_ACT,
2487 	},
2488 	{
2489 		"write skb fields from socket prog",
2490 		.insns = {
2491 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2492 				    offsetof(struct __sk_buff, cb[4])),
2493 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
2494 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2495 				    offsetof(struct __sk_buff, mark)),
2496 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2497 				    offsetof(struct __sk_buff, tc_index)),
2498 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
2499 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2500 				    offsetof(struct __sk_buff, cb[0])),
2501 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2502 				    offsetof(struct __sk_buff, cb[2])),
2503 			BPF_EXIT_INSN(),
2504 		},
2505 		.result = ACCEPT,
2506 		.errstr_unpriv = "R1 leaks addr",
2507 		.result_unpriv = REJECT,
2508 	},
2509 	{
2510 		"write skb fields from tc_cls_act prog",
2511 		.insns = {
2512 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2513 				    offsetof(struct __sk_buff, cb[0])),
2514 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2515 				    offsetof(struct __sk_buff, mark)),
2516 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2517 				    offsetof(struct __sk_buff, tc_index)),
2518 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2519 				    offsetof(struct __sk_buff, tc_index)),
2520 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2521 				    offsetof(struct __sk_buff, cb[3])),
2522 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2523 				    offsetof(struct __sk_buff, tstamp)),
2524 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2525 				    offsetof(struct __sk_buff, tstamp)),
2526 			BPF_EXIT_INSN(),
2527 		},
2528 		.errstr_unpriv = "",
2529 		.result_unpriv = REJECT,
2530 		.result = ACCEPT,
2531 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2532 	},
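	/*
	 * PTR_TO_STACK store/load: the verifier tracks the constant offset
	 * carried by a stack pointer, so with r1 = fp - 10 a BPF_DW access
	 * at r1 + 2 hits the 8-byte aligned slot fp - 8 and is accepted,
	 * while fp - 8 + 2 and fp - 10 + 8 are misaligned, and anything
	 * outside [fp - MAX_BPF_STACK, fp) is out of bounds.
	 */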
2533 	{
2534 		"PTR_TO_STACK store/load",
2535 		.insns = {
2536 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2537 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
2538 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
2539 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
2540 			BPF_EXIT_INSN(),
2541 		},
2542 		.result = ACCEPT,
2543 		.retval = 0xfaceb00c,
2544 	},
2545 	{
2546 		"PTR_TO_STACK store/load - bad alignment on off",
2547 		.insns = {
2548 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2549 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2550 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
2551 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
2552 			BPF_EXIT_INSN(),
2553 		},
2554 		.result = REJECT,
2555 		.errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
2556 	},
2557 	{
2558 		"PTR_TO_STACK store/load - bad alignment on reg",
2559 		.insns = {
2560 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2561 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
2562 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2563 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2564 			BPF_EXIT_INSN(),
2565 		},
2566 		.result = REJECT,
2567 		.errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
2568 	},
2569 	{
2570 		"PTR_TO_STACK store/load - out of bounds low",
2571 		.insns = {
2572 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2573 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
2574 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2575 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2576 			BPF_EXIT_INSN(),
2577 		},
2578 		.result = REJECT,
2579 		.errstr = "invalid stack off=-79992 size=8",
2580 	},
2581 	{
2582 		"PTR_TO_STACK store/load - out of bounds high",
2583 		.insns = {
2584 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2585 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2586 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2587 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2588 			BPF_EXIT_INSN(),
2589 		},
2590 		.result = REJECT,
2591 		.errstr = "invalid stack off=0 size=8",
2592 	},
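	/*
	 * The "unpriv:" tests are also run with capabilities dropped
	 * (provided kernel.unprivileged_bpf_disabled is not set).  In that
	 * run, operations that could leak kernel addresses (returning,
	 * comparing or doing arithmetic on pointers, or passing them to
	 * helpers) are rejected via errstr_unpriv/result_unpriv.
	 */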
2593 	{
2594 		"unpriv: return pointer",
2595 		.insns = {
2596 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
2597 			BPF_EXIT_INSN(),
2598 		},
2599 		.result = ACCEPT,
2600 		.result_unpriv = REJECT,
2601 		.errstr_unpriv = "R0 leaks addr",
2602 		.retval = POINTER_VALUE,
2603 	},
2604 	{
2605 		"unpriv: add const to pointer",
2606 		.insns = {
2607 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
2608 			BPF_MOV64_IMM(BPF_REG_0, 0),
2609 			BPF_EXIT_INSN(),
2610 		},
2611 		.result = ACCEPT,
2612 	},
2613 	{
2614 		"unpriv: add pointer to pointer",
2615 		.insns = {
2616 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
2617 			BPF_MOV64_IMM(BPF_REG_0, 0),
2618 			BPF_EXIT_INSN(),
2619 		},
2620 		.result = REJECT,
2621 		.errstr = "R1 pointer += pointer",
2622 	},
2623 	{
2624 		"unpriv: neg pointer",
2625 		.insns = {
2626 			BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
2627 			BPF_MOV64_IMM(BPF_REG_0, 0),
2628 			BPF_EXIT_INSN(),
2629 		},
2630 		.result = ACCEPT,
2631 		.result_unpriv = REJECT,
2632 		.errstr_unpriv = "R1 pointer arithmetic",
2633 	},
2634 	{
2635 		"unpriv: cmp pointer with const",
2636 		.insns = {
2637 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2638 			BPF_MOV64_IMM(BPF_REG_0, 0),
2639 			BPF_EXIT_INSN(),
2640 		},
2641 		.result = ACCEPT,
2642 		.result_unpriv = REJECT,
2643 		.errstr_unpriv = "R1 pointer comparison",
2644 	},
2645 	{
2646 		"unpriv: cmp pointer with pointer",
2647 		.insns = {
2648 			BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
2649 			BPF_MOV64_IMM(BPF_REG_0, 0),
2650 			BPF_EXIT_INSN(),
2651 		},
2652 		.result = ACCEPT,
2653 		.result_unpriv = REJECT,
2654 		.errstr_unpriv = "R10 pointer comparison",
2655 	},
2656 	{
2657 		"unpriv: check that printk is disallowed",
2658 		.insns = {
2659 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2660 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2661 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2662 			BPF_MOV64_IMM(BPF_REG_2, 8),
2663 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
2664 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2665 				     BPF_FUNC_trace_printk),
2666 			BPF_MOV64_IMM(BPF_REG_0, 0),
2667 			BPF_EXIT_INSN(),
2668 		},
2669 		.errstr_unpriv = "unknown func bpf_trace_printk#6",
2670 		.result_unpriv = REJECT,
2671 		.result = ACCEPT,
2672 	},
2673 	{
2674 		"unpriv: pass pointer to helper function",
2675 		.insns = {
2676 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2677 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2678 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2679 			BPF_LD_MAP_FD(BPF_REG_1, 0),
2680 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
2681 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2682 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2683 				     BPF_FUNC_map_update_elem),
2684 			BPF_MOV64_IMM(BPF_REG_0, 0),
2685 			BPF_EXIT_INSN(),
2686 		},
2687 		.fixup_map_hash_8b = { 3 },
2688 		.errstr_unpriv = "R4 leaks addr",
2689 		.result_unpriv = REJECT,
2690 		.result = ACCEPT,
2691 	},
2692 	{
2693 		"unpriv: indirectly pass pointer on stack to helper function",
2694 		.insns = {
2695 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2696 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2697 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2698 			BPF_LD_MAP_FD(BPF_REG_1, 0),
2699 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2700 				     BPF_FUNC_map_lookup_elem),
2701 			BPF_MOV64_IMM(BPF_REG_0, 0),
2702 			BPF_EXIT_INSN(),
2703 		},
2704 		.fixup_map_hash_8b = { 3 },
2705 		.errstr = "invalid indirect read from stack off -8+0 size 8",
2706 		.result = REJECT,
2707 	},
2708 	{
2709 		"unpriv: mangle pointer on stack 1",
2710 		.insns = {
2711 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2712 			BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
2713 			BPF_MOV64_IMM(BPF_REG_0, 0),
2714 			BPF_EXIT_INSN(),
2715 		},
2716 		.errstr_unpriv = "attempt to corrupt spilled",
2717 		.result_unpriv = REJECT,
2718 		.result = ACCEPT,
2719 	},
2720 	{
2721 		"unpriv: mangle pointer on stack 2",
2722 		.insns = {
2723 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2724 			BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
2725 			BPF_MOV64_IMM(BPF_REG_0, 0),
2726 			BPF_EXIT_INSN(),
2727 		},
2728 		.errstr_unpriv = "attempt to corrupt spilled",
2729 		.result_unpriv = REJECT,
2730 		.result = ACCEPT,
2731 	},
2732 	{
2733 		"unpriv: read pointer from stack in small chunks",
2734 		.insns = {
2735 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2736 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
2737 			BPF_MOV64_IMM(BPF_REG_0, 0),
2738 			BPF_EXIT_INSN(),
2739 		},
2740 		.errstr = "invalid size",
2741 		.result = REJECT,
2742 	},
2743 	{
2744 		"unpriv: write pointer into ctx",
2745 		.insns = {
2746 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
2747 			BPF_MOV64_IMM(BPF_REG_0, 0),
2748 			BPF_EXIT_INSN(),
2749 		},
2750 		.errstr_unpriv = "R1 leaks addr",
2751 		.result_unpriv = REJECT,
2752 		.errstr = "invalid bpf_context access",
2753 		.result = REJECT,
2754 	},
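	/*
	 * Spill/fill: the verifier remembers the type of a register spilled
	 * to the stack, so filling a spilled ctx pointer back into r1 keeps
	 * it usable as a context.  Overwriting the slot with a frame pointer
	 * or hitting it with XADD degrades the fill to fp/inv, and reusing
	 * one store instruction for two different pointer types is refused
	 * with "same insn cannot be used with different pointers".
	 */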
2755 	{
2756 		"unpriv: spill/fill of ctx",
2757 		.insns = {
2758 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2759 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2760 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2761 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2762 			BPF_MOV64_IMM(BPF_REG_0, 0),
2763 			BPF_EXIT_INSN(),
2764 		},
2765 		.result = ACCEPT,
2766 	},
2767 	{
2768 		"unpriv: spill/fill of ctx 2",
2769 		.insns = {
2770 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2771 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2772 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2773 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2774 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2775 				     BPF_FUNC_get_hash_recalc),
2776 			BPF_MOV64_IMM(BPF_REG_0, 0),
2777 			BPF_EXIT_INSN(),
2778 		},
2779 		.result = ACCEPT,
2780 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2781 	},
2782 	{
2783 		"unpriv: spill/fill of ctx 3",
2784 		.insns = {
2785 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2786 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2787 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2788 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
2789 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2790 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2791 				     BPF_FUNC_get_hash_recalc),
2792 			BPF_EXIT_INSN(),
2793 		},
2794 		.result = REJECT,
2795 		.errstr = "R1 type=fp expected=ctx",
2796 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2797 	},
2798 	{
2799 		"unpriv: spill/fill of ctx 4",
2800 		.insns = {
2801 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2802 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2803 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2804 			BPF_MOV64_IMM(BPF_REG_0, 1),
2805 			BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10,
2806 				     BPF_REG_0, -8, 0),
2807 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2808 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2809 				     BPF_FUNC_get_hash_recalc),
2810 			BPF_EXIT_INSN(),
2811 		},
2812 		.result = REJECT,
2813 		.errstr = "R1 type=inv expected=ctx",
2814 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2815 	},
2816 	{
2817 		"unpriv: spill/fill of different pointers stx",
2818 		.insns = {
2819 			BPF_MOV64_IMM(BPF_REG_3, 42),
2820 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2821 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2822 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2823 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2824 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
2825 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2826 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2827 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2828 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2829 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2830 				    offsetof(struct __sk_buff, mark)),
2831 			BPF_MOV64_IMM(BPF_REG_0, 0),
2832 			BPF_EXIT_INSN(),
2833 		},
2834 		.result = REJECT,
2835 		.errstr = "same insn cannot be used with different pointers",
2836 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2837 	},
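	/*
	 * The sock variants below use the BPF_SK_LOOKUP macro to acquire a
	 * referenced socket; on top of the mixed-pointer spill they also
	 * exercise reference tracking and socket write protection, hence
	 * the "Unreleased reference" and "cannot write into socket" errors.
	 */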
2838 	{
2839 		"unpriv: spill/fill of different pointers stx - ctx and sock",
2840 		.insns = {
2841 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
2842 			/* struct bpf_sock *sock = bpf_sock_lookup(...); */
2843 			BPF_SK_LOOKUP,
2844 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
2845 			/* u64 foo; */
2846 			/* void *target = &foo; */
2847 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2848 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2849 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
2850 			/* if (skb == NULL) *target = sock; */
2851 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2852 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2853 			/* else *target = skb; */
2854 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2855 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2856 			/* struct __sk_buff *skb = *target; */
2857 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2858 			/* skb->mark = 42; */
2859 			BPF_MOV64_IMM(BPF_REG_3, 42),
2860 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2861 				    offsetof(struct __sk_buff, mark)),
2862 			/* if (sk) bpf_sk_release(sk) */
2863 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2864 				BPF_EMIT_CALL(BPF_FUNC_sk_release),
2865 			BPF_MOV64_IMM(BPF_REG_0, 0),
2866 			BPF_EXIT_INSN(),
2867 		},
2868 		.result = REJECT,
2869 		.errstr = "type=ctx expected=sock",
2870 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2871 	},
2872 	{
2873 		"unpriv: spill/fill of different pointers stx - leak sock",
2874 		.insns = {
2875 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
2876 			/* struct bpf_sock *sock = bpf_sock_lookup(...); */
2877 			BPF_SK_LOOKUP,
2878 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
2879 			/* u64 foo; */
2880 			/* void *target = &foo; */
2881 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2882 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2883 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
2884 			/* if (skb == NULL) *target = sock; */
2885 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2886 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2887 			/* else *target = skb; */
2888 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2889 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2890 			/* struct __sk_buff *skb = *target; */
2891 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2892 			/* skb->mark = 42; */
2893 			BPF_MOV64_IMM(BPF_REG_3, 42),
2894 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2895 				    offsetof(struct __sk_buff, mark)),
2896 			BPF_EXIT_INSN(),
2897 		},
2898 		.result = REJECT,
2899 		//.errstr = "same insn cannot be used with different pointers",
2900 		.errstr = "Unreleased reference",
2901 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2902 	},
2903 	{
2904 		"unpriv: spill/fill of different pointers stx - sock and ctx (read)",
2905 		.insns = {
2906 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
2907 			/* struct bpf_sock *sock = bpf_sock_lookup(...); */
2908 			BPF_SK_LOOKUP,
2909 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
2910 			/* u64 foo; */
2911 			/* void *target = &foo; */
2912 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2913 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2914 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
2915 			/* if (skb) *target = skb */
2916 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2917 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2918 			/* else *target = sock */
2919 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2920 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2921 			/* struct bpf_sock *sk = *target; */
2922 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2923 			/* if (sk) u32 foo = sk->mark; bpf_sk_release(sk); */
2924 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
2925 				BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2926 					    offsetof(struct bpf_sock, mark)),
2927 				BPF_EMIT_CALL(BPF_FUNC_sk_release),
2928 			BPF_MOV64_IMM(BPF_REG_0, 0),
2929 			BPF_EXIT_INSN(),
2930 		},
2931 		.result = REJECT,
2932 		.errstr = "same insn cannot be used with different pointers",
2933 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2934 	},
2935 	{
2936 		"unpriv: spill/fill of different pointers stx - sock and ctx (write)",
2937 		.insns = {
2938 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
2939 			/* struct bpf_sock *sock = bpf_sock_lookup(...); */
2940 			BPF_SK_LOOKUP,
2941 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
2942 			/* u64 foo; */
2943 			/* void *target = &foo; */
2944 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2945 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2946 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
2947 			/* if (skb) *target = skb */
2948 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2949 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2950 			/* else *target = sock */
2951 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2952 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2953 			/* struct bpf_sock *sk = *target; */
2954 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2955 			/* if (sk) sk->mark = 42; bpf_sk_release(sk); */
2956 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2957 				BPF_MOV64_IMM(BPF_REG_3, 42),
2958 				BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2959 					    offsetof(struct bpf_sock, mark)),
2960 				BPF_EMIT_CALL(BPF_FUNC_sk_release),
2961 			BPF_MOV64_IMM(BPF_REG_0, 0),
2962 			BPF_EXIT_INSN(),
2963 		},
2964 		.result = REJECT,
2965 		//.errstr = "same insn cannot be used with different pointers",
2966 		.errstr = "cannot write into socket",
2967 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2968 	},
2969 	{
2970 		"unpriv: spill/fill of different pointers ldx",
2971 		.insns = {
2972 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2973 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2974 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2975 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2976 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
2977 				      -(__s32)offsetof(struct bpf_perf_event_data,
2978 						       sample_period) - 8),
2979 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2980 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2981 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2982 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2983 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
2984 				    offsetof(struct bpf_perf_event_data,
2985 					     sample_period)),
2986 			BPF_MOV64_IMM(BPF_REG_0, 0),
2987 			BPF_EXIT_INSN(),
2988 		},
2989 		.result = REJECT,
2990 		.errstr = "same insn cannot be used with different pointers",
2991 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
2992 	},
2993 	{
2994 		"unpriv: write pointer into map elem value",
2995 		.insns = {
2996 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2997 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2998 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2999 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3000 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3001 				     BPF_FUNC_map_lookup_elem),
3002 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3003 			BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
3004 			BPF_EXIT_INSN(),
3005 		},
3006 		.fixup_map_hash_8b = { 3 },
3007 		.errstr_unpriv = "R0 leaks addr",
3008 		.result_unpriv = REJECT,
3009 		.result = ACCEPT,
3010 	},
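	/*
	 * "alu32: mov u32 const" checks that a 32-bit mov preserves the
	 * known constant: r7 is provably 0, so the JEQ on r0 always jumps
	 * over the load through r7 and the otherwise invalid dereference of
	 * a scalar is never verified.
	 */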
3011 	{
3012 		"alu32: mov u32 const",
3013 		.insns = {
3014 			BPF_MOV32_IMM(BPF_REG_7, 0),
3015 			BPF_ALU32_IMM(BPF_AND, BPF_REG_7, 1),
3016 			BPF_MOV32_REG(BPF_REG_0, BPF_REG_7),
3017 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3018 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
3019 			BPF_EXIT_INSN(),
3020 		},
3021 		.result = ACCEPT,
3022 		.retval = 0,
3023 	},
3024 	{
3025 		"unpriv: partial copy of pointer",
3026 		.insns = {
3027 			BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
3028 			BPF_MOV64_IMM(BPF_REG_0, 0),
3029 			BPF_EXIT_INSN(),
3030 		},
3031 		.errstr_unpriv = "R10 partial copy",
3032 		.result_unpriv = REJECT,
3033 		.result = ACCEPT,
3034 	},
3035 	{
3036 		"unpriv: pass pointer to tail_call",
3037 		.insns = {
3038 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
3039 			BPF_LD_MAP_FD(BPF_REG_2, 0),
3040 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3041 				     BPF_FUNC_tail_call),
3042 			BPF_MOV64_IMM(BPF_REG_0, 0),
3043 			BPF_EXIT_INSN(),
3044 		},
3045 		.fixup_prog1 = { 1 },
3046 		.errstr_unpriv = "R3 leaks addr into helper",
3047 		.result_unpriv = REJECT,
3048 		.result = ACCEPT,
3049 	},
3050 	{
3051 		"unpriv: cmp map pointer with zero",
3052 		.insns = {
3053 			BPF_MOV64_IMM(BPF_REG_1, 0),
3054 			BPF_LD_MAP_FD(BPF_REG_1, 0),
3055 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
3056 			BPF_MOV64_IMM(BPF_REG_0, 0),
3057 			BPF_EXIT_INSN(),
3058 		},
3059 		.fixup_map_hash_8b = { 1 },
3060 		.errstr_unpriv = "R1 pointer comparison",
3061 		.result_unpriv = REJECT,
3062 		.result = ACCEPT,
3063 	},
3064 	{
3065 		"unpriv: write into frame pointer",
3066 		.insns = {
3067 			BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
3068 			BPF_MOV64_IMM(BPF_REG_0, 0),
3069 			BPF_EXIT_INSN(),
3070 		},
3071 		.errstr = "frame pointer is read only",
3072 		.result = REJECT,
3073 	},
3074 	{
3075 		"unpriv: spill/fill frame pointer",
3076 		.insns = {
3077 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3078 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3079 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
3080 			BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
3081 			BPF_MOV64_IMM(BPF_REG_0, 0),
3082 			BPF_EXIT_INSN(),
3083 		},
3084 		.errstr = "frame pointer is read only",
3085 		.result = REJECT,
3086 	},
3087 	{
3088 		"unpriv: cmp of frame pointer",
3089 		.insns = {
3090 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
3091 			BPF_MOV64_IMM(BPF_REG_0, 0),
3092 			BPF_EXIT_INSN(),
3093 		},
3094 		.errstr_unpriv = "R10 pointer comparison",
3095 		.result_unpriv = REJECT,
3096 		.result = ACCEPT,
3097 	},
3098 	{
3099 		"unpriv: adding of fp",
3100 		.insns = {
3101 			BPF_MOV64_IMM(BPF_REG_0, 0),
3102 			BPF_MOV64_IMM(BPF_REG_1, 0),
3103 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
3104 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
3105 			BPF_EXIT_INSN(),
3106 		},
3107 		.result = ACCEPT,
3108 	},
3109 	{
3110 		"unpriv: cmp of stack pointer",
3111 		.insns = {
3112 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3113 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3114 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
3115 			BPF_MOV64_IMM(BPF_REG_0, 0),
3116 			BPF_EXIT_INSN(),
3117 		},
3118 		.errstr_unpriv = "R2 pointer comparison",
3119 		.result_unpriv = REJECT,
3120 		.result = ACCEPT,
3121 	},
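	/*
	 * runtime/jit tail_call tests: fixup_prog1 points the BPF_LD_MAP_FD
	 * at a small prog array.  Judging by the expected retvals, index 0
	 * ends up returning 42 and index 1 returning 41, while an empty or
	 * out-of-range index makes bpf_tail_call() fall through so the
	 * caller's own return value (1 or 2) is observed.
	 */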
3122 	{
3123 		"runtime/jit: tail_call within bounds, prog once",
3124 		.insns = {
3125 			BPF_MOV64_IMM(BPF_REG_3, 0),
3126 			BPF_LD_MAP_FD(BPF_REG_2, 0),
3127 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3128 				     BPF_FUNC_tail_call),
3129 			BPF_MOV64_IMM(BPF_REG_0, 1),
3130 			BPF_EXIT_INSN(),
3131 		},
3132 		.fixup_prog1 = { 1 },
3133 		.result = ACCEPT,
3134 		.retval = 42,
3135 	},
3136 	{
3137 		"runtime/jit: tail_call within bounds, prog loop",
3138 		.insns = {
3139 			BPF_MOV64_IMM(BPF_REG_3, 1),
3140 			BPF_LD_MAP_FD(BPF_REG_2, 0),
3141 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3142 				     BPF_FUNC_tail_call),
3143 			BPF_MOV64_IMM(BPF_REG_0, 1),
3144 			BPF_EXIT_INSN(),
3145 		},
3146 		.fixup_prog1 = { 1 },
3147 		.result = ACCEPT,
3148 		.retval = 41,
3149 	},
3150 	{
3151 		"runtime/jit: tail_call within bounds, no prog",
3152 		.insns = {
3153 			BPF_MOV64_IMM(BPF_REG_3, 2),
3154 			BPF_LD_MAP_FD(BPF_REG_2, 0),
3155 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3156 				     BPF_FUNC_tail_call),
3157 			BPF_MOV64_IMM(BPF_REG_0, 1),
3158 			BPF_EXIT_INSN(),
3159 		},
3160 		.fixup_prog1 = { 1 },
3161 		.result = ACCEPT,
3162 		.retval = 1,
3163 	},
3164 	{
3165 		"runtime/jit: tail_call out of bounds",
3166 		.insns = {
3167 			BPF_MOV64_IMM(BPF_REG_3, 256),
3168 			BPF_LD_MAP_FD(BPF_REG_2, 0),
3169 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3170 				     BPF_FUNC_tail_call),
3171 			BPF_MOV64_IMM(BPF_REG_0, 2),
3172 			BPF_EXIT_INSN(),
3173 		},
3174 		.fixup_prog1 = { 1 },
3175 		.result = ACCEPT,
3176 		.retval = 2,
3177 	},
3178 	{
3179 		"runtime/jit: pass negative index to tail_call",
3180 		.insns = {
3181 			BPF_MOV64_IMM(BPF_REG_3, -1),
3182 			BPF_LD_MAP_FD(BPF_REG_2, 0),
3183 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3184 				     BPF_FUNC_tail_call),
3185 			BPF_MOV64_IMM(BPF_REG_0, 2),
3186 			BPF_EXIT_INSN(),
3187 		},
3188 		.fixup_prog1 = { 1 },
3189 		.result = ACCEPT,
3190 		.retval = 2,
3191 	},
3192 	{
3193 		"runtime/jit: pass > 32bit index to tail_call",
3194 		.insns = {
3195 			BPF_LD_IMM64(BPF_REG_3, 0x100000000ULL),
3196 			BPF_LD_MAP_FD(BPF_REG_2, 0),
3197 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3198 				     BPF_FUNC_tail_call),
3199 			BPF_MOV64_IMM(BPF_REG_0, 2),
3200 			BPF_EXIT_INSN(),
3201 		},
3202 		.fixup_prog1 = { 2 },
3203 		.result = ACCEPT,
3204 		.retval = 42,
3205 		/* Verifier rewrite for unpriv skips tail call here. */
3206 		.retval_unpriv = 2,
3207 	},
3208 	{
3209 		"stack pointer arithmetic",
3210 		.insns = {
3211 			BPF_MOV64_IMM(BPF_REG_1, 4),
3212 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
3213 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
3214 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
3215 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
3216 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
3217 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
3218 			BPF_ST_MEM(0, BPF_REG_2, 4, 0),
3219 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
3220 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
3221 			BPF_ST_MEM(0, BPF_REG_2, 4, 0),
3222 			BPF_MOV64_IMM(BPF_REG_0, 0),
3223 			BPF_EXIT_INSN(),
3224 		},
3225 		.result = ACCEPT,
3226 	},
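	/*
	 * raw_stack: bpf_skb_load_bytes(skb, off, to, len) writes into a
	 * stack buffer passed via r3, so the buffer need not be initialized
	 * beforehand, but r3 has to point into the 512-byte stack and r4
	 * must be a known, positive, in-bounds length.  Spilled pointers
	 * overlapping the buffer are clobbered and read back as 'inv'.
	 */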
3227 	{
3228 		"raw_stack: no skb_load_bytes",
3229 		.insns = {
3230 			BPF_MOV64_IMM(BPF_REG_2, 4),
3231 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3232 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3233 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3234 			BPF_MOV64_IMM(BPF_REG_4, 8),
3235 			/* Call to skb_load_bytes() omitted. */
3236 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3237 			BPF_EXIT_INSN(),
3238 		},
3239 		.result = REJECT,
3240 		.errstr = "invalid read from stack off -8+0 size 8",
3241 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3242 	},
3243 	{
3244 		"raw_stack: skb_load_bytes, negative len",
3245 		.insns = {
3246 			BPF_MOV64_IMM(BPF_REG_2, 4),
3247 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3248 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3249 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3250 			BPF_MOV64_IMM(BPF_REG_4, -8),
3251 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3252 				     BPF_FUNC_skb_load_bytes),
3253 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3254 			BPF_EXIT_INSN(),
3255 		},
3256 		.result = REJECT,
3257 		.errstr = "R4 min value is negative",
3258 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3259 	},
3260 	{
3261 		"raw_stack: skb_load_bytes, negative len 2",
3262 		.insns = {
3263 			BPF_MOV64_IMM(BPF_REG_2, 4),
3264 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3265 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3266 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3267 			BPF_MOV64_IMM(BPF_REG_4, ~0),
3268 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3269 				     BPF_FUNC_skb_load_bytes),
3270 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3271 			BPF_EXIT_INSN(),
3272 		},
3273 		.result = REJECT,
3274 		.errstr = "R4 min value is negative",
3275 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3276 	},
3277 	{
3278 		"raw_stack: skb_load_bytes, zero len",
3279 		.insns = {
3280 			BPF_MOV64_IMM(BPF_REG_2, 4),
3281 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3282 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3283 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3284 			BPF_MOV64_IMM(BPF_REG_4, 0),
3285 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3286 				     BPF_FUNC_skb_load_bytes),
3287 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3288 			BPF_EXIT_INSN(),
3289 		},
3290 		.result = REJECT,
3291 		.errstr = "invalid stack type R3",
3292 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3293 	},
3294 	{
3295 		"raw_stack: skb_load_bytes, no init",
3296 		.insns = {
3297 			BPF_MOV64_IMM(BPF_REG_2, 4),
3298 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3299 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3300 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3301 			BPF_MOV64_IMM(BPF_REG_4, 8),
3302 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3303 				     BPF_FUNC_skb_load_bytes),
3304 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3305 			BPF_EXIT_INSN(),
3306 		},
3307 		.result = ACCEPT,
3308 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3309 	},
3310 	{
3311 		"raw_stack: skb_load_bytes, init",
3312 		.insns = {
3313 			BPF_MOV64_IMM(BPF_REG_2, 4),
3314 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3315 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3316 			BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
3317 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3318 			BPF_MOV64_IMM(BPF_REG_4, 8),
3319 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3320 				     BPF_FUNC_skb_load_bytes),
3321 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3322 			BPF_EXIT_INSN(),
3323 		},
3324 		.result = ACCEPT,
3325 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3326 	},
3327 	{
3328 		"raw_stack: skb_load_bytes, spilled regs around bounds",
3329 		.insns = {
3330 			BPF_MOV64_IMM(BPF_REG_2, 4),
3331 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3332 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
3333 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
3334 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  8),
3335 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3336 			BPF_MOV64_IMM(BPF_REG_4, 8),
3337 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3338 				     BPF_FUNC_skb_load_bytes),
3339 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
3340 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6,  8),
3341 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3342 				    offsetof(struct __sk_buff, mark)),
3343 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
3344 				    offsetof(struct __sk_buff, priority)),
3345 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3346 			BPF_EXIT_INSN(),
3347 		},
3348 		.result = ACCEPT,
3349 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3350 	},
3351 	{
3352 		"raw_stack: skb_load_bytes, spilled regs corruption",
3353 		.insns = {
3354 			BPF_MOV64_IMM(BPF_REG_2, 4),
3355 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3356 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3357 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
3358 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3359 			BPF_MOV64_IMM(BPF_REG_4, 8),
3360 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3361 				     BPF_FUNC_skb_load_bytes),
3362 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3363 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3364 				    offsetof(struct __sk_buff, mark)),
3365 			BPF_EXIT_INSN(),
3366 		},
3367 		.result = REJECT,
3368 		.errstr = "R0 invalid mem access 'inv'",
3369 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3370 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3371 	},
3372 	{
3373 		"raw_stack: skb_load_bytes, spilled regs corruption 2",
3374 		.insns = {
3375 			BPF_MOV64_IMM(BPF_REG_2, 4),
3376 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3377 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
3378 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
3379 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  0),
3380 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  8),
3381 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3382 			BPF_MOV64_IMM(BPF_REG_4, 8),
3383 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3384 				     BPF_FUNC_skb_load_bytes),
3385 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
3386 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6,  8),
3387 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6,  0),
3388 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3389 				    offsetof(struct __sk_buff, mark)),
3390 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
3391 				    offsetof(struct __sk_buff, priority)),
3392 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3393 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
3394 				    offsetof(struct __sk_buff, pkt_type)),
3395 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
3396 			BPF_EXIT_INSN(),
3397 		},
3398 		.result = REJECT,
3399 		.errstr = "R3 invalid mem access 'inv'",
3400 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3401 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3402 	},
3403 	{
3404 		"raw_stack: skb_load_bytes, spilled regs + data",
3405 		.insns = {
3406 			BPF_MOV64_IMM(BPF_REG_2, 4),
3407 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3408 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
3409 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
3410 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  0),
3411 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  8),
3412 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3413 			BPF_MOV64_IMM(BPF_REG_4, 8),
3414 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3415 				     BPF_FUNC_skb_load_bytes),
3416 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
3417 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6,  8),
3418 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6,  0),
3419 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3420 				    offsetof(struct __sk_buff, mark)),
3421 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
3422 				    offsetof(struct __sk_buff, priority)),
3423 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3424 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
3425 			BPF_EXIT_INSN(),
3426 		},
3427 		.result = ACCEPT,
3428 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3429 	},
3430 	{
3431 		"raw_stack: skb_load_bytes, invalid access 1",
3432 		.insns = {
3433 			BPF_MOV64_IMM(BPF_REG_2, 4),
3434 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3435 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
3436 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3437 			BPF_MOV64_IMM(BPF_REG_4, 8),
3438 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3439 				     BPF_FUNC_skb_load_bytes),
3440 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3441 			BPF_EXIT_INSN(),
3442 		},
3443 		.result = REJECT,
3444 		.errstr = "invalid stack type R3 off=-513 access_size=8",
3445 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3446 	},
3447 	{
3448 		"raw_stack: skb_load_bytes, invalid access 2",
3449 		.insns = {
3450 			BPF_MOV64_IMM(BPF_REG_2, 4),
3451 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3452 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
3453 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3454 			BPF_MOV64_IMM(BPF_REG_4, 8),
3455 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3456 				     BPF_FUNC_skb_load_bytes),
3457 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3458 			BPF_EXIT_INSN(),
3459 		},
3460 		.result = REJECT,
3461 		.errstr = "invalid stack type R3 off=-1 access_size=8",
3462 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3463 	},
3464 	{
3465 		"raw_stack: skb_load_bytes, invalid access 3",
3466 		.insns = {
3467 			BPF_MOV64_IMM(BPF_REG_2, 4),
3468 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3469 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
3470 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3471 			BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3472 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3473 				     BPF_FUNC_skb_load_bytes),
3474 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3475 			BPF_EXIT_INSN(),
3476 		},
3477 		.result = REJECT,
3478 		.errstr = "R4 min value is negative",
3479 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3480 	},
3481 	{
3482 		"raw_stack: skb_load_bytes, invalid access 4",
3483 		.insns = {
3484 			BPF_MOV64_IMM(BPF_REG_2, 4),
3485 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3486 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
3487 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3488 			BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
3489 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3490 				     BPF_FUNC_skb_load_bytes),
3491 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3492 			BPF_EXIT_INSN(),
3493 		},
3494 		.result = REJECT,
3495 		.errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
3496 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3497 	},
3498 	{
3499 		"raw_stack: skb_load_bytes, invalid access 5",
3500 		.insns = {
3501 			BPF_MOV64_IMM(BPF_REG_2, 4),
3502 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3503 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
3504 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3505 			BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
3506 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3507 				     BPF_FUNC_skb_load_bytes),
3508 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3509 			BPF_EXIT_INSN(),
3510 		},
3511 		.result = REJECT,
3512 		.errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
3513 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3514 	},
3515 	{
3516 		"raw_stack: skb_load_bytes, invalid access 6",
3517 		.insns = {
3518 			BPF_MOV64_IMM(BPF_REG_2, 4),
3519 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3520 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
3521 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3522 			BPF_MOV64_IMM(BPF_REG_4, 0),
3523 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3524 				     BPF_FUNC_skb_load_bytes),
3525 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3526 			BPF_EXIT_INSN(),
3527 		},
3528 		.result = REJECT,
3529 		.errstr = "invalid stack type R3 off=-512 access_size=0",
3530 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3531 	},
3532 	{
3533 		"raw_stack: skb_load_bytes, large access",
3534 		.insns = {
3535 			BPF_MOV64_IMM(BPF_REG_2, 4),
3536 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3537 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
3538 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3539 			BPF_MOV64_IMM(BPF_REG_4, 512),
3540 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3541 				     BPF_FUNC_skb_load_bytes),
3542 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3543 			BPF_EXIT_INSN(),
3544 		},
3545 		.result = ACCEPT,
3546 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3547 	},
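	/* Context store tests: the verifier must reject BPF_ST and BPF_XADD
	 * stores through the ctx pointer in R1; ctx fields may only be written
	 * with BPF_STX, and only where the program type allows it.
	 */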
3548 	{
3549 		"context stores via ST",
3550 		.insns = {
3551 			BPF_MOV64_IMM(BPF_REG_0, 0),
3552 			BPF_ST_MEM(BPF_DW, BPF_REG_1, offsetof(struct __sk_buff, mark), 0),
3553 			BPF_EXIT_INSN(),
3554 		},
3555 		.errstr = "BPF_ST stores into R1 ctx is not allowed",
3556 		.result = REJECT,
3557 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3558 	},
3559 	{
3560 		"context stores via XADD",
3561 		.insns = {
3562 			BPF_MOV64_IMM(BPF_REG_0, 0),
3563 			BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_1,
3564 				     BPF_REG_0, offsetof(struct __sk_buff, mark), 0),
3565 			BPF_EXIT_INSN(),
3566 		},
3567 		.errstr = "BPF_XADD stores into R1 ctx is not allowed",
3568 		.result = REJECT,
3569 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3570 	},
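	/* Direct packet access tests: each program loads skb->data and
	 * skb->data_end, proves the access is in bounds with a pointer
	 * comparison, and only then reads or writes packet bytes. Variants
	 * with missing or insufficient checks must be rejected.
	 */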
3571 	{
3572 		"direct packet access: test1",
3573 		.insns = {
3574 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3575 				    offsetof(struct __sk_buff, data)),
3576 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3577 				    offsetof(struct __sk_buff, data_end)),
3578 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3579 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3580 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3581 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3582 			BPF_MOV64_IMM(BPF_REG_0, 0),
3583 			BPF_EXIT_INSN(),
3584 		},
3585 		.result = ACCEPT,
3586 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3587 	},
3588 	{
3589 		"direct packet access: test2",
3590 		.insns = {
3591 			BPF_MOV64_IMM(BPF_REG_0, 1),
3592 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
3593 				    offsetof(struct __sk_buff, data_end)),
3594 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3595 				    offsetof(struct __sk_buff, data)),
3596 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3597 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
3598 			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
3599 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
3600 			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
3601 			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
3602 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3603 				    offsetof(struct __sk_buff, data)),
3604 			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
3605 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3606 				    offsetof(struct __sk_buff, len)),
3607 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
3608 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
3609 			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
3610 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
3611 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
3612 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
3613 				    offsetof(struct __sk_buff, data_end)),
3614 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
3615 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
3616 			BPF_MOV64_IMM(BPF_REG_0, 0),
3617 			BPF_EXIT_INSN(),
3618 		},
3619 		.result = ACCEPT,
3620 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3621 	},
3622 	{
3623 		"direct packet access: test3",
3624 		.insns = {
3625 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3626 				    offsetof(struct __sk_buff, data)),
3627 			BPF_MOV64_IMM(BPF_REG_0, 0),
3628 			BPF_EXIT_INSN(),
3629 		},
3630 		.errstr = "invalid bpf_context access off=76",
3631 		.result = REJECT,
3632 		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
3633 	},
3634 	{
3635 		"direct packet access: test4 (write)",
3636 		.insns = {
3637 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3638 				    offsetof(struct __sk_buff, data)),
3639 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3640 				    offsetof(struct __sk_buff, data_end)),
3641 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3642 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3643 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3644 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3645 			BPF_MOV64_IMM(BPF_REG_0, 0),
3646 			BPF_EXIT_INSN(),
3647 		},
3648 		.result = ACCEPT,
3649 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3650 	},
3651 	{
3652 		"direct packet access: test5 (pkt_end >= reg, good access)",
3653 		.insns = {
3654 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3655 				    offsetof(struct __sk_buff, data)),
3656 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3657 				    offsetof(struct __sk_buff, data_end)),
3658 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3659 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3660 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
3661 			BPF_MOV64_IMM(BPF_REG_0, 1),
3662 			BPF_EXIT_INSN(),
3663 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3664 			BPF_MOV64_IMM(BPF_REG_0, 0),
3665 			BPF_EXIT_INSN(),
3666 		},
3667 		.result = ACCEPT,
3668 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3669 	},
3670 	{
3671 		"direct packet access: test6 (pkt_end >= reg, bad access)",
3672 		.insns = {
3673 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3674 				    offsetof(struct __sk_buff, data)),
3675 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3676 				    offsetof(struct __sk_buff, data_end)),
3677 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3678 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3679 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
3680 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3681 			BPF_MOV64_IMM(BPF_REG_0, 1),
3682 			BPF_EXIT_INSN(),
3683 			BPF_MOV64_IMM(BPF_REG_0, 0),
3684 			BPF_EXIT_INSN(),
3685 		},
3686 		.errstr = "invalid access to packet",
3687 		.result = REJECT,
3688 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3689 	},
3690 	{
3691 		"direct packet access: test7 (pkt_end >= reg, both accesses)",
3692 		.insns = {
3693 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3694 				    offsetof(struct __sk_buff, data)),
3695 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3696 				    offsetof(struct __sk_buff, data_end)),
3697 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3698 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3699 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
3700 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3701 			BPF_MOV64_IMM(BPF_REG_0, 1),
3702 			BPF_EXIT_INSN(),
3703 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3704 			BPF_MOV64_IMM(BPF_REG_0, 0),
3705 			BPF_EXIT_INSN(),
3706 		},
3707 		.errstr = "invalid access to packet",
3708 		.result = REJECT,
3709 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3710 	},
3711 	{
3712 		"direct packet access: test8 (double test, variant 1)",
3713 		.insns = {
3714 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3715 				    offsetof(struct __sk_buff, data)),
3716 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3717 				    offsetof(struct __sk_buff, data_end)),
3718 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3719 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3720 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
3721 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3722 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3723 			BPF_MOV64_IMM(BPF_REG_0, 1),
3724 			BPF_EXIT_INSN(),
3725 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3726 			BPF_MOV64_IMM(BPF_REG_0, 0),
3727 			BPF_EXIT_INSN(),
3728 		},
3729 		.result = ACCEPT,
3730 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3731 	},
3732 	{
3733 		"direct packet access: test9 (double test, variant 2)",
3734 		.insns = {
3735 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3736 				    offsetof(struct __sk_buff, data)),
3737 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3738 				    offsetof(struct __sk_buff, data_end)),
3739 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3740 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3741 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
3742 			BPF_MOV64_IMM(BPF_REG_0, 1),
3743 			BPF_EXIT_INSN(),
3744 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3745 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3746 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3747 			BPF_MOV64_IMM(BPF_REG_0, 0),
3748 			BPF_EXIT_INSN(),
3749 		},
3750 		.result = ACCEPT,
3751 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3752 	},
3753 	{
3754 		"direct packet access: test10 (write invalid)",
3755 		.insns = {
3756 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3757 				    offsetof(struct __sk_buff, data)),
3758 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3759 				    offsetof(struct __sk_buff, data_end)),
3760 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3761 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3762 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
3763 			BPF_MOV64_IMM(BPF_REG_0, 0),
3764 			BPF_EXIT_INSN(),
3765 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3766 			BPF_MOV64_IMM(BPF_REG_0, 0),
3767 			BPF_EXIT_INSN(),
3768 		},
3769 		.errstr = "invalid access to packet",
3770 		.result = REJECT,
3771 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3772 	},
3773 	{
3774 		"direct packet access: test11 (shift, good access)",
3775 		.insns = {
3776 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3777 				    offsetof(struct __sk_buff, data)),
3778 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3779 				    offsetof(struct __sk_buff, data_end)),
3780 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3781 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3782 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3783 			BPF_MOV64_IMM(BPF_REG_3, 144),
3784 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3785 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3786 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
3787 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3788 			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3789 			BPF_MOV64_IMM(BPF_REG_0, 1),
3790 			BPF_EXIT_INSN(),
3791 			BPF_MOV64_IMM(BPF_REG_0, 0),
3792 			BPF_EXIT_INSN(),
3793 		},
3794 		.result = ACCEPT,
3795 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3796 		.retval = 1,
3797 	},
3798 	{
3799 		"direct packet access: test12 (and, good access)",
3800 		.insns = {
3801 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3802 				    offsetof(struct __sk_buff, data)),
3803 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3804 				    offsetof(struct __sk_buff, data_end)),
3805 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3806 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3807 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3808 			BPF_MOV64_IMM(BPF_REG_3, 144),
3809 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3810 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3811 			BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
3812 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3813 			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3814 			BPF_MOV64_IMM(BPF_REG_0, 1),
3815 			BPF_EXIT_INSN(),
3816 			BPF_MOV64_IMM(BPF_REG_0, 0),
3817 			BPF_EXIT_INSN(),
3818 		},
3819 		.result = ACCEPT,
3820 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3821 		.retval = 1,
3822 	},
3823 	{
3824 		"direct packet access: test13 (branches, good access)",
3825 		.insns = {
3826 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3827 				    offsetof(struct __sk_buff, data)),
3828 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3829 				    offsetof(struct __sk_buff, data_end)),
3830 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3831 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3832 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
3833 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3834 				    offsetof(struct __sk_buff, mark)),
3835 			BPF_MOV64_IMM(BPF_REG_4, 1),
3836 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
3837 			BPF_MOV64_IMM(BPF_REG_3, 14),
3838 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
3839 			BPF_MOV64_IMM(BPF_REG_3, 24),
3840 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3841 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3842 			BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
3843 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3844 			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3845 			BPF_MOV64_IMM(BPF_REG_0, 1),
3846 			BPF_EXIT_INSN(),
3847 			BPF_MOV64_IMM(BPF_REG_0, 0),
3848 			BPF_EXIT_INSN(),
3849 		},
3850 		.result = ACCEPT,
3851 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3852 		.retval = 1,
3853 	},
3854 	{
3855 		"direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
3856 		.insns = {
3857 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3858 				    offsetof(struct __sk_buff, data)),
3859 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3860 				    offsetof(struct __sk_buff, data_end)),
3861 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3862 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3863 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
3864 			BPF_MOV64_IMM(BPF_REG_5, 12),
3865 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4),
3866 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3867 			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3868 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
3869 			BPF_MOV64_IMM(BPF_REG_0, 1),
3870 			BPF_EXIT_INSN(),
3871 			BPF_MOV64_IMM(BPF_REG_0, 0),
3872 			BPF_EXIT_INSN(),
3873 		},
3874 		.result = ACCEPT,
3875 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3876 		.retval = 1,
3877 	},
3878 	{
3879 		"direct packet access: test15 (spill with xadd)",
3880 		.insns = {
3881 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3882 				    offsetof(struct __sk_buff, data)),
3883 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3884 				    offsetof(struct __sk_buff, data_end)),
3885 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3886 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3887 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3888 			BPF_MOV64_IMM(BPF_REG_5, 4096),
3889 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
3890 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
3891 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
3892 			BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
3893 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
3894 			BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
3895 			BPF_MOV64_IMM(BPF_REG_0, 0),
3896 			BPF_EXIT_INSN(),
3897 		},
3898 		.errstr = "R2 invalid mem access 'inv'",
3899 		.result = REJECT,
3900 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3901 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3902 	},
3903 	{
3904 		"direct packet access: test16 (arith on data_end)",
3905 		.insns = {
3906 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3907 				    offsetof(struct __sk_buff, data)),
3908 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3909 				    offsetof(struct __sk_buff, data_end)),
3910 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3911 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3912 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16),
3913 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3914 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3915 			BPF_MOV64_IMM(BPF_REG_0, 0),
3916 			BPF_EXIT_INSN(),
3917 		},
3918 		.errstr = "R3 pointer arithmetic on pkt_end",
3919 		.result = REJECT,
3920 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3921 	},
3922 	{
3923 		"direct packet access: test17 (pruning, alignment)",
3924 		.insns = {
3925 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3926 				    offsetof(struct __sk_buff, data)),
3927 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3928 				    offsetof(struct __sk_buff, data_end)),
3929 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3930 				    offsetof(struct __sk_buff, mark)),
3931 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3932 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14),
3933 			BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4),
3934 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3935 			BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4),
3936 			BPF_MOV64_IMM(BPF_REG_0, 0),
3937 			BPF_EXIT_INSN(),
3938 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
3939 			BPF_JMP_A(-6),
3940 		},
3941 		.errstr = "misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4",
3942 		.result = REJECT,
3943 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3944 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
3945 	},
3946 	{
3947 		"direct packet access: test18 (imm += pkt_ptr, 1)",
3948 		.insns = {
3949 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3950 				    offsetof(struct __sk_buff, data)),
3951 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3952 				    offsetof(struct __sk_buff, data_end)),
3953 			BPF_MOV64_IMM(BPF_REG_0, 8),
3954 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3955 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3956 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3957 			BPF_MOV64_IMM(BPF_REG_0, 0),
3958 			BPF_EXIT_INSN(),
3959 		},
3960 		.result = ACCEPT,
3961 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3962 	},
3963 	{
3964 		"direct packet access: test19 (imm += pkt_ptr, 2)",
3965 		.insns = {
3966 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3967 				    offsetof(struct __sk_buff, data)),
3968 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3969 				    offsetof(struct __sk_buff, data_end)),
3970 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3971 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3972 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
3973 			BPF_MOV64_IMM(BPF_REG_4, 4),
3974 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3975 			BPF_STX_MEM(BPF_B, BPF_REG_4, BPF_REG_4, 0),
3976 			BPF_MOV64_IMM(BPF_REG_0, 0),
3977 			BPF_EXIT_INSN(),
3978 		},
3979 		.result = ACCEPT,
3980 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3981 	},
3982 	{
3983 		"direct packet access: test20 (x += pkt_ptr, 1)",
3984 		.insns = {
3985 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3986 				    offsetof(struct __sk_buff, data)),
3987 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3988 				    offsetof(struct __sk_buff, data_end)),
3989 			BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3990 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3991 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3992 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0x7fff),
3993 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3994 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3995 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3996 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
3997 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
3998 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
3999 			BPF_MOV64_IMM(BPF_REG_0, 0),
4000 			BPF_EXIT_INSN(),
4001 		},
4002 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4003 		.result = ACCEPT,
4004 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4005 	},
4006 	{
4007 		"direct packet access: test21 (x += pkt_ptr, 2)",
4008 		.insns = {
4009 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4010 				    offsetof(struct __sk_buff, data)),
4011 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4012 				    offsetof(struct __sk_buff, data_end)),
4013 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4014 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4015 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
4016 			BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
4017 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
4018 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
4019 			BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0x7fff),
4020 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
4021 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
4022 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
4023 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
4024 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
4025 			BPF_MOV64_IMM(BPF_REG_0, 0),
4026 			BPF_EXIT_INSN(),
4027 		},
4028 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4029 		.result = ACCEPT,
4030 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4031 	},
4032 	{
4033 		"direct packet access: test22 (x += pkt_ptr, 3)",
4034 		.insns = {
4035 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4036 				    offsetof(struct __sk_buff, data)),
4037 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4038 				    offsetof(struct __sk_buff, data_end)),
4039 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4040 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4041 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8),
4042 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_3, -16),
4043 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -16),
4044 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11),
4045 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
4046 			BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
4047 			BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
4048 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
4049 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 49),
4050 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
4051 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
4052 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
4053 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
4054 			BPF_MOV64_IMM(BPF_REG_2, 1),
4055 			BPF_STX_MEM(BPF_H, BPF_REG_4, BPF_REG_2, 0),
4056 			BPF_MOV64_IMM(BPF_REG_0, 0),
4057 			BPF_EXIT_INSN(),
4058 		},
4059 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4060 		.result = ACCEPT,
4061 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4062 	},
4063 	{
4064 		"direct packet access: test23 (x += pkt_ptr, 4)",
4065 		.insns = {
4066 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4067 				    offsetof(struct __sk_buff, data)),
4068 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4069 				    offsetof(struct __sk_buff, data_end)),
4070 			BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
4071 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
4072 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
4073 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
4074 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4075 			BPF_MOV64_IMM(BPF_REG_0, 31),
4076 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
4077 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
4078 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
4079 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1),
4080 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4081 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
4082 			BPF_MOV64_IMM(BPF_REG_0, 0),
4083 			BPF_EXIT_INSN(),
4084 		},
4085 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4086 		.result = REJECT,
4087 		.errstr = "invalid access to packet, off=0 size=8, R5(id=1,off=0,r=0)",
4088 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4089 	},
4090 	{
4091 		"direct packet access: test24 (x += pkt_ptr, 5)",
4092 		.insns = {
4093 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4094 				    offsetof(struct __sk_buff, data)),
4095 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4096 				    offsetof(struct __sk_buff, data_end)),
4097 			BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
4098 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
4099 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
4100 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff),
4101 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4102 			BPF_MOV64_IMM(BPF_REG_0, 64),
4103 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
4104 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
4105 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
4106 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7fff - 1),
4107 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4108 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
4109 			BPF_MOV64_IMM(BPF_REG_0, 0),
4110 			BPF_EXIT_INSN(),
4111 		},
4112 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4113 		.result = ACCEPT,
4114 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4115 	},
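	/* "Marking" tests: BPF_JLT/BPF_JLE comparisons against pkt_end must
	 * propagate the learned packet range to the correct branch, so the
	 * packet access is accepted only on the path where the bound holds.
	 */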
4116 	{
4117 		"direct packet access: test25 (marking on <, good access)",
4118 		.insns = {
4119 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4120 				    offsetof(struct __sk_buff, data)),
4121 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4122 				    offsetof(struct __sk_buff, data_end)),
4123 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4124 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4125 			BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 2),
4126 			BPF_MOV64_IMM(BPF_REG_0, 0),
4127 			BPF_EXIT_INSN(),
4128 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4129 			BPF_JMP_IMM(BPF_JA, 0, 0, -4),
4130 		},
4131 		.result = ACCEPT,
4132 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4133 	},
4134 	{
4135 		"direct packet access: test26 (marking on <, bad access)",
4136 		.insns = {
4137 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4138 				    offsetof(struct __sk_buff, data)),
4139 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4140 				    offsetof(struct __sk_buff, data_end)),
4141 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4142 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4143 			BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 3),
4144 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4145 			BPF_MOV64_IMM(BPF_REG_0, 0),
4146 			BPF_EXIT_INSN(),
4147 			BPF_JMP_IMM(BPF_JA, 0, 0, -3),
4148 		},
4149 		.result = REJECT,
4150 		.errstr = "invalid access to packet",
4151 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4152 	},
4153 	{
4154 		"direct packet access: test27 (marking on <=, good access)",
4155 		.insns = {
4156 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4157 				    offsetof(struct __sk_buff, data)),
4158 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4159 				    offsetof(struct __sk_buff, data_end)),
4160 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4161 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4162 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 1),
4163 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4164 			BPF_MOV64_IMM(BPF_REG_0, 1),
4165 			BPF_EXIT_INSN(),
4166 		},
4167 		.result = ACCEPT,
4168 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4169 		.retval = 1,
4170 	},
4171 	{
4172 		"direct packet access: test28 (marking on <=, bad access)",
4173 		.insns = {
4174 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4175 				    offsetof(struct __sk_buff, data)),
4176 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4177 				    offsetof(struct __sk_buff, data_end)),
4178 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4179 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4180 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 2),
4181 			BPF_MOV64_IMM(BPF_REG_0, 1),
4182 			BPF_EXIT_INSN(),
4183 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4184 			BPF_JMP_IMM(BPF_JA, 0, 0, -4),
4185 		},
4186 		.result = REJECT,
4187 		.errstr = "invalid access to packet",
4188 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4189 	},
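	/* Helper access to packet tests: packet pointers passed as helper
	 * arguments (e.g. map keys or csum_diff buffers) need a verified
	 * range covering the size the helper will read or write.
	 */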
4190 	{
4191 		"helper access to packet: test1, valid packet_ptr range",
4192 		.insns = {
4193 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4194 				    offsetof(struct xdp_md, data)),
4195 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4196 				    offsetof(struct xdp_md, data_end)),
4197 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
4198 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
4199 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
4200 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4201 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
4202 			BPF_MOV64_IMM(BPF_REG_4, 0),
4203 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4204 				     BPF_FUNC_map_update_elem),
4205 			BPF_MOV64_IMM(BPF_REG_0, 0),
4206 			BPF_EXIT_INSN(),
4207 		},
4208 		.fixup_map_hash_8b = { 5 },
4209 		.result_unpriv = ACCEPT,
4210 		.result = ACCEPT,
4211 		.prog_type = BPF_PROG_TYPE_XDP,
4212 	},
4213 	{
4214 		"helper access to packet: test2, unchecked packet_ptr",
4215 		.insns = {
4216 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4217 				    offsetof(struct xdp_md, data)),
4218 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4219 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4220 				     BPF_FUNC_map_lookup_elem),
4221 			BPF_MOV64_IMM(BPF_REG_0, 0),
4222 			BPF_EXIT_INSN(),
4223 		},
4224 		.fixup_map_hash_8b = { 1 },
4225 		.result = REJECT,
4226 		.errstr = "invalid access to packet",
4227 		.prog_type = BPF_PROG_TYPE_XDP,
4228 	},
4229 	{
4230 		"helper access to packet: test3, variable add",
4231 		.insns = {
4232 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4233 					offsetof(struct xdp_md, data)),
4234 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4235 					offsetof(struct xdp_md, data_end)),
4236 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4237 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
4238 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
4239 			BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
4240 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4241 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
4242 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
4243 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
4244 			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
4245 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4246 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
4247 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4248 				     BPF_FUNC_map_lookup_elem),
4249 			BPF_MOV64_IMM(BPF_REG_0, 0),
4250 			BPF_EXIT_INSN(),
4251 		},
4252 		.fixup_map_hash_8b = { 11 },
4253 		.result = ACCEPT,
4254 		.prog_type = BPF_PROG_TYPE_XDP,
4255 	},
4256 	{
4257 		"helper access to packet: test4, packet_ptr with bad range",
4258 		.insns = {
4259 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4260 				    offsetof(struct xdp_md, data)),
4261 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4262 				    offsetof(struct xdp_md, data_end)),
4263 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4264 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
4265 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
4266 			BPF_MOV64_IMM(BPF_REG_0, 0),
4267 			BPF_EXIT_INSN(),
4268 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4269 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4270 				     BPF_FUNC_map_lookup_elem),
4271 			BPF_MOV64_IMM(BPF_REG_0, 0),
4272 			BPF_EXIT_INSN(),
4273 		},
4274 		.fixup_map_hash_8b = { 7 },
4275 		.result = REJECT,
4276 		.errstr = "invalid access to packet",
4277 		.prog_type = BPF_PROG_TYPE_XDP,
4278 	},
4279 	{
4280 		"helper access to packet: test5, packet_ptr with too short range",
4281 		.insns = {
4282 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4283 				    offsetof(struct xdp_md, data)),
4284 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4285 				    offsetof(struct xdp_md, data_end)),
4286 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
4287 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4288 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
4289 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
4290 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4291 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4292 				     BPF_FUNC_map_lookup_elem),
4293 			BPF_MOV64_IMM(BPF_REG_0, 0),
4294 			BPF_EXIT_INSN(),
4295 		},
4296 		.fixup_map_hash_8b = { 6 },
4297 		.result = REJECT,
4298 		.errstr = "invalid access to packet",
4299 		.prog_type = BPF_PROG_TYPE_XDP,
4300 	},
4301 	{
4302 		"helper access to packet: test6, cls valid packet_ptr range",
4303 		.insns = {
4304 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4305 				    offsetof(struct __sk_buff, data)),
4306 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4307 				    offsetof(struct __sk_buff, data_end)),
4308 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
4309 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
4310 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
4311 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4312 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
4313 			BPF_MOV64_IMM(BPF_REG_4, 0),
4314 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4315 				     BPF_FUNC_map_update_elem),
4316 			BPF_MOV64_IMM(BPF_REG_0, 0),
4317 			BPF_EXIT_INSN(),
4318 		},
4319 		.fixup_map_hash_8b = { 5 },
4320 		.result = ACCEPT,
4321 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4322 	},
4323 	{
4324 		"helper access to packet: test7, cls unchecked packet_ptr",
4325 		.insns = {
4326 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4327 				    offsetof(struct __sk_buff, data)),
4328 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4329 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4330 				     BPF_FUNC_map_lookup_elem),
4331 			BPF_MOV64_IMM(BPF_REG_0, 0),
4332 			BPF_EXIT_INSN(),
4333 		},
4334 		.fixup_map_hash_8b = { 1 },
4335 		.result = REJECT,
4336 		.errstr = "invalid access to packet",
4337 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4338 	},
4339 	{
4340 		"helper access to packet: test8, cls variable add",
4341 		.insns = {
4342 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4343 					offsetof(struct __sk_buff, data)),
4344 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4345 					offsetof(struct __sk_buff, data_end)),
4346 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4347 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
4348 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
4349 			BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
4350 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4351 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
4352 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
4353 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
4354 			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
4355 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4356 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
4357 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4358 				     BPF_FUNC_map_lookup_elem),
4359 			BPF_MOV64_IMM(BPF_REG_0, 0),
4360 			BPF_EXIT_INSN(),
4361 		},
4362 		.fixup_map_hash_8b = { 11 },
4363 		.result = ACCEPT,
4364 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4365 	},
4366 	{
4367 		"helper access to packet: test9, cls packet_ptr with bad range",
4368 		.insns = {
4369 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4370 				    offsetof(struct __sk_buff, data)),
4371 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4372 				    offsetof(struct __sk_buff, data_end)),
4373 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4374 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
4375 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
4376 			BPF_MOV64_IMM(BPF_REG_0, 0),
4377 			BPF_EXIT_INSN(),
4378 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4379 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4380 				     BPF_FUNC_map_lookup_elem),
4381 			BPF_MOV64_IMM(BPF_REG_0, 0),
4382 			BPF_EXIT_INSN(),
4383 		},
4384 		.fixup_map_hash_8b = { 7 },
4385 		.result = REJECT,
4386 		.errstr = "invalid access to packet",
4387 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4388 	},
4389 	{
4390 		"helper access to packet: test10, cls packet_ptr with too short range",
4391 		.insns = {
4392 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4393 				    offsetof(struct __sk_buff, data)),
4394 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4395 				    offsetof(struct __sk_buff, data_end)),
4396 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
4397 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4398 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
4399 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
4400 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4401 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4402 				     BPF_FUNC_map_lookup_elem),
4403 			BPF_MOV64_IMM(BPF_REG_0, 0),
4404 			BPF_EXIT_INSN(),
4405 		},
4406 		.fixup_map_hash_8b = { 6 },
4407 		.result = REJECT,
4408 		.errstr = "invalid access to packet",
4409 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4410 	},
4411 	{
4412 		"helper access to packet: test11, cls unsuitable helper 1",
4413 		.insns = {
4414 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4415 				    offsetof(struct __sk_buff, data)),
4416 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4417 				    offsetof(struct __sk_buff, data_end)),
4418 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4419 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
4420 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
4421 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
4422 			BPF_MOV64_IMM(BPF_REG_2, 0),
4423 			BPF_MOV64_IMM(BPF_REG_4, 42),
4424 			BPF_MOV64_IMM(BPF_REG_5, 0),
4425 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4426 				     BPF_FUNC_skb_store_bytes),
4427 			BPF_MOV64_IMM(BPF_REG_0, 0),
4428 			BPF_EXIT_INSN(),
4429 		},
4430 		.result = REJECT,
4431 		.errstr = "helper access to the packet",
4432 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4433 	},
4434 	{
4435 		"helper access to packet: test12, cls unsuitable helper 2",
4436 		.insns = {
4437 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4438 				    offsetof(struct __sk_buff, data)),
4439 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4440 				    offsetof(struct __sk_buff, data_end)),
4441 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
4442 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
4443 			BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
4444 			BPF_MOV64_IMM(BPF_REG_2, 0),
4445 			BPF_MOV64_IMM(BPF_REG_4, 4),
4446 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4447 				     BPF_FUNC_skb_load_bytes),
4448 			BPF_MOV64_IMM(BPF_REG_0, 0),
4449 			BPF_EXIT_INSN(),
4450 		},
4451 		.result = REJECT,
4452 		.errstr = "helper access to the packet",
4453 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4454 	},
4455 	{
4456 		"helper access to packet: test13, cls helper ok",
4457 		.insns = {
4458 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4459 				    offsetof(struct __sk_buff, data)),
4460 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4461 				    offsetof(struct __sk_buff, data_end)),
4462 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4463 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4464 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4465 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4466 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4467 			BPF_MOV64_IMM(BPF_REG_2, 4),
4468 			BPF_MOV64_IMM(BPF_REG_3, 0),
4469 			BPF_MOV64_IMM(BPF_REG_4, 0),
4470 			BPF_MOV64_IMM(BPF_REG_5, 0),
4471 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4472 				     BPF_FUNC_csum_diff),
4473 			BPF_MOV64_IMM(BPF_REG_0, 0),
4474 			BPF_EXIT_INSN(),
4475 		},
4476 		.result = ACCEPT,
4477 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4478 	},
4479 	{
4480 		"helper access to packet: test14, cls helper ok sub",
4481 		.insns = {
4482 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4483 				    offsetof(struct __sk_buff, data)),
4484 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4485 				    offsetof(struct __sk_buff, data_end)),
4486 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4487 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4488 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4489 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4490 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
4491 			BPF_MOV64_IMM(BPF_REG_2, 4),
4492 			BPF_MOV64_IMM(BPF_REG_3, 0),
4493 			BPF_MOV64_IMM(BPF_REG_4, 0),
4494 			BPF_MOV64_IMM(BPF_REG_5, 0),
4495 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4496 				     BPF_FUNC_csum_diff),
4497 			BPF_MOV64_IMM(BPF_REG_0, 0),
4498 			BPF_EXIT_INSN(),
4499 		},
4500 		.result = ACCEPT,
4501 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4502 	},
4503 	{
4504 		"helper access to packet: test15, cls helper fail sub",
4505 		.insns = {
4506 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4507 				    offsetof(struct __sk_buff, data)),
4508 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4509 				    offsetof(struct __sk_buff, data_end)),
4510 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4511 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4512 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4513 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4514 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 12),
4515 			BPF_MOV64_IMM(BPF_REG_2, 4),
4516 			BPF_MOV64_IMM(BPF_REG_3, 0),
4517 			BPF_MOV64_IMM(BPF_REG_4, 0),
4518 			BPF_MOV64_IMM(BPF_REG_5, 0),
4519 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4520 				     BPF_FUNC_csum_diff),
4521 			BPF_MOV64_IMM(BPF_REG_0, 0),
4522 			BPF_EXIT_INSN(),
4523 		},
4524 		.result = REJECT,
4525 		.errstr = "invalid access to packet",
4526 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4527 	},
4528 	{
4529 		"helper access to packet: test16, cls helper fail range 1",
4530 		.insns = {
4531 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4532 				    offsetof(struct __sk_buff, data)),
4533 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4534 				    offsetof(struct __sk_buff, data_end)),
4535 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4536 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4537 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4538 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4539 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4540 			BPF_MOV64_IMM(BPF_REG_2, 8),
4541 			BPF_MOV64_IMM(BPF_REG_3, 0),
4542 			BPF_MOV64_IMM(BPF_REG_4, 0),
4543 			BPF_MOV64_IMM(BPF_REG_5, 0),
4544 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4545 				     BPF_FUNC_csum_diff),
4546 			BPF_MOV64_IMM(BPF_REG_0, 0),
4547 			BPF_EXIT_INSN(),
4548 		},
4549 		.result = REJECT,
4550 		.errstr = "invalid access to packet",
4551 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4552 	},
4553 	{
4554 		"helper access to packet: test17, cls helper fail range 2",
4555 		.insns = {
4556 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4557 				    offsetof(struct __sk_buff, data)),
4558 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4559 				    offsetof(struct __sk_buff, data_end)),
4560 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4561 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4562 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4563 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4564 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4565 			BPF_MOV64_IMM(BPF_REG_2, -9),
4566 			BPF_MOV64_IMM(BPF_REG_3, 0),
4567 			BPF_MOV64_IMM(BPF_REG_4, 0),
4568 			BPF_MOV64_IMM(BPF_REG_5, 0),
4569 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4570 				     BPF_FUNC_csum_diff),
4571 			BPF_MOV64_IMM(BPF_REG_0, 0),
4572 			BPF_EXIT_INSN(),
4573 		},
4574 		.result = REJECT,
4575 		.errstr = "R2 min value is negative",
4576 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4577 	},
4578 	{
4579 		"helper access to packet: test18, cls helper fail range 3",
4580 		.insns = {
4581 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4582 				    offsetof(struct __sk_buff, data)),
4583 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4584 				    offsetof(struct __sk_buff, data_end)),
4585 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4586 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4587 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4588 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4589 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4590 			BPF_MOV64_IMM(BPF_REG_2, ~0),
4591 			BPF_MOV64_IMM(BPF_REG_3, 0),
4592 			BPF_MOV64_IMM(BPF_REG_4, 0),
4593 			BPF_MOV64_IMM(BPF_REG_5, 0),
4594 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4595 				     BPF_FUNC_csum_diff),
4596 			BPF_MOV64_IMM(BPF_REG_0, 0),
4597 			BPF_EXIT_INSN(),
4598 		},
4599 		.result = REJECT,
4600 		.errstr = "R2 min value is negative",
4601 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4602 	},
4603 	{
4604 		"helper access to packet: test19, cls helper range zero",
4605 		.insns = {
4606 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4607 				    offsetof(struct __sk_buff, data)),
4608 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4609 				    offsetof(struct __sk_buff, data_end)),
4610 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4611 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4612 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4613 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4614 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4615 			BPF_MOV64_IMM(BPF_REG_2, 0),
4616 			BPF_MOV64_IMM(BPF_REG_3, 0),
4617 			BPF_MOV64_IMM(BPF_REG_4, 0),
4618 			BPF_MOV64_IMM(BPF_REG_5, 0),
4619 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4620 				     BPF_FUNC_csum_diff),
4621 			BPF_MOV64_IMM(BPF_REG_0, 0),
4622 			BPF_EXIT_INSN(),
4623 		},
4624 		.result = ACCEPT,
4625 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4626 	},
4627 	{
4628 		"helper access to packet: test20, pkt end as input",
4629 		.insns = {
4630 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4631 				    offsetof(struct __sk_buff, data)),
4632 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4633 				    offsetof(struct __sk_buff, data_end)),
4634 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4635 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4636 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4637 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4638 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
4639 			BPF_MOV64_IMM(BPF_REG_2, 4),
4640 			BPF_MOV64_IMM(BPF_REG_3, 0),
4641 			BPF_MOV64_IMM(BPF_REG_4, 0),
4642 			BPF_MOV64_IMM(BPF_REG_5, 0),
4643 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4644 				     BPF_FUNC_csum_diff),
4645 			BPF_MOV64_IMM(BPF_REG_0, 0),
4646 			BPF_EXIT_INSN(),
4647 		},
4648 		.result = REJECT,
4649 		.errstr = "R1 type=pkt_end expected=fp",
4650 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4651 	},
4652 	{
4653 		"helper access to packet: test21, wrong reg",
4654 		.insns = {
4655 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4656 				    offsetof(struct __sk_buff, data)),
4657 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4658 				    offsetof(struct __sk_buff, data_end)),
4659 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4660 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4661 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4662 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4663 			BPF_MOV64_IMM(BPF_REG_2, 4),
4664 			BPF_MOV64_IMM(BPF_REG_3, 0),
4665 			BPF_MOV64_IMM(BPF_REG_4, 0),
4666 			BPF_MOV64_IMM(BPF_REG_5, 0),
4667 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4668 				     BPF_FUNC_csum_diff),
4669 			BPF_MOV64_IMM(BPF_REG_0, 0),
4670 			BPF_EXIT_INSN(),
4671 		},
4672 		.result = REJECT,
4673 		.errstr = "invalid access to packet",
4674 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4675 	},
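	/* The following tests check that bpf_map_lookup_elem() is refused for
	 * map types whose lookup is not exposed to programs (sockmap,
	 * sockhash, xskmap, stack trace and prog array maps).
	 */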
4676 	{
4677 		"prevent map lookup in sockmap",
4678 		.insns = {
4679 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4680 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4681 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4682 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4683 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4684 				     BPF_FUNC_map_lookup_elem),
4685 			BPF_EXIT_INSN(),
4686 		},
4687 		.fixup_map_sockmap = { 3 },
4688 		.result = REJECT,
4689 		.errstr = "cannot pass map_type 15 into func bpf_map_lookup_elem",
4690 		.prog_type = BPF_PROG_TYPE_SOCK_OPS,
4691 	},
4692 	{
4693 		"prevent map lookup in sockhash",
4694 		.insns = {
4695 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4696 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4697 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4698 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4699 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4700 				     BPF_FUNC_map_lookup_elem),
4701 			BPF_EXIT_INSN(),
4702 		},
4703 		.fixup_map_sockhash = { 3 },
4704 		.result = REJECT,
4705 		.errstr = "cannot pass map_type 18 into func bpf_map_lookup_elem",
4706 		.prog_type = BPF_PROG_TYPE_SOCK_OPS,
4707 	},
4708 	{
4709 		"prevent map lookup in xskmap",
4710 		.insns = {
4711 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4712 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4713 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4714 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4715 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4716 				     BPF_FUNC_map_lookup_elem),
4717 			BPF_EXIT_INSN(),
4718 		},
4719 		.fixup_map_xskmap = { 3 },
4720 		.result = REJECT,
4721 		.errstr = "cannot pass map_type 17 into func bpf_map_lookup_elem",
4722 		.prog_type = BPF_PROG_TYPE_XDP,
4723 	},
4724 	{
4725 		"prevent map lookup in stack trace",
4726 		.insns = {
4727 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4728 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4729 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4730 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4731 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4732 				     BPF_FUNC_map_lookup_elem),
4733 			BPF_EXIT_INSN(),
4734 		},
4735 		.fixup_map_stacktrace = { 3 },
4736 		.result = REJECT,
4737 		.errstr = "cannot pass map_type 7 into func bpf_map_lookup_elem",
4738 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
4739 	},
4740 	{
4741 		"prevent map lookup in prog array",
4742 		.insns = {
4743 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4744 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4745 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4746 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4747 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4748 				     BPF_FUNC_map_lookup_elem),
4749 			BPF_EXIT_INSN(),
4750 		},
4751 		.fixup_prog2 = { 3 },
4752 		.result = REJECT,
4753 		.errstr = "cannot pass map_type 3 into func bpf_map_lookup_elem",
4754 	},
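	/* Map value access tests: after a successful lookup, offsets into the
	 * array value must stay within value_size, so index registers need
	 * both a lower and an upper bounds check.
	 */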
4755 	{
4756 		"valid map access into an array with a constant",
4757 		.insns = {
4758 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4759 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4760 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4761 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4762 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4763 				     BPF_FUNC_map_lookup_elem),
4764 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4765 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4766 				   offsetof(struct test_val, foo)),
4767 			BPF_EXIT_INSN(),
4768 		},
4769 		.fixup_map_hash_48b = { 3 },
4770 		.errstr_unpriv = "R0 leaks addr",
4771 		.result_unpriv = REJECT,
4772 		.result = ACCEPT,
4773 	},
4774 	{
4775 		"valid map access into an array with a register",
4776 		.insns = {
4777 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4778 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4779 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4780 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4781 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4782 				     BPF_FUNC_map_lookup_elem),
4783 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4784 			BPF_MOV64_IMM(BPF_REG_1, 4),
4785 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4786 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4787 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4788 				   offsetof(struct test_val, foo)),
4789 			BPF_EXIT_INSN(),
4790 		},
4791 		.fixup_map_hash_48b = { 3 },
4792 		.errstr_unpriv = "R0 leaks addr",
4793 		.result_unpriv = REJECT,
4794 		.result = ACCEPT,
4795 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4796 	},
4797 	{
4798 		"valid map access into an array with a variable",
4799 		.insns = {
4800 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4801 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4802 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4803 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4804 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4805 				     BPF_FUNC_map_lookup_elem),
4806 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4807 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4808 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
4809 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4810 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4811 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4812 				   offsetof(struct test_val, foo)),
4813 			BPF_EXIT_INSN(),
4814 		},
4815 		.fixup_map_hash_48b = { 3 },
4816 		.errstr_unpriv = "R0 leaks addr",
4817 		.result_unpriv = REJECT,
4818 		.result = ACCEPT,
4819 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4820 	},
4821 	{
4822 		"valid map access into an array with a signed variable",
4823 		.insns = {
4824 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4825 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4826 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4827 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4828 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4829 				     BPF_FUNC_map_lookup_elem),
4830 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
4831 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4832 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
4833 			BPF_MOV32_IMM(BPF_REG_1, 0),
4834 			BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
4835 			BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
4836 			BPF_MOV32_IMM(BPF_REG_1, 0),
4837 			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4838 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4839 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4840 				   offsetof(struct test_val, foo)),
4841 			BPF_EXIT_INSN(),
4842 		},
4843 		.fixup_map_hash_48b = { 3 },
4844 		.errstr_unpriv = "R0 leaks addr",
4845 		.result_unpriv = REJECT,
4846 		.result = ACCEPT,
4847 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4848 	},
4849 	{
4850 		"invalid map access into an array with a constant",
4851 		.insns = {
4852 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4853 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4854 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4855 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4856 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4857 				     BPF_FUNC_map_lookup_elem),
4858 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4859 			BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
4860 				   offsetof(struct test_val, foo)),
4861 			BPF_EXIT_INSN(),
4862 		},
4863 		.fixup_map_hash_48b = { 3 },
4864 		.errstr = "invalid access to map value, value_size=48 off=48 size=8",
4865 		.result = REJECT,
4866 	},
4867 	{
4868 		"invalid map access into an array with a register",
4869 		.insns = {
4870 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4871 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4872 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4873 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4874 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4875 				     BPF_FUNC_map_lookup_elem),
4876 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4877 			BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
4878 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4879 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4880 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4881 				   offsetof(struct test_val, foo)),
4882 			BPF_EXIT_INSN(),
4883 		},
4884 		.fixup_map_hash_48b = { 3 },
4885 		.errstr = "R0 min value is outside of the array range",
4886 		.result = REJECT,
4887 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4888 	},
4889 	{
4890 		"invalid map access into an array with a variable",
4891 		.insns = {
4892 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4893 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4894 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4895 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4896 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4897 				     BPF_FUNC_map_lookup_elem),
4898 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4899 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4900 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4901 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4902 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4903 				   offsetof(struct test_val, foo)),
4904 			BPF_EXIT_INSN(),
4905 		},
4906 		.fixup_map_hash_48b = { 3 },
4907 		.errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
4908 		.result = REJECT,
4909 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4910 	},
4911 	{
4912 		"invalid map access into an array with no floor check",
4913 		.insns = {
4914 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4915 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4916 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4917 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4918 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4919 				     BPF_FUNC_map_lookup_elem),
4920 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4921 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
4922 			BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
4923 			BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
4924 			BPF_MOV32_IMM(BPF_REG_1, 0),
4925 			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4926 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4927 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4928 				   offsetof(struct test_val, foo)),
4929 			BPF_EXIT_INSN(),
4930 		},
4931 		.fixup_map_hash_48b = { 3 },
4932 		.errstr_unpriv = "R0 leaks addr",
4933 		.errstr = "R0 unbounded memory access",
4934 		.result_unpriv = REJECT,
4935 		.result = REJECT,
4936 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4937 	},
4938 	{
4939 		"invalid map access into an array with a invalid max check",
4940 		.insns = {
4941 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4942 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4943 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4944 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4945 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4946 				     BPF_FUNC_map_lookup_elem),
4947 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4948 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4949 			BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
4950 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
4951 			BPF_MOV32_IMM(BPF_REG_1, 0),
4952 			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4953 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4954 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4955 				   offsetof(struct test_val, foo)),
4956 			BPF_EXIT_INSN(),
4957 		},
4958 		.fixup_map_hash_48b = { 3 },
4959 		.errstr_unpriv = "R0 leaks addr",
4960 		.errstr = "invalid access to map value, value_size=48 off=44 size=8",
4961 		.result_unpriv = REJECT,
4962 		.result = REJECT,
4963 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4964 	},
4965 	{
4966 		"invalid map access into an array with a invalid max check",
4967 		.insns = {
4968 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4969 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4970 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4971 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4972 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4973 				     BPF_FUNC_map_lookup_elem),
4974 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
4975 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
4976 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4977 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4978 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4979 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4980 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4981 				     BPF_FUNC_map_lookup_elem),
4982 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4983 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
4984 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
4985 				    offsetof(struct test_val, foo)),
4986 			BPF_EXIT_INSN(),
4987 		},
4988 		.fixup_map_hash_48b = { 3, 11 },
4989 		.errstr = "R0 pointer += pointer",
4990 		.result = REJECT,
4991 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4992 	},
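	/* The CGROUP_SKB tests below exercise __sk_buff context access for
	 * cgroup skb programs: packet pointers and most socket/skb fields are
	 * readable, while tc_classid, data_meta and flow_keys reads and
	 * napi_id writes are rejected.
	 */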
4993 	{
4994 		"direct packet read test#1 for CGROUP_SKB",
4995 		.insns = {
4996 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4997 				    offsetof(struct __sk_buff, data)),
4998 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4999 				    offsetof(struct __sk_buff, data_end)),
5000 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
5001 				    offsetof(struct __sk_buff, len)),
5002 			BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
5003 				    offsetof(struct __sk_buff, pkt_type)),
5004 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
5005 				    offsetof(struct __sk_buff, mark)),
5006 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
5007 				    offsetof(struct __sk_buff, mark)),
5008 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
5009 				    offsetof(struct __sk_buff, queue_mapping)),
5010 			BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
5011 				    offsetof(struct __sk_buff, protocol)),
5012 			BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
5013 				    offsetof(struct __sk_buff, vlan_present)),
5014 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5015 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5016 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5017 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
5018 			BPF_MOV64_IMM(BPF_REG_0, 0),
5019 			BPF_EXIT_INSN(),
5020 		},
5021 		.result = ACCEPT,
5022 		.result_unpriv = REJECT,
5023 		.errstr_unpriv = "invalid bpf_context access off=76 size=4",
5024 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5025 	},
5026 	{
5027 		"direct packet read test#2 for CGROUP_SKB",
5028 		.insns = {
5029 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
5030 				    offsetof(struct __sk_buff, vlan_tci)),
5031 			BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
5032 				    offsetof(struct __sk_buff, vlan_proto)),
5033 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
5034 				    offsetof(struct __sk_buff, priority)),
5035 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
5036 				    offsetof(struct __sk_buff, priority)),
5037 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
5038 				    offsetof(struct __sk_buff,
5039 					     ingress_ifindex)),
5040 			BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
5041 				    offsetof(struct __sk_buff, tc_index)),
5042 			BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
5043 				    offsetof(struct __sk_buff, hash)),
5044 			BPF_MOV64_IMM(BPF_REG_0, 0),
5045 			BPF_EXIT_INSN(),
5046 		},
5047 		.result = ACCEPT,
5048 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5049 	},
5050 	{
5051 		"direct packet read test#3 for CGROUP_SKB",
5052 		.insns = {
5053 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
5054 				    offsetof(struct __sk_buff, cb[0])),
5055 			BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
5056 				    offsetof(struct __sk_buff, cb[1])),
5057 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
5058 				    offsetof(struct __sk_buff, cb[2])),
5059 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
5060 				    offsetof(struct __sk_buff, cb[3])),
5061 			BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
5062 				    offsetof(struct __sk_buff, cb[4])),
5063 			BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
5064 				    offsetof(struct __sk_buff, napi_id)),
5065 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_4,
5066 				    offsetof(struct __sk_buff, cb[0])),
5067 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_5,
5068 				    offsetof(struct __sk_buff, cb[1])),
5069 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
5070 				    offsetof(struct __sk_buff, cb[2])),
5071 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_7,
5072 				    offsetof(struct __sk_buff, cb[3])),
5073 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_8,
5074 				    offsetof(struct __sk_buff, cb[4])),
5075 			BPF_MOV64_IMM(BPF_REG_0, 0),
5076 			BPF_EXIT_INSN(),
5077 		},
5078 		.result = ACCEPT,
5079 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5080 	},
5081 	{
5082 		"direct packet read test#4 for CGROUP_SKB",
5083 		.insns = {
5084 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5085 				    offsetof(struct __sk_buff, family)),
5086 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5087 				    offsetof(struct __sk_buff, remote_ip4)),
5088 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
5089 				    offsetof(struct __sk_buff, local_ip4)),
5090 			BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
5091 				    offsetof(struct __sk_buff, remote_ip6[0])),
5092 			BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
5093 				    offsetof(struct __sk_buff, remote_ip6[1])),
5094 			BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
5095 				    offsetof(struct __sk_buff, remote_ip6[2])),
5096 			BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
5097 				    offsetof(struct __sk_buff, remote_ip6[3])),
5098 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
5099 				    offsetof(struct __sk_buff, local_ip6[0])),
5100 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
5101 				    offsetof(struct __sk_buff, local_ip6[1])),
5102 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
5103 				    offsetof(struct __sk_buff, local_ip6[2])),
5104 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
5105 				    offsetof(struct __sk_buff, local_ip6[3])),
5106 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
5107 				    offsetof(struct __sk_buff, remote_port)),
5108 			BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
5109 				    offsetof(struct __sk_buff, local_port)),
5110 			BPF_MOV64_IMM(BPF_REG_0, 0),
5111 			BPF_EXIT_INSN(),
5112 		},
5113 		.result = ACCEPT,
5114 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5115 	},
5116 	{
5117 		"invalid access of tc_classid for CGROUP_SKB",
5118 		.insns = {
5119 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5120 				    offsetof(struct __sk_buff, tc_classid)),
5121 			BPF_MOV64_IMM(BPF_REG_0, 0),
5122 			BPF_EXIT_INSN(),
5123 		},
5124 		.result = REJECT,
5125 		.errstr = "invalid bpf_context access",
5126 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5127 	},
5128 	{
5129 		"invalid access of data_meta for CGROUP_SKB",
5130 		.insns = {
5131 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5132 				    offsetof(struct __sk_buff, data_meta)),
5133 			BPF_MOV64_IMM(BPF_REG_0, 0),
5134 			BPF_EXIT_INSN(),
5135 		},
5136 		.result = REJECT,
5137 		.errstr = "invalid bpf_context access",
5138 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5139 	},
5140 	{
5141 		"invalid access of flow_keys for CGROUP_SKB",
5142 		.insns = {
5143 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5144 				    offsetof(struct __sk_buff, flow_keys)),
5145 			BPF_MOV64_IMM(BPF_REG_0, 0),
5146 			BPF_EXIT_INSN(),
5147 		},
5148 		.result = REJECT,
5149 		.errstr = "invalid bpf_context access",
5150 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5151 	},
5152 	{
5153 		"invalid write access to napi_id for CGROUP_SKB",
5154 		.insns = {
5155 			BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
5156 				    offsetof(struct __sk_buff, napi_id)),
5157 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_9,
5158 				    offsetof(struct __sk_buff, napi_id)),
5159 			BPF_MOV64_IMM(BPF_REG_0, 0),
5160 			BPF_EXIT_INSN(),
5161 		},
5162 		.result = REJECT,
5163 		.errstr = "invalid bpf_context access",
5164 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5165 	},
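	/* bpf_get_local_storage() tests: the helper only accepts a cgroup
	 * storage map, rejects non-zero flags, and the returned value pointer
	 * is bounds checked like any other map value.
	 */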
5166 	{
5167 		"valid cgroup storage access",
5168 		.insns = {
5169 			BPF_MOV64_IMM(BPF_REG_2, 0),
5170 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5171 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5172 				     BPF_FUNC_get_local_storage),
5173 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5174 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5175 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5176 			BPF_EXIT_INSN(),
5177 		},
5178 		.fixup_cgroup_storage = { 1 },
5179 		.result = ACCEPT,
5180 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5181 	},
5182 	{
5183 		"invalid cgroup storage access 1",
5184 		.insns = {
5185 			BPF_MOV64_IMM(BPF_REG_2, 0),
5186 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5187 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5188 				     BPF_FUNC_get_local_storage),
5189 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5190 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5191 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5192 			BPF_EXIT_INSN(),
5193 		},
5194 		.fixup_map_hash_8b = { 1 },
5195 		.result = REJECT,
5196 		.errstr = "cannot pass map_type 1 into func bpf_get_local_storage",
5197 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5198 	},
5199 	{
5200 		"invalid cgroup storage access 2",
5201 		.insns = {
5202 			BPF_MOV64_IMM(BPF_REG_2, 0),
5203 			BPF_LD_MAP_FD(BPF_REG_1, 1),
5204 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5205 				     BPF_FUNC_get_local_storage),
5206 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5207 			BPF_EXIT_INSN(),
5208 		},
5209 		.result = REJECT,
5210 		.errstr = "fd 1 is not pointing to valid bpf_map",
5211 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5212 	},
5213 	{
5214 		"invalid cgroup storage access 3",
5215 		.insns = {
5216 			BPF_MOV64_IMM(BPF_REG_2, 0),
5217 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5218 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5219 				     BPF_FUNC_get_local_storage),
5220 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256),
5221 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5222 			BPF_MOV64_IMM(BPF_REG_0, 0),
5223 			BPF_EXIT_INSN(),
5224 		},
5225 		.fixup_cgroup_storage = { 1 },
5226 		.result = REJECT,
5227 		.errstr = "invalid access to map value, value_size=64 off=256 size=4",
5228 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5229 	},
5230 	{
5231 		"invalid cgroup storage access 4",
5232 		.insns = {
5233 			BPF_MOV64_IMM(BPF_REG_2, 0),
5234 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5235 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5236 				     BPF_FUNC_get_local_storage),
5237 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2),
5238 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5239 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5240 			BPF_EXIT_INSN(),
5241 		},
5242 		.fixup_cgroup_storage = { 1 },
5243 		.result = REJECT,
5244 		.errstr = "invalid access to map value, value_size=64 off=-2 size=4",
5245 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5246 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5247 	},
5248 	{
5249 		"invalid cgroup storage access 5",
5250 		.insns = {
5251 			BPF_MOV64_IMM(BPF_REG_2, 7),
5252 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5253 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5254 				     BPF_FUNC_get_local_storage),
5255 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5256 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5257 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5258 			BPF_EXIT_INSN(),
5259 		},
5260 		.fixup_cgroup_storage = { 1 },
5261 		.result = REJECT,
5262 		.errstr = "get_local_storage() doesn't support non-zero flags",
5263 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5264 	},
5265 	{
5266 		"invalid cgroup storage access 6",
5267 		.insns = {
5268 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
5269 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5270 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5271 				     BPF_FUNC_get_local_storage),
5272 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5273 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5274 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5275 			BPF_EXIT_INSN(),
5276 		},
5277 		.fixup_cgroup_storage = { 1 },
5278 		.result = REJECT,
5279 		.errstr = "get_local_storage() doesn't support non-zero flags",
5280 		.errstr_unpriv = "R2 leaks addr into helper function",
5281 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5282 	},
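	/* Same checks as above, but for per-cpu cgroup storage maps. */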
5283 	{
5284 		"valid per-cpu cgroup storage access",
5285 		.insns = {
5286 			BPF_MOV64_IMM(BPF_REG_2, 0),
5287 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5288 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5289 				     BPF_FUNC_get_local_storage),
5290 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5291 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5292 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5293 			BPF_EXIT_INSN(),
5294 		},
5295 		.fixup_percpu_cgroup_storage = { 1 },
5296 		.result = ACCEPT,
5297 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5298 	},
5299 	{
5300 		"invalid per-cpu cgroup storage access 1",
5301 		.insns = {
5302 			BPF_MOV64_IMM(BPF_REG_2, 0),
5303 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5304 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5305 				     BPF_FUNC_get_local_storage),
5306 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5307 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5308 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5309 			BPF_EXIT_INSN(),
5310 		},
5311 		.fixup_map_hash_8b = { 1 },
5312 		.result = REJECT,
5313 		.errstr = "cannot pass map_type 1 into func bpf_get_local_storage",
5314 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5315 	},
5316 	{
5317 		"invalid per-cpu cgroup storage access 2",
5318 		.insns = {
5319 			BPF_MOV64_IMM(BPF_REG_2, 0),
5320 			BPF_LD_MAP_FD(BPF_REG_1, 1),
5321 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5322 				     BPF_FUNC_get_local_storage),
5323 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5324 			BPF_EXIT_INSN(),
5325 		},
5326 		.result = REJECT,
5327 		.errstr = "fd 1 is not pointing to valid bpf_map",
5328 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5329 	},
5330 	{
5331 		"invalid per-cpu cgroup storage access 3",
5332 		.insns = {
5333 			BPF_MOV64_IMM(BPF_REG_2, 0),
5334 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5335 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5336 				     BPF_FUNC_get_local_storage),
5337 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256),
5338 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5339 			BPF_MOV64_IMM(BPF_REG_0, 0),
5340 			BPF_EXIT_INSN(),
5341 		},
5342 		.fixup_percpu_cgroup_storage = { 1 },
5343 		.result = REJECT,
5344 		.errstr = "invalid access to map value, value_size=64 off=256 size=4",
5345 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5346 	},
5347 	{
5348 		"invalid per-cpu cgroup storage access 4",
5349 		.insns = {
5350 			BPF_MOV64_IMM(BPF_REG_2, 0),
5351 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5352 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5353 				     BPF_FUNC_get_local_storage),
5354 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2),
5355 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5356 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5357 			BPF_EXIT_INSN(),
5358 		},
5359 		.fixup_percpu_cgroup_storage = { 1 },
5360 		.result = REJECT,
5361 		.errstr = "invalid access to map value, value_size=64 off=-2 size=4",
5362 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5363 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5364 	},
5365 	{
5366 		"invalid per-cpu cgroup storage access 5",
5367 		.insns = {
5368 			BPF_MOV64_IMM(BPF_REG_2, 7),
5369 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5370 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5371 				     BPF_FUNC_get_local_storage),
5372 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5373 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5374 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5375 			BPF_EXIT_INSN(),
5376 		},
5377 		.fixup_percpu_cgroup_storage = { 1 },
5378 		.result = REJECT,
5379 		.errstr = "get_local_storage() doesn't support non-zero flags",
5380 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5381 	},
5382 	{
5383 		"invalid per-cpu cgroup storage access 6",
5384 		.insns = {
5385 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
5386 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5387 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5388 				     BPF_FUNC_get_local_storage),
5389 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5390 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5391 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5392 			BPF_EXIT_INSN(),
5393 		},
5394 		.fixup_percpu_cgroup_storage = { 1 },
5395 		.result = REJECT,
5396 		.errstr = "get_local_storage() doesn't support non-zero flags",
5397 		.errstr_unpriv = "R2 leaks addr into helper function",
5398 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5399 	},
5400 	{
5401 		"write tstamp from CGROUP_SKB",
5402 		.insns = {
5403 			BPF_MOV64_IMM(BPF_REG_0, 0),
5404 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
5405 				    offsetof(struct __sk_buff, tstamp)),
5406 			BPF_MOV64_IMM(BPF_REG_0, 0),
5407 			BPF_EXIT_INSN(),
5408 		},
5409 		.result = ACCEPT,
5410 		.result_unpriv = REJECT,
5411 		.errstr_unpriv = "invalid bpf_context access off=152 size=8",
5412 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5413 	},
5414 	{
5415 		"read tstamp from CGROUP_SKB",
5416 		.insns = {
5417 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
5418 				    offsetof(struct __sk_buff, tstamp)),
5419 			BPF_MOV64_IMM(BPF_REG_0, 0),
5420 			BPF_EXIT_INSN(),
5421 		},
5422 		.result = ACCEPT,
5423 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5424 	},
5425 	{
5426 		"multiple registers share map_lookup_elem result",
5427 		.insns = {
5428 			BPF_MOV64_IMM(BPF_REG_1, 10),
5429 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5430 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5431 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5432 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5433 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5434 				     BPF_FUNC_map_lookup_elem),
5435 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5436 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5437 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5438 			BPF_EXIT_INSN(),
5439 		},
5440 		.fixup_map_hash_8b = { 4 },
5441 		.result = ACCEPT,
5442 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
5443 	},
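	/* ALU operations on the result of map_lookup_elem() are not allowed
	 * before the NULL check, i.e. while the register is still
	 * PTR_TO_MAP_VALUE_OR_NULL.
	 */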
5444 	{
5445 		"alu ops on ptr_to_map_value_or_null, 1",
5446 		.insns = {
5447 			BPF_MOV64_IMM(BPF_REG_1, 10),
5448 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5449 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5450 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5451 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5452 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5453 				     BPF_FUNC_map_lookup_elem),
5454 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5455 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2),
5456 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
5457 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5458 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5459 			BPF_EXIT_INSN(),
5460 		},
5461 		.fixup_map_hash_8b = { 4 },
5462 		.errstr = "R4 pointer arithmetic on map_value_or_null",
5463 		.result = REJECT,
5464 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
5465 	},
5466 	{
5467 		"alu ops on ptr_to_map_value_or_null, 2",
5468 		.insns = {
5469 			BPF_MOV64_IMM(BPF_REG_1, 10),
5470 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5471 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5472 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5473 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5474 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5475 				     BPF_FUNC_map_lookup_elem),
5476 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5477 			BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1),
5478 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5479 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5480 			BPF_EXIT_INSN(),
5481 		},
5482 		.fixup_map_hash_8b = { 4 },
5483 		.errstr = "R4 pointer arithmetic on map_value_or_null",
5484 		.result = REJECT,
5485 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
5486 	},
5487 	{
5488 		"alu ops on ptr_to_map_value_or_null, 3",
5489 		.insns = {
5490 			BPF_MOV64_IMM(BPF_REG_1, 10),
5491 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5492 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5493 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5494 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5495 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5496 				     BPF_FUNC_map_lookup_elem),
5497 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5498 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1),
5499 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5500 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5501 			BPF_EXIT_INSN(),
5502 		},
5503 		.fixup_map_hash_8b = { 4 },
5504 		.errstr = "R4 pointer arithmetic on map_value_or_null",
5505 		.result = REJECT,
5506 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
5507 	},
5508 	{
5509 		"invalid memory access with multiple map_lookup_elem calls",
5510 		.insns = {
5511 			BPF_MOV64_IMM(BPF_REG_1, 10),
5512 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5513 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5514 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5515 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5516 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
5517 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
5518 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5519 				     BPF_FUNC_map_lookup_elem),
5520 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5521 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
5522 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
5523 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5524 				     BPF_FUNC_map_lookup_elem),
5525 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5526 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5527 			BPF_EXIT_INSN(),
5528 		},
5529 		.fixup_map_hash_8b = { 4 },
5530 		.result = REJECT,
5531 		.errstr = "R4 !read_ok",
5532 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
5533 	},
5534 	{
5535 		"valid indirect map_lookup_elem access with 2nd lookup in branch",
5536 		.insns = {
5537 			BPF_MOV64_IMM(BPF_REG_1, 10),
5538 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5539 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5540 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5541 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5542 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
5543 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
5544 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5545 				     BPF_FUNC_map_lookup_elem),
5546 			BPF_MOV64_IMM(BPF_REG_2, 10),
5547 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
5548 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
5549 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
5550 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5551 				     BPF_FUNC_map_lookup_elem),
5552 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5553 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5554 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5555 			BPF_EXIT_INSN(),
5556 		},
5557 		.fixup_map_hash_8b = { 4 },
5558 		.result = ACCEPT,
5559 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
5560 	},
5561 	{
5562 		"invalid map access from else condition",
5563 		.insns = {
5564 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5565 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5566 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5567 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5568 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
5569 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5570 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5571 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1),
5572 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5573 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
5574 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
5575 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
5576 			BPF_EXIT_INSN(),
5577 		},
5578 		.fixup_map_hash_48b = { 3 },
5579 		.errstr = "R0 unbounded memory access",
5580 		.result = REJECT,
5581 		.errstr_unpriv = "R0 leaks addr",
5582 		.result_unpriv = REJECT,
5583 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5584 	},
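	/* The verifier must track constants through OR so that the stack
	 * buffer passed to bpf_probe_read() stays within bounds: 34 | 13
	 * fits in the 48-byte buffer, while 34 | 24 (= 58) does not.
	 */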
5585 	{
5586 		"constant register |= constant should keep constant type",
5587 		.insns = {
5588 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5589 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5590 			BPF_MOV64_IMM(BPF_REG_2, 34),
5591 			BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
5592 			BPF_MOV64_IMM(BPF_REG_3, 0),
5593 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5594 			BPF_EXIT_INSN(),
5595 		},
5596 		.result = ACCEPT,
5597 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5598 	},
5599 	{
5600 		"constant register |= constant should not bypass stack boundary checks",
5601 		.insns = {
5602 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5603 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5604 			BPF_MOV64_IMM(BPF_REG_2, 34),
5605 			BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
5606 			BPF_MOV64_IMM(BPF_REG_3, 0),
5607 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5608 			BPF_EXIT_INSN(),
5609 		},
5610 		.errstr = "invalid stack type R1 off=-48 access_size=58",
5611 		.result = REJECT,
5612 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5613 	},
5614 	{
5615 		"constant register |= constant register should keep constant type",
5616 		.insns = {
5617 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5618 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5619 			BPF_MOV64_IMM(BPF_REG_2, 34),
5620 			BPF_MOV64_IMM(BPF_REG_4, 13),
5621 			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
5622 			BPF_MOV64_IMM(BPF_REG_3, 0),
5623 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5624 			BPF_EXIT_INSN(),
5625 		},
5626 		.result = ACCEPT,
5627 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5628 	},
5629 	{
5630 		"constant register |= constant register should not bypass stack boundary checks",
5631 		.insns = {
5632 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5633 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5634 			BPF_MOV64_IMM(BPF_REG_2, 34),
5635 			BPF_MOV64_IMM(BPF_REG_4, 24),
5636 			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
5637 			BPF_MOV64_IMM(BPF_REG_3, 0),
5638 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5639 			BPF_EXIT_INSN(),
5640 		},
5641 		.errstr = "invalid stack type R1 off=-48 access_size=58",
5642 		.result = REJECT,
5643 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5644 	},
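	/* LWT programs: direct packet reads are allowed for LWT_IN, LWT_OUT
	 * and LWT_XMIT, but direct packet writes only for LWT_XMIT.
	 */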
5645 	{
5646 		"invalid direct packet write for LWT_IN",
5647 		.insns = {
5648 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5649 				    offsetof(struct __sk_buff, data)),
5650 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5651 				    offsetof(struct __sk_buff, data_end)),
5652 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5653 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5654 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5655 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
5656 			BPF_MOV64_IMM(BPF_REG_0, 0),
5657 			BPF_EXIT_INSN(),
5658 		},
5659 		.errstr = "cannot write into packet",
5660 		.result = REJECT,
5661 		.prog_type = BPF_PROG_TYPE_LWT_IN,
5662 	},
5663 	{
5664 		"invalid direct packet write for LWT_OUT",
5665 		.insns = {
5666 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5667 				    offsetof(struct __sk_buff, data)),
5668 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5669 				    offsetof(struct __sk_buff, data_end)),
5670 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5671 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5672 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5673 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
5674 			BPF_MOV64_IMM(BPF_REG_0, 0),
5675 			BPF_EXIT_INSN(),
5676 		},
5677 		.errstr = "cannot write into packet",
5678 		.result = REJECT,
5679 		.prog_type = BPF_PROG_TYPE_LWT_OUT,
5680 	},
5681 	{
5682 		"direct packet write for LWT_XMIT",
5683 		.insns = {
5684 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5685 				    offsetof(struct __sk_buff, data)),
5686 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5687 				    offsetof(struct __sk_buff, data_end)),
5688 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5689 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5690 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5691 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
5692 			BPF_MOV64_IMM(BPF_REG_0, 0),
5693 			BPF_EXIT_INSN(),
5694 		},
5695 		.result = ACCEPT,
5696 		.prog_type = BPF_PROG_TYPE_LWT_XMIT,
5697 	},
5698 	{
5699 		"direct packet read for LWT_IN",
5700 		.insns = {
5701 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5702 				    offsetof(struct __sk_buff, data)),
5703 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5704 				    offsetof(struct __sk_buff, data_end)),
5705 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5706 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5707 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5708 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
5709 			BPF_MOV64_IMM(BPF_REG_0, 0),
5710 			BPF_EXIT_INSN(),
5711 		},
5712 		.result = ACCEPT,
5713 		.prog_type = BPF_PROG_TYPE_LWT_IN,
5714 	},
5715 	{
5716 		"direct packet read for LWT_OUT",
5717 		.insns = {
5718 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5719 				    offsetof(struct __sk_buff, data)),
5720 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5721 				    offsetof(struct __sk_buff, data_end)),
5722 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5723 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5724 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5725 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
5726 			BPF_MOV64_IMM(BPF_REG_0, 0),
5727 			BPF_EXIT_INSN(),
5728 		},
5729 		.result = ACCEPT,
5730 		.prog_type = BPF_PROG_TYPE_LWT_OUT,
5731 	},
5732 	{
5733 		"direct packet read for LWT_XMIT",
5734 		.insns = {
5735 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5736 				    offsetof(struct __sk_buff, data)),
5737 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5738 				    offsetof(struct __sk_buff, data_end)),
5739 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5740 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5741 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5742 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
5743 			BPF_MOV64_IMM(BPF_REG_0, 0),
5744 			BPF_EXIT_INSN(),
5745 		},
5746 		.result = ACCEPT,
5747 		.prog_type = BPF_PROG_TYPE_LWT_XMIT,
5748 	},
5749 	{
5750 		"overlapping checks for direct packet access",
5751 		.insns = {
5752 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5753 				    offsetof(struct __sk_buff, data)),
5754 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5755 				    offsetof(struct __sk_buff, data_end)),
5756 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5757 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5758 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
5759 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
5760 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
5761 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
5762 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
5763 			BPF_MOV64_IMM(BPF_REG_0, 0),
5764 			BPF_EXIT_INSN(),
5765 		},
5766 		.result = ACCEPT,
5767 		.prog_type = BPF_PROG_TYPE_LWT_XMIT,
5768 	},
5769 	{
5770 		"make headroom for LWT_XMIT",
5771 		.insns = {
5772 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5773 			BPF_MOV64_IMM(BPF_REG_2, 34),
5774 			BPF_MOV64_IMM(BPF_REG_3, 0),
5775 			BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
5776 			/* split for s390 to succeed */
5777 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
5778 			BPF_MOV64_IMM(BPF_REG_2, 42),
5779 			BPF_MOV64_IMM(BPF_REG_3, 0),
5780 			BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
5781 			BPF_MOV64_IMM(BPF_REG_0, 0),
5782 			BPF_EXIT_INSN(),
5783 		},
5784 		.result = ACCEPT,
5785 		.prog_type = BPF_PROG_TYPE_LWT_XMIT,
5786 	},
5787 	{
5788 		"invalid access of tc_classid for LWT_IN",
5789 		.insns = {
5790 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5791 				    offsetof(struct __sk_buff, tc_classid)),
5792 			BPF_EXIT_INSN(),
5793 		},
5794 		.result = REJECT,
5795 		.errstr = "invalid bpf_context access",
5796 	},
5797 	{
5798 		"invalid access of tc_classid for LWT_OUT",
5799 		.insns = {
5800 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5801 				    offsetof(struct __sk_buff, tc_classid)),
5802 			BPF_EXIT_INSN(),
5803 		},
5804 		.result = REJECT,
5805 		.errstr = "invalid bpf_context access",
5806 	},
5807 	{
5808 		"invalid access of tc_classid for LWT_XMIT",
5809 		.insns = {
5810 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5811 				    offsetof(struct __sk_buff, tc_classid)),
5812 			BPF_EXIT_INSN(),
5813 		},
5814 		.result = REJECT,
5815 		.errstr = "invalid bpf_context access",
5816 	},
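	/* Storing pointers into the context or into a map value is rejected
	 * for unprivileged programs; BPF_XADD into ctx is rejected for all.
	 */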
5817 	{
5818 		"leak pointer into ctx 1",
5819 		.insns = {
5820 			BPF_MOV64_IMM(BPF_REG_0, 0),
5821 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
5822 				    offsetof(struct __sk_buff, cb[0])),
5823 			BPF_LD_MAP_FD(BPF_REG_2, 0),
5824 			BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2,
5825 				      offsetof(struct __sk_buff, cb[0])),
5826 			BPF_EXIT_INSN(),
5827 		},
5828 		.fixup_map_hash_8b = { 2 },
5829 		.errstr_unpriv = "R2 leaks addr into mem",
5830 		.result_unpriv = REJECT,
5831 		.result = REJECT,
5832 		.errstr = "BPF_XADD stores into R1 ctx is not allowed",
5833 	},
5834 	{
5835 		"leak pointer into ctx 2",
5836 		.insns = {
5837 			BPF_MOV64_IMM(BPF_REG_0, 0),
5838 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
5839 				    offsetof(struct __sk_buff, cb[0])),
5840 			BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10,
5841 				      offsetof(struct __sk_buff, cb[0])),
5842 			BPF_EXIT_INSN(),
5843 		},
5844 		.errstr_unpriv = "R10 leaks addr into mem",
5845 		.result_unpriv = REJECT,
5846 		.result = REJECT,
5847 		.errstr = "BPF_XADD stores into R1 ctx is not allowed",
5848 	},
5849 	{
5850 		"leak pointer into ctx 3",
5851 		.insns = {
5852 			BPF_MOV64_IMM(BPF_REG_0, 0),
5853 			BPF_LD_MAP_FD(BPF_REG_2, 0),
5854 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2,
5855 				      offsetof(struct __sk_buff, cb[0])),
5856 			BPF_EXIT_INSN(),
5857 		},
5858 		.fixup_map_hash_8b = { 1 },
5859 		.errstr_unpriv = "R2 leaks addr into ctx",
5860 		.result_unpriv = REJECT,
5861 		.result = ACCEPT,
5862 	},
5863 	{
5864 		"leak pointer into map val",
5865 		.insns = {
5866 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5867 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5868 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5869 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5870 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5871 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5872 				     BPF_FUNC_map_lookup_elem),
5873 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
5874 			BPF_MOV64_IMM(BPF_REG_3, 0),
5875 			BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
5876 			BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
5877 			BPF_MOV64_IMM(BPF_REG_0, 0),
5878 			BPF_EXIT_INSN(),
5879 		},
5880 		.fixup_map_hash_8b = { 4 },
5881 		.errstr_unpriv = "R6 leaks addr into mem",
5882 		.result_unpriv = REJECT,
5883 		.result = ACCEPT,
5884 	},
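	/* "helper access to map" tests: a map value pointer passed to a
	 * helper together with a size argument must stay within the map
	 * value; empty, negative and oversized ranges are rejected.
	 */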
5885 	{
5886 		"helper access to map: full range",
5887 		.insns = {
5888 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5889 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5890 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5891 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5892 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5893 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5894 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5895 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5896 			BPF_MOV64_IMM(BPF_REG_3, 0),
5897 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5898 			BPF_EXIT_INSN(),
5899 		},
5900 		.fixup_map_hash_48b = { 3 },
5901 		.result = ACCEPT,
5902 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5903 	},
5904 	{
5905 		"helper access to map: partial range",
5906 		.insns = {
5907 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5908 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5909 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5910 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5911 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5912 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5913 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5914 			BPF_MOV64_IMM(BPF_REG_2, 8),
5915 			BPF_MOV64_IMM(BPF_REG_3, 0),
5916 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5917 			BPF_EXIT_INSN(),
5918 		},
5919 		.fixup_map_hash_48b = { 3 },
5920 		.result = ACCEPT,
5921 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5922 	},
5923 	{
5924 		"helper access to map: empty range",
5925 		.insns = {
5926 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5927 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5928 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5929 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5930 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5931 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
5932 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5933 			BPF_MOV64_IMM(BPF_REG_2, 0),
5934 			BPF_EMIT_CALL(BPF_FUNC_trace_printk),
5935 			BPF_EXIT_INSN(),
5936 		},
5937 		.fixup_map_hash_48b = { 3 },
5938 		.errstr = "invalid access to map value, value_size=48 off=0 size=0",
5939 		.result = REJECT,
5940 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5941 	},
5942 	{
5943 		"helper access to map: out-of-bound range",
5944 		.insns = {
5945 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5946 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5947 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5948 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5949 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5950 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5951 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5952 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
5953 			BPF_MOV64_IMM(BPF_REG_3, 0),
5954 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5955 			BPF_EXIT_INSN(),
5956 		},
5957 		.fixup_map_hash_48b = { 3 },
5958 		.errstr = "invalid access to map value, value_size=48 off=0 size=56",
5959 		.result = REJECT,
5960 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5961 	},
5962 	{
5963 		"helper access to map: negative range",
5964 		.insns = {
5965 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5966 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5967 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5968 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5969 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5970 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5971 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5972 			BPF_MOV64_IMM(BPF_REG_2, -8),
5973 			BPF_MOV64_IMM(BPF_REG_3, 0),
5974 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5975 			BPF_EXIT_INSN(),
5976 		},
5977 		.fixup_map_hash_48b = { 3 },
5978 		.errstr = "R2 min value is negative",
5979 		.result = REJECT,
5980 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5981 	},
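	/* "adjusted map" variants: the map value pointer is first advanced by
	 * a constant immediate, a constant register or a bounded variable
	 * before being passed to the helper.
	 */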
5982 	{
5983 		"helper access to adjusted map (via const imm): full range",
5984 		.insns = {
5985 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5986 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5987 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5988 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5989 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5990 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5991 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5992 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5993 				offsetof(struct test_val, foo)),
5994 			BPF_MOV64_IMM(BPF_REG_2,
5995 				sizeof(struct test_val) -
5996 				offsetof(struct test_val, foo)),
5997 			BPF_MOV64_IMM(BPF_REG_3, 0),
5998 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5999 			BPF_EXIT_INSN(),
6000 		},
6001 		.fixup_map_hash_48b = { 3 },
6002 		.result = ACCEPT,
6003 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6004 	},
6005 	{
6006 		"helper access to adjusted map (via const imm): partial range",
6007 		.insns = {
6008 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6009 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6010 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6011 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6012 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6013 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6014 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6015 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
6016 				offsetof(struct test_val, foo)),
6017 			BPF_MOV64_IMM(BPF_REG_2, 8),
6018 			BPF_MOV64_IMM(BPF_REG_3, 0),
6019 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6020 			BPF_EXIT_INSN(),
6021 		},
6022 		.fixup_map_hash_48b = { 3 },
6023 		.result = ACCEPT,
6024 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6025 	},
6026 	{
6027 		"helper access to adjusted map (via const imm): empty range",
6028 		.insns = {
6029 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6030 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6031 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6032 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6033 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6034 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6035 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6036 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
6037 				offsetof(struct test_val, foo)),
6038 			BPF_MOV64_IMM(BPF_REG_2, 0),
6039 			BPF_EMIT_CALL(BPF_FUNC_trace_printk),
6040 			BPF_EXIT_INSN(),
6041 		},
6042 		.fixup_map_hash_48b = { 3 },
6043 		.errstr = "invalid access to map value, value_size=48 off=4 size=0",
6044 		.result = REJECT,
6045 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6046 	},
6047 	{
6048 		"helper access to adjusted map (via const imm): out-of-bound range",
6049 		.insns = {
6050 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6051 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6052 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6053 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6054 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6055 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6056 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6057 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
6058 				offsetof(struct test_val, foo)),
6059 			BPF_MOV64_IMM(BPF_REG_2,
6060 				sizeof(struct test_val) -
6061 				offsetof(struct test_val, foo) + 8),
6062 			BPF_MOV64_IMM(BPF_REG_3, 0),
6063 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6064 			BPF_EXIT_INSN(),
6065 		},
6066 		.fixup_map_hash_48b = { 3 },
6067 		.errstr = "invalid access to map value, value_size=48 off=4 size=52",
6068 		.result = REJECT,
6069 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6070 	},
6071 	{
6072 		"helper access to adjusted map (via const imm): negative range (> adjustment)",
6073 		.insns = {
6074 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6075 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6076 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6077 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6078 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6079 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6080 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6081 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
6082 				offsetof(struct test_val, foo)),
6083 			BPF_MOV64_IMM(BPF_REG_2, -8),
6084 			BPF_MOV64_IMM(BPF_REG_3, 0),
6085 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6086 			BPF_EXIT_INSN(),
6087 		},
6088 		.fixup_map_hash_48b = { 3 },
6089 		.errstr = "R2 min value is negative",
6090 		.result = REJECT,
6091 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6092 	},
6093 	{
6094 		"helper access to adjusted map (via const imm): negative range (< adjustment)",
6095 		.insns = {
6096 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6097 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6098 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6099 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6100 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6101 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6102 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6103 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
6104 				offsetof(struct test_val, foo)),
6105 			BPF_MOV64_IMM(BPF_REG_2, -1),
6106 			BPF_MOV64_IMM(BPF_REG_3, 0),
6107 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6108 			BPF_EXIT_INSN(),
6109 		},
6110 		.fixup_map_hash_48b = { 3 },
6111 		.errstr = "R2 min value is negative",
6112 		.result = REJECT,
6113 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6114 	},
6115 	{
6116 		"helper access to adjusted map (via const reg): full range",
6117 		.insns = {
6118 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6119 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6120 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6121 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6122 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6123 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6124 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6125 			BPF_MOV64_IMM(BPF_REG_3,
6126 				offsetof(struct test_val, foo)),
6127 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6128 			BPF_MOV64_IMM(BPF_REG_2,
6129 				sizeof(struct test_val) -
6130 				offsetof(struct test_val, foo)),
6131 			BPF_MOV64_IMM(BPF_REG_3, 0),
6132 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6133 			BPF_EXIT_INSN(),
6134 		},
6135 		.fixup_map_hash_48b = { 3 },
6136 		.result = ACCEPT,
6137 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6138 	},
6139 	{
6140 		"helper access to adjusted map (via const reg): partial range",
6141 		.insns = {
6142 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6143 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6144 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6145 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6146 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6147 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6148 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6149 			BPF_MOV64_IMM(BPF_REG_3,
6150 				offsetof(struct test_val, foo)),
6151 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6152 			BPF_MOV64_IMM(BPF_REG_2, 8),
6153 			BPF_MOV64_IMM(BPF_REG_3, 0),
6154 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6155 			BPF_EXIT_INSN(),
6156 		},
6157 		.fixup_map_hash_48b = { 3 },
6158 		.result = ACCEPT,
6159 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6160 	},
6161 	{
6162 		"helper access to adjusted map (via const reg): empty range",
6163 		.insns = {
6164 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6165 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6166 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6167 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6168 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6169 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6170 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6171 			BPF_MOV64_IMM(BPF_REG_3, 0),
6172 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6173 			BPF_MOV64_IMM(BPF_REG_2, 0),
6174 			BPF_EMIT_CALL(BPF_FUNC_trace_printk),
6175 			BPF_EXIT_INSN(),
6176 		},
6177 		.fixup_map_hash_48b = { 3 },
6178 		.errstr = "R1 min value is outside of the array range",
6179 		.result = REJECT,
6180 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6181 	},
6182 	{
6183 		"helper access to adjusted map (via const reg): out-of-bound range",
6184 		.insns = {
6185 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6186 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6187 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6188 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6189 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6190 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6191 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6192 			BPF_MOV64_IMM(BPF_REG_3,
6193 				offsetof(struct test_val, foo)),
6194 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6195 			BPF_MOV64_IMM(BPF_REG_2,
6196 				sizeof(struct test_val) -
6197 				offsetof(struct test_val, foo) + 8),
6198 			BPF_MOV64_IMM(BPF_REG_3, 0),
6199 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6200 			BPF_EXIT_INSN(),
6201 		},
6202 		.fixup_map_hash_48b = { 3 },
6203 		.errstr = "invalid access to map value, value_size=48 off=4 size=52",
6204 		.result = REJECT,
6205 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6206 	},
6207 	{
6208 		"helper access to adjusted map (via const reg): negative range (> adjustment)",
6209 		.insns = {
6210 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6211 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6212 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6213 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6214 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6215 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6216 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6217 			BPF_MOV64_IMM(BPF_REG_3,
6218 				offsetof(struct test_val, foo)),
6219 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6220 			BPF_MOV64_IMM(BPF_REG_2, -8),
6221 			BPF_MOV64_IMM(BPF_REG_3, 0),
6222 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6223 			BPF_EXIT_INSN(),
6224 		},
6225 		.fixup_map_hash_48b = { 3 },
6226 		.errstr = "R2 min value is negative",
6227 		.result = REJECT,
6228 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6229 	},
6230 	{
6231 		"helper access to adjusted map (via const reg): negative range (< adjustment)",
6232 		.insns = {
6233 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6234 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6235 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6236 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6237 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6238 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6239 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6240 			BPF_MOV64_IMM(BPF_REG_3,
6241 				offsetof(struct test_val, foo)),
6242 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6243 			BPF_MOV64_IMM(BPF_REG_2, -1),
6244 			BPF_MOV64_IMM(BPF_REG_3, 0),
6245 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6246 			BPF_EXIT_INSN(),
6247 		},
6248 		.fixup_map_hash_48b = { 3 },
6249 		.errstr = "R2 min value is negative",
6250 		.result = REJECT,
6251 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6252 	},
6253 	{
6254 		"helper access to adjusted map (via variable): full range",
6255 		.insns = {
6256 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6257 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6258 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6259 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6260 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6261 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6262 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6263 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6264 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6265 				offsetof(struct test_val, foo), 4),
6266 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6267 			BPF_MOV64_IMM(BPF_REG_2,
6268 				sizeof(struct test_val) -
6269 				offsetof(struct test_val, foo)),
6270 			BPF_MOV64_IMM(BPF_REG_3, 0),
6271 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6272 			BPF_EXIT_INSN(),
6273 		},
6274 		.fixup_map_hash_48b = { 3 },
6275 		.result = ACCEPT,
6276 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6277 	},
6278 	{
6279 		"helper access to adjusted map (via variable): partial range",
6280 		.insns = {
6281 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6282 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6283 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6284 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6285 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6286 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6287 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6288 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6289 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6290 				offsetof(struct test_val, foo), 4),
6291 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6292 			BPF_MOV64_IMM(BPF_REG_2, 8),
6293 			BPF_MOV64_IMM(BPF_REG_3, 0),
6294 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6295 			BPF_EXIT_INSN(),
6296 		},
6297 		.fixup_map_hash_48b = { 3 },
6298 		.result = ACCEPT,
6299 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6300 	},
6301 	{
6302 		"helper access to adjusted map (via variable): empty range",
6303 		.insns = {
6304 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6305 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6306 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6307 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6308 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6309 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6310 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6311 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6312 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6313 				offsetof(struct test_val, foo), 3),
6314 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6315 			BPF_MOV64_IMM(BPF_REG_2, 0),
6316 			BPF_EMIT_CALL(BPF_FUNC_trace_printk),
6317 			BPF_EXIT_INSN(),
6318 		},
6319 		.fixup_map_hash_48b = { 3 },
6320 		.errstr = "R1 min value is outside of the array range",
6321 		.result = REJECT,
6322 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6323 	},
6324 	{
6325 		"helper access to adjusted map (via variable): no max check",
6326 		.insns = {
6327 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6328 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6329 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6330 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6331 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6332 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6333 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6334 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6335 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6336 			BPF_MOV64_IMM(BPF_REG_2, 1),
6337 			BPF_MOV64_IMM(BPF_REG_3, 0),
6338 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6339 			BPF_EXIT_INSN(),
6340 		},
6341 		.fixup_map_hash_48b = { 3 },
6342 		.errstr = "R1 unbounded memory access",
6343 		.result = REJECT,
6344 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6345 	},
6346 	{
6347 		"helper access to adjusted map (via variable): wrong max check",
6348 		.insns = {
6349 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6350 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6351 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6352 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6353 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6354 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6355 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6356 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6357 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6358 				offsetof(struct test_val, foo), 4),
6359 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6360 			BPF_MOV64_IMM(BPF_REG_2,
6361 				sizeof(struct test_val) -
6362 				offsetof(struct test_val, foo) + 1),
6363 			BPF_MOV64_IMM(BPF_REG_3, 0),
6364 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6365 			BPF_EXIT_INSN(),
6366 		},
6367 		.fixup_map_hash_48b = { 3 },
6368 		.errstr = "invalid access to map value, value_size=48 off=4 size=45",
6369 		.result = REJECT,
6370 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6371 	},
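	/* Bounds checks on the variable offset using <, <=, s< and s<=: the
	 * check must establish both a non-negative minimum and an in-bounds
	 * maximum before the map value is dereferenced.
	 */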
6372 	{
6373 		"helper access to map: bounds check using <, good access",
6374 		.insns = {
6375 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6376 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6377 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6378 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6379 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6380 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6381 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6382 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6383 			BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 2),
6384 			BPF_MOV64_IMM(BPF_REG_0, 0),
6385 			BPF_EXIT_INSN(),
6386 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6387 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6388 			BPF_MOV64_IMM(BPF_REG_0, 0),
6389 			BPF_EXIT_INSN(),
6390 		},
6391 		.fixup_map_hash_48b = { 3 },
6392 		.result = ACCEPT,
6393 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6394 	},
6395 	{
6396 		"helper access to map: bounds check using <, bad access",
6397 		.insns = {
6398 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6399 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6400 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6401 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6402 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6403 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6404 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6405 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6406 			BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 4),
6407 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6408 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6409 			BPF_MOV64_IMM(BPF_REG_0, 0),
6410 			BPF_EXIT_INSN(),
6411 			BPF_MOV64_IMM(BPF_REG_0, 0),
6412 			BPF_EXIT_INSN(),
6413 		},
6414 		.fixup_map_hash_48b = { 3 },
6415 		.result = REJECT,
6416 		.errstr = "R1 unbounded memory access",
6417 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6418 	},
6419 	{
6420 		"helper access to map: bounds check using <=, good access",
6421 		.insns = {
6422 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6423 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6424 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6425 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6426 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6427 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6428 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6429 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6430 			BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 2),
6431 			BPF_MOV64_IMM(BPF_REG_0, 0),
6432 			BPF_EXIT_INSN(),
6433 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6434 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6435 			BPF_MOV64_IMM(BPF_REG_0, 0),
6436 			BPF_EXIT_INSN(),
6437 		},
6438 		.fixup_map_hash_48b = { 3 },
6439 		.result = ACCEPT,
6440 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6441 	},
6442 	{
6443 		"helper access to map: bounds check using <=, bad access",
6444 		.insns = {
6445 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6446 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6447 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6448 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6449 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6450 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6451 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6452 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6453 			BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 4),
6454 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6455 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6456 			BPF_MOV64_IMM(BPF_REG_0, 0),
6457 			BPF_EXIT_INSN(),
6458 			BPF_MOV64_IMM(BPF_REG_0, 0),
6459 			BPF_EXIT_INSN(),
6460 		},
6461 		.fixup_map_hash_48b = { 3 },
6462 		.result = REJECT,
6463 		.errstr = "R1 unbounded memory access",
6464 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6465 	},
6466 	{
6467 		"helper access to map: bounds check using s<, good access",
6468 		.insns = {
6469 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6470 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6471 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6472 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6473 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6474 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6475 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6476 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6477 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
6478 			BPF_MOV64_IMM(BPF_REG_0, 0),
6479 			BPF_EXIT_INSN(),
6480 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 0, -3),
6481 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6482 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6483 			BPF_MOV64_IMM(BPF_REG_0, 0),
6484 			BPF_EXIT_INSN(),
6485 		},
6486 		.fixup_map_hash_48b = { 3 },
6487 		.result = ACCEPT,
6488 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6489 	},
6490 	{
6491 		"helper access to map: bounds check using s<, good access 2",
6492 		.insns = {
6493 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6494 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6495 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6496 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6497 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6498 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6499 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6500 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6501 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
6502 			BPF_MOV64_IMM(BPF_REG_0, 0),
6503 			BPF_EXIT_INSN(),
6504 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
6505 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6506 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6507 			BPF_MOV64_IMM(BPF_REG_0, 0),
6508 			BPF_EXIT_INSN(),
6509 		},
6510 		.fixup_map_hash_48b = { 3 },
6511 		.result = ACCEPT,
6512 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6513 	},
6514 	{
6515 		"helper access to map: bounds check using s<, bad access",
6516 		.insns = {
6517 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6518 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6519 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6520 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6521 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6522 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6523 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6524 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
6525 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
6526 			BPF_MOV64_IMM(BPF_REG_0, 0),
6527 			BPF_EXIT_INSN(),
6528 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
6529 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6530 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6531 			BPF_MOV64_IMM(BPF_REG_0, 0),
6532 			BPF_EXIT_INSN(),
6533 		},
6534 		.fixup_map_hash_48b = { 3 },
6535 		.result = REJECT,
6536 		.errstr = "R1 min value is negative",
6537 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6538 	},
6539 	{
6540 		"helper access to map: bounds check using s<=, good access",
6541 		.insns = {
6542 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6543 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6544 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6545 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6546 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6547 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6548 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6549 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6550 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
6551 			BPF_MOV64_IMM(BPF_REG_0, 0),
6552 			BPF_EXIT_INSN(),
6553 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 0, -3),
6554 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6555 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6556 			BPF_MOV64_IMM(BPF_REG_0, 0),
6557 			BPF_EXIT_INSN(),
6558 		},
6559 		.fixup_map_hash_48b = { 3 },
6560 		.result = ACCEPT,
6561 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6562 	},
6563 	{
6564 		"helper access to map: bounds check using s<=, good access 2",
6565 		.insns = {
6566 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6567 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6568 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6569 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6570 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6571 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6572 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6573 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6574 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
6575 			BPF_MOV64_IMM(BPF_REG_0, 0),
6576 			BPF_EXIT_INSN(),
6577 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
6578 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6579 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6580 			BPF_MOV64_IMM(BPF_REG_0, 0),
6581 			BPF_EXIT_INSN(),
6582 		},
6583 		.fixup_map_hash_48b = { 3 },
6584 		.result = ACCEPT,
6585 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6586 	},
6587 	{
6588 		"helper access to map: bounds check using s<=, bad access",
6589 		.insns = {
6590 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6591 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6592 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6593 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6594 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6595 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6596 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6597 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
6598 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
6599 			BPF_MOV64_IMM(BPF_REG_0, 0),
6600 			BPF_EXIT_INSN(),
6601 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
6602 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6603 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6604 			BPF_MOV64_IMM(BPF_REG_0, 0),
6605 			BPF_EXIT_INSN(),
6606 		},
6607 		.fixup_map_hash_48b = { 3 },
6608 		.result = REJECT,
6609 		.errstr = "R1 min value is negative",
6610 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6611 	},
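	/* The "map access" tests below exercise ALU arithmetic mixing scalars
	 * with the PTR_TO_MAP_VALUE pointer returned by map_lookup_elem().
	 * Adding a suitably bounded scalar to a value pointer (in either
	 * operand order) is accepted; the remaining combinations (scalar -=
	 * pointer, pointer +=/-= pointer, or arithmetic that can escape the
	 * value's bounds) are rejected with the errstr given in each test.
	 */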
6612 	{
6613 		"map access: known scalar += value_ptr",
6614 		.insns = {
6615 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6616 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6617 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6618 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6619 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6620 				     BPF_FUNC_map_lookup_elem),
6621 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
6622 			BPF_MOV64_IMM(BPF_REG_1, 4),
6623 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
6624 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
6625 			BPF_MOV64_IMM(BPF_REG_0, 1),
6626 			BPF_EXIT_INSN(),
6627 		},
6628 		.fixup_map_array_48b = { 3 },
6629 		.result = ACCEPT,
6630 		.retval = 1,
6631 	},
6632 	{
6633 		"map access: value_ptr += known scalar",
6634 		.insns = {
6635 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6636 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6637 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6638 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6639 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6640 				     BPF_FUNC_map_lookup_elem),
6641 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
6642 			BPF_MOV64_IMM(BPF_REG_1, 4),
6643 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6644 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6645 			BPF_MOV64_IMM(BPF_REG_0, 1),
6646 			BPF_EXIT_INSN(),
6647 		},
6648 		.fixup_map_array_48b = { 3 },
6649 		.result = ACCEPT,
6650 		.retval = 1,
6651 	},
6652 	{
6653 		"map access: unknown scalar += value_ptr",
6654 		.insns = {
6655 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6656 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6657 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6658 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6659 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6660 				     BPF_FUNC_map_lookup_elem),
6661 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6662 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6663 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
6664 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
6665 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
6666 			BPF_MOV64_IMM(BPF_REG_0, 1),
6667 			BPF_EXIT_INSN(),
6668 		},
6669 		.fixup_map_array_48b = { 3 },
6670 		.result = ACCEPT,
6671 		.retval = 1,
6672 	},
6673 	{
6674 		"map access: value_ptr += unknown scalar",
6675 		.insns = {
6676 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6677 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6678 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6679 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6680 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6681 				     BPF_FUNC_map_lookup_elem),
6682 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6683 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6684 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
6685 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6686 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6687 			BPF_MOV64_IMM(BPF_REG_0, 1),
6688 			BPF_EXIT_INSN(),
6689 		},
6690 		.fixup_map_array_48b = { 3 },
6691 		.result = ACCEPT,
6692 		.retval = 1,
6693 	},
6694 	{
6695 		"map access: value_ptr += value_ptr",
6696 		.insns = {
6697 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6698 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6699 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6700 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6701 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6702 				     BPF_FUNC_map_lookup_elem),
6703 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
6704 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_0),
6705 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6706 			BPF_MOV64_IMM(BPF_REG_0, 1),
6707 			BPF_EXIT_INSN(),
6708 		},
6709 		.fixup_map_array_48b = { 3 },
6710 		.result = REJECT,
6711 		.errstr = "R0 pointer += pointer prohibited",
6712 	},
6713 	{
6714 		"map access: known scalar -= value_ptr",
6715 		.insns = {
6716 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6717 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6718 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6719 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6720 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6721 				     BPF_FUNC_map_lookup_elem),
6722 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
6723 			BPF_MOV64_IMM(BPF_REG_1, 4),
6724 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
6725 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
6726 			BPF_MOV64_IMM(BPF_REG_0, 1),
6727 			BPF_EXIT_INSN(),
6728 		},
6729 		.fixup_map_array_48b = { 3 },
6730 		.result = REJECT,
6731 		.errstr = "R1 tried to subtract pointer from scalar",
6732 	},
6733 	{
6734 		"map access: value_ptr -= known scalar",
6735 		.insns = {
6736 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6737 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6738 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6739 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6740 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6741 				     BPF_FUNC_map_lookup_elem),
6742 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
6743 			BPF_MOV64_IMM(BPF_REG_1, 4),
6744 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
6745 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6746 			BPF_MOV64_IMM(BPF_REG_0, 1),
6747 			BPF_EXIT_INSN(),
6748 		},
6749 		.fixup_map_array_48b = { 3 },
6750 		.result = REJECT,
6751 		.errstr = "R0 min value is outside of the array range",
6752 	},
6753 	{
6754 		"map access: value_ptr -= known scalar, 2",
6755 		.insns = {
6756 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6757 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6758 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6759 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6760 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6761 				     BPF_FUNC_map_lookup_elem),
6762 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6763 			BPF_MOV64_IMM(BPF_REG_1, 6),
6764 			BPF_MOV64_IMM(BPF_REG_2, 4),
6765 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6766 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
6767 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6768 			BPF_MOV64_IMM(BPF_REG_0, 1),
6769 			BPF_EXIT_INSN(),
6770 		},
6771 		.fixup_map_array_48b = { 3 },
6772 		.result = ACCEPT,
6773 		.retval = 1,
6774 	},
6775 	{
6776 		"map access: unknown scalar -= value_ptr",
6777 		.insns = {
6778 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6779 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6780 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6781 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6782 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6783 				     BPF_FUNC_map_lookup_elem),
6784 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6785 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6786 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
6787 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
6788 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
6789 			BPF_MOV64_IMM(BPF_REG_0, 1),
6790 			BPF_EXIT_INSN(),
6791 		},
6792 		.fixup_map_array_48b = { 3 },
6793 		.result = REJECT,
6794 		.errstr = "R1 tried to subtract pointer from scalar",
6795 	},
6796 	{
6797 		"map access: value_ptr -= unknown scalar",
6798 		.insns = {
6799 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6800 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6801 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6802 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6803 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6804 				     BPF_FUNC_map_lookup_elem),
6805 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6806 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6807 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
6808 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
6809 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6810 			BPF_MOV64_IMM(BPF_REG_0, 1),
6811 			BPF_EXIT_INSN(),
6812 		},
6813 		.fixup_map_array_48b = { 3 },
6814 		.result = REJECT,
6815 		.errstr = "R0 min value is negative",
6816 	},
6817 	{
6818 		"map access: value_ptr -= unknown scalar, 2",
6819 		.insns = {
6820 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6821 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6822 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6823 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6824 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6825 				     BPF_FUNC_map_lookup_elem),
6826 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
6827 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6828 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
6829 			BPF_ALU64_IMM(BPF_OR, BPF_REG_1, 0x7),
6830 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6831 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6832 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7),
6833 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
6834 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6835 			BPF_MOV64_IMM(BPF_REG_0, 1),
6836 			BPF_EXIT_INSN(),
6837 		},
6838 		.fixup_map_array_48b = { 3 },
6839 		.result = ACCEPT,
6840 		.retval = 1,
6841 	},
6842 	{
6843 		"map access: value_ptr -= value_ptr",
6844 		.insns = {
6845 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6846 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6847 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6848 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6849 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6850 				     BPF_FUNC_map_lookup_elem),
6851 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
6852 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_0),
6853 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6854 			BPF_MOV64_IMM(BPF_REG_0, 1),
6855 			BPF_EXIT_INSN(),
6856 		},
6857 		.fixup_map_array_48b = { 3 },
6858 		.result = REJECT,
6859 		.errstr = "R0 invalid mem access 'inv'",
6860 		.errstr_unpriv = "R0 pointer -= pointer prohibited",
6861 	},
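	/* "map ... helper access to map": a pointer into one map value is
	 * passed back to bpf_map_lookup_elem()/bpf_map_update_elem() as the
	 * key (and value) argument, so the verifier must size that argument
	 * against the target map, including the deliberate mismatch in the
	 * "wrong size" test.
	 */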
6862 	{
6863 		"map lookup helper access to map",
6864 		.insns = {
6865 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6866 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6867 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6868 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6869 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6870 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6871 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6872 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6873 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6874 			BPF_EXIT_INSN(),
6875 		},
6876 		.fixup_map_hash_16b = { 3, 8 },
6877 		.result = ACCEPT,
6878 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6879 	},
6880 	{
6881 		"map update helper access to map",
6882 		.insns = {
6883 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6884 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6885 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6886 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6887 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6888 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6889 			BPF_MOV64_IMM(BPF_REG_4, 0),
6890 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
6891 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6892 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6893 			BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
6894 			BPF_EXIT_INSN(),
6895 		},
6896 		.fixup_map_hash_16b = { 3, 10 },
6897 		.result = ACCEPT,
6898 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6899 	},
6900 	{
6901 		"map update helper access to map: wrong size",
6902 		.insns = {
6903 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6904 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6905 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6906 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6907 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6908 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6909 			BPF_MOV64_IMM(BPF_REG_4, 0),
6910 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
6911 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6912 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6913 			BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
6914 			BPF_EXIT_INSN(),
6915 		},
6916 		.fixup_map_hash_8b = { 3 },
6917 		.fixup_map_hash_16b = { 10 },
6918 		.result = REJECT,
6919 		.errstr = "invalid access to map value, value_size=8 off=0 size=16",
6920 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6921 	},
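	/* "Adjusted map" variants: the looked-up value pointer is first moved
	 * by a constant immediate, a constant held in a register, or a
	 * bounds-checked variable before being used as the key for a second
	 * lookup, so the verifier has to account for that offset and reject
	 * the out-of-bound and insufficiently checked cases.
	 */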
6922 	{
6923 		"map helper access to adjusted map (via const imm)",
6924 		.insns = {
6925 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6926 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6927 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6928 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6929 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6930 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6931 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6932 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
6933 				      offsetof(struct other_val, bar)),
6934 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6935 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6936 			BPF_EXIT_INSN(),
6937 		},
6938 		.fixup_map_hash_16b = { 3, 9 },
6939 		.result = ACCEPT,
6940 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6941 	},
6942 	{
6943 		"map helper access to adjusted map (via const imm): out-of-bound 1",
6944 		.insns = {
6945 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6946 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6947 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6948 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6949 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6950 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6951 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6952 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
6953 				      sizeof(struct other_val) - 4),
6954 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6955 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6956 			BPF_EXIT_INSN(),
6957 		},
6958 		.fixup_map_hash_16b = { 3, 9 },
6959 		.result = REJECT,
6960 		.errstr = "invalid access to map value, value_size=16 off=12 size=8",
6961 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6962 	},
6963 	{
6964 		"map helper access to adjusted map (via const imm): out-of-bound 2",
6965 		.insns = {
6966 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6967 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6968 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6969 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6970 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6971 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6972 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6973 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6974 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6975 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6976 			BPF_EXIT_INSN(),
6977 		},
6978 		.fixup_map_hash_16b = { 3, 9 },
6979 		.result = REJECT,
6980 		.errstr = "invalid access to map value, value_size=16 off=-4 size=8",
6981 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6982 	},
6983 	{
6984 		"map helper access to adjusted map (via const reg)",
6985 		.insns = {
6986 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6987 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6988 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6989 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6990 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6991 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6992 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6993 			BPF_MOV64_IMM(BPF_REG_3,
6994 				      offsetof(struct other_val, bar)),
6995 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6996 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6997 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6998 			BPF_EXIT_INSN(),
6999 		},
7000 		.fixup_map_hash_16b = { 3, 10 },
7001 		.result = ACCEPT,
7002 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7003 	},
7004 	{
7005 		"map helper access to adjusted map (via const reg): out-of-bound 1",
7006 		.insns = {
7007 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7008 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7009 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7010 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7011 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7012 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7013 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
7014 			BPF_MOV64_IMM(BPF_REG_3,
7015 				      sizeof(struct other_val) - 4),
7016 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
7017 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7018 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7019 			BPF_EXIT_INSN(),
7020 		},
7021 		.fixup_map_hash_16b = { 3, 10 },
7022 		.result = REJECT,
7023 		.errstr = "invalid access to map value, value_size=16 off=12 size=8",
7024 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7025 	},
7026 	{
7027 		"map helper access to adjusted map (via const reg): out-of-bound 2",
7028 		.insns = {
7029 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7030 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7031 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7032 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7033 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7034 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7035 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
7036 			BPF_MOV64_IMM(BPF_REG_3, -4),
7037 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
7038 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7039 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7040 			BPF_EXIT_INSN(),
7041 		},
7042 		.fixup_map_hash_16b = { 3, 10 },
7043 		.result = REJECT,
7044 		.errstr = "invalid access to map value, value_size=16 off=-4 size=8",
7045 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7046 	},
7047 	{
7048 		"map helper access to adjusted map (via variable)",
7049 		.insns = {
7050 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7051 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7052 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7053 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7054 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7055 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7056 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
7057 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
7058 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
7059 				    offsetof(struct other_val, bar), 4),
7060 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
7061 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7062 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7063 			BPF_EXIT_INSN(),
7064 		},
7065 		.fixup_map_hash_16b = { 3, 11 },
7066 		.result = ACCEPT,
7067 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7068 	},
7069 	{
7070 		"map helper access to adjusted map (via variable): no max check",
7071 		.insns = {
7072 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7073 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7074 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7075 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7076 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7077 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7078 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
7079 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
7080 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
7081 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7082 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7083 			BPF_EXIT_INSN(),
7084 		},
7085 		.fixup_map_hash_16b = { 3, 10 },
7086 		.result = REJECT,
7087 		.errstr = "R2 unbounded memory access, make sure to bounds check any array access into a map",
7088 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7089 	},
7090 	{
7091 		"map helper access to adjusted map (via variable): wrong max check",
7092 		.insns = {
7093 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7094 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7095 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7096 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7097 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7098 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7099 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
7100 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
7101 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
7102 				    offsetof(struct other_val, bar) + 1, 4),
7103 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
7104 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7105 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7106 			BPF_EXIT_INSN(),
7107 		},
7108 		.fixup_map_hash_16b = { 3, 11 },
7109 		.result = REJECT,
7110 		.errstr = "invalid access to map value, value_size=16 off=9 size=8",
7111 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7112 	},
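	/* The next tests spill map value pointers to the stack, access the
	 * value through unaligned offsets (run only when the host has
	 * efficient unaligned access), and apply ALU operations that are
	 * illegal on pointers (bitwise AND, 32-bit add, division, byte swap,
	 * xadd on a spilled pointer), all of which must be rejected.
	 */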
7113 	{
7114 		"map element value is preserved across register spilling",
7115 		.insns = {
7116 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7117 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7118 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7119 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7120 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7121 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7122 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
7123 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7124 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
7125 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
7126 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
7127 			BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
7128 			BPF_EXIT_INSN(),
7129 		},
7130 		.fixup_map_hash_48b = { 3 },
7131 		.errstr_unpriv = "R0 leaks addr",
7132 		.result = ACCEPT,
7133 		.result_unpriv = REJECT,
7134 	},
7135 	{
7136 		"map element value or null is marked on register spilling",
7137 		.insns = {
7138 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7139 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7140 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7141 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7142 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7143 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7144 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
7145 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
7146 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
7147 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
7148 			BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
7149 			BPF_EXIT_INSN(),
7150 		},
7151 		.fixup_map_hash_48b = { 3 },
7152 		.errstr_unpriv = "R0 leaks addr",
7153 		.result = ACCEPT,
7154 		.result_unpriv = REJECT,
7155 	},
7156 	{
7157 		"map element value store of cleared call register",
7158 		.insns = {
7159 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7160 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7161 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7162 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7163 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7164 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
7165 			BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
7166 			BPF_EXIT_INSN(),
7167 		},
7168 		.fixup_map_hash_48b = { 3 },
7169 		.errstr_unpriv = "R1 !read_ok",
7170 		.errstr = "R1 !read_ok",
7171 		.result = REJECT,
7172 		.result_unpriv = REJECT,
7173 	},
7174 	{
7175 		"map element value with unaligned store",
7176 		.insns = {
7177 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7178 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7179 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7180 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7181 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7182 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
7183 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
7184 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
7185 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
7186 			BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
7187 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
7188 			BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
7189 			BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
7190 			BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
7191 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
7192 			BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
7193 			BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
7194 			BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
7195 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
7196 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
7197 			BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
7198 			BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
7199 			BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
7200 			BPF_EXIT_INSN(),
7201 		},
7202 		.fixup_map_hash_48b = { 3 },
7203 		.errstr_unpriv = "R0 leaks addr",
7204 		.result = ACCEPT,
7205 		.result_unpriv = REJECT,
7206 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7207 	},
7208 	{
7209 		"map element value with unaligned load",
7210 		.insns = {
7211 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7212 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7213 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7214 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7215 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7216 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
7217 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
7218 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
7219 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
7220 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
7221 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
7222 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
7223 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
7224 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
7225 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
7226 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
7227 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
7228 			BPF_EXIT_INSN(),
7229 		},
7230 		.fixup_map_hash_48b = { 3 },
7231 		.errstr_unpriv = "R0 leaks addr",
7232 		.result = ACCEPT,
7233 		.result_unpriv = REJECT,
7234 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7235 	},
7236 	{
7237 		"map element value illegal alu op, 1",
7238 		.insns = {
7239 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7240 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7241 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7242 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7243 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7244 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
7245 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
7246 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
7247 			BPF_EXIT_INSN(),
7248 		},
7249 		.fixup_map_hash_48b = { 3 },
7250 		.errstr = "R0 bitwise operator &= on pointer",
7251 		.result = REJECT,
7252 	},
7253 	{
7254 		"map element value illegal alu op, 2",
7255 		.insns = {
7256 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7257 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7258 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7259 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7260 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7261 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
7262 			BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
7263 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
7264 			BPF_EXIT_INSN(),
7265 		},
7266 		.fixup_map_hash_48b = { 3 },
7267 		.errstr = "R0 32-bit pointer arithmetic prohibited",
7268 		.result = REJECT,
7269 	},
7270 	{
7271 		"map element value illegal alu op, 3",
7272 		.insns = {
7273 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7274 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7275 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7276 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7277 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7278 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
7279 			BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
7280 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
7281 			BPF_EXIT_INSN(),
7282 		},
7283 		.fixup_map_hash_48b = { 3 },
7284 		.errstr = "R0 pointer arithmetic with /= operator",
7285 		.result = REJECT,
7286 	},
7287 	{
7288 		"map element value illegal alu op, 4",
7289 		.insns = {
7290 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7291 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7292 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7293 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7294 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7295 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
7296 			BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
7297 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
7298 			BPF_EXIT_INSN(),
7299 		},
7300 		.fixup_map_hash_48b = { 3 },
7301 		.errstr_unpriv = "R0 pointer arithmetic prohibited",
7302 		.errstr = "invalid mem access 'inv'",
7303 		.result = REJECT,
7304 		.result_unpriv = REJECT,
7305 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7306 	},
7307 	{
7308 		"map element value illegal alu op, 5",
7309 		.insns = {
7310 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7311 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7312 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7313 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7314 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7315 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7316 			BPF_MOV64_IMM(BPF_REG_3, 4096),
7317 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7318 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7319 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
7320 			BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
7321 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
7322 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
7323 			BPF_EXIT_INSN(),
7324 		},
7325 		.fixup_map_hash_48b = { 3 },
7326 		.errstr = "R0 invalid mem access 'inv'",
7327 		.result = REJECT,
7328 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7329 	},
7330 	{
7331 		"map element value is preserved across register spilling",
7332 		.insns = {
7333 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7334 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7335 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7336 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7337 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7338 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7339 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
7340 				offsetof(struct test_val, foo)),
7341 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
7342 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7343 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
7344 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
7345 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
7346 			BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
7347 			BPF_EXIT_INSN(),
7348 		},
7349 		.fixup_map_hash_48b = { 3 },
7350 		.errstr_unpriv = "R0 leaks addr",
7351 		.result = ACCEPT,
7352 		.result_unpriv = REJECT,
7353 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7354 	},
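	/* "Helper access to variable memory": a buffer pointer plus a
	 * runtime-variable size is passed to bpf_probe_read() or
	 * bpf_csum_diff(). The size register needs both a lower and an upper
	 * bound (via an AND mask and/or conditional jumps) that keep the
	 * access inside the stack buffer or map value, and stack memory read
	 * by the helper must have been initialized.
	 */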
7355 	{
7356 		"helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
7357 		.insns = {
7358 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7359 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7360 			BPF_MOV64_IMM(BPF_REG_0, 0),
7361 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
7362 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
7363 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
7364 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
7365 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
7366 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
7367 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
7368 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
7369 			BPF_MOV64_IMM(BPF_REG_2, 16),
7370 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7371 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7372 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
7373 			BPF_MOV64_IMM(BPF_REG_4, 0),
7374 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
7375 			BPF_MOV64_IMM(BPF_REG_3, 0),
7376 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7377 			BPF_MOV64_IMM(BPF_REG_0, 0),
7378 			BPF_EXIT_INSN(),
7379 		},
7380 		.result = ACCEPT,
7381 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7382 	},
7383 	{
7384 		"helper access to variable memory: stack, bitwise AND, zero included",
7385 		.insns = {
7386 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7387 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7388 			BPF_MOV64_IMM(BPF_REG_2, 16),
7389 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7390 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7391 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
7392 			BPF_MOV64_IMM(BPF_REG_3, 0),
7393 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7394 			BPF_EXIT_INSN(),
7395 		},
7396 		.errstr = "invalid indirect read from stack off -64+0 size 64",
7397 		.result = REJECT,
7398 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7399 	},
7400 	{
7401 		"helper access to variable memory: stack, bitwise AND + JMP, wrong max",
7402 		.insns = {
7403 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7404 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7405 			BPF_MOV64_IMM(BPF_REG_2, 16),
7406 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7407 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7408 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
7409 			BPF_MOV64_IMM(BPF_REG_4, 0),
7410 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
7411 			BPF_MOV64_IMM(BPF_REG_3, 0),
7412 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7413 			BPF_MOV64_IMM(BPF_REG_0, 0),
7414 			BPF_EXIT_INSN(),
7415 		},
7416 		.errstr = "invalid stack type R1 off=-64 access_size=65",
7417 		.result = REJECT,
7418 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7419 	},
7420 	{
7421 		"helper access to variable memory: stack, JMP, correct bounds",
7422 		.insns = {
7423 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7424 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7425 			BPF_MOV64_IMM(BPF_REG_0, 0),
7426 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
7427 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
7428 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
7429 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
7430 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
7431 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
7432 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
7433 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
7434 			BPF_MOV64_IMM(BPF_REG_2, 16),
7435 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7436 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7437 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
7438 			BPF_MOV64_IMM(BPF_REG_4, 0),
7439 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
7440 			BPF_MOV64_IMM(BPF_REG_3, 0),
7441 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7442 			BPF_MOV64_IMM(BPF_REG_0, 0),
7443 			BPF_EXIT_INSN(),
7444 		},
7445 		.result = ACCEPT,
7446 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7447 	},
7448 	{
7449 		"helper access to variable memory: stack, JMP (signed), correct bounds",
7450 		.insns = {
7451 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7452 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7453 			BPF_MOV64_IMM(BPF_REG_0, 0),
7454 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
7455 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
7456 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
7457 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
7458 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
7459 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
7460 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
7461 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
7462 			BPF_MOV64_IMM(BPF_REG_2, 16),
7463 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7464 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7465 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
7466 			BPF_MOV64_IMM(BPF_REG_4, 0),
7467 			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
7468 			BPF_MOV64_IMM(BPF_REG_3, 0),
7469 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7470 			BPF_MOV64_IMM(BPF_REG_0, 0),
7471 			BPF_EXIT_INSN(),
7472 		},
7473 		.result = ACCEPT,
7474 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7475 	},
7476 	{
7477 		"helper access to variable memory: stack, JMP, bounds + offset",
7478 		.insns = {
7479 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7480 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7481 			BPF_MOV64_IMM(BPF_REG_2, 16),
7482 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7483 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7484 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
7485 			BPF_MOV64_IMM(BPF_REG_4, 0),
7486 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
7487 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
7488 			BPF_MOV64_IMM(BPF_REG_3, 0),
7489 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7490 			BPF_MOV64_IMM(BPF_REG_0, 0),
7491 			BPF_EXIT_INSN(),
7492 		},
7493 		.errstr = "invalid stack type R1 off=-64 access_size=65",
7494 		.result = REJECT,
7495 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7496 	},
7497 	{
7498 		"helper access to variable memory: stack, JMP, wrong max",
7499 		.insns = {
7500 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7501 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7502 			BPF_MOV64_IMM(BPF_REG_2, 16),
7503 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7504 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7505 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
7506 			BPF_MOV64_IMM(BPF_REG_4, 0),
7507 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
7508 			BPF_MOV64_IMM(BPF_REG_3, 0),
7509 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7510 			BPF_MOV64_IMM(BPF_REG_0, 0),
7511 			BPF_EXIT_INSN(),
7512 		},
7513 		.errstr = "invalid stack type R1 off=-64 access_size=65",
7514 		.result = REJECT,
7515 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7516 	},
7517 	{
7518 		"helper access to variable memory: stack, JMP, no max check",
7519 		.insns = {
7520 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7521 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7522 			BPF_MOV64_IMM(BPF_REG_2, 16),
7523 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7524 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7525 			BPF_MOV64_IMM(BPF_REG_4, 0),
7526 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
7527 			BPF_MOV64_IMM(BPF_REG_3, 0),
7528 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7529 			BPF_MOV64_IMM(BPF_REG_0, 0),
7530 			BPF_EXIT_INSN(),
7531 		},
7532 		/* because max wasn't checked, signed min is negative */
7533 		.errstr = "R2 min value is negative, either use unsigned or 'var &= const'",
7534 		.result = REJECT,
7535 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7536 	},
7537 	{
7538 		"helper access to variable memory: stack, JMP, no min check",
7539 		.insns = {
7540 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7541 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7542 			BPF_MOV64_IMM(BPF_REG_2, 16),
7543 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7544 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7545 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
7546 			BPF_MOV64_IMM(BPF_REG_3, 0),
7547 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7548 			BPF_MOV64_IMM(BPF_REG_0, 0),
7549 			BPF_EXIT_INSN(),
7550 		},
7551 		.errstr = "invalid indirect read from stack off -64+0 size 64",
7552 		.result = REJECT,
7553 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7554 	},
7555 	{
7556 		"helper access to variable memory: stack, JMP (signed), no min check",
7557 		.insns = {
7558 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7559 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7560 			BPF_MOV64_IMM(BPF_REG_2, 16),
7561 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7562 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7563 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
7564 			BPF_MOV64_IMM(BPF_REG_3, 0),
7565 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7566 			BPF_MOV64_IMM(BPF_REG_0, 0),
7567 			BPF_EXIT_INSN(),
7568 		},
7569 		.errstr = "R2 min value is negative",
7570 		.result = REJECT,
7571 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7572 	},
7573 	{
7574 		"helper access to variable memory: map, JMP, correct bounds",
7575 		.insns = {
7576 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7577 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7578 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7579 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7580 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7581 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
7582 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7583 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
7584 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7585 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7586 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
7587 				sizeof(struct test_val), 4),
7588 			BPF_MOV64_IMM(BPF_REG_4, 0),
7589 			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
7590 			BPF_MOV64_IMM(BPF_REG_3, 0),
7591 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7592 			BPF_MOV64_IMM(BPF_REG_0, 0),
7593 			BPF_EXIT_INSN(),
7594 		},
7595 		.fixup_map_hash_48b = { 3 },
7596 		.result = ACCEPT,
7597 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7598 	},
7599 	{
7600 		"helper access to variable memory: map, JMP, wrong max",
7601 		.insns = {
7602 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7603 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7604 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7605 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7606 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7607 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
7608 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7609 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
7610 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7611 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7612 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
7613 				sizeof(struct test_val) + 1, 4),
7614 			BPF_MOV64_IMM(BPF_REG_4, 0),
7615 			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
7616 			BPF_MOV64_IMM(BPF_REG_3, 0),
7617 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7618 			BPF_MOV64_IMM(BPF_REG_0, 0),
7619 			BPF_EXIT_INSN(),
7620 		},
7621 		.fixup_map_hash_48b = { 3 },
7622 		.errstr = "invalid access to map value, value_size=48 off=0 size=49",
7623 		.result = REJECT,
7624 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7625 	},
7626 	{
7627 		"helper access to variable memory: map adjusted, JMP, correct bounds",
7628 		.insns = {
7629 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7630 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7631 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7632 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7633 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7634 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
7635 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7636 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
7637 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
7638 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7639 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7640 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
7641 				sizeof(struct test_val) - 20, 4),
7642 			BPF_MOV64_IMM(BPF_REG_4, 0),
7643 			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
7644 			BPF_MOV64_IMM(BPF_REG_3, 0),
7645 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7646 			BPF_MOV64_IMM(BPF_REG_0, 0),
7647 			BPF_EXIT_INSN(),
7648 		},
7649 		.fixup_map_hash_48b = { 3 },
7650 		.result = ACCEPT,
7651 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7652 	},
7653 	{
7654 		"helper access to variable memory: map adjusted, JMP, wrong max",
7655 		.insns = {
7656 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7657 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7658 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7659 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7660 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7661 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
7662 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7663 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
7664 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
7665 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7666 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7667 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
7668 				sizeof(struct test_val) - 19, 4),
7669 			BPF_MOV64_IMM(BPF_REG_4, 0),
7670 			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
7671 			BPF_MOV64_IMM(BPF_REG_3, 0),
7672 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7673 			BPF_MOV64_IMM(BPF_REG_0, 0),
7674 			BPF_EXIT_INSN(),
7675 		},
7676 		.fixup_map_hash_48b = { 3 },
7677 		.errstr = "R1 min value is outside of the array range",
7678 		.result = REJECT,
7679 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7680 	},
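	/* The ARG_PTR_TO_MEM_OR_NULL cases use bpf_csum_diff(), whose buffer
	 * argument may be NULL: a NULL pointer is accepted only together with
	 * a size known to be 0, whereas a non-NULL stack, map, or packet
	 * pointer is fine even when the size may be 0. bpf_probe_read() (the
	 * !ARG_PTR_TO_MEM_OR_NULL cases further down) never accepts NULL.
	 */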
7681 	{
7682 		"helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
7683 		.insns = {
7684 			BPF_MOV64_IMM(BPF_REG_1, 0),
7685 			BPF_MOV64_IMM(BPF_REG_2, 0),
7686 			BPF_MOV64_IMM(BPF_REG_3, 0),
7687 			BPF_MOV64_IMM(BPF_REG_4, 0),
7688 			BPF_MOV64_IMM(BPF_REG_5, 0),
7689 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7690 			BPF_EXIT_INSN(),
7691 		},
7692 		.result = ACCEPT,
7693 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
7694 	},
7695 	{
7696 		"helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
7697 		.insns = {
7698 			BPF_MOV64_IMM(BPF_REG_1, 0),
7699 			BPF_MOV64_IMM(BPF_REG_2, 1),
7700 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7701 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7702 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
7703 			BPF_MOV64_IMM(BPF_REG_3, 0),
7704 			BPF_MOV64_IMM(BPF_REG_4, 0),
7705 			BPF_MOV64_IMM(BPF_REG_5, 0),
7706 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7707 			BPF_EXIT_INSN(),
7708 		},
7709 		.errstr = "R1 type=inv expected=fp",
7710 		.result = REJECT,
7711 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
7712 	},
7713 	{
7714 		"helper access to variable memory: size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
7715 		.insns = {
7716 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7717 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
7718 			BPF_MOV64_IMM(BPF_REG_2, 0),
7719 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
7720 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
7721 			BPF_MOV64_IMM(BPF_REG_3, 0),
7722 			BPF_MOV64_IMM(BPF_REG_4, 0),
7723 			BPF_MOV64_IMM(BPF_REG_5, 0),
7724 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7725 			BPF_EXIT_INSN(),
7726 		},
7727 		.result = ACCEPT,
7728 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
7729 	},
7730 	{
7731 		"helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
7732 		.insns = {
7733 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7734 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7735 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7736 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7737 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7738 				     BPF_FUNC_map_lookup_elem),
7739 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7740 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7741 			BPF_MOV64_IMM(BPF_REG_2, 0),
7742 			BPF_MOV64_IMM(BPF_REG_3, 0),
7743 			BPF_MOV64_IMM(BPF_REG_4, 0),
7744 			BPF_MOV64_IMM(BPF_REG_5, 0),
7745 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7746 			BPF_EXIT_INSN(),
7747 		},
7748 		.fixup_map_hash_8b = { 3 },
7749 		.result = ACCEPT,
7750 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
7751 	},
7752 	{
7753 		"helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
7754 		.insns = {
7755 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7756 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7757 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7758 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7759 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7760 				     BPF_FUNC_map_lookup_elem),
7761 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7762 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
7763 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 7),
7764 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7765 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
7766 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
7767 			BPF_MOV64_IMM(BPF_REG_3, 0),
7768 			BPF_MOV64_IMM(BPF_REG_4, 0),
7769 			BPF_MOV64_IMM(BPF_REG_5, 0),
7770 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7771 			BPF_EXIT_INSN(),
7772 		},
7773 		.fixup_map_hash_8b = { 3 },
7774 		.result = ACCEPT,
7775 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
7776 	},
7777 	{
7778 		"helper access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
7779 		.insns = {
7780 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7781 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7782 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7783 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7784 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7785 				     BPF_FUNC_map_lookup_elem),
7786 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7787 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7788 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
7789 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
7790 			BPF_MOV64_IMM(BPF_REG_3, 0),
7791 			BPF_MOV64_IMM(BPF_REG_4, 0),
7792 			BPF_MOV64_IMM(BPF_REG_5, 0),
7793 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7794 			BPF_EXIT_INSN(),
7795 		},
7796 		.fixup_map_hash_8b = { 3 },
7797 		.result = ACCEPT,
7798 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
7799 	},
7800 	{
7801 		"helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)",
7802 		.insns = {
7803 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
7804 				    offsetof(struct __sk_buff, data)),
7805 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7806 				    offsetof(struct __sk_buff, data_end)),
7807 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_6),
7808 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
7809 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
7810 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
7811 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 0),
7812 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
7813 			BPF_MOV64_IMM(BPF_REG_3, 0),
7814 			BPF_MOV64_IMM(BPF_REG_4, 0),
7815 			BPF_MOV64_IMM(BPF_REG_5, 0),
7816 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7817 			BPF_EXIT_INSN(),
7818 		},
7819 		.result = ACCEPT,
7820 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
7821 		.retval = 0 /* csum_diff of 64-byte packet */,
7822 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7823 	},
7824 	{
7825 		"helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
7826 		.insns = {
7827 			BPF_MOV64_IMM(BPF_REG_1, 0),
7828 			BPF_MOV64_IMM(BPF_REG_2, 0),
7829 			BPF_MOV64_IMM(BPF_REG_3, 0),
7830 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7831 			BPF_EXIT_INSN(),
7832 		},
7833 		.errstr = "R1 type=inv expected=fp",
7834 		.result = REJECT,
7835 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7836 	},
7837 	{
7838 		"helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
7839 		.insns = {
7840 			BPF_MOV64_IMM(BPF_REG_1, 0),
7841 			BPF_MOV64_IMM(BPF_REG_2, 1),
7842 			BPF_MOV64_IMM(BPF_REG_3, 0),
7843 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7844 			BPF_EXIT_INSN(),
7845 		},
7846 		.errstr = "R1 type=inv expected=fp",
7847 		.result = REJECT,
7848 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7849 	},
7850 	{
7851 		"helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
7852 		.insns = {
7853 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7854 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
7855 			BPF_MOV64_IMM(BPF_REG_2, 0),
7856 			BPF_MOV64_IMM(BPF_REG_3, 0),
7857 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7858 			BPF_EXIT_INSN(),
7859 		},
7860 		.result = ACCEPT,
7861 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7862 	},
7863 	{
7864 		"helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
7865 		.insns = {
7866 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7867 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7868 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7869 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7870 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7871 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7872 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7873 			BPF_MOV64_IMM(BPF_REG_2, 0),
7874 			BPF_MOV64_IMM(BPF_REG_3, 0),
7875 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7876 			BPF_EXIT_INSN(),
7877 		},
7878 		.fixup_map_hash_8b = { 3 },
7879 		.result = ACCEPT,
7880 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7881 	},
7882 	{
7883 		"helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
7884 		.insns = {
7885 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7886 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7887 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7888 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7889 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7890 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7891 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
7892 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
7893 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7894 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
7895 			BPF_MOV64_IMM(BPF_REG_3, 0),
7896 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7897 			BPF_EXIT_INSN(),
7898 		},
7899 		.fixup_map_hash_8b = { 3 },
7900 		.result = ACCEPT,
7901 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7902 	},
7903 	{
7904 		"helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
7905 		.insns = {
7906 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7907 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7908 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7909 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7910 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7911 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
7912 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7913 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
7914 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 2),
7915 			BPF_MOV64_IMM(BPF_REG_3, 0),
7916 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7917 			BPF_EXIT_INSN(),
7918 		},
7919 		.fixup_map_hash_8b = { 3 },
7920 		.result = ACCEPT,
7921 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7922 	},
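	/* The "8 bytes leak" pair: the stack buffer handed to bpf_probe_read()
	 * has an 8-byte hole at fp-32 that is never initialized, so the
	 * variable-size helper access is rejected ("invalid indirect read
	 * from stack"); the "no leak" variant initializes the whole buffer
	 * and is accepted.
	 */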
7923 	{
7924 		"helper access to variable memory: 8 bytes leak",
7925 		.insns = {
7926 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7927 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7928 			BPF_MOV64_IMM(BPF_REG_0, 0),
7929 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
7930 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
7931 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
7932 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
7933 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
7934 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
7935 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
7936 			BPF_MOV64_IMM(BPF_REG_2, 1),
7937 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7938 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7939 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
7940 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
7941 			BPF_MOV64_IMM(BPF_REG_3, 0),
7942 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7943 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7944 			BPF_EXIT_INSN(),
7945 		},
7946 		.errstr = "invalid indirect read from stack off -64+32 size 64",
7947 		.result = REJECT,
7948 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7949 	},
7950 	{
7951 		"helper access to variable memory: 8 bytes no leak (init memory)",
7952 		.insns = {
7953 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7954 			BPF_MOV64_IMM(BPF_REG_0, 0),
7955 			BPF_MOV64_IMM(BPF_REG_0, 0),
7956 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
7957 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
7958 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
7959 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
7960 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
7961 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
7962 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
7963 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
7964 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7965 			BPF_MOV64_IMM(BPF_REG_2, 0),
7966 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
7967 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
7968 			BPF_MOV64_IMM(BPF_REG_3, 0),
7969 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7970 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7971 			BPF_EXIT_INSN(),
7972 		},
7973 		.result = ACCEPT,
7974 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7975 	},
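	/* Two bounds-tracking checks: masking with a negative constant, and a
	 * chain of 32-bit MOD/AND/MUL operations, must still leave the
	 * verifier aware that the derived map value offset can fall outside
	 * the array; both are rejected with "R0 max value is outside of the
	 * array range".
	 */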
7976 	{
7977 		"invalid and of negative number",
7978 		.insns = {
7979 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7980 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7981 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7982 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7983 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7984 				     BPF_FUNC_map_lookup_elem),
7985 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7986 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7987 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
7988 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
7989 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7990 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
7991 				   offsetof(struct test_val, foo)),
7992 			BPF_EXIT_INSN(),
7993 		},
7994 		.fixup_map_hash_48b = { 3 },
7995 		.errstr = "R0 max value is outside of the array range",
7996 		.result = REJECT,
7997 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7998 	},
7999 	{
8000 		"invalid range check",
8001 		.insns = {
8002 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8003 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8004 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8005 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8006 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8007 				     BPF_FUNC_map_lookup_elem),
8008 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
8009 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
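			/* the 32-bit ALU chain below leaves r3 as either 0 or
			 * 0x10000000; adding it to r0 can place the pointer
			 * far outside the 48-byte value.
			 */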
8010 			BPF_MOV64_IMM(BPF_REG_9, 1),
8011 			BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
8012 			BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
8013 			BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
8014 			BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
8015 			BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
8016 			BPF_MOV32_IMM(BPF_REG_3, 1),
8017 			BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
8018 			BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
8019 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
8020 			BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
8021 			BPF_MOV64_IMM(BPF_REG_0, 0),

8022 			BPF_EXIT_INSN(),
8023 		},
8024 		.fixup_map_hash_48b = { 3 },
8025 		.errstr = "R0 max value is outside of the array range",
8026 		.result = REJECT,
8027 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
8028 	},
8029 	{
8030 		"map in map access",
8031 		.insns = {
8032 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
8033 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8034 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
8035 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8036 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8037 				     BPF_FUNC_map_lookup_elem),
8038 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
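			/* a non-NULL r0 is the inner map pointer; it becomes
			 * the map argument (r1) for the second lookup.
			 */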
8039 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
8040 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8041 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
8042 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
8043 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8044 				     BPF_FUNC_map_lookup_elem),
8045 			BPF_MOV64_IMM(BPF_REG_0, 0),
8046 			BPF_EXIT_INSN(),
8047 		},
8048 		.fixup_map_in_map = { 3 },
8049 		.result = ACCEPT,
8050 	},
8051 	{
8052 		"invalid inner map pointer",
8053 		.insns = {
8054 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
8055 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8056 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
8057 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8058 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8059 				     BPF_FUNC_map_lookup_elem),
8060 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
8061 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
8062 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8063 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
8064 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
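			/* offsetting the inner map pointer is the bug under
			 * test: arithmetic on a map_ptr is prohibited.
			 */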
8065 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8066 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8067 				     BPF_FUNC_map_lookup_elem),
8068 			BPF_MOV64_IMM(BPF_REG_0, 0),
8069 			BPF_EXIT_INSN(),
8070 		},
8071 		.fixup_map_in_map = { 3 },
8072 		.errstr = "R1 pointer arithmetic on map_ptr prohibited",
8073 		.result = REJECT,
8074 	},
8075 	{
8076 		"forgot null checking on the inner map pointer",
8077 		.insns = {
8078 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
8079 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8080 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
8081 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8082 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8083 				     BPF_FUNC_map_lookup_elem),
8084 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
8085 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8086 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
8087 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
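			/* no NULL check on r0, so r1 is still
			 * map_value_or_null where the helper expects map_ptr.
			 */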
8088 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8089 				     BPF_FUNC_map_lookup_elem),
8090 			BPF_MOV64_IMM(BPF_REG_0, 0),
8091 			BPF_EXIT_INSN(),
8092 		},
8093 		.fixup_map_in_map = { 3 },
8094 		.errstr = "R1 type=map_value_or_null expected=map_ptr",
8095 		.result = REJECT,
8096 	},
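	/* BPF_LD_ABS implicitly uses r6 as the skb and clobbers the
	 * caller-saved registers r1-r5; the tests below check that reading
	 * r1-r5 afterwards is rejected (!read_ok) while the callee-saved r7
	 * survives.
	 */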
8097 	{
8098 		"ld_abs: check calling conv, r1",
8099 		.insns = {
8100 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8101 			BPF_MOV64_IMM(BPF_REG_1, 0),
8102 			BPF_LD_ABS(BPF_W, -0x200000),
8103 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
8104 			BPF_EXIT_INSN(),
8105 		},
8106 		.errstr = "R1 !read_ok",
8107 		.result = REJECT,
8108 	},
8109 	{
8110 		"ld_abs: check calling conv, r2",
8111 		.insns = {
8112 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8113 			BPF_MOV64_IMM(BPF_REG_2, 0),
8114 			BPF_LD_ABS(BPF_W, -0x200000),
8115 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
8116 			BPF_EXIT_INSN(),
8117 		},
8118 		.errstr = "R2 !read_ok",
8119 		.result = REJECT,
8120 	},
8121 	{
8122 		"ld_abs: check calling conv, r3",
8123 		.insns = {
8124 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8125 			BPF_MOV64_IMM(BPF_REG_3, 0),
8126 			BPF_LD_ABS(BPF_W, -0x200000),
8127 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
8128 			BPF_EXIT_INSN(),
8129 		},
8130 		.errstr = "R3 !read_ok",
8131 		.result = REJECT,
8132 	},
8133 	{
8134 		"ld_abs: check calling conv, r4",
8135 		.insns = {
8136 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8137 			BPF_MOV64_IMM(BPF_REG_4, 0),
8138 			BPF_LD_ABS(BPF_W, -0x200000),
8139 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
8140 			BPF_EXIT_INSN(),
8141 		},
8142 		.errstr = "R4 !read_ok",
8143 		.result = REJECT,
8144 	},
8145 	{
8146 		"ld_abs: check calling conv, r5",
8147 		.insns = {
8148 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8149 			BPF_MOV64_IMM(BPF_REG_5, 0),
8150 			BPF_LD_ABS(BPF_W, -0x200000),
8151 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
8152 			BPF_EXIT_INSN(),
8153 		},
8154 		.errstr = "R5 !read_ok",
8155 		.result = REJECT,
8156 	},
8157 	{
8158 		"ld_abs: check calling conv, r7",
8159 		.insns = {
8160 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8161 			BPF_MOV64_IMM(BPF_REG_7, 0),
8162 			BPF_LD_ABS(BPF_W, -0x200000),
8163 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
8164 			BPF_EXIT_INSN(),
8165 		},
8166 		.result = ACCEPT,
8167 	},
8168 	{
8169 		"ld_abs: tests on r6 and skb data reload helper",
8170 		.insns = {
8171 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8172 			BPF_LD_ABS(BPF_B, 0),
8173 			BPF_LD_ABS(BPF_H, 0),
8174 			BPF_LD_ABS(BPF_W, 0),
8175 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
8176 			BPF_MOV64_IMM(BPF_REG_6, 0),
8177 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
8178 			BPF_MOV64_IMM(BPF_REG_2, 1),
8179 			BPF_MOV64_IMM(BPF_REG_3, 2),
8180 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8181 				     BPF_FUNC_skb_vlan_push),
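			/* the helper may have invalidated packet data, so the
			 * ctx saved in r7 is moved back into r6 before the
			 * next LD_ABS sequence.
			 */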
8182 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
8183 			BPF_LD_ABS(BPF_B, 0),
8184 			BPF_LD_ABS(BPF_H, 0),
8185 			BPF_LD_ABS(BPF_W, 0),
8186 			BPF_MOV64_IMM(BPF_REG_0, 42),
8187 			BPF_EXIT_INSN(),
8188 		},
8189 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
8190 		.result = ACCEPT,
8191 		.retval = 42 /* ultimate return value */,
8192 	},
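	/* Same calling-convention checks for BPF_LD_IND; the final r7 variant
	 * is accepted and returns the preserved value 1.
	 */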
8193 	{
8194 		"ld_ind: check calling conv, r1",
8195 		.insns = {
8196 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8197 			BPF_MOV64_IMM(BPF_REG_1, 1),
8198 			BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000),
8199 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
8200 			BPF_EXIT_INSN(),
8201 		},
8202 		.errstr = "R1 !read_ok",
8203 		.result = REJECT,
8204 	},
8205 	{
8206 		"ld_ind: check calling conv, r2",
8207 		.insns = {
8208 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8209 			BPF_MOV64_IMM(BPF_REG_2, 1),
8210 			BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000),
8211 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
8212 			BPF_EXIT_INSN(),
8213 		},
8214 		.errstr = "R2 !read_ok",
8215 		.result = REJECT,
8216 	},
8217 	{
8218 		"ld_ind: check calling conv, r3",
8219 		.insns = {
8220 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8221 			BPF_MOV64_IMM(BPF_REG_3, 1),
8222 			BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000),
8223 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
8224 			BPF_EXIT_INSN(),
8225 		},
8226 		.errstr = "R3 !read_ok",
8227 		.result = REJECT,
8228 	},
8229 	{
8230 		"ld_ind: check calling conv, r4",
8231 		.insns = {
8232 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8233 			BPF_MOV64_IMM(BPF_REG_4, 1),
8234 			BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000),
8235 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
8236 			BPF_EXIT_INSN(),
8237 		},
8238 		.errstr = "R4 !read_ok",
8239 		.result = REJECT,
8240 	},
8241 	{
8242 		"ld_ind: check calling conv, r5",
8243 		.insns = {
8244 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8245 			BPF_MOV64_IMM(BPF_REG_5, 1),
8246 			BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000),
8247 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
8248 			BPF_EXIT_INSN(),
8249 		},
8250 		.errstr = "R5 !read_ok",
8251 		.result = REJECT,
8252 	},
8253 	{
8254 		"ld_ind: check calling conv, r7",
8255 		.insns = {
8256 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8257 			BPF_MOV64_IMM(BPF_REG_7, 1),
8258 			BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
8259 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
8260 			BPF_EXIT_INSN(),
8261 		},
8262 		.result = ACCEPT,
8263 		.retval = 1,
8264 	},
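	/* Narrow loads of the 64-bit sample_period field: on big-endian the
	 * offset is bumped so the load still covers the low-order bytes of
	 * the value.
	 */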
8265 	{
8266 		"check bpf_perf_event_data->sample_period byte load permitted",
8267 		.insns = {
8268 			BPF_MOV64_IMM(BPF_REG_0, 0),
8269 #if __BYTE_ORDER == __LITTLE_ENDIAN
8270 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
8271 				    offsetof(struct bpf_perf_event_data, sample_period)),
8272 #else
8273 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
8274 				    offsetof(struct bpf_perf_event_data, sample_period) + 7),
8275 #endif
8276 			BPF_EXIT_INSN(),
8277 		},
8278 		.result = ACCEPT,
8279 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
8280 	},
8281 	{
8282 		"check bpf_perf_event_data->sample_period half load permitted",
8283 		.insns = {
8284 			BPF_MOV64_IMM(BPF_REG_0, 0),
8285 #if __BYTE_ORDER == __LITTLE_ENDIAN
8286 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
8287 				    offsetof(struct bpf_perf_event_data, sample_period)),
8288 #else
8289 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
8290 				    offsetof(struct bpf_perf_event_data, sample_period) + 6),
8291 #endif
8292 			BPF_EXIT_INSN(),
8293 		},
8294 		.result = ACCEPT,
8295 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
8296 	},
8297 	{
8298 		"check bpf_perf_event_data->sample_period word load permitted",
8299 		.insns = {
8300 			BPF_MOV64_IMM(BPF_REG_0, 0),
8301 #if __BYTE_ORDER == __LITTLE_ENDIAN
8302 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8303 				    offsetof(struct bpf_perf_event_data, sample_period)),
8304 #else
8305 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8306 				    offsetof(struct bpf_perf_event_data, sample_period) + 4),
8307 #endif
8308 			BPF_EXIT_INSN(),
8309 		},
8310 		.result = ACCEPT,
8311 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
8312 	},
8313 	{
8314 		"check bpf_perf_event_data->sample_period dword load permitted",
8315 		.insns = {
8316 			BPF_MOV64_IMM(BPF_REG_0, 0),
8317 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
8318 				    offsetof(struct bpf_perf_event_data, sample_period)),
8319 			BPF_EXIT_INSN(),
8320 		},
8321 		.result = ACCEPT,
8322 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
8323 	},
8324 	{
8325 		"check skb->data half load not permitted",
8326 		.insns = {
8327 			BPF_MOV64_IMM(BPF_REG_0, 0),
8328 #if __BYTE_ORDER == __LITTLE_ENDIAN
8329 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
8330 				    offsetof(struct __sk_buff, data)),
8331 #else
8332 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
8333 				    offsetof(struct __sk_buff, data) + 2),
8334 #endif
8335 			BPF_EXIT_INSN(),
8336 		},
8337 		.result = REJECT,
8338 		.errstr = "invalid bpf_context access",
8339 	},
8340 	{
8341 		"check skb->tc_classid half load not permitted for lwt prog",
8342 		.insns = {
8343 			BPF_MOV64_IMM(BPF_REG_0, 0),
8344 #if __BYTE_ORDER == __LITTLE_ENDIAN
8345 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
8346 				    offsetof(struct __sk_buff, tc_classid)),
8347 #else
8348 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
8349 				    offsetof(struct __sk_buff, tc_classid) + 2),
8350 #endif
8351 			BPF_EXIT_INSN(),
8352 		},
8353 		.result = REJECT,
8354 		.errstr = "invalid bpf_context access",
8355 		.prog_type = BPF_PROG_TYPE_LWT_IN,
8356 	},
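	/* Most of the "mixing signed and unsigned" tests write -8 to the
	 * stack, read it back as an unknown scalar and then try to bound it
	 * with a mix of unsigned and signed comparisons before using it as a
	 * map-value offset.  Unless the checks really pin the minimum, the
	 * verifier rejects with "unbounded min value"; the few ACCEPT
	 * variants do establish valid bounds.
	 */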
8357 	{
8358 		"bounds checks mixing signed and unsigned, positive bounds",
8359 		.insns = {
8360 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8361 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8362 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8363 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8364 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8365 				     BPF_FUNC_map_lookup_elem),
8366 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
8367 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8368 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8369 			BPF_MOV64_IMM(BPF_REG_2, 2),
8370 			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3),
8371 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 4, 2),
8372 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8373 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8374 			BPF_MOV64_IMM(BPF_REG_0, 0),
8375 			BPF_EXIT_INSN(),
8376 		},
8377 		.fixup_map_hash_8b = { 3 },
8378 		.errstr = "unbounded min value",
8379 		.result = REJECT,
8380 	},
8381 	{
8382 		"bounds checks mixing signed and unsigned",
8383 		.insns = {
8384 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8385 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8386 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8387 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8388 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8389 				     BPF_FUNC_map_lookup_elem),
8390 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
8391 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8392 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8393 			BPF_MOV64_IMM(BPF_REG_2, -1),
8394 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
8395 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8396 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8397 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8398 			BPF_MOV64_IMM(BPF_REG_0, 0),
8399 			BPF_EXIT_INSN(),
8400 		},
8401 		.fixup_map_hash_8b = { 3 },
8402 		.errstr = "unbounded min value",
8403 		.result = REJECT,
8404 	},
8405 	{
8406 		"bounds checks mixing signed and unsigned, variant 2",
8407 		.insns = {
8408 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8409 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8410 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8411 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8412 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8413 				     BPF_FUNC_map_lookup_elem),
8414 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8415 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8416 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8417 			BPF_MOV64_IMM(BPF_REG_2, -1),
8418 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
8419 			BPF_MOV64_IMM(BPF_REG_8, 0),
8420 			BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_1),
8421 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
8422 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
8423 			BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
8424 			BPF_MOV64_IMM(BPF_REG_0, 0),
8425 			BPF_EXIT_INSN(),
8426 		},
8427 		.fixup_map_hash_8b = { 3 },
8428 		.errstr = "unbounded min value",
8429 		.result = REJECT,
8430 	},
8431 	{
8432 		"bounds checks mixing signed and unsigned, variant 3",
8433 		.insns = {
8434 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8435 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8436 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8437 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8438 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8439 				     BPF_FUNC_map_lookup_elem),
8440 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
8441 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8442 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8443 			BPF_MOV64_IMM(BPF_REG_2, -1),
8444 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4),
8445 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
8446 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
8447 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
8448 			BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
8449 			BPF_MOV64_IMM(BPF_REG_0, 0),
8450 			BPF_EXIT_INSN(),
8451 		},
8452 		.fixup_map_hash_8b = { 3 },
8453 		.errstr = "unbounded min value",
8454 		.result = REJECT,
8455 	},
8456 	{
8457 		"bounds checks mixing signed and unsigned, variant 4",
8458 		.insns = {
8459 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8460 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8461 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8462 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8463 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8464 				     BPF_FUNC_map_lookup_elem),
8465 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
8466 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8467 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8468 			BPF_MOV64_IMM(BPF_REG_2, 1),
8469 			BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
8470 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8471 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8472 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8473 			BPF_MOV64_IMM(BPF_REG_0, 0),
8474 			BPF_EXIT_INSN(),
8475 		},
8476 		.fixup_map_hash_8b = { 3 },
8477 		.result = ACCEPT,
8478 	},
8479 	{
8480 		"bounds checks mixing signed and unsigned, variant 5",
8481 		.insns = {
8482 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8483 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8484 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8485 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8486 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8487 				     BPF_FUNC_map_lookup_elem),
8488 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8489 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8490 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8491 			BPF_MOV64_IMM(BPF_REG_2, -1),
8492 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
8493 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 4),
8494 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4),
8495 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
8496 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8497 			BPF_MOV64_IMM(BPF_REG_0, 0),
8498 			BPF_EXIT_INSN(),
8499 		},
8500 		.fixup_map_hash_8b = { 3 },
8501 		.errstr = "unbounded min value",
8502 		.result = REJECT,
8503 	},
8504 	{
8505 		"bounds checks mixing signed and unsigned, variant 6",
8506 		.insns = {
8507 			BPF_MOV64_IMM(BPF_REG_2, 0),
8508 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
8509 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512),
8510 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8511 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16),
8512 			BPF_MOV64_IMM(BPF_REG_6, -1),
8513 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5),
8514 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_4, 1, 4),
8515 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
8516 			BPF_MOV64_IMM(BPF_REG_5, 0),
8517 			BPF_ST_MEM(BPF_H, BPF_REG_10, -512, 0),
8518 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8519 				     BPF_FUNC_skb_load_bytes),
8520 			BPF_MOV64_IMM(BPF_REG_0, 0),
8521 			BPF_EXIT_INSN(),
8522 		},
8523 		.errstr = "R4 min value is negative, either use unsigned",
8524 		.result = REJECT,
8525 	},
8526 	{
8527 		"bounds checks mixing signed and unsigned, variant 7",
8528 		.insns = {
8529 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8530 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8531 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8532 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8533 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8534 				     BPF_FUNC_map_lookup_elem),
8535 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
8536 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8537 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8538 			BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024),
8539 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
8540 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8541 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8542 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8543 			BPF_MOV64_IMM(BPF_REG_0, 0),
8544 			BPF_EXIT_INSN(),
8545 		},
8546 		.fixup_map_hash_8b = { 3 },
8547 		.result = ACCEPT,
8548 	},
8549 	{
8550 		"bounds checks mixing signed and unsigned, variant 8",
8551 		.insns = {
8552 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8553 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8554 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8555 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8556 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8557 				     BPF_FUNC_map_lookup_elem),
8558 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8559 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8560 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8561 			BPF_MOV64_IMM(BPF_REG_2, -1),
8562 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
8563 			BPF_MOV64_IMM(BPF_REG_0, 0),
8564 			BPF_EXIT_INSN(),
8565 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8566 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8567 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8568 			BPF_MOV64_IMM(BPF_REG_0, 0),
8569 			BPF_EXIT_INSN(),
8570 		},
8571 		.fixup_map_hash_8b = { 3 },
8572 		.errstr = "unbounded min value",
8573 		.result = REJECT,
8574 	},
8575 	{
8576 		"bounds checks mixing signed and unsigned, variant 9",
8577 		.insns = {
8578 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8579 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8580 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8581 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8582 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8583 				     BPF_FUNC_map_lookup_elem),
8584 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
8585 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8586 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8587 			BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL),
8588 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
8589 			BPF_MOV64_IMM(BPF_REG_0, 0),
8590 			BPF_EXIT_INSN(),
8591 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8592 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8593 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8594 			BPF_MOV64_IMM(BPF_REG_0, 0),
8595 			BPF_EXIT_INSN(),
8596 		},
8597 		.fixup_map_hash_8b = { 3 },
8598 		.result = ACCEPT,
8599 	},
8600 	{
8601 		"bounds checks mixing signed and unsigned, variant 10",
8602 		.insns = {
8603 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8604 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8605 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8606 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8607 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8608 				     BPF_FUNC_map_lookup_elem),
8609 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8610 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8611 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8612 			BPF_MOV64_IMM(BPF_REG_2, 0),
8613 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
8614 			BPF_MOV64_IMM(BPF_REG_0, 0),
8615 			BPF_EXIT_INSN(),
8616 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8617 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8618 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8619 			BPF_MOV64_IMM(BPF_REG_0, 0),
8620 			BPF_EXIT_INSN(),
8621 		},
8622 		.fixup_map_hash_8b = { 3 },
8623 		.errstr = "unbounded min value",
8624 		.result = REJECT,
8625 	},
8626 	{
8627 		"bounds checks mixing signed and unsigned, variant 11",
8628 		.insns = {
8629 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8630 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8631 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8632 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8633 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8634 				     BPF_FUNC_map_lookup_elem),
8635 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8636 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8637 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8638 			BPF_MOV64_IMM(BPF_REG_2, -1),
8639 			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
8640 			/* Dead branch. */
8641 			BPF_MOV64_IMM(BPF_REG_0, 0),
8642 			BPF_EXIT_INSN(),
8643 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8644 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8645 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8646 			BPF_MOV64_IMM(BPF_REG_0, 0),
8647 			BPF_EXIT_INSN(),
8648 		},
8649 		.fixup_map_hash_8b = { 3 },
8650 		.errstr = "unbounded min value",
8651 		.result = REJECT,
8652 	},
8653 	{
8654 		"bounds checks mixing signed and unsigned, variant 12",
8655 		.insns = {
8656 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8657 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8658 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8659 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8660 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8661 				     BPF_FUNC_map_lookup_elem),
8662 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8663 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8664 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8665 			BPF_MOV64_IMM(BPF_REG_2, -6),
8666 			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
8667 			BPF_MOV64_IMM(BPF_REG_0, 0),
8668 			BPF_EXIT_INSN(),
8669 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8670 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8671 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8672 			BPF_MOV64_IMM(BPF_REG_0, 0),
8673 			BPF_EXIT_INSN(),
8674 		},
8675 		.fixup_map_hash_8b = { 3 },
8676 		.errstr = "unbounded min value",
8677 		.result = REJECT,
8678 	},
8679 	{
8680 		"bounds checks mixing signed and unsigned, variant 13",
8681 		.insns = {
8682 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8683 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8684 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8685 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8686 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8687 				     BPF_FUNC_map_lookup_elem),
8688 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
8689 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8690 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8691 			BPF_MOV64_IMM(BPF_REG_2, 2),
8692 			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
8693 			BPF_MOV64_IMM(BPF_REG_7, 1),
8694 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 0, 2),
8695 			BPF_MOV64_IMM(BPF_REG_0, 0),
8696 			BPF_EXIT_INSN(),
8697 			BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_1),
8698 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 4, 2),
8699 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_7),
8700 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8701 			BPF_MOV64_IMM(BPF_REG_0, 0),
8702 			BPF_EXIT_INSN(),
8703 		},
8704 		.fixup_map_hash_8b = { 3 },
8705 		.errstr = "unbounded min value",
8706 		.result = REJECT,
8707 	},
8708 	{
8709 		"bounds checks mixing signed and unsigned, variant 14",
8710 		.insns = {
8711 			BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
8712 				    offsetof(struct __sk_buff, mark)),
8713 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8714 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8715 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8716 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8717 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8718 				     BPF_FUNC_map_lookup_elem),
8719 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
8720 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8721 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8722 			BPF_MOV64_IMM(BPF_REG_2, -1),
8723 			BPF_MOV64_IMM(BPF_REG_8, 2),
8724 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 42, 6),
8725 			BPF_JMP_REG(BPF_JSGT, BPF_REG_8, BPF_REG_1, 3),
8726 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8727 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8728 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8729 			BPF_MOV64_IMM(BPF_REG_0, 0),
8730 			BPF_EXIT_INSN(),
8731 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3),
8732 			BPF_JMP_IMM(BPF_JA, 0, 0, -7),
8733 		},
8734 		.fixup_map_hash_8b = { 4 },
8735 		.errstr = "unbounded min value",
8736 		.result = REJECT,
8737 	},
8738 	{
8739 		"bounds checks mixing signed and unsigned, variant 15",
8740 		.insns = {
8741 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8742 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8743 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8744 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8745 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8746 				     BPF_FUNC_map_lookup_elem),
8747 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8748 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8749 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8750 			BPF_MOV64_IMM(BPF_REG_2, -6),
8751 			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
8752 			BPF_MOV64_IMM(BPF_REG_0, 0),
8753 			BPF_EXIT_INSN(),
8754 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8755 			BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 1, 2),
8756 			BPF_MOV64_IMM(BPF_REG_0, 0),
8757 			BPF_EXIT_INSN(),
8758 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8759 			BPF_MOV64_IMM(BPF_REG_0, 0),
8760 			BPF_EXIT_INSN(),
8761 		},
8762 		.fixup_map_hash_8b = { 3 },
8763 		.errstr = "unbounded min value",
8764 		.result = REJECT,
8765 		.result_unpriv = REJECT,
8766 	},
8767 	{
8768 		"subtraction bounds (map value) variant 1",
8769 		.insns = {
8770 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8771 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8772 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8773 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8774 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8775 				     BPF_FUNC_map_lookup_elem),
8776 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8777 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8778 			BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7),
8779 			BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
8780 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5),
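			/* r1 - r3 may wrap below zero, so after the shift by
			 * 56 the verifier still sees an offset of up to 0xff,
			 * beyond the 8-byte value.
			 */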
8781 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
8782 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56),
8783 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8784 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8785 			BPF_EXIT_INSN(),
8786 			BPF_MOV64_IMM(BPF_REG_0, 0),
8787 			BPF_EXIT_INSN(),
8788 		},
8789 		.fixup_map_hash_8b = { 3 },
8790 		.errstr = "R0 max value is outside of the array range",
8791 		.result = REJECT,
8792 	},
8793 	{
8794 		"subtraction bounds (map value) variant 2",
8795 		.insns = {
8796 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8797 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8798 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8799 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8800 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8801 				     BPF_FUNC_map_lookup_elem),
8802 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
8803 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8804 			BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 6),
8805 			BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
8806 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 4),
8807 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
8808 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8809 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8810 			BPF_EXIT_INSN(),
8811 			BPF_MOV64_IMM(BPF_REG_0, 0),
8812 			BPF_EXIT_INSN(),
8813 		},
8814 		.fixup_map_hash_8b = { 3 },
8815 		.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
8816 		.result = REJECT,
8817 	},
8818 	{
8819 		"bounds check based on zero-extended MOV",
8820 		.insns = {
8821 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8822 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8823 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8824 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8825 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8826 				     BPF_FUNC_map_lookup_elem),
8827 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8828 			/* r2 = 0x0000'0000'ffff'ffff */
8829 			BPF_MOV32_IMM(BPF_REG_2, 0xffffffff),
8830 			/* r2 = 0 */
8831 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
8832 			/* no-op */
8833 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
8834 			/* access at offset 0 */
8835 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8836 			/* exit */
8837 			BPF_MOV64_IMM(BPF_REG_0, 0),
8838 			BPF_EXIT_INSN(),
8839 		},
8840 		.fixup_map_hash_8b = { 3 },
8841 		.result = ACCEPT
8842 	},
8843 	{
8844 		"bounds check based on sign-extended MOV. test1",
8845 		.insns = {
8846 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8847 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8848 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8849 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8850 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8851 				     BPF_FUNC_map_lookup_elem),
8852 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8853 			/* r2 = 0xffff'ffff'ffff'ffff */
8854 			BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
8855 			/* r2 = 0xffff'ffff */
8856 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
8857 			/* r0 = <oob pointer> */
8858 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
8859 			/* access to OOB pointer */
8860 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8861 			/* exit */
8862 			BPF_MOV64_IMM(BPF_REG_0, 0),
8863 			BPF_EXIT_INSN(),
8864 		},
8865 		.fixup_map_hash_8b = { 3 },
8866 		.errstr = "map_value pointer and 4294967295",
8867 		.result = REJECT
8868 	},
8869 	{
8870 		"bounds check based on sign-extended MOV. test2",
8871 		.insns = {
8872 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8873 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8874 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8875 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8876 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8877 				     BPF_FUNC_map_lookup_elem),
8878 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8879 			/* r2 = 0xffff'ffff'ffff'ffff */
8880 			BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
8881 			/* r2 = 0xfff'ffff */
8882 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36),
8883 			/* r0 = <oob pointer> */
8884 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
8885 			/* access to OOB pointer */
8886 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8887 			/* exit */
8888 			BPF_MOV64_IMM(BPF_REG_0, 0),
8889 			BPF_EXIT_INSN(),
8890 		},
8891 		.fixup_map_hash_8b = { 3 },
8892 		.errstr = "R0 min value is outside of the array range",
8893 		.result = REJECT
8894 	},
8895 	{
8896 		"bounds check based on reg_off + var_off + insn_off. test1",
8897 		.insns = {
8898 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
8899 				    offsetof(struct __sk_buff, mark)),
8900 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8901 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8902 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8903 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8904 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8905 				     BPF_FUNC_map_lookup_elem),
8906 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8907 			BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
8908 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1),
8909 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
8910 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
8911 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
8912 			BPF_MOV64_IMM(BPF_REG_0, 0),
8913 			BPF_EXIT_INSN(),
8914 		},
8915 		.fixup_map_hash_8b = { 4 },
8916 		.errstr = "value_size=8 off=1073741825",
8917 		.result = REJECT,
8918 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
8919 	},
8920 	{
8921 		"bounds check based on reg_off + var_off + insn_off. test2",
8922 		.insns = {
8923 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
8924 				    offsetof(struct __sk_buff, mark)),
8925 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8926 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8927 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8928 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8929 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8930 				     BPF_FUNC_map_lookup_elem),
8931 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8932 			BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
8933 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1),
8934 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
8935 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
8936 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
8937 			BPF_MOV64_IMM(BPF_REG_0, 0),
8938 			BPF_EXIT_INSN(),
8939 		},
8940 		.fixup_map_hash_8b = { 4 },
8941 		.errstr = "value 1073741823",
8942 		.result = REJECT,
8943 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
8944 	},
8945 	{
8946 		"bounds check after truncation of non-boundary-crossing range",
8947 		.insns = {
8948 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8949 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8950 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8951 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8952 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8953 				     BPF_FUNC_map_lookup_elem),
8954 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8955 			/* r1 = [0x00, 0xff] */
8956 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8957 			BPF_MOV64_IMM(BPF_REG_2, 1),
8958 			/* r2 = 0x10'0000'0000 */
8959 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36),
8960 			/* r1 = [0x10'0000'0000, 0x10'0000'00ff] */
8961 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
8962 			/* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */
8963 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
8964 			/* r1 = [0x00, 0xff] */
8965 			BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff),
8966 			/* r1 = 0 */
8967 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
8968 			/* no-op */
8969 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8970 			/* access at offset 0 */
8971 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8972 			/* exit */
8973 			BPF_MOV64_IMM(BPF_REG_0, 0),
8974 			BPF_EXIT_INSN(),
8975 		},
8976 		.fixup_map_hash_8b = { 3 },
8977 		.result = ACCEPT
8978 	},
8979 	{
8980 		"bounds check after truncation of boundary-crossing range (1)",
8981 		.insns = {
8982 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8983 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8984 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8985 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8986 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8987 				     BPF_FUNC_map_lookup_elem),
8988 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8989 			/* r1 = [0x00, 0xff] */
8990 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
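			/* r1 = [0x7fff'ffc0, 0x8000'00bf] */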
8991 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
8992 			/* r1 = [0xffff'ff80, 0x1'0000'007f] */
8993 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
8994 			/* r1 = [0xffff'ff80, 0xffff'ffff] or
8995 			 *      [0x0000'0000, 0x0000'007f]
8996 			 */
8997 			BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0),
8998 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
8999 			/* r1 = [0x00, 0xff] or
9000 			 *      [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
9001 			 */
9002 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
9003 			/* r1 = 0 or
9004 			 *      [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
9005 			 */
9006 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
9007 			/* no-op or OOB pointer computation */
9008 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9009 			/* potentially OOB access */
9010 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9011 			/* exit */
9012 			BPF_MOV64_IMM(BPF_REG_0, 0),
9013 			BPF_EXIT_INSN(),
9014 		},
9015 		.fixup_map_hash_8b = { 3 },
9016 		/* not actually fully unbounded, but the bound is very high */
9017 		.errstr = "R0 unbounded memory access",
9018 		.result = REJECT
9019 	},
9020 	{
9021 		"bounds check after truncation of boundary-crossing range (2)",
9022 		.insns = {
9023 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9024 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9025 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9026 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9027 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9028 				     BPF_FUNC_map_lookup_elem),
9029 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
9030 			/* r1 = [0x00, 0xff] */
9031 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
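			/* r1 = [0x7fff'ffc0, 0x8000'00bf] */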
9032 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
9033 			/* r1 = [0xffff'ff80, 0x1'0000'007f] */
9034 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
9035 			/* r1 = [0xffff'ff80, 0xffff'ffff] or
9036 			 *      [0x0000'0000, 0x0000'007f]
9037 			 * difference to previous test: truncation via MOV32
9038 			 * instead of ALU32.
9039 			 */
9040 			BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),
9041 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
9042 			/* r1 = [0x00, 0xff] or
9043 			 *      [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
9044 			 */
9045 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
9046 			/* r1 = 0 or
9047 			 *      [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
9048 			 */
9049 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
9050 			/* no-op or OOB pointer computation */
9051 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9052 			/* potentially OOB access */
9053 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9054 			/* exit */
9055 			BPF_MOV64_IMM(BPF_REG_0, 0),
9056 			BPF_EXIT_INSN(),
9057 		},
9058 		.fixup_map_hash_8b = { 3 },
9059 		/* not actually fully unbounded, but the bound is very high */
9060 		.errstr = "R0 unbounded memory access",
9061 		.result = REJECT
9062 	},
9063 	{
9064 		"bounds check after wrapping 32-bit addition",
9065 		.insns = {
9066 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9067 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9068 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9069 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9070 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9071 				     BPF_FUNC_map_lookup_elem),
9072 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
9073 			/* r1 = 0x7fff'ffff */
9074 			BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff),
9075 			/* r1 = 0xffff'fffe */
9076 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
9077 			/* r1 = 0 */
9078 			BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2),
9079 			/* no-op */
9080 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9081 			/* access at offset 0 */
9082 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9083 			/* exit */
9084 			BPF_MOV64_IMM(BPF_REG_0, 0),
9085 			BPF_EXIT_INSN(),
9086 		},
9087 		.fixup_map_hash_8b = { 3 },
9088 		.result = ACCEPT
9089 	},
9090 	{
9091 		"bounds check after shift with oversized count operand",
9092 		.insns = {
9093 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9094 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9095 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9096 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9097 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9098 				     BPF_FUNC_map_lookup_elem),
9099 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
9100 			BPF_MOV64_IMM(BPF_REG_2, 32),
9101 			BPF_MOV64_IMM(BPF_REG_1, 1),
9102 			/* r1 = (u32)1 << (u32)32 = ? */
9103 			BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
9104 			/* r1 = [0x0000, 0xffff] */
9105 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff),
9106 			/* computes unknown pointer, potentially OOB */
9107 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9108 			/* potentially OOB access */
9109 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9110 			/* exit */
9111 			BPF_MOV64_IMM(BPF_REG_0, 0),
9112 			BPF_EXIT_INSN(),
9113 		},
9114 		.fixup_map_hash_8b = { 3 },
9115 		.errstr = "R0 max value is outside of the array range",
9116 		.result = REJECT
9117 	},
9118 	{
9119 		"bounds check after right shift of maybe-negative number",
9120 		.insns = {
9121 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9122 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9123 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9124 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9125 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9126 				     BPF_FUNC_map_lookup_elem),
9127 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
9128 			/* r1 = [0x00, 0xff] */
9129 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
9130 			/* r1 = [-0x01, 0xfe] */
9131 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
9132 			/* r1 = 0 or 0xff'ffff'ffff'ffff */
9133 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
9134 			/* r1 = 0 or 0xffff'ffff'ffff */
9135 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
9136 			/* computes unknown pointer, potentially OOB */
9137 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9138 			/* potentially OOB access */
9139 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9140 			/* exit */
9141 			BPF_MOV64_IMM(BPF_REG_0, 0),
9142 			BPF_EXIT_INSN(),
9143 		},
9144 		.fixup_map_hash_8b = { 3 },
9145 		.errstr = "R0 unbounded memory access",
9146 		.result = REJECT
9147 	},
9148 	{
9149 		"bounds check map access with off+size signed 32bit overflow. test1",
9150 		.insns = {
9151 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9152 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9153 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9154 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9155 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9156 				     BPF_FUNC_map_lookup_elem),
9157 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
9158 			BPF_EXIT_INSN(),
9159 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe),
9160 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
9161 			BPF_JMP_A(0),
9162 			BPF_EXIT_INSN(),
9163 		},
9164 		.fixup_map_hash_8b = { 3 },
9165 		.errstr = "map_value pointer and 2147483646",
9166 		.result = REJECT
9167 	},
9168 	{
9169 		"bounds check map access with off+size signed 32bit overflow. test2",
9170 		.insns = {
9171 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9172 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9173 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9174 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9175 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9176 				     BPF_FUNC_map_lookup_elem),
9177 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
9178 			BPF_EXIT_INSN(),
9179 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
9180 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
9181 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
9182 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
9183 			BPF_JMP_A(0),
9184 			BPF_EXIT_INSN(),
9185 		},
9186 		.fixup_map_hash_8b = { 3 },
9187 		.errstr = "pointer offset 1073741822",
9188 		.result = REJECT
9189 	},
9190 	{
9191 		"bounds check map access with off+size signed 32bit overflow. test3",
9192 		.insns = {
9193 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9194 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9195 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9196 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9197 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9198 				     BPF_FUNC_map_lookup_elem),
9199 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
9200 			BPF_EXIT_INSN(),
9201 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
9202 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
9203 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
9204 			BPF_JMP_A(0),
9205 			BPF_EXIT_INSN(),
9206 		},
9207 		.fixup_map_hash_8b = { 3 },
9208 		.errstr = "pointer offset -1073741822",
9209 		.result = REJECT
9210 	},
9211 	{
9212 		"bounds check map access with off+size signed 32bit overflow. test4",
9213 		.insns = {
9214 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9215 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9216 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9217 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9218 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9219 				     BPF_FUNC_map_lookup_elem),
9220 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
9221 			BPF_EXIT_INSN(),
9222 			BPF_MOV64_IMM(BPF_REG_1, 1000000),
9223 			BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000),
9224 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9225 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
9226 			BPF_JMP_A(0),
9227 			BPF_EXIT_INSN(),
9228 		},
9229 		.fixup_map_hash_8b = { 3 },
9230 		.errstr = "map_value pointer and 1000000000000",
9231 		.result = REJECT
9232 	},
9233 	{
9234 		"pointer/scalar confusion in state equality check (way 1)",
9235 		.insns = {
9236 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9237 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9238 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9239 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9240 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9241 				     BPF_FUNC_map_lookup_elem),
9242 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
9243 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
9244 			BPF_JMP_A(1),
9245 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
9246 			BPF_JMP_A(0),
9247 			BPF_EXIT_INSN(),
9248 		},
9249 		.fixup_map_hash_8b = { 3 },
9250 		.result = ACCEPT,
9251 		.retval = POINTER_VALUE,
9252 		.result_unpriv = REJECT,
9253 		.errstr_unpriv = "R0 leaks addr as return value"
9254 	},
9255 	{
9256 		"pointer/scalar confusion in state equality check (way 2)",
9257 		.insns = {
9258 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9259 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9260 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9261 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9262 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9263 				     BPF_FUNC_map_lookup_elem),
9264 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
9265 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
9266 			BPF_JMP_A(1),
9267 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
9268 			BPF_EXIT_INSN(),
9269 		},
9270 		.fixup_map_hash_8b = { 3 },
9271 		.result = ACCEPT,
9272 		.retval = POINTER_VALUE,
9273 		.result_unpriv = REJECT,
9274 		.errstr_unpriv = "R0 leaks addr as return value"
9275 	},
9276 	{
9277 		"variable-offset ctx access",
9278 		.insns = {
9279 			/* Get an unknown value */
9280 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
9281 			/* Make it small and 4-byte aligned */
9282 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
9283 			/* add it to skb.  We now have either &skb->len or
9284 			 * &skb->pkt_type, but we don't know which
9285 			 */
9286 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
9287 			/* dereference it */
9288 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
9289 			BPF_EXIT_INSN(),
9290 		},
9291 		.errstr = "variable ctx access var_off=(0x0; 0x4)",
9292 		.result = REJECT,
9293 		.prog_type = BPF_PROG_TYPE_LWT_IN,
9294 	},
9295 	{
9296 		"variable-offset stack access",
9297 		.insns = {
9298 			/* Fill the top 8 bytes of the stack */
9299 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9300 			/* Get an unknown value */
9301 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
9302 			/* Make it small and 4-byte aligned */
9303 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
9304 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
9305 			/* add it to fp.  We now have either fp-4 or fp-8, but
9306 			 * we don't know which
9307 			 */
9308 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
9309 			/* dereference it */
9310 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
9311 			BPF_EXIT_INSN(),
9312 		},
9313 		.errstr = "variable stack access var_off=(0xfffffffffffffff8; 0x4)",
9314 		.result = REJECT,
9315 		.prog_type = BPF_PROG_TYPE_LWT_IN,
9316 	},
9317 	{
9318 		"indirect variable-offset stack access",
9319 		.insns = {
9320 			/* Fill the top 8 bytes of the stack */
9321 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9322 			/* Get an unknown value */
9323 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
9324 			/* Make it small and 4-byte aligned */
9325 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
9326 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
9327 			/* add it to fp.  We now have either fp-4 or fp-8, but
9328 			 * we don't know which
9329 			 */
9330 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
9331 			/* dereference it indirectly */
9332 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9333 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9334 				     BPF_FUNC_map_lookup_elem),
9335 			BPF_MOV64_IMM(BPF_REG_0, 0),
9336 			BPF_EXIT_INSN(),
9337 		},
9338 		.fixup_map_hash_8b = { 5 },
9339 		.errstr = "variable stack read R2",
9340 		.result = REJECT,
9341 		.prog_type = BPF_PROG_TYPE_LWT_IN,
9342 	},
9343 	{
9344 		"direct stack access with 32-bit wraparound. test1",
9345 		.insns = {
9346 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
9347 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
9348 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
9349 			BPF_MOV32_IMM(BPF_REG_0, 0),
9350 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
9351 			BPF_EXIT_INSN()
9352 		},
9353 		.errstr = "fp pointer and 2147483647",
9354 		.result = REJECT
9355 	},
9356 	{
9357 		"direct stack access with 32-bit wraparound. test2",
9358 		.insns = {
9359 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
9360 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
9361 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
9362 			BPF_MOV32_IMM(BPF_REG_0, 0),
9363 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
9364 			BPF_EXIT_INSN()
9365 		},
9366 		.errstr = "fp pointer and 1073741823",
9367 		.result = REJECT
9368 	},
9369 	{
9370 		"direct stack access with 32-bit wraparound. test3",
9371 		.insns = {
9372 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
9373 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
9374 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
9375 			BPF_MOV32_IMM(BPF_REG_0, 0),
9376 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
9377 			BPF_EXIT_INSN()
9378 		},
9379 		.errstr = "fp pointer offset 1073741822",
9380 		.result = REJECT
9381 	},
9382 	{
9383 		"liveness pruning and write screening",
9384 		.insns = {
9385 			/* Get an unknown value */
9386 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
9387 			/* branch conditions teach us nothing about R2 */
9388 			BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
9389 			BPF_MOV64_IMM(BPF_REG_0, 0),
9390 			BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
9391 			BPF_MOV64_IMM(BPF_REG_0, 0),
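			/* if both jumps above are taken, r0 reaches the exit
			 * below without ever having been written.
			 */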
9392 			BPF_EXIT_INSN(),
9393 		},
9394 		.errstr = "R0 !read_ok",
9395 		.result = REJECT,
9396 		.prog_type = BPF_PROG_TYPE_LWT_IN,
9397 	},
9398 	{
9399 		"varlen_map_value_access pruning",
9400 		.insns = {
9401 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9402 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9403 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9404 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9405 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9406 				     BPF_FUNC_map_lookup_elem),
9407 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
9408 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
9409 			BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
9410 			BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
9411 			BPF_MOV32_IMM(BPF_REG_1, 0),
9412 			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
9413 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9414 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
9415 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
9416 				   offsetof(struct test_val, foo)),
9417 			BPF_EXIT_INSN(),
9418 		},
9419 		.fixup_map_hash_48b = { 3 },
9420 		.errstr_unpriv = "R0 leaks addr",
9421 		.errstr = "R0 unbounded memory access",
9422 		.result_unpriv = REJECT,
9423 		.result = REJECT,
9424 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9425 	},
9426 	{
9427 		"invalid 64-bit BPF_END",
9428 		.insns = {
9429 			BPF_MOV32_IMM(BPF_REG_0, 0),
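			/* hand-encoded BPF_ALU64 | BPF_END | BPF_TO_LE:
			 * BPF_END only exists in the 32-bit ALU class, so
			 * opcode 0xd7 must be rejected.
			 */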
9430 			{
9431 				.code  = BPF_ALU64 | BPF_END | BPF_TO_LE,
9432 				.dst_reg = BPF_REG_0,
9433 				.src_reg = 0,
9434 				.off   = 0,
9435 				.imm   = 32,
9436 			},
9437 			BPF_EXIT_INSN(),
9438 		},
9439 		.errstr = "unknown opcode d7",
9440 		.result = REJECT,
9441 	},
9442 	{
9443 		"XDP, using ifindex from netdev",
9444 		.insns = {
9445 			BPF_MOV64_IMM(BPF_REG_0, 0),
9446 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9447 				    offsetof(struct xdp_md, ingress_ifindex)),
9448 			BPF_JMP_IMM(BPF_JLT, BPF_REG_2, 1, 1),
9449 			BPF_MOV64_IMM(BPF_REG_0, 1),
9450 			BPF_EXIT_INSN(),
9451 		},
9452 		.result = ACCEPT,
9453 		.prog_type = BPF_PROG_TYPE_XDP,
9454 		.retval = 1,
9455 	},
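	/* The meta access tests exercise the metadata area that sits between
	 * data_meta and data: pointers derived from data_meta must be
	 * bounds-checked against data before any load.
	 */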
9456 	{
9457 		"meta access, test1",
9458 		.insns = {
9459 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9460 				    offsetof(struct xdp_md, data_meta)),
9461 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9462 				    offsetof(struct xdp_md, data)),
9463 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
9464 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9465 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
9466 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9467 			BPF_MOV64_IMM(BPF_REG_0, 0),
9468 			BPF_EXIT_INSN(),
9469 		},
9470 		.result = ACCEPT,
9471 		.prog_type = BPF_PROG_TYPE_XDP,
9472 	},
9473 	{
9474 		"meta access, test2",
9475 		.insns = {
9476 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9477 				    offsetof(struct xdp_md, data_meta)),
9478 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9479 				    offsetof(struct xdp_md, data)),
9480 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
9481 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 8),
9482 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
9483 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
9484 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
9485 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9486 			BPF_MOV64_IMM(BPF_REG_0, 0),
9487 			BPF_EXIT_INSN(),
9488 		},
9489 		.result = REJECT,
9490 		.errstr = "invalid access to packet, off=-8",
9491 		.prog_type = BPF_PROG_TYPE_XDP,
9492 	},
9493 	{
9494 		"meta access, test3",
9495 		.insns = {
9496 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9497 				    offsetof(struct xdp_md, data_meta)),
9498 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9499 				    offsetof(struct xdp_md, data_end)),
9500 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
9501 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9502 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
9503 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9504 			BPF_MOV64_IMM(BPF_REG_0, 0),
9505 			BPF_EXIT_INSN(),
9506 		},
9507 		.result = REJECT,
9508 		.errstr = "invalid access to packet",
9509 		.prog_type = BPF_PROG_TYPE_XDP,
9510 	},
9511 	{
9512 		"meta access, test4",
9513 		.insns = {
9514 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9515 				    offsetof(struct xdp_md, data_meta)),
9516 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9517 				    offsetof(struct xdp_md, data_end)),
9518 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
9519 				    offsetof(struct xdp_md, data)),
9520 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
9521 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9522 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
9523 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9524 			BPF_MOV64_IMM(BPF_REG_0, 0),
9525 			BPF_EXIT_INSN(),
9526 		},
9527 		.result = REJECT,
9528 		.errstr = "invalid access to packet",
9529 		.prog_type = BPF_PROG_TYPE_XDP,
9530 	},
9531 	{
9532 		"meta access, test5",
9533 		.insns = {
9534 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9535 				    offsetof(struct xdp_md, data_meta)),
9536 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
9537 				    offsetof(struct xdp_md, data)),
9538 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
9539 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9540 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_4, 3),
9541 			BPF_MOV64_IMM(BPF_REG_2, -8),
9542 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9543 				     BPF_FUNC_xdp_adjust_meta),
9544 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
9545 			BPF_MOV64_IMM(BPF_REG_0, 0),
9546 			BPF_EXIT_INSN(),
9547 		},
9548 		.result = REJECT,
9549 		.errstr = "R3 !read_ok",
9550 		.prog_type = BPF_PROG_TYPE_XDP,
9551 	},
9552 	{
9553 		"meta access, test6",
9554 		.insns = {
9555 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9556 				    offsetof(struct xdp_md, data_meta)),
9557 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9558 				    offsetof(struct xdp_md, data)),
9559 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
9560 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9561 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
9562 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
9563 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_0, 1),
9564 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9565 			BPF_MOV64_IMM(BPF_REG_0, 0),
9566 			BPF_EXIT_INSN(),
9567 		},
9568 		.result = REJECT,
9569 		.errstr = "invalid access to packet",
9570 		.prog_type = BPF_PROG_TYPE_XDP,
9571 	},
9572 	{
9573 		"meta access, test7",
9574 		.insns = {
9575 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9576 				    offsetof(struct xdp_md, data_meta)),
9577 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9578 				    offsetof(struct xdp_md, data)),
9579 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
9580 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9581 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
9582 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
9583 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
9584 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9585 			BPF_MOV64_IMM(BPF_REG_0, 0),
9586 			BPF_EXIT_INSN(),
9587 		},
9588 		.result = ACCEPT,
9589 		.prog_type = BPF_PROG_TYPE_XDP,
9590 	},
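	/* test8/test9: packet ranges are only tracked up to MAX_PACKET_OFF
	 * (0xffff); test8's bound stays at the cap and the read is accepted,
	 * test9 goes one byte past it, no range is learned, and the read is
	 * rejected.
	 */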
9591 	{
9592 		"meta access, test8",
9593 		.insns = {
9594 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9595 				    offsetof(struct xdp_md, data_meta)),
9596 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9597 				    offsetof(struct xdp_md, data)),
9598 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
9599 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
9600 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
9601 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9602 			BPF_MOV64_IMM(BPF_REG_0, 0),
9603 			BPF_EXIT_INSN(),
9604 		},
9605 		.result = ACCEPT,
9606 		.prog_type = BPF_PROG_TYPE_XDP,
9607 	},
9608 	{
9609 		"meta access, test9",
9610 		.insns = {
9611 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9612 				    offsetof(struct xdp_md, data_meta)),
9613 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9614 				    offsetof(struct xdp_md, data)),
9615 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
9616 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
9617 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
9618 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
9619 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9620 			BPF_MOV64_IMM(BPF_REG_0, 0),
9621 			BPF_EXIT_INSN(),
9622 		},
9623 		.result = REJECT,
9624 		.errstr = "invalid access to packet",
9625 		.prog_type = BPF_PROG_TYPE_XDP,
9626 	},
9627 	{
9628 		"meta access, test10",
9629 		.insns = {
9630 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9631 				    offsetof(struct xdp_md, data_meta)),
9632 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9633 				    offsetof(struct xdp_md, data)),
9634 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
9635 				    offsetof(struct xdp_md, data_end)),
9636 			BPF_MOV64_IMM(BPF_REG_5, 42),
9637 			BPF_MOV64_IMM(BPF_REG_6, 24),
9638 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
9639 			BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
9640 			BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
9641 			BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
9642 			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_5),
9643 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
9644 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
9645 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
9646 			BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_5, 1),
9647 			BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
9648 			BPF_MOV64_IMM(BPF_REG_0, 0),
9649 			BPF_EXIT_INSN(),
9650 		},
9651 		.result = REJECT,
9652 		.errstr = "invalid access to packet",
9653 		.prog_type = BPF_PROG_TYPE_XDP,
9654 	},
9655 	{
9656 		"meta access, test11",
9657 		.insns = {
9658 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9659 				    offsetof(struct xdp_md, data_meta)),
9660 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9661 				    offsetof(struct xdp_md, data)),
9662 			BPF_MOV64_IMM(BPF_REG_5, 42),
9663 			BPF_MOV64_IMM(BPF_REG_6, 24),
9664 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
9665 			BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
9666 			BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
9667 			BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
9668 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_5),
9669 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
9670 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
9671 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
9672 			BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_3, 1),
9673 			BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_5, 0),
9674 			BPF_MOV64_IMM(BPF_REG_0, 0),
9675 			BPF_EXIT_INSN(),
9676 		},
9677 		.result = ACCEPT,
9678 		.prog_type = BPF_PROG_TYPE_XDP,
9679 	},
9680 	{
9681 		"meta access, test12",
9682 		.insns = {
9683 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9684 				    offsetof(struct xdp_md, data_meta)),
9685 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9686 				    offsetof(struct xdp_md, data)),
9687 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
9688 				    offsetof(struct xdp_md, data_end)),
9689 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
9690 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
9691 			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 5),
9692 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
9693 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
9694 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
9695 			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 1),
9696 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9697 			BPF_MOV64_IMM(BPF_REG_0, 0),
9698 			BPF_EXIT_INSN(),
9699 		},
9700 		.result = ACCEPT,
9701 		.prog_type = BPF_PROG_TYPE_XDP,
9702 	},
9703 	{
9704 		"arithmetic ops make PTR_TO_CTX unusable",
9705 		.insns = {
9706 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
9707 				      offsetof(struct __sk_buff, data) -
9708 				      offsetof(struct __sk_buff, mark)),
9709 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9710 				    offsetof(struct __sk_buff, mark)),
9711 			BPF_EXIT_INSN(),
9712 		},
9713 		.errstr = "dereference of modified ctx ptr",
9714 		.result = REJECT,
9715 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
9716 	},
9717 	{
9718 		"pkt_end - pkt_start is allowed",
9719 		.insns = {
9720 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9721 				    offsetof(struct __sk_buff, data_end)),
9722 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9723 				    offsetof(struct __sk_buff, data)),
9724 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
9725 			BPF_EXIT_INSN(),
9726 		},
9727 		.result = ACCEPT,
9728 		.retval = TEST_DATA_LEN,
9729 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
9730 	},
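	/* pkt_end may be compared against but never modified; the next two
	 * tests add to / subtract from it before the range check and must be
	 * rejected.
	 */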
9731 	{
9732 		"XDP pkt read, pkt_end mangling, bad access 1",
9733 		.insns = {
9734 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9735 				    offsetof(struct xdp_md, data)),
9736 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9737 				    offsetof(struct xdp_md, data_end)),
9738 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9739 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9740 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
9741 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9742 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9743 			BPF_MOV64_IMM(BPF_REG_0, 0),
9744 			BPF_EXIT_INSN(),
9745 		},
9746 		.errstr = "R3 pointer arithmetic on pkt_end",
9747 		.result = REJECT,
9748 		.prog_type = BPF_PROG_TYPE_XDP,
9749 	},
9750 	{
9751 		"XDP pkt read, pkt_end mangling, bad access 2",
9752 		.insns = {
9753 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9754 				    offsetof(struct xdp_md, data)),
9755 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9756 				    offsetof(struct xdp_md, data_end)),
9757 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9758 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9759 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_3, 8),
9760 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9761 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9762 			BPF_MOV64_IMM(BPF_REG_0, 0),
9763 			BPF_EXIT_INSN(),
9764 		},
9765 		.errstr = "R3 pointer arithmetic on pkt_end",
9766 		.result = REJECT,
9767 		.prog_type = BPF_PROG_TYPE_XDP,
9768 	},
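	/* The tests below walk the comparison matrix for pkt_data + 8 vs
	 * pkt_end: each of JGT/JLT/JGE/JLE with both operand orders, once
	 * with a load that stays inside the verified eight bytes (good
	 * access) and once with a load or branch layout that escapes it
	 * (bad access).
	 */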
9769 	{
9770 		"XDP pkt read, pkt_data' > pkt_end, good access",
9771 		.insns = {
9772 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9773 				    offsetof(struct xdp_md, data)),
9774 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9775 				    offsetof(struct xdp_md, data_end)),
9776 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9777 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9778 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9779 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9780 			BPF_MOV64_IMM(BPF_REG_0, 0),
9781 			BPF_EXIT_INSN(),
9782 		},
9783 		.result = ACCEPT,
9784 		.prog_type = BPF_PROG_TYPE_XDP,
9785 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9786 	},
9787 	{
9788 		"XDP pkt read, pkt_data' > pkt_end, bad access 1",
9789 		.insns = {
9790 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9791 				    offsetof(struct xdp_md, data)),
9792 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9793 				    offsetof(struct xdp_md, data_end)),
9794 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9795 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9796 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9797 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9798 			BPF_MOV64_IMM(BPF_REG_0, 0),
9799 			BPF_EXIT_INSN(),
9800 		},
9801 		.errstr = "R1 offset is outside of the packet",
9802 		.result = REJECT,
9803 		.prog_type = BPF_PROG_TYPE_XDP,
9804 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9805 	},
9806 	{
9807 		"XDP pkt read, pkt_data' > pkt_end, bad access 2",
9808 		.insns = {
9809 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9810 				    offsetof(struct xdp_md, data)),
9811 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9812 				    offsetof(struct xdp_md, data_end)),
9813 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9814 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9815 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
9816 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9817 			BPF_MOV64_IMM(BPF_REG_0, 0),
9818 			BPF_EXIT_INSN(),
9819 		},
9820 		.errstr = "R1 offset is outside of the packet",
9821 		.result = REJECT,
9822 		.prog_type = BPF_PROG_TYPE_XDP,
9823 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9824 	},
9825 	{
9826 		"XDP pkt read, pkt_end > pkt_data', good access",
9827 		.insns = {
9828 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9829 				    offsetof(struct xdp_md, data)),
9830 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9831 				    offsetof(struct xdp_md, data_end)),
9832 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9833 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9834 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9835 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9836 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9837 			BPF_MOV64_IMM(BPF_REG_0, 0),
9838 			BPF_EXIT_INSN(),
9839 		},
9840 		.result = ACCEPT,
9841 		.prog_type = BPF_PROG_TYPE_XDP,
9842 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9843 	},
9844 	{
9845 		"XDP pkt read, pkt_end > pkt_data', bad access 1",
9846 		.insns = {
9847 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9848 				    offsetof(struct xdp_md, data)),
9849 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9850 				    offsetof(struct xdp_md, data_end)),
9851 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9852 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9853 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9854 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9855 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9856 			BPF_MOV64_IMM(BPF_REG_0, 0),
9857 			BPF_EXIT_INSN(),
9858 		},
9859 		.errstr = "R1 offset is outside of the packet",
9860 		.result = REJECT,
9861 		.prog_type = BPF_PROG_TYPE_XDP,
9862 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9863 	},
9864 	{
9865 		"XDP pkt read, pkt_end > pkt_data', bad access 2",
9866 		.insns = {
9867 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9868 				    offsetof(struct xdp_md, data)),
9869 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9870 				    offsetof(struct xdp_md, data_end)),
9871 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9872 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9873 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9874 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9875 			BPF_MOV64_IMM(BPF_REG_0, 0),
9876 			BPF_EXIT_INSN(),
9877 		},
9878 		.errstr = "R1 offset is outside of the packet",
9879 		.result = REJECT,
9880 		.prog_type = BPF_PROG_TYPE_XDP,
9881 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9882 	},
9883 	{
9884 		"XDP pkt read, pkt_data' < pkt_end, good access",
9885 		.insns = {
9886 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9887 				    offsetof(struct xdp_md, data)),
9888 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9889 				    offsetof(struct xdp_md, data_end)),
9890 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9891 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9892 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9893 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9894 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9895 			BPF_MOV64_IMM(BPF_REG_0, 0),
9896 			BPF_EXIT_INSN(),
9897 		},
9898 		.result = ACCEPT,
9899 		.prog_type = BPF_PROG_TYPE_XDP,
9900 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9901 	},
9902 	{
9903 		"XDP pkt read, pkt_data' < pkt_end, bad access 1",
9904 		.insns = {
9905 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9906 				    offsetof(struct xdp_md, data)),
9907 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9908 				    offsetof(struct xdp_md, data_end)),
9909 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9910 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9911 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9912 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9913 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9914 			BPF_MOV64_IMM(BPF_REG_0, 0),
9915 			BPF_EXIT_INSN(),
9916 		},
9917 		.errstr = "R1 offset is outside of the packet",
9918 		.result = REJECT,
9919 		.prog_type = BPF_PROG_TYPE_XDP,
9920 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9921 	},
9922 	{
9923 		"XDP pkt read, pkt_data' < pkt_end, bad access 2",
9924 		.insns = {
9925 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9926 				    offsetof(struct xdp_md, data)),
9927 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9928 				    offsetof(struct xdp_md, data_end)),
9929 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9930 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9931 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9932 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9933 			BPF_MOV64_IMM(BPF_REG_0, 0),
9934 			BPF_EXIT_INSN(),
9935 		},
9936 		.errstr = "R1 offset is outside of the packet",
9937 		.result = REJECT,
9938 		.prog_type = BPF_PROG_TYPE_XDP,
9939 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9940 	},
9941 	{
9942 		"XDP pkt read, pkt_end < pkt_data', good access",
9943 		.insns = {
9944 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9945 				    offsetof(struct xdp_md, data)),
9946 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9947 				    offsetof(struct xdp_md, data_end)),
9948 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9949 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9950 			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
9951 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9952 			BPF_MOV64_IMM(BPF_REG_0, 0),
9953 			BPF_EXIT_INSN(),
9954 		},
9955 		.result = ACCEPT,
9956 		.prog_type = BPF_PROG_TYPE_XDP,
9957 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9958 	},
9959 	{
9960 		"XDP pkt read, pkt_end < pkt_data', bad access 1",
9961 		.insns = {
9962 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9963 				    offsetof(struct xdp_md, data)),
9964 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9965 				    offsetof(struct xdp_md, data_end)),
9966 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9967 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9968 			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
9969 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9970 			BPF_MOV64_IMM(BPF_REG_0, 0),
9971 			BPF_EXIT_INSN(),
9972 		},
9973 		.errstr = "R1 offset is outside of the packet",
9974 		.result = REJECT,
9975 		.prog_type = BPF_PROG_TYPE_XDP,
9976 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9977 	},
9978 	{
9979 		"XDP pkt read, pkt_end < pkt_data', bad access 2",
9980 		.insns = {
9981 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9982 				    offsetof(struct xdp_md, data)),
9983 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9984 				    offsetof(struct xdp_md, data_end)),
9985 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9986 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9987 			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
9988 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9989 			BPF_MOV64_IMM(BPF_REG_0, 0),
9990 			BPF_EXIT_INSN(),
9991 		},
9992 		.errstr = "R1 offset is outside of the packet",
9993 		.result = REJECT,
9994 		.prog_type = BPF_PROG_TYPE_XDP,
9995 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9996 	},
9997 	{
9998 		"XDP pkt read, pkt_data' >= pkt_end, good access",
9999 		.insns = {
10000 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10001 				    offsetof(struct xdp_md, data)),
10002 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10003 				    offsetof(struct xdp_md, data_end)),
10004 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10005 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10006 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
10007 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10008 			BPF_MOV64_IMM(BPF_REG_0, 0),
10009 			BPF_EXIT_INSN(),
10010 		},
10011 		.result = ACCEPT,
10012 		.prog_type = BPF_PROG_TYPE_XDP,
10013 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10014 	},
10015 	{
10016 		"XDP pkt read, pkt_data' >= pkt_end, bad access 1",
10017 		.insns = {
10018 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10019 				    offsetof(struct xdp_md, data)),
10020 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10021 				    offsetof(struct xdp_md, data_end)),
10022 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10023 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10024 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
10025 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10026 			BPF_MOV64_IMM(BPF_REG_0, 0),
10027 			BPF_EXIT_INSN(),
10028 		},
10029 		.errstr = "R1 offset is outside of the packet",
10030 		.result = REJECT,
10031 		.prog_type = BPF_PROG_TYPE_XDP,
10032 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10033 	},
10034 	{
10035 		"XDP pkt read, pkt_data' >= pkt_end, bad access 2",
10036 		.insns = {
10037 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10038 				    offsetof(struct xdp_md, data)),
10039 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10040 				    offsetof(struct xdp_md, data_end)),
10041 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10042 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10043 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
10044 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10045 			BPF_MOV64_IMM(BPF_REG_0, 0),
10046 			BPF_EXIT_INSN(),
10047 		},
10048 		.errstr = "R1 offset is outside of the packet",
10049 		.result = REJECT,
10050 		.prog_type = BPF_PROG_TYPE_XDP,
10051 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10052 	},
10053 	{
10054 		"XDP pkt read, pkt_end >= pkt_data', good access",
10055 		.insns = {
10056 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10057 				    offsetof(struct xdp_md, data)),
10058 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10059 				    offsetof(struct xdp_md, data_end)),
10060 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10061 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10062 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
10063 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10064 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10065 			BPF_MOV64_IMM(BPF_REG_0, 0),
10066 			BPF_EXIT_INSN(),
10067 		},
10068 		.result = ACCEPT,
10069 		.prog_type = BPF_PROG_TYPE_XDP,
10070 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10071 	},
10072 	{
10073 		"XDP pkt read, pkt_end >= pkt_data', bad access 1",
10074 		.insns = {
10075 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10076 				    offsetof(struct xdp_md, data)),
10077 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10078 				    offsetof(struct xdp_md, data_end)),
10079 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10080 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10081 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
10082 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10083 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
10084 			BPF_MOV64_IMM(BPF_REG_0, 0),
10085 			BPF_EXIT_INSN(),
10086 		},
10087 		.errstr = "R1 offset is outside of the packet",
10088 		.result = REJECT,
10089 		.prog_type = BPF_PROG_TYPE_XDP,
10090 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10091 	},
10092 	{
10093 		"XDP pkt read, pkt_end >= pkt_data', bad access 2",
10094 		.insns = {
10095 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10096 				    offsetof(struct xdp_md, data)),
10097 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10098 				    offsetof(struct xdp_md, data_end)),
10099 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10100 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10101 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
10102 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10103 			BPF_MOV64_IMM(BPF_REG_0, 0),
10104 			BPF_EXIT_INSN(),
10105 		},
10106 		.errstr = "R1 offset is outside of the packet",
10107 		.result = REJECT,
10108 		.prog_type = BPF_PROG_TYPE_XDP,
10109 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10110 	},
10111 	{
10112 		"XDP pkt read, pkt_data' <= pkt_end, good access",
10113 		.insns = {
10114 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10115 				    offsetof(struct xdp_md, data)),
10116 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10117 				    offsetof(struct xdp_md, data_end)),
10118 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10119 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10120 			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
10121 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10122 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10123 			BPF_MOV64_IMM(BPF_REG_0, 0),
10124 			BPF_EXIT_INSN(),
10125 		},
10126 		.result = ACCEPT,
10127 		.prog_type = BPF_PROG_TYPE_XDP,
10128 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10129 	},
10130 	{
10131 		"XDP pkt read, pkt_data' <= pkt_end, bad access 1",
10132 		.insns = {
10133 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10134 				    offsetof(struct xdp_md, data)),
10135 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10136 				    offsetof(struct xdp_md, data_end)),
10137 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10138 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10139 			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
10140 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10141 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
10142 			BPF_MOV64_IMM(BPF_REG_0, 0),
10143 			BPF_EXIT_INSN(),
10144 		},
10145 		.errstr = "R1 offset is outside of the packet",
10146 		.result = REJECT,
10147 		.prog_type = BPF_PROG_TYPE_XDP,
10148 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10149 	},
10150 	{
10151 		"XDP pkt read, pkt_data' <= pkt_end, bad access 2",
10152 		.insns = {
10153 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10154 				    offsetof(struct xdp_md, data)),
10155 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10156 				    offsetof(struct xdp_md, data_end)),
10157 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10158 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10159 			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
10160 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10161 			BPF_MOV64_IMM(BPF_REG_0, 0),
10162 			BPF_EXIT_INSN(),
10163 		},
10164 		.errstr = "R1 offset is outside of the packet",
10165 		.result = REJECT,
10166 		.prog_type = BPF_PROG_TYPE_XDP,
10167 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10168 	},
10169 	{
10170 		"XDP pkt read, pkt_end <= pkt_data', good access",
10171 		.insns = {
10172 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10173 				    offsetof(struct xdp_md, data)),
10174 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10175 				    offsetof(struct xdp_md, data_end)),
10176 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10177 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10178 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
10179 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10180 			BPF_MOV64_IMM(BPF_REG_0, 0),
10181 			BPF_EXIT_INSN(),
10182 		},
10183 		.result = ACCEPT,
10184 		.prog_type = BPF_PROG_TYPE_XDP,
10185 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10186 	},
10187 	{
10188 		"XDP pkt read, pkt_end <= pkt_data', bad access 1",
10189 		.insns = {
10190 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10191 				    offsetof(struct xdp_md, data)),
10192 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10193 				    offsetof(struct xdp_md, data_end)),
10194 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10195 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10196 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
10197 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10198 			BPF_MOV64_IMM(BPF_REG_0, 0),
10199 			BPF_EXIT_INSN(),
10200 		},
10201 		.errstr = "R1 offset is outside of the packet",
10202 		.result = REJECT,
10203 		.prog_type = BPF_PROG_TYPE_XDP,
10204 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10205 	},
10206 	{
10207 		"XDP pkt read, pkt_end <= pkt_data', bad access 2",
10208 		.insns = {
10209 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10210 				    offsetof(struct xdp_md, data)),
10211 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10212 				    offsetof(struct xdp_md, data_end)),
10213 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10214 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10215 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
10216 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10217 			BPF_MOV64_IMM(BPF_REG_0, 0),
10218 			BPF_EXIT_INSN(),
10219 		},
10220 		.errstr = "R1 offset is outside of the packet",
10221 		.result = REJECT,
10222 		.prog_type = BPF_PROG_TYPE_XDP,
10223 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10224 	},
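	/* Same comparison matrix as above, but for data_meta + 8 against
	 * data: the verified window is the metadata area in front of the
	 * packet.
	 */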
10225 	{
10226 		"XDP pkt read, pkt_meta' > pkt_data, good access",
10227 		.insns = {
10228 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10229 				    offsetof(struct xdp_md, data_meta)),
10230 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10231 				    offsetof(struct xdp_md, data)),
10232 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10233 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10234 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
10235 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10236 			BPF_MOV64_IMM(BPF_REG_0, 0),
10237 			BPF_EXIT_INSN(),
10238 		},
10239 		.result = ACCEPT,
10240 		.prog_type = BPF_PROG_TYPE_XDP,
10241 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10242 	},
10243 	{
10244 		"XDP pkt read, pkt_meta' > pkt_data, bad access 1",
10245 		.insns = {
10246 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10247 				    offsetof(struct xdp_md, data_meta)),
10248 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10249 				    offsetof(struct xdp_md, data)),
10250 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10251 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10252 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
10253 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
10254 			BPF_MOV64_IMM(BPF_REG_0, 0),
10255 			BPF_EXIT_INSN(),
10256 		},
10257 		.errstr = "R1 offset is outside of the packet",
10258 		.result = REJECT,
10259 		.prog_type = BPF_PROG_TYPE_XDP,
10260 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10261 	},
10262 	{
10263 		"XDP pkt read, pkt_meta' > pkt_data, bad access 2",
10264 		.insns = {
10265 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10266 				    offsetof(struct xdp_md, data_meta)),
10267 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10268 				    offsetof(struct xdp_md, data)),
10269 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10270 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10271 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
10272 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10273 			BPF_MOV64_IMM(BPF_REG_0, 0),
10274 			BPF_EXIT_INSN(),
10275 		},
10276 		.errstr = "R1 offset is outside of the packet",
10277 		.result = REJECT,
10278 		.prog_type = BPF_PROG_TYPE_XDP,
10279 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10280 	},
10281 	{
10282 		"XDP pkt read, pkt_data > pkt_meta', good access",
10283 		.insns = {
10284 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10285 				    offsetof(struct xdp_md, data_meta)),
10286 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10287 				    offsetof(struct xdp_md, data)),
10288 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10289 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10290 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
10291 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10292 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10293 			BPF_MOV64_IMM(BPF_REG_0, 0),
10294 			BPF_EXIT_INSN(),
10295 		},
10296 		.result = ACCEPT,
10297 		.prog_type = BPF_PROG_TYPE_XDP,
10298 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10299 	},
10300 	{
10301 		"XDP pkt read, pkt_data > pkt_meta', bad access 1",
10302 		.insns = {
10303 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10304 				    offsetof(struct xdp_md, data_meta)),
10305 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10306 				    offsetof(struct xdp_md, data)),
10307 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10308 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10309 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
10310 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10311 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10312 			BPF_MOV64_IMM(BPF_REG_0, 0),
10313 			BPF_EXIT_INSN(),
10314 		},
10315 		.errstr = "R1 offset is outside of the packet",
10316 		.result = REJECT,
10317 		.prog_type = BPF_PROG_TYPE_XDP,
10318 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10319 	},
10320 	{
10321 		"XDP pkt read, pkt_data > pkt_meta', bad access 2",
10322 		.insns = {
10323 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10324 				    offsetof(struct xdp_md, data_meta)),
10325 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10326 				    offsetof(struct xdp_md, data)),
10327 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10328 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10329 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
10330 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10331 			BPF_MOV64_IMM(BPF_REG_0, 0),
10332 			BPF_EXIT_INSN(),
10333 		},
10334 		.errstr = "R1 offset is outside of the packet",
10335 		.result = REJECT,
10336 		.prog_type = BPF_PROG_TYPE_XDP,
10337 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10338 	},
10339 	{
10340 		"XDP pkt read, pkt_meta' < pkt_data, good access",
10341 		.insns = {
10342 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10343 				    offsetof(struct xdp_md, data_meta)),
10344 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10345 				    offsetof(struct xdp_md, data)),
10346 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10347 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10348 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
10349 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10350 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10351 			BPF_MOV64_IMM(BPF_REG_0, 0),
10352 			BPF_EXIT_INSN(),
10353 		},
10354 		.result = ACCEPT,
10355 		.prog_type = BPF_PROG_TYPE_XDP,
10356 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10357 	},
10358 	{
10359 		"XDP pkt read, pkt_meta' < pkt_data, bad access 1",
10360 		.insns = {
10361 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10362 				    offsetof(struct xdp_md, data_meta)),
10363 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10364 				    offsetof(struct xdp_md, data)),
10365 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10366 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10367 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
10368 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10369 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10370 			BPF_MOV64_IMM(BPF_REG_0, 0),
10371 			BPF_EXIT_INSN(),
10372 		},
10373 		.errstr = "R1 offset is outside of the packet",
10374 		.result = REJECT,
10375 		.prog_type = BPF_PROG_TYPE_XDP,
10376 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10377 	},
10378 	{
10379 		"XDP pkt read, pkt_meta' < pkt_data, bad access 2",
10380 		.insns = {
10381 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10382 				    offsetof(struct xdp_md, data_meta)),
10383 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10384 				    offsetof(struct xdp_md, data)),
10385 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10386 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10387 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
10388 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10389 			BPF_MOV64_IMM(BPF_REG_0, 0),
10390 			BPF_EXIT_INSN(),
10391 		},
10392 		.errstr = "R1 offset is outside of the packet",
10393 		.result = REJECT,
10394 		.prog_type = BPF_PROG_TYPE_XDP,
10395 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10396 	},
10397 	{
10398 		"XDP pkt read, pkt_data < pkt_meta', good access",
10399 		.insns = {
10400 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10401 				    offsetof(struct xdp_md, data_meta)),
10402 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10403 				    offsetof(struct xdp_md, data)),
10404 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10405 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10406 			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
10407 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10408 			BPF_MOV64_IMM(BPF_REG_0, 0),
10409 			BPF_EXIT_INSN(),
10410 		},
10411 		.result = ACCEPT,
10412 		.prog_type = BPF_PROG_TYPE_XDP,
10413 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10414 	},
10415 	{
10416 		"XDP pkt read, pkt_data < pkt_meta', bad access 1",
10417 		.insns = {
10418 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10419 				    offsetof(struct xdp_md, data_meta)),
10420 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10421 				    offsetof(struct xdp_md, data)),
10422 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10423 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10424 			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
10425 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
10426 			BPF_MOV64_IMM(BPF_REG_0, 0),
10427 			BPF_EXIT_INSN(),
10428 		},
10429 		.errstr = "R1 offset is outside of the packet",
10430 		.result = REJECT,
10431 		.prog_type = BPF_PROG_TYPE_XDP,
10432 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10433 	},
10434 	{
10435 		"XDP pkt read, pkt_data < pkt_meta', bad access 2",
10436 		.insns = {
10437 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10438 				    offsetof(struct xdp_md, data_meta)),
10439 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10440 				    offsetof(struct xdp_md, data)),
10441 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10442 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10443 			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
10444 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10445 			BPF_MOV64_IMM(BPF_REG_0, 0),
10446 			BPF_EXIT_INSN(),
10447 		},
10448 		.errstr = "R1 offset is outside of the packet",
10449 		.result = REJECT,
10450 		.prog_type = BPF_PROG_TYPE_XDP,
10451 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10452 	},
10453 	{
10454 		"XDP pkt read, pkt_meta' >= pkt_data, good access",
10455 		.insns = {
10456 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10457 				    offsetof(struct xdp_md, data_meta)),
10458 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10459 				    offsetof(struct xdp_md, data)),
10460 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10461 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10462 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
10463 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10464 			BPF_MOV64_IMM(BPF_REG_0, 0),
10465 			BPF_EXIT_INSN(),
10466 		},
10467 		.result = ACCEPT,
10468 		.prog_type = BPF_PROG_TYPE_XDP,
10469 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10470 	},
10471 	{
10472 		"XDP pkt read, pkt_meta' >= pkt_data, bad access 1",
10473 		.insns = {
10474 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10475 				    offsetof(struct xdp_md, data_meta)),
10476 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10477 				    offsetof(struct xdp_md, data)),
10478 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10479 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10480 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
10481 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10482 			BPF_MOV64_IMM(BPF_REG_0, 0),
10483 			BPF_EXIT_INSN(),
10484 		},
10485 		.errstr = "R1 offset is outside of the packet",
10486 		.result = REJECT,
10487 		.prog_type = BPF_PROG_TYPE_XDP,
10488 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10489 	},
10490 	{
10491 		"XDP pkt read, pkt_meta' >= pkt_data, bad access 2",
10492 		.insns = {
10493 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10494 				    offsetof(struct xdp_md, data_meta)),
10495 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10496 				    offsetof(struct xdp_md, data)),
10497 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10498 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10499 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
10500 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10501 			BPF_MOV64_IMM(BPF_REG_0, 0),
10502 			BPF_EXIT_INSN(),
10503 		},
10504 		.errstr = "R1 offset is outside of the packet",
10505 		.result = REJECT,
10506 		.prog_type = BPF_PROG_TYPE_XDP,
10507 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10508 	},
10509 	{
10510 		"XDP pkt read, pkt_data >= pkt_meta', good access",
10511 		.insns = {
10512 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10513 				    offsetof(struct xdp_md, data_meta)),
10514 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10515 				    offsetof(struct xdp_md, data)),
10516 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10517 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10518 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
10519 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10520 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10521 			BPF_MOV64_IMM(BPF_REG_0, 0),
10522 			BPF_EXIT_INSN(),
10523 		},
10524 		.result = ACCEPT,
10525 		.prog_type = BPF_PROG_TYPE_XDP,
10526 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10527 	},
10528 	{
10529 		"XDP pkt read, pkt_data >= pkt_meta', bad access 1",
10530 		.insns = {
10531 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10532 				    offsetof(struct xdp_md, data_meta)),
10533 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10534 				    offsetof(struct xdp_md, data)),
10535 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10536 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10537 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
10538 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10539 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
10540 			BPF_MOV64_IMM(BPF_REG_0, 0),
10541 			BPF_EXIT_INSN(),
10542 		},
10543 		.errstr = "R1 offset is outside of the packet",
10544 		.result = REJECT,
10545 		.prog_type = BPF_PROG_TYPE_XDP,
10546 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10547 	},
10548 	{
10549 		"XDP pkt read, pkt_data >= pkt_meta', bad access 2",
10550 		.insns = {
10551 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10552 				    offsetof(struct xdp_md, data_meta)),
10553 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10554 				    offsetof(struct xdp_md, data)),
10555 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10556 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10557 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
10558 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10559 			BPF_MOV64_IMM(BPF_REG_0, 0),
10560 			BPF_EXIT_INSN(),
10561 		},
10562 		.errstr = "R1 offset is outside of the packet",
10563 		.result = REJECT,
10564 		.prog_type = BPF_PROG_TYPE_XDP,
10565 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10566 	},
10567 	{
10568 		"XDP pkt read, pkt_meta' <= pkt_data, good access",
10569 		.insns = {
10570 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10571 				    offsetof(struct xdp_md, data_meta)),
10572 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10573 				    offsetof(struct xdp_md, data)),
10574 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10575 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10576 			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
10577 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10578 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10579 			BPF_MOV64_IMM(BPF_REG_0, 0),
10580 			BPF_EXIT_INSN(),
10581 		},
10582 		.result = ACCEPT,
10583 		.prog_type = BPF_PROG_TYPE_XDP,
10584 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10585 	},
10586 	{
10587 		"XDP pkt read, pkt_meta' <= pkt_data, bad access 1",
10588 		.insns = {
10589 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10590 				    offsetof(struct xdp_md, data_meta)),
10591 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10592 				    offsetof(struct xdp_md, data)),
10593 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10594 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10595 			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
10596 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10597 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
10598 			BPF_MOV64_IMM(BPF_REG_0, 0),
10599 			BPF_EXIT_INSN(),
10600 		},
10601 		.errstr = "R1 offset is outside of the packet",
10602 		.result = REJECT,
10603 		.prog_type = BPF_PROG_TYPE_XDP,
10604 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10605 	},
10606 	{
10607 		"XDP pkt read, pkt_meta' <= pkt_data, bad access 2",
10608 		.insns = {
10609 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10610 				    offsetof(struct xdp_md, data_meta)),
10611 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10612 				    offsetof(struct xdp_md, data)),
10613 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10614 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10615 			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
10616 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10617 			BPF_MOV64_IMM(BPF_REG_0, 0),
10618 			BPF_EXIT_INSN(),
10619 		},
10620 		.errstr = "R1 offset is outside of the packet",
10621 		.result = REJECT,
10622 		.prog_type = BPF_PROG_TYPE_XDP,
10623 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10624 	},
10625 	{
10626 		"XDP pkt read, pkt_data <= pkt_meta', good access",
10627 		.insns = {
10628 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10629 				    offsetof(struct xdp_md, data_meta)),
10630 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10631 				    offsetof(struct xdp_md, data)),
10632 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10633 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10634 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
10635 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10636 			BPF_MOV64_IMM(BPF_REG_0, 0),
10637 			BPF_EXIT_INSN(),
10638 		},
10639 		.result = ACCEPT,
10640 		.prog_type = BPF_PROG_TYPE_XDP,
10641 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10642 	},
10643 	{
10644 		"XDP pkt read, pkt_data <= pkt_meta', bad access 1",
10645 		.insns = {
10646 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10647 				    offsetof(struct xdp_md, data_meta)),
10648 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10649 				    offsetof(struct xdp_md, data)),
10650 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10651 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10652 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
10653 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10654 			BPF_MOV64_IMM(BPF_REG_0, 0),
10655 			BPF_EXIT_INSN(),
10656 		},
10657 		.errstr = "R1 offset is outside of the packet",
10658 		.result = REJECT,
10659 		.prog_type = BPF_PROG_TYPE_XDP,
10660 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10661 	},
10662 	{
10663 		"XDP pkt read, pkt_data <= pkt_meta', bad access 2",
10664 		.insns = {
10665 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10666 				    offsetof(struct xdp_md, data_meta)),
10667 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10668 				    offsetof(struct xdp_md, data)),
10669 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10670 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10671 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
10672 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10673 			BPF_MOV64_IMM(BPF_REG_0, 0),
10674 			BPF_EXIT_INSN(),
10675 		},
10676 		.errstr = "R1 offset is outside of the packet",
10677 		.result = REJECT,
10678 		.prog_type = BPF_PROG_TYPE_XDP,
10679 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10680 	},
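	/* The "deducing bounds from const" tests check that signed
	 * comparisons against immediates give R0 known bounds, and that the
	 * verifier uses those bounds when R0 is later mixed with the ctx
	 * pointer in R1: subtracting a pointer from a scalar, or
	 * dereferencing a modified ctx pointer, is rejected.
	 */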
10681 	{
10682 		"check deducing bounds from const, 1",
10683 		.insns = {
10684 			BPF_MOV64_IMM(BPF_REG_0, 1),
10685 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 0),
10686 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10687 			BPF_EXIT_INSN(),
10688 		},
10689 		.result = REJECT,
10690 		.errstr = "R0 tried to subtract pointer from scalar",
10691 	},
10692 	{
10693 		"check deducing bounds from const, 2",
10694 		.insns = {
10695 			BPF_MOV64_IMM(BPF_REG_0, 1),
10696 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
10697 			BPF_EXIT_INSN(),
10698 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 1, 1),
10699 			BPF_EXIT_INSN(),
10700 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
10701 			BPF_EXIT_INSN(),
10702 		},
10703 		.result = ACCEPT,
10704 		.retval = 1,
10705 	},
10706 	{
10707 		"check deducing bounds from const, 3",
10708 		.insns = {
10709 			BPF_MOV64_IMM(BPF_REG_0, 0),
10710 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
10711 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10712 			BPF_EXIT_INSN(),
10713 		},
10714 		.result = REJECT,
10715 		.errstr = "R0 tried to subtract pointer from scalar",
10716 	},
10717 	{
10718 		"check deducing bounds from const, 4",
10719 		.insns = {
10720 			BPF_MOV64_IMM(BPF_REG_0, 0),
10721 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1),
10722 			BPF_EXIT_INSN(),
10723 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
10724 			BPF_EXIT_INSN(),
10725 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
10726 			BPF_EXIT_INSN(),
10727 		},
10728 		.result = ACCEPT,
10729 	},
10730 	{
10731 		"check deducing bounds from const, 5",
10732 		.insns = {
10733 			BPF_MOV64_IMM(BPF_REG_0, 0),
10734 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
10735 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10736 			BPF_EXIT_INSN(),
10737 		},
10738 		.result = REJECT,
10739 		.errstr = "R0 tried to subtract pointer from scalar",
10740 	},
10741 	{
10742 		"check deducing bounds from const, 6",
10743 		.insns = {
10744 			BPF_MOV64_IMM(BPF_REG_0, 0),
10745 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
10746 			BPF_EXIT_INSN(),
10747 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10748 			BPF_EXIT_INSN(),
10749 		},
10750 		.result = REJECT,
10751 		.errstr = "R0 tried to subtract pointer from scalar",
10752 	},
10753 	{
10754 		"check deducing bounds from const, 7",
10755 		.insns = {
10756 			BPF_MOV64_IMM(BPF_REG_0, ~0),
10757 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
10758 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
10759 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10760 				    offsetof(struct __sk_buff, mark)),
10761 			BPF_EXIT_INSN(),
10762 		},
10763 		.result = REJECT,
10764 		.errstr = "dereference of modified ctx ptr",
10765 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10766 	},
10767 	{
10768 		"check deducing bounds from const, 8",
10769 		.insns = {
10770 			BPF_MOV64_IMM(BPF_REG_0, ~0),
10771 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
10772 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
10773 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10774 				    offsetof(struct __sk_buff, mark)),
10775 			BPF_EXIT_INSN(),
10776 		},
10777 		.result = REJECT,
10778 		.errstr = "dereference of modified ctx ptr",
10779 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10780 	},
10781 	{
10782 		"check deducing bounds from const, 9",
10783 		.insns = {
10784 			BPF_MOV64_IMM(BPF_REG_0, 0),
10785 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
10786 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10787 			BPF_EXIT_INSN(),
10788 		},
10789 		.result = REJECT,
10790 		.errstr = "R0 tried to subtract pointer from scalar",
10791 	},
10792 	{
10793 		"check deducing bounds from const, 10",
10794 		.insns = {
10795 			BPF_MOV64_IMM(BPF_REG_0, 0),
10796 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
10797 			/* Marks reg as unknown. */
10798 			BPF_ALU64_IMM(BPF_NEG, BPF_REG_0, 0),
10799 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10800 			BPF_EXIT_INSN(),
10801 		},
10802 		.result = REJECT,
10803 		.errstr = "math between ctx pointer and register with unbounded min value is not allowed",
10804 	},
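	/* BPF_PROG_TYPE_CGROUP_SOCK programs may only return 0 or 1; these
	 * tests check that the verifier tracks the possible R0 values at
	 * bpf_exit and rejects anything not provably in that range.
	 */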
10805 	{
10806 		"bpf_exit with invalid return code. test1",
10807 		.insns = {
10808 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10809 			BPF_EXIT_INSN(),
10810 		},
10811 		.errstr = "R0 has value (0x0; 0xffffffff)",
10812 		.result = REJECT,
10813 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10814 	},
10815 	{
10816 		"bpf_exit with invalid return code. test2",
10817 		.insns = {
10818 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10819 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
10820 			BPF_EXIT_INSN(),
10821 		},
10822 		.result = ACCEPT,
10823 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10824 	},
10825 	{
10826 		"bpf_exit with invalid return code. test3",
10827 		.insns = {
10828 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10829 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 3),
10830 			BPF_EXIT_INSN(),
10831 		},
10832 		.errstr = "R0 has value (0x0; 0x3)",
10833 		.result = REJECT,
10834 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10835 	},
10836 	{
10837 		"bpf_exit with invalid return code. test4",
10838 		.insns = {
10839 			BPF_MOV64_IMM(BPF_REG_0, 1),
10840 			BPF_EXIT_INSN(),
10841 		},
10842 		.result = ACCEPT,
10843 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10844 	},
10845 	{
10846 		"bpf_exit with invalid return code. test5",
10847 		.insns = {
10848 			BPF_MOV64_IMM(BPF_REG_0, 2),
10849 			BPF_EXIT_INSN(),
10850 		},
10851 		.errstr = "R0 has value (0x2; 0x0)",
10852 		.result = REJECT,
10853 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10854 	},
10855 	{
10856 		"bpf_exit with invalid return code. test6",
10857 		.insns = {
10858 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
10859 			BPF_EXIT_INSN(),
10860 		},
10861 		.errstr = "R0 is not a known value (ctx)",
10862 		.result = REJECT,
10863 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10864 	},
10865 	{
10866 		"bpf_exit with invalid return code. test7",
10867 		.insns = {
10868 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10869 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 4),
10870 			BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_2),
10871 			BPF_EXIT_INSN(),
10872 		},
10873 		.errstr = "R0 has unknown scalar value",
10874 		.result = REJECT,
10875 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10876 	},
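	/* The "calls:" tests exercise BPF-to-BPF subprogram calls, encoded as
	 * BPF_JMP | BPF_CALL with src_reg == BPF_PSEUDO_CALL (1) and the
	 * relative instruction offset of the callee in the imm field.
	 */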
10877 	{
10878 		"calls: basic sanity",
10879 		.insns = {
10880 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10881 			BPF_MOV64_IMM(BPF_REG_0, 1),
10882 			BPF_EXIT_INSN(),
10883 			BPF_MOV64_IMM(BPF_REG_0, 2),
10884 			BPF_EXIT_INSN(),
10885 		},
10886 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10887 		.result = ACCEPT,
10888 	},
10889 	{
10890 		"calls: not on unprivileged",
10891 		.insns = {
10892 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10893 			BPF_MOV64_IMM(BPF_REG_0, 1),
10894 			BPF_EXIT_INSN(),
10895 			BPF_MOV64_IMM(BPF_REG_0, 2),
10896 			BPF_EXIT_INSN(),
10897 		},
10898 		.errstr_unpriv = "function calls to other bpf functions are allowed for root only",
10899 		.result_unpriv = REJECT,
10900 		.result = ACCEPT,
10901 		.retval = 1,
10902 	},
10903 	{
10904 		"calls: div by 0 in subprog",
10905 		.insns = {
10906 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10907 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
10908 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10909 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
10910 				    offsetof(struct __sk_buff, data_end)),
10911 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
10912 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
10913 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
10914 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
10915 			BPF_MOV64_IMM(BPF_REG_0, 1),
10916 			BPF_EXIT_INSN(),
10917 			BPF_MOV32_IMM(BPF_REG_2, 0),
10918 			BPF_MOV32_IMM(BPF_REG_3, 1),
10919 			BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2),
10920 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10921 				    offsetof(struct __sk_buff, data)),
10922 			BPF_EXIT_INSN(),
10923 		},
10924 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
10925 		.result = ACCEPT,
10926 		.retval = 1,
10927 	},
10928 	{
10929 		"calls: multiple ret types in subprog 1",
10930 		.insns = {
10931 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10932 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
10933 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10934 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
10935 				    offsetof(struct __sk_buff, data_end)),
10936 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
10937 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
10938 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
10939 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
10940 			BPF_MOV64_IMM(BPF_REG_0, 1),
10941 			BPF_EXIT_INSN(),
10942 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10943 				    offsetof(struct __sk_buff, data)),
10944 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
10945 			BPF_MOV32_IMM(BPF_REG_0, 42),
10946 			BPF_EXIT_INSN(),
10947 		},
10948 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
10949 		.result = REJECT,
10950 		.errstr = "R0 invalid mem access 'inv'",
10951 	},
10952 	{
10953 		"calls: multiple ret types in subprog 2",
10954 		.insns = {
10955 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10956 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
10957 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10958 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
10959 				    offsetof(struct __sk_buff, data_end)),
10960 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
10961 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
10962 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
10963 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
10964 			BPF_MOV64_IMM(BPF_REG_0, 1),
10965 			BPF_EXIT_INSN(),
10966 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10967 				    offsetof(struct __sk_buff, data)),
10968 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10969 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9),
10970 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10971 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10972 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10973 			BPF_LD_MAP_FD(BPF_REG_1, 0),
10974 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10975 				     BPF_FUNC_map_lookup_elem),
10976 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
10977 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
10978 				    offsetof(struct __sk_buff, data)),
10979 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
10980 			BPF_EXIT_INSN(),
10981 		},
10982 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
10983 		.fixup_map_hash_8b = { 16 },
10984 		.result = REJECT,
10985 		.errstr = "R0 min value is outside of the array range",
10986 	},
10987 	{
10988 		"calls: overlapping caller/callee",
10989 		.insns = {
10990 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
10991 			BPF_MOV64_IMM(BPF_REG_0, 1),
10992 			BPF_EXIT_INSN(),
10993 		},
10994 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10995 		.errstr = "last insn is not an exit or jmp",
10996 		.result = REJECT,
10997 	},
10998 	{
10999 		"calls: wrong recursive calls",
11000 		.insns = {
11001 			BPF_JMP_IMM(BPF_JA, 0, 0, 4),
11002 			BPF_JMP_IMM(BPF_JA, 0, 0, 4),
11003 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
11004 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
11005 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
11006 			BPF_MOV64_IMM(BPF_REG_0, 1),
11007 			BPF_EXIT_INSN(),
11008 		},
11009 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11010 		.errstr = "jump out of range",
11011 		.result = REJECT,
11012 	},
11013 	{
11014 		"calls: wrong src reg",
11015 		.insns = {
11016 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 2, 0, 0),
11017 			BPF_MOV64_IMM(BPF_REG_0, 1),
11018 			BPF_EXIT_INSN(),
11019 		},
11020 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11021 		.errstr = "BPF_CALL uses reserved fields",
11022 		.result = REJECT,
11023 	},
11024 	{
11025 		"calls: wrong off value",
11026 		.insns = {
11027 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
11028 			BPF_MOV64_IMM(BPF_REG_0, 1),
11029 			BPF_EXIT_INSN(),
11030 			BPF_MOV64_IMM(BPF_REG_0, 2),
11031 			BPF_EXIT_INSN(),
11032 		},
11033 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11034 		.errstr = "BPF_CALL uses reserved fields",
11035 		.result = REJECT,
11036 	},
11037 	{
11038 		"calls: jump back loop",
11039 		.insns = {
11040 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
11041 			BPF_MOV64_IMM(BPF_REG_0, 1),
11042 			BPF_EXIT_INSN(),
11043 		},
11044 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11045 		.errstr = "back-edge from insn 0 to 0",
11046 		.result = REJECT,
11047 	},
11048 	{
11049 		"calls: conditional call",
11050 		.insns = {
11051 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11052 				    offsetof(struct __sk_buff, mark)),
11053 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
11054 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11055 			BPF_MOV64_IMM(BPF_REG_0, 1),
11056 			BPF_EXIT_INSN(),
11057 			BPF_MOV64_IMM(BPF_REG_0, 2),
11058 			BPF_EXIT_INSN(),
11059 		},
11060 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11061 		.errstr = "jump out of range",
11062 		.result = REJECT,
11063 	},
11064 	{
11065 		"calls: conditional call 2",
11066 		.insns = {
11067 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11068 				    offsetof(struct __sk_buff, mark)),
11069 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
11070 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11071 			BPF_MOV64_IMM(BPF_REG_0, 1),
11072 			BPF_EXIT_INSN(),
11073 			BPF_MOV64_IMM(BPF_REG_0, 2),
11074 			BPF_EXIT_INSN(),
11075 			BPF_MOV64_IMM(BPF_REG_0, 3),
11076 			BPF_EXIT_INSN(),
11077 		},
11078 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11079 		.result = ACCEPT,
11080 	},
11081 	{
11082 		"calls: conditional call 3",
11083 		.insns = {
11084 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11085 				    offsetof(struct __sk_buff, mark)),
11086 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
11087 			BPF_JMP_IMM(BPF_JA, 0, 0, 4),
11088 			BPF_MOV64_IMM(BPF_REG_0, 1),
11089 			BPF_EXIT_INSN(),
11090 			BPF_MOV64_IMM(BPF_REG_0, 1),
11091 			BPF_JMP_IMM(BPF_JA, 0, 0, -6),
11092 			BPF_MOV64_IMM(BPF_REG_0, 3),
11093 			BPF_JMP_IMM(BPF_JA, 0, 0, -6),
11094 		},
11095 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11096 		.errstr = "back-edge from insn",
11097 		.result = REJECT,
11098 	},
11099 	{
11100 		"calls: conditional call 4",
11101 		.insns = {
11102 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11103 				    offsetof(struct __sk_buff, mark)),
11104 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
11105 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11106 			BPF_MOV64_IMM(BPF_REG_0, 1),
11107 			BPF_EXIT_INSN(),
11108 			BPF_MOV64_IMM(BPF_REG_0, 1),
11109 			BPF_JMP_IMM(BPF_JA, 0, 0, -5),
11110 			BPF_MOV64_IMM(BPF_REG_0, 3),
11111 			BPF_EXIT_INSN(),
11112 		},
11113 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11114 		.result = ACCEPT,
11115 	},
11116 	{
11117 		"calls: conditional call 5",
11118 		.insns = {
11119 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11120 				    offsetof(struct __sk_buff, mark)),
11121 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
11122 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11123 			BPF_MOV64_IMM(BPF_REG_0, 1),
11124 			BPF_EXIT_INSN(),
11125 			BPF_MOV64_IMM(BPF_REG_0, 1),
11126 			BPF_JMP_IMM(BPF_JA, 0, 0, -6),
11127 			BPF_MOV64_IMM(BPF_REG_0, 3),
11128 			BPF_EXIT_INSN(),
11129 		},
11130 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11131 		.errstr = "back-edge from insn",
11132 		.result = REJECT,
11133 	},
11134 	{
11135 		"calls: conditional call 6",
11136 		.insns = {
11137 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11138 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -2),
11139 			BPF_EXIT_INSN(),
11140 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11141 				    offsetof(struct __sk_buff, mark)),
11142 			BPF_EXIT_INSN(),
11143 		},
11144 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11145 		.errstr = "back-edge from insn",
11146 		.result = REJECT,
11147 	},
11148 	{
11149 		"calls: using r0 returned by callee",
11150 		.insns = {
11151 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11152 			BPF_EXIT_INSN(),
11153 			BPF_MOV64_IMM(BPF_REG_0, 2),
11154 			BPF_EXIT_INSN(),
11155 		},
11156 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11157 		.result = ACCEPT,
11158 	},
11159 	{
11160 		"calls: using uninit r0 from callee",
11161 		.insns = {
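			/* the callee (insn 2) is a bare exit that never writes r0 */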
11162 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11163 			BPF_EXIT_INSN(),
11164 			BPF_EXIT_INSN(),
11165 		},
11166 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11167 		.errstr = "!read_ok",
11168 		.result = REJECT,
11169 	},
11170 	{
11171 		"calls: callee is using r1",
11172 		.insns = {
11173 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11174 			BPF_EXIT_INSN(),
11175 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11176 				    offsetof(struct __sk_buff, len)),
11177 			BPF_EXIT_INSN(),
11178 		},
11179 		.prog_type = BPF_PROG_TYPE_SCHED_ACT,
11180 		.result = ACCEPT,
11181 		.retval = TEST_DATA_LEN,
11182 	},
11183 	{
11184 		"calls: callee using args1",
11185 		.insns = {
11186 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11187 			BPF_EXIT_INSN(),
11188 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
11189 			BPF_EXIT_INSN(),
11190 		},
11191 		.errstr_unpriv = "allowed for root only",
11192 		.result_unpriv = REJECT,
11193 		.result = ACCEPT,
11194 		.retval = POINTER_VALUE,
11195 	},
11196 	{
11197 		"calls: callee using wrong args2",
11198 		.insns = {
11199 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11200 			BPF_EXIT_INSN(),
11201 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11202 			BPF_EXIT_INSN(),
11203 		},
11204 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11205 		.errstr = "R2 !read_ok",
11206 		.result = REJECT,
11207 	},
11208 	{
11209 		"calls: callee using two args",
11210 		.insns = {
11211 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11212 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
11213 				    offsetof(struct __sk_buff, len)),
11214 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
11215 				    offsetof(struct __sk_buff, len)),
11216 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11217 			BPF_EXIT_INSN(),
11218 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
11219 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
11220 			BPF_EXIT_INSN(),
11221 		},
11222 		.errstr_unpriv = "allowed for root only",
11223 		.result_unpriv = REJECT,
11224 		.result = ACCEPT,
11225 		.retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN,
11226 	},
11227 	{
11228 		"calls: callee changing pkt pointers",
11229 		.insns = {
11230 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
11231 				    offsetof(struct xdp_md, data)),
11232 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
11233 				    offsetof(struct xdp_md, data_end)),
11234 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
11235 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
11236 			BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
11237 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11238 			/* clear_all_pkt_pointers() has to walk all frames
11239 			 * to make sure that pkt pointers in the caller
11240 			 * are cleared when callee is calling a helper that
11241 			 * adjusts packet size
11242 			 */
11243 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
11244 			BPF_MOV32_IMM(BPF_REG_0, 0),
11245 			BPF_EXIT_INSN(),
11246 			BPF_MOV64_IMM(BPF_REG_2, 0),
11247 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11248 				     BPF_FUNC_xdp_adjust_head),
11249 			BPF_EXIT_INSN(),
11250 		},
11251 		.result = REJECT,
11252 		.errstr = "R6 invalid mem access 'inv'",
11253 		.prog_type = BPF_PROG_TYPE_XDP,
11254 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11255 	},
11256 	{
11257 		"calls: two calls with args",
11258 		.insns = {
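			/* main prog */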
11259 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11260 			BPF_EXIT_INSN(),
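			/* subprog 1: calls subprog 2 twice and adds the results */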
11261 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11262 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
11263 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
11264 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11265 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11266 			BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
11267 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
11268 			BPF_EXIT_INSN(),
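			/* subprog 2: returns skb->len */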
11269 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11270 				    offsetof(struct __sk_buff, len)),
11271 			BPF_EXIT_INSN(),
11272 		},
11273 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
11274 		.result = ACCEPT,
11275 		.retval = TEST_DATA_LEN + TEST_DATA_LEN,
11276 	},
11277 	{
11278 		"calls: calls with stack arith",
11279 		.insns = {
11280 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11281 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
11282 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11283 			BPF_EXIT_INSN(),
11284 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
11285 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11286 			BPF_EXIT_INSN(),
11287 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
11288 			BPF_MOV64_IMM(BPF_REG_0, 42),
11289 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
11290 			BPF_EXIT_INSN(),
11291 		},
11292 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
11293 		.result = ACCEPT,
11294 		.retval = 42,
11295 	},
11296 	{
11297 		"calls: calls with misaligned stack access",
11298 		.insns = {
11299 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11300 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
11301 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11302 			BPF_EXIT_INSN(),
11303 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
11304 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11305 			BPF_EXIT_INSN(),
11306 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
11307 			BPF_MOV64_IMM(BPF_REG_0, 42),
11308 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
11309 			BPF_EXIT_INSN(),
11310 		},
11311 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
11312 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
11313 		.errstr = "misaligned stack access",
11314 		.result = REJECT,
11315 	},
11316 	{
11317 		"calls: calls control flow, jump test",
11318 		.insns = {
11319 			BPF_MOV64_IMM(BPF_REG_0, 42),
11320 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11321 			BPF_MOV64_IMM(BPF_REG_0, 43),
11322 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11323 			BPF_JMP_IMM(BPF_JA, 0, 0, -3),
11324 			BPF_EXIT_INSN(),
11325 		},
11326 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
11327 		.result = ACCEPT,
11328 		.retval = 43,
11329 	},
11330 	{
11331 		"calls: calls control flow, jump test 2",
11332 		.insns = {
11333 			BPF_MOV64_IMM(BPF_REG_0, 42),
11334 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11335 			BPF_MOV64_IMM(BPF_REG_0, 43),
11336 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11337 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
11338 			BPF_EXIT_INSN(),
11339 		},
11340 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
11341 		.errstr = "jump out of range from insn 1 to 4",
11342 		.result = REJECT,
11343 	},
11344 	{
11345 		"calls: two calls with bad jump",
11346 		.insns = {
11347 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11348 			BPF_EXIT_INSN(),
11349 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11350 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
11351 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
11352 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11353 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11354 			BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
11355 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
11356 			BPF_EXIT_INSN(),
11357 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11358 				    offsetof(struct __sk_buff, len)),
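			/* insn 11 below jumps back to insn 9, which belongs to
			 * the previous subprog
			 */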
11359 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
11360 			BPF_EXIT_INSN(),
11361 		},
11362 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11363 		.errstr = "jump out of range from insn 11 to 9",
11364 		.result = REJECT,
11365 	},
11366 	{
11367 		"calls: recursive call. test1",
11368 		.insns = {
11369 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11370 			BPF_EXIT_INSN(),
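			/* subprog calls itself: imm of -1 targets insn 2 again */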
11371 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
11372 			BPF_EXIT_INSN(),
11373 		},
11374 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11375 		.errstr = "back-edge",
11376 		.result = REJECT,
11377 	},
11378 	{
11379 		"calls: recursive call. test2",
11380 		.insns = {
11381 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11382 			BPF_EXIT_INSN(),
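			/* subprog calls back to insn 0, i.e. into the main prog */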
11383 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
11384 			BPF_EXIT_INSN(),
11385 		},
11386 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11387 		.errstr = "back-edge",
11388 		.result = REJECT,
11389 	},
11390 	{
11391 		"calls: unreachable code",
11392 		.insns = {
11393 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11394 			BPF_EXIT_INSN(),
11395 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11396 			BPF_EXIT_INSN(),
11397 			BPF_MOV64_IMM(BPF_REG_0, 0),
11398 			BPF_EXIT_INSN(),
11399 			BPF_MOV64_IMM(BPF_REG_0, 0),
11400 			BPF_EXIT_INSN(),
11401 		},
11402 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11403 		.errstr = "unreachable insn 6",
11404 		.result = REJECT,
11405 	},
11406 	{
11407 		"calls: invalid call",
11408 		.insns = {
11409 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11410 			BPF_EXIT_INSN(),
11411 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
11412 			BPF_EXIT_INSN(),
11413 		},
11414 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11415 		.errstr = "invalid destination",
11416 		.result = REJECT,
11417 	},
11418 	{
11419 		"calls: invalid call 2",
11420 		.insns = {
11421 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11422 			BPF_EXIT_INSN(),
11423 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
11424 			BPF_EXIT_INSN(),
11425 		},
11426 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11427 		.errstr = "invalid destination",
11428 		.result = REJECT,
11429 	},
11430 	{
11431 		"calls: jumping across function bodies. test1",
11432 		.insns = {
11433 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11434 			BPF_MOV64_IMM(BPF_REG_0, 0),
11435 			BPF_EXIT_INSN(),
11436 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
11437 			BPF_EXIT_INSN(),
11438 		},
11439 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11440 		.errstr = "jump out of range",
11441 		.result = REJECT,
11442 	},
11443 	{
11444 		"calls: jumping across function bodies. test2",
11445 		.insns = {
11446 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
11447 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11448 			BPF_MOV64_IMM(BPF_REG_0, 0),
11449 			BPF_EXIT_INSN(),
11450 			BPF_EXIT_INSN(),
11451 		},
11452 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11453 		.errstr = "jump out of range",
11454 		.result = REJECT,
11455 	},
11456 	{
11457 		"calls: call without exit",
11458 		.insns = {
11459 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11460 			BPF_EXIT_INSN(),
11461 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11462 			BPF_EXIT_INSN(),
11463 			BPF_MOV64_IMM(BPF_REG_0, 0),
11464 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
11465 		},
11466 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11467 		.errstr = "not an exit",
11468 		.result = REJECT,
11469 	},
11470 	{
11471 		"calls: call into middle of ld_imm64",
11472 		.insns = {
11473 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11474 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11475 			BPF_MOV64_IMM(BPF_REG_0, 0),
11476 			BPF_EXIT_INSN(),
11477 			BPF_LD_IMM64(BPF_REG_0, 0),
11478 			BPF_EXIT_INSN(),
11479 		},
11480 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11481 		.errstr = "last insn",
11482 		.result = REJECT,
11483 	},
11484 	{
11485 		"calls: call into middle of other call",
11486 		.insns = {
11487 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11488 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11489 			BPF_MOV64_IMM(BPF_REG_0, 0),
11490 			BPF_EXIT_INSN(),
11491 			BPF_MOV64_IMM(BPF_REG_0, 0),
11492 			BPF_MOV64_IMM(BPF_REG_0, 0),
11493 			BPF_EXIT_INSN(),
11494 		},
11495 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11496 		.errstr = "last insn",
11497 		.result = REJECT,
11498 	},
11499 	{
11500 		"calls: ld_abs with changing ctx data in callee",
11501 		.insns = {
11502 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11503 			BPF_LD_ABS(BPF_B, 0),
11504 			BPF_LD_ABS(BPF_H, 0),
11505 			BPF_LD_ABS(BPF_W, 0),
11506 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
11507 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
11508 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
11509 			BPF_LD_ABS(BPF_B, 0),
11510 			BPF_LD_ABS(BPF_H, 0),
11511 			BPF_LD_ABS(BPF_W, 0),
11512 			BPF_EXIT_INSN(),
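			/* subprog 1: bpf_skb_vlan_push() rewrites packet data,
			 * which may not be mixed with the LD_ABS uses above
			 */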
11513 			BPF_MOV64_IMM(BPF_REG_2, 1),
11514 			BPF_MOV64_IMM(BPF_REG_3, 2),
11515 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11516 				     BPF_FUNC_skb_vlan_push),
11517 			BPF_EXIT_INSN(),
11518 		},
11519 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
11520 		.errstr = "BPF_LD_[ABS|IND] instructions cannot be mixed",
11521 		.result = REJECT,
11522 	},
11523 	{
11524 		"calls: two calls with bad fallthrough",
11525 		.insns = {
11526 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11527 			BPF_EXIT_INSN(),
11528 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11529 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
11530 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
11531 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11532 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11533 			BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
11534 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
11535 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
11536 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11537 				    offsetof(struct __sk_buff, len)),
11538 			BPF_EXIT_INSN(),
11539 		},
11540 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11541 		.errstr = "not an exit",
11542 		.result = REJECT,
11543 	},
11544 	{
11545 		"calls: two calls with stack read",
11546 		.insns = {
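			/* main prog */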
11547 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11548 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11549 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11550 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11551 			BPF_EXIT_INSN(),
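			/* subprog 1 */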
11552 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11553 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
11554 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
11555 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11556 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11557 			BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
11558 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
11559 			BPF_EXIT_INSN(),
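			/* subprog 2: reads the caller's stack slot through r1 */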
11560 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
11561 			BPF_EXIT_INSN(),
11562 		},
11563 		.prog_type = BPF_PROG_TYPE_XDP,
11564 		.result = ACCEPT,
11565 	},
11566 	{
11567 		"calls: two calls with stack write",
11568 		.insns = {
11569 			/* main prog */
11570 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11571 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11572 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11573 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11574 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11575 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11576 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
11577 			BPF_EXIT_INSN(),
11578 
11579 			/* subprog 1 */
11580 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11581 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11582 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
11583 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
11584 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11585 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11586 			BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
11587 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
11588 			/* write into stack frame of main prog */
11589 			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
11590 			BPF_EXIT_INSN(),
11591 
11592 			/* subprog 2 */
11593 			/* read from stack frame of main prog */
11594 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
11595 			BPF_EXIT_INSN(),
11596 		},
11597 		.prog_type = BPF_PROG_TYPE_XDP,
11598 		.result = ACCEPT,
11599 	},
11600 	{
11601 		"calls: stack overflow using two frames (pre-call access)",
11602 		.insns = {
11603 			/* prog 1 */
11604 			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11605 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
11606 			BPF_EXIT_INSN(),
11607 
11608 			/* prog 2 */
11609 			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11610 			BPF_MOV64_IMM(BPF_REG_0, 0),
11611 			BPF_EXIT_INSN(),
11612 		},
11613 		.prog_type = BPF_PROG_TYPE_XDP,
11614 		.errstr = "combined stack size",
11615 		.result = REJECT,
11616 	},
11617 	{
11618 		"calls: stack overflow using two frames (post-call access)",
11619 		.insns = {
11620 			/* prog 1 */
11621 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
11622 			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11623 			BPF_EXIT_INSN(),
11624 
11625 			/* prog 2 */
11626 			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11627 			BPF_MOV64_IMM(BPF_REG_0, 0),
11628 			BPF_EXIT_INSN(),
11629 		},
11630 		.prog_type = BPF_PROG_TYPE_XDP,
11631 		.errstr = "combined stack size",
11632 		.result = REJECT,
11633 	},
11634 	{
11635 		"calls: stack depth check using three frames. test1",
11636 		.insns = {
11637 			/* main */
11638 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
11639 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
11640 			BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
11641 			BPF_MOV64_IMM(BPF_REG_0, 0),
11642 			BPF_EXIT_INSN(),
11643 			/* A */
11644 			BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
11645 			BPF_EXIT_INSN(),
11646 			/* B */
11647 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
11648 			BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
11649 			BPF_EXIT_INSN(),
11650 		},
11651 		.prog_type = BPF_PROG_TYPE_XDP,
11652 		/* stack_main=32, stack_A=256, stack_B=64;
11653 		 * deepest call chains: main+A = 288 and main+B+A = 352, both < 512
11654 		 */
11655 		.result = ACCEPT,
11656 	},
11657 	{
11658 		"calls: stack depth check using three frames. test2",
11659 		.insns = {
11660 			/* main */
11661 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
11662 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
11663 			BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
11664 			BPF_MOV64_IMM(BPF_REG_0, 0),
11665 			BPF_EXIT_INSN(),
11666 			/* A */
11667 			BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
11668 			BPF_EXIT_INSN(),
11669 			/* B */
11670 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
11671 			BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
11672 			BPF_EXIT_INSN(),
11673 		},
11674 		.prog_type = BPF_PROG_TYPE_XDP,
11675 		/* stack_main=32, stack_A=64, stack_B=256;
11676 		 * deepest call chains: main+A = 96 and main+B+A = 352, both < 512
11677 		 */
11678 		.result = ACCEPT,
11679 	},
11680 	{
11681 		"calls: stack depth check using three frames. test3",
11682 		.insns = {
11683 			/* main */
11684 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11685 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
11686 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11687 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
11688 			BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
11689 			BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
11690 			BPF_MOV64_IMM(BPF_REG_0, 0),
11691 			BPF_EXIT_INSN(),
11692 			/* A */
11693 			BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
11694 			BPF_EXIT_INSN(),
11695 			BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
11696 			BPF_JMP_IMM(BPF_JA, 0, 0, -3),
11697 			/* B */
11698 			BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
11699 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
11700 			BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
11701 			BPF_EXIT_INSN(),
11702 		},
11703 		.prog_type = BPF_PROG_TYPE_XDP,
11704 		/* stack_main=64, stack_A=224, stack_B=256;
11705 		 * the deepest chain main+B+A = 544 exceeds the 512 byte limit
11706 		 */
11707 		.errstr = "combined stack",
11708 		.result = REJECT,
11709 	},
11710 	{
11711 		"calls: stack depth check using three frames. test4",
11712 		/* void main(void) {
11713 		 *   func1(0);
11714 		 *   func1(1);
11715 		 *   func2(1);
11716 		 * }
11717 		 * void func1(int alloc_or_recurse) {
11718 		 *   if (alloc_or_recurse) {
11719 		 *     frame_pointer[-300] = 1;
11720 		 *   } else {
11721 		 *     func2(alloc_or_recurse);
11722 		 *   }
11723 		 * }
11724 		 * void func2(int alloc_or_recurse) {
11725 		 *   if (alloc_or_recurse) {
11726 		 *     frame_pointer[-300] = 1;
11727 		 *   }
11728 		 * }
11729 		 */
11730 		.insns = {
11731 			/* main */
11732 			BPF_MOV64_IMM(BPF_REG_1, 0),
11733 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
11734 			BPF_MOV64_IMM(BPF_REG_1, 1),
11735 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
11736 			BPF_MOV64_IMM(BPF_REG_1, 1),
11737 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
11738 			BPF_MOV64_IMM(BPF_REG_0, 0),
11739 			BPF_EXIT_INSN(),
11740 			/* A */
11741 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
11742 			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11743 			BPF_EXIT_INSN(),
11744 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
11745 			BPF_EXIT_INSN(),
11746 			/* B */
11747 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
11748 			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11749 			BPF_EXIT_INSN(),
11750 		},
11751 		.prog_type = BPF_PROG_TYPE_XDP,
11752 		.result = REJECT,
11753 		.errstr = "combined stack",
11754 	},
11755 	{
11756 		"calls: stack depth check using three frames. test5",
11757 		.insns = {
11758 			/* main */
11759 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
11760 			BPF_EXIT_INSN(),
11761 			/* A */
11762 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
11763 			BPF_EXIT_INSN(),
11764 			/* B */
11765 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
11766 			BPF_EXIT_INSN(),
11767 			/* C */
11768 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
11769 			BPF_EXIT_INSN(),
11770 			/* D */
11771 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
11772 			BPF_EXIT_INSN(),
11773 			/* E */
11774 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
11775 			BPF_EXIT_INSN(),
11776 			/* F */
11777 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
11778 			BPF_EXIT_INSN(),
11779 			/* G */
11780 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
11781 			BPF_EXIT_INSN(),
11782 			/* H */
11783 			BPF_MOV64_IMM(BPF_REG_0, 0),
11784 			BPF_EXIT_INSN(),
11785 		},
11786 		.prog_type = BPF_PROG_TYPE_XDP,
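		/* main prog plus the chained A..H calls makes nine frames,
		 * which is above the verifier's eight-frame call depth limit
		 */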
11787 		.errstr = "call stack",
11788 		.result = REJECT,
11789 	},
11790 	{
11791 		"calls: spill into caller stack frame",
11792 		.insns = {
11793 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11794 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11795 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11796 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11797 			BPF_EXIT_INSN(),
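			/* subprog 1: r1 points into the caller's stack; spilling
			 * r1 (itself a stack pointer) through it is rejected
			 */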
11798 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
11799 			BPF_MOV64_IMM(BPF_REG_0, 0),
11800 			BPF_EXIT_INSN(),
11801 		},
11802 		.prog_type = BPF_PROG_TYPE_XDP,
11803 		.errstr = "cannot spill",
11804 		.result = REJECT,
11805 	},
11806 	{
11807 		"calls: write into caller stack frame",
11808 		.insns = {
11809 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11810 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11811 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11812 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11813 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
11814 			BPF_EXIT_INSN(),
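			/* subprog 1: stores 42 through the caller's fp-8 pointer,
			 * which the main prog then loads and returns
			 */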
11815 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
11816 			BPF_MOV64_IMM(BPF_REG_0, 0),
11817 			BPF_EXIT_INSN(),
11818 		},
11819 		.prog_type = BPF_PROG_TYPE_XDP,
11820 		.result = ACCEPT,
11821 		.retval = 42,
11822 	},
11823 	{
11824 		"calls: write into callee stack frame",
11825 		.insns = {
11826 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11827 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
11828 			BPF_EXIT_INSN(),
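			/* subprog 1: returns a pointer into its own stack frame */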
11829 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
11830 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
11831 			BPF_EXIT_INSN(),
11832 		},
11833 		.prog_type = BPF_PROG_TYPE_XDP,
11834 		.errstr = "cannot return stack pointer",
11835 		.result = REJECT,
11836 	},
11837 	{
11838 		"calls: two calls with stack write and void return",
11839 		.insns = {
11840 			/* main prog */
11841 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11842 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11843 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11844 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11845 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11846 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11847 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
11848 			BPF_EXIT_INSN(),
11849 
11850 			/* subprog 1 */
11851 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11852 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11853 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11854 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
11855 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11856 			BPF_EXIT_INSN(),
11857 
11858 			/* subprog 2 */
11859 			/* write into stack frame of main prog */
11860 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
11861 			BPF_EXIT_INSN(), /* void return */
11862 		},
11863 		.prog_type = BPF_PROG_TYPE_XDP,
11864 		.result = ACCEPT,
11865 	},
11866 	{
11867 		"calls: ambiguous return value",
11868 		.insns = {
11869 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11870 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
11871 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
11872 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11873 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11874 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
11875 			BPF_EXIT_INSN(),
11876 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
11877 			BPF_MOV64_IMM(BPF_REG_0, 0),
11878 			BPF_EXIT_INSN(),
11879 		},
11880 		.errstr_unpriv = "allowed for root only",
11881 		.result_unpriv = REJECT,
11882 		.errstr = "R0 !read_ok",
11883 		.result = REJECT,
11884 	},
11885 	{
11886 		"calls: two calls that return map_value",
11887 		.insns = {
11888 			/* main prog */
11889 			/* pass fp-16, fp-8 into a function */
11890 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11891 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11892 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11893 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11894 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
11895 
11896 			/* fetch map_value_ptr from the stack of this function */
11897 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
11898 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
11899 			/* write into map value */
11900 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11901 			/* fetch second map_value_ptr from the stack */
11902 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
11903 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
11904 			/* write into map value */
11905 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11906 			BPF_MOV64_IMM(BPF_REG_0, 0),
11907 			BPF_EXIT_INSN(),
11908 
11909 			/* subprog 1 */
11910 			/* call 3rd function twice */
11911 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11912 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11913 			/* first time with fp-8 */
11914 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11915 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
11916 			/* second time with fp-16 */
11917 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11918 			BPF_EXIT_INSN(),
11919 
11920 			/* subprog 2 */
11921 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11922 			/* lookup from map */
11923 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11924 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11925 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11926 			BPF_LD_MAP_FD(BPF_REG_1, 0),
11927 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11928 				     BPF_FUNC_map_lookup_elem),
11929 			/* write map_value_ptr into stack frame of main prog */
11930 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11931 			BPF_MOV64_IMM(BPF_REG_0, 0),
11932 			BPF_EXIT_INSN(), /* return 0 */
11933 		},
11934 		.prog_type = BPF_PROG_TYPE_XDP,
11935 		.fixup_map_hash_8b = { 23 },
11936 		.result = ACCEPT,
11937 	},
11938 	{
11939 		"calls: two calls that return map_value with bool condition",
11940 		.insns = {
11941 			/* main prog */
11942 			/* pass fp-16, fp-8 into a function */
11943 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11944 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11945 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11946 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11947 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11948 			BPF_MOV64_IMM(BPF_REG_0, 0),
11949 			BPF_EXIT_INSN(),
11950 
11951 			/* subprog 1 */
11952 			/* call 3rd function twice */
11953 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11954 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11955 			/* first time with fp-8 */
11956 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
11957 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
11958 			/* fetch map_value_ptr from the stack of this function */
11959 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
11960 			/* write into map value */
11961 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11962 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
11963 			/* second time with fp-16 */
11964 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11965 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
11966 			/* fetch second map_value_ptr from the stack */
11967 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
11968 			/* write into map value */
11969 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11970 			BPF_EXIT_INSN(),
11971 
11972 			/* subprog 2 */
11973 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11974 			/* lookup from map */
11975 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11976 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11977 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11978 			BPF_LD_MAP_FD(BPF_REG_1, 0),
11979 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11980 				     BPF_FUNC_map_lookup_elem),
11981 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11982 			BPF_MOV64_IMM(BPF_REG_0, 0),
11983 			BPF_EXIT_INSN(), /* return 0 */
11984 			/* write map_value_ptr into stack frame of main prog */
11985 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11986 			BPF_MOV64_IMM(BPF_REG_0, 1),
11987 			BPF_EXIT_INSN(), /* return 1 */
11988 		},
11989 		.prog_type = BPF_PROG_TYPE_XDP,
11990 		.fixup_map_hash_8b = { 23 },
11991 		.result = ACCEPT,
11992 	},
11993 	{
11994 		"calls: two calls that return map_value with incorrect bool check",
11995 		.insns = {
11996 			/* main prog */
11997 			/* pass fp-16, fp-8 into a function */
11998 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11999 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
12000 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12001 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
12002 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
12003 			BPF_MOV64_IMM(BPF_REG_0, 0),
12004 			BPF_EXIT_INSN(),
12005 
12006 			/* subprog 1 */
12007 			/* call 3rd function twice */
12008 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12009 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
12010 			/* first time with fp-8 */
12011 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
12012 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
12013 			/* fetch map_value_ptr from the stack of this function */
12014 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
12015 			/* write into map value */
12016 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12017 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
12018 			/* second time with fp-16 */
12019 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
12020 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
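			/* wrong polarity: fp-16 is read below when the callee
			 * returned 0, i.e. when no pointer was written there
			 */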
12021 			/* fetch second map_value_ptr from the stack */
12022 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
12023 			/* write into map value */
12024 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12025 			BPF_EXIT_INSN(),
12026 
12027 			/* subprog 2 */
12028 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12029 			/* lookup from map */
12030 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12031 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12032 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12033 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12034 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12035 				     BPF_FUNC_map_lookup_elem),
12036 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12037 			BPF_MOV64_IMM(BPF_REG_0, 0),
12038 			BPF_EXIT_INSN(), /* return 0 */
12039 			/* write map_value_ptr into stack frame of main prog */
12040 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
12041 			BPF_MOV64_IMM(BPF_REG_0, 1),
12042 			BPF_EXIT_INSN(), /* return 1 */
12043 		},
12044 		.prog_type = BPF_PROG_TYPE_XDP,
12045 		.fixup_map_hash_8b = { 23 },
12046 		.result = REJECT,
12047 		.errstr = "invalid read from stack off -16+0 size 8",
12048 	},
12049 	{
12050 		"calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
12051 		.insns = {
12052 			/* main prog */
12053 			/* pass fp-16, fp-8 into a function */
12054 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
12055 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
12056 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12057 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
12058 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
12059 			BPF_MOV64_IMM(BPF_REG_0, 0),
12060 			BPF_EXIT_INSN(),
12061 
12062 			/* subprog 1 */
12063 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12064 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
12065 			/* 1st lookup from map */
12066 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12067 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12068 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12069 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12070 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12071 				     BPF_FUNC_map_lookup_elem),
12072 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12073 			BPF_MOV64_IMM(BPF_REG_8, 0),
12074 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12075 			/* write map_value_ptr into stack frame of main prog at fp-8 */
12076 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
12077 			BPF_MOV64_IMM(BPF_REG_8, 1),
12078 
12079 			/* 2nd lookup from map */
12080 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
12081 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12082 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12083 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
12084 				     BPF_FUNC_map_lookup_elem),
12085 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12086 			BPF_MOV64_IMM(BPF_REG_9, 0),
12087 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12088 			/* write map_value_ptr into stack frame of main prog at fp-16 */
12089 			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
12090 			BPF_MOV64_IMM(BPF_REG_9, 1),
12091 
12092 			/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
12093 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
12094 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
12095 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
12096 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
12097 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),  /* 34 */
12098 			BPF_EXIT_INSN(),
12099 
12100 			/* subprog 2 */
12101 			/* if arg2 == 1 do *arg1 = 0 */
12102 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
12103 			/* fetch map_value_ptr from the stack of this function */
12104 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
12105 			/* write into map value */
12106 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12107 
12108 			/* if arg4 == 1 do *arg3 = 0 */
12109 			BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
12110 			/* fetch map_value_ptr from the stack of this function */
12111 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
12112 			/* write into map value */
12113 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
12114 			BPF_EXIT_INSN(),
12115 		},
12116 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12117 		.fixup_map_hash_8b = { 12, 22 },
12118 		.result = REJECT,
12119 		.errstr = "invalid access to map value, value_size=8 off=2 size=8",
12120 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12121 	},
12122 	{
12123 		"calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
12124 		.insns = {
12125 			/* main prog */
12126 			/* pass fp-16, fp-8 into a function */
12127 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
12128 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
12129 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12130 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
12131 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
12132 			BPF_MOV64_IMM(BPF_REG_0, 0),
12133 			BPF_EXIT_INSN(),
12134 
12135 			/* subprog 1 */
12136 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12137 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
12138 			/* 1st lookup from map */
12139 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12140 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12141 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12142 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12143 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12144 				     BPF_FUNC_map_lookup_elem),
12145 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12146 			BPF_MOV64_IMM(BPF_REG_8, 0),
12147 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12148 			/* write map_value_ptr into stack frame of main prog at fp-8 */
12149 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
12150 			BPF_MOV64_IMM(BPF_REG_8, 1),
12151 
12152 			/* 2nd lookup from map */
12153 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
12154 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12155 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12156 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
12157 				     BPF_FUNC_map_lookup_elem),
12158 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12159 			BPF_MOV64_IMM(BPF_REG_9, 0),
12160 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12161 			/* write map_value_ptr into stack frame of main prog at fp-16 */
12162 			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
12163 			BPF_MOV64_IMM(BPF_REG_9, 1),
12164 
12165 			/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
12166 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
12167 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
12168 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
12169 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
12170 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),  /* 34 */
12171 			BPF_EXIT_INSN(),
12172 
12173 			/* subprog 2 */
12174 			/* if arg2 == 1 do *arg1 = 0 */
12175 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
12176 			/* fetch map_value_ptr from the stack of this function */
12177 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
12178 			/* write into map value */
12179 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12180 
12181 			/* if arg4 == 1 do *arg3 = 0 */
12182 			BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
12183 			/* fetch map_value_ptr from the stack of this function */
12184 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
12185 			/* write into map value */
12186 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12187 			BPF_EXIT_INSN(),
12188 		},
12189 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12190 		.fixup_map_hash_8b = { 12, 22 },
12191 		.result = ACCEPT,
12192 	},
12193 	{
12194 		"calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
12195 		.insns = {
12196 			/* main prog */
12197 			/* pass fp-16, fp-8 into a function */
12198 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
12199 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
12200 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12201 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
12202 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
12203 			BPF_MOV64_IMM(BPF_REG_0, 0),
12204 			BPF_EXIT_INSN(),
12205 
12206 			/* subprog 1 */
12207 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12208 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
12209 			/* 1st lookup from map */
12210 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
12211 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12212 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
12213 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12214 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12215 				     BPF_FUNC_map_lookup_elem),
12216 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12217 			BPF_MOV64_IMM(BPF_REG_8, 0),
12218 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12219 			/* write map_value_ptr into stack frame of main prog at fp-8 */
12220 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
12221 			BPF_MOV64_IMM(BPF_REG_8, 1),
12222 
12223 			/* 2nd lookup from map */
12224 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12225 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
12226 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12227 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12228 				     BPF_FUNC_map_lookup_elem),
12229 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12230 			BPF_MOV64_IMM(BPF_REG_9, 0), /* 26 */
12231 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12232 			/* write map_value_ptr into stack frame of main prog at fp-16 */
12233 			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
12234 			BPF_MOV64_IMM(BPF_REG_9, 1),
12235 
12236 			/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
12237 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
12238 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
12239 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
12240 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
12241 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), // 34
12242 			BPF_JMP_IMM(BPF_JA, 0, 0, -30),
12243 
12244 			/* subprog 2 */
12245 			/* if arg2 == 1 do *arg1 = 0 */
12246 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
12247 			/* fetch map_value_ptr from the stack of this function */
12248 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
12249 			/* write into map value */
12250 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12251 
12252 			/* if arg4 == 1 do *arg3 = 0 */
12253 			BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
12254 			/* fetch map_value_ptr from the stack of this function */
12255 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
12256 			/* write into map value */
12257 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
12258 			BPF_JMP_IMM(BPF_JA, 0, 0, -8),
12259 		},
12260 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12261 		.fixup_map_hash_8b = { 12, 22 },
12262 		.result = REJECT,
12263 		.errstr = "invalid access to map value, value_size=8 off=2 size=8",
12264 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12265 	},
12266 	{
12267 		"calls: two calls that receive map_value_ptr_or_null via arg. test1",
12268 		.insns = {
12269 			/* main prog */
12270 			/* pass fp-16, fp-8 into a function */
12271 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
12272 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
12273 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12274 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
12275 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
12276 			BPF_MOV64_IMM(BPF_REG_0, 0),
12277 			BPF_EXIT_INSN(),
12278 
12279 			/* subprog 1 */
12280 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12281 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
12282 			/* 1st lookup from map */
12283 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12284 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12285 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12286 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12287 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12288 				     BPF_FUNC_map_lookup_elem),
12289 			/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
12290 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
12291 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12292 			BPF_MOV64_IMM(BPF_REG_8, 0),
12293 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12294 			BPF_MOV64_IMM(BPF_REG_8, 1),
12295 
12296 			/* 2nd lookup from map */
12297 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12298 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12299 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12300 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12301 				     BPF_FUNC_map_lookup_elem),
12302 			/* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
12303 			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
12304 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12305 			BPF_MOV64_IMM(BPF_REG_9, 0),
12306 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12307 			BPF_MOV64_IMM(BPF_REG_9, 1),
12308 
12309 			/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
12310 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
12311 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
12312 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
12313 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
12314 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12315 			BPF_EXIT_INSN(),
12316 
12317 			/* subprog 2 */
12318 			/* if arg2 == 1 do *arg1 = 0 */
12319 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
12320 			/* fetch map_value_ptr from the stack of this function */
12321 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
12322 			/* write into map value */
12323 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12324 
12325 			/* if arg4 == 1 do *arg3 = 0 */
12326 			BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
12327 			/* fetch map_value_ptr from the stack of this function */
12328 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
12329 			/* write into map value */
12330 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12331 			BPF_EXIT_INSN(),
12332 		},
12333 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12334 		.fixup_map_hash_8b = { 12, 22 },
12335 		.result = ACCEPT,
12336 	},
12337 	{
12338 		"calls: two calls that receive map_value_ptr_or_null via arg. test2",
12339 		.insns = {
12340 			/* main prog */
12341 			/* pass fp-16, fp-8 into a function */
12342 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
12343 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
12344 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12345 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
12346 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
12347 			BPF_MOV64_IMM(BPF_REG_0, 0),
12348 			BPF_EXIT_INSN(),
12349 
12350 			/* subprog 1 */
12351 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12352 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
12353 			/* 1st lookup from map */
12354 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12355 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12356 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12357 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12358 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12359 				     BPF_FUNC_map_lookup_elem),
12360 			/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
12361 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
12362 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12363 			BPF_MOV64_IMM(BPF_REG_8, 0),
12364 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12365 			BPF_MOV64_IMM(BPF_REG_8, 1),
12366 
12367 			/* 2nd lookup from map */
12368 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12369 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12370 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12371 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12372 				     BPF_FUNC_map_lookup_elem),
12373 			/* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
12374 			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
12375 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12376 			BPF_MOV64_IMM(BPF_REG_9, 0),
12377 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12378 			BPF_MOV64_IMM(BPF_REG_9, 1),
12379 
12380 			/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
12381 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
12382 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
12383 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
12384 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
12385 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12386 			BPF_EXIT_INSN(),
12387 
12388 			/* subprog 2 */
12389 			/* if arg2 == 1 do *arg1 = 0 */
12390 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
12391 			/* fetch map_value_ptr from the stack of this function */
12392 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
12393 			/* write into map value */
12394 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12395 
12396 			/* if arg4 == 0 do *arg3 = 0 */
12397 			BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2),
12398 			/* fetch map_value_ptr from the stack of this function */
12399 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
12400 			/* write into map value */
12401 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12402 			BPF_EXIT_INSN(),
12403 		},
12404 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12405 		.fixup_map_hash_8b = { 12, 22 },
12406 		.result = REJECT,
12407 		.errstr = "R0 invalid mem access 'inv'",
12408 	},
12409 	{
12410 		"calls: pkt_ptr spill into caller stack",
12411 		.insns = {
12412 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12413 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12414 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12415 			BPF_EXIT_INSN(),
12416 
12417 			/* subprog 1 */
12418 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12419 				    offsetof(struct __sk_buff, data)),
12420 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12421 				    offsetof(struct __sk_buff, data_end)),
12422 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12423 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12424 			/* spill unchecked pkt_ptr into stack of caller */
12425 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12426 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
12427 			/* now the pkt range is verified, read pkt_ptr from stack */
12428 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
12429 			/* write 4 bytes into packet */
12430 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12431 			BPF_EXIT_INSN(),
12432 		},
12433 		.result = ACCEPT,
12434 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12435 		.retval = POINTER_VALUE,
12436 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12437 	},
12438 	{
12439 		"calls: pkt_ptr spill into caller stack 2",
12440 		.insns = {
12441 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12442 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12443 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12444 			/* Marking is still kept, but it is not safe in all cases. */
12445 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12446 			BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
12447 			BPF_EXIT_INSN(),
12448 
12449 			/* subprog 1 */
12450 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12451 				    offsetof(struct __sk_buff, data)),
12452 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12453 				    offsetof(struct __sk_buff, data_end)),
12454 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12455 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12456 			/* spill unchecked pkt_ptr into stack of caller */
12457 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12458 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
12459 			/* now the pkt range is verified, read pkt_ptr from stack */
12460 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
12461 			/* write 4 bytes into packet */
12462 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12463 			BPF_EXIT_INSN(),
12464 		},
12465 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12466 		.errstr = "invalid access to packet",
12467 		.result = REJECT,
12468 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12469 	},
12470 	{
12471 		"calls: pkt_ptr spill into caller stack 3",
12472 		.insns = {
12473 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12474 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12475 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
12476 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
12477 			/* Marking is still kept and safe here. */
12478 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12479 			BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
12480 			BPF_EXIT_INSN(),
12481 
12482 			/* subprog 1 */
12483 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12484 				    offsetof(struct __sk_buff, data)),
12485 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12486 				    offsetof(struct __sk_buff, data_end)),
12487 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12488 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12489 			/* spill unchecked pkt_ptr into stack of caller */
12490 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12491 			BPF_MOV64_IMM(BPF_REG_5, 0),
12492 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
12493 			BPF_MOV64_IMM(BPF_REG_5, 1),
12494 			/* now the pkt range is verified, read pkt_ptr from stack */
12495 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
12496 			/* write 4 bytes into packet */
12497 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12498 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12499 			BPF_EXIT_INSN(),
12500 		},
12501 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12502 		.result = ACCEPT,
12503 		.retval = 1,
12504 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12505 	},
12506 	{
12507 		"calls: pkt_ptr spill into caller stack 4",
12508 		.insns = {
12509 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12510 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12511 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
12512 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
12513 			/* Check that the marking was propagated. */
12514 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12515 			BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
12516 			BPF_EXIT_INSN(),
12517 
12518 			/* subprog 1 */
12519 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12520 				    offsetof(struct __sk_buff, data)),
12521 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12522 				    offsetof(struct __sk_buff, data_end)),
12523 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12524 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12525 			/* spill unchecked pkt_ptr into stack of caller */
12526 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12527 			BPF_MOV64_IMM(BPF_REG_5, 0),
12528 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
12529 			BPF_MOV64_IMM(BPF_REG_5, 1),
12530 			/* don't read back pkt_ptr from stack here */
12531 			/* write 4 bytes into packet */
12532 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12533 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12534 			BPF_EXIT_INSN(),
12535 		},
12536 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12537 		.result = ACCEPT,
12538 		.retval = 1,
12539 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12540 	},
12541 	{
12542 		"calls: pkt_ptr spill into caller stack 5",
12543 		.insns = {
12544 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12545 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12546 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
12547 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12548 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12549 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
12550 			BPF_EXIT_INSN(),
12551 
12552 			/* subprog 1 */
12553 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12554 				    offsetof(struct __sk_buff, data)),
12555 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12556 				    offsetof(struct __sk_buff, data_end)),
12557 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12558 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12559 			BPF_MOV64_IMM(BPF_REG_5, 0),
12560 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
12561 			/* spill checked pkt_ptr into stack of caller */
12562 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12563 			BPF_MOV64_IMM(BPF_REG_5, 1),
12564 			/* don't read back pkt_ptr from stack here */
12565 			/* write 4 bytes into packet */
12566 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12567 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12568 			BPF_EXIT_INSN(),
12569 		},
12570 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12571 		.errstr = "same insn cannot be used with different",
12572 		.result = REJECT,
12573 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12574 	},
12575 	{
12576 		"calls: pkt_ptr spill into caller stack 6",
12577 		.insns = {
12578 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12579 				    offsetof(struct __sk_buff, data_end)),
12580 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12581 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12582 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12583 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12584 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12585 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
12586 			BPF_EXIT_INSN(),
12587 
12588 			/* subprog 1 */
12589 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12590 				    offsetof(struct __sk_buff, data)),
12591 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12592 				    offsetof(struct __sk_buff, data_end)),
12593 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12594 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12595 			BPF_MOV64_IMM(BPF_REG_5, 0),
12596 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
12597 			/* spill checked pkt_ptr into stack of caller */
12598 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12599 			BPF_MOV64_IMM(BPF_REG_5, 1),
12600 			/* don't read back pkt_ptr from stack here */
12601 			/* write 4 bytes into packet */
12602 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12603 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12604 			BPF_EXIT_INSN(),
12605 		},
12606 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12607 		.errstr = "R4 invalid mem access",
12608 		.result = REJECT,
12609 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12610 	},
12611 	{
12612 		"calls: pkt_ptr spill into caller stack 7",
12613 		.insns = {
12614 			BPF_MOV64_IMM(BPF_REG_2, 0),
12615 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12616 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12617 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12618 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12619 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12620 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
12621 			BPF_EXIT_INSN(),
12622 
12623 			/* subprog 1 */
12624 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12625 				    offsetof(struct __sk_buff, data)),
12626 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12627 				    offsetof(struct __sk_buff, data_end)),
12628 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12629 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12630 			BPF_MOV64_IMM(BPF_REG_5, 0),
12631 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
12632 			/* spill checked pkt_ptr into stack of caller */
12633 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12634 			BPF_MOV64_IMM(BPF_REG_5, 1),
12635 			/* don't read back pkt_ptr from stack here */
12636 			/* write 4 bytes into packet */
12637 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12638 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12639 			BPF_EXIT_INSN(),
12640 		},
12641 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12642 		.errstr = "R4 invalid mem access",
12643 		.result = REJECT,
12644 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12645 	},
12646 	{
12647 		"calls: pkt_ptr spill into caller stack 8",
12648 		.insns = {
12649 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12650 				    offsetof(struct __sk_buff, data)),
12651 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12652 				    offsetof(struct __sk_buff, data_end)),
12653 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12654 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12655 			BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
12656 			BPF_EXIT_INSN(),
12657 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12658 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12659 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12660 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12661 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12662 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
12663 			BPF_EXIT_INSN(),
12664 
12665 			/* subprog 1 */
12666 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12667 				    offsetof(struct __sk_buff, data)),
12668 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12669 				    offsetof(struct __sk_buff, data_end)),
12670 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12671 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12672 			BPF_MOV64_IMM(BPF_REG_5, 0),
12673 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
12674 			/* spill checked pkt_ptr into stack of caller */
12675 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12676 			BPF_MOV64_IMM(BPF_REG_5, 1),
12677 			/* don't read back pkt_ptr from stack here */
12678 			/* write 4 bytes into packet */
12679 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12680 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12681 			BPF_EXIT_INSN(),
12682 		},
12683 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12684 		.result = ACCEPT,
12685 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12686 	},
12687 	{
12688 		"calls: pkt_ptr spill into caller stack 9",
12689 		.insns = {
12690 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12691 				    offsetof(struct __sk_buff, data)),
12692 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12693 				    offsetof(struct __sk_buff, data_end)),
12694 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12695 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12696 			BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
12697 			BPF_EXIT_INSN(),
12698 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12699 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12700 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12701 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12702 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12703 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
12704 			BPF_EXIT_INSN(),
12705 
12706 			/* subprog 1 */
12707 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12708 				    offsetof(struct __sk_buff, data)),
12709 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12710 				    offsetof(struct __sk_buff, data_end)),
12711 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12712 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12713 			BPF_MOV64_IMM(BPF_REG_5, 0),
12714 			/* spill unchecked pkt_ptr into stack of caller */
12715 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12716 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
12717 			BPF_MOV64_IMM(BPF_REG_5, 1),
12718 			/* don't read back pkt_ptr from stack here */
12719 			/* write 4 bytes into packet */
12720 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12721 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12722 			BPF_EXIT_INSN(),
12723 		},
12724 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12725 		.errstr = "invalid access to packet",
12726 		.result = REJECT,
12727 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12728 	},
12729 	{
12730 		"calls: caller stack init to zero or map_value_or_null",
12731 		.insns = {
12732 			BPF_MOV64_IMM(BPF_REG_0, 0),
12733 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
12734 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12735 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12736 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
12737 			/* fetch map_value_or_null or const_zero from stack */
12738 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
12739 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
12740 			/* store into map_value */
12741 			BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
12742 			BPF_EXIT_INSN(),
12743 
12744 			/* subprog 1 */
12745 			/* if (ctx == 0) return; */
12746 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
12747 			/* else bpf_map_lookup() and *(fp - 8) = r0 */
12748 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
12749 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12750 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12751 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12752 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12753 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12754 				     BPF_FUNC_map_lookup_elem),
12755 			/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
12756 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
12757 			BPF_EXIT_INSN(),
12758 		},
12759 		.fixup_map_hash_8b = { 13 },
12760 		.result = ACCEPT,
12761 		.prog_type = BPF_PROG_TYPE_XDP,
12762 	},
12763 	{
12764 		"calls: stack init to zero and pruning",
12765 		.insns = {
12766 			/* first make allocated_stack 16 byte */
12767 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
12768 			/* now fork the execution such that the false branch
12769 			 * of the JGT insn is verified second and skips the zero
12770 			 * init of the fp-8 stack slot. If stack liveness marking
12771 			 * is missing live_read marks from the map_lookup call
12772 			 * processing, then pruning will incorrectly assume
12773 			 * that the fp-8 stack slot was unused in the fall-through
12774 			 * branch and will accept the program incorrectly
12775 			 */
12776 			BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
12777 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12778 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
12779 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12780 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12781 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12782 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12783 				     BPF_FUNC_map_lookup_elem),
12784 			BPF_EXIT_INSN(),
12785 		},
12786 		.fixup_map_hash_48b = { 6 },
12787 		.errstr = "invalid indirect read from stack off -8+0 size 8",
12788 		.result = REJECT,
12789 		.prog_type = BPF_PROG_TYPE_XDP,
12790 	},
12791 	{
12792 		"calls: two calls returning different map pointers for lookup (hash, array)",
12793 		.insns = {
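			/* depending on r1, one of two subprogs is called; each returns a
			 * different map pointer in r0, and the shared lookup below must be
			 * valid for both maps (both have 48-byte values)
			 */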
12794 			/* main prog */
12795 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
12796 			BPF_CALL_REL(11),
12797 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12798 			BPF_CALL_REL(12),
12799 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
12800 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12801 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12802 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12803 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12804 				     BPF_FUNC_map_lookup_elem),
12805 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
12806 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
12807 				   offsetof(struct test_val, foo)),
12808 			BPF_MOV64_IMM(BPF_REG_0, 1),
12809 			BPF_EXIT_INSN(),
12810 			/* subprog 1 */
12811 			BPF_LD_MAP_FD(BPF_REG_0, 0),
12812 			BPF_EXIT_INSN(),
12813 			/* subprog 2 */
12814 			BPF_LD_MAP_FD(BPF_REG_0, 0),
12815 			BPF_EXIT_INSN(),
12816 		},
12817 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12818 		.fixup_map_hash_48b = { 13 },
12819 		.fixup_map_array_48b = { 16 },
12820 		.result = ACCEPT,
12821 		.retval = 1,
12822 	},
12823 	{
12824 		"calls: two calls returning different map pointers for lookup (hash, map in map)",
12825 		.insns = {
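			/* same pattern, but one subprog returns a pointer to a map-in-map;
			 * its lookup yields an inner map_ptr that the store below must not
			 * write through
			 */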
12826 			/* main prog */
12827 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
12828 			BPF_CALL_REL(11),
12829 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12830 			BPF_CALL_REL(12),
12831 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
12832 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12833 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12834 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12835 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12836 				     BPF_FUNC_map_lookup_elem),
12837 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
12838 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
12839 				   offsetof(struct test_val, foo)),
12840 			BPF_MOV64_IMM(BPF_REG_0, 1),
12841 			BPF_EXIT_INSN(),
12842 			/* subprog 1 */
12843 			BPF_LD_MAP_FD(BPF_REG_0, 0),
12844 			BPF_EXIT_INSN(),
12845 			/* subprog 2 */
12846 			BPF_LD_MAP_FD(BPF_REG_0, 0),
12847 			BPF_EXIT_INSN(),
12848 		},
12849 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12850 		.fixup_map_in_map = { 16 },
12851 		.fixup_map_array_48b = { 13 },
12852 		.result = REJECT,
12853 		.errstr = "R0 invalid mem access 'map_ptr'",
12854 	},
12855 	{
12856 		"cond: two branches returning different map pointers for lookup (tail, tail)",
12857 		.insns = {
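			/* pick one of two different prog_array fds for r2 based on
			 * skb->mark, then tail_call through it
			 */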
12858 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
12859 				    offsetof(struct __sk_buff, mark)),
12860 			BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 3),
12861 			BPF_LD_MAP_FD(BPF_REG_2, 0),
12862 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12863 			BPF_LD_MAP_FD(BPF_REG_2, 0),
12864 			BPF_MOV64_IMM(BPF_REG_3, 7),
12865 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12866 				     BPF_FUNC_tail_call),
12867 			BPF_MOV64_IMM(BPF_REG_0, 1),
12868 			BPF_EXIT_INSN(),
12869 		},
12870 		.fixup_prog1 = { 5 },
12871 		.fixup_prog2 = { 2 },
12872 		.result_unpriv = REJECT,
12873 		.errstr_unpriv = "tail_call abusing map_ptr",
12874 		.result = ACCEPT,
12875 		.retval = 42,
12876 	},
12877 	{
12878 		"cond: two branches returning same map pointers for lookup (tail, tail)",
12879 		.insns = {
12880 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
12881 				    offsetof(struct __sk_buff, mark)),
12882 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 3),
12883 			BPF_LD_MAP_FD(BPF_REG_2, 0),
12884 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12885 			BPF_LD_MAP_FD(BPF_REG_2, 0),
12886 			BPF_MOV64_IMM(BPF_REG_3, 7),
12887 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12888 				     BPF_FUNC_tail_call),
12889 			BPF_MOV64_IMM(BPF_REG_0, 1),
12890 			BPF_EXIT_INSN(),
12891 		},
12892 		.fixup_prog2 = { 2, 5 },
12893 		.result_unpriv = ACCEPT,
12894 		.result = ACCEPT,
12895 		.retval = 42,
12896 	},
12897 	{
12898 		"search pruning: all branches should be verified (nop operation)",
12899 		.insns = {
12900 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12901 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12902 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
12903 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12904 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
12905 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
12906 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
12907 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
12908 			BPF_MOV64_IMM(BPF_REG_4, 0),
12909 			BPF_JMP_A(1),
12910 			BPF_MOV64_IMM(BPF_REG_4, 1),
12911 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
12912 			BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
12913 			BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
12914 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_5, 0, 2),
12915 			BPF_MOV64_IMM(BPF_REG_6, 0),
12916 			BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xdead),
12917 			BPF_EXIT_INSN(),
12918 		},
12919 		.fixup_map_hash_8b = { 3 },
12920 		.errstr = "R6 invalid mem access 'inv'",
12921 		.result = REJECT,
12922 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
12923 	},
12924 	{
12925 		"search pruning: all branches should be verified (invalid stack access)",
12926 		.insns = {
12927 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12928 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12929 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
12930 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12931 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
12932 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
12933 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
12934 			BPF_MOV64_IMM(BPF_REG_4, 0),
12935 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
12936 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
12937 			BPF_JMP_A(1),
12938 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -24),
12939 			BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
12940 			BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
12941 			BPF_EXIT_INSN(),
12942 		},
12943 		.fixup_map_hash_8b = { 3 },
12944 		.errstr = "invalid read from stack off -16+0 size 8",
12945 		.result = REJECT,
12946 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
12947 	},
12948 	{
12949 		"jit: lsh, rsh, arsh by 1",
12950 		.insns = {
12951 			BPF_MOV64_IMM(BPF_REG_0, 1),
12952 			BPF_MOV64_IMM(BPF_REG_1, 0xff),
12953 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 1),
12954 			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 1),
12955 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x3fc, 1),
12956 			BPF_EXIT_INSN(),
12957 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 1),
12958 			BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 1),
12959 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0xff, 1),
12960 			BPF_EXIT_INSN(),
12961 			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 1),
12962 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x7f, 1),
12963 			BPF_EXIT_INSN(),
12964 			BPF_MOV64_IMM(BPF_REG_0, 2),
12965 			BPF_EXIT_INSN(),
12966 		},
12967 		.result = ACCEPT,
12968 		.retval = 2,
12969 	},
12970 	{
12971 		"jit: mov32 for ldimm64, 1",
12972 		.insns = {
12973 			BPF_MOV64_IMM(BPF_REG_0, 2),
12974 			BPF_LD_IMM64(BPF_REG_1, 0xfeffffffffffffffULL),
12975 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32),
12976 			BPF_LD_IMM64(BPF_REG_2, 0xfeffffffULL),
12977 			BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
12978 			BPF_MOV64_IMM(BPF_REG_0, 1),
12979 			BPF_EXIT_INSN(),
12980 		},
12981 		.result = ACCEPT,
12982 		.retval = 2,
12983 	},
12984 	{
12985 		"jit: mov32 for ldimm64, 2",
12986 		.insns = {
12987 			BPF_MOV64_IMM(BPF_REG_0, 1),
12988 			BPF_LD_IMM64(BPF_REG_1, 0x1ffffffffULL),
12989 			BPF_LD_IMM64(BPF_REG_2, 0xffffffffULL),
12990 			BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
12991 			BPF_MOV64_IMM(BPF_REG_0, 2),
12992 			BPF_EXIT_INSN(),
12993 		},
12994 		.result = ACCEPT,
12995 		.retval = 2,
12996 	},
12997 	{
12998 		"jit: various mul tests",
12999 		.insns = {
13000 			BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
13001 			BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
13002 			BPF_LD_IMM64(BPF_REG_1, 0xefefefULL),
13003 			BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
13004 			BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
13005 			BPF_MOV64_IMM(BPF_REG_0, 1),
13006 			BPF_EXIT_INSN(),
13007 			BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
13008 			BPF_ALU64_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
13009 			BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
13010 			BPF_MOV64_IMM(BPF_REG_0, 1),
13011 			BPF_EXIT_INSN(),
13012 			BPF_MOV32_REG(BPF_REG_2, BPF_REG_2),
13013 			BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
13014 			BPF_ALU32_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
13015 			BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
13016 			BPF_MOV64_IMM(BPF_REG_0, 1),
13017 			BPF_EXIT_INSN(),
13018 			BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
13019 			BPF_ALU32_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
13020 			BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
13021 			BPF_MOV64_IMM(BPF_REG_0, 1),
13022 			BPF_EXIT_INSN(),
13023 			BPF_LD_IMM64(BPF_REG_0, 0x952a7bbcULL),
13024 			BPF_LD_IMM64(BPF_REG_1, 0xfefefeULL),
13025 			BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
13026 			BPF_ALU32_REG(BPF_MUL, BPF_REG_2, BPF_REG_1),
13027 			BPF_JMP_REG(BPF_JEQ, BPF_REG_2, BPF_REG_0, 2),
13028 			BPF_MOV64_IMM(BPF_REG_0, 1),
13029 			BPF_EXIT_INSN(),
13030 			BPF_MOV64_IMM(BPF_REG_0, 2),
13031 			BPF_EXIT_INSN(),
13032 		},
13033 		.result = ACCEPT,
13034 		.retval = 2,
13035 	},
13036 	{
13037 		"xadd/w check unaligned stack",
13038 		.insns = {
13039 			BPF_MOV64_IMM(BPF_REG_0, 1),
13040 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
13041 			BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -7),
13042 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
13043 			BPF_EXIT_INSN(),
13044 		},
13045 		.result = REJECT,
13046 		.errstr = "misaligned stack access off",
13047 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13048 	},
13049 	{
13050 		"xadd/w check unaligned map",
13051 		.insns = {
13052 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
13053 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
13054 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
13055 			BPF_LD_MAP_FD(BPF_REG_1, 0),
13056 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13057 				     BPF_FUNC_map_lookup_elem),
13058 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
13059 			BPF_EXIT_INSN(),
13060 			BPF_MOV64_IMM(BPF_REG_1, 1),
13061 			BPF_STX_XADD(BPF_W, BPF_REG_0, BPF_REG_1, 3),
13062 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 3),
13063 			BPF_EXIT_INSN(),
13064 		},
13065 		.fixup_map_hash_8b = { 3 },
13066 		.result = REJECT,
13067 		.errstr = "misaligned value access off",
13068 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13069 	},
13070 	{
13071 		"xadd/w check unaligned pkt",
13072 		.insns = {
13073 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
13074 				    offsetof(struct xdp_md, data)),
13075 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
13076 				    offsetof(struct xdp_md, data_end)),
13077 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
13078 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
13079 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 2),
13080 			BPF_MOV64_IMM(BPF_REG_0, 99),
13081 			BPF_JMP_IMM(BPF_JA, 0, 0, 6),
13082 			BPF_MOV64_IMM(BPF_REG_0, 1),
13083 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
13084 			BPF_ST_MEM(BPF_W, BPF_REG_2, 3, 0),
13085 			BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 1),
13086 			BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 2),
13087 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 1),
13088 			BPF_EXIT_INSN(),
13089 		},
13090 		.result = REJECT,
13091 		.errstr = "BPF_XADD stores into R2 pkt is not allowed",
13092 		.prog_type = BPF_PROG_TYPE_XDP,
13093 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
13094 	},
13095 	{
13096 		"xadd/w check whether src/dst got mangled, 1",
13097 		.insns = {
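			/* fp-8 starts at 1 and two xadds of r0 (1) leave 3; the r6/r7
			 * snapshots catch any clobbering of the xadd src/dst registers
			 */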
13098 			BPF_MOV64_IMM(BPF_REG_0, 1),
13099 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13100 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
13101 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
13102 			BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
13103 			BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
13104 			BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
13105 			BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
13106 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
13107 			BPF_EXIT_INSN(),
13108 			BPF_MOV64_IMM(BPF_REG_0, 42),
13109 			BPF_EXIT_INSN(),
13110 		},
13111 		.result = ACCEPT,
13112 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13113 		.retval = 3,
13114 	},
13115 	{
13116 		"xadd/w check whether src/dst got mangled, 2",
13117 		.insns = {
13118 			BPF_MOV64_IMM(BPF_REG_0, 1),
13119 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13120 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
13121 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
13122 			BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
13123 			BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
13124 			BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
13125 			BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
13126 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
13127 			BPF_EXIT_INSN(),
13128 			BPF_MOV64_IMM(BPF_REG_0, 42),
13129 			BPF_EXIT_INSN(),
13130 		},
13131 		.result = ACCEPT,
13132 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13133 		.retval = 3,
13134 	},
13135 	{
13136 		"bpf_get_stack return R0 within range",
13137 		.insns = {
13138 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13139 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
13140 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
13141 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
13142 			BPF_LD_MAP_FD(BPF_REG_1, 0),
13143 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13144 				     BPF_FUNC_map_lookup_elem),
13145 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 28),
13146 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
13147 			BPF_MOV64_IMM(BPF_REG_9, sizeof(struct test_val)),
13148 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13149 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
13150 			BPF_MOV64_IMM(BPF_REG_3, sizeof(struct test_val)),
13151 			BPF_MOV64_IMM(BPF_REG_4, 256),
13152 			BPF_EMIT_CALL(BPF_FUNC_get_stack),
13153 			BPF_MOV64_IMM(BPF_REG_1, 0),
13154 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
13155 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_8, 32),
13156 			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_8, 32),
13157 			BPF_JMP_REG(BPF_JSLT, BPF_REG_1, BPF_REG_8, 16),
13158 			BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_8),
13159 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
13160 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_8),
13161 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_9),
13162 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32),
13163 			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 32),
13164 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
13165 			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_1),
13166 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
13167 			BPF_MOV64_IMM(BPF_REG_5, sizeof(struct test_val)),
13168 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_5),
13169 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 4),
13170 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13171 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_9),
13172 			BPF_MOV64_IMM(BPF_REG_4, 0),
13173 			BPF_EMIT_CALL(BPF_FUNC_get_stack),
13174 			BPF_EXIT_INSN(),
13175 		},
13176 		.fixup_map_hash_48b = { 4 },
13177 		.result = ACCEPT,
13178 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
13179 	},
13180 	{
13181 		"ld_abs: invalid op 1",
13182 		.insns = {
13183 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13184 			BPF_LD_ABS(BPF_DW, 0),
13185 			BPF_EXIT_INSN(),
13186 		},
13187 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13188 		.result = REJECT,
13189 		.errstr = "unknown opcode",
13190 	},
13191 	{
13192 		"ld_abs: invalid op 2",
13193 		.insns = {
13194 			BPF_MOV32_IMM(BPF_REG_0, 256),
13195 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13196 			BPF_LD_IND(BPF_DW, BPF_REG_0, 0),
13197 			BPF_EXIT_INSN(),
13198 		},
13199 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13200 		.result = REJECT,
13201 		.errstr = "unknown opcode",
13202 	},
13203 	{
13204 		"ld_abs: nmap reduced",
13205 		.insns = {
13206 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13207 			BPF_LD_ABS(BPF_H, 12),
13208 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 28),
13209 			BPF_LD_ABS(BPF_H, 12),
13210 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 26),
13211 			BPF_MOV32_IMM(BPF_REG_0, 18),
13212 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -64),
13213 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -64),
13214 			BPF_LD_IND(BPF_W, BPF_REG_7, 14),
13215 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -60),
13216 			BPF_MOV32_IMM(BPF_REG_0, 280971478),
13217 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
13218 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
13219 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -60),
13220 			BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
13221 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 15),
13222 			BPF_LD_ABS(BPF_H, 12),
13223 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 13),
13224 			BPF_MOV32_IMM(BPF_REG_0, 22),
13225 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
13226 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
13227 			BPF_LD_IND(BPF_H, BPF_REG_7, 14),
13228 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -52),
13229 			BPF_MOV32_IMM(BPF_REG_0, 17366),
13230 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -48),
13231 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -48),
13232 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -52),
13233 			BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
13234 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
13235 			BPF_MOV32_IMM(BPF_REG_0, 256),
13236 			BPF_EXIT_INSN(),
13237 			BPF_MOV32_IMM(BPF_REG_0, 0),
13238 			BPF_EXIT_INSN(),
13239 		},
13240 		.data = {
13241 			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0x06, 0,
13242 			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
13243 			0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
13244 		},
13245 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13246 		.result = ACCEPT,
13247 		.retval = 256,
13248 	},
13249 	{
13250 		"ld_abs: div + abs, test 1",
13251 		.insns = {
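			/* r8 = data[3] / 2 + data[4] = 20 + 50 = 70, so the final
			 * LD_IND(B, r8, -70) reads back data[0] = 10
			 */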
13252 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
13253 			BPF_LD_ABS(BPF_B, 3),
13254 			BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
13255 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
13256 			BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
13257 			BPF_LD_ABS(BPF_B, 4),
13258 			BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
13259 			BPF_LD_IND(BPF_B, BPF_REG_8, -70),
13260 			BPF_EXIT_INSN(),
13261 		},
13262 		.data = {
13263 			10, 20, 30, 40, 50,
13264 		},
13265 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13266 		.result = ACCEPT,
13267 		.retval = 10,
13268 	},
13269 	{
13270 		"ld_abs: div + abs, test 2",
13271 		.insns = {
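			/* LD_ABS(B, 128) is past the end of the test packet, which
			 * terminates the program with return value 0
			 */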
13272 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
13273 			BPF_LD_ABS(BPF_B, 3),
13274 			BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
13275 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
13276 			BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
13277 			BPF_LD_ABS(BPF_B, 128),
13278 			BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
13279 			BPF_LD_IND(BPF_B, BPF_REG_8, -70),
13280 			BPF_EXIT_INSN(),
13281 		},
13282 		.data = {
13283 			10, 20, 30, 40, 50,
13284 		},
13285 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13286 		.result = ACCEPT,
13287 		.retval = 0,
13288 	},
13289 	{
13290 		"ld_abs: div + abs, test 3",
13291 		.insns = {
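			/* 32-bit divide by a zero register yields 0 in r0 instead of
			 * faulting, hence retval 0
			 */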
13292 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
13293 			BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
13294 			BPF_LD_ABS(BPF_B, 3),
13295 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
13296 			BPF_EXIT_INSN(),
13297 		},
13298 		.data = {
13299 			10, 20, 30, 40, 50,
13300 		},
13301 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13302 		.result = ACCEPT,
13303 		.retval = 0,
13304 	},
13305 	{
13306 		"ld_abs: div + abs, test 4",
13307 		.insns = {
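			/* the out-of-bounds LD_ABS aborts with return 0 before the
			 * divide by zero is ever reached
			 */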
13308 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
13309 			BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
13310 			BPF_LD_ABS(BPF_B, 256),
13311 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
13312 			BPF_EXIT_INSN(),
13313 		},
13314 		.data = {
13315 			10, 20, 30, 40, 50,
13316 		},
13317 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13318 		.result = ACCEPT,
13319 		.retval = 0,
13320 	},
13321 	{
13322 		"ld_abs: vlan + abs, test 1",
13323 		.insns = { },
13324 		.data = {
13325 			0x34,
13326 		},
13327 		.fill_helper = bpf_fill_ld_abs_vlan_push_pop,
13328 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13329 		.result = ACCEPT,
13330 		.retval = 0xbef,
13331 	},
13332 	{
13333 		"ld_abs: vlan + abs, test 2",
13334 		.insns = {
13335 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13336 			BPF_LD_ABS(BPF_B, 0),
13337 			BPF_LD_ABS(BPF_H, 0),
13338 			BPF_LD_ABS(BPF_W, 0),
13339 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
13340 			BPF_MOV64_IMM(BPF_REG_6, 0),
13341 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
13342 			BPF_MOV64_IMM(BPF_REG_2, 1),
13343 			BPF_MOV64_IMM(BPF_REG_3, 2),
13344 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13345 				     BPF_FUNC_skb_vlan_push),
13346 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
13347 			BPF_LD_ABS(BPF_B, 0),
13348 			BPF_LD_ABS(BPF_H, 0),
13349 			BPF_LD_ABS(BPF_W, 0),
13350 			BPF_MOV64_IMM(BPF_REG_0, 42),
13351 			BPF_EXIT_INSN(),
13352 		},
13353 		.data = {
13354 			0x34,
13355 		},
13356 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13357 		.result = ACCEPT,
13358 		.retval = 42,
13359 	},
13360 	{
13361 		"ld_abs: jump around ld_abs",
13362 		.insns = { },
13363 		.data = {
13364 			10, 11,
13365 		},
13366 		.fill_helper = bpf_fill_jump_around_ld_abs,
13367 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13368 		.result = ACCEPT,
13369 		.retval = 10,
13370 	},
13371 	{
13372 		"ld_dw: xor semi-random 64 bit imms, test 1",
13373 		.insns = { },
13374 		.data = { },
13375 		.fill_helper = bpf_fill_rand_ld_dw,
13376 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13377 		.result = ACCEPT,
13378 		.retval = 4090,
13379 	},
13380 	{
13381 		"ld_dw: xor semi-random 64 bit imms, test 2",
13382 		.insns = { },
13383 		.data = { },
13384 		.fill_helper = bpf_fill_rand_ld_dw,
13385 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13386 		.result = ACCEPT,
13387 		.retval = 2047,
13388 	},
13389 	{
13390 		"ld_dw: xor semi-random 64 bit imms, test 3",
13391 		.insns = { },
13392 		.data = { },
13393 		.fill_helper = bpf_fill_rand_ld_dw,
13394 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13395 		.result = ACCEPT,
13396 		.retval = 511,
13397 	},
13398 	{
13399 		"ld_dw: xor semi-random 64 bit imms, test 4",
13400 		.insns = { },
13401 		.data = { },
13402 		.fill_helper = bpf_fill_rand_ld_dw,
13403 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13404 		.result = ACCEPT,
13405 		.retval = 5,
13406 	},
13407 	{
13408 		"pass unmodified ctx pointer to helper",
13409 		.insns = {
13410 			BPF_MOV64_IMM(BPF_REG_2, 0),
13411 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13412 				     BPF_FUNC_csum_update),
13413 			BPF_MOV64_IMM(BPF_REG_0, 0),
13414 			BPF_EXIT_INSN(),
13415 		},
13416 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13417 		.result = ACCEPT,
13418 	},
13419 	{
13420 		"reference tracking: leak potential reference",
13421 		.insns = {
13422 			BPF_SK_LOOKUP,
13423 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), /* leak reference */
13424 			BPF_EXIT_INSN(),
13425 		},
13426 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13427 		.errstr = "Unreleased reference",
13428 		.result = REJECT,
13429 	},
13430 	{
13431 		"reference tracking: leak potential reference on stack",
13432 		.insns = {
13433 			BPF_SK_LOOKUP,
13434 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13435 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13436 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
13437 			BPF_MOV64_IMM(BPF_REG_0, 0),
13438 			BPF_EXIT_INSN(),
13439 		},
13440 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13441 		.errstr = "Unreleased reference",
13442 		.result = REJECT,
13443 	},
13444 	{
13445 		"reference tracking: leak potential reference on stack 2",
13446 		.insns = {
13447 			BPF_SK_LOOKUP,
13448 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13449 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13450 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
13451 			BPF_MOV64_IMM(BPF_REG_0, 0),
13452 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
13453 			BPF_EXIT_INSN(),
13454 		},
13455 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13456 		.errstr = "Unreleased reference",
13457 		.result = REJECT,
13458 	},
13459 	{
13460 		"reference tracking: zero potential reference",
13461 		.insns = {
13462 			BPF_SK_LOOKUP,
13463 			BPF_MOV64_IMM(BPF_REG_0, 0), /* leak reference */
13464 			BPF_EXIT_INSN(),
13465 		},
13466 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13467 		.errstr = "Unreleased reference",
13468 		.result = REJECT,
13469 	},
13470 	{
13471 		"reference tracking: copy and zero potential references",
13472 		.insns = {
13473 			BPF_SK_LOOKUP,
13474 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
13475 			BPF_MOV64_IMM(BPF_REG_0, 0),
13476 			BPF_MOV64_IMM(BPF_REG_7, 0), /* leak reference */
13477 			BPF_EXIT_INSN(),
13478 		},
13479 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13480 		.errstr = "Unreleased reference",
13481 		.result = REJECT,
13482 	},
13483 	{
13484 		"reference tracking: release reference without check",
13485 		.insns = {
13486 			BPF_SK_LOOKUP,
13487 			/* reference in r0 may be NULL */
13488 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13489 			BPF_MOV64_IMM(BPF_REG_2, 0),
13490 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13491 			BPF_EXIT_INSN(),
13492 		},
13493 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13494 		.errstr = "type=sock_or_null expected=sock",
13495 		.result = REJECT,
13496 	},
13497 	{
13498 		"reference tracking: release reference",
13499 		.insns = {
13500 			BPF_SK_LOOKUP,
13501 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13502 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13503 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13504 			BPF_EXIT_INSN(),
13505 		},
13506 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13507 		.result = ACCEPT,
13508 	},
13509 	{
13510 		"reference tracking: release reference 2",
13511 		.insns = {
13512 			BPF_SK_LOOKUP,
13513 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13514 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
13515 			BPF_EXIT_INSN(),
13516 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13517 			BPF_EXIT_INSN(),
13518 		},
13519 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13520 		.result = ACCEPT,
13521 	},
13522 	{
13523 		"reference tracking: release reference twice",
13524 		.insns = {
13525 			BPF_SK_LOOKUP,
13526 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13527 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13528 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13529 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13530 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13531 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13532 			BPF_EXIT_INSN(),
13533 		},
13534 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13535 		.errstr = "type=inv expected=sock",
13536 		.result = REJECT,
13537 	},
13538 	{
13539 		"reference tracking: release reference twice inside branch",
13540 		.insns = {
13541 			BPF_SK_LOOKUP,
13542 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13543 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13544 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), /* goto end */
13545 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13546 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13547 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13548 			BPF_EXIT_INSN(),
13549 		},
13550 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13551 		.errstr = "type=inv expected=sock",
13552 		.result = REJECT,
13553 	},
13554 	{
13555 		"reference tracking: alloc, check, free in one subbranch",
13556 		.insns = {
13557 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
13558 				    offsetof(struct __sk_buff, data)),
13559 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
13560 				    offsetof(struct __sk_buff, data_end)),
13561 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
13562 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
13563 			/* if (data + 16 > data_end) exit; */
13564 			BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
13565 			BPF_EXIT_INSN(),
13566 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
13567 				    offsetof(struct __sk_buff, mark)),
13568 			BPF_SK_LOOKUP,
13569 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 1), /* mark == 0? */
13570 			/* Leak reference in R0 */
13571 			BPF_EXIT_INSN(),
13572 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
13573 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13574 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13575 			BPF_EXIT_INSN(),
13576 		},
13577 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13578 		.errstr = "Unreleased reference",
13579 		.result = REJECT,
13580 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
13581 	},
13582 	{
13583 		"reference tracking: alloc, check, free in both subbranches",
13584 		.insns = {
13585 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
13586 				    offsetof(struct __sk_buff, data)),
13587 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
13588 				    offsetof(struct __sk_buff, data_end)),
13589 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
13590 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
13591 			/* if (data + 16 > data_end) exit; */
13592 			BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
13593 			BPF_EXIT_INSN(),
13594 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
13595 				    offsetof(struct __sk_buff, mark)),
13596 			BPF_SK_LOOKUP,
13597 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 4), /* mark == 0? */
13598 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
13599 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13600 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13601 			BPF_EXIT_INSN(),
13602 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
13603 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13604 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13605 			BPF_EXIT_INSN(),
13606 		},
13607 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13608 		.result = ACCEPT,
13609 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
13610 	},
13611 	{
13612 		"reference tracking in call: free reference in subprog",
13613 		.insns = {
13614 			BPF_SK_LOOKUP,
13615 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
13616 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
13617 			BPF_MOV64_IMM(BPF_REG_0, 0),
13618 			BPF_EXIT_INSN(),
13619 
13620 			/* subprog 1 */
13621 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
13622 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
13623 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13624 			BPF_EXIT_INSN(),
13625 		},
13626 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13627 		.result = ACCEPT,
13628 	},
13629 	{
13630 		"pass modified ctx pointer to helper, 1",
13631 		.insns = {
13632 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
13633 			BPF_MOV64_IMM(BPF_REG_2, 0),
13634 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13635 				     BPF_FUNC_csum_update),
13636 			BPF_MOV64_IMM(BPF_REG_0, 0),
13637 			BPF_EXIT_INSN(),
13638 		},
13639 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13640 		.result = REJECT,
13641 		.errstr = "dereference of modified ctx ptr",
13642 	},
13643 	{
13644 		"pass modified ctx pointer to helper, 2",
13645 		.insns = {
13646 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
13647 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13648 				     BPF_FUNC_get_socket_cookie),
13649 			BPF_MOV64_IMM(BPF_REG_0, 0),
13650 			BPF_EXIT_INSN(),
13651 		},
13652 		.result_unpriv = REJECT,
13653 		.result = REJECT,
13654 		.errstr_unpriv = "dereference of modified ctx ptr",
13655 		.errstr = "dereference of modified ctx ptr",
13656 	},
13657 	{
13658 		"pass modified ctx pointer to helper, 3",
13659 		.insns = {
13660 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 0),
13661 			BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 4),
13662 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
13663 			BPF_MOV64_IMM(BPF_REG_2, 0),
13664 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13665 				     BPF_FUNC_csum_update),
13666 			BPF_MOV64_IMM(BPF_REG_0, 0),
13667 			BPF_EXIT_INSN(),
13668 		},
13669 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13670 		.result = REJECT,
13671 		.errstr = "variable ctx access var_off=(0x0; 0x4)",
13672 	},
13673 	{
13674 		"mov64 src == dst",
13675 		.insns = {
13676 			BPF_MOV64_IMM(BPF_REG_2, 0),
13677 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_2),
13678 			// Check bounds are OK
13679 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
13680 			BPF_MOV64_IMM(BPF_REG_0, 0),
13681 			BPF_EXIT_INSN(),
13682 		},
13683 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13684 		.result = ACCEPT,
13685 	},
13686 	{
13687 		"mov64 src != dst",
13688 		.insns = {
13689 			BPF_MOV64_IMM(BPF_REG_3, 0),
13690 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
13691 			// Check bounds are OK
13692 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
13693 			BPF_MOV64_IMM(BPF_REG_0, 0),
13694 			BPF_EXIT_INSN(),
13695 		},
13696 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13697 		.result = ACCEPT,
13698 	},
13699 	{
13700 		"allocated_stack",
13701 		.insns = {
13702 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
13703 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
13704 			BPF_ALU64_REG(BPF_MOV, BPF_REG_7, BPF_REG_0),
13705 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
13706 			BPF_MOV64_IMM(BPF_REG_0, 0),
13707 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
13708 			BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, -8),
13709 			BPF_STX_MEM(BPF_B, BPF_REG_10, BPF_REG_7, -9),
13710 			BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_10, -9),
13711 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
13712 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
13713 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
13714 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
13715 			BPF_EXIT_INSN(),
13716 		},
13717 		.result = ACCEPT,
13718 		.result_unpriv = ACCEPT,
13719 		.insn_processed = 15,
13720 	},
13721 	{
13722 		"reference tracking in call: free reference in subprog and outside",
13723 		.insns = {
13724 			BPF_SK_LOOKUP,
13725 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
13726 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13727 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
13728 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13729 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13730 			BPF_EXIT_INSN(),
13731 
13732 			/* subprog 1 */
13733 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
13734 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
13735 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13736 			BPF_EXIT_INSN(),
13737 		},
13738 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13739 		.errstr = "type=inv expected=sock",
13740 		.result = REJECT,
13741 	},
13742 	{
13743 		"reference tracking in call: alloc & leak reference in subprog",
13744 		.insns = {
13745 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13746 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13747 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
13748 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13749 			BPF_MOV64_IMM(BPF_REG_0, 0),
13750 			BPF_EXIT_INSN(),
13751 
13752 			/* subprog 1 */
13753 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_4),
13754 			BPF_SK_LOOKUP,
13755 			/* spill unchecked sk_ptr into stack of caller */
13756 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
13757 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13758 			BPF_EXIT_INSN(),
13759 		},
13760 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13761 		.errstr = "Unreleased reference",
13762 		.result = REJECT,
13763 	},
13764 	{
13765 		"reference tracking in call: alloc in subprog, release outside",
13766 		.insns = {
13767 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13768 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
13769 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13770 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13771 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13772 			BPF_EXIT_INSN(),
13773 
13774 			/* subprog 1 */
13775 			BPF_SK_LOOKUP,
13776 			BPF_EXIT_INSN(), /* return sk */
13777 		},
13778 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13779 		.retval = POINTER_VALUE,
13780 		.result = ACCEPT,
13781 	},
13782 	{
13783 		"reference tracking in call: sk_ptr leak into caller stack",
13784 		.insns = {
13785 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13786 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13787 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
13788 			BPF_MOV64_IMM(BPF_REG_0, 0),
13789 			BPF_EXIT_INSN(),
13790 
13791 			/* subprog 1 */
13792 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
13793 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
13794 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
13795 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
13796 			/* spill unchecked sk_ptr into stack of caller */
13797 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
13798 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
13799 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
13800 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
13801 			BPF_EXIT_INSN(),
13802 
13803 			/* subprog 2 */
13804 			BPF_SK_LOOKUP,
13805 			BPF_EXIT_INSN(),
13806 		},
13807 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13808 		.errstr = "Unreleased reference",
13809 		.result = REJECT,
13810 	},
13811 	{
13812 		"reference tracking in call: sk_ptr spill into caller stack",
13813 		.insns = {
13814 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13815 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13816 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
13817 			BPF_MOV64_IMM(BPF_REG_0, 0),
13818 			BPF_EXIT_INSN(),
13819 
13820 			/* subprog 1 */
13821 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
13822 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
13823 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
13824 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
13825 			/* spill unchecked sk_ptr into stack of caller */
13826 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
13827 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
13828 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
13829 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
13830 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
13831 			/* now the sk_ptr is verified, free the reference */
13832 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_4, 0),
13833 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13834 			BPF_EXIT_INSN(),
13835 
13836 			/* subprog 2 */
13837 			BPF_SK_LOOKUP,
13838 			BPF_EXIT_INSN(),
13839 		},
13840 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13841 		.result = ACCEPT,
13842 	},
13843 	{
13844 		"reference tracking: allow LD_ABS",
13845 		.insns = {
13846 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13847 			BPF_SK_LOOKUP,
13848 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13849 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13850 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13851 			BPF_LD_ABS(BPF_B, 0),
13852 			BPF_LD_ABS(BPF_H, 0),
13853 			BPF_LD_ABS(BPF_W, 0),
13854 			BPF_EXIT_INSN(),
13855 		},
13856 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13857 		.result = ACCEPT,
13858 	},
13859 	{
13860 		"reference tracking: forbid LD_ABS while holding reference",
13861 		.insns = {
13862 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13863 			BPF_SK_LOOKUP,
13864 			BPF_LD_ABS(BPF_B, 0),
13865 			BPF_LD_ABS(BPF_H, 0),
13866 			BPF_LD_ABS(BPF_W, 0),
13867 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13868 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13869 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13870 			BPF_EXIT_INSN(),
13871 		},
13872 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13873 		.errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",
13874 		.result = REJECT,
13875 	},
13876 	{
13877 		"reference tracking: allow LD_IND",
13878 		.insns = {
13879 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13880 			BPF_SK_LOOKUP,
13881 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13882 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13883 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13884 			BPF_MOV64_IMM(BPF_REG_7, 1),
13885 			BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
13886 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
13887 			BPF_EXIT_INSN(),
13888 		},
13889 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13890 		.result = ACCEPT,
13891 		.retval = 1,
13892 	},
13893 	{
13894 		"reference tracking: forbid LD_IND while holding reference",
13895 		.insns = {
13896 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13897 			BPF_SK_LOOKUP,
13898 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
13899 			BPF_MOV64_IMM(BPF_REG_7, 1),
13900 			BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
13901 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
13902 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_4),
13903 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
13904 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13905 			BPF_EXIT_INSN(),
13906 		},
13907 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13908 		.errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",
13909 		.result = REJECT,
13910 	},
13911 	{
13912 		"reference tracking: check reference or tail call",
13913 		.insns = {
13914 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
13915 			BPF_SK_LOOKUP,
13916 			/* if (sk) bpf_sk_release() */
13917 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13918 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 7),
13919 			/* bpf_tail_call() */
13920 			BPF_MOV64_IMM(BPF_REG_3, 2),
13921 			BPF_LD_MAP_FD(BPF_REG_2, 0),
13922 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
13923 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13924 				     BPF_FUNC_tail_call),
13925 			BPF_MOV64_IMM(BPF_REG_0, 0),
13926 			BPF_EXIT_INSN(),
13927 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13928 			BPF_EXIT_INSN(),
13929 		},
13930 		.fixup_prog1 = { 17 },
13931 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13932 		.result = ACCEPT,
13933 	},
13934 	{
13935 		"reference tracking: release reference then tail call",
13936 		.insns = {
13937 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
13938 			BPF_SK_LOOKUP,
13939 			/* if (sk) bpf_sk_release() */
13940 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13941 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
13942 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13943 			/* bpf_tail_call() */
13944 			BPF_MOV64_IMM(BPF_REG_3, 2),
13945 			BPF_LD_MAP_FD(BPF_REG_2, 0),
13946 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
13947 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13948 				     BPF_FUNC_tail_call),
13949 			BPF_MOV64_IMM(BPF_REG_0, 0),
13950 			BPF_EXIT_INSN(),
13951 		},
13952 		.fixup_prog1 = { 18 },
13953 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13954 		.result = ACCEPT,
13955 	},
13956 	{
13957 		"reference tracking: leak possible reference over tail call",
13958 		.insns = {
13959 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
13960 			/* Look up socket and store in REG_6 */
13961 			BPF_SK_LOOKUP,
13962 			/* bpf_tail_call() */
13963 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13964 			BPF_MOV64_IMM(BPF_REG_3, 2),
13965 			BPF_LD_MAP_FD(BPF_REG_2, 0),
13966 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
13967 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13968 				     BPF_FUNC_tail_call),
13969 			BPF_MOV64_IMM(BPF_REG_0, 0),
13970 			/* if (sk) bpf_sk_release() */
13971 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13972 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
13973 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13974 			BPF_EXIT_INSN(),
13975 		},
13976 		.fixup_prog1 = { 16 },
13977 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13978 		.errstr = "tail_call would lead to reference leak",
13979 		.result = REJECT,
13980 	},
13981 	{
13982 		"reference tracking: leak checked reference over tail call",
13983 		.insns = {
13984 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
13985 			/* Look up socket and store in REG_6 */
13986 			BPF_SK_LOOKUP,
13987 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13988 			/* if (!sk) goto end */
13989 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
13990 			/* bpf_tail_call() */
13991 			BPF_MOV64_IMM(BPF_REG_3, 0),
13992 			BPF_LD_MAP_FD(BPF_REG_2, 0),
13993 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
13994 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13995 				     BPF_FUNC_tail_call),
13996 			BPF_MOV64_IMM(BPF_REG_0, 0),
13997 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13998 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13999 			BPF_EXIT_INSN(),
14000 		},
14001 		.fixup_prog1 = { 17 },
14002 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14003 		.errstr = "tail_call would lead to reference leak",
14004 		.result = REJECT,
14005 	},
14006 	{
14007 		"reference tracking: mangle and release sock_or_null",
14008 		.insns = {
14009 			BPF_SK_LOOKUP,
14010 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
14011 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
14012 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
14013 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
14014 			BPF_EXIT_INSN(),
14015 		},
14016 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14017 		.errstr = "R1 pointer arithmetic on sock_or_null prohibited",
14018 		.result = REJECT,
14019 	},
14020 	{
14021 		"reference tracking: mangle and release sock",
14022 		.insns = {
14023 			BPF_SK_LOOKUP,
14024 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
14025 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
14026 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
14027 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
14028 			BPF_EXIT_INSN(),
14029 		},
14030 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14031 		.errstr = "R1 pointer arithmetic on sock prohibited",
14032 		.result = REJECT,
14033 	},
14034 	{
14035 		"reference tracking: access member",
14036 		.insns = {
14037 			BPF_SK_LOOKUP,
14038 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
14039 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
14040 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
14041 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
14042 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
14043 			BPF_EXIT_INSN(),
14044 		},
14045 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14046 		.result = ACCEPT,
14047 	},
14048 	{
14049 		"reference tracking: write to member",
14050 		.insns = {
14051 			BPF_SK_LOOKUP,
14052 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
14053 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
14054 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
14055 			BPF_LD_IMM64(BPF_REG_2, 42),
14056 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_2,
14057 				    offsetof(struct bpf_sock, mark)),
14058 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
14059 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
14060 			BPF_LD_IMM64(BPF_REG_0, 0),
14061 			BPF_EXIT_INSN(),
14062 		},
14063 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14064 		.errstr = "cannot write into socket",
14065 		.result = REJECT,
14066 	},
14067 	{
14068 		"reference tracking: invalid 64-bit access of member",
14069 		.insns = {
14070 			BPF_SK_LOOKUP,
14071 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
14072 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
14073 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
14074 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
14075 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
14076 			BPF_EXIT_INSN(),
14077 		},
14078 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14079 		.errstr = "invalid bpf_sock access off=0 size=8",
14080 		.result = REJECT,
14081 	},
14082 	{
14083 		"reference tracking: access after release",
14084 		.insns = {
14085 			BPF_SK_LOOKUP,
14086 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
14087 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
14088 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
14089 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
14090 			BPF_EXIT_INSN(),
14091 		},
14092 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14093 		.errstr = "!read_ok",
14094 		.result = REJECT,
14095 	},
14096 	{
14097 		"reference tracking: direct access for lookup",
14098 		.insns = {
14099 			/* Check that the packet is at least 64B long */
14100 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
14101 				    offsetof(struct __sk_buff, data)),
14102 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
14103 				    offsetof(struct __sk_buff, data_end)),
14104 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
14105 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
14106 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
14107 			/* sk = sk_lookup_tcp(ctx, skb->data, ...) */
14108 			BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)),
14109 			BPF_MOV64_IMM(BPF_REG_4, 0),
14110 			BPF_MOV64_IMM(BPF_REG_5, 0),
14111 			BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp),
14112 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
14113 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
14114 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
14115 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
14116 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
14117 			BPF_EXIT_INSN(),
14118 		},
14119 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14120 		.result = ACCEPT,
14121 	},
14122 	{
14123 		"calls: ctx read at start of subprog",
14124 		.insns = {
14125 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
14126 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
14127 			BPF_JMP_REG(BPF_JSGT, BPF_REG_0, BPF_REG_0, 0),
14128 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
14129 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
14130 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
14131 			BPF_EXIT_INSN(),
14132 			BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
14133 			BPF_MOV64_IMM(BPF_REG_0, 0),
14134 			BPF_EXIT_INSN(),
14135 		},
14136 		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
14137 		.errstr_unpriv = "function calls to other bpf functions are allowed for root only",
14138 		.result_unpriv = REJECT,
14139 		.result = ACCEPT,
14140 	},
14141 	{
14142 		"check wire_len is not readable by sockets",
14143 		.insns = {
14144 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
14145 				    offsetof(struct __sk_buff, wire_len)),
14146 			BPF_EXIT_INSN(),
14147 		},
14148 		.errstr = "invalid bpf_context access",
14149 		.result = REJECT,
14150 	},
14151 	{
14152 		"check wire_len is readable by tc classifier",
14153 		.insns = {
14154 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
14155 				    offsetof(struct __sk_buff, wire_len)),
14156 			BPF_EXIT_INSN(),
14157 		},
14158 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14159 		.result = ACCEPT,
14160 	},
14161 	{
14162 		"check wire_len is not writable by tc classifier",
14163 		.insns = {
14164 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
14165 				    offsetof(struct __sk_buff, wire_len)),
14166 			BPF_EXIT_INSN(),
14167 		},
14168 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14169 		.errstr = "invalid bpf_context access",
14170 		.errstr_unpriv = "R1 leaks addr",
14171 		.result = REJECT,
14172 	},
14173 	{
14174 		"calls: cross frame pruning",
14175 		.insns = {
14176 			/* r8 = !!random();
14177 			 * call pruner()
14178 			 * if (r8)
14179 			 *     do something bad;
14180 			 */
14181 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
14182 				     BPF_FUNC_get_prandom_u32),
14183 			BPF_MOV64_IMM(BPF_REG_8, 0),
14184 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
14185 			BPF_MOV64_IMM(BPF_REG_8, 1),
14186 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
14187 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
14188 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
14189 			BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
14190 			BPF_MOV64_IMM(BPF_REG_0, 0),
14191 			BPF_EXIT_INSN(),
14192 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
14193 			BPF_EXIT_INSN(),
14194 		},
14195 		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
14196 		.errstr_unpriv = "function calls to other bpf functions are allowed for root only",
14197 		.errstr = "!read_ok",
14198 		.result = REJECT,
14199 	},
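	/* BPF_JSET jumps when (dst & src) != 0, i.e. when the two
	 * operands share at least one set bit.
	 */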
14200 	{
14201 		"jset: functional",
14202 		.insns = {
14203 			/* r0 = 0 */
14204 			BPF_MOV64_IMM(BPF_REG_0, 0),
14205 			/* prep for direct packet access via r2 */
14206 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
14207 				    offsetof(struct __sk_buff, data)),
14208 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
14209 				    offsetof(struct __sk_buff, data_end)),
14210 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
14211 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
14212 			BPF_JMP_REG(BPF_JLE, BPF_REG_4, BPF_REG_3, 1),
14213 			BPF_EXIT_INSN(),
14214 
14215 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
14216 
14217 			/* reg, bit 63 or bit 0 set, taken */
14218 			BPF_LD_IMM64(BPF_REG_8, 0x8000000000000001),
14219 			BPF_JMP_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1),
14220 			BPF_EXIT_INSN(),
14221 
14222 			/* reg, bit 62, not taken */
14223 			BPF_LD_IMM64(BPF_REG_8, 0x4000000000000000),
14224 			BPF_JMP_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1),
14225 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
14226 			BPF_EXIT_INSN(),
14227 
14228 			/* imm, any bit set, taken */
14229 			BPF_JMP_IMM(BPF_JSET, BPF_REG_7, -1, 1),
14230 			BPF_EXIT_INSN(),
14231 
14232 			/* imm, bit 31 set, taken */
14233 			BPF_JMP_IMM(BPF_JSET, BPF_REG_7, 0x80000000, 1),
14234 			BPF_EXIT_INSN(),
14235 
14236 			/* all good - return r0 == 2 */
14237 			BPF_MOV64_IMM(BPF_REG_0, 2),
14238 			BPF_EXIT_INSN(),
14239 		},
14240 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14241 		.result = ACCEPT,
14242 		.runs = 7,
14243 		.retvals = {
14244 			{ .retval = 2,
14245 			  .data64 = { (1ULL << 63) | (1U << 31) | (1U << 0), }
14246 			},
14247 			{ .retval = 2,
14248 			  .data64 = { (1ULL << 63) | (1U << 31), }
14249 			},
14250 			{ .retval = 2,
14251 			  .data64 = { (1ULL << 31) | (1U << 0), }
14252 			},
14253 			{ .retval = 2,
14254 			  .data64 = { (__u32)-1, }
14255 			},
14256 			{ .retval = 2,
14257 			  .data64 = { ~0x4000000000000000ULL, }
14258 			},
14259 			{ .retval = 0,
14260 			  .data64 = { 0, }
14261 			},
14262 			{ .retval = 0,
14263 			  .data64 = { ~0ULL, }
14264 			},
14265 		},
14266 	},
14267 	{
14268 		"jset: sign-extend",
14269 		.insns = {
14270 			/* r0 = 0 */
14271 			BPF_MOV64_IMM(BPF_REG_0, 0),
14272 			/* prep for direct packet access via r2 */
14273 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
14274 				    offsetof(struct __sk_buff, data)),
14275 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
14276 				    offsetof(struct __sk_buff, data_end)),
14277 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
14278 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
14279 			BPF_JMP_REG(BPF_JLE, BPF_REG_4, BPF_REG_3, 1),
14280 			BPF_EXIT_INSN(),
14281 
14282 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
14283 
14284 			BPF_JMP_IMM(BPF_JSET, BPF_REG_7, 0x80000000, 1),
14285 			BPF_EXIT_INSN(),
14286 
14287 			BPF_MOV64_IMM(BPF_REG_0, 2),
14288 			BPF_EXIT_INSN(),
14289 		},
14290 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14291 		.result = ACCEPT,
14292 		.retval = 2,
14293 		.data = { 1, 0, 0, 0, 0, 0, 0, 1, },
14294 	},
14295 	{
14296 		"jset: known const compare",
14297 		.insns = {
14298 			BPF_MOV64_IMM(BPF_REG_0, 1),
14299 			BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
14300 			BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
14301 			BPF_EXIT_INSN(),
14302 		},
14303 		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
14304 		.retval_unpriv = 1,
14305 		.result_unpriv = ACCEPT,
14306 		.retval = 1,
14307 		.result = ACCEPT,
14308 	},
14309 	{
14310 		"jset: known const compare bad",
14311 		.insns = {
14312 			BPF_MOV64_IMM(BPF_REG_0, 0),
14313 			BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
14314 			BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
14315 			BPF_EXIT_INSN(),
14316 		},
14317 		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
14318 		.errstr_unpriv = "!read_ok",
14319 		.result_unpriv = REJECT,
14320 		.errstr = "!read_ok",
14321 		.result = REJECT,
14322 	},
14323 	{
14324 		"jset: unknown const compare taken",
14325 		.insns = {
14326 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
14327 				     BPF_FUNC_get_prandom_u32),
14328 			BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
14329 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
14330 			BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
14331 			BPF_EXIT_INSN(),
14332 		},
14333 		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
14334 		.errstr_unpriv = "!read_ok",
14335 		.result_unpriv = REJECT,
14336 		.errstr = "!read_ok",
14337 		.result = REJECT,
14338 	},
14339 	{
14340 		"jset: unknown const compare not taken",
14341 		.insns = {
14342 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
14343 				     BPF_FUNC_get_prandom_u32),
14344 			BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
14345 			BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
14346 			BPF_EXIT_INSN(),
14347 		},
14348 		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
14349 		.errstr_unpriv = "!read_ok",
14350 		.result_unpriv = REJECT,
14351 		.errstr = "!read_ok",
14352 		.result = REJECT,
14353 	},
14354 	{
14355 		"jset: half-known const compare",
14356 		.insns = {
14357 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
14358 				     BPF_FUNC_get_prandom_u32),
14359 			BPF_ALU64_IMM(BPF_OR, BPF_REG_0, 2),
14360 			BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 3, 1),
14361 			BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
14362 			BPF_MOV64_IMM(BPF_REG_0, 0),
14363 			BPF_EXIT_INSN(),
14364 		},
14365 		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
14366 		.result_unpriv = ACCEPT,
14367 		.result = ACCEPT,
14368 	},
14369 	{
14370 		"jset: range",
14371 		.insns = {
14372 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
14373 				     BPF_FUNC_get_prandom_u32),
14374 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
14375 			BPF_MOV64_IMM(BPF_REG_0, 0),
14376 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xff),
14377 			BPF_JMP_IMM(BPF_JSET, BPF_REG_1, 0xf0, 3),
14378 			BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 0x10, 1),
14379 			BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
14380 			BPF_EXIT_INSN(),
14381 			BPF_JMP_IMM(BPF_JSET, BPF_REG_1, 0x10, 1),
14382 			BPF_EXIT_INSN(),
14383 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0x10, 1),
14384 			BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
14385 			BPF_EXIT_INSN(),
14386 		},
14387 		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
14388 		.result_unpriv = ACCEPT,
14389 		.result = ACCEPT,
14390 	},
14391 };
14392 
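/* Determine the length of a test program: scan the fixed-size insns[]
 * array backwards and skip over trailing all-zero instructions.
 */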
14393 static int probe_filter_length(const struct bpf_insn *fp)
14394 {
14395 	int len;
14396 
14397 	for (len = MAX_INSNS - 1; len > 0; --len)
14398 		if (fp[len].code != 0 || fp[len].imm != 0)
14399 			break;
14400 	return len + 1;
14401 }
14402 
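/* Create a map of the given type for the fixups below; hash maps are
 * created with BPF_F_NO_PREALLOC.
 */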
14403 static int create_map(uint32_t type, uint32_t size_key,
14404 		      uint32_t size_value, uint32_t max_elem)
14405 {
14406 	int fd;
14407 
14408 	fd = bpf_create_map(type, size_key, size_value, max_elem,
14409 			    type == BPF_MAP_TYPE_HASH ? BPF_F_NO_PREALLOC : 0);
14410 	if (fd < 0)
14411 		printf("Failed to create map '%s'!\n", strerror(errno));
14412 
14413 	return fd;
14414 }
14415 
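/* Dummy program that simply returns 42, used to populate prog arrays. */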
14416 static int create_prog_dummy1(enum bpf_prog_type prog_type)
14417 {
14418 	struct bpf_insn prog[] = {
14419 		BPF_MOV64_IMM(BPF_REG_0, 42),
14420 		BPF_EXIT_INSN(),
14421 	};
14422 
14423 	return bpf_load_program(prog_type, prog,
14424 				ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
14425 }
14426 
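/* Dummy program that tail-calls into slot idx of the prog array mfd and
 * returns 41 if the tail call does not take place.
 */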
14427 static int create_prog_dummy2(enum bpf_prog_type prog_type, int mfd, int idx)
14428 {
14429 	struct bpf_insn prog[] = {
14430 		BPF_MOV64_IMM(BPF_REG_3, idx),
14431 		BPF_LD_MAP_FD(BPF_REG_2, mfd),
14432 		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
14433 			     BPF_FUNC_tail_call),
14434 		BPF_MOV64_IMM(BPF_REG_0, 41),
14435 		BPF_EXIT_INSN(),
14436 	};
14437 
14438 	return bpf_load_program(prog_type, prog,
14439 				ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
14440 }
14441 
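/* Create a prog array with max_elem slots and populate entries p1key and
 * 1 with the two dummy programs above.
 */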
14442 static int create_prog_array(enum bpf_prog_type prog_type, uint32_t max_elem,
14443 			     int p1key)
14444 {
14445 	int p2key = 1;
14446 	int mfd, p1fd, p2fd;
14447 
14448 	mfd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
14449 			     sizeof(int), max_elem, 0);
14450 	if (mfd < 0) {
14451 		printf("Failed to create prog array '%s'!\n", strerror(errno));
14452 		return -1;
14453 	}
14454 
14455 	p1fd = create_prog_dummy1(prog_type);
14456 	p2fd = create_prog_dummy2(prog_type, mfd, p2key);
14457 	if (p1fd < 0 || p2fd < 0)
14458 		goto out;
14459 	if (bpf_map_update_elem(mfd, &p1key, &p1fd, BPF_ANY) < 0)
14460 		goto out;
14461 	if (bpf_map_update_elem(mfd, &p2key, &p2fd, BPF_ANY) < 0)
14462 		goto out;
14463 	close(p2fd);
14464 	close(p1fd);
14465 
14466 	return mfd;
14467 out:
14468 	close(p2fd);
14469 	close(p1fd);
14470 	close(mfd);
14471 	return -1;
14472 }
14473 
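/* Create an array-of-maps outer map, using a one-element array as the
 * inner map template.
 */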
14474 static int create_map_in_map(void)
14475 {
14476 	int inner_map_fd, outer_map_fd;
14477 
14478 	inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
14479 				      sizeof(int), 1, 0);
14480 	if (inner_map_fd < 0) {
14481 		printf("Failed to create array '%s'!\n", strerror(errno));
14482 		return inner_map_fd;
14483 	}
14484 
14485 	outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
14486 					     sizeof(int), inner_map_fd, 1, 0);
14487 	if (outer_map_fd < 0)
14488 		printf("Failed to create array of maps '%s'!\n",
14489 		       strerror(errno));
14490 
14491 	close(inner_map_fd);
14492 
14493 	return outer_map_fd;
14494 }
14495 
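/* Create a (per-CPU) cgroup storage map with TEST_DATA_LEN-sized values. */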
14496 static int create_cgroup_storage(bool percpu)
14497 {
14498 	enum bpf_map_type type = percpu ? BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE :
14499 		BPF_MAP_TYPE_CGROUP_STORAGE;
14500 	int fd;
14501 
14502 	fd = bpf_create_map(type, sizeof(struct bpf_cgroup_storage_key),
14503 			    TEST_DATA_LEN, 0, 0);
14504 	if (fd < 0)
14505 		printf("Failed to create cgroup storage '%s'!\n",
14506 		       strerror(errno));
14507 
14508 	return fd;
14509 }
14510 
14511 static char bpf_vlog[UINT_MAX >> 8];
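/* Buffer receiving the verifier log of each program load. */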
14512 
14513 static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
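/* Prepare a test program for loading: run its fill_helper if one is set,
 * then create the maps it references and patch their fds into the
 * instructions listed in the zero-terminated fixup_* index arrays.
 */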
14514 			  struct bpf_insn *prog, int *map_fds)
14515 {
14516 	int *fixup_map_hash_8b = test->fixup_map_hash_8b;
14517 	int *fixup_map_hash_48b = test->fixup_map_hash_48b;
14518 	int *fixup_map_hash_16b = test->fixup_map_hash_16b;
14519 	int *fixup_map_array_48b = test->fixup_map_array_48b;
14520 	int *fixup_map_sockmap = test->fixup_map_sockmap;
14521 	int *fixup_map_sockhash = test->fixup_map_sockhash;
14522 	int *fixup_map_xskmap = test->fixup_map_xskmap;
14523 	int *fixup_map_stacktrace = test->fixup_map_stacktrace;
14524 	int *fixup_prog1 = test->fixup_prog1;
14525 	int *fixup_prog2 = test->fixup_prog2;
14526 	int *fixup_map_in_map = test->fixup_map_in_map;
14527 	int *fixup_cgroup_storage = test->fixup_cgroup_storage;
14528 	int *fixup_percpu_cgroup_storage = test->fixup_percpu_cgroup_storage;
14529 
14530 	if (test->fill_helper)
14531 		test->fill_helper(test);
14532 
14533 	/* Allocating hash maps with a single element is fine here, since
14534 	 * we only exercise the verifier and never do a runtime lookup, so
14535 	 * the only thing that really matters is the value size.
14536 	 */
14537 	if (*fixup_map_hash_8b) {
14538 		map_fds[0] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
14539 					sizeof(long long), 1);
14540 		do {
14541 			prog[*fixup_map_hash_8b].imm = map_fds[0];
14542 			fixup_map_hash_8b++;
14543 		} while (*fixup_map_hash_8b);
14544 	}
14545 
14546 	if (*fixup_map_hash_48b) {
14547 		map_fds[1] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
14548 					sizeof(struct test_val), 1);
14549 		do {
14550 			prog[*fixup_map_hash_48b].imm = map_fds[1];
14551 			fixup_map_hash_48b++;
14552 		} while (*fixup_map_hash_48b);
14553 	}
14554 
14555 	if (*fixup_map_hash_16b) {
14556 		map_fds[2] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
14557 					sizeof(struct other_val), 1);
14558 		do {
14559 			prog[*fixup_map_hash_16b].imm = map_fds[2];
14560 			fixup_map_hash_16b++;
14561 		} while (*fixup_map_hash_16b);
14562 	}
14563 
14564 	if (*fixup_map_array_48b) {
14565 		map_fds[3] = create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
14566 					sizeof(struct test_val), 1);
14567 		do {
14568 			prog[*fixup_map_array_48b].imm = map_fds[3];
14569 			fixup_map_array_48b++;
14570 		} while (*fixup_map_array_48b);
14571 	}
14572 
14573 	if (*fixup_prog1) {
14574 		map_fds[4] = create_prog_array(prog_type, 4, 0);
14575 		do {
14576 			prog[*fixup_prog1].imm = map_fds[4];
14577 			fixup_prog1++;
14578 		} while (*fixup_prog1);
14579 	}
14580 
14581 	if (*fixup_prog2) {
14582 		map_fds[5] = create_prog_array(prog_type, 8, 7);
14583 		do {
14584 			prog[*fixup_prog2].imm = map_fds[5];
14585 			fixup_prog2++;
14586 		} while (*fixup_prog2);
14587 	}
14588 
14589 	if (*fixup_map_in_map) {
14590 		map_fds[6] = create_map_in_map();
14591 		do {
14592 			prog[*fixup_map_in_map].imm = map_fds[6];
14593 			fixup_map_in_map++;
14594 		} while (*fixup_map_in_map);
14595 	}
14596 
14597 	if (*fixup_cgroup_storage) {
14598 		map_fds[7] = create_cgroup_storage(false);
14599 		do {
14600 			prog[*fixup_cgroup_storage].imm = map_fds[7];
14601 			fixup_cgroup_storage++;
14602 		} while (*fixup_cgroup_storage);
14603 	}
14604 
14605 	if (*fixup_percpu_cgroup_storage) {
14606 		map_fds[8] = create_cgroup_storage(true);
14607 		do {
14608 			prog[*fixup_percpu_cgroup_storage].imm = map_fds[8];
14609 			fixup_percpu_cgroup_storage++;
14610 		} while (*fixup_percpu_cgroup_storage);
14611 	}
14612 	if (*fixup_map_sockmap) {
14613 		map_fds[9] = create_map(BPF_MAP_TYPE_SOCKMAP, sizeof(int),
14614 					sizeof(int), 1);
14615 		do {
14616 			prog[*fixup_map_sockmap].imm = map_fds[9];
14617 			fixup_map_sockmap++;
14618 		} while (*fixup_map_sockmap);
14619 	}
14620 	if (*fixup_map_sockhash) {
14621 		map_fds[10] = create_map(BPF_MAP_TYPE_SOCKHASH, sizeof(int),
14622 					sizeof(int), 1);
14623 		do {
14624 			prog[*fixup_map_sockhash].imm = map_fds[10];
14625 			fixup_map_sockhash++;
14626 		} while (*fixup_map_sockhash);
14627 	}
14628 	if (*fixup_map_xskmap) {
14629 		map_fds[11] = create_map(BPF_MAP_TYPE_XSKMAP, sizeof(int),
14630 					sizeof(int), 1);
14631 		do {
14632 			prog[*fixup_map_xskmap].imm = map_fds[11];
14633 			fixup_map_xskmap++;
14634 		} while (*fixup_map_xskmap);
14635 	}
14636 	if (*fixup_map_stacktrace) {
14637 		map_fds[12] = create_map(BPF_MAP_TYPE_STACK_TRACE, sizeof(u32),
14638 					 sizeof(u64), 1);
14639 		do {
14640 			prog[*fixup_map_stacktrace].imm = map_fds[12];
14641 			fixup_map_stacktrace++;
14642 		} while (*fixup_map_stacktrace);
14643 	}
14644 }
14645 
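/* Raise or drop CAP_SYS_ADMIN in the effective capability set of the
 * current process.
 */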
14646 static int set_admin(bool admin)
14647 {
14648 	cap_t caps;
14649 	const cap_value_t cap_val = CAP_SYS_ADMIN;
14650 	int ret = -1;
14651 
14652 	caps = cap_get_proc();
14653 	if (!caps) {
14654 		perror("cap_get_proc");
14655 		return -1;
14656 	}
14657 	if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
14658 				admin ? CAP_SET : CAP_CLEAR)) {
14659 		perror("cap_set_flag");
14660 		goto out;
14661 	}
14662 	if (cap_set_proc(caps)) {
14663 		perror("cap_set_proc");
14664 		goto out;
14665 	}
14666 	ret = 0;
14667 out:
14668 	if (cap_free(caps))
14669 		perror("cap_free");
14670 	return ret;
14671 }
14672 
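/* Execute the loaded program once over the given data and compare its
 * return value with the expected one; for unprivileged tests,
 * CAP_SYS_ADMIN is temporarily re-acquired around the test run.
 */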
14673 static int do_prog_test_run(int fd_prog, bool unpriv, uint32_t expected_val,
14674 			    void *data, size_t size_data)
14675 {
14676 	__u8 tmp[TEST_DATA_LEN << 2];
14677 	__u32 size_tmp = sizeof(tmp);
14678 	uint32_t retval;
14679 	int err;
14680 
14681 	if (unpriv)
14682 		set_admin(true);
14683 	err = bpf_prog_test_run(fd_prog, 1, data, size_data,
14684 				tmp, &size_tmp, &retval, NULL);
14685 	if (unpriv)
14686 		set_admin(false);
14687 	if (err && errno != 524/*ENOTSUPP*/ && errno != EPERM) {
14688 		printf("Unexpected bpf_prog_test_run error ");
14689 		return err;
14690 	}
14691 	if (!err && retval != expected_val &&
14692 	    expected_val != POINTER_VALUE) {
14693 		printf("FAIL retval %d != %d ", retval, expected_val);
14694 		return 1;
14695 	}
14696 
14697 	return 0;
14698 }
14699 
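/* Load a single test program, check the verifier verdict, error string
 * and insn_processed count against the expectations, and execute the
 * program for each of its test runs when it loaded successfully.
 */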
14700 static void do_test_single(struct bpf_test *test, bool unpriv,
14701 			   int *passes, int *errors)
14702 {
14703 	int fd_prog, expected_ret, alignment_prevented_execution;
14704 	int prog_len, prog_type = test->prog_type;
14705 	struct bpf_insn *prog = test->insns;
14706 	int run_errs, run_successes;
14707 	int map_fds[MAX_NR_MAPS];
14708 	const char *expected_err;
14709 	__u32 pflags;
14710 	int i, err;
14711 
14712 	for (i = 0; i < MAX_NR_MAPS; i++)
14713 		map_fds[i] = -1;
14714 
14715 	if (!prog_type)
14716 		prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
14717 	do_test_fixup(test, prog_type, prog, map_fds);
14718 	prog_len = probe_filter_length(prog);
14719 
14720 	pflags = 0;
14721 	if (test->flags & F_LOAD_WITH_STRICT_ALIGNMENT)
14722 		pflags |= BPF_F_STRICT_ALIGNMENT;
14723 	if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
14724 		pflags |= BPF_F_ANY_ALIGNMENT;
14725 	fd_prog = bpf_verify_program(prog_type, prog, prog_len, pflags,
14726 				     "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);
14727 
14728 	expected_ret = unpriv && test->result_unpriv != UNDEF ?
14729 		       test->result_unpriv : test->result;
14730 	expected_err = unpriv && test->errstr_unpriv ?
14731 		       test->errstr_unpriv : test->errstr;
14732 
14733 	alignment_prevented_execution = 0;
14734 
14735 	if (expected_ret == ACCEPT) {
14736 		if (fd_prog < 0) {
14737 			printf("FAIL\nFailed to load prog '%s'!\n",
14738 			       strerror(errno));
14739 			goto fail_log;
14740 		}
14741 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14742 		if (fd_prog >= 0 &&
14743 		    (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS))
14744 			alignment_prevented_execution = 1;
14745 #endif
14746 	} else {
14747 		if (fd_prog >= 0) {
14748 			printf("FAIL\nUnexpected success loading the prog!\n");
14749 			goto fail_log;
14750 		}
14751 		if (!strstr(bpf_vlog, expected_err)) {
14752 			printf("FAIL\nUnexpected error message!\n\tEXP: %s\n\tRES: %s\n",
14753 			      expected_err, bpf_vlog);
14754 			goto fail_log;
14755 		}
14756 	}
14757 
14758 	if (test->insn_processed) {
14759 		uint32_t insn_processed;
14760 		char *proc;
14761 
14762 		proc = strstr(bpf_vlog, "processed ");
14763 		insn_processed = atoi(proc + 10);
14764 		if (test->insn_processed != insn_processed) {
14765 			printf("FAIL\nUnexpected insn_processed %u vs %u\n",
14766 			       insn_processed, test->insn_processed);
14767 			goto fail_log;
14768 		}
14769 	}
14770 
14771 	run_errs = 0;
14772 	run_successes = 0;
14773 	if (!alignment_prevented_execution && fd_prog >= 0) {
14774 		uint32_t expected_val;
14775 		int i;
14776 
14777 		if (!test->runs) {
14778 			expected_val = unpriv && test->retval_unpriv ?
14779 				test->retval_unpriv : test->retval;
14780 
14781 			err = do_prog_test_run(fd_prog, unpriv, expected_val,
14782 					       test->data, sizeof(test->data));
14783 			if (err)
14784 				run_errs++;
14785 			else
14786 				run_successes++;
14787 		}
14788 
14789 		for (i = 0; i < test->runs; i++) {
14790 			if (unpriv && test->retvals[i].retval_unpriv)
14791 				expected_val = test->retvals[i].retval_unpriv;
14792 			else
14793 				expected_val = test->retvals[i].retval;
14794 
14795 			err = do_prog_test_run(fd_prog, unpriv, expected_val,
14796 					       test->retvals[i].data,
14797 					       sizeof(test->retvals[i].data));
14798 			if (err) {
14799 				printf("(run %d/%d) ", i + 1, test->runs);
14800 				run_errs++;
14801 			} else {
14802 				run_successes++;
14803 			}
14804 		}
14805 	}
14806 
14807 	if (!run_errs) {
14808 		(*passes)++;
14809 		if (run_successes > 1)
14810 			printf("%d cases ", run_successes);
14811 		printf("OK");
14812 		if (alignment_prevented_execution)
14813 			printf(" (NOTE: not executed due to unknown alignment)");
14814 		printf("\n");
14815 	} else {
14816 		printf("\n");
14817 		goto fail_log;
14818 	}
14819 close_fds:
14820 	close(fd_prog);
14821 	for (i = 0; i < MAX_NR_MAPS; i++)
14822 		close(map_fds[i]);
14823 	sched_yield();
14824 	return;
14825 fail_log:
14826 	(*errors)++;
14827 	printf("%s", bpf_vlog);
14828 	goto close_fds;
14829 }
14830 
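/* Whether CAP_SYS_ADMIN is in our effective capability set. */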
14831 static bool is_admin(void)
14832 {
14833 	cap_t caps;
14834 	cap_flag_value_t sysadmin = CAP_CLEAR;
14835 	const cap_value_t cap_val = CAP_SYS_ADMIN;
14836 
14837 #ifdef CAP_IS_SUPPORTED
14838 	if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
14839 		perror("cap_get_flag");
14840 		return false;
14841 	}
14842 #endif
14843 	caps = cap_get_proc();
14844 	if (!caps) {
14845 		perror("cap_get_proc");
14846 		return false;
14847 	}
14848 	if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
14849 		perror("cap_get_flag");
14850 	if (cap_free(caps))
14851 		perror("cap_free");
14852 	return (sysadmin == CAP_SET);
14853 }
14854 
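/* Read the kernel.unprivileged_bpf_disabled sysctl; if it cannot be
 * read, assume unprivileged BPF is disabled.
 */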
14855 static void get_unpriv_disabled(void)
14856 {
14857 	char buf[2];
14858 	FILE *fd;
14859 
14860 	fd = fopen("/proc/sys/"UNPRIV_SYSCTL, "r");
14861 	if (!fd) {
14862 		perror("fopen /proc/sys/"UNPRIV_SYSCTL);
14863 		unpriv_disabled = true;
14864 		return;
14865 	}
14866 	if (fgets(buf, 2, fd) == buf && atoi(buf))
14867 		unpriv_disabled = true;
14868 	fclose(fd);
14869 }
14870 
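/* Program types that an unprivileged user may load and that we
 * therefore also test without CAP_SYS_ADMIN.
 */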
14871 static bool test_as_unpriv(struct bpf_test *test)
14872 {
14873 	return !test->prog_type ||
14874 	       test->prog_type == BPF_PROG_TYPE_SOCKET_FILTER ||
14875 	       test->prog_type == BPF_PROG_TYPE_CGROUP_SKB;
14876 }
14877 
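/* Run tests in [from, to): eligible tests are run unprivileged ("/u")
 * and, when we are root, every test is also run privileged ("/p").
 */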
14878 static int do_test(bool unpriv, unsigned int from, unsigned int to)
14879 {
14880 	int i, passes = 0, errors = 0, skips = 0;
14881 
14882 	for (i = from; i < to; i++) {
14883 		struct bpf_test *test = &tests[i];
14884 
14885 		/* Skip the unprivileged run right away when
14886 		 * unprivileged BPF is disabled via sysctl.
14887 		 */
14888 		if (test_as_unpriv(test) && unpriv_disabled) {
14889 			printf("#%d/u %s SKIP\n", i, test->descr);
14890 			skips++;
14891 		} else if (test_as_unpriv(test)) {
14892 			if (!unpriv)
14893 				set_admin(false);
14894 			printf("#%d/u %s ", i, test->descr);
14895 			do_test_single(test, true, &passes, &errors);
14896 			if (!unpriv)
14897 				set_admin(true);
14898 		}
14899 
14900 		if (unpriv) {
14901 			printf("#%d/p %s SKIP\n", i, test->descr);
14902 			skips++;
14903 		} else {
14904 			printf("#%d/p %s ", i, test->descr);
14905 			do_test_single(test, false, &passes, &errors);
14906 		}
14907 	}
14908 
14909 	printf("Summary: %d PASSED, %d SKIPPED, %d FAILED\n", passes,
14910 	       skips, errors);
14911 	return errors ? EXIT_FAILURE : EXIT_SUCCESS;
14912 }
14913 
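/* With no arguments all tests are run; one numeric argument selects a
 * single test, two arguments select an inclusive range of tests.
 */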
14914 int main(int argc, char **argv)
14915 {
14916 	unsigned int from = 0, to = ARRAY_SIZE(tests);
14917 	bool unpriv = !is_admin();
14918 
14919 	if (argc == 3) {
14920 		unsigned int l = atoi(argv[argc - 2]);
14921 		unsigned int u = atoi(argv[argc - 1]);
14922 
14923 		if (l < to && u < to) {
14924 			from = l;
14925 			to   = u + 1;
14926 		}
14927 	} else if (argc == 2) {
14928 		unsigned int t = atoi(argv[argc - 1]);
14929 
14930 		if (t < to) {
14931 			from = t;
14932 			to   = t + 1;
14933 		}
14934 	}
14935 
14936 	get_unpriv_disabled();
14937 	if (unpriv && unpriv_disabled) {
14938 		printf("Cannot run as unprivileged user with sysctl %s.\n",
14939 		       UNPRIV_SYSCTL);
14940 		return EXIT_FAILURE;
14941 	}
14942 
14943 	bpf_semi_rand_init();
14944 	return do_test(unpriv, from, to);
14945 }
14946