xref: /linux/tools/testing/selftests/bpf/test_verifier.c (revision c4c14c3bd177ea769fee938674f73a8ec0cdd47a)
1 /*
2  * Testsuite for eBPF verifier
3  *
4  * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
5  * Copyright (c) 2017 Facebook
6  * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
7  *
8  * This program is free software; you can redistribute it and/or
9  * modify it under the terms of version 2 of the GNU General Public
10  * License as published by the Free Software Foundation.
11  */
12 
13 #include <endian.h>
14 #include <asm/types.h>
15 #include <linux/types.h>
16 #include <stdint.h>
17 #include <stdio.h>
18 #include <stdlib.h>
19 #include <unistd.h>
20 #include <errno.h>
21 #include <string.h>
22 #include <stddef.h>
23 #include <stdbool.h>
24 #include <sched.h>
25 #include <limits.h>
26 
27 #include <sys/capability.h>
28 
29 #include <linux/unistd.h>
30 #include <linux/filter.h>
31 #include <linux/bpf_perf_event.h>
32 #include <linux/bpf.h>
33 #include <linux/if_ether.h>
34 
35 #include <bpf/bpf.h>
36 
37 #ifdef HAVE_GENHDR
38 # include "autoconf.h"
39 #else
40 # if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
41 #  define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
42 # endif
43 #endif
44 #include "bpf_rlimit.h"
45 #include "bpf_rand.h"
46 #include "bpf_util.h"
47 #include "../../../include/linux/filter.h"
48 
49 #define MAX_INSNS	BPF_MAXINSNS
50 #define MAX_FIXUPS	8
51 #define MAX_NR_MAPS	13
52 #define POINTER_VALUE	0xcafe4all
53 #define TEST_DATA_LEN	64
54 
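/* Per-test flags (struct bpf_test::flags): F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
 * marks tests whose expected result assumes
 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS; F_LOAD_WITH_STRICT_ALIGNMENT asks
 * the runner to load the program with strict alignment checking.
 */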
55 #define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS	(1 << 0)
56 #define F_LOAD_WITH_STRICT_ALIGNMENT		(1 << 1)
57 
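/* Cached value of the kernel.unprivileged_bpf_disabled sysctl; when it is
 * set, the unprivileged variants of the tests are expected to be skipped.
 */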
58 #define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
59 static bool unpriv_disabled = false;
60 
61 struct bpf_test {
62 	const char *descr;
63 	struct bpf_insn	insns[MAX_INSNS];
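	/* Each fixup_* array lists instruction indices whose ld_imm64 (map fd)
	 * immediate is patched with the fd of the corresponding test map
	 * before the program is loaded.
	 */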
64 	int fixup_map_hash_8b[MAX_FIXUPS];
65 	int fixup_map_hash_48b[MAX_FIXUPS];
66 	int fixup_map_hash_16b[MAX_FIXUPS];
67 	int fixup_map_array_48b[MAX_FIXUPS];
68 	int fixup_map_sockmap[MAX_FIXUPS];
69 	int fixup_map_sockhash[MAX_FIXUPS];
70 	int fixup_map_xskmap[MAX_FIXUPS];
71 	int fixup_map_stacktrace[MAX_FIXUPS];
72 	int fixup_prog1[MAX_FIXUPS];
73 	int fixup_prog2[MAX_FIXUPS];
74 	int fixup_map_in_map[MAX_FIXUPS];
75 	int fixup_cgroup_storage[MAX_FIXUPS];
76 	int fixup_percpu_cgroup_storage[MAX_FIXUPS];
77 	const char *errstr;
78 	const char *errstr_unpriv;
79 	uint32_t retval;
80 	enum {
81 		UNDEF,
82 		ACCEPT,
83 		REJECT
84 	} result, result_unpriv;
85 	enum bpf_prog_type prog_type;
86 	uint8_t flags;
87 	__u8 data[TEST_DATA_LEN];
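	/* Optional callback invoked before load to generate the insns array
	 * at runtime (see the bpf_fill_*() helpers below).
	 */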
88 	void (*fill_helper)(struct bpf_test *self);
89 };
90 
91 /* Note we want this to be 64 bit aligned so that the end of our array is
92  * actually the end of the structure.
93  */
94 #define MAX_ENTRIES 11
95 
96 struct test_val {
97 	unsigned int index;
98 	int foo[MAX_ENTRIES];
99 };
100 
101 struct other_val {
102 	long long foo;
103 	long long bar;
104 };
105 

106 static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
107 {
108 	/* test: {skb->data[0], vlan_push} x PUSH_CNT + {skb->data[0], vlan_pop} x PUSH_CNT, x 5 */
109 #define PUSH_CNT 51
110 	unsigned int len = BPF_MAXINSNS;
111 	struct bpf_insn *insn = self->insns;
112 	int i = 0, j, k = 0;
113 
114 	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
115 loop:
116 	for (j = 0; j < PUSH_CNT; j++) {
117 		insn[i++] = BPF_LD_ABS(BPF_B, 0);
118 		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2);
119 		i++;
120 		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
121 		insn[i++] = BPF_MOV64_IMM(BPF_REG_2, 1);
122 		insn[i++] = BPF_MOV64_IMM(BPF_REG_3, 2);
123 		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
124 					 BPF_FUNC_skb_vlan_push);
125 		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2);
126 		i++;
127 	}
128 
129 	for (j = 0; j < PUSH_CNT; j++) {
130 		insn[i++] = BPF_LD_ABS(BPF_B, 0);
131 		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2);
132 		i++;
133 		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
134 		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
135 					 BPF_FUNC_skb_vlan_pop);
136 		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2);
137 		i++;
138 	}
139 	if (++k < 5)
140 		goto loop;
141 
142 	for (; i < len - 1; i++)
143 		insn[i] = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 0xbef);
144 	insn[len - 1] = BPF_EXIT_INSN();
145 }
146 
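/* Fill the program up to BPF_MAXINSNS with BPF_LD_ABS loads, guarded by a
 * single conditional jump that may skip over all of them.
 */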
147 static void bpf_fill_jump_around_ld_abs(struct bpf_test *self)
148 {
149 	struct bpf_insn *insn = self->insns;
150 	unsigned int len = BPF_MAXINSNS;
151 	int i = 0;
152 
153 	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
154 	insn[i++] = BPF_LD_ABS(BPF_B, 0);
155 	insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 10, len - i - 2);
156 	i++;
157 	while (i < len - 1)
158 		insn[i++] = BPF_LD_ABS(BPF_B, 1);
159 	insn[i] = BPF_EXIT_INSN();
160 }
161 
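/* Emit a chain of pseudo-random BPF_LD_IMM64 loads XORed into R0.  On entry,
 * self->retval holds the number of instructions to fill; on return it holds
 * the expected program result: the 64-bit XOR of all constants folded down
 * to 32 bits.
 */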
162 static void bpf_fill_rand_ld_dw(struct bpf_test *self)
163 {
164 	struct bpf_insn *insn = self->insns;
165 	uint64_t res = 0;
166 	int i = 0;
167 
168 	insn[i++] = BPF_MOV32_IMM(BPF_REG_0, 0);
169 	while (i < self->retval) {
170 		uint64_t val = bpf_semi_rand_get();
171 		struct bpf_insn tmp[2] = { BPF_LD_IMM64(BPF_REG_1, val) };
172 
173 		res ^= val;
174 		insn[i++] = tmp[0];
175 		insn[i++] = tmp[1];
176 		insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
177 	}
178 	insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
179 	insn[i++] = BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32);
180 	insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
181 	insn[i] = BPF_EXIT_INSN();
182 	res ^= (res >> 32);
183 	self->retval = (uint32_t)res;
184 }
185 
186 /* BPF_SK_LOOKUP contains 13 instructions; account for them when computing map fixup offsets */
187 #define BPF_SK_LOOKUP							\
188 	/* struct bpf_sock_tuple tuple = {} */				\
189 	BPF_MOV64_IMM(BPF_REG_2, 0),					\
190 	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8),			\
191 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -16),		\
192 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -24),		\
193 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -32),		\
194 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -40),		\
195 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -48),		\
196 	/* sk = sk_lookup_tcp(ctx, &tuple, sizeof tuple, 0, 0) */	\
197 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),				\
198 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),				\
199 	BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)),	\
200 	BPF_MOV64_IMM(BPF_REG_4, 0),					\
201 	BPF_MOV64_IMM(BPF_REG_5, 0),					\
202 	BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp)
203 
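/* Table of verifier test cases consumed by the test runner. */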
204 static struct bpf_test tests[] = {
205 	{
206 		"add+sub+mul",
207 		.insns = {
208 			BPF_MOV64_IMM(BPF_REG_1, 1),
209 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
210 			BPF_MOV64_IMM(BPF_REG_2, 3),
211 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
212 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
213 			BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
214 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
215 			BPF_EXIT_INSN(),
216 		},
217 		.result = ACCEPT,
218 		.retval = -3,
219 	},
220 	{
221 		"DIV32 by 0, zero check 1",
222 		.insns = {
223 			BPF_MOV32_IMM(BPF_REG_0, 42),
224 			BPF_MOV32_IMM(BPF_REG_1, 0),
225 			BPF_MOV32_IMM(BPF_REG_2, 1),
226 			BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
227 			BPF_EXIT_INSN(),
228 		},
229 		.result = ACCEPT,
230 		.retval = 42,
231 	},
232 	{
233 		"DIV32 by 0, zero check 2",
234 		.insns = {
235 			BPF_MOV32_IMM(BPF_REG_0, 42),
236 			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
237 			BPF_MOV32_IMM(BPF_REG_2, 1),
238 			BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
239 			BPF_EXIT_INSN(),
240 		},
241 		.result = ACCEPT,
242 		.retval = 42,
243 	},
244 	{
245 		"DIV64 by 0, zero check",
246 		.insns = {
247 			BPF_MOV32_IMM(BPF_REG_0, 42),
248 			BPF_MOV32_IMM(BPF_REG_1, 0),
249 			BPF_MOV32_IMM(BPF_REG_2, 1),
250 			BPF_ALU64_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
251 			BPF_EXIT_INSN(),
252 		},
253 		.result = ACCEPT,
254 		.retval = 42,
255 	},
256 	{
257 		"MOD32 by 0, zero check 1",
258 		.insns = {
259 			BPF_MOV32_IMM(BPF_REG_0, 42),
260 			BPF_MOV32_IMM(BPF_REG_1, 0),
261 			BPF_MOV32_IMM(BPF_REG_2, 1),
262 			BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
263 			BPF_EXIT_INSN(),
264 		},
265 		.result = ACCEPT,
266 		.retval = 42,
267 	},
268 	{
269 		"MOD32 by 0, zero check 2",
270 		.insns = {
271 			BPF_MOV32_IMM(BPF_REG_0, 42),
272 			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
273 			BPF_MOV32_IMM(BPF_REG_2, 1),
274 			BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
275 			BPF_EXIT_INSN(),
276 		},
277 		.result = ACCEPT,
278 		.retval = 42,
279 	},
280 	{
281 		"MOD64 by 0, zero check",
282 		.insns = {
283 			BPF_MOV32_IMM(BPF_REG_0, 42),
284 			BPF_MOV32_IMM(BPF_REG_1, 0),
285 			BPF_MOV32_IMM(BPF_REG_2, 1),
286 			BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
287 			BPF_EXIT_INSN(),
288 		},
289 		.result = ACCEPT,
290 		.retval = 42,
291 	},
292 	{
293 		"DIV32 by 0, zero check ok, cls",
294 		.insns = {
295 			BPF_MOV32_IMM(BPF_REG_0, 42),
296 			BPF_MOV32_IMM(BPF_REG_1, 2),
297 			BPF_MOV32_IMM(BPF_REG_2, 16),
298 			BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
299 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
300 			BPF_EXIT_INSN(),
301 		},
302 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
303 		.result = ACCEPT,
304 		.retval = 8,
305 	},
306 	{
307 		"DIV32 by 0, zero check 1, cls",
308 		.insns = {
309 			BPF_MOV32_IMM(BPF_REG_1, 0),
310 			BPF_MOV32_IMM(BPF_REG_0, 1),
311 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
312 			BPF_EXIT_INSN(),
313 		},
314 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
315 		.result = ACCEPT,
316 		.retval = 0,
317 	},
318 	{
319 		"DIV32 by 0, zero check 2, cls",
320 		.insns = {
321 			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
322 			BPF_MOV32_IMM(BPF_REG_0, 1),
323 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
324 			BPF_EXIT_INSN(),
325 		},
326 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
327 		.result = ACCEPT,
328 		.retval = 0,
329 	},
330 	{
331 		"DIV64 by 0, zero check, cls",
332 		.insns = {
333 			BPF_MOV32_IMM(BPF_REG_1, 0),
334 			BPF_MOV32_IMM(BPF_REG_0, 1),
335 			BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
336 			BPF_EXIT_INSN(),
337 		},
338 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
339 		.result = ACCEPT,
340 		.retval = 0,
341 	},
342 	{
343 		"MOD32 by 0, zero check ok, cls",
344 		.insns = {
345 			BPF_MOV32_IMM(BPF_REG_0, 42),
346 			BPF_MOV32_IMM(BPF_REG_1, 3),
347 			BPF_MOV32_IMM(BPF_REG_2, 5),
348 			BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
349 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
350 			BPF_EXIT_INSN(),
351 		},
352 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
353 		.result = ACCEPT,
354 		.retval = 2,
355 	},
356 	{
357 		"MOD32 by 0, zero check 1, cls",
358 		.insns = {
359 			BPF_MOV32_IMM(BPF_REG_1, 0),
360 			BPF_MOV32_IMM(BPF_REG_0, 1),
361 			BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
362 			BPF_EXIT_INSN(),
363 		},
364 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
365 		.result = ACCEPT,
366 		.retval = 1,
367 	},
368 	{
369 		"MOD32 by 0, zero check 2, cls",
370 		.insns = {
371 			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
372 			BPF_MOV32_IMM(BPF_REG_0, 1),
373 			BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
374 			BPF_EXIT_INSN(),
375 		},
376 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
377 		.result = ACCEPT,
378 		.retval = 1,
379 	},
380 	{
381 		"MOD64 by 0, zero check 1, cls",
382 		.insns = {
383 			BPF_MOV32_IMM(BPF_REG_1, 0),
384 			BPF_MOV32_IMM(BPF_REG_0, 2),
385 			BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
386 			BPF_EXIT_INSN(),
387 		},
388 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
389 		.result = ACCEPT,
390 		.retval = 2,
391 	},
392 	{
393 		"MOD64 by 0, zero check 2, cls",
394 		.insns = {
395 			BPF_MOV32_IMM(BPF_REG_1, 0),
396 			BPF_MOV32_IMM(BPF_REG_0, -1),
397 			BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
398 			BPF_EXIT_INSN(),
399 		},
400 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
401 		.result = ACCEPT,
402 		.retval = -1,
403 	},
404 	/* Just make sure that JITs use udiv/umod, as otherwise we would get
405 	 * an exception from the INT_MIN/-1 overflow, similar to the div by
406 	 * zero case.
407 	 */
408 	{
409 		"DIV32 overflow, check 1",
410 		.insns = {
411 			BPF_MOV32_IMM(BPF_REG_1, -1),
412 			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
413 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
414 			BPF_EXIT_INSN(),
415 		},
416 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
417 		.result = ACCEPT,
418 		.retval = 0,
419 	},
420 	{
421 		"DIV32 overflow, check 2",
422 		.insns = {
423 			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
424 			BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, -1),
425 			BPF_EXIT_INSN(),
426 		},
427 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
428 		.result = ACCEPT,
429 		.retval = 0,
430 	},
431 	{
432 		"DIV64 overflow, check 1",
433 		.insns = {
434 			BPF_MOV64_IMM(BPF_REG_1, -1),
435 			BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
436 			BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
437 			BPF_EXIT_INSN(),
438 		},
439 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
440 		.result = ACCEPT,
441 		.retval = 0,
442 	},
443 	{
444 		"DIV64 overflow, check 2",
445 		.insns = {
446 			BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
447 			BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, -1),
448 			BPF_EXIT_INSN(),
449 		},
450 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
451 		.result = ACCEPT,
452 		.retval = 0,
453 	},
454 	{
455 		"MOD32 overflow, check 1",
456 		.insns = {
457 			BPF_MOV32_IMM(BPF_REG_1, -1),
458 			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
459 			BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
460 			BPF_EXIT_INSN(),
461 		},
462 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
463 		.result = ACCEPT,
464 		.retval = INT_MIN,
465 	},
466 	{
467 		"MOD32 overflow, check 2",
468 		.insns = {
469 			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
470 			BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, -1),
471 			BPF_EXIT_INSN(),
472 		},
473 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
474 		.result = ACCEPT,
475 		.retval = INT_MIN,
476 	},
477 	{
478 		"MOD64 overflow, check 1",
479 		.insns = {
480 			BPF_MOV64_IMM(BPF_REG_1, -1),
481 			BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
482 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
483 			BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
484 			BPF_MOV32_IMM(BPF_REG_0, 0),
485 			BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
486 			BPF_MOV32_IMM(BPF_REG_0, 1),
487 			BPF_EXIT_INSN(),
488 		},
489 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
490 		.result = ACCEPT,
491 		.retval = 1,
492 	},
493 	{
494 		"MOD64 overflow, check 2",
495 		.insns = {
496 			BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
497 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
498 			BPF_ALU64_IMM(BPF_MOD, BPF_REG_2, -1),
499 			BPF_MOV32_IMM(BPF_REG_0, 0),
500 			BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
501 			BPF_MOV32_IMM(BPF_REG_0, 1),
502 			BPF_EXIT_INSN(),
503 		},
504 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
505 		.result = ACCEPT,
506 		.retval = 1,
507 	},
508 	{
509 		"xor32 zero extend check",
510 		.insns = {
511 			BPF_MOV32_IMM(BPF_REG_2, -1),
512 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 32),
513 			BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 0xffff),
514 			BPF_ALU32_REG(BPF_XOR, BPF_REG_2, BPF_REG_2),
515 			BPF_MOV32_IMM(BPF_REG_0, 2),
516 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 1),
517 			BPF_MOV32_IMM(BPF_REG_0, 1),
518 			BPF_EXIT_INSN(),
519 		},
520 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
521 		.result = ACCEPT,
522 		.retval = 1,
523 	},
524 	{
525 		"empty prog",
526 		.insns = {
527 		},
528 		.errstr = "unknown opcode 00",
529 		.result = REJECT,
530 	},
531 	{
532 		"only exit insn",
533 		.insns = {
534 			BPF_EXIT_INSN(),
535 		},
536 		.errstr = "R0 !read_ok",
537 		.result = REJECT,
538 	},
539 	{
540 		"unreachable",
541 		.insns = {
542 			BPF_EXIT_INSN(),
543 			BPF_EXIT_INSN(),
544 		},
545 		.errstr = "unreachable",
546 		.result = REJECT,
547 	},
548 	{
549 		"unreachable2",
550 		.insns = {
551 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
552 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
553 			BPF_EXIT_INSN(),
554 		},
555 		.errstr = "unreachable",
556 		.result = REJECT,
557 	},
558 	{
559 		"out of range jump",
560 		.insns = {
561 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
562 			BPF_EXIT_INSN(),
563 		},
564 		.errstr = "jump out of range",
565 		.result = REJECT,
566 	},
567 	{
568 		"out of range jump2",
569 		.insns = {
570 			BPF_JMP_IMM(BPF_JA, 0, 0, -2),
571 			BPF_EXIT_INSN(),
572 		},
573 		.errstr = "jump out of range",
574 		.result = REJECT,
575 	},
576 	{
577 		"test1 ld_imm64",
578 		.insns = {
579 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
580 			BPF_LD_IMM64(BPF_REG_0, 0),
581 			BPF_LD_IMM64(BPF_REG_0, 0),
582 			BPF_LD_IMM64(BPF_REG_0, 1),
583 			BPF_LD_IMM64(BPF_REG_0, 1),
584 			BPF_MOV64_IMM(BPF_REG_0, 2),
585 			BPF_EXIT_INSN(),
586 		},
587 		.errstr = "invalid BPF_LD_IMM insn",
588 		.errstr_unpriv = "R1 pointer comparison",
589 		.result = REJECT,
590 	},
591 	{
592 		"test2 ld_imm64",
593 		.insns = {
594 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
595 			BPF_LD_IMM64(BPF_REG_0, 0),
596 			BPF_LD_IMM64(BPF_REG_0, 0),
597 			BPF_LD_IMM64(BPF_REG_0, 1),
598 			BPF_LD_IMM64(BPF_REG_0, 1),
599 			BPF_EXIT_INSN(),
600 		},
601 		.errstr = "invalid BPF_LD_IMM insn",
602 		.errstr_unpriv = "R1 pointer comparison",
603 		.result = REJECT,
604 	},
605 	{
606 		"test3 ld_imm64",
607 		.insns = {
608 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
609 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
610 			BPF_LD_IMM64(BPF_REG_0, 0),
611 			BPF_LD_IMM64(BPF_REG_0, 0),
612 			BPF_LD_IMM64(BPF_REG_0, 1),
613 			BPF_LD_IMM64(BPF_REG_0, 1),
614 			BPF_EXIT_INSN(),
615 		},
616 		.errstr = "invalid bpf_ld_imm64 insn",
617 		.result = REJECT,
618 	},
619 	{
620 		"test4 ld_imm64",
621 		.insns = {
622 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
623 			BPF_EXIT_INSN(),
624 		},
625 		.errstr = "invalid bpf_ld_imm64 insn",
626 		.result = REJECT,
627 	},
628 	{
629 		"test5 ld_imm64",
630 		.insns = {
631 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
632 		},
633 		.errstr = "invalid bpf_ld_imm64 insn",
634 		.result = REJECT,
635 	},
636 	{
637 		"test6 ld_imm64",
638 		.insns = {
639 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
640 			BPF_RAW_INSN(0, 0, 0, 0, 0),
641 			BPF_EXIT_INSN(),
642 		},
643 		.result = ACCEPT,
644 	},
645 	{
646 		"test7 ld_imm64",
647 		.insns = {
648 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
649 			BPF_RAW_INSN(0, 0, 0, 0, 1),
650 			BPF_EXIT_INSN(),
651 		},
652 		.result = ACCEPT,
653 		.retval = 1,
654 	},
655 	{
656 		"test8 ld_imm64",
657 		.insns = {
658 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1),
659 			BPF_RAW_INSN(0, 0, 0, 0, 1),
660 			BPF_EXIT_INSN(),
661 		},
662 		.errstr = "uses reserved fields",
663 		.result = REJECT,
664 	},
665 	{
666 		"test9 ld_imm64",
667 		.insns = {
668 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
669 			BPF_RAW_INSN(0, 0, 0, 1, 1),
670 			BPF_EXIT_INSN(),
671 		},
672 		.errstr = "invalid bpf_ld_imm64 insn",
673 		.result = REJECT,
674 	},
675 	{
676 		"test10 ld_imm64",
677 		.insns = {
678 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
679 			BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1),
680 			BPF_EXIT_INSN(),
681 		},
682 		.errstr = "invalid bpf_ld_imm64 insn",
683 		.result = REJECT,
684 	},
685 	{
686 		"test11 ld_imm64",
687 		.insns = {
688 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
689 			BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
690 			BPF_EXIT_INSN(),
691 		},
692 		.errstr = "invalid bpf_ld_imm64 insn",
693 		.result = REJECT,
694 	},
695 	{
696 		"test12 ld_imm64",
697 		.insns = {
698 			BPF_MOV64_IMM(BPF_REG_1, 0),
699 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
700 			BPF_RAW_INSN(0, 0, 0, 0, 1),
701 			BPF_EXIT_INSN(),
702 		},
703 		.errstr = "not pointing to valid bpf_map",
704 		.result = REJECT,
705 	},
706 	{
707 		"test13 ld_imm64",
708 		.insns = {
709 			BPF_MOV64_IMM(BPF_REG_1, 0),
710 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
711 			BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
712 			BPF_EXIT_INSN(),
713 		},
714 		.errstr = "invalid bpf_ld_imm64 insn",
715 		.result = REJECT,
716 	},
717 	{
718 		"arsh32 on imm",
719 		.insns = {
720 			BPF_MOV64_IMM(BPF_REG_0, 1),
721 			BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 5),
722 			BPF_EXIT_INSN(),
723 		},
724 		.result = REJECT,
725 		.errstr = "unknown opcode c4",
726 	},
727 	{
728 		"arsh32 on reg",
729 		.insns = {
730 			BPF_MOV64_IMM(BPF_REG_0, 1),
731 			BPF_MOV64_IMM(BPF_REG_1, 5),
732 			BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
733 			BPF_EXIT_INSN(),
734 		},
735 		.result = REJECT,
736 		.errstr = "unknown opcode cc",
737 	},
738 	{
739 		"arsh64 on imm",
740 		.insns = {
741 			BPF_MOV64_IMM(BPF_REG_0, 1),
742 			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_0, 5),
743 			BPF_EXIT_INSN(),
744 		},
745 		.result = ACCEPT,
746 	},
747 	{
748 		"arsh64 on reg",
749 		.insns = {
750 			BPF_MOV64_IMM(BPF_REG_0, 1),
751 			BPF_MOV64_IMM(BPF_REG_1, 5),
752 			BPF_ALU64_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
753 			BPF_EXIT_INSN(),
754 		},
755 		.result = ACCEPT,
756 	},
757 	{
758 		"no bpf_exit",
759 		.insns = {
760 			BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
761 		},
762 		.errstr = "not an exit",
763 		.result = REJECT,
764 	},
765 	{
766 		"loop (back-edge)",
767 		.insns = {
768 			BPF_JMP_IMM(BPF_JA, 0, 0, -1),
769 			BPF_EXIT_INSN(),
770 		},
771 		.errstr = "back-edge",
772 		.result = REJECT,
773 	},
774 	{
775 		"loop2 (back-edge)",
776 		.insns = {
777 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
778 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
779 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
780 			BPF_JMP_IMM(BPF_JA, 0, 0, -4),
781 			BPF_EXIT_INSN(),
782 		},
783 		.errstr = "back-edge",
784 		.result = REJECT,
785 	},
786 	{
787 		"conditional loop",
788 		.insns = {
789 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
790 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
791 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
792 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
793 			BPF_EXIT_INSN(),
794 		},
795 		.errstr = "back-edge",
796 		.result = REJECT,
797 	},
798 	{
799 		"read uninitialized register",
800 		.insns = {
801 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
802 			BPF_EXIT_INSN(),
803 		},
804 		.errstr = "R2 !read_ok",
805 		.result = REJECT,
806 	},
807 	{
808 		"read invalid register",
809 		.insns = {
810 			BPF_MOV64_REG(BPF_REG_0, -1),
811 			BPF_EXIT_INSN(),
812 		},
813 		.errstr = "R15 is invalid",
814 		.result = REJECT,
815 	},
816 	{
817 		"program doesn't init R0 before exit",
818 		.insns = {
819 			BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
820 			BPF_EXIT_INSN(),
821 		},
822 		.errstr = "R0 !read_ok",
823 		.result = REJECT,
824 	},
825 	{
826 		"program doesn't init R0 before exit in all branches",
827 		.insns = {
828 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
829 			BPF_MOV64_IMM(BPF_REG_0, 1),
830 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
831 			BPF_EXIT_INSN(),
832 		},
833 		.errstr = "R0 !read_ok",
834 		.errstr_unpriv = "R1 pointer comparison",
835 		.result = REJECT,
836 	},
837 	{
838 		"stack out of bounds",
839 		.insns = {
840 			BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
841 			BPF_EXIT_INSN(),
842 		},
843 		.errstr = "invalid stack",
844 		.result = REJECT,
845 	},
846 	{
847 		"invalid call insn1",
848 		.insns = {
849 			BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
850 			BPF_EXIT_INSN(),
851 		},
852 		.errstr = "unknown opcode 8d",
853 		.result = REJECT,
854 	},
855 	{
856 		"invalid call insn2",
857 		.insns = {
858 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
859 			BPF_EXIT_INSN(),
860 		},
861 		.errstr = "BPF_CALL uses reserved",
862 		.result = REJECT,
863 	},
864 	{
865 		"invalid function call",
866 		.insns = {
867 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
868 			BPF_EXIT_INSN(),
869 		},
870 		.errstr = "invalid func unknown#1234567",
871 		.result = REJECT,
872 	},
873 	{
874 		"uninitialized stack1",
875 		.insns = {
876 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
877 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
878 			BPF_LD_MAP_FD(BPF_REG_1, 0),
879 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
880 				     BPF_FUNC_map_lookup_elem),
881 			BPF_EXIT_INSN(),
882 		},
883 		.fixup_map_hash_8b = { 2 },
884 		.errstr = "invalid indirect read from stack",
885 		.result = REJECT,
886 	},
887 	{
888 		"uninitialized stack2",
889 		.insns = {
890 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
891 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
892 			BPF_EXIT_INSN(),
893 		},
894 		.errstr = "invalid read from stack",
895 		.result = REJECT,
896 	},
897 	{
898 		"invalid fp arithmetic",
899 		/* If this ever gets changed, make sure JITs can deal with it. */
900 		.insns = {
901 			BPF_MOV64_IMM(BPF_REG_0, 0),
902 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
903 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8),
904 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
905 			BPF_EXIT_INSN(),
906 		},
907 		.errstr = "R1 subtraction from stack pointer",
908 		.result = REJECT,
909 	},
910 	{
911 		"non-invalid fp arithmetic",
912 		.insns = {
913 			BPF_MOV64_IMM(BPF_REG_0, 0),
914 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
915 			BPF_EXIT_INSN(),
916 		},
917 		.result = ACCEPT,
918 	},
919 	{
920 		"invalid argument register",
921 		.insns = {
922 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
923 				     BPF_FUNC_get_cgroup_classid),
924 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
925 				     BPF_FUNC_get_cgroup_classid),
926 			BPF_EXIT_INSN(),
927 		},
928 		.errstr = "R1 !read_ok",
929 		.result = REJECT,
930 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
931 	},
932 	{
933 		"non-invalid argument register",
934 		.insns = {
935 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
936 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
937 				     BPF_FUNC_get_cgroup_classid),
938 			BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
939 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
940 				     BPF_FUNC_get_cgroup_classid),
941 			BPF_EXIT_INSN(),
942 		},
943 		.result = ACCEPT,
944 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
945 	},
946 	{
947 		"check valid spill/fill",
948 		.insns = {
949 			/* spill R1(ctx) into stack */
950 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
951 			/* fill it back into R2 */
952 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
953 			/* should be able to access R0 = *(R2 + 8) */
954 			/* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
955 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
956 			BPF_EXIT_INSN(),
957 		},
958 		.errstr_unpriv = "R0 leaks addr",
959 		.result = ACCEPT,
960 		.result_unpriv = REJECT,
961 		.retval = POINTER_VALUE,
962 	},
963 	{
964 		"check valid spill/fill, skb mark",
965 		.insns = {
966 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
967 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
968 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
969 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
970 				    offsetof(struct __sk_buff, mark)),
971 			BPF_EXIT_INSN(),
972 		},
973 		.result = ACCEPT,
974 		.result_unpriv = ACCEPT,
975 	},
976 	{
977 		"check corrupted spill/fill",
978 		.insns = {
979 			/* spill R1(ctx) into stack */
980 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
981 			/* corrupt the spilled R1 pointer on the stack */
982 			BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
983 			/* fill back into R0 should fail */
984 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
985 			BPF_EXIT_INSN(),
986 		},
987 		.errstr_unpriv = "attempt to corrupt spilled",
988 		.errstr = "corrupted spill",
989 		.result = REJECT,
990 	},
991 	{
992 		"invalid src register in STX",
993 		.insns = {
994 			BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
995 			BPF_EXIT_INSN(),
996 		},
997 		.errstr = "R15 is invalid",
998 		.result = REJECT,
999 	},
1000 	{
1001 		"invalid dst register in STX",
1002 		.insns = {
1003 			BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
1004 			BPF_EXIT_INSN(),
1005 		},
1006 		.errstr = "R14 is invalid",
1007 		.result = REJECT,
1008 	},
1009 	{
1010 		"invalid dst register in ST",
1011 		.insns = {
1012 			BPF_ST_MEM(BPF_B, 14, -1, -1),
1013 			BPF_EXIT_INSN(),
1014 		},
1015 		.errstr = "R14 is invalid",
1016 		.result = REJECT,
1017 	},
1018 	{
1019 		"invalid src register in LDX",
1020 		.insns = {
1021 			BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
1022 			BPF_EXIT_INSN(),
1023 		},
1024 		.errstr = "R12 is invalid",
1025 		.result = REJECT,
1026 	},
1027 	{
1028 		"invalid dst register in LDX",
1029 		.insns = {
1030 			BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
1031 			BPF_EXIT_INSN(),
1032 		},
1033 		.errstr = "R11 is invalid",
1034 		.result = REJECT,
1035 	},
1036 	{
1037 		"junk insn",
1038 		.insns = {
1039 			BPF_RAW_INSN(0, 0, 0, 0, 0),
1040 			BPF_EXIT_INSN(),
1041 		},
1042 		.errstr = "unknown opcode 00",
1043 		.result = REJECT,
1044 	},
1045 	{
1046 		"junk insn2",
1047 		.insns = {
1048 			BPF_RAW_INSN(1, 0, 0, 0, 0),
1049 			BPF_EXIT_INSN(),
1050 		},
1051 		.errstr = "BPF_LDX uses reserved fields",
1052 		.result = REJECT,
1053 	},
1054 	{
1055 		"junk insn3",
1056 		.insns = {
1057 			BPF_RAW_INSN(-1, 0, 0, 0, 0),
1058 			BPF_EXIT_INSN(),
1059 		},
1060 		.errstr = "unknown opcode ff",
1061 		.result = REJECT,
1062 	},
1063 	{
1064 		"junk insn4",
1065 		.insns = {
1066 			BPF_RAW_INSN(-1, -1, -1, -1, -1),
1067 			BPF_EXIT_INSN(),
1068 		},
1069 		.errstr = "unknown opcode ff",
1070 		.result = REJECT,
1071 	},
1072 	{
1073 		"junk insn5",
1074 		.insns = {
1075 			BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
1076 			BPF_EXIT_INSN(),
1077 		},
1078 		.errstr = "BPF_ALU uses reserved fields",
1079 		.result = REJECT,
1080 	},
1081 	{
1082 		"misaligned read from stack",
1083 		.insns = {
1084 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1085 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
1086 			BPF_EXIT_INSN(),
1087 		},
1088 		.errstr = "misaligned stack access",
1089 		.result = REJECT,
1090 	},
1091 	{
1092 		"invalid map_fd for function call",
1093 		.insns = {
1094 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1095 			BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
1096 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1097 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1098 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1099 				     BPF_FUNC_map_delete_elem),
1100 			BPF_EXIT_INSN(),
1101 		},
1102 		.errstr = "fd 0 is not pointing to valid bpf_map",
1103 		.result = REJECT,
1104 	},
1105 	{
1106 		"don't check return value before access",
1107 		.insns = {
1108 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1109 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1110 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1111 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1112 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1113 				     BPF_FUNC_map_lookup_elem),
1114 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1115 			BPF_EXIT_INSN(),
1116 		},
1117 		.fixup_map_hash_8b = { 3 },
1118 		.errstr = "R0 invalid mem access 'map_value_or_null'",
1119 		.result = REJECT,
1120 	},
1121 	{
1122 		"access memory with incorrect alignment",
1123 		.insns = {
1124 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1125 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1126 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1127 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1128 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1129 				     BPF_FUNC_map_lookup_elem),
1130 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
1131 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
1132 			BPF_EXIT_INSN(),
1133 		},
1134 		.fixup_map_hash_8b = { 3 },
1135 		.errstr = "misaligned value access",
1136 		.result = REJECT,
1137 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1138 	},
1139 	{
1140 		"sometimes access memory with incorrect alignment",
1141 		.insns = {
1142 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1143 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1144 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1145 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1146 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1147 				     BPF_FUNC_map_lookup_elem),
1148 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
1149 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1150 			BPF_EXIT_INSN(),
1151 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
1152 			BPF_EXIT_INSN(),
1153 		},
1154 		.fixup_map_hash_8b = { 3 },
1155 		.errstr = "R0 invalid mem access",
1156 		.errstr_unpriv = "R0 leaks addr",
1157 		.result = REJECT,
1158 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1159 	},
1160 	{
1161 		"jump test 1",
1162 		.insns = {
1163 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1164 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
1165 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
1166 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
1167 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
1168 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
1169 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
1170 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
1171 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
1172 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
1173 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
1174 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
1175 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
1176 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
1177 			BPF_MOV64_IMM(BPF_REG_0, 0),
1178 			BPF_EXIT_INSN(),
1179 		},
1180 		.errstr_unpriv = "R1 pointer comparison",
1181 		.result_unpriv = REJECT,
1182 		.result = ACCEPT,
1183 	},
1184 	{
1185 		"jump test 2",
1186 		.insns = {
1187 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1188 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
1189 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
1190 			BPF_JMP_IMM(BPF_JA, 0, 0, 14),
1191 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
1192 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
1193 			BPF_JMP_IMM(BPF_JA, 0, 0, 11),
1194 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
1195 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
1196 			BPF_JMP_IMM(BPF_JA, 0, 0, 8),
1197 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
1198 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
1199 			BPF_JMP_IMM(BPF_JA, 0, 0, 5),
1200 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
1201 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
1202 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1203 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
1204 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
1205 			BPF_MOV64_IMM(BPF_REG_0, 0),
1206 			BPF_EXIT_INSN(),
1207 		},
1208 		.errstr_unpriv = "R1 pointer comparison",
1209 		.result_unpriv = REJECT,
1210 		.result = ACCEPT,
1211 	},
1212 	{
1213 		"jump test 3",
1214 		.insns = {
1215 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1216 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
1217 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
1218 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1219 			BPF_JMP_IMM(BPF_JA, 0, 0, 19),
1220 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
1221 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
1222 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1223 			BPF_JMP_IMM(BPF_JA, 0, 0, 15),
1224 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
1225 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
1226 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
1227 			BPF_JMP_IMM(BPF_JA, 0, 0, 11),
1228 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
1229 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
1230 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
1231 			BPF_JMP_IMM(BPF_JA, 0, 0, 7),
1232 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
1233 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
1234 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
1235 			BPF_JMP_IMM(BPF_JA, 0, 0, 3),
1236 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
1237 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
1238 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
1239 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1240 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1241 				     BPF_FUNC_map_delete_elem),
1242 			BPF_EXIT_INSN(),
1243 		},
1244 		.fixup_map_hash_8b = { 24 },
1245 		.errstr_unpriv = "R1 pointer comparison",
1246 		.result_unpriv = REJECT,
1247 		.result = ACCEPT,
1248 		.retval = -ENOENT,
1249 	},
1250 	{
1251 		"jump test 4",
1252 		.insns = {
1253 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1254 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1255 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1256 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1257 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1258 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1259 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1260 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1261 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1262 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1263 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1264 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1265 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1266 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1267 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1268 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1269 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1270 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1271 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1272 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1273 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1274 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1275 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1276 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1277 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1278 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1279 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1280 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1281 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1282 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1283 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1284 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1285 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1286 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1287 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1288 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1289 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1290 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1291 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1292 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1293 			BPF_MOV64_IMM(BPF_REG_0, 0),
1294 			BPF_EXIT_INSN(),
1295 		},
1296 		.errstr_unpriv = "R1 pointer comparison",
1297 		.result_unpriv = REJECT,
1298 		.result = ACCEPT,
1299 	},
1300 	{
1301 		"jump test 5",
1302 		.insns = {
1303 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1304 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
1305 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1306 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1307 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1308 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1309 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1310 			BPF_MOV64_IMM(BPF_REG_0, 0),
1311 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1312 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1313 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1314 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1315 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1316 			BPF_MOV64_IMM(BPF_REG_0, 0),
1317 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1318 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1319 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1320 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1321 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1322 			BPF_MOV64_IMM(BPF_REG_0, 0),
1323 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1324 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1325 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1326 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1327 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1328 			BPF_MOV64_IMM(BPF_REG_0, 0),
1329 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1330 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1331 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1332 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1333 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1334 			BPF_MOV64_IMM(BPF_REG_0, 0),
1335 			BPF_EXIT_INSN(),
1336 		},
1337 		.errstr_unpriv = "R1 pointer comparison",
1338 		.result_unpriv = REJECT,
1339 		.result = ACCEPT,
1340 	},
1341 	{
1342 		"access skb fields ok",
1343 		.insns = {
1344 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1345 				    offsetof(struct __sk_buff, len)),
1346 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1347 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1348 				    offsetof(struct __sk_buff, mark)),
1349 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1350 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1351 				    offsetof(struct __sk_buff, pkt_type)),
1352 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1353 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1354 				    offsetof(struct __sk_buff, queue_mapping)),
1355 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1356 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1357 				    offsetof(struct __sk_buff, protocol)),
1358 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1359 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1360 				    offsetof(struct __sk_buff, vlan_present)),
1361 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1362 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1363 				    offsetof(struct __sk_buff, vlan_tci)),
1364 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1365 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1366 				    offsetof(struct __sk_buff, napi_id)),
1367 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1368 			BPF_EXIT_INSN(),
1369 		},
1370 		.result = ACCEPT,
1371 	},
1372 	{
1373 		"access skb fields bad1",
1374 		.insns = {
1375 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
1376 			BPF_EXIT_INSN(),
1377 		},
1378 		.errstr = "invalid bpf_context access",
1379 		.result = REJECT,
1380 	},
1381 	{
1382 		"access skb fields bad2",
1383 		.insns = {
1384 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
1385 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1386 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1387 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1388 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1389 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1390 				     BPF_FUNC_map_lookup_elem),
1391 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1392 			BPF_EXIT_INSN(),
1393 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1394 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1395 				    offsetof(struct __sk_buff, pkt_type)),
1396 			BPF_EXIT_INSN(),
1397 		},
1398 		.fixup_map_hash_8b = { 4 },
1399 		.errstr = "different pointers",
1400 		.errstr_unpriv = "R1 pointer comparison",
1401 		.result = REJECT,
1402 	},
1403 	{
1404 		"access skb fields bad3",
1405 		.insns = {
1406 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1407 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1408 				    offsetof(struct __sk_buff, pkt_type)),
1409 			BPF_EXIT_INSN(),
1410 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1411 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1412 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1413 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1414 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1415 				     BPF_FUNC_map_lookup_elem),
1416 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1417 			BPF_EXIT_INSN(),
1418 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1419 			BPF_JMP_IMM(BPF_JA, 0, 0, -12),
1420 		},
1421 		.fixup_map_hash_8b = { 6 },
1422 		.errstr = "different pointers",
1423 		.errstr_unpriv = "R1 pointer comparison",
1424 		.result = REJECT,
1425 	},
1426 	{
1427 		"access skb fields bad4",
1428 		.insns = {
1429 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
1430 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1431 				    offsetof(struct __sk_buff, len)),
1432 			BPF_MOV64_IMM(BPF_REG_0, 0),
1433 			BPF_EXIT_INSN(),
1434 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1435 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1436 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1437 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1438 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1439 				     BPF_FUNC_map_lookup_elem),
1440 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1441 			BPF_EXIT_INSN(),
1442 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1443 			BPF_JMP_IMM(BPF_JA, 0, 0, -13),
1444 		},
1445 		.fixup_map_hash_8b = { 7 },
1446 		.errstr = "different pointers",
1447 		.errstr_unpriv = "R1 pointer comparison",
1448 		.result = REJECT,
1449 	},
1450 	{
1451 		"invalid access __sk_buff family",
1452 		.insns = {
1453 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1454 				    offsetof(struct __sk_buff, family)),
1455 			BPF_EXIT_INSN(),
1456 		},
1457 		.errstr = "invalid bpf_context access",
1458 		.result = REJECT,
1459 	},
1460 	{
1461 		"invalid access __sk_buff remote_ip4",
1462 		.insns = {
1463 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1464 				    offsetof(struct __sk_buff, remote_ip4)),
1465 			BPF_EXIT_INSN(),
1466 		},
1467 		.errstr = "invalid bpf_context access",
1468 		.result = REJECT,
1469 	},
1470 	{
1471 		"invalid access __sk_buff local_ip4",
1472 		.insns = {
1473 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1474 				    offsetof(struct __sk_buff, local_ip4)),
1475 			BPF_EXIT_INSN(),
1476 		},
1477 		.errstr = "invalid bpf_context access",
1478 		.result = REJECT,
1479 	},
1480 	{
1481 		"invalid access __sk_buff remote_ip6",
1482 		.insns = {
1483 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1484 				    offsetof(struct __sk_buff, remote_ip6)),
1485 			BPF_EXIT_INSN(),
1486 		},
1487 		.errstr = "invalid bpf_context access",
1488 		.result = REJECT,
1489 	},
1490 	{
1491 		"invalid access __sk_buff local_ip6",
1492 		.insns = {
1493 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1494 				    offsetof(struct __sk_buff, local_ip6)),
1495 			BPF_EXIT_INSN(),
1496 		},
1497 		.errstr = "invalid bpf_context access",
1498 		.result = REJECT,
1499 	},
1500 	{
1501 		"invalid access __sk_buff remote_port",
1502 		.insns = {
1503 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1504 				    offsetof(struct __sk_buff, remote_port)),
1505 			BPF_EXIT_INSN(),
1506 		},
1507 		.errstr = "invalid bpf_context access",
1508 		.result = REJECT,
1509 	},
1510 	{
1511 		"invalid access __sk_buff local_port",
1512 		.insns = {
1513 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1514 				    offsetof(struct __sk_buff, local_port)),
1515 			BPF_EXIT_INSN(),
1516 		},
1517 		.errstr = "invalid bpf_context access",
1518 		.result = REJECT,
1519 	},
1520 	{
1521 		"valid access __sk_buff family",
1522 		.insns = {
1523 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1524 				    offsetof(struct __sk_buff, family)),
1525 			BPF_EXIT_INSN(),
1526 		},
1527 		.result = ACCEPT,
1528 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1529 	},
1530 	{
1531 		"valid access __sk_buff remote_ip4",
1532 		.insns = {
1533 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1534 				    offsetof(struct __sk_buff, remote_ip4)),
1535 			BPF_EXIT_INSN(),
1536 		},
1537 		.result = ACCEPT,
1538 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1539 	},
1540 	{
1541 		"valid access __sk_buff local_ip4",
1542 		.insns = {
1543 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1544 				    offsetof(struct __sk_buff, local_ip4)),
1545 			BPF_EXIT_INSN(),
1546 		},
1547 		.result = ACCEPT,
1548 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1549 	},
1550 	{
1551 		"valid access __sk_buff remote_ip6",
1552 		.insns = {
1553 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1554 				    offsetof(struct __sk_buff, remote_ip6[0])),
1555 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1556 				    offsetof(struct __sk_buff, remote_ip6[1])),
1557 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1558 				    offsetof(struct __sk_buff, remote_ip6[2])),
1559 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1560 				    offsetof(struct __sk_buff, remote_ip6[3])),
1561 			BPF_EXIT_INSN(),
1562 		},
1563 		.result = ACCEPT,
1564 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1565 	},
1566 	{
1567 		"valid access __sk_buff local_ip6",
1568 		.insns = {
1569 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1570 				    offsetof(struct __sk_buff, local_ip6[0])),
1571 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1572 				    offsetof(struct __sk_buff, local_ip6[1])),
1573 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1574 				    offsetof(struct __sk_buff, local_ip6[2])),
1575 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1576 				    offsetof(struct __sk_buff, local_ip6[3])),
1577 			BPF_EXIT_INSN(),
1578 		},
1579 		.result = ACCEPT,
1580 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1581 	},
1582 	{
1583 		"valid access __sk_buff remote_port",
1584 		.insns = {
1585 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1586 				    offsetof(struct __sk_buff, remote_port)),
1587 			BPF_EXIT_INSN(),
1588 		},
1589 		.result = ACCEPT,
1590 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1591 	},
1592 	{
1593 		"valid access __sk_buff local_port",
1594 		.insns = {
1595 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1596 				    offsetof(struct __sk_buff, local_port)),
1597 			BPF_EXIT_INSN(),
1598 		},
1599 		.result = ACCEPT,
1600 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1601 	},
1602 	{
1603 		"invalid access of tc_classid for SK_SKB",
1604 		.insns = {
1605 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1606 				    offsetof(struct __sk_buff, tc_classid)),
1607 			BPF_EXIT_INSN(),
1608 		},
1609 		.result = REJECT,
1610 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1611 		.errstr = "invalid bpf_context access",
1612 	},
1613 	{
1614 		"invalid access of skb->mark for SK_SKB",
1615 		.insns = {
1616 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1617 				    offsetof(struct __sk_buff, mark)),
1618 			BPF_EXIT_INSN(),
1619 		},
1620 		.result =  REJECT,
1621 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1622 		.errstr = "invalid bpf_context access",
1623 	},
1624 	{
1625 		"check skb->mark is not writeable by SK_SKB",
1626 		.insns = {
1627 			BPF_MOV64_IMM(BPF_REG_0, 0),
1628 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1629 				    offsetof(struct __sk_buff, mark)),
1630 			BPF_EXIT_INSN(),
1631 		},
1632 		.result =  REJECT,
1633 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1634 		.errstr = "invalid bpf_context access",
1635 	},
1636 	{
1637 		"check skb->tc_index is writeable by SK_SKB",
1638 		.insns = {
1639 			BPF_MOV64_IMM(BPF_REG_0, 0),
1640 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1641 				    offsetof(struct __sk_buff, tc_index)),
1642 			BPF_EXIT_INSN(),
1643 		},
1644 		.result = ACCEPT,
1645 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1646 	},
1647 	{
1648 		"check skb->priority is writeable by SK_SKB",
1649 		.insns = {
1650 			BPF_MOV64_IMM(BPF_REG_0, 0),
1651 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1652 				    offsetof(struct __sk_buff, priority)),
1653 			BPF_EXIT_INSN(),
1654 		},
1655 		.result = ACCEPT,
1656 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1657 	},
1658 	{
1659 		"direct packet read for SK_SKB",
1660 		.insns = {
1661 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1662 				    offsetof(struct __sk_buff, data)),
1663 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1664 				    offsetof(struct __sk_buff, data_end)),
1665 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1666 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1667 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1668 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
1669 			BPF_MOV64_IMM(BPF_REG_0, 0),
1670 			BPF_EXIT_INSN(),
1671 		},
1672 		.result = ACCEPT,
1673 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1674 	},
1675 	{
1676 		"direct packet write for SK_SKB",
1677 		.insns = {
1678 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1679 				    offsetof(struct __sk_buff, data)),
1680 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1681 				    offsetof(struct __sk_buff, data_end)),
1682 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1683 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1684 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1685 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
1686 			BPF_MOV64_IMM(BPF_REG_0, 0),
1687 			BPF_EXIT_INSN(),
1688 		},
1689 		.result = ACCEPT,
1690 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1691 	},
1692 	{
1693 		"overlapping checks for direct packet access SK_SKB",
1694 		.insns = {
1695 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1696 				    offsetof(struct __sk_buff, data)),
1697 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1698 				    offsetof(struct __sk_buff, data_end)),
1699 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1700 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1701 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
1702 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
1703 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
1704 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
1705 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
1706 			BPF_MOV64_IMM(BPF_REG_0, 0),
1707 			BPF_EXIT_INSN(),
1708 		},
1709 		.result = ACCEPT,
1710 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1711 	},
1712 	{
1713 		"valid access family in SK_MSG",
1714 		.insns = {
1715 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1716 				    offsetof(struct sk_msg_md, family)),
1717 			BPF_EXIT_INSN(),
1718 		},
1719 		.result = ACCEPT,
1720 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1721 	},
1722 	{
1723 		"valid access remote_ip4 in SK_MSG",
1724 		.insns = {
1725 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1726 				    offsetof(struct sk_msg_md, remote_ip4)),
1727 			BPF_EXIT_INSN(),
1728 		},
1729 		.result = ACCEPT,
1730 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1731 	},
1732 	{
1733 		"valid access local_ip4 in SK_MSG",
1734 		.insns = {
1735 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1736 				    offsetof(struct sk_msg_md, local_ip4)),
1737 			BPF_EXIT_INSN(),
1738 		},
1739 		.result = ACCEPT,
1740 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1741 	},
1742 	{
1743 		"valid access remote_port in SK_MSG",
1744 		.insns = {
1745 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1746 				    offsetof(struct sk_msg_md, remote_port)),
1747 			BPF_EXIT_INSN(),
1748 		},
1749 		.result = ACCEPT,
1750 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1751 	},
1752 	{
1753 		"valid access local_port in SK_MSG",
1754 		.insns = {
1755 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1756 				    offsetof(struct sk_msg_md, local_port)),
1757 			BPF_EXIT_INSN(),
1758 		},
1759 		.result = ACCEPT,
1760 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1761 	},
1762 	{
1763 		"valid access remote_ip6 in SK_MSG",
1764 		.insns = {
1765 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1766 				    offsetof(struct sk_msg_md, remote_ip6[0])),
1767 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1768 				    offsetof(struct sk_msg_md, remote_ip6[1])),
1769 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1770 				    offsetof(struct sk_msg_md, remote_ip6[2])),
1771 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1772 				    offsetof(struct sk_msg_md, remote_ip6[3])),
1773 			BPF_EXIT_INSN(),
1774 		},
1775 		.result = ACCEPT,
1776 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1777 	},
1778 	{
1779 		"valid access local_ip6 in SK_MSG",
1780 		.insns = {
1781 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1782 				    offsetof(struct sk_msg_md, local_ip6[0])),
1783 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1784 				    offsetof(struct sk_msg_md, local_ip6[1])),
1785 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1786 				    offsetof(struct sk_msg_md, local_ip6[2])),
1787 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1788 				    offsetof(struct sk_msg_md, local_ip6[3])),
1789 			BPF_EXIT_INSN(),
1790 		},
1791 		.result = ACCEPT,
1792 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1793 	},
1794 	{
1795 		"invalid 64-bit read of family in SK_MSG",
1796 		.insns = {
1797 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1798 				    offsetof(struct sk_msg_md, family)),
1799 			BPF_EXIT_INSN(),
1800 		},
1801 		.errstr = "invalid bpf_context access",
1802 		.result = REJECT,
1803 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1804 	},
1805 	{
1806 		"invalid read past end of SK_MSG",
1807 		.insns = {
1808 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1809 				    offsetof(struct sk_msg_md, local_port) + 4),
1810 			BPF_EXIT_INSN(),
1811 		},
1812 		.errstr = "R0 !read_ok",
1813 		.result = REJECT,
1814 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1815 	},
1816 	{
1817 		"invalid read offset in SK_MSG",
1818 		.insns = {
1819 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1820 				    offsetof(struct sk_msg_md, family) + 1),
1821 			BPF_EXIT_INSN(),
1822 		},
1823 		.errstr = "invalid bpf_context access",
1824 		.result = REJECT,
1825 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1826 	},
1827 	{
1828 		"direct packet read for SK_MSG",
1829 		.insns = {
1830 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1831 				    offsetof(struct sk_msg_md, data)),
1832 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
1833 				    offsetof(struct sk_msg_md, data_end)),
1834 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1835 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1836 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1837 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
1838 			BPF_MOV64_IMM(BPF_REG_0, 0),
1839 			BPF_EXIT_INSN(),
1840 		},
1841 		.result = ACCEPT,
1842 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1843 	},
1844 	{
1845 		"direct packet write for SK_MSG",
1846 		.insns = {
1847 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1848 				    offsetof(struct sk_msg_md, data)),
1849 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
1850 				    offsetof(struct sk_msg_md, data_end)),
1851 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1852 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1853 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1854 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
1855 			BPF_MOV64_IMM(BPF_REG_0, 0),
1856 			BPF_EXIT_INSN(),
1857 		},
1858 		.result = ACCEPT,
1859 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1860 	},
1861 	{
1862 		"overlapping checks for direct packet access SK_MSG",
1863 		.insns = {
1864 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1865 				    offsetof(struct sk_msg_md, data)),
1866 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
1867 				    offsetof(struct sk_msg_md, data_end)),
1868 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1869 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1870 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
1871 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
1872 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
1873 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
1874 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
1875 			BPF_MOV64_IMM(BPF_REG_0, 0),
1876 			BPF_EXIT_INSN(),
1877 		},
1878 		.result = ACCEPT,
1879 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1880 	},
1881 	{
1882 		"check skb->mark is not writeable by sockets",
1883 		.insns = {
1884 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1885 				    offsetof(struct __sk_buff, mark)),
1886 			BPF_EXIT_INSN(),
1887 		},
1888 		.errstr = "invalid bpf_context access",
1889 		.errstr_unpriv = "R1 leaks addr",
1890 		.result = REJECT,
1891 	},
1892 	{
1893 		"check skb->tc_index is not writeable by sockets",
1894 		.insns = {
1895 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1896 				    offsetof(struct __sk_buff, tc_index)),
1897 			BPF_EXIT_INSN(),
1898 		},
1899 		.errstr = "invalid bpf_context access",
1900 		.errstr_unpriv = "R1 leaks addr",
1901 		.result = REJECT,
1902 	},
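	/* skb->cb[] is 20 bytes of scratch space that socket filters may both
	 * read and write; the following tests cover every byte, half-word,
	 * word and double-word slot of cb[0]..cb[4].
	 */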
1903 	{
1904 		"check cb access: byte",
1905 		.insns = {
1906 			BPF_MOV64_IMM(BPF_REG_0, 0),
1907 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1908 				    offsetof(struct __sk_buff, cb[0])),
1909 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1910 				    offsetof(struct __sk_buff, cb[0]) + 1),
1911 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1912 				    offsetof(struct __sk_buff, cb[0]) + 2),
1913 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1914 				    offsetof(struct __sk_buff, cb[0]) + 3),
1915 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1916 				    offsetof(struct __sk_buff, cb[1])),
1917 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1918 				    offsetof(struct __sk_buff, cb[1]) + 1),
1919 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1920 				    offsetof(struct __sk_buff, cb[1]) + 2),
1921 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1922 				    offsetof(struct __sk_buff, cb[1]) + 3),
1923 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1924 				    offsetof(struct __sk_buff, cb[2])),
1925 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1926 				    offsetof(struct __sk_buff, cb[2]) + 1),
1927 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1928 				    offsetof(struct __sk_buff, cb[2]) + 2),
1929 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1930 				    offsetof(struct __sk_buff, cb[2]) + 3),
1931 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1932 				    offsetof(struct __sk_buff, cb[3])),
1933 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1934 				    offsetof(struct __sk_buff, cb[3]) + 1),
1935 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1936 				    offsetof(struct __sk_buff, cb[3]) + 2),
1937 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1938 				    offsetof(struct __sk_buff, cb[3]) + 3),
1939 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1940 				    offsetof(struct __sk_buff, cb[4])),
1941 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1942 				    offsetof(struct __sk_buff, cb[4]) + 1),
1943 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1944 				    offsetof(struct __sk_buff, cb[4]) + 2),
1945 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1946 				    offsetof(struct __sk_buff, cb[4]) + 3),
1947 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1948 				    offsetof(struct __sk_buff, cb[0])),
1949 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1950 				    offsetof(struct __sk_buff, cb[0]) + 1),
1951 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1952 				    offsetof(struct __sk_buff, cb[0]) + 2),
1953 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1954 				    offsetof(struct __sk_buff, cb[0]) + 3),
1955 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1956 				    offsetof(struct __sk_buff, cb[1])),
1957 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1958 				    offsetof(struct __sk_buff, cb[1]) + 1),
1959 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1960 				    offsetof(struct __sk_buff, cb[1]) + 2),
1961 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1962 				    offsetof(struct __sk_buff, cb[1]) + 3),
1963 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1964 				    offsetof(struct __sk_buff, cb[2])),
1965 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1966 				    offsetof(struct __sk_buff, cb[2]) + 1),
1967 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1968 				    offsetof(struct __sk_buff, cb[2]) + 2),
1969 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1970 				    offsetof(struct __sk_buff, cb[2]) + 3),
1971 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1972 				    offsetof(struct __sk_buff, cb[3])),
1973 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1974 				    offsetof(struct __sk_buff, cb[3]) + 1),
1975 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1976 				    offsetof(struct __sk_buff, cb[3]) + 2),
1977 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1978 				    offsetof(struct __sk_buff, cb[3]) + 3),
1979 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1980 				    offsetof(struct __sk_buff, cb[4])),
1981 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1982 				    offsetof(struct __sk_buff, cb[4]) + 1),
1983 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1984 				    offsetof(struct __sk_buff, cb[4]) + 2),
1985 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1986 				    offsetof(struct __sk_buff, cb[4]) + 3),
1987 			BPF_EXIT_INSN(),
1988 		},
1989 		.result = ACCEPT,
1990 	},
1991 	{
1992 		"__sk_buff->hash, offset 0, byte store not permitted",
1993 		.insns = {
1994 			BPF_MOV64_IMM(BPF_REG_0, 0),
1995 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1996 				    offsetof(struct __sk_buff, hash)),
1997 			BPF_EXIT_INSN(),
1998 		},
1999 		.errstr = "invalid bpf_context access",
2000 		.result = REJECT,
2001 	},
2002 	{
2003 		"__sk_buff->tc_index, offset 3, byte store not permitted",
2004 		.insns = {
2005 			BPF_MOV64_IMM(BPF_REG_0, 0),
2006 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2007 				    offsetof(struct __sk_buff, tc_index) + 3),
2008 			BPF_EXIT_INSN(),
2009 		},
2010 		.errstr = "invalid bpf_context access",
2011 		.result = REJECT,
2012 	},
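	/* Narrow loads of skb->hash are expected to be accepted only at the
	 * offset covering the field's least-significant bytes, so the
	 * __BYTE_ORDER conditionals in the byte- and half-load tests below
	 * pick that offset for little- vs big-endian hosts.
	 */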
2013 	{
2014 		"check skb->hash byte load permitted",
2015 		.insns = {
2016 			BPF_MOV64_IMM(BPF_REG_0, 0),
2017 #if __BYTE_ORDER == __LITTLE_ENDIAN
2018 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2019 				    offsetof(struct __sk_buff, hash)),
2020 #else
2021 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2022 				    offsetof(struct __sk_buff, hash) + 3),
2023 #endif
2024 			BPF_EXIT_INSN(),
2025 		},
2026 		.result = ACCEPT,
2027 	},
2028 	{
2029 		"check skb->hash byte load not permitted 1",
2030 		.insns = {
2031 			BPF_MOV64_IMM(BPF_REG_0, 0),
2032 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2033 				    offsetof(struct __sk_buff, hash) + 1),
2034 			BPF_EXIT_INSN(),
2035 		},
2036 		.errstr = "invalid bpf_context access",
2037 		.result = REJECT,
2038 	},
2039 	{
2040 		"check skb->hash byte load not permitted 2",
2041 		.insns = {
2042 			BPF_MOV64_IMM(BPF_REG_0, 0),
2043 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2044 				    offsetof(struct __sk_buff, hash) + 2),
2045 			BPF_EXIT_INSN(),
2046 		},
2047 		.errstr = "invalid bpf_context access",
2048 		.result = REJECT,
2049 	},
2050 	{
2051 		"check skb->hash byte load not permitted 3",
2052 		.insns = {
2053 			BPF_MOV64_IMM(BPF_REG_0, 0),
2054 #if __BYTE_ORDER == __LITTLE_ENDIAN
2055 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2056 				    offsetof(struct __sk_buff, hash) + 3),
2057 #else
2058 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2059 				    offsetof(struct __sk_buff, hash)),
2060 #endif
2061 			BPF_EXIT_INSN(),
2062 		},
2063 		.errstr = "invalid bpf_context access",
2064 		.result = REJECT,
2065 	},
2066 	{
2067 		"check cb access: byte, wrong type",
2068 		.insns = {
2069 			BPF_MOV64_IMM(BPF_REG_0, 0),
2070 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2071 				    offsetof(struct __sk_buff, cb[0])),
2072 			BPF_EXIT_INSN(),
2073 		},
2074 		.errstr = "invalid bpf_context access",
2075 		.result = REJECT,
2076 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
2077 	},
2078 	{
2079 		"check cb access: half",
2080 		.insns = {
2081 			BPF_MOV64_IMM(BPF_REG_0, 0),
2082 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2083 				    offsetof(struct __sk_buff, cb[0])),
2084 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2085 				    offsetof(struct __sk_buff, cb[0]) + 2),
2086 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2087 				    offsetof(struct __sk_buff, cb[1])),
2088 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2089 				    offsetof(struct __sk_buff, cb[1]) + 2),
2090 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2091 				    offsetof(struct __sk_buff, cb[2])),
2092 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2093 				    offsetof(struct __sk_buff, cb[2]) + 2),
2094 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2095 				    offsetof(struct __sk_buff, cb[3])),
2096 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2097 				    offsetof(struct __sk_buff, cb[3]) + 2),
2098 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2099 				    offsetof(struct __sk_buff, cb[4])),
2100 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2101 				    offsetof(struct __sk_buff, cb[4]) + 2),
2102 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2103 				    offsetof(struct __sk_buff, cb[0])),
2104 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2105 				    offsetof(struct __sk_buff, cb[0]) + 2),
2106 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2107 				    offsetof(struct __sk_buff, cb[1])),
2108 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2109 				    offsetof(struct __sk_buff, cb[1]) + 2),
2110 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2111 				    offsetof(struct __sk_buff, cb[2])),
2112 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2113 				    offsetof(struct __sk_buff, cb[2]) + 2),
2114 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2115 				    offsetof(struct __sk_buff, cb[3])),
2116 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2117 				    offsetof(struct __sk_buff, cb[3]) + 2),
2118 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2119 				    offsetof(struct __sk_buff, cb[4])),
2120 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2121 				    offsetof(struct __sk_buff, cb[4]) + 2),
2122 			BPF_EXIT_INSN(),
2123 		},
2124 		.result = ACCEPT,
2125 	},
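	/* F_LOAD_WITH_STRICT_ALIGNMENT makes the harness load the program
	 * with BPF_F_STRICT_ALIGNMENT, so misaligned cb[] accesses must be
	 * rejected even on architectures with efficient unaligned access.
	 */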
2126 	{
2127 		"check cb access: half, unaligned",
2128 		.insns = {
2129 			BPF_MOV64_IMM(BPF_REG_0, 0),
2130 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2131 				    offsetof(struct __sk_buff, cb[0]) + 1),
2132 			BPF_EXIT_INSN(),
2133 		},
2134 		.errstr = "misaligned context access",
2135 		.result = REJECT,
2136 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2137 	},
2138 	{
2139 		"check __sk_buff->hash, offset 0, half store not permitted",
2140 		.insns = {
2141 			BPF_MOV64_IMM(BPF_REG_0, 0),
2142 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2143 				    offsetof(struct __sk_buff, hash)),
2144 			BPF_EXIT_INSN(),
2145 		},
2146 		.errstr = "invalid bpf_context access",
2147 		.result = REJECT,
2148 	},
2149 	{
2150 		"check __sk_buff->tc_index, offset 2, half store not permitted",
2151 		.insns = {
2152 			BPF_MOV64_IMM(BPF_REG_0, 0),
2153 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2154 				    offsetof(struct __sk_buff, tc_index) + 2),
2155 			BPF_EXIT_INSN(),
2156 		},
2157 		.errstr = "invalid bpf_context access",
2158 		.result = REJECT,
2159 	},
2160 	{
2161 		"check skb->hash half load permitted",
2162 		.insns = {
2163 			BPF_MOV64_IMM(BPF_REG_0, 0),
2164 #if __BYTE_ORDER == __LITTLE_ENDIAN
2165 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2166 				    offsetof(struct __sk_buff, hash)),
2167 #else
2168 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2169 				    offsetof(struct __sk_buff, hash) + 2),
2170 #endif
2171 			BPF_EXIT_INSN(),
2172 		},
2173 		.result = ACCEPT,
2174 	},
2175 	{
2176 		"check skb->hash half load not permitted",
2177 		.insns = {
2178 			BPF_MOV64_IMM(BPF_REG_0, 0),
2179 #if __BYTE_ORDER == __LITTLE_ENDIAN
2180 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2181 				    offsetof(struct __sk_buff, hash) + 2),
2182 #else
2183 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2184 				    offsetof(struct __sk_buff, hash)),
2185 #endif
2186 			BPF_EXIT_INSN(),
2187 		},
2188 		.errstr = "invalid bpf_context access",
2189 		.result = REJECT,
2190 	},
2191 	{
2192 		"check cb access: half, wrong type",
2193 		.insns = {
2194 			BPF_MOV64_IMM(BPF_REG_0, 0),
2195 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2196 				    offsetof(struct __sk_buff, cb[0])),
2197 			BPF_EXIT_INSN(),
2198 		},
2199 		.errstr = "invalid bpf_context access",
2200 		.result = REJECT,
2201 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
2202 	},
2203 	{
2204 		"check cb access: word",
2205 		.insns = {
2206 			BPF_MOV64_IMM(BPF_REG_0, 0),
2207 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2208 				    offsetof(struct __sk_buff, cb[0])),
2209 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2210 				    offsetof(struct __sk_buff, cb[1])),
2211 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2212 				    offsetof(struct __sk_buff, cb[2])),
2213 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2214 				    offsetof(struct __sk_buff, cb[3])),
2215 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2216 				    offsetof(struct __sk_buff, cb[4])),
2217 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2218 				    offsetof(struct __sk_buff, cb[0])),
2219 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2220 				    offsetof(struct __sk_buff, cb[1])),
2221 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2222 				    offsetof(struct __sk_buff, cb[2])),
2223 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2224 				    offsetof(struct __sk_buff, cb[3])),
2225 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2226 				    offsetof(struct __sk_buff, cb[4])),
2227 			BPF_EXIT_INSN(),
2228 		},
2229 		.result = ACCEPT,
2230 	},
2231 	{
2232 		"check cb access: word, unaligned 1",
2233 		.insns = {
2234 			BPF_MOV64_IMM(BPF_REG_0, 0),
2235 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2236 				    offsetof(struct __sk_buff, cb[0]) + 2),
2237 			BPF_EXIT_INSN(),
2238 		},
2239 		.errstr = "misaligned context access",
2240 		.result = REJECT,
2241 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2242 	},
2243 	{
2244 		"check cb access: word, unaligned 2",
2245 		.insns = {
2246 			BPF_MOV64_IMM(BPF_REG_0, 0),
2247 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2248 				    offsetof(struct __sk_buff, cb[4]) + 1),
2249 			BPF_EXIT_INSN(),
2250 		},
2251 		.errstr = "misaligned context access",
2252 		.result = REJECT,
2253 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2254 	},
2255 	{
2256 		"check cb access: word, unaligned 3",
2257 		.insns = {
2258 			BPF_MOV64_IMM(BPF_REG_0, 0),
2259 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2260 				    offsetof(struct __sk_buff, cb[4]) + 2),
2261 			BPF_EXIT_INSN(),
2262 		},
2263 		.errstr = "misaligned context access",
2264 		.result = REJECT,
2265 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2266 	},
2267 	{
2268 		"check cb access: word, unaligned 4",
2269 		.insns = {
2270 			BPF_MOV64_IMM(BPF_REG_0, 0),
2271 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2272 				    offsetof(struct __sk_buff, cb[4]) + 3),
2273 			BPF_EXIT_INSN(),
2274 		},
2275 		.errstr = "misaligned context access",
2276 		.result = REJECT,
2277 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2278 	},
2279 	{
2280 		"check cb access: double",
2281 		.insns = {
2282 			BPF_MOV64_IMM(BPF_REG_0, 0),
2283 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2284 				    offsetof(struct __sk_buff, cb[0])),
2285 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2286 				    offsetof(struct __sk_buff, cb[2])),
2287 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2288 				    offsetof(struct __sk_buff, cb[0])),
2289 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2290 				    offsetof(struct __sk_buff, cb[2])),
2291 			BPF_EXIT_INSN(),
2292 		},
2293 		.result = ACCEPT,
2294 	},
2295 	{
2296 		"check cb access: double, unaligned 1",
2297 		.insns = {
2298 			BPF_MOV64_IMM(BPF_REG_0, 0),
2299 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2300 				    offsetof(struct __sk_buff, cb[1])),
2301 			BPF_EXIT_INSN(),
2302 		},
2303 		.errstr = "misaligned context access",
2304 		.result = REJECT,
2305 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2306 	},
2307 	{
2308 		"check cb access: double, unaligned 2",
2309 		.insns = {
2310 			BPF_MOV64_IMM(BPF_REG_0, 0),
2311 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2312 				    offsetof(struct __sk_buff, cb[3])),
2313 			BPF_EXIT_INSN(),
2314 		},
2315 		.errstr = "misaligned context access",
2316 		.result = REJECT,
2317 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2318 	},
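	/* A double-word access at cb[4] covers the last cb slot plus the
	 * four bytes following the array, so it must be rejected as an
	 * out-of-bounds context access.
	 */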
2319 	{
2320 		"check cb access: double, oob 1",
2321 		.insns = {
2322 			BPF_MOV64_IMM(BPF_REG_0, 0),
2323 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2324 				    offsetof(struct __sk_buff, cb[4])),
2325 			BPF_EXIT_INSN(),
2326 		},
2327 		.errstr = "invalid bpf_context access",
2328 		.result = REJECT,
2329 	},
2330 	{
2331 		"check cb access: double, oob 2",
2332 		.insns = {
2333 			BPF_MOV64_IMM(BPF_REG_0, 0),
2334 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2335 				    offsetof(struct __sk_buff, cb[4])),
2336 			BPF_EXIT_INSN(),
2337 		},
2338 		.errstr = "invalid bpf_context access",
2339 		.result = REJECT,
2340 	},
2341 	{
2342 		"check __sk_buff->ifindex dw store not permitted",
2343 		.insns = {
2344 			BPF_MOV64_IMM(BPF_REG_0, 0),
2345 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2346 				    offsetof(struct __sk_buff, ifindex)),
2347 			BPF_EXIT_INSN(),
2348 		},
2349 		.errstr = "invalid bpf_context access",
2350 		.result = REJECT,
2351 	},
2352 	{
2353 		"check __sk_buff->ifindex dw load not permitted",
2354 		.insns = {
2355 			BPF_MOV64_IMM(BPF_REG_0, 0),
2356 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2357 				    offsetof(struct __sk_buff, ifindex)),
2358 			BPF_EXIT_INSN(),
2359 		},
2360 		.errstr = "invalid bpf_context access",
2361 		.result = REJECT,
2362 	},
2363 	{
2364 		"check cb access: double, wrong type",
2365 		.insns = {
2366 			BPF_MOV64_IMM(BPF_REG_0, 0),
2367 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2368 				    offsetof(struct __sk_buff, cb[0])),
2369 			BPF_EXIT_INSN(),
2370 		},
2371 		.errstr = "invalid bpf_context access",
2372 		.result = REJECT,
2373 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
2374 	},
2375 	{
2376 		"check out of range skb->cb access",
2377 		.insns = {
2378 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2379 				    offsetof(struct __sk_buff, cb[0]) + 256),
2380 			BPF_EXIT_INSN(),
2381 		},
2382 		.errstr = "invalid bpf_context access",
2383 		.errstr_unpriv = "",
2384 		.result = REJECT,
2385 		.prog_type = BPF_PROG_TYPE_SCHED_ACT,
2386 	},
2387 	{
2388 		"write skb fields from socket prog",
2389 		.insns = {
2390 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2391 				    offsetof(struct __sk_buff, cb[4])),
2392 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
2393 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2394 				    offsetof(struct __sk_buff, mark)),
2395 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2396 				    offsetof(struct __sk_buff, tc_index)),
2397 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
2398 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2399 				    offsetof(struct __sk_buff, cb[0])),
2400 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2401 				    offsetof(struct __sk_buff, cb[2])),
2402 			BPF_EXIT_INSN(),
2403 		},
2404 		.result = ACCEPT,
2405 		.errstr_unpriv = "R1 leaks addr",
2406 		.result_unpriv = REJECT,
2407 	},
2408 	{
2409 		"write skb fields from tc_cls_act prog",
2410 		.insns = {
2411 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2412 				    offsetof(struct __sk_buff, cb[0])),
2413 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2414 				    offsetof(struct __sk_buff, mark)),
2415 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2416 				    offsetof(struct __sk_buff, tc_index)),
2417 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2418 				    offsetof(struct __sk_buff, tc_index)),
2419 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2420 				    offsetof(struct __sk_buff, cb[3])),
2421 			BPF_EXIT_INSN(),
2422 		},
2423 		.errstr_unpriv = "",
2424 		.result_unpriv = REJECT,
2425 		.result = ACCEPT,
2426 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2427 	},
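	/* PTR_TO_STACK tests: R1 is fp plus a negative constant, and the
	 * verifier must check that register offset plus instruction offset
	 * stays within the 512-byte stack and is aligned to the access size.
	 * fp-10 with insn offset 2 lands on the aligned slot fp-8, so the
	 * first test is accepted and returns 0xfaceb00c.
	 */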
2428 	{
2429 		"PTR_TO_STACK store/load",
2430 		.insns = {
2431 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2432 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
2433 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
2434 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
2435 			BPF_EXIT_INSN(),
2436 		},
2437 		.result = ACCEPT,
2438 		.retval = 0xfaceb00c,
2439 	},
2440 	{
2441 		"PTR_TO_STACK store/load - bad alignment on off",
2442 		.insns = {
2443 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2444 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2445 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
2446 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
2447 			BPF_EXIT_INSN(),
2448 		},
2449 		.result = REJECT,
2450 		.errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
2451 	},
2452 	{
2453 		"PTR_TO_STACK store/load - bad alignment on reg",
2454 		.insns = {
2455 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2456 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
2457 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2458 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2459 			BPF_EXIT_INSN(),
2460 		},
2461 		.result = REJECT,
2462 		.errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
2463 	},
2464 	{
2465 		"PTR_TO_STACK store/load - out of bounds low",
2466 		.insns = {
2467 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2468 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
2469 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2470 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2471 			BPF_EXIT_INSN(),
2472 		},
2473 		.result = REJECT,
2474 		.errstr = "invalid stack off=-79992 size=8",
2475 	},
2476 	{
2477 		"PTR_TO_STACK store/load - out of bounds high",
2478 		.insns = {
2479 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2480 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2481 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2482 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2483 			BPF_EXIT_INSN(),
2484 		},
2485 		.result = REJECT,
2486 		.errstr = "invalid stack off=0 size=8",
2487 	},
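	/* The "unpriv:" tests are also run without CAP_SYS_ADMIN when
	 * kernel.unprivileged_bpf_disabled allows it: .result/.errstr apply
	 * to the privileged load and .result_unpriv/.errstr_unpriv to the
	 * unprivileged one, where leaking, comparing or doing arithmetic on
	 * kernel pointers is rejected.
	 */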
2488 	{
2489 		"unpriv: return pointer",
2490 		.insns = {
2491 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
2492 			BPF_EXIT_INSN(),
2493 		},
2494 		.result = ACCEPT,
2495 		.result_unpriv = REJECT,
2496 		.errstr_unpriv = "R0 leaks addr",
2497 		.retval = POINTER_VALUE,
2498 	},
2499 	{
2500 		"unpriv: add const to pointer",
2501 		.insns = {
2502 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
2503 			BPF_MOV64_IMM(BPF_REG_0, 0),
2504 			BPF_EXIT_INSN(),
2505 		},
2506 		.result = ACCEPT,
2507 	},
2508 	{
2509 		"unpriv: add pointer to pointer",
2510 		.insns = {
2511 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
2512 			BPF_MOV64_IMM(BPF_REG_0, 0),
2513 			BPF_EXIT_INSN(),
2514 		},
2515 		.result = REJECT,
2516 		.errstr = "R1 pointer += pointer",
2517 	},
2518 	{
2519 		"unpriv: neg pointer",
2520 		.insns = {
2521 			BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
2522 			BPF_MOV64_IMM(BPF_REG_0, 0),
2523 			BPF_EXIT_INSN(),
2524 		},
2525 		.result = ACCEPT,
2526 		.result_unpriv = REJECT,
2527 		.errstr_unpriv = "R1 pointer arithmetic",
2528 	},
2529 	{
2530 		"unpriv: cmp pointer with const",
2531 		.insns = {
2532 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2533 			BPF_MOV64_IMM(BPF_REG_0, 0),
2534 			BPF_EXIT_INSN(),
2535 		},
2536 		.result = ACCEPT,
2537 		.result_unpriv = REJECT,
2538 		.errstr_unpriv = "R1 pointer comparison",
2539 	},
2540 	{
2541 		"unpriv: cmp pointer with pointer",
2542 		.insns = {
2543 			BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
2544 			BPF_MOV64_IMM(BPF_REG_0, 0),
2545 			BPF_EXIT_INSN(),
2546 		},
2547 		.result = ACCEPT,
2548 		.result_unpriv = REJECT,
2549 		.errstr_unpriv = "R10 pointer comparison",
2550 	},
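	/* bpf_trace_printk() is not offered to unprivileged loaders, so the
	 * unprivileged load fails with "unknown func" while the privileged
	 * load of the same program is accepted.
	 */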
2551 	{
2552 		"unpriv: check that printk is disallowed",
2553 		.insns = {
2554 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2555 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2556 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2557 			BPF_MOV64_IMM(BPF_REG_2, 8),
2558 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
2559 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2560 				     BPF_FUNC_trace_printk),
2561 			BPF_MOV64_IMM(BPF_REG_0, 0),
2562 			BPF_EXIT_INSN(),
2563 		},
2564 		.errstr_unpriv = "unknown func bpf_trace_printk#6",
2565 		.result_unpriv = REJECT,
2566 		.result = ACCEPT,
2567 	},
2568 	{
2569 		"unpriv: pass pointer to helper function",
2570 		.insns = {
2571 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2572 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2573 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2574 			BPF_LD_MAP_FD(BPF_REG_1, 0),
2575 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
2576 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2577 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2578 				     BPF_FUNC_map_update_elem),
2579 			BPF_MOV64_IMM(BPF_REG_0, 0),
2580 			BPF_EXIT_INSN(),
2581 		},
2582 		.fixup_map_hash_8b = { 3 },
2583 		.errstr_unpriv = "R4 leaks addr",
2584 		.result_unpriv = REJECT,
2585 		.result = ACCEPT,
2586 	},
2587 	{
2588 		"unpriv: indirectly pass pointer on stack to helper function",
2589 		.insns = {
2590 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2591 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2592 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2593 			BPF_LD_MAP_FD(BPF_REG_1, 0),
2594 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2595 				     BPF_FUNC_map_lookup_elem),
2596 			BPF_MOV64_IMM(BPF_REG_0, 0),
2597 			BPF_EXIT_INSN(),
2598 		},
2599 		.fixup_map_hash_8b = { 3 },
2600 		.errstr = "invalid indirect read from stack off -8+0 size 8",
2601 		.result = REJECT,
2602 	},
2603 	{
2604 		"unpriv: mangle pointer on stack 1",
2605 		.insns = {
2606 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2607 			BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
2608 			BPF_MOV64_IMM(BPF_REG_0, 0),
2609 			BPF_EXIT_INSN(),
2610 		},
2611 		.errstr_unpriv = "attempt to corrupt spilled",
2612 		.result_unpriv = REJECT,
2613 		.result = ACCEPT,
2614 	},
2615 	{
2616 		"unpriv: mangle pointer on stack 2",
2617 		.insns = {
2618 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2619 			BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
2620 			BPF_MOV64_IMM(BPF_REG_0, 0),
2621 			BPF_EXIT_INSN(),
2622 		},
2623 		.errstr_unpriv = "attempt to corrupt spilled",
2624 		.result_unpriv = REJECT,
2625 		.result = ACCEPT,
2626 	},
2627 	{
2628 		"unpriv: read pointer from stack in small chunks",
2629 		.insns = {
2630 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2631 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
2632 			BPF_MOV64_IMM(BPF_REG_0, 0),
2633 			BPF_EXIT_INSN(),
2634 		},
2635 		.errstr = "invalid size",
2636 		.result = REJECT,
2637 	},
2638 	{
2639 		"unpriv: write pointer into ctx",
2640 		.insns = {
2641 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
2642 			BPF_MOV64_IMM(BPF_REG_0, 0),
2643 			BPF_EXIT_INSN(),
2644 		},
2645 		.errstr_unpriv = "R1 leaks addr",
2646 		.result_unpriv = REJECT,
2647 		.errstr = "invalid bpf_context access",
2648 		.result = REJECT,
2649 	},
2650 	{
2651 		"unpriv: spill/fill of ctx",
2652 		.insns = {
2653 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2654 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2655 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2656 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2657 			BPF_MOV64_IMM(BPF_REG_0, 0),
2658 			BPF_EXIT_INSN(),
2659 		},
2660 		.result = ACCEPT,
2661 	},
2662 	{
2663 		"unpriv: spill/fill of ctx 2",
2664 		.insns = {
2665 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2666 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2667 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2668 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2669 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2670 				     BPF_FUNC_get_hash_recalc),
2671 			BPF_MOV64_IMM(BPF_REG_0, 0),
2672 			BPF_EXIT_INSN(),
2673 		},
2674 		.result = ACCEPT,
2675 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2676 	},
2677 	{
2678 		"unpriv: spill/fill of ctx 3",
2679 		.insns = {
2680 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2681 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2682 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2683 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
2684 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2685 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2686 				     BPF_FUNC_get_hash_recalc),
2687 			BPF_EXIT_INSN(),
2688 		},
2689 		.result = REJECT,
2690 		.errstr = "R1 type=fp expected=ctx",
2691 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2692 	},
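	/* The XADD on fp-8 rewrites the slot holding the spilled ctx pointer,
	 * turning the spill into an unknown scalar; the refill then leaves R1
	 * as type=inv rather than ctx and the helper call is rejected.
	 */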
2693 	{
2694 		"unpriv: spill/fill of ctx 4",
2695 		.insns = {
2696 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2697 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2698 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2699 			BPF_MOV64_IMM(BPF_REG_0, 1),
2700 			BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10,
2701 				     BPF_REG_0, -8, 0),
2702 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2703 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2704 				     BPF_FUNC_get_hash_recalc),
2705 			BPF_EXIT_INSN(),
2706 		},
2707 		.result = REJECT,
2708 		.errstr = "R1 type=inv expected=ctx",
2709 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2710 	},
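	/* After the fill, R1 holds either a spilled stack pointer or the ctx
	 * pointer depending on the branch taken, so the final store to
	 * skb->mark would have to be both a stack access and a rewritten ctx
	 * access; the verifier refuses to use one instruction with different
	 * pointer types.
	 */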
2711 	{
2712 		"unpriv: spill/fill of different pointers stx",
2713 		.insns = {
2714 			BPF_MOV64_IMM(BPF_REG_3, 42),
2715 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2716 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2717 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2718 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2719 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
2720 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2721 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2722 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2723 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2724 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2725 				    offsetof(struct __sk_buff, mark)),
2726 			BPF_MOV64_IMM(BPF_REG_0, 0),
2727 			BPF_EXIT_INSN(),
2728 		},
2729 		.result = REJECT,
2730 		.errstr = "same insn cannot be used with different pointers",
2731 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2732 	},
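	/* BPF_SK_LOOKUP is a helper macro defined earlier in this file that
	 * emits a bpf_sk_lookup_tcp() call; afterwards R0 holds either NULL
	 * or a reference-counted socket pointer that must be released.
	 */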
2733 	{
2734 		"unpriv: spill/fill of different pointers stx - ctx and sock",
2735 		.insns = {
2736 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
2737 			/* struct bpf_sock *sock = bpf_sock_lookup(...); */
2738 			BPF_SK_LOOKUP,
2739 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
2740 			/* u64 foo; */
2741 			/* void *target = &foo; */
2742 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2743 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2744 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
2745 			/* if (skb == NULL) *target = sock; */
2746 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2747 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2748 			/* else *target = skb; */
2749 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2750 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2751 			/* struct __sk_buff *skb = *target; */
2752 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2753 			/* skb->mark = 42; */
2754 			BPF_MOV64_IMM(BPF_REG_3, 42),
2755 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2756 				    offsetof(struct __sk_buff, mark)),
2757 			/* if (sk) bpf_sk_release(sk) */
2758 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2759 				BPF_EMIT_CALL(BPF_FUNC_sk_release),
2760 			BPF_MOV64_IMM(BPF_REG_0, 0),
2761 			BPF_EXIT_INSN(),
2762 		},
2763 		.result = REJECT,
2764 		.errstr = "type=ctx expected=sock",
2765 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2766 	},
2767 	{
2768 		"unpriv: spill/fill of different pointers stx - leak sock",
2769 		.insns = {
2770 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
2771 			/* struct bpf_sock *sock = bpf_sock_lookup(...); */
2772 			BPF_SK_LOOKUP,
2773 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
2774 			/* u64 foo; */
2775 			/* void *target = &foo; */
2776 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2777 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2778 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
2779 			/* if (skb == NULL) *target = sock; */
2780 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2781 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2782 			/* else *target = skb; */
2783 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2784 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2785 			/* struct __sk_buff *skb = *target; */
2786 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2787 			/* skb->mark = 42; */
2788 			BPF_MOV64_IMM(BPF_REG_3, 42),
2789 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2790 				    offsetof(struct __sk_buff, mark)),
2791 			BPF_EXIT_INSN(),
2792 		},
2793 		.result = REJECT,
2794 			/* .errstr = "same insn cannot be used with different pointers", */
2795 		.errstr = "Unreleased reference",
2796 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2797 	},
2798 	{
2799 		"unpriv: spill/fill of different pointers stx - sock and ctx (read)",
2800 		.insns = {
2801 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
2802 			/* struct bpf_sock *sock = bpf_sock_lookup(...); */
2803 			BPF_SK_LOOKUP,
2804 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
2805 			/* u64 foo; */
2806 			/* void *target = &foo; */
2807 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2808 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2809 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
2810 			/* if (skb) *target = skb */
2811 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2812 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2813 			/* else *target = sock */
2814 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2815 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2816 			/* struct bpf_sock *sk = *target; */
2817 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2818 			/* if (sk) u32 foo = sk->mark; bpf_sk_release(sk); */
2819 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
2820 				BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2821 					    offsetof(struct bpf_sock, mark)),
2822 				BPF_EMIT_CALL(BPF_FUNC_sk_release),
2823 			BPF_MOV64_IMM(BPF_REG_0, 0),
2824 			BPF_EXIT_INSN(),
2825 		},
2826 		.result = REJECT,
2827 		.errstr = "same insn cannot be used with different pointers",
2828 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2829 	},
2830 	{
2831 		"unpriv: spill/fill of different pointers stx - sock and ctx (write)",
2832 		.insns = {
2833 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
2834 			/* struct bpf_sock *sock = bpf_sock_lookup(...); */
2835 			BPF_SK_LOOKUP,
2836 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
2837 			/* u64 foo; */
2838 			/* void *target = &foo; */
2839 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2840 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2841 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
2842 			/* if (skb) *target = skb */
2843 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2844 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2845 			/* else *target = sock */
2846 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2847 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2848 			/* struct bpf_sock *sk = *target; */
2849 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2850 			/* if (sk) sk->mark = 42; bpf_sk_release(sk); */
2851 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2852 				BPF_MOV64_IMM(BPF_REG_3, 42),
2853 				BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2854 					    offsetof(struct bpf_sock, mark)),
2855 				BPF_EMIT_CALL(BPF_FUNC_sk_release),
2856 			BPF_MOV64_IMM(BPF_REG_0, 0),
2857 			BPF_EXIT_INSN(),
2858 		},
2859 		.result = REJECT,
2860 			/* .errstr = "same insn cannot be used with different pointers", */
2861 		.errstr = "cannot write into socket",
2862 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2863 	},
2864 	{
2865 		"unpriv: spill/fill of different pointers ldx",
2866 		.insns = {
2867 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2868 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2869 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2870 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2871 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
2872 				      -(__s32)offsetof(struct bpf_perf_event_data,
2873 						       sample_period) - 8),
2874 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2875 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2876 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2877 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2878 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
2879 				    offsetof(struct bpf_perf_event_data,
2880 					     sample_period)),
2881 			BPF_MOV64_IMM(BPF_REG_0, 0),
2882 			BPF_EXIT_INSN(),
2883 		},
2884 		.result = REJECT,
2885 		.errstr = "same insn cannot be used with different pointers",
2886 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
2887 	},
2888 	{
2889 		"unpriv: write pointer into map elem value",
2890 		.insns = {
2891 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2892 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2893 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2894 			BPF_LD_MAP_FD(BPF_REG_1, 0),
2895 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2896 				     BPF_FUNC_map_lookup_elem),
2897 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
2898 			BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
2899 			BPF_EXIT_INSN(),
2900 		},
2901 		.fixup_map_hash_8b = { 3 },
2902 		.errstr_unpriv = "R0 leaks addr",
2903 		.result_unpriv = REJECT,
2904 		.result = ACCEPT,
2905 	},
2906 	{
2907 		"unpriv: partial copy of pointer",
2908 		.insns = {
2909 			BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
2910 			BPF_MOV64_IMM(BPF_REG_0, 0),
2911 			BPF_EXIT_INSN(),
2912 		},
2913 		.errstr_unpriv = "R10 partial copy",
2914 		.result_unpriv = REJECT,
2915 		.result = ACCEPT,
2916 	},
2917 	{
2918 		"unpriv: pass pointer to tail_call",
2919 		.insns = {
2920 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
2921 			BPF_LD_MAP_FD(BPF_REG_2, 0),
2922 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2923 				     BPF_FUNC_tail_call),
2924 			BPF_MOV64_IMM(BPF_REG_0, 0),
2925 			BPF_EXIT_INSN(),
2926 		},
2927 		.fixup_prog1 = { 1 },
2928 		.errstr_unpriv = "R3 leaks addr into helper",
2929 		.result_unpriv = REJECT,
2930 		.result = ACCEPT,
2931 	},
2932 	{
2933 		"unpriv: cmp map pointer with zero",
2934 		.insns = {
2935 			BPF_MOV64_IMM(BPF_REG_1, 0),
2936 			BPF_LD_MAP_FD(BPF_REG_1, 0),
2937 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2938 			BPF_MOV64_IMM(BPF_REG_0, 0),
2939 			BPF_EXIT_INSN(),
2940 		},
2941 		.fixup_map_hash_8b = { 1 },
2942 		.errstr_unpriv = "R1 pointer comparison",
2943 		.result_unpriv = REJECT,
2944 		.result = ACCEPT,
2945 	},
2946 	{
2947 		"unpriv: write into frame pointer",
2948 		.insns = {
2949 			BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
2950 			BPF_MOV64_IMM(BPF_REG_0, 0),
2951 			BPF_EXIT_INSN(),
2952 		},
2953 		.errstr = "frame pointer is read only",
2954 		.result = REJECT,
2955 	},
2956 	{
2957 		"unpriv: spill/fill frame pointer",
2958 		.insns = {
2959 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2960 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2961 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
2962 			BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
2963 			BPF_MOV64_IMM(BPF_REG_0, 0),
2964 			BPF_EXIT_INSN(),
2965 		},
2966 		.errstr = "frame pointer is read only",
2967 		.result = REJECT,
2968 	},
2969 	{
2970 		"unpriv: cmp of frame pointer",
2971 		.insns = {
2972 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
2973 			BPF_MOV64_IMM(BPF_REG_0, 0),
2974 			BPF_EXIT_INSN(),
2975 		},
2976 		.errstr_unpriv = "R10 pointer comparison",
2977 		.result_unpriv = REJECT,
2978 		.result = ACCEPT,
2979 	},
2980 	{
2981 		"unpriv: adding of fp",
2982 		.insns = {
2983 			BPF_MOV64_IMM(BPF_REG_0, 0),
2984 			BPF_MOV64_IMM(BPF_REG_1, 0),
2985 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
2986 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
2987 			BPF_EXIT_INSN(),
2988 		},
2989 		.result = ACCEPT,
2990 	},
2991 	{
2992 		"unpriv: cmp of stack pointer",
2993 		.insns = {
2994 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2995 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2996 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
2997 			BPF_MOV64_IMM(BPF_REG_0, 0),
2998 			BPF_EXIT_INSN(),
2999 		},
3000 		.errstr_unpriv = "R2 pointer comparison",
3001 		.result_unpriv = REJECT,
3002 		.result = ACCEPT,
3003 	},
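	/* The runtime/jit tail_call tests are executed and their return value
	 * checked: the prog array installed by fixup_prog1 holds a program
	 * returning 42 at index 0 and one that tail-calls itself (returning
	 * 41 once the tail-call limit is hit) at index 1. Empty, out-of-range
	 * or negative indices make bpf_tail_call() fall through to the next
	 * instruction, and only the lower 32 bits of the index are used, so
	 * 0x100000000 lands on index 0.
	 */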
3004 	{
3005 		"runtime/jit: tail_call within bounds, prog once",
3006 		.insns = {
3007 			BPF_MOV64_IMM(BPF_REG_3, 0),
3008 			BPF_LD_MAP_FD(BPF_REG_2, 0),
3009 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3010 				     BPF_FUNC_tail_call),
3011 			BPF_MOV64_IMM(BPF_REG_0, 1),
3012 			BPF_EXIT_INSN(),
3013 		},
3014 		.fixup_prog1 = { 1 },
3015 		.result = ACCEPT,
3016 		.retval = 42,
3017 	},
3018 	{
3019 		"runtime/jit: tail_call within bounds, prog loop",
3020 		.insns = {
3021 			BPF_MOV64_IMM(BPF_REG_3, 1),
3022 			BPF_LD_MAP_FD(BPF_REG_2, 0),
3023 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3024 				     BPF_FUNC_tail_call),
3025 			BPF_MOV64_IMM(BPF_REG_0, 1),
3026 			BPF_EXIT_INSN(),
3027 		},
3028 		.fixup_prog1 = { 1 },
3029 		.result = ACCEPT,
3030 		.retval = 41,
3031 	},
3032 	{
3033 		"runtime/jit: tail_call within bounds, no prog",
3034 		.insns = {
3035 			BPF_MOV64_IMM(BPF_REG_3, 2),
3036 			BPF_LD_MAP_FD(BPF_REG_2, 0),
3037 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3038 				     BPF_FUNC_tail_call),
3039 			BPF_MOV64_IMM(BPF_REG_0, 1),
3040 			BPF_EXIT_INSN(),
3041 		},
3042 		.fixup_prog1 = { 1 },
3043 		.result = ACCEPT,
3044 		.retval = 1,
3045 	},
3046 	{
3047 		"runtime/jit: tail_call out of bounds",
3048 		.insns = {
3049 			BPF_MOV64_IMM(BPF_REG_3, 256),
3050 			BPF_LD_MAP_FD(BPF_REG_2, 0),
3051 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3052 				     BPF_FUNC_tail_call),
3053 			BPF_MOV64_IMM(BPF_REG_0, 2),
3054 			BPF_EXIT_INSN(),
3055 		},
3056 		.fixup_prog1 = { 1 },
3057 		.result = ACCEPT,
3058 		.retval = 2,
3059 	},
3060 	{
3061 		"runtime/jit: pass negative index to tail_call",
3062 		.insns = {
3063 			BPF_MOV64_IMM(BPF_REG_3, -1),
3064 			BPF_LD_MAP_FD(BPF_REG_2, 0),
3065 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3066 				     BPF_FUNC_tail_call),
3067 			BPF_MOV64_IMM(BPF_REG_0, 2),
3068 			BPF_EXIT_INSN(),
3069 		},
3070 		.fixup_prog1 = { 1 },
3071 		.result = ACCEPT,
3072 		.retval = 2,
3073 	},
3074 	{
3075 		"runtime/jit: pass > 32bit index to tail_call",
3076 		.insns = {
3077 			BPF_LD_IMM64(BPF_REG_3, 0x100000000ULL),
3078 			BPF_LD_MAP_FD(BPF_REG_2, 0),
3079 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3080 				     BPF_FUNC_tail_call),
3081 			BPF_MOV64_IMM(BPF_REG_0, 2),
3082 			BPF_EXIT_INSN(),
3083 		},
3084 		.fixup_prog1 = { 2 },
3085 		.result = ACCEPT,
3086 		.retval = 42,
3087 	},
3088 	{
3089 		"stack pointer arithmetic",
3090 		.insns = {
3091 			BPF_MOV64_IMM(BPF_REG_1, 4),
3092 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
3093 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
3094 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
3095 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
3096 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
3097 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
3098 			BPF_ST_MEM(0, BPF_REG_2, 4, 0),
3099 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
3100 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
3101 			BPF_ST_MEM(0, BPF_REG_2, 4, 0),
3102 			BPF_MOV64_IMM(BPF_REG_0, 0),
3103 			BPF_EXIT_INSN(),
3104 		},
3105 		.result = ACCEPT,
3106 	},
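	/* raw_stack tests pass stack memory to bpf_skb_load_bytes(): R3 must
	 * point into the 512-byte stack and R4 must be a known, positive
	 * length, but the buffer may be left uninitialized because the
	 * helper only writes to it.
	 */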
3107 	{
3108 		"raw_stack: no skb_load_bytes",
3109 		.insns = {
3110 			BPF_MOV64_IMM(BPF_REG_2, 4),
3111 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3112 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3113 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3114 			BPF_MOV64_IMM(BPF_REG_4, 8),
3115 			/* Call to skb_load_bytes() omitted. */
3116 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3117 			BPF_EXIT_INSN(),
3118 		},
3119 		.result = REJECT,
3120 		.errstr = "invalid read from stack off -8+0 size 8",
3121 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3122 	},
3123 	{
3124 		"raw_stack: skb_load_bytes, negative len",
3125 		.insns = {
3126 			BPF_MOV64_IMM(BPF_REG_2, 4),
3127 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3128 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3129 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3130 			BPF_MOV64_IMM(BPF_REG_4, -8),
3131 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3132 				     BPF_FUNC_skb_load_bytes),
3133 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3134 			BPF_EXIT_INSN(),
3135 		},
3136 		.result = REJECT,
3137 		.errstr = "R4 min value is negative",
3138 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3139 	},
3140 	{
3141 		"raw_stack: skb_load_bytes, negative len 2",
3142 		.insns = {
3143 			BPF_MOV64_IMM(BPF_REG_2, 4),
3144 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3145 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3146 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3147 			BPF_MOV64_IMM(BPF_REG_4, ~0),
3148 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3149 				     BPF_FUNC_skb_load_bytes),
3150 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3151 			BPF_EXIT_INSN(),
3152 		},
3153 		.result = REJECT,
3154 		.errstr = "R4 min value is negative",
3155 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3156 	},
3157 	{
3158 		"raw_stack: skb_load_bytes, zero len",
3159 		.insns = {
3160 			BPF_MOV64_IMM(BPF_REG_2, 4),
3161 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3162 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3163 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3164 			BPF_MOV64_IMM(BPF_REG_4, 0),
3165 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3166 				     BPF_FUNC_skb_load_bytes),
3167 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3168 			BPF_EXIT_INSN(),
3169 		},
3170 		.result = REJECT,
3171 		.errstr = "invalid stack type R3",
3172 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3173 	},
3174 	{
3175 		"raw_stack: skb_load_bytes, no init",
3176 		.insns = {
3177 			BPF_MOV64_IMM(BPF_REG_2, 4),
3178 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3179 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3180 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3181 			BPF_MOV64_IMM(BPF_REG_4, 8),
3182 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3183 				     BPF_FUNC_skb_load_bytes),
3184 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3185 			BPF_EXIT_INSN(),
3186 		},
3187 		.result = ACCEPT,
3188 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3189 	},
3190 	{
3191 		"raw_stack: skb_load_bytes, init",
3192 		.insns = {
3193 			BPF_MOV64_IMM(BPF_REG_2, 4),
3194 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3195 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3196 			BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
3197 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3198 			BPF_MOV64_IMM(BPF_REG_4, 8),
3199 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3200 				     BPF_FUNC_skb_load_bytes),
3201 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3202 			BPF_EXIT_INSN(),
3203 		},
3204 		.result = ACCEPT,
3205 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3206 	},
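	/* Spilled pointers outside the buffer handed to bpf_skb_load_bytes()
	 * must survive the call, while a spill inside the written range is
	 * clobbered to an unknown scalar: it may still be used as plain data
	 * ("+ data") but can no longer be dereferenced ("corruption").
	 */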
3207 	{
3208 		"raw_stack: skb_load_bytes, spilled regs around bounds",
3209 		.insns = {
3210 			BPF_MOV64_IMM(BPF_REG_2, 4),
3211 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3212 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
3213 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
3214 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  8),
3215 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3216 			BPF_MOV64_IMM(BPF_REG_4, 8),
3217 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3218 				     BPF_FUNC_skb_load_bytes),
3219 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
3220 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6,  8),
3221 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3222 				    offsetof(struct __sk_buff, mark)),
3223 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
3224 				    offsetof(struct __sk_buff, priority)),
3225 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3226 			BPF_EXIT_INSN(),
3227 		},
3228 		.result = ACCEPT,
3229 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3230 	},
3231 	{
3232 		"raw_stack: skb_load_bytes, spilled regs corruption",
3233 		.insns = {
3234 			BPF_MOV64_IMM(BPF_REG_2, 4),
3235 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3236 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3237 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
3238 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3239 			BPF_MOV64_IMM(BPF_REG_4, 8),
3240 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3241 				     BPF_FUNC_skb_load_bytes),
3242 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3243 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3244 				    offsetof(struct __sk_buff, mark)),
3245 			BPF_EXIT_INSN(),
3246 		},
3247 		.result = REJECT,
3248 		.errstr = "R0 invalid mem access 'inv'",
3249 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3250 	},
3251 	{
3252 		"raw_stack: skb_load_bytes, spilled regs corruption 2",
3253 		.insns = {
3254 			BPF_MOV64_IMM(BPF_REG_2, 4),
3255 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3256 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
3257 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
3258 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  0),
3259 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  8),
3260 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3261 			BPF_MOV64_IMM(BPF_REG_4, 8),
3262 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3263 				     BPF_FUNC_skb_load_bytes),
3264 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
3265 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6,  8),
3266 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6,  0),
3267 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3268 				    offsetof(struct __sk_buff, mark)),
3269 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
3270 				    offsetof(struct __sk_buff, priority)),
3271 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3272 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
3273 				    offsetof(struct __sk_buff, pkt_type)),
3274 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
3275 			BPF_EXIT_INSN(),
3276 		},
3277 		.result = REJECT,
3278 		.errstr = "R3 invalid mem access 'inv'",
3279 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3280 	},
3281 	{
3282 		"raw_stack: skb_load_bytes, spilled regs + data",
3283 		.insns = {
3284 			BPF_MOV64_IMM(BPF_REG_2, 4),
3285 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3286 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
3287 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
3288 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  0),
3289 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  8),
3290 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3291 			BPF_MOV64_IMM(BPF_REG_4, 8),
3292 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3293 				     BPF_FUNC_skb_load_bytes),
3294 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
3295 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6,  8),
3296 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6,  0),
3297 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3298 				    offsetof(struct __sk_buff, mark)),
3299 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
3300 				    offsetof(struct __sk_buff, priority)),
3301 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3302 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
3303 			BPF_EXIT_INSN(),
3304 		},
3305 		.result = ACCEPT,
3306 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3307 	},
3308 	{
3309 		"raw_stack: skb_load_bytes, invalid access 1",
3310 		.insns = {
3311 			BPF_MOV64_IMM(BPF_REG_2, 4),
3312 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3313 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
3314 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3315 			BPF_MOV64_IMM(BPF_REG_4, 8),
3316 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3317 				     BPF_FUNC_skb_load_bytes),
3318 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3319 			BPF_EXIT_INSN(),
3320 		},
3321 		.result = REJECT,
3322 		.errstr = "invalid stack type R3 off=-513 access_size=8",
3323 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3324 	},
3325 	{
3326 		"raw_stack: skb_load_bytes, invalid access 2",
3327 		.insns = {
3328 			BPF_MOV64_IMM(BPF_REG_2, 4),
3329 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3330 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
3331 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3332 			BPF_MOV64_IMM(BPF_REG_4, 8),
3333 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3334 				     BPF_FUNC_skb_load_bytes),
3335 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3336 			BPF_EXIT_INSN(),
3337 		},
3338 		.result = REJECT,
3339 		.errstr = "invalid stack type R3 off=-1 access_size=8",
3340 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3341 	},
3342 	{
3343 		"raw_stack: skb_load_bytes, invalid access 3",
3344 		.insns = {
3345 			BPF_MOV64_IMM(BPF_REG_2, 4),
3346 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3347 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
3348 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3349 			BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3350 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3351 				     BPF_FUNC_skb_load_bytes),
3352 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3353 			BPF_EXIT_INSN(),
3354 		},
3355 		.result = REJECT,
3356 		.errstr = "R4 min value is negative",
3357 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3358 	},
3359 	{
3360 		"raw_stack: skb_load_bytes, invalid access 4",
3361 		.insns = {
3362 			BPF_MOV64_IMM(BPF_REG_2, 4),
3363 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3364 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
3365 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3366 			BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
3367 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3368 				     BPF_FUNC_skb_load_bytes),
3369 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3370 			BPF_EXIT_INSN(),
3371 		},
3372 		.result = REJECT,
3373 		.errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
3374 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3375 	},
3376 	{
3377 		"raw_stack: skb_load_bytes, invalid access 5",
3378 		.insns = {
3379 			BPF_MOV64_IMM(BPF_REG_2, 4),
3380 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3381 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
3382 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3383 			BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
3384 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3385 				     BPF_FUNC_skb_load_bytes),
3386 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3387 			BPF_EXIT_INSN(),
3388 		},
3389 		.result = REJECT,
3390 		.errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
3391 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3392 	},
3393 	{
3394 		"raw_stack: skb_load_bytes, invalid access 6",
3395 		.insns = {
3396 			BPF_MOV64_IMM(BPF_REG_2, 4),
3397 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3398 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
3399 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3400 			BPF_MOV64_IMM(BPF_REG_4, 0),
3401 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3402 				     BPF_FUNC_skb_load_bytes),
3403 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3404 			BPF_EXIT_INSN(),
3405 		},
3406 		.result = REJECT,
3407 		.errstr = "invalid stack type R3 off=-512 access_size=0",
3408 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3409 	},
3410 	{
3411 		"raw_stack: skb_load_bytes, large access",
3412 		.insns = {
3413 			BPF_MOV64_IMM(BPF_REG_2, 4),
3414 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3415 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
3416 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3417 			BPF_MOV64_IMM(BPF_REG_4, 512),
3418 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3419 				     BPF_FUNC_skb_load_bytes),
3420 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3421 			BPF_EXIT_INSN(),
3422 		},
3423 		.result = ACCEPT,
3424 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3425 	},
3426 	{
3427 		"context stores via ST",
3428 		.insns = {
3429 			BPF_MOV64_IMM(BPF_REG_0, 0),
3430 			BPF_ST_MEM(BPF_DW, BPF_REG_1, offsetof(struct __sk_buff, mark), 0),
3431 			BPF_EXIT_INSN(),
3432 		},
3433 		.errstr = "BPF_ST stores into R1 ctx is not allowed",
3434 		.result = REJECT,
3435 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3436 	},
3437 	{
3438 		"context stores via XADD",
3439 		.insns = {
3440 			BPF_MOV64_IMM(BPF_REG_0, 0),
3441 			BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_1,
3442 				     BPF_REG_0, offsetof(struct __sk_buff, mark), 0),
3443 			BPF_EXIT_INSN(),
3444 		},
3445 		.errstr = "BPF_XADD stores into R1 ctx is not allowed",
3446 		.result = REJECT,
3447 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3448 	},
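	/* Direct packet access for tc programs: skb->data and skb->data_end
	 * are 32-bit context fields that the verifier tracks as packet
	 * pointers, and a load from the packet is only allowed once the
	 * accessed range has been proven to lie before data_end.
	 */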
3449 	{
3450 		"direct packet access: test1",
3451 		.insns = {
3452 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3453 				    offsetof(struct __sk_buff, data)),
3454 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3455 				    offsetof(struct __sk_buff, data_end)),
3456 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3457 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3458 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3459 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3460 			BPF_MOV64_IMM(BPF_REG_0, 0),
3461 			BPF_EXIT_INSN(),
3462 		},
3463 		.result = ACCEPT,
3464 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3465 	},
3466 	{
3467 		"direct packet access: test2",
3468 		.insns = {
3469 			BPF_MOV64_IMM(BPF_REG_0, 1),
3470 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
3471 				    offsetof(struct __sk_buff, data_end)),
3472 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3473 				    offsetof(struct __sk_buff, data)),
3474 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3475 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
3476 			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
3477 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
3478 			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
3479 			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
3480 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3481 				    offsetof(struct __sk_buff, data)),
3482 			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
3483 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3484 				    offsetof(struct __sk_buff, len)),
3485 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
3486 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
3487 			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
3488 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
3489 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
3490 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
3491 				    offsetof(struct __sk_buff, data_end)),
3492 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
3493 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
3494 			BPF_MOV64_IMM(BPF_REG_0, 0),
3495 			BPF_EXIT_INSN(),
3496 		},
3497 		.result = ACCEPT,
3498 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3499 	},
3500 	{
3501 		"direct packet access: test3",
3502 		.insns = {
3503 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3504 				    offsetof(struct __sk_buff, data)),
3505 			BPF_MOV64_IMM(BPF_REG_0, 0),
3506 			BPF_EXIT_INSN(),
3507 		},
3508 		.errstr = "invalid bpf_context access off=76",
3509 		.result = REJECT,
3510 		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
3511 	},
3512 	{
3513 		"direct packet access: test4 (write)",
3514 		.insns = {
3515 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3516 				    offsetof(struct __sk_buff, data)),
3517 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3518 				    offsetof(struct __sk_buff, data_end)),
3519 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3520 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3521 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3522 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3523 			BPF_MOV64_IMM(BPF_REG_0, 0),
3524 			BPF_EXIT_INSN(),
3525 		},
3526 		.result = ACCEPT,
3527 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3528 	},
3529 	{
3530 		"direct packet access: test5 (pkt_end >= reg, good access)",
3531 		.insns = {
3532 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3533 				    offsetof(struct __sk_buff, data)),
3534 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3535 				    offsetof(struct __sk_buff, data_end)),
3536 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3537 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3538 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
3539 			BPF_MOV64_IMM(BPF_REG_0, 1),
3540 			BPF_EXIT_INSN(),
3541 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3542 			BPF_MOV64_IMM(BPF_REG_0, 0),
3543 			BPF_EXIT_INSN(),
3544 		},
3545 		.result = ACCEPT,
3546 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3547 	},
3548 	{
3549 		"direct packet access: test6 (pkt_end >= reg, bad access)",
3550 		.insns = {
3551 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3552 				    offsetof(struct __sk_buff, data)),
3553 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3554 				    offsetof(struct __sk_buff, data_end)),
3555 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3556 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3557 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
3558 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3559 			BPF_MOV64_IMM(BPF_REG_0, 1),
3560 			BPF_EXIT_INSN(),
3561 			BPF_MOV64_IMM(BPF_REG_0, 0),
3562 			BPF_EXIT_INSN(),
3563 		},
3564 		.errstr = "invalid access to packet",
3565 		.result = REJECT,
3566 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3567 	},
3568 	{
3569 		"direct packet access: test7 (pkt_end >= reg, both accesses)",
3570 		.insns = {
3571 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3572 				    offsetof(struct __sk_buff, data)),
3573 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3574 				    offsetof(struct __sk_buff, data_end)),
3575 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3576 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3577 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
3578 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3579 			BPF_MOV64_IMM(BPF_REG_0, 1),
3580 			BPF_EXIT_INSN(),
3581 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3582 			BPF_MOV64_IMM(BPF_REG_0, 0),
3583 			BPF_EXIT_INSN(),
3584 		},
3585 		.errstr = "invalid access to packet",
3586 		.result = REJECT,
3587 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3588 	},
3589 	{
3590 		"direct packet access: test8 (double test, variant 1)",
3591 		.insns = {
3592 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3593 				    offsetof(struct __sk_buff, data)),
3594 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3595 				    offsetof(struct __sk_buff, data_end)),
3596 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3597 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3598 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
3599 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3600 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3601 			BPF_MOV64_IMM(BPF_REG_0, 1),
3602 			BPF_EXIT_INSN(),
3603 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3604 			BPF_MOV64_IMM(BPF_REG_0, 0),
3605 			BPF_EXIT_INSN(),
3606 		},
3607 		.result = ACCEPT,
3608 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3609 	},
3610 	{
3611 		"direct packet access: test9 (double test, variant 2)",
3612 		.insns = {
3613 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3614 				    offsetof(struct __sk_buff, data)),
3615 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3616 				    offsetof(struct __sk_buff, data_end)),
3617 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3618 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3619 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
3620 			BPF_MOV64_IMM(BPF_REG_0, 1),
3621 			BPF_EXIT_INSN(),
3622 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3623 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3624 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3625 			BPF_MOV64_IMM(BPF_REG_0, 0),
3626 			BPF_EXIT_INSN(),
3627 		},
3628 		.result = ACCEPT,
3629 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3630 	},
3631 	{
3632 		"direct packet access: test10 (write invalid)",
3633 		.insns = {
3634 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3635 				    offsetof(struct __sk_buff, data)),
3636 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3637 				    offsetof(struct __sk_buff, data_end)),
3638 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3639 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3640 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
3641 			BPF_MOV64_IMM(BPF_REG_0, 0),
3642 			BPF_EXIT_INSN(),
3643 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3644 			BPF_MOV64_IMM(BPF_REG_0, 0),
3645 			BPF_EXIT_INSN(),
3646 		},
3647 		.errstr = "invalid access to packet",
3648 		.result = REJECT,
3649 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3650 	},
3651 	{
3652 		"direct packet access: test11 (shift, good access)",
3653 		.insns = {
3654 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3655 				    offsetof(struct __sk_buff, data)),
3656 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3657 				    offsetof(struct __sk_buff, data_end)),
3658 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3659 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3660 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3661 			BPF_MOV64_IMM(BPF_REG_3, 144),
3662 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3663 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3664 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
3665 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3666 			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3667 			BPF_MOV64_IMM(BPF_REG_0, 1),
3668 			BPF_EXIT_INSN(),
3669 			BPF_MOV64_IMM(BPF_REG_0, 0),
3670 			BPF_EXIT_INSN(),
3671 		},
3672 		.result = ACCEPT,
3673 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3674 		.retval = 1,
3675 	},
3676 	{
3677 		"direct packet access: test12 (and, good access)",
3678 		.insns = {
3679 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3680 				    offsetof(struct __sk_buff, data)),
3681 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3682 				    offsetof(struct __sk_buff, data_end)),
3683 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3684 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3685 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3686 			BPF_MOV64_IMM(BPF_REG_3, 144),
3687 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3688 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3689 			BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
3690 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3691 			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3692 			BPF_MOV64_IMM(BPF_REG_0, 1),
3693 			BPF_EXIT_INSN(),
3694 			BPF_MOV64_IMM(BPF_REG_0, 0),
3695 			BPF_EXIT_INSN(),
3696 		},
3697 		.result = ACCEPT,
3698 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3699 		.retval = 1,
3700 	},
3701 	{
3702 		"direct packet access: test13 (branches, good access)",
3703 		.insns = {
3704 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3705 				    offsetof(struct __sk_buff, data)),
3706 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3707 				    offsetof(struct __sk_buff, data_end)),
3708 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3709 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3710 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
3711 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3712 				    offsetof(struct __sk_buff, mark)),
3713 			BPF_MOV64_IMM(BPF_REG_4, 1),
3714 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
3715 			BPF_MOV64_IMM(BPF_REG_3, 14),
3716 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
3717 			BPF_MOV64_IMM(BPF_REG_3, 24),
3718 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3719 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3720 			BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
3721 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3722 			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3723 			BPF_MOV64_IMM(BPF_REG_0, 1),
3724 			BPF_EXIT_INSN(),
3725 			BPF_MOV64_IMM(BPF_REG_0, 0),
3726 			BPF_EXIT_INSN(),
3727 		},
3728 		.result = ACCEPT,
3729 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3730 		.retval = 1,
3731 	},
3732 	{
3733 		"direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
3734 		.insns = {
3735 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3736 				    offsetof(struct __sk_buff, data)),
3737 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3738 				    offsetof(struct __sk_buff, data_end)),
3739 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3740 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3741 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
3742 			BPF_MOV64_IMM(BPF_REG_5, 12),
3743 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4),
3744 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3745 			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3746 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
3747 			BPF_MOV64_IMM(BPF_REG_0, 1),
3748 			BPF_EXIT_INSN(),
3749 			BPF_MOV64_IMM(BPF_REG_0, 0),
3750 			BPF_EXIT_INSN(),
3751 		},
3752 		.result = ACCEPT,
3753 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3754 		.retval = 1,
3755 	},
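	/* test15-test17: ways the verified packet range is lost or misused:
	 * XADD on a spilled packet pointer turns it into an unknown scalar,
	 * arithmetic on pkt_end is forbidden, and strict-alignment loading
	 * catches a misaligned packet store across pruned branches.
	 */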
3756 	{
3757 		"direct packet access: test15 (spill with xadd)",
3758 		.insns = {
3759 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3760 				    offsetof(struct __sk_buff, data)),
3761 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3762 				    offsetof(struct __sk_buff, data_end)),
3763 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3764 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3765 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3766 			BPF_MOV64_IMM(BPF_REG_5, 4096),
3767 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
3768 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
3769 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
3770 			BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
3771 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
3772 			BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
3773 			BPF_MOV64_IMM(BPF_REG_0, 0),
3774 			BPF_EXIT_INSN(),
3775 		},
3776 		.errstr = "R2 invalid mem access 'inv'",
3777 		.result = REJECT,
3778 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3779 	},
3780 	{
3781 		"direct packet access: test16 (arith on data_end)",
3782 		.insns = {
3783 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3784 				    offsetof(struct __sk_buff, data)),
3785 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3786 				    offsetof(struct __sk_buff, data_end)),
3787 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3788 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3789 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16),
3790 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3791 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3792 			BPF_MOV64_IMM(BPF_REG_0, 0),
3793 			BPF_EXIT_INSN(),
3794 		},
3795 		.errstr = "R3 pointer arithmetic on pkt_end",
3796 		.result = REJECT,
3797 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3798 	},
3799 	{
3800 		"direct packet access: test17 (pruning, alignment)",
3801 		.insns = {
3802 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3803 				    offsetof(struct __sk_buff, data)),
3804 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3805 				    offsetof(struct __sk_buff, data_end)),
3806 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3807 				    offsetof(struct __sk_buff, mark)),
3808 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3809 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14),
3810 			BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4),
3811 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3812 			BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4),
3813 			BPF_MOV64_IMM(BPF_REG_0, 0),
3814 			BPF_EXIT_INSN(),
3815 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
3816 			BPF_JMP_A(-6),
3817 		},
3818 		.errstr = "misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4",
3819 		.result = REJECT,
3820 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3821 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
3822 	},
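	/* test18-test24: reversed pointer arithmetic, i.e. adding the packet
	 * pointer to a scalar ("imm/x += pkt_ptr") instead of the usual
	 * pkt_ptr += imm. Accepted when the result is re-checked against
	 * data_end; test23 shows a check that leaves the access register
	 * with no verified range (r=0) and is therefore rejected.
	 */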
3823 	{
3824 		"direct packet access: test18 (imm += pkt_ptr, 1)",
3825 		.insns = {
3826 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3827 				    offsetof(struct __sk_buff, data)),
3828 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3829 				    offsetof(struct __sk_buff, data_end)),
3830 			BPF_MOV64_IMM(BPF_REG_0, 8),
3831 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3832 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3833 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3834 			BPF_MOV64_IMM(BPF_REG_0, 0),
3835 			BPF_EXIT_INSN(),
3836 		},
3837 		.result = ACCEPT,
3838 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3839 	},
3840 	{
3841 		"direct packet access: test19 (imm += pkt_ptr, 2)",
3842 		.insns = {
3843 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3844 				    offsetof(struct __sk_buff, data)),
3845 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3846 				    offsetof(struct __sk_buff, data_end)),
3847 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3848 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3849 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
3850 			BPF_MOV64_IMM(BPF_REG_4, 4),
3851 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3852 			BPF_STX_MEM(BPF_B, BPF_REG_4, BPF_REG_4, 0),
3853 			BPF_MOV64_IMM(BPF_REG_0, 0),
3854 			BPF_EXIT_INSN(),
3855 		},
3856 		.result = ACCEPT,
3857 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3858 	},
3859 	{
3860 		"direct packet access: test20 (x += pkt_ptr, 1)",
3861 		.insns = {
3862 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3863 				    offsetof(struct __sk_buff, data)),
3864 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3865 				    offsetof(struct __sk_buff, data_end)),
3866 			BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3867 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3868 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3869 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0x7fff),
3870 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3871 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3872 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3873 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
3874 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
3875 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
3876 			BPF_MOV64_IMM(BPF_REG_0, 0),
3877 			BPF_EXIT_INSN(),
3878 		},
3879 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3880 		.result = ACCEPT,
3881 	},
3882 	{
3883 		"direct packet access: test21 (x += pkt_ptr, 2)",
3884 		.insns = {
3885 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3886 				    offsetof(struct __sk_buff, data)),
3887 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3888 				    offsetof(struct __sk_buff, data_end)),
3889 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3890 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3891 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
3892 			BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3893 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
3894 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
3895 			BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0x7fff),
3896 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3897 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3898 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
3899 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
3900 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
3901 			BPF_MOV64_IMM(BPF_REG_0, 0),
3902 			BPF_EXIT_INSN(),
3903 		},
3904 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3905 		.result = ACCEPT,
3906 	},
3907 	{
3908 		"direct packet access: test22 (x += pkt_ptr, 3)",
3909 		.insns = {
3910 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3911 				    offsetof(struct __sk_buff, data)),
3912 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3913 				    offsetof(struct __sk_buff, data_end)),
3914 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3915 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3916 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8),
3917 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_3, -16),
3918 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -16),
3919 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11),
3920 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
3921 			BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3922 			BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
3923 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
3924 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 49),
3925 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3926 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
3927 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
3928 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
3929 			BPF_MOV64_IMM(BPF_REG_2, 1),
3930 			BPF_STX_MEM(BPF_H, BPF_REG_4, BPF_REG_2, 0),
3931 			BPF_MOV64_IMM(BPF_REG_0, 0),
3932 			BPF_EXIT_INSN(),
3933 		},
3934 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3935 		.result = ACCEPT,
3936 	},
3937 	{
3938 		"direct packet access: test23 (x += pkt_ptr, 4)",
3939 		.insns = {
3940 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3941 				    offsetof(struct __sk_buff, data)),
3942 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3943 				    offsetof(struct __sk_buff, data_end)),
3944 			BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3945 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3946 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3947 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
3948 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3949 			BPF_MOV64_IMM(BPF_REG_0, 31),
3950 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
3951 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3952 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
3953 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1),
3954 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3955 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
3956 			BPF_MOV64_IMM(BPF_REG_0, 0),
3957 			BPF_EXIT_INSN(),
3958 		},
3959 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3960 		.result = REJECT,
3961 		.errstr = "invalid access to packet, off=0 size=8, R5(id=1,off=0,r=0)",
3962 	},
3963 	{
3964 		"direct packet access: test24 (x += pkt_ptr, 5)",
3965 		.insns = {
3966 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3967 				    offsetof(struct __sk_buff, data)),
3968 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3969 				    offsetof(struct __sk_buff, data_end)),
3970 			BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3971 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3972 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3973 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff),
3974 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3975 			BPF_MOV64_IMM(BPF_REG_0, 64),
3976 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
3977 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3978 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
3979 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7fff - 1),
3980 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3981 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
3982 			BPF_MOV64_IMM(BPF_REG_0, 0),
3983 			BPF_EXIT_INSN(),
3984 		},
3985 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3986 		.result = ACCEPT,
3987 	},
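	/* test25-test28: range marking on BPF_JLT/BPF_JLE comparisons
	 * against pkt_end, covering both the taken and fall-through
	 * branches.
	 */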
3988 	{
3989 		"direct packet access: test25 (marking on <, good access)",
3990 		.insns = {
3991 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3992 				    offsetof(struct __sk_buff, data)),
3993 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3994 				    offsetof(struct __sk_buff, data_end)),
3995 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3996 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3997 			BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 2),
3998 			BPF_MOV64_IMM(BPF_REG_0, 0),
3999 			BPF_EXIT_INSN(),
4000 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4001 			BPF_JMP_IMM(BPF_JA, 0, 0, -4),
4002 		},
4003 		.result = ACCEPT,
4004 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4005 	},
4006 	{
4007 		"direct packet access: test26 (marking on <, bad access)",
4008 		.insns = {
4009 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4010 				    offsetof(struct __sk_buff, data)),
4011 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4012 				    offsetof(struct __sk_buff, data_end)),
4013 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4014 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4015 			BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 3),
4016 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4017 			BPF_MOV64_IMM(BPF_REG_0, 0),
4018 			BPF_EXIT_INSN(),
4019 			BPF_JMP_IMM(BPF_JA, 0, 0, -3),
4020 		},
4021 		.result = REJECT,
4022 		.errstr = "invalid access to packet",
4023 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4024 	},
4025 	{
4026 		"direct packet access: test27 (marking on <=, good access)",
4027 		.insns = {
4028 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4029 				    offsetof(struct __sk_buff, data)),
4030 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4031 				    offsetof(struct __sk_buff, data_end)),
4032 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4033 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4034 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 1),
4035 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4036 			BPF_MOV64_IMM(BPF_REG_0, 1),
4037 			BPF_EXIT_INSN(),
4038 		},
4039 		.result = ACCEPT,
4040 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4041 		.retval = 1,
4042 	},
4043 	{
4044 		"direct packet access: test28 (marking on <=, bad access)",
4045 		.insns = {
4046 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4047 				    offsetof(struct __sk_buff, data)),
4048 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4049 				    offsetof(struct __sk_buff, data_end)),
4050 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4051 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4052 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 2),
4053 			BPF_MOV64_IMM(BPF_REG_0, 1),
4054 			BPF_EXIT_INSN(),
4055 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4056 			BPF_JMP_IMM(BPF_JA, 0, 0, -4),
4057 		},
4058 		.result = REJECT,
4059 		.errstr = "invalid access to packet",
4060 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4061 	},
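	/* Helper access to packet: a packet pointer may be passed as a
	 * helper memory argument (e.g. map lookup key or update value) only
	 * if the pointer plus the access size has been checked against
	 * data_end. The tests run first on XDP (xdp_md) and then repeat the
	 * same patterns for SCHED_CLS (__sk_buff).
	 */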
4062 	{
4063 		"helper access to packet: test1, valid packet_ptr range",
4064 		.insns = {
4065 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4066 				    offsetof(struct xdp_md, data)),
4067 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4068 				    offsetof(struct xdp_md, data_end)),
4069 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
4070 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
4071 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
4072 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4073 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
4074 			BPF_MOV64_IMM(BPF_REG_4, 0),
4075 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4076 				     BPF_FUNC_map_update_elem),
4077 			BPF_MOV64_IMM(BPF_REG_0, 0),
4078 			BPF_EXIT_INSN(),
4079 		},
4080 		.fixup_map_hash_8b = { 5 },
4081 		.result_unpriv = ACCEPT,
4082 		.result = ACCEPT,
4083 		.prog_type = BPF_PROG_TYPE_XDP,
4084 	},
4085 	{
4086 		"helper access to packet: test2, unchecked packet_ptr",
4087 		.insns = {
4088 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4089 				    offsetof(struct xdp_md, data)),
4090 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4091 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4092 				     BPF_FUNC_map_lookup_elem),
4093 			BPF_MOV64_IMM(BPF_REG_0, 0),
4094 			BPF_EXIT_INSN(),
4095 		},
4096 		.fixup_map_hash_8b = { 1 },
4097 		.result = REJECT,
4098 		.errstr = "invalid access to packet",
4099 		.prog_type = BPF_PROG_TYPE_XDP,
4100 	},
4101 	{
4102 		"helper access to packet: test3, variable add",
4103 		.insns = {
4104 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4105 					offsetof(struct xdp_md, data)),
4106 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4107 					offsetof(struct xdp_md, data_end)),
4108 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4109 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
4110 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
4111 			BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
4112 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4113 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
4114 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
4115 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
4116 			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
4117 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4118 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
4119 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4120 				     BPF_FUNC_map_lookup_elem),
4121 			BPF_MOV64_IMM(BPF_REG_0, 0),
4122 			BPF_EXIT_INSN(),
4123 		},
4124 		.fixup_map_hash_8b = { 11 },
4125 		.result = ACCEPT,
4126 		.prog_type = BPF_PROG_TYPE_XDP,
4127 	},
4128 	{
4129 		"helper access to packet: test4, packet_ptr with bad range",
4130 		.insns = {
4131 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4132 				    offsetof(struct xdp_md, data)),
4133 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4134 				    offsetof(struct xdp_md, data_end)),
4135 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4136 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
4137 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
4138 			BPF_MOV64_IMM(BPF_REG_0, 0),
4139 			BPF_EXIT_INSN(),
4140 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4141 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4142 				     BPF_FUNC_map_lookup_elem),
4143 			BPF_MOV64_IMM(BPF_REG_0, 0),
4144 			BPF_EXIT_INSN(),
4145 		},
4146 		.fixup_map_hash_8b = { 7 },
4147 		.result = REJECT,
4148 		.errstr = "invalid access to packet",
4149 		.prog_type = BPF_PROG_TYPE_XDP,
4150 	},
4151 	{
4152 		"helper access to packet: test5, packet_ptr with too short range",
4153 		.insns = {
4154 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4155 				    offsetof(struct xdp_md, data)),
4156 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4157 				    offsetof(struct xdp_md, data_end)),
4158 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
4159 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4160 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
4161 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
4162 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4163 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4164 				     BPF_FUNC_map_lookup_elem),
4165 			BPF_MOV64_IMM(BPF_REG_0, 0),
4166 			BPF_EXIT_INSN(),
4167 		},
4168 		.fixup_map_hash_8b = { 6 },
4169 		.result = REJECT,
4170 		.errstr = "invalid access to packet",
4171 		.prog_type = BPF_PROG_TYPE_XDP,
4172 	},
4173 	{
4174 		"helper access to packet: test6, cls valid packet_ptr range",
4175 		.insns = {
4176 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4177 				    offsetof(struct __sk_buff, data)),
4178 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4179 				    offsetof(struct __sk_buff, data_end)),
4180 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
4181 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
4182 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
4183 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4184 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
4185 			BPF_MOV64_IMM(BPF_REG_4, 0),
4186 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4187 				     BPF_FUNC_map_update_elem),
4188 			BPF_MOV64_IMM(BPF_REG_0, 0),
4189 			BPF_EXIT_INSN(),
4190 		},
4191 		.fixup_map_hash_8b = { 5 },
4192 		.result = ACCEPT,
4193 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4194 	},
4195 	{
4196 		"helper access to packet: test7, cls unchecked packet_ptr",
4197 		.insns = {
4198 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4199 				    offsetof(struct __sk_buff, data)),
4200 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4201 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4202 				     BPF_FUNC_map_lookup_elem),
4203 			BPF_MOV64_IMM(BPF_REG_0, 0),
4204 			BPF_EXIT_INSN(),
4205 		},
4206 		.fixup_map_hash_8b = { 1 },
4207 		.result = REJECT,
4208 		.errstr = "invalid access to packet",
4209 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4210 	},
4211 	{
4212 		"helper access to packet: test8, cls variable add",
4213 		.insns = {
4214 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4215 					offsetof(struct __sk_buff, data)),
4216 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4217 					offsetof(struct __sk_buff, data_end)),
4218 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4219 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
4220 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
4221 			BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
4222 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4223 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
4224 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
4225 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
4226 			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
4227 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4228 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
4229 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4230 				     BPF_FUNC_map_lookup_elem),
4231 			BPF_MOV64_IMM(BPF_REG_0, 0),
4232 			BPF_EXIT_INSN(),
4233 		},
4234 		.fixup_map_hash_8b = { 11 },
4235 		.result = ACCEPT,
4236 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4237 	},
4238 	{
4239 		"helper access to packet: test9, cls packet_ptr with bad range",
4240 		.insns = {
4241 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4242 				    offsetof(struct __sk_buff, data)),
4243 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4244 				    offsetof(struct __sk_buff, data_end)),
4245 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4246 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
4247 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
4248 			BPF_MOV64_IMM(BPF_REG_0, 0),
4249 			BPF_EXIT_INSN(),
4250 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4251 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4252 				     BPF_FUNC_map_lookup_elem),
4253 			BPF_MOV64_IMM(BPF_REG_0, 0),
4254 			BPF_EXIT_INSN(),
4255 		},
4256 		.fixup_map_hash_8b = { 7 },
4257 		.result = REJECT,
4258 		.errstr = "invalid access to packet",
4259 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4260 	},
4261 	{
4262 		"helper access to packet: test10, cls packet_ptr with too short range",
4263 		.insns = {
4264 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4265 				    offsetof(struct __sk_buff, data)),
4266 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4267 				    offsetof(struct __sk_buff, data_end)),
4268 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
4269 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4270 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
4271 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
4272 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4273 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4274 				     BPF_FUNC_map_lookup_elem),
4275 			BPF_MOV64_IMM(BPF_REG_0, 0),
4276 			BPF_EXIT_INSN(),
4277 		},
4278 		.fixup_map_hash_8b = { 6 },
4279 		.result = REJECT,
4280 		.errstr = "invalid access to packet",
4281 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4282 	},
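	/* test11-test21: which cls helpers may take a packet pointer at all
	 * (skb_store_bytes/skb_load_bytes may not, rejected with "helper
	 * access to the packet"), plus checks on bpf_csum_diff buffer and
	 * length arguments: negative or oversized ranges and pkt_end as a
	 * buffer are rejected, while a zero length is accepted.
	 */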
4283 	{
4284 		"helper access to packet: test11, cls unsuitable helper 1",
4285 		.insns = {
4286 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4287 				    offsetof(struct __sk_buff, data)),
4288 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4289 				    offsetof(struct __sk_buff, data_end)),
4290 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4291 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
4292 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
4293 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
4294 			BPF_MOV64_IMM(BPF_REG_2, 0),
4295 			BPF_MOV64_IMM(BPF_REG_4, 42),
4296 			BPF_MOV64_IMM(BPF_REG_5, 0),
4297 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4298 				     BPF_FUNC_skb_store_bytes),
4299 			BPF_MOV64_IMM(BPF_REG_0, 0),
4300 			BPF_EXIT_INSN(),
4301 		},
4302 		.result = REJECT,
4303 		.errstr = "helper access to the packet",
4304 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4305 	},
4306 	{
4307 		"helper access to packet: test12, cls unsuitable helper 2",
4308 		.insns = {
4309 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4310 				    offsetof(struct __sk_buff, data)),
4311 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4312 				    offsetof(struct __sk_buff, data_end)),
4313 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
4314 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
4315 			BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
4316 			BPF_MOV64_IMM(BPF_REG_2, 0),
4317 			BPF_MOV64_IMM(BPF_REG_4, 4),
4318 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4319 				     BPF_FUNC_skb_load_bytes),
4320 			BPF_MOV64_IMM(BPF_REG_0, 0),
4321 			BPF_EXIT_INSN(),
4322 		},
4323 		.result = REJECT,
4324 		.errstr = "helper access to the packet",
4325 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4326 	},
4327 	{
4328 		"helper access to packet: test13, cls helper ok",
4329 		.insns = {
4330 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4331 				    offsetof(struct __sk_buff, data)),
4332 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4333 				    offsetof(struct __sk_buff, data_end)),
4334 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4335 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4336 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4337 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4338 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4339 			BPF_MOV64_IMM(BPF_REG_2, 4),
4340 			BPF_MOV64_IMM(BPF_REG_3, 0),
4341 			BPF_MOV64_IMM(BPF_REG_4, 0),
4342 			BPF_MOV64_IMM(BPF_REG_5, 0),
4343 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4344 				     BPF_FUNC_csum_diff),
4345 			BPF_MOV64_IMM(BPF_REG_0, 0),
4346 			BPF_EXIT_INSN(),
4347 		},
4348 		.result = ACCEPT,
4349 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4350 	},
4351 	{
4352 		"helper access to packet: test14, cls helper ok sub",
4353 		.insns = {
4354 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4355 				    offsetof(struct __sk_buff, data)),
4356 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4357 				    offsetof(struct __sk_buff, data_end)),
4358 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4359 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4360 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4361 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4362 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
4363 			BPF_MOV64_IMM(BPF_REG_2, 4),
4364 			BPF_MOV64_IMM(BPF_REG_3, 0),
4365 			BPF_MOV64_IMM(BPF_REG_4, 0),
4366 			BPF_MOV64_IMM(BPF_REG_5, 0),
4367 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4368 				     BPF_FUNC_csum_diff),
4369 			BPF_MOV64_IMM(BPF_REG_0, 0),
4370 			BPF_EXIT_INSN(),
4371 		},
4372 		.result = ACCEPT,
4373 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4374 	},
4375 	{
4376 		"helper access to packet: test15, cls helper fail sub",
4377 		.insns = {
4378 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4379 				    offsetof(struct __sk_buff, data)),
4380 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4381 				    offsetof(struct __sk_buff, data_end)),
4382 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4383 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4384 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4385 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4386 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 12),
4387 			BPF_MOV64_IMM(BPF_REG_2, 4),
4388 			BPF_MOV64_IMM(BPF_REG_3, 0),
4389 			BPF_MOV64_IMM(BPF_REG_4, 0),
4390 			BPF_MOV64_IMM(BPF_REG_5, 0),
4391 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4392 				     BPF_FUNC_csum_diff),
4393 			BPF_MOV64_IMM(BPF_REG_0, 0),
4394 			BPF_EXIT_INSN(),
4395 		},
4396 		.result = REJECT,
4397 		.errstr = "invalid access to packet",
4398 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4399 	},
4400 	{
4401 		"helper access to packet: test16, cls helper fail range 1",
4402 		.insns = {
4403 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4404 				    offsetof(struct __sk_buff, data)),
4405 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4406 				    offsetof(struct __sk_buff, data_end)),
4407 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4408 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4409 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4410 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4411 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4412 			BPF_MOV64_IMM(BPF_REG_2, 8),
4413 			BPF_MOV64_IMM(BPF_REG_3, 0),
4414 			BPF_MOV64_IMM(BPF_REG_4, 0),
4415 			BPF_MOV64_IMM(BPF_REG_5, 0),
4416 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4417 				     BPF_FUNC_csum_diff),
4418 			BPF_MOV64_IMM(BPF_REG_0, 0),
4419 			BPF_EXIT_INSN(),
4420 		},
4421 		.result = REJECT,
4422 		.errstr = "invalid access to packet",
4423 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4424 	},
4425 	{
4426 		"helper access to packet: test17, cls helper fail range 2",
4427 		.insns = {
4428 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4429 				    offsetof(struct __sk_buff, data)),
4430 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4431 				    offsetof(struct __sk_buff, data_end)),
4432 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4433 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4434 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4435 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4436 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4437 			BPF_MOV64_IMM(BPF_REG_2, -9),
4438 			BPF_MOV64_IMM(BPF_REG_3, 0),
4439 			BPF_MOV64_IMM(BPF_REG_4, 0),
4440 			BPF_MOV64_IMM(BPF_REG_5, 0),
4441 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4442 				     BPF_FUNC_csum_diff),
4443 			BPF_MOV64_IMM(BPF_REG_0, 0),
4444 			BPF_EXIT_INSN(),
4445 		},
4446 		.result = REJECT,
4447 		.errstr = "R2 min value is negative",
4448 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4449 	},
4450 	{
4451 		"helper access to packet: test18, cls helper fail range 3",
4452 		.insns = {
4453 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4454 				    offsetof(struct __sk_buff, data)),
4455 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4456 				    offsetof(struct __sk_buff, data_end)),
4457 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4458 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4459 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4460 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4461 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4462 			BPF_MOV64_IMM(BPF_REG_2, ~0),
4463 			BPF_MOV64_IMM(BPF_REG_3, 0),
4464 			BPF_MOV64_IMM(BPF_REG_4, 0),
4465 			BPF_MOV64_IMM(BPF_REG_5, 0),
4466 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4467 				     BPF_FUNC_csum_diff),
4468 			BPF_MOV64_IMM(BPF_REG_0, 0),
4469 			BPF_EXIT_INSN(),
4470 		},
4471 		.result = REJECT,
4472 		.errstr = "R2 min value is negative",
4473 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4474 	},
4475 	{
4476 		"helper access to packet: test19, cls helper range zero",
4477 		.insns = {
4478 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4479 				    offsetof(struct __sk_buff, data)),
4480 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4481 				    offsetof(struct __sk_buff, data_end)),
4482 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4483 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4484 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4485 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4486 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4487 			BPF_MOV64_IMM(BPF_REG_2, 0),
4488 			BPF_MOV64_IMM(BPF_REG_3, 0),
4489 			BPF_MOV64_IMM(BPF_REG_4, 0),
4490 			BPF_MOV64_IMM(BPF_REG_5, 0),
4491 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4492 				     BPF_FUNC_csum_diff),
4493 			BPF_MOV64_IMM(BPF_REG_0, 0),
4494 			BPF_EXIT_INSN(),
4495 		},
4496 		.result = ACCEPT,
4497 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4498 	},
4499 	{
4500 		"helper access to packet: test20, pkt end as input",
4501 		.insns = {
4502 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4503 				    offsetof(struct __sk_buff, data)),
4504 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4505 				    offsetof(struct __sk_buff, data_end)),
4506 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4507 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4508 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4509 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4510 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
4511 			BPF_MOV64_IMM(BPF_REG_2, 4),
4512 			BPF_MOV64_IMM(BPF_REG_3, 0),
4513 			BPF_MOV64_IMM(BPF_REG_4, 0),
4514 			BPF_MOV64_IMM(BPF_REG_5, 0),
4515 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4516 				     BPF_FUNC_csum_diff),
4517 			BPF_MOV64_IMM(BPF_REG_0, 0),
4518 			BPF_EXIT_INSN(),
4519 		},
4520 		.result = REJECT,
4521 		.errstr = "R1 type=pkt_end expected=fp",
4522 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4523 	},
4524 	{
4525 		"helper access to packet: test21, wrong reg",
4526 		.insns = {
4527 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4528 				    offsetof(struct __sk_buff, data)),
4529 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4530 				    offsetof(struct __sk_buff, data_end)),
4531 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4532 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4533 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4534 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4535 			BPF_MOV64_IMM(BPF_REG_2, 4),
4536 			BPF_MOV64_IMM(BPF_REG_3, 0),
4537 			BPF_MOV64_IMM(BPF_REG_4, 0),
4538 			BPF_MOV64_IMM(BPF_REG_5, 0),
4539 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4540 				     BPF_FUNC_csum_diff),
4541 			BPF_MOV64_IMM(BPF_REG_0, 0),
4542 			BPF_EXIT_INSN(),
4543 		},
4544 		.result = REJECT,
4545 		.errstr = "invalid access to packet",
4546 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4547 	},
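	/* Map lookups are refused for special-purpose map types; the
	 * map_type number in each error string is the corresponding
	 * BPF_MAP_TYPE_* value (15 sockmap, 18 sockhash, 17 xskmap,
	 * 7 stack trace, 3 prog array).
	 */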
4548 	{
4549 		"prevent map lookup in sockmap",
4550 		.insns = {
4551 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4552 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4553 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4554 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4555 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4556 				     BPF_FUNC_map_lookup_elem),
4557 			BPF_EXIT_INSN(),
4558 		},
4559 		.fixup_map_sockmap = { 3 },
4560 		.result = REJECT,
4561 		.errstr = "cannot pass map_type 15 into func bpf_map_lookup_elem",
4562 		.prog_type = BPF_PROG_TYPE_SOCK_OPS,
4563 	},
4564 	{
4565 		"prevent map lookup in sockhash",
4566 		.insns = {
4567 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4568 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4569 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4570 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4571 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4572 				     BPF_FUNC_map_lookup_elem),
4573 			BPF_EXIT_INSN(),
4574 		},
4575 		.fixup_map_sockhash = { 3 },
4576 		.result = REJECT,
4577 		.errstr = "cannot pass map_type 18 into func bpf_map_lookup_elem",
4578 		.prog_type = BPF_PROG_TYPE_SOCK_OPS,
4579 	},
4580 	{
4581 		"prevent map lookup in xskmap",
4582 		.insns = {
4583 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4584 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4585 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4586 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4587 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4588 				     BPF_FUNC_map_lookup_elem),
4589 			BPF_EXIT_INSN(),
4590 		},
4591 		.fixup_map_xskmap = { 3 },
4592 		.result = REJECT,
4593 		.errstr = "cannot pass map_type 17 into func bpf_map_lookup_elem",
4594 		.prog_type = BPF_PROG_TYPE_XDP,
4595 	},
4596 	{
4597 		"prevent map lookup in stack trace",
4598 		.insns = {
4599 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4600 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4601 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4602 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4603 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4604 				     BPF_FUNC_map_lookup_elem),
4605 			BPF_EXIT_INSN(),
4606 		},
4607 		.fixup_map_stacktrace = { 3 },
4608 		.result = REJECT,
4609 		.errstr = "cannot pass map_type 7 into func bpf_map_lookup_elem",
4610 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
4611 	},
4612 	{
4613 		"prevent map lookup in prog array",
4614 		.insns = {
4615 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4616 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4617 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4618 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4619 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4620 				     BPF_FUNC_map_lookup_elem),
4621 			BPF_EXIT_INSN(),
4622 		},
4623 		.fixup_prog2 = { 3 },
4624 		.result = REJECT,
4625 		.errstr = "cannot pass map_type 3 into func bpf_map_lookup_elem",
4626 	},
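	/* Array map value accesses: a lookup result may be dereferenced
	 * with a constant, register or properly bounded variable offset;
	 * unbounded or out-of-range offsets are rejected, and unprivileged
	 * programs additionally fail with "R0 leaks addr".
	 */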
4627 	{
4628 		"valid map access into an array with a constant",
4629 		.insns = {
4630 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4631 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4632 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4633 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4634 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4635 				     BPF_FUNC_map_lookup_elem),
4636 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4637 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4638 				   offsetof(struct test_val, foo)),
4639 			BPF_EXIT_INSN(),
4640 		},
4641 		.fixup_map_hash_48b = { 3 },
4642 		.errstr_unpriv = "R0 leaks addr",
4643 		.result_unpriv = REJECT,
4644 		.result = ACCEPT,
4645 	},
4646 	{
4647 		"valid map access into an array with a register",
4648 		.insns = {
4649 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4650 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4651 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4652 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4653 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4654 				     BPF_FUNC_map_lookup_elem),
4655 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4656 			BPF_MOV64_IMM(BPF_REG_1, 4),
4657 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4658 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4659 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4660 				   offsetof(struct test_val, foo)),
4661 			BPF_EXIT_INSN(),
4662 		},
4663 		.fixup_map_hash_48b = { 3 },
4664 		.errstr_unpriv = "R0 leaks addr",
4665 		.result_unpriv = REJECT,
4666 		.result = ACCEPT,
4667 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4668 	},
4669 	{
4670 		"valid map access into an array with a variable",
4671 		.insns = {
4672 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4673 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4674 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4675 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4676 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4677 				     BPF_FUNC_map_lookup_elem),
4678 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4679 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4680 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
4681 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4682 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4683 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4684 				   offsetof(struct test_val, foo)),
4685 			BPF_EXIT_INSN(),
4686 		},
4687 		.fixup_map_hash_48b = { 3 },
4688 		.errstr_unpriv = "R0 leaks addr",
4689 		.result_unpriv = REJECT,
4690 		.result = ACCEPT,
4691 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4692 	},
4693 	{
4694 		"valid map access into an array with a signed variable",
4695 		.insns = {
4696 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4697 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4698 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4699 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4700 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4701 				     BPF_FUNC_map_lookup_elem),
4702 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
4703 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4704 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
4705 			BPF_MOV32_IMM(BPF_REG_1, 0),
4706 			BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
4707 			BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
4708 			BPF_MOV32_IMM(BPF_REG_1, 0),
4709 			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4710 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4711 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4712 				   offsetof(struct test_val, foo)),
4713 			BPF_EXIT_INSN(),
4714 		},
4715 		.fixup_map_hash_48b = { 3 },
4716 		.errstr_unpriv = "R0 leaks addr",
4717 		.result_unpriv = REJECT,
4718 		.result = ACCEPT,
4719 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4720 	},
4721 	{
4722 		"invalid map access into an array with a constant",
4723 		.insns = {
4724 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4725 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4726 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4727 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4728 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4729 				     BPF_FUNC_map_lookup_elem),
4730 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4731 			BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
4732 				   offsetof(struct test_val, foo)),
4733 			BPF_EXIT_INSN(),
4734 		},
4735 		.fixup_map_hash_48b = { 3 },
4736 		.errstr = "invalid access to map value, value_size=48 off=48 size=8",
4737 		.result = REJECT,
4738 	},
4739 	{
4740 		"invalid map access into an array with a register",
4741 		.insns = {
4742 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4743 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4744 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4745 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4746 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4747 				     BPF_FUNC_map_lookup_elem),
4748 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4749 			BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
4750 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4751 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4752 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4753 				   offsetof(struct test_val, foo)),
4754 			BPF_EXIT_INSN(),
4755 		},
4756 		.fixup_map_hash_48b = { 3 },
4757 		.errstr = "R0 min value is outside of the array range",
4758 		.result = REJECT,
4759 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4760 	},
4761 	{
4762 		"invalid map access into an array with a variable",
4763 		.insns = {
4764 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4765 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4766 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4767 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4768 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4769 				     BPF_FUNC_map_lookup_elem),
4770 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4771 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4772 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4773 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4774 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4775 				   offsetof(struct test_val, foo)),
4776 			BPF_EXIT_INSN(),
4777 		},
4778 		.fixup_map_hash_48b = { 3 },
4779 		.errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
4780 		.result = REJECT,
4781 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4782 	},
4783 	{
4784 		"invalid map access into an array with no floor check",
4785 		.insns = {
4786 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4787 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4788 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4789 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4790 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4791 				     BPF_FUNC_map_lookup_elem),
4792 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4793 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
4794 			BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
4795 			BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
4796 			BPF_MOV32_IMM(BPF_REG_1, 0),
4797 			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4798 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4799 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4800 				   offsetof(struct test_val, foo)),
4801 			BPF_EXIT_INSN(),
4802 		},
4803 		.fixup_map_hash_48b = { 3 },
4804 		.errstr_unpriv = "R0 leaks addr",
4805 		.errstr = "R0 unbounded memory access",
4806 		.result_unpriv = REJECT,
4807 		.result = REJECT,
4808 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4809 	},
4810 	{
4811 		"invalid map access into an array with an invalid max check",
4812 		.insns = {
4813 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4814 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4815 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4816 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4817 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4818 				     BPF_FUNC_map_lookup_elem),
4819 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4820 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4821 			BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
4822 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
4823 			BPF_MOV32_IMM(BPF_REG_1, 0),
4824 			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4825 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4826 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4827 				   offsetof(struct test_val, foo)),
4828 			BPF_EXIT_INSN(),
4829 		},
4830 		.fixup_map_hash_48b = { 3 },
4831 		.errstr_unpriv = "R0 leaks addr",
4832 		.errstr = "invalid access to map value, value_size=48 off=44 size=8",
4833 		.result_unpriv = REJECT,
4834 		.result = REJECT,
4835 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4836 	},
4837 	{
4838 		"invalid map access into an array with an invalid max check",
4839 		.insns = {
4840 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4841 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4842 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4843 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4844 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4845 				     BPF_FUNC_map_lookup_elem),
4846 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
4847 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
4848 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4849 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4850 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4851 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4852 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4853 				     BPF_FUNC_map_lookup_elem),
4854 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4855 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
4856 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
4857 				    offsetof(struct test_val, foo)),
4858 			BPF_EXIT_INSN(),
4859 		},
4860 		.fixup_map_hash_48b = { 3, 11 },
4861 		.errstr = "R0 pointer += pointer",
4862 		.result = REJECT,
4863 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4864 	},
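	/* CGROUP_SKB context access: data/data_end and most __sk_buff
	 * fields are readable (data only for privileged loads), while
	 * fields not exposed to this program type (tc_classid, data_meta,
	 * flow_keys) and writes to read-only fields (napi_id) are rejected
	 * as invalid bpf_context access.
	 */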
4865 	{
4866 		"direct packet read test#1 for CGROUP_SKB",
4867 		.insns = {
4868 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4869 				    offsetof(struct __sk_buff, data)),
4870 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4871 				    offsetof(struct __sk_buff, data_end)),
4872 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
4873 				    offsetof(struct __sk_buff, len)),
4874 			BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
4875 				    offsetof(struct __sk_buff, pkt_type)),
4876 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4877 				    offsetof(struct __sk_buff, mark)),
4878 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
4879 				    offsetof(struct __sk_buff, mark)),
4880 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4881 				    offsetof(struct __sk_buff, queue_mapping)),
4882 			BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
4883 				    offsetof(struct __sk_buff, protocol)),
4884 			BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
4885 				    offsetof(struct __sk_buff, vlan_present)),
4886 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4887 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4888 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4889 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4890 			BPF_MOV64_IMM(BPF_REG_0, 0),
4891 			BPF_EXIT_INSN(),
4892 		},
4893 		.result = ACCEPT,
4894 		.result_unpriv = REJECT,
4895 		.errstr_unpriv = "invalid bpf_context access off=76 size=4",
4896 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4897 	},
4898 	{
4899 		"direct packet read test#2 for CGROUP_SKB",
4900 		.insns = {
4901 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
4902 				    offsetof(struct __sk_buff, vlan_tci)),
4903 			BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
4904 				    offsetof(struct __sk_buff, vlan_proto)),
4905 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4906 				    offsetof(struct __sk_buff, priority)),
4907 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
4908 				    offsetof(struct __sk_buff, priority)),
4909 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4910 				    offsetof(struct __sk_buff,
4911 					     ingress_ifindex)),
4912 			BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
4913 				    offsetof(struct __sk_buff, tc_index)),
4914 			BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
4915 				    offsetof(struct __sk_buff, hash)),
4916 			BPF_MOV64_IMM(BPF_REG_0, 0),
4917 			BPF_EXIT_INSN(),
4918 		},
4919 		.result = ACCEPT,
4920 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4921 	},
4922 	{
4923 		"direct packet read test#3 for CGROUP_SKB",
4924 		.insns = {
4925 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
4926 				    offsetof(struct __sk_buff, cb[0])),
4927 			BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
4928 				    offsetof(struct __sk_buff, cb[1])),
4929 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4930 				    offsetof(struct __sk_buff, cb[2])),
4931 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4932 				    offsetof(struct __sk_buff, cb[3])),
4933 			BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
4934 				    offsetof(struct __sk_buff, cb[4])),
4935 			BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
4936 				    offsetof(struct __sk_buff, napi_id)),
4937 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_4,
4938 				    offsetof(struct __sk_buff, cb[0])),
4939 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_5,
4940 				    offsetof(struct __sk_buff, cb[1])),
4941 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
4942 				    offsetof(struct __sk_buff, cb[2])),
4943 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_7,
4944 				    offsetof(struct __sk_buff, cb[3])),
4945 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_8,
4946 				    offsetof(struct __sk_buff, cb[4])),
4947 			BPF_MOV64_IMM(BPF_REG_0, 0),
4948 			BPF_EXIT_INSN(),
4949 		},
4950 		.result = ACCEPT,
4951 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4952 	},
4953 	{
4954 		"direct packet read test#4 for CGROUP_SKB",
4955 		.insns = {
4956 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4957 				    offsetof(struct __sk_buff, family)),
4958 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4959 				    offsetof(struct __sk_buff, remote_ip4)),
4960 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
4961 				    offsetof(struct __sk_buff, local_ip4)),
4962 			BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
4963 				    offsetof(struct __sk_buff, remote_ip6[0])),
4964 			BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
4965 				    offsetof(struct __sk_buff, remote_ip6[1])),
4966 			BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
4967 				    offsetof(struct __sk_buff, remote_ip6[2])),
4968 			BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
4969 				    offsetof(struct __sk_buff, remote_ip6[3])),
4970 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4971 				    offsetof(struct __sk_buff, local_ip6[0])),
4972 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4973 				    offsetof(struct __sk_buff, local_ip6[1])),
4974 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4975 				    offsetof(struct __sk_buff, local_ip6[2])),
4976 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4977 				    offsetof(struct __sk_buff, local_ip6[3])),
4978 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4979 				    offsetof(struct __sk_buff, remote_port)),
4980 			BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
4981 				    offsetof(struct __sk_buff, local_port)),
4982 			BPF_MOV64_IMM(BPF_REG_0, 0),
4983 			BPF_EXIT_INSN(),
4984 		},
4985 		.result = ACCEPT,
4986 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4987 	},
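	/* tc_classid, data_meta and flow_keys are not part of the CGROUP_SKB
	 * context, and napi_id is readable but not writable; all four
	 * accesses below must be rejected as invalid bpf_context access.
	 */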
4988 	{
4989 		"invalid access of tc_classid for CGROUP_SKB",
4990 		.insns = {
4991 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
4992 				    offsetof(struct __sk_buff, tc_classid)),
4993 			BPF_MOV64_IMM(BPF_REG_0, 0),
4994 			BPF_EXIT_INSN(),
4995 		},
4996 		.result = REJECT,
4997 		.errstr = "invalid bpf_context access",
4998 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4999 	},
5000 	{
5001 		"invalid access of data_meta for CGROUP_SKB",
5002 		.insns = {
5003 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5004 				    offsetof(struct __sk_buff, data_meta)),
5005 			BPF_MOV64_IMM(BPF_REG_0, 0),
5006 			BPF_EXIT_INSN(),
5007 		},
5008 		.result = REJECT,
5009 		.errstr = "invalid bpf_context access",
5010 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5011 	},
5012 	{
5013 		"invalid access of flow_keys for CGROUP_SKB",
5014 		.insns = {
5015 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5016 				    offsetof(struct __sk_buff, flow_keys)),
5017 			BPF_MOV64_IMM(BPF_REG_0, 0),
5018 			BPF_EXIT_INSN(),
5019 		},
5020 		.result = REJECT,
5021 		.errstr = "invalid bpf_context access",
5022 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5023 	},
5024 	{
5025 		"invalid write access to napi_id for CGROUP_SKB",
5026 		.insns = {
5027 			BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
5028 				    offsetof(struct __sk_buff, napi_id)),
5029 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_9,
5030 				    offsetof(struct __sk_buff, napi_id)),
5031 			BPF_MOV64_IMM(BPF_REG_0, 0),
5032 			BPF_EXIT_INSN(),
5033 		},
5034 		.result = REJECT,
5035 		.errstr = "invalid bpf_context access",
5036 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5037 	},
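	/* bpf_get_local_storage() with a shared cgroup storage map: the map
	 * argument must be a cgroup storage map (a hash map is rejected),
	 * the map fd must be valid, accesses to the returned value must stay
	 * within value_size, and the flags argument must be zero.  Passing
	 * the ctx pointer as flags is additionally flagged as an address
	 * leak under unprivileged loading.
	 */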
5038 	{
5039 		"valid cgroup storage access",
5040 		.insns = {
5041 			BPF_MOV64_IMM(BPF_REG_2, 0),
5042 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5043 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5044 				     BPF_FUNC_get_local_storage),
5045 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5046 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5047 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5048 			BPF_EXIT_INSN(),
5049 		},
5050 		.fixup_cgroup_storage = { 1 },
5051 		.result = ACCEPT,
5052 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5053 	},
5054 	{
5055 		"invalid cgroup storage access 1",
5056 		.insns = {
5057 			BPF_MOV64_IMM(BPF_REG_2, 0),
5058 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5059 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5060 				     BPF_FUNC_get_local_storage),
5061 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5062 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5063 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5064 			BPF_EXIT_INSN(),
5065 		},
5066 		.fixup_map_hash_8b = { 1 },
5067 		.result = REJECT,
5068 		.errstr = "cannot pass map_type 1 into func bpf_get_local_storage",
5069 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5070 	},
5071 	{
5072 		"invalid cgroup storage access 2",
5073 		.insns = {
5074 			BPF_MOV64_IMM(BPF_REG_2, 0),
5075 			BPF_LD_MAP_FD(BPF_REG_1, 1),
5076 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5077 				     BPF_FUNC_get_local_storage),
5078 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5079 			BPF_EXIT_INSN(),
5080 		},
5081 		.result = REJECT,
5082 		.errstr = "fd 1 is not pointing to valid bpf_map",
5083 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5084 	},
5085 	{
5086 		"invalid cgroup storage access 3",
5087 		.insns = {
5088 			BPF_MOV64_IMM(BPF_REG_2, 0),
5089 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5090 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5091 				     BPF_FUNC_get_local_storage),
5092 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256),
5093 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5094 			BPF_MOV64_IMM(BPF_REG_0, 0),
5095 			BPF_EXIT_INSN(),
5096 		},
5097 		.fixup_cgroup_storage = { 1 },
5098 		.result = REJECT,
5099 		.errstr = "invalid access to map value, value_size=64 off=256 size=4",
5100 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5101 	},
5102 	{
5103 		"invalid cgroup storage access 4",
5104 		.insns = {
5105 			BPF_MOV64_IMM(BPF_REG_2, 0),
5106 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5107 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5108 				     BPF_FUNC_get_local_storage),
5109 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2),
5110 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5111 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5112 			BPF_EXIT_INSN(),
5113 		},
5114 		.fixup_cgroup_storage = { 1 },
5115 		.result = REJECT,
5116 		.errstr = "invalid access to map value, value_size=64 off=-2 size=4",
5117 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5118 	},
5119 	{
5120 		"invalid cgroup storage access 5",
5121 		.insns = {
5122 			BPF_MOV64_IMM(BPF_REG_2, 7),
5123 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5124 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5125 				     BPF_FUNC_get_local_storage),
5126 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5127 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5128 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5129 			BPF_EXIT_INSN(),
5130 		},
5131 		.fixup_cgroup_storage = { 1 },
5132 		.result = REJECT,
5133 		.errstr = "get_local_storage() doesn't support non-zero flags",
5134 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5135 	},
5136 	{
5137 		"invalid cgroup storage access 6",
5138 		.insns = {
5139 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
5140 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5141 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5142 				     BPF_FUNC_get_local_storage),
5143 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5144 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5145 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5146 			BPF_EXIT_INSN(),
5147 		},
5148 		.fixup_cgroup_storage = { 1 },
5149 		.result = REJECT,
5150 		.errstr = "get_local_storage() doesn't support non-zero flags",
5151 		.errstr_unpriv = "R2 leaks addr into helper function",
5152 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5153 	},
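	/* The same bpf_get_local_storage() checks, repeated against a
	 * per-cpu cgroup storage map.
	 */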
5154 	{
5155 		"valid per-cpu cgroup storage access",
5156 		.insns = {
5157 			BPF_MOV64_IMM(BPF_REG_2, 0),
5158 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5159 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5160 				     BPF_FUNC_get_local_storage),
5161 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5162 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5163 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5164 			BPF_EXIT_INSN(),
5165 		},
5166 		.fixup_percpu_cgroup_storage = { 1 },
5167 		.result = ACCEPT,
5168 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5169 	},
5170 	{
5171 		"invalid per-cpu cgroup storage access 1",
5172 		.insns = {
5173 			BPF_MOV64_IMM(BPF_REG_2, 0),
5174 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5175 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5176 				     BPF_FUNC_get_local_storage),
5177 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5178 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5179 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5180 			BPF_EXIT_INSN(),
5181 		},
5182 		.fixup_map_hash_8b = { 1 },
5183 		.result = REJECT,
5184 		.errstr = "cannot pass map_type 1 into func bpf_get_local_storage",
5185 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5186 	},
5187 	{
5188 		"invalid per-cpu cgroup storage access 2",
5189 		.insns = {
5190 			BPF_MOV64_IMM(BPF_REG_2, 0),
5191 			BPF_LD_MAP_FD(BPF_REG_1, 1),
5192 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5193 				     BPF_FUNC_get_local_storage),
5194 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5195 			BPF_EXIT_INSN(),
5196 		},
5197 		.result = REJECT,
5198 		.errstr = "fd 1 is not pointing to valid bpf_map",
5199 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5200 	},
5201 	{
5202 		"invalid per-cpu cgroup storage access 3",
5203 		.insns = {
5204 			BPF_MOV64_IMM(BPF_REG_2, 0),
5205 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5206 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5207 				     BPF_FUNC_get_local_storage),
5208 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256),
5209 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5210 			BPF_MOV64_IMM(BPF_REG_0, 0),
5211 			BPF_EXIT_INSN(),
5212 		},
5213 		.fixup_percpu_cgroup_storage = { 1 },
5214 		.result = REJECT,
5215 		.errstr = "invalid access to map value, value_size=64 off=256 size=4",
5216 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5217 	},
5218 	{
5219 		"invalid per-cpu cgroup storage access 4",
5220 		.insns = {
5221 			BPF_MOV64_IMM(BPF_REG_2, 0),
5222 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5223 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5224 				     BPF_FUNC_get_local_storage),
5225 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2),
5226 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5227 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5228 			BPF_EXIT_INSN(),
5229 		},
5230 		.fixup_percpu_cgroup_storage = { 1 },
5231 		.result = REJECT,
5232 		.errstr = "invalid access to map value, value_size=64 off=-2 size=4",
5233 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5234 	},
5235 	{
5236 		"invalid per-cpu cgroup storage access 5",
5237 		.insns = {
5238 			BPF_MOV64_IMM(BPF_REG_2, 7),
5239 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5240 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5241 				     BPF_FUNC_get_local_storage),
5242 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5243 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5244 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5245 			BPF_EXIT_INSN(),
5246 		},
5247 		.fixup_percpu_cgroup_storage = { 1 },
5248 		.result = REJECT,
5249 		.errstr = "get_local_storage() doesn't support non-zero flags",
5250 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5251 	},
5252 	{
5253 		"invalid per-cpu cgroup storage access 6",
5254 		.insns = {
5255 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
5256 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5257 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5258 				     BPF_FUNC_get_local_storage),
5259 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5260 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5261 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5262 			BPF_EXIT_INSN(),
5263 		},
5264 		.fixup_percpu_cgroup_storage = { 1 },
5265 		.result = REJECT,
5266 		.errstr = "get_local_storage() doesn't support non-zero flags",
5267 		.errstr_unpriv = "R2 leaks addr into helper function",
5268 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5269 	},
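	/* PTR_TO_MAP_VALUE_OR_NULL handling: copying the map_lookup_elem()
	 * result to another register shares the NULL-check state, ALU ops on
	 * the still-unchecked pointer are rejected, and a result parked in a
	 * caller-saved register (R4) becomes unreadable after a second
	 * helper call.
	 */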
5270 	{
5271 		"multiple registers share map_lookup_elem result",
5272 		.insns = {
5273 			BPF_MOV64_IMM(BPF_REG_1, 10),
5274 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5275 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5276 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5277 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5278 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5279 				     BPF_FUNC_map_lookup_elem),
5280 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5281 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5282 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5283 			BPF_EXIT_INSN(),
5284 		},
5285 		.fixup_map_hash_8b = { 4 },
5286 		.result = ACCEPT,
5287 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
5288 	},
5289 	{
5290 		"alu ops on ptr_to_map_value_or_null, 1",
5291 		.insns = {
5292 			BPF_MOV64_IMM(BPF_REG_1, 10),
5293 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5294 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5295 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5296 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5297 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5298 				     BPF_FUNC_map_lookup_elem),
5299 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5300 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2),
5301 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
5302 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5303 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5304 			BPF_EXIT_INSN(),
5305 		},
5306 		.fixup_map_hash_8b = { 4 },
5307 		.errstr = "R4 pointer arithmetic on map_value_or_null",
5308 		.result = REJECT,
5309 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
5310 	},
5311 	{
5312 		"alu ops on ptr_to_map_value_or_null, 2",
5313 		.insns = {
5314 			BPF_MOV64_IMM(BPF_REG_1, 10),
5315 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5316 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5317 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5318 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5319 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5320 				     BPF_FUNC_map_lookup_elem),
5321 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5322 			BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1),
5323 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5324 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5325 			BPF_EXIT_INSN(),
5326 		},
5327 		.fixup_map_hash_8b = { 4 },
5328 		.errstr = "R4 pointer arithmetic on map_value_or_null",
5329 		.result = REJECT,
5330 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
5331 	},
5332 	{
5333 		"alu ops on ptr_to_map_value_or_null, 3",
5334 		.insns = {
5335 			BPF_MOV64_IMM(BPF_REG_1, 10),
5336 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5337 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5338 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5339 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5340 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5341 				     BPF_FUNC_map_lookup_elem),
5342 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5343 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1),
5344 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5345 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5346 			BPF_EXIT_INSN(),
5347 		},
5348 		.fixup_map_hash_8b = { 4 },
5349 		.errstr = "R4 pointer arithmetic on map_value_or_null",
5350 		.result = REJECT,
5351 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
5352 	},
5353 	{
5354 		"invalid memory access with multiple map_lookup_elem calls",
5355 		.insns = {
5356 			BPF_MOV64_IMM(BPF_REG_1, 10),
5357 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5358 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5359 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5360 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5361 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
5362 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
5363 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5364 				     BPF_FUNC_map_lookup_elem),
5365 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5366 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
5367 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
5368 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5369 				     BPF_FUNC_map_lookup_elem),
5370 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5371 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5372 			BPF_EXIT_INSN(),
5373 		},
5374 		.fixup_map_hash_8b = { 4 },
5375 		.result = REJECT,
5376 		.errstr = "R4 !read_ok",
5377 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
5378 	},
5379 	{
5380 		"valid indirect map_lookup_elem access with 2nd lookup in branch",
5381 		.insns = {
5382 			BPF_MOV64_IMM(BPF_REG_1, 10),
5383 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5384 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5385 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5386 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5387 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
5388 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
5389 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5390 				     BPF_FUNC_map_lookup_elem),
5391 			BPF_MOV64_IMM(BPF_REG_2, 10),
5392 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
5393 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
5394 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
5395 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5396 				     BPF_FUNC_map_lookup_elem),
5397 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5398 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5399 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5400 			BPF_EXIT_INSN(),
5401 		},
5402 		.fixup_map_hash_8b = { 4 },
5403 		.result = ACCEPT,
5404 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
5405 	},
5406 	{
5407 		"invalid map access from else condition",
5408 		.insns = {
5409 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5410 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5411 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5412 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5413 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
5414 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5415 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5416 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1),
5417 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5418 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
5419 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
5420 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
5421 			BPF_EXIT_INSN(),
5422 		},
5423 		.fixup_map_hash_48b = { 3 },
5424 		.errstr = "R0 unbounded memory access",
5425 		.result = REJECT,
5426 		.errstr_unpriv = "R0 leaks addr",
5427 		.result_unpriv = REJECT,
5428 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5429 	},
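	/* Constant tracking through BPF_OR: OR-ing known constants still
	 * yields a known constant, which is then checked as the size
	 * argument of bpf_probe_read() against the stack buffer at fp-48
	 * (34 | 13 = 47 fits, 34 | 24 = 58 overflows).
	 */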
5430 	{
5431 		"constant register |= constant should keep constant type",
5432 		.insns = {
5433 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5434 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5435 			BPF_MOV64_IMM(BPF_REG_2, 34),
5436 			BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
5437 			BPF_MOV64_IMM(BPF_REG_3, 0),
5438 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5439 			BPF_EXIT_INSN(),
5440 		},
5441 		.result = ACCEPT,
5442 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5443 	},
5444 	{
5445 		"constant register |= constant should not bypass stack boundary checks",
5446 		.insns = {
5447 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5448 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5449 			BPF_MOV64_IMM(BPF_REG_2, 34),
5450 			BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
5451 			BPF_MOV64_IMM(BPF_REG_3, 0),
5452 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5453 			BPF_EXIT_INSN(),
5454 		},
5455 		.errstr = "invalid stack type R1 off=-48 access_size=58",
5456 		.result = REJECT,
5457 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5458 	},
5459 	{
5460 		"constant register |= constant register should keep constant type",
5461 		.insns = {
5462 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5463 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5464 			BPF_MOV64_IMM(BPF_REG_2, 34),
5465 			BPF_MOV64_IMM(BPF_REG_4, 13),
5466 			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
5467 			BPF_MOV64_IMM(BPF_REG_3, 0),
5468 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5469 			BPF_EXIT_INSN(),
5470 		},
5471 		.result = ACCEPT,
5472 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5473 	},
5474 	{
5475 		"constant register |= constant register should not bypass stack boundary checks",
5476 		.insns = {
5477 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5478 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5479 			BPF_MOV64_IMM(BPF_REG_2, 34),
5480 			BPF_MOV64_IMM(BPF_REG_4, 24),
5481 			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
5482 			BPF_MOV64_IMM(BPF_REG_3, 0),
5483 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5484 			BPF_EXIT_INSN(),
5485 		},
5486 		.errstr = "invalid stack type R1 off=-48 access_size=58",
5487 		.result = REJECT,
5488 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5489 	},
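	/* Direct packet access for the LWT program types: LWT_IN, LWT_OUT
	 * and LWT_XMIT may all read packet data after a data/data_end bounds
	 * check, but only LWT_XMIT may write to it; bpf_skb_change_head() is
	 * also exercised for LWT_XMIT.
	 */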
5490 	{
5491 		"invalid direct packet write for LWT_IN",
5492 		.insns = {
5493 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5494 				    offsetof(struct __sk_buff, data)),
5495 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5496 				    offsetof(struct __sk_buff, data_end)),
5497 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5498 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5499 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5500 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
5501 			BPF_MOV64_IMM(BPF_REG_0, 0),
5502 			BPF_EXIT_INSN(),
5503 		},
5504 		.errstr = "cannot write into packet",
5505 		.result = REJECT,
5506 		.prog_type = BPF_PROG_TYPE_LWT_IN,
5507 	},
5508 	{
5509 		"invalid direct packet write for LWT_OUT",
5510 		.insns = {
5511 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5512 				    offsetof(struct __sk_buff, data)),
5513 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5514 				    offsetof(struct __sk_buff, data_end)),
5515 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5516 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5517 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5518 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
5519 			BPF_MOV64_IMM(BPF_REG_0, 0),
5520 			BPF_EXIT_INSN(),
5521 		},
5522 		.errstr = "cannot write into packet",
5523 		.result = REJECT,
5524 		.prog_type = BPF_PROG_TYPE_LWT_OUT,
5525 	},
5526 	{
5527 		"direct packet write for LWT_XMIT",
5528 		.insns = {
5529 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5530 				    offsetof(struct __sk_buff, data)),
5531 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5532 				    offsetof(struct __sk_buff, data_end)),
5533 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5534 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5535 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5536 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
5537 			BPF_MOV64_IMM(BPF_REG_0, 0),
5538 			BPF_EXIT_INSN(),
5539 		},
5540 		.result = ACCEPT,
5541 		.prog_type = BPF_PROG_TYPE_LWT_XMIT,
5542 	},
5543 	{
5544 		"direct packet read for LWT_IN",
5545 		.insns = {
5546 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5547 				    offsetof(struct __sk_buff, data)),
5548 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5549 				    offsetof(struct __sk_buff, data_end)),
5550 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5551 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5552 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5553 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
5554 			BPF_MOV64_IMM(BPF_REG_0, 0),
5555 			BPF_EXIT_INSN(),
5556 		},
5557 		.result = ACCEPT,
5558 		.prog_type = BPF_PROG_TYPE_LWT_IN,
5559 	},
5560 	{
5561 		"direct packet read for LWT_OUT",
5562 		.insns = {
5563 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5564 				    offsetof(struct __sk_buff, data)),
5565 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5566 				    offsetof(struct __sk_buff, data_end)),
5567 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5568 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5569 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5570 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
5571 			BPF_MOV64_IMM(BPF_REG_0, 0),
5572 			BPF_EXIT_INSN(),
5573 		},
5574 		.result = ACCEPT,
5575 		.prog_type = BPF_PROG_TYPE_LWT_OUT,
5576 	},
5577 	{
5578 		"direct packet read for LWT_XMIT",
5579 		.insns = {
5580 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5581 				    offsetof(struct __sk_buff, data)),
5582 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5583 				    offsetof(struct __sk_buff, data_end)),
5584 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5585 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5586 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5587 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
5588 			BPF_MOV64_IMM(BPF_REG_0, 0),
5589 			BPF_EXIT_INSN(),
5590 		},
5591 		.result = ACCEPT,
5592 		.prog_type = BPF_PROG_TYPE_LWT_XMIT,
5593 	},
5594 	{
5595 		"overlapping checks for direct packet access",
5596 		.insns = {
5597 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5598 				    offsetof(struct __sk_buff, data)),
5599 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5600 				    offsetof(struct __sk_buff, data_end)),
5601 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5602 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5603 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
5604 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
5605 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
5606 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
5607 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
5608 			BPF_MOV64_IMM(BPF_REG_0, 0),
5609 			BPF_EXIT_INSN(),
5610 		},
5611 		.result = ACCEPT,
5612 		.prog_type = BPF_PROG_TYPE_LWT_XMIT,
5613 	},
5614 	{
5615 		"make headroom for LWT_XMIT",
5616 		.insns = {
5617 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5618 			BPF_MOV64_IMM(BPF_REG_2, 34),
5619 			BPF_MOV64_IMM(BPF_REG_3, 0),
5620 			BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
5621 			/* split for s390 to succeed */
5622 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
5623 			BPF_MOV64_IMM(BPF_REG_2, 42),
5624 			BPF_MOV64_IMM(BPF_REG_3, 0),
5625 			BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
5626 			BPF_MOV64_IMM(BPF_REG_0, 0),
5627 			BPF_EXIT_INSN(),
5628 		},
5629 		.result = ACCEPT,
5630 		.prog_type = BPF_PROG_TYPE_LWT_XMIT,
5631 	},
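	/* tc_classid is not accessible through the LWT __sk_buff context.
	 * These three entries leave .prog_type unset, so the harness loads
	 * them with its default program type, which rejects the access as
	 * well.
	 */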
5632 	{
5633 		"invalid access of tc_classid for LWT_IN",
5634 		.insns = {
5635 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5636 				    offsetof(struct __sk_buff, tc_classid)),
5637 			BPF_EXIT_INSN(),
5638 		},
5639 		.result = REJECT,
5640 		.errstr = "invalid bpf_context access",
5641 	},
5642 	{
5643 		"invalid access of tc_classid for LWT_OUT",
5644 		.insns = {
5645 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5646 				    offsetof(struct __sk_buff, tc_classid)),
5647 			BPF_EXIT_INSN(),
5648 		},
5649 		.result = REJECT,
5650 		.errstr = "invalid bpf_context access",
5651 	},
5652 	{
5653 		"invalid access of tc_classid for LWT_XMIT",
5654 		.insns = {
5655 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5656 				    offsetof(struct __sk_buff, tc_classid)),
5657 			BPF_EXIT_INSN(),
5658 		},
5659 		.result = REJECT,
5660 		.errstr = "invalid bpf_context access",
5661 	},
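	/* Pointer leak checks: storing a map or ctx pointer into the skb
	 * cb[] area or into a map value only fails verification for
	 * unprivileged loads ("leaks addr"), while BPF_XADD with a ctx
	 * pointer as the destination is rejected unconditionally.
	 */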
5662 	{
5663 		"leak pointer into ctx 1",
5664 		.insns = {
5665 			BPF_MOV64_IMM(BPF_REG_0, 0),
5666 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
5667 				    offsetof(struct __sk_buff, cb[0])),
5668 			BPF_LD_MAP_FD(BPF_REG_2, 0),
5669 			BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2,
5670 				      offsetof(struct __sk_buff, cb[0])),
5671 			BPF_EXIT_INSN(),
5672 		},
5673 		.fixup_map_hash_8b = { 2 },
5674 		.errstr_unpriv = "R2 leaks addr into mem",
5675 		.result_unpriv = REJECT,
5676 		.result = REJECT,
5677 		.errstr = "BPF_XADD stores into R1 ctx is not allowed",
5678 	},
5679 	{
5680 		"leak pointer into ctx 2",
5681 		.insns = {
5682 			BPF_MOV64_IMM(BPF_REG_0, 0),
5683 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
5684 				    offsetof(struct __sk_buff, cb[0])),
5685 			BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10,
5686 				      offsetof(struct __sk_buff, cb[0])),
5687 			BPF_EXIT_INSN(),
5688 		},
5689 		.errstr_unpriv = "R10 leaks addr into mem",
5690 		.result_unpriv = REJECT,
5691 		.result = REJECT,
5692 		.errstr = "BPF_XADD stores into R1 ctx is not allowed",
5693 	},
5694 	{
5695 		"leak pointer into ctx 3",
5696 		.insns = {
5697 			BPF_MOV64_IMM(BPF_REG_0, 0),
5698 			BPF_LD_MAP_FD(BPF_REG_2, 0),
5699 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2,
5700 				      offsetof(struct __sk_buff, cb[0])),
5701 			BPF_EXIT_INSN(),
5702 		},
5703 		.fixup_map_hash_8b = { 1 },
5704 		.errstr_unpriv = "R2 leaks addr into ctx",
5705 		.result_unpriv = REJECT,
5706 		.result = ACCEPT,
5707 	},
5708 	{
5709 		"leak pointer into map val",
5710 		.insns = {
5711 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5712 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5713 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5714 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5715 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5716 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5717 				     BPF_FUNC_map_lookup_elem),
5718 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
5719 			BPF_MOV64_IMM(BPF_REG_3, 0),
5720 			BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
5721 			BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
5722 			BPF_MOV64_IMM(BPF_REG_0, 0),
5723 			BPF_EXIT_INSN(),
5724 		},
5725 		.fixup_map_hash_8b = { 4 },
5726 		.errstr_unpriv = "R6 leaks addr into mem",
5727 		.result_unpriv = REJECT,
5728 		.result = ACCEPT,
5729 	},
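	/* Helper argument bounds against a map value: when the looked-up
	 * value is passed as the memory argument of bpf_probe_read() or
	 * bpf_trace_printk(), the size must be non-zero, non-negative and
	 * within value_size (48 bytes for the hash_48b map).
	 */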
5730 	{
5731 		"helper access to map: full range",
5732 		.insns = {
5733 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5734 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5735 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5736 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5737 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5738 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5739 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5740 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5741 			BPF_MOV64_IMM(BPF_REG_3, 0),
5742 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5743 			BPF_EXIT_INSN(),
5744 		},
5745 		.fixup_map_hash_48b = { 3 },
5746 		.result = ACCEPT,
5747 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5748 	},
5749 	{
5750 		"helper access to map: partial range",
5751 		.insns = {
5752 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5753 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5754 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5755 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5756 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5757 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5758 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5759 			BPF_MOV64_IMM(BPF_REG_2, 8),
5760 			BPF_MOV64_IMM(BPF_REG_3, 0),
5761 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5762 			BPF_EXIT_INSN(),
5763 		},
5764 		.fixup_map_hash_48b = { 3 },
5765 		.result = ACCEPT,
5766 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5767 	},
5768 	{
5769 		"helper access to map: empty range",
5770 		.insns = {
5771 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5772 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5773 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5774 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5775 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5776 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
5777 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5778 			BPF_MOV64_IMM(BPF_REG_2, 0),
5779 			BPF_EMIT_CALL(BPF_FUNC_trace_printk),
5780 			BPF_EXIT_INSN(),
5781 		},
5782 		.fixup_map_hash_48b = { 3 },
5783 		.errstr = "invalid access to map value, value_size=48 off=0 size=0",
5784 		.result = REJECT,
5785 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5786 	},
5787 	{
5788 		"helper access to map: out-of-bound range",
5789 		.insns = {
5790 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5791 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5792 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5793 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5794 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5795 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5796 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5797 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
5798 			BPF_MOV64_IMM(BPF_REG_3, 0),
5799 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5800 			BPF_EXIT_INSN(),
5801 		},
5802 		.fixup_map_hash_48b = { 3 },
5803 		.errstr = "invalid access to map value, value_size=48 off=0 size=56",
5804 		.result = REJECT,
5805 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5806 	},
5807 	{
5808 		"helper access to map: negative range",
5809 		.insns = {
5810 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5811 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5812 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5813 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5814 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5815 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5816 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5817 			BPF_MOV64_IMM(BPF_REG_2, -8),
5818 			BPF_MOV64_IMM(BPF_REG_3, 0),
5819 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5820 			BPF_EXIT_INSN(),
5821 		},
5822 		.fixup_map_hash_48b = { 3 },
5823 		.errstr = "R2 min value is negative",
5824 		.result = REJECT,
5825 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5826 	},
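	/* The same size checks, repeated after the map value pointer has
	 * been advanced before the helper call: by a constant immediate, by
	 * a register holding a known constant, and by a variable offset that
	 * must be bounded against the value size.
	 */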
5827 	{
5828 		"helper access to adjusted map (via const imm): full range",
5829 		.insns = {
5830 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5831 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5832 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5833 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5834 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5835 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5836 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5837 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5838 				offsetof(struct test_val, foo)),
5839 			BPF_MOV64_IMM(BPF_REG_2,
5840 				sizeof(struct test_val) -
5841 				offsetof(struct test_val, foo)),
5842 			BPF_MOV64_IMM(BPF_REG_3, 0),
5843 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5844 			BPF_EXIT_INSN(),
5845 		},
5846 		.fixup_map_hash_48b = { 3 },
5847 		.result = ACCEPT,
5848 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5849 	},
5850 	{
5851 		"helper access to adjusted map (via const imm): partial range",
5852 		.insns = {
5853 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5854 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5855 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5856 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5857 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5858 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5859 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5860 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5861 				offsetof(struct test_val, foo)),
5862 			BPF_MOV64_IMM(BPF_REG_2, 8),
5863 			BPF_MOV64_IMM(BPF_REG_3, 0),
5864 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5865 			BPF_EXIT_INSN(),
5866 		},
5867 		.fixup_map_hash_48b = { 3 },
5868 		.result = ACCEPT,
5869 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5870 	},
5871 	{
5872 		"helper access to adjusted map (via const imm): empty range",
5873 		.insns = {
5874 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5875 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5876 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5877 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5878 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5879 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5880 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5881 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5882 				offsetof(struct test_val, foo)),
5883 			BPF_MOV64_IMM(BPF_REG_2, 0),
5884 			BPF_EMIT_CALL(BPF_FUNC_trace_printk),
5885 			BPF_EXIT_INSN(),
5886 		},
5887 		.fixup_map_hash_48b = { 3 },
5888 		.errstr = "invalid access to map value, value_size=48 off=4 size=0",
5889 		.result = REJECT,
5890 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5891 	},
5892 	{
5893 		"helper access to adjusted map (via const imm): out-of-bound range",
5894 		.insns = {
5895 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5896 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5897 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5898 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5899 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5900 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5901 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5902 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5903 				offsetof(struct test_val, foo)),
5904 			BPF_MOV64_IMM(BPF_REG_2,
5905 				sizeof(struct test_val) -
5906 				offsetof(struct test_val, foo) + 8),
5907 			BPF_MOV64_IMM(BPF_REG_3, 0),
5908 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5909 			BPF_EXIT_INSN(),
5910 		},
5911 		.fixup_map_hash_48b = { 3 },
5912 		.errstr = "invalid access to map value, value_size=48 off=4 size=52",
5913 		.result = REJECT,
5914 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5915 	},
5916 	{
5917 		"helper access to adjusted map (via const imm): negative range (> adjustment)",
5918 		.insns = {
5919 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5920 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5921 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5922 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5923 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5924 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5925 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5926 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5927 				offsetof(struct test_val, foo)),
5928 			BPF_MOV64_IMM(BPF_REG_2, -8),
5929 			BPF_MOV64_IMM(BPF_REG_3, 0),
5930 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5931 			BPF_EXIT_INSN(),
5932 		},
5933 		.fixup_map_hash_48b = { 3 },
5934 		.errstr = "R2 min value is negative",
5935 		.result = REJECT,
5936 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5937 	},
5938 	{
5939 		"helper access to adjusted map (via const imm): negative range (< adjustment)",
5940 		.insns = {
5941 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5942 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5943 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5944 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5945 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5946 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5947 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5948 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5949 				offsetof(struct test_val, foo)),
5950 			BPF_MOV64_IMM(BPF_REG_2, -1),
5951 			BPF_MOV64_IMM(BPF_REG_3, 0),
5952 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5953 			BPF_EXIT_INSN(),
5954 		},
5955 		.fixup_map_hash_48b = { 3 },
5956 		.errstr = "R2 min value is negative",
5957 		.result = REJECT,
5958 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5959 	},
5960 	{
5961 		"helper access to adjusted map (via const reg): full range",
5962 		.insns = {
5963 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5964 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5965 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5966 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5967 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5968 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5969 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5970 			BPF_MOV64_IMM(BPF_REG_3,
5971 				offsetof(struct test_val, foo)),
5972 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5973 			BPF_MOV64_IMM(BPF_REG_2,
5974 				sizeof(struct test_val) -
5975 				offsetof(struct test_val, foo)),
5976 			BPF_MOV64_IMM(BPF_REG_3, 0),
5977 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5978 			BPF_EXIT_INSN(),
5979 		},
5980 		.fixup_map_hash_48b = { 3 },
5981 		.result = ACCEPT,
5982 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5983 	},
5984 	{
5985 		"helper access to adjusted map (via const reg): partial range",
5986 		.insns = {
5987 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5988 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5989 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5990 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5991 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5992 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5993 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5994 			BPF_MOV64_IMM(BPF_REG_3,
5995 				offsetof(struct test_val, foo)),
5996 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5997 			BPF_MOV64_IMM(BPF_REG_2, 8),
5998 			BPF_MOV64_IMM(BPF_REG_3, 0),
5999 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6000 			BPF_EXIT_INSN(),
6001 		},
6002 		.fixup_map_hash_48b = { 3 },
6003 		.result = ACCEPT,
6004 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6005 	},
6006 	{
6007 		"helper access to adjusted map (via const reg): empty range",
6008 		.insns = {
6009 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6010 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6011 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6012 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6013 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6014 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6015 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6016 			BPF_MOV64_IMM(BPF_REG_3, 0),
6017 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6018 			BPF_MOV64_IMM(BPF_REG_2, 0),
6019 			BPF_EMIT_CALL(BPF_FUNC_trace_printk),
6020 			BPF_EXIT_INSN(),
6021 		},
6022 		.fixup_map_hash_48b = { 3 },
6023 		.errstr = "R1 min value is outside of the array range",
6024 		.result = REJECT,
6025 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6026 	},
6027 	{
6028 		"helper access to adjusted map (via const reg): out-of-bound range",
6029 		.insns = {
6030 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6031 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6032 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6033 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6034 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6035 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6036 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6037 			BPF_MOV64_IMM(BPF_REG_3,
6038 				offsetof(struct test_val, foo)),
6039 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6040 			BPF_MOV64_IMM(BPF_REG_2,
6041 				sizeof(struct test_val) -
6042 				offsetof(struct test_val, foo) + 8),
6043 			BPF_MOV64_IMM(BPF_REG_3, 0),
6044 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6045 			BPF_EXIT_INSN(),
6046 		},
6047 		.fixup_map_hash_48b = { 3 },
6048 		.errstr = "invalid access to map value, value_size=48 off=4 size=52",
6049 		.result = REJECT,
6050 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6051 	},
6052 	{
6053 		"helper access to adjusted map (via const reg): negative range (> adjustment)",
6054 		.insns = {
6055 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6056 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6057 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6058 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6059 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6060 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6061 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6062 			BPF_MOV64_IMM(BPF_REG_3,
6063 				offsetof(struct test_val, foo)),
6064 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6065 			BPF_MOV64_IMM(BPF_REG_2, -8),
6066 			BPF_MOV64_IMM(BPF_REG_3, 0),
6067 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6068 			BPF_EXIT_INSN(),
6069 		},
6070 		.fixup_map_hash_48b = { 3 },
6071 		.errstr = "R2 min value is negative",
6072 		.result = REJECT,
6073 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6074 	},
6075 	{
6076 		"helper access to adjusted map (via const reg): negative range (< adjustment)",
6077 		.insns = {
6078 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6079 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6080 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6081 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6082 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6083 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6084 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6085 			BPF_MOV64_IMM(BPF_REG_3,
6086 				offsetof(struct test_val, foo)),
6087 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6088 			BPF_MOV64_IMM(BPF_REG_2, -1),
6089 			BPF_MOV64_IMM(BPF_REG_3, 0),
6090 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6091 			BPF_EXIT_INSN(),
6092 		},
6093 		.fixup_map_hash_48b = { 3 },
6094 		.errstr = "R2 min value is negative",
6095 		.result = REJECT,
6096 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6097 	},
6098 	{
6099 		"helper access to adjusted map (via variable): full range",
6100 		.insns = {
6101 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6102 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6103 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6104 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6105 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6106 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6107 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6108 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6109 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6110 				offsetof(struct test_val, foo), 4),
6111 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6112 			BPF_MOV64_IMM(BPF_REG_2,
6113 				sizeof(struct test_val) -
6114 				offsetof(struct test_val, foo)),
6115 			BPF_MOV64_IMM(BPF_REG_3, 0),
6116 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6117 			BPF_EXIT_INSN(),
6118 		},
6119 		.fixup_map_hash_48b = { 3 },
6120 		.result = ACCEPT,
6121 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6122 	},
6123 	{
6124 		"helper access to adjusted map (via variable): partial range",
6125 		.insns = {
6126 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6127 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6128 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6129 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6130 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6131 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6132 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6133 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6134 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6135 				offsetof(struct test_val, foo), 4),
6136 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6137 			BPF_MOV64_IMM(BPF_REG_2, 8),
6138 			BPF_MOV64_IMM(BPF_REG_3, 0),
6139 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6140 			BPF_EXIT_INSN(),
6141 		},
6142 		.fixup_map_hash_48b = { 3 },
6143 		.result = ACCEPT,
6144 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6145 	},
6146 	{
6147 		"helper access to adjusted map (via variable): empty range",
6148 		.insns = {
6149 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6150 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6151 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6152 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6153 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6154 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6155 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6156 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6157 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6158 				offsetof(struct test_val, foo), 3),
6159 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6160 			BPF_MOV64_IMM(BPF_REG_2, 0),
6161 			BPF_EMIT_CALL(BPF_FUNC_trace_printk),
6162 			BPF_EXIT_INSN(),
6163 		},
6164 		.fixup_map_hash_48b = { 3 },
6165 		.errstr = "R1 min value is outside of the array range",
6166 		.result = REJECT,
6167 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6168 	},
6169 	{
6170 		"helper access to adjusted map (via variable): no max check",
6171 		.insns = {
6172 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6173 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6174 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6175 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6176 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6177 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6178 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6179 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6180 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6181 			BPF_MOV64_IMM(BPF_REG_2, 1),
6182 			BPF_MOV64_IMM(BPF_REG_3, 0),
6183 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6184 			BPF_EXIT_INSN(),
6185 		},
6186 		.fixup_map_hash_48b = { 3 },
6187 		.errstr = "R1 unbounded memory access",
6188 		.result = REJECT,
6189 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6190 	},
6191 	{
6192 		"helper access to adjusted map (via variable): wrong max check",
6193 		.insns = {
6194 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6195 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6196 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6197 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6198 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6199 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6200 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6201 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6202 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6203 				offsetof(struct test_val, foo), 4),
6204 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6205 			BPF_MOV64_IMM(BPF_REG_2,
6206 				sizeof(struct test_val) -
6207 				offsetof(struct test_val, foo) + 1),
6208 			BPF_MOV64_IMM(BPF_REG_3, 0),
6209 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6210 			BPF_EXIT_INSN(),
6211 		},
6212 		.fixup_map_hash_48b = { 3 },
6213 		.errstr = "invalid access to map value, value_size=48 off=4 size=45",
6214 		.result = REJECT,
6215 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6216 	},
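	/* Bounds from conditional jumps: JLT/JLE on the loaded index provide
	 * the upper bound needed for a variable offset into the 48-byte
	 * value, while the access on the unchecked side of the branch stays
	 * unbounded.  For the signed JSLT/JSLE variants a 32-bit load is
	 * zero-extended and therefore non-negative, whereas a 64-bit load
	 * may still have a negative minimum and is rejected.
	 */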
6217 	{
6218 		"helper access to map: bounds check using <, good access",
6219 		.insns = {
6220 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6221 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6222 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6223 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6224 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6225 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6226 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6227 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6228 			BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 2),
6229 			BPF_MOV64_IMM(BPF_REG_0, 0),
6230 			BPF_EXIT_INSN(),
6231 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6232 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6233 			BPF_MOV64_IMM(BPF_REG_0, 0),
6234 			BPF_EXIT_INSN(),
6235 		},
6236 		.fixup_map_hash_48b = { 3 },
6237 		.result = ACCEPT,
6238 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6239 	},
6240 	{
6241 		"helper access to map: bounds check using <, bad access",
6242 		.insns = {
6243 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6244 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6245 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6246 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6247 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6248 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6249 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6250 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6251 			BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 4),
6252 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6253 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6254 			BPF_MOV64_IMM(BPF_REG_0, 0),
6255 			BPF_EXIT_INSN(),
6256 			BPF_MOV64_IMM(BPF_REG_0, 0),
6257 			BPF_EXIT_INSN(),
6258 		},
6259 		.fixup_map_hash_48b = { 3 },
6260 		.result = REJECT,
6261 		.errstr = "R1 unbounded memory access",
6262 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6263 	},
6264 	{
6265 		"helper access to map: bounds check using <=, good access",
6266 		.insns = {
6267 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6268 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6269 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6270 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6271 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6272 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6273 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6274 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6275 			BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 2),
6276 			BPF_MOV64_IMM(BPF_REG_0, 0),
6277 			BPF_EXIT_INSN(),
6278 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6279 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6280 			BPF_MOV64_IMM(BPF_REG_0, 0),
6281 			BPF_EXIT_INSN(),
6282 		},
6283 		.fixup_map_hash_48b = { 3 },
6284 		.result = ACCEPT,
6285 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6286 	},
6287 	{
6288 		"helper access to map: bounds check using <=, bad access",
6289 		.insns = {
6290 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6291 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6292 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6293 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6294 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6295 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6296 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6297 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6298 			BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 4),
6299 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6300 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6301 			BPF_MOV64_IMM(BPF_REG_0, 0),
6302 			BPF_EXIT_INSN(),
6303 			BPF_MOV64_IMM(BPF_REG_0, 0),
6304 			BPF_EXIT_INSN(),
6305 		},
6306 		.fixup_map_hash_48b = { 3 },
6307 		.result = REJECT,
6308 		.errstr = "R1 unbounded memory access",
6309 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6310 	},
6311 	{
6312 		"helper access to map: bounds check using s<, good access",
6313 		.insns = {
6314 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6315 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6316 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6317 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6318 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6319 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6320 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6321 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6322 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
6323 			BPF_MOV64_IMM(BPF_REG_0, 0),
6324 			BPF_EXIT_INSN(),
6325 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 0, -3),
6326 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6327 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6328 			BPF_MOV64_IMM(BPF_REG_0, 0),
6329 			BPF_EXIT_INSN(),
6330 		},
6331 		.fixup_map_hash_48b = { 3 },
6332 		.result = ACCEPT,
6333 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6334 	},
6335 	{
6336 		"helper access to map: bounds check using s<, good access 2",
6337 		.insns = {
6338 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6339 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6340 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6341 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6342 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6343 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6344 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6345 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6346 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
6347 			BPF_MOV64_IMM(BPF_REG_0, 0),
6348 			BPF_EXIT_INSN(),
6349 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
6350 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6351 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6352 			BPF_MOV64_IMM(BPF_REG_0, 0),
6353 			BPF_EXIT_INSN(),
6354 		},
6355 		.fixup_map_hash_48b = { 3 },
6356 		.result = ACCEPT,
6357 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6358 	},
6359 	{
6360 		"helper access to map: bounds check using s<, bad access",
6361 		.insns = {
6362 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6363 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6364 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6365 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6366 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6367 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6368 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6369 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
6370 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
6371 			BPF_MOV64_IMM(BPF_REG_0, 0),
6372 			BPF_EXIT_INSN(),
6373 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
6374 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6375 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6376 			BPF_MOV64_IMM(BPF_REG_0, 0),
6377 			BPF_EXIT_INSN(),
6378 		},
6379 		.fixup_map_hash_48b = { 3 },
6380 		.result = REJECT,
6381 		.errstr = "R1 min value is negative",
6382 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6383 	},
6384 	{
6385 		"helper access to map: bounds check using s<=, good access",
6386 		.insns = {
6387 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6388 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6389 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6390 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6391 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6392 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6393 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6394 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6395 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
6396 			BPF_MOV64_IMM(BPF_REG_0, 0),
6397 			BPF_EXIT_INSN(),
6398 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 0, -3),
6399 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6400 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6401 			BPF_MOV64_IMM(BPF_REG_0, 0),
6402 			BPF_EXIT_INSN(),
6403 		},
6404 		.fixup_map_hash_48b = { 3 },
6405 		.result = ACCEPT,
6406 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6407 	},
6408 	{
6409 		"helper access to map: bounds check using s<=, good access 2",
6410 		.insns = {
6411 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6412 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6413 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6414 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6415 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6416 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6417 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6418 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6419 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
6420 			BPF_MOV64_IMM(BPF_REG_0, 0),
6421 			BPF_EXIT_INSN(),
6422 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
6423 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6424 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6425 			BPF_MOV64_IMM(BPF_REG_0, 0),
6426 			BPF_EXIT_INSN(),
6427 		},
6428 		.fixup_map_hash_48b = { 3 },
6429 		.result = ACCEPT,
6430 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6431 	},
6432 	{
6433 		"helper access to map: bounds check using s<=, bad access",
6434 		.insns = {
6435 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6436 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6437 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6438 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6439 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6440 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6441 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6442 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
6443 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
6444 			BPF_MOV64_IMM(BPF_REG_0, 0),
6445 			BPF_EXIT_INSN(),
6446 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
6447 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6448 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6449 			BPF_MOV64_IMM(BPF_REG_0, 0),
6450 			BPF_EXIT_INSN(),
6451 		},
6452 		.fixup_map_hash_48b = { 3 },
6453 		.result = REJECT,
6454 		.errstr = "R1 min value is negative",
6455 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6456 	},
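	/* A looked-up map value may itself be passed as the key or value
	 * argument of a further map_lookup_elem()/map_update_elem() call;
	 * the pointer, possibly advanced by a constant, must still leave
	 * room for the second map's key or value size.
	 */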
6457 	{
6458 		"map lookup helper access to map",
6459 		.insns = {
6460 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6461 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6462 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6463 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6464 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6465 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6466 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6467 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6468 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6469 			BPF_EXIT_INSN(),
6470 		},
6471 		.fixup_map_hash_16b = { 3, 8 },
6472 		.result = ACCEPT,
6473 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6474 	},
6475 	{
6476 		"map update helper access to map",
6477 		.insns = {
6478 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6479 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6480 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6481 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6482 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6483 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6484 			BPF_MOV64_IMM(BPF_REG_4, 0),
6485 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
6486 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6487 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6488 			BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
6489 			BPF_EXIT_INSN(),
6490 		},
6491 		.fixup_map_hash_16b = { 3, 10 },
6492 		.result = ACCEPT,
6493 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6494 	},
6495 	{
6496 		"map update helper access to map: wrong size",
6497 		.insns = {
6498 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6499 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6500 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6501 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6502 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6503 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6504 			BPF_MOV64_IMM(BPF_REG_4, 0),
6505 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
6506 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6507 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6508 			BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
6509 			BPF_EXIT_INSN(),
6510 		},
6511 		.fixup_map_hash_8b = { 3 },
6512 		.fixup_map_hash_16b = { 10 },
6513 		.result = REJECT,
6514 		.errstr = "invalid access to map value, value_size=8 off=0 size=16",
6515 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6516 	},
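	/* Adjusted map value pointers: the value pointer is offset by a
	 * constant immediate, a constant register, or a bounded variable
	 * before being handed back to bpf_map_lookup_elem(); offsets past the
	 * end of the value or negative offsets must be rejected.
	 */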
6517 	{
6518 		"map helper access to adjusted map (via const imm)",
6519 		.insns = {
6520 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6521 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6522 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6523 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6524 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6525 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6526 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6527 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
6528 				      offsetof(struct other_val, bar)),
6529 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6530 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6531 			BPF_EXIT_INSN(),
6532 		},
6533 		.fixup_map_hash_16b = { 3, 9 },
6534 		.result = ACCEPT,
6535 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6536 	},
6537 	{
6538 		"map helper access to adjusted map (via const imm): out-of-bound 1",
6539 		.insns = {
6540 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6541 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6542 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6543 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6544 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6545 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6546 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6547 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
6548 				      sizeof(struct other_val) - 4),
6549 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6550 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6551 			BPF_EXIT_INSN(),
6552 		},
6553 		.fixup_map_hash_16b = { 3, 9 },
6554 		.result = REJECT,
6555 		.errstr = "invalid access to map value, value_size=16 off=12 size=8",
6556 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6557 	},
6558 	{
6559 		"map helper access to adjusted map (via const imm): out-of-bound 2",
6560 		.insns = {
6561 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6562 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6563 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6564 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6565 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6566 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6567 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6568 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6569 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6570 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6571 			BPF_EXIT_INSN(),
6572 		},
6573 		.fixup_map_hash_16b = { 3, 9 },
6574 		.result = REJECT,
6575 		.errstr = "invalid access to map value, value_size=16 off=-4 size=8",
6576 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6577 	},
6578 	{
6579 		"map helper access to adjusted map (via const reg)",
6580 		.insns = {
6581 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6582 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6583 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6584 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6585 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6586 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6587 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6588 			BPF_MOV64_IMM(BPF_REG_3,
6589 				      offsetof(struct other_val, bar)),
6590 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6591 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6592 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6593 			BPF_EXIT_INSN(),
6594 		},
6595 		.fixup_map_hash_16b = { 3, 10 },
6596 		.result = ACCEPT,
6597 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6598 	},
6599 	{
6600 		"map helper access to adjusted map (via const reg): out-of-bound 1",
6601 		.insns = {
6602 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6603 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6604 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6605 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6606 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6607 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6608 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6609 			BPF_MOV64_IMM(BPF_REG_3,
6610 				      sizeof(struct other_val) - 4),
6611 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6612 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6613 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6614 			BPF_EXIT_INSN(),
6615 		},
6616 		.fixup_map_hash_16b = { 3, 10 },
6617 		.result = REJECT,
6618 		.errstr = "invalid access to map value, value_size=16 off=12 size=8",
6619 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6620 	},
6621 	{
6622 		"map helper access to adjusted map (via const reg): out-of-bound 2",
6623 		.insns = {
6624 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6625 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6626 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6627 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6628 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6629 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6630 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6631 			BPF_MOV64_IMM(BPF_REG_3, -4),
6632 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6633 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6634 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6635 			BPF_EXIT_INSN(),
6636 		},
6637 		.fixup_map_hash_16b = { 3, 10 },
6638 		.result = REJECT,
6639 		.errstr = "invalid access to map value, value_size=16 off=-4 size=8",
6640 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6641 	},
6642 	{
6643 		"map helper access to adjusted map (via variable)",
6644 		.insns = {
6645 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6646 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6647 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6648 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6649 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6650 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6651 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6652 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6653 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6654 				    offsetof(struct other_val, bar), 4),
6655 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6656 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6657 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6658 			BPF_EXIT_INSN(),
6659 		},
6660 		.fixup_map_hash_16b = { 3, 11 },
6661 		.result = ACCEPT,
6662 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6663 	},
6664 	{
6665 		"map helper access to adjusted map (via variable): no max check",
6666 		.insns = {
6667 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6668 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6669 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6670 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6671 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6672 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6673 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6674 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6675 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6676 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6677 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6678 			BPF_EXIT_INSN(),
6679 		},
6680 		.fixup_map_hash_16b = { 3, 10 },
6681 		.result = REJECT,
6682 		.errstr = "R2 unbounded memory access, make sure to bounds check any array access into a map",
6683 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6684 	},
6685 	{
6686 		"map helper access to adjusted map (via variable): wrong max check",
6687 		.insns = {
6688 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6689 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6690 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6691 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6692 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6693 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6694 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6695 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6696 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6697 				    offsetof(struct other_val, bar) + 1, 4),
6698 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6699 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6700 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6701 			BPF_EXIT_INSN(),
6702 		},
6703 		.fixup_map_hash_16b = { 3, 11 },
6704 		.result = REJECT,
6705 		.errstr = "invalid access to map value, value_size=16 off=9 size=8",
6706 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6707 	},
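	/* Spill/fill tracking: a map value pointer (including its
	 * possibly-NULL state) must survive being stored to the stack and
	 * loaded back, and a register cleared by a helper call must not be
	 * stored through afterwards.
	 */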
6708 	{
6709 		"map element value is preserved across register spilling",
6710 		.insns = {
6711 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6712 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6713 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6714 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6715 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6716 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6717 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
6718 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6719 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
6720 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
6721 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
6722 			BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
6723 			BPF_EXIT_INSN(),
6724 		},
6725 		.fixup_map_hash_48b = { 3 },
6726 		.errstr_unpriv = "R0 leaks addr",
6727 		.result = ACCEPT,
6728 		.result_unpriv = REJECT,
6729 	},
6730 	{
6731 		"map element value or null is marked on register spilling",
6732 		.insns = {
6733 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6734 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6735 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6736 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6737 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6738 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6739 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
6740 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
6741 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
6742 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
6743 			BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
6744 			BPF_EXIT_INSN(),
6745 		},
6746 		.fixup_map_hash_48b = { 3 },
6747 		.errstr_unpriv = "R0 leaks addr",
6748 		.result = ACCEPT,
6749 		.result_unpriv = REJECT,
6750 	},
6751 	{
6752 		"map element value store of cleared call register",
6753 		.insns = {
6754 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6755 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6756 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6757 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6758 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6759 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
6760 			BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
6761 			BPF_EXIT_INSN(),
6762 		},
6763 		.fixup_map_hash_48b = { 3 },
6764 		.errstr_unpriv = "R1 !read_ok",
6765 		.errstr = "R1 !read_ok",
6766 		.result = REJECT,
6767 		.result_unpriv = REJECT,
6768 	},
6769 	{
6770 		"map element value with unaligned store",
6771 		.insns = {
6772 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6773 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6774 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6775 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6776 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6777 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
6778 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
6779 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
6780 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
6781 			BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
6782 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
6783 			BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
6784 			BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
6785 			BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
6786 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
6787 			BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
6788 			BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
6789 			BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
6790 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
6791 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
6792 			BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
6793 			BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
6794 			BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
6795 			BPF_EXIT_INSN(),
6796 		},
6797 		.fixup_map_hash_48b = { 3 },
6798 		.errstr_unpriv = "R0 leaks addr",
6799 		.result = ACCEPT,
6800 		.result_unpriv = REJECT,
6801 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
6802 	},
6803 	{
6804 		"map element value with unaligned load",
6805 		.insns = {
6806 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6807 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6808 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6809 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6810 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6811 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
6812 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
6813 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
6814 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
6815 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
6816 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
6817 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
6818 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
6819 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
6820 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
6821 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
6822 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
6823 			BPF_EXIT_INSN(),
6824 		},
6825 		.fixup_map_hash_48b = { 3 },
6826 		.errstr_unpriv = "R0 leaks addr",
6827 		.result = ACCEPT,
6828 		.result_unpriv = REJECT,
6829 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
6830 	},
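	/* Illegal ALU operations on a map value pointer: bitwise AND, 32-bit
	 * arithmetic, division, byte swap and XADD on a spilled pointer are
	 * all rejected, either directly or because the pointer degrades to an
	 * unknown scalar ('inv') before the following store.
	 */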
6831 	{
6832 		"map element value illegal alu op, 1",
6833 		.insns = {
6834 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6835 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6836 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6837 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6838 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6839 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
6840 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
6841 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
6842 			BPF_EXIT_INSN(),
6843 		},
6844 		.fixup_map_hash_48b = { 3 },
6845 		.errstr = "R0 bitwise operator &= on pointer",
6846 		.result = REJECT,
6847 	},
6848 	{
6849 		"map element value illegal alu op, 2",
6850 		.insns = {
6851 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6852 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6853 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6854 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6855 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6856 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
6857 			BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
6858 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
6859 			BPF_EXIT_INSN(),
6860 		},
6861 		.fixup_map_hash_48b = { 3 },
6862 		.errstr = "R0 32-bit pointer arithmetic prohibited",
6863 		.result = REJECT,
6864 	},
6865 	{
6866 		"map element value illegal alu op, 3",
6867 		.insns = {
6868 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6869 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6870 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6871 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6872 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6873 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
6874 			BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
6875 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
6876 			BPF_EXIT_INSN(),
6877 		},
6878 		.fixup_map_hash_48b = { 3 },
6879 		.errstr = "R0 pointer arithmetic with /= operator",
6880 		.result = REJECT,
6881 	},
6882 	{
6883 		"map element value illegal alu op, 4",
6884 		.insns = {
6885 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6886 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6887 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6888 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6889 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6890 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
6891 			BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
6892 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
6893 			BPF_EXIT_INSN(),
6894 		},
6895 		.fixup_map_hash_48b = { 3 },
6896 		.errstr_unpriv = "R0 pointer arithmetic prohibited",
6897 		.errstr = "invalid mem access 'inv'",
6898 		.result = REJECT,
6899 		.result_unpriv = REJECT,
6900 	},
6901 	{
6902 		"map element value illegal alu op, 5",
6903 		.insns = {
6904 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6905 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6906 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6907 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6908 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6909 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6910 			BPF_MOV64_IMM(BPF_REG_3, 4096),
6911 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6912 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6913 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
6914 			BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
6915 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
6916 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
6917 			BPF_EXIT_INSN(),
6918 		},
6919 		.fixup_map_hash_48b = { 3 },
6920 		.errstr = "R0 invalid mem access 'inv'",
6921 		.result = REJECT,
6922 	},
6923 	{
6924 		"map element value is preserved across register spilling",
6925 		.insns = {
6926 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6927 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6928 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6929 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6930 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6931 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6932 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
6933 				offsetof(struct test_val, foo)),
6934 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
6935 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6936 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
6937 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
6938 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
6939 			BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
6940 			BPF_EXIT_INSN(),
6941 		},
6942 		.fixup_map_hash_48b = { 3 },
6943 		.errstr_unpriv = "R0 leaks addr",
6944 		.result = ACCEPT,
6945 		.result_unpriv = REJECT,
6946 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
6947 	},
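	/* Variable-size memory arguments to helpers (here bpf_probe_read()):
	 * the size register must be bounded on both ends, and every stack
	 * byte the access could reach must have been initialized.
	 */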
6948 	{
6949 		"helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
6950 		.insns = {
6951 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6952 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6953 			BPF_MOV64_IMM(BPF_REG_0, 0),
6954 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
6955 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
6956 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
6957 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
6958 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
6959 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
6960 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
6961 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
6962 			BPF_MOV64_IMM(BPF_REG_2, 16),
6963 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6964 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6965 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
6966 			BPF_MOV64_IMM(BPF_REG_4, 0),
6967 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
6968 			BPF_MOV64_IMM(BPF_REG_3, 0),
6969 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6970 			BPF_MOV64_IMM(BPF_REG_0, 0),
6971 			BPF_EXIT_INSN(),
6972 		},
6973 		.result = ACCEPT,
6974 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6975 	},
6976 	{
6977 		"helper access to variable memory: stack, bitwise AND, zero included",
6978 		.insns = {
6979 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6980 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6981 			BPF_MOV64_IMM(BPF_REG_2, 16),
6982 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
6983 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
6984 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
6985 			BPF_MOV64_IMM(BPF_REG_3, 0),
6986 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6987 			BPF_EXIT_INSN(),
6988 		},
6989 		.errstr = "invalid indirect read from stack off -64+0 size 64",
6990 		.result = REJECT,
6991 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6992 	},
6993 	{
6994 		"helper access to variable memory: stack, bitwise AND + JMP, wrong max",
6995 		.insns = {
6996 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
6997 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
6998 			BPF_MOV64_IMM(BPF_REG_2, 16),
6999 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7000 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7001 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
7002 			BPF_MOV64_IMM(BPF_REG_4, 0),
7003 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
7004 			BPF_MOV64_IMM(BPF_REG_3, 0),
7005 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7006 			BPF_MOV64_IMM(BPF_REG_0, 0),
7007 			BPF_EXIT_INSN(),
7008 		},
7009 		.errstr = "invalid stack type R1 off=-64 access_size=65",
7010 		.result = REJECT,
7011 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7012 	},
7013 	{
7014 		"helper access to variable memory: stack, JMP, correct bounds",
7015 		.insns = {
7016 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7017 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7018 			BPF_MOV64_IMM(BPF_REG_0, 0),
7019 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
7020 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
7021 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
7022 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
7023 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
7024 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
7025 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
7026 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
7027 			BPF_MOV64_IMM(BPF_REG_2, 16),
7028 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7029 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7030 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
7031 			BPF_MOV64_IMM(BPF_REG_4, 0),
7032 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
7033 			BPF_MOV64_IMM(BPF_REG_3, 0),
7034 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7035 			BPF_MOV64_IMM(BPF_REG_0, 0),
7036 			BPF_EXIT_INSN(),
7037 		},
7038 		.result = ACCEPT,
7039 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7040 	},
7041 	{
7042 		"helper access to variable memory: stack, JMP (signed), correct bounds",
7043 		.insns = {
7044 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7045 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7046 			BPF_MOV64_IMM(BPF_REG_0, 0),
7047 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
7048 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
7049 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
7050 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
7051 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
7052 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
7053 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
7054 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
7055 			BPF_MOV64_IMM(BPF_REG_2, 16),
7056 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7057 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7058 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
7059 			BPF_MOV64_IMM(BPF_REG_4, 0),
7060 			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
7061 			BPF_MOV64_IMM(BPF_REG_3, 0),
7062 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7063 			BPF_MOV64_IMM(BPF_REG_0, 0),
7064 			BPF_EXIT_INSN(),
7065 		},
7066 		.result = ACCEPT,
7067 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7068 	},
7069 	{
7070 		"helper access to variable memory: stack, JMP, bounds + offset",
7071 		.insns = {
7072 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7073 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7074 			BPF_MOV64_IMM(BPF_REG_2, 16),
7075 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7076 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7077 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
7078 			BPF_MOV64_IMM(BPF_REG_4, 0),
7079 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
7080 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
7081 			BPF_MOV64_IMM(BPF_REG_3, 0),
7082 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7083 			BPF_MOV64_IMM(BPF_REG_0, 0),
7084 			BPF_EXIT_INSN(),
7085 		},
7086 		.errstr = "invalid stack type R1 off=-64 access_size=65",
7087 		.result = REJECT,
7088 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7089 	},
7090 	{
7091 		"helper access to variable memory: stack, JMP, wrong max",
7092 		.insns = {
7093 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7094 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7095 			BPF_MOV64_IMM(BPF_REG_2, 16),
7096 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7097 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7098 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
7099 			BPF_MOV64_IMM(BPF_REG_4, 0),
7100 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
7101 			BPF_MOV64_IMM(BPF_REG_3, 0),
7102 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7103 			BPF_MOV64_IMM(BPF_REG_0, 0),
7104 			BPF_EXIT_INSN(),
7105 		},
7106 		.errstr = "invalid stack type R1 off=-64 access_size=65",
7107 		.result = REJECT,
7108 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7109 	},
7110 	{
7111 		"helper access to variable memory: stack, JMP, no max check",
7112 		.insns = {
7113 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7114 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7115 			BPF_MOV64_IMM(BPF_REG_2, 16),
7116 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7117 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7118 			BPF_MOV64_IMM(BPF_REG_4, 0),
7119 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
7120 			BPF_MOV64_IMM(BPF_REG_3, 0),
7121 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7122 			BPF_MOV64_IMM(BPF_REG_0, 0),
7123 			BPF_EXIT_INSN(),
7124 		},
7125 		/* because max wasn't checked, signed min is negative */
7126 		.errstr = "R2 min value is negative, either use unsigned or 'var &= const'",
7127 		.result = REJECT,
7128 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7129 	},
7130 	{
7131 		"helper access to variable memory: stack, JMP, no min check",
7132 		.insns = {
7133 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7134 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7135 			BPF_MOV64_IMM(BPF_REG_2, 16),
7136 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7137 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7138 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
7139 			BPF_MOV64_IMM(BPF_REG_3, 0),
7140 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7141 			BPF_MOV64_IMM(BPF_REG_0, 0),
7142 			BPF_EXIT_INSN(),
7143 		},
7144 		.errstr = "invalid indirect read from stack off -64+0 size 64",
7145 		.result = REJECT,
7146 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7147 	},
7148 	{
7149 		"helper access to variable memory: stack, JMP (signed), no min check",
7150 		.insns = {
7151 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7152 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7153 			BPF_MOV64_IMM(BPF_REG_2, 16),
7154 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7155 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7156 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
7157 			BPF_MOV64_IMM(BPF_REG_3, 0),
7158 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7159 			BPF_MOV64_IMM(BPF_REG_0, 0),
7160 			BPF_EXIT_INSN(),
7161 		},
7162 		.errstr = "R2 min value is negative",
7163 		.result = REJECT,
7164 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7165 	},
7166 	{
7167 		"helper access to variable memory: map, JMP, correct bounds",
7168 		.insns = {
7169 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7170 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7171 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7172 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7173 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7174 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
7175 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7176 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
7177 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7178 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7179 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
7180 				sizeof(struct test_val), 4),
7181 			BPF_MOV64_IMM(BPF_REG_4, 0),
7182 			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
7183 			BPF_MOV64_IMM(BPF_REG_3, 0),
7184 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7185 			BPF_MOV64_IMM(BPF_REG_0, 0),
7186 			BPF_EXIT_INSN(),
7187 		},
7188 		.fixup_map_hash_48b = { 3 },
7189 		.result = ACCEPT,
7190 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7191 	},
7192 	{
7193 		"helper access to variable memory: map, JMP, wrong max",
7194 		.insns = {
7195 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7196 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7197 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7198 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7199 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7200 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
7201 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7202 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
7203 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7204 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7205 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
7206 				sizeof(struct test_val) + 1, 4),
7207 			BPF_MOV64_IMM(BPF_REG_4, 0),
7208 			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
7209 			BPF_MOV64_IMM(BPF_REG_3, 0),
7210 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7211 			BPF_MOV64_IMM(BPF_REG_0, 0),
7212 			BPF_EXIT_INSN(),
7213 		},
7214 		.fixup_map_hash_48b = { 3 },
7215 		.errstr = "invalid access to map value, value_size=48 off=0 size=49",
7216 		.result = REJECT,
7217 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7218 	},
7219 	{
7220 		"helper access to variable memory: map adjusted, JMP, correct bounds",
7221 		.insns = {
7222 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7223 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7224 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7225 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7226 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7227 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
7228 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7229 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
7230 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
7231 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7232 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7233 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
7234 				sizeof(struct test_val) - 20, 4),
7235 			BPF_MOV64_IMM(BPF_REG_4, 0),
7236 			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
7237 			BPF_MOV64_IMM(BPF_REG_3, 0),
7238 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7239 			BPF_MOV64_IMM(BPF_REG_0, 0),
7240 			BPF_EXIT_INSN(),
7241 		},
7242 		.fixup_map_hash_48b = { 3 },
7243 		.result = ACCEPT,
7244 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7245 	},
7246 	{
7247 		"helper access to variable memory: map adjusted, JMP, wrong max",
7248 		.insns = {
7249 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7250 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7251 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7252 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7253 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7254 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
7255 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7256 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
7257 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
7258 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7259 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7260 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
7261 				sizeof(struct test_val) - 19, 4),
7262 			BPF_MOV64_IMM(BPF_REG_4, 0),
7263 			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
7264 			BPF_MOV64_IMM(BPF_REG_3, 0),
7265 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7266 			BPF_MOV64_IMM(BPF_REG_0, 0),
7267 			BPF_EXIT_INSN(),
7268 		},
7269 		.fixup_map_hash_48b = { 3 },
7270 		.errstr = "R1 min value is outside of the array range",
7271 		.result = REJECT,
7272 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7273 	},
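	/* ARG_PTR_TO_MEM_OR_NULL (bpf_csum_diff()): a NULL pointer is only
	 * acceptable together with size 0; non-NULL stack, map value and
	 * packet pointers may be paired with a size that can be 0.
	 */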
7274 	{
7275 		"helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
7276 		.insns = {
7277 			BPF_MOV64_IMM(BPF_REG_1, 0),
7278 			BPF_MOV64_IMM(BPF_REG_2, 0),
7279 			BPF_MOV64_IMM(BPF_REG_3, 0),
7280 			BPF_MOV64_IMM(BPF_REG_4, 0),
7281 			BPF_MOV64_IMM(BPF_REG_5, 0),
7282 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7283 			BPF_EXIT_INSN(),
7284 		},
7285 		.result = ACCEPT,
7286 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
7287 	},
7288 	{
7289 		"helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
7290 		.insns = {
7291 			BPF_MOV64_IMM(BPF_REG_1, 0),
7292 			BPF_MOV64_IMM(BPF_REG_2, 1),
7293 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7294 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7295 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
7296 			BPF_MOV64_IMM(BPF_REG_3, 0),
7297 			BPF_MOV64_IMM(BPF_REG_4, 0),
7298 			BPF_MOV64_IMM(BPF_REG_5, 0),
7299 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7300 			BPF_EXIT_INSN(),
7301 		},
7302 		.errstr = "R1 type=inv expected=fp",
7303 		.result = REJECT,
7304 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
7305 	},
7306 	{
7307 		"helper access to variable memory: size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
7308 		.insns = {
7309 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7310 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
7311 			BPF_MOV64_IMM(BPF_REG_2, 0),
7312 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
7313 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
7314 			BPF_MOV64_IMM(BPF_REG_3, 0),
7315 			BPF_MOV64_IMM(BPF_REG_4, 0),
7316 			BPF_MOV64_IMM(BPF_REG_5, 0),
7317 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7318 			BPF_EXIT_INSN(),
7319 		},
7320 		.result = ACCEPT,
7321 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
7322 	},
7323 	{
7324 		"helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
7325 		.insns = {
7326 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7327 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7328 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7329 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7330 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7331 				     BPF_FUNC_map_lookup_elem),
7332 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7333 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7334 			BPF_MOV64_IMM(BPF_REG_2, 0),
7335 			BPF_MOV64_IMM(BPF_REG_3, 0),
7336 			BPF_MOV64_IMM(BPF_REG_4, 0),
7337 			BPF_MOV64_IMM(BPF_REG_5, 0),
7338 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7339 			BPF_EXIT_INSN(),
7340 		},
7341 		.fixup_map_hash_8b = { 3 },
7342 		.result = ACCEPT,
7343 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
7344 	},
7345 	{
7346 		"helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
7347 		.insns = {
7348 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7349 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7350 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7351 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7352 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7353 				     BPF_FUNC_map_lookup_elem),
7354 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7355 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
7356 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 7),
7357 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7358 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
7359 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
7360 			BPF_MOV64_IMM(BPF_REG_3, 0),
7361 			BPF_MOV64_IMM(BPF_REG_4, 0),
7362 			BPF_MOV64_IMM(BPF_REG_5, 0),
7363 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7364 			BPF_EXIT_INSN(),
7365 		},
7366 		.fixup_map_hash_8b = { 3 },
7367 		.result = ACCEPT,
7368 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
7369 	},
7370 	{
7371 		"helper access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
7372 		.insns = {
7373 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7374 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7375 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7376 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7377 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7378 				     BPF_FUNC_map_lookup_elem),
7379 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7380 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7381 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
7382 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
7383 			BPF_MOV64_IMM(BPF_REG_3, 0),
7384 			BPF_MOV64_IMM(BPF_REG_4, 0),
7385 			BPF_MOV64_IMM(BPF_REG_5, 0),
7386 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7387 			BPF_EXIT_INSN(),
7388 		},
7389 		.fixup_map_hash_8b = { 3 },
7390 		.result = ACCEPT,
7391 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
7392 	},
7393 	{
7394 		"helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)",
7395 		.insns = {
7396 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
7397 				    offsetof(struct __sk_buff, data)),
7398 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7399 				    offsetof(struct __sk_buff, data_end)),
7400 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_6),
7401 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
7402 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
7403 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
7404 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 0),
7405 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
7406 			BPF_MOV64_IMM(BPF_REG_3, 0),
7407 			BPF_MOV64_IMM(BPF_REG_4, 0),
7408 			BPF_MOV64_IMM(BPF_REG_5, 0),
7409 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7410 			BPF_EXIT_INSN(),
7411 		},
7412 		.result = ACCEPT,
7413 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
7414 		.retval = 0 /* csum_diff of 64-byte packet */,
7415 	},
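	/* The same size/NULL combinations against a helper whose memory
	 * argument is not ARG_PTR_TO_MEM_OR_NULL (bpf_probe_read()): a NULL
	 * pointer is rejected even with size 0, while non-NULL pointers with
	 * a zero or possibly-zero size are accepted.
	 */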
7416 	{
7417 		"helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
7418 		.insns = {
7419 			BPF_MOV64_IMM(BPF_REG_1, 0),
7420 			BPF_MOV64_IMM(BPF_REG_2, 0),
7421 			BPF_MOV64_IMM(BPF_REG_3, 0),
7422 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7423 			BPF_EXIT_INSN(),
7424 		},
7425 		.errstr = "R1 type=inv expected=fp",
7426 		.result = REJECT,
7427 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7428 	},
7429 	{
7430 		"helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
7431 		.insns = {
7432 			BPF_MOV64_IMM(BPF_REG_1, 0),
7433 			BPF_MOV64_IMM(BPF_REG_2, 1),
7434 			BPF_MOV64_IMM(BPF_REG_3, 0),
7435 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7436 			BPF_EXIT_INSN(),
7437 		},
7438 		.errstr = "R1 type=inv expected=fp",
7439 		.result = REJECT,
7440 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7441 	},
7442 	{
7443 		"helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
7444 		.insns = {
7445 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7446 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
7447 			BPF_MOV64_IMM(BPF_REG_2, 0),
7448 			BPF_MOV64_IMM(BPF_REG_3, 0),
7449 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7450 			BPF_EXIT_INSN(),
7451 		},
7452 		.result = ACCEPT,
7453 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7454 	},
7455 	{
7456 		"helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
7457 		.insns = {
7458 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7459 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7460 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7461 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7462 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7463 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7464 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7465 			BPF_MOV64_IMM(BPF_REG_2, 0),
7466 			BPF_MOV64_IMM(BPF_REG_3, 0),
7467 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7468 			BPF_EXIT_INSN(),
7469 		},
7470 		.fixup_map_hash_8b = { 3 },
7471 		.result = ACCEPT,
7472 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7473 	},
7474 	{
7475 		"helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
7476 		.insns = {
7477 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7478 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7479 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7480 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7481 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7482 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7483 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
7484 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
7485 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7486 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
7487 			BPF_MOV64_IMM(BPF_REG_3, 0),
7488 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7489 			BPF_EXIT_INSN(),
7490 		},
7491 		.fixup_map_hash_8b = { 3 },
7492 		.result = ACCEPT,
7493 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7494 	},
7495 	{
7496 		"helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
7497 		.insns = {
7498 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7499 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7500 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7501 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7502 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7503 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
7504 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7505 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
7506 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 2),
7507 			BPF_MOV64_IMM(BPF_REG_3, 0),
7508 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7509 			BPF_EXIT_INSN(),
7510 		},
7511 		.fixup_map_hash_8b = { 3 },
7512 		.result = ACCEPT,
7513 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7514 	},
7515 	{
7516 		"helper access to variable memory: 8 bytes leak",
7517 		.insns = {
7518 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7519 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7520 			BPF_MOV64_IMM(BPF_REG_0, 0),
7521 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
7522 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
7523 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
7524 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
7525 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
7526 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
7527 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
7528 			BPF_MOV64_IMM(BPF_REG_2, 1),
7529 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7530 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7531 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
7532 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
7533 			BPF_MOV64_IMM(BPF_REG_3, 0),
7534 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7535 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7536 			BPF_EXIT_INSN(),
7537 		},
7538 		.errstr = "invalid indirect read from stack off -64+32 size 64",
7539 		.result = REJECT,
7540 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7541 	},
7542 	{
7543 		"helper access to variable memory: 8 bytes no leak (init memory)",
7544 		.insns = {
7545 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7546 			BPF_MOV64_IMM(BPF_REG_0, 0),
7547 			BPF_MOV64_IMM(BPF_REG_0, 0),
7548 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
7549 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
7550 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
7551 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
7552 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
7553 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
7554 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
7555 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
7556 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7557 			BPF_MOV64_IMM(BPF_REG_2, 0),
7558 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
7559 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
7560 			BPF_MOV64_IMM(BPF_REG_3, 0),
7561 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7562 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7563 			BPF_EXIT_INSN(),
7564 		},
7565 		.result = ACCEPT,
7566 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7567 	},
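	/* Bounds tracking through ALU chains on a map value index: an AND
	 * with a negative constant, or a modulo/multiply sequence, must not
	 * let the resulting offset escape the array range.
	 */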
7568 	{
7569 		"invalid and of negative number",
7570 		.insns = {
7571 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7572 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7573 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7574 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7575 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7576 				     BPF_FUNC_map_lookup_elem),
7577 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7578 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7579 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
7580 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
7581 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7582 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
7583 				   offsetof(struct test_val, foo)),
7584 			BPF_EXIT_INSN(),
7585 		},
7586 		.fixup_map_hash_48b = { 3 },
7587 		.errstr = "R0 max value is outside of the array range",
7588 		.result = REJECT,
7589 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7590 	},
7591 	{
7592 		"invalid range check",
7593 		.insns = {
7594 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7595 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7596 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7597 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7598 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7599 				     BPF_FUNC_map_lookup_elem),
7600 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
7601 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
7602 			BPF_MOV64_IMM(BPF_REG_9, 1),
7603 			BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
7604 			BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
7605 			BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
7606 			BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
7607 			BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
7608 			BPF_MOV32_IMM(BPF_REG_3, 1),
7609 			BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
7610 			BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
7611 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
7612 			BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
7613 			BPF_MOV64_REG(BPF_REG_0, 0),
7614 			BPF_EXIT_INSN(),
7615 		},
7616 		.fixup_map_hash_48b = { 3 },
7617 		.errstr = "R0 max value is outside of the array range",
7618 		.result = REJECT,
7619 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7620 	},
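	/* Map-in-map: the inner map pointer returned by the outer lookup must
	 * be NULL-checked and must not be modified before it is used as the
	 * map argument of the second lookup.
	 */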
7621 	{
7622 		"map in map access",
7623 		.insns = {
7624 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7625 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7626 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7627 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7628 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7629 				     BPF_FUNC_map_lookup_elem),
7630 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
7631 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7632 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7633 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7634 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7635 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7636 				     BPF_FUNC_map_lookup_elem),
7637 			BPF_MOV64_IMM(BPF_REG_0, 0),
7638 			BPF_EXIT_INSN(),
7639 		},
7640 		.fixup_map_in_map = { 3 },
7641 		.result = ACCEPT,
7642 	},
7643 	{
7644 		"invalid inner map pointer",
7645 		.insns = {
7646 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7647 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7648 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7649 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7650 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7651 				     BPF_FUNC_map_lookup_elem),
7652 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7653 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7654 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7655 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7656 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7657 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7658 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7659 				     BPF_FUNC_map_lookup_elem),
7660 			BPF_MOV64_IMM(BPF_REG_0, 0),
7661 			BPF_EXIT_INSN(),
7662 		},
7663 		.fixup_map_in_map = { 3 },
7664 		.errstr = "R1 pointer arithmetic on map_ptr prohibited",
7665 		.result = REJECT,
7666 	},
7667 	{
7668 		"forgot null checking on the inner map pointer",
7669 		.insns = {
7670 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7671 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7672 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7673 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7674 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7675 				     BPF_FUNC_map_lookup_elem),
7676 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7677 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7678 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7679 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7680 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7681 				     BPF_FUNC_map_lookup_elem),
7682 			BPF_MOV64_IMM(BPF_REG_0, 0),
7683 			BPF_EXIT_INSN(),
7684 		},
7685 		.fixup_map_in_map = { 3 },
7686 		.errstr = "R1 type=map_value_or_null expected=map_ptr",
7687 		.result = REJECT,
7688 	},
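	/* BPF_LD_ABS/BPF_LD_IND calling convention: the instruction clobbers
	 * R1-R5, so only R6 (the skb) and callee-saved registers such as R7
	 * remain readable afterwards.
	 */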
7689 	{
7690 		"ld_abs: check calling conv, r1",
7691 		.insns = {
7692 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7693 			BPF_MOV64_IMM(BPF_REG_1, 0),
7694 			BPF_LD_ABS(BPF_W, -0x200000),
7695 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
7696 			BPF_EXIT_INSN(),
7697 		},
7698 		.errstr = "R1 !read_ok",
7699 		.result = REJECT,
7700 	},
7701 	{
7702 		"ld_abs: check calling conv, r2",
7703 		.insns = {
7704 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7705 			BPF_MOV64_IMM(BPF_REG_2, 0),
7706 			BPF_LD_ABS(BPF_W, -0x200000),
7707 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
7708 			BPF_EXIT_INSN(),
7709 		},
7710 		.errstr = "R2 !read_ok",
7711 		.result = REJECT,
7712 	},
7713 	{
7714 		"ld_abs: check calling conv, r3",
7715 		.insns = {
7716 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7717 			BPF_MOV64_IMM(BPF_REG_3, 0),
7718 			BPF_LD_ABS(BPF_W, -0x200000),
7719 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
7720 			BPF_EXIT_INSN(),
7721 		},
7722 		.errstr = "R3 !read_ok",
7723 		.result = REJECT,
7724 	},
7725 	{
7726 		"ld_abs: check calling conv, r4",
7727 		.insns = {
7728 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7729 			BPF_MOV64_IMM(BPF_REG_4, 0),
7730 			BPF_LD_ABS(BPF_W, -0x200000),
7731 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
7732 			BPF_EXIT_INSN(),
7733 		},
7734 		.errstr = "R4 !read_ok",
7735 		.result = REJECT,
7736 	},
7737 	{
7738 		"ld_abs: check calling conv, r5",
7739 		.insns = {
7740 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7741 			BPF_MOV64_IMM(BPF_REG_5, 0),
7742 			BPF_LD_ABS(BPF_W, -0x200000),
7743 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
7744 			BPF_EXIT_INSN(),
7745 		},
7746 		.errstr = "R5 !read_ok",
7747 		.result = REJECT,
7748 	},
7749 	{
7750 		"ld_abs: check calling conv, r7",
7751 		.insns = {
7752 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7753 			BPF_MOV64_IMM(BPF_REG_7, 0),
7754 			BPF_LD_ABS(BPF_W, -0x200000),
7755 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
7756 			BPF_EXIT_INSN(),
7757 		},
7758 		.result = ACCEPT,
7759 	},
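	/* LD_ABS needs the skb in R6: the next test parks the skb in R7,
	 * calls bpf_skb_vlan_push() (which may change packet data), restores
	 * R6 and performs further LD_ABS loads before returning 42.
	 */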
7760 	{
7761 		"ld_abs: tests on r6 and skb data reload helper",
7762 		.insns = {
7763 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7764 			BPF_LD_ABS(BPF_B, 0),
7765 			BPF_LD_ABS(BPF_H, 0),
7766 			BPF_LD_ABS(BPF_W, 0),
7767 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
7768 			BPF_MOV64_IMM(BPF_REG_6, 0),
7769 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
7770 			BPF_MOV64_IMM(BPF_REG_2, 1),
7771 			BPF_MOV64_IMM(BPF_REG_3, 2),
7772 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7773 				     BPF_FUNC_skb_vlan_push),
7774 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
7775 			BPF_LD_ABS(BPF_B, 0),
7776 			BPF_LD_ABS(BPF_H, 0),
7777 			BPF_LD_ABS(BPF_W, 0),
7778 			BPF_MOV64_IMM(BPF_REG_0, 42),
7779 			BPF_EXIT_INSN(),
7780 		},
7781 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
7782 		.result = ACCEPT,
7783 		.retval = 42 /* ultimate return value */,
7784 	},
7785 	{
7786 		"ld_ind: check calling conv, r1",
7787 		.insns = {
7788 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7789 			BPF_MOV64_IMM(BPF_REG_1, 1),
7790 			BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000),
7791 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
7792 			BPF_EXIT_INSN(),
7793 		},
7794 		.errstr = "R1 !read_ok",
7795 		.result = REJECT,
7796 	},
7797 	{
7798 		"ld_ind: check calling conv, r2",
7799 		.insns = {
7800 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7801 			BPF_MOV64_IMM(BPF_REG_2, 1),
7802 			BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000),
7803 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
7804 			BPF_EXIT_INSN(),
7805 		},
7806 		.errstr = "R2 !read_ok",
7807 		.result = REJECT,
7808 	},
7809 	{
7810 		"ld_ind: check calling conv, r3",
7811 		.insns = {
7812 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7813 			BPF_MOV64_IMM(BPF_REG_3, 1),
7814 			BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000),
7815 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
7816 			BPF_EXIT_INSN(),
7817 		},
7818 		.errstr = "R3 !read_ok",
7819 		.result = REJECT,
7820 	},
7821 	{
7822 		"ld_ind: check calling conv, r4",
7823 		.insns = {
7824 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7825 			BPF_MOV64_IMM(BPF_REG_4, 1),
7826 			BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000),
7827 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
7828 			BPF_EXIT_INSN(),
7829 		},
7830 		.errstr = "R4 !read_ok",
7831 		.result = REJECT,
7832 	},
7833 	{
7834 		"ld_ind: check calling conv, r5",
7835 		.insns = {
7836 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7837 			BPF_MOV64_IMM(BPF_REG_5, 1),
7838 			BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000),
7839 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
7840 			BPF_EXIT_INSN(),
7841 		},
7842 		.errstr = "R5 !read_ok",
7843 		.result = REJECT,
7844 	},
7845 	{
7846 		"ld_ind: check calling conv, r7",
7847 		.insns = {
7848 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
7849 			BPF_MOV64_IMM(BPF_REG_7, 1),
7850 			BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
7851 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
7852 			BPF_EXIT_INSN(),
7853 		},
7854 		.result = ACCEPT,
7855 		.retval = 1,
7856 	},
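	/* bpf_perf_event_data->sample_period: byte, half, word and dword
	 * loads of the 64-bit field are all permitted; the narrow loads pick
	 * the offset by endianness so they target the same bytes on
	 * big-endian hosts.
	 */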
7857 	{
7858 		"check bpf_perf_event_data->sample_period byte load permitted",
7859 		.insns = {
7860 			BPF_MOV64_IMM(BPF_REG_0, 0),
7861 #if __BYTE_ORDER == __LITTLE_ENDIAN
7862 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
7863 				    offsetof(struct bpf_perf_event_data, sample_period)),
7864 #else
7865 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
7866 				    offsetof(struct bpf_perf_event_data, sample_period) + 7),
7867 #endif
7868 			BPF_EXIT_INSN(),
7869 		},
7870 		.result = ACCEPT,
7871 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
7872 	},
7873 	{
7874 		"check bpf_perf_event_data->sample_period half load permitted",
7875 		.insns = {
7876 			BPF_MOV64_IMM(BPF_REG_0, 0),
7877 #if __BYTE_ORDER == __LITTLE_ENDIAN
7878 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
7879 				    offsetof(struct bpf_perf_event_data, sample_period)),
7880 #else
7881 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
7882 				    offsetof(struct bpf_perf_event_data, sample_period) + 6),
7883 #endif
7884 			BPF_EXIT_INSN(),
7885 		},
7886 		.result = ACCEPT,
7887 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
7888 	},
7889 	{
7890 		"check bpf_perf_event_data->sample_period word load permitted",
7891 		.insns = {
7892 			BPF_MOV64_IMM(BPF_REG_0, 0),
7893 #if __BYTE_ORDER == __LITTLE_ENDIAN
7894 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
7895 				    offsetof(struct bpf_perf_event_data, sample_period)),
7896 #else
7897 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
7898 				    offsetof(struct bpf_perf_event_data, sample_period) + 4),
7899 #endif
7900 			BPF_EXIT_INSN(),
7901 		},
7902 		.result = ACCEPT,
7903 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
7904 	},
7905 	{
7906 		"check bpf_perf_event_data->sample_period dword load permitted",
7907 		.insns = {
7908 			BPF_MOV64_IMM(BPF_REG_0, 0),
7909 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
7910 				    offsetof(struct bpf_perf_event_data, sample_period)),
7911 			BPF_EXIT_INSN(),
7912 		},
7913 		.result = ACCEPT,
7914 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
7915 	},
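	/* Narrow context loads that must be rejected: a half-word load of
	 * skb->data, and a half-word load of skb->tc_classid from an LWT
	 * program, both yield "invalid bpf_context access".
	 */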
7916 	{
7917 		"check skb->data half load not permitted",
7918 		.insns = {
7919 			BPF_MOV64_IMM(BPF_REG_0, 0),
7920 #if __BYTE_ORDER == __LITTLE_ENDIAN
7921 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
7922 				    offsetof(struct __sk_buff, data)),
7923 #else
7924 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
7925 				    offsetof(struct __sk_buff, data) + 2),
7926 #endif
7927 			BPF_EXIT_INSN(),
7928 		},
7929 		.result = REJECT,
7930 		.errstr = "invalid bpf_context access",
7931 	},
7932 	{
7933 		"check skb->tc_classid half load not permitted for lwt prog",
7934 		.insns = {
7935 			BPF_MOV64_IMM(BPF_REG_0, 0),
7936 #if __BYTE_ORDER == __LITTLE_ENDIAN
7937 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
7938 				    offsetof(struct __sk_buff, tc_classid)),
7939 #else
7940 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
7941 				    offsetof(struct __sk_buff, tc_classid) + 2),
7942 #endif
7943 			BPF_EXIT_INSN(),
7944 		},
7945 		.result = REJECT,
7946 		.errstr = "invalid bpf_context access",
7947 		.prog_type = BPF_PROG_TYPE_LWT_IN,
7948 	},
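	/* Mixing signed and unsigned bounds checks on the same register: an
	 * unsigned comparison against -1 leaves the signed minimum unbounded,
	 * so adding the value to a map pointer is rejected ("unbounded min
	 * value"); variant 4 is accepted because the AND with a small
	 * constant bounds the value on both ends.
	 */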
7949 	{
7950 		"bounds checks mixing signed and unsigned, positive bounds",
7951 		.insns = {
7952 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7953 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7954 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7955 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7956 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7957 				     BPF_FUNC_map_lookup_elem),
7958 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7959 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7960 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7961 			BPF_MOV64_IMM(BPF_REG_2, 2),
7962 			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3),
7963 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 4, 2),
7964 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7965 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7966 			BPF_MOV64_IMM(BPF_REG_0, 0),
7967 			BPF_EXIT_INSN(),
7968 		},
7969 		.fixup_map_hash_8b = { 3 },
7970 		.errstr = "unbounded min value",
7971 		.result = REJECT,
7972 	},
7973 	{
7974 		"bounds checks mixing signed and unsigned",
7975 		.insns = {
7976 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7977 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7978 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7979 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7980 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7981 				     BPF_FUNC_map_lookup_elem),
7982 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7983 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
7984 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7985 			BPF_MOV64_IMM(BPF_REG_2, -1),
7986 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
7987 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
7988 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7989 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
7990 			BPF_MOV64_IMM(BPF_REG_0, 0),
7991 			BPF_EXIT_INSN(),
7992 		},
7993 		.fixup_map_hash_8b = { 3 },
7994 		.errstr = "unbounded min value",
7995 		.result = REJECT,
7996 	},
7997 	{
7998 		"bounds checks mixing signed and unsigned, variant 2",
7999 		.insns = {
8000 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8001 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8002 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8003 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8004 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8005 				     BPF_FUNC_map_lookup_elem),
8006 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8007 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8008 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8009 			BPF_MOV64_IMM(BPF_REG_2, -1),
8010 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
8011 			BPF_MOV64_IMM(BPF_REG_8, 0),
8012 			BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_1),
8013 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
8014 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
8015 			BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
8016 			BPF_MOV64_IMM(BPF_REG_0, 0),
8017 			BPF_EXIT_INSN(),
8018 		},
8019 		.fixup_map_hash_8b = { 3 },
8020 		.errstr = "unbounded min value",
8021 		.result = REJECT,
8022 	},
8023 	{
8024 		"bounds checks mixing signed and unsigned, variant 3",
8025 		.insns = {
8026 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8027 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8028 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8029 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8030 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8031 				     BPF_FUNC_map_lookup_elem),
8032 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
8033 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8034 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8035 			BPF_MOV64_IMM(BPF_REG_2, -1),
8036 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4),
8037 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
8038 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
8039 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
8040 			BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
8041 			BPF_MOV64_IMM(BPF_REG_0, 0),
8042 			BPF_EXIT_INSN(),
8043 		},
8044 		.fixup_map_hash_8b = { 3 },
8045 		.errstr = "unbounded min value",
8046 		.result = REJECT,
8047 	},
8048 	{
8049 		"bounds checks mixing signed and unsigned, variant 4",
8050 		.insns = {
8051 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8052 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8053 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8054 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8055 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8056 				     BPF_FUNC_map_lookup_elem),
8057 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
8058 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8059 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8060 			BPF_MOV64_IMM(BPF_REG_2, 1),
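			/* AND with 1 bounds r1 to [0, 1], so unlike the other
			 * variants the pointer arithmetic here is safe.
			 */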
8061 			BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
8062 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8063 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8064 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8065 			BPF_MOV64_IMM(BPF_REG_0, 0),
8066 			BPF_EXIT_INSN(),
8067 		},
8068 		.fixup_map_hash_8b = { 3 },
8069 		.result = ACCEPT,
8070 	},
8071 	{
8072 		"bounds checks mixing signed and unsigned, variant 5",
8073 		.insns = {
8074 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8075 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8076 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8077 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8078 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8079 				     BPF_FUNC_map_lookup_elem),
8080 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8081 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8082 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8083 			BPF_MOV64_IMM(BPF_REG_2, -1),
8084 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
8085 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 4),
8086 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4),
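			/* Only smax <= 1 is known for r1 and its minimum is
			 * unbounded, so r0 + 4 - r1 may point anywhere.
			 */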
8087 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
8088 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8089 			BPF_MOV64_IMM(BPF_REG_0, 0),
8090 			BPF_EXIT_INSN(),
8091 		},
8092 		.fixup_map_hash_8b = { 3 },
8093 		.errstr = "unbounded min value",
8094 		.result = REJECT,
8095 	},
8096 	{
8097 		"bounds checks mixing signed and unsigned, variant 6",
8098 		.insns = {
8099 			BPF_MOV64_IMM(BPF_REG_2, 0),
8100 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
8101 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512),
8102 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8103 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16),
8104 			BPF_MOV64_IMM(BPF_REG_6, -1),
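			/* r4 becomes the 'len' argument of
			 * bpf_skb_load_bytes(); the mixed checks below leave
			 * its minimum unbounded, so it may be negative and the
			 * helper call must be rejected.
			 */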
8105 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5),
8106 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_4, 1, 4),
8107 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
8108 			BPF_MOV64_IMM(BPF_REG_5, 0),
8109 			BPF_ST_MEM(BPF_H, BPF_REG_10, -512, 0),
8110 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8111 				     BPF_FUNC_skb_load_bytes),
8112 			BPF_MOV64_IMM(BPF_REG_0, 0),
8113 			BPF_EXIT_INSN(),
8114 		},
8115 		.errstr = "R4 min value is negative, either use unsigned",
8116 		.result = REJECT,
8117 	},
8118 	{
8119 		"bounds checks mixing signed and unsigned, variant 7",
8120 		.insns = {
8121 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8122 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8123 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8124 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8125 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8126 				     BPF_FUNC_map_lookup_elem),
8127 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
8128 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8129 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8130 			BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024),
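			/* Comparing against a positive constant really bounds
			 * r1: umax <= 1G implies smin >= 0, and with smax <= 1
			 * the range collapses to [0, 1], so this is accepted.
			 */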
8131 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
8132 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8133 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8134 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8135 			BPF_MOV64_IMM(BPF_REG_0, 0),
8136 			BPF_EXIT_INSN(),
8137 		},
8138 		.fixup_map_hash_8b = { 3 },
8139 		.result = ACCEPT,
8140 	},
8141 	{
8142 		"bounds checks mixing signed and unsigned, variant 8",
8143 		.insns = {
8144 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8145 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8146 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8147 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8148 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8149 				     BPF_FUNC_map_lookup_elem),
8150 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8151 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8152 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8153 			BPF_MOV64_IMM(BPF_REG_2, -1),
8154 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
8155 			BPF_MOV64_IMM(BPF_REG_0, 0),
8156 			BPF_EXIT_INSN(),
8157 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8158 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8159 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8160 			BPF_MOV64_IMM(BPF_REG_0, 0),
8161 			BPF_EXIT_INSN(),
8162 		},
8163 		.fixup_map_hash_8b = { 3 },
8164 		.errstr = "unbounded min value",
8165 		.result = REJECT,
8166 	},
8167 	{
8168 		"bounds checks mixing signed and unsigned, variant 9",
8169 		.insns = {
8170 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8171 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8172 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8173 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8174 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8175 				     BPF_FUNC_map_lookup_elem),
8176 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
8177 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8178 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8179 			BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL),
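			/* The taken branch has r1 < 0x8000000000000000, i.e.
			 * the sign bit clear; together with smax <= 1 below
			 * the range collapses to [0, 1] and this is accepted.
			 */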
8180 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
8181 			BPF_MOV64_IMM(BPF_REG_0, 0),
8182 			BPF_EXIT_INSN(),
8183 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8184 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8185 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8186 			BPF_MOV64_IMM(BPF_REG_0, 0),
8187 			BPF_EXIT_INSN(),
8188 		},
8189 		.fixup_map_hash_8b = { 3 },
8190 		.result = ACCEPT,
8191 	},
8192 	{
8193 		"bounds checks mixing signed and unsigned, variant 10",
8194 		.insns = {
8195 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8196 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8197 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8198 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8199 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8200 				     BPF_FUNC_map_lookup_elem),
8201 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8202 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8203 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8204 			BPF_MOV64_IMM(BPF_REG_2, 0),
8205 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
8206 			BPF_MOV64_IMM(BPF_REG_0, 0),
8207 			BPF_EXIT_INSN(),
8208 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8209 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8210 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8211 			BPF_MOV64_IMM(BPF_REG_0, 0),
8212 			BPF_EXIT_INSN(),
8213 		},
8214 		.fixup_map_hash_8b = { 3 },
8215 		.errstr = "unbounded min value",
8216 		.result = REJECT,
8217 	},
8218 	{
8219 		"bounds checks mixing signed and unsigned, variant 11",
8220 		.insns = {
8221 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8222 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8223 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8224 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8225 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8226 				     BPF_FUNC_map_lookup_elem),
8227 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8228 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8229 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8230 			BPF_MOV64_IMM(BPF_REG_2, -1),
8231 			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
8232 			/* Dead branch. */
8233 			BPF_MOV64_IMM(BPF_REG_0, 0),
8234 			BPF_EXIT_INSN(),
8235 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8236 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8237 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8238 			BPF_MOV64_IMM(BPF_REG_0, 0),
8239 			BPF_EXIT_INSN(),
8240 		},
8241 		.fixup_map_hash_8b = { 3 },
8242 		.errstr = "unbounded min value",
8243 		.result = REJECT,
8244 	},
8245 	{
8246 		"bounds checks mixing signed and unsigned, variant 12",
8247 		.insns = {
8248 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8249 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8250 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8251 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8252 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8253 				     BPF_FUNC_map_lookup_elem),
8254 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8255 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8256 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8257 			BPF_MOV64_IMM(BPF_REG_2, -6),
8258 			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
8259 			BPF_MOV64_IMM(BPF_REG_0, 0),
8260 			BPF_EXIT_INSN(),
8261 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8262 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8263 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8264 			BPF_MOV64_IMM(BPF_REG_0, 0),
8265 			BPF_EXIT_INSN(),
8266 		},
8267 		.fixup_map_hash_8b = { 3 },
8268 		.errstr = "unbounded min value",
8269 		.result = REJECT,
8270 	},
8271 	{
8272 		"bounds checks mixing signed and unsigned, variant 13",
8273 		.insns = {
8274 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8275 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8276 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8277 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8278 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8279 				     BPF_FUNC_map_lookup_elem),
8280 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
8281 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8282 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8283 			BPF_MOV64_IMM(BPF_REG_2, 2),
8284 			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
8285 			BPF_MOV64_IMM(BPF_REG_7, 1),
8286 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 0, 2),
8287 			BPF_MOV64_IMM(BPF_REG_0, 0),
8288 			BPF_EXIT_INSN(),
8289 			BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_1),
8290 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 4, 2),
8291 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_7),
8292 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8293 			BPF_MOV64_IMM(BPF_REG_0, 0),
8294 			BPF_EXIT_INSN(),
8295 		},
8296 		.fixup_map_hash_8b = { 3 },
8297 		.errstr = "unbounded min value",
8298 		.result = REJECT,
8299 	},
8300 	{
8301 		"bounds checks mixing signed and unsigned, variant 14",
8302 		.insns = {
8303 			BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
8304 				    offsetof(struct __sk_buff, mark)),
8305 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8306 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8307 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8308 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8309 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8310 				     BPF_FUNC_map_lookup_elem),
8311 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
8312 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8313 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8314 			BPF_MOV64_IMM(BPF_REG_2, -1),
8315 			BPF_MOV64_IMM(BPF_REG_8, 2),
8316 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 42, 6),
8317 			BPF_JMP_REG(BPF_JSGT, BPF_REG_8, BPF_REG_1, 3),
8318 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8319 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8320 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8321 			BPF_MOV64_IMM(BPF_REG_0, 0),
8322 			BPF_EXIT_INSN(),
8323 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3),
8324 			BPF_JMP_IMM(BPF_JA, 0, 0, -7),
8325 		},
8326 		.fixup_map_hash_8b = { 4 },
8327 		.errstr = "R0 invalid mem access 'inv'",
8328 		.result = REJECT,
8329 	},
8330 	{
8331 		"bounds checks mixing signed and unsigned, variant 15",
8332 		.insns = {
8333 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8334 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8335 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8336 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8337 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8338 				     BPF_FUNC_map_lookup_elem),
8339 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8340 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8341 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8342 			BPF_MOV64_IMM(BPF_REG_2, -6),
8343 			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
8344 			BPF_MOV64_IMM(BPF_REG_0, 0),
8345 			BPF_EXIT_INSN(),
8346 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8347 			BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 1, 2),
8348 			BPF_MOV64_IMM(BPF_REG_0, 0),
8349 			BPF_EXIT_INSN(),
8350 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8351 			BPF_MOV64_IMM(BPF_REG_0, 0),
8352 			BPF_EXIT_INSN(),
8353 		},
8354 		.fixup_map_hash_8b = { 3 },
8355 		.errstr = "unbounded min value",
8356 		.result = REJECT,
8357 		.result_unpriv = REJECT,
8358 	},
8359 	{
8360 		"subtraction bounds (map value) variant 1",
8361 		.insns = {
8362 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8363 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8364 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8365 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8366 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8367 				     BPF_FUNC_map_lookup_elem),
8368 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8369 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8370 			BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7),
8371 			BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
8372 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5),
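			/* r1 and r3 are each in [0, 0xff], so r1 - r3 may be
			 * negative; after the shift the verifier can only say
			 * r1 is in [0, 0xff], and an offset of up to 0xff does
			 * not fit the 8-byte value.
			 */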
8373 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
8374 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56),
8375 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8376 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8377 			BPF_EXIT_INSN(),
8378 			BPF_MOV64_IMM(BPF_REG_0, 0),
8379 			BPF_EXIT_INSN(),
8380 		},
8381 		.fixup_map_hash_8b = { 3 },
8382 		.errstr = "R0 max value is outside of the array range",
8383 		.result = REJECT,
8384 	},
8385 	{
8386 		"subtraction bounds (map value) variant 2",
8387 		.insns = {
8388 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8389 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8390 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8391 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8392 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8393 				     BPF_FUNC_map_lookup_elem),
8394 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
8395 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8396 			BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 6),
8397 			BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
8398 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 4),
8399 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
8400 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8401 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8402 			BPF_EXIT_INSN(),
8403 			BPF_MOV64_IMM(BPF_REG_0, 0),
8404 			BPF_EXIT_INSN(),
8405 		},
8406 		.fixup_map_hash_8b = { 3 },
8407 		.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
8408 		.result = REJECT,
8409 	},
8410 	{
8411 		"bounds check based on zero-extended MOV",
8412 		.insns = {
8413 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8414 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8415 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8416 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8417 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8418 				     BPF_FUNC_map_lookup_elem),
8419 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8420 			/* r2 = 0x0000'0000'ffff'ffff */
8421 			BPF_MOV32_IMM(BPF_REG_2, 0xffffffff),
8422 			/* r2 = 0 */
8423 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
8424 			/* no-op */
8425 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
8426 			/* access at offset 0 */
8427 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8428 			/* exit */
8429 			BPF_MOV64_IMM(BPF_REG_0, 0),
8430 			BPF_EXIT_INSN(),
8431 		},
8432 		.fixup_map_hash_8b = { 3 },
8433 		.result = ACCEPT,
8434 	},
8435 	{
8436 		"bounds check based on sign-extended MOV. test1",
8437 		.insns = {
8438 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8439 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8440 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8441 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8442 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8443 				     BPF_FUNC_map_lookup_elem),
8444 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8445 			/* r2 = 0xffff'ffff'ffff'ffff */
8446 			BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
8447 			/* r2 = 0xffff'ffff */
8448 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
8449 			/* r0 = <oob pointer> */
8450 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
8451 			/* access to OOB pointer */
8452 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8453 			/* exit */
8454 			BPF_MOV64_IMM(BPF_REG_0, 0),
8455 			BPF_EXIT_INSN(),
8456 		},
8457 		.fixup_map_hash_8b = { 3 },
8458 		.errstr = "map_value pointer and 4294967295",
8459 		.result = REJECT,
8460 	},
8461 	{
8462 		"bounds check based on sign-extended MOV. test2",
8463 		.insns = {
8464 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8465 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8466 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8467 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8468 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8469 				     BPF_FUNC_map_lookup_elem),
8470 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8471 			/* r2 = 0xffff'ffff'ffff'ffff */
8472 			BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
8473 			/* r2 = 0x0fff'ffff */
8474 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36),
8475 			/* r0 = <oob pointer> */
8476 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
8477 			/* access to OOB pointer */
8478 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8479 			/* exit */
8480 			BPF_MOV64_IMM(BPF_REG_0, 0),
8481 			BPF_EXIT_INSN(),
8482 		},
8483 		.fixup_map_hash_8b = { 3 },
8484 		.errstr = "R0 min value is outside of the array range",
8485 		.result = REJECT,
8486 	},
8487 	{
8488 		"bounds check based on reg_off + var_off + insn_off. test1",
8489 		.insns = {
8490 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
8491 				    offsetof(struct __sk_buff, mark)),
8492 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8493 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8494 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8495 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8496 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8497 				     BPF_FUNC_map_lookup_elem),
8498 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8499 			BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
8500 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1),
8501 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
8502 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
8503 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
8504 			BPF_MOV64_IMM(BPF_REG_0, 0),
8505 			BPF_EXIT_INSN(),
8506 		},
8507 		.fixup_map_hash_8b = { 4 },
8508 		.errstr = "value_size=8 off=1073741825",
8509 		.result = REJECT,
8510 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
8511 	},
8512 	{
8513 		"bounds check based on reg_off + var_off + insn_off. test2",
8514 		.insns = {
8515 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
8516 				    offsetof(struct __sk_buff, mark)),
8517 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8518 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8519 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8520 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8521 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8522 				     BPF_FUNC_map_lookup_elem),
8523 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8524 			BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
8525 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1),
8526 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
8527 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
8528 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
8529 			BPF_MOV64_IMM(BPF_REG_0, 0),
8530 			BPF_EXIT_INSN(),
8531 		},
8532 		.fixup_map_hash_8b = { 4 },
8533 		.errstr = "value 1073741823",
8534 		.result = REJECT,
8535 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
8536 	},
8537 	{
8538 		"bounds check after truncation of non-boundary-crossing range",
8539 		.insns = {
8540 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8541 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8542 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8543 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8544 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8545 				     BPF_FUNC_map_lookup_elem),
8546 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8547 			/* r1 = [0x00, 0xff] */
8548 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8549 			BPF_MOV64_IMM(BPF_REG_2, 1),
8550 			/* r2 = 0x10'0000'0000 */
8551 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36),
8552 			/* r1 = [0x10'0000'0000, 0x10'0000'00ff] */
8553 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
8554 			/* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */
8555 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
8556 			/* r1 = [0x00, 0xff] */
8557 			BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff),
8558 			/* r1 = 0 */
8559 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
8560 			/* no-op */
8561 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8562 			/* access at offset 0 */
8563 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8564 			/* exit */
8565 			BPF_MOV64_IMM(BPF_REG_0, 0),
8566 			BPF_EXIT_INSN(),
8567 		},
8568 		.fixup_map_hash_8b = { 3 },
8569 		.result = ACCEPT,
8570 	},
8571 	{
8572 		"bounds check after truncation of boundary-crossing range (1)",
8573 		.insns = {
8574 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8575 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8576 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8577 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8578 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8579 				     BPF_FUNC_map_lookup_elem),
8580 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8581 			/* r1 = [0x00, 0xff] */
8582 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8583 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
8584 			/* r1 = [0xffff'ff80, 0x1'0000'007f] */
8585 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
8586 			/* r1 = [0xffff'ff80, 0xffff'ffff] or
8587 			 *      [0x0000'0000, 0x0000'007f]
8588 			 */
8589 			BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0),
8590 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
8591 			/* r1 = [0x00, 0xff] or
8592 			 *      [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
8593 			 */
8594 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
8595 			/* r1 = 0 or
8596 			 *      [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
8597 			 */
8598 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
8599 			/* no-op or OOB pointer computation */
8600 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8601 			/* potentially OOB access */
8602 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8603 			/* exit */
8604 			BPF_MOV64_IMM(BPF_REG_0, 0),
8605 			BPF_EXIT_INSN(),
8606 		},
8607 		.fixup_map_hash_8b = { 3 },
8608 		/* not actually fully unbounded, but the bound is very high */
8609 		.errstr = "R0 unbounded memory access",
8610 		.result = REJECT,
8611 	},
8612 	{
8613 		"bounds check after truncation of boundary-crossing range (2)",
8614 		.insns = {
8615 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8616 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8617 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8618 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8619 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8620 				     BPF_FUNC_map_lookup_elem),
8621 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8622 			/* r1 = [0x00, 0xff] */
8623 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8624 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
8625 			/* r1 = [0xffff'ff80, 0x1'0000'007f] */
8626 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
8627 			/* r1 = [0xffff'ff80, 0xffff'ffff] or
8628 			 *      [0x0000'0000, 0x0000'007f]
8629 			 * difference to previous test: truncation via MOV32
8630 			 * instead of ALU32.
8631 			 */
8632 			BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),
8633 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
8634 			/* r1 = [0x00, 0xff] or
8635 			 *      [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
8636 			 */
8637 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
8638 			/* r1 = 0 or
8639 			 *      [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
8640 			 */
8641 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
8642 			/* no-op or OOB pointer computation */
8643 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8644 			/* potentially OOB access */
8645 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8646 			/* exit */
8647 			BPF_MOV64_IMM(BPF_REG_0, 0),
8648 			BPF_EXIT_INSN(),
8649 		},
8650 		.fixup_map_hash_8b = { 3 },
8651 		/* not actually fully unbounded, but the bound is very high */
8652 		.errstr = "R0 unbounded memory access",
8653 		.result = REJECT,
8654 	},
8655 	{
8656 		"bounds check after wrapping 32-bit addition",
8657 		.insns = {
8658 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8659 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8660 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8661 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8662 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8663 				     BPF_FUNC_map_lookup_elem),
8664 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
8665 			/* r1 = 0x7fff'ffff */
8666 			BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff),
8667 			/* r1 = 0xffff'fffe */
8668 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
8669 			/* r1 = 0 */
8670 			BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2),
8671 			/* no-op */
8672 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8673 			/* access at offset 0 */
8674 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8675 			/* exit */
8676 			BPF_MOV64_IMM(BPF_REG_0, 0),
8677 			BPF_EXIT_INSN(),
8678 		},
8679 		.fixup_map_hash_8b = { 3 },
8680 		.result = ACCEPT,
8681 	},
8682 	{
8683 		"bounds check after shift with oversized count operand",
8684 		.insns = {
8685 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8686 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8687 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8688 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8689 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8690 				     BPF_FUNC_map_lookup_elem),
8691 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
8692 			BPF_MOV64_IMM(BPF_REG_2, 32),
8693 			BPF_MOV64_IMM(BPF_REG_1, 1),
8694 			/* r1 = (u32)1 << (u32)32 = ? */
8695 			BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
8696 			/* r1 = [0x0000, 0xffff] */
8697 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff),
8698 			/* computes unknown pointer, potentially OOB */
8699 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8700 			/* potentially OOB access */
8701 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8702 			/* exit */
8703 			BPF_MOV64_IMM(BPF_REG_0, 0),
8704 			BPF_EXIT_INSN(),
8705 		},
8706 		.fixup_map_hash_8b = { 3 },
8707 		.errstr = "R0 max value is outside of the array range",
8708 		.result = REJECT,
8709 	},
8710 	{
8711 		"bounds check after right shift of maybe-negative number",
8712 		.insns = {
8713 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8714 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8715 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8716 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8717 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8718 				     BPF_FUNC_map_lookup_elem),
8719 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
8720 			/* r1 = [0x00, 0xff] */
8721 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8722 			/* r1 = [-0x01, 0xfe] */
8723 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
8724 			/* r1 = 0 or 0xff'ffff'ffff'ffff */
8725 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
8726 			/* r1 = 0 or 0xffff'ffff'ffff */
8727 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
8728 			/* computes unknown pointer, potentially OOB */
8729 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8730 			/* potentially OOB access */
8731 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8732 			/* exit */
8733 			BPF_MOV64_IMM(BPF_REG_0, 0),
8734 			BPF_EXIT_INSN(),
8735 		},
8736 		.fixup_map_hash_8b = { 3 },
8737 		.errstr = "R0 unbounded memory access",
8738 		.result = REJECT,
8739 	},
8740 	{
8741 		"bounds check map access with off+size signed 32bit overflow. test1",
8742 		.insns = {
8743 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8744 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8745 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8746 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8747 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8748 				     BPF_FUNC_map_lookup_elem),
8749 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
8750 			BPF_EXIT_INSN(),
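			/* The offsets in these four tests are chosen so that
			 * pointer offset plus access size would overflow a
			 * signed 32-bit value; the verifier must reject them
			 * instead of letting the offset wrap.
			 */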
8751 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe),
8752 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
8753 			BPF_JMP_A(0),
8754 			BPF_EXIT_INSN(),
8755 		},
8756 		.fixup_map_hash_8b = { 3 },
8757 		.errstr = "map_value pointer and 2147483646",
8758 		.result = REJECT,
8759 	},
8760 	{
8761 		"bounds check map access with off+size signed 32bit overflow. test2",
8762 		.insns = {
8763 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8764 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8765 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8766 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8767 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8768 				     BPF_FUNC_map_lookup_elem),
8769 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
8770 			BPF_EXIT_INSN(),
8771 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
8772 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
8773 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
8774 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
8775 			BPF_JMP_A(0),
8776 			BPF_EXIT_INSN(),
8777 		},
8778 		.fixup_map_hash_8b = { 3 },
8779 		.errstr = "pointer offset 1073741822",
8780 		.result = REJECT,
8781 	},
8782 	{
8783 		"bounds check map access with off+size signed 32bit overflow. test3",
8784 		.insns = {
8785 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8786 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8787 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8788 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8789 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8790 				     BPF_FUNC_map_lookup_elem),
8791 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
8792 			BPF_EXIT_INSN(),
8793 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
8794 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
8795 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
8796 			BPF_JMP_A(0),
8797 			BPF_EXIT_INSN(),
8798 		},
8799 		.fixup_map_hash_8b = { 3 },
8800 		.errstr = "pointer offset -1073741822",
8801 		.result = REJECT,
8802 	},
8803 	{
8804 		"bounds check map access with off+size signed 32bit overflow. test4",
8805 		.insns = {
8806 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8807 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8808 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8809 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8810 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8811 				     BPF_FUNC_map_lookup_elem),
8812 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
8813 			BPF_EXIT_INSN(),
8814 			BPF_MOV64_IMM(BPF_REG_1, 1000000),
8815 			BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000),
8816 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8817 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
8818 			BPF_JMP_A(0),
8819 			BPF_EXIT_INSN(),
8820 		},
8821 		.fixup_map_hash_8b = { 3 },
8822 		.errstr = "map_value pointer and 1000000000000",
8823 		.result = REJECT,
8824 	},
8825 	{
8826 		"pointer/scalar confusion in state equality check (way 1)",
8827 		.insns = {
8828 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8829 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8830 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8831 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8832 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8833 				     BPF_FUNC_map_lookup_elem),
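			/* One path below leaves a scalar read from the map
			 * value in r0, the other leaves the frame pointer;
			 * state pruning must not treat the two as equivalent,
			 * and unpriv must not leak the pointer as retval.
			 */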
8834 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
8835 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
8836 			BPF_JMP_A(1),
8837 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
8838 			BPF_JMP_A(0),
8839 			BPF_EXIT_INSN(),
8840 		},
8841 		.fixup_map_hash_8b = { 3 },
8842 		.result = ACCEPT,
8843 		.retval = POINTER_VALUE,
8844 		.result_unpriv = REJECT,
8845 		.errstr_unpriv = "R0 leaks addr as return value",
8846 	},
8847 	{
8848 		"pointer/scalar confusion in state equality check (way 2)",
8849 		.insns = {
8850 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8851 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8852 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8853 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8854 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8855 				     BPF_FUNC_map_lookup_elem),
8856 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
8857 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
8858 			BPF_JMP_A(1),
8859 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
8860 			BPF_EXIT_INSN(),
8861 		},
8862 		.fixup_map_hash_8b = { 3 },
8863 		.result = ACCEPT,
8864 		.retval = POINTER_VALUE,
8865 		.result_unpriv = REJECT,
8866 		.errstr_unpriv = "R0 leaks addr as return value",
8867 	},
8868 	{
8869 		"variable-offset ctx access",
8870 		.insns = {
8871 			/* Get an unknown value */
8872 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
8873 			/* Make it small and 4-byte aligned */
8874 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
8875 			/* add it to skb.  We now have either &skb->len or
8876 			 * &skb->pkt_type, but we don't know which
8877 			 */
8878 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
8879 			/* dereference it */
8880 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
8881 			BPF_EXIT_INSN(),
8882 		},
8883 		.errstr = "variable ctx access var_off=(0x0; 0x4)",
8884 		.result = REJECT,
8885 		.prog_type = BPF_PROG_TYPE_LWT_IN,
8886 	},
8887 	{
8888 		"variable-offset stack access",
8889 		.insns = {
8890 			/* Fill the top 8 bytes of the stack */
8891 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8892 			/* Get an unknown value */
8893 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
8894 			/* Make it small and 4-byte aligned */
8895 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
8896 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
8897 			/* add it to fp.  We now have either fp-4 or fp-8, but
8898 			 * we don't know which
8899 			 */
8900 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
8901 			/* dereference it */
8902 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
8903 			BPF_EXIT_INSN(),
8904 		},
8905 		.errstr = "variable stack access var_off=(0xfffffffffffffff8; 0x4)",
8906 		.result = REJECT,
8907 		.prog_type = BPF_PROG_TYPE_LWT_IN,
8908 	},
8909 	{
8910 		"indirect variable-offset stack access",
8911 		.insns = {
8912 			/* Fill the top 8 bytes of the stack */
8913 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8914 			/* Get an unknown value */
8915 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
8916 			/* Make it small and 4-byte aligned */
8917 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
8918 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
8919 			/* add it to fp.  We now have either fp-4 or fp-8, but
8920 			 * we don't know which
8921 			 */
8922 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
8923 			/* dereference it indirectly */
8924 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8925 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8926 				     BPF_FUNC_map_lookup_elem),
8927 			BPF_MOV64_IMM(BPF_REG_0, 0),
8928 			BPF_EXIT_INSN(),
8929 		},
8930 		.fixup_map_hash_8b = { 5 },
8931 		.errstr = "variable stack read R2",
8932 		.result = REJECT,
8933 		.prog_type = BPF_PROG_TYPE_LWT_IN,
8934 	},
8935 	{
8936 		"direct stack access with 32-bit wraparound. test1",
8937 		.insns = {
8938 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8939 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
8940 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
8941 			BPF_MOV32_IMM(BPF_REG_0, 0),
8942 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8943 			BPF_EXIT_INSN(),
8944 		},
8945 		.errstr = "fp pointer and 2147483647",
8946 		.result = REJECT,
8947 	},
8948 	{
8949 		"direct stack access with 32-bit wraparound. test2",
8950 		.insns = {
8951 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8952 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
8953 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
8954 			BPF_MOV32_IMM(BPF_REG_0, 0),
8955 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8956 			BPF_EXIT_INSN(),
8957 		},
8958 		.errstr = "fp pointer and 1073741823",
8959 		.result = REJECT,
8960 	},
8961 	{
8962 		"direct stack access with 32-bit wraparound. test3",
8963 		.insns = {
8964 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
8965 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
8966 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
8967 			BPF_MOV32_IMM(BPF_REG_0, 0),
8968 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8969 			BPF_EXIT_INSN(),
8970 		},
8971 		.errstr = "fp pointer offset 1073741822",
8972 		.result = REJECT,
8973 	},
8974 	{
8975 		"liveness pruning and write screening",
8976 		.insns = {
8977 			/* Get an unknown value */
8978 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
8979 			/* branch conditions teach us nothing about R2 */
8980 			BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
8981 			BPF_MOV64_IMM(BPF_REG_0, 0),
8982 			BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
8983 			BPF_MOV64_IMM(BPF_REG_0, 0),
8984 			BPF_EXIT_INSN(),
8985 		},
8986 		.errstr = "R0 !read_ok",
8987 		.result = REJECT,
8988 		.prog_type = BPF_PROG_TYPE_LWT_IN,
8989 	},
8990 	{
8991 		"varlen_map_value_access pruning",
8992 		.insns = {
8993 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8994 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8995 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8996 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8997 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8998 				     BPF_FUNC_map_lookup_elem),
8999 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
9000 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
9001 			BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
9002 			BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
9003 			BPF_MOV32_IMM(BPF_REG_1, 0),
9004 			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
9005 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9006 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
9007 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
9008 				   offsetof(struct test_val, foo)),
9009 			BPF_EXIT_INSN(),
9010 		},
9011 		.fixup_map_hash_48b = { 3 },
9012 		.errstr_unpriv = "R0 leaks addr",
9013 		.errstr = "R0 unbounded memory access",
9014 		.result_unpriv = REJECT,
9015 		.result = REJECT,
9016 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9017 	},
9018 	{
9019 		"invalid 64-bit BPF_END",
9020 		.insns = {
9021 			BPF_MOV32_IMM(BPF_REG_0, 0),
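			/* BPF_END is only defined for the 32-bit BPF_ALU
			 * class; encoding it with BPF_ALU64 yields opcode
			 * 0xd7, which must be rejected as unknown.
			 */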
9022 			{
9023 				.code  = BPF_ALU64 | BPF_END | BPF_TO_LE,
9024 				.dst_reg = BPF_REG_0,
9025 				.src_reg = 0,
9026 				.off   = 0,
9027 				.imm   = 32,
9028 			},
9029 			BPF_EXIT_INSN(),
9030 		},
9031 		.errstr = "unknown opcode d7",
9032 		.result = REJECT,
9033 	},
9034 	{
9035 		"XDP, using ifindex from netdev",
9036 		.insns = {
9037 			BPF_MOV64_IMM(BPF_REG_0, 0),
9038 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9039 				    offsetof(struct xdp_md, ingress_ifindex)),
9040 			BPF_JMP_IMM(BPF_JLT, BPF_REG_2, 1, 1),
9041 			BPF_MOV64_IMM(BPF_REG_0, 1),
9042 			BPF_EXIT_INSN(),
9043 		},
9044 		.result = ACCEPT,
9045 		.prog_type = BPF_PROG_TYPE_XDP,
9046 		.retval = 1,
9047 	},
9048 	{
9049 		"meta access, test1",
9050 		.insns = {
9051 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9052 				    offsetof(struct xdp_md, data_meta)),
9053 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9054 				    offsetof(struct xdp_md, data)),
9055 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
9056 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9057 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
9058 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9059 			BPF_MOV64_IMM(BPF_REG_0, 0),
9060 			BPF_EXIT_INSN(),
9061 		},
9062 		.result = ACCEPT,
9063 		.prog_type = BPF_PROG_TYPE_XDP,
9064 	},
9065 	{
9066 		"meta access, test2",
9067 		.insns = {
9068 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9069 				    offsetof(struct xdp_md, data_meta)),
9070 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9071 				    offsetof(struct xdp_md, data)),
9072 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
9073 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 8),
9074 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
9075 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
9076 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
9077 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9078 			BPF_MOV64_IMM(BPF_REG_0, 0),
9079 			BPF_EXIT_INSN(),
9080 		},
9081 		.result = REJECT,
9082 		.errstr = "invalid access to packet, off=-8",
9083 		.prog_type = BPF_PROG_TYPE_XDP,
9084 	},
9085 	{
9086 		"meta access, test3",
9087 		.insns = {
9088 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9089 				    offsetof(struct xdp_md, data_meta)),
9090 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9091 				    offsetof(struct xdp_md, data_end)),
9092 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
9093 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9094 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
9095 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9096 			BPF_MOV64_IMM(BPF_REG_0, 0),
9097 			BPF_EXIT_INSN(),
9098 		},
9099 		.result = REJECT,
9100 		.errstr = "invalid access to packet",
9101 		.prog_type = BPF_PROG_TYPE_XDP,
9102 	},
9103 	{
9104 		"meta access, test4",
9105 		.insns = {
9106 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9107 				    offsetof(struct xdp_md, data_meta)),
9108 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9109 				    offsetof(struct xdp_md, data_end)),
9110 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
9111 				    offsetof(struct xdp_md, data)),
9112 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
9113 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9114 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
9115 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9116 			BPF_MOV64_IMM(BPF_REG_0, 0),
9117 			BPF_EXIT_INSN(),
9118 		},
9119 		.result = REJECT,
9120 		.errstr = "invalid access to packet",
9121 		.prog_type = BPF_PROG_TYPE_XDP,
9122 	},
9123 	{
9124 		"meta access, test5",
9125 		.insns = {
9126 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9127 				    offsetof(struct xdp_md, data_meta)),
9128 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
9129 				    offsetof(struct xdp_md, data)),
9130 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
9131 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9132 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_4, 3),
9133 			BPF_MOV64_IMM(BPF_REG_2, -8),
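			/* bpf_xdp_adjust_meta() invalidates all previously
			 * derived packet pointers, so the read through r3
			 * below must fail.
			 */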
9134 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9135 				     BPF_FUNC_xdp_adjust_meta),
9136 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
9137 			BPF_MOV64_IMM(BPF_REG_0, 0),
9138 			BPF_EXIT_INSN(),
9139 		},
9140 		.result = REJECT,
9141 		.errstr = "R3 !read_ok",
9142 		.prog_type = BPF_PROG_TYPE_XDP,
9143 	},
9144 	{
9145 		"meta access, test6",
9146 		.insns = {
9147 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9148 				    offsetof(struct xdp_md, data_meta)),
9149 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9150 				    offsetof(struct xdp_md, data)),
9151 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
9152 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9153 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
9154 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
9155 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_0, 1),
9156 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9157 			BPF_MOV64_IMM(BPF_REG_0, 0),
9158 			BPF_EXIT_INSN(),
9159 		},
9160 		.result = REJECT,
9161 		.errstr = "invalid access to packet",
9162 		.prog_type = BPF_PROG_TYPE_XDP,
9163 	},
9164 	{
9165 		"meta access, test7",
9166 		.insns = {
9167 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9168 				    offsetof(struct xdp_md, data_meta)),
9169 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9170 				    offsetof(struct xdp_md, data)),
9171 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
9172 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9173 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
9174 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
9175 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
9176 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9177 			BPF_MOV64_IMM(BPF_REG_0, 0),
9178 			BPF_EXIT_INSN(),
9179 		},
9180 		.result = ACCEPT,
9181 		.prog_type = BPF_PROG_TYPE_XDP,
9182 	},
9183 	{
9184 		"meta access, test8",
9185 		.insns = {
9186 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9187 				    offsetof(struct xdp_md, data_meta)),
9188 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9189 				    offsetof(struct xdp_md, data)),
9190 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
9191 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
9192 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
9193 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9194 			BPF_MOV64_IMM(BPF_REG_0, 0),
9195 			BPF_EXIT_INSN(),
9196 		},
9197 		.result = ACCEPT,
9198 		.prog_type = BPF_PROG_TYPE_XDP,
9199 	},
9200 	{
9201 		"meta access, test9",
9202 		.insns = {
9203 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9204 				    offsetof(struct xdp_md, data_meta)),
9205 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9206 				    offsetof(struct xdp_md, data)),
9207 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
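			/* Same as test8, but bounded one byte past 0xFFFF,
			 * which is beyond the maximum packet offset the
			 * verifier will track, so no usable range is
			 * established and the access is rejected.
			 */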
9208 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
9209 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
9210 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
9211 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9212 			BPF_MOV64_IMM(BPF_REG_0, 0),
9213 			BPF_EXIT_INSN(),
9214 		},
9215 		.result = REJECT,
9216 		.errstr = "invalid access to packet",
9217 		.prog_type = BPF_PROG_TYPE_XDP,
9218 	},
9219 	{
9220 		"meta access, test10",
9221 		.insns = {
9222 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9223 				    offsetof(struct xdp_md, data_meta)),
9224 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9225 				    offsetof(struct xdp_md, data)),
9226 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
9227 				    offsetof(struct xdp_md, data_end)),
9228 			BPF_MOV64_IMM(BPF_REG_5, 42),
9229 			BPF_MOV64_IMM(BPF_REG_6, 24),
9230 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
9231 			BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
9232 			BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
9233 			BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
9234 			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_5),
9235 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
9236 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
9237 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
9238 			BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_5, 1),
9239 			BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
9240 			BPF_MOV64_IMM(BPF_REG_0, 0),
9241 			BPF_EXIT_INSN(),
9242 		},
9243 		.result = REJECT,
9244 		.errstr = "invalid access to packet",
9245 		.prog_type = BPF_PROG_TYPE_XDP,
9246 	},
9247 	{
9248 		"meta access, test11",
9249 		.insns = {
9250 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9251 				    offsetof(struct xdp_md, data_meta)),
9252 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9253 				    offsetof(struct xdp_md, data)),
9254 			BPF_MOV64_IMM(BPF_REG_5, 42),
9255 			BPF_MOV64_IMM(BPF_REG_6, 24),
9256 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
9257 			BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
9258 			BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
9259 			BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
9260 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_5),
9261 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
9262 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
9263 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
9264 			BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_3, 1),
9265 			BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_5, 0),
9266 			BPF_MOV64_IMM(BPF_REG_0, 0),
9267 			BPF_EXIT_INSN(),
9268 		},
9269 		.result = ACCEPT,
9270 		.prog_type = BPF_PROG_TYPE_XDP,
9271 	},
9272 	{
9273 		"meta access, test12",
9274 		.insns = {
9275 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9276 				    offsetof(struct xdp_md, data_meta)),
9277 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9278 				    offsetof(struct xdp_md, data)),
9279 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
9280 				    offsetof(struct xdp_md, data_end)),
9281 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
9282 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
9283 			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 5),
9284 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
9285 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
9286 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
9287 			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 1),
9288 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9289 			BPF_MOV64_IMM(BPF_REG_0, 0),
9290 			BPF_EXIT_INSN(),
9291 		},
9292 		.result = ACCEPT,
9293 		.prog_type = BPF_PROG_TYPE_XDP,
9294 	},
9295 	{
9296 		"arithmetic ops make PTR_TO_CTX unusable",
9297 		.insns = {
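			/* Shift the ctx pointer so that a load at
			 * offsetof(mark) would actually hit the data field;
			 * any dereference of a modified ctx pointer is
			 * refused.
			 */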
9298 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
9299 				      offsetof(struct __sk_buff, data) -
9300 				      offsetof(struct __sk_buff, mark)),
9301 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9302 				    offsetof(struct __sk_buff, mark)),
9303 			BPF_EXIT_INSN(),
9304 		},
9305 		.errstr = "dereference of modified ctx ptr",
9306 		.result = REJECT,
9307 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
9308 	},
9309 	{
9310 		"pkt_end - pkt_start is allowed",
9311 		.insns = {
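			/* Subtracting two packet pointers yields a plain
			 * scalar (the packet length), which is fine to return.
			 */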
9312 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9313 				    offsetof(struct __sk_buff, data_end)),
9314 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9315 				    offsetof(struct __sk_buff, data)),
9316 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
9317 			BPF_EXIT_INSN(),
9318 		},
9319 		.result = ACCEPT,
9320 		.retval = TEST_DATA_LEN,
9321 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
9322 	},
9323 	{
9324 		"XDP pkt read, pkt_end mangling, bad access 1",
9325 		.insns = {
9326 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9327 				    offsetof(struct xdp_md, data)),
9328 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9329 				    offsetof(struct xdp_md, data_end)),
9330 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9331 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
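			/* Arithmetic on pkt_end itself is never allowed, even
			 * if a bounds check follows.
			 */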
9332 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
9333 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9334 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9335 			BPF_MOV64_IMM(BPF_REG_0, 0),
9336 			BPF_EXIT_INSN(),
9337 		},
9338 		.errstr = "R3 pointer arithmetic on pkt_end",
9339 		.result = REJECT,
9340 		.prog_type = BPF_PROG_TYPE_XDP,
9341 	},
9342 	{
9343 		"XDP pkt read, pkt_end mangling, bad access 2",
9344 		.insns = {
9345 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9346 				    offsetof(struct xdp_md, data)),
9347 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9348 				    offsetof(struct xdp_md, data_end)),
9349 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9350 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9351 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_3, 8),
9352 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9353 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9354 			BPF_MOV64_IMM(BPF_REG_0, 0),
9355 			BPF_EXIT_INSN(),
9356 		},
9357 		.errstr = "R3 pointer arithmetic on pkt_end",
9358 		.result = REJECT,
9359 		.prog_type = BPF_PROG_TYPE_XDP,
9360 	},
9361 	{
9362 		"XDP pkt read, pkt_data' > pkt_end, good access",
9363 		.insns = {
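			/* The following tests cover each comparison direction
			 * against pkt_end: "good access" reads inside the
			 * range proven by the branch, "bad access 1" reads
			 * outside that range, and "bad access 2" either makes
			 * the branch a no-op or loads on the unproven path.
			 */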
9364 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9365 				    offsetof(struct xdp_md, data)),
9366 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9367 				    offsetof(struct xdp_md, data_end)),
9368 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9369 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9370 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9371 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9372 			BPF_MOV64_IMM(BPF_REG_0, 0),
9373 			BPF_EXIT_INSN(),
9374 		},
9375 		.result = ACCEPT,
9376 		.prog_type = BPF_PROG_TYPE_XDP,
9377 	},
9378 	{
9379 		"XDP pkt read, pkt_data' > pkt_end, bad access 1",
9380 		.insns = {
9381 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9382 				    offsetof(struct xdp_md, data)),
9383 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9384 				    offsetof(struct xdp_md, data_end)),
9385 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9386 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9387 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9388 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9389 			BPF_MOV64_IMM(BPF_REG_0, 0),
9390 			BPF_EXIT_INSN(),
9391 		},
9392 		.errstr = "R1 offset is outside of the packet",
9393 		.result = REJECT,
9394 		.prog_type = BPF_PROG_TYPE_XDP,
9395 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9396 	},
9397 	{
9398 		"XDP pkt read, pkt_data' > pkt_end, bad access 2",
9399 		.insns = {
9400 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9401 				    offsetof(struct xdp_md, data)),
9402 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9403 				    offsetof(struct xdp_md, data_end)),
9404 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9405 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9406 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
9407 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9408 			BPF_MOV64_IMM(BPF_REG_0, 0),
9409 			BPF_EXIT_INSN(),
9410 		},
9411 		.errstr = "R1 offset is outside of the packet",
9412 		.result = REJECT,
9413 		.prog_type = BPF_PROG_TYPE_XDP,
9414 	},
9415 	{
9416 		"XDP pkt read, pkt_end > pkt_data', good access",
9417 		.insns = {
9418 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9419 				    offsetof(struct xdp_md, data)),
9420 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9421 				    offsetof(struct xdp_md, data_end)),
9422 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9423 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9424 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9425 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9426 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9427 			BPF_MOV64_IMM(BPF_REG_0, 0),
9428 			BPF_EXIT_INSN(),
9429 		},
9430 		.result = ACCEPT,
9431 		.prog_type = BPF_PROG_TYPE_XDP,
9432 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9433 	},
9434 	{
9435 		"XDP pkt read, pkt_end > pkt_data', bad access 1",
9436 		.insns = {
9437 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9438 				    offsetof(struct xdp_md, data)),
9439 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9440 				    offsetof(struct xdp_md, data_end)),
9441 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9442 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9443 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9444 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9445 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9446 			BPF_MOV64_IMM(BPF_REG_0, 0),
9447 			BPF_EXIT_INSN(),
9448 		},
9449 		.errstr = "R1 offset is outside of the packet",
9450 		.result = REJECT,
9451 		.prog_type = BPF_PROG_TYPE_XDP,
9452 	},
9453 	{
9454 		"XDP pkt read, pkt_end > pkt_data', bad access 2",
9455 		.insns = {
9456 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9457 				    offsetof(struct xdp_md, data)),
9458 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9459 				    offsetof(struct xdp_md, data_end)),
9460 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9461 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9462 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9463 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9464 			BPF_MOV64_IMM(BPF_REG_0, 0),
9465 			BPF_EXIT_INSN(),
9466 		},
9467 		.errstr = "R1 offset is outside of the packet",
9468 		.result = REJECT,
9469 		.prog_type = BPF_PROG_TYPE_XDP,
9470 	},
9471 	{
9472 		"XDP pkt read, pkt_data' < pkt_end, good access",
9473 		.insns = {
9474 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9475 				    offsetof(struct xdp_md, data)),
9476 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9477 				    offsetof(struct xdp_md, data_end)),
9478 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9479 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9480 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9481 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9482 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9483 			BPF_MOV64_IMM(BPF_REG_0, 0),
9484 			BPF_EXIT_INSN(),
9485 		},
9486 		.result = ACCEPT,
9487 		.prog_type = BPF_PROG_TYPE_XDP,
9488 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9489 	},
9490 	{
9491 		"XDP pkt read, pkt_data' < pkt_end, bad access 1",
9492 		.insns = {
9493 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9494 				    offsetof(struct xdp_md, data)),
9495 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9496 				    offsetof(struct xdp_md, data_end)),
9497 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9498 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9499 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9500 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9501 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9502 			BPF_MOV64_IMM(BPF_REG_0, 0),
9503 			BPF_EXIT_INSN(),
9504 		},
9505 		.errstr = "R1 offset is outside of the packet",
9506 		.result = REJECT,
9507 		.prog_type = BPF_PROG_TYPE_XDP,
9508 	},
9509 	{
9510 		"XDP pkt read, pkt_data' < pkt_end, bad access 2",
9511 		.insns = {
9512 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9513 				    offsetof(struct xdp_md, data)),
9514 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9515 				    offsetof(struct xdp_md, data_end)),
9516 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9517 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9518 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9519 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9520 			BPF_MOV64_IMM(BPF_REG_0, 0),
9521 			BPF_EXIT_INSN(),
9522 		},
9523 		.errstr = "R1 offset is outside of the packet",
9524 		.result = REJECT,
9525 		.prog_type = BPF_PROG_TYPE_XDP,
9526 	},
9527 	{
9528 		"XDP pkt read, pkt_end < pkt_data', good access",
9529 		.insns = {
9530 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9531 				    offsetof(struct xdp_md, data)),
9532 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9533 				    offsetof(struct xdp_md, data_end)),
9534 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9535 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9536 			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
9537 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9538 			BPF_MOV64_IMM(BPF_REG_0, 0),
9539 			BPF_EXIT_INSN(),
9540 		},
9541 		.result = ACCEPT,
9542 		.prog_type = BPF_PROG_TYPE_XDP,
9543 	},
9544 	{
9545 		"XDP pkt read, pkt_end < pkt_data', bad access 1",
9546 		.insns = {
9547 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9548 				    offsetof(struct xdp_md, data)),
9549 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9550 				    offsetof(struct xdp_md, data_end)),
9551 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9552 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9553 			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
9554 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9555 			BPF_MOV64_IMM(BPF_REG_0, 0),
9556 			BPF_EXIT_INSN(),
9557 		},
9558 		.errstr = "R1 offset is outside of the packet",
9559 		.result = REJECT,
9560 		.prog_type = BPF_PROG_TYPE_XDP,
9561 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9562 	},
9563 	{
9564 		"XDP pkt read, pkt_end < pkt_data', bad access 2",
9565 		.insns = {
9566 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9567 				    offsetof(struct xdp_md, data)),
9568 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9569 				    offsetof(struct xdp_md, data_end)),
9570 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9571 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9572 			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
9573 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9574 			BPF_MOV64_IMM(BPF_REG_0, 0),
9575 			BPF_EXIT_INSN(),
9576 		},
9577 		.errstr = "R1 offset is outside of the packet",
9578 		.result = REJECT,
9579 		.prog_type = BPF_PROG_TYPE_XDP,
9580 	},
9581 	{
9582 		"XDP pkt read, pkt_data' >= pkt_end, good access",
9583 		.insns = {
9584 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9585 				    offsetof(struct xdp_md, data)),
9586 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9587 				    offsetof(struct xdp_md, data_end)),
9588 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9589 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9590 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
9591 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9592 			BPF_MOV64_IMM(BPF_REG_0, 0),
9593 			BPF_EXIT_INSN(),
9594 		},
9595 		.result = ACCEPT,
9596 		.prog_type = BPF_PROG_TYPE_XDP,
9597 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9598 	},
9599 	{
9600 		"XDP pkt read, pkt_data' >= pkt_end, bad access 1",
9601 		.insns = {
9602 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9603 				    offsetof(struct xdp_md, data)),
9604 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9605 				    offsetof(struct xdp_md, data_end)),
9606 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9607 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9608 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
9609 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9610 			BPF_MOV64_IMM(BPF_REG_0, 0),
9611 			BPF_EXIT_INSN(),
9612 		},
9613 		.errstr = "R1 offset is outside of the packet",
9614 		.result = REJECT,
9615 		.prog_type = BPF_PROG_TYPE_XDP,
9616 	},
9617 	{
9618 		"XDP pkt read, pkt_data' >= pkt_end, bad access 2",
9619 		.insns = {
9620 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9621 				    offsetof(struct xdp_md, data)),
9622 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9623 				    offsetof(struct xdp_md, data_end)),
9624 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9625 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9626 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
9627 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9628 			BPF_MOV64_IMM(BPF_REG_0, 0),
9629 			BPF_EXIT_INSN(),
9630 		},
9631 		.errstr = "R1 offset is outside of the packet",
9632 		.result = REJECT,
9633 		.prog_type = BPF_PROG_TYPE_XDP,
9634 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9635 	},
9636 	{
9637 		"XDP pkt read, pkt_end >= pkt_data', good access",
9638 		.insns = {
9639 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9640 				    offsetof(struct xdp_md, data)),
9641 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9642 				    offsetof(struct xdp_md, data_end)),
9643 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9644 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9645 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
9646 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9647 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9648 			BPF_MOV64_IMM(BPF_REG_0, 0),
9649 			BPF_EXIT_INSN(),
9650 		},
9651 		.result = ACCEPT,
9652 		.prog_type = BPF_PROG_TYPE_XDP,
9653 	},
9654 	{
9655 		"XDP pkt read, pkt_end >= pkt_data', bad access 1",
9656 		.insns = {
9657 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9658 				    offsetof(struct xdp_md, data)),
9659 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9660 				    offsetof(struct xdp_md, data_end)),
9661 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9662 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9663 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
9664 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9665 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9666 			BPF_MOV64_IMM(BPF_REG_0, 0),
9667 			BPF_EXIT_INSN(),
9668 		},
9669 		.errstr = "R1 offset is outside of the packet",
9670 		.result = REJECT,
9671 		.prog_type = BPF_PROG_TYPE_XDP,
9672 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9673 	},
9674 	{
9675 		"XDP pkt read, pkt_end >= pkt_data', bad access 2",
9676 		.insns = {
9677 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9678 				    offsetof(struct xdp_md, data)),
9679 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9680 				    offsetof(struct xdp_md, data_end)),
9681 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9682 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9683 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
9684 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9685 			BPF_MOV64_IMM(BPF_REG_0, 0),
9686 			BPF_EXIT_INSN(),
9687 		},
9688 		.errstr = "R1 offset is outside of the packet",
9689 		.result = REJECT,
9690 		.prog_type = BPF_PROG_TYPE_XDP,
9691 	},
9692 	{
9693 		"XDP pkt read, pkt_data' <= pkt_end, good access",
9694 		.insns = {
9695 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9696 				    offsetof(struct xdp_md, data)),
9697 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9698 				    offsetof(struct xdp_md, data_end)),
9699 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9700 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9701 			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
9702 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9703 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9704 			BPF_MOV64_IMM(BPF_REG_0, 0),
9705 			BPF_EXIT_INSN(),
9706 		},
9707 		.result = ACCEPT,
9708 		.prog_type = BPF_PROG_TYPE_XDP,
9709 	},
9710 	{
9711 		"XDP pkt read, pkt_data' <= pkt_end, bad access 1",
9712 		.insns = {
9713 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9714 				    offsetof(struct xdp_md, data)),
9715 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9716 				    offsetof(struct xdp_md, data_end)),
9717 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9718 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9719 			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
9720 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9721 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9722 			BPF_MOV64_IMM(BPF_REG_0, 0),
9723 			BPF_EXIT_INSN(),
9724 		},
9725 		.errstr = "R1 offset is outside of the packet",
9726 		.result = REJECT,
9727 		.prog_type = BPF_PROG_TYPE_XDP,
9728 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9729 	},
9730 	{
9731 		"XDP pkt read, pkt_data' <= pkt_end, bad access 2",
9732 		.insns = {
9733 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9734 				    offsetof(struct xdp_md, data)),
9735 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9736 				    offsetof(struct xdp_md, data_end)),
9737 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9738 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9739 			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
9740 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9741 			BPF_MOV64_IMM(BPF_REG_0, 0),
9742 			BPF_EXIT_INSN(),
9743 		},
9744 		.errstr = "R1 offset is outside of the packet",
9745 		.result = REJECT,
9746 		.prog_type = BPF_PROG_TYPE_XDP,
9747 	},
9748 	{
9749 		"XDP pkt read, pkt_end <= pkt_data', good access",
9750 		.insns = {
9751 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9752 				    offsetof(struct xdp_md, data)),
9753 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9754 				    offsetof(struct xdp_md, data_end)),
9755 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9756 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9757 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
9758 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9759 			BPF_MOV64_IMM(BPF_REG_0, 0),
9760 			BPF_EXIT_INSN(),
9761 		},
9762 		.result = ACCEPT,
9763 		.prog_type = BPF_PROG_TYPE_XDP,
9764 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9765 	},
9766 	{
9767 		"XDP pkt read, pkt_end <= pkt_data', bad access 1",
9768 		.insns = {
9769 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9770 				    offsetof(struct xdp_md, data)),
9771 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9772 				    offsetof(struct xdp_md, data_end)),
9773 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9774 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9775 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
9776 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9777 			BPF_MOV64_IMM(BPF_REG_0, 0),
9778 			BPF_EXIT_INSN(),
9779 		},
9780 		.errstr = "R1 offset is outside of the packet",
9781 		.result = REJECT,
9782 		.prog_type = BPF_PROG_TYPE_XDP,
9783 	},
9784 	{
9785 		"XDP pkt read, pkt_end <= pkt_data', bad access 2",
9786 		.insns = {
9787 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9788 				    offsetof(struct xdp_md, data)),
9789 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9790 				    offsetof(struct xdp_md, data_end)),
9791 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9792 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9793 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
9794 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9795 			BPF_MOV64_IMM(BPF_REG_0, 0),
9796 			BPF_EXIT_INSN(),
9797 		},
9798 		.errstr = "R1 offset is outside of the packet",
9799 		.result = REJECT,
9800 		.prog_type = BPF_PROG_TYPE_XDP,
9801 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9802 	},
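	/* The same comparison matrix is repeated below for the metadata
	 * area: pkt_meta' = data_meta + 8 is checked against pkt_data,
	 * since xdp_md->data_meta precedes xdp_md->data and is bounded
	 * by it.
	 */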
9803 	{
9804 		"XDP pkt read, pkt_meta' > pkt_data, good access",
9805 		.insns = {
9806 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9807 				    offsetof(struct xdp_md, data_meta)),
9808 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9809 				    offsetof(struct xdp_md, data)),
9810 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9811 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9812 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9813 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9814 			BPF_MOV64_IMM(BPF_REG_0, 0),
9815 			BPF_EXIT_INSN(),
9816 		},
9817 		.result = ACCEPT,
9818 		.prog_type = BPF_PROG_TYPE_XDP,
9819 	},
9820 	{
9821 		"XDP pkt read, pkt_meta' > pkt_data, bad access 1",
9822 		.insns = {
9823 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9824 				    offsetof(struct xdp_md, data_meta)),
9825 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9826 				    offsetof(struct xdp_md, data)),
9827 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9828 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9829 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9830 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9831 			BPF_MOV64_IMM(BPF_REG_0, 0),
9832 			BPF_EXIT_INSN(),
9833 		},
9834 		.errstr = "R1 offset is outside of the packet",
9835 		.result = REJECT,
9836 		.prog_type = BPF_PROG_TYPE_XDP,
9837 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9838 	},
9839 	{
9840 		"XDP pkt read, pkt_meta' > pkt_data, bad access 2",
9841 		.insns = {
9842 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9843 				    offsetof(struct xdp_md, data_meta)),
9844 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9845 				    offsetof(struct xdp_md, data)),
9846 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9847 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9848 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
9849 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9850 			BPF_MOV64_IMM(BPF_REG_0, 0),
9851 			BPF_EXIT_INSN(),
9852 		},
9853 		.errstr = "R1 offset is outside of the packet",
9854 		.result = REJECT,
9855 		.prog_type = BPF_PROG_TYPE_XDP,
9856 	},
9857 	{
9858 		"XDP pkt read, pkt_data > pkt_meta', good access",
9859 		.insns = {
9860 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9861 				    offsetof(struct xdp_md, data_meta)),
9862 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9863 				    offsetof(struct xdp_md, data)),
9864 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9865 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9866 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9867 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9868 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9869 			BPF_MOV64_IMM(BPF_REG_0, 0),
9870 			BPF_EXIT_INSN(),
9871 		},
9872 		.result = ACCEPT,
9873 		.prog_type = BPF_PROG_TYPE_XDP,
9874 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9875 	},
9876 	{
9877 		"XDP pkt read, pkt_data > pkt_meta', bad access 1",
9878 		.insns = {
9879 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9880 				    offsetof(struct xdp_md, data_meta)),
9881 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9882 				    offsetof(struct xdp_md, data)),
9883 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9884 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9885 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9886 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9887 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9888 			BPF_MOV64_IMM(BPF_REG_0, 0),
9889 			BPF_EXIT_INSN(),
9890 		},
9891 		.errstr = "R1 offset is outside of the packet",
9892 		.result = REJECT,
9893 		.prog_type = BPF_PROG_TYPE_XDP,
9894 	},
9895 	{
9896 		"XDP pkt read, pkt_data > pkt_meta', bad access 2",
9897 		.insns = {
9898 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9899 				    offsetof(struct xdp_md, data_meta)),
9900 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9901 				    offsetof(struct xdp_md, data)),
9902 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9903 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9904 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9905 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9906 			BPF_MOV64_IMM(BPF_REG_0, 0),
9907 			BPF_EXIT_INSN(),
9908 		},
9909 		.errstr = "R1 offset is outside of the packet",
9910 		.result = REJECT,
9911 		.prog_type = BPF_PROG_TYPE_XDP,
9912 	},
9913 	{
9914 		"XDP pkt read, pkt_meta' < pkt_data, good access",
9915 		.insns = {
9916 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9917 				    offsetof(struct xdp_md, data_meta)),
9918 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9919 				    offsetof(struct xdp_md, data)),
9920 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9921 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9922 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9923 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9924 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9925 			BPF_MOV64_IMM(BPF_REG_0, 0),
9926 			BPF_EXIT_INSN(),
9927 		},
9928 		.result = ACCEPT,
9929 		.prog_type = BPF_PROG_TYPE_XDP,
9930 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9931 	},
9932 	{
9933 		"XDP pkt read, pkt_meta' < pkt_data, bad access 1",
9934 		.insns = {
9935 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9936 				    offsetof(struct xdp_md, data_meta)),
9937 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9938 				    offsetof(struct xdp_md, data)),
9939 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9940 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9941 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9942 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9943 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9944 			BPF_MOV64_IMM(BPF_REG_0, 0),
9945 			BPF_EXIT_INSN(),
9946 		},
9947 		.errstr = "R1 offset is outside of the packet",
9948 		.result = REJECT,
9949 		.prog_type = BPF_PROG_TYPE_XDP,
9950 	},
9951 	{
9952 		"XDP pkt read, pkt_meta' < pkt_data, bad access 2",
9953 		.insns = {
9954 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9955 				    offsetof(struct xdp_md, data_meta)),
9956 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9957 				    offsetof(struct xdp_md, data)),
9958 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9959 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9960 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9961 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9962 			BPF_MOV64_IMM(BPF_REG_0, 0),
9963 			BPF_EXIT_INSN(),
9964 		},
9965 		.errstr = "R1 offset is outside of the packet",
9966 		.result = REJECT,
9967 		.prog_type = BPF_PROG_TYPE_XDP,
9968 	},
9969 	{
9970 		"XDP pkt read, pkt_data < pkt_meta', good access",
9971 		.insns = {
9972 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9973 				    offsetof(struct xdp_md, data_meta)),
9974 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9975 				    offsetof(struct xdp_md, data)),
9976 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9977 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9978 			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
9979 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9980 			BPF_MOV64_IMM(BPF_REG_0, 0),
9981 			BPF_EXIT_INSN(),
9982 		},
9983 		.result = ACCEPT,
9984 		.prog_type = BPF_PROG_TYPE_XDP,
9985 	},
9986 	{
9987 		"XDP pkt read, pkt_data < pkt_meta', bad access 1",
9988 		.insns = {
9989 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9990 				    offsetof(struct xdp_md, data_meta)),
9991 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9992 				    offsetof(struct xdp_md, data)),
9993 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9994 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9995 			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
9996 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9997 			BPF_MOV64_IMM(BPF_REG_0, 0),
9998 			BPF_EXIT_INSN(),
9999 		},
10000 		.errstr = "R1 offset is outside of the packet",
10001 		.result = REJECT,
10002 		.prog_type = BPF_PROG_TYPE_XDP,
10003 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10004 	},
10005 	{
10006 		"XDP pkt read, pkt_data < pkt_meta', bad access 2",
10007 		.insns = {
10008 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10009 				    offsetof(struct xdp_md, data_meta)),
10010 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10011 				    offsetof(struct xdp_md, data)),
10012 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10013 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10014 			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
10015 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10016 			BPF_MOV64_IMM(BPF_REG_0, 0),
10017 			BPF_EXIT_INSN(),
10018 		},
10019 		.errstr = "R1 offset is outside of the packet",
10020 		.result = REJECT,
10021 		.prog_type = BPF_PROG_TYPE_XDP,
10022 	},
10023 	{
10024 		"XDP pkt read, pkt_meta' >= pkt_data, good access",
10025 		.insns = {
10026 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10027 				    offsetof(struct xdp_md, data_meta)),
10028 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10029 				    offsetof(struct xdp_md, data)),
10030 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10031 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10032 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
10033 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10034 			BPF_MOV64_IMM(BPF_REG_0, 0),
10035 			BPF_EXIT_INSN(),
10036 		},
10037 		.result = ACCEPT,
10038 		.prog_type = BPF_PROG_TYPE_XDP,
10039 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10040 	},
10041 	{
10042 		"XDP pkt read, pkt_meta' >= pkt_data, bad access 1",
10043 		.insns = {
10044 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10045 				    offsetof(struct xdp_md, data_meta)),
10046 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10047 				    offsetof(struct xdp_md, data)),
10048 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10049 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10050 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
10051 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10052 			BPF_MOV64_IMM(BPF_REG_0, 0),
10053 			BPF_EXIT_INSN(),
10054 		},
10055 		.errstr = "R1 offset is outside of the packet",
10056 		.result = REJECT,
10057 		.prog_type = BPF_PROG_TYPE_XDP,
10058 	},
10059 	{
10060 		"XDP pkt read, pkt_meta' >= pkt_data, bad access 2",
10061 		.insns = {
10062 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10063 				    offsetof(struct xdp_md, data_meta)),
10064 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10065 				    offsetof(struct xdp_md, data)),
10066 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10067 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10068 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
10069 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10070 			BPF_MOV64_IMM(BPF_REG_0, 0),
10071 			BPF_EXIT_INSN(),
10072 		},
10073 		.errstr = "R1 offset is outside of the packet",
10074 		.result = REJECT,
10075 		.prog_type = BPF_PROG_TYPE_XDP,
10076 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10077 	},
10078 	{
10079 		"XDP pkt read, pkt_data >= pkt_meta', good access",
10080 		.insns = {
10081 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10082 				    offsetof(struct xdp_md, data_meta)),
10083 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10084 				    offsetof(struct xdp_md, data)),
10085 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10086 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10087 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
10088 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10089 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10090 			BPF_MOV64_IMM(BPF_REG_0, 0),
10091 			BPF_EXIT_INSN(),
10092 		},
10093 		.result = ACCEPT,
10094 		.prog_type = BPF_PROG_TYPE_XDP,
10095 	},
10096 	{
10097 		"XDP pkt read, pkt_data >= pkt_meta', bad access 1",
10098 		.insns = {
10099 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10100 				    offsetof(struct xdp_md, data_meta)),
10101 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10102 				    offsetof(struct xdp_md, data)),
10103 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10104 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10105 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
10106 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10107 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
10108 			BPF_MOV64_IMM(BPF_REG_0, 0),
10109 			BPF_EXIT_INSN(),
10110 		},
10111 		.errstr = "R1 offset is outside of the packet",
10112 		.result = REJECT,
10113 		.prog_type = BPF_PROG_TYPE_XDP,
10114 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10115 	},
10116 	{
10117 		"XDP pkt read, pkt_data >= pkt_meta', bad access 2",
10118 		.insns = {
10119 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10120 				    offsetof(struct xdp_md, data_meta)),
10121 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10122 				    offsetof(struct xdp_md, data)),
10123 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10124 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10125 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
10126 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10127 			BPF_MOV64_IMM(BPF_REG_0, 0),
10128 			BPF_EXIT_INSN(),
10129 		},
10130 		.errstr = "R1 offset is outside of the packet",
10131 		.result = REJECT,
10132 		.prog_type = BPF_PROG_TYPE_XDP,
10133 	},
10134 	{
10135 		"XDP pkt read, pkt_meta' <= pkt_data, good access",
10136 		.insns = {
10137 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10138 				    offsetof(struct xdp_md, data_meta)),
10139 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10140 				    offsetof(struct xdp_md, data)),
10141 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10142 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10143 			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
10144 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10145 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10146 			BPF_MOV64_IMM(BPF_REG_0, 0),
10147 			BPF_EXIT_INSN(),
10148 		},
10149 		.result = ACCEPT,
10150 		.prog_type = BPF_PROG_TYPE_XDP,
10151 	},
10152 	{
10153 		"XDP pkt read, pkt_meta' <= pkt_data, bad access 1",
10154 		.insns = {
10155 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10156 				    offsetof(struct xdp_md, data_meta)),
10157 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10158 				    offsetof(struct xdp_md, data)),
10159 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10160 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10161 			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
10162 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10163 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
10164 			BPF_MOV64_IMM(BPF_REG_0, 0),
10165 			BPF_EXIT_INSN(),
10166 		},
10167 		.errstr = "R1 offset is outside of the packet",
10168 		.result = REJECT,
10169 		.prog_type = BPF_PROG_TYPE_XDP,
10170 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10171 	},
10172 	{
10173 		"XDP pkt read, pkt_meta' <= pkt_data, bad access 2",
10174 		.insns = {
10175 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10176 				    offsetof(struct xdp_md, data_meta)),
10177 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10178 				    offsetof(struct xdp_md, data)),
10179 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10180 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10181 			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
10182 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10183 			BPF_MOV64_IMM(BPF_REG_0, 0),
10184 			BPF_EXIT_INSN(),
10185 		},
10186 		.errstr = "R1 offset is outside of the packet",
10187 		.result = REJECT,
10188 		.prog_type = BPF_PROG_TYPE_XDP,
10189 	},
10190 	{
10191 		"XDP pkt read, pkt_data <= pkt_meta', good access",
10192 		.insns = {
10193 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10194 				    offsetof(struct xdp_md, data_meta)),
10195 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10196 				    offsetof(struct xdp_md, data)),
10197 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10198 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10199 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
10200 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10201 			BPF_MOV64_IMM(BPF_REG_0, 0),
10202 			BPF_EXIT_INSN(),
10203 		},
10204 		.result = ACCEPT,
10205 		.prog_type = BPF_PROG_TYPE_XDP,
10206 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10207 	},
10208 	{
10209 		"XDP pkt read, pkt_data <= pkt_meta', bad access 1",
10210 		.insns = {
10211 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10212 				    offsetof(struct xdp_md, data_meta)),
10213 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10214 				    offsetof(struct xdp_md, data)),
10215 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10216 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10217 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
10218 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10219 			BPF_MOV64_IMM(BPF_REG_0, 0),
10220 			BPF_EXIT_INSN(),
10221 		},
10222 		.errstr = "R1 offset is outside of the packet",
10223 		.result = REJECT,
10224 		.prog_type = BPF_PROG_TYPE_XDP,
10225 	},
10226 	{
10227 		"XDP pkt read, pkt_data <= pkt_meta', bad access 2",
10228 		.insns = {
10229 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10230 				    offsetof(struct xdp_md, data_meta)),
10231 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10232 				    offsetof(struct xdp_md, data)),
10233 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10234 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10235 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
10236 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10237 			BPF_MOV64_IMM(BPF_REG_0, 0),
10238 			BPF_EXIT_INSN(),
10239 		},
10240 		.errstr = "R1 offset is outside of the packet",
10241 		.result = REJECT,
10242 		.prog_type = BPF_PROG_TYPE_XDP,
10243 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10244 	},
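	/* The "check deducing bounds from const" tests verify that signed
	 * comparisons against immediates narrow a register's bounds, and
	 * that arithmetic mixing R1 (the ctx pointer) with a scalar is
	 * rejected whenever the deduced bounds still leave the scalar
	 * unbounded or would lead to a modified ctx pointer dereference.
	 */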
10245 	{
10246 		"check deducing bounds from const, 1",
10247 		.insns = {
10248 			BPF_MOV64_IMM(BPF_REG_0, 1),
10249 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 0),
10250 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10251 			BPF_EXIT_INSN(),
10252 		},
10253 		.result = REJECT,
10254 		.errstr = "R0 tried to subtract pointer from scalar",
10255 	},
10256 	{
10257 		"check deducing bounds from const, 2",
10258 		.insns = {
10259 			BPF_MOV64_IMM(BPF_REG_0, 1),
10260 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
10261 			BPF_EXIT_INSN(),
10262 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 1, 1),
10263 			BPF_EXIT_INSN(),
10264 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
10265 			BPF_EXIT_INSN(),
10266 		},
10267 		.result = ACCEPT,
10268 		.retval = 1,
10269 	},
10270 	{
10271 		"check deducing bounds from const, 3",
10272 		.insns = {
10273 			BPF_MOV64_IMM(BPF_REG_0, 0),
10274 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
10275 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10276 			BPF_EXIT_INSN(),
10277 		},
10278 		.result = REJECT,
10279 		.errstr = "R0 tried to subtract pointer from scalar",
10280 	},
10281 	{
10282 		"check deducing bounds from const, 4",
10283 		.insns = {
10284 			BPF_MOV64_IMM(BPF_REG_0, 0),
10285 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1),
10286 			BPF_EXIT_INSN(),
10287 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
10288 			BPF_EXIT_INSN(),
10289 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
10290 			BPF_EXIT_INSN(),
10291 		},
10292 		.result = ACCEPT,
10293 	},
10294 	{
10295 		"check deducing bounds from const, 5",
10296 		.insns = {
10297 			BPF_MOV64_IMM(BPF_REG_0, 0),
10298 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
10299 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10300 			BPF_EXIT_INSN(),
10301 		},
10302 		.result = REJECT,
10303 		.errstr = "R0 tried to subtract pointer from scalar",
10304 	},
10305 	{
10306 		"check deducing bounds from const, 6",
10307 		.insns = {
10308 			BPF_MOV64_IMM(BPF_REG_0, 0),
10309 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
10310 			BPF_EXIT_INSN(),
10311 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10312 			BPF_EXIT_INSN(),
10313 		},
10314 		.result = REJECT,
10315 		.errstr = "R0 tried to subtract pointer from scalar",
10316 	},
10317 	{
10318 		"check deducing bounds from const, 7",
10319 		.insns = {
10320 			BPF_MOV64_IMM(BPF_REG_0, ~0),
10321 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
10322 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
10323 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10324 				    offsetof(struct __sk_buff, mark)),
10325 			BPF_EXIT_INSN(),
10326 		},
10327 		.result = REJECT,
10328 		.errstr = "dereference of modified ctx ptr",
10329 	},
10330 	{
10331 		"check deducing bounds from const, 8",
10332 		.insns = {
10333 			BPF_MOV64_IMM(BPF_REG_0, ~0),
10334 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
10335 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
10336 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10337 				    offsetof(struct __sk_buff, mark)),
10338 			BPF_EXIT_INSN(),
10339 		},
10340 		.result = REJECT,
10341 		.errstr = "dereference of modified ctx ptr",
10342 	},
10343 	{
10344 		"check deducing bounds from const, 9",
10345 		.insns = {
10346 			BPF_MOV64_IMM(BPF_REG_0, 0),
10347 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
10348 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10349 			BPF_EXIT_INSN(),
10350 		},
10351 		.result = REJECT,
10352 		.errstr = "R0 tried to subtract pointer from scalar",
10353 	},
10354 	{
10355 		"check deducing bounds from const, 10",
10356 		.insns = {
10357 			BPF_MOV64_IMM(BPF_REG_0, 0),
10358 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
10359 			/* Marks reg as unknown. */
10360 			BPF_ALU64_IMM(BPF_NEG, BPF_REG_0, 0),
10361 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10362 			BPF_EXIT_INSN(),
10363 		},
10364 		.result = REJECT,
10365 		.errstr = "math between ctx pointer and register with unbounded min value is not allowed",
10366 	},
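	/* For BPF_PROG_TYPE_CGROUP_SOCK the verifier also checks the
	 * return value at BPF_EXIT: R0 must be a known scalar in the
	 * range [0, 1].  The tests below probe that check with unknown,
	 * masked and out-of-range values.
	 */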
10367 	{
10368 		"bpf_exit with invalid return code. test1",
10369 		.insns = {
10370 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10371 			BPF_EXIT_INSN(),
10372 		},
10373 		.errstr = "R0 has value (0x0; 0xffffffff)",
10374 		.result = REJECT,
10375 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10376 	},
10377 	{
10378 		"bpf_exit with invalid return code. test2",
10379 		.insns = {
10380 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10381 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
10382 			BPF_EXIT_INSN(),
10383 		},
10384 		.result = ACCEPT,
10385 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10386 	},
10387 	{
10388 		"bpf_exit with invalid return code. test3",
10389 		.insns = {
10390 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10391 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 3),
10392 			BPF_EXIT_INSN(),
10393 		},
10394 		.errstr = "R0 has value (0x0; 0x3)",
10395 		.result = REJECT,
10396 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10397 	},
10398 	{
10399 		"bpf_exit with invalid return code. test4",
10400 		.insns = {
10401 			BPF_MOV64_IMM(BPF_REG_0, 1),
10402 			BPF_EXIT_INSN(),
10403 		},
10404 		.result = ACCEPT,
10405 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10406 	},
10407 	{
10408 		"bpf_exit with invalid return code. test5",
10409 		.insns = {
10410 			BPF_MOV64_IMM(BPF_REG_0, 2),
10411 			BPF_EXIT_INSN(),
10412 		},
10413 		.errstr = "R0 has value (0x2; 0x0)",
10414 		.result = REJECT,
10415 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10416 	},
10417 	{
10418 		"bpf_exit with invalid return code. test6",
10419 		.insns = {
10420 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
10421 			BPF_EXIT_INSN(),
10422 		},
10423 		.errstr = "R0 is not a known value (ctx)",
10424 		.result = REJECT,
10425 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10426 	},
10427 	{
10428 		"bpf_exit with invalid return code. test7",
10429 		.insns = {
10430 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10431 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 4),
10432 			BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_2),
10433 			BPF_EXIT_INSN(),
10434 		},
10435 		.errstr = "R0 has unknown scalar value",
10436 		.result = REJECT,
10437 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10438 	},
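	/* The "calls:" tests exercise bpf-to-bpf function calls: BPF_CALL
	 * with src_reg == BPF_PSEUDO_CALL (the 1 in BPF_RAW_INSN) and imm
	 * holding the callee offset relative to the next instruction.
	 * They cover call-graph validation, caller/callee register state
	 * and per-frame stack tracking.
	 */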
10439 	{
10440 		"calls: basic sanity",
10441 		.insns = {
10442 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10443 			BPF_MOV64_IMM(BPF_REG_0, 1),
10444 			BPF_EXIT_INSN(),
10445 			BPF_MOV64_IMM(BPF_REG_0, 2),
10446 			BPF_EXIT_INSN(),
10447 		},
10448 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10449 		.result = ACCEPT,
10450 	},
10451 	{
10452 		"calls: not on unpriviledged",
10453 		.insns = {
10454 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10455 			BPF_MOV64_IMM(BPF_REG_0, 1),
10456 			BPF_EXIT_INSN(),
10457 			BPF_MOV64_IMM(BPF_REG_0, 2),
10458 			BPF_EXIT_INSN(),
10459 		},
10460 		.errstr_unpriv = "function calls to other bpf functions are allowed for root only",
10461 		.result_unpriv = REJECT,
10462 		.result = ACCEPT,
10463 		.retval = 1,
10464 	},
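	/* Division by zero does not abort a BPF program: BPF_DIV by zero
	 * yields 0 in the destination (and BPF_MOD by zero leaves it
	 * unchanged), so the subprog below keeps executing and the
	 * program as a whole is accepted.
	 */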
10465 	{
10466 		"calls: div by 0 in subprog",
10467 		.insns = {
10468 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10469 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
10470 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10471 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
10472 				    offsetof(struct __sk_buff, data_end)),
10473 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
10474 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
10475 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
10476 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
10477 			BPF_MOV64_IMM(BPF_REG_0, 1),
10478 			BPF_EXIT_INSN(),
10479 			BPF_MOV32_IMM(BPF_REG_2, 0),
10480 			BPF_MOV32_IMM(BPF_REG_3, 1),
10481 			BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2),
10482 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10483 				    offsetof(struct __sk_buff, data)),
10484 			BPF_EXIT_INSN(),
10485 		},
10486 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
10487 		.result = ACCEPT,
10488 		.retval = 1,
10489 	},
10490 	{
10491 		"calls: multiple ret types in subprog 1",
10492 		.insns = {
10493 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10494 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
10495 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10496 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
10497 				    offsetof(struct __sk_buff, data_end)),
10498 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
10499 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
10500 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
10501 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
10502 			BPF_MOV64_IMM(BPF_REG_0, 1),
10503 			BPF_EXIT_INSN(),
10504 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10505 				    offsetof(struct __sk_buff, data)),
10506 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
10507 			BPF_MOV32_IMM(BPF_REG_0, 42),
10508 			BPF_EXIT_INSN(),
10509 		},
10510 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
10511 		.result = REJECT,
10512 		.errstr = "R0 invalid mem access 'inv'",
10513 	},
10514 	{
10515 		"calls: multiple ret types in subprog 2",
10516 		.insns = {
10517 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10518 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
10519 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10520 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
10521 				    offsetof(struct __sk_buff, data_end)),
10522 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
10523 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
10524 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
10525 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
10526 			BPF_MOV64_IMM(BPF_REG_0, 1),
10527 			BPF_EXIT_INSN(),
10528 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10529 				    offsetof(struct __sk_buff, data)),
10530 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10531 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9),
10532 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10533 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10534 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10535 			BPF_LD_MAP_FD(BPF_REG_1, 0),
10536 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10537 				     BPF_FUNC_map_lookup_elem),
10538 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
10539 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
10540 				    offsetof(struct __sk_buff, data)),
10541 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
10542 			BPF_EXIT_INSN(),
10543 		},
10544 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
10545 		.fixup_map_hash_8b = { 16 },
10546 		.result = REJECT,
10547 		.errstr = "R0 min value is outside of the array range",
10548 	},
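	/* The next group checks structural constraints on subprogs:
	 * unused fields in the call insn must be zero, every function
	 * body must end in BPF_EXIT or a jump within its own body, jumps
	 * must not cross function boundaries, call targets must be valid
	 * instruction addresses, and recursive calls (back-edges) are
	 * rejected.
	 */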
10549 	{
10550 		"calls: overlapping caller/callee",
10551 		.insns = {
10552 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
10553 			BPF_MOV64_IMM(BPF_REG_0, 1),
10554 			BPF_EXIT_INSN(),
10555 		},
10556 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10557 		.errstr = "last insn is not an exit or jmp",
10558 		.result = REJECT,
10559 	},
10560 	{
10561 		"calls: wrong recursive calls",
10562 		.insns = {
10563 			BPF_JMP_IMM(BPF_JA, 0, 0, 4),
10564 			BPF_JMP_IMM(BPF_JA, 0, 0, 4),
10565 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
10566 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
10567 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
10568 			BPF_MOV64_IMM(BPF_REG_0, 1),
10569 			BPF_EXIT_INSN(),
10570 		},
10571 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10572 		.errstr = "jump out of range",
10573 		.result = REJECT,
10574 	},
10575 	{
10576 		"calls: wrong src reg",
10577 		.insns = {
10578 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 2, 0, 0),
10579 			BPF_MOV64_IMM(BPF_REG_0, 1),
10580 			BPF_EXIT_INSN(),
10581 		},
10582 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10583 		.errstr = "BPF_CALL uses reserved fields",
10584 		.result = REJECT,
10585 	},
10586 	{
10587 		"calls: wrong off value",
10588 		.insns = {
10589 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
10590 			BPF_MOV64_IMM(BPF_REG_0, 1),
10591 			BPF_EXIT_INSN(),
10592 			BPF_MOV64_IMM(BPF_REG_0, 2),
10593 			BPF_EXIT_INSN(),
10594 		},
10595 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10596 		.errstr = "BPF_CALL uses reserved fields",
10597 		.result = REJECT,
10598 	},
10599 	{
10600 		"calls: jump back loop",
10601 		.insns = {
10602 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
10603 			BPF_MOV64_IMM(BPF_REG_0, 1),
10604 			BPF_EXIT_INSN(),
10605 		},
10606 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10607 		.errstr = "back-edge from insn 0 to 0",
10608 		.result = REJECT,
10609 	},
10610 	{
10611 		"calls: conditional call",
10612 		.insns = {
10613 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10614 				    offsetof(struct __sk_buff, mark)),
10615 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
10616 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10617 			BPF_MOV64_IMM(BPF_REG_0, 1),
10618 			BPF_EXIT_INSN(),
10619 			BPF_MOV64_IMM(BPF_REG_0, 2),
10620 			BPF_EXIT_INSN(),
10621 		},
10622 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10623 		.errstr = "jump out of range",
10624 		.result = REJECT,
10625 	},
10626 	{
10627 		"calls: conditional call 2",
10628 		.insns = {
10629 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10630 				    offsetof(struct __sk_buff, mark)),
10631 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
10632 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10633 			BPF_MOV64_IMM(BPF_REG_0, 1),
10634 			BPF_EXIT_INSN(),
10635 			BPF_MOV64_IMM(BPF_REG_0, 2),
10636 			BPF_EXIT_INSN(),
10637 			BPF_MOV64_IMM(BPF_REG_0, 3),
10638 			BPF_EXIT_INSN(),
10639 		},
10640 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10641 		.result = ACCEPT,
10642 	},
10643 	{
10644 		"calls: conditional call 3",
10645 		.insns = {
10646 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10647 				    offsetof(struct __sk_buff, mark)),
10648 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
10649 			BPF_JMP_IMM(BPF_JA, 0, 0, 4),
10650 			BPF_MOV64_IMM(BPF_REG_0, 1),
10651 			BPF_EXIT_INSN(),
10652 			BPF_MOV64_IMM(BPF_REG_0, 1),
10653 			BPF_JMP_IMM(BPF_JA, 0, 0, -6),
10654 			BPF_MOV64_IMM(BPF_REG_0, 3),
10655 			BPF_JMP_IMM(BPF_JA, 0, 0, -6),
10656 		},
10657 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10658 		.errstr = "back-edge from insn",
10659 		.result = REJECT,
10660 	},
10661 	{
10662 		"calls: conditional call 4",
10663 		.insns = {
10664 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10665 				    offsetof(struct __sk_buff, mark)),
10666 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
10667 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10668 			BPF_MOV64_IMM(BPF_REG_0, 1),
10669 			BPF_EXIT_INSN(),
10670 			BPF_MOV64_IMM(BPF_REG_0, 1),
10671 			BPF_JMP_IMM(BPF_JA, 0, 0, -5),
10672 			BPF_MOV64_IMM(BPF_REG_0, 3),
10673 			BPF_EXIT_INSN(),
10674 		},
10675 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10676 		.result = ACCEPT,
10677 	},
10678 	{
10679 		"calls: conditional call 5",
10680 		.insns = {
10681 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10682 				    offsetof(struct __sk_buff, mark)),
10683 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
10684 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
10685 			BPF_MOV64_IMM(BPF_REG_0, 1),
10686 			BPF_EXIT_INSN(),
10687 			BPF_MOV64_IMM(BPF_REG_0, 1),
10688 			BPF_JMP_IMM(BPF_JA, 0, 0, -6),
10689 			BPF_MOV64_IMM(BPF_REG_0, 3),
10690 			BPF_EXIT_INSN(),
10691 		},
10692 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10693 		.errstr = "back-edge from insn",
10694 		.result = REJECT,
10695 	},
10696 	{
10697 		"calls: conditional call 6",
10698 		.insns = {
10699 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10700 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -2),
10701 			BPF_EXIT_INSN(),
10702 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10703 				    offsetof(struct __sk_buff, mark)),
10704 			BPF_EXIT_INSN(),
10705 		},
10706 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10707 		.errstr = "back-edge from insn",
10708 		.result = REJECT,
10709 	},
10710 	{
10711 		"calls: using r0 returned by callee",
10712 		.insns = {
10713 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10714 			BPF_EXIT_INSN(),
10715 			BPF_MOV64_IMM(BPF_REG_0, 2),
10716 			BPF_EXIT_INSN(),
10717 		},
10718 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10719 		.result = ACCEPT,
10720 	},
10721 	{
10722 		"calls: using uninit r0 from callee",
10723 		.insns = {
10724 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10725 			BPF_EXIT_INSN(),
10726 			BPF_EXIT_INSN(),
10727 		},
10728 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10729 		.errstr = "!read_ok",
10730 		.result = REJECT,
10731 	},
10732 	{
10733 		"calls: callee is using r1",
10734 		.insns = {
10735 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10736 			BPF_EXIT_INSN(),
10737 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10738 				    offsetof(struct __sk_buff, len)),
10739 			BPF_EXIT_INSN(),
10740 		},
10741 		.prog_type = BPF_PROG_TYPE_SCHED_ACT,
10742 		.result = ACCEPT,
10743 		.retval = TEST_DATA_LEN,
10744 	},
10745 	{
10746 		"calls: callee using args1",
10747 		.insns = {
10748 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10749 			BPF_EXIT_INSN(),
10750 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
10751 			BPF_EXIT_INSN(),
10752 		},
10753 		.errstr_unpriv = "allowed for root only",
10754 		.result_unpriv = REJECT,
10755 		.result = ACCEPT,
10756 		.retval = POINTER_VALUE,
10757 	},
10758 	{
10759 		"calls: callee using wrong args2",
10760 		.insns = {
10761 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10762 			BPF_EXIT_INSN(),
10763 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
10764 			BPF_EXIT_INSN(),
10765 		},
10766 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10767 		.errstr = "R2 !read_ok",
10768 		.result = REJECT,
10769 	},
10770 	{
10771 		"calls: callee using two args",
10772 		.insns = {
10773 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10774 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
10775 				    offsetof(struct __sk_buff, len)),
10776 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
10777 				    offsetof(struct __sk_buff, len)),
10778 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10779 			BPF_EXIT_INSN(),
10780 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
10781 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
10782 			BPF_EXIT_INSN(),
10783 		},
10784 		.errstr_unpriv = "allowed for root only",
10785 		.result_unpriv = REJECT,
10786 		.result = ACCEPT,
10787 		.retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN,
10788 	},
10789 	{
10790 		"calls: callee changing pkt pointers",
10791 		.insns = {
10792 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
10793 				    offsetof(struct xdp_md, data)),
10794 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
10795 				    offsetof(struct xdp_md, data_end)),
10796 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
10797 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
10798 			BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
10799 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10800 			/* clear_all_pkt_pointers() has to walk all frames
10801 			 * to make sure that pkt pointers in the caller
10802 			 * are cleared when the callee calls a helper that
10803 			 * adjusts the packet size
10804 			 */
10805 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
10806 			BPF_MOV32_IMM(BPF_REG_0, 0),
10807 			BPF_EXIT_INSN(),
10808 			BPF_MOV64_IMM(BPF_REG_2, 0),
10809 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10810 				     BPF_FUNC_xdp_adjust_head),
10811 			BPF_EXIT_INSN(),
10812 		},
10813 		.result = REJECT,
10814 		.errstr = "R6 invalid mem access 'inv'",
10815 		.prog_type = BPF_PROG_TYPE_XDP,
10816 	},
10817 	{
10818 		"calls: two calls with args",
10819 		.insns = {
10820 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10821 			BPF_EXIT_INSN(),
10822 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10823 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
10824 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
10825 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10826 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10827 			BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
10828 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
10829 			BPF_EXIT_INSN(),
10830 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10831 				    offsetof(struct __sk_buff, len)),
10832 			BPF_EXIT_INSN(),
10833 		},
10834 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
10835 		.result = ACCEPT,
10836 		.retval = TEST_DATA_LEN + TEST_DATA_LEN,
10837 	},
10838 	{
10839 		"calls: calls with stack arith",
10840 		.insns = {
10841 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10842 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
10843 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10844 			BPF_EXIT_INSN(),
10845 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
10846 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10847 			BPF_EXIT_INSN(),
10848 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
10849 			BPF_MOV64_IMM(BPF_REG_0, 42),
10850 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
10851 			BPF_EXIT_INSN(),
10852 		},
10853 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
10854 		.result = ACCEPT,
10855 		.retval = 42,
10856 	},
10857 	{
10858 		"calls: calls with misaligned stack access",
10859 		.insns = {
10860 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10861 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
10862 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10863 			BPF_EXIT_INSN(),
10864 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
10865 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10866 			BPF_EXIT_INSN(),
10867 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
10868 			BPF_MOV64_IMM(BPF_REG_0, 42),
10869 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
10870 			BPF_EXIT_INSN(),
10871 		},
10872 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
10873 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
10874 		.errstr = "misaligned stack access",
10875 		.result = REJECT,
10876 	},
10877 	{
10878 		"calls: calls control flow, jump test",
10879 		.insns = {
10880 			BPF_MOV64_IMM(BPF_REG_0, 42),
10881 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
10882 			BPF_MOV64_IMM(BPF_REG_0, 43),
10883 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10884 			BPF_JMP_IMM(BPF_JA, 0, 0, -3),
10885 			BPF_EXIT_INSN(),
10886 		},
10887 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
10888 		.result = ACCEPT,
10889 		.retval = 43,
10890 	},
10891 	{
10892 		"calls: calls control flow, jump test 2",
10893 		.insns = {
10894 			BPF_MOV64_IMM(BPF_REG_0, 42),
10895 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
10896 			BPF_MOV64_IMM(BPF_REG_0, 43),
10897 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10898 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
10899 			BPF_EXIT_INSN(),
10900 		},
10901 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
10902 		.errstr = "jump out of range from insn 1 to 4",
10903 		.result = REJECT,
10904 	},
10905 	{
10906 		"calls: two calls with bad jump",
10907 		.insns = {
10908 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10909 			BPF_EXIT_INSN(),
10910 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10911 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
10912 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
10913 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10914 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
10915 			BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
10916 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
10917 			BPF_EXIT_INSN(),
10918 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10919 				    offsetof(struct __sk_buff, len)),
10920 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
10921 			BPF_EXIT_INSN(),
10922 		},
10923 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10924 		.errstr = "jump out of range from insn 11 to 9",
10925 		.result = REJECT,
10926 	},
10927 	{
10928 		"calls: recursive call. test1",
10929 		.insns = {
10930 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10931 			BPF_EXIT_INSN(),
10932 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
10933 			BPF_EXIT_INSN(),
10934 		},
10935 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10936 		.errstr = "back-edge",
10937 		.result = REJECT,
10938 	},
10939 	{
10940 		"calls: recursive call. test2",
10941 		.insns = {
10942 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10943 			BPF_EXIT_INSN(),
10944 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
10945 			BPF_EXIT_INSN(),
10946 		},
10947 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10948 		.errstr = "back-edge",
10949 		.result = REJECT,
10950 	},
10951 	{
10952 		"calls: unreachable code",
10953 		.insns = {
10954 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10955 			BPF_EXIT_INSN(),
10956 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10957 			BPF_EXIT_INSN(),
10958 			BPF_MOV64_IMM(BPF_REG_0, 0),
10959 			BPF_EXIT_INSN(),
10960 			BPF_MOV64_IMM(BPF_REG_0, 0),
10961 			BPF_EXIT_INSN(),
10962 		},
10963 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10964 		.errstr = "unreachable insn 6",
10965 		.result = REJECT,
10966 	},
10967 	{
10968 		"calls: invalid call",
10969 		.insns = {
10970 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10971 			BPF_EXIT_INSN(),
10972 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
10973 			BPF_EXIT_INSN(),
10974 		},
10975 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10976 		.errstr = "invalid destination",
10977 		.result = REJECT,
10978 	},
10979 	{
10980 		"calls: invalid call 2",
10981 		.insns = {
10982 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
10983 			BPF_EXIT_INSN(),
10984 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
10985 			BPF_EXIT_INSN(),
10986 		},
10987 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10988 		.errstr = "invalid destination",
10989 		.result = REJECT,
10990 	},
10991 	{
10992 		"calls: jumping across function bodies. test1",
10993 		.insns = {
10994 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10995 			BPF_MOV64_IMM(BPF_REG_0, 0),
10996 			BPF_EXIT_INSN(),
10997 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
10998 			BPF_EXIT_INSN(),
10999 		},
11000 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11001 		.errstr = "jump out of range",
11002 		.result = REJECT,
11003 	},
11004 	{
11005 		"calls: jumping across function bodies. test2",
11006 		.insns = {
11007 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
11008 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11009 			BPF_MOV64_IMM(BPF_REG_0, 0),
11010 			BPF_EXIT_INSN(),
11011 			BPF_EXIT_INSN(),
11012 		},
11013 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11014 		.errstr = "jump out of range",
11015 		.result = REJECT,
11016 	},
11017 	{
11018 		"calls: call without exit",
11019 		.insns = {
11020 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11021 			BPF_EXIT_INSN(),
11022 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11023 			BPF_EXIT_INSN(),
11024 			BPF_MOV64_IMM(BPF_REG_0, 0),
11025 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
11026 		},
11027 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11028 		.errstr = "not an exit",
11029 		.result = REJECT,
11030 	},
11031 	{
11032 		"calls: call into middle of ld_imm64",
11033 		.insns = {
11034 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11035 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11036 			BPF_MOV64_IMM(BPF_REG_0, 0),
11037 			BPF_EXIT_INSN(),
11038 			BPF_LD_IMM64(BPF_REG_0, 0),
11039 			BPF_EXIT_INSN(),
11040 		},
11041 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11042 		.errstr = "last insn",
11043 		.result = REJECT,
11044 	},
11045 	{
11046 		"calls: call into middle of other call",
11047 		.insns = {
11048 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11049 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11050 			BPF_MOV64_IMM(BPF_REG_0, 0),
11051 			BPF_EXIT_INSN(),
11052 			BPF_MOV64_IMM(BPF_REG_0, 0),
11053 			BPF_MOV64_IMM(BPF_REG_0, 0),
11054 			BPF_EXIT_INSN(),
11055 		},
11056 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11057 		.errstr = "last insn",
11058 		.result = REJECT,
11059 	},
11060 	{
11061 		"calls: ld_abs with changing ctx data in callee",
11062 		.insns = {
11063 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11064 			BPF_LD_ABS(BPF_B, 0),
11065 			BPF_LD_ABS(BPF_H, 0),
11066 			BPF_LD_ABS(BPF_W, 0),
11067 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
11068 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
11069 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
11070 			BPF_LD_ABS(BPF_B, 0),
11071 			BPF_LD_ABS(BPF_H, 0),
11072 			BPF_LD_ABS(BPF_W, 0),
11073 			BPF_EXIT_INSN(),
11074 			BPF_MOV64_IMM(BPF_REG_2, 1),
11075 			BPF_MOV64_IMM(BPF_REG_3, 2),
11076 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11077 				     BPF_FUNC_skb_vlan_push),
11078 			BPF_EXIT_INSN(),
11079 		},
11080 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
11081 		.errstr = "BPF_LD_[ABS|IND] instructions cannot be mixed",
11082 		.result = REJECT,
11083 	},
11084 	{
11085 		"calls: two calls with bad fallthrough",
11086 		.insns = {
11087 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11088 			BPF_EXIT_INSN(),
11089 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11090 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
11091 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
11092 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11093 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11094 			BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
11095 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
11096 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
11097 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11098 				    offsetof(struct __sk_buff, len)),
11099 			BPF_EXIT_INSN(),
11100 		},
11101 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11102 		.errstr = "not an exit",
11103 		.result = REJECT,
11104 	},
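	/* Callers may pass pointers into their own stack frame; the two
	 * tests below check that callees can read and write through such
	 * pointers and that the verifier tracks which frame each stack
	 * pointer refers to.
	 */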
11105 	{
11106 		"calls: two calls with stack read",
11107 		.insns = {
11108 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11109 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11110 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11111 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11112 			BPF_EXIT_INSN(),
11113 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11114 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
11115 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
11116 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11117 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11118 			BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
11119 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
11120 			BPF_EXIT_INSN(),
11121 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
11122 			BPF_EXIT_INSN(),
11123 		},
11124 		.prog_type = BPF_PROG_TYPE_XDP,
11125 		.result = ACCEPT,
11126 	},
11127 	{
11128 		"calls: two calls with stack write",
11129 		.insns = {
11130 			/* main prog */
11131 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11132 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11133 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11134 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11135 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11136 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11137 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
11138 			BPF_EXIT_INSN(),
11139 
11140 			/* subprog 1 */
11141 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11142 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11143 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
11144 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
11145 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11146 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11147 			BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
11148 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
11149 			/* write into stack frame of main prog */
11150 			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
11151 			BPF_EXIT_INSN(),
11152 
11153 			/* subprog 2 */
11154 			/* read from stack frame of main prog */
11155 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
11156 			BPF_EXIT_INSN(),
11157 		},
11158 		.prog_type = BPF_PROG_TYPE_XDP,
11159 		.result = ACCEPT,
11160 	},
11161 	{
11162 		"calls: stack overflow using two frames (pre-call access)",
11163 		.insns = {
11164 			/* prog 1 */
11165 			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11166 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
11167 			BPF_EXIT_INSN(),
11168 
11169 			/* prog 2 */
11170 			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11171 			BPF_MOV64_IMM(BPF_REG_0, 0),
11172 			BPF_EXIT_INSN(),
11173 		},
11174 		.prog_type = BPF_PROG_TYPE_XDP,
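		/* each frame allocates ~300 bytes, so the combined stack of
		 * the two frames exceeds the 512 byte limit
		 */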
11175 		.errstr = "combined stack size",
11176 		.result = REJECT,
11177 	},
11178 	{
11179 		"calls: stack overflow using two frames (post-call access)",
11180 		.insns = {
11181 			/* prog 1 */
11182 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
11183 			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11184 			BPF_EXIT_INSN(),
11185 
11186 			/* prog 2 */
11187 			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11188 			BPF_MOV64_IMM(BPF_REG_0, 0),
11189 			BPF_EXIT_INSN(),
11190 		},
11191 		.prog_type = BPF_PROG_TYPE_XDP,
11192 		.errstr = "combined stack size",
11193 		.result = REJECT,
11194 	},
11195 	{
11196 		"calls: stack depth check using three frames. test1",
11197 		.insns = {
11198 			/* main */
11199 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
11200 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
11201 			BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
11202 			BPF_MOV64_IMM(BPF_REG_0, 0),
11203 			BPF_EXIT_INSN(),
11204 			/* A */
11205 			BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
11206 			BPF_EXIT_INSN(),
11207 			/* B */
11208 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
11209 			BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
11210 			BPF_EXIT_INSN(),
11211 		},
11212 		.prog_type = BPF_PROG_TYPE_XDP,
11213 		/* stack_main=32, stack_A=256, stack_B=64
11214 		 * and max(main+A, main+A+B) < 512
11215 		 */
11216 		.result = ACCEPT,
11217 	},
11218 	{
11219 		"calls: stack depth check using three frames. test2",
11220 		.insns = {
11221 			/* main */
11222 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
11223 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
11224 			BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
11225 			BPF_MOV64_IMM(BPF_REG_0, 0),
11226 			BPF_EXIT_INSN(),
11227 			/* A */
11228 			BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
11229 			BPF_EXIT_INSN(),
11230 			/* B */
11231 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
11232 			BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
11233 			BPF_EXIT_INSN(),
11234 		},
11235 		.prog_type = BPF_PROG_TYPE_XDP,
11236 		/* stack_main=32, stack_A=64, stack_B=256
11237 		 * and max(main+A, main+A+B) < 512
11238 		 */
11239 		.result = ACCEPT,
11240 	},
11241 	{
11242 		"calls: stack depth check using three frames. test3",
11243 		.insns = {
11244 			/* main */
11245 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11246 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
11247 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11248 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
11249 			BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
11250 			BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
11251 			BPF_MOV64_IMM(BPF_REG_0, 0),
11252 			BPF_EXIT_INSN(),
11253 			/* A */
11254 			BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
11255 			BPF_EXIT_INSN(),
11256 			BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
11257 			BPF_JMP_IMM(BPF_JA, 0, 0, -3),
11258 			/* B */
11259 			BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
11260 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
11261 			BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
11262 			BPF_EXIT_INSN(),
11263 		},
11264 		.prog_type = BPF_PROG_TYPE_XDP,
11265 		/* stack_main=64, stack_A=224, stack_B=256
11266 		 * and max(main+A, main+A+B) > 512
11267 		 */
11268 		.errstr = "combined stack",
11269 		.result = REJECT,
11270 	},
11271 	{
11272 		"calls: stack depth check using three frames. test4",
11273 		/* void main(void) {
11274 		 *   func1(0);
11275 		 *   func1(1);
11276 		 *   func2(1);
11277 		 * }
11278 		 * void func1(int alloc_or_recurse) {
11279 		 *   if (alloc_or_recurse) {
11280 		 *     frame_pointer[-300] = 1;
11281 		 *   } else {
11282 		 *     func2(alloc_or_recurse);
11283 		 *   }
11284 		 * }
11285 		 * void func2(int alloc_or_recurse) {
11286 		 *   if (alloc_or_recurse) {
11287 		 *     frame_pointer[-300] = 1;
11288 		 *   }
11289 		 * }
11290 		 */
11291 		.insns = {
11292 			/* main */
11293 			BPF_MOV64_IMM(BPF_REG_1, 0),
11294 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
11295 			BPF_MOV64_IMM(BPF_REG_1, 1),
11296 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
11297 			BPF_MOV64_IMM(BPF_REG_1, 1),
11298 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
11299 			BPF_MOV64_IMM(BPF_REG_0, 0),
11300 			BPF_EXIT_INSN(),
11301 			/* A */
11302 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
11303 			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11304 			BPF_EXIT_INSN(),
11305 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
11306 			BPF_EXIT_INSN(),
11307 			/* B */
11308 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
11309 			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11310 			BPF_EXIT_INSN(),
11311 		},
11312 		.prog_type = BPF_PROG_TYPE_XDP,
11313 		.result = REJECT,
11314 		.errstr = "combined stack",
11315 	},
11316 	{
11317 		"calls: stack depth check using three frames. test5",
11318 		.insns = {
11319 			/* main */
11320 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
11321 			BPF_EXIT_INSN(),
11322 			/* A */
11323 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
11324 			BPF_EXIT_INSN(),
11325 			/* B */
11326 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
11327 			BPF_EXIT_INSN(),
11328 			/* C */
11329 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
11330 			BPF_EXIT_INSN(),
11331 			/* D */
11332 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
11333 			BPF_EXIT_INSN(),
11334 			/* E */
11335 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
11336 			BPF_EXIT_INSN(),
11337 			/* F */
11338 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
11339 			BPF_EXIT_INSN(),
11340 			/* G */
11341 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
11342 			BPF_EXIT_INSN(),
11343 			/* H */
11344 			BPF_MOV64_IMM(BPF_REG_0, 0),
11345 			BPF_EXIT_INSN(),
11346 		},
11347 		.prog_type = BPF_PROG_TYPE_XDP,
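		/* main plus subprogs A..H nests nine call frames, which is
		 * deeper than the verifier's frame limit (MAX_CALL_FRAMES)
		 */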
11348 		.errstr = "call stack",
11349 		.result = REJECT,
11350 	},
11351 	{
11352 		"calls: spill into caller stack frame",
11353 		.insns = {
11354 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11355 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11356 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11357 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11358 			BPF_EXIT_INSN(),
11359 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
11360 			BPF_MOV64_IMM(BPF_REG_0, 0),
11361 			BPF_EXIT_INSN(),
11362 		},
11363 		.prog_type = BPF_PROG_TYPE_XDP,
11364 		.errstr = "cannot spill",
11365 		.result = REJECT,
11366 	},
11367 	{
11368 		"calls: write into caller stack frame",
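		/* the callee stores 42 through the caller stack pointer passed
		 * in r1; the caller reads it back via r6, so retval is 42
		 */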
11369 		.insns = {
11370 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11371 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11372 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11373 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11374 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
11375 			BPF_EXIT_INSN(),
11376 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
11377 			BPF_MOV64_IMM(BPF_REG_0, 0),
11378 			BPF_EXIT_INSN(),
11379 		},
11380 		.prog_type = BPF_PROG_TYPE_XDP,
11381 		.result = ACCEPT,
11382 		.retval = 42,
11383 	},
11384 	{
11385 		"calls: write into callee stack frame",
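		/* the callee returns a pointer into its own stack frame, which
		 * the caller then tries to write through; the verifier must not
		 * let a callee stack pointer escape
		 */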
11386 		.insns = {
11387 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11388 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
11389 			BPF_EXIT_INSN(),
11390 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
11391 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
11392 			BPF_EXIT_INSN(),
11393 		},
11394 		.prog_type = BPF_PROG_TYPE_XDP,
11395 		.errstr = "cannot return stack pointer",
11396 		.result = REJECT,
11397 	},
11398 	{
11399 		"calls: two calls with stack write and void return",
11400 		.insns = {
11401 			/* main prog */
11402 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11403 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11404 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11405 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11406 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11407 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11408 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
11409 			BPF_EXIT_INSN(),
11410 
11411 			/* subprog 1 */
11412 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11413 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11414 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11415 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
11416 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11417 			BPF_EXIT_INSN(),
11418 
11419 			/* subprog 2 */
11420 			/* write into stack frame of main prog */
11421 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
11422 			BPF_EXIT_INSN(), /* void return */
11423 		},
11424 		.prog_type = BPF_PROG_TYPE_XDP,
11425 		.result = ACCEPT,
11426 	},
11427 	{
11428 		"calls: ambiguous return value",
11429 		.insns = {
11430 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11431 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
11432 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
11433 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11434 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11435 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
11436 			BPF_EXIT_INSN(),
11437 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
11438 			BPF_MOV64_IMM(BPF_REG_0, 0),
11439 			BPF_EXIT_INSN(),
11440 		},
11441 		.errstr_unpriv = "allowed for root only",
11442 		.result_unpriv = REJECT,
11443 		.errstr = "R0 !read_ok",
11444 		.result = REJECT,
11445 	},
11446 	{
11447 		"calls: two calls that return map_value",
11448 		.insns = {
11449 			/* main prog */
11450 			/* pass fp-16, fp-8 into a function */
11451 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11452 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11453 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11454 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11455 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
11456 
11457 			/* fetch map_value_ptr from the stack of this function */
11458 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
11459 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
11460 			/* write into map value */
11461 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11462 			/* fetch second map_value_ptr from the stack */
11463 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
11464 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
11465 			/* write into map value */
11466 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11467 			BPF_MOV64_IMM(BPF_REG_0, 0),
11468 			BPF_EXIT_INSN(),
11469 
11470 			/* subprog 1 */
11471 			/* call 3rd function twice */
11472 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11473 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11474 			/* first time with fp-8 */
11475 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11476 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
11477 			/* second time with fp-16 */
11478 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11479 			BPF_EXIT_INSN(),
11480 
11481 			/* subprog 2 */
11482 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11483 			/* lookup from map */
11484 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11485 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11486 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11487 			BPF_LD_MAP_FD(BPF_REG_1, 0),
11488 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11489 				     BPF_FUNC_map_lookup_elem),
11490 			/* write map_value_ptr into stack frame of main prog */
11491 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11492 			BPF_MOV64_IMM(BPF_REG_0, 0),
11493 			BPF_EXIT_INSN(), /* return 0 */
11494 		},
11495 		.prog_type = BPF_PROG_TYPE_XDP,
11496 		.fixup_map_hash_8b = { 23 },
11497 		.result = ACCEPT,
11498 	},
11499 	{
11500 		"calls: two calls that return map_value with bool condition",
11501 		.insns = {
11502 			/* main prog */
11503 			/* pass fp-16, fp-8 into a function */
11504 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11505 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11506 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11507 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11508 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11509 			BPF_MOV64_IMM(BPF_REG_0, 0),
11510 			BPF_EXIT_INSN(),
11511 
11512 			/* subprog 1 */
11513 			/* call 3rd function twice */
11514 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11515 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11516 			/* first time with fp-8 */
11517 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
11518 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
11519 			/* fetch map_value_ptr from the stack of this function */
11520 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
11521 			/* write into map value */
11522 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11523 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
11524 			/* second time with fp-16 */
11525 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11526 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
11527 			/* fetch second map_value_ptr from the stack */
11528 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
11529 			/* write into map value */
11530 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11531 			BPF_EXIT_INSN(),
11532 
11533 			/* subprog 2 */
11534 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11535 			/* lookup from map */
11536 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11537 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11538 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11539 			BPF_LD_MAP_FD(BPF_REG_1, 0),
11540 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11541 				     BPF_FUNC_map_lookup_elem),
11542 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11543 			BPF_MOV64_IMM(BPF_REG_0, 0),
11544 			BPF_EXIT_INSN(), /* return 0 */
11545 			/* write map_value_ptr into stack frame of main prog */
11546 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11547 			BPF_MOV64_IMM(BPF_REG_0, 1),
11548 			BPF_EXIT_INSN(), /* return 1 */
11549 		},
11550 		.prog_type = BPF_PROG_TYPE_XDP,
11551 		.fixup_map_hash_8b = { 23 },
11552 		.result = ACCEPT,
11553 	},
11554 	{
11555 		"calls: two calls that return map_value with incorrect bool check",
11556 		.insns = {
11557 			/* main prog */
11558 			/* pass fp-16, fp-8 into a function */
11559 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11560 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11561 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11562 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11563 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11564 			BPF_MOV64_IMM(BPF_REG_0, 0),
11565 			BPF_EXIT_INSN(),
11566 
11567 			/* subprog 1 */
11568 			/* call 3rd function twice */
11569 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11570 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11571 			/* first time with fp-8 */
11572 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
11573 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
11574 			/* fetch map_value_ptr from the stack of this function */
11575 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
11576 			/* write into map value */
11577 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11578 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
11579 			/* second time with fp-16 */
11580 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11581 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11582 			/* fetch second map_value_ptr from the stack */
11583 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
11584 			/* write into map value */
11585 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11586 			BPF_EXIT_INSN(),
11587 
11588 			/* subprog 2 */
11589 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11590 			/* lookup from map */
11591 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11592 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11593 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11594 			BPF_LD_MAP_FD(BPF_REG_1, 0),
11595 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11596 				     BPF_FUNC_map_lookup_elem),
11597 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11598 			BPF_MOV64_IMM(BPF_REG_0, 0),
11599 			BPF_EXIT_INSN(), /* return 0 */
11600 			/* write map_value_ptr into stack frame of main prog */
11601 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11602 			BPF_MOV64_IMM(BPF_REG_0, 1),
11603 			BPF_EXIT_INSN(), /* return 1 */
11604 		},
11605 		.prog_type = BPF_PROG_TYPE_XDP,
11606 		.fixup_map_hash_8b = { 23 },
11607 		.result = REJECT,
11608 		.errstr = "invalid read from stack off -16+0 size 8",
11609 	},
11610 	{
11611 		"calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
11612 		.insns = {
11613 			/* main prog */
11614 			/* pass fp-16, fp-8 into a function */
11615 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11616 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11617 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11618 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11619 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11620 			BPF_MOV64_IMM(BPF_REG_0, 0),
11621 			BPF_EXIT_INSN(),
11622 
11623 			/* subprog 1 */
11624 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11625 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11626 			/* 1st lookup from map */
11627 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11628 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11629 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11630 			BPF_LD_MAP_FD(BPF_REG_1, 0),
11631 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11632 				     BPF_FUNC_map_lookup_elem),
11633 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11634 			BPF_MOV64_IMM(BPF_REG_8, 0),
11635 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11636 			/* write map_value_ptr into stack frame of main prog at fp-8 */
11637 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11638 			BPF_MOV64_IMM(BPF_REG_8, 1),
11639 
11640 			/* 2nd lookup from map */
11641 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
11642 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11643 			BPF_LD_MAP_FD(BPF_REG_1, 0),
11644 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
11645 				     BPF_FUNC_map_lookup_elem),
11646 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11647 			BPF_MOV64_IMM(BPF_REG_9, 0),
11648 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11649 			/* write map_value_ptr into stack frame of main prog at fp-16 */
11650 			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
11651 			BPF_MOV64_IMM(BPF_REG_9, 1),
11652 
11653 			/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
11654 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
11655 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
11656 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
11657 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
11658 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),  /* 34 */
11659 			BPF_EXIT_INSN(),
11660 
11661 			/* subprog 2 */
11662 			/* if arg2 == 1 do *arg1 = 0 */
11663 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
11664 			/* fetch map_value_ptr from the stack of this function */
11665 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
11666 			/* write into map value */
11667 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11668 
11669 			/* if arg4 == 1 do *arg3 = 0 */
11670 			BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
11671 			/* fetch map_value_ptr from the stack of this function */
11672 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
11673 			/* write into map value */
11674 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
11675 			BPF_EXIT_INSN(),
11676 		},
11677 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
11678 		.fixup_map_hash_8b = { 12, 22 },
11679 		.result = REJECT,
11680 		.errstr = "invalid access to map value, value_size=8 off=2 size=8",
11681 	},
11682 	{
11683 		"calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
11684 		.insns = {
11685 			/* main prog */
11686 			/* pass fp-16, fp-8 into a function */
11687 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11688 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11689 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11690 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11691 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11692 			BPF_MOV64_IMM(BPF_REG_0, 0),
11693 			BPF_EXIT_INSN(),
11694 
11695 			/* subprog 1 */
11696 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11697 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11698 			/* 1st lookup from map */
11699 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11700 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11701 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11702 			BPF_LD_MAP_FD(BPF_REG_1, 0),
11703 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11704 				     BPF_FUNC_map_lookup_elem),
11705 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11706 			BPF_MOV64_IMM(BPF_REG_8, 0),
11707 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11708 			/* write map_value_ptr into stack frame of main prog at fp-8 */
11709 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11710 			BPF_MOV64_IMM(BPF_REG_8, 1),
11711 
11712 			/* 2nd lookup from map */
11713 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
11714 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11715 			BPF_LD_MAP_FD(BPF_REG_1, 0),
11716 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
11717 				     BPF_FUNC_map_lookup_elem),
11718 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11719 			BPF_MOV64_IMM(BPF_REG_9, 0),
11720 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11721 			/* write map_value_ptr into stack frame of main prog at fp-16 */
11722 			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
11723 			BPF_MOV64_IMM(BPF_REG_9, 1),
11724 
11725 			/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
11726 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
11727 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
11728 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
11729 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
11730 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),  /* 34 */
11731 			BPF_EXIT_INSN(),
11732 
11733 			/* subprog 2 */
11734 			/* if arg2 == 1 do *arg1 = 0 */
11735 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
11736 			/* fetch map_value_ptr from the stack of this function */
11737 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
11738 			/* write into map value */
11739 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11740 
11741 			/* if arg4 == 1 do *arg3 = 0 */
11742 			BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
11743 			/* fetch map_value_ptr from the stack of this function */
11744 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
11745 			/* write into map value */
11746 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11747 			BPF_EXIT_INSN(),
11748 		},
11749 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
11750 		.fixup_map_hash_8b = { 12, 22 },
11751 		.result = ACCEPT,
11752 	},
11753 	{
11754 		"calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
11755 		.insns = {
11756 			/* main prog */
11757 			/* pass fp-16, fp-8 into a function */
11758 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11759 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11760 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11761 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11762 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
11763 			BPF_MOV64_IMM(BPF_REG_0, 0),
11764 			BPF_EXIT_INSN(),
11765 
11766 			/* subprog 1 */
11767 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11768 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11769 			/* 1st lookup from map */
11770 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
11771 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11772 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
11773 			BPF_LD_MAP_FD(BPF_REG_1, 0),
11774 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11775 				     BPF_FUNC_map_lookup_elem),
11776 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11777 			BPF_MOV64_IMM(BPF_REG_8, 0),
11778 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11779 			/* write map_value_ptr into stack frame of main prog at fp-8 */
11780 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11781 			BPF_MOV64_IMM(BPF_REG_8, 1),
11782 
11783 			/* 2nd lookup from map */
11784 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11785 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
11786 			BPF_LD_MAP_FD(BPF_REG_1, 0),
11787 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11788 				     BPF_FUNC_map_lookup_elem),
11789 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11790 			BPF_MOV64_IMM(BPF_REG_9, 0),  /* 26 */
11791 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11792 			/* write map_value_ptr into stack frame of main prog at fp-16 */
11793 			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
11794 			BPF_MOV64_IMM(BPF_REG_9, 1),
11795 
11796 			/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
11797 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
11798 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
11799 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
11800 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
11801 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), /* 34 */
11802 			BPF_JMP_IMM(BPF_JA, 0, 0, -30),
11803 
11804 			/* subprog 2 */
11805 			/* if arg2 == 1 do *arg1 = 0 */
11806 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
11807 			/* fetch map_value_ptr from the stack of this function */
11808 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
11809 			/* write into map value */
11810 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11811 
11812 			/* if arg4 == 1 do *arg3 = 0 */
11813 			BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
11814 			/* fetch map_value_ptr from the stack of this function */
11815 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
11816 			/* write into map value */
11817 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
11818 			BPF_JMP_IMM(BPF_JA, 0, 0, -8),
11819 		},
11820 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
11821 		.fixup_map_hash_8b = { 12, 22 },
11822 		.result = REJECT,
11823 		.errstr = "invalid access to map value, value_size=8 off=2 size=8",
11824 	},
11825 	{
11826 		"calls: two calls that receive map_value_ptr_or_null via arg. test1",
11827 		.insns = {
11828 			/* main prog */
11829 			/* pass fp-16, fp-8 into a function */
11830 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11831 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11832 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11833 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11834 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11835 			BPF_MOV64_IMM(BPF_REG_0, 0),
11836 			BPF_EXIT_INSN(),
11837 
11838 			/* subprog 1 */
11839 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11840 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11841 			/* 1st lookup from map */
11842 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11843 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11844 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11845 			BPF_LD_MAP_FD(BPF_REG_1, 0),
11846 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11847 				     BPF_FUNC_map_lookup_elem),
11848 			/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
11849 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11850 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11851 			BPF_MOV64_IMM(BPF_REG_8, 0),
11852 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11853 			BPF_MOV64_IMM(BPF_REG_8, 1),
11854 
11855 			/* 2nd lookup from map */
11856 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11857 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11858 			BPF_LD_MAP_FD(BPF_REG_1, 0),
11859 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11860 				     BPF_FUNC_map_lookup_elem),
11861 			/* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
11862 			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
11863 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11864 			BPF_MOV64_IMM(BPF_REG_9, 0),
11865 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11866 			BPF_MOV64_IMM(BPF_REG_9, 1),
11867 
11868 			/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
11869 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11870 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
11871 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
11872 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
11873 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11874 			BPF_EXIT_INSN(),
11875 
11876 			/* subprog 2 */
11877 			/* if arg2 == 1 do *arg1 = 0 */
11878 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
11879 			/* fetch map_value_ptr from the stack of this function */
11880 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
11881 			/* write into map value */
11882 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11883 
11884 			/* if arg4 == 1 do *arg3 = 0 */
11885 			BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
11886 			/* fetch map_value_ptr from the stack of this function */
11887 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
11888 			/* write into map value */
11889 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11890 			BPF_EXIT_INSN(),
11891 		},
11892 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
11893 		.fixup_map_hash_8b = { 12, 22 },
11894 		.result = ACCEPT,
11895 	},
11896 	{
11897 		"calls: two calls that receive map_value_ptr_or_null via arg. test2",
11898 		.insns = {
11899 			/* main prog */
11900 			/* pass fp-16, fp-8 into a function */
11901 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11902 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11903 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11904 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11905 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11906 			BPF_MOV64_IMM(BPF_REG_0, 0),
11907 			BPF_EXIT_INSN(),
11908 
11909 			/* subprog 1 */
11910 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11911 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11912 			/* 1st lookup from map */
11913 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11914 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11915 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11916 			BPF_LD_MAP_FD(BPF_REG_1, 0),
11917 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11918 				     BPF_FUNC_map_lookup_elem),
11919 			/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
11920 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11921 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11922 			BPF_MOV64_IMM(BPF_REG_8, 0),
11923 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11924 			BPF_MOV64_IMM(BPF_REG_8, 1),
11925 
11926 			/* 2nd lookup from map */
11927 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11928 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11929 			BPF_LD_MAP_FD(BPF_REG_1, 0),
11930 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11931 				     BPF_FUNC_map_lookup_elem),
11932 			/* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
11933 			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
11934 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11935 			BPF_MOV64_IMM(BPF_REG_9, 0),
11936 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11937 			BPF_MOV64_IMM(BPF_REG_9, 1),
11938 
11939 			/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
11940 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11941 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
11942 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
11943 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
11944 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11945 			BPF_EXIT_INSN(),
11946 
11947 			/* subprog 2 */
11948 			/* if arg2 == 1 do *arg1 = 0 */
11949 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
11950 			/* fetch map_value_ptr from the stack of this function */
11951 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
11952 			/* write into map value */
11953 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11954 
11955 			/* if arg4 == 0 do *arg3 = 0 */
11956 			BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2),
11957 			/* fetch map_value_ptr from the stack of this function */
11958 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
11959 			/* write into map value */
11960 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11961 			BPF_EXIT_INSN(),
11962 		},
11963 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
11964 		.fixup_map_hash_8b = { 12, 22 },
11965 		.result = REJECT,
11966 		.errstr = "R0 invalid mem access 'inv'",
11967 	},
11968 	{
11969 		"calls: pkt_ptr spill into caller stack",
11970 		.insns = {
11971 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
11972 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
11973 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11974 			BPF_EXIT_INSN(),
11975 
11976 			/* subprog 1 */
11977 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
11978 				    offsetof(struct __sk_buff, data)),
11979 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
11980 				    offsetof(struct __sk_buff, data_end)),
11981 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11982 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
11983 			/* spill unchecked pkt_ptr into stack of caller */
11984 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
11985 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
11986 			/* now the pkt range is verified, read pkt_ptr from stack */
11987 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
11988 			/* write 4 bytes into packet */
11989 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
11990 			BPF_EXIT_INSN(),
11991 		},
11992 		.result = ACCEPT,
11993 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
11994 		.retval = POINTER_VALUE,
11995 	},
11996 	{
11997 		"calls: pkt_ptr spill into caller stack 2",
11998 		.insns = {
11999 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12000 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12001 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12002 			/* Marking is still kept, but not in all cases safe. */
12003 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12004 			BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
12005 			BPF_EXIT_INSN(),
12006 
12007 			/* subprog 1 */
12008 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12009 				    offsetof(struct __sk_buff, data)),
12010 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12011 				    offsetof(struct __sk_buff, data_end)),
12012 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12013 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12014 			/* spill unchecked pkt_ptr into stack of caller */
12015 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12016 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
12017 			/* now the pkt range is verified, read pkt_ptr from stack */
12018 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
12019 			/* write 4 bytes into packet */
12020 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12021 			BPF_EXIT_INSN(),
12022 		},
12023 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12024 		.errstr = "invalid access to packet",
12025 		.result = REJECT,
12026 	},
12027 	{
12028 		"calls: pkt_ptr spill into caller stack 3",
12029 		.insns = {
12030 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12031 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12032 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
12033 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
12034 			/* Marking is still kept and safe here. */
12035 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12036 			BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
12037 			BPF_EXIT_INSN(),
12038 
12039 			/* subprog 1 */
12040 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12041 				    offsetof(struct __sk_buff, data)),
12042 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12043 				    offsetof(struct __sk_buff, data_end)),
12044 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12045 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12046 			/* spill unchecked pkt_ptr into stack of caller */
12047 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12048 			BPF_MOV64_IMM(BPF_REG_5, 0),
12049 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
12050 			BPF_MOV64_IMM(BPF_REG_5, 1),
12051 			/* now the pkt range is verified, read pkt_ptr from stack */
12052 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
12053 			/* write 4 bytes into packet */
12054 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12055 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12056 			BPF_EXIT_INSN(),
12057 		},
12058 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12059 		.result = ACCEPT,
12060 		.retval = 1,
12061 	},
12062 	{
12063 		"calls: pkt_ptr spill into caller stack 4",
12064 		.insns = {
12065 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12066 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12067 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
12068 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
12069 			/* Check marking propagated. */
12070 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12071 			BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
12072 			BPF_EXIT_INSN(),
12073 
12074 			/* subprog 1 */
12075 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12076 				    offsetof(struct __sk_buff, data)),
12077 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12078 				    offsetof(struct __sk_buff, data_end)),
12079 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12080 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12081 			/* spill unchecked pkt_ptr into stack of caller */
12082 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12083 			BPF_MOV64_IMM(BPF_REG_5, 0),
12084 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
12085 			BPF_MOV64_IMM(BPF_REG_5, 1),
12086 			/* don't read back pkt_ptr from stack here */
12087 			/* write 4 bytes into packet */
12088 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12089 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12090 			BPF_EXIT_INSN(),
12091 		},
12092 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12093 		.result = ACCEPT,
12094 		.retval = 1,
12095 	},
12096 	{
12097 		"calls: pkt_ptr spill into caller stack 5",
12098 		.insns = {
12099 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12100 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12101 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
12102 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12103 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12104 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
12105 			BPF_EXIT_INSN(),
12106 
12107 			/* subprog 1 */
12108 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12109 				    offsetof(struct __sk_buff, data)),
12110 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12111 				    offsetof(struct __sk_buff, data_end)),
12112 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12113 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12114 			BPF_MOV64_IMM(BPF_REG_5, 0),
12115 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
12116 			/* spill checked pkt_ptr into stack of caller */
12117 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12118 			BPF_MOV64_IMM(BPF_REG_5, 1),
12119 			/* don't read back pkt_ptr from stack here */
12120 			/* write 4 bytes into packet */
12121 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12122 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12123 			BPF_EXIT_INSN(),
12124 		},
12125 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12126 		.errstr = "same insn cannot be used with different",
12127 		.result = REJECT,
12128 	},
12129 	{
12130 		"calls: pkt_ptr spill into caller stack 6",
12131 		.insns = {
12132 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12133 				    offsetof(struct __sk_buff, data_end)),
12134 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12135 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12136 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12137 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12138 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12139 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
12140 			BPF_EXIT_INSN(),
12141 
12142 			/* subprog 1 */
12143 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12144 				    offsetof(struct __sk_buff, data)),
12145 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12146 				    offsetof(struct __sk_buff, data_end)),
12147 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12148 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12149 			BPF_MOV64_IMM(BPF_REG_5, 0),
12150 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
12151 			/* spill checked pkt_ptr into stack of caller */
12152 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12153 			BPF_MOV64_IMM(BPF_REG_5, 1),
12154 			/* don't read back pkt_ptr from stack here */
12155 			/* write 4 bytes into packet */
12156 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12157 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12158 			BPF_EXIT_INSN(),
12159 		},
12160 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12161 		.errstr = "R4 invalid mem access",
12162 		.result = REJECT,
12163 	},
12164 	{
12165 		"calls: pkt_ptr spill into caller stack 7",
12166 		.insns = {
12167 			BPF_MOV64_IMM(BPF_REG_2, 0),
12168 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12169 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12170 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12171 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12172 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12173 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
12174 			BPF_EXIT_INSN(),
12175 
12176 			/* subprog 1 */
12177 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12178 				    offsetof(struct __sk_buff, data)),
12179 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12180 				    offsetof(struct __sk_buff, data_end)),
12181 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12182 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12183 			BPF_MOV64_IMM(BPF_REG_5, 0),
12184 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
12185 			/* spill checked pkt_ptr into stack of caller */
12186 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12187 			BPF_MOV64_IMM(BPF_REG_5, 1),
12188 			/* don't read back pkt_ptr from stack here */
12189 			/* write 4 bytes into packet */
12190 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12191 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12192 			BPF_EXIT_INSN(),
12193 		},
12194 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12195 		.errstr = "R4 invalid mem access",
12196 		.result = REJECT,
12197 	},
12198 	{
12199 		"calls: pkt_ptr spill into caller stack 8",
12200 		.insns = {
12201 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12202 				    offsetof(struct __sk_buff, data)),
12203 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12204 				    offsetof(struct __sk_buff, data_end)),
12205 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12206 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12207 			BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
12208 			BPF_EXIT_INSN(),
12209 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12210 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12211 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12212 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12213 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12214 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
12215 			BPF_EXIT_INSN(),
12216 
12217 			/* subprog 1 */
12218 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12219 				    offsetof(struct __sk_buff, data)),
12220 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12221 				    offsetof(struct __sk_buff, data_end)),
12222 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12223 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12224 			BPF_MOV64_IMM(BPF_REG_5, 0),
12225 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
12226 			/* spill checked pkt_ptr into stack of caller */
12227 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12228 			BPF_MOV64_IMM(BPF_REG_5, 1),
12229 			/* don't read back pkt_ptr from stack here */
12230 			/* write 4 bytes into packet */
12231 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12232 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12233 			BPF_EXIT_INSN(),
12234 		},
12235 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12236 		.result = ACCEPT,
12237 	},
12238 	{
12239 		"calls: pkt_ptr spill into caller stack 9",
12240 		.insns = {
12241 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12242 				    offsetof(struct __sk_buff, data)),
12243 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12244 				    offsetof(struct __sk_buff, data_end)),
12245 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12246 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12247 			BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
12248 			BPF_EXIT_INSN(),
12249 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12250 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12251 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12252 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12253 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12254 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
12255 			BPF_EXIT_INSN(),
12256 
12257 			/* subprog 1 */
12258 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12259 				    offsetof(struct __sk_buff, data)),
12260 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12261 				    offsetof(struct __sk_buff, data_end)),
12262 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12263 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12264 			BPF_MOV64_IMM(BPF_REG_5, 0),
12265 			/* spill unchecked pkt_ptr into stack of caller */
12266 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12267 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
12268 			BPF_MOV64_IMM(BPF_REG_5, 1),
12269 			/* don't read back pkt_ptr from stack here */
12270 			/* write 4 bytes into packet */
12271 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12272 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12273 			BPF_EXIT_INSN(),
12274 		},
12275 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12276 		.errstr = "invalid access to packet",
12277 		.result = REJECT,
12278 	},
12279 	{
12280 		"calls: caller stack init to zero or map_value_or_null",
12281 		.insns = {
12282 			BPF_MOV64_IMM(BPF_REG_0, 0),
12283 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
12284 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12285 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12286 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
12287 			/* fetch map_value_or_null or const_zero from stack */
12288 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
12289 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
12290 			/* store into map_value */
12291 			BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
12292 			BPF_EXIT_INSN(),
12293 
12294 			/* subprog 1 */
12295 			/* if (ctx == 0) return; */
12296 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
12297 			/* else bpf_map_lookup() and *(fp - 8) = r0 */
12298 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
12299 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12300 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12301 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12302 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12303 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12304 				     BPF_FUNC_map_lookup_elem),
12305 			/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
12306 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
12307 			BPF_EXIT_INSN(),
12308 		},
12309 		.fixup_map_hash_8b = { 13 },
12310 		.result = ACCEPT,
12311 		.prog_type = BPF_PROG_TYPE_XDP,
12312 	},
12313 	{
12314 		"calls: stack init to zero and pruning",
12315 		.insns = {
12316 			/* first make allocated_stack 16 byte */
12317 			/* first make allocated_stack 16 bytes */
12318 			/* now fork the execution such that the false branch
12319 			 * of the JGT insn will be verified second and it skips zero
12320 			 * init of fp-8 stack slot. If stack liveness marking
12321 			 * is missing live_read marks from call map_lookup
12322 			 * processing then pruning will incorrectly assume
12323 			 * that fp-8 stack slot was unused in the fall-through
12324 			 * branch and will accept the program incorrectly
12325 			 */
12326 			BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
12327 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12328 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
12329 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12330 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12331 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12332 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12333 				     BPF_FUNC_map_lookup_elem),
12334 			BPF_EXIT_INSN(),
12335 		},
12336 		.fixup_map_hash_48b = { 6 },
12337 		.errstr = "invalid indirect read from stack off -8+0 size 8",
12338 		.result = REJECT,
12339 		.prog_type = BPF_PROG_TYPE_XDP,
12340 	},
12341 	{
12342 		"calls: two calls returning different map pointers for lookup (hash, array)",
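		/* both call paths hand bpf_map_lookup_elem() a map with 48-byte
		 * values, so the 8-byte write at offset 0 of the lookup result
		 * is valid no matter which subprog provided the map pointer
		 */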
12343 		.insns = {
12344 			/* main prog */
12345 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
12346 			BPF_CALL_REL(11),
12347 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12348 			BPF_CALL_REL(12),
12349 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
12350 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12351 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12352 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12353 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12354 				     BPF_FUNC_map_lookup_elem),
12355 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
12356 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
12357 				   offsetof(struct test_val, foo)),
12358 			BPF_MOV64_IMM(BPF_REG_0, 1),
12359 			BPF_EXIT_INSN(),
12360 			/* subprog 1 */
12361 			BPF_LD_MAP_FD(BPF_REG_0, 0),
12362 			BPF_EXIT_INSN(),
12363 			/* subprog 2 */
12364 			BPF_LD_MAP_FD(BPF_REG_0, 0),
12365 			BPF_EXIT_INSN(),
12366 		},
12367 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12368 		.fixup_map_hash_48b = { 13 },
12369 		.fixup_map_array_48b = { 16 },
12370 		.result = ACCEPT,
12371 		.retval = 1,
12372 	},
12373 	{
12374 		"calls: two calls returning different map pointers for lookup (hash, map in map)",
12375 		.insns = {
12376 			/* main prog */
12377 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
12378 			BPF_CALL_REL(11),
12379 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12380 			BPF_CALL_REL(12),
12381 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
12382 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12383 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12384 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12385 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12386 				     BPF_FUNC_map_lookup_elem),
12387 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
12388 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
12389 				   offsetof(struct test_val, foo)),
12390 			BPF_MOV64_IMM(BPF_REG_0, 1),
12391 			BPF_EXIT_INSN(),
12392 			/* subprog 1 */
12393 			BPF_LD_MAP_FD(BPF_REG_0, 0),
12394 			BPF_EXIT_INSN(),
12395 			/* subprog 2 */
12396 			BPF_LD_MAP_FD(BPF_REG_0, 0),
12397 			BPF_EXIT_INSN(),
12398 		},
12399 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12400 		.fixup_map_in_map = { 16 },
12401 		.fixup_map_array_48b = { 13 },
12402 		.result = REJECT,
12403 		.errstr = "R0 invalid mem access 'map_ptr'",
12404 	},
12405 	{
12406 		"cond: two branches returning different map pointers for lookup (tail, tail)",
12407 		.insns = {
12408 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
12409 				    offsetof(struct __sk_buff, mark)),
12410 			BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 3),
12411 			BPF_LD_MAP_FD(BPF_REG_2, 0),
12412 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12413 			BPF_LD_MAP_FD(BPF_REG_2, 0),
12414 			BPF_MOV64_IMM(BPF_REG_3, 7),
12415 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12416 				     BPF_FUNC_tail_call),
12417 			BPF_MOV64_IMM(BPF_REG_0, 1),
12418 			BPF_EXIT_INSN(),
12419 		},
12420 		.fixup_prog1 = { 5 },
12421 		.fixup_prog2 = { 2 },
12422 		.result_unpriv = REJECT,
12423 		.errstr_unpriv = "tail_call abusing map_ptr",
12424 		.result = ACCEPT,
12425 		.retval = 42,
12426 	},
12427 	{
12428 		"cond: two branches returning same map pointers for lookup (tail, tail)",
12429 		.insns = {
12430 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
12431 				    offsetof(struct __sk_buff, mark)),
12432 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 3),
12433 			BPF_LD_MAP_FD(BPF_REG_2, 0),
12434 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12435 			BPF_LD_MAP_FD(BPF_REG_2, 0),
12436 			BPF_MOV64_IMM(BPF_REG_3, 7),
12437 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12438 				     BPF_FUNC_tail_call),
12439 			BPF_MOV64_IMM(BPF_REG_0, 1),
12440 			BPF_EXIT_INSN(),
12441 		},
12442 		.fixup_prog2 = { 2, 5 },
12443 		.result_unpriv = ACCEPT,
12444 		.result = ACCEPT,
12445 		.retval = 42,
12446 	},
12447 	{
12448 		"search pruning: all branches should be verified (nop operation)",
12449 		.insns = {
12450 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12451 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12452 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
12453 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12454 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
12455 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
12456 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
12457 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
12458 			BPF_MOV64_IMM(BPF_REG_4, 0),
12459 			BPF_JMP_A(1),
12460 			BPF_MOV64_IMM(BPF_REG_4, 1),
12461 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
12462 			BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
12463 			BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
12464 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_5, 0, 2),
12465 			BPF_MOV64_IMM(BPF_REG_6, 0),
12466 			BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xdead),
12467 			BPF_EXIT_INSN(),
12468 		},
12469 		.fixup_map_hash_8b = { 3 },
12470 		.errstr = "R6 invalid mem access 'inv'",
12471 		.result = REJECT,
12472 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
12473 	},
12474 	{
12475 		"search pruning: all branches should be verified (invalid stack access)",
12476 		.insns = {
12477 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12478 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12479 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
12480 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12481 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
12482 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
12483 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
12484 			BPF_MOV64_IMM(BPF_REG_4, 0),
12485 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
12486 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
12487 			BPF_JMP_A(1),
12488 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -24),
12489 			BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
12490 			BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
12491 			BPF_EXIT_INSN(),
12492 		},
12493 		.fixup_map_hash_8b = { 3 },
12494 		.errstr = "invalid read from stack off -16+0 size 8",
12495 		.result = REJECT,
12496 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
12497 	},
12498 	{
12499 		"jit: lsh, rsh, arsh by 1",
12500 		.insns = {
12501 			BPF_MOV64_IMM(BPF_REG_0, 1),
12502 			BPF_MOV64_IMM(BPF_REG_1, 0xff),
12503 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 1),
12504 			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 1),
12505 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x3fc, 1),
12506 			BPF_EXIT_INSN(),
12507 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 1),
12508 			BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 1),
12509 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0xff, 1),
12510 			BPF_EXIT_INSN(),
12511 			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 1),
12512 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x7f, 1),
12513 			BPF_EXIT_INSN(),
12514 			BPF_MOV64_IMM(BPF_REG_0, 2),
12515 			BPF_EXIT_INSN(),
12516 		},
12517 		.result = ACCEPT,
12518 		.retval = 2,
12519 	},
12520 	{
12521 		"jit: mov32 for ldimm64, 1",
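		/* r1 gets a 64-bit immediate with a non-zero upper half; after
		 * shifting right by 32 it must equal 0xfeffffff, which fails if
		 * a JIT emits a truncating 32-bit mov for the ldimm64
		 */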
12522 		.insns = {
12523 			BPF_MOV64_IMM(BPF_REG_0, 2),
12524 			BPF_LD_IMM64(BPF_REG_1, 0xfeffffffffffffffULL),
12525 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32),
12526 			BPF_LD_IMM64(BPF_REG_2, 0xfeffffffULL),
12527 			BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
12528 			BPF_MOV64_IMM(BPF_REG_0, 1),
12529 			BPF_EXIT_INSN(),
12530 		},
12531 		.result = ACCEPT,
12532 		.retval = 2,
12533 	},
12534 	{
12535 		"jit: mov32 for ldimm64, 2",
12536 		.insns = {
12537 			BPF_MOV64_IMM(BPF_REG_0, 1),
12538 			BPF_LD_IMM64(BPF_REG_1, 0x1ffffffffULL),
12539 			BPF_LD_IMM64(BPF_REG_2, 0xffffffffULL),
12540 			BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
12541 			BPF_MOV64_IMM(BPF_REG_0, 2),
12542 			BPF_EXIT_INSN(),
12543 		},
12544 		.result = ACCEPT,
12545 		.retval = 2,
12546 	},
12547 	{
12548 		"jit: various mul tests",
12549 		.insns = {
12550 			BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
12551 			BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
12552 			BPF_LD_IMM64(BPF_REG_1, 0xefefefULL),
12553 			BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
12554 			BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
12555 			BPF_MOV64_IMM(BPF_REG_0, 1),
12556 			BPF_EXIT_INSN(),
12557 			BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
12558 			BPF_ALU64_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
12559 			BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
12560 			BPF_MOV64_IMM(BPF_REG_0, 1),
12561 			BPF_EXIT_INSN(),
12562 			BPF_MOV32_REG(BPF_REG_2, BPF_REG_2),
12563 			BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
12564 			BPF_ALU32_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
12565 			BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
12566 			BPF_MOV64_IMM(BPF_REG_0, 1),
12567 			BPF_EXIT_INSN(),
12568 			BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
12569 			BPF_ALU32_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
12570 			BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
12571 			BPF_MOV64_IMM(BPF_REG_0, 1),
12572 			BPF_EXIT_INSN(),
12573 			BPF_LD_IMM64(BPF_REG_0, 0x952a7bbcULL),
12574 			BPF_LD_IMM64(BPF_REG_1, 0xfefefeULL),
12575 			BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
12576 			BPF_ALU32_REG(BPF_MUL, BPF_REG_2, BPF_REG_1),
12577 			BPF_JMP_REG(BPF_JEQ, BPF_REG_2, BPF_REG_0, 2),
12578 			BPF_MOV64_IMM(BPF_REG_0, 1),
12579 			BPF_EXIT_INSN(),
12580 			BPF_MOV64_IMM(BPF_REG_0, 2),
12581 			BPF_EXIT_INSN(),
12582 		},
12583 		.result = ACCEPT,
12584 		.retval = 2,
12585 	},
12586 	{
12587 		"xadd/w check unaligned stack",
12588 		.insns = {
12589 			BPF_MOV64_IMM(BPF_REG_0, 1),
12590 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
12591 			BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -7),
12592 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
12593 			BPF_EXIT_INSN(),
12594 		},
12595 		.result = REJECT,
12596 		.errstr = "misaligned stack access off",
12597 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12598 	},
12599 	{
12600 		"xadd/w check unaligned map",
12601 		.insns = {
12602 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12603 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12604 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12605 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12606 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12607 				     BPF_FUNC_map_lookup_elem),
12608 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
12609 			BPF_EXIT_INSN(),
12610 			BPF_MOV64_IMM(BPF_REG_1, 1),
12611 			BPF_STX_XADD(BPF_W, BPF_REG_0, BPF_REG_1, 3),
12612 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 3),
12613 			BPF_EXIT_INSN(),
12614 		},
12615 		.fixup_map_hash_8b = { 3 },
12616 		.result = REJECT,
12617 		.errstr = "misaligned value access off",
12618 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12619 	},
12620 	{
12621 		"xadd/w check unaligned pkt",
12622 		.insns = {
12623 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12624 				    offsetof(struct xdp_md, data)),
12625 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12626 				    offsetof(struct xdp_md, data_end)),
12627 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
12628 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
12629 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 2),
12630 			BPF_MOV64_IMM(BPF_REG_0, 99),
12631 			BPF_JMP_IMM(BPF_JA, 0, 0, 6),
12632 			BPF_MOV64_IMM(BPF_REG_0, 1),
12633 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12634 			BPF_ST_MEM(BPF_W, BPF_REG_2, 3, 0),
12635 			BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 1),
12636 			BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 2),
12637 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 1),
12638 			BPF_EXIT_INSN(),
12639 		},
12640 		.result = REJECT,
12641 		.errstr = "BPF_XADD stores into R2 pkt is not allowed",
12642 		.prog_type = BPF_PROG_TYPE_XDP,
12643 	},
12644 	{
12645 		"xadd/w check whether src/dst got mangled, 1",
12646 		.insns = {
12647 			BPF_MOV64_IMM(BPF_REG_0, 1),
12648 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
12649 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
12650 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
12651 			BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
12652 			BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
12653 			BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
12654 			BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
12655 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
12656 			BPF_EXIT_INSN(),
12657 			BPF_MOV64_IMM(BPF_REG_0, 42),
12658 			BPF_EXIT_INSN(),
12659 		},
12660 		.result = ACCEPT,
12661 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12662 		.retval = 3,
12663 	},
12664 	{
12665 		"xadd/w check whether src/dst got mangled, 2",
12666 		.insns = {
12667 			BPF_MOV64_IMM(BPF_REG_0, 1),
12668 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
12669 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
12670 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
12671 			BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
12672 			BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
12673 			BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
12674 			BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
12675 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
12676 			BPF_EXIT_INSN(),
12677 			BPF_MOV64_IMM(BPF_REG_0, 42),
12678 			BPF_EXIT_INSN(),
12679 		},
12680 		.result = ACCEPT,
12681 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12682 		.retval = 3,
12683 	},
12684 	{
12685 		"bpf_get_stack return R0 within range",
12686 		.insns = {
12687 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12688 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12689 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12690 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12691 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12692 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12693 				     BPF_FUNC_map_lookup_elem),
12694 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 28),
12695 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
12696 			BPF_MOV64_IMM(BPF_REG_9, sizeof(struct test_val)),
12697 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
12698 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
12699 			BPF_MOV64_IMM(BPF_REG_3, sizeof(struct test_val)),
12700 			BPF_MOV64_IMM(BPF_REG_4, 256),
12701 			BPF_EMIT_CALL(BPF_FUNC_get_stack),
12702 			BPF_MOV64_IMM(BPF_REG_1, 0),
12703 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
12704 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_8, 32),
12705 			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_8, 32),
12706 			BPF_JMP_REG(BPF_JSLT, BPF_REG_1, BPF_REG_8, 16),
12707 			BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_8),
12708 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
12709 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_8),
12710 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_9),
12711 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32),
12712 			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 32),
12713 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
12714 			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_1),
12715 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
12716 			BPF_MOV64_IMM(BPF_REG_5, sizeof(struct test_val)),
12717 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_5),
12718 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 4),
12719 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
12720 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_9),
12721 			BPF_MOV64_IMM(BPF_REG_4, 0),
12722 			BPF_EMIT_CALL(BPF_FUNC_get_stack),
12723 			BPF_EXIT_INSN(),
12724 		},
12725 		.fixup_map_hash_48b = { 4 },
12726 		.result = ACCEPT,
12727 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
12728 	},
12729 	{
12730 		"ld_abs: invalid op 1",
12731 		.insns = {
12732 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12733 			BPF_LD_ABS(BPF_DW, 0),
12734 			BPF_EXIT_INSN(),
12735 		},
12736 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12737 		.result = REJECT,
12738 		.errstr = "unknown opcode",
12739 	},
12740 	{
12741 		"ld_abs: invalid op 2",
12742 		.insns = {
12743 			BPF_MOV32_IMM(BPF_REG_0, 256),
12744 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12745 			BPF_LD_IND(BPF_DW, BPF_REG_0, 0),
12746 			BPF_EXIT_INSN(),
12747 		},
12748 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12749 		.result = REJECT,
12750 		.errstr = "unknown opcode",
12751 	},
12752 	{
12753 		"ld_abs: nmap reduced",
12754 		.insns = {
12755 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12756 			BPF_LD_ABS(BPF_H, 12),
12757 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 28),
12758 			BPF_LD_ABS(BPF_H, 12),
12759 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 26),
12760 			BPF_MOV32_IMM(BPF_REG_0, 18),
12761 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -64),
12762 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -64),
12763 			BPF_LD_IND(BPF_W, BPF_REG_7, 14),
12764 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -60),
12765 			BPF_MOV32_IMM(BPF_REG_0, 280971478),
12766 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
12767 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
12768 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -60),
12769 			BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
12770 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 15),
12771 			BPF_LD_ABS(BPF_H, 12),
12772 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 13),
12773 			BPF_MOV32_IMM(BPF_REG_0, 22),
12774 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
12775 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
12776 			BPF_LD_IND(BPF_H, BPF_REG_7, 14),
12777 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -52),
12778 			BPF_MOV32_IMM(BPF_REG_0, 17366),
12779 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -48),
12780 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -48),
12781 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -52),
12782 			BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
12783 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12784 			BPF_MOV32_IMM(BPF_REG_0, 256),
12785 			BPF_EXIT_INSN(),
12786 			BPF_MOV32_IMM(BPF_REG_0, 0),
12787 			BPF_EXIT_INSN(),
12788 		},
12789 		.data = {
12790 			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0x06, 0,
12791 			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
12792 			0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
12793 		},
12794 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12795 		.result = ACCEPT,
12796 		.retval = 256,
12797 	},
12798 	{
12799 		"ld_abs: div + abs, test 1",
12800 		.insns = {
12801 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
12802 			BPF_LD_ABS(BPF_B, 3),
12803 			BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
12804 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
12805 			BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
12806 			BPF_LD_ABS(BPF_B, 4),
12807 			BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
12808 			BPF_LD_IND(BPF_B, BPF_REG_8, -70),
12809 			BPF_EXIT_INSN(),
12810 		},
12811 		.data = {
12812 			10, 20, 30, 40, 50,
12813 		},
12814 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12815 		.result = ACCEPT,
12816 		.retval = 10,
12817 	},
12818 	{
12819 		"ld_abs: div + abs, test 2",
12820 		.insns = {
12821 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
12822 			BPF_LD_ABS(BPF_B, 3),
12823 			BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
12824 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
12825 			BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
12826 			BPF_LD_ABS(BPF_B, 128),
12827 			BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
12828 			BPF_LD_IND(BPF_B, BPF_REG_8, -70),
12829 			BPF_EXIT_INSN(),
12830 		},
12831 		.data = {
12832 			10, 20, 30, 40, 50,
12833 		},
12834 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12835 		.result = ACCEPT,
12836 		.retval = 0,
12837 	},
12838 	{
12839 		"ld_abs: div + abs, test 3",
12840 		.insns = {
12841 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
12842 			BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
12843 			BPF_LD_ABS(BPF_B, 3),
12844 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
12845 			BPF_EXIT_INSN(),
12846 		},
12847 		.data = {
12848 			10, 20, 30, 40, 50,
12849 		},
12850 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12851 		.result = ACCEPT,
12852 		.retval = 0,
12853 	},
12854 	{
12855 		"ld_abs: div + abs, test 4",
12856 		.insns = {
12857 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
12858 			BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
12859 			BPF_LD_ABS(BPF_B, 256),
12860 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
12861 			BPF_EXIT_INSN(),
12862 		},
12863 		.data = {
12864 			10, 20, 30, 40, 50,
12865 		},
12866 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12867 		.result = ACCEPT,
12868 		.retval = 0,
12869 	},
12870 	{
12871 		"ld_abs: vlan + abs, test 1",
12872 		.insns = { },
12873 		.data = {
12874 			0x34,
12875 		},
12876 		.fill_helper = bpf_fill_ld_abs_vlan_push_pop,
12877 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12878 		.result = ACCEPT,
12879 		.retval = 0xbef,
12880 	},
12881 	{
12882 		"ld_abs: vlan + abs, test 2",
12883 		.insns = {
12884 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12885 			BPF_LD_ABS(BPF_B, 0),
12886 			BPF_LD_ABS(BPF_H, 0),
12887 			BPF_LD_ABS(BPF_W, 0),
12888 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
12889 			BPF_MOV64_IMM(BPF_REG_6, 0),
12890 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
12891 			BPF_MOV64_IMM(BPF_REG_2, 1),
12892 			BPF_MOV64_IMM(BPF_REG_3, 2),
12893 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12894 				     BPF_FUNC_skb_vlan_push),
12895 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
12896 			BPF_LD_ABS(BPF_B, 0),
12897 			BPF_LD_ABS(BPF_H, 0),
12898 			BPF_LD_ABS(BPF_W, 0),
12899 			BPF_MOV64_IMM(BPF_REG_0, 42),
12900 			BPF_EXIT_INSN(),
12901 		},
12902 		.data = {
12903 			0x34,
12904 		},
12905 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12906 		.result = ACCEPT,
12907 		.retval = 42,
12908 	},
12909 	{
12910 		"ld_abs: jump around ld_abs",
12911 		.insns = { },
12912 		.data = {
12913 			10, 11,
12914 		},
12915 		.fill_helper = bpf_fill_jump_around_ld_abs,
12916 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12917 		.result = ACCEPT,
12918 		.retval = 10,
12919 	},
12920 	{
12921 		"ld_dw: xor semi-random 64 bit imms, test 1",
12922 		.insns = { },
12923 		.data = { },
12924 		.fill_helper = bpf_fill_rand_ld_dw,
12925 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12926 		.result = ACCEPT,
12927 		.retval = 4090,
12928 	},
12929 	{
12930 		"ld_dw: xor semi-random 64 bit imms, test 2",
12931 		.insns = { },
12932 		.data = { },
12933 		.fill_helper = bpf_fill_rand_ld_dw,
12934 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12935 		.result = ACCEPT,
12936 		.retval = 2047,
12937 	},
12938 	{
12939 		"ld_dw: xor semi-random 64 bit imms, test 3",
12940 		.insns = { },
12941 		.data = { },
12942 		.fill_helper = bpf_fill_rand_ld_dw,
12943 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12944 		.result = ACCEPT,
12945 		.retval = 511,
12946 	},
12947 	{
12948 		"ld_dw: xor semi-random 64 bit imms, test 4",
12949 		.insns = { },
12950 		.data = { },
12951 		.fill_helper = bpf_fill_rand_ld_dw,
12952 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12953 		.result = ACCEPT,
12954 		.retval = 5,
12955 	},
12956 	{
12957 		"pass unmodified ctx pointer to helper",
12958 		.insns = {
12959 			BPF_MOV64_IMM(BPF_REG_2, 0),
12960 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12961 				     BPF_FUNC_csum_update),
12962 			BPF_MOV64_IMM(BPF_REG_0, 0),
12963 			BPF_EXIT_INSN(),
12964 		},
12965 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12966 		.result = ACCEPT,
12967 	},
12968 	{
12969 		"reference tracking: leak potential reference",
12970 		.insns = {
12971 			BPF_SK_LOOKUP,
12972 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), /* leak reference */
12973 			BPF_EXIT_INSN(),
12974 		},
12975 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12976 		.errstr = "Unreleased reference",
12977 		.result = REJECT,
12978 	},
12979 	{
12980 		"reference tracking: leak potential reference on stack",
12981 		.insns = {
12982 			BPF_SK_LOOKUP,
12983 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12984 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12985 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
12986 			BPF_MOV64_IMM(BPF_REG_0, 0),
12987 			BPF_EXIT_INSN(),
12988 		},
12989 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12990 		.errstr = "Unreleased reference",
12991 		.result = REJECT,
12992 	},
12993 	{
12994 		"reference tracking: leak potential reference on stack 2",
12995 		.insns = {
12996 			BPF_SK_LOOKUP,
12997 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12998 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12999 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
13000 			BPF_MOV64_IMM(BPF_REG_0, 0),
13001 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
13002 			BPF_EXIT_INSN(),
13003 		},
13004 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13005 		.errstr = "Unreleased reference",
13006 		.result = REJECT,
13007 	},
13008 	{
13009 		"reference tracking: zero potential reference",
13010 		.insns = {
13011 			BPF_SK_LOOKUP,
13012 			BPF_MOV64_IMM(BPF_REG_0, 0), /* leak reference */
13013 			BPF_EXIT_INSN(),
13014 		},
13015 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13016 		.errstr = "Unreleased reference",
13017 		.result = REJECT,
13018 	},
13019 	{
13020 		"reference tracking: copy and zero potential references",
13021 		.insns = {
13022 			BPF_SK_LOOKUP,
13023 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
13024 			BPF_MOV64_IMM(BPF_REG_0, 0),
13025 			BPF_MOV64_IMM(BPF_REG_7, 0), /* leak reference */
13026 			BPF_EXIT_INSN(),
13027 		},
13028 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13029 		.errstr = "Unreleased reference",
13030 		.result = REJECT,
13031 	},
13032 	{
13033 		"reference tracking: release reference without check",
13034 		.insns = {
13035 			BPF_SK_LOOKUP,
13036 			/* reference in r0 may be NULL */
13037 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13038 			BPF_MOV64_IMM(BPF_REG_2, 0),
13039 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13040 			BPF_EXIT_INSN(),
13041 		},
13042 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13043 		.errstr = "type=sock_or_null expected=sock",
13044 		.result = REJECT,
13045 	},
13046 	{
13047 		"reference tracking: release reference",
13048 		.insns = {
13049 			BPF_SK_LOOKUP,
13050 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13051 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13052 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13053 			BPF_EXIT_INSN(),
13054 		},
13055 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13056 		.result = ACCEPT,
13057 	},
13058 	{
13059 		"reference tracking: release reference 2",
13060 		.insns = {
13061 			BPF_SK_LOOKUP,
13062 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13063 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
13064 			BPF_EXIT_INSN(),
13065 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13066 			BPF_EXIT_INSN(),
13067 		},
13068 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13069 		.result = ACCEPT,
13070 	},
13071 	{
13072 		"reference tracking: release reference twice",
13073 		.insns = {
13074 			BPF_SK_LOOKUP,
13075 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13076 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13077 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13078 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13079 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13080 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13081 			BPF_EXIT_INSN(),
13082 		},
13083 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13084 		.errstr = "type=inv expected=sock",
13085 		.result = REJECT,
13086 	},
13087 	{
13088 		"reference tracking: release reference twice inside branch",
13089 		.insns = {
13090 			BPF_SK_LOOKUP,
13091 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13092 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13093 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), /* goto end */
13094 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13095 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13096 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13097 			BPF_EXIT_INSN(),
13098 		},
13099 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13100 		.errstr = "type=inv expected=sock",
13101 		.result = REJECT,
13102 	},
13103 	{
13104 		"reference tracking: alloc, check, free in one subbranch",
13105 		.insns = {
13106 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
13107 				    offsetof(struct __sk_buff, data)),
13108 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
13109 				    offsetof(struct __sk_buff, data_end)),
13110 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
13111 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
13112 			/* if (offsetof(skb, mark) > data_len) exit; */
13113 			BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
13114 			BPF_EXIT_INSN(),
13115 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
13116 				    offsetof(struct __sk_buff, mark)),
13117 			BPF_SK_LOOKUP,
13118 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 1), /* mark == 0? */
13119 			/* Leak reference in R0 */
13120 			BPF_EXIT_INSN(),
13121 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
13122 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13123 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13124 			BPF_EXIT_INSN(),
13125 		},
13126 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13127 		.errstr = "Unreleased reference",
13128 		.result = REJECT,
13129 	},
13130 	{
13131 		"reference tracking: alloc, check, free in both subbranches",
13132 		.insns = {
13133 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
13134 				    offsetof(struct __sk_buff, data)),
13135 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
13136 				    offsetof(struct __sk_buff, data_end)),
13137 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
13138 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
13139 			/* if (offsetof(skb, mark) > data_len) exit; */
13140 			BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
13141 			BPF_EXIT_INSN(),
13142 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
13143 				    offsetof(struct __sk_buff, mark)),
13144 			BPF_SK_LOOKUP,
13145 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 4), /* mark == 0? */
13146 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
13147 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13148 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13149 			BPF_EXIT_INSN(),
13150 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
13151 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13152 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13153 			BPF_EXIT_INSN(),
13154 		},
13155 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13156 		.result = ACCEPT,
13157 	},
13158 	{
13159 		"reference tracking in call: free reference in subprog",
13160 		.insns = {
13161 			BPF_SK_LOOKUP,
13162 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
13163 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
13164 			BPF_MOV64_IMM(BPF_REG_0, 0),
13165 			BPF_EXIT_INSN(),
13166 
13167 			/* subprog 1 */
13168 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
13169 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
13170 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13171 			BPF_EXIT_INSN(),
13172 		},
13173 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13174 		.result = ACCEPT,
13175 	},
13176 	{
13177 		"pass modified ctx pointer to helper, 1",
13178 		.insns = {
13179 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
13180 			BPF_MOV64_IMM(BPF_REG_2, 0),
13181 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13182 				     BPF_FUNC_csum_update),
13183 			BPF_MOV64_IMM(BPF_REG_0, 0),
13184 			BPF_EXIT_INSN(),
13185 		},
13186 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13187 		.result = REJECT,
13188 		.errstr = "dereference of modified ctx ptr",
13189 	},
13190 	{
13191 		"pass modified ctx pointer to helper, 2",
13192 		.insns = {
13193 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
13194 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13195 				     BPF_FUNC_get_socket_cookie),
13196 			BPF_MOV64_IMM(BPF_REG_0, 0),
13197 			BPF_EXIT_INSN(),
13198 		},
13199 		.result_unpriv = REJECT,
13200 		.result = REJECT,
13201 		.errstr_unpriv = "dereference of modified ctx ptr",
13202 		.errstr = "dereference of modified ctx ptr",
13203 	},
13204 	{
13205 		"pass modified ctx pointer to helper, 3",
13206 		.insns = {
13207 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 0),
13208 			BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 4),
13209 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
13210 			BPF_MOV64_IMM(BPF_REG_2, 0),
13211 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13212 				     BPF_FUNC_csum_update),
13213 			BPF_MOV64_IMM(BPF_REG_0, 0),
13214 			BPF_EXIT_INSN(),
13215 		},
13216 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13217 		.result = REJECT,
13218 		.errstr = "variable ctx access var_off=(0x0; 0x4)",
13219 	},
13220 	{
13221 		"mov64 src == dst",
13222 		.insns = {
13223 			BPF_MOV64_IMM(BPF_REG_2, 0),
13224 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_2),
13225 			/* Check bounds are OK */
13226 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
13227 			BPF_MOV64_IMM(BPF_REG_0, 0),
13228 			BPF_EXIT_INSN(),
13229 		},
13230 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13231 		.result = ACCEPT,
13232 	},
13233 	{
13234 		"mov64 src != dst",
13235 		.insns = {
13236 			BPF_MOV64_IMM(BPF_REG_3, 0),
13237 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
13238 			/* Check bounds are OK */
13239 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
13240 			BPF_MOV64_IMM(BPF_REG_0, 0),
13241 			BPF_EXIT_INSN(),
13242 		},
13243 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13244 		.result = ACCEPT,
13245 	},
13246 	{
13247 		"reference tracking in call: free reference in subprog and outside",
13248 		.insns = {
13249 			BPF_SK_LOOKUP,
13250 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
13251 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13252 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
13253 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13254 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13255 			BPF_EXIT_INSN(),
13256 
13257 			/* subprog 1 */
13258 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
13259 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
13260 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13261 			BPF_EXIT_INSN(),
13262 		},
13263 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13264 		.errstr = "type=inv expected=sock",
13265 		.result = REJECT,
13266 	},
13267 	{
13268 		"reference tracking in call: alloc & leak reference in subprog",
13269 		.insns = {
13270 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13271 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13272 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
13273 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13274 			BPF_MOV64_IMM(BPF_REG_0, 0),
13275 			BPF_EXIT_INSN(),
13276 
13277 			/* subprog 1 */
13278 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_4),
13279 			BPF_SK_LOOKUP,
13280 			/* spill unchecked sk_ptr into stack of caller */
13281 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
13282 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13283 			BPF_EXIT_INSN(),
13284 		},
13285 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13286 		.errstr = "Unreleased reference",
13287 		.result = REJECT,
13288 	},
13289 	{
13290 		"reference tracking in call: alloc in subprog, release outside",
13291 		.insns = {
13292 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13293 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
13294 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13295 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13296 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13297 			BPF_EXIT_INSN(),
13298 
13299 			/* subprog 1 */
13300 			BPF_SK_LOOKUP,
13301 			BPF_EXIT_INSN(), /* return sk */
13302 		},
13303 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13304 		.retval = POINTER_VALUE,
13305 		.result = ACCEPT,
13306 	},
13307 	{
13308 		"reference tracking in call: sk_ptr leak into caller stack",
13309 		.insns = {
13310 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13311 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13312 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
13313 			BPF_MOV64_IMM(BPF_REG_0, 0),
13314 			BPF_EXIT_INSN(),
13315 
13316 			/* subprog 1 */
13317 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
13318 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
13319 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
13320 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
13321 			/* spill unchecked sk_ptr into stack of caller */
13322 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
13323 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
13324 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
13325 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
13326 			BPF_EXIT_INSN(),
13327 
13328 			/* subprog 2 */
13329 			BPF_SK_LOOKUP,
13330 			BPF_EXIT_INSN(),
13331 		},
13332 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13333 		.errstr = "Unreleased reference",
13334 		.result = REJECT,
13335 	},
13336 	{
13337 		"reference tracking in call: sk_ptr spill into caller stack",
13338 		.insns = {
13339 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13340 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13341 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
13342 			BPF_MOV64_IMM(BPF_REG_0, 0),
13343 			BPF_EXIT_INSN(),
13344 
13345 			/* subprog 1 */
13346 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
13347 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
13348 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
13349 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
13350 			/* spill unchecked sk_ptr into stack of caller */
13351 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
13352 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
13353 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
13354 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
13355 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
13356 			/* now the sk_ptr is verified, free the reference */
13357 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_4, 0),
13358 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13359 			BPF_EXIT_INSN(),
13360 
13361 			/* subprog 2 */
13362 			BPF_SK_LOOKUP,
13363 			BPF_EXIT_INSN(),
13364 		},
13365 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13366 		.result = ACCEPT,
13367 	},
13368 	{
13369 		"reference tracking: allow LD_ABS",
13370 		.insns = {
13371 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13372 			BPF_SK_LOOKUP,
13373 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13374 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13375 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13376 			BPF_LD_ABS(BPF_B, 0),
13377 			BPF_LD_ABS(BPF_H, 0),
13378 			BPF_LD_ABS(BPF_W, 0),
13379 			BPF_EXIT_INSN(),
13380 		},
13381 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13382 		.result = ACCEPT,
13383 	},
13384 	{
13385 		"reference tracking: forbid LD_ABS while holding reference",
13386 		.insns = {
13387 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13388 			BPF_SK_LOOKUP,
13389 			BPF_LD_ABS(BPF_B, 0),
13390 			BPF_LD_ABS(BPF_H, 0),
13391 			BPF_LD_ABS(BPF_W, 0),
13392 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13393 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13394 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13395 			BPF_EXIT_INSN(),
13396 		},
13397 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13398 		.errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",
13399 		.result = REJECT,
13400 	},
13401 	{
13402 		"reference tracking: allow LD_IND",
13403 		.insns = {
13404 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13405 			BPF_SK_LOOKUP,
13406 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13407 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13408 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13409 			BPF_MOV64_IMM(BPF_REG_7, 1),
13410 			BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
13411 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
13412 			BPF_EXIT_INSN(),
13413 		},
13414 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13415 		.result = ACCEPT,
13416 		.retval = 1,
13417 	},
13418 	{
13419 		"reference tracking: forbid LD_IND while holding reference",
13420 		.insns = {
13421 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13422 			BPF_SK_LOOKUP,
13423 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
13424 			BPF_MOV64_IMM(BPF_REG_7, 1),
13425 			BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
13426 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
13427 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_4),
13428 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
13429 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13430 			BPF_EXIT_INSN(),
13431 		},
13432 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13433 		.errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",
13434 		.result = REJECT,
13435 	},
13436 	{
13437 		"reference tracking: check reference or tail call",
13438 		.insns = {
13439 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
13440 			BPF_SK_LOOKUP,
13441 			/* if (sk) bpf_sk_release() */
13442 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13443 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 7),
13444 			/* bpf_tail_call() */
13445 			BPF_MOV64_IMM(BPF_REG_3, 2),
13446 			BPF_LD_MAP_FD(BPF_REG_2, 0),
13447 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
13448 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13449 				     BPF_FUNC_tail_call),
13450 			BPF_MOV64_IMM(BPF_REG_0, 0),
13451 			BPF_EXIT_INSN(),
13452 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13453 			BPF_EXIT_INSN(),
13454 		},
13455 		.fixup_prog1 = { 17 },
13456 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13457 		.result = ACCEPT,
13458 	},
13459 	{
13460 		"reference tracking: release reference then tail call",
13461 		.insns = {
13462 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
13463 			BPF_SK_LOOKUP,
13464 			/* if (sk) bpf_sk_release() */
13465 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13466 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
13467 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13468 			/* bpf_tail_call() */
13469 			BPF_MOV64_IMM(BPF_REG_3, 2),
13470 			BPF_LD_MAP_FD(BPF_REG_2, 0),
13471 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
13472 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13473 				     BPF_FUNC_tail_call),
13474 			BPF_MOV64_IMM(BPF_REG_0, 0),
13475 			BPF_EXIT_INSN(),
13476 		},
13477 		.fixup_prog1 = { 18 },
13478 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13479 		.result = ACCEPT,
13480 	},
13481 	{
13482 		"reference tracking: leak possible reference over tail call",
13483 		.insns = {
13484 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
13485 			/* Look up socket and store in REG_6 */
13486 			BPF_SK_LOOKUP,
13487 			/* bpf_tail_call() */
13488 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13489 			BPF_MOV64_IMM(BPF_REG_3, 2),
13490 			BPF_LD_MAP_FD(BPF_REG_2, 0),
13491 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
13492 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13493 				     BPF_FUNC_tail_call),
13494 			BPF_MOV64_IMM(BPF_REG_0, 0),
13495 			/* if (sk) bpf_sk_release() */
13496 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13497 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
13498 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13499 			BPF_EXIT_INSN(),
13500 		},
13501 		.fixup_prog1 = { 16 },
13502 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13503 		.errstr = "tail_call would lead to reference leak",
13504 		.result = REJECT,
13505 	},
13506 	{
13507 		"reference tracking: leak checked reference over tail call",
13508 		.insns = {
13509 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
13510 			/* Look up socket and store in REG_6 */
13511 			BPF_SK_LOOKUP,
13512 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13513 			/* if (!sk) goto end */
13514 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
13515 			/* bpf_tail_call() */
13516 			BPF_MOV64_IMM(BPF_REG_3, 0),
13517 			BPF_LD_MAP_FD(BPF_REG_2, 0),
13518 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
13519 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13520 				     BPF_FUNC_tail_call),
13521 			BPF_MOV64_IMM(BPF_REG_0, 0),
13522 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13523 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13524 			BPF_EXIT_INSN(),
13525 		},
13526 		.fixup_prog1 = { 17 },
13527 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13528 		.errstr = "tail_call would lead to reference leak",
13529 		.result = REJECT,
13530 	},
13531 	{
13532 		"reference tracking: mangle and release sock_or_null",
13533 		.insns = {
13534 			BPF_SK_LOOKUP,
13535 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13536 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
13537 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13538 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13539 			BPF_EXIT_INSN(),
13540 		},
13541 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13542 		.errstr = "R1 pointer arithmetic on sock_or_null prohibited",
13543 		.result = REJECT,
13544 	},
13545 	{
13546 		"reference tracking: mangle and release sock",
13547 		.insns = {
13548 			BPF_SK_LOOKUP,
13549 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13550 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
13551 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
13552 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13553 			BPF_EXIT_INSN(),
13554 		},
13555 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13556 		.errstr = "R1 pointer arithmetic on sock prohibited",
13557 		.result = REJECT,
13558 	},
13559 	{
13560 		"reference tracking: access member",
13561 		.insns = {
13562 			BPF_SK_LOOKUP,
13563 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13564 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
13565 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
13566 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13567 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13568 			BPF_EXIT_INSN(),
13569 		},
13570 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13571 		.result = ACCEPT,
13572 	},
13573 	{
13574 		"reference tracking: write to member",
13575 		.insns = {
13576 			BPF_SK_LOOKUP,
13577 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13578 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
13579 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13580 			BPF_LD_IMM64(BPF_REG_2, 42),
13581 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_2,
13582 				    offsetof(struct bpf_sock, mark)),
13583 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13584 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13585 			BPF_LD_IMM64(BPF_REG_0, 0),
13586 			BPF_EXIT_INSN(),
13587 		},
13588 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13589 		.errstr = "cannot write into socket",
13590 		.result = REJECT,
13591 	},
13592 	{
13593 		"reference tracking: invalid 64-bit access of member",
13594 		.insns = {
13595 			BPF_SK_LOOKUP,
13596 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13597 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
13598 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
13599 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13600 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13601 			BPF_EXIT_INSN(),
13602 		},
13603 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13604 		.errstr = "invalid bpf_sock access off=0 size=8",
13605 		.result = REJECT,
13606 	},
13607 	{
13608 		"reference tracking: access after release",
13609 		.insns = {
13610 			BPF_SK_LOOKUP,
13611 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13612 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
13613 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13614 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
13615 			BPF_EXIT_INSN(),
13616 		},
13617 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13618 		.errstr = "!read_ok",
13619 		.result = REJECT,
13620 	},
13621 	{
13622 		"reference tracking: direct access for lookup",
13623 		.insns = {
13624 			/* Check that the packet is at least 64B long */
13625 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
13626 				    offsetof(struct __sk_buff, data)),
13627 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
13628 				    offsetof(struct __sk_buff, data_end)),
13629 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
13630 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
13631 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
13632 			/* sk = sk_lookup_tcp(ctx, skb->data, ...) */
13633 			BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)),
13634 			BPF_MOV64_IMM(BPF_REG_4, 0),
13635 			BPF_MOV64_IMM(BPF_REG_5, 0),
13636 			BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp),
13637 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13638 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
13639 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
13640 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13641 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13642 			BPF_EXIT_INSN(),
13643 		},
13644 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13645 		.result = ACCEPT,
13646 	},
13647 };
13648 
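/* Determine a test program's length by scanning backwards from MAX_INSNS
 * for the last non-zero instruction and counting up to and including it.
 */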
13649 static int probe_filter_length(const struct bpf_insn *fp)
13650 {
13651 	int len;
13652 
13653 	for (len = MAX_INSNS - 1; len > 0; --len)
13654 		if (fp[len].code != 0 || fp[len].imm != 0)
13655 			break;
13656 	return len + 1;
13657 }
13658 
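/* Create a single test map of the given type; hash maps are created with
 * BPF_F_NO_PREALLOC.
 */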
13659 static int create_map(uint32_t type, uint32_t size_key,
13660 		      uint32_t size_value, uint32_t max_elem)
13661 {
13662 	int fd;
13663 
13664 	fd = bpf_create_map(type, size_key, size_value, max_elem,
13665 			    type == BPF_MAP_TYPE_HASH ? BPF_F_NO_PREALLOC : 0);
13666 	if (fd < 0)
13667 		printf("Failed to create map '%s'!\n", strerror(errno));
13668 
13669 	return fd;
13670 }
13671 
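/* Dummy tail-call target that simply returns 42. */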
13672 static int create_prog_dummy1(enum bpf_prog_type prog_type)
13673 {
13674 	struct bpf_insn prog[] = {
13675 		BPF_MOV64_IMM(BPF_REG_0, 42),
13676 		BPF_EXIT_INSN(),
13677 	};
13678 
13679 	return bpf_load_program(prog_type, prog,
13680 				ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
13681 }
13682 
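/* Dummy program that tail-calls into slot idx of prog array mfd and
 * returns 41 if the tail call falls through.
 */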
13683 static int create_prog_dummy2(enum bpf_prog_type prog_type, int mfd, int idx)
13684 {
13685 	struct bpf_insn prog[] = {
13686 		BPF_MOV64_IMM(BPF_REG_3, idx),
13687 		BPF_LD_MAP_FD(BPF_REG_2, mfd),
13688 		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13689 			     BPF_FUNC_tail_call),
13690 		BPF_MOV64_IMM(BPF_REG_0, 41),
13691 		BPF_EXIT_INSN(),
13692 	};
13693 
13694 	return bpf_load_program(prog_type, prog,
13695 				ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
13696 }
13697 
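/* Build a prog array for the tail-call tests: dummy1 is installed at
 * p1key, dummy2 at key 1.
 */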
13698 static int create_prog_array(enum bpf_prog_type prog_type, uint32_t max_elem,
13699 			     int p1key)
13700 {
13701 	int p2key = 1;
13702 	int mfd, p1fd, p2fd;
13703 
13704 	mfd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
13705 			     sizeof(int), max_elem, 0);
13706 	if (mfd < 0) {
13707 		printf("Failed to create prog array '%s'!\n", strerror(errno));
13708 		return -1;
13709 	}
13710 
13711 	p1fd = create_prog_dummy1(prog_type);
13712 	p2fd = create_prog_dummy2(prog_type, mfd, p2key);
13713 	if (p1fd < 0 || p2fd < 0)
13714 		goto out;
13715 	if (bpf_map_update_elem(mfd, &p1key, &p1fd, BPF_ANY) < 0)
13716 		goto out;
13717 	if (bpf_map_update_elem(mfd, &p2key, &p2fd, BPF_ANY) < 0)
13718 		goto out;
13719 	close(p2fd);
13720 	close(p1fd);
13721 
13722 	return mfd;
13723 out:
13724 	close(p2fd);
13725 	close(p1fd);
13726 	close(mfd);
13727 	return -1;
13728 }
13729 
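/* Create an ARRAY_OF_MAPS whose inner map is a one-element int array;
 * only the outer map fd is returned, the inner fd is closed.
 */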
13730 static int create_map_in_map(void)
13731 {
13732 	int inner_map_fd, outer_map_fd;
13733 
13734 	inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
13735 				      sizeof(int), 1, 0);
13736 	if (inner_map_fd < 0) {
13737 		printf("Failed to create array '%s'!\n", strerror(errno));
13738 		return inner_map_fd;
13739 	}
13740 
13741 	outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
13742 					     sizeof(int), inner_map_fd, 1, 0);
13743 	if (outer_map_fd < 0)
13744 		printf("Failed to create array of maps '%s'!\n",
13745 		       strerror(errno));
13746 
13747 	close(inner_map_fd);
13748 
13749 	return outer_map_fd;
13750 }
13751 
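/* Create (per-CPU) cgroup storage with a TEST_DATA_LEN-sized value. */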
13752 static int create_cgroup_storage(bool percpu)
13753 {
13754 	enum bpf_map_type type = percpu ? BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE :
13755 		BPF_MAP_TYPE_CGROUP_STORAGE;
13756 	int fd;
13757 
13758 	fd = bpf_create_map(type, sizeof(struct bpf_cgroup_storage_key),
13759 			    TEST_DATA_LEN, 0, 0);
13760 	if (fd < 0)
13761 		printf("Failed to create cgroup storage '%s'!\n",
13762 		       strerror(errno));
13763 
13764 	return fd;
13765 }
13766 
13767 static char bpf_vlog[UINT_MAX >> 8];
13768 
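/* Create the maps and prog arrays a test declares in its fixup_* tables
 * and patch their fds into the imm field of the corresponding map-fd
 * load instructions.
 */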
13769 static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
13770 			  struct bpf_insn *prog, int *map_fds)
13771 {
13772 	int *fixup_map_hash_8b = test->fixup_map_hash_8b;
13773 	int *fixup_map_hash_48b = test->fixup_map_hash_48b;
13774 	int *fixup_map_hash_16b = test->fixup_map_hash_16b;
13775 	int *fixup_map_array_48b = test->fixup_map_array_48b;
13776 	int *fixup_map_sockmap = test->fixup_map_sockmap;
13777 	int *fixup_map_sockhash = test->fixup_map_sockhash;
13778 	int *fixup_map_xskmap = test->fixup_map_xskmap;
13779 	int *fixup_map_stacktrace = test->fixup_map_stacktrace;
13780 	int *fixup_prog1 = test->fixup_prog1;
13781 	int *fixup_prog2 = test->fixup_prog2;
13782 	int *fixup_map_in_map = test->fixup_map_in_map;
13783 	int *fixup_cgroup_storage = test->fixup_cgroup_storage;
13784 	int *fixup_percpu_cgroup_storage = test->fixup_percpu_cgroup_storage;
13785 
13786 	if (test->fill_helper)
13787 		test->fill_helper(test);
13788 
13789 	/* Allocating HTs with a single element is fine here, since we only
13790 	 * exercise the verifier and never do a runtime lookup, so the only
13791 	 * thing that really matters is the value size in this case.
13792 	 */
13793 	if (*fixup_map_hash_8b) {
13794 		map_fds[0] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
13795 					sizeof(long long), 1);
13796 		do {
13797 			prog[*fixup_map_hash_8b].imm = map_fds[0];
13798 			fixup_map_hash_8b++;
13799 		} while (*fixup_map_hash_8b);
13800 	}
13801 
13802 	if (*fixup_map_hash_48b) {
13803 		map_fds[1] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
13804 					sizeof(struct test_val), 1);
13805 		do {
13806 			prog[*fixup_map_hash_48b].imm = map_fds[1];
13807 			fixup_map_hash_48b++;
13808 		} while (*fixup_map_hash_48b);
13809 	}
13810 
13811 	if (*fixup_map_hash_16b) {
13812 		map_fds[2] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
13813 					sizeof(struct other_val), 1);
13814 		do {
13815 			prog[*fixup_map_hash_16b].imm = map_fds[2];
13816 			fixup_map_hash_16b++;
13817 		} while (*fixup_map_hash_16b);
13818 	}
13819 
13820 	if (*fixup_map_array_48b) {
13821 		map_fds[3] = create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
13822 					sizeof(struct test_val), 1);
13823 		do {
13824 			prog[*fixup_map_array_48b].imm = map_fds[3];
13825 			fixup_map_array_48b++;
13826 		} while (*fixup_map_array_48b);
13827 	}
13828 
13829 	if (*fixup_prog1) {
13830 		map_fds[4] = create_prog_array(prog_type, 4, 0);
13831 		do {
13832 			prog[*fixup_prog1].imm = map_fds[4];
13833 			fixup_prog1++;
13834 		} while (*fixup_prog1);
13835 	}
13836 
13837 	if (*fixup_prog2) {
13838 		map_fds[5] = create_prog_array(prog_type, 8, 7);
13839 		do {
13840 			prog[*fixup_prog2].imm = map_fds[5];
13841 			fixup_prog2++;
13842 		} while (*fixup_prog2);
13843 	}
13844 
13845 	if (*fixup_map_in_map) {
13846 		map_fds[6] = create_map_in_map();
13847 		do {
13848 			prog[*fixup_map_in_map].imm = map_fds[6];
13849 			fixup_map_in_map++;
13850 		} while (*fixup_map_in_map);
13851 	}
13852 
13853 	if (*fixup_cgroup_storage) {
13854 		map_fds[7] = create_cgroup_storage(false);
13855 		do {
13856 			prog[*fixup_cgroup_storage].imm = map_fds[7];
13857 			fixup_cgroup_storage++;
13858 		} while (*fixup_cgroup_storage);
13859 	}
13860 
13861 	if (*fixup_percpu_cgroup_storage) {
13862 		map_fds[8] = create_cgroup_storage(true);
13863 		do {
13864 			prog[*fixup_percpu_cgroup_storage].imm = map_fds[8];
13865 			fixup_percpu_cgroup_storage++;
13866 		} while (*fixup_percpu_cgroup_storage);
13867 	}
13868 	if (*fixup_map_sockmap) {
13869 		map_fds[9] = create_map(BPF_MAP_TYPE_SOCKMAP, sizeof(int),
13870 					sizeof(int), 1);
13871 		do {
13872 			prog[*fixup_map_sockmap].imm = map_fds[9];
13873 			fixup_map_sockmap++;
13874 		} while (*fixup_map_sockmap);
13875 	}
13876 	if (*fixup_map_sockhash) {
13877 		map_fds[10] = create_map(BPF_MAP_TYPE_SOCKHASH, sizeof(int),
13878 					sizeof(int), 1);
13879 		do {
13880 			prog[*fixup_map_sockhash].imm = map_fds[10];
13881 			fixup_map_sockhash++;
13882 		} while (*fixup_map_sockhash);
13883 	}
13884 	if (*fixup_map_xskmap) {
13885 		map_fds[11] = create_map(BPF_MAP_TYPE_XSKMAP, sizeof(int),
13886 					sizeof(int), 1);
13887 		do {
13888 			prog[*fixup_map_xskmap].imm = map_fds[11];
13889 			fixup_map_xskmap++;
13890 		} while (*fixup_map_xskmap);
13891 	}
13892 	if (*fixup_map_stacktrace) {
13893 		map_fds[12] = create_map(BPF_MAP_TYPE_STACK_TRACE, sizeof(u32),
13894 					 sizeof(u64), 1);
13895 		do {
13896 			prog[*fixup_map_stacktrace].imm = map_fds[12];
13897 			fixup_map_stacktrace++;
13898 		} while (*fixup_map_stacktrace);
13899 	}
13900 }
13901 
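/* Load (and, if it loads, run) a single test program and compare the
 * verifier verdict, the error string and the runtime return value
 * against the test's expectations.
 */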
13902 static void do_test_single(struct bpf_test *test, bool unpriv,
13903 			   int *passes, int *errors)
13904 {
13905 	int fd_prog, expected_ret, reject_from_alignment;
13906 	int prog_len, prog_type = test->prog_type;
13907 	struct bpf_insn *prog = test->insns;
13908 	int map_fds[MAX_NR_MAPS];
13909 	const char *expected_err;
13910 	uint32_t retval;
13911 	int i, err;
13912 
13913 	for (i = 0; i < MAX_NR_MAPS; i++)
13914 		map_fds[i] = -1;
13915 
13916 	if (!prog_type)
13917 		prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
13918 	do_test_fixup(test, prog_type, prog, map_fds);
13919 	prog_len = probe_filter_length(prog);
13920 
13921 	fd_prog = bpf_verify_program(prog_type, prog, prog_len,
13922 				     test->flags & F_LOAD_WITH_STRICT_ALIGNMENT,
13923 				     "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);
13924 
13925 	expected_ret = unpriv && test->result_unpriv != UNDEF ?
13926 		       test->result_unpriv : test->result;
13927 	expected_err = unpriv && test->errstr_unpriv ?
13928 		       test->errstr_unpriv : test->errstr;
13929 
13930 	reject_from_alignment = fd_prog < 0 &&
13931 				(test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) &&
13932 				strstr(bpf_vlog, "Unknown alignment.");
13933 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
13934 	if (reject_from_alignment) {
13935 		printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n",
13936 		       strerror(errno));
13937 		goto fail_log;
13938 	}
13939 #endif
13940 	if (expected_ret == ACCEPT) {
13941 		if (fd_prog < 0 && !reject_from_alignment) {
13942 			printf("FAIL\nFailed to load prog '%s'!\n",
13943 			       strerror(errno));
13944 			goto fail_log;
13945 		}
13946 	} else {
13947 		if (fd_prog >= 0) {
13948 			printf("FAIL\nUnexpectedly succeeded to load!\n");
13949 			goto fail_log;
13950 		}
13951 		if (!strstr(bpf_vlog, expected_err) && !reject_from_alignment) {
13952 			printf("FAIL\nUnexpected error message!\n\tEXP: %s\n\tRES: %s\n",
13953 			      expected_err, bpf_vlog);
13954 			goto fail_log;
13955 		}
13956 	}
13957 
13958 	if (fd_prog >= 0) {
13959 		__u8 tmp[TEST_DATA_LEN << 2];
13960 		__u32 size_tmp = sizeof(tmp);
13961 
13962 		err = bpf_prog_test_run(fd_prog, 1, test->data,
13963 					sizeof(test->data), tmp, &size_tmp,
13964 					&retval, NULL);
13965 		if (err && errno != 524/*ENOTSUPP*/ && errno != EPERM) {
13966 			printf("Unexpected bpf_prog_test_run error\n");
13967 			goto fail_log;
13968 		}
13969 		if (!err && retval != test->retval &&
13970 		    test->retval != POINTER_VALUE) {
13971 			printf("FAIL retval %d != %d\n", retval, test->retval);
13972 			goto fail_log;
13973 		}
13974 	}
13975 	(*passes)++;
13976 	printf("OK%s\n", reject_from_alignment ?
13977 	       " (NOTE: reject due to unknown alignment)" : "");
13978 close_fds:
13979 	close(fd_prog);
13980 	for (i = 0; i < MAX_NR_MAPS; i++)
13981 		close(map_fds[i]);
13982 	sched_yield();
13983 	return;
13984 fail_log:
13985 	(*errors)++;
13986 	printf("%s", bpf_vlog);
13987 	goto close_fds;
13988 }
13989 
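/* True if CAP_SYS_ADMIN is present in the effective capability set. */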
13990 static bool is_admin(void)
13991 {
13992 	cap_t caps;
13993 	cap_flag_value_t sysadmin = CAP_CLEAR;
13994 	const cap_value_t cap_val = CAP_SYS_ADMIN;
13995 
13996 #ifdef CAP_IS_SUPPORTED
13997 	if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
13998 		perror("CAP_IS_SUPPORTED");
13999 		return false;
14000 	}
14001 #endif
14002 	caps = cap_get_proc();
14003 	if (!caps) {
14004 		perror("cap_get_proc");
14005 		return false;
14006 	}
14007 	if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
14008 		perror("cap_get_flag");
14009 	if (cap_free(caps))
14010 		perror("cap_free");
14011 	return (sysadmin == CAP_SET);
14012 }
14013 
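/* Raise or drop CAP_SYS_ADMIN in the effective set so a privileged run
 * can also exercise the unprivileged code paths.
 */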
14014 static int set_admin(bool admin)
14015 {
14016 	cap_t caps;
14017 	const cap_value_t cap_val = CAP_SYS_ADMIN;
14018 	int ret = -1;
14019 
14020 	caps = cap_get_proc();
14021 	if (!caps) {
14022 		perror("cap_get_proc");
14023 		return -1;
14024 	}
14025 	if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
14026 				admin ? CAP_SET : CAP_CLEAR)) {
14027 		perror("cap_set_flag");
14028 		goto out;
14029 	}
14030 	if (cap_set_proc(caps)) {
14031 		perror("cap_set_proc");
14032 		goto out;
14033 	}
14034 	ret = 0;
14035 out:
14036 	if (cap_free(caps))
14037 		perror("cap_free");
14038 	return ret;
14039 }
14040 
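/* Read kernel.unprivileged_bpf_disabled; an unreadable sysctl is treated
 * as disabled.
 */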
14041 static void get_unpriv_disabled(void)
14042 {
14043 	char buf[2];
14044 	FILE *fd;
14045 
14046 	fd = fopen("/proc/sys/"UNPRIV_SYSCTL, "r");
14047 	if (!fd) {
14048 		perror("fopen /proc/sys/"UNPRIV_SYSCTL);
14049 		unpriv_disabled = true;
14050 		return;
14051 	}
14052 	if (fgets(buf, 2, fd) == buf && atoi(buf))
14053 		unpriv_disabled = true;
14054 	fclose(fd);
14055 }
14056 
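/* Only program types that can be loaded without CAP_SYS_ADMIN get an
 * additional unprivileged ("/u") run.
 */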
14057 static bool test_as_unpriv(struct bpf_test *test)
14058 {
14059 	return !test->prog_type ||
14060 	       test->prog_type == BPF_PROG_TYPE_SOCKET_FILTER ||
14061 	       test->prog_type == BPF_PROG_TYPE_CGROUP_SKB;
14062 }
14063 
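/* Run tests[from..to): an unprivileged pass where applicable, then a
 * privileged pass unless we are already running unprivileged.
 */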
14064 static int do_test(bool unpriv, unsigned int from, unsigned int to)
14065 {
14066 	int i, passes = 0, errors = 0, skips = 0;
14067 
14068 	for (i = from; i < to; i++) {
14069 		struct bpf_test *test = &tests[i];
14070 
14071 		/* Tests that would run unprivileged are skipped right away
14072 		 * when unprivileged BPF is disabled via sysctl.
14073 		 */
14074 		if (test_as_unpriv(test) && unpriv_disabled) {
14075 			printf("#%d/u %s SKIP\n", i, test->descr);
14076 			skips++;
14077 		} else if (test_as_unpriv(test)) {
14078 			if (!unpriv)
14079 				set_admin(false);
14080 			printf("#%d/u %s ", i, test->descr);
14081 			do_test_single(test, true, &passes, &errors);
14082 			if (!unpriv)
14083 				set_admin(true);
14084 		}
14085 
14086 		if (unpriv) {
14087 			printf("#%d/p %s SKIP\n", i, test->descr);
14088 			skips++;
14089 		} else {
14090 			printf("#%d/p %s ", i, test->descr);
14091 			do_test_single(test, false, &passes, &errors);
14092 		}
14093 	}
14094 
14095 	printf("Summary: %d PASSED, %d SKIPPED, %d FAILED\n", passes,
14096 	       skips, errors);
14097 	return errors ? EXIT_FAILURE : EXIT_SUCCESS;
14098 }
14099 
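/* Usage:
 *   test_verifier           run all tests
 *   test_verifier N         run only test #N
 *   test_verifier L U       run tests #L through #U inclusive
 */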
14100 int main(int argc, char **argv)
14101 {
14102 	unsigned int from = 0, to = ARRAY_SIZE(tests);
14103 	bool unpriv = !is_admin();
14104 
14105 	if (argc == 3) {
14106 		unsigned int l = atoi(argv[argc - 2]);
14107 		unsigned int u = atoi(argv[argc - 1]);
14108 
14109 		if (l < to && u < to) {
14110 			from = l;
14111 			to   = u + 1;
14112 		}
14113 	} else if (argc == 2) {
14114 		unsigned int t = atoi(argv[argc - 1]);
14115 
14116 		if (t < to) {
14117 			from = t;
14118 			to   = t + 1;
14119 		}
14120 	}
14121 
14122 	get_unpriv_disabled();
14123 	if (unpriv && unpriv_disabled) {
14124 		printf("Cannot run as unprivileged user with sysctl %s.\n",
14125 		       UNPRIV_SYSCTL);
14126 		return EXIT_FAILURE;
14127 	}
14128 
14129 	bpf_semi_rand_init();
14130 	return do_test(unpriv, from, to);
14131 }
14132