xref: /linux/tools/testing/selftests/bpf/test_verifier.c (revision b053b439b72ad152257ecc3f71cfb4c619b0137e)
1 /*
2  * Testsuite for eBPF verifier
3  *
4  * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
5  * Copyright (c) 2017 Facebook
6  * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
7  *
8  * This program is free software; you can redistribute it and/or
9  * modify it under the terms of version 2 of the GNU General Public
10  * License as published by the Free Software Foundation.
11  */
12 
13 #include <endian.h>
14 #include <asm/types.h>
15 #include <linux/types.h>
16 #include <stdint.h>
17 #include <stdio.h>
18 #include <stdlib.h>
19 #include <unistd.h>
20 #include <errno.h>
21 #include <string.h>
22 #include <stddef.h>
23 #include <stdbool.h>
24 #include <sched.h>
25 #include <limits.h>
26 
27 #include <sys/capability.h>
28 
29 #include <linux/unistd.h>
30 #include <linux/filter.h>
31 #include <linux/bpf_perf_event.h>
32 #include <linux/bpf.h>
33 #include <linux/if_ether.h>
34 
35 #include <bpf/bpf.h>
36 
37 #ifdef HAVE_GENHDR
38 # include "autoconf.h"
39 #else
40 # if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
41 #  define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
42 # endif
43 #endif
44 #include "bpf_rlimit.h"
45 #include "bpf_rand.h"
46 #include "bpf_util.h"
47 #include "../../../include/linux/filter.h"
48 
49 #define MAX_INSNS	BPF_MAXINSNS
50 #define MAX_FIXUPS	8
51 #define MAX_NR_MAPS	13
52 #define POINTER_VALUE	0xcafe4all
53 #define TEST_DATA_LEN	64
54 
55 #define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS	(1 << 0)
56 #define F_LOAD_WITH_STRICT_ALIGNMENT		(1 << 1)
57 
58 #define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
59 static bool unpriv_disabled = false;
60 
61 struct bpf_test {
62 	const char *descr;
63 	struct bpf_insn	insns[MAX_INSNS];
64 	int fixup_map_hash_8b[MAX_FIXUPS];
65 	int fixup_map_hash_48b[MAX_FIXUPS];
66 	int fixup_map_hash_16b[MAX_FIXUPS];
67 	int fixup_map_array_48b[MAX_FIXUPS];
68 	int fixup_map_sockmap[MAX_FIXUPS];
69 	int fixup_map_sockhash[MAX_FIXUPS];
70 	int fixup_map_xskmap[MAX_FIXUPS];
71 	int fixup_map_stacktrace[MAX_FIXUPS];
72 	int fixup_prog1[MAX_FIXUPS];
73 	int fixup_prog2[MAX_FIXUPS];
74 	int fixup_map_in_map[MAX_FIXUPS];
75 	int fixup_cgroup_storage[MAX_FIXUPS];
76 	int fixup_percpu_cgroup_storage[MAX_FIXUPS];
77 	const char *errstr;
78 	const char *errstr_unpriv;
79 	uint32_t retval, retval_unpriv;
80 	enum {
81 		UNDEF,
82 		ACCEPT,
83 		REJECT
84 	} result, result_unpriv;
85 	enum bpf_prog_type prog_type;
86 	uint8_t flags;
87 	__u8 data[TEST_DATA_LEN];
88 	void (*fill_helper)(struct bpf_test *self);
89 };
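/* Editorial note, not part of the original file: each fixup_* array above
 * lists instruction indices into insns[] at which the test harness patches
 * the immediate of a BPF_LD_MAP_FD with the fd of a freshly created map of
 * the corresponding type, so a single test template can run against real
 * maps without hard-coding descriptors.
 */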
90 
91 /* Note: we want this to be 64-bit aligned so that the end of our array is
92  * actually the end of the structure.
93  */
94 #define MAX_ENTRIES 11
95 
96 struct test_val {
97 	unsigned int index;
98 	int foo[MAX_ENTRIES];
99 };
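/* Illustrative check, not part of the original selftest (assumes a C11
 * compiler for _Static_assert): with MAX_ENTRIES = 11,
 * sizeof(struct test_val) = 4 + 11 * 4 = 48 bytes, a multiple of 8, so the
 * 48-byte map values used by the *_48b fixups end exactly where foo[] ends.
 */
_Static_assert(sizeof(struct test_val) == 48, "test_val must stay 48 bytes");
_Static_assert(sizeof(struct test_val) % 8 == 0, "test_val must stay 64-bit aligned");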
100 
101 struct other_val {
102 	long long foo;
103 	long long bar;
104 };
105 
106 static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
107 {
108 	/* test: {skb->data[0], vlan_push} x PUSH_CNT + {skb->data[0], vlan_pop} x PUSH_CNT, repeated 5 times */
109 #define PUSH_CNT 51
110 	unsigned int len = BPF_MAXINSNS;
111 	struct bpf_insn *insn = self->insns;
112 	int i = 0, j, k = 0;
113 
114 	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
115 loop:
116 	for (j = 0; j < PUSH_CNT; j++) {
117 		insn[i++] = BPF_LD_ABS(BPF_B, 0);
118 		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2);
119 		i++;
120 		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
121 		insn[i++] = BPF_MOV64_IMM(BPF_REG_2, 1);
122 		insn[i++] = BPF_MOV64_IMM(BPF_REG_3, 2);
123 		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
124 					 BPF_FUNC_skb_vlan_push),
125 		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2);
126 		i++;
127 	}
128 
129 	for (j = 0; j < PUSH_CNT; j++) {
130 		insn[i++] = BPF_LD_ABS(BPF_B, 0);
131 		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2);
132 		i++;
133 		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
134 		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
135 					 BPF_FUNC_skb_vlan_pop),
136 		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2);
137 		i++;
138 	}
139 	if (++k < 5)
140 		goto loop;
141 
142 	for (; i < len - 1; i++)
143 		insn[i] = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 0xbef);
144 	insn[len - 1] = BPF_EXIT_INSN();
145 }
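/* Editorial note, not from the original file: every conditional jump emitted
 * above uses an offset of (len - i - 2); a jump at index i with offset off
 * lands at i + 1 + off, so each failed check branches straight to the final
 * BPF_EXIT_INSN() at index len - 1.  The same idiom recurs in
 * bpf_fill_jump_around_ld_abs() below.  The harness invokes ->fill_helper()
 * at runtime to populate insns[] before loading the program.
 */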
146 
147 static void bpf_fill_jump_around_ld_abs(struct bpf_test *self)
148 {
149 	struct bpf_insn *insn = self->insns;
150 	unsigned int len = BPF_MAXINSNS;
151 	int i = 0;
152 
153 	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
154 	insn[i++] = BPF_LD_ABS(BPF_B, 0);
155 	insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 10, len - i - 2);
156 	i++;
157 	while (i < len - 1)
158 		insn[i++] = BPF_LD_ABS(BPF_B, 1);
159 	insn[i] = BPF_EXIT_INSN();
160 }
161 
162 static void bpf_fill_rand_ld_dw(struct bpf_test *self)
163 {
164 	struct bpf_insn *insn = self->insns;
165 	uint64_t res = 0;
166 	int i = 0;
167 
168 	insn[i++] = BPF_MOV32_IMM(BPF_REG_0, 0);
169 	while (i < self->retval) {
170 		uint64_t val = bpf_semi_rand_get();
171 		struct bpf_insn tmp[2] = { BPF_LD_IMM64(BPF_REG_1, val) };
172 
173 		res ^= val;
174 		insn[i++] = tmp[0];
175 		insn[i++] = tmp[1];
176 		insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
177 	}
178 	insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
179 	insn[i++] = BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32);
180 	insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
181 	insn[i] = BPF_EXIT_INSN();
182 	res ^= (res >> 32);
183 	self->retval = (uint32_t)res;
184 }
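/* Editorial sketch, not part of the original file: the fold above mirrors
 * what the generated program does.  The program XORs each random 64-bit
 * immediate into R0 and finally XORs R0's upper 32 bits into its lower 32
 * bits, so the 32-bit return value can be checked against the host-side
 * checksum computed the same way, e.g. (hypothetical helper):
 *
 *	static uint32_t fold64(uint64_t res)
 *	{
 *		return (uint32_t)(res ^ (res >> 32));
 *	}
 *
 * Note that self->retval is used both as input (the requested program
 * length) and as output (the expected folded checksum).
 */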
185 
186 /* BPF_SK_LOOKUP expands to 13 instructions; account for that when fixing up map indices */
187 #define BPF_SK_LOOKUP							\
188 	/* struct bpf_sock_tuple tuple = {} */				\
189 	BPF_MOV64_IMM(BPF_REG_2, 0),					\
190 	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8),			\
191 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -16),		\
192 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -24),		\
193 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -32),		\
194 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -40),		\
195 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -48),		\
196 	/* sk = sk_lookup_tcp(ctx, &tuple, sizeof tuple, 0, 0) */	\
197 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),				\
198 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),				\
199 	BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)),	\
200 	BPF_MOV64_IMM(BPF_REG_4, 0),					\
201 	BPF_MOV64_IMM(BPF_REG_5, 0),					\
202 	BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp)
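/* Editorial usage sketch, not taken from this file: since BPF_SK_LOOKUP
 * expands to 13 instructions, a test that places it ahead of a map load has
 * to shift its fixup index accordingly.  A hypothetical entry might look
 * like:
 *
 *	{
 *		"sk_lookup followed by map lookup (illustrative)",
 *		.insns = {
 *			BPF_SK_LOOKUP,
 *			BPF_LD_MAP_FD(BPF_REG_1, 0),	// occupies insns 13-14
 *			...
 *		},
 *		.fixup_map_hash_8b = { 13 },	// index of the BPF_LD_MAP_FD
 *	},
 */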
203 
204 static struct bpf_test tests[] = {
205 	{
206 		"add+sub+mul",
207 		.insns = {
208 			BPF_MOV64_IMM(BPF_REG_1, 1),
209 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
210 			BPF_MOV64_IMM(BPF_REG_2, 3),
211 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
212 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
213 			BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
214 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
215 			BPF_EXIT_INSN(),
216 		},
217 		.result = ACCEPT,
218 		.retval = -3,
219 	},
220 	{
221 		"DIV32 by 0, zero check 1",
222 		.insns = {
223 			BPF_MOV32_IMM(BPF_REG_0, 42),
224 			BPF_MOV32_IMM(BPF_REG_1, 0),
225 			BPF_MOV32_IMM(BPF_REG_2, 1),
226 			BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
227 			BPF_EXIT_INSN(),
228 		},
229 		.result = ACCEPT,
230 		.retval = 42,
231 	},
232 	{
233 		"DIV32 by 0, zero check 2",
234 		.insns = {
235 			BPF_MOV32_IMM(BPF_REG_0, 42),
236 			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
237 			BPF_MOV32_IMM(BPF_REG_2, 1),
238 			BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
239 			BPF_EXIT_INSN(),
240 		},
241 		.result = ACCEPT,
242 		.retval = 42,
243 	},
244 	{
245 		"DIV64 by 0, zero check",
246 		.insns = {
247 			BPF_MOV32_IMM(BPF_REG_0, 42),
248 			BPF_MOV32_IMM(BPF_REG_1, 0),
249 			BPF_MOV32_IMM(BPF_REG_2, 1),
250 			BPF_ALU64_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
251 			BPF_EXIT_INSN(),
252 		},
253 		.result = ACCEPT,
254 		.retval = 42,
255 	},
256 	{
257 		"MOD32 by 0, zero check 1",
258 		.insns = {
259 			BPF_MOV32_IMM(BPF_REG_0, 42),
260 			BPF_MOV32_IMM(BPF_REG_1, 0),
261 			BPF_MOV32_IMM(BPF_REG_2, 1),
262 			BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
263 			BPF_EXIT_INSN(),
264 		},
265 		.result = ACCEPT,
266 		.retval = 42,
267 	},
268 	{
269 		"MOD32 by 0, zero check 2",
270 		.insns = {
271 			BPF_MOV32_IMM(BPF_REG_0, 42),
272 			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
273 			BPF_MOV32_IMM(BPF_REG_2, 1),
274 			BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
275 			BPF_EXIT_INSN(),
276 		},
277 		.result = ACCEPT,
278 		.retval = 42,
279 	},
280 	{
281 		"MOD64 by 0, zero check",
282 		.insns = {
283 			BPF_MOV32_IMM(BPF_REG_0, 42),
284 			BPF_MOV32_IMM(BPF_REG_1, 0),
285 			BPF_MOV32_IMM(BPF_REG_2, 1),
286 			BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
287 			BPF_EXIT_INSN(),
288 		},
289 		.result = ACCEPT,
290 		.retval = 42,
291 	},
292 	{
293 		"DIV32 by 0, zero check ok, cls",
294 		.insns = {
295 			BPF_MOV32_IMM(BPF_REG_0, 42),
296 			BPF_MOV32_IMM(BPF_REG_1, 2),
297 			BPF_MOV32_IMM(BPF_REG_2, 16),
298 			BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
299 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
300 			BPF_EXIT_INSN(),
301 		},
302 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
303 		.result = ACCEPT,
304 		.retval = 8,
305 	},
306 	{
307 		"DIV32 by 0, zero check 1, cls",
308 		.insns = {
309 			BPF_MOV32_IMM(BPF_REG_1, 0),
310 			BPF_MOV32_IMM(BPF_REG_0, 1),
311 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
312 			BPF_EXIT_INSN(),
313 		},
314 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
315 		.result = ACCEPT,
316 		.retval = 0,
317 	},
318 	{
319 		"DIV32 by 0, zero check 2, cls",
320 		.insns = {
321 			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
322 			BPF_MOV32_IMM(BPF_REG_0, 1),
323 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
324 			BPF_EXIT_INSN(),
325 		},
326 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
327 		.result = ACCEPT,
328 		.retval = 0,
329 	},
330 	{
331 		"DIV64 by 0, zero check, cls",
332 		.insns = {
333 			BPF_MOV32_IMM(BPF_REG_1, 0),
334 			BPF_MOV32_IMM(BPF_REG_0, 1),
335 			BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
336 			BPF_EXIT_INSN(),
337 		},
338 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
339 		.result = ACCEPT,
340 		.retval = 0,
341 	},
342 	{
343 		"MOD32 by 0, zero check ok, cls",
344 		.insns = {
345 			BPF_MOV32_IMM(BPF_REG_0, 42),
346 			BPF_MOV32_IMM(BPF_REG_1, 3),
347 			BPF_MOV32_IMM(BPF_REG_2, 5),
348 			BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
349 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
350 			BPF_EXIT_INSN(),
351 		},
352 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
353 		.result = ACCEPT,
354 		.retval = 2,
355 	},
356 	{
357 		"MOD32 by 0, zero check 1, cls",
358 		.insns = {
359 			BPF_MOV32_IMM(BPF_REG_1, 0),
360 			BPF_MOV32_IMM(BPF_REG_0, 1),
361 			BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
362 			BPF_EXIT_INSN(),
363 		},
364 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
365 		.result = ACCEPT,
366 		.retval = 1,
367 	},
368 	{
369 		"MOD32 by 0, zero check 2, cls",
370 		.insns = {
371 			BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
372 			BPF_MOV32_IMM(BPF_REG_0, 1),
373 			BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
374 			BPF_EXIT_INSN(),
375 		},
376 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
377 		.result = ACCEPT,
378 		.retval = 1,
379 	},
380 	{
381 		"MOD64 by 0, zero check 1, cls",
382 		.insns = {
383 			BPF_MOV32_IMM(BPF_REG_1, 0),
384 			BPF_MOV32_IMM(BPF_REG_0, 2),
385 			BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
386 			BPF_EXIT_INSN(),
387 		},
388 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
389 		.result = ACCEPT,
390 		.retval = 2,
391 	},
392 	{
393 		"MOD64 by 0, zero check 2, cls",
394 		.insns = {
395 			BPF_MOV32_IMM(BPF_REG_1, 0),
396 			BPF_MOV32_IMM(BPF_REG_0, -1),
397 			BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
398 			BPF_EXIT_INSN(),
399 		},
400 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
401 		.result = ACCEPT,
402 		.retval = -1,
403 	},
404 	/* Just make sure that JITs use udiv/umod, as otherwise we would get
405 	 * an exception from INT_MIN/-1 overflow, just as with division
406 	 * by zero.
407 	 */
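	/* Editorial illustration, not part of the original comment: with the
	 * unsigned divide the JITs are expected to emit, INT_MIN / -1 in
	 * 32-bit arithmetic is 0x80000000u / 0xffffffffu == 0 and
	 * 0x80000000u % 0xffffffffu == 0x80000000u, matching the .retval
	 * values of 0 and INT_MIN in the checks below, whereas a signed idiv
	 * would fault on the overflow.
	 */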
408 	{
409 		"DIV32 overflow, check 1",
410 		.insns = {
411 			BPF_MOV32_IMM(BPF_REG_1, -1),
412 			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
413 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
414 			BPF_EXIT_INSN(),
415 		},
416 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
417 		.result = ACCEPT,
418 		.retval = 0,
419 	},
420 	{
421 		"DIV32 overflow, check 2",
422 		.insns = {
423 			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
424 			BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, -1),
425 			BPF_EXIT_INSN(),
426 		},
427 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
428 		.result = ACCEPT,
429 		.retval = 0,
430 	},
431 	{
432 		"DIV64 overflow, check 1",
433 		.insns = {
434 			BPF_MOV64_IMM(BPF_REG_1, -1),
435 			BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
436 			BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
437 			BPF_EXIT_INSN(),
438 		},
439 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
440 		.result = ACCEPT,
441 		.retval = 0,
442 	},
443 	{
444 		"DIV64 overflow, check 2",
445 		.insns = {
446 			BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
447 			BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, -1),
448 			BPF_EXIT_INSN(),
449 		},
450 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
451 		.result = ACCEPT,
452 		.retval = 0,
453 	},
454 	{
455 		"MOD32 overflow, check 1",
456 		.insns = {
457 			BPF_MOV32_IMM(BPF_REG_1, -1),
458 			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
459 			BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
460 			BPF_EXIT_INSN(),
461 		},
462 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
463 		.result = ACCEPT,
464 		.retval = INT_MIN,
465 	},
466 	{
467 		"MOD32 overflow, check 2",
468 		.insns = {
469 			BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
470 			BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, -1),
471 			BPF_EXIT_INSN(),
472 		},
473 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
474 		.result = ACCEPT,
475 		.retval = INT_MIN,
476 	},
477 	{
478 		"MOD64 overflow, check 1",
479 		.insns = {
480 			BPF_MOV64_IMM(BPF_REG_1, -1),
481 			BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
482 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
483 			BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
484 			BPF_MOV32_IMM(BPF_REG_0, 0),
485 			BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
486 			BPF_MOV32_IMM(BPF_REG_0, 1),
487 			BPF_EXIT_INSN(),
488 		},
489 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
490 		.result = ACCEPT,
491 		.retval = 1,
492 	},
493 	{
494 		"MOD64 overflow, check 2",
495 		.insns = {
496 			BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
497 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
498 			BPF_ALU64_IMM(BPF_MOD, BPF_REG_2, -1),
499 			BPF_MOV32_IMM(BPF_REG_0, 0),
500 			BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
501 			BPF_MOV32_IMM(BPF_REG_0, 1),
502 			BPF_EXIT_INSN(),
503 		},
504 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
505 		.result = ACCEPT,
506 		.retval = 1,
507 	},
508 	{
509 		"xor32 zero extend check",
510 		.insns = {
511 			BPF_MOV32_IMM(BPF_REG_2, -1),
512 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 32),
513 			BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 0xffff),
514 			BPF_ALU32_REG(BPF_XOR, BPF_REG_2, BPF_REG_2),
515 			BPF_MOV32_IMM(BPF_REG_0, 2),
516 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 1),
517 			BPF_MOV32_IMM(BPF_REG_0, 1),
518 			BPF_EXIT_INSN(),
519 		},
520 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
521 		.result = ACCEPT,
522 		.retval = 1,
523 	},
524 	{
525 		"empty prog",
526 		.insns = {
527 		},
528 		.errstr = "unknown opcode 00",
529 		.result = REJECT,
530 	},
531 	{
532 		"only exit insn",
533 		.insns = {
534 			BPF_EXIT_INSN(),
535 		},
536 		.errstr = "R0 !read_ok",
537 		.result = REJECT,
538 	},
539 	{
540 		"unreachable",
541 		.insns = {
542 			BPF_EXIT_INSN(),
543 			BPF_EXIT_INSN(),
544 		},
545 		.errstr = "unreachable",
546 		.result = REJECT,
547 	},
548 	{
549 		"unreachable2",
550 		.insns = {
551 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
552 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
553 			BPF_EXIT_INSN(),
554 		},
555 		.errstr = "unreachable",
556 		.result = REJECT,
557 	},
558 	{
559 		"out of range jump",
560 		.insns = {
561 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
562 			BPF_EXIT_INSN(),
563 		},
564 		.errstr = "jump out of range",
565 		.result = REJECT,
566 	},
567 	{
568 		"out of range jump2",
569 		.insns = {
570 			BPF_JMP_IMM(BPF_JA, 0, 0, -2),
571 			BPF_EXIT_INSN(),
572 		},
573 		.errstr = "jump out of range",
574 		.result = REJECT,
575 	},
576 	{
577 		"test1 ld_imm64",
578 		.insns = {
579 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
580 			BPF_LD_IMM64(BPF_REG_0, 0),
581 			BPF_LD_IMM64(BPF_REG_0, 0),
582 			BPF_LD_IMM64(BPF_REG_0, 1),
583 			BPF_LD_IMM64(BPF_REG_0, 1),
584 			BPF_MOV64_IMM(BPF_REG_0, 2),
585 			BPF_EXIT_INSN(),
586 		},
587 		.errstr = "invalid BPF_LD_IMM insn",
588 		.errstr_unpriv = "R1 pointer comparison",
589 		.result = REJECT,
590 	},
591 	{
592 		"test2 ld_imm64",
593 		.insns = {
594 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
595 			BPF_LD_IMM64(BPF_REG_0, 0),
596 			BPF_LD_IMM64(BPF_REG_0, 0),
597 			BPF_LD_IMM64(BPF_REG_0, 1),
598 			BPF_LD_IMM64(BPF_REG_0, 1),
599 			BPF_EXIT_INSN(),
600 		},
601 		.errstr = "invalid BPF_LD_IMM insn",
602 		.errstr_unpriv = "R1 pointer comparison",
603 		.result = REJECT,
604 	},
605 	{
606 		"test3 ld_imm64",
607 		.insns = {
608 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
609 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
610 			BPF_LD_IMM64(BPF_REG_0, 0),
611 			BPF_LD_IMM64(BPF_REG_0, 0),
612 			BPF_LD_IMM64(BPF_REG_0, 1),
613 			BPF_LD_IMM64(BPF_REG_0, 1),
614 			BPF_EXIT_INSN(),
615 		},
616 		.errstr = "invalid bpf_ld_imm64 insn",
617 		.result = REJECT,
618 	},
619 	{
620 		"test4 ld_imm64",
621 		.insns = {
622 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
623 			BPF_EXIT_INSN(),
624 		},
625 		.errstr = "invalid bpf_ld_imm64 insn",
626 		.result = REJECT,
627 	},
628 	{
629 		"test5 ld_imm64",
630 		.insns = {
631 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
632 		},
633 		.errstr = "invalid bpf_ld_imm64 insn",
634 		.result = REJECT,
635 	},
636 	{
637 		"test6 ld_imm64",
638 		.insns = {
639 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
640 			BPF_RAW_INSN(0, 0, 0, 0, 0),
641 			BPF_EXIT_INSN(),
642 		},
643 		.result = ACCEPT,
644 	},
645 	{
646 		"test7 ld_imm64",
647 		.insns = {
648 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
649 			BPF_RAW_INSN(0, 0, 0, 0, 1),
650 			BPF_EXIT_INSN(),
651 		},
652 		.result = ACCEPT,
653 		.retval = 1,
654 	},
655 	{
656 		"test8 ld_imm64",
657 		.insns = {
658 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1),
659 			BPF_RAW_INSN(0, 0, 0, 0, 1),
660 			BPF_EXIT_INSN(),
661 		},
662 		.errstr = "uses reserved fields",
663 		.result = REJECT,
664 	},
665 	{
666 		"test9 ld_imm64",
667 		.insns = {
668 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
669 			BPF_RAW_INSN(0, 0, 0, 1, 1),
670 			BPF_EXIT_INSN(),
671 		},
672 		.errstr = "invalid bpf_ld_imm64 insn",
673 		.result = REJECT,
674 	},
675 	{
676 		"test10 ld_imm64",
677 		.insns = {
678 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
679 			BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1),
680 			BPF_EXIT_INSN(),
681 		},
682 		.errstr = "invalid bpf_ld_imm64 insn",
683 		.result = REJECT,
684 	},
685 	{
686 		"test11 ld_imm64",
687 		.insns = {
688 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
689 			BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
690 			BPF_EXIT_INSN(),
691 		},
692 		.errstr = "invalid bpf_ld_imm64 insn",
693 		.result = REJECT,
694 	},
695 	{
696 		"test12 ld_imm64",
697 		.insns = {
698 			BPF_MOV64_IMM(BPF_REG_1, 0),
699 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
700 			BPF_RAW_INSN(0, 0, 0, 0, 1),
701 			BPF_EXIT_INSN(),
702 		},
703 		.errstr = "not pointing to valid bpf_map",
704 		.result = REJECT,
705 	},
706 	{
707 		"test13 ld_imm64",
708 		.insns = {
709 			BPF_MOV64_IMM(BPF_REG_1, 0),
710 			BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
711 			BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
712 			BPF_EXIT_INSN(),
713 		},
714 		.errstr = "invalid bpf_ld_imm64 insn",
715 		.result = REJECT,
716 	},
717 	{
718 		"arsh32 on imm",
719 		.insns = {
720 			BPF_MOV64_IMM(BPF_REG_0, 1),
721 			BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 5),
722 			BPF_EXIT_INSN(),
723 		},
724 		.result = ACCEPT,
725 		.retval = 0,
726 	},
727 	{
728 		"arsh32 on imm 2",
729 		.insns = {
730 			BPF_LD_IMM64(BPF_REG_0, 0x1122334485667788),
731 			BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 7),
732 			BPF_EXIT_INSN(),
733 		},
734 		.result = ACCEPT,
735 		.retval = -16069393,
736 	},
737 	{
738 		"arsh32 on reg",
739 		.insns = {
740 			BPF_MOV64_IMM(BPF_REG_0, 1),
741 			BPF_MOV64_IMM(BPF_REG_1, 5),
742 			BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
743 			BPF_EXIT_INSN(),
744 		},
745 		.result = ACCEPT,
746 		.retval = 0,
747 	},
748 	{
749 		"arsh32 on reg 2",
750 		.insns = {
751 			BPF_LD_IMM64(BPF_REG_0, 0xffff55667788),
752 			BPF_MOV64_IMM(BPF_REG_1, 15),
753 			BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
754 			BPF_EXIT_INSN(),
755 		},
756 		.result = ACCEPT,
757 		.retval = 43724,
758 	},
759 	{
760 		"arsh64 on imm",
761 		.insns = {
762 			BPF_MOV64_IMM(BPF_REG_0, 1),
763 			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_0, 5),
764 			BPF_EXIT_INSN(),
765 		},
766 		.result = ACCEPT,
767 	},
768 	{
769 		"arsh64 on reg",
770 		.insns = {
771 			BPF_MOV64_IMM(BPF_REG_0, 1),
772 			BPF_MOV64_IMM(BPF_REG_1, 5),
773 			BPF_ALU64_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
774 			BPF_EXIT_INSN(),
775 		},
776 		.result = ACCEPT,
777 	},
778 	{
779 		"no bpf_exit",
780 		.insns = {
781 			BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
782 		},
783 		.errstr = "not an exit",
784 		.result = REJECT,
785 	},
786 	{
787 		"loop (back-edge)",
788 		.insns = {
789 			BPF_JMP_IMM(BPF_JA, 0, 0, -1),
790 			BPF_EXIT_INSN(),
791 		},
792 		.errstr = "back-edge",
793 		.result = REJECT,
794 	},
795 	{
796 		"loop2 (back-edge)",
797 		.insns = {
798 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
799 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
800 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
801 			BPF_JMP_IMM(BPF_JA, 0, 0, -4),
802 			BPF_EXIT_INSN(),
803 		},
804 		.errstr = "back-edge",
805 		.result = REJECT,
806 	},
807 	{
808 		"conditional loop",
809 		.insns = {
810 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
811 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
812 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
813 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
814 			BPF_EXIT_INSN(),
815 		},
816 		.errstr = "back-edge",
817 		.result = REJECT,
818 	},
819 	{
820 		"read uninitialized register",
821 		.insns = {
822 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
823 			BPF_EXIT_INSN(),
824 		},
825 		.errstr = "R2 !read_ok",
826 		.result = REJECT,
827 	},
828 	{
829 		"read invalid register",
830 		.insns = {
831 			BPF_MOV64_REG(BPF_REG_0, -1),
832 			BPF_EXIT_INSN(),
833 		},
834 		.errstr = "R15 is invalid",
835 		.result = REJECT,
836 	},
837 	{
838 		"program doesn't init R0 before exit",
839 		.insns = {
840 			BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
841 			BPF_EXIT_INSN(),
842 		},
843 		.errstr = "R0 !read_ok",
844 		.result = REJECT,
845 	},
846 	{
847 		"program doesn't init R0 before exit in all branches",
848 		.insns = {
849 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
850 			BPF_MOV64_IMM(BPF_REG_0, 1),
851 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
852 			BPF_EXIT_INSN(),
853 		},
854 		.errstr = "R0 !read_ok",
855 		.errstr_unpriv = "R1 pointer comparison",
856 		.result = REJECT,
857 	},
858 	{
859 		"stack out of bounds",
860 		.insns = {
861 			BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
862 			BPF_EXIT_INSN(),
863 		},
864 		.errstr = "invalid stack",
865 		.result = REJECT,
866 	},
867 	{
868 		"invalid call insn1",
869 		.insns = {
870 			BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
871 			BPF_EXIT_INSN(),
872 		},
873 		.errstr = "unknown opcode 8d",
874 		.result = REJECT,
875 	},
876 	{
877 		"invalid call insn2",
878 		.insns = {
879 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
880 			BPF_EXIT_INSN(),
881 		},
882 		.errstr = "BPF_CALL uses reserved",
883 		.result = REJECT,
884 	},
885 	{
886 		"invalid function call",
887 		.insns = {
888 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
889 			BPF_EXIT_INSN(),
890 		},
891 		.errstr = "invalid func unknown#1234567",
892 		.result = REJECT,
893 	},
894 	{
895 		"uninitialized stack1",
896 		.insns = {
897 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
898 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
899 			BPF_LD_MAP_FD(BPF_REG_1, 0),
900 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
901 				     BPF_FUNC_map_lookup_elem),
902 			BPF_EXIT_INSN(),
903 		},
904 		.fixup_map_hash_8b = { 2 },
905 		.errstr = "invalid indirect read from stack",
906 		.result = REJECT,
907 	},
908 	{
909 		"uninitialized stack2",
910 		.insns = {
911 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
912 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
913 			BPF_EXIT_INSN(),
914 		},
915 		.errstr = "invalid read from stack",
916 		.result = REJECT,
917 	},
918 	{
919 		"invalid fp arithmetic",
920 		/* If this ever gets changed, make sure JITs can deal with it. */
921 		.insns = {
922 			BPF_MOV64_IMM(BPF_REG_0, 0),
923 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
924 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8),
925 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
926 			BPF_EXIT_INSN(),
927 		},
928 		.errstr = "R1 subtraction from stack pointer",
929 		.result = REJECT,
930 	},
931 	{
932 		"non-invalid fp arithmetic",
933 		.insns = {
934 			BPF_MOV64_IMM(BPF_REG_0, 0),
935 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
936 			BPF_EXIT_INSN(),
937 		},
938 		.result = ACCEPT,
939 	},
940 	{
941 		"invalid argument register",
942 		.insns = {
943 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
944 				     BPF_FUNC_get_cgroup_classid),
945 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
946 				     BPF_FUNC_get_cgroup_classid),
947 			BPF_EXIT_INSN(),
948 		},
949 		.errstr = "R1 !read_ok",
950 		.result = REJECT,
951 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
952 	},
953 	{
954 		"non-invalid argument register",
955 		.insns = {
956 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
957 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
958 				     BPF_FUNC_get_cgroup_classid),
959 			BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
960 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
961 				     BPF_FUNC_get_cgroup_classid),
962 			BPF_EXIT_INSN(),
963 		},
964 		.result = ACCEPT,
965 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
966 	},
967 	{
968 		"check valid spill/fill",
969 		.insns = {
970 			/* spill R1(ctx) into stack */
971 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
972 			/* fill it back into R2 */
973 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
974 			/* should be able to access R0 = *(R2 + 8) */
975 			/* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
976 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
977 			BPF_EXIT_INSN(),
978 		},
979 		.errstr_unpriv = "R0 leaks addr",
980 		.result = ACCEPT,
981 		.result_unpriv = REJECT,
982 		.retval = POINTER_VALUE,
983 	},
984 	{
985 		"check valid spill/fill, skb mark",
986 		.insns = {
987 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
988 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
989 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
990 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
991 				    offsetof(struct __sk_buff, mark)),
992 			BPF_EXIT_INSN(),
993 		},
994 		.result = ACCEPT,
995 		.result_unpriv = ACCEPT,
996 	},
997 	{
998 		"check corrupted spill/fill",
999 		.insns = {
1000 			/* spill R1(ctx) into stack */
1001 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
1002 			/* corrupt the spilled R1 pointer on the stack */
1003 			BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
1004 			/* fill back into R0 should fail */
1005 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
1006 			BPF_EXIT_INSN(),
1007 		},
1008 		.errstr_unpriv = "attempt to corrupt spilled",
1009 		.errstr = "corrupted spill",
1010 		.result = REJECT,
1011 	},
1012 	{
1013 		"invalid src register in STX",
1014 		.insns = {
1015 			BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
1016 			BPF_EXIT_INSN(),
1017 		},
1018 		.errstr = "R15 is invalid",
1019 		.result = REJECT,
1020 	},
1021 	{
1022 		"invalid dst register in STX",
1023 		.insns = {
1024 			BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
1025 			BPF_EXIT_INSN(),
1026 		},
1027 		.errstr = "R14 is invalid",
1028 		.result = REJECT,
1029 	},
1030 	{
1031 		"invalid dst register in ST",
1032 		.insns = {
1033 			BPF_ST_MEM(BPF_B, 14, -1, -1),
1034 			BPF_EXIT_INSN(),
1035 		},
1036 		.errstr = "R14 is invalid",
1037 		.result = REJECT,
1038 	},
1039 	{
1040 		"invalid src register in LDX",
1041 		.insns = {
1042 			BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
1043 			BPF_EXIT_INSN(),
1044 		},
1045 		.errstr = "R12 is invalid",
1046 		.result = REJECT,
1047 	},
1048 	{
1049 		"invalid dst register in LDX",
1050 		.insns = {
1051 			BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
1052 			BPF_EXIT_INSN(),
1053 		},
1054 		.errstr = "R11 is invalid",
1055 		.result = REJECT,
1056 	},
1057 	{
1058 		"junk insn",
1059 		.insns = {
1060 			BPF_RAW_INSN(0, 0, 0, 0, 0),
1061 			BPF_EXIT_INSN(),
1062 		},
1063 		.errstr = "unknown opcode 00",
1064 		.result = REJECT,
1065 	},
1066 	{
1067 		"junk insn2",
1068 		.insns = {
1069 			BPF_RAW_INSN(1, 0, 0, 0, 0),
1070 			BPF_EXIT_INSN(),
1071 		},
1072 		.errstr = "BPF_LDX uses reserved fields",
1073 		.result = REJECT,
1074 	},
1075 	{
1076 		"junk insn3",
1077 		.insns = {
1078 			BPF_RAW_INSN(-1, 0, 0, 0, 0),
1079 			BPF_EXIT_INSN(),
1080 		},
1081 		.errstr = "unknown opcode ff",
1082 		.result = REJECT,
1083 	},
1084 	{
1085 		"junk insn4",
1086 		.insns = {
1087 			BPF_RAW_INSN(-1, -1, -1, -1, -1),
1088 			BPF_EXIT_INSN(),
1089 		},
1090 		.errstr = "unknown opcode ff",
1091 		.result = REJECT,
1092 	},
1093 	{
1094 		"junk insn5",
1095 		.insns = {
1096 			BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
1097 			BPF_EXIT_INSN(),
1098 		},
1099 		.errstr = "BPF_ALU uses reserved fields",
1100 		.result = REJECT,
1101 	},
1102 	{
1103 		"misaligned read from stack",
1104 		.insns = {
1105 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1106 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
1107 			BPF_EXIT_INSN(),
1108 		},
1109 		.errstr = "misaligned stack access",
1110 		.result = REJECT,
1111 	},
1112 	{
1113 		"invalid map_fd for function call",
1114 		.insns = {
1115 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1116 			BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
1117 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1118 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1119 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1120 				     BPF_FUNC_map_delete_elem),
1121 			BPF_EXIT_INSN(),
1122 		},
1123 		.errstr = "fd 0 is not pointing to valid bpf_map",
1124 		.result = REJECT,
1125 	},
1126 	{
1127 		"don't check return value before access",
1128 		.insns = {
1129 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1130 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1131 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1132 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1133 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1134 				     BPF_FUNC_map_lookup_elem),
1135 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1136 			BPF_EXIT_INSN(),
1137 		},
1138 		.fixup_map_hash_8b = { 3 },
1139 		.errstr = "R0 invalid mem access 'map_value_or_null'",
1140 		.result = REJECT,
1141 	},
1142 	{
1143 		"access memory with incorrect alignment",
1144 		.insns = {
1145 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1146 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1147 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1148 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1149 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1150 				     BPF_FUNC_map_lookup_elem),
1151 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
1152 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
1153 			BPF_EXIT_INSN(),
1154 		},
1155 		.fixup_map_hash_8b = { 3 },
1156 		.errstr = "misaligned value access",
1157 		.result = REJECT,
1158 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1159 	},
1160 	{
1161 		"sometimes access memory with incorrect alignment",
1162 		.insns = {
1163 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1164 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1165 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1166 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1167 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1168 				     BPF_FUNC_map_lookup_elem),
1169 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
1170 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1171 			BPF_EXIT_INSN(),
1172 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
1173 			BPF_EXIT_INSN(),
1174 		},
1175 		.fixup_map_hash_8b = { 3 },
1176 		.errstr = "R0 invalid mem access",
1177 		.errstr_unpriv = "R0 leaks addr",
1178 		.result = REJECT,
1179 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1180 	},
1181 	{
1182 		"jump test 1",
1183 		.insns = {
1184 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1185 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
1186 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
1187 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
1188 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
1189 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
1190 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
1191 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
1192 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
1193 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
1194 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
1195 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
1196 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
1197 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
1198 			BPF_MOV64_IMM(BPF_REG_0, 0),
1199 			BPF_EXIT_INSN(),
1200 		},
1201 		.errstr_unpriv = "R1 pointer comparison",
1202 		.result_unpriv = REJECT,
1203 		.result = ACCEPT,
1204 	},
1205 	{
1206 		"jump test 2",
1207 		.insns = {
1208 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1209 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
1210 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
1211 			BPF_JMP_IMM(BPF_JA, 0, 0, 14),
1212 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
1213 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
1214 			BPF_JMP_IMM(BPF_JA, 0, 0, 11),
1215 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
1216 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
1217 			BPF_JMP_IMM(BPF_JA, 0, 0, 8),
1218 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
1219 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
1220 			BPF_JMP_IMM(BPF_JA, 0, 0, 5),
1221 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
1222 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
1223 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1224 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
1225 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
1226 			BPF_MOV64_IMM(BPF_REG_0, 0),
1227 			BPF_EXIT_INSN(),
1228 		},
1229 		.errstr_unpriv = "R1 pointer comparison",
1230 		.result_unpriv = REJECT,
1231 		.result = ACCEPT,
1232 	},
1233 	{
1234 		"jump test 3",
1235 		.insns = {
1236 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1237 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
1238 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
1239 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1240 			BPF_JMP_IMM(BPF_JA, 0, 0, 19),
1241 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
1242 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
1243 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1244 			BPF_JMP_IMM(BPF_JA, 0, 0, 15),
1245 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
1246 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
1247 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
1248 			BPF_JMP_IMM(BPF_JA, 0, 0, 11),
1249 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
1250 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
1251 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
1252 			BPF_JMP_IMM(BPF_JA, 0, 0, 7),
1253 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
1254 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
1255 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
1256 			BPF_JMP_IMM(BPF_JA, 0, 0, 3),
1257 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
1258 			BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
1259 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
1260 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1261 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1262 				     BPF_FUNC_map_delete_elem),
1263 			BPF_EXIT_INSN(),
1264 		},
1265 		.fixup_map_hash_8b = { 24 },
1266 		.errstr_unpriv = "R1 pointer comparison",
1267 		.result_unpriv = REJECT,
1268 		.result = ACCEPT,
1269 		.retval = -ENOENT,
1270 	},
1271 	{
1272 		"jump test 4",
1273 		.insns = {
1274 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1275 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1276 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1277 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1278 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1279 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1280 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1281 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1282 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1283 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1284 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1285 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1286 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1287 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1288 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1289 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1290 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1291 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1292 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1293 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1294 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1295 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1296 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1297 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1298 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1299 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1300 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1301 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1302 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1303 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1304 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1305 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1306 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
1307 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
1308 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
1309 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
1310 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1311 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1312 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1313 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1314 			BPF_MOV64_IMM(BPF_REG_0, 0),
1315 			BPF_EXIT_INSN(),
1316 		},
1317 		.errstr_unpriv = "R1 pointer comparison",
1318 		.result_unpriv = REJECT,
1319 		.result = ACCEPT,
1320 	},
1321 	{
1322 		"jump test 5",
1323 		.insns = {
1324 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1325 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
1326 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1327 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1328 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1329 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1330 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1331 			BPF_MOV64_IMM(BPF_REG_0, 0),
1332 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1333 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1334 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1335 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1336 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1337 			BPF_MOV64_IMM(BPF_REG_0, 0),
1338 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1339 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1340 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1341 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1342 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1343 			BPF_MOV64_IMM(BPF_REG_0, 0),
1344 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1345 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1346 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1347 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1348 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1349 			BPF_MOV64_IMM(BPF_REG_0, 0),
1350 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1351 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
1352 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1353 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
1354 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1355 			BPF_MOV64_IMM(BPF_REG_0, 0),
1356 			BPF_EXIT_INSN(),
1357 		},
1358 		.errstr_unpriv = "R1 pointer comparison",
1359 		.result_unpriv = REJECT,
1360 		.result = ACCEPT,
1361 	},
1362 	{
1363 		"access skb fields ok",
1364 		.insns = {
1365 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1366 				    offsetof(struct __sk_buff, len)),
1367 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1368 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1369 				    offsetof(struct __sk_buff, mark)),
1370 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1371 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1372 				    offsetof(struct __sk_buff, pkt_type)),
1373 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1374 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1375 				    offsetof(struct __sk_buff, queue_mapping)),
1376 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1377 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1378 				    offsetof(struct __sk_buff, protocol)),
1379 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1380 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1381 				    offsetof(struct __sk_buff, vlan_present)),
1382 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1383 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1384 				    offsetof(struct __sk_buff, vlan_tci)),
1385 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1386 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1387 				    offsetof(struct __sk_buff, napi_id)),
1388 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
1389 			BPF_EXIT_INSN(),
1390 		},
1391 		.result = ACCEPT,
1392 	},
1393 	{
1394 		"access skb fields bad1",
1395 		.insns = {
1396 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
1397 			BPF_EXIT_INSN(),
1398 		},
1399 		.errstr = "invalid bpf_context access",
1400 		.result = REJECT,
1401 	},
1402 	{
1403 		"access skb fields bad2",
1404 		.insns = {
1405 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
1406 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1407 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1408 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1409 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1410 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1411 				     BPF_FUNC_map_lookup_elem),
1412 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1413 			BPF_EXIT_INSN(),
1414 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1415 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1416 				    offsetof(struct __sk_buff, pkt_type)),
1417 			BPF_EXIT_INSN(),
1418 		},
1419 		.fixup_map_hash_8b = { 4 },
1420 		.errstr = "different pointers",
1421 		.errstr_unpriv = "R1 pointer comparison",
1422 		.result = REJECT,
1423 	},
1424 	{
1425 		"access skb fields bad3",
1426 		.insns = {
1427 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
1428 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1429 				    offsetof(struct __sk_buff, pkt_type)),
1430 			BPF_EXIT_INSN(),
1431 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1432 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1433 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1434 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1435 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1436 				     BPF_FUNC_map_lookup_elem),
1437 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1438 			BPF_EXIT_INSN(),
1439 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1440 			BPF_JMP_IMM(BPF_JA, 0, 0, -12),
1441 		},
1442 		.fixup_map_hash_8b = { 6 },
1443 		.errstr = "different pointers",
1444 		.errstr_unpriv = "R1 pointer comparison",
1445 		.result = REJECT,
1446 	},
1447 	{
1448 		"access skb fields bad4",
1449 		.insns = {
1450 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
1451 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1452 				    offsetof(struct __sk_buff, len)),
1453 			BPF_MOV64_IMM(BPF_REG_0, 0),
1454 			BPF_EXIT_INSN(),
1455 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1456 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1457 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1458 			BPF_LD_MAP_FD(BPF_REG_1, 0),
1459 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1460 				     BPF_FUNC_map_lookup_elem),
1461 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1462 			BPF_EXIT_INSN(),
1463 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1464 			BPF_JMP_IMM(BPF_JA, 0, 0, -13),
1465 		},
1466 		.fixup_map_hash_8b = { 7 },
1467 		.errstr = "different pointers",
1468 		.errstr_unpriv = "R1 pointer comparison",
1469 		.result = REJECT,
1470 	},
1471 	{
1472 		"invalid access __sk_buff family",
1473 		.insns = {
1474 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1475 				    offsetof(struct __sk_buff, family)),
1476 			BPF_EXIT_INSN(),
1477 		},
1478 		.errstr = "invalid bpf_context access",
1479 		.result = REJECT,
1480 	},
1481 	{
1482 		"invalid access __sk_buff remote_ip4",
1483 		.insns = {
1484 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1485 				    offsetof(struct __sk_buff, remote_ip4)),
1486 			BPF_EXIT_INSN(),
1487 		},
1488 		.errstr = "invalid bpf_context access",
1489 		.result = REJECT,
1490 	},
1491 	{
1492 		"invalid access __sk_buff local_ip4",
1493 		.insns = {
1494 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1495 				    offsetof(struct __sk_buff, local_ip4)),
1496 			BPF_EXIT_INSN(),
1497 		},
1498 		.errstr = "invalid bpf_context access",
1499 		.result = REJECT,
1500 	},
1501 	{
1502 		"invalid access __sk_buff remote_ip6",
1503 		.insns = {
1504 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1505 				    offsetof(struct __sk_buff, remote_ip6)),
1506 			BPF_EXIT_INSN(),
1507 		},
1508 		.errstr = "invalid bpf_context access",
1509 		.result = REJECT,
1510 	},
1511 	{
1512 		"invalid access __sk_buff local_ip6",
1513 		.insns = {
1514 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1515 				    offsetof(struct __sk_buff, local_ip6)),
1516 			BPF_EXIT_INSN(),
1517 		},
1518 		.errstr = "invalid bpf_context access",
1519 		.result = REJECT,
1520 	},
1521 	{
1522 		"invalid access __sk_buff remote_port",
1523 		.insns = {
1524 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1525 				    offsetof(struct __sk_buff, remote_port)),
1526 			BPF_EXIT_INSN(),
1527 		},
1528 		.errstr = "invalid bpf_context access",
1529 		.result = REJECT,
1530 	},
1531 	{
1532 		"invalid access __sk_buff local_port",
1533 		.insns = {
1534 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1535 				    offsetof(struct __sk_buff, local_port)),
1536 			BPF_EXIT_INSN(),
1537 		},
1538 		.errstr = "invalid bpf_context access",
1539 		.result = REJECT,
1540 	},
1541 	{
1542 		"valid access __sk_buff family",
1543 		.insns = {
1544 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1545 				    offsetof(struct __sk_buff, family)),
1546 			BPF_EXIT_INSN(),
1547 		},
1548 		.result = ACCEPT,
1549 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1550 	},
1551 	{
1552 		"valid access __sk_buff remote_ip4",
1553 		.insns = {
1554 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1555 				    offsetof(struct __sk_buff, remote_ip4)),
1556 			BPF_EXIT_INSN(),
1557 		},
1558 		.result = ACCEPT,
1559 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1560 	},
1561 	{
1562 		"valid access __sk_buff local_ip4",
1563 		.insns = {
1564 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1565 				    offsetof(struct __sk_buff, local_ip4)),
1566 			BPF_EXIT_INSN(),
1567 		},
1568 		.result = ACCEPT,
1569 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1570 	},
1571 	{
1572 		"valid access __sk_buff remote_ip6",
1573 		.insns = {
1574 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1575 				    offsetof(struct __sk_buff, remote_ip6[0])),
1576 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1577 				    offsetof(struct __sk_buff, remote_ip6[1])),
1578 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1579 				    offsetof(struct __sk_buff, remote_ip6[2])),
1580 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1581 				    offsetof(struct __sk_buff, remote_ip6[3])),
1582 			BPF_EXIT_INSN(),
1583 		},
1584 		.result = ACCEPT,
1585 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1586 	},
1587 	{
1588 		"valid access __sk_buff local_ip6",
1589 		.insns = {
1590 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1591 				    offsetof(struct __sk_buff, local_ip6[0])),
1592 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1593 				    offsetof(struct __sk_buff, local_ip6[1])),
1594 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1595 				    offsetof(struct __sk_buff, local_ip6[2])),
1596 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1597 				    offsetof(struct __sk_buff, local_ip6[3])),
1598 			BPF_EXIT_INSN(),
1599 		},
1600 		.result = ACCEPT,
1601 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1602 	},
1603 	{
1604 		"valid access __sk_buff remote_port",
1605 		.insns = {
1606 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1607 				    offsetof(struct __sk_buff, remote_port)),
1608 			BPF_EXIT_INSN(),
1609 		},
1610 		.result = ACCEPT,
1611 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1612 	},
1613 	{
1614 		"valid access __sk_buff local_port",
1615 		.insns = {
1616 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1617 				    offsetof(struct __sk_buff, local_port)),
1618 			BPF_EXIT_INSN(),
1619 		},
1620 		.result = ACCEPT,
1621 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1622 	},
1623 	{
1624 		"invalid access of tc_classid for SK_SKB",
1625 		.insns = {
1626 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1627 				    offsetof(struct __sk_buff, tc_classid)),
1628 			BPF_EXIT_INSN(),
1629 		},
1630 		.result = REJECT,
1631 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1632 		.errstr = "invalid bpf_context access",
1633 	},
1634 	{
1635 		"invalid access of skb->mark for SK_SKB",
1636 		.insns = {
1637 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1638 				    offsetof(struct __sk_buff, mark)),
1639 			BPF_EXIT_INSN(),
1640 		},
1641 		.result =  REJECT,
1642 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1643 		.errstr = "invalid bpf_context access",
1644 	},
1645 	{
1646 		"check skb->mark is not writeable by SK_SKB",
1647 		.insns = {
1648 			BPF_MOV64_IMM(BPF_REG_0, 0),
1649 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1650 				    offsetof(struct __sk_buff, mark)),
1651 			BPF_EXIT_INSN(),
1652 		},
1653 		.result =  REJECT,
1654 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1655 		.errstr = "invalid bpf_context access",
1656 	},
1657 	{
1658 		"check skb->tc_index is writeable by SK_SKB",
1659 		.insns = {
1660 			BPF_MOV64_IMM(BPF_REG_0, 0),
1661 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1662 				    offsetof(struct __sk_buff, tc_index)),
1663 			BPF_EXIT_INSN(),
1664 		},
1665 		.result = ACCEPT,
1666 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1667 	},
1668 	{
1669 		"check skb->priority is writeable by SK_SKB",
1670 		.insns = {
1671 			BPF_MOV64_IMM(BPF_REG_0, 0),
1672 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1673 				    offsetof(struct __sk_buff, priority)),
1674 			BPF_EXIT_INSN(),
1675 		},
1676 		.result = ACCEPT,
1677 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1678 	},
1679 	{
1680 		"direct packet read for SK_SKB",
1681 		.insns = {
1682 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1683 				    offsetof(struct __sk_buff, data)),
1684 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1685 				    offsetof(struct __sk_buff, data_end)),
1686 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1687 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1688 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1689 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
1690 			BPF_MOV64_IMM(BPF_REG_0, 0),
1691 			BPF_EXIT_INSN(),
1692 		},
1693 		.result = ACCEPT,
1694 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1695 	},
1696 	{
1697 		"direct packet write for SK_SKB",
1698 		.insns = {
1699 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1700 				    offsetof(struct __sk_buff, data)),
1701 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1702 				    offsetof(struct __sk_buff, data_end)),
1703 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1704 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1705 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1706 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
1707 			BPF_MOV64_IMM(BPF_REG_0, 0),
1708 			BPF_EXIT_INSN(),
1709 		},
1710 		.result = ACCEPT,
1711 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1712 	},
1713 	{
1714 		"overlapping checks for direct packet access SK_SKB",
1715 		.insns = {
1716 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1717 				    offsetof(struct __sk_buff, data)),
1718 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1719 				    offsetof(struct __sk_buff, data_end)),
1720 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1721 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1722 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
1723 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
1724 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
1725 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
1726 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
1727 			BPF_MOV64_IMM(BPF_REG_0, 0),
1728 			BPF_EXIT_INSN(),
1729 		},
1730 		.result = ACCEPT,
1731 		.prog_type = BPF_PROG_TYPE_SK_SKB,
1732 	},
1733 	{
1734 		"valid access family in SK_MSG",
1735 		.insns = {
1736 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1737 				    offsetof(struct sk_msg_md, family)),
1738 			BPF_EXIT_INSN(),
1739 		},
1740 		.result = ACCEPT,
1741 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1742 	},
1743 	{
1744 		"valid access remote_ip4 in SK_MSG",
1745 		.insns = {
1746 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1747 				    offsetof(struct sk_msg_md, remote_ip4)),
1748 			BPF_EXIT_INSN(),
1749 		},
1750 		.result = ACCEPT,
1751 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1752 	},
1753 	{
1754 		"valid access local_ip4 in SK_MSG",
1755 		.insns = {
1756 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1757 				    offsetof(struct sk_msg_md, local_ip4)),
1758 			BPF_EXIT_INSN(),
1759 		},
1760 		.result = ACCEPT,
1761 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1762 	},
1763 	{
1764 		"valid access remote_port in SK_MSG",
1765 		.insns = {
1766 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1767 				    offsetof(struct sk_msg_md, remote_port)),
1768 			BPF_EXIT_INSN(),
1769 		},
1770 		.result = ACCEPT,
1771 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1772 	},
1773 	{
1774 		"valid access local_port in SK_MSG",
1775 		.insns = {
1776 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1777 				    offsetof(struct sk_msg_md, local_port)),
1778 			BPF_EXIT_INSN(),
1779 		},
1780 		.result = ACCEPT,
1781 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1782 	},
1783 	{
1784 		"valid access remote_ip6 in SK_MSG",
1785 		.insns = {
1786 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1787 				    offsetof(struct sk_msg_md, remote_ip6[0])),
1788 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1789 				    offsetof(struct sk_msg_md, remote_ip6[1])),
1790 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1791 				    offsetof(struct sk_msg_md, remote_ip6[2])),
1792 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1793 				    offsetof(struct sk_msg_md, remote_ip6[3])),
1794 			BPF_EXIT_INSN(),
1795 		},
1796 		.result = ACCEPT,
1797 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1798 	},
1799 	{
1800 		"valid access local_ip6 in SK_MSG",
1801 		.insns = {
1802 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1803 				    offsetof(struct sk_msg_md, local_ip6[0])),
1804 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1805 				    offsetof(struct sk_msg_md, local_ip6[1])),
1806 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1807 				    offsetof(struct sk_msg_md, local_ip6[2])),
1808 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1809 				    offsetof(struct sk_msg_md, local_ip6[3])),
1810 			BPF_EXIT_INSN(),
1811 		},
1812 		.result = ACCEPT,
1813 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1814 	},
1815 	{
1816 		"invalid 64B read of family in SK_MSG",
1817 		.insns = {
1818 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1819 				    offsetof(struct sk_msg_md, family)),
1820 			BPF_EXIT_INSN(),
1821 		},
1822 		.errstr = "invalid bpf_context access",
1823 		.result = REJECT,
1824 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1825 	},
1826 	{
1827 		"invalid read past end of SK_MSG",
1828 		.insns = {
1829 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1830 				    offsetof(struct sk_msg_md, local_port) + 4),
1831 			BPF_EXIT_INSN(),
1832 		},
1833 		.errstr = "R0 !read_ok",
1834 		.result = REJECT,
1835 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1836 	},
1837 	{
1838 		"invalid read offset in SK_MSG",
1839 		.insns = {
1840 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1841 				    offsetof(struct sk_msg_md, family) + 1),
1842 			BPF_EXIT_INSN(),
1843 		},
1844 		.errstr = "invalid bpf_context access",
1845 		.result = REJECT,
1846 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1847 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
1848 	},
1849 	{
1850 		"direct packet read for SK_MSG",
1851 		.insns = {
1852 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1853 				    offsetof(struct sk_msg_md, data)),
1854 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
1855 				    offsetof(struct sk_msg_md, data_end)),
1856 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1857 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1858 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1859 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
1860 			BPF_MOV64_IMM(BPF_REG_0, 0),
1861 			BPF_EXIT_INSN(),
1862 		},
1863 		.result = ACCEPT,
1864 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1865 	},
1866 	{
1867 		"direct packet write for SK_MSG",
1868 		.insns = {
1869 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1870 				    offsetof(struct sk_msg_md, data)),
1871 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
1872 				    offsetof(struct sk_msg_md, data_end)),
1873 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1874 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1875 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1876 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
1877 			BPF_MOV64_IMM(BPF_REG_0, 0),
1878 			BPF_EXIT_INSN(),
1879 		},
1880 		.result = ACCEPT,
1881 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1882 	},
1883 	{
1884 		"overlapping checks for direct packet access SK_MSG",
1885 		.insns = {
1886 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
1887 				    offsetof(struct sk_msg_md, data)),
1888 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
1889 				    offsetof(struct sk_msg_md, data_end)),
1890 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1891 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1892 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
1893 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
1894 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
1895 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
1896 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
1897 			BPF_MOV64_IMM(BPF_REG_0, 0),
1898 			BPF_EXIT_INSN(),
1899 		},
1900 		.result = ACCEPT,
1901 		.prog_type = BPF_PROG_TYPE_SK_MSG,
1902 	},
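	/* __sk_buff context access checks: which fields a given program type
	 * may read or write, and at what sizes, offsets and alignments.
	 */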
1903 	{
1904 		"check skb->mark is not writeable by sockets",
1905 		.insns = {
1906 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1907 				    offsetof(struct __sk_buff, mark)),
1908 			BPF_EXIT_INSN(),
1909 		},
1910 		.errstr = "invalid bpf_context access",
1911 		.errstr_unpriv = "R1 leaks addr",
1912 		.result = REJECT,
1913 	},
1914 	{
1915 		"check skb->tc_index is not writeable by sockets",
1916 		.insns = {
1917 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1918 				    offsetof(struct __sk_buff, tc_index)),
1919 			BPF_EXIT_INSN(),
1920 		},
1921 		.errstr = "invalid bpf_context access",
1922 		.errstr_unpriv = "R1 leaks addr",
1923 		.result = REJECT,
1924 	},
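	/* skb->cb[] is readable and writable at byte, half, word and dword
	 * granularity, while skb->hash and skb->tc_index only permit a
	 * subset of the partial accesses exercised below.
	 */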
1925 	{
1926 		"check cb access: byte",
1927 		.insns = {
1928 			BPF_MOV64_IMM(BPF_REG_0, 0),
1929 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1930 				    offsetof(struct __sk_buff, cb[0])),
1931 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1932 				    offsetof(struct __sk_buff, cb[0]) + 1),
1933 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1934 				    offsetof(struct __sk_buff, cb[0]) + 2),
1935 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1936 				    offsetof(struct __sk_buff, cb[0]) + 3),
1937 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1938 				    offsetof(struct __sk_buff, cb[1])),
1939 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1940 				    offsetof(struct __sk_buff, cb[1]) + 1),
1941 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1942 				    offsetof(struct __sk_buff, cb[1]) + 2),
1943 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1944 				    offsetof(struct __sk_buff, cb[1]) + 3),
1945 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1946 				    offsetof(struct __sk_buff, cb[2])),
1947 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1948 				    offsetof(struct __sk_buff, cb[2]) + 1),
1949 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1950 				    offsetof(struct __sk_buff, cb[2]) + 2),
1951 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1952 				    offsetof(struct __sk_buff, cb[2]) + 3),
1953 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1954 				    offsetof(struct __sk_buff, cb[3])),
1955 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1956 				    offsetof(struct __sk_buff, cb[3]) + 1),
1957 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1958 				    offsetof(struct __sk_buff, cb[3]) + 2),
1959 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1960 				    offsetof(struct __sk_buff, cb[3]) + 3),
1961 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1962 				    offsetof(struct __sk_buff, cb[4])),
1963 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1964 				    offsetof(struct __sk_buff, cb[4]) + 1),
1965 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1966 				    offsetof(struct __sk_buff, cb[4]) + 2),
1967 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1968 				    offsetof(struct __sk_buff, cb[4]) + 3),
1969 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1970 				    offsetof(struct __sk_buff, cb[0])),
1971 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1972 				    offsetof(struct __sk_buff, cb[0]) + 1),
1973 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1974 				    offsetof(struct __sk_buff, cb[0]) + 2),
1975 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1976 				    offsetof(struct __sk_buff, cb[0]) + 3),
1977 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1978 				    offsetof(struct __sk_buff, cb[1])),
1979 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1980 				    offsetof(struct __sk_buff, cb[1]) + 1),
1981 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1982 				    offsetof(struct __sk_buff, cb[1]) + 2),
1983 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1984 				    offsetof(struct __sk_buff, cb[1]) + 3),
1985 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1986 				    offsetof(struct __sk_buff, cb[2])),
1987 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1988 				    offsetof(struct __sk_buff, cb[2]) + 1),
1989 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1990 				    offsetof(struct __sk_buff, cb[2]) + 2),
1991 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1992 				    offsetof(struct __sk_buff, cb[2]) + 3),
1993 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1994 				    offsetof(struct __sk_buff, cb[3])),
1995 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1996 				    offsetof(struct __sk_buff, cb[3]) + 1),
1997 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1998 				    offsetof(struct __sk_buff, cb[3]) + 2),
1999 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2000 				    offsetof(struct __sk_buff, cb[3]) + 3),
2001 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2002 				    offsetof(struct __sk_buff, cb[4])),
2003 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2004 				    offsetof(struct __sk_buff, cb[4]) + 1),
2005 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2006 				    offsetof(struct __sk_buff, cb[4]) + 2),
2007 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2008 				    offsetof(struct __sk_buff, cb[4]) + 3),
2009 			BPF_EXIT_INSN(),
2010 		},
2011 		.result = ACCEPT,
2012 	},
2013 	{
2014 		"__sk_buff->hash, offset 0, byte store not permitted",
2015 		.insns = {
2016 			BPF_MOV64_IMM(BPF_REG_0, 0),
2017 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2018 				    offsetof(struct __sk_buff, hash)),
2019 			BPF_EXIT_INSN(),
2020 		},
2021 		.errstr = "invalid bpf_context access",
2022 		.result = REJECT,
2023 	},
2024 	{
2025 		"__sk_buff->tc_index, offset 3, byte store not permitted",
2026 		.insns = {
2027 			BPF_MOV64_IMM(BPF_REG_0, 0),
2028 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2029 				    offsetof(struct __sk_buff, tc_index) + 3),
2030 			BPF_EXIT_INSN(),
2031 		},
2032 		.errstr = "invalid bpf_context access",
2033 		.result = REJECT,
2034 	},
2035 	{
2036 		"check skb->hash byte load permitted",
2037 		.insns = {
2038 			BPF_MOV64_IMM(BPF_REG_0, 0),
2039 #if __BYTE_ORDER == __LITTLE_ENDIAN
2040 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2041 				    offsetof(struct __sk_buff, hash)),
2042 #else
2043 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2044 				    offsetof(struct __sk_buff, hash) + 3),
2045 #endif
2046 			BPF_EXIT_INSN(),
2047 		},
2048 		.result = ACCEPT,
2049 	},
2050 	{
2051 		"check skb->hash byte load permitted 1",
2052 		.insns = {
2053 			BPF_MOV64_IMM(BPF_REG_0, 0),
2054 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2055 				    offsetof(struct __sk_buff, hash) + 1),
2056 			BPF_EXIT_INSN(),
2057 		},
2058 		.result = ACCEPT,
2059 	},
2060 	{
2061 		"check skb->hash byte load permitted 2",
2062 		.insns = {
2063 			BPF_MOV64_IMM(BPF_REG_0, 0),
2064 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2065 				    offsetof(struct __sk_buff, hash) + 2),
2066 			BPF_EXIT_INSN(),
2067 		},
2068 		.result = ACCEPT,
2069 	},
2070 	{
2071 		"check skb->hash byte load permitted 3",
2072 		.insns = {
2073 			BPF_MOV64_IMM(BPF_REG_0, 0),
2074 #if __BYTE_ORDER == __LITTLE_ENDIAN
2075 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2076 				    offsetof(struct __sk_buff, hash) + 3),
2077 #else
2078 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
2079 				    offsetof(struct __sk_buff, hash)),
2080 #endif
2081 			BPF_EXIT_INSN(),
2082 		},
2083 		.result = ACCEPT,
2084 	},
2085 	{
2086 		"check cb access: byte, wrong type",
2087 		.insns = {
2088 			BPF_MOV64_IMM(BPF_REG_0, 0),
2089 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
2090 				    offsetof(struct __sk_buff, cb[0])),
2091 			BPF_EXIT_INSN(),
2092 		},
2093 		.errstr = "invalid bpf_context access",
2094 		.result = REJECT,
2095 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
2096 	},
2097 	{
2098 		"check cb access: half",
2099 		.insns = {
2100 			BPF_MOV64_IMM(BPF_REG_0, 0),
2101 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2102 				    offsetof(struct __sk_buff, cb[0])),
2103 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2104 				    offsetof(struct __sk_buff, cb[0]) + 2),
2105 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2106 				    offsetof(struct __sk_buff, cb[1])),
2107 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2108 				    offsetof(struct __sk_buff, cb[1]) + 2),
2109 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2110 				    offsetof(struct __sk_buff, cb[2])),
2111 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2112 				    offsetof(struct __sk_buff, cb[2]) + 2),
2113 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2114 				    offsetof(struct __sk_buff, cb[3])),
2115 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2116 				    offsetof(struct __sk_buff, cb[3]) + 2),
2117 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2118 				    offsetof(struct __sk_buff, cb[4])),
2119 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2120 				    offsetof(struct __sk_buff, cb[4]) + 2),
2121 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2122 				    offsetof(struct __sk_buff, cb[0])),
2123 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2124 				    offsetof(struct __sk_buff, cb[0]) + 2),
2125 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2126 				    offsetof(struct __sk_buff, cb[1])),
2127 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2128 				    offsetof(struct __sk_buff, cb[1]) + 2),
2129 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2130 				    offsetof(struct __sk_buff, cb[2])),
2131 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2132 				    offsetof(struct __sk_buff, cb[2]) + 2),
2133 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2134 				    offsetof(struct __sk_buff, cb[3])),
2135 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2136 				    offsetof(struct __sk_buff, cb[3]) + 2),
2137 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2138 				    offsetof(struct __sk_buff, cb[4])),
2139 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2140 				    offsetof(struct __sk_buff, cb[4]) + 2),
2141 			BPF_EXIT_INSN(),
2142 		},
2143 		.result = ACCEPT,
2144 	},
2145 	{
2146 		"check cb access: half, unaligned",
2147 		.insns = {
2148 			BPF_MOV64_IMM(BPF_REG_0, 0),
2149 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2150 				    offsetof(struct __sk_buff, cb[0]) + 1),
2151 			BPF_EXIT_INSN(),
2152 		},
2153 		.errstr = "misaligned context access",
2154 		.result = REJECT,
2155 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2156 	},
2157 	{
2158 		"check __sk_buff->hash, offset 0, half store not permitted",
2159 		.insns = {
2160 			BPF_MOV64_IMM(BPF_REG_0, 0),
2161 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2162 				    offsetof(struct __sk_buff, hash)),
2163 			BPF_EXIT_INSN(),
2164 		},
2165 		.errstr = "invalid bpf_context access",
2166 		.result = REJECT,
2167 	},
2168 	{
2169 		"check __sk_buff->tc_index, offset 2, half store not permitted",
2170 		.insns = {
2171 			BPF_MOV64_IMM(BPF_REG_0, 0),
2172 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2173 				    offsetof(struct __sk_buff, tc_index) + 2),
2174 			BPF_EXIT_INSN(),
2175 		},
2176 		.errstr = "invalid bpf_context access",
2177 		.result = REJECT,
2178 	},
2179 	{
2180 		"check skb->hash half load permitted",
2181 		.insns = {
2182 			BPF_MOV64_IMM(BPF_REG_0, 0),
2183 #if __BYTE_ORDER == __LITTLE_ENDIAN
2184 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2185 				    offsetof(struct __sk_buff, hash)),
2186 #else
2187 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2188 				    offsetof(struct __sk_buff, hash) + 2),
2189 #endif
2190 			BPF_EXIT_INSN(),
2191 		},
2192 		.result = ACCEPT,
2193 	},
2194 	{
2195 		"check skb->hash half load permitted 2",
2196 		.insns = {
2197 			BPF_MOV64_IMM(BPF_REG_0, 0),
2198 #if __BYTE_ORDER == __LITTLE_ENDIAN
2199 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2200 				    offsetof(struct __sk_buff, hash) + 2),
2201 #else
2202 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2203 				    offsetof(struct __sk_buff, hash)),
2204 #endif
2205 			BPF_EXIT_INSN(),
2206 		},
2207 		.result = ACCEPT,
2208 	},
2209 	{
2210 		"check skb->hash half load not permitted, unaligned 1",
2211 		.insns = {
2212 			BPF_MOV64_IMM(BPF_REG_0, 0),
2213 #if __BYTE_ORDER == __LITTLE_ENDIAN
2214 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2215 				    offsetof(struct __sk_buff, hash) + 1),
2216 #else
2217 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2218 				    offsetof(struct __sk_buff, hash) + 3),
2219 #endif
2220 			BPF_EXIT_INSN(),
2221 		},
2222 		.errstr = "invalid bpf_context access",
2223 		.result = REJECT,
2224 	},
2225 	{
2226 		"check skb->hash half load not permitted, unaligned 3",
2227 		.insns = {
2228 			BPF_MOV64_IMM(BPF_REG_0, 0),
2229 #if __BYTE_ORDER == __LITTLE_ENDIAN
2230 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2231 				    offsetof(struct __sk_buff, hash) + 3),
2232 #else
2233 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
2234 				    offsetof(struct __sk_buff, hash) + 1),
2235 #endif
2236 			BPF_EXIT_INSN(),
2237 		},
2238 		.errstr = "invalid bpf_context access",
2239 		.result = REJECT,
2240 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
2242 	},
2243 	{
2244 		"check cb access: half, wrong type",
2245 		.insns = {
2246 			BPF_MOV64_IMM(BPF_REG_0, 0),
2247 			BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
2248 				    offsetof(struct __sk_buff, cb[0])),
2249 			BPF_EXIT_INSN(),
2250 		},
2251 		.errstr = "invalid bpf_context access",
2252 		.result = REJECT,
2253 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
2254 	},
2255 	{
2256 		"check cb access: word",
2257 		.insns = {
2258 			BPF_MOV64_IMM(BPF_REG_0, 0),
2259 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2260 				    offsetof(struct __sk_buff, cb[0])),
2261 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2262 				    offsetof(struct __sk_buff, cb[1])),
2263 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2264 				    offsetof(struct __sk_buff, cb[2])),
2265 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2266 				    offsetof(struct __sk_buff, cb[3])),
2267 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2268 				    offsetof(struct __sk_buff, cb[4])),
2269 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2270 				    offsetof(struct __sk_buff, cb[0])),
2271 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2272 				    offsetof(struct __sk_buff, cb[1])),
2273 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2274 				    offsetof(struct __sk_buff, cb[2])),
2275 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2276 				    offsetof(struct __sk_buff, cb[3])),
2277 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2278 				    offsetof(struct __sk_buff, cb[4])),
2279 			BPF_EXIT_INSN(),
2280 		},
2281 		.result = ACCEPT,
2282 	},
2283 	{
2284 		"check cb access: word, unaligned 1",
2285 		.insns = {
2286 			BPF_MOV64_IMM(BPF_REG_0, 0),
2287 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2288 				    offsetof(struct __sk_buff, cb[0]) + 2),
2289 			BPF_EXIT_INSN(),
2290 		},
2291 		.errstr = "misaligned context access",
2292 		.result = REJECT,
2293 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2294 	},
2295 	{
2296 		"check cb access: word, unaligned 2",
2297 		.insns = {
2298 			BPF_MOV64_IMM(BPF_REG_0, 0),
2299 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2300 				    offsetof(struct __sk_buff, cb[4]) + 1),
2301 			BPF_EXIT_INSN(),
2302 		},
2303 		.errstr = "misaligned context access",
2304 		.result = REJECT,
2305 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2306 	},
2307 	{
2308 		"check cb access: word, unaligned 3",
2309 		.insns = {
2310 			BPF_MOV64_IMM(BPF_REG_0, 0),
2311 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2312 				    offsetof(struct __sk_buff, cb[4]) + 2),
2313 			BPF_EXIT_INSN(),
2314 		},
2315 		.errstr = "misaligned context access",
2316 		.result = REJECT,
2317 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2318 	},
2319 	{
2320 		"check cb access: word, unaligned 4",
2321 		.insns = {
2322 			BPF_MOV64_IMM(BPF_REG_0, 0),
2323 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2324 				    offsetof(struct __sk_buff, cb[4]) + 3),
2325 			BPF_EXIT_INSN(),
2326 		},
2327 		.errstr = "misaligned context access",
2328 		.result = REJECT,
2329 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2330 	},
2331 	{
2332 		"check cb access: double",
2333 		.insns = {
2334 			BPF_MOV64_IMM(BPF_REG_0, 0),
2335 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2336 				    offsetof(struct __sk_buff, cb[0])),
2337 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2338 				    offsetof(struct __sk_buff, cb[2])),
2339 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2340 				    offsetof(struct __sk_buff, cb[0])),
2341 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2342 				    offsetof(struct __sk_buff, cb[2])),
2343 			BPF_EXIT_INSN(),
2344 		},
2345 		.result = ACCEPT,
2346 	},
2347 	{
2348 		"check cb access: double, unaligned 1",
2349 		.insns = {
2350 			BPF_MOV64_IMM(BPF_REG_0, 0),
2351 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2352 				    offsetof(struct __sk_buff, cb[1])),
2353 			BPF_EXIT_INSN(),
2354 		},
2355 		.errstr = "misaligned context access",
2356 		.result = REJECT,
2357 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2358 	},
2359 	{
2360 		"check cb access: double, unaligned 2",
2361 		.insns = {
2362 			BPF_MOV64_IMM(BPF_REG_0, 0),
2363 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2364 				    offsetof(struct __sk_buff, cb[3])),
2365 			BPF_EXIT_INSN(),
2366 		},
2367 		.errstr = "misaligned context access",
2368 		.result = REJECT,
2369 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2370 	},
2371 	{
2372 		"check cb access: double, oob 1",
2373 		.insns = {
2374 			BPF_MOV64_IMM(BPF_REG_0, 0),
2375 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2376 				    offsetof(struct __sk_buff, cb[4])),
2377 			BPF_EXIT_INSN(),
2378 		},
2379 		.errstr = "invalid bpf_context access",
2380 		.result = REJECT,
2381 	},
2382 	{
2383 		"check cb access: double, oob 2",
2384 		.insns = {
2385 			BPF_MOV64_IMM(BPF_REG_0, 0),
2386 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2387 				    offsetof(struct __sk_buff, cb[4])),
2388 			BPF_EXIT_INSN(),
2389 		},
2390 		.errstr = "invalid bpf_context access",
2391 		.result = REJECT,
2392 	},
2393 	{
2394 		"check __sk_buff->ifindex dw store not permitted",
2395 		.insns = {
2396 			BPF_MOV64_IMM(BPF_REG_0, 0),
2397 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2398 				    offsetof(struct __sk_buff, ifindex)),
2399 			BPF_EXIT_INSN(),
2400 		},
2401 		.errstr = "invalid bpf_context access",
2402 		.result = REJECT,
2403 	},
2404 	{
2405 		"check __sk_buff->ifindex dw load not permitted",
2406 		.insns = {
2407 			BPF_MOV64_IMM(BPF_REG_0, 0),
2408 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2409 				    offsetof(struct __sk_buff, ifindex)),
2410 			BPF_EXIT_INSN(),
2411 		},
2412 		.errstr = "invalid bpf_context access",
2413 		.result = REJECT,
2414 	},
2415 	{
2416 		"check cb access: double, wrong type",
2417 		.insns = {
2418 			BPF_MOV64_IMM(BPF_REG_0, 0),
2419 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2420 				    offsetof(struct __sk_buff, cb[0])),
2421 			BPF_EXIT_INSN(),
2422 		},
2423 		.errstr = "invalid bpf_context access",
2424 		.result = REJECT,
2425 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
2426 	},
2427 	{
2428 		"check out of range skb->cb access",
2429 		.insns = {
2430 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2431 				    offsetof(struct __sk_buff, cb[0]) + 256),
2432 			BPF_EXIT_INSN(),
2433 		},
2434 		.errstr = "invalid bpf_context access",
2435 		.errstr_unpriv = "",
2436 		.result = REJECT,
2437 		.prog_type = BPF_PROG_TYPE_SCHED_ACT,
2438 	},
2439 	{
2440 		"write skb fields from socket prog",
2441 		.insns = {
2442 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2443 				    offsetof(struct __sk_buff, cb[4])),
2444 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
2445 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2446 				    offsetof(struct __sk_buff, mark)),
2447 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2448 				    offsetof(struct __sk_buff, tc_index)),
2449 			BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
2450 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2451 				    offsetof(struct __sk_buff, cb[0])),
2452 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2453 				    offsetof(struct __sk_buff, cb[2])),
2454 			BPF_EXIT_INSN(),
2455 		},
2456 		.result = ACCEPT,
2457 		.errstr_unpriv = "R1 leaks addr",
2458 		.result_unpriv = REJECT,
2459 	},
2460 	{
2461 		"write skb fields from tc_cls_act prog",
2462 		.insns = {
2463 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2464 				    offsetof(struct __sk_buff, cb[0])),
2465 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2466 				    offsetof(struct __sk_buff, mark)),
2467 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
2468 				    offsetof(struct __sk_buff, tc_index)),
2469 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2470 				    offsetof(struct __sk_buff, tc_index)),
2471 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
2472 				    offsetof(struct __sk_buff, cb[3])),
2473 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
2474 				    offsetof(struct __sk_buff, tstamp)),
2475 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
2476 				    offsetof(struct __sk_buff, tstamp)),
2477 			BPF_EXIT_INSN(),
2478 		},
2479 		.errstr_unpriv = "",
2480 		.result_unpriv = REJECT,
2481 		.result = ACCEPT,
2482 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2483 	},
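	/* PTR_TO_STACK store/load: accesses through a stack pointer must be
	 * properly aligned and stay within the stack bounds.
	 */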
2484 	{
2485 		"PTR_TO_STACK store/load",
2486 		.insns = {
2487 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2488 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
2489 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
2490 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
2491 			BPF_EXIT_INSN(),
2492 		},
2493 		.result = ACCEPT,
2494 		.retval = 0xfaceb00c,
2495 	},
2496 	{
2497 		"PTR_TO_STACK store/load - bad alignment on off",
2498 		.insns = {
2499 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2500 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2501 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
2502 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
2503 			BPF_EXIT_INSN(),
2504 		},
2505 		.result = REJECT,
2506 		.errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
2507 	},
2508 	{
2509 		"PTR_TO_STACK store/load - bad alignment on reg",
2510 		.insns = {
2511 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2512 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
2513 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2514 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2515 			BPF_EXIT_INSN(),
2516 		},
2517 		.result = REJECT,
2518 		.errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
2519 	},
2520 	{
2521 		"PTR_TO_STACK store/load - out of bounds low",
2522 		.insns = {
2523 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2524 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
2525 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2526 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2527 			BPF_EXIT_INSN(),
2528 		},
2529 		.result = REJECT,
2530 		.errstr = "invalid stack off=-79992 size=8",
2531 	},
2532 	{
2533 		"PTR_TO_STACK store/load - out of bounds high",
2534 		.insns = {
2535 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2536 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2537 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
2538 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
2539 			BPF_EXIT_INSN(),
2540 		},
2541 		.result = REJECT,
2542 		.errstr = "invalid stack off=0 size=8",
2543 	},
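	/* "unpriv" tests: operations that are accepted for privileged users
	 * but must be rejected when unprivileged, mostly because they could
	 * leak kernel pointers (returning, comparing, storing or doing
	 * arithmetic on pointers, or passing them to helpers).
	 */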
2544 	{
2545 		"unpriv: return pointer",
2546 		.insns = {
2547 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
2548 			BPF_EXIT_INSN(),
2549 		},
2550 		.result = ACCEPT,
2551 		.result_unpriv = REJECT,
2552 		.errstr_unpriv = "R0 leaks addr",
2553 		.retval = POINTER_VALUE,
2554 	},
2555 	{
2556 		"unpriv: add const to pointer",
2557 		.insns = {
2558 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
2559 			BPF_MOV64_IMM(BPF_REG_0, 0),
2560 			BPF_EXIT_INSN(),
2561 		},
2562 		.result = ACCEPT,
2563 	},
2564 	{
2565 		"unpriv: add pointer to pointer",
2566 		.insns = {
2567 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
2568 			BPF_MOV64_IMM(BPF_REG_0, 0),
2569 			BPF_EXIT_INSN(),
2570 		},
2571 		.result = REJECT,
2572 		.errstr = "R1 pointer += pointer",
2573 	},
2574 	{
2575 		"unpriv: neg pointer",
2576 		.insns = {
2577 			BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
2578 			BPF_MOV64_IMM(BPF_REG_0, 0),
2579 			BPF_EXIT_INSN(),
2580 		},
2581 		.result = ACCEPT,
2582 		.result_unpriv = REJECT,
2583 		.errstr_unpriv = "R1 pointer arithmetic",
2584 	},
2585 	{
2586 		"unpriv: cmp pointer with const",
2587 		.insns = {
2588 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2589 			BPF_MOV64_IMM(BPF_REG_0, 0),
2590 			BPF_EXIT_INSN(),
2591 		},
2592 		.result = ACCEPT,
2593 		.result_unpriv = REJECT,
2594 		.errstr_unpriv = "R1 pointer comparison",
2595 	},
2596 	{
2597 		"unpriv: cmp pointer with pointer",
2598 		.insns = {
2599 			BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
2600 			BPF_MOV64_IMM(BPF_REG_0, 0),
2601 			BPF_EXIT_INSN(),
2602 		},
2603 		.result = ACCEPT,
2604 		.result_unpriv = REJECT,
2605 		.errstr_unpriv = "R10 pointer comparison",
2606 	},
2607 	{
2608 		"unpriv: check that printk is disallowed",
2609 		.insns = {
2610 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2611 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
2612 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
2613 			BPF_MOV64_IMM(BPF_REG_2, 8),
2614 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
2615 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2616 				     BPF_FUNC_trace_printk),
2617 			BPF_MOV64_IMM(BPF_REG_0, 0),
2618 			BPF_EXIT_INSN(),
2619 		},
2620 		.errstr_unpriv = "unknown func bpf_trace_printk#6",
2621 		.result_unpriv = REJECT,
2622 		.result = ACCEPT,
2623 	},
2624 	{
2625 		"unpriv: pass pointer to helper function",
2626 		.insns = {
2627 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2628 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2629 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2630 			BPF_LD_MAP_FD(BPF_REG_1, 0),
2631 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
2632 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2633 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2634 				     BPF_FUNC_map_update_elem),
2635 			BPF_MOV64_IMM(BPF_REG_0, 0),
2636 			BPF_EXIT_INSN(),
2637 		},
2638 		.fixup_map_hash_8b = { 3 },
2639 		.errstr_unpriv = "R4 leaks addr",
2640 		.result_unpriv = REJECT,
2641 		.result = ACCEPT,
2642 	},
2643 	{
2644 		"unpriv: indirectly pass pointer on stack to helper function",
2645 		.insns = {
2646 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2647 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2648 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2649 			BPF_LD_MAP_FD(BPF_REG_1, 0),
2650 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2651 				     BPF_FUNC_map_lookup_elem),
2652 			BPF_MOV64_IMM(BPF_REG_0, 0),
2653 			BPF_EXIT_INSN(),
2654 		},
2655 		.fixup_map_hash_8b = { 3 },
2656 		.errstr = "invalid indirect read from stack off -8+0 size 8",
2657 		.result = REJECT,
2658 	},
2659 	{
2660 		"unpriv: mangle pointer on stack 1",
2661 		.insns = {
2662 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2663 			BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
2664 			BPF_MOV64_IMM(BPF_REG_0, 0),
2665 			BPF_EXIT_INSN(),
2666 		},
2667 		.errstr_unpriv = "attempt to corrupt spilled",
2668 		.result_unpriv = REJECT,
2669 		.result = ACCEPT,
2670 	},
2671 	{
2672 		"unpriv: mangle pointer on stack 2",
2673 		.insns = {
2674 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2675 			BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
2676 			BPF_MOV64_IMM(BPF_REG_0, 0),
2677 			BPF_EXIT_INSN(),
2678 		},
2679 		.errstr_unpriv = "attempt to corrupt spilled",
2680 		.result_unpriv = REJECT,
2681 		.result = ACCEPT,
2682 	},
2683 	{
2684 		"unpriv: read pointer from stack in small chunks",
2685 		.insns = {
2686 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
2687 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
2688 			BPF_MOV64_IMM(BPF_REG_0, 0),
2689 			BPF_EXIT_INSN(),
2690 		},
2691 		.errstr = "invalid size",
2692 		.result = REJECT,
2693 	},
2694 	{
2695 		"unpriv: write pointer into ctx",
2696 		.insns = {
2697 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
2698 			BPF_MOV64_IMM(BPF_REG_0, 0),
2699 			BPF_EXIT_INSN(),
2700 		},
2701 		.errstr_unpriv = "R1 leaks addr",
2702 		.result_unpriv = REJECT,
2703 		.errstr = "invalid bpf_context access",
2704 		.result = REJECT,
2705 	},
2706 	{
2707 		"unpriv: spill/fill of ctx",
2708 		.insns = {
2709 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2710 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2711 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2712 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2713 			BPF_MOV64_IMM(BPF_REG_0, 0),
2714 			BPF_EXIT_INSN(),
2715 		},
2716 		.result = ACCEPT,
2717 	},
2718 	{
2719 		"unpriv: spill/fill of ctx 2",
2720 		.insns = {
2721 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2722 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2723 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2724 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2725 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2726 				     BPF_FUNC_get_hash_recalc),
2727 			BPF_MOV64_IMM(BPF_REG_0, 0),
2728 			BPF_EXIT_INSN(),
2729 		},
2730 		.result = ACCEPT,
2731 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2732 	},
2733 	{
2734 		"unpriv: spill/fill of ctx 3",
2735 		.insns = {
2736 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2737 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2738 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2739 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
2740 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2741 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2742 				     BPF_FUNC_get_hash_recalc),
2743 			BPF_EXIT_INSN(),
2744 		},
2745 		.result = REJECT,
2746 		.errstr = "R1 type=fp expected=ctx",
2747 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2748 	},
2749 	{
2750 		"unpriv: spill/fill of ctx 4",
2751 		.insns = {
2752 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2753 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2754 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2755 			BPF_MOV64_IMM(BPF_REG_0, 1),
2756 			BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10,
2757 				     BPF_REG_0, -8, 0),
2758 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2759 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2760 				     BPF_FUNC_get_hash_recalc),
2761 			BPF_EXIT_INSN(),
2762 		},
2763 		.result = REJECT,
2764 		.errstr = "R1 type=inv expected=ctx",
2765 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2766 	},
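	/* Spilling pointers of different types into the same stack slot on
	 * different paths: the later fill must not let a single load/store
	 * insn be used with incompatible pointer types, and acquired socket
	 * references must still be released.
	 */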
2767 	{
2768 		"unpriv: spill/fill of different pointers stx",
2769 		.insns = {
2770 			BPF_MOV64_IMM(BPF_REG_3, 42),
2771 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2772 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2773 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2774 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2775 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
2776 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2777 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2778 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2779 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2780 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2781 				    offsetof(struct __sk_buff, mark)),
2782 			BPF_MOV64_IMM(BPF_REG_0, 0),
2783 			BPF_EXIT_INSN(),
2784 		},
2785 		.result = REJECT,
2786 		.errstr = "same insn cannot be used with different pointers",
2787 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2788 	},
2789 	{
2790 		"unpriv: spill/fill of different pointers stx - ctx and sock",
2791 		.insns = {
2792 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
2793 			/* struct bpf_sock *sock = bpf_sock_lookup(...); */
2794 			BPF_SK_LOOKUP,
2795 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
2796 			/* u64 foo; */
2797 			/* void *target = &foo; */
2798 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2799 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2800 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
2801 			/* if (skb == NULL) *target = sock; */
2802 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2803 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2804 			/* else *target = skb; */
2805 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2806 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2807 			/* struct __sk_buff *skb = *target; */
2808 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2809 			/* skb->mark = 42; */
2810 			BPF_MOV64_IMM(BPF_REG_3, 42),
2811 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2812 				    offsetof(struct __sk_buff, mark)),
2813 			/* if (sk) bpf_sk_release(sk) */
2814 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2815 				BPF_EMIT_CALL(BPF_FUNC_sk_release),
2816 			BPF_MOV64_IMM(BPF_REG_0, 0),
2817 			BPF_EXIT_INSN(),
2818 		},
2819 		.result = REJECT,
2820 		.errstr = "type=ctx expected=sock",
2821 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2822 	},
2823 	{
2824 		"unpriv: spill/fill of different pointers stx - leak sock",
2825 		.insns = {
2826 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
2827 			/* struct bpf_sock *sock = bpf_sock_lookup(...); */
2828 			BPF_SK_LOOKUP,
2829 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
2830 			/* u64 foo; */
2831 			/* void *target = &foo; */
2832 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2833 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2834 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
2835 			/* if (skb == NULL) *target = sock; */
2836 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2837 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2838 			/* else *target = skb; */
2839 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2840 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2841 			/* struct __sk_buff *skb = *target; */
2842 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2843 			/* skb->mark = 42; */
2844 			BPF_MOV64_IMM(BPF_REG_3, 42),
2845 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2846 				    offsetof(struct __sk_buff, mark)),
2847 			BPF_EXIT_INSN(),
2848 		},
2849 		.result = REJECT,
2850 		//.errstr = "same insn cannot be used with different pointers",
2851 		.errstr = "Unreleased reference",
2852 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2853 	},
2854 	{
2855 		"unpriv: spill/fill of different pointers stx - sock and ctx (read)",
2856 		.insns = {
2857 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
2858 			/* struct bpf_sock *sock = bpf_sock_lookup(...); */
2859 			BPF_SK_LOOKUP,
2860 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
2861 			/* u64 foo; */
2862 			/* void *target = &foo; */
2863 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2864 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2865 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
2866 			/* if (skb) *target = skb */
2867 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2868 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2869 			/* else *target = sock */
2870 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2871 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2872 			/* struct bpf_sock *sk = *target; */
2873 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2874 			/* if (sk) u32 foo = sk->mark; bpf_sk_release(sk); */
2875 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
2876 				BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2877 					    offsetof(struct bpf_sock, mark)),
2878 				BPF_EMIT_CALL(BPF_FUNC_sk_release),
2879 			BPF_MOV64_IMM(BPF_REG_0, 0),
2880 			BPF_EXIT_INSN(),
2881 		},
2882 		.result = REJECT,
2883 		.errstr = "same insn cannot be used with different pointers",
2884 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2885 	},
2886 	{
2887 		"unpriv: spill/fill of different pointers stx - sock and ctx (write)",
2888 		.insns = {
2889 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
2890 			/* struct bpf_sock *sock = bpf_sock_lookup(...); */
2891 			BPF_SK_LOOKUP,
2892 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
2893 			/* u64 foo; */
2894 			/* void *target = &foo; */
2895 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2896 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2897 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
2898 			/* if (skb) *target = skb */
2899 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
2900 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2901 			/* else *target = sock */
2902 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2903 				BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2904 			/* struct bpf_sock *sk = *target; */
2905 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2906 			/* if (sk) sk->mark = 42; bpf_sk_release(sk); */
2907 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2908 				BPF_MOV64_IMM(BPF_REG_3, 42),
2909 				BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2910 					    offsetof(struct bpf_sock, mark)),
2911 				BPF_EMIT_CALL(BPF_FUNC_sk_release),
2912 			BPF_MOV64_IMM(BPF_REG_0, 0),
2913 			BPF_EXIT_INSN(),
2914 		},
2915 		.result = REJECT,
2916 		//.errstr = "same insn cannot be used with different pointers",
2917 		.errstr = "cannot write into socket",
2918 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
2919 	},
2920 	{
2921 		"unpriv: spill/fill of different pointers ldx",
2922 		.insns = {
2923 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2924 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2925 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2926 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2927 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
2928 				      -(__s32)offsetof(struct bpf_perf_event_data,
2929 						       sample_period) - 8),
2930 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2931 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2932 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2933 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2934 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
2935 				    offsetof(struct bpf_perf_event_data,
2936 					     sample_period)),
2937 			BPF_MOV64_IMM(BPF_REG_0, 0),
2938 			BPF_EXIT_INSN(),
2939 		},
2940 		.result = REJECT,
2941 		.errstr = "same insn cannot be used with different pointers",
2942 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
2943 	},
2944 	{
2945 		"unpriv: write pointer into map elem value",
2946 		.insns = {
2947 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2948 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2949 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2950 			BPF_LD_MAP_FD(BPF_REG_1, 0),
2951 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2952 				     BPF_FUNC_map_lookup_elem),
2953 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
2954 			BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
2955 			BPF_EXIT_INSN(),
2956 		},
2957 		.fixup_map_hash_8b = { 3 },
2958 		.errstr_unpriv = "R0 leaks addr",
2959 		.result_unpriv = REJECT,
2960 		.result = ACCEPT,
2961 	},
2962 	{
2963 		"unpriv: partial copy of pointer",
2964 		.insns = {
2965 			BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
2966 			BPF_MOV64_IMM(BPF_REG_0, 0),
2967 			BPF_EXIT_INSN(),
2968 		},
2969 		.errstr_unpriv = "R10 partial copy",
2970 		.result_unpriv = REJECT,
2971 		.result = ACCEPT,
2972 	},
2973 	{
2974 		"unpriv: pass pointer to tail_call",
2975 		.insns = {
2976 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
2977 			BPF_LD_MAP_FD(BPF_REG_2, 0),
2978 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2979 				     BPF_FUNC_tail_call),
2980 			BPF_MOV64_IMM(BPF_REG_0, 0),
2981 			BPF_EXIT_INSN(),
2982 		},
2983 		.fixup_prog1 = { 1 },
2984 		.errstr_unpriv = "R3 leaks addr into helper",
2985 		.result_unpriv = REJECT,
2986 		.result = ACCEPT,
2987 	},
2988 	{
2989 		"unpriv: cmp map pointer with zero",
2990 		.insns = {
2991 			BPF_MOV64_IMM(BPF_REG_1, 0),
2992 			BPF_LD_MAP_FD(BPF_REG_1, 0),
2993 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2994 			BPF_MOV64_IMM(BPF_REG_0, 0),
2995 			BPF_EXIT_INSN(),
2996 		},
2997 		.fixup_map_hash_8b = { 1 },
2998 		.errstr_unpriv = "R1 pointer comparison",
2999 		.result_unpriv = REJECT,
3000 		.result = ACCEPT,
3001 	},
3002 	{
3003 		"unpriv: write into frame pointer",
3004 		.insns = {
3005 			BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
3006 			BPF_MOV64_IMM(BPF_REG_0, 0),
3007 			BPF_EXIT_INSN(),
3008 		},
3009 		.errstr = "frame pointer is read only",
3010 		.result = REJECT,
3011 	},
3012 	{
3013 		"unpriv: spill/fill frame pointer",
3014 		.insns = {
3015 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3016 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3017 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
3018 			BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
3019 			BPF_MOV64_IMM(BPF_REG_0, 0),
3020 			BPF_EXIT_INSN(),
3021 		},
3022 		.errstr = "frame pointer is read only",
3023 		.result = REJECT,
3024 	},
3025 	{
3026 		"unpriv: cmp of frame pointer",
3027 		.insns = {
3028 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
3029 			BPF_MOV64_IMM(BPF_REG_0, 0),
3030 			BPF_EXIT_INSN(),
3031 		},
3032 		.errstr_unpriv = "R10 pointer comparison",
3033 		.result_unpriv = REJECT,
3034 		.result = ACCEPT,
3035 	},
3036 	{
3037 		"unpriv: adding of fp",
3038 		.insns = {
3039 			BPF_MOV64_IMM(BPF_REG_0, 0),
3040 			BPF_MOV64_IMM(BPF_REG_1, 0),
3041 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
3042 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
3043 			BPF_EXIT_INSN(),
3044 		},
3045 		.result = ACCEPT,
3046 	},
3047 	{
3048 		"unpriv: cmp of stack pointer",
3049 		.insns = {
3050 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3051 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3052 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
3053 			BPF_MOV64_IMM(BPF_REG_0, 0),
3054 			BPF_EXIT_INSN(),
3055 		},
3056 		.errstr_unpriv = "R2 pointer comparison",
3057 		.result_unpriv = REJECT,
3058 		.result = ACCEPT,
3059 	},
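	/* runtime/jit tail_call tests: the loaded program is also executed
	 * and its return value checked, exercising bpf_tail_call() with
	 * in-bounds, empty, out-of-bounds and negative indices.
	 */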
3060 	{
3061 		"runtime/jit: tail_call within bounds, prog once",
3062 		.insns = {
3063 			BPF_MOV64_IMM(BPF_REG_3, 0),
3064 			BPF_LD_MAP_FD(BPF_REG_2, 0),
3065 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3066 				     BPF_FUNC_tail_call),
3067 			BPF_MOV64_IMM(BPF_REG_0, 1),
3068 			BPF_EXIT_INSN(),
3069 		},
3070 		.fixup_prog1 = { 1 },
3071 		.result = ACCEPT,
3072 		.retval = 42,
3073 	},
3074 	{
3075 		"runtime/jit: tail_call within bounds, prog loop",
3076 		.insns = {
3077 			BPF_MOV64_IMM(BPF_REG_3, 1),
3078 			BPF_LD_MAP_FD(BPF_REG_2, 0),
3079 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3080 				     BPF_FUNC_tail_call),
3081 			BPF_MOV64_IMM(BPF_REG_0, 1),
3082 			BPF_EXIT_INSN(),
3083 		},
3084 		.fixup_prog1 = { 1 },
3085 		.result = ACCEPT,
3086 		.retval = 41,
3087 	},
3088 	{
3089 		"runtime/jit: tail_call within bounds, no prog",
3090 		.insns = {
3091 			BPF_MOV64_IMM(BPF_REG_3, 2),
3092 			BPF_LD_MAP_FD(BPF_REG_2, 0),
3093 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3094 				     BPF_FUNC_tail_call),
3095 			BPF_MOV64_IMM(BPF_REG_0, 1),
3096 			BPF_EXIT_INSN(),
3097 		},
3098 		.fixup_prog1 = { 1 },
3099 		.result = ACCEPT,
3100 		.retval = 1,
3101 	},
3102 	{
3103 		"runtime/jit: tail_call out of bounds",
3104 		.insns = {
3105 			BPF_MOV64_IMM(BPF_REG_3, 256),
3106 			BPF_LD_MAP_FD(BPF_REG_2, 0),
3107 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3108 				     BPF_FUNC_tail_call),
3109 			BPF_MOV64_IMM(BPF_REG_0, 2),
3110 			BPF_EXIT_INSN(),
3111 		},
3112 		.fixup_prog1 = { 1 },
3113 		.result = ACCEPT,
3114 		.retval = 2,
3115 	},
3116 	{
3117 		"runtime/jit: pass negative index to tail_call",
3118 		.insns = {
3119 			BPF_MOV64_IMM(BPF_REG_3, -1),
3120 			BPF_LD_MAP_FD(BPF_REG_2, 0),
3121 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3122 				     BPF_FUNC_tail_call),
3123 			BPF_MOV64_IMM(BPF_REG_0, 2),
3124 			BPF_EXIT_INSN(),
3125 		},
3126 		.fixup_prog1 = { 1 },
3127 		.result = ACCEPT,
3128 		.retval = 2,
3129 	},
3130 	{
3131 		"runtime/jit: pass > 32bit index to tail_call",
3132 		.insns = {
3133 			BPF_LD_IMM64(BPF_REG_3, 0x100000000ULL),
3134 			BPF_LD_MAP_FD(BPF_REG_2, 0),
3135 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3136 				     BPF_FUNC_tail_call),
3137 			BPF_MOV64_IMM(BPF_REG_0, 2),
3138 			BPF_EXIT_INSN(),
3139 		},
3140 		.fixup_prog1 = { 2 },
3141 		.result = ACCEPT,
3142 		.retval = 42,
3143 		/* Verifier rewrite for unpriv skips tail call here. */
3144 		.retval_unpriv = 2,
3145 	},
3146 	{
3147 		"stack pointer arithmetic",
3148 		.insns = {
3149 			BPF_MOV64_IMM(BPF_REG_1, 4),
3150 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
3151 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
3152 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
3153 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
3154 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
3155 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
3156 			BPF_ST_MEM(0, BPF_REG_2, 4, 0),
3157 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
3158 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
3159 			BPF_ST_MEM(0, BPF_REG_2, 4, 0),
3160 			BPF_MOV64_IMM(BPF_REG_0, 0),
3161 			BPF_EXIT_INSN(),
3162 		},
3163 		.result = ACCEPT,
3164 	},
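	/* raw_stack tests: passing a stack buffer to bpf_skb_load_bytes().
	 * The buffer may be uninitialized since the helper writes into it,
	 * but the pointer/length pair must describe a valid stack area.
	 */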
3165 	{
3166 		"raw_stack: no skb_load_bytes",
3167 		.insns = {
3168 			BPF_MOV64_IMM(BPF_REG_2, 4),
3169 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3170 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3171 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3172 			BPF_MOV64_IMM(BPF_REG_4, 8),
3173 			/* Call to skb_load_bytes() omitted. */
3174 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3175 			BPF_EXIT_INSN(),
3176 		},
3177 		.result = REJECT,
3178 		.errstr = "invalid read from stack off -8+0 size 8",
3179 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3180 	},
3181 	{
3182 		"raw_stack: skb_load_bytes, negative len",
3183 		.insns = {
3184 			BPF_MOV64_IMM(BPF_REG_2, 4),
3185 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3186 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3187 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3188 			BPF_MOV64_IMM(BPF_REG_4, -8),
3189 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3190 				     BPF_FUNC_skb_load_bytes),
3191 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3192 			BPF_EXIT_INSN(),
3193 		},
3194 		.result = REJECT,
3195 		.errstr = "R4 min value is negative",
3196 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3197 	},
3198 	{
3199 		"raw_stack: skb_load_bytes, negative len 2",
3200 		.insns = {
3201 			BPF_MOV64_IMM(BPF_REG_2, 4),
3202 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3203 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3204 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3205 			BPF_MOV64_IMM(BPF_REG_4, ~0),
3206 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3207 				     BPF_FUNC_skb_load_bytes),
3208 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3209 			BPF_EXIT_INSN(),
3210 		},
3211 		.result = REJECT,
3212 		.errstr = "R4 min value is negative",
3213 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3214 	},
3215 	{
3216 		"raw_stack: skb_load_bytes, zero len",
3217 		.insns = {
3218 			BPF_MOV64_IMM(BPF_REG_2, 4),
3219 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3220 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3221 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3222 			BPF_MOV64_IMM(BPF_REG_4, 0),
3223 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3224 				     BPF_FUNC_skb_load_bytes),
3225 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3226 			BPF_EXIT_INSN(),
3227 		},
3228 		.result = REJECT,
3229 		.errstr = "invalid stack type R3",
3230 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3231 	},
3232 	{
3233 		"raw_stack: skb_load_bytes, no init",
3234 		.insns = {
3235 			BPF_MOV64_IMM(BPF_REG_2, 4),
3236 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3237 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3238 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3239 			BPF_MOV64_IMM(BPF_REG_4, 8),
3240 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3241 				     BPF_FUNC_skb_load_bytes),
3242 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3243 			BPF_EXIT_INSN(),
3244 		},
3245 		.result = ACCEPT,
3246 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3247 	},
3248 	{
3249 		"raw_stack: skb_load_bytes, init",
3250 		.insns = {
3251 			BPF_MOV64_IMM(BPF_REG_2, 4),
3252 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3253 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3254 			BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
3255 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3256 			BPF_MOV64_IMM(BPF_REG_4, 8),
3257 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3258 				     BPF_FUNC_skb_load_bytes),
3259 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3260 			BPF_EXIT_INSN(),
3261 		},
3262 		.result = ACCEPT,
3263 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3264 	},
3265 	{
3266 		"raw_stack: skb_load_bytes, spilled regs around bounds",
3267 		.insns = {
3268 			BPF_MOV64_IMM(BPF_REG_2, 4),
3269 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3270 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
3271 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
3272 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  8),
3273 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3274 			BPF_MOV64_IMM(BPF_REG_4, 8),
3275 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3276 				     BPF_FUNC_skb_load_bytes),
3277 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
3278 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6,  8),
3279 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3280 				    offsetof(struct __sk_buff, mark)),
3281 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
3282 				    offsetof(struct __sk_buff, priority)),
3283 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3284 			BPF_EXIT_INSN(),
3285 		},
3286 		.result = ACCEPT,
3287 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3288 	},
3289 	{
3290 		"raw_stack: skb_load_bytes, spilled regs corruption",
3291 		.insns = {
3292 			BPF_MOV64_IMM(BPF_REG_2, 4),
3293 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3294 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
3295 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
3296 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3297 			BPF_MOV64_IMM(BPF_REG_4, 8),
3298 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3299 				     BPF_FUNC_skb_load_bytes),
3300 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3301 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3302 				    offsetof(struct __sk_buff, mark)),
3303 			BPF_EXIT_INSN(),
3304 		},
3305 		.result = REJECT,
3306 		.errstr = "R0 invalid mem access 'inv'",
3307 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3308 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3309 	},
3310 	{
3311 		"raw_stack: skb_load_bytes, spilled regs corruption 2",
3312 		.insns = {
3313 			BPF_MOV64_IMM(BPF_REG_2, 4),
3314 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3315 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
3316 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
3317 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  0),
3318 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  8),
3319 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3320 			BPF_MOV64_IMM(BPF_REG_4, 8),
3321 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3322 				     BPF_FUNC_skb_load_bytes),
3323 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
3324 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6,  8),
3325 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6,  0),
3326 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3327 				    offsetof(struct __sk_buff, mark)),
3328 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
3329 				    offsetof(struct __sk_buff, priority)),
3330 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3331 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
3332 				    offsetof(struct __sk_buff, pkt_type)),
3333 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
3334 			BPF_EXIT_INSN(),
3335 		},
3336 		.result = REJECT,
3337 		.errstr = "R3 invalid mem access 'inv'",
3338 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3339 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3340 	},
3341 	{
3342 		"raw_stack: skb_load_bytes, spilled regs + data",
3343 		.insns = {
3344 			BPF_MOV64_IMM(BPF_REG_2, 4),
3345 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3346 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
3347 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
3348 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  0),
3349 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  8),
3350 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3351 			BPF_MOV64_IMM(BPF_REG_4, 8),
3352 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3353 				     BPF_FUNC_skb_load_bytes),
3354 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
3355 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6,  8),
3356 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6,  0),
3357 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3358 				    offsetof(struct __sk_buff, mark)),
3359 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
3360 				    offsetof(struct __sk_buff, priority)),
3361 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3362 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
3363 			BPF_EXIT_INSN(),
3364 		},
3365 		.result = ACCEPT,
3366 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3367 	},
3368 	{
3369 		"raw_stack: skb_load_bytes, invalid access 1",
3370 		.insns = {
3371 			BPF_MOV64_IMM(BPF_REG_2, 4),
3372 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3373 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
3374 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3375 			BPF_MOV64_IMM(BPF_REG_4, 8),
3376 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3377 				     BPF_FUNC_skb_load_bytes),
3378 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3379 			BPF_EXIT_INSN(),
3380 		},
3381 		.result = REJECT,
3382 		.errstr = "invalid stack type R3 off=-513 access_size=8",
3383 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3384 	},
3385 	{
3386 		"raw_stack: skb_load_bytes, invalid access 2",
3387 		.insns = {
3388 			BPF_MOV64_IMM(BPF_REG_2, 4),
3389 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3390 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
3391 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3392 			BPF_MOV64_IMM(BPF_REG_4, 8),
3393 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3394 				     BPF_FUNC_skb_load_bytes),
3395 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3396 			BPF_EXIT_INSN(),
3397 		},
3398 		.result = REJECT,
3399 		.errstr = "invalid stack type R3 off=-1 access_size=8",
3400 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3401 	},
3402 	{
3403 		"raw_stack: skb_load_bytes, invalid access 3",
3404 		.insns = {
3405 			BPF_MOV64_IMM(BPF_REG_2, 4),
3406 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3407 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
3408 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3409 			BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3410 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3411 				     BPF_FUNC_skb_load_bytes),
3412 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3413 			BPF_EXIT_INSN(),
3414 		},
3415 		.result = REJECT,
3416 		.errstr = "R4 min value is negative",
3417 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3418 	},
3419 	{
3420 		"raw_stack: skb_load_bytes, invalid access 4",
3421 		.insns = {
3422 			BPF_MOV64_IMM(BPF_REG_2, 4),
3423 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3424 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
3425 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3426 			BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
3427 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3428 				     BPF_FUNC_skb_load_bytes),
3429 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3430 			BPF_EXIT_INSN(),
3431 		},
3432 		.result = REJECT,
3433 		.errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
3434 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3435 	},
3436 	{
3437 		"raw_stack: skb_load_bytes, invalid access 5",
3438 		.insns = {
3439 			BPF_MOV64_IMM(BPF_REG_2, 4),
3440 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3441 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
3442 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3443 			BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
3444 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3445 				     BPF_FUNC_skb_load_bytes),
3446 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3447 			BPF_EXIT_INSN(),
3448 		},
3449 		.result = REJECT,
3450 		.errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
3451 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3452 	},
3453 	{
3454 		"raw_stack: skb_load_bytes, invalid access 6",
3455 		.insns = {
3456 			BPF_MOV64_IMM(BPF_REG_2, 4),
3457 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3458 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
3459 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3460 			BPF_MOV64_IMM(BPF_REG_4, 0),
3461 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3462 				     BPF_FUNC_skb_load_bytes),
3463 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3464 			BPF_EXIT_INSN(),
3465 		},
3466 		.result = REJECT,
3467 		.errstr = "invalid stack type R3 off=-512 access_size=0",
3468 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3469 	},
3470 	{
3471 		"raw_stack: skb_load_bytes, large access",
3472 		.insns = {
3473 			BPF_MOV64_IMM(BPF_REG_2, 4),
3474 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
3475 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
3476 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3477 			BPF_MOV64_IMM(BPF_REG_4, 512),
3478 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3479 				     BPF_FUNC_skb_load_bytes),
3480 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
3481 			BPF_EXIT_INSN(),
3482 		},
3483 		.result = ACCEPT,
3484 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3485 	},
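	/* ctx store tests: the verifier forbids immediate (BPF_ST) and
	 * atomic (BPF_XADD) stores into the context pointer; only plain
	 * register stores (BPF_STX) to writable ctx fields are allowed.
	 */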
3486 	{
3487 		"context stores via ST",
3488 		.insns = {
3489 			BPF_MOV64_IMM(BPF_REG_0, 0),
3490 			BPF_ST_MEM(BPF_DW, BPF_REG_1, offsetof(struct __sk_buff, mark), 0),
3491 			BPF_EXIT_INSN(),
3492 		},
3493 		.errstr = "BPF_ST stores into R1 ctx is not allowed",
3494 		.result = REJECT,
3495 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3496 	},
3497 	{
3498 		"context stores via XADD",
3499 		.insns = {
3500 			BPF_MOV64_IMM(BPF_REG_0, 0),
3501 			BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_1,
3502 				     BPF_REG_0, offsetof(struct __sk_buff, mark), 0),
3503 			BPF_EXIT_INSN(),
3504 		},
3505 		.errstr = "BPF_XADD stores into R1 ctx is not allowed",
3506 		.result = REJECT,
3507 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3508 	},
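	/* Direct packet access: the program loads skb->data and
	 * skb->data_end and must prove, via a conditional branch, that the
	 * bytes it touches lie below data_end; the verifier tracks the
	 * packet pointer's offset and range across ALU ops and branches.
	 */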
3509 	{
3510 		"direct packet access: test1",
3511 		.insns = {
3512 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3513 				    offsetof(struct __sk_buff, data)),
3514 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3515 				    offsetof(struct __sk_buff, data_end)),
3516 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3517 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3518 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3519 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3520 			BPF_MOV64_IMM(BPF_REG_0, 0),
3521 			BPF_EXIT_INSN(),
3522 		},
3523 		.result = ACCEPT,
3524 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3525 	},
3526 	{
3527 		"direct packet access: test2",
3528 		.insns = {
3529 			BPF_MOV64_IMM(BPF_REG_0, 1),
3530 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
3531 				    offsetof(struct __sk_buff, data_end)),
3532 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3533 				    offsetof(struct __sk_buff, data)),
3534 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3535 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
3536 			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
3537 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
3538 			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
3539 			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
3540 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3541 				    offsetof(struct __sk_buff, data)),
3542 			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
3543 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3544 				    offsetof(struct __sk_buff, len)),
3545 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
3546 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
3547 			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
3548 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
3549 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
3550 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
3551 				    offsetof(struct __sk_buff, data_end)),
3552 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
3553 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
3554 			BPF_MOV64_IMM(BPF_REG_0, 0),
3555 			BPF_EXIT_INSN(),
3556 		},
3557 		.result = ACCEPT,
3558 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3559 	},
3560 	{
3561 		"direct packet access: test3",
3562 		.insns = {
3563 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3564 				    offsetof(struct __sk_buff, data)),
3565 			BPF_MOV64_IMM(BPF_REG_0, 0),
3566 			BPF_EXIT_INSN(),
3567 		},
3568 		.errstr = "invalid bpf_context access off=76",
3569 		.result = REJECT,
3570 		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
3571 	},
3572 	{
3573 		"direct packet access: test4 (write)",
3574 		.insns = {
3575 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3576 				    offsetof(struct __sk_buff, data)),
3577 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3578 				    offsetof(struct __sk_buff, data_end)),
3579 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3580 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3581 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3582 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3583 			BPF_MOV64_IMM(BPF_REG_0, 0),
3584 			BPF_EXIT_INSN(),
3585 		},
3586 		.result = ACCEPT,
3587 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3588 	},
3589 	{
3590 		"direct packet access: test5 (pkt_end >= reg, good access)",
3591 		.insns = {
3592 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3593 				    offsetof(struct __sk_buff, data)),
3594 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3595 				    offsetof(struct __sk_buff, data_end)),
3596 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3597 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3598 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
3599 			BPF_MOV64_IMM(BPF_REG_0, 1),
3600 			BPF_EXIT_INSN(),
3601 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3602 			BPF_MOV64_IMM(BPF_REG_0, 0),
3603 			BPF_EXIT_INSN(),
3604 		},
3605 		.result = ACCEPT,
3606 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3607 	},
3608 	{
3609 		"direct packet access: test6 (pkt_end >= reg, bad access)",
3610 		.insns = {
3611 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3612 				    offsetof(struct __sk_buff, data)),
3613 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3614 				    offsetof(struct __sk_buff, data_end)),
3615 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3616 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3617 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
3618 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3619 			BPF_MOV64_IMM(BPF_REG_0, 1),
3620 			BPF_EXIT_INSN(),
3621 			BPF_MOV64_IMM(BPF_REG_0, 0),
3622 			BPF_EXIT_INSN(),
3623 		},
3624 		.errstr = "invalid access to packet",
3625 		.result = REJECT,
3626 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3627 	},
3628 	{
3629 		"direct packet access: test7 (pkt_end >= reg, both accesses)",
3630 		.insns = {
3631 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3632 				    offsetof(struct __sk_buff, data)),
3633 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3634 				    offsetof(struct __sk_buff, data_end)),
3635 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3636 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3637 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
3638 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3639 			BPF_MOV64_IMM(BPF_REG_0, 1),
3640 			BPF_EXIT_INSN(),
3641 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3642 			BPF_MOV64_IMM(BPF_REG_0, 0),
3643 			BPF_EXIT_INSN(),
3644 		},
3645 		.errstr = "invalid access to packet",
3646 		.result = REJECT,
3647 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3648 	},
3649 	{
3650 		"direct packet access: test8 (double test, variant 1)",
3651 		.insns = {
3652 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3653 				    offsetof(struct __sk_buff, data)),
3654 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3655 				    offsetof(struct __sk_buff, data_end)),
3656 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3657 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3658 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
3659 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3660 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3661 			BPF_MOV64_IMM(BPF_REG_0, 1),
3662 			BPF_EXIT_INSN(),
3663 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3664 			BPF_MOV64_IMM(BPF_REG_0, 0),
3665 			BPF_EXIT_INSN(),
3666 		},
3667 		.result = ACCEPT,
3668 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3669 	},
3670 	{
3671 		"direct packet access: test9 (double test, variant 2)",
3672 		.insns = {
3673 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3674 				    offsetof(struct __sk_buff, data)),
3675 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3676 				    offsetof(struct __sk_buff, data_end)),
3677 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3678 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3679 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
3680 			BPF_MOV64_IMM(BPF_REG_0, 1),
3681 			BPF_EXIT_INSN(),
3682 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3683 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3684 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3685 			BPF_MOV64_IMM(BPF_REG_0, 0),
3686 			BPF_EXIT_INSN(),
3687 		},
3688 		.result = ACCEPT,
3689 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3690 	},
3691 	{
3692 		"direct packet access: test10 (write invalid)",
3693 		.insns = {
3694 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3695 				    offsetof(struct __sk_buff, data)),
3696 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3697 				    offsetof(struct __sk_buff, data_end)),
3698 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3699 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3700 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
3701 			BPF_MOV64_IMM(BPF_REG_0, 0),
3702 			BPF_EXIT_INSN(),
3703 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3704 			BPF_MOV64_IMM(BPF_REG_0, 0),
3705 			BPF_EXIT_INSN(),
3706 		},
3707 		.errstr = "invalid access to packet",
3708 		.result = REJECT,
3709 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3710 	},
3711 	{
3712 		"direct packet access: test11 (shift, good access)",
3713 		.insns = {
3714 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3715 				    offsetof(struct __sk_buff, data)),
3716 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3717 				    offsetof(struct __sk_buff, data_end)),
3718 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3719 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3720 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3721 			BPF_MOV64_IMM(BPF_REG_3, 144),
3722 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3723 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3724 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
3725 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3726 			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3727 			BPF_MOV64_IMM(BPF_REG_0, 1),
3728 			BPF_EXIT_INSN(),
3729 			BPF_MOV64_IMM(BPF_REG_0, 0),
3730 			BPF_EXIT_INSN(),
3731 		},
3732 		.result = ACCEPT,
3733 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3734 		.retval = 1,
3735 	},
3736 	{
3737 		"direct packet access: test12 (and, good access)",
3738 		.insns = {
3739 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3740 				    offsetof(struct __sk_buff, data)),
3741 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3742 				    offsetof(struct __sk_buff, data_end)),
3743 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3744 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3745 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3746 			BPF_MOV64_IMM(BPF_REG_3, 144),
3747 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3748 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3749 			BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
3750 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3751 			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3752 			BPF_MOV64_IMM(BPF_REG_0, 1),
3753 			BPF_EXIT_INSN(),
3754 			BPF_MOV64_IMM(BPF_REG_0, 0),
3755 			BPF_EXIT_INSN(),
3756 		},
3757 		.result = ACCEPT,
3758 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3759 		.retval = 1,
3760 	},
3761 	{
3762 		"direct packet access: test13 (branches, good access)",
3763 		.insns = {
3764 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3765 				    offsetof(struct __sk_buff, data)),
3766 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3767 				    offsetof(struct __sk_buff, data_end)),
3768 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3769 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3770 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
3771 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3772 				    offsetof(struct __sk_buff, mark)),
3773 			BPF_MOV64_IMM(BPF_REG_4, 1),
3774 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
3775 			BPF_MOV64_IMM(BPF_REG_3, 14),
3776 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
3777 			BPF_MOV64_IMM(BPF_REG_3, 24),
3778 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
3779 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
3780 			BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
3781 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3782 			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3783 			BPF_MOV64_IMM(BPF_REG_0, 1),
3784 			BPF_EXIT_INSN(),
3785 			BPF_MOV64_IMM(BPF_REG_0, 0),
3786 			BPF_EXIT_INSN(),
3787 		},
3788 		.result = ACCEPT,
3789 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3790 		.retval = 1,
3791 	},
3792 	{
3793 		"direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
3794 		.insns = {
3795 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3796 				    offsetof(struct __sk_buff, data)),
3797 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3798 				    offsetof(struct __sk_buff, data_end)),
3799 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3800 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
3801 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
3802 			BPF_MOV64_IMM(BPF_REG_5, 12),
3803 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4),
3804 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
3805 			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
3806 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
3807 			BPF_MOV64_IMM(BPF_REG_0, 1),
3808 			BPF_EXIT_INSN(),
3809 			BPF_MOV64_IMM(BPF_REG_0, 0),
3810 			BPF_EXIT_INSN(),
3811 		},
3812 		.result = ACCEPT,
3813 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3814 		.retval = 1,
3815 	},
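	/* test15: an XADD on a stack slot holding a spilled packet pointer
	 * turns the slot into an unknown scalar ('inv'), so the pointer
	 * reloaded from that slot can no longer be dereferenced.
	 */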
3816 	{
3817 		"direct packet access: test15 (spill with xadd)",
3818 		.insns = {
3819 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3820 				    offsetof(struct __sk_buff, data)),
3821 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3822 				    offsetof(struct __sk_buff, data_end)),
3823 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3824 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3825 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
3826 			BPF_MOV64_IMM(BPF_REG_5, 4096),
3827 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
3828 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
3829 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
3830 			BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
3831 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
3832 			BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
3833 			BPF_MOV64_IMM(BPF_REG_0, 0),
3834 			BPF_EXIT_INSN(),
3835 		},
3836 		.errstr = "R2 invalid mem access 'inv'",
3837 		.result = REJECT,
3838 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3839 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3840 	},
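	/* test16 rejects any pointer arithmetic on pkt_end itself; test17
	 * checks that state pruning does not hide a misaligned packet
	 * store when the program is loaded with strict alignment checks.
	 */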
3841 	{
3842 		"direct packet access: test16 (arith on data_end)",
3843 		.insns = {
3844 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3845 				    offsetof(struct __sk_buff, data)),
3846 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3847 				    offsetof(struct __sk_buff, data_end)),
3848 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3849 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3850 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16),
3851 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3852 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3853 			BPF_MOV64_IMM(BPF_REG_0, 0),
3854 			BPF_EXIT_INSN(),
3855 		},
3856 		.errstr = "R3 pointer arithmetic on pkt_end",
3857 		.result = REJECT,
3858 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3859 	},
3860 	{
3861 		"direct packet access: test17 (pruning, alignment)",
3862 		.insns = {
3863 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3864 				    offsetof(struct __sk_buff, data)),
3865 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3866 				    offsetof(struct __sk_buff, data_end)),
3867 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3868 				    offsetof(struct __sk_buff, mark)),
3869 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3870 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14),
3871 			BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4),
3872 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3873 			BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4),
3874 			BPF_MOV64_IMM(BPF_REG_0, 0),
3875 			BPF_EXIT_INSN(),
3876 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
3877 			BPF_JMP_A(-6),
3878 		},
3879 		.errstr = "misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4",
3880 		.result = REJECT,
3881 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3882 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
3883 	},
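	/* tests 18-24 use the commutative form "scalar += pkt_ptr": the
	 * result is still a packet pointer, but accesses through it are
	 * only accepted when the scalar is bounded (e.g. via AND/RSH) and
	 * the resulting offset is range-checked against data_end.
	 */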
3884 	{
3885 		"direct packet access: test18 (imm += pkt_ptr, 1)",
3886 		.insns = {
3887 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3888 				    offsetof(struct __sk_buff, data)),
3889 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3890 				    offsetof(struct __sk_buff, data_end)),
3891 			BPF_MOV64_IMM(BPF_REG_0, 8),
3892 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3893 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3894 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3895 			BPF_MOV64_IMM(BPF_REG_0, 0),
3896 			BPF_EXIT_INSN(),
3897 		},
3898 		.result = ACCEPT,
3899 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3900 	},
3901 	{
3902 		"direct packet access: test19 (imm += pkt_ptr, 2)",
3903 		.insns = {
3904 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3905 				    offsetof(struct __sk_buff, data)),
3906 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3907 				    offsetof(struct __sk_buff, data_end)),
3908 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3909 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3910 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
3911 			BPF_MOV64_IMM(BPF_REG_4, 4),
3912 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3913 			BPF_STX_MEM(BPF_B, BPF_REG_4, BPF_REG_4, 0),
3914 			BPF_MOV64_IMM(BPF_REG_0, 0),
3915 			BPF_EXIT_INSN(),
3916 		},
3917 		.result = ACCEPT,
3918 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3919 	},
3920 	{
3921 		"direct packet access: test20 (x += pkt_ptr, 1)",
3922 		.insns = {
3923 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3924 				    offsetof(struct __sk_buff, data)),
3925 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3926 				    offsetof(struct __sk_buff, data_end)),
3927 			BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3928 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3929 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3930 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0x7fff),
3931 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3932 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3933 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3934 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
3935 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
3936 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
3937 			BPF_MOV64_IMM(BPF_REG_0, 0),
3938 			BPF_EXIT_INSN(),
3939 		},
3940 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3941 		.result = ACCEPT,
3942 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3943 	},
3944 	{
3945 		"direct packet access: test21 (x += pkt_ptr, 2)",
3946 		.insns = {
3947 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3948 				    offsetof(struct __sk_buff, data)),
3949 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3950 				    offsetof(struct __sk_buff, data_end)),
3951 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3952 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3953 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
3954 			BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3955 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
3956 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
3957 			BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0x7fff),
3958 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3959 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3960 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
3961 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
3962 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
3963 			BPF_MOV64_IMM(BPF_REG_0, 0),
3964 			BPF_EXIT_INSN(),
3965 		},
3966 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3967 		.result = ACCEPT,
3968 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3969 	},
3970 	{
3971 		"direct packet access: test22 (x += pkt_ptr, 3)",
3972 		.insns = {
3973 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3974 				    offsetof(struct __sk_buff, data)),
3975 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3976 				    offsetof(struct __sk_buff, data_end)),
3977 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3978 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3979 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8),
3980 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_3, -16),
3981 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -16),
3982 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11),
3983 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
3984 			BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3985 			BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
3986 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
3987 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 49),
3988 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3989 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
3990 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
3991 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
3992 			BPF_MOV64_IMM(BPF_REG_2, 1),
3993 			BPF_STX_MEM(BPF_H, BPF_REG_4, BPF_REG_2, 0),
3994 			BPF_MOV64_IMM(BPF_REG_0, 0),
3995 			BPF_EXIT_INSN(),
3996 		},
3997 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
3998 		.result = ACCEPT,
3999 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4000 	},
4001 	{
4002 		"direct packet access: test23 (x += pkt_ptr, 4)",
4003 		.insns = {
4004 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4005 				    offsetof(struct __sk_buff, data)),
4006 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4007 				    offsetof(struct __sk_buff, data_end)),
4008 			BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
4009 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
4010 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
4011 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
4012 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4013 			BPF_MOV64_IMM(BPF_REG_0, 31),
4014 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
4015 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
4016 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
4017 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1),
4018 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4019 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
4020 			BPF_MOV64_IMM(BPF_REG_0, 0),
4021 			BPF_EXIT_INSN(),
4022 		},
4023 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4024 		.result = REJECT,
4025 		.errstr = "invalid access to packet, off=0 size=8, R5(id=1,off=0,r=0)",
4026 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4027 	},
4028 	{
4029 		"direct packet access: test24 (x += pkt_ptr, 5)",
4030 		.insns = {
4031 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4032 				    offsetof(struct __sk_buff, data)),
4033 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4034 				    offsetof(struct __sk_buff, data_end)),
4035 			BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
4036 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
4037 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
4038 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff),
4039 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4040 			BPF_MOV64_IMM(BPF_REG_0, 64),
4041 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
4042 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
4043 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
4044 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7fff - 1),
4045 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4046 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
4047 			BPF_MOV64_IMM(BPF_REG_0, 0),
4048 			BPF_EXIT_INSN(),
4049 		},
4050 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4051 		.result = ACCEPT,
4052 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4053 	},
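	/* tests 25-28: JLT/JLE comparisons against pkt_end must mark the
	 * packet range on the correct branch; the "bad access" variants
	 * read the packet on the unverified path and are rejected.
	 */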
4054 	{
4055 		"direct packet access: test25 (marking on <, good access)",
4056 		.insns = {
4057 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4058 				    offsetof(struct __sk_buff, data)),
4059 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4060 				    offsetof(struct __sk_buff, data_end)),
4061 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4062 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4063 			BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 2),
4064 			BPF_MOV64_IMM(BPF_REG_0, 0),
4065 			BPF_EXIT_INSN(),
4066 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4067 			BPF_JMP_IMM(BPF_JA, 0, 0, -4),
4068 		},
4069 		.result = ACCEPT,
4070 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4071 	},
4072 	{
4073 		"direct packet access: test26 (marking on <, bad access)",
4074 		.insns = {
4075 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4076 				    offsetof(struct __sk_buff, data)),
4077 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4078 				    offsetof(struct __sk_buff, data_end)),
4079 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4080 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4081 			BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 3),
4082 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4083 			BPF_MOV64_IMM(BPF_REG_0, 0),
4084 			BPF_EXIT_INSN(),
4085 			BPF_JMP_IMM(BPF_JA, 0, 0, -3),
4086 		},
4087 		.result = REJECT,
4088 		.errstr = "invalid access to packet",
4089 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4090 	},
4091 	{
4092 		"direct packet access: test27 (marking on <=, good access)",
4093 		.insns = {
4094 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4095 				    offsetof(struct __sk_buff, data)),
4096 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4097 				    offsetof(struct __sk_buff, data_end)),
4098 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4099 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4100 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 1),
4101 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4102 			BPF_MOV64_IMM(BPF_REG_0, 1),
4103 			BPF_EXIT_INSN(),
4104 		},
4105 		.result = ACCEPT,
4106 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4107 		.retval = 1,
4108 	},
4109 	{
4110 		"direct packet access: test28 (marking on <=, bad access)",
4111 		.insns = {
4112 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4113 				    offsetof(struct __sk_buff, data)),
4114 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4115 				    offsetof(struct __sk_buff, data_end)),
4116 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4117 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4118 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 2),
4119 			BPF_MOV64_IMM(BPF_REG_0, 1),
4120 			BPF_EXIT_INSN(),
4121 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4122 			BPF_JMP_IMM(BPF_JA, 0, 0, -4),
4123 		},
4124 		.result = REJECT,
4125 		.errstr = "invalid access to packet",
4126 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4127 	},
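	/* Helper access to packet: a packet pointer may only be passed to
	 * a helper (here as a map key/value or checksum buffer) after its
	 * range has been checked against data_end, and only for a size
	 * that fits inside that verified range.
	 */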
4128 	{
4129 		"helper access to packet: test1, valid packet_ptr range",
4130 		.insns = {
4131 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4132 				    offsetof(struct xdp_md, data)),
4133 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4134 				    offsetof(struct xdp_md, data_end)),
4135 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
4136 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
4137 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
4138 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4139 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
4140 			BPF_MOV64_IMM(BPF_REG_4, 0),
4141 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4142 				     BPF_FUNC_map_update_elem),
4143 			BPF_MOV64_IMM(BPF_REG_0, 0),
4144 			BPF_EXIT_INSN(),
4145 		},
4146 		.fixup_map_hash_8b = { 5 },
4147 		.result_unpriv = ACCEPT,
4148 		.result = ACCEPT,
4149 		.prog_type = BPF_PROG_TYPE_XDP,
4150 	},
4151 	{
4152 		"helper access to packet: test2, unchecked packet_ptr",
4153 		.insns = {
4154 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4155 				    offsetof(struct xdp_md, data)),
4156 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4157 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4158 				     BPF_FUNC_map_lookup_elem),
4159 			BPF_MOV64_IMM(BPF_REG_0, 0),
4160 			BPF_EXIT_INSN(),
4161 		},
4162 		.fixup_map_hash_8b = { 1 },
4163 		.result = REJECT,
4164 		.errstr = "invalid access to packet",
4165 		.prog_type = BPF_PROG_TYPE_XDP,
4166 	},
4167 	{
4168 		"helper access to packet: test3, variable add",
4169 		.insns = {
4170 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4171 					offsetof(struct xdp_md, data)),
4172 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4173 					offsetof(struct xdp_md, data_end)),
4174 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4175 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
4176 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
4177 			BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
4178 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4179 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
4180 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
4181 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
4182 			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
4183 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4184 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
4185 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4186 				     BPF_FUNC_map_lookup_elem),
4187 			BPF_MOV64_IMM(BPF_REG_0, 0),
4188 			BPF_EXIT_INSN(),
4189 		},
4190 		.fixup_map_hash_8b = { 11 },
4191 		.result = ACCEPT,
4192 		.prog_type = BPF_PROG_TYPE_XDP,
4193 	},
4194 	{
4195 		"helper access to packet: test4, packet_ptr with bad range",
4196 		.insns = {
4197 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4198 				    offsetof(struct xdp_md, data)),
4199 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4200 				    offsetof(struct xdp_md, data_end)),
4201 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4202 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
4203 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
4204 			BPF_MOV64_IMM(BPF_REG_0, 0),
4205 			BPF_EXIT_INSN(),
4206 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4207 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4208 				     BPF_FUNC_map_lookup_elem),
4209 			BPF_MOV64_IMM(BPF_REG_0, 0),
4210 			BPF_EXIT_INSN(),
4211 		},
4212 		.fixup_map_hash_8b = { 7 },
4213 		.result = REJECT,
4214 		.errstr = "invalid access to packet",
4215 		.prog_type = BPF_PROG_TYPE_XDP,
4216 	},
4217 	{
4218 		"helper access to packet: test5, packet_ptr with too short range",
4219 		.insns = {
4220 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4221 				    offsetof(struct xdp_md, data)),
4222 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4223 				    offsetof(struct xdp_md, data_end)),
4224 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
4225 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4226 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
4227 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
4228 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4229 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4230 				     BPF_FUNC_map_lookup_elem),
4231 			BPF_MOV64_IMM(BPF_REG_0, 0),
4232 			BPF_EXIT_INSN(),
4233 		},
4234 		.fixup_map_hash_8b = { 6 },
4235 		.result = REJECT,
4236 		.errstr = "invalid access to packet",
4237 		.prog_type = BPF_PROG_TYPE_XDP,
4238 	},
4239 	{
4240 		"helper access to packet: test6, cls valid packet_ptr range",
4241 		.insns = {
4242 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4243 				    offsetof(struct __sk_buff, data)),
4244 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4245 				    offsetof(struct __sk_buff, data_end)),
4246 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
4247 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
4248 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
4249 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4250 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
4251 			BPF_MOV64_IMM(BPF_REG_4, 0),
4252 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4253 				     BPF_FUNC_map_update_elem),
4254 			BPF_MOV64_IMM(BPF_REG_0, 0),
4255 			BPF_EXIT_INSN(),
4256 		},
4257 		.fixup_map_hash_8b = { 5 },
4258 		.result = ACCEPT,
4259 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4260 	},
4261 	{
4262 		"helper access to packet: test7, cls unchecked packet_ptr",
4263 		.insns = {
4264 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4265 				    offsetof(struct __sk_buff, data)),
4266 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4267 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4268 				     BPF_FUNC_map_lookup_elem),
4269 			BPF_MOV64_IMM(BPF_REG_0, 0),
4270 			BPF_EXIT_INSN(),
4271 		},
4272 		.fixup_map_hash_8b = { 1 },
4273 		.result = REJECT,
4274 		.errstr = "invalid access to packet",
4275 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4276 	},
4277 	{
4278 		"helper access to packet: test8, cls variable add",
4279 		.insns = {
4280 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4281 					offsetof(struct __sk_buff, data)),
4282 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4283 					offsetof(struct __sk_buff, data_end)),
4284 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4285 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
4286 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
4287 			BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
4288 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4289 			BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
4290 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
4291 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
4292 			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
4293 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4294 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
4295 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4296 				     BPF_FUNC_map_lookup_elem),
4297 			BPF_MOV64_IMM(BPF_REG_0, 0),
4298 			BPF_EXIT_INSN(),
4299 		},
4300 		.fixup_map_hash_8b = { 11 },
4301 		.result = ACCEPT,
4302 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4303 	},
4304 	{
4305 		"helper access to packet: test9, cls packet_ptr with bad range",
4306 		.insns = {
4307 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4308 				    offsetof(struct __sk_buff, data)),
4309 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4310 				    offsetof(struct __sk_buff, data_end)),
4311 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4312 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
4313 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
4314 			BPF_MOV64_IMM(BPF_REG_0, 0),
4315 			BPF_EXIT_INSN(),
4316 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4317 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4318 				     BPF_FUNC_map_lookup_elem),
4319 			BPF_MOV64_IMM(BPF_REG_0, 0),
4320 			BPF_EXIT_INSN(),
4321 		},
4322 		.fixup_map_hash_8b = { 7 },
4323 		.result = REJECT,
4324 		.errstr = "invalid access to packet",
4325 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4326 	},
4327 	{
4328 		"helper access to packet: test10, cls packet_ptr with too short range",
4329 		.insns = {
4330 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4331 				    offsetof(struct __sk_buff, data)),
4332 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4333 				    offsetof(struct __sk_buff, data_end)),
4334 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
4335 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
4336 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
4337 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
4338 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4339 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4340 				     BPF_FUNC_map_lookup_elem),
4341 			BPF_MOV64_IMM(BPF_REG_0, 0),
4342 			BPF_EXIT_INSN(),
4343 		},
4344 		.fixup_map_hash_8b = { 6 },
4345 		.result = REJECT,
4346 		.errstr = "invalid access to packet",
4347 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4348 	},
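	/* tests 11-12: skb_store_bytes()/skb_load_bytes() are not allowed
	 * to take a packet pointer as their memory argument, so the calls
	 * are rejected even though the pointer range itself is valid.
	 */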
4349 	{
4350 		"helper access to packet: test11, cls unsuitable helper 1",
4351 		.insns = {
4352 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4353 				    offsetof(struct __sk_buff, data)),
4354 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4355 				    offsetof(struct __sk_buff, data_end)),
4356 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4357 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
4358 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
4359 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
4360 			BPF_MOV64_IMM(BPF_REG_2, 0),
4361 			BPF_MOV64_IMM(BPF_REG_4, 42),
4362 			BPF_MOV64_IMM(BPF_REG_5, 0),
4363 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4364 				     BPF_FUNC_skb_store_bytes),
4365 			BPF_MOV64_IMM(BPF_REG_0, 0),
4366 			BPF_EXIT_INSN(),
4367 		},
4368 		.result = REJECT,
4369 		.errstr = "helper access to the packet",
4370 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4371 	},
4372 	{
4373 		"helper access to packet: test12, cls unsuitable helper 2",
4374 		.insns = {
4375 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4376 				    offsetof(struct __sk_buff, data)),
4377 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4378 				    offsetof(struct __sk_buff, data_end)),
4379 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
4380 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
4381 			BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
4382 			BPF_MOV64_IMM(BPF_REG_2, 0),
4383 			BPF_MOV64_IMM(BPF_REG_4, 4),
4384 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4385 				     BPF_FUNC_skb_load_bytes),
4386 			BPF_MOV64_IMM(BPF_REG_0, 0),
4387 			BPF_EXIT_INSN(),
4388 		},
4389 		.result = REJECT,
4390 		.errstr = "helper access to the packet",
4391 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4392 	},
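	/* tests 13-21: bpf_csum_diff() may take packet pointers, so the
	 * calls succeed when the size argument fits the verified range;
	 * negative sizes, over-long ranges and pkt_end as the buffer
	 * pointer are rejected.
	 */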
4393 	{
4394 		"helper access to packet: test13, cls helper ok",
4395 		.insns = {
4396 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4397 				    offsetof(struct __sk_buff, data)),
4398 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4399 				    offsetof(struct __sk_buff, data_end)),
4400 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4401 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4402 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4403 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4404 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4405 			BPF_MOV64_IMM(BPF_REG_2, 4),
4406 			BPF_MOV64_IMM(BPF_REG_3, 0),
4407 			BPF_MOV64_IMM(BPF_REG_4, 0),
4408 			BPF_MOV64_IMM(BPF_REG_5, 0),
4409 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4410 				     BPF_FUNC_csum_diff),
4411 			BPF_MOV64_IMM(BPF_REG_0, 0),
4412 			BPF_EXIT_INSN(),
4413 		},
4414 		.result = ACCEPT,
4415 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4416 	},
4417 	{
4418 		"helper access to packet: test14, cls helper ok sub",
4419 		.insns = {
4420 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4421 				    offsetof(struct __sk_buff, data)),
4422 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4423 				    offsetof(struct __sk_buff, data_end)),
4424 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4425 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4426 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4427 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4428 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
4429 			BPF_MOV64_IMM(BPF_REG_2, 4),
4430 			BPF_MOV64_IMM(BPF_REG_3, 0),
4431 			BPF_MOV64_IMM(BPF_REG_4, 0),
4432 			BPF_MOV64_IMM(BPF_REG_5, 0),
4433 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4434 				     BPF_FUNC_csum_diff),
4435 			BPF_MOV64_IMM(BPF_REG_0, 0),
4436 			BPF_EXIT_INSN(),
4437 		},
4438 		.result = ACCEPT,
4439 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4440 	},
4441 	{
4442 		"helper access to packet: test15, cls helper fail sub",
4443 		.insns = {
4444 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4445 				    offsetof(struct __sk_buff, data)),
4446 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4447 				    offsetof(struct __sk_buff, data_end)),
4448 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4449 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4450 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4451 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4452 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 12),
4453 			BPF_MOV64_IMM(BPF_REG_2, 4),
4454 			BPF_MOV64_IMM(BPF_REG_3, 0),
4455 			BPF_MOV64_IMM(BPF_REG_4, 0),
4456 			BPF_MOV64_IMM(BPF_REG_5, 0),
4457 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4458 				     BPF_FUNC_csum_diff),
4459 			BPF_MOV64_IMM(BPF_REG_0, 0),
4460 			BPF_EXIT_INSN(),
4461 		},
4462 		.result = REJECT,
4463 		.errstr = "invalid access to packet",
4464 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4465 	},
4466 	{
4467 		"helper access to packet: test16, cls helper fail range 1",
4468 		.insns = {
4469 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4470 				    offsetof(struct __sk_buff, data)),
4471 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4472 				    offsetof(struct __sk_buff, data_end)),
4473 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4474 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4475 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4476 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4477 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4478 			BPF_MOV64_IMM(BPF_REG_2, 8),
4479 			BPF_MOV64_IMM(BPF_REG_3, 0),
4480 			BPF_MOV64_IMM(BPF_REG_4, 0),
4481 			BPF_MOV64_IMM(BPF_REG_5, 0),
4482 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4483 				     BPF_FUNC_csum_diff),
4484 			BPF_MOV64_IMM(BPF_REG_0, 0),
4485 			BPF_EXIT_INSN(),
4486 		},
4487 		.result = REJECT,
4488 		.errstr = "invalid access to packet",
4489 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4490 	},
4491 	{
4492 		"helper access to packet: test17, cls helper fail range 2",
4493 		.insns = {
4494 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4495 				    offsetof(struct __sk_buff, data)),
4496 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4497 				    offsetof(struct __sk_buff, data_end)),
4498 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4499 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4500 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4501 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4502 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4503 			BPF_MOV64_IMM(BPF_REG_2, -9),
4504 			BPF_MOV64_IMM(BPF_REG_3, 0),
4505 			BPF_MOV64_IMM(BPF_REG_4, 0),
4506 			BPF_MOV64_IMM(BPF_REG_5, 0),
4507 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4508 				     BPF_FUNC_csum_diff),
4509 			BPF_MOV64_IMM(BPF_REG_0, 0),
4510 			BPF_EXIT_INSN(),
4511 		},
4512 		.result = REJECT,
4513 		.errstr = "R2 min value is negative",
4514 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4515 	},
4516 	{
4517 		"helper access to packet: test18, cls helper fail range 3",
4518 		.insns = {
4519 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4520 				    offsetof(struct __sk_buff, data)),
4521 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4522 				    offsetof(struct __sk_buff, data_end)),
4523 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4524 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4525 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4526 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4527 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4528 			BPF_MOV64_IMM(BPF_REG_2, ~0),
4529 			BPF_MOV64_IMM(BPF_REG_3, 0),
4530 			BPF_MOV64_IMM(BPF_REG_4, 0),
4531 			BPF_MOV64_IMM(BPF_REG_5, 0),
4532 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4533 				     BPF_FUNC_csum_diff),
4534 			BPF_MOV64_IMM(BPF_REG_0, 0),
4535 			BPF_EXIT_INSN(),
4536 		},
4537 		.result = REJECT,
4538 		.errstr = "R2 min value is negative",
4539 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4540 	},
4541 	{
4542 		"helper access to packet: test19, cls helper range zero",
4543 		.insns = {
4544 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4545 				    offsetof(struct __sk_buff, data)),
4546 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4547 				    offsetof(struct __sk_buff, data_end)),
4548 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4549 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4550 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4551 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4552 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4553 			BPF_MOV64_IMM(BPF_REG_2, 0),
4554 			BPF_MOV64_IMM(BPF_REG_3, 0),
4555 			BPF_MOV64_IMM(BPF_REG_4, 0),
4556 			BPF_MOV64_IMM(BPF_REG_5, 0),
4557 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4558 				     BPF_FUNC_csum_diff),
4559 			BPF_MOV64_IMM(BPF_REG_0, 0),
4560 			BPF_EXIT_INSN(),
4561 		},
4562 		.result = ACCEPT,
4563 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4564 	},
4565 	{
4566 		"helper access to packet: test20, pkt end as input",
4567 		.insns = {
4568 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4569 				    offsetof(struct __sk_buff, data)),
4570 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4571 				    offsetof(struct __sk_buff, data_end)),
4572 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4573 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4574 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4575 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4576 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
4577 			BPF_MOV64_IMM(BPF_REG_2, 4),
4578 			BPF_MOV64_IMM(BPF_REG_3, 0),
4579 			BPF_MOV64_IMM(BPF_REG_4, 0),
4580 			BPF_MOV64_IMM(BPF_REG_5, 0),
4581 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4582 				     BPF_FUNC_csum_diff),
4583 			BPF_MOV64_IMM(BPF_REG_0, 0),
4584 			BPF_EXIT_INSN(),
4585 		},
4586 		.result = REJECT,
4587 		.errstr = "R1 type=pkt_end expected=fp",
4588 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4589 	},
4590 	{
4591 		"helper access to packet: test21, wrong reg",
4592 		.insns = {
4593 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4594 				    offsetof(struct __sk_buff, data)),
4595 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4596 				    offsetof(struct __sk_buff, data_end)),
4597 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
4598 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
4599 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
4600 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
4601 			BPF_MOV64_IMM(BPF_REG_2, 4),
4602 			BPF_MOV64_IMM(BPF_REG_3, 0),
4603 			BPF_MOV64_IMM(BPF_REG_4, 0),
4604 			BPF_MOV64_IMM(BPF_REG_5, 0),
4605 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4606 				     BPF_FUNC_csum_diff),
4607 			BPF_MOV64_IMM(BPF_REG_0, 0),
4608 			BPF_EXIT_INSN(),
4609 		},
4610 		.result = REJECT,
4611 		.errstr = "invalid access to packet",
4612 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
4613 	},
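	/* These map types do not support bpf_map_lookup_elem() from BPF
	 * programs; the verifier rejects the call based on the map_type.
	 */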
4614 	{
4615 		"prevent map lookup in sockmap",
4616 		.insns = {
4617 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4618 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4619 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4620 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4621 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4622 				     BPF_FUNC_map_lookup_elem),
4623 			BPF_EXIT_INSN(),
4624 		},
4625 		.fixup_map_sockmap = { 3 },
4626 		.result = REJECT,
4627 		.errstr = "cannot pass map_type 15 into func bpf_map_lookup_elem",
4628 		.prog_type = BPF_PROG_TYPE_SOCK_OPS,
4629 	},
4630 	{
4631 		"prevent map lookup in sockhash",
4632 		.insns = {
4633 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4634 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4635 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4636 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4637 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4638 				     BPF_FUNC_map_lookup_elem),
4639 			BPF_EXIT_INSN(),
4640 		},
4641 		.fixup_map_sockhash = { 3 },
4642 		.result = REJECT,
4643 		.errstr = "cannot pass map_type 18 into func bpf_map_lookup_elem",
4644 		.prog_type = BPF_PROG_TYPE_SOCK_OPS,
4645 	},
4646 	{
4647 		"prevent map lookup in xskmap",
4648 		.insns = {
4649 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4650 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4651 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4652 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4653 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4654 				     BPF_FUNC_map_lookup_elem),
4655 			BPF_EXIT_INSN(),
4656 		},
4657 		.fixup_map_xskmap = { 3 },
4658 		.result = REJECT,
4659 		.errstr = "cannot pass map_type 17 into func bpf_map_lookup_elem",
4660 		.prog_type = BPF_PROG_TYPE_XDP,
4661 	},
4662 	{
4663 		"prevent map lookup in stack trace",
4664 		.insns = {
4665 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4666 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4667 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4668 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4669 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4670 				     BPF_FUNC_map_lookup_elem),
4671 			BPF_EXIT_INSN(),
4672 		},
4673 		.fixup_map_stacktrace = { 3 },
4674 		.result = REJECT,
4675 		.errstr = "cannot pass map_type 7 into func bpf_map_lookup_elem",
4676 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
4677 	},
4678 	{
4679 		"prevent map lookup in prog array",
4680 		.insns = {
4681 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4682 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4683 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4684 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4685 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4686 				     BPF_FUNC_map_lookup_elem),
4687 			BPF_EXIT_INSN(),
4688 		},
4689 		.fixup_prog2 = { 3 },
4690 		.result = REJECT,
4691 		.errstr = "cannot pass map_type 3 into func bpf_map_lookup_elem",
4692 	},
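	/* Map value access: after a successful lookup, offsets into the
	 * value must be proven to lie within value_size; variable offsets
	 * need both lower and upper bounds checks, otherwise the access is
	 * rejected as unbounded or out of range.
	 */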
4693 	{
4694 		"valid map access into an array with a constant",
4695 		.insns = {
4696 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4697 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4698 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4699 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4700 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4701 				     BPF_FUNC_map_lookup_elem),
4702 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4703 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4704 				   offsetof(struct test_val, foo)),
4705 			BPF_EXIT_INSN(),
4706 		},
4707 		.fixup_map_hash_48b = { 3 },
4708 		.errstr_unpriv = "R0 leaks addr",
4709 		.result_unpriv = REJECT,
4710 		.result = ACCEPT,
4711 	},
4712 	{
4713 		"valid map access into an array with a register",
4714 		.insns = {
4715 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4716 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4717 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4718 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4719 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4720 				     BPF_FUNC_map_lookup_elem),
4721 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4722 			BPF_MOV64_IMM(BPF_REG_1, 4),
4723 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4724 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4725 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4726 				   offsetof(struct test_val, foo)),
4727 			BPF_EXIT_INSN(),
4728 		},
4729 		.fixup_map_hash_48b = { 3 },
4730 		.errstr_unpriv = "R0 leaks addr",
4731 		.result_unpriv = REJECT,
4732 		.result = ACCEPT,
4733 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4734 	},
4735 	{
4736 		"valid map access into an array with a variable",
4737 		.insns = {
4738 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4739 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4740 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4741 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4742 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4743 				     BPF_FUNC_map_lookup_elem),
4744 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4745 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4746 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
4747 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4748 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4749 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4750 				   offsetof(struct test_val, foo)),
4751 			BPF_EXIT_INSN(),
4752 		},
4753 		.fixup_map_hash_48b = { 3 },
4754 		.errstr_unpriv = "R0 leaks addr",
4755 		.result_unpriv = REJECT,
4756 		.result = ACCEPT,
4757 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4758 	},
4759 	{
4760 		"valid map access into an array with a signed variable",
4761 		.insns = {
4762 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4763 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4764 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4765 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4766 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4767 				     BPF_FUNC_map_lookup_elem),
4768 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
4769 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4770 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
4771 			BPF_MOV32_IMM(BPF_REG_1, 0),
4772 			BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
4773 			BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
4774 			BPF_MOV32_IMM(BPF_REG_1, 0),
4775 			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4776 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4777 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4778 				   offsetof(struct test_val, foo)),
4779 			BPF_EXIT_INSN(),
4780 		},
4781 		.fixup_map_hash_48b = { 3 },
4782 		.errstr_unpriv = "R0 leaks addr",
4783 		.result_unpriv = REJECT,
4784 		.result = ACCEPT,
4785 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4786 	},
4787 	{
4788 		"invalid map access into an array with a constant",
4789 		.insns = {
4790 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4791 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4792 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4793 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4794 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4795 				     BPF_FUNC_map_lookup_elem),
4796 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4797 			BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
4798 				   offsetof(struct test_val, foo)),
4799 			BPF_EXIT_INSN(),
4800 		},
4801 		.fixup_map_hash_48b = { 3 },
4802 		.errstr = "invalid access to map value, value_size=48 off=48 size=8",
4803 		.result = REJECT,
4804 	},
4805 	{
4806 		"invalid map access into an array with a register",
4807 		.insns = {
4808 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4809 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4810 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4811 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4812 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4813 				     BPF_FUNC_map_lookup_elem),
4814 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4815 			BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
4816 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4817 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4818 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4819 				   offsetof(struct test_val, foo)),
4820 			BPF_EXIT_INSN(),
4821 		},
4822 		.fixup_map_hash_48b = { 3 },
4823 		.errstr = "R0 min value is outside of the array range",
4824 		.result = REJECT,
4825 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4826 	},
4827 	{
4828 		"invalid map access into an array with a variable",
4829 		.insns = {
4830 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4831 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4832 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4833 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4834 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4835 				     BPF_FUNC_map_lookup_elem),
4836 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4837 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4838 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4839 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4840 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4841 				   offsetof(struct test_val, foo)),
4842 			BPF_EXIT_INSN(),
4843 		},
4844 		.fixup_map_hash_48b = { 3 },
4845 		.errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
4846 		.result = REJECT,
4847 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4848 	},
4849 	{
4850 		"invalid map access into an array with no floor check",
4851 		.insns = {
4852 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4853 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4854 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4855 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4856 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4857 				     BPF_FUNC_map_lookup_elem),
4858 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4859 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
4860 			BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
4861 			BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
4862 			BPF_MOV32_IMM(BPF_REG_1, 0),
4863 			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4864 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4865 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4866 				   offsetof(struct test_val, foo)),
4867 			BPF_EXIT_INSN(),
4868 		},
4869 		.fixup_map_hash_48b = { 3 },
4870 		.errstr_unpriv = "R0 leaks addr",
4871 		.errstr = "R0 unbounded memory access",
4872 		.result_unpriv = REJECT,
4873 		.result = REJECT,
4874 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4875 	},
4876 	{
4877 		"invalid map access into an array with a invalid max check",
4878 		.insns = {
4879 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4880 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4881 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4882 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4883 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4884 				     BPF_FUNC_map_lookup_elem),
4885 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4886 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4887 			BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
4888 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
4889 			BPF_MOV32_IMM(BPF_REG_1, 0),
4890 			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
4891 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4892 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
4893 				   offsetof(struct test_val, foo)),
4894 			BPF_EXIT_INSN(),
4895 		},
4896 		.fixup_map_hash_48b = { 3 },
4897 		.errstr_unpriv = "R0 leaks addr",
4898 		.errstr = "invalid access to map value, value_size=48 off=44 size=8",
4899 		.result_unpriv = REJECT,
4900 		.result = REJECT,
4901 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4902 	},
4903 	{
4904 		"invalid map access into an array with a invalid max check",
4905 		.insns = {
4906 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4907 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4908 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4909 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4910 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4911 				     BPF_FUNC_map_lookup_elem),
4912 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
4913 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
4914 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4915 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4916 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4917 			BPF_LD_MAP_FD(BPF_REG_1, 0),
4918 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4919 				     BPF_FUNC_map_lookup_elem),
4920 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4921 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
4922 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
4923 				    offsetof(struct test_val, foo)),
4924 			BPF_EXIT_INSN(),
4925 		},
4926 		.fixup_map_hash_48b = { 3, 11 },
4927 		.errstr = "R0 pointer += pointer",
4928 		.result = REJECT,
4929 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4930 	},
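	/* CGROUP_SKB context access: these check which __sk_buff fields a
	 * cgroup/skb program may read or write; privileged loads of
	 * data/data_end are allowed for direct packet reads, while the
	 * unprivileged case rejects the ctx access at off=76 (skb->data).
	 */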
4931 	{
4932 		"direct packet read test#1 for CGROUP_SKB",
4933 		.insns = {
4934 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4935 				    offsetof(struct __sk_buff, data)),
4936 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4937 				    offsetof(struct __sk_buff, data_end)),
4938 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
4939 				    offsetof(struct __sk_buff, len)),
4940 			BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
4941 				    offsetof(struct __sk_buff, pkt_type)),
4942 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4943 				    offsetof(struct __sk_buff, mark)),
4944 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
4945 				    offsetof(struct __sk_buff, mark)),
4946 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4947 				    offsetof(struct __sk_buff, queue_mapping)),
4948 			BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
4949 				    offsetof(struct __sk_buff, protocol)),
4950 			BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
4951 				    offsetof(struct __sk_buff, vlan_present)),
4952 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4953 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4954 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4955 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4956 			BPF_MOV64_IMM(BPF_REG_0, 0),
4957 			BPF_EXIT_INSN(),
4958 		},
4959 		.result = ACCEPT,
4960 		.result_unpriv = REJECT,
4961 		.errstr_unpriv = "invalid bpf_context access off=76 size=4",
4962 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4963 	},
4964 	{
4965 		"direct packet read test#2 for CGROUP_SKB",
4966 		.insns = {
4967 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
4968 				    offsetof(struct __sk_buff, vlan_tci)),
4969 			BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
4970 				    offsetof(struct __sk_buff, vlan_proto)),
4971 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4972 				    offsetof(struct __sk_buff, priority)),
4973 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
4974 				    offsetof(struct __sk_buff, priority)),
4975 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4976 				    offsetof(struct __sk_buff,
4977 					     ingress_ifindex)),
4978 			BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
4979 				    offsetof(struct __sk_buff, tc_index)),
4980 			BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
4981 				    offsetof(struct __sk_buff, hash)),
4982 			BPF_MOV64_IMM(BPF_REG_0, 0),
4983 			BPF_EXIT_INSN(),
4984 		},
4985 		.result = ACCEPT,
4986 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
4987 	},
4988 	{
4989 		"direct packet read test#3 for CGROUP_SKB",
4990 		.insns = {
4991 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
4992 				    offsetof(struct __sk_buff, cb[0])),
4993 			BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
4994 				    offsetof(struct __sk_buff, cb[1])),
4995 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
4996 				    offsetof(struct __sk_buff, cb[2])),
4997 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
4998 				    offsetof(struct __sk_buff, cb[3])),
4999 			BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
5000 				    offsetof(struct __sk_buff, cb[4])),
5001 			BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
5002 				    offsetof(struct __sk_buff, napi_id)),
5003 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_4,
5004 				    offsetof(struct __sk_buff, cb[0])),
5005 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_5,
5006 				    offsetof(struct __sk_buff, cb[1])),
5007 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
5008 				    offsetof(struct __sk_buff, cb[2])),
5009 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_7,
5010 				    offsetof(struct __sk_buff, cb[3])),
5011 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_8,
5012 				    offsetof(struct __sk_buff, cb[4])),
5013 			BPF_MOV64_IMM(BPF_REG_0, 0),
5014 			BPF_EXIT_INSN(),
5015 		},
5016 		.result = ACCEPT,
5017 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5018 	},
5019 	{
5020 		"direct packet read test#4 for CGROUP_SKB",
5021 		.insns = {
5022 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5023 				    offsetof(struct __sk_buff, family)),
5024 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5025 				    offsetof(struct __sk_buff, remote_ip4)),
5026 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
5027 				    offsetof(struct __sk_buff, local_ip4)),
5028 			BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
5029 				    offsetof(struct __sk_buff, remote_ip6[0])),
5030 			BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
5031 				    offsetof(struct __sk_buff, remote_ip6[1])),
5032 			BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
5033 				    offsetof(struct __sk_buff, remote_ip6[2])),
5034 			BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
5035 				    offsetof(struct __sk_buff, remote_ip6[3])),
5036 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
5037 				    offsetof(struct __sk_buff, local_ip6[0])),
5038 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
5039 				    offsetof(struct __sk_buff, local_ip6[1])),
5040 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
5041 				    offsetof(struct __sk_buff, local_ip6[2])),
5042 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
5043 				    offsetof(struct __sk_buff, local_ip6[3])),
5044 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
5045 				    offsetof(struct __sk_buff, remote_port)),
5046 			BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
5047 				    offsetof(struct __sk_buff, local_port)),
5048 			BPF_MOV64_IMM(BPF_REG_0, 0),
5049 			BPF_EXIT_INSN(),
5050 		},
5051 		.result = ACCEPT,
5052 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5053 	},
5054 	{
5055 		"invalid access of tc_classid for CGROUP_SKB",
5056 		.insns = {
5057 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5058 				    offsetof(struct __sk_buff, tc_classid)),
5059 			BPF_MOV64_IMM(BPF_REG_0, 0),
5060 			BPF_EXIT_INSN(),
5061 		},
5062 		.result = REJECT,
5063 		.errstr = "invalid bpf_context access",
5064 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5065 	},
5066 	{
5067 		"invalid access of data_meta for CGROUP_SKB",
5068 		.insns = {
5069 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5070 				    offsetof(struct __sk_buff, data_meta)),
5071 			BPF_MOV64_IMM(BPF_REG_0, 0),
5072 			BPF_EXIT_INSN(),
5073 		},
5074 		.result = REJECT,
5075 		.errstr = "invalid bpf_context access",
5076 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5077 	},
5078 	{
5079 		"invalid access of flow_keys for CGROUP_SKB",
5080 		.insns = {
5081 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5082 				    offsetof(struct __sk_buff, flow_keys)),
5083 			BPF_MOV64_IMM(BPF_REG_0, 0),
5084 			BPF_EXIT_INSN(),
5085 		},
5086 		.result = REJECT,
5087 		.errstr = "invalid bpf_context access",
5088 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5089 	},
5090 	{
5091 		"invalid write access to napi_id for CGROUP_SKB",
5092 		.insns = {
5093 			BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
5094 				    offsetof(struct __sk_buff, napi_id)),
5095 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_9,
5096 				    offsetof(struct __sk_buff, napi_id)),
5097 			BPF_MOV64_IMM(BPF_REG_0, 0),
5098 			BPF_EXIT_INSN(),
5099 		},
5100 		.result = REJECT,
5101 		.errstr = "invalid bpf_context access",
5102 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5103 	},
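	/*
	 * bpf_get_local_storage() with a cgroup storage map.  The invalid
	 * variants exercise: a hash map passed instead of a cgroup storage
	 * map (1), a bogus map fd (2), out-of-bounds positive and negative
	 * value offsets (3, 4), and non-zero or non-constant flags in R2
	 * (5, 6; the latter also leaks an address for unprivileged users).
	 */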
5104 	{
5105 		"valid cgroup storage access",
5106 		.insns = {
5107 			BPF_MOV64_IMM(BPF_REG_2, 0),
5108 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5109 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5110 				     BPF_FUNC_get_local_storage),
5111 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5112 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5113 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5114 			BPF_EXIT_INSN(),
5115 		},
5116 		.fixup_cgroup_storage = { 1 },
5117 		.result = ACCEPT,
5118 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5119 	},
5120 	{
5121 		"invalid cgroup storage access 1",
5122 		.insns = {
5123 			BPF_MOV64_IMM(BPF_REG_2, 0),
5124 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5125 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5126 				     BPF_FUNC_get_local_storage),
5127 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5128 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5129 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5130 			BPF_EXIT_INSN(),
5131 		},
5132 		.fixup_map_hash_8b = { 1 },
5133 		.result = REJECT,
5134 		.errstr = "cannot pass map_type 1 into func bpf_get_local_storage",
5135 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5136 	},
5137 	{
5138 		"invalid cgroup storage access 2",
5139 		.insns = {
5140 			BPF_MOV64_IMM(BPF_REG_2, 0),
5141 			BPF_LD_MAP_FD(BPF_REG_1, 1),
5142 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5143 				     BPF_FUNC_get_local_storage),
5144 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5145 			BPF_EXIT_INSN(),
5146 		},
5147 		.result = REJECT,
5148 		.errstr = "fd 1 is not pointing to valid bpf_map",
5149 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5150 	},
5151 	{
5152 		"invalid cgroup storage access 3",
5153 		.insns = {
5154 			BPF_MOV64_IMM(BPF_REG_2, 0),
5155 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5156 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5157 				     BPF_FUNC_get_local_storage),
5158 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256),
5159 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5160 			BPF_MOV64_IMM(BPF_REG_0, 0),
5161 			BPF_EXIT_INSN(),
5162 		},
5163 		.fixup_cgroup_storage = { 1 },
5164 		.result = REJECT,
5165 		.errstr = "invalid access to map value, value_size=64 off=256 size=4",
5166 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5167 	},
5168 	{
5169 		"invalid cgroup storage access 4",
5170 		.insns = {
5171 			BPF_MOV64_IMM(BPF_REG_2, 0),
5172 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5173 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5174 				     BPF_FUNC_get_local_storage),
5175 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2),
5176 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5177 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5178 			BPF_EXIT_INSN(),
5179 		},
5180 		.fixup_cgroup_storage = { 1 },
5181 		.result = REJECT,
5182 		.errstr = "invalid access to map value, value_size=64 off=-2 size=4",
5183 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5184 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5185 	},
5186 	{
5187 		"invalid cgroup storage access 5",
5188 		.insns = {
5189 			BPF_MOV64_IMM(BPF_REG_2, 7),
5190 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5191 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5192 				     BPF_FUNC_get_local_storage),
5193 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5194 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5195 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5196 			BPF_EXIT_INSN(),
5197 		},
5198 		.fixup_cgroup_storage = { 1 },
5199 		.result = REJECT,
5200 		.errstr = "get_local_storage() doesn't support non-zero flags",
5201 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5202 	},
5203 	{
5204 		"invalid cgroup storage access 6",
5205 		.insns = {
5206 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
5207 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5208 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5209 				     BPF_FUNC_get_local_storage),
5210 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5211 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5212 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5213 			BPF_EXIT_INSN(),
5214 		},
5215 		.fixup_cgroup_storage = { 1 },
5216 		.result = REJECT,
5217 		.errstr = "get_local_storage() doesn't support non-zero flags",
5218 		.errstr_unpriv = "R2 leaks addr into helper function",
5219 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5220 	},
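	/*
	 * The same set of checks, run against a per-cpu cgroup storage map.
	 */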
5221 	{
5222 		"valid per-cpu cgroup storage access",
5223 		.insns = {
5224 			BPF_MOV64_IMM(BPF_REG_2, 0),
5225 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5226 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5227 				     BPF_FUNC_get_local_storage),
5228 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5229 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5230 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5231 			BPF_EXIT_INSN(),
5232 		},
5233 		.fixup_percpu_cgroup_storage = { 1 },
5234 		.result = ACCEPT,
5235 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5236 	},
5237 	{
5238 		"invalid per-cpu cgroup storage access 1",
5239 		.insns = {
5240 			BPF_MOV64_IMM(BPF_REG_2, 0),
5241 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5242 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5243 				     BPF_FUNC_get_local_storage),
5244 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5245 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5246 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5247 			BPF_EXIT_INSN(),
5248 		},
5249 		.fixup_map_hash_8b = { 1 },
5250 		.result = REJECT,
5251 		.errstr = "cannot pass map_type 1 into func bpf_get_local_storage",
5252 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5253 	},
5254 	{
5255 		"invalid per-cpu cgroup storage access 2",
5256 		.insns = {
5257 			BPF_MOV64_IMM(BPF_REG_2, 0),
5258 			BPF_LD_MAP_FD(BPF_REG_1, 1),
5259 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5260 				     BPF_FUNC_get_local_storage),
5261 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5262 			BPF_EXIT_INSN(),
5263 		},
5264 		.result = REJECT,
5265 		.errstr = "fd 1 is not pointing to valid bpf_map",
5266 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5267 	},
5268 	{
5269 		"invalid per-cpu cgroup storage access 3",
5270 		.insns = {
5271 			BPF_MOV64_IMM(BPF_REG_2, 0),
5272 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5273 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5274 				     BPF_FUNC_get_local_storage),
5275 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256),
5276 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5277 			BPF_MOV64_IMM(BPF_REG_0, 0),
5278 			BPF_EXIT_INSN(),
5279 		},
5280 		.fixup_percpu_cgroup_storage = { 1 },
5281 		.result = REJECT,
5282 		.errstr = "invalid access to map value, value_size=64 off=256 size=4",
5283 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5284 	},
5285 	{
5286 		"invalid per-cpu cgroup storage access 4",
5287 		.insns = {
5288 			BPF_MOV64_IMM(BPF_REG_2, 0),
5289 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5290 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5291 				     BPF_FUNC_get_local_storage),
5292 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2),
5293 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5294 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5295 			BPF_EXIT_INSN(),
5296 		},
5297 		.fixup_percpu_cgroup_storage = { 1 },
5298 		.result = REJECT,
5299 		.errstr = "invalid access to map value, value_size=64 off=-2 size=4",
5300 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5301 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5302 	},
5303 	{
5304 		"invalid per-cpu cgroup storage access 5",
5305 		.insns = {
5306 			BPF_MOV64_IMM(BPF_REG_2, 7),
5307 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5308 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5309 				     BPF_FUNC_get_local_storage),
5310 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5311 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5312 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5313 			BPF_EXIT_INSN(),
5314 		},
5315 		.fixup_percpu_cgroup_storage = { 1 },
5316 		.result = REJECT,
5317 		.errstr = "get_local_storage() doesn't support non-zero flags",
5318 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5319 	},
5320 	{
5321 		"invalid per-cpu cgroup storage access 6",
5322 		.insns = {
5323 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
5324 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5325 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5326 				     BPF_FUNC_get_local_storage),
5327 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5328 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5329 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
5330 			BPF_EXIT_INSN(),
5331 		},
5332 		.fixup_percpu_cgroup_storage = { 1 },
5333 		.result = REJECT,
5334 		.errstr = "get_local_storage() doesn't support non-zero flags",
5335 		.errstr_unpriv = "R2 leaks addr into helper function",
5336 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5337 	},
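	/*
	 * skb->tstamp from CGROUP_SKB: reads are expected to be accepted,
	 * while writes are accepted only for privileged users; the
	 * unprivileged run expects a context-access error at offset 152.
	 */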
5338 	{
5339 		"write tstamp from CGROUP_SKB",
5340 		.insns = {
5341 			BPF_MOV64_IMM(BPF_REG_0, 0),
5342 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
5343 				    offsetof(struct __sk_buff, tstamp)),
5344 			BPF_MOV64_IMM(BPF_REG_0, 0),
5345 			BPF_EXIT_INSN(),
5346 		},
5347 		.result = ACCEPT,
5348 		.result_unpriv = REJECT,
5349 		.errstr_unpriv = "invalid bpf_context access off=152 size=8",
5350 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5351 	},
5352 	{
5353 		"read tstamp from CGROUP_SKB",
5354 		.insns = {
5355 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
5356 				    offsetof(struct __sk_buff, tstamp)),
5357 			BPF_MOV64_IMM(BPF_REG_0, 0),
5358 			BPF_EXIT_INSN(),
5359 		},
5360 		.result = ACCEPT,
5361 		.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
5362 	},
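	/*
	 * PTR_TO_MAP_VALUE_OR_NULL handling: a NULL check on R0 must also
	 * mark aliases of the lookup result (R4) as usable, ALU ops on the
	 * still-unchecked pointer are expected to be rejected, and since
	 * helper calls clobber R1-R5, an alias that is not refreshed across
	 * a second map_lookup_elem() call becomes unreadable (R4 !read_ok).
	 */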
5363 	{
5364 		"multiple registers share map_lookup_elem result",
5365 		.insns = {
5366 			BPF_MOV64_IMM(BPF_REG_1, 10),
5367 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5368 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5369 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5370 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5371 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5372 				     BPF_FUNC_map_lookup_elem),
5373 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5374 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5375 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5376 			BPF_EXIT_INSN(),
5377 		},
5378 		.fixup_map_hash_8b = { 4 },
5379 		.result = ACCEPT,
5380 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
5381 	},
5382 	{
5383 		"alu ops on ptr_to_map_value_or_null, 1",
5384 		.insns = {
5385 			BPF_MOV64_IMM(BPF_REG_1, 10),
5386 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5387 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5388 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5389 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5390 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5391 				     BPF_FUNC_map_lookup_elem),
5392 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5393 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2),
5394 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
5395 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5396 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5397 			BPF_EXIT_INSN(),
5398 		},
5399 		.fixup_map_hash_8b = { 4 },
5400 		.errstr = "R4 pointer arithmetic on map_value_or_null",
5401 		.result = REJECT,
5402 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
5403 	},
5404 	{
5405 		"alu ops on ptr_to_map_value_or_null, 2",
5406 		.insns = {
5407 			BPF_MOV64_IMM(BPF_REG_1, 10),
5408 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5409 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5410 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5411 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5412 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5413 				     BPF_FUNC_map_lookup_elem),
5414 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5415 			BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1),
5416 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5417 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5418 			BPF_EXIT_INSN(),
5419 		},
5420 		.fixup_map_hash_8b = { 4 },
5421 		.errstr = "R4 pointer arithmetic on map_value_or_null",
5422 		.result = REJECT,
5423 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
5424 	},
5425 	{
5426 		"alu ops on ptr_to_map_value_or_null, 3",
5427 		.insns = {
5428 			BPF_MOV64_IMM(BPF_REG_1, 10),
5429 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5430 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5431 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5432 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5433 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5434 				     BPF_FUNC_map_lookup_elem),
5435 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5436 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1),
5437 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5438 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5439 			BPF_EXIT_INSN(),
5440 		},
5441 		.fixup_map_hash_8b = { 4 },
5442 		.errstr = "R4 pointer arithmetic on map_value_or_null",
5443 		.result = REJECT,
5444 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
5445 	},
5446 	{
5447 		"invalid memory access with multiple map_lookup_elem calls",
5448 		.insns = {
5449 			BPF_MOV64_IMM(BPF_REG_1, 10),
5450 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5451 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5452 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5453 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5454 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
5455 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
5456 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5457 				     BPF_FUNC_map_lookup_elem),
5458 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5459 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
5460 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
5461 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5462 				     BPF_FUNC_map_lookup_elem),
5463 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5464 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5465 			BPF_EXIT_INSN(),
5466 		},
5467 		.fixup_map_hash_8b = { 4 },
5468 		.result = REJECT,
5469 		.errstr = "R4 !read_ok",
5470 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
5471 	},
5472 	{
5473 		"valid indirect map_lookup_elem access with 2nd lookup in branch",
5474 		.insns = {
5475 			BPF_MOV64_IMM(BPF_REG_1, 10),
5476 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
5477 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5478 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5479 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5480 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
5481 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
5482 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5483 				     BPF_FUNC_map_lookup_elem),
5484 			BPF_MOV64_IMM(BPF_REG_2, 10),
5485 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
5486 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
5487 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
5488 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5489 				     BPF_FUNC_map_lookup_elem),
5490 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
5491 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5492 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
5493 			BPF_EXIT_INSN(),
5494 		},
5495 		.fixup_map_hash_8b = { 4 },
5496 		.result = ACCEPT,
5497 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
5498 	},
5499 	{
5500 		"invalid map access from else condition",
5501 		.insns = {
5502 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5503 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5504 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5505 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5506 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
5507 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5508 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5509 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1),
5510 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
5511 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
5512 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
5513 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
5514 			BPF_EXIT_INSN(),
5515 		},
5516 		.fixup_map_hash_48b = { 3 },
5517 		.errstr = "R0 unbounded memory access",
5518 		.result = REJECT,
5519 		.errstr_unpriv = "R0 leaks addr",
5520 		.result_unpriv = REJECT,
5521 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5522 	},
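	/*
	 * Constant tracking through BPF_OR: R2 remains a known constant
	 * after OR-ing in an immediate or another constant register, so
	 * bpf_probe_read() into the 48-byte stack buffer is accepted when
	 * the resulting size fits (34|13 = 47) and rejected when it does
	 * not (34|24 = 58).
	 */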
5523 	{
5524 		"constant register |= constant should keep constant type",
5525 		.insns = {
5526 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5527 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5528 			BPF_MOV64_IMM(BPF_REG_2, 34),
5529 			BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
5530 			BPF_MOV64_IMM(BPF_REG_3, 0),
5531 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5532 			BPF_EXIT_INSN(),
5533 		},
5534 		.result = ACCEPT,
5535 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5536 	},
5537 	{
5538 		"constant register |= constant should not bypass stack boundary checks",
5539 		.insns = {
5540 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5541 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5542 			BPF_MOV64_IMM(BPF_REG_2, 34),
5543 			BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
5544 			BPF_MOV64_IMM(BPF_REG_3, 0),
5545 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5546 			BPF_EXIT_INSN(),
5547 		},
5548 		.errstr = "invalid stack type R1 off=-48 access_size=58",
5549 		.result = REJECT,
5550 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5551 	},
5552 	{
5553 		"constant register |= constant register should keep constant type",
5554 		.insns = {
5555 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5556 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5557 			BPF_MOV64_IMM(BPF_REG_2, 34),
5558 			BPF_MOV64_IMM(BPF_REG_4, 13),
5559 			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
5560 			BPF_MOV64_IMM(BPF_REG_3, 0),
5561 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5562 			BPF_EXIT_INSN(),
5563 		},
5564 		.result = ACCEPT,
5565 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5566 	},
5567 	{
5568 		"constant register |= constant register should not bypass stack boundary checks",
5569 		.insns = {
5570 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5571 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
5572 			BPF_MOV64_IMM(BPF_REG_2, 34),
5573 			BPF_MOV64_IMM(BPF_REG_4, 24),
5574 			BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
5575 			BPF_MOV64_IMM(BPF_REG_3, 0),
5576 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5577 			BPF_EXIT_INSN(),
5578 		},
5579 		.errstr = "invalid stack type R1 off=-48 access_size=58",
5580 		.result = REJECT,
5581 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5582 	},
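	/*
	 * Direct packet access for the LWT program types: reads are expected
	 * to be allowed for LWT_IN, LWT_OUT and LWT_XMIT, while writes are
	 * only allowed for LWT_XMIT ("cannot write into packet" otherwise).
	 */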
5583 	{
5584 		"invalid direct packet write for LWT_IN",
5585 		.insns = {
5586 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5587 				    offsetof(struct __sk_buff, data)),
5588 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5589 				    offsetof(struct __sk_buff, data_end)),
5590 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5591 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5592 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5593 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
5594 			BPF_MOV64_IMM(BPF_REG_0, 0),
5595 			BPF_EXIT_INSN(),
5596 		},
5597 		.errstr = "cannot write into packet",
5598 		.result = REJECT,
5599 		.prog_type = BPF_PROG_TYPE_LWT_IN,
5600 	},
5601 	{
5602 		"invalid direct packet write for LWT_OUT",
5603 		.insns = {
5604 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5605 				    offsetof(struct __sk_buff, data)),
5606 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5607 				    offsetof(struct __sk_buff, data_end)),
5608 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5609 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5610 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5611 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
5612 			BPF_MOV64_IMM(BPF_REG_0, 0),
5613 			BPF_EXIT_INSN(),
5614 		},
5615 		.errstr = "cannot write into packet",
5616 		.result = REJECT,
5617 		.prog_type = BPF_PROG_TYPE_LWT_OUT,
5618 	},
5619 	{
5620 		"direct packet write for LWT_XMIT",
5621 		.insns = {
5622 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5623 				    offsetof(struct __sk_buff, data)),
5624 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5625 				    offsetof(struct __sk_buff, data_end)),
5626 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5627 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5628 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5629 			BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
5630 			BPF_MOV64_IMM(BPF_REG_0, 0),
5631 			BPF_EXIT_INSN(),
5632 		},
5633 		.result = ACCEPT,
5634 		.prog_type = BPF_PROG_TYPE_LWT_XMIT,
5635 	},
5636 	{
5637 		"direct packet read for LWT_IN",
5638 		.insns = {
5639 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5640 				    offsetof(struct __sk_buff, data)),
5641 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5642 				    offsetof(struct __sk_buff, data_end)),
5643 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5644 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5645 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5646 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
5647 			BPF_MOV64_IMM(BPF_REG_0, 0),
5648 			BPF_EXIT_INSN(),
5649 		},
5650 		.result = ACCEPT,
5651 		.prog_type = BPF_PROG_TYPE_LWT_IN,
5652 	},
5653 	{
5654 		"direct packet read for LWT_OUT",
5655 		.insns = {
5656 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5657 				    offsetof(struct __sk_buff, data)),
5658 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5659 				    offsetof(struct __sk_buff, data_end)),
5660 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5661 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5662 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5663 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
5664 			BPF_MOV64_IMM(BPF_REG_0, 0),
5665 			BPF_EXIT_INSN(),
5666 		},
5667 		.result = ACCEPT,
5668 		.prog_type = BPF_PROG_TYPE_LWT_OUT,
5669 	},
5670 	{
5671 		"direct packet read for LWT_XMIT",
5672 		.insns = {
5673 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5674 				    offsetof(struct __sk_buff, data)),
5675 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5676 				    offsetof(struct __sk_buff, data_end)),
5677 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5678 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5679 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
5680 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
5681 			BPF_MOV64_IMM(BPF_REG_0, 0),
5682 			BPF_EXIT_INSN(),
5683 		},
5684 		.result = ACCEPT,
5685 		.prog_type = BPF_PROG_TYPE_LWT_XMIT,
5686 	},
5687 	{
5688 		"overlapping checks for direct packet access",
5689 		.insns = {
5690 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
5691 				    offsetof(struct __sk_buff, data)),
5692 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
5693 				    offsetof(struct __sk_buff, data_end)),
5694 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5695 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
5696 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
5697 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
5698 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
5699 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
5700 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
5701 			BPF_MOV64_IMM(BPF_REG_0, 0),
5702 			BPF_EXIT_INSN(),
5703 		},
5704 		.result = ACCEPT,
5705 		.prog_type = BPF_PROG_TYPE_LWT_XMIT,
5706 	},
5707 	{
5708 		"make headroom for LWT_XMIT",
5709 		.insns = {
5710 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5711 			BPF_MOV64_IMM(BPF_REG_2, 34),
5712 			BPF_MOV64_IMM(BPF_REG_3, 0),
5713 			BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
5714 			/* split for s390 to succeed */
5715 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
5716 			BPF_MOV64_IMM(BPF_REG_2, 42),
5717 			BPF_MOV64_IMM(BPF_REG_3, 0),
5718 			BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
5719 			BPF_MOV64_IMM(BPF_REG_0, 0),
5720 			BPF_EXIT_INSN(),
5721 		},
5722 		.result = ACCEPT,
5723 		.prog_type = BPF_PROG_TYPE_LWT_XMIT,
5724 	},
5725 	{
5726 		"invalid access of tc_classid for LWT_IN",
5727 		.insns = {
5728 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5729 				    offsetof(struct __sk_buff, tc_classid)),
5730 			BPF_EXIT_INSN(),
5731 		},
5732 		.result = REJECT,
5733 		.errstr = "invalid bpf_context access",
5734 	},
5735 	{
5736 		"invalid access of tc_classid for LWT_OUT",
5737 		.insns = {
5738 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5739 				    offsetof(struct __sk_buff, tc_classid)),
5740 			BPF_EXIT_INSN(),
5741 		},
5742 		.result = REJECT,
5743 		.errstr = "invalid bpf_context access",
5744 	},
5745 	{
5746 		"invalid access of tc_classid for LWT_XMIT",
5747 		.insns = {
5748 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5749 				    offsetof(struct __sk_buff, tc_classid)),
5750 			BPF_EXIT_INSN(),
5751 		},
5752 		.result = REJECT,
5753 		.errstr = "invalid bpf_context access",
5754 	},
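	/*
	 * Pointer-leak checks: storing a map fd or stack/ctx pointer into
	 * the context or a map value.  XADD into the context is rejected
	 * outright; plain stores into ctx and XADD into a map value are
	 * accepted for root but flagged as address leaks when run
	 * unprivileged.
	 */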
5755 	{
5756 		"leak pointer into ctx 1",
5757 		.insns = {
5758 			BPF_MOV64_IMM(BPF_REG_0, 0),
5759 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
5760 				    offsetof(struct __sk_buff, cb[0])),
5761 			BPF_LD_MAP_FD(BPF_REG_2, 0),
5762 			BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2,
5763 				      offsetof(struct __sk_buff, cb[0])),
5764 			BPF_EXIT_INSN(),
5765 		},
5766 		.fixup_map_hash_8b = { 2 },
5767 		.errstr_unpriv = "R2 leaks addr into mem",
5768 		.result_unpriv = REJECT,
5769 		.result = REJECT,
5770 		.errstr = "BPF_XADD stores into R1 ctx is not allowed",
5771 	},
5772 	{
5773 		"leak pointer into ctx 2",
5774 		.insns = {
5775 			BPF_MOV64_IMM(BPF_REG_0, 0),
5776 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
5777 				    offsetof(struct __sk_buff, cb[0])),
5778 			BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10,
5779 				      offsetof(struct __sk_buff, cb[0])),
5780 			BPF_EXIT_INSN(),
5781 		},
5782 		.errstr_unpriv = "R10 leaks addr into mem",
5783 		.result_unpriv = REJECT,
5784 		.result = REJECT,
5785 		.errstr = "BPF_XADD stores into R1 ctx is not allowed",
5786 	},
5787 	{
5788 		"leak pointer into ctx 3",
5789 		.insns = {
5790 			BPF_MOV64_IMM(BPF_REG_0, 0),
5791 			BPF_LD_MAP_FD(BPF_REG_2, 0),
5792 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2,
5793 				      offsetof(struct __sk_buff, cb[0])),
5794 			BPF_EXIT_INSN(),
5795 		},
5796 		.fixup_map_hash_8b = { 1 },
5797 		.errstr_unpriv = "R2 leaks addr into ctx",
5798 		.result_unpriv = REJECT,
5799 		.result = ACCEPT,
5800 	},
5801 	{
5802 		"leak pointer into map val",
5803 		.insns = {
5804 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5805 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5806 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5807 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5808 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5809 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5810 				     BPF_FUNC_map_lookup_elem),
5811 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
5812 			BPF_MOV64_IMM(BPF_REG_3, 0),
5813 			BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
5814 			BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
5815 			BPF_MOV64_IMM(BPF_REG_0, 0),
5816 			BPF_EXIT_INSN(),
5817 		},
5818 		.fixup_map_hash_8b = { 4 },
5819 		.errstr_unpriv = "R6 leaks addr into mem",
5820 		.result_unpriv = REJECT,
5821 		.result = ACCEPT,
5822 	},
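	/*
	 * Passing a map value pointer as a helper memory argument: the
	 * explicit size must stay within the 48-byte value.  Full and
	 * partial ranges are accepted; zero, out-of-bound and negative
	 * sizes are expected to be rejected.
	 */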
5823 	{
5824 		"helper access to map: full range",
5825 		.insns = {
5826 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5827 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5828 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5829 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5830 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5831 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5832 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5833 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5834 			BPF_MOV64_IMM(BPF_REG_3, 0),
5835 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5836 			BPF_EXIT_INSN(),
5837 		},
5838 		.fixup_map_hash_48b = { 3 },
5839 		.result = ACCEPT,
5840 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5841 	},
5842 	{
5843 		"helper access to map: partial range",
5844 		.insns = {
5845 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5846 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5847 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5848 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5849 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5850 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5851 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5852 			BPF_MOV64_IMM(BPF_REG_2, 8),
5853 			BPF_MOV64_IMM(BPF_REG_3, 0),
5854 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5855 			BPF_EXIT_INSN(),
5856 		},
5857 		.fixup_map_hash_48b = { 3 },
5858 		.result = ACCEPT,
5859 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5860 	},
5861 	{
5862 		"helper access to map: empty range",
5863 		.insns = {
5864 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5865 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5866 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5867 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5868 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5869 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
5870 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5871 			BPF_MOV64_IMM(BPF_REG_2, 0),
5872 			BPF_EMIT_CALL(BPF_FUNC_trace_printk),
5873 			BPF_EXIT_INSN(),
5874 		},
5875 		.fixup_map_hash_48b = { 3 },
5876 		.errstr = "invalid access to map value, value_size=48 off=0 size=0",
5877 		.result = REJECT,
5878 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5879 	},
5880 	{
5881 		"helper access to map: out-of-bound range",
5882 		.insns = {
5883 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5884 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5885 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5886 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5887 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5888 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5889 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5890 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
5891 			BPF_MOV64_IMM(BPF_REG_3, 0),
5892 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5893 			BPF_EXIT_INSN(),
5894 		},
5895 		.fixup_map_hash_48b = { 3 },
5896 		.errstr = "invalid access to map value, value_size=48 off=0 size=56",
5897 		.result = REJECT,
5898 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5899 	},
5900 	{
5901 		"helper access to map: negative range",
5902 		.insns = {
5903 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5904 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5905 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5906 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5907 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5908 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5909 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5910 			BPF_MOV64_IMM(BPF_REG_2, -8),
5911 			BPF_MOV64_IMM(BPF_REG_3, 0),
5912 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5913 			BPF_EXIT_INSN(),
5914 		},
5915 		.fixup_map_hash_48b = { 3 },
5916 		.errstr = "R2 min value is negative",
5917 		.result = REJECT,
5918 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5919 	},
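	/*
	 * As above, but with the map value pointer first advanced by the
	 * constant offsetof(struct test_val, foo); the expected error
	 * strings reflect the adjusted offset (off=4).
	 */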
5920 	{
5921 		"helper access to adjusted map (via const imm): full range",
5922 		.insns = {
5923 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5924 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5925 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5926 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5927 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5928 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5929 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5930 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5931 				offsetof(struct test_val, foo)),
5932 			BPF_MOV64_IMM(BPF_REG_2,
5933 				sizeof(struct test_val) -
5934 				offsetof(struct test_val, foo)),
5935 			BPF_MOV64_IMM(BPF_REG_3, 0),
5936 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5937 			BPF_EXIT_INSN(),
5938 		},
5939 		.fixup_map_hash_48b = { 3 },
5940 		.result = ACCEPT,
5941 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5942 	},
5943 	{
5944 		"helper access to adjusted map (via const imm): partial range",
5945 		.insns = {
5946 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5947 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5948 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5949 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5950 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5951 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5952 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5953 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5954 				offsetof(struct test_val, foo)),
5955 			BPF_MOV64_IMM(BPF_REG_2, 8),
5956 			BPF_MOV64_IMM(BPF_REG_3, 0),
5957 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
5958 			BPF_EXIT_INSN(),
5959 		},
5960 		.fixup_map_hash_48b = { 3 },
5961 		.result = ACCEPT,
5962 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5963 	},
5964 	{
5965 		"helper access to adjusted map (via const imm): empty range",
5966 		.insns = {
5967 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5968 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5969 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5970 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5971 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5972 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5973 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5974 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5975 				offsetof(struct test_val, foo)),
5976 			BPF_MOV64_IMM(BPF_REG_2, 0),
5977 			BPF_EMIT_CALL(BPF_FUNC_trace_printk),
5978 			BPF_EXIT_INSN(),
5979 		},
5980 		.fixup_map_hash_48b = { 3 },
5981 		.errstr = "invalid access to map value, value_size=48 off=4 size=0",
5982 		.result = REJECT,
5983 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
5984 	},
5985 	{
5986 		"helper access to adjusted map (via const imm): out-of-bound range",
5987 		.insns = {
5988 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5989 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5990 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5991 			BPF_LD_MAP_FD(BPF_REG_1, 0),
5992 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5993 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5994 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5995 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
5996 				offsetof(struct test_val, foo)),
5997 			BPF_MOV64_IMM(BPF_REG_2,
5998 				sizeof(struct test_val) -
5999 				offsetof(struct test_val, foo) + 8),
6000 			BPF_MOV64_IMM(BPF_REG_3, 0),
6001 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6002 			BPF_EXIT_INSN(),
6003 		},
6004 		.fixup_map_hash_48b = { 3 },
6005 		.errstr = "invalid access to map value, value_size=48 off=4 size=52",
6006 		.result = REJECT,
6007 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6008 	},
6009 	{
6010 		"helper access to adjusted map (via const imm): negative range (> adjustment)",
6011 		.insns = {
6012 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6013 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6014 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6015 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6016 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6017 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6018 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6019 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
6020 				offsetof(struct test_val, foo)),
6021 			BPF_MOV64_IMM(BPF_REG_2, -8),
6022 			BPF_MOV64_IMM(BPF_REG_3, 0),
6023 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6024 			BPF_EXIT_INSN(),
6025 		},
6026 		.fixup_map_hash_48b = { 3 },
6027 		.errstr = "R2 min value is negative",
6028 		.result = REJECT,
6029 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6030 	},
6031 	{
6032 		"helper access to adjusted map (via const imm): negative range (< adjustment)",
6033 		.insns = {
6034 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6035 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6036 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6037 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6038 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6039 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6040 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6041 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
6042 				offsetof(struct test_val, foo)),
6043 			BPF_MOV64_IMM(BPF_REG_2, -1),
6044 			BPF_MOV64_IMM(BPF_REG_3, 0),
6045 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6046 			BPF_EXIT_INSN(),
6047 		},
6048 		.fixup_map_hash_48b = { 3 },
6049 		.errstr = "R2 min value is negative",
6050 		.result = REJECT,
6051 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6052 	},
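	/* The same adjustment, with the constant offset added via a register. */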
6053 	{
6054 		"helper access to adjusted map (via const reg): full range",
6055 		.insns = {
6056 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6057 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6058 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6059 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6060 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6061 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6062 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6063 			BPF_MOV64_IMM(BPF_REG_3,
6064 				offsetof(struct test_val, foo)),
6065 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6066 			BPF_MOV64_IMM(BPF_REG_2,
6067 				sizeof(struct test_val) -
6068 				offsetof(struct test_val, foo)),
6069 			BPF_MOV64_IMM(BPF_REG_3, 0),
6070 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6071 			BPF_EXIT_INSN(),
6072 		},
6073 		.fixup_map_hash_48b = { 3 },
6074 		.result = ACCEPT,
6075 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6076 	},
6077 	{
6078 		"helper access to adjusted map (via const reg): partial range",
6079 		.insns = {
6080 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6081 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6082 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6083 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6084 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6085 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6086 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6087 			BPF_MOV64_IMM(BPF_REG_3,
6088 				offsetof(struct test_val, foo)),
6089 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6090 			BPF_MOV64_IMM(BPF_REG_2, 8),
6091 			BPF_MOV64_IMM(BPF_REG_3, 0),
6092 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6093 			BPF_EXIT_INSN(),
6094 		},
6095 		.fixup_map_hash_48b = { 3 },
6096 		.result = ACCEPT,
6097 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6098 	},
6099 	{
6100 		"helper access to adjusted map (via const reg): empty range",
6101 		.insns = {
6102 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6103 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6104 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6105 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6106 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6107 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6108 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6109 			BPF_MOV64_IMM(BPF_REG_3, 0),
6110 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6111 			BPF_MOV64_IMM(BPF_REG_2, 0),
6112 			BPF_EMIT_CALL(BPF_FUNC_trace_printk),
6113 			BPF_EXIT_INSN(),
6114 		},
6115 		.fixup_map_hash_48b = { 3 },
6116 		.errstr = "R1 min value is outside of the array range",
6117 		.result = REJECT,
6118 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6119 	},
6120 	{
6121 		"helper access to adjusted map (via const reg): out-of-bound range",
6122 		.insns = {
6123 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6124 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6125 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6126 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6127 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6128 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6129 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6130 			BPF_MOV64_IMM(BPF_REG_3,
6131 				offsetof(struct test_val, foo)),
6132 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6133 			BPF_MOV64_IMM(BPF_REG_2,
6134 				sizeof(struct test_val) -
6135 				offsetof(struct test_val, foo) + 8),
6136 			BPF_MOV64_IMM(BPF_REG_3, 0),
6137 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6138 			BPF_EXIT_INSN(),
6139 		},
6140 		.fixup_map_hash_48b = { 3 },
6141 		.errstr = "invalid access to map value, value_size=48 off=4 size=52",
6142 		.result = REJECT,
6143 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6144 	},
6145 	{
6146 		"helper access to adjusted map (via const reg): negative range (> adjustment)",
6147 		.insns = {
6148 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6149 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6150 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6151 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6152 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6153 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6154 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6155 			BPF_MOV64_IMM(BPF_REG_3,
6156 				offsetof(struct test_val, foo)),
6157 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6158 			BPF_MOV64_IMM(BPF_REG_2, -8),
6159 			BPF_MOV64_IMM(BPF_REG_3, 0),
6160 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6161 			BPF_EXIT_INSN(),
6162 		},
6163 		.fixup_map_hash_48b = { 3 },
6164 		.errstr = "R2 min value is negative",
6165 		.result = REJECT,
6166 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6167 	},
6168 	{
6169 		"helper access to adjusted map (via const reg): negative range (< adjustment)",
6170 		.insns = {
6171 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6172 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6173 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6174 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6175 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6176 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6177 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6178 			BPF_MOV64_IMM(BPF_REG_3,
6179 				offsetof(struct test_val, foo)),
6180 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6181 			BPF_MOV64_IMM(BPF_REG_2, -1),
6182 			BPF_MOV64_IMM(BPF_REG_3, 0),
6183 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6184 			BPF_EXIT_INSN(),
6185 		},
6186 		.fixup_map_hash_48b = { 3 },
6187 		.errstr = "R2 min value is negative",
6188 		.result = REJECT,
6189 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6190 	},
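	/*
	 * Variable adjustment: the offset is loaded from the map value and
	 * must be bounded by a preceding JGT check before being added; a
	 * missing bound or a size exceeding the remaining space is expected
	 * to be rejected.
	 */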
6191 	{
6192 		"helper access to adjusted map (via variable): full range",
6193 		.insns = {
6194 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6195 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6196 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6197 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6198 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6199 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6200 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6201 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6202 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6203 				offsetof(struct test_val, foo), 4),
6204 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6205 			BPF_MOV64_IMM(BPF_REG_2,
6206 				sizeof(struct test_val) -
6207 				offsetof(struct test_val, foo)),
6208 			BPF_MOV64_IMM(BPF_REG_3, 0),
6209 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6210 			BPF_EXIT_INSN(),
6211 		},
6212 		.fixup_map_hash_48b = { 3 },
6213 		.result = ACCEPT,
6214 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6215 	},
6216 	{
6217 		"helper access to adjusted map (via variable): partial range",
6218 		.insns = {
6219 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6220 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6221 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6222 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6223 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6224 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6225 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6226 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6227 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6228 				offsetof(struct test_val, foo), 4),
6229 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6230 			BPF_MOV64_IMM(BPF_REG_2, 8),
6231 			BPF_MOV64_IMM(BPF_REG_3, 0),
6232 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6233 			BPF_EXIT_INSN(),
6234 		},
6235 		.fixup_map_hash_48b = { 3 },
6236 		.result = ACCEPT,
6237 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6238 	},
6239 	{
6240 		"helper access to adjusted map (via variable): empty range",
6241 		.insns = {
6242 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6243 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6244 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6245 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6246 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6247 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6248 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6249 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6250 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6251 				offsetof(struct test_val, foo), 3),
6252 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6253 			BPF_MOV64_IMM(BPF_REG_2, 0),
6254 			BPF_EMIT_CALL(BPF_FUNC_trace_printk),
6255 			BPF_EXIT_INSN(),
6256 		},
6257 		.fixup_map_hash_48b = { 3 },
6258 		.errstr = "R1 min value is outside of the array range",
6259 		.result = REJECT,
6260 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6261 	},
6262 	{
6263 		"helper access to adjusted map (via variable): no max check",
6264 		.insns = {
6265 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6266 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6267 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6268 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6269 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6270 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6271 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6272 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6273 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6274 			BPF_MOV64_IMM(BPF_REG_2, 1),
6275 			BPF_MOV64_IMM(BPF_REG_3, 0),
6276 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6277 			BPF_EXIT_INSN(),
6278 		},
6279 		.fixup_map_hash_48b = { 3 },
6280 		.errstr = "R1 unbounded memory access",
6281 		.result = REJECT,
6282 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6283 	},
6284 	{
6285 		"helper access to adjusted map (via variable): wrong max check",
6286 		.insns = {
6287 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6288 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6289 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6290 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6291 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6292 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6293 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6294 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6295 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6296 				offsetof(struct test_val, foo), 4),
6297 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6298 			BPF_MOV64_IMM(BPF_REG_2,
6299 				sizeof(struct test_val) -
6300 				offsetof(struct test_val, foo) + 1),
6301 			BPF_MOV64_IMM(BPF_REG_3, 0),
6302 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
6303 			BPF_EXIT_INSN(),
6304 		},
6305 		.fixup_map_hash_48b = { 3 },
6306 		.errstr = "invalid access to map value, value_size=48 off=4 size=45",
6307 		.result = REJECT,
6308 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6309 	},
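	/*
	 * Bounds checks on the offset using unsigned (<, <=) and signed
	 * (s<, s<=) comparisons before adding it to the map value pointer.
	 * The "bad access" variants either take the wrong branch, leaving
	 * the value unbounded, or use a 64-bit load whose minimum may still
	 * be negative.
	 */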
6310 	{
6311 		"helper access to map: bounds check using <, good access",
6312 		.insns = {
6313 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6314 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6315 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6316 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6317 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6318 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6319 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6320 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6321 			BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 2),
6322 			BPF_MOV64_IMM(BPF_REG_0, 0),
6323 			BPF_EXIT_INSN(),
6324 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6325 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6326 			BPF_MOV64_IMM(BPF_REG_0, 0),
6327 			BPF_EXIT_INSN(),
6328 		},
6329 		.fixup_map_hash_48b = { 3 },
6330 		.result = ACCEPT,
6331 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6332 	},
6333 	{
6334 		"helper access to map: bounds check using <, bad access",
6335 		.insns = {
6336 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6337 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6338 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6339 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6340 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6341 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6342 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6343 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6344 			BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 4),
6345 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6346 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6347 			BPF_MOV64_IMM(BPF_REG_0, 0),
6348 			BPF_EXIT_INSN(),
6349 			BPF_MOV64_IMM(BPF_REG_0, 0),
6350 			BPF_EXIT_INSN(),
6351 		},
6352 		.fixup_map_hash_48b = { 3 },
6353 		.result = REJECT,
6354 		.errstr = "R1 unbounded memory access",
6355 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6356 	},
6357 	{
6358 		"helper access to map: bounds check using <=, good access",
6359 		.insns = {
6360 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6361 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6362 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6363 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6364 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6365 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6366 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6367 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6368 			BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 2),
6369 			BPF_MOV64_IMM(BPF_REG_0, 0),
6370 			BPF_EXIT_INSN(),
6371 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6372 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6373 			BPF_MOV64_IMM(BPF_REG_0, 0),
6374 			BPF_EXIT_INSN(),
6375 		},
6376 		.fixup_map_hash_48b = { 3 },
6377 		.result = ACCEPT,
6378 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6379 	},
6380 	{
6381 		"helper access to map: bounds check using <=, bad access",
6382 		.insns = {
6383 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6384 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6385 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6386 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6387 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6388 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6389 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6390 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6391 			BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 4),
6392 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6393 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6394 			BPF_MOV64_IMM(BPF_REG_0, 0),
6395 			BPF_EXIT_INSN(),
6396 			BPF_MOV64_IMM(BPF_REG_0, 0),
6397 			BPF_EXIT_INSN(),
6398 		},
6399 		.fixup_map_hash_48b = { 3 },
6400 		.result = REJECT,
6401 		.errstr = "R1 unbounded memory access",
6402 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6403 	},
6404 	{
6405 		"helper access to map: bounds check using s<, good access",
6406 		.insns = {
6407 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6408 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6409 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6410 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6411 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6412 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6413 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6414 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6415 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
6416 			BPF_MOV64_IMM(BPF_REG_0, 0),
6417 			BPF_EXIT_INSN(),
6418 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 0, -3),
6419 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6420 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6421 			BPF_MOV64_IMM(BPF_REG_0, 0),
6422 			BPF_EXIT_INSN(),
6423 		},
6424 		.fixup_map_hash_48b = { 3 },
6425 		.result = ACCEPT,
6426 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6427 	},
6428 	{
6429 		"helper access to map: bounds check using s<, good access 2",
6430 		.insns = {
6431 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6432 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6433 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6434 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6435 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6436 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6437 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6438 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6439 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
6440 			BPF_MOV64_IMM(BPF_REG_0, 0),
6441 			BPF_EXIT_INSN(),
6442 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
6443 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6444 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6445 			BPF_MOV64_IMM(BPF_REG_0, 0),
6446 			BPF_EXIT_INSN(),
6447 		},
6448 		.fixup_map_hash_48b = { 3 },
6449 		.result = ACCEPT,
6450 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6451 	},
6452 	{
6453 		"helper access to map: bounds check using s<, bad access",
6454 		.insns = {
6455 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6456 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6457 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6458 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6459 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6460 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6461 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6462 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
6463 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
6464 			BPF_MOV64_IMM(BPF_REG_0, 0),
6465 			BPF_EXIT_INSN(),
6466 			BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
6467 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6468 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6469 			BPF_MOV64_IMM(BPF_REG_0, 0),
6470 			BPF_EXIT_INSN(),
6471 		},
6472 		.fixup_map_hash_48b = { 3 },
6473 		.result = REJECT,
6474 		.errstr = "R1 min value is negative",
6475 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6476 	},
6477 	{
6478 		"helper access to map: bounds check using s<=, good access",
6479 		.insns = {
6480 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6481 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6482 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6483 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6484 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6485 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6486 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6487 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6488 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
6489 			BPF_MOV64_IMM(BPF_REG_0, 0),
6490 			BPF_EXIT_INSN(),
6491 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 0, -3),
6492 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6493 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6494 			BPF_MOV64_IMM(BPF_REG_0, 0),
6495 			BPF_EXIT_INSN(),
6496 		},
6497 		.fixup_map_hash_48b = { 3 },
6498 		.result = ACCEPT,
6499 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6500 	},
6501 	{
6502 		"helper access to map: bounds check using s<=, good access 2",
6503 		.insns = {
6504 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6505 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6506 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6507 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6508 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6509 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6510 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6511 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6512 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
6513 			BPF_MOV64_IMM(BPF_REG_0, 0),
6514 			BPF_EXIT_INSN(),
6515 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
6516 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6517 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6518 			BPF_MOV64_IMM(BPF_REG_0, 0),
6519 			BPF_EXIT_INSN(),
6520 		},
6521 		.fixup_map_hash_48b = { 3 },
6522 		.result = ACCEPT,
6523 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6524 	},
6525 	{
6526 		"helper access to map: bounds check using s<=, bad access",
6527 		.insns = {
6528 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6529 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6530 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6531 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6532 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6533 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6534 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
6535 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
6536 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
6537 			BPF_MOV64_IMM(BPF_REG_0, 0),
6538 			BPF_EXIT_INSN(),
6539 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
6540 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
6541 			BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
6542 			BPF_MOV64_IMM(BPF_REG_0, 0),
6543 			BPF_EXIT_INSN(),
6544 		},
6545 		.fixup_map_hash_48b = { 3 },
6546 		.result = REJECT,
6547 		.errstr = "R1 min value is negative",
6548 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6549 	},
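	/* Map value pointer arithmetic: a bounded scalar may be added to
	 * a PTR_TO_MAP_VALUE (in either operand order) or subtracted from
	 * it as long as the result provably stays inside the value;
	 * subtracting a pointer from a scalar or combining two pointers
	 * in one ALU op is rejected, as the errstr fields below spell out.
	 */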
6550 	{
6551 		"map access: known scalar += value_ptr",
6552 		.insns = {
6553 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6554 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6555 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6556 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6557 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6558 				     BPF_FUNC_map_lookup_elem),
6559 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
6560 			BPF_MOV64_IMM(BPF_REG_1, 4),
6561 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
6562 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
6563 			BPF_MOV64_IMM(BPF_REG_0, 1),
6564 			BPF_EXIT_INSN(),
6565 		},
6566 		.fixup_map_array_48b = { 3 },
6567 		.result = ACCEPT,
6568 		.retval = 1,
6569 	},
6570 	{
6571 		"map access: value_ptr += known scalar",
6572 		.insns = {
6573 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6574 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6575 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6576 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6577 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6578 				     BPF_FUNC_map_lookup_elem),
6579 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
6580 			BPF_MOV64_IMM(BPF_REG_1, 4),
6581 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6582 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6583 			BPF_MOV64_IMM(BPF_REG_0, 1),
6584 			BPF_EXIT_INSN(),
6585 		},
6586 		.fixup_map_array_48b = { 3 },
6587 		.result = ACCEPT,
6588 		.retval = 1,
6589 	},
6590 	{
6591 		"map access: unknown scalar += value_ptr",
6592 		.insns = {
6593 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6594 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6595 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6596 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6597 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6598 				     BPF_FUNC_map_lookup_elem),
6599 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6600 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6601 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
6602 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
6603 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
6604 			BPF_MOV64_IMM(BPF_REG_0, 1),
6605 			BPF_EXIT_INSN(),
6606 		},
6607 		.fixup_map_array_48b = { 3 },
6608 		.result = ACCEPT,
6609 		.retval = 1,
6610 	},
6611 	{
6612 		"map access: value_ptr += unknown scalar",
6613 		.insns = {
6614 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6615 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6616 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6617 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6618 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6619 				     BPF_FUNC_map_lookup_elem),
6620 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6621 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6622 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
6623 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6624 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6625 			BPF_MOV64_IMM(BPF_REG_0, 1),
6626 			BPF_EXIT_INSN(),
6627 		},
6628 		.fixup_map_array_48b = { 3 },
6629 		.result = ACCEPT,
6630 		.retval = 1,
6631 	},
6632 	{
6633 		"map access: value_ptr += value_ptr",
6634 		.insns = {
6635 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6636 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6637 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6638 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6639 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6640 				     BPF_FUNC_map_lookup_elem),
6641 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
6642 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_0),
6643 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6644 			BPF_MOV64_IMM(BPF_REG_0, 1),
6645 			BPF_EXIT_INSN(),
6646 		},
6647 		.fixup_map_array_48b = { 3 },
6648 		.result = REJECT,
6649 		.errstr = "R0 pointer += pointer prohibited",
6650 	},
6651 	{
6652 		"map access: known scalar -= value_ptr",
6653 		.insns = {
6654 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6655 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6656 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6657 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6658 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6659 				     BPF_FUNC_map_lookup_elem),
6660 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
6661 			BPF_MOV64_IMM(BPF_REG_1, 4),
6662 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
6663 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
6664 			BPF_MOV64_IMM(BPF_REG_0, 1),
6665 			BPF_EXIT_INSN(),
6666 		},
6667 		.fixup_map_array_48b = { 3 },
6668 		.result = REJECT,
6669 		.errstr = "R1 tried to subtract pointer from scalar",
6670 	},
6671 	{
6672 		"map access: value_ptr -= known scalar",
6673 		.insns = {
6674 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6675 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6676 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6677 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6678 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6679 				     BPF_FUNC_map_lookup_elem),
6680 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
6681 			BPF_MOV64_IMM(BPF_REG_1, 4),
6682 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
6683 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6684 			BPF_MOV64_IMM(BPF_REG_0, 1),
6685 			BPF_EXIT_INSN(),
6686 		},
6687 		.fixup_map_array_48b = { 3 },
6688 		.result = REJECT,
6689 		.errstr = "R0 min value is outside of the array range",
6690 	},
6691 	{
6692 		"map access: value_ptr -= known scalar, 2",
6693 		.insns = {
6694 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6695 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6696 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6697 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6698 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6699 				     BPF_FUNC_map_lookup_elem),
6700 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6701 			BPF_MOV64_IMM(BPF_REG_1, 6),
6702 			BPF_MOV64_IMM(BPF_REG_2, 4),
6703 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6704 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
6705 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6706 			BPF_MOV64_IMM(BPF_REG_0, 1),
6707 			BPF_EXIT_INSN(),
6708 		},
6709 		.fixup_map_array_48b = { 3 },
6710 		.result = ACCEPT,
6711 		.retval = 1,
6712 	},
6713 	{
6714 		"map access: unknown scalar -= value_ptr",
6715 		.insns = {
6716 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6717 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6718 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6719 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6720 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6721 				     BPF_FUNC_map_lookup_elem),
6722 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6723 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6724 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
6725 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
6726 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
6727 			BPF_MOV64_IMM(BPF_REG_0, 1),
6728 			BPF_EXIT_INSN(),
6729 		},
6730 		.fixup_map_array_48b = { 3 },
6731 		.result = REJECT,
6732 		.errstr = "R1 tried to subtract pointer from scalar",
6733 	},
6734 	{
6735 		"map access: value_ptr -= unknown scalar",
6736 		.insns = {
6737 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6738 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6739 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6740 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6741 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6742 				     BPF_FUNC_map_lookup_elem),
6743 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6744 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6745 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
6746 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
6747 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6748 			BPF_MOV64_IMM(BPF_REG_0, 1),
6749 			BPF_EXIT_INSN(),
6750 		},
6751 		.fixup_map_array_48b = { 3 },
6752 		.result = REJECT,
6753 		.errstr = "R0 min value is negative",
6754 	},
6755 	{
6756 		"map access: value_ptr -= unknown scalar, 2",
6757 		.insns = {
6758 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6759 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6760 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6761 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6762 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6763 				     BPF_FUNC_map_lookup_elem),
6764 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
6765 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6766 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
6767 			BPF_ALU64_IMM(BPF_OR, BPF_REG_1, 0x7),
6768 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6769 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6770 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7),
6771 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
6772 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6773 			BPF_MOV64_IMM(BPF_REG_0, 1),
6774 			BPF_EXIT_INSN(),
6775 		},
6776 		.fixup_map_array_48b = { 3 },
6777 		.result = ACCEPT,
6778 		.retval = 1,
6779 	},
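	/* Privileged: the pointer difference below becomes an unknown
	 * scalar ('inv'), so the following load through R0 fails;
	 * unprivileged: the subtraction itself is already prohibited.
	 */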
6780 	{
6781 		"map access: value_ptr -= value_ptr",
6782 		.insns = {
6783 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6784 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6785 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6786 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6787 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6788 				     BPF_FUNC_map_lookup_elem),
6789 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
6790 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_0),
6791 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6792 			BPF_MOV64_IMM(BPF_REG_0, 1),
6793 			BPF_EXIT_INSN(),
6794 		},
6795 		.fixup_map_array_48b = { 3 },
6796 		.result = REJECT,
6797 		.errstr = "R0 invalid mem access 'inv'",
6798 		.errstr_unpriv = "R0 pointer -= pointer prohibited",
6799 	},
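	/* Helper argument checks: a looked-up map value may itself be
	 * passed as the key/value argument of another map helper; the
	 * verifier checks that the argument region fits inside the source
	 * map's value_size, including any constant or variable offset
	 * added beforehand.
	 */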
6800 	{
6801 		"map lookup helper access to map",
6802 		.insns = {
6803 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6804 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6805 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6806 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6807 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6808 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6809 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6810 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6811 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6812 			BPF_EXIT_INSN(),
6813 		},
6814 		.fixup_map_hash_16b = { 3, 8 },
6815 		.result = ACCEPT,
6816 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6817 	},
6818 	{
6819 		"map update helper access to map",
6820 		.insns = {
6821 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6822 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6823 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6824 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6825 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6826 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6827 			BPF_MOV64_IMM(BPF_REG_4, 0),
6828 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
6829 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6830 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6831 			BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
6832 			BPF_EXIT_INSN(),
6833 		},
6834 		.fixup_map_hash_16b = { 3, 10 },
6835 		.result = ACCEPT,
6836 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6837 	},
6838 	{
6839 		"map update helper access to map: wrong size",
6840 		.insns = {
6841 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6842 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6843 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6844 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6845 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6846 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6847 			BPF_MOV64_IMM(BPF_REG_4, 0),
6848 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
6849 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6850 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6851 			BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
6852 			BPF_EXIT_INSN(),
6853 		},
6854 		.fixup_map_hash_8b = { 3 },
6855 		.fixup_map_hash_16b = { 10 },
6856 		.result = REJECT,
6857 		.errstr = "invalid access to map value, value_size=8 off=0 size=16",
6858 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6859 	},
6860 	{
6861 		"map helper access to adjusted map (via const imm)",
6862 		.insns = {
6863 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6864 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6865 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6866 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6867 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6868 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6869 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6870 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
6871 				      offsetof(struct other_val, bar)),
6872 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6873 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6874 			BPF_EXIT_INSN(),
6875 		},
6876 		.fixup_map_hash_16b = { 3, 9 },
6877 		.result = ACCEPT,
6878 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6879 	},
6880 	{
6881 		"map helper access to adjusted map (via const imm): out-of-bound 1",
6882 		.insns = {
6883 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6884 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6885 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6886 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6887 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6888 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6889 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6890 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
6891 				      sizeof(struct other_val) - 4),
6892 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6893 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6894 			BPF_EXIT_INSN(),
6895 		},
6896 		.fixup_map_hash_16b = { 3, 9 },
6897 		.result = REJECT,
6898 		.errstr = "invalid access to map value, value_size=16 off=12 size=8",
6899 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6900 	},
6901 	{
6902 		"map helper access to adjusted map (via const imm): out-of-bound 2",
6903 		.insns = {
6904 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6905 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6906 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6907 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6908 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6909 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6910 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6911 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
6912 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6913 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6914 			BPF_EXIT_INSN(),
6915 		},
6916 		.fixup_map_hash_16b = { 3, 9 },
6917 		.result = REJECT,
6918 		.errstr = "invalid access to map value, value_size=16 off=-4 size=8",
6919 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6920 	},
6921 	{
6922 		"map helper access to adjusted map (via const reg)",
6923 		.insns = {
6924 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6925 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6926 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6927 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6928 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6929 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6930 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6931 			BPF_MOV64_IMM(BPF_REG_3,
6932 				      offsetof(struct other_val, bar)),
6933 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6934 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6935 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6936 			BPF_EXIT_INSN(),
6937 		},
6938 		.fixup_map_hash_16b = { 3, 10 },
6939 		.result = ACCEPT,
6940 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6941 	},
6942 	{
6943 		"map helper access to adjusted map (via const reg): out-of-bound 1",
6944 		.insns = {
6945 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6946 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6947 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6948 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6949 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6950 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6951 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6952 			BPF_MOV64_IMM(BPF_REG_3,
6953 				      sizeof(struct other_val) - 4),
6954 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6955 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6956 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6957 			BPF_EXIT_INSN(),
6958 		},
6959 		.fixup_map_hash_16b = { 3, 10 },
6960 		.result = REJECT,
6961 		.errstr = "invalid access to map value, value_size=16 off=12 size=8",
6962 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6963 	},
6964 	{
6965 		"map helper access to adjusted map (via const reg): out-of-bound 2",
6966 		.insns = {
6967 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6968 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6969 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6970 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6971 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6972 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6973 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6974 			BPF_MOV64_IMM(BPF_REG_3, -4),
6975 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6976 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6977 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6978 			BPF_EXIT_INSN(),
6979 		},
6980 		.fixup_map_hash_16b = { 3, 10 },
6981 		.result = REJECT,
6982 		.errstr = "invalid access to map value, value_size=16 off=-4 size=8",
6983 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
6984 	},
6985 	{
6986 		"map helper access to adjusted map (via variable)",
6987 		.insns = {
6988 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6989 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6990 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
6991 			BPF_LD_MAP_FD(BPF_REG_1, 0),
6992 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6993 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6994 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6995 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
6996 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
6997 				    offsetof(struct other_val, bar), 4),
6998 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6999 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7000 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7001 			BPF_EXIT_INSN(),
7002 		},
7003 		.fixup_map_hash_16b = { 3, 11 },
7004 		.result = ACCEPT,
7005 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7006 	},
7007 	{
7008 		"map helper access to adjusted map (via variable): no max check",
7009 		.insns = {
7010 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7011 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7012 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7013 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7014 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7015 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7016 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
7017 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
7018 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
7019 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7020 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7021 			BPF_EXIT_INSN(),
7022 		},
7023 		.fixup_map_hash_16b = { 3, 10 },
7024 		.result = REJECT,
7025 		.errstr = "R2 unbounded memory access, make sure to bounds check any array access into a map",
7026 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7027 	},
7028 	{
7029 		"map helper access to adjusted map (via variable): wrong max check",
7030 		.insns = {
7031 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7032 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7033 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7034 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7035 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7036 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7037 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
7038 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
7039 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
7040 				    offsetof(struct other_val, bar) + 1, 4),
7041 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
7042 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7043 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7044 			BPF_EXIT_INSN(),
7045 		},
7046 		.fixup_map_hash_16b = { 3, 11 },
7047 		.result = REJECT,
7048 		.errstr = "invalid access to map value, value_size=16 off=9 size=8",
7049 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7050 	},
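	/* Register spilling: a map value pointer written to the stack and
	 * read back keeps its type and bounds.  The unprivileged variants
	 * are expected to fail with "R0 leaks addr" since they would
	 * expose the element address.
	 */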
7051 	{
7052 		"map element value is preserved across register spilling",
7053 		.insns = {
7054 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7055 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7056 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7057 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7058 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7059 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7060 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
7061 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7062 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
7063 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
7064 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
7065 			BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
7066 			BPF_EXIT_INSN(),
7067 		},
7068 		.fixup_map_hash_48b = { 3 },
7069 		.errstr_unpriv = "R0 leaks addr",
7070 		.result = ACCEPT,
7071 		.result_unpriv = REJECT,
7072 	},
7073 	{
7074 		"map element value or null is marked on register spilling",
7075 		.insns = {
7076 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7077 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7078 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7079 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7080 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7081 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7082 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
7083 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
7084 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
7085 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
7086 			BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
7087 			BPF_EXIT_INSN(),
7088 		},
7089 		.fixup_map_hash_48b = { 3 },
7090 		.errstr_unpriv = "R0 leaks addr",
7091 		.result = ACCEPT,
7092 		.result_unpriv = REJECT,
7093 	},
7094 	{
7095 		"map element value store of cleared call register",
7096 		.insns = {
7097 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7098 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7099 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7100 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7101 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7102 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
7103 			BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
7104 			BPF_EXIT_INSN(),
7105 		},
7106 		.fixup_map_hash_48b = { 3 },
7107 		.errstr_unpriv = "R1 !read_ok",
7108 		.errstr = "R1 !read_ok",
7109 		.result = REJECT,
7110 		.result_unpriv = REJECT,
7111 	},
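	/* Unaligned loads/stores inside a map value: the expected results
	 * here assume an architecture with efficient unaligned access,
	 * hence F_NEEDS_EFFICIENT_UNALIGNED_ACCESS.
	 */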
7112 	{
7113 		"map element value with unaligned store",
7114 		.insns = {
7115 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7116 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7117 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7118 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7119 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7120 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
7121 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
7122 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
7123 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
7124 			BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
7125 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
7126 			BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
7127 			BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
7128 			BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
7129 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
7130 			BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
7131 			BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
7132 			BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
7133 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
7134 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
7135 			BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
7136 			BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
7137 			BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
7138 			BPF_EXIT_INSN(),
7139 		},
7140 		.fixup_map_hash_48b = { 3 },
7141 		.errstr_unpriv = "R0 leaks addr",
7142 		.result = ACCEPT,
7143 		.result_unpriv = REJECT,
7144 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7145 	},
7146 	{
7147 		"map element value with unaligned load",
7148 		.insns = {
7149 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7150 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7151 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7152 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7153 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7154 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
7155 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
7156 			BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
7157 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
7158 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
7159 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
7160 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
7161 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
7162 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
7163 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
7164 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
7165 			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
7166 			BPF_EXIT_INSN(),
7167 		},
7168 		.fixup_map_hash_48b = { 3 },
7169 		.errstr_unpriv = "R0 leaks addr",
7170 		.result = ACCEPT,
7171 		.result_unpriv = REJECT,
7172 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7173 	},
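	/* Illegal ALU ops on a map value pointer: bitwise AND, 32-bit
	 * arithmetic and division are rejected outright, while a byte
	 * swap or an XADD on the spilled pointer turns the register into
	 * an unknown scalar that can no longer be dereferenced.
	 */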
7174 	{
7175 		"map element value illegal alu op, 1",
7176 		.insns = {
7177 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7178 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7179 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7180 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7181 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7182 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
7183 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
7184 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
7185 			BPF_EXIT_INSN(),
7186 		},
7187 		.fixup_map_hash_48b = { 3 },
7188 		.errstr = "R0 bitwise operator &= on pointer",
7189 		.result = REJECT,
7190 	},
7191 	{
7192 		"map element value illegal alu op, 2",
7193 		.insns = {
7194 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7195 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7196 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7197 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7198 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7199 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
7200 			BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
7201 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
7202 			BPF_EXIT_INSN(),
7203 		},
7204 		.fixup_map_hash_48b = { 3 },
7205 		.errstr = "R0 32-bit pointer arithmetic prohibited",
7206 		.result = REJECT,
7207 	},
7208 	{
7209 		"map element value illegal alu op, 3",
7210 		.insns = {
7211 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7212 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7213 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7214 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7215 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7216 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
7217 			BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
7218 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
7219 			BPF_EXIT_INSN(),
7220 		},
7221 		.fixup_map_hash_48b = { 3 },
7222 		.errstr = "R0 pointer arithmetic with /= operator",
7223 		.result = REJECT,
7224 	},
7225 	{
7226 		"map element value illegal alu op, 4",
7227 		.insns = {
7228 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7229 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7230 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7231 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7232 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7233 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
7234 			BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
7235 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
7236 			BPF_EXIT_INSN(),
7237 		},
7238 		.fixup_map_hash_48b = { 3 },
7239 		.errstr_unpriv = "R0 pointer arithmetic prohibited",
7240 		.errstr = "invalid mem access 'inv'",
7241 		.result = REJECT,
7242 		.result_unpriv = REJECT,
7243 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7244 	},
7245 	{
7246 		"map element value illegal alu op, 5",
7247 		.insns = {
7248 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7249 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7250 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7251 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7252 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7253 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7254 			BPF_MOV64_IMM(BPF_REG_3, 4096),
7255 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7256 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7257 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
7258 			BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
7259 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
7260 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
7261 			BPF_EXIT_INSN(),
7262 		},
7263 		.fixup_map_hash_48b = { 3 },
7264 		.errstr = "R0 invalid mem access 'inv'",
7265 		.result = REJECT,
7266 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7267 	},
7268 	{
7269 		"map element value is preserved across register spilling",
7270 		.insns = {
7271 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7272 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7273 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7274 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7275 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7276 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7277 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
7278 				offsetof(struct test_val, foo)),
7279 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
7280 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7281 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
7282 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
7283 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
7284 			BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
7285 			BPF_EXIT_INSN(),
7286 		},
7287 		.fixup_map_hash_48b = { 3 },
7288 		.errstr_unpriv = "R0 leaks addr",
7289 		.result = ACCEPT,
7290 		.result_unpriv = REJECT,
7291 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7292 	},
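	/* Variable-size helper memory: bpf_probe_read() is given a stack
	 * buffer plus a size loaded back from the stack.  The size must
	 * be provably bounded (via AND masking or conditional jumps) and
	 * the whole buffer must be initialized, otherwise the program is
	 * rejected.
	 */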
7293 	{
7294 		"helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
7295 		.insns = {
7296 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7297 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7298 			BPF_MOV64_IMM(BPF_REG_0, 0),
7299 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
7300 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
7301 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
7302 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
7303 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
7304 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
7305 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
7306 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
7307 			BPF_MOV64_IMM(BPF_REG_2, 16),
7308 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7309 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7310 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
7311 			BPF_MOV64_IMM(BPF_REG_4, 0),
7312 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
7313 			BPF_MOV64_IMM(BPF_REG_3, 0),
7314 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7315 			BPF_MOV64_IMM(BPF_REG_0, 0),
7316 			BPF_EXIT_INSN(),
7317 		},
7318 		.result = ACCEPT,
7319 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7320 	},
7321 	{
7322 		"helper access to variable memory: stack, bitwise AND, zero included",
7323 		.insns = {
7324 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7325 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7326 			BPF_MOV64_IMM(BPF_REG_2, 16),
7327 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7328 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7329 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
7330 			BPF_MOV64_IMM(BPF_REG_3, 0),
7331 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7332 			BPF_EXIT_INSN(),
7333 		},
7334 		.errstr = "invalid indirect read from stack off -64+0 size 64",
7335 		.result = REJECT,
7336 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7337 	},
7338 	{
7339 		"helper access to variable memory: stack, bitwise AND + JMP, wrong max",
7340 		.insns = {
7341 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7342 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7343 			BPF_MOV64_IMM(BPF_REG_2, 16),
7344 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7345 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7346 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
7347 			BPF_MOV64_IMM(BPF_REG_4, 0),
7348 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
7349 			BPF_MOV64_IMM(BPF_REG_3, 0),
7350 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7351 			BPF_MOV64_IMM(BPF_REG_0, 0),
7352 			BPF_EXIT_INSN(),
7353 		},
7354 		.errstr = "invalid stack type R1 off=-64 access_size=65",
7355 		.result = REJECT,
7356 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7357 	},
7358 	{
7359 		"helper access to variable memory: stack, JMP, correct bounds",
7360 		.insns = {
7361 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7362 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7363 			BPF_MOV64_IMM(BPF_REG_0, 0),
7364 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
7365 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
7366 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
7367 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
7368 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
7369 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
7370 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
7371 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
7372 			BPF_MOV64_IMM(BPF_REG_2, 16),
7373 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7374 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7375 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
7376 			BPF_MOV64_IMM(BPF_REG_4, 0),
7377 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
7378 			BPF_MOV64_IMM(BPF_REG_3, 0),
7379 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7380 			BPF_MOV64_IMM(BPF_REG_0, 0),
7381 			BPF_EXIT_INSN(),
7382 		},
7383 		.result = ACCEPT,
7384 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7385 	},
7386 	{
7387 		"helper access to variable memory: stack, JMP (signed), correct bounds",
7388 		.insns = {
7389 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7390 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7391 			BPF_MOV64_IMM(BPF_REG_0, 0),
7392 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
7393 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
7394 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
7395 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
7396 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
7397 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
7398 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
7399 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
7400 			BPF_MOV64_IMM(BPF_REG_2, 16),
7401 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7402 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7403 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
7404 			BPF_MOV64_IMM(BPF_REG_4, 0),
7405 			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
7406 			BPF_MOV64_IMM(BPF_REG_3, 0),
7407 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7408 			BPF_MOV64_IMM(BPF_REG_0, 0),
7409 			BPF_EXIT_INSN(),
7410 		},
7411 		.result = ACCEPT,
7412 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7413 	},
7414 	{
7415 		"helper access to variable memory: stack, JMP, bounds + offset",
7416 		.insns = {
7417 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7418 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7419 			BPF_MOV64_IMM(BPF_REG_2, 16),
7420 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7421 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7422 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
7423 			BPF_MOV64_IMM(BPF_REG_4, 0),
7424 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
7425 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
7426 			BPF_MOV64_IMM(BPF_REG_3, 0),
7427 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7428 			BPF_MOV64_IMM(BPF_REG_0, 0),
7429 			BPF_EXIT_INSN(),
7430 		},
7431 		.errstr = "invalid stack type R1 off=-64 access_size=65",
7432 		.result = REJECT,
7433 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7434 	},
7435 	{
7436 		"helper access to variable memory: stack, JMP, wrong max",
7437 		.insns = {
7438 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7439 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7440 			BPF_MOV64_IMM(BPF_REG_2, 16),
7441 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7442 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7443 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
7444 			BPF_MOV64_IMM(BPF_REG_4, 0),
7445 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
7446 			BPF_MOV64_IMM(BPF_REG_3, 0),
7447 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7448 			BPF_MOV64_IMM(BPF_REG_0, 0),
7449 			BPF_EXIT_INSN(),
7450 		},
7451 		.errstr = "invalid stack type R1 off=-64 access_size=65",
7452 		.result = REJECT,
7453 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7454 	},
7455 	{
7456 		"helper access to variable memory: stack, JMP, no max check",
7457 		.insns = {
7458 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7459 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7460 			BPF_MOV64_IMM(BPF_REG_2, 16),
7461 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7462 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7463 			BPF_MOV64_IMM(BPF_REG_4, 0),
7464 			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
7465 			BPF_MOV64_IMM(BPF_REG_3, 0),
7466 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7467 			BPF_MOV64_IMM(BPF_REG_0, 0),
7468 			BPF_EXIT_INSN(),
7469 		},
7470 		/* because max wasn't checked, signed min is negative */
7471 		.errstr = "R2 min value is negative, either use unsigned or 'var &= const'",
7472 		.result = REJECT,
7473 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7474 	},
7475 	{
7476 		"helper access to variable memory: stack, JMP, no min check",
7477 		.insns = {
7478 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7479 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7480 			BPF_MOV64_IMM(BPF_REG_2, 16),
7481 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7482 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7483 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
7484 			BPF_MOV64_IMM(BPF_REG_3, 0),
7485 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7486 			BPF_MOV64_IMM(BPF_REG_0, 0),
7487 			BPF_EXIT_INSN(),
7488 		},
7489 		.errstr = "invalid indirect read from stack off -64+0 size 64",
7490 		.result = REJECT,
7491 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7492 	},
7493 	{
7494 		"helper access to variable memory: stack, JMP (signed), no min check",
7495 		.insns = {
7496 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7497 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7498 			BPF_MOV64_IMM(BPF_REG_2, 16),
7499 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
7500 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
7501 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
7502 			BPF_MOV64_IMM(BPF_REG_3, 0),
7503 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7504 			BPF_MOV64_IMM(BPF_REG_0, 0),
7505 			BPF_EXIT_INSN(),
7506 		},
7507 		.errstr = "R2 min value is negative",
7508 		.result = REJECT,
7509 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7510 	},
7511 	{
7512 		"helper access to variable memory: map, JMP, correct bounds",
7513 		.insns = {
7514 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7515 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7516 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7517 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7518 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7519 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
7520 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7521 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
7522 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7523 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7524 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
7525 				sizeof(struct test_val), 4),
7526 			BPF_MOV64_IMM(BPF_REG_4, 0),
7527 			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
7528 			BPF_MOV64_IMM(BPF_REG_3, 0),
7529 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7530 			BPF_MOV64_IMM(BPF_REG_0, 0),
7531 			BPF_EXIT_INSN(),
7532 		},
7533 		.fixup_map_hash_48b = { 3 },
7534 		.result = ACCEPT,
7535 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7536 	},
7537 	{
7538 		"helper access to variable memory: map, JMP, wrong max",
7539 		.insns = {
7540 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7541 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7542 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7543 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7544 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7545 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
7546 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7547 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
7548 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7549 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7550 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
7551 				sizeof(struct test_val) + 1, 4),
7552 			BPF_MOV64_IMM(BPF_REG_4, 0),
7553 			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
7554 			BPF_MOV64_IMM(BPF_REG_3, 0),
7555 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7556 			BPF_MOV64_IMM(BPF_REG_0, 0),
7557 			BPF_EXIT_INSN(),
7558 		},
7559 		.fixup_map_hash_48b = { 3 },
7560 		.errstr = "invalid access to map value, value_size=48 off=0 size=49",
7561 		.result = REJECT,
7562 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7563 	},
7564 	{
7565 		"helper access to variable memory: map adjusted, JMP, correct bounds",
7566 		.insns = {
7567 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7568 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7569 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7570 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7571 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7572 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
7573 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7574 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
7575 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
7576 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7577 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7578 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
7579 				sizeof(struct test_val) - 20, 4),
7580 			BPF_MOV64_IMM(BPF_REG_4, 0),
7581 			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
7582 			BPF_MOV64_IMM(BPF_REG_3, 0),
7583 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7584 			BPF_MOV64_IMM(BPF_REG_0, 0),
7585 			BPF_EXIT_INSN(),
7586 		},
7587 		.fixup_map_hash_48b = { 3 },
7588 		.result = ACCEPT,
7589 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7590 	},
7591 	{
7592 		"helper access to variable memory: map adjusted, JMP, wrong max",
7593 		.insns = {
7594 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7595 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7596 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
7597 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7598 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7599 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
7600 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7601 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
7602 			BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
7603 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7604 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7605 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
7606 				sizeof(struct test_val) - 19, 4),
7607 			BPF_MOV64_IMM(BPF_REG_4, 0),
7608 			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
7609 			BPF_MOV64_IMM(BPF_REG_3, 0),
7610 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7611 			BPF_MOV64_IMM(BPF_REG_0, 0),
7612 			BPF_EXIT_INSN(),
7613 		},
7614 		.fixup_map_hash_48b = { 3 },
7615 		.errstr = "R1 min value is outside of the array range",
7616 		.result = REJECT,
7617 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7618 	},
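	/* ARG_PTR_TO_MEM_OR_NULL (bpf_csum_diff here) tolerates a NULL
	 * pointer as long as the size is zero; with a non-NULL pointer a
	 * possibly-zero size is also fine.  Helpers without the _OR_NULL
	 * variant (bpf_probe_read below) need a valid pointer even for
	 * size 0.
	 */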
7619 	{
7620 		"helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
7621 		.insns = {
7622 			BPF_MOV64_IMM(BPF_REG_1, 0),
7623 			BPF_MOV64_IMM(BPF_REG_2, 0),
7624 			BPF_MOV64_IMM(BPF_REG_3, 0),
7625 			BPF_MOV64_IMM(BPF_REG_4, 0),
7626 			BPF_MOV64_IMM(BPF_REG_5, 0),
7627 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7628 			BPF_EXIT_INSN(),
7629 		},
7630 		.result = ACCEPT,
7631 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
7632 	},
7633 	{
7634 		"helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
7635 		.insns = {
7636 			BPF_MOV64_IMM(BPF_REG_1, 0),
7637 			BPF_MOV64_IMM(BPF_REG_2, 1),
7638 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7639 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7640 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
7641 			BPF_MOV64_IMM(BPF_REG_3, 0),
7642 			BPF_MOV64_IMM(BPF_REG_4, 0),
7643 			BPF_MOV64_IMM(BPF_REG_5, 0),
7644 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7645 			BPF_EXIT_INSN(),
7646 		},
7647 		.errstr = "R1 type=inv expected=fp",
7648 		.result = REJECT,
7649 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
7650 	},
7651 	{
7652 		"helper access to variable memory: size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
7653 		.insns = {
7654 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7655 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
7656 			BPF_MOV64_IMM(BPF_REG_2, 0),
7657 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
7658 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
7659 			BPF_MOV64_IMM(BPF_REG_3, 0),
7660 			BPF_MOV64_IMM(BPF_REG_4, 0),
7661 			BPF_MOV64_IMM(BPF_REG_5, 0),
7662 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7663 			BPF_EXIT_INSN(),
7664 		},
7665 		.result = ACCEPT,
7666 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
7667 	},
7668 	{
7669 		"helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
7670 		.insns = {
7671 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7672 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7673 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7674 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7675 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7676 				     BPF_FUNC_map_lookup_elem),
7677 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7678 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7679 			BPF_MOV64_IMM(BPF_REG_2, 0),
7680 			BPF_MOV64_IMM(BPF_REG_3, 0),
7681 			BPF_MOV64_IMM(BPF_REG_4, 0),
7682 			BPF_MOV64_IMM(BPF_REG_5, 0),
7683 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7684 			BPF_EXIT_INSN(),
7685 		},
7686 		.fixup_map_hash_8b = { 3 },
7687 		.result = ACCEPT,
7688 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
7689 	},
7690 	{
7691 		"helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
7692 		.insns = {
7693 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7694 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7695 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7696 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7697 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7698 				     BPF_FUNC_map_lookup_elem),
7699 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
7700 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
7701 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 7),
7702 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7703 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
7704 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
7705 			BPF_MOV64_IMM(BPF_REG_3, 0),
7706 			BPF_MOV64_IMM(BPF_REG_4, 0),
7707 			BPF_MOV64_IMM(BPF_REG_5, 0),
7708 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7709 			BPF_EXIT_INSN(),
7710 		},
7711 		.fixup_map_hash_8b = { 3 },
7712 		.result = ACCEPT,
7713 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
7714 	},
7715 	{
7716 		"helper access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
7717 		.insns = {
7718 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7719 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7720 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7721 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7722 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7723 				     BPF_FUNC_map_lookup_elem),
7724 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
7725 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7726 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
7727 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
7728 			BPF_MOV64_IMM(BPF_REG_3, 0),
7729 			BPF_MOV64_IMM(BPF_REG_4, 0),
7730 			BPF_MOV64_IMM(BPF_REG_5, 0),
7731 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7732 			BPF_EXIT_INSN(),
7733 		},
7734 		.fixup_map_hash_8b = { 3 },
7735 		.result = ACCEPT,
7736 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
7737 	},
7738 	{
7739 		"helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)",
7740 		.insns = {
7741 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
7742 				    offsetof(struct __sk_buff, data)),
7743 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7744 				    offsetof(struct __sk_buff, data_end)),
7745 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_6),
7746 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
7747 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
7748 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
7749 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 0),
7750 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
7751 			BPF_MOV64_IMM(BPF_REG_3, 0),
7752 			BPF_MOV64_IMM(BPF_REG_4, 0),
7753 			BPF_MOV64_IMM(BPF_REG_5, 0),
7754 			BPF_EMIT_CALL(BPF_FUNC_csum_diff),
7755 			BPF_EXIT_INSN(),
7756 		},
7757 		.result = ACCEPT,
7758 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
7759 		.retval = 0 /* csum_diff of 64-byte packet */,
7760 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7761 	},
7762 	{
7763 		"helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
7764 		.insns = {
7765 			BPF_MOV64_IMM(BPF_REG_1, 0),
7766 			BPF_MOV64_IMM(BPF_REG_2, 0),
7767 			BPF_MOV64_IMM(BPF_REG_3, 0),
7768 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7769 			BPF_EXIT_INSN(),
7770 		},
7771 		.errstr = "R1 type=inv expected=fp",
7772 		.result = REJECT,
7773 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7774 	},
7775 	{
7776 		"helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
7777 		.insns = {
7778 			BPF_MOV64_IMM(BPF_REG_1, 0),
7779 			BPF_MOV64_IMM(BPF_REG_2, 1),
7780 			BPF_MOV64_IMM(BPF_REG_3, 0),
7781 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7782 			BPF_EXIT_INSN(),
7783 		},
7784 		.errstr = "R1 type=inv expected=fp",
7785 		.result = REJECT,
7786 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7787 	},
7788 	{
7789 		"helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
7790 		.insns = {
7791 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7792 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
7793 			BPF_MOV64_IMM(BPF_REG_2, 0),
7794 			BPF_MOV64_IMM(BPF_REG_3, 0),
7795 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7796 			BPF_EXIT_INSN(),
7797 		},
7798 		.result = ACCEPT,
7799 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7800 	},
7801 	{
7802 		"helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
7803 		.insns = {
7804 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7805 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7806 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7807 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7808 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7809 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7810 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7811 			BPF_MOV64_IMM(BPF_REG_2, 0),
7812 			BPF_MOV64_IMM(BPF_REG_3, 0),
7813 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7814 			BPF_EXIT_INSN(),
7815 		},
7816 		.fixup_map_hash_8b = { 3 },
7817 		.result = ACCEPT,
7818 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7819 	},
7820 	{
7821 		"helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
7822 		.insns = {
7823 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7824 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7825 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7826 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7827 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7828 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7829 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
7830 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
7831 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7832 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
7833 			BPF_MOV64_IMM(BPF_REG_3, 0),
7834 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7835 			BPF_EXIT_INSN(),
7836 		},
7837 		.fixup_map_hash_8b = { 3 },
7838 		.result = ACCEPT,
7839 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7840 	},
7841 	{
7842 		"helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
7843 		.insns = {
7844 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7845 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7846 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7847 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7848 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7849 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
7850 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7851 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
7852 			BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 2),
7853 			BPF_MOV64_IMM(BPF_REG_3, 0),
7854 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7855 			BPF_EXIT_INSN(),
7856 		},
7857 		.fixup_map_hash_8b = { 3 },
7858 		.result = ACCEPT,
7859 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7860 	},
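	/* The next two tests read fp-64..fp-1 with a variable size: the
	 * first leaves the quadword at fp-32 uninitialized and must be
	 * rejected, the second zeroes the whole window and is accepted.
	 */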
7861 	{
7862 		"helper access to variable memory: 8 bytes leak",
7863 		.insns = {
7864 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7865 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7866 			BPF_MOV64_IMM(BPF_REG_0, 0),
7867 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
7868 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
7869 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
7870 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
7871 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
7872 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
7873 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
7874 			BPF_MOV64_IMM(BPF_REG_2, 1),
7875 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
7876 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
7877 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
7878 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
7879 			BPF_MOV64_IMM(BPF_REG_3, 0),
7880 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7881 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7882 			BPF_EXIT_INSN(),
7883 		},
7884 		.errstr = "invalid indirect read from stack off -64+32 size 64",
7885 		.result = REJECT,
7886 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7887 	},
7888 	{
7889 		"helper access to variable memory: 8 bytes no leak (init memory)",
7890 		.insns = {
7891 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7892 			BPF_MOV64_IMM(BPF_REG_0, 0),
7893 			BPF_MOV64_IMM(BPF_REG_0, 0),
7894 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
7895 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
7896 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
7897 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
7898 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
7899 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
7900 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
7901 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
7902 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
7903 			BPF_MOV64_IMM(BPF_REG_2, 0),
7904 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
7905 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
7906 			BPF_MOV64_IMM(BPF_REG_3, 0),
7907 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
7908 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
7909 			BPF_EXIT_INSN(),
7910 		},
7911 		.result = ACCEPT,
7912 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
7913 	},
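	/* ANDing with a negative constant or playing modulo/multiply
	 * games does not give the verifier a usable upper bound, so the
	 * derived map value offset is rejected as potentially out of
	 * range.
	 */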
7914 	{
7915 		"invalid and of negative number",
7916 		.insns = {
7917 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7918 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7919 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7920 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7921 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7922 				     BPF_FUNC_map_lookup_elem),
7923 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
7924 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7925 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
7926 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
7927 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7928 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
7929 				   offsetof(struct test_val, foo)),
7930 			BPF_EXIT_INSN(),
7931 		},
7932 		.fixup_map_hash_48b = { 3 },
7933 		.errstr = "R0 max value is outside of the array range",
7934 		.result = REJECT,
7935 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7936 	},
7937 	{
7938 		"invalid range check",
7939 		.insns = {
7940 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7941 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7942 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7943 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7944 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7945 				     BPF_FUNC_map_lookup_elem),
7946 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
7947 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
7948 			BPF_MOV64_IMM(BPF_REG_9, 1),
7949 			BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
7950 			BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
7951 			BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
7952 			BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
7953 			BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
7954 			BPF_MOV32_IMM(BPF_REG_3, 1),
7955 			BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
7956 			BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
7957 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
7958 			BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
7959 			BPF_MOV64_REG(BPF_REG_0, 0),
7960 			BPF_EXIT_INSN(),
7961 		},
7962 		.fixup_map_hash_48b = { 3 },
7963 		.errstr = "R0 max value is outside of the array range",
7964 		.result = REJECT,
7965 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7966 	},
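	/* Map-in-map: the outer lookup yields a pointer to the inner map
	 * as map_value_or_null; it must be NULL checked and used as-is
	 * (no pointer arithmetic) before serving as R1 of the inner
	 * lookup.
	 */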
7967 	{
7968 		"map in map access",
7969 		.insns = {
7970 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7971 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7972 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7973 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7974 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7975 				     BPF_FUNC_map_lookup_elem),
7976 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
7977 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7978 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7979 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7980 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
7981 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7982 				     BPF_FUNC_map_lookup_elem),
7983 			BPF_MOV64_IMM(BPF_REG_0, 0),
7984 			BPF_EXIT_INSN(),
7985 		},
7986 		.fixup_map_in_map = { 3 },
7987 		.result = ACCEPT,
7988 	},
7989 	{
7990 		"invalid inner map pointer",
7991 		.insns = {
7992 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
7993 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7994 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
7995 			BPF_LD_MAP_FD(BPF_REG_1, 0),
7996 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7997 				     BPF_FUNC_map_lookup_elem),
7998 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7999 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
8000 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8001 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
8002 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
8003 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
8004 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8005 				     BPF_FUNC_map_lookup_elem),
8006 			BPF_MOV64_IMM(BPF_REG_0, 0),
8007 			BPF_EXIT_INSN(),
8008 		},
8009 		.fixup_map_in_map = { 3 },
8010 		.errstr = "R1 pointer arithmetic on map_ptr prohibited",
8011 		.result = REJECT,
8012 	},
8013 	{
8014 		"forgot null checking on the inner map pointer",
8015 		.insns = {
8016 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
8017 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8018 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
8019 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8020 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8021 				     BPF_FUNC_map_lookup_elem),
8022 			BPF_ST_MEM(0, BPF_REG_10, -4, 0),
8023 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8024 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
8025 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
8026 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8027 				     BPF_FUNC_map_lookup_elem),
8028 			BPF_MOV64_IMM(BPF_REG_0, 0),
8029 			BPF_EXIT_INSN(),
8030 		},
8031 		.fixup_map_in_map = { 3 },
8032 		.errstr = "R1 type=map_value_or_null expected=map_ptr",
8033 		.result = REJECT,
8034 	},
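	/* BPF_LD_ABS uses r6 as the implicit skb pointer and clobbers the
	 * caller-saved registers r1-r5, so reading any of them afterwards
	 * must fail with "!read_ok"; the callee-saved r7 variant is accepted.
	 */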
8035 	{
8036 		"ld_abs: check calling conv, r1",
8037 		.insns = {
8038 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8039 			BPF_MOV64_IMM(BPF_REG_1, 0),
8040 			BPF_LD_ABS(BPF_W, -0x200000),
8041 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
8042 			BPF_EXIT_INSN(),
8043 		},
8044 		.errstr = "R1 !read_ok",
8045 		.result = REJECT,
8046 	},
8047 	{
8048 		"ld_abs: check calling conv, r2",
8049 		.insns = {
8050 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8051 			BPF_MOV64_IMM(BPF_REG_2, 0),
8052 			BPF_LD_ABS(BPF_W, -0x200000),
8053 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
8054 			BPF_EXIT_INSN(),
8055 		},
8056 		.errstr = "R2 !read_ok",
8057 		.result = REJECT,
8058 	},
8059 	{
8060 		"ld_abs: check calling conv, r3",
8061 		.insns = {
8062 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8063 			BPF_MOV64_IMM(BPF_REG_3, 0),
8064 			BPF_LD_ABS(BPF_W, -0x200000),
8065 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
8066 			BPF_EXIT_INSN(),
8067 		},
8068 		.errstr = "R3 !read_ok",
8069 		.result = REJECT,
8070 	},
8071 	{
8072 		"ld_abs: check calling conv, r4",
8073 		.insns = {
8074 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8075 			BPF_MOV64_IMM(BPF_REG_4, 0),
8076 			BPF_LD_ABS(BPF_W, -0x200000),
8077 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
8078 			BPF_EXIT_INSN(),
8079 		},
8080 		.errstr = "R4 !read_ok",
8081 		.result = REJECT,
8082 	},
8083 	{
8084 		"ld_abs: check calling conv, r5",
8085 		.insns = {
8086 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8087 			BPF_MOV64_IMM(BPF_REG_5, 0),
8088 			BPF_LD_ABS(BPF_W, -0x200000),
8089 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
8090 			BPF_EXIT_INSN(),
8091 		},
8092 		.errstr = "R5 !read_ok",
8093 		.result = REJECT,
8094 	},
8095 	{
8096 		"ld_abs: check calling conv, r7",
8097 		.insns = {
8098 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8099 			BPF_MOV64_IMM(BPF_REG_7, 0),
8100 			BPF_LD_ABS(BPF_W, -0x200000),
8101 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
8102 			BPF_EXIT_INSN(),
8103 		},
8104 		.result = ACCEPT,
8105 	},
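	/* After a helper that may move packet data (bpf_skb_vlan_push), the
	 * skb pointer saved in callee-saved r7 can be copied back into r6 and
	 * BPF_LD_ABS used again.
	 */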
8106 	{
8107 		"ld_abs: tests on r6 and skb data reload helper",
8108 		.insns = {
8109 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8110 			BPF_LD_ABS(BPF_B, 0),
8111 			BPF_LD_ABS(BPF_H, 0),
8112 			BPF_LD_ABS(BPF_W, 0),
8113 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
8114 			BPF_MOV64_IMM(BPF_REG_6, 0),
8115 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
8116 			BPF_MOV64_IMM(BPF_REG_2, 1),
8117 			BPF_MOV64_IMM(BPF_REG_3, 2),
8118 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8119 				     BPF_FUNC_skb_vlan_push),
8120 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
8121 			BPF_LD_ABS(BPF_B, 0),
8122 			BPF_LD_ABS(BPF_H, 0),
8123 			BPF_LD_ABS(BPF_W, 0),
8124 			BPF_MOV64_IMM(BPF_REG_0, 42),
8125 			BPF_EXIT_INSN(),
8126 		},
8127 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
8128 		.result = ACCEPT,
8129 		.retval = 42 /* ultimate return value */,
8130 	},
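	/* Same calling convention checks as above, but for BPF_LD_IND. */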
8131 	{
8132 		"ld_ind: check calling conv, r1",
8133 		.insns = {
8134 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8135 			BPF_MOV64_IMM(BPF_REG_1, 1),
8136 			BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000),
8137 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
8138 			BPF_EXIT_INSN(),
8139 		},
8140 		.errstr = "R1 !read_ok",
8141 		.result = REJECT,
8142 	},
8143 	{
8144 		"ld_ind: check calling conv, r2",
8145 		.insns = {
8146 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8147 			BPF_MOV64_IMM(BPF_REG_2, 1),
8148 			BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000),
8149 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
8150 			BPF_EXIT_INSN(),
8151 		},
8152 		.errstr = "R2 !read_ok",
8153 		.result = REJECT,
8154 	},
8155 	{
8156 		"ld_ind: check calling conv, r3",
8157 		.insns = {
8158 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8159 			BPF_MOV64_IMM(BPF_REG_3, 1),
8160 			BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000),
8161 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
8162 			BPF_EXIT_INSN(),
8163 		},
8164 		.errstr = "R3 !read_ok",
8165 		.result = REJECT,
8166 	},
8167 	{
8168 		"ld_ind: check calling conv, r4",
8169 		.insns = {
8170 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8171 			BPF_MOV64_IMM(BPF_REG_4, 1),
8172 			BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000),
8173 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
8174 			BPF_EXIT_INSN(),
8175 		},
8176 		.errstr = "R4 !read_ok",
8177 		.result = REJECT,
8178 	},
8179 	{
8180 		"ld_ind: check calling conv, r5",
8181 		.insns = {
8182 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8183 			BPF_MOV64_IMM(BPF_REG_5, 1),
8184 			BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000),
8185 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
8186 			BPF_EXIT_INSN(),
8187 		},
8188 		.errstr = "R5 !read_ok",
8189 		.result = REJECT,
8190 	},
8191 	{
8192 		"ld_ind: check calling conv, r7",
8193 		.insns = {
8194 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
8195 			BPF_MOV64_IMM(BPF_REG_7, 1),
8196 			BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
8197 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
8198 			BPF_EXIT_INSN(),
8199 		},
8200 		.result = ACCEPT,
8201 		.retval = 1,
8202 	},
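	/* Narrow (byte/half/word) loads of the 64-bit sample_period field are
	 * accepted for perf event programs; the byte-order conditionals pick
	 * the offset of the equivalent slice on big-endian hosts.
	 */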
8203 	{
8204 		"check bpf_perf_event_data->sample_period byte load permitted",
8205 		.insns = {
8206 			BPF_MOV64_IMM(BPF_REG_0, 0),
8207 #if __BYTE_ORDER == __LITTLE_ENDIAN
8208 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
8209 				    offsetof(struct bpf_perf_event_data, sample_period)),
8210 #else
8211 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
8212 				    offsetof(struct bpf_perf_event_data, sample_period) + 7),
8213 #endif
8214 			BPF_EXIT_INSN(),
8215 		},
8216 		.result = ACCEPT,
8217 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
8218 	},
8219 	{
8220 		"check bpf_perf_event_data->sample_period half load permitted",
8221 		.insns = {
8222 			BPF_MOV64_IMM(BPF_REG_0, 0),
8223 #if __BYTE_ORDER == __LITTLE_ENDIAN
8224 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
8225 				    offsetof(struct bpf_perf_event_data, sample_period)),
8226 #else
8227 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
8228 				    offsetof(struct bpf_perf_event_data, sample_period) + 6),
8229 #endif
8230 			BPF_EXIT_INSN(),
8231 		},
8232 		.result = ACCEPT,
8233 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
8234 	},
8235 	{
8236 		"check bpf_perf_event_data->sample_period word load permitted",
8237 		.insns = {
8238 			BPF_MOV64_IMM(BPF_REG_0, 0),
8239 #if __BYTE_ORDER == __LITTLE_ENDIAN
8240 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8241 				    offsetof(struct bpf_perf_event_data, sample_period)),
8242 #else
8243 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
8244 				    offsetof(struct bpf_perf_event_data, sample_period) + 4),
8245 #endif
8246 			BPF_EXIT_INSN(),
8247 		},
8248 		.result = ACCEPT,
8249 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
8250 	},
8251 	{
8252 		"check bpf_perf_event_data->sample_period dword load permitted",
8253 		.insns = {
8254 			BPF_MOV64_IMM(BPF_REG_0, 0),
8255 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
8256 				    offsetof(struct bpf_perf_event_data, sample_period)),
8257 			BPF_EXIT_INSN(),
8258 		},
8259 		.result = ACCEPT,
8260 		.prog_type = BPF_PROG_TYPE_PERF_EVENT,
8261 	},
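	/* Two context loads that must be rejected: a half load of skb->data
	 * (the field only allows full-width access) and a tc_classid load
	 * from an LWT program.
	 */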
8262 	{
8263 		"check skb->data half load not permitted",
8264 		.insns = {
8265 			BPF_MOV64_IMM(BPF_REG_0, 0),
8266 #if __BYTE_ORDER == __LITTLE_ENDIAN
8267 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
8268 				    offsetof(struct __sk_buff, data)),
8269 #else
8270 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
8271 				    offsetof(struct __sk_buff, data) + 2),
8272 #endif
8273 			BPF_EXIT_INSN(),
8274 		},
8275 		.result = REJECT,
8276 		.errstr = "invalid bpf_context access",
8277 	},
8278 	{
8279 		"check skb->tc_classid half load not permitted for lwt prog",
8280 		.insns = {
8281 			BPF_MOV64_IMM(BPF_REG_0, 0),
8282 #if __BYTE_ORDER == __LITTLE_ENDIAN
8283 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
8284 				    offsetof(struct __sk_buff, tc_classid)),
8285 #else
8286 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
8287 				    offsetof(struct __sk_buff, tc_classid) + 2),
8288 #endif
8289 			BPF_EXIT_INSN(),
8290 		},
8291 		.result = REJECT,
8292 		.errstr = "invalid bpf_context access",
8293 		.prog_type = BPF_PROG_TYPE_LWT_IN,
8294 	},
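	/* The "mixing signed and unsigned" series loads an attacker-controlled
	 * 64-bit value from the stack and guards a map value offset with a mix
	 * of unsigned (JGT/JGE) and signed (JSGT) comparisons.  Unless the
	 * checks pin down both bounds consistently, the minimum stays
	 * unbounded and the pointer arithmetic must be rejected; the few
	 * variants that do bound the value correctly are accepted.
	 */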
8295 	{
8296 		"bounds checks mixing signed and unsigned, positive bounds",
8297 		.insns = {
8298 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8299 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8300 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8301 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8302 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8303 				     BPF_FUNC_map_lookup_elem),
8304 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
8305 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8306 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8307 			BPF_MOV64_IMM(BPF_REG_2, 2),
8308 			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3),
8309 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 4, 2),
8310 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8311 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8312 			BPF_MOV64_IMM(BPF_REG_0, 0),
8313 			BPF_EXIT_INSN(),
8314 		},
8315 		.fixup_map_hash_8b = { 3 },
8316 		.errstr = "unbounded min value",
8317 		.result = REJECT,
8318 	},
8319 	{
8320 		"bounds checks mixing signed and unsigned",
8321 		.insns = {
8322 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8323 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8324 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8325 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8326 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8327 				     BPF_FUNC_map_lookup_elem),
8328 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
8329 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8330 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8331 			BPF_MOV64_IMM(BPF_REG_2, -1),
8332 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
8333 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8334 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8335 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8336 			BPF_MOV64_IMM(BPF_REG_0, 0),
8337 			BPF_EXIT_INSN(),
8338 		},
8339 		.fixup_map_hash_8b = { 3 },
8340 		.errstr = "unbounded min value",
8341 		.result = REJECT,
8342 	},
8343 	{
8344 		"bounds checks mixing signed and unsigned, variant 2",
8345 		.insns = {
8346 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8347 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8348 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8349 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8350 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8351 				     BPF_FUNC_map_lookup_elem),
8352 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8353 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8354 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8355 			BPF_MOV64_IMM(BPF_REG_2, -1),
8356 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
8357 			BPF_MOV64_IMM(BPF_REG_8, 0),
8358 			BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_1),
8359 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
8360 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
8361 			BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
8362 			BPF_MOV64_IMM(BPF_REG_0, 0),
8363 			BPF_EXIT_INSN(),
8364 		},
8365 		.fixup_map_hash_8b = { 3 },
8366 		.errstr = "unbounded min value",
8367 		.result = REJECT,
8368 	},
8369 	{
8370 		"bounds checks mixing signed and unsigned, variant 3",
8371 		.insns = {
8372 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8373 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8374 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8375 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8376 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8377 				     BPF_FUNC_map_lookup_elem),
8378 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
8379 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8380 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8381 			BPF_MOV64_IMM(BPF_REG_2, -1),
8382 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4),
8383 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
8384 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
8385 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
8386 			BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
8387 			BPF_MOV64_IMM(BPF_REG_0, 0),
8388 			BPF_EXIT_INSN(),
8389 		},
8390 		.fixup_map_hash_8b = { 3 },
8391 		.errstr = "unbounded min value",
8392 		.result = REJECT,
8393 	},
8394 	{
8395 		"bounds checks mixing signed and unsigned, variant 4",
8396 		.insns = {
8397 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8398 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8399 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8400 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8401 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8402 				     BPF_FUNC_map_lookup_elem),
8403 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
8404 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8405 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8406 			BPF_MOV64_IMM(BPF_REG_2, 1),
8407 			BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
8408 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8409 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8410 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8411 			BPF_MOV64_IMM(BPF_REG_0, 0),
8412 			BPF_EXIT_INSN(),
8413 		},
8414 		.fixup_map_hash_8b = { 3 },
8415 		.result = ACCEPT,
8416 	},
8417 	{
8418 		"bounds checks mixing signed and unsigned, variant 5",
8419 		.insns = {
8420 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8421 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8422 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8423 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8424 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8425 				     BPF_FUNC_map_lookup_elem),
8426 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8427 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8428 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8429 			BPF_MOV64_IMM(BPF_REG_2, -1),
8430 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
8431 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 4),
8432 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4),
8433 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
8434 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8435 			BPF_MOV64_IMM(BPF_REG_0, 0),
8436 			BPF_EXIT_INSN(),
8437 		},
8438 		.fixup_map_hash_8b = { 3 },
8439 		.errstr = "unbounded min value",
8440 		.result = REJECT,
8441 	},
8442 	{
8443 		"bounds checks mixing signed and unsigned, variant 6",
8444 		.insns = {
8445 			BPF_MOV64_IMM(BPF_REG_2, 0),
8446 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
8447 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512),
8448 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8449 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16),
8450 			BPF_MOV64_IMM(BPF_REG_6, -1),
8451 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5),
8452 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_4, 1, 4),
8453 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
8454 			BPF_MOV64_IMM(BPF_REG_5, 0),
8455 			BPF_ST_MEM(BPF_H, BPF_REG_10, -512, 0),
8456 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8457 				     BPF_FUNC_skb_load_bytes),
8458 			BPF_MOV64_IMM(BPF_REG_0, 0),
8459 			BPF_EXIT_INSN(),
8460 		},
8461 		.errstr = "R4 min value is negative, either use unsigned",
8462 		.result = REJECT,
8463 	},
8464 	{
8465 		"bounds checks mixing signed and unsigned, variant 7",
8466 		.insns = {
8467 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8468 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8469 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8470 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8471 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8472 				     BPF_FUNC_map_lookup_elem),
8473 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
8474 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8475 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8476 			BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024),
8477 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
8478 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8479 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8480 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8481 			BPF_MOV64_IMM(BPF_REG_0, 0),
8482 			BPF_EXIT_INSN(),
8483 		},
8484 		.fixup_map_hash_8b = { 3 },
8485 		.result = ACCEPT,
8486 	},
8487 	{
8488 		"bounds checks mixing signed and unsigned, variant 8",
8489 		.insns = {
8490 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8491 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8492 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8493 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8494 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8495 				     BPF_FUNC_map_lookup_elem),
8496 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8497 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8498 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8499 			BPF_MOV64_IMM(BPF_REG_2, -1),
8500 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
8501 			BPF_MOV64_IMM(BPF_REG_0, 0),
8502 			BPF_EXIT_INSN(),
8503 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8504 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8505 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8506 			BPF_MOV64_IMM(BPF_REG_0, 0),
8507 			BPF_EXIT_INSN(),
8508 		},
8509 		.fixup_map_hash_8b = { 3 },
8510 		.errstr = "unbounded min value",
8511 		.result = REJECT,
8512 	},
8513 	{
8514 		"bounds checks mixing signed and unsigned, variant 9",
8515 		.insns = {
8516 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8517 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8518 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8519 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8520 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8521 				     BPF_FUNC_map_lookup_elem),
8522 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
8523 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8524 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8525 			BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL),
8526 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
8527 			BPF_MOV64_IMM(BPF_REG_0, 0),
8528 			BPF_EXIT_INSN(),
8529 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8530 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8531 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8532 			BPF_MOV64_IMM(BPF_REG_0, 0),
8533 			BPF_EXIT_INSN(),
8534 		},
8535 		.fixup_map_hash_8b = { 3 },
8536 		.result = ACCEPT,
8537 	},
8538 	{
8539 		"bounds checks mixing signed and unsigned, variant 10",
8540 		.insns = {
8541 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8542 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8543 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8544 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8545 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8546 				     BPF_FUNC_map_lookup_elem),
8547 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8548 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8549 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8550 			BPF_MOV64_IMM(BPF_REG_2, 0),
8551 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
8552 			BPF_MOV64_IMM(BPF_REG_0, 0),
8553 			BPF_EXIT_INSN(),
8554 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8555 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8556 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8557 			BPF_MOV64_IMM(BPF_REG_0, 0),
8558 			BPF_EXIT_INSN(),
8559 		},
8560 		.fixup_map_hash_8b = { 3 },
8561 		.errstr = "unbounded min value",
8562 		.result = REJECT,
8563 	},
8564 	{
8565 		"bounds checks mixing signed and unsigned, variant 11",
8566 		.insns = {
8567 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8568 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8569 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8570 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8571 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8572 				     BPF_FUNC_map_lookup_elem),
8573 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8574 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8575 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8576 			BPF_MOV64_IMM(BPF_REG_2, -1),
8577 			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
8578 			/* Dead branch. */
8579 			BPF_MOV64_IMM(BPF_REG_0, 0),
8580 			BPF_EXIT_INSN(),
8581 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8582 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8583 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8584 			BPF_MOV64_IMM(BPF_REG_0, 0),
8585 			BPF_EXIT_INSN(),
8586 		},
8587 		.fixup_map_hash_8b = { 3 },
8588 		.errstr = "unbounded min value",
8589 		.result = REJECT,
8590 	},
8591 	{
8592 		"bounds checks mixing signed and unsigned, variant 12",
8593 		.insns = {
8594 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8595 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8596 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8597 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8598 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8599 				     BPF_FUNC_map_lookup_elem),
8600 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8601 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8602 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8603 			BPF_MOV64_IMM(BPF_REG_2, -6),
8604 			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
8605 			BPF_MOV64_IMM(BPF_REG_0, 0),
8606 			BPF_EXIT_INSN(),
8607 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8608 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8609 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8610 			BPF_MOV64_IMM(BPF_REG_0, 0),
8611 			BPF_EXIT_INSN(),
8612 		},
8613 		.fixup_map_hash_8b = { 3 },
8614 		.errstr = "unbounded min value",
8615 		.result = REJECT,
8616 	},
8617 	{
8618 		"bounds checks mixing signed and unsigned, variant 13",
8619 		.insns = {
8620 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8621 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8622 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8623 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8624 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8625 				     BPF_FUNC_map_lookup_elem),
8626 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
8627 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8628 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8629 			BPF_MOV64_IMM(BPF_REG_2, 2),
8630 			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
8631 			BPF_MOV64_IMM(BPF_REG_7, 1),
8632 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 0, 2),
8633 			BPF_MOV64_IMM(BPF_REG_0, 0),
8634 			BPF_EXIT_INSN(),
8635 			BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_1),
8636 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 4, 2),
8637 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_7),
8638 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8639 			BPF_MOV64_IMM(BPF_REG_0, 0),
8640 			BPF_EXIT_INSN(),
8641 		},
8642 		.fixup_map_hash_8b = { 3 },
8643 		.errstr = "unbounded min value",
8644 		.result = REJECT,
8645 	},
8646 	{
8647 		"bounds checks mixing signed and unsigned, variant 14",
8648 		.insns = {
8649 			BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
8650 				    offsetof(struct __sk_buff, mark)),
8651 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8652 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8653 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8654 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8655 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8656 				     BPF_FUNC_map_lookup_elem),
8657 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
8658 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8659 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8660 			BPF_MOV64_IMM(BPF_REG_2, -1),
8661 			BPF_MOV64_IMM(BPF_REG_8, 2),
8662 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 42, 6),
8663 			BPF_JMP_REG(BPF_JSGT, BPF_REG_8, BPF_REG_1, 3),
8664 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
8665 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8666 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8667 			BPF_MOV64_IMM(BPF_REG_0, 0),
8668 			BPF_EXIT_INSN(),
8669 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3),
8670 			BPF_JMP_IMM(BPF_JA, 0, 0, -7),
8671 		},
8672 		.fixup_map_hash_8b = { 4 },
8673 		.errstr = "R0 invalid mem access 'inv'",
8674 		.result = REJECT,
8675 	},
8676 	{
8677 		"bounds checks mixing signed and unsigned, variant 15",
8678 		.insns = {
8679 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8680 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8681 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8682 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8683 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8684 				     BPF_FUNC_map_lookup_elem),
8685 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8686 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
8687 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
8688 			BPF_MOV64_IMM(BPF_REG_2, -6),
8689 			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
8690 			BPF_MOV64_IMM(BPF_REG_0, 0),
8691 			BPF_EXIT_INSN(),
8692 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8693 			BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 1, 2),
8694 			BPF_MOV64_IMM(BPF_REG_0, 0),
8695 			BPF_EXIT_INSN(),
8696 			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
8697 			BPF_MOV64_IMM(BPF_REG_0, 0),
8698 			BPF_EXIT_INSN(),
8699 		},
8700 		.fixup_map_hash_8b = { 3 },
8701 		.errstr = "unbounded min value",
8702 		.result = REJECT,
8703 		.result_unpriv = REJECT,
8704 	},
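	/* Subtracting two bounded map value bytes can go negative; neither the
	 * raw difference (variant 2) nor its logical right shift (variant 1)
	 * may be used as a map value offset.
	 */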
8705 	{
8706 		"subtraction bounds (map value) variant 1",
8707 		.insns = {
8708 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8709 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8710 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8711 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8712 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8713 				     BPF_FUNC_map_lookup_elem),
8714 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8715 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8716 			BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7),
8717 			BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
8718 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5),
8719 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
8720 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56),
8721 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8722 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8723 			BPF_EXIT_INSN(),
8724 			BPF_MOV64_IMM(BPF_REG_0, 0),
8725 			BPF_EXIT_INSN(),
8726 		},
8727 		.fixup_map_hash_8b = { 3 },
8728 		.errstr = "R0 max value is outside of the array range",
8729 		.result = REJECT,
8730 	},
8731 	{
8732 		"subtraction bounds (map value) variant 2",
8733 		.insns = {
8734 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8735 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8736 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8737 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8738 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8739 				     BPF_FUNC_map_lookup_elem),
8740 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
8741 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8742 			BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 6),
8743 			BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
8744 			BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 4),
8745 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
8746 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8747 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8748 			BPF_EXIT_INSN(),
8749 			BPF_MOV64_IMM(BPF_REG_0, 0),
8750 			BPF_EXIT_INSN(),
8751 		},
8752 		.fixup_map_hash_8b = { 3 },
8753 		.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
8754 		.result = REJECT,
8755 	},
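	/* MOV32 zero-extends into the upper 32 bits, so shifting the result
	 * right by 32 provably yields 0; contrast with the sign-extending
	 * BPF_MOV64_IMM tests that follow.
	 */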
8756 	{
8757 		"bounds check based on zero-extended MOV",
8758 		.insns = {
8759 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8760 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8761 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8762 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8763 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8764 				     BPF_FUNC_map_lookup_elem),
8765 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8766 			/* r2 = 0x0000'0000'ffff'ffff */
8767 			BPF_MOV32_IMM(BPF_REG_2, 0xffffffff),
8768 			/* r2 = 0 */
8769 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
8770 			/* no-op */
8771 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
8772 			/* access at offset 0 */
8773 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8774 			/* exit */
8775 			BPF_MOV64_IMM(BPF_REG_0, 0),
8776 			BPF_EXIT_INSN(),
8777 		},
8778 		.fixup_map_hash_8b = { 3 },
8779 		.result = ACCEPT
8780 	},
8781 	{
8782 		"bounds check based on sign-extended MOV. test1",
8783 		.insns = {
8784 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8785 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8786 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8787 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8788 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8789 				     BPF_FUNC_map_lookup_elem),
8790 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8791 			/* r2 = 0xffff'ffff'ffff'ffff */
8792 			BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
8793 			/* r2 = 0xffff'ffff */
8794 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
8795 			/* r0 = <oob pointer> */
8796 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
8797 			/* access to OOB pointer */
8798 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8799 			/* exit */
8800 			BPF_MOV64_IMM(BPF_REG_0, 0),
8801 			BPF_EXIT_INSN(),
8802 		},
8803 		.fixup_map_hash_8b = { 3 },
8804 		.errstr = "map_value pointer and 4294967295",
8805 		.result = REJECT
8806 	},
8807 	{
8808 		"bounds check based on sign-extended MOV. test2",
8809 		.insns = {
8810 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8811 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8812 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8813 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8814 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8815 				     BPF_FUNC_map_lookup_elem),
8816 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8817 			/* r2 = 0xffff'ffff'ffff'ffff */
8818 			BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
8819 			/* r2 = 0xfff'ffff */
8820 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36),
8821 			/* r0 = <oob pointer> */
8822 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
8823 			/* access to OOB pointer */
8824 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8825 			/* exit */
8826 			BPF_MOV64_IMM(BPF_REG_0, 0),
8827 			BPF_EXIT_INSN(),
8828 		},
8829 		.fixup_map_hash_8b = { 3 },
8830 		.errstr = "R0 min value is outside of the array range",
8831 		.result = REJECT
8832 	},
8833 	{
8834 		"bounds check based on reg_off + var_off + insn_off. test1",
8835 		.insns = {
8836 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
8837 				    offsetof(struct __sk_buff, mark)),
8838 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8839 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8840 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8841 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8842 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8843 				     BPF_FUNC_map_lookup_elem),
8844 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8845 			BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
8846 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1),
8847 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
8848 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
8849 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
8850 			BPF_MOV64_IMM(BPF_REG_0, 0),
8851 			BPF_EXIT_INSN(),
8852 		},
8853 		.fixup_map_hash_8b = { 4 },
8854 		.errstr = "value_size=8 off=1073741825",
8855 		.result = REJECT,
8856 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
8857 	},
8858 	{
8859 		"bounds check based on reg_off + var_off + insn_off. test2",
8860 		.insns = {
8861 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
8862 				    offsetof(struct __sk_buff, mark)),
8863 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8864 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8865 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8866 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8867 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8868 				     BPF_FUNC_map_lookup_elem),
8869 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
8870 			BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
8871 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1),
8872 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
8873 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
8874 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
8875 			BPF_MOV64_IMM(BPF_REG_0, 0),
8876 			BPF_EXIT_INSN(),
8877 		},
8878 		.fixup_map_hash_8b = { 4 },
8879 		.errstr = "value 1073741823",
8880 		.result = REJECT,
8881 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
8882 	},
8883 	{
8884 		"bounds check after truncation of non-boundary-crossing range",
8885 		.insns = {
8886 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8887 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8888 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8889 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8890 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8891 				     BPF_FUNC_map_lookup_elem),
8892 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8893 			/* r1 = [0x00, 0xff] */
8894 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8895 			BPF_MOV64_IMM(BPF_REG_2, 1),
8896 			/* r2 = 0x10'0000'0000 */
8897 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36),
8898 			/* r1 = [0x10'0000'0000, 0x10'0000'00ff] */
8899 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
8900 			/* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */
8901 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
8902 			/* r1 = [0x00, 0xff] */
8903 			BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff),
8904 			/* r1 = 0 */
8905 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
8906 			/* no-op */
8907 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8908 			/* access at offset 0 */
8909 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8910 			/* exit */
8911 			BPF_MOV64_IMM(BPF_REG_0, 0),
8912 			BPF_EXIT_INSN(),
8913 		},
8914 		.fixup_map_hash_8b = { 3 },
8915 		.result = ACCEPT
8916 	},
8917 	{
8918 		"bounds check after truncation of boundary-crossing range (1)",
8919 		.insns = {
8920 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8921 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8922 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8923 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8924 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8925 				     BPF_FUNC_map_lookup_elem),
8926 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8927 			/* r1 = [0x00, 0xff] */
8928 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8929 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
8930 			/* r1 = [0xffff'ff80, 0x1'0000'007f] */
8931 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
8932 			/* r1 = [0xffff'ff80, 0xffff'ffff] or
8933 			 *      [0x0000'0000, 0x0000'007f]
8934 			 */
8935 			BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0),
8936 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
8937 			/* r1 = [0x00, 0xff] or
8938 			 *      [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
8939 			 */
8940 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
8941 			/* r1 = 0 or
8942 			 *      [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
8943 			 */
8944 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
8945 			/* no-op or OOB pointer computation */
8946 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8947 			/* potentially OOB access */
8948 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8949 			/* exit */
8950 			BPF_MOV64_IMM(BPF_REG_0, 0),
8951 			BPF_EXIT_INSN(),
8952 		},
8953 		.fixup_map_hash_8b = { 3 },
8954 		/* not actually fully unbounded, but the bound is very high */
8955 		.errstr = "R0 unbounded memory access",
8956 		.result = REJECT
8957 	},
8958 	{
8959 		"bounds check after truncation of boundary-crossing range (2)",
8960 		.insns = {
8961 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
8962 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
8963 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
8964 			BPF_LD_MAP_FD(BPF_REG_1, 0),
8965 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
8966 				     BPF_FUNC_map_lookup_elem),
8967 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
8968 			/* r1 = [0x00, 0xff] */
8969 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
8970 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
8971 			/* r1 = [0xffff'ff80, 0x1'0000'007f] */
8972 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
8973 			/* r1 = [0xffff'ff80, 0xffff'ffff] or
8974 			 *      [0x0000'0000, 0x0000'007f]
8975 			 * difference to previous test: truncation via MOV32
8976 			 * instead of ALU32.
8977 			 */
8978 			BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),
8979 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
8980 			/* r1 = [0x00, 0xff] or
8981 			 *      [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
8982 			 */
8983 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
8984 			/* r1 = 0 or
8985 			 *      [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
8986 			 */
8987 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
8988 			/* no-op or OOB pointer computation */
8989 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
8990 			/* potentially OOB access */
8991 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
8992 			/* exit */
8993 			BPF_MOV64_IMM(BPF_REG_0, 0),
8994 			BPF_EXIT_INSN(),
8995 		},
8996 		.fixup_map_hash_8b = { 3 },
8997 		/* not actually fully unbounded, but the bound is very high */
8998 		.errstr = "R0 unbounded memory access",
8999 		.result = REJECT
9000 	},
9001 	{
9002 		"bounds check after wrapping 32-bit addition",
9003 		.insns = {
9004 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9005 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9006 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9007 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9008 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9009 				     BPF_FUNC_map_lookup_elem),
9010 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
9011 			/* r1 = 0x7fff'ffff */
9012 			BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff),
9013 			/* r1 = 0xffff'fffe */
9014 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
9015 			/* r1 = 0 */
9016 			BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2),
9017 			/* no-op */
9018 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9019 			/* access at offset 0 */
9020 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9021 			/* exit */
9022 			BPF_MOV64_IMM(BPF_REG_0, 0),
9023 			BPF_EXIT_INSN(),
9024 		},
9025 		.fixup_map_hash_8b = { 3 },
9026 		.result = ACCEPT
9027 	},
9028 	{
9029 		"bounds check after shift with oversized count operand",
9030 		.insns = {
9031 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9032 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9033 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9034 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9035 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9036 				     BPF_FUNC_map_lookup_elem),
9037 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
9038 			BPF_MOV64_IMM(BPF_REG_2, 32),
9039 			BPF_MOV64_IMM(BPF_REG_1, 1),
9040 			/* r1 = (u32)1 << (u32)32 = ? */
9041 			BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
9042 			/* r1 = [0x0000, 0xffff] */
9043 			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff),
9044 			/* computes unknown pointer, potentially OOB */
9045 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9046 			/* potentially OOB access */
9047 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9048 			/* exit */
9049 			BPF_MOV64_IMM(BPF_REG_0, 0),
9050 			BPF_EXIT_INSN(),
9051 		},
9052 		.fixup_map_hash_8b = { 3 },
9053 		.errstr = "R0 max value is outside of the array range",
9054 		.result = REJECT
9055 	},
9056 	{
9057 		"bounds check after right shift of maybe-negative number",
9058 		.insns = {
9059 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9060 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9061 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9062 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9063 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9064 				     BPF_FUNC_map_lookup_elem),
9065 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
9066 			/* r1 = [0x00, 0xff] */
9067 			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
9068 			/* r1 = [-0x01, 0xfe] */
9069 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
9070 			/* r1 = 0 or 0xff'ffff'ffff'ffff */
9071 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
9072 			/* r1 = 0 or 0xffff'ffff'ffff */
9073 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
9074 			/* computes unknown pointer, potentially OOB */
9075 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9076 			/* potentially OOB access */
9077 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9078 			/* exit */
9079 			BPF_MOV64_IMM(BPF_REG_0, 0),
9080 			BPF_EXIT_INSN(),
9081 		},
9082 		.fixup_map_hash_8b = { 3 },
9083 		.errstr = "R0 unbounded memory access",
9084 		.result = REJECT
9085 	},
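	/* The next four tests push the map value pointer offset towards the
	 * 2^31 boundary where off + size would overflow a signed 32-bit
	 * value; each access has to be rejected before that can happen.
	 */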
9086 	{
9087 		"bounds check map access with off+size signed 32bit overflow. test1",
9088 		.insns = {
9089 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9090 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9091 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9092 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9093 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9094 				     BPF_FUNC_map_lookup_elem),
9095 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
9096 			BPF_EXIT_INSN(),
9097 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe),
9098 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
9099 			BPF_JMP_A(0),
9100 			BPF_EXIT_INSN(),
9101 		},
9102 		.fixup_map_hash_8b = { 3 },
9103 		.errstr = "map_value pointer and 2147483646",
9104 		.result = REJECT
9105 	},
9106 	{
9107 		"bounds check map access with off+size signed 32bit overflow. test2",
9108 		.insns = {
9109 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9110 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9111 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9112 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9113 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9114 				     BPF_FUNC_map_lookup_elem),
9115 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
9116 			BPF_EXIT_INSN(),
9117 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
9118 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
9119 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
9120 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
9121 			BPF_JMP_A(0),
9122 			BPF_EXIT_INSN(),
9123 		},
9124 		.fixup_map_hash_8b = { 3 },
9125 		.errstr = "pointer offset 1073741822",
9126 		.result = REJECT
9127 	},
9128 	{
9129 		"bounds check map access with off+size signed 32bit overflow. test3",
9130 		.insns = {
9131 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9132 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9133 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9134 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9135 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9136 				     BPF_FUNC_map_lookup_elem),
9137 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
9138 			BPF_EXIT_INSN(),
9139 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
9140 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
9141 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
9142 			BPF_JMP_A(0),
9143 			BPF_EXIT_INSN(),
9144 		},
9145 		.fixup_map_hash_8b = { 3 },
9146 		.errstr = "pointer offset -1073741822",
9147 		.result = REJECT
9148 	},
9149 	{
9150 		"bounds check map access with off+size signed 32bit overflow. test4",
9151 		.insns = {
9152 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9153 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9154 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9155 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9156 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9157 				     BPF_FUNC_map_lookup_elem),
9158 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
9159 			BPF_EXIT_INSN(),
9160 			BPF_MOV64_IMM(BPF_REG_1, 1000000),
9161 			BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000),
9162 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9163 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
9164 			BPF_JMP_A(0),
9165 			BPF_EXIT_INSN(),
9166 		},
9167 		.fixup_map_hash_8b = { 3 },
9168 		.errstr = "map_value pointer and 1000000000000",
9169 		.result = REJECT
9170 	},
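	/* In both orderings one path leaves a scalar read from the map value
	 * in r0 and the other leaves a pointer to the stack frame; state
	 * pruning must not treat the two as equivalent, and returning the
	 * pointer is flagged as an address leak for unprivileged loads.
	 */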
9171 	{
9172 		"pointer/scalar confusion in state equality check (way 1)",
9173 		.insns = {
9174 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9175 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9176 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9177 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9178 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9179 				     BPF_FUNC_map_lookup_elem),
9180 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
9181 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
9182 			BPF_JMP_A(1),
9183 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
9184 			BPF_JMP_A(0),
9185 			BPF_EXIT_INSN(),
9186 		},
9187 		.fixup_map_hash_8b = { 3 },
9188 		.result = ACCEPT,
9189 		.retval = POINTER_VALUE,
9190 		.result_unpriv = REJECT,
9191 		.errstr_unpriv = "R0 leaks addr as return value"
9192 	},
9193 	{
9194 		"pointer/scalar confusion in state equality check (way 2)",
9195 		.insns = {
9196 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9197 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9198 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9199 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9200 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9201 				     BPF_FUNC_map_lookup_elem),
9202 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
9203 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
9204 			BPF_JMP_A(1),
9205 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
9206 			BPF_EXIT_INSN(),
9207 		},
9208 		.fixup_map_hash_8b = { 3 },
9209 		.result = ACCEPT,
9210 		.retval = POINTER_VALUE,
9211 		.result_unpriv = REJECT,
9212 		.errstr_unpriv = "R0 leaks addr as return value"
9213 	},
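	/* Pointers whose offset is only known as a range (var_off) may not be
	 * dereferenced here: neither into the context nor into the stack,
	 * whether directly or via a helper taking a stack buffer.
	 */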
9214 	{
9215 		"variable-offset ctx access",
9216 		.insns = {
9217 			/* Get an unknown value */
9218 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
9219 			/* Make it small and 4-byte aligned */
9220 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
9221 			/* add it to skb.  We now have either &skb->len or
9222 			 * &skb->pkt_type, but we don't know which
9223 			 */
9224 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
9225 			/* dereference it */
9226 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
9227 			BPF_EXIT_INSN(),
9228 		},
9229 		.errstr = "variable ctx access var_off=(0x0; 0x4)",
9230 		.result = REJECT,
9231 		.prog_type = BPF_PROG_TYPE_LWT_IN,
9232 	},
9233 	{
9234 		"variable-offset stack access",
9235 		.insns = {
9236 			/* Fill the top 8 bytes of the stack */
9237 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9238 			/* Get an unknown value */
9239 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
9240 			/* Make it small and 4-byte aligned */
9241 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
9242 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
9243 			/* add it to fp.  We now have either fp-4 or fp-8, but
9244 			 * we don't know which
9245 			 */
9246 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
9247 			/* dereference it */
9248 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
9249 			BPF_EXIT_INSN(),
9250 		},
9251 		.errstr = "variable stack access var_off=(0xfffffffffffffff8; 0x4)",
9252 		.result = REJECT,
9253 		.prog_type = BPF_PROG_TYPE_LWT_IN,
9254 	},
9255 	{
9256 		"indirect variable-offset stack access",
9257 		.insns = {
9258 			/* Fill the top 8 bytes of the stack */
9259 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9260 			/* Get an unknown value */
9261 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
9262 			/* Make it small and 4-byte aligned */
9263 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
9264 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
9265 			/* add it to fp.  We now have either fp-4 or fp-8, but
9266 			 * we don't know which
9267 			 */
9268 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
9269 			/* dereference it indirectly */
9270 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9271 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9272 				     BPF_FUNC_map_lookup_elem),
9273 			BPF_MOV64_IMM(BPF_REG_0, 0),
9274 			BPF_EXIT_INSN(),
9275 		},
9276 		.fixup_map_hash_8b = { 5 },
9277 		.errstr = "variable stack read R2",
9278 		.result = REJECT,
9279 		.prog_type = BPF_PROG_TYPE_LWT_IN,
9280 	},
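	/* Oversized immediate additions to the frame pointer are rejected
	 * outright, so a stack offset can never wrap around 32 bits.
	 */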
9281 	{
9282 		"direct stack access with 32-bit wraparound. test1",
9283 		.insns = {
9284 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
9285 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
9286 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
9287 			BPF_MOV32_IMM(BPF_REG_0, 0),
9288 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
9289 			BPF_EXIT_INSN()
9290 		},
9291 		.errstr = "fp pointer and 2147483647",
9292 		.result = REJECT
9293 	},
9294 	{
9295 		"direct stack access with 32-bit wraparound. test2",
9296 		.insns = {
9297 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
9298 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
9299 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
9300 			BPF_MOV32_IMM(BPF_REG_0, 0),
9301 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
9302 			BPF_EXIT_INSN()
9303 		},
9304 		.errstr = "fp pointer and 1073741823",
9305 		.result = REJECT
9306 	},
9307 	{
9308 		"direct stack access with 32-bit wraparound. test3",
9309 		.insns = {
9310 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
9311 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
9312 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
9313 			BPF_MOV32_IMM(BPF_REG_0, 0),
9314 			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
9315 			BPF_EXIT_INSN()
9316 		},
9317 		.errstr = "fp pointer offset 1073741822",
9318 		.result = REJECT
9319 	},
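	/* Both writes to r0 sit behind unsigned JGE-against-0 checks that are
	 * always taken, so r0 is never written; liveness tracking and write
	 * screening must not let pruning hide that, and the exit still sees
	 * an unreadable r0.
	 */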
9320 	{
9321 		"liveness pruning and write screening",
9322 		.insns = {
9323 			/* Get an unknown value */
9324 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
9325 			/* branch conditions teach us nothing about R2 */
9326 			BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
9327 			BPF_MOV64_IMM(BPF_REG_0, 0),
9328 			BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
9329 			BPF_MOV64_IMM(BPF_REG_0, 0),
9330 			BPF_EXIT_INSN(),
9331 		},
9332 		.errstr = "R0 !read_ok",
9333 		.result = REJECT,
9334 		.prog_type = BPF_PROG_TYPE_LWT_IN,
9335 	},
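	/* Only a signed upper bound (r1 < MAX_ENTRIES) is established before
	 * r1 is shifted and added to the map value pointer, so values with no
	 * lower bound slip through; the access is reported as unbounded, and
	 * as an address leak under unpriv.
	 */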
9336 	{
9337 		"varlen_map_value_access pruning",
9338 		.insns = {
9339 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
9340 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
9341 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
9342 			BPF_LD_MAP_FD(BPF_REG_1, 0),
9343 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9344 				     BPF_FUNC_map_lookup_elem),
9345 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
9346 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
9347 			BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
9348 			BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
9349 			BPF_MOV32_IMM(BPF_REG_1, 0),
9350 			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
9351 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
9352 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
9353 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
9354 				   offsetof(struct test_val, foo)),
9355 			BPF_EXIT_INSN(),
9356 		},
9357 		.fixup_map_hash_48b = { 3 },
9358 		.errstr_unpriv = "R0 leaks addr",
9359 		.errstr = "R0 unbounded memory access",
9360 		.result_unpriv = REJECT,
9361 		.result = REJECT,
9362 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9363 	},
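	/* BPF_END (byte swap) only exists in the 32-bit BPF_ALU class;
	 * encoding it with BPF_ALU64 produces opcode 0xd7, which must be
	 * reported as unknown.
	 */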
9364 	{
9365 		"invalid 64-bit BPF_END",
9366 		.insns = {
9367 			BPF_MOV32_IMM(BPF_REG_0, 0),
9368 			{
9369 				.code  = BPF_ALU64 | BPF_END | BPF_TO_LE,
9370 				.dst_reg = BPF_REG_0,
9371 				.src_reg = 0,
9372 				.off   = 0,
9373 				.imm   = 32,
9374 			},
9375 			BPF_EXIT_INSN(),
9376 		},
9377 		.errstr = "unknown opcode d7",
9378 		.result = REJECT,
9379 	},
9380 	{
9381 		"XDP, using ifindex from netdev",
9382 		.insns = {
9383 			BPF_MOV64_IMM(BPF_REG_0, 0),
9384 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9385 				    offsetof(struct xdp_md, ingress_ifindex)),
9386 			BPF_JMP_IMM(BPF_JLT, BPF_REG_2, 1, 1),
9387 			BPF_MOV64_IMM(BPF_REG_0, 1),
9388 			BPF_EXIT_INSN(),
9389 		},
9390 		.result = ACCEPT,
9391 		.prog_type = BPF_PROG_TYPE_XDP,
9392 		.retval = 1,
9393 	},
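	/* The "meta access" series checks that xdp->data_meta, which points at
	 * the metadata area directly in front of the packet, may only be
	 * dereferenced after a bounds check against data, and that pointers
	 * derived before bpf_xdp_adjust_meta are invalidated.
	 */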
9394 	{
9395 		"meta access, test1",
9396 		.insns = {
9397 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9398 				    offsetof(struct xdp_md, data_meta)),
9399 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9400 				    offsetof(struct xdp_md, data)),
9401 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
9402 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9403 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
9404 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9405 			BPF_MOV64_IMM(BPF_REG_0, 0),
9406 			BPF_EXIT_INSN(),
9407 		},
9408 		.result = ACCEPT,
9409 		.prog_type = BPF_PROG_TYPE_XDP,
9410 	},
9411 	{
9412 		"meta access, test2",
9413 		.insns = {
9414 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9415 				    offsetof(struct xdp_md, data_meta)),
9416 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9417 				    offsetof(struct xdp_md, data)),
9418 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
9419 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 8),
9420 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
9421 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
9422 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
9423 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
9424 			BPF_MOV64_IMM(BPF_REG_0, 0),
9425 			BPF_EXIT_INSN(),
9426 		},
9427 		.result = REJECT,
9428 		.errstr = "invalid access to packet, off=-8",
9429 		.prog_type = BPF_PROG_TYPE_XDP,
9430 	},
9431 	{
9432 		"meta access, test3",
9433 		.insns = {
9434 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9435 				    offsetof(struct xdp_md, data_meta)),
9436 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9437 				    offsetof(struct xdp_md, data_end)),
9438 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
9439 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9440 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
9441 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9442 			BPF_MOV64_IMM(BPF_REG_0, 0),
9443 			BPF_EXIT_INSN(),
9444 		},
9445 		.result = REJECT,
9446 		.errstr = "invalid access to packet",
9447 		.prog_type = BPF_PROG_TYPE_XDP,
9448 	},
9449 	{
9450 		"meta access, test4",
9451 		.insns = {
9452 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9453 				    offsetof(struct xdp_md, data_meta)),
9454 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9455 				    offsetof(struct xdp_md, data_end)),
9456 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
9457 				    offsetof(struct xdp_md, data)),
9458 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
9459 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9460 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
9461 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9462 			BPF_MOV64_IMM(BPF_REG_0, 0),
9463 			BPF_EXIT_INSN(),
9464 		},
9465 		.result = REJECT,
9466 		.errstr = "invalid access to packet",
9467 		.prog_type = BPF_PROG_TYPE_XDP,
9468 	},
9469 	{
9470 		"meta access, test5",
9471 		.insns = {
9472 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9473 				    offsetof(struct xdp_md, data_meta)),
9474 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
9475 				    offsetof(struct xdp_md, data)),
9476 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
9477 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9478 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_4, 3),
9479 			BPF_MOV64_IMM(BPF_REG_2, -8),
9480 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
9481 				     BPF_FUNC_xdp_adjust_meta),
9482 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
9483 			BPF_MOV64_IMM(BPF_REG_0, 0),
9484 			BPF_EXIT_INSN(),
9485 		},
9486 		.result = REJECT,
9487 		.errstr = "R3 !read_ok",
9488 		.prog_type = BPF_PROG_TYPE_XDP,
9489 	},
9490 	{
9491 		"meta access, test6",
9492 		.insns = {
9493 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9494 				    offsetof(struct xdp_md, data_meta)),
9495 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9496 				    offsetof(struct xdp_md, data)),
9497 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
9498 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9499 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
9500 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
9501 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_0, 1),
9502 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9503 			BPF_MOV64_IMM(BPF_REG_0, 0),
9504 			BPF_EXIT_INSN(),
9505 		},
9506 		.result = REJECT,
9507 		.errstr = "invalid access to packet",
9508 		.prog_type = BPF_PROG_TYPE_XDP,
9509 	},
9510 	{
9511 		"meta access, test7",
9512 		.insns = {
9513 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9514 				    offsetof(struct xdp_md, data_meta)),
9515 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9516 				    offsetof(struct xdp_md, data)),
9517 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
9518 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
9519 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
9520 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
9521 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
9522 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9523 			BPF_MOV64_IMM(BPF_REG_0, 0),
9524 			BPF_EXIT_INSN(),
9525 		},
9526 		.result = ACCEPT,
9527 		.prog_type = BPF_PROG_TYPE_XDP,
9528 	},
9529 	{
9530 		"meta access, test8",
9531 		.insns = {
9532 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9533 				    offsetof(struct xdp_md, data_meta)),
9534 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9535 				    offsetof(struct xdp_md, data)),
9536 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
9537 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
9538 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
9539 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9540 			BPF_MOV64_IMM(BPF_REG_0, 0),
9541 			BPF_EXIT_INSN(),
9542 		},
9543 		.result = ACCEPT,
9544 		.prog_type = BPF_PROG_TYPE_XDP,
9545 	},
9546 	{
9547 		"meta access, test9",
9548 		.insns = {
9549 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9550 				    offsetof(struct xdp_md, data_meta)),
9551 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9552 				    offsetof(struct xdp_md, data)),
9553 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
9554 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
9555 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
9556 			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
9557 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9558 			BPF_MOV64_IMM(BPF_REG_0, 0),
9559 			BPF_EXIT_INSN(),
9560 		},
9561 		.result = REJECT,
9562 		.errstr = "invalid access to packet",
9563 		.prog_type = BPF_PROG_TYPE_XDP,
9564 	},
9565 	{
9566 		"meta access, test10",
9567 		.insns = {
9568 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9569 				    offsetof(struct xdp_md, data_meta)),
9570 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9571 				    offsetof(struct xdp_md, data)),
9572 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
9573 				    offsetof(struct xdp_md, data_end)),
9574 			BPF_MOV64_IMM(BPF_REG_5, 42),
9575 			BPF_MOV64_IMM(BPF_REG_6, 24),
9576 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
9577 			BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
9578 			BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
9579 			BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
9580 			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_5),
9581 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
9582 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
9583 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
9584 			BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_5, 1),
9585 			BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
9586 			BPF_MOV64_IMM(BPF_REG_0, 0),
9587 			BPF_EXIT_INSN(),
9588 		},
9589 		.result = REJECT,
9590 		.errstr = "invalid access to packet",
9591 		.prog_type = BPF_PROG_TYPE_XDP,
9592 	},
9593 	{
9594 		"meta access, test11",
9595 		.insns = {
9596 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9597 				    offsetof(struct xdp_md, data_meta)),
9598 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9599 				    offsetof(struct xdp_md, data)),
9600 			BPF_MOV64_IMM(BPF_REG_5, 42),
9601 			BPF_MOV64_IMM(BPF_REG_6, 24),
9602 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
9603 			BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
9604 			BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
9605 			BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
9606 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_5),
9607 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
9608 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
9609 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
9610 			BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_3, 1),
9611 			BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_5, 0),
9612 			BPF_MOV64_IMM(BPF_REG_0, 0),
9613 			BPF_EXIT_INSN(),
9614 		},
9615 		.result = ACCEPT,
9616 		.prog_type = BPF_PROG_TYPE_XDP,
9617 	},
9618 	{
9619 		"meta access, test12",
9620 		.insns = {
9621 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9622 				    offsetof(struct xdp_md, data_meta)),
9623 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9624 				    offsetof(struct xdp_md, data)),
9625 			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
9626 				    offsetof(struct xdp_md, data_end)),
9627 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
9628 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
9629 			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 5),
9630 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
9631 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
9632 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
9633 			BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 1),
9634 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
9635 			BPF_MOV64_IMM(BPF_REG_0, 0),
9636 			BPF_EXIT_INSN(),
9637 		},
9638 		.result = ACCEPT,
9639 		.prog_type = BPF_PROG_TYPE_XDP,
9640 	},
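	/* The next two tests cover pointer arithmetic outside of packet range
	 * checks: offsetting the ctx pointer and then dereferencing it is
	 * rejected ("dereference of modified ctx ptr"), while subtracting two
	 * packet pointers is allowed and yields a scalar, here the packet
	 * length, so the second test returns TEST_DATA_LEN.
	 */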
9641 	{
9642 		"arithmetic ops make PTR_TO_CTX unusable",
9643 		.insns = {
9644 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
9645 				      offsetof(struct __sk_buff, data) -
9646 				      offsetof(struct __sk_buff, mark)),
9647 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9648 				    offsetof(struct __sk_buff, mark)),
9649 			BPF_EXIT_INSN(),
9650 		},
9651 		.errstr = "dereference of modified ctx ptr",
9652 		.result = REJECT,
9653 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
9654 	},
9655 	{
9656 		"pkt_end - pkt_start is allowed",
9657 		.insns = {
9658 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
9659 				    offsetof(struct __sk_buff, data_end)),
9660 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9661 				    offsetof(struct __sk_buff, data)),
9662 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
9663 			BPF_EXIT_INSN(),
9664 		},
9665 		.result = ACCEPT,
9666 		.retval = TEST_DATA_LEN,
9667 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
9668 	},
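	/* The two "pkt_end mangling" tests verify that data_end itself must
	 * not be modified: adding or subtracting a constant from pkt_end
	 * before using it in a range check is rejected ("R3 pointer
	 * arithmetic on pkt_end"), since a moved end pointer would make the
	 * subsequent bounds proof meaningless.
	 */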
9669 	{
9670 		"XDP pkt read, pkt_end mangling, bad access 1",
9671 		.insns = {
9672 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9673 				    offsetof(struct xdp_md, data)),
9674 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9675 				    offsetof(struct xdp_md, data_end)),
9676 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9677 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9678 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
9679 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9680 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9681 			BPF_MOV64_IMM(BPF_REG_0, 0),
9682 			BPF_EXIT_INSN(),
9683 		},
9684 		.errstr = "R3 pointer arithmetic on pkt_end",
9685 		.result = REJECT,
9686 		.prog_type = BPF_PROG_TYPE_XDP,
9687 	},
9688 	{
9689 		"XDP pkt read, pkt_end mangling, bad access 2",
9690 		.insns = {
9691 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9692 				    offsetof(struct xdp_md, data)),
9693 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9694 				    offsetof(struct xdp_md, data_end)),
9695 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9696 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9697 			BPF_ALU64_IMM(BPF_SUB, BPF_REG_3, 8),
9698 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9699 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9700 			BPF_MOV64_IMM(BPF_REG_0, 0),
9701 			BPF_EXIT_INSN(),
9702 		},
9703 		.errstr = "R3 pointer arithmetic on pkt_end",
9704 		.result = REJECT,
9705 		.prog_type = BPF_PROG_TYPE_XDP,
9706 	},
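	/* The run of tests below walks the matrix of packet bounds checks for
	 * XDP: a pointer derived from data plus a constant offset is compared
	 * against data_end with BPF_JGT/JGE/JLT/JLE, with the operands in
	 * either order.  "good access" variants only read bytes the taken
	 * branch has proven to lie inside [data, data_end); "bad access"
	 * variants read past the proven range or read on the branch where the
	 * check failed.  A rough restricted-C sketch of the pattern being
	 * encoded (illustrative only, not part of the test suite):
	 *
	 *	void *data     = (void *)(long)ctx->data;
	 *	void *data_end = (void *)(long)ctx->data_end;
	 *
	 *	if (data + 8 > data_end)
	 *		return XDP_DROP;
	 *	... any load within data[0] .. data[7] is provably in bounds ...
	 */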
9707 	{
9708 		"XDP pkt read, pkt_data' > pkt_end, good access",
9709 		.insns = {
9710 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9711 				    offsetof(struct xdp_md, data)),
9712 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9713 				    offsetof(struct xdp_md, data_end)),
9714 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9715 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9716 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9717 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9718 			BPF_MOV64_IMM(BPF_REG_0, 0),
9719 			BPF_EXIT_INSN(),
9720 		},
9721 		.result = ACCEPT,
9722 		.prog_type = BPF_PROG_TYPE_XDP,
9723 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9724 	},
9725 	{
9726 		"XDP pkt read, pkt_data' > pkt_end, bad access 1",
9727 		.insns = {
9728 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9729 				    offsetof(struct xdp_md, data)),
9730 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9731 				    offsetof(struct xdp_md, data_end)),
9732 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9733 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9734 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
9735 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9736 			BPF_MOV64_IMM(BPF_REG_0, 0),
9737 			BPF_EXIT_INSN(),
9738 		},
9739 		.errstr = "R1 offset is outside of the packet",
9740 		.result = REJECT,
9741 		.prog_type = BPF_PROG_TYPE_XDP,
9742 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9743 	},
9744 	{
9745 		"XDP pkt read, pkt_data' > pkt_end, bad access 2",
9746 		.insns = {
9747 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9748 				    offsetof(struct xdp_md, data)),
9749 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9750 				    offsetof(struct xdp_md, data_end)),
9751 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9752 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9753 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
9754 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9755 			BPF_MOV64_IMM(BPF_REG_0, 0),
9756 			BPF_EXIT_INSN(),
9757 		},
9758 		.errstr = "R1 offset is outside of the packet",
9759 		.result = REJECT,
9760 		.prog_type = BPF_PROG_TYPE_XDP,
9761 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9762 	},
9763 	{
9764 		"XDP pkt read, pkt_end > pkt_data', good access",
9765 		.insns = {
9766 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9767 				    offsetof(struct xdp_md, data)),
9768 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9769 				    offsetof(struct xdp_md, data_end)),
9770 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9771 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9772 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9773 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9774 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9775 			BPF_MOV64_IMM(BPF_REG_0, 0),
9776 			BPF_EXIT_INSN(),
9777 		},
9778 		.result = ACCEPT,
9779 		.prog_type = BPF_PROG_TYPE_XDP,
9780 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9781 	},
9782 	{
9783 		"XDP pkt read, pkt_end > pkt_data', bad access 1",
9784 		.insns = {
9785 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9786 				    offsetof(struct xdp_md, data)),
9787 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9788 				    offsetof(struct xdp_md, data_end)),
9789 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9790 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9791 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9792 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9793 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9794 			BPF_MOV64_IMM(BPF_REG_0, 0),
9795 			BPF_EXIT_INSN(),
9796 		},
9797 		.errstr = "R1 offset is outside of the packet",
9798 		.result = REJECT,
9799 		.prog_type = BPF_PROG_TYPE_XDP,
9800 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9801 	},
9802 	{
9803 		"XDP pkt read, pkt_end > pkt_data', bad access 2",
9804 		.insns = {
9805 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9806 				    offsetof(struct xdp_md, data)),
9807 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9808 				    offsetof(struct xdp_md, data_end)),
9809 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9810 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9811 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
9812 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9813 			BPF_MOV64_IMM(BPF_REG_0, 0),
9814 			BPF_EXIT_INSN(),
9815 		},
9816 		.errstr = "R1 offset is outside of the packet",
9817 		.result = REJECT,
9818 		.prog_type = BPF_PROG_TYPE_XDP,
9819 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9820 	},
9821 	{
9822 		"XDP pkt read, pkt_data' < pkt_end, good access",
9823 		.insns = {
9824 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9825 				    offsetof(struct xdp_md, data)),
9826 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9827 				    offsetof(struct xdp_md, data_end)),
9828 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9829 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9830 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9831 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9832 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9833 			BPF_MOV64_IMM(BPF_REG_0, 0),
9834 			BPF_EXIT_INSN(),
9835 		},
9836 		.result = ACCEPT,
9837 		.prog_type = BPF_PROG_TYPE_XDP,
9838 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9839 	},
9840 	{
9841 		"XDP pkt read, pkt_data' < pkt_end, bad access 1",
9842 		.insns = {
9843 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9844 				    offsetof(struct xdp_md, data)),
9845 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9846 				    offsetof(struct xdp_md, data_end)),
9847 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9848 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9849 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9850 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9851 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9852 			BPF_MOV64_IMM(BPF_REG_0, 0),
9853 			BPF_EXIT_INSN(),
9854 		},
9855 		.errstr = "R1 offset is outside of the packet",
9856 		.result = REJECT,
9857 		.prog_type = BPF_PROG_TYPE_XDP,
9858 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9859 	},
9860 	{
9861 		"XDP pkt read, pkt_data' < pkt_end, bad access 2",
9862 		.insns = {
9863 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9864 				    offsetof(struct xdp_md, data)),
9865 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9866 				    offsetof(struct xdp_md, data_end)),
9867 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9868 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9869 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
9870 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9871 			BPF_MOV64_IMM(BPF_REG_0, 0),
9872 			BPF_EXIT_INSN(),
9873 		},
9874 		.errstr = "R1 offset is outside of the packet",
9875 		.result = REJECT,
9876 		.prog_type = BPF_PROG_TYPE_XDP,
9877 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9878 	},
9879 	{
9880 		"XDP pkt read, pkt_end < pkt_data', good access",
9881 		.insns = {
9882 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9883 				    offsetof(struct xdp_md, data)),
9884 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9885 				    offsetof(struct xdp_md, data_end)),
9886 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9887 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9888 			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
9889 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9890 			BPF_MOV64_IMM(BPF_REG_0, 0),
9891 			BPF_EXIT_INSN(),
9892 		},
9893 		.result = ACCEPT,
9894 		.prog_type = BPF_PROG_TYPE_XDP,
9895 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9896 	},
9897 	{
9898 		"XDP pkt read, pkt_end < pkt_data', bad access 1",
9899 		.insns = {
9900 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9901 				    offsetof(struct xdp_md, data)),
9902 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9903 				    offsetof(struct xdp_md, data_end)),
9904 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9905 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9906 			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
9907 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
9908 			BPF_MOV64_IMM(BPF_REG_0, 0),
9909 			BPF_EXIT_INSN(),
9910 		},
9911 		.errstr = "R1 offset is outside of the packet",
9912 		.result = REJECT,
9913 		.prog_type = BPF_PROG_TYPE_XDP,
9914 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9915 	},
9916 	{
9917 		"XDP pkt read, pkt_end < pkt_data', bad access 2",
9918 		.insns = {
9919 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9920 				    offsetof(struct xdp_md, data)),
9921 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9922 				    offsetof(struct xdp_md, data_end)),
9923 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9924 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9925 			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
9926 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9927 			BPF_MOV64_IMM(BPF_REG_0, 0),
9928 			BPF_EXIT_INSN(),
9929 		},
9930 		.errstr = "R1 offset is outside of the packet",
9931 		.result = REJECT,
9932 		.prog_type = BPF_PROG_TYPE_XDP,
9933 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9934 	},
9935 	{
9936 		"XDP pkt read, pkt_data' >= pkt_end, good access",
9937 		.insns = {
9938 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9939 				    offsetof(struct xdp_md, data)),
9940 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9941 				    offsetof(struct xdp_md, data_end)),
9942 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9943 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9944 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
9945 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9946 			BPF_MOV64_IMM(BPF_REG_0, 0),
9947 			BPF_EXIT_INSN(),
9948 		},
9949 		.result = ACCEPT,
9950 		.prog_type = BPF_PROG_TYPE_XDP,
9951 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9952 	},
9953 	{
9954 		"XDP pkt read, pkt_data' >= pkt_end, bad access 1",
9955 		.insns = {
9956 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9957 				    offsetof(struct xdp_md, data)),
9958 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9959 				    offsetof(struct xdp_md, data_end)),
9960 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9961 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9962 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
9963 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
9964 			BPF_MOV64_IMM(BPF_REG_0, 0),
9965 			BPF_EXIT_INSN(),
9966 		},
9967 		.errstr = "R1 offset is outside of the packet",
9968 		.result = REJECT,
9969 		.prog_type = BPF_PROG_TYPE_XDP,
9970 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9971 	},
9972 	{
9973 		"XDP pkt read, pkt_data' >= pkt_end, bad access 2",
9974 		.insns = {
9975 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9976 				    offsetof(struct xdp_md, data)),
9977 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9978 				    offsetof(struct xdp_md, data_end)),
9979 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9980 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
9981 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
9982 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
9983 			BPF_MOV64_IMM(BPF_REG_0, 0),
9984 			BPF_EXIT_INSN(),
9985 		},
9986 		.errstr = "R1 offset is outside of the packet",
9987 		.result = REJECT,
9988 		.prog_type = BPF_PROG_TYPE_XDP,
9989 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
9990 	},
9991 	{
9992 		"XDP pkt read, pkt_end >= pkt_data', good access",
9993 		.insns = {
9994 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
9995 				    offsetof(struct xdp_md, data)),
9996 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
9997 				    offsetof(struct xdp_md, data_end)),
9998 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
9999 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10000 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
10001 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10002 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10003 			BPF_MOV64_IMM(BPF_REG_0, 0),
10004 			BPF_EXIT_INSN(),
10005 		},
10006 		.result = ACCEPT,
10007 		.prog_type = BPF_PROG_TYPE_XDP,
10008 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10009 	},
10010 	{
10011 		"XDP pkt read, pkt_end >= pkt_data', bad access 1",
10012 		.insns = {
10013 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10014 				    offsetof(struct xdp_md, data)),
10015 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10016 				    offsetof(struct xdp_md, data_end)),
10017 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10018 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10019 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
10020 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10021 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
10022 			BPF_MOV64_IMM(BPF_REG_0, 0),
10023 			BPF_EXIT_INSN(),
10024 		},
10025 		.errstr = "R1 offset is outside of the packet",
10026 		.result = REJECT,
10027 		.prog_type = BPF_PROG_TYPE_XDP,
10028 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10029 	},
10030 	{
10031 		"XDP pkt read, pkt_end >= pkt_data', bad access 2",
10032 		.insns = {
10033 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10034 				    offsetof(struct xdp_md, data)),
10035 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10036 				    offsetof(struct xdp_md, data_end)),
10037 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10038 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10039 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
10040 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10041 			BPF_MOV64_IMM(BPF_REG_0, 0),
10042 			BPF_EXIT_INSN(),
10043 		},
10044 		.errstr = "R1 offset is outside of the packet",
10045 		.result = REJECT,
10046 		.prog_type = BPF_PROG_TYPE_XDP,
10047 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10048 	},
10049 	{
10050 		"XDP pkt read, pkt_data' <= pkt_end, good access",
10051 		.insns = {
10052 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10053 				    offsetof(struct xdp_md, data)),
10054 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10055 				    offsetof(struct xdp_md, data_end)),
10056 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10057 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10058 			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
10059 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10060 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10061 			BPF_MOV64_IMM(BPF_REG_0, 0),
10062 			BPF_EXIT_INSN(),
10063 		},
10064 		.result = ACCEPT,
10065 		.prog_type = BPF_PROG_TYPE_XDP,
10066 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10067 	},
10068 	{
10069 		"XDP pkt read, pkt_data' <= pkt_end, bad access 1",
10070 		.insns = {
10071 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10072 				    offsetof(struct xdp_md, data)),
10073 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10074 				    offsetof(struct xdp_md, data_end)),
10075 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10076 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10077 			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
10078 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10079 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
10080 			BPF_MOV64_IMM(BPF_REG_0, 0),
10081 			BPF_EXIT_INSN(),
10082 		},
10083 		.errstr = "R1 offset is outside of the packet",
10084 		.result = REJECT,
10085 		.prog_type = BPF_PROG_TYPE_XDP,
10086 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10087 	},
10088 	{
10089 		"XDP pkt read, pkt_data' <= pkt_end, bad access 2",
10090 		.insns = {
10091 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10092 				    offsetof(struct xdp_md, data)),
10093 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10094 				    offsetof(struct xdp_md, data_end)),
10095 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10096 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10097 			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
10098 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10099 			BPF_MOV64_IMM(BPF_REG_0, 0),
10100 			BPF_EXIT_INSN(),
10101 		},
10102 		.errstr = "R1 offset is outside of the packet",
10103 		.result = REJECT,
10104 		.prog_type = BPF_PROG_TYPE_XDP,
10105 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10106 	},
10107 	{
10108 		"XDP pkt read, pkt_end <= pkt_data', good access",
10109 		.insns = {
10110 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10111 				    offsetof(struct xdp_md, data)),
10112 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10113 				    offsetof(struct xdp_md, data_end)),
10114 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10115 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10116 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
10117 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10118 			BPF_MOV64_IMM(BPF_REG_0, 0),
10119 			BPF_EXIT_INSN(),
10120 		},
10121 		.result = ACCEPT,
10122 		.prog_type = BPF_PROG_TYPE_XDP,
10123 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10124 	},
10125 	{
10126 		"XDP pkt read, pkt_end <= pkt_data', bad access 1",
10127 		.insns = {
10128 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10129 				    offsetof(struct xdp_md, data)),
10130 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10131 				    offsetof(struct xdp_md, data_end)),
10132 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10133 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10134 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
10135 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10136 			BPF_MOV64_IMM(BPF_REG_0, 0),
10137 			BPF_EXIT_INSN(),
10138 		},
10139 		.errstr = "R1 offset is outside of the packet",
10140 		.result = REJECT,
10141 		.prog_type = BPF_PROG_TYPE_XDP,
10142 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10143 	},
10144 	{
10145 		"XDP pkt read, pkt_end <= pkt_data', bad access 2",
10146 		.insns = {
10147 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10148 				    offsetof(struct xdp_md, data)),
10149 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10150 				    offsetof(struct xdp_md, data_end)),
10151 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10152 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10153 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
10154 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10155 			BPF_MOV64_IMM(BPF_REG_0, 0),
10156 			BPF_EXIT_INSN(),
10157 		},
10158 		.errstr = "R1 offset is outside of the packet",
10159 		.result = REJECT,
10160 		.prog_type = BPF_PROG_TYPE_XDP,
10161 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10162 	},
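	/* Same comparison matrix as above, but for the metadata area in front
	 * of the packet: a pointer derived from data_meta is range checked
	 * against data, since the region the verifier tracks for metadata is
	 * [data_meta, data).  Rough restricted-C sketch (illustrative only):
	 *
	 *	void *meta = (void *)(long)ctx->data_meta;
	 *	void *data = (void *)(long)ctx->data;
	 *
	 *	if (meta + 8 > data)
	 *		return XDP_DROP;
	 *	... loads within meta[0] .. meta[7] are provably in bounds ...
	 */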
10163 	{
10164 		"XDP pkt read, pkt_meta' > pkt_data, good access",
10165 		.insns = {
10166 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10167 				    offsetof(struct xdp_md, data_meta)),
10168 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10169 				    offsetof(struct xdp_md, data)),
10170 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10171 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10172 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
10173 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10174 			BPF_MOV64_IMM(BPF_REG_0, 0),
10175 			BPF_EXIT_INSN(),
10176 		},
10177 		.result = ACCEPT,
10178 		.prog_type = BPF_PROG_TYPE_XDP,
10179 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10180 	},
10181 	{
10182 		"XDP pkt read, pkt_meta' > pkt_data, bad access 1",
10183 		.insns = {
10184 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10185 				    offsetof(struct xdp_md, data_meta)),
10186 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10187 				    offsetof(struct xdp_md, data)),
10188 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10189 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10190 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
10191 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
10192 			BPF_MOV64_IMM(BPF_REG_0, 0),
10193 			BPF_EXIT_INSN(),
10194 		},
10195 		.errstr = "R1 offset is outside of the packet",
10196 		.result = REJECT,
10197 		.prog_type = BPF_PROG_TYPE_XDP,
10198 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10199 	},
10200 	{
10201 		"XDP pkt read, pkt_meta' > pkt_data, bad access 2",
10202 		.insns = {
10203 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10204 				    offsetof(struct xdp_md, data_meta)),
10205 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10206 				    offsetof(struct xdp_md, data)),
10207 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10208 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10209 			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
10210 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10211 			BPF_MOV64_IMM(BPF_REG_0, 0),
10212 			BPF_EXIT_INSN(),
10213 		},
10214 		.errstr = "R1 offset is outside of the packet",
10215 		.result = REJECT,
10216 		.prog_type = BPF_PROG_TYPE_XDP,
10217 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10218 	},
10219 	{
10220 		"XDP pkt read, pkt_data > pkt_meta', good access",
10221 		.insns = {
10222 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10223 				    offsetof(struct xdp_md, data_meta)),
10224 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10225 				    offsetof(struct xdp_md, data)),
10226 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10227 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10228 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
10229 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10230 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10231 			BPF_MOV64_IMM(BPF_REG_0, 0),
10232 			BPF_EXIT_INSN(),
10233 		},
10234 		.result = ACCEPT,
10235 		.prog_type = BPF_PROG_TYPE_XDP,
10236 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10237 	},
10238 	{
10239 		"XDP pkt read, pkt_data > pkt_meta', bad access 1",
10240 		.insns = {
10241 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10242 				    offsetof(struct xdp_md, data_meta)),
10243 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10244 				    offsetof(struct xdp_md, data)),
10245 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10246 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10247 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
10248 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10249 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10250 			BPF_MOV64_IMM(BPF_REG_0, 0),
10251 			BPF_EXIT_INSN(),
10252 		},
10253 		.errstr = "R1 offset is outside of the packet",
10254 		.result = REJECT,
10255 		.prog_type = BPF_PROG_TYPE_XDP,
10256 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10257 	},
10258 	{
10259 		"XDP pkt read, pkt_data > pkt_meta', bad access 2",
10260 		.insns = {
10261 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10262 				    offsetof(struct xdp_md, data_meta)),
10263 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10264 				    offsetof(struct xdp_md, data)),
10265 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10266 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10267 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
10268 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10269 			BPF_MOV64_IMM(BPF_REG_0, 0),
10270 			BPF_EXIT_INSN(),
10271 		},
10272 		.errstr = "R1 offset is outside of the packet",
10273 		.result = REJECT,
10274 		.prog_type = BPF_PROG_TYPE_XDP,
10275 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10276 	},
10277 	{
10278 		"XDP pkt read, pkt_meta' < pkt_data, good access",
10279 		.insns = {
10280 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10281 				    offsetof(struct xdp_md, data_meta)),
10282 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10283 				    offsetof(struct xdp_md, data)),
10284 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10285 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10286 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
10287 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10288 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10289 			BPF_MOV64_IMM(BPF_REG_0, 0),
10290 			BPF_EXIT_INSN(),
10291 		},
10292 		.result = ACCEPT,
10293 		.prog_type = BPF_PROG_TYPE_XDP,
10294 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10295 	},
10296 	{
10297 		"XDP pkt read, pkt_meta' < pkt_data, bad access 1",
10298 		.insns = {
10299 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10300 				    offsetof(struct xdp_md, data_meta)),
10301 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10302 				    offsetof(struct xdp_md, data)),
10303 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10304 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10305 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
10306 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10307 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10308 			BPF_MOV64_IMM(BPF_REG_0, 0),
10309 			BPF_EXIT_INSN(),
10310 		},
10311 		.errstr = "R1 offset is outside of the packet",
10312 		.result = REJECT,
10313 		.prog_type = BPF_PROG_TYPE_XDP,
10314 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10315 	},
10316 	{
10317 		"XDP pkt read, pkt_meta' < pkt_data, bad access 2",
10318 		.insns = {
10319 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10320 				    offsetof(struct xdp_md, data_meta)),
10321 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10322 				    offsetof(struct xdp_md, data)),
10323 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10324 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10325 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
10326 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10327 			BPF_MOV64_IMM(BPF_REG_0, 0),
10328 			BPF_EXIT_INSN(),
10329 		},
10330 		.errstr = "R1 offset is outside of the packet",
10331 		.result = REJECT,
10332 		.prog_type = BPF_PROG_TYPE_XDP,
10333 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10334 	},
10335 	{
10336 		"XDP pkt read, pkt_data < pkt_meta', good access",
10337 		.insns = {
10338 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10339 				    offsetof(struct xdp_md, data_meta)),
10340 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10341 				    offsetof(struct xdp_md, data)),
10342 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10343 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10344 			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
10345 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10346 			BPF_MOV64_IMM(BPF_REG_0, 0),
10347 			BPF_EXIT_INSN(),
10348 		},
10349 		.result = ACCEPT,
10350 		.prog_type = BPF_PROG_TYPE_XDP,
10351 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10352 	},
10353 	{
10354 		"XDP pkt read, pkt_data < pkt_meta', bad access 1",
10355 		.insns = {
10356 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10357 				    offsetof(struct xdp_md, data_meta)),
10358 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10359 				    offsetof(struct xdp_md, data)),
10360 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10361 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10362 			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
10363 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
10364 			BPF_MOV64_IMM(BPF_REG_0, 0),
10365 			BPF_EXIT_INSN(),
10366 		},
10367 		.errstr = "R1 offset is outside of the packet",
10368 		.result = REJECT,
10369 		.prog_type = BPF_PROG_TYPE_XDP,
10370 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10371 	},
10372 	{
10373 		"XDP pkt read, pkt_data < pkt_meta', bad access 2",
10374 		.insns = {
10375 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10376 				    offsetof(struct xdp_md, data_meta)),
10377 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10378 				    offsetof(struct xdp_md, data)),
10379 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10380 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10381 			BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
10382 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10383 			BPF_MOV64_IMM(BPF_REG_0, 0),
10384 			BPF_EXIT_INSN(),
10385 		},
10386 		.errstr = "R1 offset is outside of the packet",
10387 		.result = REJECT,
10388 		.prog_type = BPF_PROG_TYPE_XDP,
10389 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10390 	},
10391 	{
10392 		"XDP pkt read, pkt_meta' >= pkt_data, good access",
10393 		.insns = {
10394 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10395 				    offsetof(struct xdp_md, data_meta)),
10396 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10397 				    offsetof(struct xdp_md, data)),
10398 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10399 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10400 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
10401 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10402 			BPF_MOV64_IMM(BPF_REG_0, 0),
10403 			BPF_EXIT_INSN(),
10404 		},
10405 		.result = ACCEPT,
10406 		.prog_type = BPF_PROG_TYPE_XDP,
10407 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10408 	},
10409 	{
10410 		"XDP pkt read, pkt_meta' >= pkt_data, bad access 1",
10411 		.insns = {
10412 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10413 				    offsetof(struct xdp_md, data_meta)),
10414 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10415 				    offsetof(struct xdp_md, data)),
10416 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10417 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10418 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
10419 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10420 			BPF_MOV64_IMM(BPF_REG_0, 0),
10421 			BPF_EXIT_INSN(),
10422 		},
10423 		.errstr = "R1 offset is outside of the packet",
10424 		.result = REJECT,
10425 		.prog_type = BPF_PROG_TYPE_XDP,
10426 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10427 	},
10428 	{
10429 		"XDP pkt read, pkt_meta' >= pkt_data, bad access 2",
10430 		.insns = {
10431 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10432 				    offsetof(struct xdp_md, data_meta)),
10433 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10434 				    offsetof(struct xdp_md, data)),
10435 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10436 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10437 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
10438 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10439 			BPF_MOV64_IMM(BPF_REG_0, 0),
10440 			BPF_EXIT_INSN(),
10441 		},
10442 		.errstr = "R1 offset is outside of the packet",
10443 		.result = REJECT,
10444 		.prog_type = BPF_PROG_TYPE_XDP,
10445 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10446 	},
10447 	{
10448 		"XDP pkt read, pkt_data >= pkt_meta', good access",
10449 		.insns = {
10450 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10451 				    offsetof(struct xdp_md, data_meta)),
10452 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10453 				    offsetof(struct xdp_md, data)),
10454 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10455 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10456 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
10457 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10458 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10459 			BPF_MOV64_IMM(BPF_REG_0, 0),
10460 			BPF_EXIT_INSN(),
10461 		},
10462 		.result = ACCEPT,
10463 		.prog_type = BPF_PROG_TYPE_XDP,
10464 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10465 	},
10466 	{
10467 		"XDP pkt read, pkt_data >= pkt_meta', bad access 1",
10468 		.insns = {
10469 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10470 				    offsetof(struct xdp_md, data_meta)),
10471 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10472 				    offsetof(struct xdp_md, data)),
10473 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10474 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10475 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
10476 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10477 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
10478 			BPF_MOV64_IMM(BPF_REG_0, 0),
10479 			BPF_EXIT_INSN(),
10480 		},
10481 		.errstr = "R1 offset is outside of the packet",
10482 		.result = REJECT,
10483 		.prog_type = BPF_PROG_TYPE_XDP,
10484 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10485 	},
10486 	{
10487 		"XDP pkt read, pkt_data >= pkt_meta', bad access 2",
10488 		.insns = {
10489 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10490 				    offsetof(struct xdp_md, data_meta)),
10491 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10492 				    offsetof(struct xdp_md, data)),
10493 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10494 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10495 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
10496 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10497 			BPF_MOV64_IMM(BPF_REG_0, 0),
10498 			BPF_EXIT_INSN(),
10499 		},
10500 		.errstr = "R1 offset is outside of the packet",
10501 		.result = REJECT,
10502 		.prog_type = BPF_PROG_TYPE_XDP,
10503 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10504 	},
10505 	{
10506 		"XDP pkt read, pkt_meta' <= pkt_data, good access",
10507 		.insns = {
10508 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10509 				    offsetof(struct xdp_md, data_meta)),
10510 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10511 				    offsetof(struct xdp_md, data)),
10512 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10513 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10514 			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
10515 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10516 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10517 			BPF_MOV64_IMM(BPF_REG_0, 0),
10518 			BPF_EXIT_INSN(),
10519 		},
10520 		.result = ACCEPT,
10521 		.prog_type = BPF_PROG_TYPE_XDP,
10522 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10523 	},
10524 	{
10525 		"XDP pkt read, pkt_meta' <= pkt_data, bad access 1",
10526 		.insns = {
10527 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10528 				    offsetof(struct xdp_md, data_meta)),
10529 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10530 				    offsetof(struct xdp_md, data)),
10531 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10532 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10533 			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
10534 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
10535 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
10536 			BPF_MOV64_IMM(BPF_REG_0, 0),
10537 			BPF_EXIT_INSN(),
10538 		},
10539 		.errstr = "R1 offset is outside of the packet",
10540 		.result = REJECT,
10541 		.prog_type = BPF_PROG_TYPE_XDP,
10542 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10543 	},
10544 	{
10545 		"XDP pkt read, pkt_meta' <= pkt_data, bad access 2",
10546 		.insns = {
10547 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10548 				    offsetof(struct xdp_md, data_meta)),
10549 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10550 				    offsetof(struct xdp_md, data)),
10551 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10552 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10553 			BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
10554 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10555 			BPF_MOV64_IMM(BPF_REG_0, 0),
10556 			BPF_EXIT_INSN(),
10557 		},
10558 		.errstr = "R1 offset is outside of the packet",
10559 		.result = REJECT,
10560 		.prog_type = BPF_PROG_TYPE_XDP,
10561 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10562 	},
10563 	{
10564 		"XDP pkt read, pkt_data <= pkt_meta', good access",
10565 		.insns = {
10566 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10567 				    offsetof(struct xdp_md, data_meta)),
10568 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10569 				    offsetof(struct xdp_md, data)),
10570 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10571 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10572 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
10573 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10574 			BPF_MOV64_IMM(BPF_REG_0, 0),
10575 			BPF_EXIT_INSN(),
10576 		},
10577 		.result = ACCEPT,
10578 		.prog_type = BPF_PROG_TYPE_XDP,
10579 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10580 	},
10581 	{
10582 		"XDP pkt read, pkt_data <= pkt_meta', bad access 1",
10583 		.insns = {
10584 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10585 				    offsetof(struct xdp_md, data_meta)),
10586 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10587 				    offsetof(struct xdp_md, data)),
10588 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10589 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10590 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
10591 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
10592 			BPF_MOV64_IMM(BPF_REG_0, 0),
10593 			BPF_EXIT_INSN(),
10594 		},
10595 		.errstr = "R1 offset is outside of the packet",
10596 		.result = REJECT,
10597 		.prog_type = BPF_PROG_TYPE_XDP,
10598 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10599 	},
10600 	{
10601 		"XDP pkt read, pkt_data <= pkt_meta', bad access 2",
10602 		.insns = {
10603 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
10604 				    offsetof(struct xdp_md, data_meta)),
10605 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
10606 				    offsetof(struct xdp_md, data)),
10607 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
10608 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
10609 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
10610 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
10611 			BPF_MOV64_IMM(BPF_REG_0, 0),
10612 			BPF_EXIT_INSN(),
10613 		},
10614 		.errstr = "R1 offset is outside of the packet",
10615 		.result = REJECT,
10616 		.prog_type = BPF_PROG_TYPE_XDP,
10617 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10618 	},
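	/* The "check deducing bounds from const" tests exercise how signed
	 * jumps against immediates (BPF_JSGE/BPF_JSLE) narrow a register's
	 * min/max bounds, and how those bounds gate arithmetic mixing scalars
	 * with the ctx pointer: subtracting the ctx pointer from a scalar is
	 * always rejected, a ctx pointer offset by a scalar may not be
	 * dereferenced, and math with a scalar whose minimum is unbounded is
	 * refused outright.
	 */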
10619 	{
10620 		"check deducing bounds from const, 1",
10621 		.insns = {
10622 			BPF_MOV64_IMM(BPF_REG_0, 1),
10623 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 0),
10624 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10625 			BPF_EXIT_INSN(),
10626 		},
10627 		.result = REJECT,
10628 		.errstr = "R0 tried to subtract pointer from scalar",
10629 	},
10630 	{
10631 		"check deducing bounds from const, 2",
10632 		.insns = {
10633 			BPF_MOV64_IMM(BPF_REG_0, 1),
10634 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
10635 			BPF_EXIT_INSN(),
10636 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 1, 1),
10637 			BPF_EXIT_INSN(),
10638 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
10639 			BPF_EXIT_INSN(),
10640 		},
10641 		.result = ACCEPT,
10642 		.retval = 1,
10643 	},
10644 	{
10645 		"check deducing bounds from const, 3",
10646 		.insns = {
10647 			BPF_MOV64_IMM(BPF_REG_0, 0),
10648 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
10649 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10650 			BPF_EXIT_INSN(),
10651 		},
10652 		.result = REJECT,
10653 		.errstr = "R0 tried to subtract pointer from scalar",
10654 	},
10655 	{
10656 		"check deducing bounds from const, 4",
10657 		.insns = {
10658 			BPF_MOV64_IMM(BPF_REG_0, 0),
10659 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1),
10660 			BPF_EXIT_INSN(),
10661 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
10662 			BPF_EXIT_INSN(),
10663 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
10664 			BPF_EXIT_INSN(),
10665 		},
10666 		.result = ACCEPT,
10667 	},
10668 	{
10669 		"check deducing bounds from const, 5",
10670 		.insns = {
10671 			BPF_MOV64_IMM(BPF_REG_0, 0),
10672 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
10673 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10674 			BPF_EXIT_INSN(),
10675 		},
10676 		.result = REJECT,
10677 		.errstr = "R0 tried to subtract pointer from scalar",
10678 	},
10679 	{
10680 		"check deducing bounds from const, 6",
10681 		.insns = {
10682 			BPF_MOV64_IMM(BPF_REG_0, 0),
10683 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
10684 			BPF_EXIT_INSN(),
10685 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10686 			BPF_EXIT_INSN(),
10687 		},
10688 		.result = REJECT,
10689 		.errstr = "R0 tried to subtract pointer from scalar",
10690 	},
10691 	{
10692 		"check deducing bounds from const, 7",
10693 		.insns = {
10694 			BPF_MOV64_IMM(BPF_REG_0, ~0),
10695 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
10696 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
10697 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10698 				    offsetof(struct __sk_buff, mark)),
10699 			BPF_EXIT_INSN(),
10700 		},
10701 		.result = REJECT,
10702 		.errstr = "dereference of modified ctx ptr",
10703 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10704 	},
10705 	{
10706 		"check deducing bounds from const, 8",
10707 		.insns = {
10708 			BPF_MOV64_IMM(BPF_REG_0, ~0),
10709 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
10710 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
10711 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10712 				    offsetof(struct __sk_buff, mark)),
10713 			BPF_EXIT_INSN(),
10714 		},
10715 		.result = REJECT,
10716 		.errstr = "dereference of modified ctx ptr",
10717 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
10718 	},
10719 	{
10720 		"check deducing bounds from const, 9",
10721 		.insns = {
10722 			BPF_MOV64_IMM(BPF_REG_0, 0),
10723 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
10724 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10725 			BPF_EXIT_INSN(),
10726 		},
10727 		.result = REJECT,
10728 		.errstr = "R0 tried to subtract pointer from scalar",
10729 	},
10730 	{
10731 		"check deducing bounds from const, 10",
10732 		.insns = {
10733 			BPF_MOV64_IMM(BPF_REG_0, 0),
10734 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
10735 			/* Marks reg as unknown. */
10736 			BPF_ALU64_IMM(BPF_NEG, BPF_REG_0, 0),
10737 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
10738 			BPF_EXIT_INSN(),
10739 		},
10740 		.result = REJECT,
10741 		.errstr = "math between ctx pointer and register with unbounded min value is not allowed",
10742 	},
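	/* BPF_PROG_TYPE_CGROUP_SOCK programs must exit with R0 being a known
	 * scalar equal to 0 or 1.  The tests below accept a value masked with
	 * AND 1 and the constant 1, and reject unknown scalars, a ctx pointer
	 * in R0, and ranges that can exceed 1; the error string prints the
	 * register's tnum as (value; mask).
	 */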
10743 	{
10744 		"bpf_exit with invalid return code. test1",
10745 		.insns = {
10746 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10747 			BPF_EXIT_INSN(),
10748 		},
10749 		.errstr = "R0 has value (0x0; 0xffffffff)",
10750 		.result = REJECT,
10751 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10752 	},
10753 	{
10754 		"bpf_exit with invalid return code. test2",
10755 		.insns = {
10756 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10757 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
10758 			BPF_EXIT_INSN(),
10759 		},
10760 		.result = ACCEPT,
10761 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10762 	},
10763 	{
10764 		"bpf_exit with invalid return code. test3",
10765 		.insns = {
10766 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10767 			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 3),
10768 			BPF_EXIT_INSN(),
10769 		},
10770 		.errstr = "R0 has value (0x0; 0x3)",
10771 		.result = REJECT,
10772 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10773 	},
10774 	{
10775 		"bpf_exit with invalid return code. test4",
10776 		.insns = {
10777 			BPF_MOV64_IMM(BPF_REG_0, 1),
10778 			BPF_EXIT_INSN(),
10779 		},
10780 		.result = ACCEPT,
10781 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10782 	},
10783 	{
10784 		"bpf_exit with invalid return code. test5",
10785 		.insns = {
10786 			BPF_MOV64_IMM(BPF_REG_0, 2),
10787 			BPF_EXIT_INSN(),
10788 		},
10789 		.errstr = "R0 has value (0x2; 0x0)",
10790 		.result = REJECT,
10791 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10792 	},
10793 	{
10794 		"bpf_exit with invalid return code. test6",
10795 		.insns = {
10796 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
10797 			BPF_EXIT_INSN(),
10798 		},
10799 		.errstr = "R0 is not a known value (ctx)",
10800 		.result = REJECT,
10801 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10802 	},
10803 	{
10804 		"bpf_exit with invalid return code. test7",
10805 		.insns = {
10806 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
10807 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 4),
10808 			BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_2),
10809 			BPF_EXIT_INSN(),
10810 		},
10811 		.errstr = "R0 has unknown scalar value",
10812 		.result = REJECT,
10813 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
10814 	},
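	/* The "calls:" tests exercise bpf-to-bpf calls.  Such a call is
	 * encoded here as BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, imm):
	 * src_reg is 1 (BPF_PSEUDO_CALL) and imm is the instruction offset of
	 * the callee relative to the instruction following the call.  Helper
	 * calls, by contrast, use src_reg 0 and put the helper ID in imm.
	 */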
10815 	{
10816 		"calls: basic sanity",
10817 		.insns = {
10818 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10819 			BPF_MOV64_IMM(BPF_REG_0, 1),
10820 			BPF_EXIT_INSN(),
10821 			BPF_MOV64_IMM(BPF_REG_0, 2),
10822 			BPF_EXIT_INSN(),
10823 		},
10824 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10825 		.result = ACCEPT,
10826 	},
10827 	{
10828 		"calls: not on unprivileged",
10829 		.insns = {
10830 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10831 			BPF_MOV64_IMM(BPF_REG_0, 1),
10832 			BPF_EXIT_INSN(),
10833 			BPF_MOV64_IMM(BPF_REG_0, 2),
10834 			BPF_EXIT_INSN(),
10835 		},
10836 		.errstr_unpriv = "function calls to other bpf functions are allowed for root only",
10837 		.result_unpriv = REJECT,
10838 		.result = ACCEPT,
10839 		.retval = 1,
10840 	},
10841 	{
10842 		"calls: div by 0 in subprog",
10843 		.insns = {
10844 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10845 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
10846 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10847 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
10848 				    offsetof(struct __sk_buff, data_end)),
10849 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
10850 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
10851 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
10852 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
10853 			BPF_MOV64_IMM(BPF_REG_0, 1),
10854 			BPF_EXIT_INSN(),
10855 			BPF_MOV32_IMM(BPF_REG_2, 0),
10856 			BPF_MOV32_IMM(BPF_REG_3, 1),
10857 			BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2),
10858 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10859 				    offsetof(struct __sk_buff, data)),
10860 			BPF_EXIT_INSN(),
10861 		},
10862 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
10863 		.result = ACCEPT,
10864 		.retval = 1,
10865 	},
10866 	{
10867 		"calls: multiple ret types in subprog 1",
10868 		.insns = {
10869 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10870 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
10871 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10872 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
10873 				    offsetof(struct __sk_buff, data_end)),
10874 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
10875 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
10876 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
10877 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
10878 			BPF_MOV64_IMM(BPF_REG_0, 1),
10879 			BPF_EXIT_INSN(),
10880 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10881 				    offsetof(struct __sk_buff, data)),
10882 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
10883 			BPF_MOV32_IMM(BPF_REG_0, 42),
10884 			BPF_EXIT_INSN(),
10885 		},
10886 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
10887 		.result = REJECT,
10888 		.errstr = "R0 invalid mem access 'inv'",
10889 	},
10890 	{
10891 		"calls: multiple ret types in subprog 2",
10892 		.insns = {
10893 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10894 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
10895 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
10896 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
10897 				    offsetof(struct __sk_buff, data_end)),
10898 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
10899 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
10900 			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
10901 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
10902 			BPF_MOV64_IMM(BPF_REG_0, 1),
10903 			BPF_EXIT_INSN(),
10904 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10905 				    offsetof(struct __sk_buff, data)),
10906 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
10907 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9),
10908 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
10909 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
10910 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
10911 			BPF_LD_MAP_FD(BPF_REG_1, 0),
10912 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
10913 				     BPF_FUNC_map_lookup_elem),
10914 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
10915 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
10916 				    offsetof(struct __sk_buff, data)),
10917 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
10918 			BPF_EXIT_INSN(),
10919 		},
10920 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
10921 		.fixup_map_hash_8b = { 16 },
10922 		.result = REJECT,
10923 		.errstr = "R0 min value is outside of the array range",
10924 	},
10925 	{
10926 		"calls: overlapping caller/callee",
10927 		.insns = {
10928 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
10929 			BPF_MOV64_IMM(BPF_REG_0, 1),
10930 			BPF_EXIT_INSN(),
10931 		},
10932 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10933 		.errstr = "last insn is not an exit or jmp",
10934 		.result = REJECT,
10935 	},
10936 	{
10937 		"calls: wrong recursive calls",
10938 		.insns = {
10939 			BPF_JMP_IMM(BPF_JA, 0, 0, 4),
10940 			BPF_JMP_IMM(BPF_JA, 0, 0, 4),
10941 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
10942 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
10943 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
10944 			BPF_MOV64_IMM(BPF_REG_0, 1),
10945 			BPF_EXIT_INSN(),
10946 		},
10947 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10948 		.errstr = "jump out of range",
10949 		.result = REJECT,
10950 	},
10951 	{
10952 		"calls: wrong src reg",
10953 		.insns = {
10954 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 2, 0, 0),
10955 			BPF_MOV64_IMM(BPF_REG_0, 1),
10956 			BPF_EXIT_INSN(),
10957 		},
10958 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10959 		.errstr = "BPF_CALL uses reserved fields",
10960 		.result = REJECT,
10961 	},
10962 	{
10963 		"calls: wrong off value",
10964 		.insns = {
10965 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
10966 			BPF_MOV64_IMM(BPF_REG_0, 1),
10967 			BPF_EXIT_INSN(),
10968 			BPF_MOV64_IMM(BPF_REG_0, 2),
10969 			BPF_EXIT_INSN(),
10970 		},
10971 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10972 		.errstr = "BPF_CALL uses reserved fields",
10973 		.result = REJECT,
10974 	},
10975 	{
10976 		"calls: jump back loop",
10977 		.insns = {
10978 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
10979 			BPF_MOV64_IMM(BPF_REG_0, 1),
10980 			BPF_EXIT_INSN(),
10981 		},
10982 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10983 		.errstr = "back-edge from insn 0 to 0",
10984 		.result = REJECT,
10985 	},
10986 	{
10987 		"calls: conditional call",
10988 		.insns = {
10989 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
10990 				    offsetof(struct __sk_buff, mark)),
10991 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
10992 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
10993 			BPF_MOV64_IMM(BPF_REG_0, 1),
10994 			BPF_EXIT_INSN(),
10995 			BPF_MOV64_IMM(BPF_REG_0, 2),
10996 			BPF_EXIT_INSN(),
10997 		},
10998 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
10999 		.errstr = "jump out of range",
11000 		.result = REJECT,
11001 	},
11002 	{
11003 		"calls: conditional call 2",
11004 		.insns = {
11005 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11006 				    offsetof(struct __sk_buff, mark)),
11007 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
11008 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11009 			BPF_MOV64_IMM(BPF_REG_0, 1),
11010 			BPF_EXIT_INSN(),
11011 			BPF_MOV64_IMM(BPF_REG_0, 2),
11012 			BPF_EXIT_INSN(),
11013 			BPF_MOV64_IMM(BPF_REG_0, 3),
11014 			BPF_EXIT_INSN(),
11015 		},
11016 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11017 		.result = ACCEPT,
11018 	},
11019 	{
11020 		"calls: conditional call 3",
11021 		.insns = {
11022 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11023 				    offsetof(struct __sk_buff, mark)),
11024 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
11025 			BPF_JMP_IMM(BPF_JA, 0, 0, 4),
11026 			BPF_MOV64_IMM(BPF_REG_0, 1),
11027 			BPF_EXIT_INSN(),
11028 			BPF_MOV64_IMM(BPF_REG_0, 1),
11029 			BPF_JMP_IMM(BPF_JA, 0, 0, -6),
11030 			BPF_MOV64_IMM(BPF_REG_0, 3),
11031 			BPF_JMP_IMM(BPF_JA, 0, 0, -6),
11032 		},
11033 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11034 		.errstr = "back-edge from insn",
11035 		.result = REJECT,
11036 	},
11037 	{
11038 		"calls: conditional call 4",
11039 		.insns = {
11040 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11041 				    offsetof(struct __sk_buff, mark)),
11042 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
11043 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11044 			BPF_MOV64_IMM(BPF_REG_0, 1),
11045 			BPF_EXIT_INSN(),
11046 			BPF_MOV64_IMM(BPF_REG_0, 1),
11047 			BPF_JMP_IMM(BPF_JA, 0, 0, -5),
11048 			BPF_MOV64_IMM(BPF_REG_0, 3),
11049 			BPF_EXIT_INSN(),
11050 		},
11051 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11052 		.result = ACCEPT,
11053 	},
11054 	{
11055 		"calls: conditional call 5",
11056 		.insns = {
11057 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11058 				    offsetof(struct __sk_buff, mark)),
11059 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
11060 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11061 			BPF_MOV64_IMM(BPF_REG_0, 1),
11062 			BPF_EXIT_INSN(),
11063 			BPF_MOV64_IMM(BPF_REG_0, 1),
11064 			BPF_JMP_IMM(BPF_JA, 0, 0, -6),
11065 			BPF_MOV64_IMM(BPF_REG_0, 3),
11066 			BPF_EXIT_INSN(),
11067 		},
11068 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11069 		.errstr = "back-edge from insn",
11070 		.result = REJECT,
11071 	},
11072 	{
11073 		"calls: conditional call 6",
11074 		.insns = {
11075 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11076 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -2),
11077 			BPF_EXIT_INSN(),
11078 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11079 				    offsetof(struct __sk_buff, mark)),
11080 			BPF_EXIT_INSN(),
11081 		},
11082 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11083 		.errstr = "back-edge from insn",
11084 		.result = REJECT,
11085 	},
11086 	{
11087 		"calls: using r0 returned by callee",
11088 		.insns = {
11089 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11090 			BPF_EXIT_INSN(),
11091 			BPF_MOV64_IMM(BPF_REG_0, 2),
11092 			BPF_EXIT_INSN(),
11093 		},
11094 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11095 		.result = ACCEPT,
11096 	},
11097 	{
11098 		"calls: using uninit r0 from callee",
11099 		.insns = {
11100 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11101 			BPF_EXIT_INSN(),
11102 			BPF_EXIT_INSN(),
11103 		},
11104 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11105 		.errstr = "!read_ok",
11106 		.result = REJECT,
11107 	},
11108 	{
11109 		"calls: callee is using r1",
11110 		.insns = {
11111 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11112 			BPF_EXIT_INSN(),
11113 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11114 				    offsetof(struct __sk_buff, len)),
11115 			BPF_EXIT_INSN(),
11116 		},
11117 		.prog_type = BPF_PROG_TYPE_SCHED_ACT,
11118 		.result = ACCEPT,
11119 		.retval = TEST_DATA_LEN,
11120 	},
11121 	{
11122 		"calls: callee using args1",
11123 		.insns = {
11124 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11125 			BPF_EXIT_INSN(),
11126 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
11127 			BPF_EXIT_INSN(),
11128 		},
11129 		.errstr_unpriv = "allowed for root only",
11130 		.result_unpriv = REJECT,
11131 		.result = ACCEPT,
11132 		.retval = POINTER_VALUE,
11133 	},
11134 	{
11135 		"calls: callee using wrong args2",
11136 		.insns = {
11137 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11138 			BPF_EXIT_INSN(),
11139 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
11140 			BPF_EXIT_INSN(),
11141 		},
11142 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11143 		.errstr = "R2 !read_ok",
11144 		.result = REJECT,
11145 	},
11146 	{
11147 		"calls: callee using two args",
11148 		.insns = {
11149 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11150 			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
11151 				    offsetof(struct __sk_buff, len)),
11152 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
11153 				    offsetof(struct __sk_buff, len)),
11154 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11155 			BPF_EXIT_INSN(),
11156 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
11157 			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
11158 			BPF_EXIT_INSN(),
11159 		},
11160 		.errstr_unpriv = "allowed for root only",
11161 		.result_unpriv = REJECT,
11162 		.result = ACCEPT,
11163 		.retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN,
11164 	},
11165 	{
11166 		"calls: callee changing pkt pointers",
11167 		.insns = {
11168 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
11169 				    offsetof(struct xdp_md, data)),
11170 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
11171 				    offsetof(struct xdp_md, data_end)),
11172 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
11173 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
11174 			BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
11175 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11176 			/* clear_all_pkt_pointers() has to walk all frames
11177 			 * to make sure that pkt pointers in the caller
11178 			 * are cleared when the callee calls a helper that
11179 			 * adjusts the packet size
11180 			 */
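			/* once the callee below calls bpf_xdp_adjust_head(), R6 in
			 * this frame is no longer a valid packet pointer, so this
			 * load is expected to fail with "R6 invalid mem access 'inv'"
			 */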
11181 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
11182 			BPF_MOV32_IMM(BPF_REG_0, 0),
11183 			BPF_EXIT_INSN(),
11184 			BPF_MOV64_IMM(BPF_REG_2, 0),
11185 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11186 				     BPF_FUNC_xdp_adjust_head),
11187 			BPF_EXIT_INSN(),
11188 		},
11189 		.result = REJECT,
11190 		.errstr = "R6 invalid mem access 'inv'",
11191 		.prog_type = BPF_PROG_TYPE_XDP,
11192 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
11193 	},
11194 	{
11195 		"calls: two calls with args",
11196 		.insns = {
11197 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11198 			BPF_EXIT_INSN(),
11199 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11200 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
11201 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
11202 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11203 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11204 			BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
11205 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
11206 			BPF_EXIT_INSN(),
11207 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11208 				    offsetof(struct __sk_buff, len)),
11209 			BPF_EXIT_INSN(),
11210 		},
11211 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
11212 		.result = ACCEPT,
11213 		.retval = TEST_DATA_LEN + TEST_DATA_LEN,
11214 	},
11215 	{
11216 		"calls: calls with stack arith",
11217 		.insns = {
11218 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11219 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
11220 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11221 			BPF_EXIT_INSN(),
11222 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
11223 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11224 			BPF_EXIT_INSN(),
11225 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
11226 			BPF_MOV64_IMM(BPF_REG_0, 42),
11227 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
11228 			BPF_EXIT_INSN(),
11229 		},
11230 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
11231 		.result = ACCEPT,
11232 		.retval = 42,
11233 	},
11234 	{
11235 		"calls: calls with misaligned stack access",
11236 		.insns = {
11237 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11238 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
11239 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11240 			BPF_EXIT_INSN(),
11241 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
11242 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11243 			BPF_EXIT_INSN(),
11244 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
11245 			BPF_MOV64_IMM(BPF_REG_0, 42),
11246 			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
11247 			BPF_EXIT_INSN(),
11248 		},
11249 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
11250 		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
11251 		.errstr = "misaligned stack access",
11252 		.result = REJECT,
11253 	},
11254 	{
11255 		"calls: calls control flow, jump test",
11256 		.insns = {
11257 			BPF_MOV64_IMM(BPF_REG_0, 42),
11258 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11259 			BPF_MOV64_IMM(BPF_REG_0, 43),
11260 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11261 			BPF_JMP_IMM(BPF_JA, 0, 0, -3),
11262 			BPF_EXIT_INSN(),
11263 		},
11264 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
11265 		.result = ACCEPT,
11266 		.retval = 43,
11267 	},
11268 	{
11269 		"calls: calls control flow, jump test 2",
11270 		.insns = {
11271 			BPF_MOV64_IMM(BPF_REG_0, 42),
11272 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
11273 			BPF_MOV64_IMM(BPF_REG_0, 43),
11274 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
11275 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
11276 			BPF_EXIT_INSN(),
11277 		},
11278 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
11279 		.errstr = "jump out of range from insn 1 to 4",
11280 		.result = REJECT,
11281 	},
11282 	{
11283 		"calls: two calls with bad jump",
11284 		.insns = {
11285 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11286 			BPF_EXIT_INSN(),
11287 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11288 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
11289 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
11290 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11291 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11292 			BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
11293 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
11294 			BPF_EXIT_INSN(),
11295 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11296 				    offsetof(struct __sk_buff, len)),
11297 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
11298 			BPF_EXIT_INSN(),
11299 		},
11300 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11301 		.errstr = "jump out of range from insn 11 to 9",
11302 		.result = REJECT,
11303 	},
11304 	{
11305 		"calls: recursive call. test1",
11306 		.insns = {
11307 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11308 			BPF_EXIT_INSN(),
11309 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
11310 			BPF_EXIT_INSN(),
11311 		},
11312 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11313 		.errstr = "back-edge",
11314 		.result = REJECT,
11315 	},
11316 	{
11317 		"calls: recursive call. test2",
11318 		.insns = {
11319 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11320 			BPF_EXIT_INSN(),
11321 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
11322 			BPF_EXIT_INSN(),
11323 		},
11324 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11325 		.errstr = "back-edge",
11326 		.result = REJECT,
11327 	},
11328 	{
11329 		"calls: unreachable code",
11330 		.insns = {
11331 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11332 			BPF_EXIT_INSN(),
11333 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11334 			BPF_EXIT_INSN(),
11335 			BPF_MOV64_IMM(BPF_REG_0, 0),
11336 			BPF_EXIT_INSN(),
11337 			BPF_MOV64_IMM(BPF_REG_0, 0),
11338 			BPF_EXIT_INSN(),
11339 		},
11340 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11341 		.errstr = "unreachable insn 6",
11342 		.result = REJECT,
11343 	},
11344 	{
11345 		"calls: invalid call",
11346 		.insns = {
11347 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11348 			BPF_EXIT_INSN(),
11349 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
11350 			BPF_EXIT_INSN(),
11351 		},
11352 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11353 		.errstr = "invalid destination",
11354 		.result = REJECT,
11355 	},
11356 	{
11357 		"calls: invalid call 2",
11358 		.insns = {
11359 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11360 			BPF_EXIT_INSN(),
11361 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
11362 			BPF_EXIT_INSN(),
11363 		},
11364 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11365 		.errstr = "invalid destination",
11366 		.result = REJECT,
11367 	},
11368 	{
11369 		"calls: jumping across function bodies. test1",
11370 		.insns = {
11371 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11372 			BPF_MOV64_IMM(BPF_REG_0, 0),
11373 			BPF_EXIT_INSN(),
11374 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
11375 			BPF_EXIT_INSN(),
11376 		},
11377 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11378 		.errstr = "jump out of range",
11379 		.result = REJECT,
11380 	},
11381 	{
11382 		"calls: jumping across function bodies. test2",
11383 		.insns = {
11384 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
11385 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11386 			BPF_MOV64_IMM(BPF_REG_0, 0),
11387 			BPF_EXIT_INSN(),
11388 			BPF_EXIT_INSN(),
11389 		},
11390 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11391 		.errstr = "jump out of range",
11392 		.result = REJECT,
11393 	},
11394 	{
11395 		"calls: call without exit",
11396 		.insns = {
11397 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11398 			BPF_EXIT_INSN(),
11399 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11400 			BPF_EXIT_INSN(),
11401 			BPF_MOV64_IMM(BPF_REG_0, 0),
11402 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
11403 		},
11404 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11405 		.errstr = "not an exit",
11406 		.result = REJECT,
11407 	},
11408 	{
11409 		"calls: call into middle of ld_imm64",
11410 		.insns = {
11411 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11412 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11413 			BPF_MOV64_IMM(BPF_REG_0, 0),
11414 			BPF_EXIT_INSN(),
11415 			BPF_LD_IMM64(BPF_REG_0, 0),
11416 			BPF_EXIT_INSN(),
11417 		},
11418 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11419 		.errstr = "last insn",
11420 		.result = REJECT,
11421 	},
11422 	{
11423 		"calls: call into middle of other call",
11424 		.insns = {
11425 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11426 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11427 			BPF_MOV64_IMM(BPF_REG_0, 0),
11428 			BPF_EXIT_INSN(),
11429 			BPF_MOV64_IMM(BPF_REG_0, 0),
11430 			BPF_MOV64_IMM(BPF_REG_0, 0),
11431 			BPF_EXIT_INSN(),
11432 		},
11433 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11434 		.errstr = "last insn",
11435 		.result = REJECT,
11436 	},
11437 	{
11438 		"calls: ld_abs with changing ctx data in callee",
11439 		.insns = {
11440 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11441 			BPF_LD_ABS(BPF_B, 0),
11442 			BPF_LD_ABS(BPF_H, 0),
11443 			BPF_LD_ABS(BPF_W, 0),
11444 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
11445 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
11446 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
11447 			BPF_LD_ABS(BPF_B, 0),
11448 			BPF_LD_ABS(BPF_H, 0),
11449 			BPF_LD_ABS(BPF_W, 0),
11450 			BPF_EXIT_INSN(),
11451 			BPF_MOV64_IMM(BPF_REG_2, 1),
11452 			BPF_MOV64_IMM(BPF_REG_3, 2),
11453 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11454 				     BPF_FUNC_skb_vlan_push),
11455 			BPF_EXIT_INSN(),
11456 		},
11457 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
11458 		.errstr = "BPF_LD_[ABS|IND] instructions cannot be mixed",
11459 		.result = REJECT,
11460 	},
11461 	{
11462 		"calls: two calls with bad fallthrough",
11463 		.insns = {
11464 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11465 			BPF_EXIT_INSN(),
11466 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11467 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
11468 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
11469 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11470 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11471 			BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
11472 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
11473 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
11474 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
11475 				    offsetof(struct __sk_buff, len)),
11476 			BPF_EXIT_INSN(),
11477 		},
11478 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
11479 		.errstr = "not an exit",
11480 		.result = REJECT,
11481 	},
11482 	{
11483 		"calls: two calls with stack read",
11484 		.insns = {
11485 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11486 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11487 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11488 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11489 			BPF_EXIT_INSN(),
11490 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11491 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
11492 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
11493 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11494 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11495 			BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
11496 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
11497 			BPF_EXIT_INSN(),
11498 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
11499 			BPF_EXIT_INSN(),
11500 		},
11501 		.prog_type = BPF_PROG_TYPE_XDP,
11502 		.result = ACCEPT,
11503 	},
11504 	{
11505 		"calls: two calls with stack write",
11506 		.insns = {
11507 			/* main prog */
11508 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11509 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11510 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11511 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11512 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11513 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11514 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
11515 			BPF_EXIT_INSN(),
11516 
11517 			/* subprog 1 */
11518 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11519 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11520 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
11521 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
11522 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11523 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11524 			BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
11525 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
11526 			/* write into stack frame of main prog */
11527 			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
11528 			BPF_EXIT_INSN(),
11529 
11530 			/* subprog 2 */
11531 			/* read from stack frame of main prog */
11532 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
11533 			BPF_EXIT_INSN(),
11534 		},
11535 		.prog_type = BPF_PROG_TYPE_XDP,
11536 		.result = ACCEPT,
11537 	},
11538 	{
11539 		"calls: stack overflow using two frames (pre-call access)",
11540 		.insns = {
11541 			/* prog 1 */
11542 			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11543 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
11544 			BPF_EXIT_INSN(),
11545 
11546 			/* prog 2 */
11547 			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11548 			BPF_MOV64_IMM(BPF_REG_0, 0),
11549 			BPF_EXIT_INSN(),
11550 		},
11551 		.prog_type = BPF_PROG_TYPE_XDP,
11552 		.errstr = "combined stack size",
11553 		.result = REJECT,
11554 	},
11555 	{
11556 		"calls: stack overflow using two frames (post-call access)",
11557 		.insns = {
11558 			/* prog 1 */
11559 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
11560 			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11561 			BPF_EXIT_INSN(),
11562 
11563 			/* prog 2 */
11564 			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11565 			BPF_MOV64_IMM(BPF_REG_0, 0),
11566 			BPF_EXIT_INSN(),
11567 		},
11568 		.prog_type = BPF_PROG_TYPE_XDP,
11569 		.errstr = "combined stack size",
11570 		.result = REJECT,
11571 	},
11572 	{
11573 		"calls: stack depth check using three frames. test1",
11574 		.insns = {
11575 			/* main */
11576 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
11577 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
11578 			BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
11579 			BPF_MOV64_IMM(BPF_REG_0, 0),
11580 			BPF_EXIT_INSN(),
11581 			/* A */
11582 			BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
11583 			BPF_EXIT_INSN(),
11584 			/* B */
11585 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
11586 			BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
11587 			BPF_EXIT_INSN(),
11588 		},
11589 		.prog_type = BPF_PROG_TYPE_XDP,
11590 		/* stack_main=32, stack_A=256, stack_B=64
11591 		 * and max(main+A, main+A+B) < 512
11592 		 */
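		/* i.e. max(32 + 256, 32 + 256 + 64) = 352, below the 512 limit */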
11593 		.result = ACCEPT,
11594 	},
11595 	{
11596 		"calls: stack depth check using three frames. test2",
11597 		.insns = {
11598 			/* main */
11599 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
11600 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
11601 			BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
11602 			BPF_MOV64_IMM(BPF_REG_0, 0),
11603 			BPF_EXIT_INSN(),
11604 			/* A */
11605 			BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
11606 			BPF_EXIT_INSN(),
11607 			/* B */
11608 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
11609 			BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
11610 			BPF_EXIT_INSN(),
11611 		},
11612 		.prog_type = BPF_PROG_TYPE_XDP,
11613 		/* stack_main=32, stack_A=64, stack_B=256
11614 		 * and max(main+A, main+A+B) < 512
11615 		 */
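		/* i.e. max(32 + 64, 32 + 64 + 256) = 352, below the 512 limit */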
11616 		.result = ACCEPT,
11617 	},
11618 	{
11619 		"calls: stack depth check using three frames. test3",
11620 		.insns = {
11621 			/* main */
11622 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11623 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
11624 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11625 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
11626 			BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
11627 			BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
11628 			BPF_MOV64_IMM(BPF_REG_0, 0),
11629 			BPF_EXIT_INSN(),
11630 			/* A */
11631 			BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
11632 			BPF_EXIT_INSN(),
11633 			BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
11634 			BPF_JMP_IMM(BPF_JA, 0, 0, -3),
11635 			/* B */
11636 			BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
11637 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
11638 			BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
11639 			BPF_EXIT_INSN(),
11640 		},
11641 		.prog_type = BPF_PROG_TYPE_XDP,
11642 		/* stack_main=64, stack_A=224, stack_B=256
11643 		 * and max(main+A, main+A+B) > 512
11644 		 */
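		/* i.e. max(64 + 224, 64 + 224 + 256) = 544, over the 512 limit */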
11645 		.errstr = "combined stack",
11646 		.result = REJECT,
11647 	},
11648 	{
11649 		"calls: stack depth check using three frames. test4",
11650 		/* void main(void) {
11651 		 *   func1(0);
11652 		 *   func1(1);
11653 		 *   func2(1);
11654 		 * }
11655 		 * void func1(int alloc_or_recurse) {
11656 		 *   if (alloc_or_recurse) {
11657 		 *     frame_pointer[-300] = 0;
11658 		 *   } else {
11659 		 *     func2(alloc_or_recurse);
11660 		 *   }
11661 		 * }
11662 		 * void func2(int alloc_or_recurse) {
11663 		 *   if (alloc_or_recurse) {
11664 		 *     frame_pointer[-300] = 0;
11665 		 *   }
11666 		 * }
11667 		 */
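		/* The verifier accounts the worst-case stack of each function,
		 * so func1 and func2 each contribute 300 bytes no matter which
		 * branch runs, and the chain main -> func1 -> func2 exceeds the
		 * 512 byte limit ("combined stack").
		 */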
11668 		.insns = {
11669 			/* main */
11670 			BPF_MOV64_IMM(BPF_REG_1, 0),
11671 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
11672 			BPF_MOV64_IMM(BPF_REG_1, 1),
11673 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
11674 			BPF_MOV64_IMM(BPF_REG_1, 1),
11675 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
11676 			BPF_MOV64_IMM(BPF_REG_0, 0),
11677 			BPF_EXIT_INSN(),
11678 			/* A */
11679 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
11680 			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11681 			BPF_EXIT_INSN(),
11682 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
11683 			BPF_EXIT_INSN(),
11684 			/* B */
11685 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
11686 			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
11687 			BPF_EXIT_INSN(),
11688 		},
11689 		.prog_type = BPF_PROG_TYPE_XDP,
11690 		.result = REJECT,
11691 		.errstr = "combined stack",
11692 	},
11693 	{
11694 		"calls: stack depth check using three frames. test5",
11695 		.insns = {
11696 			/* main */
11697 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
11698 			BPF_EXIT_INSN(),
11699 			/* A */
11700 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
11701 			BPF_EXIT_INSN(),
11702 			/* B */
11703 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
11704 			BPF_EXIT_INSN(),
11705 			/* C */
11706 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
11707 			BPF_EXIT_INSN(),
11708 			/* D */
11709 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
11710 			BPF_EXIT_INSN(),
11711 			/* E */
11712 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
11713 			BPF_EXIT_INSN(),
11714 			/* F */
11715 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
11716 			BPF_EXIT_INSN(),
11717 			/* G */
11718 			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
11719 			BPF_EXIT_INSN(),
11720 			/* H */
11721 			BPF_MOV64_IMM(BPF_REG_0, 0),
11722 			BPF_EXIT_INSN(),
11723 		},
11724 		.prog_type = BPF_PROG_TYPE_XDP,
11725 		.errstr = "call stack",
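		/* main plus A..H is nine nested frames, one more than the
		 * verifier's frame limit (MAX_CALL_FRAMES is 8), hence the
		 * "call stack" error
		 */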
11726 		.result = REJECT,
11727 	},
11728 	{
11729 		"calls: spill into caller stack frame",
11730 		.insns = {
11731 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11732 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11733 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11734 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11735 			BPF_EXIT_INSN(),
11736 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
11737 			BPF_MOV64_IMM(BPF_REG_0, 0),
11738 			BPF_EXIT_INSN(),
11739 		},
11740 		.prog_type = BPF_PROG_TYPE_XDP,
11741 		.errstr = "cannot spill",
11742 		.result = REJECT,
11743 	},
11744 	{
11745 		"calls: write into caller stack frame",
11746 		.insns = {
11747 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11748 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11749 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11750 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11751 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
11752 			BPF_EXIT_INSN(),
11753 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
11754 			BPF_MOV64_IMM(BPF_REG_0, 0),
11755 			BPF_EXIT_INSN(),
11756 		},
11757 		.prog_type = BPF_PROG_TYPE_XDP,
11758 		.result = ACCEPT,
11759 		.retval = 42,
11760 	},
11761 	{
11762 		"calls: write into callee stack frame",
11763 		.insns = {
11764 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11765 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
11766 			BPF_EXIT_INSN(),
11767 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
11768 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
11769 			BPF_EXIT_INSN(),
11770 		},
11771 		.prog_type = BPF_PROG_TYPE_XDP,
11772 		.errstr = "cannot return stack pointer",
11773 		.result = REJECT,
11774 	},
11775 	{
11776 		"calls: two calls with stack write and void return",
11777 		.insns = {
11778 			/* main prog */
11779 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11780 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11781 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11782 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11783 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11784 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11785 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
11786 			BPF_EXIT_INSN(),
11787 
11788 			/* subprog 1 */
11789 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11790 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11791 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11792 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
11793 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11794 			BPF_EXIT_INSN(),
11795 
11796 			/* subprog 2 */
11797 			/* write into stack frame of main prog */
11798 			BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
11799 			BPF_EXIT_INSN(), /* void return */
11800 		},
11801 		.prog_type = BPF_PROG_TYPE_XDP,
11802 		.result = ACCEPT,
11803 	},
11804 	{
11805 		"calls: ambiguous return value",
11806 		.insns = {
11807 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11808 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
11809 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
11810 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
11811 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11812 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
11813 			BPF_EXIT_INSN(),
11814 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
11815 			BPF_MOV64_IMM(BPF_REG_0, 0),
11816 			BPF_EXIT_INSN(),
11817 		},
11818 		.errstr_unpriv = "allowed for root only",
11819 		.result_unpriv = REJECT,
11820 		.errstr = "R0 !read_ok",
11821 		.result = REJECT,
11822 	},
11823 	{
11824 		"calls: two calls that return map_value",
11825 		.insns = {
11826 			/* main prog */
11827 			/* pass fp-16, fp-8 into a function */
11828 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11829 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11830 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11831 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11832 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
11833 
11834 			/* fetch map_value_ptr from the stack of this function */
11835 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
11836 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
11837 			/* write into map value */
11838 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11839 			/* fetch second map_value_ptr from the stack */
11840 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
11841 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
11842 			/* write into map value */
11843 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11844 			BPF_MOV64_IMM(BPF_REG_0, 0),
11845 			BPF_EXIT_INSN(),
11846 
11847 			/* subprog 1 */
11848 			/* call 3rd function twice */
11849 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11850 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11851 			/* first time with fp-8 */
11852 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
11853 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
11854 			/* second time with fp-16 */
11855 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
11856 			BPF_EXIT_INSN(),
11857 
11858 			/* subprog 2 */
11859 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11860 			/* lookup from map */
11861 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11862 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11863 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11864 			BPF_LD_MAP_FD(BPF_REG_1, 0),
11865 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11866 				     BPF_FUNC_map_lookup_elem),
11867 			/* write map_value_ptr into stack frame of main prog */
11868 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11869 			BPF_MOV64_IMM(BPF_REG_0, 0),
11870 			BPF_EXIT_INSN(), /* return 0 */
11871 		},
11872 		.prog_type = BPF_PROG_TYPE_XDP,
11873 		.fixup_map_hash_8b = { 23 },
11874 		.result = ACCEPT,
11875 	},
11876 	{
11877 		"calls: two calls that return map_value with bool condition",
11878 		.insns = {
11879 			/* main prog */
11880 			/* pass fp-16, fp-8 into a function */
11881 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11882 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11883 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11884 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11885 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11886 			BPF_MOV64_IMM(BPF_REG_0, 0),
11887 			BPF_EXIT_INSN(),
11888 
11889 			/* subprog 1 */
11890 			/* call 3rd function twice */
11891 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11892 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11893 			/* first time with fp-8 */
11894 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
11895 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
11896 			/* fetch map_value_ptr from the stack of this function */
11897 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
11898 			/* write into map value */
11899 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11900 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
11901 			/* second time with fp-16 */
11902 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11903 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
11904 			/* fetch second map_value_ptr from the stack */
11905 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
11906 			/* write into map value */
11907 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11908 			BPF_EXIT_INSN(),
11909 
11910 			/* subprog 2 */
11911 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11912 			/* lookup from map */
11913 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11914 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11915 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11916 			BPF_LD_MAP_FD(BPF_REG_1, 0),
11917 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11918 				     BPF_FUNC_map_lookup_elem),
11919 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11920 			BPF_MOV64_IMM(BPF_REG_0, 0),
11921 			BPF_EXIT_INSN(), /* return 0 */
11922 			/* write map_value_ptr into stack frame of main prog */
11923 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11924 			BPF_MOV64_IMM(BPF_REG_0, 1),
11925 			BPF_EXIT_INSN(), /* return 1 */
11926 		},
11927 		.prog_type = BPF_PROG_TYPE_XDP,
11928 		.fixup_map_hash_8b = { 23 },
11929 		.result = ACCEPT,
11930 	},
11931 	{
11932 		"calls: two calls that return map_value with incorrect bool check",
11933 		.insns = {
11934 			/* main prog */
11935 			/* pass fp-16, fp-8 into a function */
11936 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11937 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11938 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11939 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11940 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11941 			BPF_MOV64_IMM(BPF_REG_0, 0),
11942 			BPF_EXIT_INSN(),
11943 
11944 			/* subprog 1 */
11945 			/* call 3rd function twice */
11946 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11947 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
11948 			/* first time with fp-8 */
11949 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
11950 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
11951 			/* fetch map_value_ptr from the stack of this function */
11952 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
11953 			/* write into map value */
11954 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11955 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
11956 			/* second time with fp-16 */
11957 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
11958 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
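			/* inverted check: the fp-16 slot is read back below
			 * precisely on the R0 == 0 path, where subprog 2 never
			 * wrote it, hence "invalid read from stack off -16"
			 */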
11959 			/* fetch second map_value_ptr from the stack */
11960 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
11961 			/* write into map value */
11962 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
11963 			BPF_EXIT_INSN(),
11964 
11965 			/* subprog 2 */
11966 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
11967 			/* lookup from map */
11968 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
11969 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11970 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
11971 			BPF_LD_MAP_FD(BPF_REG_1, 0),
11972 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
11973 				     BPF_FUNC_map_lookup_elem),
11974 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
11975 			BPF_MOV64_IMM(BPF_REG_0, 0),
11976 			BPF_EXIT_INSN(), /* return 0 */
11977 			/* write map_value_ptr into stack frame of main prog */
11978 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
11979 			BPF_MOV64_IMM(BPF_REG_0, 1),
11980 			BPF_EXIT_INSN(), /* return 1 */
11981 		},
11982 		.prog_type = BPF_PROG_TYPE_XDP,
11983 		.fixup_map_hash_8b = { 23 },
11984 		.result = REJECT,
11985 		.errstr = "invalid read from stack off -16+0 size 8",
11986 	},
11987 	{
11988 		"calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
11989 		.insns = {
11990 			/* main prog */
11991 			/* pass fp-16, fp-8 into a function */
11992 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
11993 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
11994 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
11995 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
11996 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
11997 			BPF_MOV64_IMM(BPF_REG_0, 0),
11998 			BPF_EXIT_INSN(),
11999 
12000 			/* subprog 1 */
12001 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12002 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
12003 			/* 1st lookup from map */
12004 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12005 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12006 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12007 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12008 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12009 				     BPF_FUNC_map_lookup_elem),
12010 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12011 			BPF_MOV64_IMM(BPF_REG_8, 0),
12012 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12013 			/* write map_value_ptr into stack frame of main prog at fp-8 */
12014 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
12015 			BPF_MOV64_IMM(BPF_REG_8, 1),
12016 
12017 			/* 2nd lookup from map */
12018 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
12019 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12020 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12021 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
12022 				     BPF_FUNC_map_lookup_elem),
12023 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12024 			BPF_MOV64_IMM(BPF_REG_9, 0),
12025 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12026 			/* write map_value_ptr into stack frame of main prog at fp-16 */
12027 			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
12028 			BPF_MOV64_IMM(BPF_REG_9, 1),
12029 
12030 			/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
12031 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
12032 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
12033 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
12034 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
12035 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),  /* 34 */
12036 			BPF_EXIT_INSN(),
12037 
12038 			/* subprog 2 */
12039 			/* if arg2 == 1 do *arg1 = 0 */
12040 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
12041 			/* fetch map_value_ptr from the stack of this function */
12042 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
12043 			/* write into map value */
12044 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12045 
12046 			/* if arg4 == 1 do *arg3 = 0 */
12047 			BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
12048 			/* fetch map_value_ptr from the stack of this function */
12049 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
12050 			/* write into map value */
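			/* an 8-byte store at offset 2 overruns the 8-byte value,
			 * matching "invalid access to map value, value_size=8
			 * off=2 size=8"
			 */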
12051 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
12052 			BPF_EXIT_INSN(),
12053 		},
12054 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12055 		.fixup_map_hash_8b = { 12, 22 },
12056 		.result = REJECT,
12057 		.errstr = "invalid access to map value, value_size=8 off=2 size=8",
12058 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12059 	},
12060 	{
12061 		"calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
12062 		.insns = {
12063 			/* main prog */
12064 			/* pass fp-16, fp-8 into a function */
12065 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
12066 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
12067 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12068 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
12069 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
12070 			BPF_MOV64_IMM(BPF_REG_0, 0),
12071 			BPF_EXIT_INSN(),
12072 
12073 			/* subprog 1 */
12074 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12075 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
12076 			/* 1st lookup from map */
12077 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12078 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12079 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12080 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12081 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12082 				     BPF_FUNC_map_lookup_elem),
12083 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12084 			BPF_MOV64_IMM(BPF_REG_8, 0),
12085 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12086 			/* write map_value_ptr into stack frame of main prog at fp-8 */
12087 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
12088 			BPF_MOV64_IMM(BPF_REG_8, 1),
12089 
12090 			/* 2nd lookup from map */
12091 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
12092 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12093 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12094 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
12095 				     BPF_FUNC_map_lookup_elem),
12096 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12097 			BPF_MOV64_IMM(BPF_REG_9, 0),
12098 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12099 			/* write map_value_ptr into stack frame of main prog at fp-16 */
12100 			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
12101 			BPF_MOV64_IMM(BPF_REG_9, 1),
12102 
12103 			/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
12104 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
12105 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
12106 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
12107 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
12108 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),  /* 34 */
12109 			BPF_EXIT_INSN(),
12110 
12111 			/* subprog 2 */
12112 			/* if arg2 == 1 do *arg1 = 0 */
12113 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
12114 			/* fetch map_value_ptr from the stack of this function */
12115 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
12116 			/* write into map value */
12117 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12118 
12119 			/* if arg4 == 1 do *arg3 = 0 */
12120 			BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
12121 			/* fetch map_value_ptr from the stack of this function */
12122 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
12123 			/* write into map value */
12124 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12125 			BPF_EXIT_INSN(),
12126 		},
12127 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12128 		.fixup_map_hash_8b = { 12, 22 },
12129 		.result = ACCEPT,
12130 	},
12131 	{
12132 		"calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
12133 		.insns = {
12134 			/* main prog */
12135 			/* pass fp-16, fp-8 into a function */
12136 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
12137 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
12138 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12139 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
12140 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
12141 			BPF_MOV64_IMM(BPF_REG_0, 0),
12142 			BPF_EXIT_INSN(),
12143 
12144 			/* subprog 1 */
12145 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12146 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
12147 			/* 1st lookup from map */
12148 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
12149 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12150 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
12151 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12152 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12153 				     BPF_FUNC_map_lookup_elem),
12154 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12155 			BPF_MOV64_IMM(BPF_REG_8, 0),
12156 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12157 			/* write map_value_ptr into stack frame of main prog at fp-8 */
12158 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
12159 			BPF_MOV64_IMM(BPF_REG_8, 1),
12160 
12161 			/* 2nd lookup from map */
12162 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12163 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
12164 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12165 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12166 				     BPF_FUNC_map_lookup_elem),
12167 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12168 			BPF_MOV64_IMM(BPF_REG_9, 0), /* 26 */
12169 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12170 			/* write map_value_ptr into stack frame of main prog at fp-16 */
12171 			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
12172 			BPF_MOV64_IMM(BPF_REG_9, 1),
12173 
12174 			/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
12175 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
12176 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
12177 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
12178 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
12179 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), /* 34 */
12180 			BPF_JMP_IMM(BPF_JA, 0, 0, -30),
12181 
12182 			/* subprog 2 */
12183 			/* if arg2 == 1 do *arg1 = 0 */
12184 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
12185 			/* fetch map_value_ptr from the stack of this function */
12186 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
12187 			/* write into map value */
12188 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12189 
12190 			/* if arg4 == 1 do *arg3 = 0 */
12191 			BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
12192 			/* fetch map_value_ptr from the stack of this function */
12193 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
12194 			/* write into map value */
12195 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
12196 			BPF_JMP_IMM(BPF_JA, 0, 0, -8),
12197 		},
12198 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12199 		.fixup_map_hash_8b = { 12, 22 },
12200 		.result = REJECT,
12201 		.errstr = "invalid access to map value, value_size=8 off=2 size=8",
12202 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12203 	},
12204 	{
12205 		"calls: two calls that receive map_value_ptr_or_null via arg. test1",
12206 		.insns = {
12207 			/* main prog */
12208 			/* pass fp-16, fp-8 into a function */
12209 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
12210 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
12211 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12212 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
12213 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
12214 			BPF_MOV64_IMM(BPF_REG_0, 0),
12215 			BPF_EXIT_INSN(),
12216 
12217 			/* subprog 1 */
12218 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12219 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
12220 			/* 1st lookup from map */
12221 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12222 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12223 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12224 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12225 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12226 				     BPF_FUNC_map_lookup_elem),
12227 			/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
12228 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
12229 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12230 			BPF_MOV64_IMM(BPF_REG_8, 0),
12231 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12232 			BPF_MOV64_IMM(BPF_REG_8, 1),
12233 
12234 			/* 2nd lookup from map */
12235 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12236 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12237 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12238 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12239 				     BPF_FUNC_map_lookup_elem),
12240 			/* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
12241 			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
12242 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12243 			BPF_MOV64_IMM(BPF_REG_9, 0),
12244 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12245 			BPF_MOV64_IMM(BPF_REG_9, 1),
12246 
12247 			/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
12248 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
12249 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
12250 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
12251 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
12252 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12253 			BPF_EXIT_INSN(),
12254 
12255 			/* subprog 2 */
12256 			/* if arg2 == 1 do *arg1 = 0 */
12257 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
12258 			/* fetch map_value_ptr from the stack of this function */
12259 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
12260 			/* write into map value */
12261 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12262 
12263 			/* if arg4 == 1 do *arg3 = 0 */
12264 			BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
12265 			/* fetch map_value_ptr from the stack of this function */
12266 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
12267 			/* write into map value */
12268 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12269 			BPF_EXIT_INSN(),
12270 		},
12271 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12272 		.fixup_map_hash_8b = { 12, 22 },
12273 		.result = ACCEPT,
12274 	},
12275 	{
12276 		"calls: two calls that receive map_value_ptr_or_null via arg. test2",
12277 		.insns = {
12278 			/* main prog */
12279 			/* pass fp-16, fp-8 into a function */
12280 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
12281 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
12282 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12283 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
12284 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
12285 			BPF_MOV64_IMM(BPF_REG_0, 0),
12286 			BPF_EXIT_INSN(),
12287 
12288 			/* subprog 1 */
12289 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
12290 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
12291 			/* 1st lookup from map */
12292 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12293 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12294 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12295 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12296 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12297 				     BPF_FUNC_map_lookup_elem),
12298 			/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
12299 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
12300 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12301 			BPF_MOV64_IMM(BPF_REG_8, 0),
12302 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12303 			BPF_MOV64_IMM(BPF_REG_8, 1),
12304 
12305 			/* 2nd lookup from map */
12306 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12307 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12308 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12309 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12310 				     BPF_FUNC_map_lookup_elem),
12311 			/* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
12312 			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
12313 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
12314 			BPF_MOV64_IMM(BPF_REG_9, 0),
12315 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12316 			BPF_MOV64_IMM(BPF_REG_9, 1),
12317 
12318 			/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
12319 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
12320 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
12321 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
12322 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
12323 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12324 			BPF_EXIT_INSN(),
12325 
12326 			/* subprog 2 */
12327 			/* if arg2 == 1 do *arg1 = 0 */
12328 			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
12329 			/* fetch map_value_ptr from the stack of this function */
12330 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
12331 			/* write into map value */
12332 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12333 
12334 			/* if arg4 == 0 do *arg3 = 0 */
12335 			BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2),
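			/* arg4 == 0 corresponds to the path where the second
			 * lookup returned NULL, so what is read back below is not
			 * a valid map value pointer, hence "R0 invalid mem access
			 * 'inv'"
			 */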
12336 			/* fetch map_value_ptr from the stack of this function */
12337 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
12338 			/* write into map value */
12339 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
12340 			BPF_EXIT_INSN(),
12341 		},
12342 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12343 		.fixup_map_hash_8b = { 12, 22 },
12344 		.result = REJECT,
12345 		.errstr = "R0 invalid mem access 'inv'",
12346 	},
12347 	{
12348 		"calls: pkt_ptr spill into caller stack",
12349 		.insns = {
12350 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12351 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12352 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
12353 			BPF_EXIT_INSN(),
12354 
12355 			/* subprog 1 */
12356 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12357 				    offsetof(struct __sk_buff, data)),
12358 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12359 				    offsetof(struct __sk_buff, data_end)),
12360 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12361 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12362 			/* spill unchecked pkt_ptr into stack of caller */
12363 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12364 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
12365 			/* now the pkt range is verified, read pkt_ptr from stack */
12366 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
12367 			/* write 4 bytes into packet */
12368 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12369 			BPF_EXIT_INSN(),
12370 		},
12371 		.result = ACCEPT,
12372 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12373 		.retval = POINTER_VALUE,
12374 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12375 	},
12376 	{
12377 		"calls: pkt_ptr spill into caller stack 2",
12378 		.insns = {
12379 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12380 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12381 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12382 			/* Marking is still kept, but not in all cases safe. */
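			/* on the path where the callee's bounds check failed, the
			 * spilled pointer was never range-checked, so the write
			 * through it below is rejected ("invalid access to packet")
			 */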
12383 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12384 			BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
12385 			BPF_EXIT_INSN(),
12386 
12387 			/* subprog 1 */
12388 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12389 				    offsetof(struct __sk_buff, data)),
12390 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12391 				    offsetof(struct __sk_buff, data_end)),
12392 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12393 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12394 			/* spill unchecked pkt_ptr into stack of caller */
12395 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12396 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
12397 			/* now the pkt range is verified, read pkt_ptr from stack */
12398 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
12399 			/* write 4 bytes into packet */
12400 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12401 			BPF_EXIT_INSN(),
12402 		},
12403 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12404 		.errstr = "invalid access to packet",
12405 		.result = REJECT,
12406 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12407 	},
12408 	{
12409 		"calls: pkt_ptr spill into caller stack 3",
12410 		.insns = {
12411 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12412 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12413 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
12414 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
12415 			/* Marking is still kept and safe here. */
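			/* R0 == 1 only on the path where the callee verified the
			 * packet range, so the spilled pointer is dereferenced
			 * below only once its bounds are known
			 */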
12416 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12417 			BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
12418 			BPF_EXIT_INSN(),
12419 
12420 			/* subprog 1 */
12421 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12422 				    offsetof(struct __sk_buff, data)),
12423 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12424 				    offsetof(struct __sk_buff, data_end)),
12425 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12426 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12427 			/* spill unchecked pkt_ptr into stack of caller */
12428 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12429 			BPF_MOV64_IMM(BPF_REG_5, 0),
12430 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
12431 			BPF_MOV64_IMM(BPF_REG_5, 1),
12432 			/* now the pkt range is verified, read pkt_ptr from stack */
12433 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
12434 			/* write 4 bytes into packet */
12435 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12436 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12437 			BPF_EXIT_INSN(),
12438 		},
12439 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12440 		.result = ACCEPT,
12441 		.retval = 1,
12442 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12443 	},
12444 	{
12445 		"calls: pkt_ptr spill into caller stack 4",
12446 		.insns = {
12447 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12448 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12449 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
12450 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
12451 			/* Check marking propagated. */
12452 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12453 			BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
12454 			BPF_EXIT_INSN(),
12455 
12456 			/* subprog 1 */
12457 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12458 				    offsetof(struct __sk_buff, data)),
12459 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12460 				    offsetof(struct __sk_buff, data_end)),
12461 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12462 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12463 			/* spill unchecked pkt_ptr into stack of caller */
12464 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12465 			BPF_MOV64_IMM(BPF_REG_5, 0),
12466 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
12467 			BPF_MOV64_IMM(BPF_REG_5, 1),
12468 			/* don't read back pkt_ptr from stack here */
12469 			/* write 4 bytes into packet */
12470 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12471 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12472 			BPF_EXIT_INSN(),
12473 		},
12474 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12475 		.result = ACCEPT,
12476 		.retval = 1,
12477 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12478 	},
12479 	{
12480 		"calls: pkt_ptr spill into caller stack 5",
12481 		.insns = {
12482 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12483 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12484 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
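			/* fp-8 starts out holding the ctx pointer; the callee may
			 * overwrite it with a checked packet pointer, so the final
			 * load in this frame would use one insn with two different
			 * pointer types ("same insn cannot be used with different
			 * pointers")
			 */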
12485 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12486 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12487 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
12488 			BPF_EXIT_INSN(),
12489 
12490 			/* subprog 1 */
12491 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12492 				    offsetof(struct __sk_buff, data)),
12493 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12494 				    offsetof(struct __sk_buff, data_end)),
12495 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12496 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12497 			BPF_MOV64_IMM(BPF_REG_5, 0),
12498 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
12499 			/* spill checked pkt_ptr into stack of caller */
12500 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12501 			BPF_MOV64_IMM(BPF_REG_5, 1),
12502 			/* don't read back pkt_ptr from stack here */
12503 			/* write 4 bytes into packet */
12504 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12505 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12506 			BPF_EXIT_INSN(),
12507 		},
12508 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12509 		.errstr = "same insn cannot be used with different",
12510 		.result = REJECT,
12511 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12512 	},
12513 	{
12514 		"calls: pkt_ptr spill into caller stack 6",
12515 		.insns = {
12516 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12517 				    offsetof(struct __sk_buff, data_end)),
12518 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12519 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12520 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12521 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12522 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12523 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
12524 			BPF_EXIT_INSN(),
12525 
12526 			/* subprog 1 */
12527 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12528 				    offsetof(struct __sk_buff, data)),
12529 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12530 				    offsetof(struct __sk_buff, data_end)),
12531 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12532 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12533 			BPF_MOV64_IMM(BPF_REG_5, 0),
12534 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
12535 			/* spill checked pkt_ptr into stack of caller */
12536 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12537 			BPF_MOV64_IMM(BPF_REG_5, 1),
12538 			/* don't read back pkt_ptr from stack here */
12539 			/* write 4 bytes into packet */
12540 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12541 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12542 			BPF_EXIT_INSN(),
12543 		},
12544 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12545 		.errstr = "R4 invalid mem access",
12546 		.result = REJECT,
12547 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12548 	},
12549 	{
12550 		"calls: pkt_ptr spill into caller stack 7",
12551 		.insns = {
12552 			BPF_MOV64_IMM(BPF_REG_2, 0),
12553 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12554 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12555 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12556 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12557 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12558 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
12559 			BPF_EXIT_INSN(),
12560 
12561 			/* subprog 1 */
12562 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12563 				    offsetof(struct __sk_buff, data)),
12564 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12565 				    offsetof(struct __sk_buff, data_end)),
12566 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12567 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12568 			BPF_MOV64_IMM(BPF_REG_5, 0),
12569 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
12570 			/* spill checked pkt_ptr into stack of caller */
12571 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12572 			BPF_MOV64_IMM(BPF_REG_5, 1),
12573 			/* don't read back pkt_ptr from stack here */
12574 			/* write 4 bytes into packet */
12575 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12576 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12577 			BPF_EXIT_INSN(),
12578 		},
12579 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12580 		.errstr = "R4 invalid mem access",
12581 		.result = REJECT,
12582 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12583 	},
12584 	{
12585 		"calls: pkt_ptr spill into caller stack 8",
12586 		.insns = {
12587 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12588 				    offsetof(struct __sk_buff, data)),
12589 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12590 				    offsetof(struct __sk_buff, data_end)),
12591 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12592 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12593 			BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
12594 			BPF_EXIT_INSN(),
12595 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12596 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12597 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12598 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12599 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12600 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
12601 			BPF_EXIT_INSN(),
12602 
12603 			/* subprog 1 */
12604 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12605 				    offsetof(struct __sk_buff, data)),
12606 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12607 				    offsetof(struct __sk_buff, data_end)),
12608 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12609 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12610 			BPF_MOV64_IMM(BPF_REG_5, 0),
12611 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
12612 			/* spill checked pkt_ptr into stack of caller */
12613 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12614 			BPF_MOV64_IMM(BPF_REG_5, 1),
12615 			/* don't read back pkt_ptr from stack here */
12616 			/* write 4 bytes into packet */
12617 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12618 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12619 			BPF_EXIT_INSN(),
12620 		},
12621 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12622 		.result = ACCEPT,
12623 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12624 	},
12625 	{
12626 		"calls: pkt_ptr spill into caller stack 9",
12627 		.insns = {
12628 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12629 				    offsetof(struct __sk_buff, data)),
12630 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12631 				    offsetof(struct __sk_buff, data_end)),
12632 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12633 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12634 			BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
12635 			BPF_EXIT_INSN(),
12636 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
12637 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
12638 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12639 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
12640 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
12641 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
12642 			BPF_EXIT_INSN(),
12643 
12644 			/* subprog 1 */
12645 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
12646 				    offsetof(struct __sk_buff, data)),
12647 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
12648 				    offsetof(struct __sk_buff, data_end)),
12649 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
12650 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
12651 			BPF_MOV64_IMM(BPF_REG_5, 0),
12652 			/* spill unchecked pkt_ptr into stack of caller */
12653 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
12654 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
12655 			BPF_MOV64_IMM(BPF_REG_5, 1),
12656 			/* don't read back pkt_ptr from stack here */
12657 			/* write 4 bytes into packet */
12658 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
12659 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
12660 			BPF_EXIT_INSN(),
12661 		},
12662 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12663 		.errstr = "invalid access to packet",
12664 		.result = REJECT,
12665 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
12666 	},
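	/* The caller's fp-8 slot holds either the constant 0 or a
	 * map_value_or_null written by the callee; dereferencing it is only
	 * valid after the NULL check, which the test below performs.
	 */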
12667 	{
12668 		"calls: caller stack init to zero or map_value_or_null",
12669 		.insns = {
12670 			BPF_MOV64_IMM(BPF_REG_0, 0),
12671 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
12672 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12673 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12674 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
12675 			/* fetch map_value_or_null or const_zero from stack */
12676 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
12677 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
12678 			/* store into map_value */
12679 			BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
12680 			BPF_EXIT_INSN(),
12681 
12682 			/* subprog 1 */
12683 			/* if (ctx == 0) return; */
12684 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
12685 			/* else bpf_map_lookup() and *(fp - 8) = r0 */
12686 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
12687 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12688 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12689 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12690 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12691 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12692 				     BPF_FUNC_map_lookup_elem),
12693 			/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
12694 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
12695 			BPF_EXIT_INSN(),
12696 		},
12697 		.fixup_map_hash_8b = { 13 },
12698 		.result = ACCEPT,
12699 		.prog_type = BPF_PROG_TYPE_XDP,
12700 	},
12701 	{
12702 		"calls: stack init to zero and pruning",
12703 		.insns = {
12704 			/* first make allocated_stack 16 byte */
12705 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
12706 			/* now fork the execution such that the false branch
12707 			 * of JGT insn will be verified second and it skisp zero
12708 			 * init of fp-8 stack slot. If stack liveness marking
12709 			 * is missing live_read marks from call map_lookup
12710 			 * processing then pruning will incorrectly assume
12711 			 * that fp-8 stack slot was unused in the fall-through
12712 			 * branch and will accept the program incorrectly
12713 			 */
12714 			BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
12715 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12716 			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
12717 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12718 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12719 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12720 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12721 				     BPF_FUNC_map_lookup_elem),
12722 			BPF_EXIT_INSN(),
12723 		},
12724 		.fixup_map_hash_48b = { 6 },
12725 		.errstr = "invalid indirect read from stack off -8+0 size 8",
12726 		.result = REJECT,
12727 		.prog_type = BPF_PROG_TYPE_XDP,
12728 	},
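	/* Two call sites load different map fds for the same lookup
	 * sequence; maps with a compatible value layout are accepted, while
	 * mixing in a map-in-map is rejected.
	 */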
12729 	{
12730 		"calls: two calls returning different map pointers for lookup (hash, array)",
12731 		.insns = {
12732 			/* main prog */
12733 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
12734 			BPF_CALL_REL(11),
12735 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12736 			BPF_CALL_REL(12),
12737 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
12738 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12739 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12740 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12741 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12742 				     BPF_FUNC_map_lookup_elem),
12743 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
12744 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
12745 				   offsetof(struct test_val, foo)),
12746 			BPF_MOV64_IMM(BPF_REG_0, 1),
12747 			BPF_EXIT_INSN(),
12748 			/* subprog 1 */
12749 			BPF_LD_MAP_FD(BPF_REG_0, 0),
12750 			BPF_EXIT_INSN(),
12751 			/* subprog 2 */
12752 			BPF_LD_MAP_FD(BPF_REG_0, 0),
12753 			BPF_EXIT_INSN(),
12754 		},
12755 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12756 		.fixup_map_hash_48b = { 13 },
12757 		.fixup_map_array_48b = { 16 },
12758 		.result = ACCEPT,
12759 		.retval = 1,
12760 	},
12761 	{
12762 		"calls: two calls returning different map pointers for lookup (hash, map in map)",
12763 		.insns = {
12764 			/* main prog */
12765 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
12766 			BPF_CALL_REL(11),
12767 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
12768 			BPF_CALL_REL(12),
12769 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
12770 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12771 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12772 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12773 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12774 				     BPF_FUNC_map_lookup_elem),
12775 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
12776 			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
12777 				   offsetof(struct test_val, foo)),
12778 			BPF_MOV64_IMM(BPF_REG_0, 1),
12779 			BPF_EXIT_INSN(),
12780 			/* subprog 1 */
12781 			BPF_LD_MAP_FD(BPF_REG_0, 0),
12782 			BPF_EXIT_INSN(),
12783 			/* subprog 2 */
12784 			BPF_LD_MAP_FD(BPF_REG_0, 0),
12785 			BPF_EXIT_INSN(),
12786 		},
12787 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12788 		.fixup_map_in_map = { 16 },
12789 		.fixup_map_array_48b = { 13 },
12790 		.result = REJECT,
12791 		.errstr = "R0 invalid mem access 'map_ptr'",
12792 	},
12793 	{
12794 		"cond: two branches returning different map pointers for lookup (tail, tail)",
12795 		.insns = {
12796 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
12797 				    offsetof(struct __sk_buff, mark)),
12798 			BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 3),
12799 			BPF_LD_MAP_FD(BPF_REG_2, 0),
12800 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12801 			BPF_LD_MAP_FD(BPF_REG_2, 0),
12802 			BPF_MOV64_IMM(BPF_REG_3, 7),
12803 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12804 				     BPF_FUNC_tail_call),
12805 			BPF_MOV64_IMM(BPF_REG_0, 1),
12806 			BPF_EXIT_INSN(),
12807 		},
12808 		.fixup_prog1 = { 5 },
12809 		.fixup_prog2 = { 2 },
12810 		.result_unpriv = REJECT,
12811 		.errstr_unpriv = "tail_call abusing map_ptr",
12812 		.result = ACCEPT,
12813 		.retval = 42,
12814 	},
12815 	{
12816 		"cond: two branches returning same map pointers for lookup (tail, tail)",
12817 		.insns = {
12818 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
12819 				    offsetof(struct __sk_buff, mark)),
12820 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 3),
12821 			BPF_LD_MAP_FD(BPF_REG_2, 0),
12822 			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
12823 			BPF_LD_MAP_FD(BPF_REG_2, 0),
12824 			BPF_MOV64_IMM(BPF_REG_3, 7),
12825 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12826 				     BPF_FUNC_tail_call),
12827 			BPF_MOV64_IMM(BPF_REG_0, 1),
12828 			BPF_EXIT_INSN(),
12829 		},
12830 		.fixup_prog2 = { 2, 5 },
12831 		.result_unpriv = ACCEPT,
12832 		.result = ACCEPT,
12833 		.retval = 42,
12834 	},
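	/* Search pruning tests: pruning must not treat a state as equivalent
	 * when a live stack slot differs, otherwise an uninitialized read
	 * would be accepted.
	 */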
12835 	{
12836 		"search pruning: all branches should be verified (nop operation)",
12837 		.insns = {
12838 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12839 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12840 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
12841 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12842 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
12843 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
12844 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
12845 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
12846 			BPF_MOV64_IMM(BPF_REG_4, 0),
12847 			BPF_JMP_A(1),
12848 			BPF_MOV64_IMM(BPF_REG_4, 1),
12849 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
12850 			BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
12851 			BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
12852 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_5, 0, 2),
12853 			BPF_MOV64_IMM(BPF_REG_6, 0),
12854 			BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xdead),
12855 			BPF_EXIT_INSN(),
12856 		},
12857 		.fixup_map_hash_8b = { 3 },
12858 		.errstr = "R6 invalid mem access 'inv'",
12859 		.result = REJECT,
12860 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
12861 	},
12862 	{
12863 		"search pruning: all branches should be verified (invalid stack access)",
12864 		.insns = {
12865 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12866 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12867 			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
12868 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12869 			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
12870 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
12871 			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
12872 			BPF_MOV64_IMM(BPF_REG_4, 0),
12873 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
12874 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
12875 			BPF_JMP_A(1),
12876 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -24),
12877 			BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
12878 			BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
12879 			BPF_EXIT_INSN(),
12880 		},
12881 		.fixup_map_hash_8b = { 3 },
12882 		.errstr = "invalid read from stack off -16+0 size 8",
12883 		.result = REJECT,
12884 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
12885 	},
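	/* JIT sanity tests: these programs are executed and their return
	 * value is checked, exercising code generation for shifts, ldimm64
	 * lowering and 32/64-bit multiplies rather than just acceptance.
	 */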
12886 	{
12887 		"jit: lsh, rsh, arsh by 1",
12888 		.insns = {
12889 			BPF_MOV64_IMM(BPF_REG_0, 1),
12890 			BPF_MOV64_IMM(BPF_REG_1, 0xff),
12891 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 1),
12892 			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 1),
12893 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x3fc, 1),
12894 			BPF_EXIT_INSN(),
12895 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 1),
12896 			BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 1),
12897 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0xff, 1),
12898 			BPF_EXIT_INSN(),
12899 			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 1),
12900 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x7f, 1),
12901 			BPF_EXIT_INSN(),
12902 			BPF_MOV64_IMM(BPF_REG_0, 2),
12903 			BPF_EXIT_INSN(),
12904 		},
12905 		.result = ACCEPT,
12906 		.retval = 2,
12907 	},
12908 	{
12909 		"jit: mov32 for ldimm64, 1",
12910 		.insns = {
12911 			BPF_MOV64_IMM(BPF_REG_0, 2),
12912 			BPF_LD_IMM64(BPF_REG_1, 0xfeffffffffffffffULL),
12913 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32),
12914 			BPF_LD_IMM64(BPF_REG_2, 0xfeffffffULL),
12915 			BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
12916 			BPF_MOV64_IMM(BPF_REG_0, 1),
12917 			BPF_EXIT_INSN(),
12918 		},
12919 		.result = ACCEPT,
12920 		.retval = 2,
12921 	},
12922 	{
12923 		"jit: mov32 for ldimm64, 2",
12924 		.insns = {
12925 			BPF_MOV64_IMM(BPF_REG_0, 1),
12926 			BPF_LD_IMM64(BPF_REG_1, 0x1ffffffffULL),
12927 			BPF_LD_IMM64(BPF_REG_2, 0xffffffffULL),
12928 			BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
12929 			BPF_MOV64_IMM(BPF_REG_0, 2),
12930 			BPF_EXIT_INSN(),
12931 		},
12932 		.result = ACCEPT,
12933 		.retval = 2,
12934 	},
12935 	{
12936 		"jit: various mul tests",
12937 		.insns = {
12938 			BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
12939 			BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
12940 			BPF_LD_IMM64(BPF_REG_1, 0xefefefULL),
12941 			BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
12942 			BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
12943 			BPF_MOV64_IMM(BPF_REG_0, 1),
12944 			BPF_EXIT_INSN(),
12945 			BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
12946 			BPF_ALU64_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
12947 			BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
12948 			BPF_MOV64_IMM(BPF_REG_0, 1),
12949 			BPF_EXIT_INSN(),
12950 			BPF_MOV32_REG(BPF_REG_2, BPF_REG_2),
12951 			BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
12952 			BPF_ALU32_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
12953 			BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
12954 			BPF_MOV64_IMM(BPF_REG_0, 1),
12955 			BPF_EXIT_INSN(),
12956 			BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
12957 			BPF_ALU32_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
12958 			BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
12959 			BPF_MOV64_IMM(BPF_REG_0, 1),
12960 			BPF_EXIT_INSN(),
12961 			BPF_LD_IMM64(BPF_REG_0, 0x952a7bbcULL),
12962 			BPF_LD_IMM64(BPF_REG_1, 0xfefefeULL),
12963 			BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
12964 			BPF_ALU32_REG(BPF_MUL, BPF_REG_2, BPF_REG_1),
12965 			BPF_JMP_REG(BPF_JEQ, BPF_REG_2, BPF_REG_0, 2),
12966 			BPF_MOV64_IMM(BPF_REG_0, 1),
12967 			BPF_EXIT_INSN(),
12968 			BPF_MOV64_IMM(BPF_REG_0, 2),
12969 			BPF_EXIT_INSN(),
12970 		},
12971 		.result = ACCEPT,
12972 		.retval = 2,
12973 	},
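	/* BPF_STX_XADD tests: the atomic add must be naturally aligned on
	 * stack and map value memory, is not allowed on packet memory, and
	 * must leave its source/destination registers untouched.
	 */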
12974 	{
12975 		"xadd/w check unaligned stack",
12976 		.insns = {
12977 			BPF_MOV64_IMM(BPF_REG_0, 1),
12978 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
12979 			BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -7),
12980 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
12981 			BPF_EXIT_INSN(),
12982 		},
12983 		.result = REJECT,
12984 		.errstr = "misaligned stack access off",
12985 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
12986 	},
12987 	{
12988 		"xadd/w check unaligned map",
12989 		.insns = {
12990 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
12991 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
12992 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
12993 			BPF_LD_MAP_FD(BPF_REG_1, 0),
12994 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
12995 				     BPF_FUNC_map_lookup_elem),
12996 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
12997 			BPF_EXIT_INSN(),
12998 			BPF_MOV64_IMM(BPF_REG_1, 1),
12999 			BPF_STX_XADD(BPF_W, BPF_REG_0, BPF_REG_1, 3),
13000 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 3),
13001 			BPF_EXIT_INSN(),
13002 		},
13003 		.fixup_map_hash_8b = { 3 },
13004 		.result = REJECT,
13005 		.errstr = "misaligned value access off",
13006 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13007 	},
13008 	{
13009 		"xadd/w check unaligned pkt",
13010 		.insns = {
13011 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
13012 				    offsetof(struct xdp_md, data)),
13013 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
13014 				    offsetof(struct xdp_md, data_end)),
13015 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
13016 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
13017 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 2),
13018 			BPF_MOV64_IMM(BPF_REG_0, 99),
13019 			BPF_JMP_IMM(BPF_JA, 0, 0, 6),
13020 			BPF_MOV64_IMM(BPF_REG_0, 1),
13021 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
13022 			BPF_ST_MEM(BPF_W, BPF_REG_2, 3, 0),
13023 			BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 1),
13024 			BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 2),
13025 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 1),
13026 			BPF_EXIT_INSN(),
13027 		},
13028 		.result = REJECT,
13029 		.errstr = "BPF_XADD stores into R2 pkt is not allowed",
13030 		.prog_type = BPF_PROG_TYPE_XDP,
13031 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
13032 	},
13033 	{
13034 		"xadd/w check whether src/dst got mangled, 1",
13035 		.insns = {
13036 			BPF_MOV64_IMM(BPF_REG_0, 1),
13037 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13038 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
13039 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
13040 			BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
13041 			BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
13042 			BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
13043 			BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
13044 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
13045 			BPF_EXIT_INSN(),
13046 			BPF_MOV64_IMM(BPF_REG_0, 42),
13047 			BPF_EXIT_INSN(),
13048 		},
13049 		.result = ACCEPT,
13050 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13051 		.retval = 3,
13052 	},
13053 	{
13054 		"xadd/w check whether src/dst got mangled, 2",
13055 		.insns = {
13056 			BPF_MOV64_IMM(BPF_REG_0, 1),
13057 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13058 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
13059 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
13060 			BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
13061 			BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
13062 			BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
13063 			BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
13064 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
13065 			BPF_EXIT_INSN(),
13066 			BPF_MOV64_IMM(BPF_REG_0, 42),
13067 			BPF_EXIT_INSN(),
13068 		},
13069 		.result = ACCEPT,
13070 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13071 		.retval = 3,
13072 	},
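	/* Verifies that the sign-extended return value of bpf_get_stack()
	 * is tracked precisely enough to compute a second, still in-bounds
	 * buffer for a follow-up bpf_get_stack() call.
	 */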
13073 	{
13074 		"bpf_get_stack return R0 within range",
13075 		.insns = {
13076 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13077 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
13078 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
13079 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
13080 			BPF_LD_MAP_FD(BPF_REG_1, 0),
13081 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13082 				     BPF_FUNC_map_lookup_elem),
13083 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 28),
13084 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
13085 			BPF_MOV64_IMM(BPF_REG_9, sizeof(struct test_val)),
13086 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13087 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
13088 			BPF_MOV64_IMM(BPF_REG_3, sizeof(struct test_val)),
13089 			BPF_MOV64_IMM(BPF_REG_4, 256),
13090 			BPF_EMIT_CALL(BPF_FUNC_get_stack),
13091 			BPF_MOV64_IMM(BPF_REG_1, 0),
13092 			BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
13093 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_8, 32),
13094 			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_8, 32),
13095 			BPF_JMP_REG(BPF_JSLT, BPF_REG_1, BPF_REG_8, 16),
13096 			BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_8),
13097 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
13098 			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_8),
13099 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_9),
13100 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32),
13101 			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 32),
13102 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
13103 			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_1),
13104 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
13105 			BPF_MOV64_IMM(BPF_REG_5, sizeof(struct test_val)),
13106 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_5),
13107 			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 4),
13108 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13109 			BPF_MOV64_REG(BPF_REG_3, BPF_REG_9),
13110 			BPF_MOV64_IMM(BPF_REG_4, 0),
13111 			BPF_EMIT_CALL(BPF_FUNC_get_stack),
13112 			BPF_EXIT_INSN(),
13113 		},
13114 		.fixup_map_hash_48b = { 4 },
13115 		.result = ACCEPT,
13116 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
13117 	},
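	/* LD_ABS/LD_IND tests: BPF_DW is not a valid size for these legacy
	 * packet loads, and an out-of-range offset makes the program return
	 * 0 instead of faulting.
	 */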
13118 	{
13119 		"ld_abs: invalid op 1",
13120 		.insns = {
13121 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13122 			BPF_LD_ABS(BPF_DW, 0),
13123 			BPF_EXIT_INSN(),
13124 		},
13125 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13126 		.result = REJECT,
13127 		.errstr = "unknown opcode",
13128 	},
13129 	{
13130 		"ld_abs: invalid op 2",
13131 		.insns = {
13132 			BPF_MOV32_IMM(BPF_REG_0, 256),
13133 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13134 			BPF_LD_IND(BPF_DW, BPF_REG_0, 0),
13135 			BPF_EXIT_INSN(),
13136 		},
13137 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13138 		.result = REJECT,
13139 		.errstr = "unknown opcode",
13140 	},
13141 	{
13142 		"ld_abs: nmap reduced",
13143 		.insns = {
13144 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13145 			BPF_LD_ABS(BPF_H, 12),
13146 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 28),
13147 			BPF_LD_ABS(BPF_H, 12),
13148 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 26),
13149 			BPF_MOV32_IMM(BPF_REG_0, 18),
13150 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -64),
13151 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -64),
13152 			BPF_LD_IND(BPF_W, BPF_REG_7, 14),
13153 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -60),
13154 			BPF_MOV32_IMM(BPF_REG_0, 280971478),
13155 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
13156 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
13157 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -60),
13158 			BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
13159 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 15),
13160 			BPF_LD_ABS(BPF_H, 12),
13161 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 13),
13162 			BPF_MOV32_IMM(BPF_REG_0, 22),
13163 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
13164 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
13165 			BPF_LD_IND(BPF_H, BPF_REG_7, 14),
13166 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -52),
13167 			BPF_MOV32_IMM(BPF_REG_0, 17366),
13168 			BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -48),
13169 			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -48),
13170 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -52),
13171 			BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
13172 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
13173 			BPF_MOV32_IMM(BPF_REG_0, 256),
13174 			BPF_EXIT_INSN(),
13175 			BPF_MOV32_IMM(BPF_REG_0, 0),
13176 			BPF_EXIT_INSN(),
13177 		},
13178 		.data = {
13179 			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0x06, 0,
13180 			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
13181 			0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
13182 		},
13183 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13184 		.result = ACCEPT,
13185 		.retval = 256,
13186 	},
13187 	{
13188 		"ld_abs: div + abs, test 1",
13189 		.insns = {
13190 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
13191 			BPF_LD_ABS(BPF_B, 3),
13192 			BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
13193 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
13194 			BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
13195 			BPF_LD_ABS(BPF_B, 4),
13196 			BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
13197 			BPF_LD_IND(BPF_B, BPF_REG_8, -70),
13198 			BPF_EXIT_INSN(),
13199 		},
13200 		.data = {
13201 			10, 20, 30, 40, 50,
13202 		},
13203 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13204 		.result = ACCEPT,
13205 		.retval = 10,
13206 	},
13207 	{
13208 		"ld_abs: div + abs, test 2",
13209 		.insns = {
13210 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
13211 			BPF_LD_ABS(BPF_B, 3),
13212 			BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
13213 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
13214 			BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
13215 			BPF_LD_ABS(BPF_B, 128),
13216 			BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
13217 			BPF_LD_IND(BPF_B, BPF_REG_8, -70),
13218 			BPF_EXIT_INSN(),
13219 		},
13220 		.data = {
13221 			10, 20, 30, 40, 50,
13222 		},
13223 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13224 		.result = ACCEPT,
13225 		.retval = 0,
13226 	},
13227 	{
13228 		"ld_abs: div + abs, test 3",
13229 		.insns = {
13230 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
13231 			BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
13232 			BPF_LD_ABS(BPF_B, 3),
13233 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
13234 			BPF_EXIT_INSN(),
13235 		},
13236 		.data = {
13237 			10, 20, 30, 40, 50,
13238 		},
13239 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13240 		.result = ACCEPT,
13241 		.retval = 0,
13242 	},
13243 	{
13244 		"ld_abs: div + abs, test 4",
13245 		.insns = {
13246 			BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
13247 			BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
13248 			BPF_LD_ABS(BPF_B, 256),
13249 			BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
13250 			BPF_EXIT_INSN(),
13251 		},
13252 		.data = {
13253 			10, 20, 30, 40, 50,
13254 		},
13255 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13256 		.result = ACCEPT,
13257 		.retval = 0,
13258 	},
13259 	{
13260 		"ld_abs: vlan + abs, test 1",
13261 		.insns = { },
13262 		.data = {
13263 			0x34,
13264 		},
13265 		.fill_helper = bpf_fill_ld_abs_vlan_push_pop,
13266 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13267 		.result = ACCEPT,
13268 		.retval = 0xbef,
13269 	},
13270 	{
13271 		"ld_abs: vlan + abs, test 2",
13272 		.insns = {
13273 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13274 			BPF_LD_ABS(BPF_B, 0),
13275 			BPF_LD_ABS(BPF_H, 0),
13276 			BPF_LD_ABS(BPF_W, 0),
13277 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
13278 			BPF_MOV64_IMM(BPF_REG_6, 0),
13279 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
13280 			BPF_MOV64_IMM(BPF_REG_2, 1),
13281 			BPF_MOV64_IMM(BPF_REG_3, 2),
13282 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13283 				     BPF_FUNC_skb_vlan_push),
13284 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
13285 			BPF_LD_ABS(BPF_B, 0),
13286 			BPF_LD_ABS(BPF_H, 0),
13287 			BPF_LD_ABS(BPF_W, 0),
13288 			BPF_MOV64_IMM(BPF_REG_0, 42),
13289 			BPF_EXIT_INSN(),
13290 		},
13291 		.data = {
13292 			0x34,
13293 		},
13294 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13295 		.result = ACCEPT,
13296 		.retval = 42,
13297 	},
13298 	{
13299 		"ld_abs: jump around ld_abs",
13300 		.insns = { },
13301 		.data = {
13302 			10, 11,
13303 		},
13304 		.fill_helper = bpf_fill_jump_around_ld_abs,
13305 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13306 		.result = ACCEPT,
13307 		.retval = 10,
13308 	},
13309 	{
13310 		"ld_dw: xor semi-random 64 bit imms, test 1",
13311 		.insns = { },
13312 		.data = { },
13313 		.fill_helper = bpf_fill_rand_ld_dw,
13314 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13315 		.result = ACCEPT,
13316 		.retval = 4090,
13317 	},
13318 	{
13319 		"ld_dw: xor semi-random 64 bit imms, test 2",
13320 		.insns = { },
13321 		.data = { },
13322 		.fill_helper = bpf_fill_rand_ld_dw,
13323 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13324 		.result = ACCEPT,
13325 		.retval = 2047,
13326 	},
13327 	{
13328 		"ld_dw: xor semi-random 64 bit imms, test 3",
13329 		.insns = { },
13330 		.data = { },
13331 		.fill_helper = bpf_fill_rand_ld_dw,
13332 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13333 		.result = ACCEPT,
13334 		.retval = 511,
13335 	},
13336 	{
13337 		"ld_dw: xor semi-random 64 bit imms, test 4",
13338 		.insns = { },
13339 		.data = { },
13340 		.fill_helper = bpf_fill_rand_ld_dw,
13341 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13342 		.result = ACCEPT,
13343 		.retval = 5,
13344 	},
13345 	{
13346 		"pass unmodified ctx pointer to helper",
13347 		.insns = {
13348 			BPF_MOV64_IMM(BPF_REG_2, 0),
13349 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13350 				     BPF_FUNC_csum_update),
13351 			BPF_MOV64_IMM(BPF_REG_0, 0),
13352 			BPF_EXIT_INSN(),
13353 		},
13354 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13355 		.result = ACCEPT,
13356 	},
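	/* Reference tracking tests: BPF_SK_LOOKUP acquires a socket
	 * reference that must be NULL-checked and released via
	 * bpf_sk_release() on every path before the program exits.
	 */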
13357 	{
13358 		"reference tracking: leak potential reference",
13359 		.insns = {
13360 			BPF_SK_LOOKUP,
13361 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), /* leak reference */
13362 			BPF_EXIT_INSN(),
13363 		},
13364 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13365 		.errstr = "Unreleased reference",
13366 		.result = REJECT,
13367 	},
13368 	{
13369 		"reference tracking: leak potential reference on stack",
13370 		.insns = {
13371 			BPF_SK_LOOKUP,
13372 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13373 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13374 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
13375 			BPF_MOV64_IMM(BPF_REG_0, 0),
13376 			BPF_EXIT_INSN(),
13377 		},
13378 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13379 		.errstr = "Unreleased reference",
13380 		.result = REJECT,
13381 	},
13382 	{
13383 		"reference tracking: leak potential reference on stack 2",
13384 		.insns = {
13385 			BPF_SK_LOOKUP,
13386 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13387 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13388 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
13389 			BPF_MOV64_IMM(BPF_REG_0, 0),
13390 			BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
13391 			BPF_EXIT_INSN(),
13392 		},
13393 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13394 		.errstr = "Unreleased reference",
13395 		.result = REJECT,
13396 	},
13397 	{
13398 		"reference tracking: zero potential reference",
13399 		.insns = {
13400 			BPF_SK_LOOKUP,
13401 			BPF_MOV64_IMM(BPF_REG_0, 0), /* leak reference */
13402 			BPF_EXIT_INSN(),
13403 		},
13404 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13405 		.errstr = "Unreleased reference",
13406 		.result = REJECT,
13407 	},
13408 	{
13409 		"reference tracking: copy and zero potential references",
13410 		.insns = {
13411 			BPF_SK_LOOKUP,
13412 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
13413 			BPF_MOV64_IMM(BPF_REG_0, 0),
13414 			BPF_MOV64_IMM(BPF_REG_7, 0), /* leak reference */
13415 			BPF_EXIT_INSN(),
13416 		},
13417 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13418 		.errstr = "Unreleased reference",
13419 		.result = REJECT,
13420 	},
13421 	{
13422 		"reference tracking: release reference without check",
13423 		.insns = {
13424 			BPF_SK_LOOKUP,
13425 			/* reference in r0 may be NULL */
13426 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13427 			BPF_MOV64_IMM(BPF_REG_2, 0),
13428 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13429 			BPF_EXIT_INSN(),
13430 		},
13431 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13432 		.errstr = "type=sock_or_null expected=sock",
13433 		.result = REJECT,
13434 	},
13435 	{
13436 		"reference tracking: release reference",
13437 		.insns = {
13438 			BPF_SK_LOOKUP,
13439 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13440 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13441 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13442 			BPF_EXIT_INSN(),
13443 		},
13444 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13445 		.result = ACCEPT,
13446 	},
13447 	{
13448 		"reference tracking: release reference 2",
13449 		.insns = {
13450 			BPF_SK_LOOKUP,
13451 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13452 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
13453 			BPF_EXIT_INSN(),
13454 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13455 			BPF_EXIT_INSN(),
13456 		},
13457 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13458 		.result = ACCEPT,
13459 	},
13460 	{
13461 		"reference tracking: release reference twice",
13462 		.insns = {
13463 			BPF_SK_LOOKUP,
13464 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13465 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13466 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13467 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13468 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13469 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13470 			BPF_EXIT_INSN(),
13471 		},
13472 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13473 		.errstr = "type=inv expected=sock",
13474 		.result = REJECT,
13475 	},
13476 	{
13477 		"reference tracking: release reference twice inside branch",
13478 		.insns = {
13479 			BPF_SK_LOOKUP,
13480 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13481 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13482 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), /* goto end */
13483 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13484 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13485 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13486 			BPF_EXIT_INSN(),
13487 		},
13488 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13489 		.errstr = "type=inv expected=sock",
13490 		.result = REJECT,
13491 	},
13492 	{
13493 		"reference tracking: alloc, check, free in one subbranch",
13494 		.insns = {
13495 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
13496 				    offsetof(struct __sk_buff, data)),
13497 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
13498 				    offsetof(struct __sk_buff, data_end)),
13499 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
13500 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
13501 			/* if (offsetof(skb, mark) > data_len) exit; */
13502 			BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
13503 			BPF_EXIT_INSN(),
13504 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
13505 				    offsetof(struct __sk_buff, mark)),
13506 			BPF_SK_LOOKUP,
13507 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 1), /* mark == 0? */
13508 			/* Leak reference in R0 */
13509 			BPF_EXIT_INSN(),
13510 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
13511 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13512 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13513 			BPF_EXIT_INSN(),
13514 		},
13515 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13516 		.errstr = "Unreleased reference",
13517 		.result = REJECT,
13518 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
13519 	},
13520 	{
13521 		"reference tracking: alloc, check, free in both subbranches",
13522 		.insns = {
13523 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
13524 				    offsetof(struct __sk_buff, data)),
13525 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
13526 				    offsetof(struct __sk_buff, data_end)),
13527 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
13528 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
13529 			/* if (offsetof(skb, mark) > data_len) exit; */
13530 			BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
13531 			BPF_EXIT_INSN(),
13532 			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
13533 				    offsetof(struct __sk_buff, mark)),
13534 			BPF_SK_LOOKUP,
13535 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 4), /* mark == 0? */
13536 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
13537 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13538 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13539 			BPF_EXIT_INSN(),
13540 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
13541 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13542 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13543 			BPF_EXIT_INSN(),
13544 		},
13545 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13546 		.result = ACCEPT,
13547 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
13548 	},
13549 	{
13550 		"reference tracking in call: free reference in subprog",
13551 		.insns = {
13552 			BPF_SK_LOOKUP,
13553 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
13554 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
13555 			BPF_MOV64_IMM(BPF_REG_0, 0),
13556 			BPF_EXIT_INSN(),
13557 
13558 			/* subprog 1 */
13559 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
13560 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
13561 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13562 			BPF_EXIT_INSN(),
13563 		},
13564 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13565 		.result = ACCEPT,
13566 	},
13567 	{
13568 		"pass modified ctx pointer to helper, 1",
13569 		.insns = {
13570 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
13571 			BPF_MOV64_IMM(BPF_REG_2, 0),
13572 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13573 				     BPF_FUNC_csum_update),
13574 			BPF_MOV64_IMM(BPF_REG_0, 0),
13575 			BPF_EXIT_INSN(),
13576 		},
13577 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13578 		.result = REJECT,
13579 		.errstr = "dereference of modified ctx ptr",
13580 	},
13581 	{
13582 		"pass modified ctx pointer to helper, 2",
13583 		.insns = {
13584 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
13585 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13586 				     BPF_FUNC_get_socket_cookie),
13587 			BPF_MOV64_IMM(BPF_REG_0, 0),
13588 			BPF_EXIT_INSN(),
13589 		},
13590 		.result_unpriv = REJECT,
13591 		.result = REJECT,
13592 		.errstr_unpriv = "dereference of modified ctx ptr",
13593 		.errstr = "dereference of modified ctx ptr",
13594 	},
13595 	{
13596 		"pass modified ctx pointer to helper, 3",
13597 		.insns = {
13598 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 0),
13599 			BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 4),
13600 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
13601 			BPF_MOV64_IMM(BPF_REG_2, 0),
13602 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13603 				     BPF_FUNC_csum_update),
13604 			BPF_MOV64_IMM(BPF_REG_0, 0),
13605 			BPF_EXIT_INSN(),
13606 		},
13607 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13608 		.result = REJECT,
13609 		.errstr = "variable ctx access var_off=(0x0; 0x4)",
13610 	},
13611 	{
13612 		"mov64 src == dst",
13613 		.insns = {
13614 			BPF_MOV64_IMM(BPF_REG_2, 0),
13615 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_2),
13616 			/* Check bounds are OK */
13617 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
13618 			BPF_MOV64_IMM(BPF_REG_0, 0),
13619 			BPF_EXIT_INSN(),
13620 		},
13621 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13622 		.result = ACCEPT,
13623 	},
13624 	{
13625 		"mov64 src != dst",
13626 		.insns = {
13627 			BPF_MOV64_IMM(BPF_REG_3, 0),
13628 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
13629 			/* Check bounds are OK */
13630 			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
13631 			BPF_MOV64_IMM(BPF_REG_0, 0),
13632 			BPF_EXIT_INSN(),
13633 		},
13634 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13635 		.result = ACCEPT,
13636 	},
13637 	{
13638 		"reference tracking in call: free reference in subprog and outside",
13639 		.insns = {
13640 			BPF_SK_LOOKUP,
13641 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
13642 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13643 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
13644 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13645 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13646 			BPF_EXIT_INSN(),
13647 
13648 			/* subprog 1 */
13649 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
13650 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
13651 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13652 			BPF_EXIT_INSN(),
13653 		},
13654 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13655 		.errstr = "type=inv expected=sock",
13656 		.result = REJECT,
13657 	},
13658 	{
13659 		"reference tracking in call: alloc & leak reference in subprog",
13660 		.insns = {
13661 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13662 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13663 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
13664 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13665 			BPF_MOV64_IMM(BPF_REG_0, 0),
13666 			BPF_EXIT_INSN(),
13667 
13668 			/* subprog 1 */
13669 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_4),
13670 			BPF_SK_LOOKUP,
13671 			/* spill unchecked sk_ptr into stack of caller */
13672 			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
13673 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13674 			BPF_EXIT_INSN(),
13675 		},
13676 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13677 		.errstr = "Unreleased reference",
13678 		.result = REJECT,
13679 	},
13680 	{
13681 		"reference tracking in call: alloc in subprog, release outside",
13682 		.insns = {
13683 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13684 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
13685 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13686 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13687 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13688 			BPF_EXIT_INSN(),
13689 
13690 			/* subprog 1 */
13691 			BPF_SK_LOOKUP,
13692 			BPF_EXIT_INSN(), /* return sk */
13693 		},
13694 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13695 		.retval = POINTER_VALUE,
13696 		.result = ACCEPT,
13697 	},
13698 	{
13699 		"reference tracking in call: sk_ptr leak into caller stack",
13700 		.insns = {
13701 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13702 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13703 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
13704 			BPF_MOV64_IMM(BPF_REG_0, 0),
13705 			BPF_EXIT_INSN(),
13706 
13707 			/* subprog 1 */
13708 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
13709 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
13710 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
13711 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
13712 			/* spill unchecked sk_ptr into stack of caller */
13713 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
13714 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
13715 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
13716 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
13717 			BPF_EXIT_INSN(),
13718 
13719 			/* subprog 2 */
13720 			BPF_SK_LOOKUP,
13721 			BPF_EXIT_INSN(),
13722 		},
13723 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13724 		.errstr = "Unreleased reference",
13725 		.result = REJECT,
13726 	},
13727 	{
13728 		"reference tracking in call: sk_ptr spill into caller stack",
13729 		.insns = {
13730 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
13731 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
13732 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
13733 			BPF_MOV64_IMM(BPF_REG_0, 0),
13734 			BPF_EXIT_INSN(),
13735 
13736 			/* subprog 1 */
13737 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
13738 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
13739 			BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
13740 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
13741 			/* spill unchecked sk_ptr into stack of caller */
13742 			BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
13743 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
13744 			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
13745 			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
13746 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
13747 			/* now the sk_ptr is verified, free the reference */
13748 			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_4, 0),
13749 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13750 			BPF_EXIT_INSN(),
13751 
13752 			/* subprog 2 */
13753 			BPF_SK_LOOKUP,
13754 			BPF_EXIT_INSN(),
13755 		},
13756 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13757 		.result = ACCEPT,
13758 	},
13759 	{
13760 		"reference tracking: allow LD_ABS",
13761 		.insns = {
13762 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13763 			BPF_SK_LOOKUP,
13764 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13765 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13766 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13767 			BPF_LD_ABS(BPF_B, 0),
13768 			BPF_LD_ABS(BPF_H, 0),
13769 			BPF_LD_ABS(BPF_W, 0),
13770 			BPF_EXIT_INSN(),
13771 		},
13772 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13773 		.result = ACCEPT,
13774 	},
13775 	{
13776 		"reference tracking: forbid LD_ABS while holding reference",
13777 		.insns = {
13778 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13779 			BPF_SK_LOOKUP,
13780 			BPF_LD_ABS(BPF_B, 0),
13781 			BPF_LD_ABS(BPF_H, 0),
13782 			BPF_LD_ABS(BPF_W, 0),
13783 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13784 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13785 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13786 			BPF_EXIT_INSN(),
13787 		},
13788 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13789 		.errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",
13790 		.result = REJECT,
13791 	},
13792 	{
13793 		"reference tracking: allow LD_IND",
13794 		.insns = {
13795 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13796 			BPF_SK_LOOKUP,
13797 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13798 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13799 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13800 			BPF_MOV64_IMM(BPF_REG_7, 1),
13801 			BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
13802 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
13803 			BPF_EXIT_INSN(),
13804 		},
13805 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13806 		.result = ACCEPT,
13807 		.retval = 1,
13808 	},
13809 	{
13810 		"reference tracking: forbid LD_IND while holding reference",
13811 		.insns = {
13812 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
13813 			BPF_SK_LOOKUP,
13814 			BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
13815 			BPF_MOV64_IMM(BPF_REG_7, 1),
13816 			BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
13817 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
13818 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_4),
13819 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
13820 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13821 			BPF_EXIT_INSN(),
13822 		},
13823 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13824 		.errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",
13825 		.result = REJECT,
13826 	},
13827 	{
13828 		"reference tracking: check reference or tail call",
13829 		.insns = {
13830 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
13831 			BPF_SK_LOOKUP,
13832 			/* if (sk) bpf_sk_release() */
13833 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13834 			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 7),
13835 			/* bpf_tail_call() */
13836 			BPF_MOV64_IMM(BPF_REG_3, 2),
13837 			BPF_LD_MAP_FD(BPF_REG_2, 0),
13838 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
13839 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13840 				     BPF_FUNC_tail_call),
13841 			BPF_MOV64_IMM(BPF_REG_0, 0),
13842 			BPF_EXIT_INSN(),
13843 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13844 			BPF_EXIT_INSN(),
13845 		},
13846 		.fixup_prog1 = { 17 },
13847 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13848 		.result = ACCEPT,
13849 	},
13850 	{
13851 		"reference tracking: release reference then tail call",
13852 		.insns = {
13853 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
13854 			BPF_SK_LOOKUP,
13855 			/* if (sk) bpf_sk_release() */
13856 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13857 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
13858 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13859 			/* bpf_tail_call() */
13860 			BPF_MOV64_IMM(BPF_REG_3, 2),
13861 			BPF_LD_MAP_FD(BPF_REG_2, 0),
13862 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
13863 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13864 				     BPF_FUNC_tail_call),
13865 			BPF_MOV64_IMM(BPF_REG_0, 0),
13866 			BPF_EXIT_INSN(),
13867 		},
13868 		.fixup_prog1 = { 18 },
13869 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13870 		.result = ACCEPT,
13871 	},
13872 	{
13873 		"reference tracking: leak possible reference over tail call",
13874 		.insns = {
13875 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
13876 			/* Look up socket and store in REG_6 */
13877 			BPF_SK_LOOKUP,
13878 			/* bpf_tail_call() */
13879 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13880 			BPF_MOV64_IMM(BPF_REG_3, 2),
13881 			BPF_LD_MAP_FD(BPF_REG_2, 0),
13882 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
13883 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13884 				     BPF_FUNC_tail_call),
13885 			BPF_MOV64_IMM(BPF_REG_0, 0),
13886 			/* if (sk) bpf_sk_release() */
13887 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13888 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
13889 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13890 			BPF_EXIT_INSN(),
13891 		},
13892 		.fixup_prog1 = { 16 },
13893 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13894 		.errstr = "tail_call would lead to reference leak",
13895 		.result = REJECT,
13896 	},
13897 	{
13898 		"reference tracking: leak checked reference over tail call",
13899 		.insns = {
13900 			BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
13901 			/* Look up socket and store in REG_6 */
13902 			BPF_SK_LOOKUP,
13903 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13904 			/* if (!sk) goto end */
13905 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
13906 			/* bpf_tail_call() */
13907 			BPF_MOV64_IMM(BPF_REG_3, 0),
13908 			BPF_LD_MAP_FD(BPF_REG_2, 0),
13909 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
13910 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
13911 				     BPF_FUNC_tail_call),
13912 			BPF_MOV64_IMM(BPF_REG_0, 0),
13913 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13914 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13915 			BPF_EXIT_INSN(),
13916 		},
13917 		.fixup_prog1 = { 17 },
13918 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13919 		.errstr = "tail_call would lead to reference leak",
13920 		.result = REJECT,
13921 	},
13922 	{
13923 		"reference tracking: mangle and release sock_or_null",
13924 		.insns = {
13925 			BPF_SK_LOOKUP,
13926 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13927 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
13928 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
13929 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13930 			BPF_EXIT_INSN(),
13931 		},
13932 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13933 		.errstr = "R1 pointer arithmetic on sock_or_null prohibited",
13934 		.result = REJECT,
13935 	},
13936 	{
13937 		"reference tracking: mangle and release sock",
13938 		.insns = {
13939 			BPF_SK_LOOKUP,
13940 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
13941 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
13942 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
13943 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13944 			BPF_EXIT_INSN(),
13945 		},
13946 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13947 		.errstr = "R1 pointer arithmetic on sock prohibited",
13948 		.result = REJECT,
13949 	},
13950 	{
13951 		"reference tracking: access member",
13952 		.insns = {
13953 			BPF_SK_LOOKUP,
13954 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13955 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
13956 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
13957 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13958 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13959 			BPF_EXIT_INSN(),
13960 		},
13961 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13962 		.result = ACCEPT,
13963 	},
13964 	{
13965 		"reference tracking: write to member",
13966 		.insns = {
13967 			BPF_SK_LOOKUP,
13968 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13969 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
13970 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13971 			BPF_LD_IMM64(BPF_REG_2, 42),
13972 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_2,
13973 				    offsetof(struct bpf_sock, mark)),
13974 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13975 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13976 			BPF_LD_IMM64(BPF_REG_0, 0),
13977 			BPF_EXIT_INSN(),
13978 		},
13979 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13980 		.errstr = "cannot write into socket",
13981 		.result = REJECT,
13982 	},
13983 	{
13984 		"reference tracking: invalid 64-bit access of member",
13985 		.insns = {
13986 			BPF_SK_LOOKUP,
13987 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
13988 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
13989 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
13990 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
13991 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
13992 			BPF_EXIT_INSN(),
13993 		},
13994 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
13995 		.errstr = "invalid bpf_sock access off=0 size=8",
13996 		.result = REJECT,
13997 	},
13998 	{
13999 		"reference tracking: access after release",
14000 		.insns = {
14001 			BPF_SK_LOOKUP,
14002 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
14003 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
14004 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
14005 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
14006 			BPF_EXIT_INSN(),
14007 		},
14008 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14009 		.errstr = "!read_ok",
14010 		.result = REJECT,
14011 	},
14012 	{
14013 		"reference tracking: direct access for lookup",
14014 		.insns = {
14015 			/* Check that the packet is at least 64B long */
14016 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
14017 				    offsetof(struct __sk_buff, data)),
14018 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
14019 				    offsetof(struct __sk_buff, data_end)),
14020 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
14021 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
14022 			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
14023 			/* sk = sk_lookup_tcp(ctx, skb->data, ...) */
14024 			BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)),
14025 			BPF_MOV64_IMM(BPF_REG_4, 0),
14026 			BPF_MOV64_IMM(BPF_REG_5, 0),
14027 			BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp),
14028 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
14029 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
14030 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
14031 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
14032 			BPF_EMIT_CALL(BPF_FUNC_sk_release),
14033 			BPF_EXIT_INSN(),
14034 		},
14035 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14036 		.result = ACCEPT,
14037 	},
14038 	{
14039 		"calls: ctx read at start of subprog",
14040 		.insns = {
14041 			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
14042 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
14043 			BPF_JMP_REG(BPF_JSGT, BPF_REG_0, BPF_REG_0, 0),
14044 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
14045 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
14046 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
14047 			BPF_EXIT_INSN(),
14048 			BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
14049 			BPF_MOV64_IMM(BPF_REG_0, 0),
14050 			BPF_EXIT_INSN(),
14051 		},
14052 		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
14053 		.errstr_unpriv = "function calls to other bpf functions are allowed for root only",
14054 		.result_unpriv = REJECT,
14055 		.result = ACCEPT,
14056 	},
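	/* skb->wire_len is only exposed to tc (BPF_PROG_TYPE_SCHED_CLS)
	 * programs and is read-only even there.
	 */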
14057 	{
14058 		"check wire_len is not readable by sockets",
14059 		.insns = {
14060 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
14061 				    offsetof(struct __sk_buff, wire_len)),
14062 			BPF_EXIT_INSN(),
14063 		},
14064 		.errstr = "invalid bpf_context access",
14065 		.result = REJECT,
14066 	},
14067 	{
14068 		"check wire_len is readable by tc classifier",
14069 		.insns = {
14070 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
14071 				    offsetof(struct __sk_buff, wire_len)),
14072 			BPF_EXIT_INSN(),
14073 		},
14074 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14075 		.result = ACCEPT,
14076 	},
14077 	{
14078 		"check wire_len is not writable by tc classifier",
14079 		.insns = {
14080 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
14081 				    offsetof(struct __sk_buff, wire_len)),
14082 			BPF_EXIT_INSN(),
14083 		},
14084 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
14085 		.errstr = "invalid bpf_context access",
14086 		.errstr_unpriv = "R1 leaks addr",
14087 		.result = REJECT,
14088 	},
14089 };
14090 
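/* The insns[] array of each test is fixed-size and zero padded, so walk
 * backwards to the last non-zero instruction to find the real program
 * length.
 */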
14091 static int probe_filter_length(const struct bpf_insn *fp)
14092 {
14093 	int len;
14094 
14095 	for (len = MAX_INSNS - 1; len > 0; --len)
14096 		if (fp[len].code != 0 || fp[len].imm != 0)
14097 			break;
14098 	return len + 1;
14099 }
14100 
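/* Create a single-entry map of the given type for the fixup tables below;
 * hash maps are created with BPF_F_NO_PREALLOC.
 */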
14101 static int create_map(uint32_t type, uint32_t size_key,
14102 		      uint32_t size_value, uint32_t max_elem)
14103 {
14104 	int fd;
14105 
14106 	fd = bpf_create_map(type, size_key, size_value, max_elem,
14107 			    type == BPF_MAP_TYPE_HASH ? BPF_F_NO_PREALLOC : 0);
14108 	if (fd < 0)
14109 		printf("Failed to create hash map '%s'!\n", strerror(errno));
14110 
14111 	return fd;
14112 }
14113 
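/* First dummy program installed into the tail call prog array: it simply
 * returns 42.
 */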
14114 static int create_prog_dummy1(enum bpf_prog_type prog_type)
14115 {
14116 	struct bpf_insn prog[] = {
14117 		BPF_MOV64_IMM(BPF_REG_0, 42),
14118 		BPF_EXIT_INSN(),
14119 	};
14120 
14121 	return bpf_load_program(prog_type, prog,
14122 				ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
14123 }
14124 
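/* Second dummy program: it tail-calls into slot 'idx' of map 'mfd' and
 * returns 41 if the tail call fails.
 */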
14125 static int create_prog_dummy2(enum bpf_prog_type prog_type, int mfd, int idx)
14126 {
14127 	struct bpf_insn prog[] = {
14128 		BPF_MOV64_IMM(BPF_REG_3, idx),
14129 		BPF_LD_MAP_FD(BPF_REG_2, mfd),
14130 		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
14131 			     BPF_FUNC_tail_call),
14132 		BPF_MOV64_IMM(BPF_REG_0, 41),
14133 		BPF_EXIT_INSN(),
14134 	};
14135 
14136 	return bpf_load_program(prog_type, prog,
14137 				ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
14138 }
14139 
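/* Build the BPF_MAP_TYPE_PROG_ARRAY referenced via fixup_prog1/fixup_prog2:
 * slot p1key holds dummy1 and slot 1 holds dummy2. The program fds can be
 * closed once installed since the map holds its own references.
 */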
14140 static int create_prog_array(enum bpf_prog_type prog_type, uint32_t max_elem,
14141 			     int p1key)
14142 {
14143 	int p2key = 1;
14144 	int mfd, p1fd, p2fd;
14145 
14146 	mfd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
14147 			     sizeof(int), max_elem, 0);
14148 	if (mfd < 0) {
14149 		printf("Failed to create prog array '%s'!\n", strerror(errno));
14150 		return -1;
14151 	}
14152 
14153 	p1fd = create_prog_dummy1(prog_type);
14154 	p2fd = create_prog_dummy2(prog_type, mfd, p2key);
14155 	if (p1fd < 0 || p2fd < 0)
14156 		goto out;
14157 	if (bpf_map_update_elem(mfd, &p1key, &p1fd, BPF_ANY) < 0)
14158 		goto out;
14159 	if (bpf_map_update_elem(mfd, &p2key, &p2fd, BPF_ANY) < 0)
14160 		goto out;
14161 	close(p2fd);
14162 	close(p1fd);
14163 
14164 	return mfd;
14165 out:
14166 	close(p2fd);
14167 	close(p1fd);
14168 	close(mfd);
14169 	return -1;
14170 }
14171 
14172 static int create_map_in_map(void)
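/* The outer BPF_MAP_TYPE_ARRAY_OF_MAPS only needs the inner map fd as a
 * template for the value type, so the inner fd is closed again right after
 * the outer map has been created.
 */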
14173 {
14174 	int inner_map_fd, outer_map_fd;
14175 
14176 	inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
14177 				      sizeof(int), 1, 0);
14178 	if (inner_map_fd < 0) {
14179 		printf("Failed to create array '%s'!\n", strerror(errno));
14180 		return inner_map_fd;
14181 	}
14182 
14183 	outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
14184 					     sizeof(int), inner_map_fd, 1, 0);
14185 	if (outer_map_fd < 0)
14186 		printf("Failed to create array of maps '%s'!\n",
14187 		       strerror(errno));
14188 
14189 	close(inner_map_fd);
14190 
14191 	return outer_map_fd;
14192 }
14193 
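/* (Per-CPU) cgroup storage maps are keyed by struct bpf_cgroup_storage_key
 * and are created with max_entries == 0.
 */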
14194 static int create_cgroup_storage(bool percpu)
14195 {
14196 	enum bpf_map_type type = percpu ? BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE :
14197 		BPF_MAP_TYPE_CGROUP_STORAGE;
14198 	int fd;
14199 
14200 	fd = bpf_create_map(type, sizeof(struct bpf_cgroup_storage_key),
14201 			    TEST_DATA_LEN, 0, 0);
14202 	if (fd < 0)
14203 		printf("Failed to create cgroup storage '%s'!\n",
14204 		       strerror(errno));
14205 
14206 	return fd;
14207 }
14208 
14209 static char bpf_vlog[UINT_MAX >> 8];
14210 
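/* Patch map and prog-array file descriptors into the test program.  Each
 * fixup_* array lists instruction offsets whose imm field gets rewritten
 * with the fd of the corresponding map; the offsets typically point at the
 * first instruction of a BPF_LD_MAP_FD() placeholder.  The lists are
 * 0-terminated, so instruction 0 itself can never be a fixup target.  The
 * created fds are recorded in map_fds[] so do_test_single() can close them
 * once the test is done.
 */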
14211 static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
14212 			  struct bpf_insn *prog, int *map_fds)
14213 {
14214 	int *fixup_map_hash_8b = test->fixup_map_hash_8b;
14215 	int *fixup_map_hash_48b = test->fixup_map_hash_48b;
14216 	int *fixup_map_hash_16b = test->fixup_map_hash_16b;
14217 	int *fixup_map_array_48b = test->fixup_map_array_48b;
14218 	int *fixup_map_sockmap = test->fixup_map_sockmap;
14219 	int *fixup_map_sockhash = test->fixup_map_sockhash;
14220 	int *fixup_map_xskmap = test->fixup_map_xskmap;
14221 	int *fixup_map_stacktrace = test->fixup_map_stacktrace;
14222 	int *fixup_prog1 = test->fixup_prog1;
14223 	int *fixup_prog2 = test->fixup_prog2;
14224 	int *fixup_map_in_map = test->fixup_map_in_map;
14225 	int *fixup_cgroup_storage = test->fixup_cgroup_storage;
14226 	int *fixup_percpu_cgroup_storage = test->fixup_percpu_cgroup_storage;
14227 
14228 	if (test->fill_helper)
14229 		test->fill_helper(test);
14230 
14231 	/* Allocating hash tables with a single element is fine here, since
14232 	 * we only exercise the verifier and never do a runtime lookup, so
14233 	 * the only thing that really matters is the value size.
14234 	 */
14235 	if (*fixup_map_hash_8b) {
14236 		map_fds[0] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
14237 					sizeof(long long), 1);
14238 		do {
14239 			prog[*fixup_map_hash_8b].imm = map_fds[0];
14240 			fixup_map_hash_8b++;
14241 		} while (*fixup_map_hash_8b);
14242 	}
14243 
14244 	if (*fixup_map_hash_48b) {
14245 		map_fds[1] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
14246 					sizeof(struct test_val), 1);
14247 		do {
14248 			prog[*fixup_map_hash_48b].imm = map_fds[1];
14249 			fixup_map_hash_48b++;
14250 		} while (*fixup_map_hash_48b);
14251 	}
14252 
14253 	if (*fixup_map_hash_16b) {
14254 		map_fds[2] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
14255 					sizeof(struct other_val), 1);
14256 		do {
14257 			prog[*fixup_map_hash_16b].imm = map_fds[2];
14258 			fixup_map_hash_16b++;
14259 		} while (*fixup_map_hash_16b);
14260 	}
14261 
14262 	if (*fixup_map_array_48b) {
14263 		map_fds[3] = create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
14264 					sizeof(struct test_val), 1);
14265 		do {
14266 			prog[*fixup_map_array_48b].imm = map_fds[3];
14267 			fixup_map_array_48b++;
14268 		} while (*fixup_map_array_48b);
14269 	}
14270 
14271 	if (*fixup_prog1) {
14272 		map_fds[4] = create_prog_array(prog_type, 4, 0);
14273 		do {
14274 			prog[*fixup_prog1].imm = map_fds[4];
14275 			fixup_prog1++;
14276 		} while (*fixup_prog1);
14277 	}
14278 
14279 	if (*fixup_prog2) {
14280 		map_fds[5] = create_prog_array(prog_type, 8, 7);
14281 		do {
14282 			prog[*fixup_prog2].imm = map_fds[5];
14283 			fixup_prog2++;
14284 		} while (*fixup_prog2);
14285 	}
14286 
14287 	if (*fixup_map_in_map) {
14288 		map_fds[6] = create_map_in_map();
14289 		do {
14290 			prog[*fixup_map_in_map].imm = map_fds[6];
14291 			fixup_map_in_map++;
14292 		} while (*fixup_map_in_map);
14293 	}
14294 
14295 	if (*fixup_cgroup_storage) {
14296 		map_fds[7] = create_cgroup_storage(false);
14297 		do {
14298 			prog[*fixup_cgroup_storage].imm = map_fds[7];
14299 			fixup_cgroup_storage++;
14300 		} while (*fixup_cgroup_storage);
14301 	}
14302 
14303 	if (*fixup_percpu_cgroup_storage) {
14304 		map_fds[8] = create_cgroup_storage(true);
14305 		do {
14306 			prog[*fixup_percpu_cgroup_storage].imm = map_fds[8];
14307 			fixup_percpu_cgroup_storage++;
14308 		} while (*fixup_percpu_cgroup_storage);
14309 	}
14310 	if (*fixup_map_sockmap) {
14311 		map_fds[9] = create_map(BPF_MAP_TYPE_SOCKMAP, sizeof(int),
14312 					sizeof(int), 1);
14313 		do {
14314 			prog[*fixup_map_sockmap].imm = map_fds[9];
14315 			fixup_map_sockmap++;
14316 		} while (*fixup_map_sockmap);
14317 	}
14318 	if (*fixup_map_sockhash) {
14319 		map_fds[10] = create_map(BPF_MAP_TYPE_SOCKHASH, sizeof(int),
14320 					sizeof(int), 1);
14321 		do {
14322 			prog[*fixup_map_sockhash].imm = map_fds[10];
14323 			fixup_map_sockhash++;
14324 		} while (*fixup_map_sockhash);
14325 	}
14326 	if (*fixup_map_xskmap) {
14327 		map_fds[11] = create_map(BPF_MAP_TYPE_XSKMAP, sizeof(int),
14328 					sizeof(int), 1);
14329 		do {
14330 			prog[*fixup_map_xskmap].imm = map_fds[11];
14331 			fixup_map_xskmap++;
14332 		} while (*fixup_map_xskmap);
14333 	}
14334 	if (*fixup_map_stacktrace) {
14335 		map_fds[12] = create_map(BPF_MAP_TYPE_STACK_TRACE, sizeof(__u32),
14336 					 sizeof(__u64), 1);
14337 		do {
14338 			prog[*fixup_map_stacktrace].imm = map_fds[12];
14339 			fixup_map_stacktrace++;
14340 		} while (*fixup_map_stacktrace);
14341 	}
14342 }
14343 
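/* Raise or drop CAP_SYS_ADMIN in the effective set so that a privileged
 * run of the suite can temporarily masquerade as unprivileged.
 */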
14344 static int set_admin(bool admin)
14345 {
14346 	cap_t caps;
14347 	const cap_value_t cap_val = CAP_SYS_ADMIN;
14348 	int ret = -1;
14349 
14350 	caps = cap_get_proc();
14351 	if (!caps) {
14352 		perror("cap_get_proc");
14353 		return -1;
14354 	}
14355 	if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
14356 				admin ? CAP_SET : CAP_CLEAR)) {
14357 		perror("cap_set_flag");
14358 		goto out;
14359 	}
14360 	if (cap_set_proc(caps)) {
14361 		perror("cap_set_proc");
14362 		goto out;
14363 	}
14364 	ret = 0;
14365 out:
14366 	if (cap_free(caps))
14367 		perror("cap_free");
14368 	return ret;
14369 }
14370 
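/* Load one test program and check the verifier verdict and error message
 * against the test's expectations (the *_unpriv variants are used when
 * @unpriv is set).  If the program loads, it is additionally executed via
 * bpf_prog_test_run() on the test's data buffer and the return value is
 * compared against the expected retval.
 */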
14371 static void do_test_single(struct bpf_test *test, bool unpriv,
14372 			   int *passes, int *errors)
14373 {
14374 	int fd_prog, expected_ret, alignment_prevented_execution;
14375 	int prog_len, prog_type = test->prog_type;
14376 	struct bpf_insn *prog = test->insns;
14377 	int map_fds[MAX_NR_MAPS];
14378 	const char *expected_err;
14379 	uint32_t expected_val;
14380 	uint32_t retval;
14381 	__u32 pflags;
14382 	int i, err;
14383 
14384 	for (i = 0; i < MAX_NR_MAPS; i++)
14385 		map_fds[i] = -1;
14386 
14387 	if (!prog_type)
14388 		prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
14389 	do_test_fixup(test, prog_type, prog, map_fds);
14390 	prog_len = probe_filter_length(prog);
14391 
14392 	pflags = 0;
14393 	if (test->flags & F_LOAD_WITH_STRICT_ALIGNMENT)
14394 		pflags |= BPF_F_STRICT_ALIGNMENT;
14395 	if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
14396 		pflags |= BPF_F_ANY_ALIGNMENT;
14397 	fd_prog = bpf_verify_program(prog_type, prog, prog_len, pflags,
14398 				     "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);
14399 
14400 	expected_ret = unpriv && test->result_unpriv != UNDEF ?
14401 		       test->result_unpriv : test->result;
14402 	expected_err = unpriv && test->errstr_unpriv ?
14403 		       test->errstr_unpriv : test->errstr;
14404 	expected_val = unpriv && test->retval_unpriv ?
14405 		       test->retval_unpriv : test->retval;
14406 
14407 	alignment_prevented_execution = 0;
14408 
14409 	if (expected_ret == ACCEPT) {
14410 		if (fd_prog < 0) {
14411 			printf("FAIL\nFailed to load prog '%s'!\n",
14412 			       strerror(errno));
14413 			goto fail_log;
14414 		}
14415 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14416 		if (fd_prog >= 0 &&
14417 		    (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)) {
14418 			alignment_prevented_execution = 1;
14419 			goto test_ok;
14420 		}
14421 #endif
14422 	} else {
14423 		if (fd_prog >= 0) {
14424 			printf("FAIL\nProgram loaded unexpectedly!\n");
14425 			goto fail_log;
14426 		}
14427 		if (!strstr(bpf_vlog, expected_err)) {
14428 			printf("FAIL\nUnexpected error message!\n\tEXP: %s\n\tRES: %s\n",
14429 			      expected_err, bpf_vlog);
14430 			goto fail_log;
14431 		}
14432 	}
14433 
14434 	if (fd_prog >= 0) {
14435 		__u8 tmp[TEST_DATA_LEN << 2];
14436 		__u32 size_tmp = sizeof(tmp);
14437 
14438 		if (unpriv)
14439 			set_admin(true);
14440 		err = bpf_prog_test_run(fd_prog, 1, test->data,
14441 					sizeof(test->data), tmp, &size_tmp,
14442 					&retval, NULL);
14443 		if (unpriv)
14444 			set_admin(false);
14445 		if (err && errno != 524/*ENOTSUPP*/ && errno != EPERM) {
14446 			printf("Unexpected bpf_prog_test_run error\n");
14447 			goto fail_log;
14448 		}
14449 		if (!err && retval != expected_val &&
14450 		    expected_val != POINTER_VALUE) {
14451 			printf("FAIL retval %u != %u\n", retval, expected_val);
14452 			goto fail_log;
14453 		}
14454 	}
14455 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14456 test_ok:
14457 #endif
14458 	(*passes)++;
14459 	printf("OK%s\n", alignment_prevented_execution ?
14460 	       " (NOTE: not executed due to unknown alignment)" : "");
14461 close_fds:
14462 	close(fd_prog);
14463 	for (i = 0; i < MAX_NR_MAPS; i++)
14464 		close(map_fds[i]);
14465 	sched_yield();
14466 	return;
14467 fail_log:
14468 	(*errors)++;
14469 	printf("%s", bpf_vlog);
14470 	goto close_fds;
14471 }
14472 
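/* Report whether CAP_SYS_ADMIN is currently present in the effective set. */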
14473 static bool is_admin(void)
14474 {
14475 	cap_t caps;
14476 	cap_flag_value_t sysadmin = CAP_CLEAR;
14477 	const cap_value_t cap_val = CAP_SYS_ADMIN;
14478 
14479 #ifdef CAP_IS_SUPPORTED
14480 	if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
14481 		perror("CAP_IS_SUPPORTED");
14482 		return false;
14483 	}
14484 #endif
14485 	caps = cap_get_proc();
14486 	if (!caps) {
14487 		perror("cap_get_proc");
14488 		return false;
14489 	}
14490 	if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
14491 		perror("cap_get_flag");
14492 	if (cap_free(caps))
14493 		perror("cap_free");
14494 	return (sysadmin == CAP_SET);
14495 }
14496 
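/* Read the kernel.unprivileged_bpf_disabled sysctl; if it cannot be read,
 * conservatively treat unprivileged BPF as disabled.
 */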
14497 static void get_unpriv_disabled(void)
14498 {
14499 	char buf[2];
14500 	FILE *fd;
14501 
14502 	fd = fopen("/proc/sys/"UNPRIV_SYSCTL, "r");
14503 	if (!fd) {
14504 		perror("fopen /proc/sys/"UNPRIV_SYSCTL);
14505 		unpriv_disabled = true;
14506 		return;
14507 	}
14508 	if (fgets(buf, 2, fd) == buf && atoi(buf))
14509 		unpriv_disabled = true;
14510 	fclose(fd);
14511 }
14512 
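/* Only the program types loadable without CAP_SYS_ADMIN get an extra
 * unprivileged pass: socket filters (also the default when a test sets no
 * prog_type) and cgroup/skb programs.
 */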
14513 static bool test_as_unpriv(struct bpf_test *test)
14514 {
14515 	return !test->prog_type ||
14516 	       test->prog_type == BPF_PROG_TYPE_SOCKET_FILTER ||
14517 	       test->prog_type == BPF_PROG_TYPE_CGROUP_SKB;
14518 }
14519 
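/* Run tests[from..to): each test gets an unprivileged pass ("#<n>/u") where
 * applicable and a privileged pass ("#<n>/p").  When running as root, the
 * unprivileged pass is emulated by temporarily dropping CAP_SYS_ADMIN; when
 * running unprivileged, the privileged pass is skipped.
 */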
14520 static int do_test(bool unpriv, unsigned int from, unsigned int to)
14521 {
14522 	int i, passes = 0, errors = 0, skips = 0;
14523 
14524 	for (i = from; i < to; i++) {
14525 		struct bpf_test *test = &tests[i];
14526 
14527 		/* Program types that non-root cannot load get no unprivileged
14528 		 * run; the rest are skipped when unprivileged BPF is disabled.
14529 		 */
14530 		if (test_as_unpriv(test) && unpriv_disabled) {
14531 			printf("#%d/u %s SKIP\n", i, test->descr);
14532 			skips++;
14533 		} else if (test_as_unpriv(test)) {
14534 			if (!unpriv)
14535 				set_admin(false);
14536 			printf("#%d/u %s ", i, test->descr);
14537 			do_test_single(test, true, &passes, &errors);
14538 			if (!unpriv)
14539 				set_admin(true);
14540 		}
14541 
14542 		if (unpriv) {
14543 			printf("#%d/p %s SKIP\n", i, test->descr);
14544 			skips++;
14545 		} else {
14546 			printf("#%d/p %s ", i, test->descr);
14547 			do_test_single(test, false, &passes, &errors);
14548 		}
14549 	}
14550 
14551 	printf("Summary: %d PASSED, %d SKIPPED, %d FAILED\n", passes,
14552 	       skips, errors);
14553 	return errors ? EXIT_FAILURE : EXIT_SUCCESS;
14554 }
14555 
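/* Usage:
 *   test_verifier              run all tests
 *   test_verifier <n>          run only test <n>
 *   test_verifier <from> <to>  run tests <from> through <to> inclusive
 * Test numbers correspond to the indices printed in the result lines.
 */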
14556 int main(int argc, char **argv)
14557 {
14558 	unsigned int from = 0, to = ARRAY_SIZE(tests);
14559 	bool unpriv = !is_admin();
14560 
14561 	if (argc == 3) {
14562 		unsigned int l = atoi(argv[argc - 2]);
14563 		unsigned int u = atoi(argv[argc - 1]);
14564 
14565 		if (l < to && u < to) {
14566 			from = l;
14567 			to   = u + 1;
14568 		}
14569 	} else if (argc == 2) {
14570 		unsigned int t = atoi(argv[argc - 1]);
14571 
14572 		if (t < to) {
14573 			from = t;
14574 			to   = t + 1;
14575 		}
14576 	}
14577 
14578 	get_unpriv_disabled();
14579 	if (unpriv && unpriv_disabled) {
14580 		printf("Cannot run as unprivileged user while sysctl %s is set.\n",
14581 		       UNPRIV_SYSCTL);
14582 		return EXIT_FAILURE;
14583 	}
14584 
14585 	bpf_semi_rand_init();
14586 	return do_test(unpriv, from, to);
14587 }
14588